content large_stringlengths 0 6.46M | path large_stringlengths 3 331 | license_type large_stringclasses 2 values | repo_name large_stringlengths 5 125 | language large_stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 4 6.46M | extension large_stringclasses 75 values | text stringlengths 0 6.46M |
|---|---|---|---|---|---|---|---|---|---|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/critical_moments.R
\name{Critical_Moments_Functions}
\alias{Critical_Moments_Functions}
\alias{critical_moment_breakage}
\alias{critical_moment_overturning}
\title{Critical Moments Functions}
\usage{
critical_moment_breakage(dbh, ht, cr_depth, mor, fknot)
critical_moment_overturning(c_reg, stem_density, stem_vol)
}
\arguments{
\item{dbh}{The dbh (cm) of a tree.}
\item{ht}{The height (m) of a tree.}
\item{cr_depth}{The length (m) of the tree crown.}
\item{mor}{Modulus of Rupture (MPa) of green wood.}
\item{fknot}{Knot factor. Dimensionless.}
\item{c_reg}{Regression coefficients (N m kg-1) of uprooting moment against stem weight.}
\item{stem_density}{Density (kg m-3) of green wood of the stem.}
\item{stem_vol}{Volume (m3) of the stem of the mean tree in the stand.}
}
\description{
Calculate the critical moments for breakage and overturning
}
| /man/Critical_Moments_Functions.Rd | no_license | MarineDuperat/fgr | R | false | true | 938 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/critical_moments.R
\name{Critical_Moments_Functions}
\alias{Critical_Moments_Functions}
\alias{critical_moment_breakage}
\alias{critical_moment_overturning}
\title{Critical Moments Functions}
\usage{
critical_moment_breakage(dbh, ht, cr_depth, mor, fknot)
critical_moment_overturning(c_reg, stem_density, stem_vol)
}
\arguments{
\item{dbh}{The dbh (cm) of a tree.}
\item{ht}{The height (m) of a tree.}
\item{cr_depth}{The length (m) of the tree crown.}
\item{mor}{Modulus of Rupture (MPa) of green wood.}
\item{fknot}{Knot factor. Dimensionless.}
\item{c_reg}{Regression coefficients (N m kg-1) of uprooting moment against stem weight.}
\item{stem_density}{Density (kg m-3) of green wood of the stem.}
\item{stem_vol}{Volume (m3) of the stem of the mean tree in the stand.}
}
\description{
Calculate the critical moments for breakage and overturning
}
|
# *------------------------------------------------------------------
# | FUNCTION NAME: pca_plot_wrapper
# | FILE NAME: pca_plot.R
# | DATE:
# | CREATED BY: Jim Stagge
# *------------------------------------------------------------------
# | Parameter:
# | In: data - a dataframe with PCA importance data
# | write_folder - location to save plots
# | write_file - file name for plots
# |
# | Out:
# |
# | Desc: Runs the following three PCA diagnostic plots
# |
# *------------------------------------------------------------------
# *------------------------------------------------------------------
# | FUNCTION NAME: pca_plot_wrapper
# |
# | Desc: Runs three PCA diagnostic plots (eigenvalues, variance
# | explained, cumulative variance) and saves each in png/svg/pdf.
# |
# | In:  data         - a dataframe with PCA importance data
# |      write_folder - location to save plots
# |      write_file   - file name (and subfolder name) for plots
# | Out: (side effect only) image files written under
# |      <write_folder>/<fmt>/<write_file>/
# |
# | Depends on pca_eigen_plot(), pca_var_plot(), pca_cum_var_plot()
# | defined elsewhere in this project.
# *------------------------------------------------------------------
pca_plot_wrapper <- function(data, write_folder, write_file){
  # require() kept for consistency with the rest of this file
  require(ggplot2)
  require(svglite)

  ### Set up output folders (showWarnings=FALSE: re-runs are expected)
  for (fmt in c("png", "pdf", "svg")) {
    dir.create(file.path(write_folder, fmt, write_file), recursive = TRUE, showWarnings = FALSE)
  }

  ### Local helper: save one plot in all three formats.
  ### file.path() accepts multiple components, so the original nested
  ### file.path(file.path(...)) calls are flattened here.
  save_all_formats <- function(p, plot_name, width = 4, height = 3){
    ggsave(file.path(write_folder, "png", write_file, paste0(plot_name, ".png")), p, width = width, height = height, dpi = 600)
    ggsave(file.path(write_folder, "svg", write_file, paste0(plot_name, ".svg")), p, width = width, height = height)
    ggsave(file.path(write_folder, "pdf", write_file, paste0(plot_name, ".pdf")), p, width = width, height = height)
  }

  ### Run and save Eigen plot
  p <- pca_eigen_plot(importance = data)
  save_all_formats(p, paste0(write_file, "_eigen"))

  ### Run and save Variance Explained plot
  ### (original comments mislabeled this and the next plot as "Eigen")
  p <- pca_var_plot(importance = data)
  save_all_formats(p, paste0(write_file, "_var"))

  ### Run and save Cumulative Variance plot
  p <- pca_cum_var_plot(importance = data)
  save_all_formats(p, paste0(write_file, "_cum_var"))
}
# *------------------------------------------------------------------
# | FUNCTION NAME: pca_loading_plot_wrapper
# | FILE NAME: pca_loading_plot_wrapper.R
# | DATE:
# | CREATED BY: Jim Stagge
# *------------------------------------------------------------------
# | Parameter:
# | In: pca_loading - a dataframe of loadings
# | pc.n - number of pcs to plot
# | write_folder - folder to write figures
# | write_file - file name to write figures
# | Out:
# |
# | Desc: This function runs through all PC loading figures.
# *------------------------------------------------------------------
# NOTE(review): this function depends on several objects that must exist in
# the calling environment and are not defined in this file: wadr_site,
# map_big, states, site_info, pub_path, cap_first(), loading_map(),
# loading_genus(), and geom_text_repel (ggrepel). Confirm they are loaded
# before calling.
pca_loading_plot_wrapper <- function(pca_loading, pc.n, write_folder, write_file){
require(ggplot2)
require(svglite)
### Calculate contribution of loading to total by first finding absolute value, summing
### and dividing each PC loading by sum
abs_load <- abs(pca_loading)
load_contrib <- sweep(abs_load, 2, colSums(abs_load), "/")
### Create loop through all PCs
for (k in seq(1,pc.n)) {
### Create a data frame for loading map
plot_df <- data.frame(ID=rownames(pca_loading), Loading=pca_loading[,k], Contribution=load_contrib[,k])
### Attach site metadata (global wadr_site); merge drops IDs not present in both
plot_df <- merge(wadr_site, plot_df, by="ID")
### Re-sort data frame listing biggest contribution first
plot_df <- plot_df[order(-plot_df$Contribution),]
### Set the plotting limits based on max and min coordinates (0.25 degree margin)
lon.lim <- c(min(plot_df$Lon)-0.25,max(plot_df$Lon)+0.25)
lat.lim <- c(min(plot_df$Lat)-0.25,max(plot_df$Lat)+0.25)
### Make species uppercase
plot_df$genus <- cap_first(as.character(plot_df$genus))
### Create plot
p <- loading_map(plot_data=plot_df, map_underlay=map_big, map_borders=states, site_locations=site_info, lon.lim=lon.lim, lat.lim=lat.lim)
### Save Loading Plot (all sites) in png/svg/pdf
plot_name <- paste0(write_file, "_load_map_PC_",k,"_all")
ggsave(file.path(file.path(file.path(write_folder,"png"), write_file), paste0(plot_name, ".png")), p, width=6, height=7, dpi=600)
ggsave(file.path(file.path(file.path(write_folder,"svg"), write_file), paste0(plot_name, ".svg")), p, width=6, height=7)
ggsave(file.path(file.path(file.path(write_folder,"pdf"), write_file), paste0(plot_name, ".pdf")), p, width=6, height=7)
### Add labels
p <- p + geom_text_repel(data=plot_df, aes(x=Lon, y=Lat, label = ID), size = 3, nudge_x=0.2, nudge_y=0.05, box.padding = unit(0.5, 'lines'))
### Save Loading Plot with Labels
plot_name <- paste0(write_file, "_load_map_PC_",k,"_all_labels")
ggsave(file.path(file.path(file.path(write_folder,"png"), write_file), paste0(plot_name, ".png")), p, width=6, height=7, dpi=600)
ggsave(file.path(file.path(file.path(write_folder,"svg"), write_file), paste0(plot_name, ".svg")), p, width=6, height=7)
ggsave(file.path(file.path(file.path(write_folder,"pdf"), write_file), paste0(plot_name, ".pdf")), p, width=6, height=7)
### Extract a subset of sites: only those contributing > 1.2x the mean contribution
plot_subset <- subset(plot_df, Contribution > mean(plot_df$Contribution)*1.2)
### Create subset plot
p <- loading_map(plot_data=plot_subset, map_underlay=map_big, map_borders=states, site_locations=site_info, lon.lim=lon.lim, lat.lim=lat.lim)
### Save Loading Plot (subset)
plot_name <- paste0(write_file, "_load_map_PC_",k,"_subset")
ggsave(file.path(file.path(file.path(write_folder,"png"), write_file), paste0(plot_name, ".png")), p, width=6, height=7, dpi=600)
ggsave(file.path(file.path(file.path(write_folder,"svg"), write_file), paste0(plot_name, ".svg")), p, width=6, height=7)
ggsave(file.path(file.path(file.path(write_folder,"pdf"), write_file), paste0(plot_name, ".pdf")), p, width=6, height=7)
### Save to publication folder (global pub_path) for figure 4 panels A-E,
### but only for the "pca_impute" run
if (k %in% seq(1,5) & write_file== "pca_impute"){
plot_name <- paste0("fig_4",toupper(letters)[k])
ggsave(file.path(file.path(pub_path,"png"), paste0(plot_name , "_map.png")), p, width=6, height=7, dpi=600)
ggsave(file.path(file.path(pub_path,"svg"), paste0(plot_name , "_map.svg")), p, width=6, height=7)
ggsave(file.path(file.path(pub_path,"pdf"), paste0(plot_name , "_map.pdf")), p, width=6, height=7)
}
### Add labels
p <- p + geom_text_repel(data=plot_subset, aes(x=Lon, y=Lat, label = ID), size = 3, nudge_x=0.2, nudge_y=0.05, box.padding = unit(0.5, 'lines'))
### Save Loading Plot with Labels (subset)
plot_name <- paste0(write_file, "_load_map_PC_",k,"_subset_labels")
ggsave(file.path(file.path(file.path(write_folder,"png"), write_file), paste0(plot_name, ".png")), p, width=6, height=7, dpi=600)
ggsave(file.path(file.path(file.path(write_folder,"svg"), write_file), paste0(plot_name, ".svg")), p, width=6, height=7)
ggsave(file.path(file.path(file.path(write_folder,"pdf"), write_file), paste0(plot_name, ".pdf")), p, width=6, height=7)
### Create loading plot by genus (returns a list with $plot and $legend)
p <- loading_genus(plot_data=plot_df, pc_number = k)
### Save Loading Plot by species
plot_name <- paste0(write_file, "_load_species_PC_",k)
ggsave(file.path(file.path(file.path(write_folder,"png"), write_file), paste0(plot_name, ".png")), p$plot, width=8, height=4, dpi=600)
ggsave(file.path(file.path(file.path(write_folder,"svg"), write_file), paste0(plot_name, ".svg")), p$plot, width=8, height=4)
ggsave(file.path(file.path(file.path(write_folder,"pdf"), write_file), paste0(plot_name, ".pdf")), p$plot, width=8, height=4)
### Save to publication
if (k %in% seq(1,5) & write_file== "pca_impute"){
plot_name <- paste0("fig_4",toupper(letters)[k])
ggsave(file.path(file.path(pub_path,"png"), paste0(plot_name , "_species.png")), p$plot, width=8, height=4, dpi=600)
ggsave(file.path(file.path(pub_path,"svg"), paste0(plot_name , "_species.svg")), p$plot, width=8, height=4)
ggsave(file.path(file.path(pub_path,"pdf"), paste0(plot_name , "_species.pdf")), p$plot, width=8, height=4)
}
### Save Loading Plot by species Legend
plot_name <- paste0(write_file, "_load_species_PC_",k,"_legend")
ggsave(file.path(file.path(file.path(write_folder,"png"), write_file), paste0(plot_name, ".png")), p$legend, width=3, height=4, dpi=600)
ggsave(file.path(file.path(file.path(write_folder,"svg"), write_file), paste0(plot_name, ".svg")), p$legend, width=3, height=4)
ggsave(file.path(file.path(file.path(write_folder,"pdf"), write_file), paste0(plot_name, ".pdf")), p$legend, width=3, height=4)
### Save legend to publication once (first PC only)
if (k == 1 & write_file== "pca_impute"){
plot_name <- "fig_4"
ggsave(file.path(file.path(pub_path,"png"), paste0(plot_name , "_legend.png")), p$legend, width=3, height=4, dpi=600)
ggsave(file.path(file.path(pub_path,"svg"), paste0(plot_name , "_legend.svg")), p$legend, width=3, height=4)
ggsave(file.path(file.path(pub_path,"pdf"), paste0(plot_name , "_legend.pdf")), p$legend, width=3, height=4)
}
}
}
| /code/functions/pca_plot_wrappers.R | permissive | jstagge/monthly_paleo | R | false | false | 9,081 | r |
# *------------------------------------------------------------------
# | FUNCTION NAME: pca_plot_wrapper
# | FILE NAME: pca_plot.R
# | DATE:
# | CREATED BY: Jim Stagge
# *------------------------------------------------------------------
# | Parameter:
# | In: data - a dataframe with PCA importance data
# | write_folder - location to save plots
# | write_file - file name for plots
# |
# | Out:
# |
# | Desc: Runs the following three PCA diagnostic plots
# |
# *------------------------------------------------------------------
# NOTE(review): verbatim duplicate of pca_plot_wrapper defined earlier in
# this file (dataset-concatenation artifact). Saves three PCA diagnostic
# plots in png/svg/pdf under <write_folder>/<fmt>/<write_file>/.
pca_plot_wrapper <- function(data, write_folder, write_file){
require(ggplot2)
require(svglite)
### Set up output folders
dir.create(file.path(file.path(write_folder,"png"), write_file), recursive=TRUE)
dir.create(file.path(file.path(write_folder,"pdf"), write_file), recursive=TRUE)
dir.create(file.path(file.path(write_folder,"svg"), write_file), recursive=TRUE)
### Run Eigen Plot
p <- pca_eigen_plot(importance=data)
### Save Eigen Plot
plot_name <- paste0(write_file, "_eigen")
ggsave(file.path(file.path(file.path(write_folder,"png"), write_file), paste0(plot_name, ".png")), p, width=4, height=3, dpi=600)
ggsave(file.path(file.path(file.path(write_folder,"svg"), write_file), paste0(plot_name, ".svg")), p, width=4, height=3)
ggsave(file.path(file.path(file.path(write_folder,"pdf"), write_file), paste0(plot_name, ".pdf")), p, width=4, height=3)
### Run Variance Explained Plot
p <- pca_var_plot(importance=data)
### Save Variance Explained Plot
plot_name <- paste0(write_file, "_var")
ggsave(file.path(file.path(file.path(write_folder,"png"), write_file), paste0(plot_name, ".png")), p, width=4, height=3, dpi=600)
ggsave(file.path(file.path(file.path(write_folder,"svg"), write_file), paste0(plot_name, ".svg")), p, width=4, height=3)
ggsave(file.path(file.path(file.path(write_folder,"pdf"), write_file), paste0(plot_name, ".pdf")), p, width=4, height=3)
### Run Cumulative Variance Plot
p <- pca_cum_var_plot(importance=data)
### Save Cumulative Variance Plot
plot_name <- paste0(write_file, "_cum_var")
ggsave(file.path(file.path(file.path(write_folder,"png"), write_file), paste0(plot_name, ".png")), p, width=4, height=3, dpi=600)
ggsave(file.path(file.path(file.path(write_folder,"svg"), write_file), paste0(plot_name, ".svg")), p, width=4, height=3)
ggsave(file.path(file.path(file.path(write_folder,"pdf"), write_file), paste0(plot_name, ".pdf")), p, width=4, height=3)
}
# *------------------------------------------------------------------
# | FUNCTION NAME: pca_loading_plot_wrapper
# | FILE NAME: pca_loading_plot_wrapper.R
# | DATE:
# | CREATED BY: Jim Stagge
# *------------------------------------------------------------------
# | Parameter:
# | In: pca_loading - a dataframe of loadings
# | pc.n - number of pcs to plot
# | write_folder - folder to write figures
# | write_file - file name to write figures
# | Out:
# |
# | Desc: This function runs through all PC loading figures.
# *------------------------------------------------------------------
# NOTE(review): verbatim duplicate of pca_loading_plot_wrapper defined
# earlier in this file (dataset-concatenation artifact). Depends on
# external globals: wadr_site, map_big, states, site_info, pub_path,
# cap_first(), loading_map(), loading_genus(), geom_text_repel (ggrepel).
pca_loading_plot_wrapper <- function(pca_loading, pc.n, write_folder, write_file){
require(ggplot2)
require(svglite)
### Calculate contribution of loading to total by first finding absolute value, summing
### and dividing each PC loading by sum
abs_load <- abs(pca_loading)
load_contrib <- sweep(abs_load, 2, colSums(abs_load), "/")
### Create loop through all PCs
for (k in seq(1,pc.n)) {
### Create a data frame for loading map
plot_df <- data.frame(ID=rownames(pca_loading), Loading=pca_loading[,k], Contribution=load_contrib[,k])
plot_df <- merge(wadr_site, plot_df, by="ID")
### Re-sort data frame listing biggest contribution first
plot_df <- plot_df[order(-plot_df$Contribution),]
### Set the plotting limits based on max and min coordinates
lon.lim <- c(min(plot_df$Lon)-0.25,max(plot_df$Lon)+0.25)
lat.lim <- c(min(plot_df$Lat)-0.25,max(plot_df$Lat)+0.25)
### Make species uppercase
plot_df$genus <- cap_first(as.character(plot_df$genus))
### Create plot
p <- loading_map(plot_data=plot_df, map_underlay=map_big, map_borders=states, site_locations=site_info, lon.lim=lon.lim, lat.lim=lat.lim)
### Save Loading Plot
plot_name <- paste0(write_file, "_load_map_PC_",k,"_all")
ggsave(file.path(file.path(file.path(write_folder,"png"), write_file), paste0(plot_name, ".png")), p, width=6, height=7, dpi=600)
ggsave(file.path(file.path(file.path(write_folder,"svg"), write_file), paste0(plot_name, ".svg")), p, width=6, height=7)
ggsave(file.path(file.path(file.path(write_folder,"pdf"), write_file), paste0(plot_name, ".pdf")), p, width=6, height=7)
### Add labels
p <- p + geom_text_repel(data=plot_df, aes(x=Lon, y=Lat, label = ID), size = 3, nudge_x=0.2, nudge_y=0.05, box.padding = unit(0.5, 'lines'))
### Save Loading Plot with Labels
plot_name <- paste0(write_file, "_load_map_PC_",k,"_all_labels")
ggsave(file.path(file.path(file.path(write_folder,"png"), write_file), paste0(plot_name, ".png")), p, width=6, height=7, dpi=600)
ggsave(file.path(file.path(file.path(write_folder,"svg"), write_file), paste0(plot_name, ".svg")), p, width=6, height=7)
ggsave(file.path(file.path(file.path(write_folder,"pdf"), write_file), paste0(plot_name, ".pdf")), p, width=6, height=7)
### Extract a subset of sites
plot_subset <- subset(plot_df, Contribution > mean(plot_df$Contribution)*1.2)
### Create subset plot
p <- loading_map(plot_data=plot_subset, map_underlay=map_big, map_borders=states, site_locations=site_info, lon.lim=lon.lim, lat.lim=lat.lim)
### Save Loading Plot
plot_name <- paste0(write_file, "_load_map_PC_",k,"_subset")
ggsave(file.path(file.path(file.path(write_folder,"png"), write_file), paste0(plot_name, ".png")), p, width=6, height=7, dpi=600)
ggsave(file.path(file.path(file.path(write_folder,"svg"), write_file), paste0(plot_name, ".svg")), p, width=6, height=7)
ggsave(file.path(file.path(file.path(write_folder,"pdf"), write_file), paste0(plot_name, ".pdf")), p, width=6, height=7)
### Save to publication
if (k %in% seq(1,5) & write_file== "pca_impute"){
plot_name <- paste0("fig_4",toupper(letters)[k])
ggsave(file.path(file.path(pub_path,"png"), paste0(plot_name , "_map.png")), p, width=6, height=7, dpi=600)
ggsave(file.path(file.path(pub_path,"svg"), paste0(plot_name , "_map.svg")), p, width=6, height=7)
ggsave(file.path(file.path(pub_path,"pdf"), paste0(plot_name , "_map.pdf")), p, width=6, height=7)
}
### Add labels
p <- p + geom_text_repel(data=plot_subset, aes(x=Lon, y=Lat, label = ID), size = 3, nudge_x=0.2, nudge_y=0.05, box.padding = unit(0.5, 'lines'))
### Save Loading Plot with Labels
plot_name <- paste0(write_file, "_load_map_PC_",k,"_subset_labels")
ggsave(file.path(file.path(file.path(write_folder,"png"), write_file), paste0(plot_name, ".png")), p, width=6, height=7, dpi=600)
ggsave(file.path(file.path(file.path(write_folder,"svg"), write_file), paste0(plot_name, ".svg")), p, width=6, height=7)
ggsave(file.path(file.path(file.path(write_folder,"pdf"), write_file), paste0(plot_name, ".pdf")), p, width=6, height=7)
### Create loading plot by genus
p <- loading_genus(plot_data=plot_df, pc_number = k)
### Save Loading Plot by species
plot_name <- paste0(write_file, "_load_species_PC_",k)
ggsave(file.path(file.path(file.path(write_folder,"png"), write_file), paste0(plot_name, ".png")), p$plot, width=8, height=4, dpi=600)
ggsave(file.path(file.path(file.path(write_folder,"svg"), write_file), paste0(plot_name, ".svg")), p$plot, width=8, height=4)
ggsave(file.path(file.path(file.path(write_folder,"pdf"), write_file), paste0(plot_name, ".pdf")), p$plot, width=8, height=4)
### Save to publication
if (k %in% seq(1,5) & write_file== "pca_impute"){
plot_name <- paste0("fig_4",toupper(letters)[k])
ggsave(file.path(file.path(pub_path,"png"), paste0(plot_name , "_species.png")), p$plot, width=8, height=4, dpi=600)
ggsave(file.path(file.path(pub_path,"svg"), paste0(plot_name , "_species.svg")), p$plot, width=8, height=4)
ggsave(file.path(file.path(pub_path,"pdf"), paste0(plot_name , "_species.pdf")), p$plot, width=8, height=4)
}
### Save Loading Plot by species Legend
plot_name <- paste0(write_file, "_load_species_PC_",k,"_legend")
ggsave(file.path(file.path(file.path(write_folder,"png"), write_file), paste0(plot_name, ".png")), p$legend, width=3, height=4, dpi=600)
ggsave(file.path(file.path(file.path(write_folder,"svg"), write_file), paste0(plot_name, ".svg")), p$legend, width=3, height=4)
ggsave(file.path(file.path(file.path(write_folder,"pdf"), write_file), paste0(plot_name, ".pdf")), p$legend, width=3, height=4)
### Save to publication
if (k == 1 & write_file== "pca_impute"){
plot_name <- "fig_4"
ggsave(file.path(file.path(pub_path,"png"), paste0(plot_name , "_legend.png")), p$legend, width=3, height=4, dpi=600)
ggsave(file.path(file.path(pub_path,"svg"), paste0(plot_name , "_legend.svg")), p$legend, width=3, height=4)
ggsave(file.path(file.path(pub_path,"pdf"), paste0(plot_name , "_legend.pdf")), p$legend, width=3, height=4)
}
}
}
|
library('ROCR')
# Build a 2x2 confusion table.
# Rows ("truth"): did the prediction match the reference label?
# Cols ("prediction"): does the prediction equal the target class?
calCM <- function(predictions, references, target){
  truth_vec <- predictions == references
  target_vec <- predictions == target
  table(truth = truth_vec, prediction = target_vec)
}
# Sensitivity (recall): TP / (TP + FN), by linear indexing into the 2x2 table
# produced by calCM (cell 1 = TP, cell 3 = FN under that layout).
calSensitivity <- function(confusionMatrix){
  tp <- confusionMatrix[1]
  fn <- confusionMatrix[3]
  tp / (tp + fn)
}
# Specificity: TN / (FP + TN), by linear indexing into the 2x2 table
# produced by calCM (cell 2 = FP, cell 4 = TN under that layout).
calSpecificity <- function(confusionMatrix){
  tn <- confusionMatrix[4]
  fp <- confusionMatrix[2]
  tn / (fp + tn)
}
# Precision: TP / (TP + FP), by linear indexing into the 2x2 table
# produced by calCM (cell 1 = TP, cell 2 = FP under that layout).
calPrecision <- function(confusionMatrix){
  tp <- confusionMatrix[1]
  fp <- confusionMatrix[2]
  tp / (tp + fp)
}
# F1 score: harmonic mean of precision and recall.
#
# BUG FIX: the original wrote
#   return (2*precision*recall)/(precision+recall)
# In R, return() exits the function immediately, so the trailing division
# was dead code and the function returned 2*precision*recall, not F1.
# The whole expression is now parenthesized (and returned implicitly).
calF1 <- function(confusionMatrix){
  recall <- calSensitivity(confusionMatrix)
  precision <- calPrecision(confusionMatrix)
  (2 * precision * recall) / (precision + recall)
}
# Area under the ROC curve, computed with ROCR's prediction()/performance().
# (The "auc" performance object stores the value in its y.values slot.)
calAUC <- function(predscore, reference) {
  rocr_pred <- prediction(predscore, reference)
  rocr_perf <- performance(rocr_pred, 'auc')
  attributes(rocr_perf)$y.values[[1]]
}
# Orient prediction scores toward the query class: scores are assumed to be
# "probability of male", so for query_m == "female" they are flipped (1 - s).
# Stops with an error for any other query value.
predscore_func <- function(predscore, query_m)
{
  if (query_m == "male") {
    return(predscore)
  }
  if (query_m == "female") {
    return(1 - predscore)
  }
  stop(paste("ERROR: unknown query function", query_m))
}
# *------------------------------------------------------------------
# | Script body: parse command-line flags, score each per-method CSV
# | (columns: prediction, reference, pred.score), and write a summary
# | table of sensitivity/specificity/F1/AUC with a final "highest" row
# | naming the best-scoring method per metric.
# *------------------------------------------------------------------
# read parameters
args <- commandArgs(trailingOnly = TRUE)
if (length(args) == 0) {
  stop("USAGE: Rscript hw2_105753005.R --target male/female --files method1.csv method2.csv method3.csv method4.csv method5.csv method6.csv method7.csv method8.csv method9.csv method10.csv --out result.csv", call. = FALSE)
}
# parse parameters
i <- 1
while (i < length(args)) {
  if (args[i] == "--target") {
    query_m <- args[i + 1]
    i <- i + 1
  } else if (args[i] == "--files") {
    # Collect every argument up to the next flag (or end of args).
    # NOTE(review): grep("-") matches a dash anywhere, so a file name
    # containing "-" would truncate the list -- confirm naming convention.
    j <- grep("-", c(args[(i + 1):length(args)], "-"))[1]
    files <- args[(i + 1):(i + j - 1)]
    i <- i + j - 1
  } else if (args[i] == "--out") {
    out_f <- args[i + 1]
    i <- i + 1
  } else {
    stop(paste("Unknown flag", args[i]), call. = FALSE)
  }
  i <- i + 1
}
print("PROCESS")
print(paste("query mode :", query_m))
print(paste("output file:", out_f))
print(paste("files      :", files))
# read files: one metrics row per input csv
methods <- c()
sensitivitys <- c()
specificitys <- c()
F1s <- c()
AUCs <- c()
for (file in files) {
  method <- gsub(".csv", "", basename(file))
  d <- read.table(file, header = TRUE, sep = ",")
  # orient scores so larger = more likely the target class
  d$pred.score <- predscore_func(d$pred.score, query_m)
  cm <- calCM(d$prediction, d$reference, query_m)
  # BUG FIX: sensitivity previously called calSpecificity(), so the
  # sensitivity and specificity columns were identical.
  sensitivity <- round(calSensitivity(cm), digits = 2)
  specificity <- round(calSpecificity(cm), digits = 2)
  F1 <- round(calF1(cm), digits = 2)
  AUC <- round(calAUC(d$pred.score, d$reference), digits = 2)
  methods <- c(methods, method)
  sensitivitys <- c(sensitivitys, sensitivity)
  specificitys <- c(specificitys, specificity)
  F1s <- c(F1s, F1)
  AUCs <- c(AUCs, AUC)
}
out_data <- data.frame(method = methods, sensitivity = sensitivitys, specificity = specificitys, F1 = F1s, AUC = AUCs, stringsAsFactors = FALSE)
# for each metric column (2:5), record the method with the highest value
highest <- c("highest")
for (x in 2:5) {
  index <- which.max(out_data[[x]])
  highest <- c(highest, methods[index])
}
print(highest)
# output file (rbind-ing a character row coerces all columns to character,
# preserving the original output format)
out_data <- rbind(out_data, highest)
write.table(out_data, file = out_f, row.names = FALSE, quote = FALSE)
| /hw2/hw2_105753005.R | no_license | cwsu/1052DataScience | R | false | false | 3,183 | r | library('ROCR')
# calculate confusion matrix
# NOTE(review): verbatim duplicate of calCM defined earlier in this file
# (dataset-concatenation artifact). Rows = prediction==reference,
# cols = prediction==target.
calCM <- function(predictions,references,target){
confusionMatrix <- table(truth = c(predictions==references), prediction = c(predictions==target))
return (confusionMatrix)
}
# Sensitivity = TP/(TP+FN) via linear table indexing (duplicate of the
# earlier definition in this file).
calSensitivity <- function(confusionMatrix){
return (confusionMatrix[1]/(confusionMatrix[1]+confusionMatrix[3]))
}
# Specificity = TN/(FP+TN) via linear table indexing (duplicate of the
# earlier definition in this file).
calSpecificity <- function(confusionMatrix){
return (confusionMatrix[4]/(confusionMatrix[2]+confusionMatrix[4]))
}
# Precision = TP/(TP+FP) via linear table indexing (duplicate of the
# earlier definition in this file).
calPrecision <- function(confusionMatrix){
return (confusionMatrix[1]/(confusionMatrix[1]+confusionMatrix[2]))
}
# F1 score: harmonic mean of precision and recall (duplicate definition,
# dataset-concatenation artifact).
#
# BUG FIX: the original wrote
#   return (2*precision*recall)/(precision+recall)
# In R, return() exits the function immediately, so the trailing division
# was dead code and the function returned 2*precision*recall, not F1.
calF1 <- function(confusionMatrix){
  recall <- calSensitivity(confusionMatrix)
  precision <- calPrecision(confusionMatrix)
  (2 * precision * recall) / (precision + recall)
}
# AUC via ROCR (duplicate of the earlier definition in this file).
# NOTE(review): the local name `eval` shadows base::eval inside this
# function -- harmless here, but worth renaming.
calAUC <- function(predscore, reference) {
eval <- prediction(predscore, reference)
auc <- attributes(performance(eval, 'auc'))$y.values[[1]]
return (auc)
}
# Orient prediction scores toward the query class (duplicate of the earlier
# definition in this file): "male" keeps scores as-is, "female" flips them
# (1 - score), anything else is an error.
predscore_func<-function(predscore, query_m)
{
pred_score <- c()
if(query_m == "male"){
pred_score <- predscore
}
else if (query_m == "female") {
pred_score <- (1-predscore)
} else {
stop(paste("ERROR: unknown query function", query_m))
}
return (pred_score)
}
# *------------------------------------------------------------------
# | Script body (duplicate copy, dataset-concatenation artifact):
# | parse command-line flags, score each per-method CSV, and write a
# | summary table of sensitivity/specificity/F1/AUC with a final
# | "highest" row naming the best-scoring method per metric.
# *------------------------------------------------------------------
# read parameters
args <- commandArgs(trailingOnly = TRUE)
if (length(args) == 0) {
  stop("USAGE: Rscript hw2_105753005.R --target male/female --files method1.csv method2.csv method3.csv method4.csv method5.csv method6.csv method7.csv method8.csv method9.csv method10.csv --out result.csv", call. = FALSE)
}
# parse parameters
i <- 1
while (i < length(args)) {
  if (args[i] == "--target") {
    query_m <- args[i + 1]
    i <- i + 1
  } else if (args[i] == "--files") {
    # Collect every argument up to the next flag (or end of args).
    # NOTE(review): grep("-") matches a dash anywhere, so a file name
    # containing "-" would truncate the list -- confirm naming convention.
    j <- grep("-", c(args[(i + 1):length(args)], "-"))[1]
    files <- args[(i + 1):(i + j - 1)]
    i <- i + j - 1
  } else if (args[i] == "--out") {
    out_f <- args[i + 1]
    i <- i + 1
  } else {
    stop(paste("Unknown flag", args[i]), call. = FALSE)
  }
  i <- i + 1
}
print("PROCESS")
print(paste("query mode :", query_m))
print(paste("output file:", out_f))
print(paste("files      :", files))
# read files: one metrics row per input csv
methods <- c()
sensitivitys <- c()
specificitys <- c()
F1s <- c()
AUCs <- c()
for (file in files) {
  method <- gsub(".csv", "", basename(file))
  d <- read.table(file, header = TRUE, sep = ",")
  # orient scores so larger = more likely the target class
  d$pred.score <- predscore_func(d$pred.score, query_m)
  cm <- calCM(d$prediction, d$reference, query_m)
  # BUG FIX: sensitivity previously called calSpecificity(), so the
  # sensitivity and specificity columns were identical.
  sensitivity <- round(calSensitivity(cm), digits = 2)
  specificity <- round(calSpecificity(cm), digits = 2)
  F1 <- round(calF1(cm), digits = 2)
  AUC <- round(calAUC(d$pred.score, d$reference), digits = 2)
  methods <- c(methods, method)
  sensitivitys <- c(sensitivitys, sensitivity)
  specificitys <- c(specificitys, specificity)
  F1s <- c(F1s, F1)
  AUCs <- c(AUCs, AUC)
}
out_data <- data.frame(method = methods, sensitivity = sensitivitys, specificity = specificitys, F1 = F1s, AUC = AUCs, stringsAsFactors = FALSE)
# for each metric column (2:5), record the method with the highest value
highest <- c("highest")
for (x in 2:5) {
  index <- which.max(out_data[[x]])
  highest <- c(highest, methods[index])
}
print(highest)
# output file (rbind-ing a character row coerces all columns to character,
# preserving the original output format)
out_data <- rbind(out_data, highest)
write.table(out_data, file = out_f, row.names = FALSE, quote = FALSE)
|
context("client")
# Verifies that mlflow_client() builds host credentials correctly for
# http/https tracking URIs (credentials pulled from MLFLOW_* environment
# variables) and falls back to a locally started server otherwise.
# mlflow_rest is mocked so no real HTTP request is made.
test_that("http(s) clients work as expected", {
mlflow_clear_test_dir("mlruns")
with_mock(.env = "mlflow", mlflow_rest = function(..., client) {
args <- list(...)
expect_true(paste(args, collapse = "/") == "experiments/list")
list(experiments = c(1, 2, 3))
}, {
with_mock(.env = "mlflow", mlflow_register_local_server = function(...) NA, {
env <- list(
MLFLOW_USERNAME = "DonaldDuck",
MLFLOW_PASSWORD = "Quack",
MLFLOW_TOKEN = "$$$",
MLFLOW_INSECURE = "True"
)
with_envvar(env, {
# http URI: credentials should be picked up from the environment
http_host <- "http://remote"
client1 <- mlflow:::mlflow_client(http_host)
config <- client1$get_host_creds()
# NOTE(review): print(config) looks like leftover debug output --
# consider removing.
print(config)
expect_true(config$host == http_host)
expect_true(config$username == "DonaldDuck")
expect_true(config$password == "Quack")
expect_true(config$token == "$$$")
expect_true(config$insecure == "True")
# https URI: same credential handling; CLI env must round-trip too
https_host <- "https://remote"
client2 <- mlflow:::mlflow_client("https://remote")
config <- client2$get_host_creds()
expect_true(config$host == https_host)
env_str <- paste(env, collapse = "|")
env_str_2 <- paste(client2$get_cli_env(), collapse = "|")
expect_true(env_str == env_str_2)
})
# no tracking URI: client should point at the locally started server
with_mock(.env = "mlflow", mlflow_server = function(...) list(server_url = "local_server"), {
client3 <- mlflow:::mlflow_client()
config <- client3$get_host_creds()
expect_true(config$host == "local_server")
})
})
})
})
# Verifies that mlflow_rest() surfaces HTTP errors with useful messages for
# three payload shapes: a structured JSON error (400), a plain-text body
# (500), and a non-UTF8 raw body rendered as hex (503). httr verbs are
# mocked so no real request is made.
test_that("rest call handles errors correctly", {
mlflow_clear_test_dir("mlruns")
mock_client <- mlflow:::new_mlflow_client_impl(get_host_creds = function() {
mlflow:::new_mlflow_host_creds(host = "localhost")
})
# 400 with a JSON error payload: error code and message should both
# appear in the raised error
with_mock(.env = "httr", POST = function(...) {
httr:::response(
status_code = 400,
content = charToRaw(paste("{\"error_code\":\"INVALID_PARAMETER_VALUE\",",
"\"message\":\"experiment_id must be set to a non-zero value\"}",
sep = "")
)
)}, {
error_msg_regexp <- paste(
"API request to endpoint \'runs/create\' failed with error code 400",
"INVALID_PARAMETER_VALUE",
"experiment_id must be set to a non-zero value",
sep = ".*")
expect_error(
mlflow:::mlflow_rest( "runs", "create", client = mock_client, verb = "POST"),
error_msg_regexp
)
})
# 500 with a plain-text body: the text should be included verbatim
with_mock(.env = "httr", GET = function(...) {
httr:::response(
status_code = 500,
content = charToRaw(paste("some text."))
)
}, {
error_msg_regexp <- paste(
"API request to endpoint \'runs/create\' failed with error code 500",
"some text",
sep = ".*")
expect_error(
mlflow:::mlflow_rest( "runs", "create", client = mock_client, verb = "GET"),
error_msg_regexp
)
})
# 503 with a raw (non-text) body: bytes should be rendered as hex ("00 ff")
with_mock(.env = "httr", POST = function(...) {
httr:::response(
status_code = 503,
content = as.raw(c(0, 255))
)
}, {
error_msg_regexp <- paste(
"API request to endpoint \'runs/create\' failed with error code 503",
"00 ff",
sep = ".*")
expect_error(
mlflow:::mlflow_rest( "runs", "create", client = mock_client, verb = "POST"),
error_msg_regexp
)
})
})
| /mlflow/R/mlflow/tests/testthat/test-client.R | permissive | qubole/mlflow | R | false | false | 3,549 | r | context("client")
# NOTE(review): verbatim duplicate of the client-credentials test earlier in
# this file (dataset-concatenation artifact). Checks http/https credential
# handling and local-server fallback with mlflow_rest mocked out.
test_that("http(s) clients work as expected", {
mlflow_clear_test_dir("mlruns")
with_mock(.env = "mlflow", mlflow_rest = function(..., client) {
args <- list(...)
expect_true(paste(args, collapse = "/") == "experiments/list")
list(experiments = c(1, 2, 3))
}, {
with_mock(.env = "mlflow", mlflow_register_local_server = function(...) NA, {
env <- list(
MLFLOW_USERNAME = "DonaldDuck",
MLFLOW_PASSWORD = "Quack",
MLFLOW_TOKEN = "$$$",
MLFLOW_INSECURE = "True"
)
with_envvar(env, {
http_host <- "http://remote"
client1 <- mlflow:::mlflow_client(http_host)
config <- client1$get_host_creds()
print(config)
expect_true(config$host == http_host)
expect_true(config$username == "DonaldDuck")
expect_true(config$password == "Quack")
expect_true(config$token == "$$$")
expect_true(config$insecure == "True")
https_host <- "https://remote"
client2 <- mlflow:::mlflow_client("https://remote")
config <- client2$get_host_creds()
expect_true(config$host == https_host)
env_str <- paste(env, collapse = "|")
env_str_2 <- paste(client2$get_cli_env(), collapse = "|")
expect_true(env_str == env_str_2)
})
with_mock(.env = "mlflow", mlflow_server = function(...) list(server_url = "local_server"), {
client3 <- mlflow:::mlflow_client()
config <- client3$get_host_creds()
expect_true(config$host == "local_server")
})
})
})
})
# NOTE(review): verbatim duplicate of the REST error-handling test earlier
# in this file (dataset-concatenation artifact). Checks 400/500/503 error
# message formatting with httr verbs mocked out.
test_that("rest call handles errors correctly", {
mlflow_clear_test_dir("mlruns")
mock_client <- mlflow:::new_mlflow_client_impl(get_host_creds = function() {
mlflow:::new_mlflow_host_creds(host = "localhost")
})
with_mock(.env = "httr", POST = function(...) {
httr:::response(
status_code = 400,
content = charToRaw(paste("{\"error_code\":\"INVALID_PARAMETER_VALUE\",",
"\"message\":\"experiment_id must be set to a non-zero value\"}",
sep = "")
)
)}, {
error_msg_regexp <- paste(
"API request to endpoint \'runs/create\' failed with error code 400",
"INVALID_PARAMETER_VALUE",
"experiment_id must be set to a non-zero value",
sep = ".*")
expect_error(
mlflow:::mlflow_rest( "runs", "create", client = mock_client, verb = "POST"),
error_msg_regexp
)
})
with_mock(.env = "httr", GET = function(...) {
httr:::response(
status_code = 500,
content = charToRaw(paste("some text."))
)
}, {
error_msg_regexp <- paste(
"API request to endpoint \'runs/create\' failed with error code 500",
"some text",
sep = ".*")
expect_error(
mlflow:::mlflow_rest( "runs", "create", client = mock_client, verb = "GET"),
error_msg_regexp
)
})
with_mock(.env = "httr", POST = function(...) {
httr:::response(
status_code = 503,
content = as.raw(c(0, 255))
)
}, {
error_msg_regexp <- paste(
"API request to endpoint \'runs/create\' failed with error code 503",
"00 ff",
sep = ".*")
expect_error(
mlflow:::mlflow_rest( "runs", "create", client = mock_client, verb = "POST"),
error_msg_regexp
)
})
})
|
#these plots were constructed here and copied into the .Rmd
#plot 1 -- total hunters
# Builds and saves a line plot comparing total certified hunting-license
# holders (by year, summed across states) with FHWAR survey hunter counts,
# split into pre-/post-1991 series. Reads two CSVs from relative paths and
# writes a jpg to ./figs/; returns the ggplot object.
# Depends on data.table, dplyr, tidyr, ggplot2, and ggthemes (theme_gdocs)
# being attached in the calling session.
plot_total_hunting_licenses <- function(){
#Add main dataframe
file <- "./data_tidy/hunting_licenses_by_state_1960_and_2020.csv"
colClasses <- c(rep("character", 3), "integer", "character", "numeric")
df.00 <- data.table::fread(file = file, data.table = F, colClasses = colClasses)
df.01 <- dplyr::filter(df.00, key == "certified_paid_hunting_license_holders")
# total licenses per year across all states
df.02 <- df.01 %>%
group_by(year) %>%
summarize(total_licenses = sum(value))
df.03 <- tidyr::gather(df.02, key = key, value = value, -year)
df.03$key <- "tot_hunters_licenses"
# Add FHWAR line plot
file <- "./data_pure/usfw/2020-11-20-fw_nat_survey_fhwar_1955_2016.csv"
colClasses <- c("integer", "character", "integer", "character", "integer", "character", "numeric")
df.fhwar <- read.csv(file = file, header = T, colClasses = colClasses)
df.fhwar <- select(df.fhwar, year, total_all_hunters)
df.fhwar <- tidyr::gather(df.fhwar, key = key, value = value, -year)
# NOTE(review): 1990 falls in BOTH ranges below; the second assignment
# wins, so 1990 ends up labeled "post_1991". Confirm whether the split
# year was intended to be inclusive on the "pre" side instead.
df.fhwar$key[which(df.fhwar$year %in% 1955:1990)] <- "tot_hunters_pre_1991"
df.fhwar$key[which(df.fhwar$year %in% 1990:2020)] <- "tot_hunters_post_1991"
#rbind
df.04 <- rbind(df.03, df.fhwar)
# fix legend/series order
df.04$key <- factor(df.04$key, levels = c("tot_hunters_licenses",
"tot_hunters_pre_1991",
"tot_hunters_post_1991")
)
#plot total number of hunting licenses
p <- ggplot(df.04, aes(year, value, group = key, colour = key))
p <- p + geom_line()
p <- p + geom_point()
# y axis labeled in millions (breaks at 0/5/10/15/20 million)
p <- p + scale_y_continuous(limits = c(0, 22000000),
name = "",
breaks = c(0, 50e5, 100e5, 150e5, 200e5),
labels = c("0", "5m", "10m", "15m", "20m")
)
p <- p + scale_x_continuous(name = "")
p <- p + scale_colour_manual(values = c("#4582ec", "#ffa600", "#ff5ca4"))
p <- p + ggtitle("Total U.S. Hunters from License and Surveys \n1955 - 2020")
p <- p + theme_gdocs()
filename <- "./figs/total_us_hunters_from_license_and_survey_data_1955_2020.jpg"
ggsave(p, filename = filename, height = 5, width = 8, dpi = 300, units = "in")
p
}
plot_total_hunting_licenses()
#plot 2 -- participation rate
#' Plot hunting-license participation rate vs. FHWAR survey participation
#' rate, 1955-2020. Writes the figure to ./figs/ and returns the ggplot
#' object. Values are percentages of the total population.
plot_pct_hunters_from_FHWAR_and_hunters_licenses <- function(){
  # License-based participation: total licenses / total state population.
  calculate_pct_hunting_licenses <- function(){
    #Add main dataframe
    file <- "./data_tidy/hunting_licenses_by_state_1960_and_2020.csv"
    colClasses <- c(rep("character", 3), "integer", "character", "numeric")
    df.00 <- data.table::fread(file = file, data.table = F, colClasses = colClasses)
    #total up the number of hunting licenses
    df.01 <- dplyr::filter(df.00, key == "certified_paid_hunting_license_holders")
    df.02 <- df.01 %>%
      group_by(year) %>%
      summarize(total_licenses = sum(value))
    #total up the pop among the states
    df.03 <- dplyr::filter(df.00, key == "pop")
    df.04 <- df.03 %>%
      group_by(year) %>%
      summarize(tot_pop = sum(value))
    #figure out a per capita on the totals
    # NOTE(review): assumes df.02 and df.04 cover the same years in the same
    # order (both come from group_by(year) on the same table) -- TODO confirm.
    df.04$pct_hunting_licenses <- round((df.02$total_licenses / df.04$tot_pop) * 100, 2)
    df.04 <- df.04[, c(1,3)]
    df.04 <- tidyr::gather(df.04, key = key, value = value, -year)
    df.04
  }
  # Survey-based participation rate, split at the 1991 methodology change.
  calculate_pct_hunters_from_survey <- function(){
    # Add FHWAR line plot
    file <- "./data_pure/usfw/2020-11-20-fw_nat_survey_fhwar_1955_2016.csv"
    colClasses <- c("integer", "character", "integer", "character", "integer", "character", "numeric")
    df.fhwar <- read.csv(file = file, header = T, colClasses = colClasses)
    df.fhwar <- select(df.fhwar, year, "participation_rate_calculated")
    # convert fraction to percent, 2 decimals
    df.fhwar$participation_rate_calculated <- round(df.fhwar$participation_rate_calculated *100, 2)
    df.fhwar <- tidyr::gather(df.fhwar, key = key, value = value, -year)
    df.fhwar$key[which(df.fhwar$year %in% 1955:1990)] <- "part_rate_pre_1991"
    df.fhwar$key[which(df.fhwar$year %in% 1990:2020)] <- "part_rate_post_1991"
    df.fhwar
  }
  # Stack the two sources; factor levels fix legend/colour order.
  df <- rbind(calculate_pct_hunters_from_survey(), calculate_pct_hunting_licenses())
  df$key <- factor(df$key, levels = c("pct_hunting_licenses", "part_rate_pre_1991", "part_rate_post_1991"))
  p <- ggplot(df, aes(year, value, group = key, colour = key))
  p <- p + geom_line()#4582EC
  p <- p + geom_point(size = 3)
  p <- p + scale_y_continuous(limits = c(0, 12),
    name = "",
    breaks = c(0, 3.0, 6.0, 9.0, 12),
    labels = c("0.0%", "3.0%", "6.0%", "9.0%", "12.0%")
    )
  p <- p + scale_x_continuous(name = "")
  p <- p + scale_colour_manual(values = c("#4582ec", "#ffa600", "#ff5ca4"))
  p <- p + ggtitle("Hunting Licenses vs. Survey Participation Rate")
  p <- p + theme_gdocs()
  filename <- "./figs/hunting_licenses_vs_survey_part_rate_1955_2020.jpg"
  ggsave(p, filename = filename, height = 5, width = 8, dpi = 300, units = "in")
  p
}
#plot 3 -- population to hunter growth
#' Plot the cumulative percent increase in U.S. population and in hunting
#' licenses since the 1960 base year.
#'
#' Reads the pre-computed totals file, writes the figure to ./figs/, and
#' returns the ggplot object.
Plot_change_in_hunters_license_to_population <- function(){
  file <- "./data_pure/total_annual_hl_and_pop_1960-2020.csv"
  colClasses <- c("integer", "integer", "integer", "numeric", "numeric")
  df.pct <- read.csv(file = file, header = TRUE, colClasses = colClasses)
  # Long format, keeping only the two percent-increase series.
  df.pct <- tidyr::gather(df.pct, key = key, value = value, -year)
  df.pct <- dplyr::filter(df.pct, key == "pct_increase_population" | key == "pct_increase_hunting_lic")
  p <- ggplot(df.pct, aes(year, value, group = key, color = key))
  p <- p + geom_line()
  p <- p + geom_point(size = 3)
  p <- p + scale_color_manual(values=c("#4582EC", "#ffa600"))
  p <- p + theme_gdocs()
  p <- p + scale_x_continuous(name = "")
  p <- p + ggtitle("Pct. Increase in Population and Hunting Licenses \nbase year = 1960")
  filename <- "./figs/pct_increase_in_population_and_hunting_licenses_1960_to_2020.jpg"
  # Fix: was `unit = "in"` (partial argument match); spelled out as `units =`
  # for consistency with the other ggsave() calls in this file.
  ggsave(p, filename = filename, height = 5, width = 8, dpi = 300, units = "in")
  p
}
#plot 4 --
#' Plot gross license cost per hunter, 1960-2020, against a 1970-dollar
#' inflation-adjusted benchmark.
#'
#' Writes the figure to ./figs/ and returns the ggplot object.
plot_cost_per_hunter_idx_for_inflation <- function(){
  library(ggthemes)
  # Build a long data frame with two series: cost_per_hunter and
  # cost_per_person (total gross license cost divided by yearly totals).
  create_df_cost_per_hunter <- function(){
    #Add main dataframe
    file <- "./data_tidy/hunting_licenses_by_state_1960_and_2020.csv"
    colClasses <- c(rep("character", 3), "integer", "character", "numeric")
    df.00 <- data.table::fread(file = file, data.table = F, colClasses = colClasses)
    #total cost to hunters
    df.01 <- dplyr::filter(df.00, key == "total_gross_cost_to_hunters")
    df.02 <-df.01 %>%
      group_by(year) %>%
      summarize(total_cost = sum(value))
    #total hunters by year
    df.03 <- dplyr::filter(df.00, key == "certified_paid_hunting_license_holders")
    df.04 <- df.03 %>%
      group_by(year) %>%
      summarize(total_hunters = sum(value))
    #merge
    df.05 <- dplyr::left_join(df.02, df.04)
    #total cost per hunter
    df.05$cost_per_hunter <- df.05$total_cost / df.05$total_hunters
    df.05 <- dplyr::select(df.05, year, cost_per_hunter)
    df.06 <- tidyr::gather(df.05, key = key, value = value, -year)
    #total cost per person
    df.07 <- dplyr::filter(df.00, key == "pop")
    df.08 <- df.07 %>%
      group_by(year) %>%
      summarize(total_pop = sum(value))
    df.09 <- merge(df.02, df.08)
    df.09$cost_per_person <- df.09$total_cost / df.09$total_pop
    df.09 <- dplyr::select(df.09, year, cost_per_person)
    df.09 <- tidyr::gather(df.09, key = key, value = value, -year)
    #join cost_per_hunter to cost_per_person
    df.10 <- rbind(df.06, df.09)
    df.10
  }
  # Two-point (1970 vs. 2020) inflation-adjusted benchmark series.
  # convert_to_constant_dollars() comes from ./R/functions.R.
  create_df_for_infl_idx <- function(){
    #build inflation adjusted series for comparison
    source("./R/functions.R")
    # Fix: `price` was 1010608879 (extra digit); 101608879 matches
    # total_cost_1970 and the precomputed 2019-dollar value 681900329.51
    # used in the ratios below.
    df.infl <- data.frame(total_cost_1970 = 101608879,
      const_dol_2020 = convert_to_constant_dollars(price = 101608879, from_date = 1970, to_date = 2019),
      total_hunt_1970 = 15658318,
      total_hunt_2020 = 15151724,
      total_pop_1970 = 202455416,
      total_pop_2020 = 331794992,
      infl_adj_cost_per_hunter_1970 = 101608879 / 15658318,
      infl_adj_cost_per_hunter_2020 = 681900329.51 / 15151724,
      infl_adj_cost_per_person_1970 = 101608879 / 202455416,
      infl_adj_cost_per_person_2020 = 681900329.51 / 331794992
      )
    # One row per (measure, year); keep only the inflation-adjusted series.
    df.infl <- tidyr::gather(df.infl, key = key, value = value)
    df.infl$year <- stringr::str_sub(df.infl$key, -4)
    df.infl$year <- as.integer(df.infl$year)
    df.infl$key <-gsub("_1970|_2020", "", df.infl$key)
    df.infl <- df.infl[grep("infl_adj", df.infl$key), ]
    df.infl
  }
  # Assemble the per-hunter figure (observed series + benchmark overlay).
  create_plot_for_hunters <- function(){
    df.10 <- create_df_cost_per_hunter()
    df.infl <- create_df_for_infl_idx()
    #plot
    library(ggthemes)
    p <- ggplot(dplyr::filter(df.10, key == "cost_per_hunter"), aes(year, value, group = key, colour = key))
    p <- p + geom_line()
    p <- p + geom_point(size = 3)
    p <- p + scale_y_continuous(name = "",
      limits = c(0, 60),
      breaks = c(0, 15, 30, 45, 60),
      labels = c("$0", "$15", "$30", "$45", "$60")
      )
    p <- p + scale_color_manual(values=c("#4582EC", "#00d0ff"))
    df.infl.1 <- dplyr::filter(df.infl, key == "infl_adj_cost_per_hunter")
    df.infl.1$key <- gsub("_per_hunter", "", df.infl.1$key)
    p <- p + geom_line(data = df.infl.1, aes(year, value, group = key, colour = key))
    p <- p + geom_point(data = df.infl.1, aes(year, value, group = key, colour = key), size = 3)
    p <- p + theme_gdocs()
    p <- p + scale_x_continuous(name = "",
      limits = c(1960, 2020))
    p <- p + ggtitle("Gross Cost Per Hunter Indexed to Inflation \n1970-2020")
    filename <- "./figs/gross_cost_per_hunter_indexed_to_inflation_1970_2020.jpg"
    ggsave(p, filename = filename, height = 5, width = 8, dpi = 300, units = "in")
    p
  }
  create_plot_for_hunters()
}
plot_cost_per_hunter_idx_for_inflation()
#plot 5 --
#' Plot gross license cost per person (total population), 1960-2020, against
#' a 1970-dollar inflation-adjusted benchmark.
#'
#' Writes the figure to ./figs/ and returns the ggplot object.
plot_cost_per_person_idx_for_inflation <- function(){
  library(ggthemes)
  # Build a long data frame with two series: cost_per_hunter and
  # cost_per_person (total gross license cost divided by yearly totals).
  create_df_cost_per_hunter <- function(){
    #Add main dataframe
    file <- "./data_tidy/hunting_licenses_by_state_1960_and_2020.csv"
    colClasses <- c(rep("character", 3), "integer", "character", "numeric")
    df.00 <- data.table::fread(file = file, data.table = F, colClasses = colClasses)
    #total cost to hunters
    df.01 <- dplyr::filter(df.00, key == "total_gross_cost_to_hunters")
    df.02 <-df.01 %>%
      group_by(year) %>%
      summarize(total_cost = sum(value))
    #total hunters by year
    df.03 <- dplyr::filter(df.00, key == "certified_paid_hunting_license_holders")
    df.04 <- df.03 %>%
      group_by(year) %>%
      summarize(total_hunters = sum(value))
    #merge
    df.05 <- dplyr::left_join(df.02, df.04)
    #total cost per hunter
    df.05$cost_per_hunter <- df.05$total_cost / df.05$total_hunters
    df.05 <- dplyr::select(df.05, year, cost_per_hunter)
    df.06 <- tidyr::gather(df.05, key = key, value = value, -year)
    #total cost per person
    df.07 <- dplyr::filter(df.00, key == "pop")
    df.08 <- df.07 %>%
      group_by(year) %>%
      summarize(total_pop = sum(value))
    df.09 <- merge(df.02, df.08)
    df.09$cost_per_person <- df.09$total_cost / df.09$total_pop
    df.09 <- dplyr::select(df.09, year, cost_per_person)
    df.09 <- tidyr::gather(df.09, key = key, value = value, -year)
    #join cost_per_hunter to cost_per_person
    df.10 <- rbind(df.06, df.09)
    df.10
  }
  # Two-point (1970 vs. 2020) inflation-adjusted benchmark series.
  # convert_to_constant_dollars() comes from ./R/functions.R.
  create_df_for_infl_idx <- function(){
    #build inflation adjusted series for comparison
    source("./R/functions.R")
    # Fix: `price` was 1010608879 (extra digit); 101608879 matches
    # total_cost_1970 and the precomputed 2019-dollar value 681900329.51
    # used in the ratios below.
    df.infl <- data.frame(total_cost_1970 = 101608879,
      const_dol_2020 = convert_to_constant_dollars(price = 101608879, from_date = 1970, to_date = 2019),
      total_hunt_1970 = 15658318,
      total_hunt_2020 = 15151724,
      total_pop_1970 = 202455416,
      total_pop_2020 = 331794992,
      infl_adj_cost_per_hunter_1970 = 101608879 / 15658318,
      infl_adj_cost_per_hunter_2020 = 681900329.51 / 15151724,
      infl_adj_cost_per_person_1970 = 101608879 / 202455416,
      infl_adj_cost_per_person_2020 = 681900329.51 / 331794992
      )
    # One row per (measure, year); keep only the inflation-adjusted series.
    df.infl <- tidyr::gather(df.infl, key = key, value = value)
    df.infl$year <- stringr::str_sub(df.infl$key, -4)
    df.infl$year <- as.integer(df.infl$year)
    df.infl$key <-gsub("_1970|_2020", "", df.infl$key)
    df.infl <- df.infl[grep("infl_adj", df.infl$key), ]
    df.infl
  }
  # Assemble the per-person figure (observed series + benchmark overlay).
  create_plot_for_person <- function(){
    df.10 <- create_df_cost_per_hunter()
    df.infl <- create_df_for_infl_idx()
    p <- ggplot(dplyr::filter(df.10, key == "cost_per_person"), aes(year, value, group = key, colour = key))
    p <- p + geom_line()
    p <- p + geom_point(size = 3)
    # (a stray no-op `p` statement was removed here)
    p <- p + scale_y_continuous(name = "",
      limits = c(0, 3),
      breaks = c(0, .75, 1.50, 2.25, 3),
      labels = c("$0.0", "$0.75", "$1.50", "$2.25", "$3.00")
      )
    p <- p + scale_color_manual(values=c("#ffa600", "#ffdc00"))
    df.infl.2 <- dplyr::filter(df.infl, key == "infl_adj_cost_per_person")
    df.infl.2$key <- gsub("_per_person", "", df.infl.2$key)
    p <- p + geom_line(data = df.infl.2, aes(year, value, group = key, colour = key))
    p <- p + geom_point(data = df.infl.2, aes(year, value, group = key, colour = key), size = 3)
    p <- p + theme_gdocs()
    p <- p + scale_x_continuous(name = "",
      limits = c(1960, 2020))
    p <- p + ggtitle("Gross Cost Per Person Indexed to Inflation\n1970-2020")
    filename <- "./figs/gross_cost_per_person_indexed_to_inflation_1970_2020.jpg"
    ggsave(p, filename = filename, height = 5, width = 8, dpi = 300, units = "in")
    p
  }
  create_plot_for_person()
}
plot_cost_per_person_idx_for_inflation()
| /R/plot_national _charts.R | permissive | RobWiederstein/hunting_licenses | R | false | false | 16,385 | r | #these plots were constructed here and copied into the .Rmd
#plot 1 -- total hunters
plot_total_hunting_licenses <- function(){
#Add main dataframe
file <- "./data_tidy/hunting_licenses_by_state_1960_and_2020.csv"
colClasses <- c(rep("character", 3), "integer", "character", "numeric")
df.00 <- data.table::fread(file = file, data.table = F, colClasses = colClasses)
df.01 <- dplyr::filter(df.00, key == "certified_paid_hunting_license_holders")
df.02 <- df.01 %>%
group_by(year) %>%
summarize(total_licenses = sum(value))
df.03 <- tidyr::gather(df.02, key = key, value = value, -year)
df.03$key <- "tot_hunters_licenses"
# Add FHWAR line plot
file <- "./data_pure/usfw/2020-11-20-fw_nat_survey_fhwar_1955_2016.csv"
colClasses <- c("integer", "character", "integer", "character", "integer", "character", "numeric")
df.fhwar <- read.csv(file = file, header = T, colClasses = colClasses)
df.fhwar <- select(df.fhwar, year, total_all_hunters)
df.fhwar <- tidyr::gather(df.fhwar, key = key, value = value, -year)
df.fhwar$key[which(df.fhwar$year %in% 1955:1990)] <- "tot_hunters_pre_1991"
df.fhwar$key[which(df.fhwar$year %in% 1990:2020)] <- "tot_hunters_post_1991"
#rbind
df.04 <- rbind(df.03, df.fhwar)
df.04$key <- factor(df.04$key, levels = c("tot_hunters_licenses",
"tot_hunters_pre_1991",
"tot_hunters_post_1991")
)
#plot total number of hunting licenses
p <- ggplot(df.04, aes(year, value, group = key, colour = key))
p <- p + geom_line()
p <- p + geom_point()
p <- p + scale_y_continuous(limits = c(0, 22000000),
name = "",
breaks = c(0, 50e5, 100e5, 150e5, 200e5),
labels = c("0", "5m", "10m", "15m", "20m")
)
p <- p + scale_x_continuous(name = "")
p <- p + scale_colour_manual(values = c("#4582ec", "#ffa600", "#ff5ca4"))
p <- p + ggtitle("Total U.S. Hunters from License and Surveys \n1955 - 2020")
p <- p + theme_gdocs()
filename <- "./figs/total_us_hunters_from_license_and_survey_data_1955_2020.jpg"
ggsave(p, filename = filename, height = 5, width = 8, dpi = 300, units = "in")
p
}
plot_total_hunting_licenses()
#plot 2 -- participaton rate
plot_pct_hunters_from_FHWAR_and_hunters_licenses <- function(){
calculate_pct_hunting_licenses <- function(){
#Add main dataframe
file <- "./data_tidy/hunting_licenses_by_state_1960_and_2020.csv"
colClasses <- c(rep("character", 3), "integer", "character", "numeric")
df.00 <- data.table::fread(file = file, data.table = F, colClasses = colClasses)
#total up the number of hunting licenses
df.01 <- dplyr::filter(df.00, key == "certified_paid_hunting_license_holders")
df.02 <- df.01 %>%
group_by(year) %>%
summarize(total_licenses = sum(value))
#total up the pop among the states
df.03 <- dplyr::filter(df.00, key == "pop")
df.04 <- df.03 %>%
group_by(year) %>%
summarize(tot_pop = sum(value))
#figure out a per capita on the totals
df.04$pct_hunting_licenses <- round((df.02$total_licenses / df.04$tot_pop) * 100, 2)
df.04 <- df.04[, c(1,3)]
df.04 <- tidyr::gather(df.04, key = key, value = value, -year)
df.04
}
calculate_pct_hunters_from_survey <- function(){
# Add FHWAR line plot
file <- "./data_pure/usfw/2020-11-20-fw_nat_survey_fhwar_1955_2016.csv"
colClasses <- c("integer", "character", "integer", "character", "integer", "character", "numeric")
df.fhwar <- read.csv(file = file, header = T, colClasses = colClasses)
df.fhwar <- select(df.fhwar, year, "participation_rate_calculated")
df.fhwar$participation_rate_calculated <- round(df.fhwar$participation_rate_calculated *100, 2)
df.fhwar <- tidyr::gather(df.fhwar, key = key, value = value, -year)
df.fhwar$key[which(df.fhwar$year %in% 1955:1990)] <- "part_rate_pre_1991"
df.fhwar$key[which(df.fhwar$year %in% 1990:2020)] <- "part_rate_post_1991"
df.fhwar
}
df <- rbind(calculate_pct_hunters_from_survey(), calculate_pct_hunting_licenses())
df$key <- factor(df$key, levels = c("pct_hunting_licenses", "part_rate_pre_1991", "part_rate_post_1991"))
p <- ggplot(df, aes(year, value, group = key, colour = key))
p <- p + geom_line()#4582EC
p <- p + geom_point(size = 3)
p <- p + scale_y_continuous(limits = c(0, 12),
name = "",
breaks = c(0, 3.0, 6.0, 9.0, 12),
labels = c("0.0%", "3.0%", "6.0%", "9.0%", "12.0%")
)
p <- p + scale_x_continuous(name = "")
p <- p + scale_colour_manual(values = c("#4582ec", "#ffa600", "#ff5ca4"))
p <- p + ggtitle("Hunting Licenses vs. Survey Participation Rate")
p <- p + theme_gdocs()
filename <- "./figs/hunting_licenses_vs_survey_part_rate_1955_2020.jpg"
ggsave(p, filename = filename, height = 5, width = 8, dpi = 300, units = "in")
p
}
#plot 3 -- population to hunter growth
Plot_change_in_hunters_license_to_population <- function(){
file <- "./data_pure/total_annual_hl_and_pop_1960-2020.csv"
colClasses <- c("integer", "integer", "integer", "numeric", "numeric")
df.pct <- read.csv(file = file, header = T, colClasses = colClasses)
df.pct <- tidyr::gather(df.pct, key = key, value = value, -year)
df.pct <- dplyr::filter(df.pct, key == "pct_increase_population" | key == "pct_increase_hunting_lic")
p <- ggplot(df.pct, aes(year, value, group = key, color = key))
p <- p + geom_line()
p <- p + geom_point(size = 3)
p <- p + scale_color_manual(values=c("#4582EC", "#ffa600"))
p <- p + theme_gdocs()
p <- p + scale_x_continuous(name = "")
p <- p + ggtitle("Pct. Increase in Population and Hunting Licenses \nbase year = 1960")
filename <- "./figs/pct_increase_in_population_and_hunting_licenses_1960_to_2020.jpg"
ggsave(p, filename = filename, height = 5, width = 8, dpi = 300, unit = "in")
p
}
#plot 4 --
plot_cost_per_hunter_idx_for_inflation <- function(){
library(ggthemes)
create_df_cost_per_hunter <- function(){
#Add main dataframe
file <- "./data_tidy/hunting_licenses_by_state_1960_and_2020.csv"
colClasses <- c(rep("character", 3), "integer", "character", "numeric")
df.00 <- data.table::fread(file = file, data.table = F, colClasses = colClasses)
#total cost to hunters
df.01 <- dplyr::filter(df.00, key == "total_gross_cost_to_hunters")
df.02 <-df.01 %>%
group_by(year) %>%
summarize(total_cost = sum(value))
#total hunters by year
df.03 <- dplyr::filter(df.00, key == "certified_paid_hunting_license_holders")
df.04 <- df.03 %>%
group_by(year) %>%
summarize(total_hunters = sum(value))
#merge
df.05 <- dplyr::left_join(df.02, df.04)
#total cost per hunter
df.05$cost_per_hunter <- df.05$total_cost / df.05$total_hunters
df.05 <- dplyr::select(df.05, year, cost_per_hunter)
df.06 <- tidyr::gather(df.05, key = key, value = value, -year)
#total cost per person
df.07 <- dplyr::filter(df.00, key == "pop")
df.08 <- df.07 %>%
group_by(year) %>%
summarize(total_pop = sum(value))
df.09 <- merge(df.02, df.08)
df.09$cost_per_person <- df.09$total_cost / df.09$total_pop
df.09 <- dplyr::select(df.09, year, cost_per_person)
df.09 <- tidyr::gather(df.09, key = key, value = value, -year)
#join cost_per_hunter to cost_per_person
df.10 <- rbind(df.06, df.09)
df.10
}
create_df_for_infl_idx <- function(){
#build inflation adjusted series for comparison
source("./R/functions.R")
df.infl <- data.frame(total_cost_1970 = 101608879,
const_dol_2020 = convert_to_constant_dollars(price = 1010608879, from_date = 1970, to_date = 2019),
total_hunt_1970 = 15658318,
total_hunt_2020 = 15151724,
total_pop_1970 = 202455416,
total_pop_2020 = 331794992,
infl_adj_cost_per_hunter_1970 = 101608879 / 15658318,
infl_adj_cost_per_hunter_2020 = 681900329.51 / 15151724,
infl_adj_cost_per_person_1970 = 101608879 / 202455416,
infl_adj_cost_per_person_2020 = 681900329.51 / 331794992
)
df.infl <- tidyr::gather(df.infl, key = key, value = value)
df.infl$year <- stringr::str_sub(df.infl$key, -4)
df.infl$year <- as.integer(df.infl$year)
df.infl$key <-gsub("_1970|_2020", "", df.infl$key)
df.infl <- df.infl[grep("infl_adj", df.infl$key), ]
df.infl
}
create_plot_for_hunters <- function(){
df.10 <- create_df_cost_per_hunter()
df.infl <- create_df_for_infl_idx()
#plot
library(ggthemes)
p <- ggplot(dplyr::filter(df.10, key == "cost_per_hunter"), aes(year, value, group = key, colour = key))
p <- p + geom_line()
p <- p + geom_point(size = 3)
p <- p + scale_y_continuous(name = "",
limits = c(0, 60),
breaks = c(0, 15, 30, 45, 60),
labels = c("$0", "$15", "$30", "$45", "$60")
)
p <- p + scale_color_manual(values=c("#4582EC", "#00d0ff"))
df.infl.1 <- dplyr::filter(df.infl, key == "infl_adj_cost_per_hunter")
df.infl.1$key <- gsub("_per_hunter", "", df.infl.1$key)
p <- p + geom_line(data = df.infl.1, aes(year, value, group = key, colour = key))
p <- p + geom_point(data = df.infl.1, aes(year, value, group = key, colour = key), size = 3)
p <- p + theme_gdocs()
p <- p + scale_x_continuous(name = "",
limits = c(1960, 2020))
p <- p + ggtitle("Gross Cost Per Hunter Indexed to Inflation \n1970-2020")
filename <- "./figs/gross_cost_per_hunter_indexed_to_inflation_1970_2020.jpg"
ggsave(p, filename = filename, height = 5, width = 8, dpi = 300, units = "in")
p
}
create_plot_for_hunters()
}
plot_cost_per_hunter_idx_for_inflation()
#plot 5 --
plot_cost_per_person_idx_for_inflation <- function(){
library(ggthemes)
create_df_cost_per_hunter <- function(){
#Add main dataframe
file <- "./data_tidy/hunting_licenses_by_state_1960_and_2020.csv"
colClasses <- c(rep("character", 3), "integer", "character", "numeric")
df.00 <- data.table::fread(file = file, data.table = F, colClasses = colClasses)
#total cost to hunters
df.01 <- dplyr::filter(df.00, key == "total_gross_cost_to_hunters")
df.02 <-df.01 %>%
group_by(year) %>%
summarize(total_cost = sum(value))
#total hunters by year
df.03 <- dplyr::filter(df.00, key == "certified_paid_hunting_license_holders")
df.04 <- df.03 %>%
group_by(year) %>%
summarize(total_hunters = sum(value))
#merge
df.05 <- dplyr::left_join(df.02, df.04)
#total cost per hunter
df.05$cost_per_hunter <- df.05$total_cost / df.05$total_hunters
df.05 <- dplyr::select(df.05, year, cost_per_hunter)
df.06 <- tidyr::gather(df.05, key = key, value = value, -year)
#total cost per person
df.07 <- dplyr::filter(df.00, key == "pop")
df.08 <- df.07 %>%
group_by(year) %>%
summarize(total_pop = sum(value))
df.09 <- merge(df.02, df.08)
df.09$cost_per_person <- df.09$total_cost / df.09$total_pop
df.09 <- dplyr::select(df.09, year, cost_per_person)
df.09 <- tidyr::gather(df.09, key = key, value = value, -year)
#join cost_per_hunter to cost_per_person
df.10 <- rbind(df.06, df.09)
df.10
}
create_df_for_infl_idx <- function(){
#build inflation adjusted series for comparison
source("./R/functions.R")
df.infl <- data.frame(total_cost_1970 = 101608879,
const_dol_2020 = convert_to_constant_dollars(price = 1010608879, from_date = 1970, to_date = 2019),
total_hunt_1970 = 15658318,
total_hunt_2020 = 15151724,
total_pop_1970 = 202455416,
total_pop_2020 = 331794992,
infl_adj_cost_per_hunter_1970 = 101608879 / 15658318,
infl_adj_cost_per_hunter_2020 = 681900329.51 / 15151724,
infl_adj_cost_per_person_1970 = 101608879 / 202455416,
infl_adj_cost_per_person_2020 = 681900329.51 / 331794992
)
df.infl <- tidyr::gather(df.infl, key = key, value = value)
df.infl$year <- stringr::str_sub(df.infl$key, -4)
df.infl$year <- as.integer(df.infl$year)
df.infl$key <-gsub("_1970|_2020", "", df.infl$key)
df.infl <- df.infl[grep("infl_adj", df.infl$key), ]
df.infl
}
create_plot_for_person <- function(){
df.10 <- create_df_cost_per_hunter()
df.infl <- create_df_for_infl_idx()
p <- ggplot(dplyr::filter(df.10, key == "cost_per_person"), aes(year, value, group = key, colour = key))
p <- p + geom_line()
p <- p + geom_point(size = 3)
p
p <- p + scale_y_continuous(name = "",
limits = c(0, 3),
breaks = c(0, .75, 1.50, 2.25, 3),
labels = c("$0.0", "$0.75", "$1.50", "$2.25", "$3.00")
)
p <- p + scale_color_manual(values=c("#ffa600", "#ffdc00"))
df.infl.2 <- dplyr::filter(df.infl, key == "infl_adj_cost_per_person")
df.infl.2$key <- gsub("_per_person", "", df.infl.2$key)
p <- p + geom_line(data = df.infl.2, aes(year, value, group = key, colour = key))
p <- p + geom_point(data = df.infl.2, aes(year, value, group = key, colour = key), size = 3)
p <- p + theme_gdocs()
p <- p + scale_x_continuous(name = "",
limits = c(1960, 2020))
p <- p + ggtitle("Gross Cost Per Person Indexed to Inflation\n1970-2020")
filename <- "./figs/gross_cost_per_person_indexed_to_inflation_1970_2020.jpg"
ggsave(p, filename = filename, height = 5, width = 8, dpi = 300, units = "in")
p
}
create_plot_for_person()
}
plot_cost_per_person_idx_for_inflation()
|
#' Add together two numbers
#' @param x A real number
#' @param y A real number
#' @return the sum of \code{x} and \code{y}
#' @examples
#' add(1, 1)
add <- function(x, y) {
  x + y
}
| /Reiss/R/add.R | no_license | jordaneissner/Eissner_project | R | false | false | 184 | r | #' Add together two numbers
#' @param x A real number
#' @param y A real number
#' @return the sume of \code{x} and \code{y}
#' @examples
#' add(1,1)
add <- function(x,y){
x + y
}
|
# Build a new lesson entry from the emajorDV template and register it in the
# course plan. String fields (topic/description) are user-facing content and
# are kept exactly as authored.
newlesson <- emajorDV::create_lessonTemplate()
newlesson$topic <- "在Rmd knit的html插入Javascript"
newlesson$description <- "過去我們在Chrome裡對Rmd的html output進行javascript的測試,然而有時在Chrome可以成功的效果,等到加入Rmd的after_body設定時效果卻出不來。emajorDV的webService模組可以解決這問題。"
newlesson$date <- "2020-09-01"
newlesson$onlineUrl <- "https://hackmd.io/@fnik77ehTXKTYsEGmxLihA/B1L9P8smv/edit"
newlesson$downloadUrl <- list(
  # Fix: was `zip=T`; TRUE is used because T can be reassigned.
  list(link="https://www.dropbox.com/s/lh2jdvv5623yn8p/storyboard.zip?dl=1",
       zip=TRUE)
)
library(emajorDV)
add_lesson(newlesson)
| /coursePlan.R | no_license | emajortaiwan/home | R | false | false | 629 | r | newlesson <- emajorDV::create_lessonTemplate()
newlesson$topic <- "在Rmd knit的html插入Javascript"
newlesson$description <- "過去我們在Chrome裡對Rmd的html output進行javascript的測試,然而有時在Chrome可以成功的效果,等到加入Rmd的after_body設定時效果卻出不來。emajorDV的webService模組可以解決這問題。"
newlesson$date <- "2020-09-01"
newlesson$onlineUrl <- "https://hackmd.io/@fnik77ehTXKTYsEGmxLihA/B1L9P8smv/edit"
newlesson$downloadUrl <- list(
list(link="https://www.dropbox.com/s/lh2jdvv5623yn8p/storyboard.zip?dl=1",
zip=T)
)
library(emajorDV)
add_lesson(newlesson)
|
## Making a dotplot of LefSe-identified OTUs--UPDATED
# 3.18.16
# Anna M. Seekatz
# adapted from http://polisci.msu.edu/jacoby/research/dotplots/tpm/Creating%20figures/Creating%20Figure%204.R
library(plyr)
library(reshape2)
library(Hmisc)
library(lattice)
library(dplyr)
# files used:
# updated_meta_fromkrishna_03.08.16.txt: updated meta
# suberin.relOTUs.txt: relative abundance of OTUs, filtered (same info applicable as before)
# erinfmt.new.taxonomy.names.txt: edited taxonomy files with OTU info (same info applicable as before)
# updated_erinsubset_all.lefse.results.txt: new lefse results, edited from mothur
# meta and OTU files
# Read sample metadata and the filtered relative-abundance OTU table, then
# merge them into `sub.all` (rows = samples, columns = metadata + OTUs).
meta<-read.table(file="updated_meta_fromkrishna_03.08.16.txt", header=TRUE)
sub.otus<-read.table(file="../suberin.relOTUs.txt", header=TRUE)
# Drop five excluded samples by sequence ID.
meta2<-meta[-which(meta$seqID %in% c("DA3240", "DA3260_P", "DA3298", "DA3299", "DA3376")), ]
meta<-meta2
# Replace the 506 OTU column names with readable taxonomy labels.
# NOTE(review): assumes the filtered tax rows come back in the same order as
# the OTU columns -- TODO confirm.
tax<-read.table(file="../erinfmt.new.taxonomy.names.txt", header=TRUE)
keep<-as.character(colnames(sub.otus[1:506]))
filtered.tax<-tax[tax$OTU %in% keep, ]
colnames(sub.otus)<-filtered.tax$taxname
sub.otus$sampleID<-rownames(sub.otus)
# merge meta and OTUs:
sub.otus$sampleID<-as.factor(sub.otus$sampleID)
sub.all<-merge(meta, sub.otus, by.x="seqID", by.y="sampleID")
rownames(sub.all)<-sub.all$seqID
# lefse results (filtered file to include only the significant ones)
lefse<-read.table(file="updated_erinsubset_all.lefse.results.txt", header=TRUE)
# get taxnames for lefse OTU file:
keep<-as.character(lefse$OTU)
filtered.tax<-tax[tax$OTU %in% keep, ]
# Attach readable names (NOTE(review): assumes filtered.tax row order matches
# lefse -- TODO confirm), then order by clinical class and keep the top 5.
lefse$otuname<-filtered.tax$taxname
lefse <- lefse[order(lefse$clinical_Class, lefse$otuname),]
lefse.otus<-as.character(lefse$otuname[1:5])
# for severe ones:
#lefse <- lefse[order(lefse$severe_Class, lefse$otuname),]
#lefse.otus<-as.character(lefse$otuname[1:7])
# these were all the sign. OTUs in the index samples of recurrent vs. nonrecurrent patients
#### Fig. 4B: plotting lefse OTUs by group
# these are the OTUs significant between positive and negative samples
# let's limit our files to only the lefse results, and whatever category you are using:
# Subset the merged table to the 5 LefSe-significant OTUs and pair them with
# the reinfection grouping variable.
otus.df<-sub.all[, which(colnames(sub.all) %in% lefse.otus)]
otus.df$sampleID<-rownames(otus.df)
groups<-sub.all[, c("seqID", "group_reinfection")]
otus<-merge(groups, otus.df, by.x="seqID", by.y="sampleID")
# Drop seqID; keeps group_reinfection + the 5 OTU columns.
otus<-otus[,2:7]
# now you have your list of significant otus in a dataframe!
# create your stats summary dataframe:
# column names: Otu00001_Enterobacteriaceae,Otu00003_Bacteroides,Otu00012_Clostridium_XI,Otu00014_Streptococcus, Otu00050_Bacteria
# column names, with "": "Otu00001_Enterobacteriaceae","Otu00003_Bacteroides","Otu00012_Clostridium_XI","Otu00014_Streptococcus", "Otu00050_Bacteria"
# Per-group summary statistics (mean, sd, median, se) for each significant
# OTU, melted to long format so that `value` vectors line up one row per
# (group, OTU) combination.
x<-otus
means<-aggregate(cbind(Otu00001_Enterobacteriaceae,Otu00003_Bacteroides,Otu00012_Clostridium_XI,Otu00014_Streptococcus, Otu00050_Bacteria) ~ group_reinfection,data = x,FUN=function(x) mean =mean(x) )
means2<-melt(means, id.vars = "group_reinfection", measure.vars = c("Otu00001_Enterobacteriaceae","Otu00003_Bacteroides","Otu00012_Clostridium_XI","Otu00014_Streptococcus", "Otu00050_Bacteria"))
otu.means<-means2$value
# standard deviation and mean +/- sd bounds
sds<-aggregate(cbind(Otu00001_Enterobacteriaceae,Otu00003_Bacteroides,Otu00012_Clostridium_XI,Otu00014_Streptococcus, Otu00050_Bacteria) ~ group_reinfection,data = x,FUN=function(x) sd =sd(x) )
sds2<-melt(sds, id.vars = "group_reinfection", measure.vars = c("Otu00001_Enterobacteriaceae","Otu00003_Bacteroides","Otu00012_Clostridium_XI","Otu00014_Streptococcus", "Otu00050_Bacteria"))
otu.sds<-sds2$value
otu.highsd<-otu.means+sds2$value
otu.lowsd<-otu.means-sds2$value
# medians
medians<-aggregate(cbind(Otu00001_Enterobacteriaceae,Otu00003_Bacteroides,Otu00012_Clostridium_XI,Otu00014_Streptococcus, Otu00050_Bacteria) ~ group_reinfection,data = x,FUN=function(x) median =median(x) )
medians2<-melt(medians, id.vars = "group_reinfection", measure.vars = c("Otu00001_Enterobacteriaceae","Otu00003_Bacteroides","Otu00012_Clostridium_XI","Otu00014_Streptococcus", "Otu00050_Bacteria"))
otu.medians<-medians2$value
# standard error (sd / sqrt(n)) and mean +/- se bounds (used for error bars)
ses<-aggregate(cbind(Otu00001_Enterobacteriaceae,Otu00003_Bacteroides,Otu00012_Clostridium_XI,Otu00014_Streptococcus, Otu00050_Bacteria) ~ group_reinfection,data = x,FUN=function(x) se=sd(x)/sqrt(length(x)) )
ses2<-melt(ses, id.vars = "group_reinfection", measure.vars = c("Otu00001_Enterobacteriaceae","Otu00003_Bacteroides","Otu00012_Clostridium_XI","Otu00014_Streptococcus", "Otu00050_Bacteria"))
otu.ses<-ses2$value
otu.highse<-otu.means+ses2$value
otu.lowse<-otu.means-ses2$value
# 25th/75th percentiles per group. Fixes from review:
#   * lowq previously computed quantile(x, 0.75) -- now 0.25 as its name implies
#   * highq2/lowq2 previously melted `ses` (copy-paste slip) -- now melt the
#     quantile frames themselves
#   * quantiles are absolute values, so they are used directly rather than
#     added to / subtracted from the median
# (These columns feed summary$highq/lowq below, which the mean +/- se
# dotplots do not use, so the published figures are unaffected.)
highq<-aggregate(cbind(Otu00001_Enterobacteriaceae,Otu00003_Bacteroides,Otu00012_Clostridium_XI,Otu00014_Streptococcus, Otu00050_Bacteria) ~ group_reinfection,data = x,FUN=function(x) highq=quantile(x, 0.75) )
highq2<-melt(highq, id.vars = "group_reinfection", measure.vars = c("Otu00001_Enterobacteriaceae","Otu00003_Bacteroides","Otu00012_Clostridium_XI","Otu00014_Streptococcus", "Otu00050_Bacteria"))
otu.highq<-highq2$value
lowq<-aggregate(cbind(Otu00001_Enterobacteriaceae,Otu00003_Bacteroides,Otu00012_Clostridium_XI,Otu00014_Streptococcus, Otu00050_Bacteria) ~ group_reinfection,data = x,FUN=function(x) lowq=quantile(x, 0.25) )
lowq2<-melt(lowq, id.vars = "group_reinfection", measure.vars = c("Otu00001_Enterobacteriaceae","Otu00003_Bacteroides","Otu00012_Clostridium_XI","Otu00014_Streptococcus", "Otu00050_Bacteria"))
otu.lowq<-lowq2$value
# add all the data together:
# Assemble one summary row per (group, OTU): mean/sd/se/median plus the
# high/low bounds computed above.
labels<-paste(as.character(means2$group_reinfection), as.character(means2$variable))
summary<-data.frame(label=labels,
	mean = otu.means,
	sd = otu.sds,
	se = otu.ses,
	median = otu.medians,
	highq = otu.highq,
	lowq = otu.lowq,
	highse = otu.highse,
	lowse = otu.lowse,
	highsd = otu.highsd,
	lowsd = otu.lowsd
	)
# Lock the current row order in as factor levels so plots keep this ordering.
summary$sequence <- seq(1, length(summary$label))
summary$label <- reorder(summary$label, summary$sequence)
summary$otu <- means2$variable
summary$group <-means2$group_reinfection
# plot it: grouped dotplots of mean relative abundance with +/- se bars
labels <- summary$otu
labels <- gsub("_", ": ", labels)
summary$otu<-gsub("_", ": ", summary$otu)
averages <- summary$mean
ranges <- summary$se
groupnames<-as.character(unique(summary$group))
# option 1 (used as manuscript Supplemental Figure S1): grouped by sample group
dotchart(averages, labels=labels, xlab='relative abundance (mean + se)', pch=20, col=c("chartreuse3", "orange", "darkgoldenrod"),
	groups=summary$group, cex=0.7,
	xlim=c(min(averages-ranges)-1, max(averages+ranges)+1))
# Error bars: mean +/- se. The y-coordinates below are hand-matched to the
# row positions dotchart() assigns in a grouped plot -- fragile; re-derive
# them if the number of groups or OTUs changes.
segments(averages-ranges,
	c(15, 8, 1, 16, 9, 2, 17, 10, 3, 18, 11, 4, 19, 12, 5),
	averages+ranges, c(15, 8, 1, 16, 9, 2, 17, 10, 3, 18, 11, 4, 19, 12, 5),
	col=c("chartreuse3", "orange", "darkgoldenrod"), lwd=2)
# note: adding the bars on your graph is a bit confusing
# you may have to play around with the ordering, since this is a grouped dotplot
# future ways of making this step simpler are appreciated!
# option 2: grouped by OTU instead of by sample group
## this one was used for Fig 4:
dotchart(averages, labels="", xlab='relative abundance (mean + se)', pch=20, col=c("chartreuse3", "orange", "darkgoldenrod"),
	groups=as.factor(summary$otu), cex=0.8,
	xlim=c(min(averages-ranges)-1, max(averages+ranges)+1))
segments(averages-ranges,
	c(21:23, 16:18, 11:13, 6:8, 1:3),
	averages+ranges, c(21:23, 16:18, 11:13, 6:8, 1:3),
	col=c("chartreuse3", "orange", "darkgoldenrod"), lwd=2)
legend("bottomright", groupnames, col=c("chartreuse3", "orange", "darkgoldenrod"), pch=19, cex=0.6, bg="white")
#Looks BEAUTIFUL
#### Fig. 4A: plotting lefse OTUs by clinical status (negative vs. positive)
# these are the OTUs significant between positive and negative samples
# Subset the merged meta/OTU table to the LefSe-significant OTUs only:
otus.df<-sub.all[, which(colnames(sub.all) %in% lefse.otus)]
otus.df$sampleID<-rownames(otus.df)
# Pair each sample with its clinical status:
groups<-sub.all[, c("seqID", "POS_NEG")]
# BUG FIX: 'groups' has no "sampleID" column, so the original first attempt
# (merge(..., by="sampleID")) errors at runtime; only the corrected merge on
# seqID vs. sampleID is kept.
otus.clinical<-merge(groups, otus.df, by.x="seqID", by.y="sampleID")
# drop seqID; keep POS_NEG plus the 5 OTU columns:
otus.clinical<-otus.clinical[,2:7]
# create your stats summary dataframe:
# column names: Otu00001_Enterobacteriaceae,Otu00003_Bacteroides,Otu00012_Clostridium_XI,Otu00014_Streptococcus, Otu00050_Bacteria
# column names, with "": "Otu00001_Enterobacteriaceae","Otu00003_Bacteroides","Otu00012_Clostridium_XI","Otu00014_Streptococcus", "Otu00050_Bacteria"
x<-otus.clinical
# the LefSe-significant OTUs summarised below:
otu.cols<-c("Otu00001_Enterobacteriaceae","Otu00003_Bacteroides","Otu00012_Clostridium_XI","Otu00014_Streptococcus", "Otu00050_Bacteria")
# mean relative abundance of each OTU per clinical group:
means<-aggregate(cbind(Otu00001_Enterobacteriaceae,Otu00003_Bacteroides,Otu00012_Clostridium_XI,Otu00014_Streptococcus, Otu00050_Bacteria) ~ POS_NEG,data = x,FUN=mean)
means2<-melt(means, id.vars = "POS_NEG", measure.vars = otu.cols)
otu.means<-means2$value
# standard deviation per group:
sds<-aggregate(cbind(Otu00001_Enterobacteriaceae,Otu00003_Bacteroides,Otu00012_Clostridium_XI,Otu00014_Streptococcus, Otu00050_Bacteria) ~ POS_NEG,data = x,FUN=sd)
sds2<-melt(sds, id.vars = "POS_NEG", measure.vars = otu.cols)
otu.sds<-sds2$value
otu.highsd<-otu.means+sds2$value
otu.lowsd<-otu.means-sds2$value
# median per group:
medians<-aggregate(cbind(Otu00001_Enterobacteriaceae,Otu00003_Bacteroides,Otu00012_Clostridium_XI,Otu00014_Streptococcus, Otu00050_Bacteria) ~ POS_NEG,data = x,FUN=median)
medians2<-melt(medians, id.vars = "POS_NEG", measure.vars = otu.cols)
otu.medians<-medians2$value
# standard error of the mean per group:
ses<-aggregate(cbind(Otu00001_Enterobacteriaceae,Otu00003_Bacteroides,Otu00012_Clostridium_XI,Otu00014_Streptococcus, Otu00050_Bacteria) ~ POS_NEG,data = x,FUN=function(v) sd(v)/sqrt(length(v)))
ses2<-melt(ses, id.vars = "POS_NEG", measure.vars = otu.cols)
otu.ses<-ses2$value
otu.highse<-otu.means+ses2$value
otu.lowse<-otu.means-ses2$value
# upper/lower quartiles per group.
# BUG FIX: the original melted 'ses' (not 'highq'/'lowq') and computed BOTH
# bounds from quantile(x, 0.75); quantiles are absolute values, so they are
# used directly rather than offset from the median.
highq<-aggregate(cbind(Otu00001_Enterobacteriaceae,Otu00003_Bacteroides,Otu00012_Clostridium_XI,Otu00014_Streptococcus, Otu00050_Bacteria) ~ POS_NEG,data = x,FUN=function(v) quantile(v, 0.75))
highq2<-melt(highq, id.vars = "POS_NEG", measure.vars = otu.cols)
otu.highq<-highq2$value
lowq<-aggregate(cbind(Otu00001_Enterobacteriaceae,Otu00003_Bacteroides,Otu00012_Clostridium_XI,Otu00014_Streptococcus, Otu00050_Bacteria) ~ POS_NEG,data = x,FUN=function(v) quantile(v, 0.25))
lowq2<-melt(lowq, id.vars = "POS_NEG", measure.vars = otu.cols)
otu.lowq<-lowq2$value
# add all the data together:
# one row per clinical group x OTU combination; 'label' is "group OTU"
labels<-paste(as.character(means2$POS_NEG), as.character(means2$variable))
summary<-data.frame(label=labels,
mean = otu.means,
sd = otu.sds,
se = otu.ses,
median = otu.medians,
highq = otu.highq,
lowq = otu.lowq,
highse = otu.highse,
lowse = otu.lowse,
highsd = otu.highsd,
lowsd = otu.lowsd
)
# preserve the original row order when 'label' is used as a factor:
summary$sequence <- seq(1, length(summary$label))
summary$label <- reorder(summary$label, summary$sequence)
summary$otu <- means2$variable
summary$group <-means2$POS_NEG
# plot it:
# prettify OTU names: "Otu00001_Enterobacteriaceae" -> "Otu1: Enterobacteriaceae"
labels <- summary$otu
labels <- gsub("_", ": ", labels)
labels <- gsub("000", "", labels)
summary$otu<-gsub("_", ": ", summary$otu)
summary$otu<-gsub("000", "", summary$otu)
averages <- summary$mean
ranges <- summary$se
groupnames<-as.character(unique(summary$group))
# option 1 (used as manuscript Figure 4):
# panels grouped by OTU; one dot per clinical group:
dotchart(averages, labels="", xlab='relative abundance (mean + se)', pch=20, col=c("grey47", "magenta"),
groups=as.factor(summary$otu), cex=0.8, cex.lab=0.8,
xlim=c(min(averages-ranges)-1, max(averages+ranges)+1))
# error bars (mean +/- se); y-positions hand-matched to dotchart's row
# layout (5 OTU panels x 2 groups):
segments(averages-ranges,
c(17:18, 13:14, 9:10, 5:6, 1:2),
averages+ranges, c(17:18, 13:14, 9:10, 5:6, 1:2),
col=c("grey47", "magenta"), lwd=2)
legend("bottomright", groupnames, col=c("grey47", "magenta"), pch=19, cex=0.6, bg="white")
# option 2:
# grouped by clinical group instead, with OTU names as labels:
dotchart(averages, labels=labels, xlab='relative abundance (mean + se)', pch=20, col=c("grey47", "magenta"),
groups=summary$group, cex=0.7,
xlim=c(min(averages-ranges)-1, max(averages+ranges)+1))
segments(averages-ranges,
c(8, 1, 9, 2, 10, 3, 11, 4, 12, 5),
averages+ranges, c(8, 1, 9, 2, 10, 3, 11, 4, 12, 5),
col=c("grey47", "magenta"), lwd=2)
# note: adding the bars on your graph is a bit confusing
# you may have to play around with the ordering, since this is a grouped dotplot
# future ways of making this step simpler are appreciated!
###
#---
###
# for severity:
# used the same steps above to get 'lefse.otus' that were different with severity
# Subset the merged meta/OTU table to the severity-significant OTUs:
otus.df<-sub.all[, which(colnames(sub.all) %in% lefse.otus)]
otus.df$sampleID<-rownames(otus.df)
# pair each sample with its severity classification:
groups<-sub.all[, c("seqID", "severeidsa")]
# merge on seqID vs. sampleID, then drop seqID (keep severeidsa + 7 OTU columns):
otus.severe<-merge(groups, otus.df, by.x="seqID", by.y="sampleID")
otus.severe<-otus.severe[,2:9]
# create your stats summary dataframe:
# column names: Otu00004_Escherichia, Otu00015_Lachnospiraceae, Otu00017_Bifidobacterium, Otu00030_Blautia, Otu00044_Flavonifractor, Otu00050_Bacteria, Otu00097_Lactococcus
# column names, with "": "Otu00004_Escherichia", "Otu00015_Lachnospiraceae", "Otu00017_Bifidobacterium", "Otu00030_Blautia", "Otu00044_Flavonifractor", "Otu00050_Bacteria", "Otu00097_Lactococcus"
x<-otus.severe
# the severity-significant OTUs summarised below:
otu.cols<-c("Otu00004_Escherichia", "Otu00015_Lachnospiraceae", "Otu00017_Bifidobacterium", "Otu00030_Blautia", "Otu00044_Flavonifractor", "Otu00050_Bacteria", "Otu00097_Lactococcus")
# mean relative abundance of each OTU per severity group:
means<-aggregate(cbind(Otu00004_Escherichia, Otu00015_Lachnospiraceae, Otu00017_Bifidobacterium, Otu00030_Blautia, Otu00044_Flavonifractor, Otu00050_Bacteria, Otu00097_Lactococcus) ~ severeidsa,data = x,FUN=mean)
means2<-melt(means, id.vars = "severeidsa", measure.vars = otu.cols)
otu.means<-means2$value
# standard deviation per group:
sds<-aggregate(cbind(Otu00004_Escherichia, Otu00015_Lachnospiraceae, Otu00017_Bifidobacterium, Otu00030_Blautia, Otu00044_Flavonifractor, Otu00050_Bacteria, Otu00097_Lactococcus) ~ severeidsa,data = x,FUN=sd)
sds2<-melt(sds, id.vars = "severeidsa", measure.vars = otu.cols)
otu.sds<-sds2$value
otu.highsd<-otu.means+sds2$value
otu.lowsd<-otu.means-sds2$value
# median per group:
medians<-aggregate(cbind(Otu00004_Escherichia, Otu00015_Lachnospiraceae, Otu00017_Bifidobacterium, Otu00030_Blautia, Otu00044_Flavonifractor, Otu00050_Bacteria, Otu00097_Lactococcus) ~ severeidsa,data = x,FUN=median)
medians2<-melt(medians, id.vars = "severeidsa", measure.vars = otu.cols)
otu.medians<-medians2$value
# standard error of the mean per group:
ses<-aggregate(cbind(Otu00004_Escherichia, Otu00015_Lachnospiraceae, Otu00017_Bifidobacterium, Otu00030_Blautia, Otu00044_Flavonifractor, Otu00050_Bacteria, Otu00097_Lactococcus) ~ severeidsa,data = x,FUN=function(v) sd(v)/sqrt(length(v)))
ses2<-melt(ses, id.vars = "severeidsa", measure.vars = otu.cols)
otu.ses<-ses2$value
otu.highse<-otu.means+ses2$value
otu.lowse<-otu.means-ses2$value
# upper/lower quartiles per group.
# BUG FIX: the original melted 'ses' (not 'highq'/'lowq') and computed BOTH
# bounds from quantile(x, 0.75); quantiles are absolute values, so they are
# used directly rather than offset from the median.
highq<-aggregate(cbind(Otu00004_Escherichia, Otu00015_Lachnospiraceae, Otu00017_Bifidobacterium, Otu00030_Blautia, Otu00044_Flavonifractor, Otu00050_Bacteria, Otu00097_Lactococcus) ~ severeidsa,data = x,FUN=function(v) quantile(v, 0.75))
highq2<-melt(highq, id.vars = "severeidsa", measure.vars = otu.cols)
otu.highq<-highq2$value
lowq<-aggregate(cbind(Otu00004_Escherichia, Otu00015_Lachnospiraceae, Otu00017_Bifidobacterium, Otu00030_Blautia, Otu00044_Flavonifractor, Otu00050_Bacteria, Otu00097_Lactococcus) ~ severeidsa,data = x,FUN=function(v) quantile(v, 0.25))
lowq2<-melt(lowq, id.vars = "severeidsa", measure.vars = otu.cols)
otu.lowq<-lowq2$value
# add all the data together:
# one row per severity group x OTU combination; 'label' is "group OTU"
labels<-paste(as.character(means2$severeidsa), as.character(means2$variable))
summary<-data.frame(label=labels,
mean = otu.means,
sd = otu.sds,
se = otu.ses,
median = otu.medians,
highq = otu.highq,
lowq = otu.lowq,
highse = otu.highse,
lowse = otu.lowse,
highsd = otu.highsd,
lowsd = otu.lowsd
)
# preserve the original row order when 'label' is used as a factor:
summary$sequence <- seq(1, length(summary$label))
summary$label <- reorder(summary$label, summary$sequence)
summary$otu <- means2$variable
summary$group <-means2$severeidsa
# plot it:
# prettify OTU names: "Otu00004_Escherichia" -> "Otu4: Escherichia"
labels <- summary$otu
labels <- gsub("_", ": ", labels)
labels <- gsub("000", "", labels)
summary$otu<-gsub("_", ": ", summary$otu)
summary$otu<-gsub("000", "", summary$otu)
averages <- summary$mean
ranges <- summary$se
groupnames<-as.character(unique(summary$group))
# option 1 (used as manuscript Figure 4):
# since the first OTU is significantly more abundant than others, let's split these up into 2 quadrants to emphasize the others...
# first panel: the dominant OTU (rows 1-2 = both severity groups for OTU 1)
summary1<-summary[1:2, ]
# BUG FIX: the original cleaned 'labels' (the full-length vector) instead of
# 'labels1'/'labels2', leaving the panel labels the wrong length and content.
labels1 <- summary1$otu
labels1 <- gsub("_", ": ", labels1)
labels1 <- gsub("000", "", labels1)
summary1$otu<-gsub("_", ": ", summary1$otu)
summary1$otu<-gsub("000", "", summary1$otu)
averages1 <- summary1$mean
ranges1 <- summary1$se
groupnames1<-as.character(unique(summary1$group))
# second panel: the remaining OTUs (rows 3-14 = 6 OTUs x 2 groups)
summary2<-summary[3:14, ]
labels2 <- summary2$otu
labels2 <- gsub("_", ": ", labels2)
labels2 <- gsub("000", "", labels2)
summary2$otu<-gsub("_", ": ", summary2$otu)
summary2$otu<-gsub("000", "", summary2$otu)
averages2 <- summary2$mean
ranges2 <- summary2$se
groupnames2<-as.character(unique(summary2$group))
# summary 1:
# two stacked panels: dominant OTU on top, the rest below
par(mfrow=c(2,1))
dotchart(averages1, labels="", xlab='relative abundance (mean + se)', pch=20, col=c("blue", "red"),
groups=as.factor(summary1$otu), cex=0.8, cex.lab=0.8,
xlim=c(min(averages1-ranges1)-1, max(averages1+ranges1)+1))
# error bars (mean +/- se) for the single-OTU panel:
segments(averages1-ranges1,
c(1:2),
averages1+ranges1, c(1:2),
col=c("blue", "red"), lwd=2)
legend("bottomright", c("not severe", "severe"), col=c("blue", "red"), pch=19, cex=0.6, bg="white")
# summary 2:
dotchart(averages2, labels="", xlab='relative abundance (mean + se)', pch=20, col=c("blue", "red"),
groups=as.factor(summary2$otu), cex=0.8, cex.lab=0.8,
xlim=c(min(averages2-ranges2)-1, max(averages2+ranges2)+1))
# y-positions hand-matched to dotchart's layout (6 OTU panels x 2 groups):
segments(averages2-ranges2,
c(21:22, 17:18, 13:14, 9:10, 5:6, 1:2),
averages2+ranges2, c(21:22, 17:18, 13:14, 9:10, 5:6, 1:2),
col=c("blue", "red"), lwd=2)
legend("bottomright", c("not severe", "severe"), col=c("blue", "red"), pch=19, cex=0.6, bg="white")
### this still doesn't give even graphs...just go with summary, and chop it down:
# summary (all):
# single panel with all 7 OTUs x 2 severity groups:
dotchart(averages, labels="", xlab='relative abundance (mean + se)', pch=20, col=c("blue", "red"),
groups=as.factor(summary$otu), cex=0.8, cex.lab=0.8,
xlim=c(0, max(averages+ranges)+1))
segments(averages-ranges,
c(25:26, 21:22, 17:18, 13:14, 9:10, 5:6, 1:2),
averages+ranges, c(25:26, 21:22, 17:18, 13:14, 9:10, 5:6, 1:2),
col=c("blue", "red"), lwd=2)
legend("bottomright", c("not severe", "severe"), col=c("blue", "red"), pch=19, cex=0.6, bg="white")
# option 2:
# grouped by severity group with OTU names as labels:
dotchart(averages, labels=labels, xlab='relative abundance (mean + se)', pch=20, col=c("blue", "red"),
groups=summary$group, cex=0.7,
xlim=c(min(averages-ranges)-1, max(averages+ranges)+1))
segments(averages-ranges,
c(8, 1, 9, 2, 10, 3, 11, 4, 12, 5),
averages+ranges, c(8, 1, 9, 2, 10, 3, 11, 4, 12, 5),
col=c("blue", "red"), lwd=2)
# note: adding the bars on your graph is a bit confusing
# you may have to play around with the ordering, since this is a grouped dotplot
# future ways of making this step simpler are appreciated!
| /REVISED_manuscript.code/UPDATED_Fig3.dotplot.R | no_license | ayitbarek/ERIN.recurrence | R | false | false | 20,486 | r | ## Making a dotplot of LefSe-identified OTUs--UPDATED
# 3.18.16
# Anna M. Seekatz
# adapted from http://polisci.msu.edu/jacoby/research/dotplots/tpm/Creating%20figures/Creating%20Figure%204.R
library(plyr)
library(reshape2)
library(Hmisc)
library(lattice)
library(dplyr)
# files used:
# updated_meta_fromkrishna_03.08.16.txt: updated meta
# suberin.relOTUs.txt: relative abundance of OTUs, filtered (same info applicable as before)
# erinfmt.new.taxonomy.names.txt: edited taxonomy files with OTU info (same info applicable as before)
# updated_erinsubset_all.lefse.results.txt: new lefse results, edited from mothur
# meta and OTU files
meta<-read.table(file="updated_meta_fromkrishna_03.08.16.txt", header=TRUE)
sub.otus<-read.table(file="../suberin.relOTUs.txt", header=TRUE)
meta2<-meta[-which(meta$seqID %in% c("DA3240", "DA3260_P", "DA3298", "DA3299", "DA3376")), ]
meta<-meta2
tax<-read.table(file="../erinfmt.new.taxonomy.names.txt", header=TRUE)
keep<-as.character(colnames(sub.otus[1:506]))
filtered.tax<-tax[tax$OTU %in% keep, ]
colnames(sub.otus)<-filtered.tax$taxname
sub.otus$sampleID<-rownames(sub.otus)
# merge meta and OTUs:
sub.otus$sampleID<-as.factor(sub.otus$sampleID)
sub.all<-merge(meta, sub.otus, by.x="seqID", by.y="sampleID")
rownames(sub.all)<-sub.all$seqID
# lefse results (filtered file to include only the significant ones)
lefse<-read.table(file="updated_erinsubset_all.lefse.results.txt", header=TRUE)
# get taxnames for lefse OTU file:
keep<-as.character(lefse$OTU)
filtered.tax<-tax[tax$OTU %in% keep, ]
lefse$otuname<-filtered.tax$taxname
lefse <- lefse[order(lefse$clinical_Class, lefse$otuname),]
lefse.otus<-as.character(lefse$otuname[1:5])
# for severe ones:
#lefse <- lefse[order(lefse$severe_Class, lefse$otuname),]
#lefse.otus<-as.character(lefse$otuname[1:7])
# these were all the sign. OTUs in the index samples of recurrent vs. nonrecurrent patients
#### Fig. 4B: plotting lefse OTUs by group
# these are the OTUs significant between positive and negative samples
# let's limit our files to only the lefse results, and whatever category you are using:
otus.df<-sub.all[, which(colnames(sub.all) %in% lefse.otus)]
otus.df$sampleID<-rownames(otus.df)
groups<-sub.all[, c("seqID", "group_reinfection")]
otus<-merge(groups, otus.df, by.x="seqID", by.y="sampleID")
otus<-otus[,2:7]
# now you have your list of significant otus in a dataframe!
# create your stats summary dataframe:
# column names: Otu00001_Enterobacteriaceae,Otu00003_Bacteroides,Otu00012_Clostridium_XI,Otu00014_Streptococcus, Otu00050_Bacteria
# column names, with "": "Otu00001_Enterobacteriaceae","Otu00003_Bacteroides","Otu00012_Clostridium_XI","Otu00014_Streptococcus", "Otu00050_Bacteria"
x<-otus
# the LefSe-significant OTUs summarised below:
otu.cols<-c("Otu00001_Enterobacteriaceae","Otu00003_Bacteroides","Otu00012_Clostridium_XI","Otu00014_Streptococcus", "Otu00050_Bacteria")
# mean relative abundance of each OTU per reinfection group:
means<-aggregate(cbind(Otu00001_Enterobacteriaceae,Otu00003_Bacteroides,Otu00012_Clostridium_XI,Otu00014_Streptococcus, Otu00050_Bacteria) ~ group_reinfection,data = x,FUN=mean)
means2<-melt(means, id.vars = "group_reinfection", measure.vars = otu.cols)
otu.means<-means2$value
# standard deviation per group:
sds<-aggregate(cbind(Otu00001_Enterobacteriaceae,Otu00003_Bacteroides,Otu00012_Clostridium_XI,Otu00014_Streptococcus, Otu00050_Bacteria) ~ group_reinfection,data = x,FUN=sd)
sds2<-melt(sds, id.vars = "group_reinfection", measure.vars = otu.cols)
otu.sds<-sds2$value
otu.highsd<-otu.means+sds2$value
otu.lowsd<-otu.means-sds2$value
# median per group:
medians<-aggregate(cbind(Otu00001_Enterobacteriaceae,Otu00003_Bacteroides,Otu00012_Clostridium_XI,Otu00014_Streptococcus, Otu00050_Bacteria) ~ group_reinfection,data = x,FUN=median)
medians2<-melt(medians, id.vars = "group_reinfection", measure.vars = otu.cols)
otu.medians<-medians2$value
# standard error of the mean per group:
ses<-aggregate(cbind(Otu00001_Enterobacteriaceae,Otu00003_Bacteroides,Otu00012_Clostridium_XI,Otu00014_Streptococcus, Otu00050_Bacteria) ~ group_reinfection,data = x,FUN=function(v) sd(v)/sqrt(length(v)))
ses2<-melt(ses, id.vars = "group_reinfection", measure.vars = otu.cols)
otu.ses<-ses2$value
otu.highse<-otu.means+ses2$value
otu.lowse<-otu.means-ses2$value
# upper/lower quartiles per group.
# BUG FIX: the original melted 'ses' (not 'highq'/'lowq') and computed BOTH
# bounds from quantile(x, 0.75); quantiles are absolute values, so they are
# used directly rather than offset from the median.
highq<-aggregate(cbind(Otu00001_Enterobacteriaceae,Otu00003_Bacteroides,Otu00012_Clostridium_XI,Otu00014_Streptococcus, Otu00050_Bacteria) ~ group_reinfection,data = x,FUN=function(v) quantile(v, 0.75))
highq2<-melt(highq, id.vars = "group_reinfection", measure.vars = otu.cols)
otu.highq<-highq2$value
lowq<-aggregate(cbind(Otu00001_Enterobacteriaceae,Otu00003_Bacteroides,Otu00012_Clostridium_XI,Otu00014_Streptococcus, Otu00050_Bacteria) ~ group_reinfection,data = x,FUN=function(v) quantile(v, 0.25))
lowq2<-melt(lowq, id.vars = "group_reinfection", measure.vars = otu.cols)
otu.lowq<-lowq2$value
# add all the data together:
labels<-paste(as.character(means2$group_reinfection), as.character(means2$variable))
summary<-data.frame(label=labels,
mean = otu.means,
sd = otu.sds,
se = otu.ses,
median = otu.medians,
highq = otu.highq,
lowq = otu.lowq,
highse = otu.highse,
lowse = otu.lowse,
highsd = otu.highsd,
lowsd = otu.lowsd
)
summary$sequence <- seq(1, length(summary$label))
summary$label <- reorder(summary$label, summary$sequence)
summary$otu <- means2$variable
summary$group <-means2$group_reinfection
# plot it:
labels <- summary$otu
labels <- gsub("_", ": ", labels)
summary$otu<-gsub("_", ": ", summary$otu)
averages <- summary$mean
ranges <- summary$se
groupnames<-as.character(unique(summary$group))
# option 1 (used as manuscript Supplemental Figure S1:
dotchart(averages, labels=labels, xlab='relative abundance (mean + se)', pch=20, col=c("chartreuse3", "orange", "darkgoldenrod"),
groups=summary$group, cex=0.7,
xlim=c(min(averages-ranges)-1, max(averages+ranges)+1))
segments(averages-ranges,
c(15, 8, 1, 16, 9, 2, 17, 10, 3, 18, 11, 4, 19, 12, 5),
averages+ranges, c(15, 8, 1, 16, 9, 2, 17, 10, 3, 18, 11, 4, 19, 12, 5),
col=c("chartreuse3", "orange", "darkgoldenrod"), lwd=2)
# note: adding the bars on your graph is a bit confusing
# you may have to play around with the ordering, since this is a grouped dotplot
# future ways of making this step simpler are appreciated!
# option 2:
## this one was used for Fig 4:
dotchart(averages, labels="", xlab='relative abundance (mean + se)', pch=20, col=c("chartreuse3", "orange", "darkgoldenrod"),
groups=as.factor(summary$otu), cex=0.8,
xlim=c(min(averages-ranges)-1, max(averages+ranges)+1))
segments(averages-ranges,
c(21:23, 16:18, 11:13, 6:8, 1:3),
averages+ranges, c(21:23, 16:18, 11:13, 6:8, 1:3),
col=c("chartreuse3", "orange", "darkgoldenrod"), lwd=2)
legend("bottomright", groupnames, col=c("chartreuse3", "orange", "darkgoldenrod"), pch=19, cex=0.6, bg="white")
#Looks BEAUTIFUL
#### Fig. 4A: plotting lefse OTUs by clinical status (negative vs. positive)
# these are the OTUs significant between positive and negative samples
# Subset the merged meta/OTU table to the LefSe-significant OTUs only:
otus.df<-sub.all[, which(colnames(sub.all) %in% lefse.otus)]
otus.df$sampleID<-rownames(otus.df)
# Pair each sample with its clinical status:
groups<-sub.all[, c("seqID", "POS_NEG")]
# BUG FIX: 'groups' has no "sampleID" column, so the original first attempt
# (merge(..., by="sampleID")) errors at runtime; only the corrected merge on
# seqID vs. sampleID is kept.
otus.clinical<-merge(groups, otus.df, by.x="seqID", by.y="sampleID")
# drop seqID; keep POS_NEG plus the 5 OTU columns:
otus.clinical<-otus.clinical[,2:7]
# create your stats summary dataframe:
# column names: Otu00001_Enterobacteriaceae,Otu00003_Bacteroides,Otu00012_Clostridium_XI,Otu00014_Streptococcus, Otu00050_Bacteria
# column names, with "": "Otu00001_Enterobacteriaceae","Otu00003_Bacteroides","Otu00012_Clostridium_XI","Otu00014_Streptococcus", "Otu00050_Bacteria"
x<-otus.clinical
# the LefSe-significant OTUs summarised below:
otu.cols<-c("Otu00001_Enterobacteriaceae","Otu00003_Bacteroides","Otu00012_Clostridium_XI","Otu00014_Streptococcus", "Otu00050_Bacteria")
# mean relative abundance of each OTU per clinical group:
means<-aggregate(cbind(Otu00001_Enterobacteriaceae,Otu00003_Bacteroides,Otu00012_Clostridium_XI,Otu00014_Streptococcus, Otu00050_Bacteria) ~ POS_NEG,data = x,FUN=mean)
means2<-melt(means, id.vars = "POS_NEG", measure.vars = otu.cols)
otu.means<-means2$value
# standard deviation per group:
sds<-aggregate(cbind(Otu00001_Enterobacteriaceae,Otu00003_Bacteroides,Otu00012_Clostridium_XI,Otu00014_Streptococcus, Otu00050_Bacteria) ~ POS_NEG,data = x,FUN=sd)
sds2<-melt(sds, id.vars = "POS_NEG", measure.vars = otu.cols)
otu.sds<-sds2$value
otu.highsd<-otu.means+sds2$value
otu.lowsd<-otu.means-sds2$value
# median per group:
medians<-aggregate(cbind(Otu00001_Enterobacteriaceae,Otu00003_Bacteroides,Otu00012_Clostridium_XI,Otu00014_Streptococcus, Otu00050_Bacteria) ~ POS_NEG,data = x,FUN=median)
medians2<-melt(medians, id.vars = "POS_NEG", measure.vars = otu.cols)
otu.medians<-medians2$value
# standard error of the mean per group:
ses<-aggregate(cbind(Otu00001_Enterobacteriaceae,Otu00003_Bacteroides,Otu00012_Clostridium_XI,Otu00014_Streptococcus, Otu00050_Bacteria) ~ POS_NEG,data = x,FUN=function(v) sd(v)/sqrt(length(v)))
ses2<-melt(ses, id.vars = "POS_NEG", measure.vars = otu.cols)
otu.ses<-ses2$value
otu.highse<-otu.means+ses2$value
otu.lowse<-otu.means-ses2$value
# upper/lower quartiles per group.
# BUG FIX: the original melted 'ses' (not 'highq'/'lowq') and computed BOTH
# bounds from quantile(x, 0.75); quantiles are absolute values, so they are
# used directly rather than offset from the median.
highq<-aggregate(cbind(Otu00001_Enterobacteriaceae,Otu00003_Bacteroides,Otu00012_Clostridium_XI,Otu00014_Streptococcus, Otu00050_Bacteria) ~ POS_NEG,data = x,FUN=function(v) quantile(v, 0.75))
highq2<-melt(highq, id.vars = "POS_NEG", measure.vars = otu.cols)
otu.highq<-highq2$value
lowq<-aggregate(cbind(Otu00001_Enterobacteriaceae,Otu00003_Bacteroides,Otu00012_Clostridium_XI,Otu00014_Streptococcus, Otu00050_Bacteria) ~ POS_NEG,data = x,FUN=function(v) quantile(v, 0.25))
lowq2<-melt(lowq, id.vars = "POS_NEG", measure.vars = otu.cols)
otu.lowq<-lowq2$value
# add all the data together:
labels<-paste(as.character(means2$POS_NEG), as.character(means2$variable))
summary<-data.frame(label=labels,
mean = otu.means,
sd = otu.sds,
se = otu.ses,
median = otu.medians,
highq = otu.highq,
lowq = otu.lowq,
highse = otu.highse,
lowse = otu.lowse,
highsd = otu.highsd,
lowsd = otu.lowsd
)
summary$sequence <- seq(1, length(summary$label))
summary$label <- reorder(summary$label, summary$sequence)
summary$otu <- means2$variable
summary$group <-means2$POS_NEG
# plot it:
labels <- summary$otu
labels <- gsub("_", ": ", labels)
labels <- gsub("000", "", labels)
summary$otu<-gsub("_", ": ", summary$otu)
summary$otu<-gsub("000", "", summary$otu)
averages <- summary$mean
ranges <- summary$se
groupnames<-as.character(unique(summary$group))
# option 1 (used as manuscript Figure 4):
dotchart(averages, labels="", xlab='relative abundance (mean + se)', pch=20, col=c("grey47", "magenta"),
groups=as.factor(summary$otu), cex=0.8, cex.lab=0.8,
xlim=c(min(averages-ranges)-1, max(averages+ranges)+1))
segments(averages-ranges,
c(17:18, 13:14, 9:10, 5:6, 1:2),
averages+ranges, c(17:18, 13:14, 9:10, 5:6, 1:2),
col=c("grey47", "magenta"), lwd=2)
legend("bottomright", groupnames, col=c("grey47", "magenta"), pch=19, cex=0.6, bg="white")
# option 2:
dotchart(averages, labels=labels, xlab='relative abundance (mean + se)', pch=20, col=c("grey47", "magenta"),
groups=summary$group, cex=0.7,
xlim=c(min(averages-ranges)-1, max(averages+ranges)+1))
segments(averages-ranges,
c(8, 1, 9, 2, 10, 3, 11, 4, 12, 5),
averages+ranges, c(8, 1, 9, 2, 10, 3, 11, 4, 12, 5),
col=c("grey47", "magenta"), lwd=2)
# note: adding the bars on your graph is a bit confusing
# you may have to play around with the ordering, since this is a grouped dotplot
# future ways of making this step simpler are appreciated!
###
#---
###
# for severity:
# used the same steps above to get 'lefse.otus' that were different with severity
otus.df<-sub.all[, which(colnames(sub.all) %in% lefse.otus)]
otus.df$sampleID<-rownames(otus.df)
groups<-sub.all[, c("seqID", "severeidsa")]
otus.severe<-merge(groups, otus.df, by.x="seqID", by.y="sampleID")
otus.severe<-otus.severe[,2:9]
# create your stats summary dataframe:
# column names: Otu00004_Escherichia, Otu00015_Lachnospiraceae, Otu00017_Bifidobacterium, Otu00030_Blautia, Otu00044_Flavonifractor, Otu00050_Bacteria, Otu00097_Lactococcus
# column names, with "": "Otu00004_Escherichia", "Otu00015_Lachnospiraceae", "Otu00017_Bifidobacterium", "Otu00030_Blautia", "Otu00044_Flavonifractor", "Otu00050_Bacteria", "Otu00097_Lactococcus"
x<-otus.severe
# the severity-significant OTUs summarised below:
otu.cols<-c("Otu00004_Escherichia", "Otu00015_Lachnospiraceae", "Otu00017_Bifidobacterium", "Otu00030_Blautia", "Otu00044_Flavonifractor", "Otu00050_Bacteria", "Otu00097_Lactococcus")
# mean relative abundance of each OTU per severity group:
means<-aggregate(cbind(Otu00004_Escherichia, Otu00015_Lachnospiraceae, Otu00017_Bifidobacterium, Otu00030_Blautia, Otu00044_Flavonifractor, Otu00050_Bacteria, Otu00097_Lactococcus) ~ severeidsa,data = x,FUN=mean)
means2<-melt(means, id.vars = "severeidsa", measure.vars = otu.cols)
otu.means<-means2$value
# standard deviation per group:
sds<-aggregate(cbind(Otu00004_Escherichia, Otu00015_Lachnospiraceae, Otu00017_Bifidobacterium, Otu00030_Blautia, Otu00044_Flavonifractor, Otu00050_Bacteria, Otu00097_Lactococcus) ~ severeidsa,data = x,FUN=sd)
sds2<-melt(sds, id.vars = "severeidsa", measure.vars = otu.cols)
otu.sds<-sds2$value
otu.highsd<-otu.means+sds2$value
otu.lowsd<-otu.means-sds2$value
# median per group:
medians<-aggregate(cbind(Otu00004_Escherichia, Otu00015_Lachnospiraceae, Otu00017_Bifidobacterium, Otu00030_Blautia, Otu00044_Flavonifractor, Otu00050_Bacteria, Otu00097_Lactococcus) ~ severeidsa,data = x,FUN=median)
medians2<-melt(medians, id.vars = "severeidsa", measure.vars = otu.cols)
otu.medians<-medians2$value
# standard error of the mean per group:
ses<-aggregate(cbind(Otu00004_Escherichia, Otu00015_Lachnospiraceae, Otu00017_Bifidobacterium, Otu00030_Blautia, Otu00044_Flavonifractor, Otu00050_Bacteria, Otu00097_Lactococcus) ~ severeidsa,data = x,FUN=function(v) sd(v)/sqrt(length(v)))
ses2<-melt(ses, id.vars = "severeidsa", measure.vars = otu.cols)
otu.ses<-ses2$value
otu.highse<-otu.means+ses2$value
otu.lowse<-otu.means-ses2$value
# upper/lower quartiles per group.
# BUG FIX: the original melted 'ses' (not 'highq'/'lowq') and computed BOTH
# bounds from quantile(x, 0.75); quantiles are absolute values, so they are
# used directly rather than offset from the median.
highq<-aggregate(cbind(Otu00004_Escherichia, Otu00015_Lachnospiraceae, Otu00017_Bifidobacterium, Otu00030_Blautia, Otu00044_Flavonifractor, Otu00050_Bacteria, Otu00097_Lactococcus) ~ severeidsa,data = x,FUN=function(v) quantile(v, 0.75))
highq2<-melt(highq, id.vars = "severeidsa", measure.vars = otu.cols)
otu.highq<-highq2$value
lowq<-aggregate(cbind(Otu00004_Escherichia, Otu00015_Lachnospiraceae, Otu00017_Bifidobacterium, Otu00030_Blautia, Otu00044_Flavonifractor, Otu00050_Bacteria, Otu00097_Lactococcus) ~ severeidsa,data = x,FUN=function(v) quantile(v, 0.25))
lowq2<-melt(lowq, id.vars = "severeidsa", measure.vars = otu.cols)
otu.lowq<-lowq2$value
# add all the data together:
labels<-paste(as.character(means2$severeidsa), as.character(means2$variable))
summary<-data.frame(label=labels,
mean = otu.means,
sd = otu.sds,
se = otu.ses,
median = otu.medians,
highq = otu.highq,
lowq = otu.lowq,
highse = otu.highse,
lowse = otu.lowse,
highsd = otu.highsd,
lowsd = otu.lowsd
)
summary$sequence <- seq(1, length(summary$label))
summary$label <- reorder(summary$label, summary$sequence)
summary$otu <- means2$variable
summary$group <-means2$severeidsa
# plot it:
labels <- summary$otu
labels <- gsub("_", ": ", labels)
labels <- gsub("000", "", labels)
summary$otu<-gsub("_", ": ", summary$otu)
summary$otu<-gsub("000", "", summary$otu)
averages <- summary$mean
ranges <- summary$se
groupnames<-as.character(unique(summary$group))
# option 1 (used as manuscript Figure 4):
# since the first OTU is significantly more abundant than others, let's split these up into 2 quadrants to emphasize the others...
# first panel: the dominant OTU (rows 1-2 = both severity groups for OTU 1)
summary1<-summary[1:2, ]
# BUG FIX: the original cleaned 'labels' (the full-length vector) instead of
# 'labels1'/'labels2', leaving the panel labels the wrong length and content.
labels1 <- summary1$otu
labels1 <- gsub("_", ": ", labels1)
labels1 <- gsub("000", "", labels1)
summary1$otu<-gsub("_", ": ", summary1$otu)
summary1$otu<-gsub("000", "", summary1$otu)
averages1 <- summary1$mean
ranges1 <- summary1$se
groupnames1<-as.character(unique(summary1$group))
# second panel: the remaining OTUs (rows 3-14 = 6 OTUs x 2 groups)
summary2<-summary[3:14, ]
labels2 <- summary2$otu
labels2 <- gsub("_", ": ", labels2)
labels2 <- gsub("000", "", labels2)
summary2$otu<-gsub("_", ": ", summary2$otu)
summary2$otu<-gsub("000", "", summary2$otu)
averages2 <- summary2$mean
ranges2 <- summary2$se
groupnames2<-as.character(unique(summary2$group))
# summary 1:
par(mfrow=c(2,1))
dotchart(averages1, labels="", xlab='relative abundance (mean + se)', pch=20, col=c("blue", "red"),
groups=as.factor(summary1$otu), cex=0.8, cex.lab=0.8,
xlim=c(min(averages1-ranges1)-1, max(averages1+ranges1)+1))
segments(averages1-ranges1,
c(1:2),
averages1+ranges1, c(1:2),
col=c("blue", "red"), lwd=2)
legend("bottomright", c("not severe", "severe"), col=c("blue", "red"), pch=19, cex=0.6, bg="white")
# summary 2:
dotchart(averages2, labels="", xlab='relative abundance (mean + se)', pch=20, col=c("blue", "red"),
groups=as.factor(summary2$otu), cex=0.8, cex.lab=0.8,
xlim=c(min(averages2-ranges2)-1, max(averages2+ranges2)+1))
segments(averages2-ranges2,
c(21:22, 17:18, 13:14, 9:10, 5:6, 1:2),
averages2+ranges2, c(21:22, 17:18, 13:14, 9:10, 5:6, 1:2),
col=c("blue", "red"), lwd=2)
legend("bottomright", c("not severe", "severe"), col=c("blue", "red"), pch=19, cex=0.6, bg="white")
### this still doesn't give even graphs...just go with summary, and chop it down:
# summary (all):
dotchart(averages, labels="", xlab='relative abundance (mean + se)', pch=20, col=c("blue", "red"),
groups=as.factor(summary$otu), cex=0.8, cex.lab=0.8,
xlim=c(0, max(averages+ranges)+1))
segments(averages-ranges,
c(25:26, 21:22, 17:18, 13:14, 9:10, 5:6, 1:2),
averages+ranges, c(25:26, 21:22, 17:18, 13:14, 9:10, 5:6, 1:2),
col=c("blue", "red"), lwd=2)
legend("bottomright", c("not severe", "severe"), col=c("blue", "red"), pch=19, cex=0.6, bg="white")
# option 2:
dotchart(averages, labels=labels, xlab='relative abundance (mean + se)', pch=20, col=c("blue", "red"),
groups=summary$group, cex=0.7,
xlim=c(min(averages-ranges)-1, max(averages+ranges)+1))
segments(averages-ranges,
c(8, 1, 9, 2, 10, 3, 11, 4, 12, 5),
averages+ranges, c(8, 1, 9, 2, 10, 3, 11, 4, 12, 5),
col=c("blue", "red"), lwd=2)
# note: adding the bars on your graph is a bit confusing
# you may have to play around with the ordering, since this is a grouped dotplot
# future ways of making this step simpler are appreciated!
|
# Data manipulations
library(dplyr)
library(nycflights13)
# this example uses the nycflights13 flight datasets
ls_data <- list(flights,airlines, airports, planes, weather)
# inspect each dataset in the list
head(ls_data[1])
head(ls_data[2])
head(ls_data[3])
head(ls_data[4])
head(ls_data[5])
# extracting data from the list
#-----to data frame
df <- as.data.frame(ls_data[1])
#-----to array (vector)
arr <- df$tailnum #just an example
#-----to obj (single element)
obj <- arr[12] #just an example
## some frequently used dplyr functions
#-----select (choose columns)
df <- select(flights, year, dep_time)
head(df)
df <- select(flights, -year)
head(df)
#-----filter (choose rows)
df <- filter(flights, hour>=5)
head(df)
#-----arrange (sort rows)
df <- arrange(flights, hour)
#-----group by & summarise (per-origin totals and averages)
df <- flights %>% group_by(origin) %>% summarise(flighthour = sum(hour),
avdist = mean(distance))
#-----distinct (unique values)
distinct(flights, origin)
#-----mutate (add a derived column after summarising)
df <- flights %>% group_by(origin) %>% summarise(flighthour = mean(hour),
avdist = mean(distance)) %>% mutate(av_a = avdist/flighthour)
#----50 Example DPLYR--------
#from
URL <- "https://www.listendata.com/2016/08/dplyr-tutorial.html#"
mydata = read.csv("https://raw.githubusercontent.com/deepanshu88/data/master/sampledata.csv")
## 1. Random from df
sample_n(mydata, 2)
## 2. random in percent
sample_frac(mydata, 0.1) #10%
## 3. remove duplicate row
x <- distinct(mydata)
## 4. remove duplicate par
x <- distinct(mydata, Index, .keep_all = T)
## 5. Select
select(mydata, Index, State)
## 6. Select several column
select(mydata, Index:Y2005)
## 7.Eliminating some column
x <- select(mydata, -Index)
## 8. Select variable start string "Y"
x <- select(mydata, -starts_with("Y"))
## 9. Select variable contain string "I"
x <- select(mydata, contains("I"))
## 10. Reorder
x <- select(mydata, State, everything())
## 11. Rename variable
x1 <- rename(df, origin.flight=origin)
names(df)
names(x1)
## 12. filter column
x1 <- filter(df, origin=="JFK")
head(x1)
x1 <- filter(df, origin %in% c("JFK","LGA"))
head(x1)
x1 <- filter(flights, month %in% c(3,5,7) & carrier %in% c("UA", "AA","B6","DL")) #using and
view(x1)
x1 <- filter(flights, month %in% c(3) | carrier %in% c("UA", "AA")) #using OR
View(x1)
x1<-filter(flights, !month %in% c(1,12,5)) #exclude some variable
distinct(x1, month)
x1 <- filter(flights, grepl("AA",tailnum)) #filter tailnum contains AA word
View(x1) | /ds2_dplyr.R | no_license | MuhammadBhakti/R_ds_notes | R | false | false | 2,471 | r | # Data manipulations
# Data manipulations -- dplyr practice notes using the nycflights13 data.
library(dplyr)
library(nycflights13)
# This example uses the flights data plus the related lookup tables.
ls_data <- list(flights, airlines, airports, planes, weather)
# Inspect each element of the list.
head(ls_data[1])
head(ls_data[2])
head(ls_data[3])
head(ls_data[4])
head(ls_data[5])
# Extracting data from the list:
# ----- to data frame
df <- as.data.frame(ls_data[1])
# ----- to vector
arr <- df$tailnum # just an example
# ----- to a single object
obj <- arr[12] # just an example
## Frequently used dplyr verbs
# ----- select (choose columns)
df <- select(flights, year, dep_time)
head(df)
df <- select(flights, -year)
head(df)
# ----- filter (choose rows)
df <- filter(flights, hour >= 5)
head(df)
# ----- arrange (sort rows)
df <- arrange(flights, hour)
# ----- group by & summarise
df <- flights %>% group_by(origin) %>% summarise(flighthour = sum(hour),
                                                 avdist = mean(distance))
# ----- distinct
distinct(flights, origin)
# ----- mutate (derive new columns from existing ones)
df <- flights %>% group_by(origin) %>% summarise(flighthour = mean(hour),
                                                 avdist = mean(distance)) %>% mutate(av_a = avdist / flighthour)
# ---- 50 dplyr examples --------
# from:
URL <- "https://www.listendata.com/2016/08/dplyr-tutorial.html#"
mydata <- read.csv("https://raw.githubusercontent.com/deepanshu88/data/master/sampledata.csv")
## 1. Random sample of n rows
sample_n(mydata, 2)
## 2. Random sample by fraction
sample_frac(mydata, 0.1) # 10%
## 3. Remove duplicate rows
x <- distinct(mydata)
## 4. Remove rows duplicated on one variable (keep the other columns)
x <- distinct(mydata, Index, .keep_all = TRUE)
## 5. Select columns
select(mydata, Index, State)
## 6. Select a range of columns
select(mydata, Index:Y2005)
## 7. Drop a column
x <- select(mydata, -Index)
## 8. Drop variables whose names start with "Y"
x <- select(mydata, -starts_with("Y"))
## 9. Select variables whose names contain "I"
x <- select(mydata, contains("I"))
## 10. Reorder columns (State first, then everything else)
x <- select(mydata, State, everything())
## 11. Rename a variable
x1 <- rename(df, origin.flight = origin)
names(df)
names(x1)
## 12. Filter rows
x1 <- filter(df, origin == "JFK")
head(x1)
x1 <- filter(df, origin %in% c("JFK", "LGA"))
head(x1)
x1 <- filter(flights, month %in% c(3, 5, 7) & carrier %in% c("UA", "AA", "B6", "DL")) # using AND
View(x1) # bug fix: was view(), which is not provided by dplyr/nycflights13; base R's viewer is View()
x1 <- filter(flights, month %in% c(3) | carrier %in% c("UA", "AA")) # using OR
View(x1)
x1 <- filter(flights, !month %in% c(1, 12, 5)) # exclude some values
distinct(x1, month)
x1 <- filter(flights, grepl("AA", tailnum)) # keep rows whose tailnum contains "AA"
View(x1) |
# Kyle Schneider
# Chapter 16 & 17 Homework
# DATA 320
# 10/23/2019
# Bug fix: the pipe (%>%) and select()/summarize()/pull()/filter() below come
# from dplyr, which was never loaded -- without it the script errors at the
# first pipe.
library(dplyr)
# Exercises 16.11 #1, 3
# 1) A famous athlete has an impressive career, winning 70% of her 500 career
# matches. However, this athlete gets criticized because in important events,
# such as the Olympics, she has a losing record of 8 wins and 9 losses.
# Perform a Chi squared test to determine if this losing record can be simply
# due to chance as opposed to not performing well under pressure.
wins <- 500 * 0.7
wins
losses <- 500 * 0.3
# 2x2 table: Olympic record (8-9) vs. the rest of the career (350 wins / 150 losses).
two_by_two <- data.frame(success = c("Win", "Loss"),
                         olympicCount = c(8, 9),
                         nonOlympics = c(wins, losses))
two_by_two
chisq_test <- two_by_two %>%
  select(-success) %>%
  chisq.test()
chisq_test
# The p-value is about 0.08 (> 0.05), so we fail to reject the null
# hypothesis: the Olympic losing record can plausibly be explained by chance
# alone rather than by performing worse under pressure.  (The original
# comment drew the opposite conclusion, which the test does not support at
# the usual 5% level.)
# 3) Compute the odds ratio of "losing under pressure" along with a
# confidence interval.
odds_important <- (two_by_two$olympicCount[1] / sum(two_by_two$olympicCount)) /
  (two_by_two$olympicCount[2] / sum(two_by_two$olympicCount))
odds_important
odds_normal <- (two_by_two$nonOlympics[1] / sum(two_by_two$nonOlympics)) /
  (two_by_two$nonOlympics[2] / sum(two_by_two$nonOlympics))
odds_normal
# 95% CI for the log odds ratio, then exponentiate back to the OR scale.
log_or <- log(odds_important / odds_normal)
se <- two_by_two %>%
  select(-success) %>%
  summarize(se = sqrt(sum(1 / olympicCount) + sum(1 / nonOlympics))) %>%
  pull(se)
ci <- log_or + c(-1, 1) * qnorm(0.975) * se
ci
exp(ci)
# Exercises 17.3 #1-5
library(dslabs)
data(heights)
x <- heights %>% filter(sex == "Male") %>%
  pull(height)
# 1) Mathematically speaking, x is our population. Using the urn analogy, we
# have an urn with the values of x in it. What are the average and standard
# deviation of our population?
mean(x)
sd(x)
# 2) Call the population average computed above mu and the standard deviation
# sigma. Now take a sample of size 50, with replacement, and construct an
# estimate for mu and sigma.
N <- 50
X <- sample(x, N, replace = TRUE)
mean(X)
sd(X)
# 3) What does the theory tell us about the sample average X-bar and how it is
# related to mu?
# B. It is a random variable with expected value mu and standard error
# sigma / sqrt(N).
# 4) We use X-bar as our estimate of the average height of male students,
# measuring only 50 of the 708. The standard error of X-bar is sigma / sqrt(N);
# we don't know sigma, so we estimate it with the sample standard deviation:
sd(X)
# 5) Now that we have an estimate s of sigma, construct a 95% confidence
# interval for mu.
mean(X) + c(-1, 1) * qnorm(1 - 0.05 / 2) * sd(X) / sqrt(N)
| /16-17_HW.R | no_license | Kyleschneiderx/R-for-data-science | R | false | false | 2,829 | r | #Kyle Schneider
# Chapter 16 & 17 Homework
# DATA 320
# 10/23/2019
# Bug fix: the pipe (%>%) and select()/summarize()/pull()/filter() below come
# from dplyr, which was never loaded -- without it the script errors at the
# first pipe.
library(dplyr)
# Exercises 16.11 #1, 3
# 1) A famous athlete has an impressive career, winning 70% of her 500 career
# matches. However, this athlete gets criticized because in important events,
# such as the Olympics, she has a losing record of 8 wins and 9 losses.
# Perform a Chi squared test to determine if this losing record can be simply
# due to chance as opposed to not performing well under pressure.
wins <- 500 * 0.7
wins
losses <- 500 * 0.3
# 2x2 table: Olympic record (8-9) vs. the rest of the career (350 wins / 150 losses).
two_by_two <- data.frame(success = c("Win", "Loss"),
                         olympicCount = c(8, 9),
                         nonOlympics = c(wins, losses))
two_by_two
chisq_test <- two_by_two %>%
  select(-success) %>%
  chisq.test()
chisq_test
# The p-value is about 0.08 (> 0.05), so we fail to reject the null
# hypothesis: the Olympic losing record can plausibly be explained by chance
# alone rather than by performing worse under pressure.  (The original
# comment drew the opposite conclusion, which the test does not support at
# the usual 5% level.)
# 3) Compute the odds ratio of "losing under pressure" along with a
# confidence interval.
odds_important <- (two_by_two$olympicCount[1] / sum(two_by_two$olympicCount)) /
  (two_by_two$olympicCount[2] / sum(two_by_two$olympicCount))
odds_important
odds_normal <- (two_by_two$nonOlympics[1] / sum(two_by_two$nonOlympics)) /
  (two_by_two$nonOlympics[2] / sum(two_by_two$nonOlympics))
odds_normal
# 95% CI for the log odds ratio, then exponentiate back to the OR scale.
log_or <- log(odds_important / odds_normal)
se <- two_by_two %>%
  select(-success) %>%
  summarize(se = sqrt(sum(1 / olympicCount) + sum(1 / nonOlympics))) %>%
  pull(se)
ci <- log_or + c(-1, 1) * qnorm(0.975) * se
ci
exp(ci)
# Exercises 17.3 #1-5
library(dslabs)
data(heights)
x <- heights %>% filter(sex == "Male") %>%
  pull(height)
# 1) Mathematically speaking, x is our population. Using the urn analogy, we
# have an urn with the values of x in it. What are the average and standard
# deviation of our population?
mean(x)
sd(x)
# 2) Call the population average computed above mu and the standard deviation
# sigma. Now take a sample of size 50, with replacement, and construct an
# estimate for mu and sigma.
N <- 50
X <- sample(x, N, replace = TRUE)
mean(X)
sd(X)
# 3) What does the theory tell us about the sample average X-bar and how it is
# related to mu?
# B. It is a random variable with expected value mu and standard error
# sigma / sqrt(N).
# 4) We use X-bar as our estimate of the average height of male students,
# measuring only 50 of the 708. The standard error of X-bar is sigma / sqrt(N);
# we don't know sigma, so we estimate it with the sample standard deviation:
sd(X)
# 5) Now that we have an estimate s of sigma, construct a 95% confidence
# interval for mu.
mean(X) + c(-1, 1) * qnorm(1 - 0.05 / 2) * sd(X) / sqrt(N)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/process_contributions.R
\name{process_contributions}
\alias{process_contributions}
\title{Process contributions data}
\usage{
process_contributions(contributions)
}
\arguments{
\item{contributions}{A dataframe containing Members' contribution data}
}
\value{
A dataframe containing contributions attributed to single members, including the number of words per contribution
}
\description{
\code{process_contributions} processes Members' contributions data.
}
| /man/process_contributions.Rd | permissive | eliseuberoi/clparlysearch | R | false | true | 536 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/process_contributions.R
\name{process_contributions}
\alias{process_contributions}
\title{Process contributions data}
\usage{
process_contributions(contributions)
}
\arguments{
\item{contributions}{A dataframe containing Members' contribution data}
}
\value{
A dataframe containing contributions attributed to single members, including the number of words per contribution
}
\description{
\code{process_contributions} processes Members' contributions data.
}
|
library(tidyverse)
library(fs)
library(ComplexHeatmap)
# Common definitions ------------------------------------------------------
# Human-readable figure labels, keyed by experiment id.
EXPERIMENT_NAMES <- c(
"pnet_original" = "original setup",
"pnet_deterministic" = "deterministic inputs",
"pnet_shuffled" = "shuffled labels",
"mskimpact_nsclc_original_biased" = "lung",
"mskimpact_bc_original_biased" = "breast",
"mskimpact_cc_original_biased" = "colorectal",
"mskimpact_pc_original_biased" = "prostate",
"mskimpact_nsclc_original_corrected" = "lung",
"mskimpact_bc_original_corrected" = "breast",
"mskimpact_cc_original_corrected" = "colorectal",
"mskimpact_pc_original_corrected" = "prostate"
)
# Plot colors, keyed by experiment id.  MSK-IMPACT colors are taken
# for MSK-IMPACT from https://wesandersonpalettes.tumblr.com/
# (The deeper you go, the weirder life gets.)
EXPERIMENT_COLORS <- c(
"pnet_original" = "gray50",
"pnet_deterministic" = "#3182bd",
"pnet_shuffled" = "#e69f00",
"mskimpact_nsclc_original" = "#c5a495",
"mskimpact_bc_original" = "#7b6da8",
"mskimpact_cc_original" = "#f3d28f",
"mskimpact_pc_original" = "#235135"
)
# Highlight color for the original seed (per the name; confirm usage in the
# plotting scripts).
ORIGINAL_SEED_COLOR <- "red"
# ggplot functions --------------------------------------------------------
# Shared sizing constants; keep the two text sizes in sync (1.76 mm = 5 pt).
BASE_TEXT_SIZE_MM = 1.76 # mm, corresponds to 5 pt, use in geom_text()
BASE_TEXT_SIZE_PT = 5 # pt, use in theme()
BASE_LINEWIDTH = 0.25 # pt
BASE_BOXPLOT_SIZE = 0.5
#' Publication theme.
#'
#' Wraps `theme_bw()` and enforces the shared figure defaults: all lines at
#' `BASE_LINEWIDTH`, all text in black at `BASE_TEXT_SIZE_PT`, a uniform 1 mm
#' plot margin, and plain facet strips (no background, medium text, 1 mm
#' padding between strip text and panel).
#'
#' @param rotate_x_labels If `TRUE`, rotate x-axis tick labels by 90 degrees.
#' @param ... Other parameters passed on to `theme_bw()`.
#'
#' @return A ggplot2 theme object.
theme_pub <- function(rotate_x_labels = FALSE, ...) {
  base_theme <- theme_bw(...) +
    theme(
      line = element_line(linewidth = BASE_LINEWIDTH),
      axis.text = element_text(color = "black", size = BASE_TEXT_SIZE_PT),
      axis.title = element_text(color = "black", size = BASE_TEXT_SIZE_PT),
      legend.background = element_blank(),
      legend.text = element_text(color = "black", size = BASE_TEXT_SIZE_PT),
      legend.title = element_text(size = BASE_TEXT_SIZE_PT),
      panel.border = element_rect(linewidth = BASE_LINEWIDTH * 2),
      plot.margin = unit(c(1, 1, 1, 1), "mm"),
      strip.background = element_blank(),
      strip.text = element_text(color = "black", size = BASE_TEXT_SIZE_PT),
      strip.text.x = element_text(margin = margin(b = 1, unit = "mm")),
      strip.text.y = element_text(margin = margin(l = 1, unit = "mm")),
      plot.title = element_text(size = BASE_TEXT_SIZE_PT, face = "bold")
    )
  if (!rotate_x_labels) {
    return(base_theme)
  }
  base_theme +
    theme(axis.text.x = element_text(angle = 90, hjust = 1, vjust = 0.5))
}
#' Save a publication-quality plot.
#'
#' @param filename Filename (without extension); the plot is written to
#'   `plots/final/<filename>.<type>`, creating the directory if needed.
#' @param plot Plot object; if `NULL`, the last ggplot (via `ggsave()`) is saved.
#' @param width Width in cm.
#' @param height Height in cm.
#' @param type Type of image file ("pdf" or "png" for non-ggplot objects).
#' @param dpi Resolution.
#' @param ... Other parameters passed to the plotting function.
ggsave_publication <- function(filename,
                               plot = NULL,
                               width = 4,
                               height = 4,
                               type = "pdf",
                               dpi = 1200,
                               ...) {
  # Bug fix: the output path previously hard-coded "(unknown)" and ignored
  # the `filename` argument; build it from `filename` as documented above.
  filename <- str_glue("plots/final/{filename}.{type}")
  filename %>%
    path_dir() %>%
    dir_create()
  if (is.null(plot)) {
    # if last_plot() is available, use ggsave()
    ggsave(
      filename,
      dpi = dpi,
      units = "cm",
      limitsize = FALSE,
      width = width,
      height = height,
      ...
    )
  } else {
    # for non-ggplot objects, use the base R functions directly;
    # only png and pdf backends are supported
    if (type == "png") {
      png(
        filename,
        res = dpi,
        units = "cm",
        width = width,
        height = height,
        ...
      )
    } else if (type == "pdf") {
      pdf(
        filename,
        width = width / 2.54, # dimensions for pdf() must be inches
        height = height / 2.54,
        ...
      )
    } else {
      # Bug fix: stop() concatenates without separators; add spaces so the
      # message reads "Type gif cannot be saved." instead of "Typegifcannot...".
      stop("Type ", type, " cannot be saved.")
    }
    print(plot)
    dev.off()
  }
}
# shorthand for adding a facet title as a secondary axis:
# use like scale_x_continuous(sec.axis = facet_title("...")).
# The duplicated axis carries only the name (breaks and labels suppressed),
# so it renders as a facet-style title on the opposite side of the panel.
facet_title <- function(name) {
dup_axis(name = name, breaks = NULL, labels = NULL)
}
# Heatmap appearance ------------------------------------------------------
# Global ComplexHeatmap defaults: tight padding (1-2 pt/mm) and the same
# BASE_TEXT_SIZE_PT font everywhere, so heatmaps match the ggplot theme above.
ht_opt(
simple_anno_size = unit(1.5, "mm"),
COLUMN_ANNO_PADDING = unit(1, "pt"),
DENDROGRAM_PADDING = unit(1, "pt"),
HEATMAP_LEGEND_PADDING = unit(1, "mm"),
ROW_ANNO_PADDING = unit(1, "pt"),
TITLE_PADDING = unit(2, "mm"),
heatmap_row_title_gp = gpar(fontsize = BASE_TEXT_SIZE_PT),
heatmap_row_names_gp = gpar(fontsize = BASE_TEXT_SIZE_PT),
heatmap_column_title_gp = gpar(fontsize = BASE_TEXT_SIZE_PT),
heatmap_column_names_gp = gpar(fontsize = BASE_TEXT_SIZE_PT),
legend_labels_gp = gpar(fontsize = BASE_TEXT_SIZE_PT),
legend_title_gp = gpar(fontsize = BASE_TEXT_SIZE_PT),
legend_border = FALSE
)
| /scripts/styling.R | permissive | csbg/pnet_robustness | R | false | false | 5,340 | r | library(tidyverse)
library(fs)
library(ComplexHeatmap)
# Common definitions ------------------------------------------------------
# Human-readable figure labels, keyed by experiment id.
EXPERIMENT_NAMES <- c(
"pnet_original" = "original setup",
"pnet_deterministic" = "deterministic inputs",
"pnet_shuffled" = "shuffled labels",
"mskimpact_nsclc_original_biased" = "lung",
"mskimpact_bc_original_biased" = "breast",
"mskimpact_cc_original_biased" = "colorectal",
"mskimpact_pc_original_biased" = "prostate",
"mskimpact_nsclc_original_corrected" = "lung",
"mskimpact_bc_original_corrected" = "breast",
"mskimpact_cc_original_corrected" = "colorectal",
"mskimpact_pc_original_corrected" = "prostate"
)
# Plot colors, keyed by experiment id.  MSK-IMPACT colors are taken
# for MSK-IMPACT from https://wesandersonpalettes.tumblr.com/
# (The deeper you go, the weirder life gets.)
EXPERIMENT_COLORS <- c(
"pnet_original" = "gray50",
"pnet_deterministic" = "#3182bd",
"pnet_shuffled" = "#e69f00",
"mskimpact_nsclc_original" = "#c5a495",
"mskimpact_bc_original" = "#7b6da8",
"mskimpact_cc_original" = "#f3d28f",
"mskimpact_pc_original" = "#235135"
)
# Highlight color for the original seed (per the name; confirm usage in the
# plotting scripts).
ORIGINAL_SEED_COLOR <- "red"
# ggplot functions --------------------------------------------------------
# Shared sizing constants; keep the two text sizes in sync (1.76 mm = 5 pt).
BASE_TEXT_SIZE_MM = 1.76 # mm, corresponds to 5 pt, use in geom_text()
BASE_TEXT_SIZE_PT = 5 # pt, use in theme()
BASE_LINEWIDTH = 0.25 # pt
BASE_BOXPLOT_SIZE = 0.5
#' Publication theme.
#'
#' Wraps `theme_bw()` and enforces the shared figure defaults: all lines at
#' `BASE_LINEWIDTH`, all text in black at `BASE_TEXT_SIZE_PT`, a uniform 1 mm
#' plot margin, and plain facet strips (no background, medium text, 1 mm
#' padding between strip text and panel).
#'
#' @param rotate_x_labels If `TRUE`, rotate x-axis tick labels by 90 degrees.
#' @param ... Other parameters passed on to `theme_bw()`.
#'
#' @return A ggplot2 theme object.
theme_pub <- function(rotate_x_labels = FALSE, ...) {
  base_theme <- theme_bw(...) +
    theme(
      line = element_line(linewidth = BASE_LINEWIDTH),
      axis.text = element_text(color = "black", size = BASE_TEXT_SIZE_PT),
      axis.title = element_text(color = "black", size = BASE_TEXT_SIZE_PT),
      legend.background = element_blank(),
      legend.text = element_text(color = "black", size = BASE_TEXT_SIZE_PT),
      legend.title = element_text(size = BASE_TEXT_SIZE_PT),
      panel.border = element_rect(linewidth = BASE_LINEWIDTH * 2),
      plot.margin = unit(c(1, 1, 1, 1), "mm"),
      strip.background = element_blank(),
      strip.text = element_text(color = "black", size = BASE_TEXT_SIZE_PT),
      strip.text.x = element_text(margin = margin(b = 1, unit = "mm")),
      strip.text.y = element_text(margin = margin(l = 1, unit = "mm")),
      plot.title = element_text(size = BASE_TEXT_SIZE_PT, face = "bold")
    )
  if (!rotate_x_labels) {
    return(base_theme)
  }
  base_theme +
    theme(axis.text.x = element_text(angle = 90, hjust = 1, vjust = 0.5))
}
#' Save a publication-quality plot.
#'
#' @param filename Filename (without extension); the plot is written to
#'   `plots/final/<filename>.<type>`, creating the directory if needed.
#' @param plot Plot object; if `NULL`, the last ggplot (via `ggsave()`) is saved.
#' @param width Width in cm.
#' @param height Height in cm.
#' @param type Type of image file ("pdf" or "png" for non-ggplot objects).
#' @param dpi Resolution.
#' @param ... Other parameters passed to the plotting function.
ggsave_publication <- function(filename,
                               plot = NULL,
                               width = 4,
                               height = 4,
                               type = "pdf",
                               dpi = 1200,
                               ...) {
  # Bug fix: the output path previously hard-coded "(unknown)" and ignored
  # the `filename` argument; build it from `filename` as documented above.
  filename <- str_glue("plots/final/{filename}.{type}")
  filename %>%
    path_dir() %>%
    dir_create()
  if (is.null(plot)) {
    # if last_plot() is available, use ggsave()
    ggsave(
      filename,
      dpi = dpi,
      units = "cm",
      limitsize = FALSE,
      width = width,
      height = height,
      ...
    )
  } else {
    # for non-ggplot objects, use the base R functions directly;
    # only png and pdf backends are supported
    if (type == "png") {
      png(
        filename,
        res = dpi,
        units = "cm",
        width = width,
        height = height,
        ...
      )
    } else if (type == "pdf") {
      pdf(
        filename,
        width = width / 2.54, # dimensions for pdf() must be inches
        height = height / 2.54,
        ...
      )
    } else {
      # Bug fix: stop() concatenates without separators; add spaces so the
      # message reads "Type gif cannot be saved." instead of "Typegifcannot...".
      stop("Type ", type, " cannot be saved.")
    }
    print(plot)
    dev.off()
  }
}
# shorthand for adding a facet title as a secondary axis:
# use like scale_x_continuous(sec.axis = facet_title("...")).
# The duplicated axis carries only the name (breaks and labels suppressed),
# so it renders as a facet-style title on the opposite side of the panel.
facet_title <- function(name) {
dup_axis(name = name, breaks = NULL, labels = NULL)
}
# Heatmap appearance ------------------------------------------------------
# Global ComplexHeatmap defaults: tight padding (1-2 pt/mm) and the same
# BASE_TEXT_SIZE_PT font everywhere, so heatmaps match the ggplot theme above.
ht_opt(
simple_anno_size = unit(1.5, "mm"),
COLUMN_ANNO_PADDING = unit(1, "pt"),
DENDROGRAM_PADDING = unit(1, "pt"),
HEATMAP_LEGEND_PADDING = unit(1, "mm"),
ROW_ANNO_PADDING = unit(1, "pt"),
TITLE_PADDING = unit(2, "mm"),
heatmap_row_title_gp = gpar(fontsize = BASE_TEXT_SIZE_PT),
heatmap_row_names_gp = gpar(fontsize = BASE_TEXT_SIZE_PT),
heatmap_column_title_gp = gpar(fontsize = BASE_TEXT_SIZE_PT),
heatmap_column_names_gp = gpar(fontsize = BASE_TEXT_SIZE_PT),
legend_labels_gp = gpar(fontsize = BASE_TEXT_SIZE_PT),
legend_title_gp = gpar(fontsize = BASE_TEXT_SIZE_PT),
legend_border = FALSE
)
|
% Generated by roxygen2 (4.1.0): do not edit by hand
% Please edit documentation in R/AgainStart.R
\docType{data}
\name{AGGRESSION}
\alias{AGGRESSION}
\title{TV and Behavior}
\format{A data frame with 16 observations on the following two variables:
\itemize{
\item \code{violence} (an integer vector)
\item \code{noviolence} (an integer vector)
}}
\source{
Gibbons, J. D. (1977) \emph{Nonparametric Methods for Quantitative Analysis}. American Science Press.
}
\usage{
AGGRESSION
}
\description{
Data regarding the aggressive behavior in relation to exposure to violent television programs.
}
\details{
This is data regarding aggressive behavior in relation to exposure to violent television programs from Gibbons (1977) with the following exposition: \dQuote{\ldots a group of children are matched as well as possible as regards home environment, genetic factors, intelligence, parental attitudes, and so forth, in an effort to minimize factors other than TV that might influence a tendency for aggressive behavior. In each of the resulting 16 pairs, one child is randomly selected to view the most violent shows on TV, while the other watches cartoons, situation comedies, and the like. The children are then subjected to a series of tests designed to produce an ordinal measure of their aggression factors.} (pages 143-144)
}
\examples{
AL <- reshape(AGGRESSION, varying = c("violence", "noviolence"),
v.names = "aggression", direction = "long")
ggplot(data = AL, aes(x = factor(time), y = aggression, fill = factor(time))) +
geom_boxplot() + labs(x = "") + scale_x_discrete(breaks = c(1, 2),
labels = c("Violence", "No Violence")) + guides(fill = FALSE) + scale_fill_brewer()
rm(AL)
with(data = AGGRESSION,
wilcox.test(violence, noviolence, paired = TRUE, alternative = "greater"))
}
\references{
Ugarte, M. D., Militino, A. F., and Arnholt, A. T. 2015. \emph{Probability and Statistics with R}, Second Edition. Chapman & Hall / CRC.
}
\keyword{datasets}
| /man/AGGRESSION.Rd | no_license | darokun/PASWR2 | R | false | false | 1,962 | rd | % Generated by roxygen2 (4.1.0): do not edit by hand
% Please edit documentation in R/AgainStart.R
\docType{data}
\name{AGGRESSION}
\alias{AGGRESSION}
\title{TV and Behavior}
\format{A data frame with 16 observations on the following two variables:
\itemize{
\item \code{violence} (an integer vector)
\item \code{noviolence} (an integer vector)
}}
\source{
Gibbons, J. D. (1977) \emph{Nonparametric Methods for Quantitative Analysis}. American Science Press.
}
\usage{
AGGRESSION
}
\description{
Data regarding the aggressive behavior in relation to exposure to violent television programs.
}
\details{
This is data regarding aggressive behavior in relation to exposure to violent television programs from Gibbons (1977) with the following exposition: \dQuote{\ldots a group of children are matched as well as possible as regards home environment, genetic factors, intelligence, parental attitudes, and so forth, in an effort to minimize factors other than TV that might influence a tendency for aggressive behavior. In each of the resulting 16 pairs, one child is randomly selected to view the most violent shows on TV, while the other watches cartoons, situation comedies, and the like. The children are then subjected to a series of tests designed to produce an ordinal measure of their aggression factors.} (pages 143-144)
}
\examples{
AL <- reshape(AGGRESSION, varying = c("violence", "noviolence"),
v.names = "aggression", direction = "long")
ggplot(data = AL, aes(x = factor(time), y = aggression, fill = factor(time))) +
geom_boxplot() + labs(x = "") + scale_x_discrete(breaks = c(1, 2),
labels = c("Violence", "No Violence")) + guides(fill = FALSE) + scale_fill_brewer()
rm(AL)
with(data = AGGRESSION,
wilcox.test(violence, noviolence, paired = TRUE, alternative = "greater"))
}
\references{
Ugarte, M. D., Militino, A. F., and Arnholt, A. T. 2015. \emph{Probability and Statistics with R}, Second Edition. Chapman & Hall / CRC.
}
\keyword{datasets}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/analyse_crps_news.R
\name{analyse_crps_news}
\alias{analyse_crps_news}
\title{Analyses a dataframe of news bodies for occurrences of each of the currencySymbols}
\usage{
analyse_crps_news(databodynews, currencySymbols)
}
\arguments{
\item{databodynews}{A dataframe with news bodies and their timestamps}
\item{currencySymbols}{A character vector of the cryptocurrency symbols to be analysed}
}
\value{
Dataframe with a boolean column per cryptosymbol, representing if the cryptocurrency was cited in the news article
}
\description{
Analyses a dataframe of news bodies for occurrences of each of the currencySymbols
}
| /CryptoShiny/man/analyse_crps_news.Rd | permissive | fernandopf/ThinkRProject | R | false | true | 713 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/analyse_crps_news.R
\name{analyse_crps_news}
\alias{analyse_crps_news}
\title{Analyses a dataframe of news bodies for occurrences of each of the currencySymbols}
\usage{
analyse_crps_news(databodynews, currencySymbols)
}
\arguments{
\item{databodynews}{A dataframe with news bodies and their timestamps}
\item{currencySymbols}{A character vector of the cryptocurrency symbols to be analysed}
}
\value{
Dataframe with a boolean column per cryptosymbol, representing if the cryptocurrency was cited in the news article
}
\description{
Analyses a dataframe of news bodies for occurrences of each of the currencySymbols
}
|
## Setup and Run FBBmodel
## Fits the coupled Farquhar (A) / Ball-Berry (gs) leaf model by MCMC for one
## (species, leaf, chain) combination; all three indices are read from
## environment variables so runs can be launched in parallel:
##syntax: nohup env SPECIES=1 LEAF=31 CHAIN=1 R --vanilla < FBB_main.R > log1-31-1 &
startTime = proc.time()[3] # Start timer (elapsed seconds)
#****************** USER PARAMETERS ***********************************
species.id <- as.numeric(system("echo $SPECIES",intern=TRUE))
leaf <- as.numeric(system("echo $LEAF",intern=TRUE))
chain <- as.numeric(system("echo $CHAIN",intern=TRUE))
niter = 100000 # number of MCMC iterations to compute
nchains = 3 # number of chains that will be used
progressEvery = 10000 # How frequent to print progress
## File parameters
filePath = "/home/wolz1/Biomath/" # "/Users/wolzy4u/Desktop/Biomath/"
datFile = "Kdata_Project.csv" # "Biomath_Processed_Data.csv"
saveDirDat = "FBB_soyface_output_dat/" # "FBB_output_dat/"
setupFile = "FBB_setup.R"
funcFile = "FBB_functions.R"
mcmcFile = "FBB_mcmc.R"
# Prefix every file name with filePath to obtain absolute paths.
datFile = paste(filePath, datFile, sep="")
saveDirDat = paste(filePath, saveDirDat, sep="")
setupFile = paste(filePath, setupFile,sep="")
funcFile = paste(filePath, funcFile, sep="")
mcmcFile = paste(filePath, mcmcFile, sep="")
dir.create(saveDirDat,showWarnings=FALSE,recursive=TRUE)
## Toggles and parameters
## NOTE(review): the prior means below appear to be on a log scale (e.g.
## exp(4.61) ~ 100 for Vcmax) -- confirm against FBB_mcmc.R before changing.
compute.pA = TRUE # Whether or not to compute pA
compute.pgs = TRUE # Whether or not to compute pgs
compute.DA = TRUE # Whether or not to compute DA
compute.Dgs = TRUE # Whether or not to compute Dgs
track.An.pred = TRUE # Whether or not to track An.pred (TRUE = yes)
track.gs.pred = TRUE # Whether or not to track gs.pred (TRUE = yes)
sample.Vcmax = TRUE # Whether or not to sample Vcmax (TRUE = yes)
prior.mu.Vcmax = 4.61
prior.sd.Vcmax = 0.32
sample.Jmax = TRUE # Whether or not to sample Jmax (TRUE = yes)
prior.mu.Jmax = 5.16
prior.sd.Jmax = 0.32
sample.R = TRUE # Whether or not to sample R (TRUE = yes)
prior.mu.R = -0.69
prior.sd.R = 1
R.lb = 0 # lower bound on R
R.ub = 10 # upper bound on R
sample.Gstar = TRUE # Whether or not to sample Gstar (TRUE = yes)
prior.mu.Gstar = 3.75
prior.sd.Gstar = 1
sample.alpha = TRUE # Whether or not to sample alpha (TRUE = yes)
prior.mu.alpha = -0.15
prior.sd.alpha = 0.1
alpha.lb = 0 # lower bound on alpha
alpha.ub = 1 # upper bound on alpha
sample.m = TRUE # Whether or not to sample m (TRUE = yes; was a copy-paste comment saying "alpha")
prior.mu.m = 10
prior.sd.m = 10
sample.g0 = TRUE # Whether or not to sample g0 (TRUE = yes; was a copy-paste comment saying "alpha")
prior.mu.g0 = 0
prior.sd.g0 = 0.1
sample.tauF = TRUE # Whether or not to sample tauF (TRUE = yes)
prior.s1.tauF = 0.1
prior.s2.tauF = 0.1
tauF.lb = 0 # lower bound on tauF
tauF.ub = 100 # upper bound on tauF
sample.tauBB = TRUE # Whether or not to sample tauBB (TRUE = yes)
prior.s1.tauBB = 0.1
prior.s2.tauBB = 0.00001
tauBB.lb = 0 # lower bound on tauBB
tauBB.ub = 100 # upper bound on tauBB
## Initial conditions (one starting value per chain; indexed by `chain` below)
Vcmax.ic = c(88.7,200,30)
Jmax.ic = c(144.8,300,50)
R.ic = c(0.6,2,0.01)
Gstar.ic = c(29.8,60,5)
alpha.ic = c(0.85,0.99,0.3)
m.ic = c(12.8,20,1)
g0.ic = c(0,1,-1)
tauF.ic = c(0.4,1,0.01)
tauBB.ic = c(0.033,0.05,0.001)
## Jump SD (presumably the proposal standard deviations for the MCMC updates
## in FBB_mcmc.R -- confirm there)
jumpSD.Vcmax = 5
jumpSD.Jmax = 10
jumpSD.R = 1
jumpSD.Gstar = 5
jumpSD.alpha = 0.1
jumpSD.m = 3
jumpSD.g0 = 0.05
jumpSD.tauF = 0.01
jumpSD.tauBB = 0.01
## Constants
O = 210 # [O2] in ppt (millimol/mol)
Kc = 275 # Michaelis-Menten constant of RuBisCO for CO2
Ko = 400 # Michaelis-Menten constant of RuBisCO for O
phi = 0.85 # maximum dark-adapted quantum yield of PSII
beta = 0.5 # fraction of absorbed quanta that reaches PSII
theta = 0.7 # empirical curvature factor
#**********************************************************************
## FOR EACH LEAF - CHAIN
runName = paste(species.id, leaf, chain, sep="-") # e.g. "1-31-1"; used for the output file name
## Run setup file
source(setupFile)
## Load functions (in separate file for convenience)
source(funcFile)
## Initial Conditions
Vcmax = Vcmax.ic[chain] # max velocity of carboxylation (micromol/m^2/s)
Jmax = Jmax.ic[chain] # max rate of electron transport (micromol/m^2/s)
R = R.ic[chain] # day respiration (micromol/m^2/s)
Gstar = Gstar.ic[chain] # CO2 compensation pt in the absence of dark resp
alpha = alpha.ic[chain] # absorbance of leaves (3.1)
m = m.ic[chain] # slope of Ball-Berry model
g0 = g0.ic[chain] # y-intercept of Ball-Berry model
tauF = tauF.ic[chain] # variance of Farquhar model
tauBB = tauBB.ic[chain] # variance of Ball-Berry model
## Run MCMC
source(mcmcFile,print.eval=TRUE)
save.image(paste(saveDirDat, runName, ".Rdata", sep="")) # snapshot full workspace for post-processing
#**********************************************************************
elapsedTime = proc.time()[3] - startTime
print(elapsedTime)
efficiency = elapsedTime/niter
print(paste('Seconds/Iteration:', efficiency))
| /modules/photosynthesis/code/FBB_main.R | permissive | PecanProject/pecan | R | false | false | 4,756 | r | ## Setup and Run FBBmodel
##syntax: nohup env SPECIES=1 LEAF=31 CHAIN=1 R --vanilla < FBB_main.R > log1-31-1 &
# Driver script: fits the coupled Farquhar / Ball-Berry (FBB) leaf gas
# exchange model by MCMC for one species/leaf/chain combination.  The run
# is configured through the SPECIES, LEAF and CHAIN environment variables
# (see the syntax line above) so many runs can be farmed out from a shell.
startTime = proc.time()[3] # Start timer
#****************** USER PARAMETERS ***********************************
# NOTE(review): as.numeric() on an unset env var yields NA, which will
# break indexing below -- all three variables must be exported beforehand.
species.id <- as.numeric(system("echo $SPECIES",intern=TRUE))
leaf <- as.numeric(system("echo $LEAF",intern=TRUE))
chain <- as.numeric(system("echo $CHAIN",intern=TRUE))
niter = 100000 # number of MCMC iterations to compute
nchains = 3 # number of chains that will be used
progressEvery = 10000 # How frequent to print progress
## File parameters
filePath = "/home/wolz1/Biomath/" # "/Users/wolzy4u/Desktop/Biomath/"
datFile = "Kdata_Project.csv" # "Biomath_Processed_Data.csv"
saveDirDat = "FBB_soyface_output_dat/" # "FBB_output_dat/"
setupFile = "FBB_setup.R"
funcFile = "FBB_functions.R"
mcmcFile = "FBB_mcmc.R"
# Resolve all file names relative to filePath.
datFile = paste(filePath, datFile, sep="")
saveDirDat = paste(filePath, saveDirDat, sep="")
setupFile = paste(filePath, setupFile,sep="")
funcFile = paste(filePath, funcFile, sep="")
mcmcFile = paste(filePath, mcmcFile, sep="")
dir.create(saveDirDat,showWarnings=FALSE,recursive=TRUE)
## Toggles and parameters
# Per-parameter blocks below: sample.<p> toggles sampling of parameter <p>;
# prior.mu/.sd are normal prior moments (presumably on a log scale for the
# positive parameters, e.g. exp(4.61) ~ 100 for Vcmax -- confirm against
# FBB_mcmc.R); .lb/.ub are hard bounds; the tau* priors use shapes s1/s2.
compute.pA = TRUE # Whether or not to compute pA
compute.pgs = TRUE # Whether or not to compute pgs
compute.DA = TRUE # Whether or not to compute DA
compute.Dgs = TRUE # Whether or not to compute Dgs
track.An.pred = TRUE # Whether or not to track An.pred (TRUE = yes)
track.gs.pred = TRUE # Whether or not to track gs.pred (TRUE = yes)
sample.Vcmax = TRUE # Whether or not to sample Vcmax (TRUE = yes)
prior.mu.Vcmax = 4.61
prior.sd.Vcmax = 0.32
sample.Jmax = TRUE # Whether or not to sample Jmax (TRUE = yes)
prior.mu.Jmax = 5.16
prior.sd.Jmax = 0.32
sample.R = TRUE # Whether or not to sample R (TRUE = yes)
prior.mu.R = -0.69
prior.sd.R = 1
R.lb = 0 # lower bound on R
R.ub = 10 # upper bound on R
sample.Gstar = TRUE # Whether or not to sample Gstar (TRUE = yes)
prior.mu.Gstar = 3.75
prior.sd.Gstar = 1
sample.alpha = TRUE # Whether or not to sample alpha (TRUE = yes)
prior.mu.alpha = -0.15
prior.sd.alpha = 0.1
alpha.lb = 0 # lower bound on alpha
alpha.ub = 1 # upper bound on alpha
sample.m = TRUE # Whether or not to sample m (TRUE = yes)
prior.mu.m = 10
prior.sd.m = 10
sample.g0 = TRUE # Whether or not to sample g0 (TRUE = yes)
prior.mu.g0 = 0
prior.sd.g0 = 0.1
sample.tauF = TRUE # Whether or not to sample tauF (TRUE = yes)
prior.s1.tauF = 0.1
prior.s2.tauF = 0.1
tauF.lb = 0 # lower bound on tauF
tauF.ub = 100 # upper bound on tauF
sample.tauBB = TRUE # Whether or not to sample tauBB (TRUE = yes)
prior.s1.tauBB = 0.1
prior.s2.tauBB = 0.00001
tauBB.lb = 0 # lower bound on tauBB
tauBB.ub = 100 # upper bound on tauBB
## Initial conditions
# One starting value per chain; vector length must be >= nchains.
Vcmax.ic = c(88.7,200,30)
Jmax.ic = c(144.8,300,50)
R.ic = c(0.6,2,0.01)
Gstar.ic = c(29.8,60,5)
alpha.ic = c(0.85,0.99,0.3)
m.ic = c(12.8,20,1)
g0.ic = c(0,1,-1)
tauF.ic = c(0.4,1,0.01)
tauBB.ic = c(0.033,0.05,0.001)
## Jump SD
# Proposal (random-walk jump) standard deviations used by the sampler.
jumpSD.Vcmax = 5
jumpSD.Jmax = 10
jumpSD.R = 1
jumpSD.Gstar = 5
jumpSD.alpha = 0.1
jumpSD.m = 3
jumpSD.g0 = 0.05
jumpSD.tauF = 0.01
jumpSD.tauBB = 0.01
## Constants
O = 210 # [O2] in ppt (millimol/mol)
Kc = 275 # Michaelis-Menten constant of RuBisCO for CO2
Ko = 400 # Michaelis-Menten constant of RuBisCO for O
phi = 0.85 # maximum dark-adapted quantum yield of PSII
beta = 0.5 # fraction of absorbed quanta that reaches PSII
theta = 0.7 # empirical curvature factor
#**********************************************************************
## FOR EACH LEAF - CHAIN
# Run identifier used for the saved workspace: "<species>-<leaf>-<chain>".
runName = paste(species.id, leaf, chain, sep="-")
## Run setup file
source(setupFile)
## Load functions (in separate file for convenience)
source(funcFile)
## Initial Conditions
# Chain-specific starting values so the chains start over-dispersed.
Vcmax = Vcmax.ic[chain] # max velocity of carboxylation (micromol/m^2/s)
Jmax = Jmax.ic[chain] # max rate of electron transport (micromol/m^2/s)
R = R.ic[chain] # day respiration (micromol/m^2/s)
Gstar = Gstar.ic[chain] # CO2 compensation pt in the absence of dark resp
alpha = alpha.ic[chain] # absorbance of leaves (3.1)
m = m.ic[chain] # slope of Ball-Berry model
g0 = g0.ic[chain] # y-intercept of Ball-Berry model
tauF = tauF.ic[chain] # variance of Farquhar model
tauBB = tauBB.ic[chain] # variance of Ball-Berry model
## Run MCMC
# print.eval=TRUE echoes visible results of top-level expressions in the
# sourced sampler script.
source(mcmcFile,print.eval=TRUE)
# Snapshot the entire workspace (data, chains, settings) for this run.
save.image(paste(saveDirDat, runName, ".Rdata", sep=""))
#**********************************************************************
# Wall-clock timing; startTime was recorded at the top of the script.
elapsedTime = proc.time()[3] - startTime
print(elapsedTime)
efficiency = elapsedTime/niter
print(paste('Seconds/Iteration:', efficiency))
|
#
# Please see:
# http://www.cnblogs.com/getong/archive/2013/04/01/2993139.html
#
# Renders a grouped line chart comparing allocations-per-operation across
# five Go redis client libraries.  Each *.alloc.tmp input holds a single
# column (V1) of allocation counts in a fixed operation order (Ping, Set,
# Get, Incr, LPush, LRange10/100/1000).
png(filename="redis-alloc-benchmarks.png",width=1400, height=900)
Sys.setlocale(, "en_US.UTF-8")
oldpar <- par(lwd=4) # thick lines for all series; restored at the end
AlphazeroRedis <- read.table("AlphazeroRedis.alloc.tmp")
GaryburdRedigo <- read.table("GaryburdRedigo.alloc.tmp")
GosexyRedis <- read.table("GosexyRedis.alloc.tmp")
Simonz05Godis <- read.table("Simonz05Godis.alloc.tmp")
FishRedis <- read.table("FishRedis.alloc.tmp")
# Base plot drawn without axes/labels so custom ones can be added below.
plot(AlphazeroRedis$V1, type="o", ylim = c(0, 400), col = "black", axes=FALSE, ann=FALSE)
text(2, AlphazeroRedis$V1[2], cex=2, pos=3, col="black", "AlphazeroRedis")
axis(1, at=1:9, lab=c("Ping","Set","Get","Incr", "LPush", "LRange10", "LRange100", "LRange1000", ""))
# NOTE(review): `40*0: 400` parses as 40*(0:400), i.e. ticks 0,40,...,16000;
# ticks beyond ylim=400 are clipped, but 40*(0:10) was probably intended.
axis(2, las=0, at=40*0: 400)
box()
title(xlab="Operation", col = "black")
title(ylab="alloc/op", col = "black")
title(main = "Go drivers for redis")
# Overlay the remaining drivers, labelling each series near a data point.
lines(GaryburdRedigo, col = "red")
text(6, GaryburdRedigo$V1[6], cex=2, pos=1, col="red", "GaryburdRedigo")
lines(GosexyRedis, col = "blue")
text(2, GosexyRedis$V1[2], pos=1,col="blue", cex=2, "GosexyRedis")
lines(Simonz05Godis, col = "yellow")
text(4, Simonz05Godis$V1[4],pos=3, col="yellow",cex=2, "Simonz05Godis")
lines(FishRedis, col = "gray")
text(3, FishRedis$V1[3],pos=1,cex=2, col="gray", "FishRedis")
par(oldpar)
dev.off()
| /_benchmarks/go-redis-alloc-data.R | permissive | go-fish/redis | R | false | false | 1,317 | r | #
# Please see:
# http://www.cnblogs.com/getong/archive/2013/04/01/2993139.html
#
# Renders a grouped line chart comparing allocations-per-operation across
# five Go redis client libraries.  Each *.alloc.tmp input holds a single
# column (V1) of allocation counts in a fixed operation order (Ping, Set,
# Get, Incr, LPush, LRange10/100/1000).
png(filename="redis-alloc-benchmarks.png",width=1400, height=900)
Sys.setlocale(, "en_US.UTF-8")
oldpar <- par(lwd=4) # thick lines for all series; restored at the end
AlphazeroRedis <- read.table("AlphazeroRedis.alloc.tmp")
GaryburdRedigo <- read.table("GaryburdRedigo.alloc.tmp")
GosexyRedis <- read.table("GosexyRedis.alloc.tmp")
Simonz05Godis <- read.table("Simonz05Godis.alloc.tmp")
FishRedis <- read.table("FishRedis.alloc.tmp")
# Base plot drawn without axes/labels so custom ones can be added below.
plot(AlphazeroRedis$V1, type="o", ylim = c(0, 400), col = "black", axes=FALSE, ann=FALSE)
text(2, AlphazeroRedis$V1[2], cex=2, pos=3, col="black", "AlphazeroRedis")
axis(1, at=1:9, lab=c("Ping","Set","Get","Incr", "LPush", "LRange10", "LRange100", "LRange1000", ""))
# NOTE(review): `40*0: 400` parses as 40*(0:400), i.e. ticks 0,40,...,16000;
# ticks beyond ylim=400 are clipped, but 40*(0:10) was probably intended.
axis(2, las=0, at=40*0: 400)
box()
title(xlab="Operation", col = "black")
title(ylab="alloc/op", col = "black")
title(main = "Go drivers for redis")
# Overlay the remaining drivers, labelling each series near a data point.
lines(GaryburdRedigo, col = "red")
text(6, GaryburdRedigo$V1[6], cex=2, pos=1, col="red", "GaryburdRedigo")
lines(GosexyRedis, col = "blue")
text(2, GosexyRedis$V1[2], pos=1,col="blue", cex=2, "GosexyRedis")
lines(Simonz05Godis, col = "yellow")
text(4, Simonz05Godis$V1[4],pos=3, col="yellow",cex=2, "Simonz05Godis")
lines(FishRedis, col = "gray")
text(3, FishRedis$V1[3],pos=1,cex=2, col="gray", "FishRedis")
par(oldpar)
dev.off()
|
predict.SemiParBIVProbit <- function(object, eq, ...){
## Prediction method for fitted SemiParBIVProbit models.
##
## Extracts the marginal GAM of equation `eq`, patches it with the
## joint-model coefficient estimates and covariance for that equation's
## block, then delegates to predict() on the patched GAM.
##
## object: fitted SemiParBIVProbit object (holds gam1..gam8, X<k>.d2,
##         coefficients, Vb, Vb.t, l.flist, ...).
## eq:     equation number (1..object$l.flist).
## ...:    passed through to predict() (e.g. newdata, type).
##
## The original implementation enumerated eq==1..8 by hand; the stacked
## joint coefficient vector lays the equations' blocks out consecutively,
## with X<k>.d2 columns for equation k, so the block for `eq` is simply the
## last d2[eq] positions of cumsum(d2[1:eq]).  Computing it directly removes
## the duplication and works for any number of equations.
if(missing(eq)) stop("You must provide the equation number.")
if(eq > object$l.flist) stop("The fitted model has a smaller number of equations.")
#if(object$surv == TRUE) predict.gam <- predict.SemiParBIVProbitB
d2 <- vapply(seq_len(eq), function(k) object[[paste0("X", k, ".d2")]], numeric(1))
last <- sum(d2)
ind <- (last - d2[eq] + 1):last
ss.pred <- object[[paste0("gam", eq)]]
## Overwrite the marginal fit with the joint-model estimates so predict()
## uses them; the scale is marked as known and fixed at 1.
ss.pred$coefficients <- object$coefficients[ind]
ss.pred$Vp <- object$Vb[ind,ind]
ss.pred$Vp.t <- object$Vb.t[ind,ind]
ss.pred$sig2 <- 1
ss.pred$scale.estimated <- FALSE
#predict.gam(ss.pred, ...)
predict(ss.pred, ...)
}
| /R/predict.SemiParBIVProbit.r | no_license | cran/SemiParBIVProbit | R | false | false | 2,139 | r | predict.SemiParBIVProbit <- function(object, eq, ...){
## Guard clauses: eq is mandatory and must not exceed the number of fitted
## equations (l.flist).
if(missing(eq)) stop("You must provide the equation number.")
if(eq > object$l.flist) stop("The fitted model has a smaller number of equations.")
#if(object$surv == TRUE) predict.gam <- predict.SemiParBIVProbitB
## Select the marginal GAM for equation `eq` and the index range of its
## coefficient block inside the stacked joint coefficient vector.  X<k>.d2
## is the number of design-matrix columns of equation k, so the blocks are
## laid out consecutively in equation order.
if(eq==1){ ss.pred <- object$gam1
           ind <- 1:object$X1.d2
         }
if(eq==2){ ss.pred <- object$gam2
           ind <- (object$X1.d2+1):(object$X1.d2+object$X2.d2)
         }
if(eq==3){ ss.pred <- object$gam3
           ind <- (object$X1.d2+object$X2.d2+1):(object$X1.d2+object$X2.d2+object$X3.d2) }
if(eq==4){ ss.pred <- object$gam4
           ind <- (object$X1.d2+object$X2.d2+object$X3.d2+1):(object$X1.d2+object$X2.d2+object$X3.d2+object$X4.d2) }
if(eq==5){ ss.pred <- object$gam5
           ind <- (object$X1.d2+object$X2.d2+object$X3.d2+object$X4.d2+1):(object$X1.d2+object$X2.d2+object$X3.d2+object$X4.d2+object$X5.d2) }
if(eq==6){ ss.pred <- object$gam6
           ind <- (object$X1.d2+object$X2.d2+object$X3.d2+object$X4.d2+object$X5.d2+1):(object$X1.d2+object$X2.d2+object$X3.d2+object$X4.d2+object$X5.d2+object$X6.d2) }
if(eq==7){ ss.pred <- object$gam7
           ind <- (object$X1.d2+object$X2.d2+object$X3.d2+object$X4.d2+object$X5.d2+object$X6.d2+1):(object$X1.d2+object$X2.d2+object$X3.d2+object$X4.d2+object$X5.d2+object$X6.d2+object$X7.d2) }
if(eq==8){ ss.pred <- object$gam8
           ind <- (object$X1.d2+object$X2.d2+object$X3.d2+object$X4.d2+object$X5.d2+object$X6.d2+object$X7.d2+1):(object$X1.d2+object$X2.d2+object$X3.d2+object$X4.d2+object$X5.d2+object$X6.d2+object$X7.d2+object$X8.d2) }
## Patch the marginal GAM with the joint-model estimates so predict() uses
## them; sig2 = 1 with scale.estimated = FALSE marks the scale as known.
ss.pred$coefficients <- object$coefficients[ind]
ss.pred$Vp <- object$Vb[ind,ind]
ss.pred$Vp.t <- object$Vb.t[ind,ind]
ss.pred$sig2 <- 1
ss.pred$scale.estimated <- FALSE
#predict.gam(ss.pred, ...)
predict(ss.pred, ...)
}
|
x = rnorm(500) | /hard-coded/chart-types/histogram/basic-histogram/init.r | no_license | VukDukic/documentation | R | false | false | 14 | r | x = rnorm(500) |
#Understanding noise
# Exploratory analysis of the pseudo-Facebook sample: compares mean friend
# count conditioned on age in whole years vs. age in fractional "months" to
# show how finer conditioning bins trade smoothness for noise.
pf = read.csv("pseudo_facebook.tsv", sep = '\t')
str(pf)
library(ggplot2)
# NOTE(review): install.packages() inside an analysis script reinstalls the
# package on every run; normally done once, interactively.
install.packages("dplyr")
library(dplyr)
#another method to do the above summarisation
age_groups <- group_by(pf, age)
# Per-age summary: mean/median friend count and group size n, sorted by age.
pf.fc_by_age <- pf %>% group_by(age) %>%
  summarise(friend_count_mean = mean(friend_count),
            friend_count_median = median(friend_count),
            n = n()) %>%
  arrange(age)
head(pf.fc_by_age)
#Understanding noise: Age to Age Months
p1 <- ggplot(aes(age, friend_count_mean), data = subset(pf.fc_by_age, age< 71))+
  geom_line()+
  geom_smooth()
head(pf.fc_by_age, 10)
#making new variable 'age_with_months'
# The NULL assignment just drops any pre-existing column; the next line
# recreates it, so this line is redundant but harmless.
pf$age_with_months <- NULL
pf$age_with_months <- pf$age + (12- pf$dob_month)/12
str(pf$age_with_months)
#Age with month means
pf.fc_by_age_months <- pf %>% group_by(age_with_months)%>%
  summarise(friend_count_mean = mean(friend_count),
            friend_count_median = median(friend_count), n = n())%>%
  arrange(age_with_months)
head(pf.fc_by_age_months, 10)
# plot between friend_count_mean and the new variable, age_with_months
p2 <- ggplot(aes(age_with_months, friend_count_mean), data = subset(pf.fc_by_age_months, age_with_months< 71))+
  geom_line()+
  geom_smooth()+
  ylim(0, 500)
# NOTE(review): jpeg() opens a file graphics device that is never closed
# with dev.off(), so the output file may be empty until the device closes.
jpeg(file = "friend_count_meanvsage_with_months.jpeg")
library(gridExtra)
grid.arrange(p1, p2, ncol = 1)
str(pf.fc_by_age)
subset(pf.fc_by_age, age<71)
help('geom_smooth')
| /UnderstandingNoise.R | no_license | nitish1008/Scatter_plots | R | false | false | 1,412 | r | #Understanding noise
# Exploratory analysis of the pseudo-Facebook sample: compares mean friend
# count conditioned on age in whole years vs. age in fractional "months" to
# show how finer conditioning bins trade smoothness for noise.
pf = read.csv("pseudo_facebook.tsv", sep = '\t')
str(pf)
library(ggplot2)
# NOTE(review): install.packages() inside an analysis script reinstalls the
# package on every run; normally done once, interactively.
install.packages("dplyr")
library(dplyr)
#another method to do the above summarisation
age_groups <- group_by(pf, age)
# Per-age summary: mean/median friend count and group size n, sorted by age.
pf.fc_by_age <- pf %>% group_by(age) %>%
  summarise(friend_count_mean = mean(friend_count),
            friend_count_median = median(friend_count),
            n = n()) %>%
  arrange(age)
head(pf.fc_by_age)
#Understanding noise: Age to Age Months
p1 <- ggplot(aes(age, friend_count_mean), data = subset(pf.fc_by_age, age< 71))+
  geom_line()+
  geom_smooth()
head(pf.fc_by_age, 10)
#making new variable 'age_with_months'
# The NULL assignment just drops any pre-existing column; the next line
# recreates it, so this line is redundant but harmless.
pf$age_with_months <- NULL
pf$age_with_months <- pf$age + (12- pf$dob_month)/12
str(pf$age_with_months)
#Age with month means
pf.fc_by_age_months <- pf %>% group_by(age_with_months)%>%
  summarise(friend_count_mean = mean(friend_count),
            friend_count_median = median(friend_count), n = n())%>%
  arrange(age_with_months)
head(pf.fc_by_age_months, 10)
# plot between friend_count_mean and the new variable, age_with_months
p2 <- ggplot(aes(age_with_months, friend_count_mean), data = subset(pf.fc_by_age_months, age_with_months< 71))+
  geom_line()+
  geom_smooth()+
  ylim(0, 500)
# NOTE(review): jpeg() opens a file graphics device that is never closed
# with dev.off(), so the output file may be empty until the device closes.
jpeg(file = "friend_count_meanvsage_with_months.jpeg")
library(gridExtra)
grid.arrange(p1, p2, ncol = 1)
str(pf.fc_by_age)
subset(pf.fc_by_age, age<71)
help('geom_smooth')
|
#'Picks Fields of Focus
#'
#'Takes the output of \code{getField()} and squashes down rows that are not of interest to prepare the
#'tibble for use as the input to \code{mkNCDB()}.
#'@param d Tibble produced by \code{getFields()}.
#'@param picks Short names of fields you wish to keep.
#'@return Input dataframe with rows not wanted collapsed to strings. This output feeds into \code{mkNCDB()}.
#'@note Inspired by a function of the same name in \pkg{SEERaBomb}.
#'@examples
#' library(NCDBR)
#' d=getFields()
#' pickFields(d)
#'@name pickFields
#'@export
#'
#'
#'
pickFields<-function(d,picks=c("casenum","facID","fac","facLoc","agedx","sex","race",
                               "ins","inc","educ","urban","crow","charlson","seqnum",
                               "CoC","yrdx","histo3","stage","d2t","radiatn","d2c",
                               "chemo","hct","surv","alive") ) {
  # head(d,3)
  nms=NULL #kill check warning that arises 3 line downs
  # Total fixed-record width + 1; used below to detect a trailing unread
  # byte range.  (Outer parentheses print the value when run interactively.)
  (nBytesP1=sum(d$width)+1)
  ncols=dim(d)[1]
  # Keep only the picked fields (`nms` is a column of d; dplyr::filter).
  d=d%>%filter(nms%in%picks)
  # rownames(d)<-d$nms
  # d=d[picks,]
  (N=length(picks)) # number of columns in our version of interest
  # if("surv" %in% d$nms) d["surv","type"]="double" # do this in mkNCDB
  # if("crow" %in% d$nms) d["crow","type"]="double"
  df=d[1,,drop=F] # assume casenum is always in picks
  # Walk the kept fields in byte order; wherever two consecutive fields are
  # not byte-adjacent, insert a filler "string" row covering the skipped
  # bytes so the output still describes every byte of the record.
  # NOTE(review): assumes length(picks) >= 2 and that every pick is present
  # in d in increasing `start` order -- confirm upstream in getFields().
  for (i in 2:N) {
    # print(i)
    if (d$start[i]==(up<-d$start[i-1]+d$width[i-1]) )
      df=rbind(df,d[i,]) else {
        df=rbind(df,data.frame(start=up,width=(d$start[i]-up),names=" ",nms=" ",type="string",labs="",Levs=NA))
        df=rbind(df,d[i,])
      }
  }
  # `i` keeps its final loop value (N) here: pad out to the end of the
  # record if the last kept field does not reach it.
  if ((up<-d$start[i]+d$width[i])<nBytesP1)
    df=rbind(df,data.frame(start=up,width=(nBytesP1-up),names=" ",nms=" ",type="string",labs="",Levs=NA))
  df
}
| /R/pickFields.R | no_license | ZhuoXI-ai/NCDBR | R | false | false | 1,764 | r | #'Picks Fields of Focus
#'
#'Takes the output of \code{getField()} and squashes down rows that are not of interest to prepare the
#'tibble for use as the input to \code{mkNCDB()}.
#'@param d Tibble produced by \code{getFields()}.
#'@param picks Short names of fields you wish to keep.
#'@return Input dataframe with rows not wanted collapsed to strings. This output feeds into \code{mkNCDB()}.
#'@note Inspired by a function of the same name in \pkg{SEERaBomb}.
#'@examples
#' library(NCDBR)
#' d=getFields()
#' pickFields(d)
#'@name pickFields
#'@export
#'
#'
#'
pickFields<-function(d,picks=c("casenum","facID","fac","facLoc","agedx","sex","race",
                               "ins","inc","educ","urban","crow","charlson","seqnum",
                               "CoC","yrdx","histo3","stage","d2t","radiatn","d2c",
                               "chemo","hct","surv","alive") ) {
  # head(d,3)
  nms=NULL #kill check warning that arises 3 line downs
  # Total fixed-record width + 1; used below to detect a trailing unread
  # byte range.  (Outer parentheses print the value when run interactively.)
  (nBytesP1=sum(d$width)+1)
  ncols=dim(d)[1]
  # Keep only the picked fields (`nms` is a column of d; dplyr::filter).
  d=d%>%filter(nms%in%picks)
  # rownames(d)<-d$nms
  # d=d[picks,]
  (N=length(picks)) # number of columns in our version of interest
  # if("surv" %in% d$nms) d["surv","type"]="double" # do this in mkNCDB
  # if("crow" %in% d$nms) d["crow","type"]="double"
  df=d[1,,drop=F] # assume casenum is always in picks
  # Walk the kept fields in byte order; wherever two consecutive fields are
  # not byte-adjacent, insert a filler "string" row covering the skipped
  # bytes so the output still describes every byte of the record.
  # NOTE(review): assumes length(picks) >= 2 and that every pick is present
  # in d in increasing `start` order -- confirm upstream in getFields().
  for (i in 2:N) {
    # print(i)
    if (d$start[i]==(up<-d$start[i-1]+d$width[i-1]) )
      df=rbind(df,d[i,]) else {
        df=rbind(df,data.frame(start=up,width=(d$start[i]-up),names=" ",nms=" ",type="string",labs="",Levs=NA))
        df=rbind(df,d[i,])
      }
  }
  # `i` keeps its final loop value (N) here: pad out to the end of the
  # record if the last kept field does not reach it.
  if ((up<-d$start[i]+d$width[i])<nBytesP1)
    df=rbind(df,data.frame(start=up,width=(nBytesP1-up),names=" ",nms=" ",type="string",labs="",Levs=NA))
  df
}
|
#### Assignment 4 ####
# 1. [4 Points] The BayesFactor package contains a function regressionBF. Use regressionBF on the dTransform data -- use the threshold values (S, M, L, and so on) to predict RE (refractive error). Look at the Bayes Factor output, what does it seem to tell you? The help shows an example of how the BF for each model can be tested against the full model (all thresholds). Put this line in the script with a comment that notes the best model. How does this model compare to what we saw with the stepwise model selection we did in previous lectures?
# Pull the full threshold data set straight from GitHub (needs network access).
dFull <- read.csv('https://raw.githubusercontent.com/hashtagcpt/biostats2/master/full_data.csv')
make_dTransform <- function(dFull) {
  # Build the analysis data frame: log10-transform the six raw threshold
  # columns (thr1..thr6) and relabel them by stimulus condition
  # (A, L, M, Sneg, Spos, S), keeping subject (as a factor) and RE.
  thr_names <- paste0("thr", 1:6)
  log_thresholds <- lapply(dFull[thr_names], log10)
  out <- data.frame(subject = factor(dFull$subject),
                    RE = dFull$RE,
                    log_thresholds)
  colnames(out) <- c('subject', 'RE', 'A', 'L', 'M', 'Sneg', 'Spos', 'S')
  out
}
dTransform <- make_dTransform(dFull)
library(BayesFactor)
# Bayes-factor regressions of RE on all subsets of the six (logged)
# threshold predictors; this first call just prints the BF table.
regressionBF(RE ~ A + L +M + Sneg + Spos + S, data = dTransform)
output = regressionBF(RE ~ A + L + M + Sneg + Spos + S, data = dTransform)
#Bayes Factor value more than 1 indicates that this factor has a higher probability than the null hypothesis in playing a factor for RE.
# Rescale every model's BF relative to model 63 (the full, all-predictor
# model) so each entry reads as evidence against the full model.
output/output[63]
#2. [3 Points] A colleague is going to run an experiment where they are going to compute a correlation test. Beforehand, they ask you how many subjects they need to run. Based on previous work, they believe the r for their experiment could be between .2 and .6, their funders want a *Type II error* probability of no more than 0.4, and they will use a conventional alpha of .05. Calculate what the minimum and the maximum number of subjects they need to run for their experiment to be adequately powered, given their funder's constraints.
# NOTE(review): install.packages() in a script reinstalls on every run.
install.packages('pwr')
library(pwr)
# power = 1 - beta, with the Type II error beta capped at 0.4 by the funder.
pwr.r.test(r = 0.2, sig.level = 0.05, power = 1-0.4)
pwr.r.test(r = 0.6, sig.level = 0.05, power = 1-0.4)
# 3. [3 Points]
# This function will help you create some simulated psychometric data...
datasim <- function(contrasts, pcorrect, ntrials) {
  # Simulate Bernoulli (correct/incorrect) psychophysics data: for each
  # contrast level contrasts[i], draw `ntrials` binary responses with
  # success probability pcorrect[i].
  #
  # contrasts: vector of stimulus contrast levels.
  # pcorrect:  vector of P(correct), parallel to `contrasts`.
  # ntrials:   number of simulated trials per contrast level.
  #
  # Returns a data.frame with columns `contrast` and `correct` (0/1),
  # ntrials rows per level, stacked in the order of `contrasts`.
  #
  # The original grew the data.frame with rbind() inside the loop
  # (quadratic, and errored on empty input); build the per-level pieces
  # first and bind once instead.
  pieces <- lapply(seq_along(contrasts), function(i) {
    data.frame(contrast = rep(contrasts[i], ntrials),
               correct = rbinom(ntrials, 1, pcorrect[i]))
  })
  do.call(rbind, pieces)
}
# Create some data with contrast levels for the variable "contrasts" 0.01 to 0.1 in 10 steps. Create a vector that goes from 0 to 1 for "pcorrect". Set ntrials equal to 20. Fit and plot the resulting psychometric function. What is 50% threshold level for this resulting function?
# NOTE(review): the next two assignments are dead -- both variables are
# overwritten by the seq() calls below before first use.
contrasts <-c(0.01, 0.02, 0.03, 0.04, 0.05, 0.06, 0.07, 0.08, 0.09, 0.1)
pcorrect <- c(0, 1)
library(tidyverse)
library(psyphy)
#Create a vector that goes from 0 to 1 for "pcorrect"
contrast <- seq(0.01, 0.1, length.out = 10)
pcorrect <- seq(0.0, 1.0,length.out = 10)
# Create data
# ("dat.contast" keeps the original typo; downstream code uses this name.)
dat.contast <- datasim(contrast, pcorrect, ntrials=20)
# Tally correct/incorrect per contrast; complete() pads any missing
# contrast-by-outcome cell with a pseudo-count of 1 so no cell is empty.
dc_summary <- dat.contast %>% group_by(contrast,correct) %>% tally() %>% ungroup() %>% complete(contrast, correct, fill = list(n = 1))
dc_summary <- subset(dc_summary, correct == 1)
dc_summary$incorrect <- 20 - dc_summary$n
dc_summary$correct <- dc_summary$n
# Take the absolute value of negative contrast values
dc_summary$contrast <- abs(dc_summary$contrast)
# Fit the psychometric function
# Binomial GLM on (correct, incorrect) counts against log-contrast.
psymet_fit <- glm(formula = cbind(correct, incorrect) ~ log(contrast), family = binomial, data = dc_summary)
# Dense grid of contrasts for a smooth fitted curve.
xseq <- seq(0.01, 0.1, len = 100)
psymet_fit.pred <- predict(psymet_fit, newdata = data.frame(contrast = xseq), type = "response", se.fit = TRUE)
psymet_pred_df <- data.frame(xseq, psymet_fit.pred$fit)
colnames(psymet_pred_df) <- c('contrast','fit')
psymet_plot <- ggplot(data = dc_summary, aes(x = contrast, y = correct / 20)) + geom_point(size = 2) + geom_line(data = psymet_pred_df, aes(x = contrast, y = fit), size = 1) + ylab('proportion correct') + theme(text = element_text(size=14), axis.text.x = element_text(size = 14), axis.text.y = element_text(size=14))
psymet_plot
# The 50% threshold is around 0.050 based on the graph | /assignment_4/Emily_Jeong_Assignment_4.R | no_license | hashtagcpt/biostats-2 | R | false | false | 4,574 | r | #### Assignment 4 ####
# 1. [4 Points] The BayesFactor package contains a function regressionBF. Use regressionBF on the dTransform data -- use the threshold values (S, M, L, and so on) to predict RE (refractive error). Look at the Bayes Factor output, what does it seem to tell you? The help shows an example of how the BF for each model can be tested against the full model (all thresholds). Put this line in the script with a comment that notes the best model. How does this model compare to what we saw with the stepwise model selection we did in previous lectures?
# Pull the full threshold data set straight from GitHub (needs network access).
dFull <- read.csv('https://raw.githubusercontent.com/hashtagcpt/biostats2/master/full_data.csv')
make_dTransform <- function(dFull) {
  # Build the analysis data frame: log10-transform the six raw threshold
  # columns (thr1..thr6) and relabel them by stimulus condition
  # (A, L, M, Sneg, Spos, S), keeping subject (as a factor) and RE.
  thr_names <- paste0("thr", 1:6)
  log_thresholds <- lapply(dFull[thr_names], log10)
  out <- data.frame(subject = factor(dFull$subject),
                    RE = dFull$RE,
                    log_thresholds)
  colnames(out) <- c('subject', 'RE', 'A', 'L', 'M', 'Sneg', 'Spos', 'S')
  out
}
dTransform <- make_dTransform(dFull)
library(BayesFactor)
# Bayes-factor regressions of RE on all subsets of the six (logged)
# threshold predictors; this first call just prints the BF table.
regressionBF(RE ~ A + L +M + Sneg + Spos + S, data = dTransform)
output = regressionBF(RE ~ A + L + M + Sneg + Spos + S, data = dTransform)
#Bayes Factor value more than 1 indicates that this factor has a higher probability than the null hypothesis in playing a factor for RE.
# Rescale every model's BF relative to model 63 (the full, all-predictor
# model) so each entry reads as evidence against the full model.
output/output[63]
#2. [3 Points] A colleague is going to run an experiment where they are going to compute a correlation test. Beforehand, they ask you how many subjects they need to run. Based on previous work, they believe the r for their experiment could be between .2 and .6, their funders want a *Type II error* probability of no more than 0.4, and they will use a conventional alpha of .05. Calculate what the minimum and the maximum number of subjects they need to run for their experiment to be adequately powered, given their funder's constraints.
# NOTE(review): install.packages() in a script reinstalls on every run.
install.packages('pwr')
library(pwr)
# power = 1 - beta, with the Type II error beta capped at 0.4 by the funder.
pwr.r.test(r = 0.2, sig.level = 0.05, power = 1-0.4)
pwr.r.test(r = 0.6, sig.level = 0.05, power = 1-0.4)
# 3. [3 Points]
# This function will help you create some simulated psychometric data...
datasim <- function(contrasts, pcorrect, ntrials) {
  # Simulate Bernoulli (correct/incorrect) psychophysics data: for each
  # contrast level contrasts[i], draw `ntrials` binary responses with
  # success probability pcorrect[i].
  #
  # contrasts: vector of stimulus contrast levels.
  # pcorrect:  vector of P(correct), parallel to `contrasts`.
  # ntrials:   number of simulated trials per contrast level.
  #
  # Returns a data.frame with columns `contrast` and `correct` (0/1),
  # ntrials rows per level, stacked in the order of `contrasts`.
  #
  # The original grew the data.frame with rbind() inside the loop
  # (quadratic, and errored on empty input); build the per-level pieces
  # first and bind once instead.
  pieces <- lapply(seq_along(contrasts), function(i) {
    data.frame(contrast = rep(contrasts[i], ntrials),
               correct = rbinom(ntrials, 1, pcorrect[i]))
  })
  do.call(rbind, pieces)
}
# Create some data with contrast levels for the variable "contrasts" 0.01 to 0.1 in 10 steps. Create a vector that goes from 0 to 1 for "pcorrect". Set ntrials equal to 20. Fit and plot the resulting psychometric function. What is 50% threshold level for this resulting function?
# NOTE(review): the next two assignments are dead -- both variables are
# overwritten by the seq() calls below before first use.
contrasts <-c(0.01, 0.02, 0.03, 0.04, 0.05, 0.06, 0.07, 0.08, 0.09, 0.1)
pcorrect <- c(0, 1)
library(tidyverse)
library(psyphy)
#Create a vector that goes from 0 to 1 for "pcorrect"
contrast <- seq(0.01, 0.1, length.out = 10)
pcorrect <- seq(0.0, 1.0,length.out = 10)
# Create data
# ("dat.contast" keeps the original typo; downstream code uses this name.)
dat.contast <- datasim(contrast, pcorrect, ntrials=20)
# Tally correct/incorrect per contrast; complete() pads any missing
# contrast-by-outcome cell with a pseudo-count of 1 so no cell is empty.
dc_summary <- dat.contast %>% group_by(contrast,correct) %>% tally() %>% ungroup() %>% complete(contrast, correct, fill = list(n = 1))
dc_summary <- subset(dc_summary, correct == 1)
dc_summary$incorrect <- 20 - dc_summary$n
dc_summary$correct <- dc_summary$n
# Take the absolute value of negative contrast values
dc_summary$contrast <- abs(dc_summary$contrast)
# Fit the psychometric function
# Binomial GLM on (correct, incorrect) counts against log-contrast.
psymet_fit <- glm(formula = cbind(correct, incorrect) ~ log(contrast), family = binomial, data = dc_summary)
# Dense grid of contrasts for a smooth fitted curve.
xseq <- seq(0.01, 0.1, len = 100)
psymet_fit.pred <- predict(psymet_fit, newdata = data.frame(contrast = xseq), type = "response", se.fit = TRUE)
psymet_pred_df <- data.frame(xseq, psymet_fit.pred$fit)
colnames(psymet_pred_df) <- c('contrast','fit')
psymet_plot <- ggplot(data = dc_summary, aes(x = contrast, y = correct / 20)) + geom_point(size = 2) + geom_line(data = psymet_pred_df, aes(x = contrast, y = fit), size = 1) + ylab('proportion correct') + theme(text = element_text(size=14), axis.text.x = element_text(size = 14), axis.text.y = element_text(size=14))
psymet_plot
# The 50% threshold is around 0.050 based on the graph |
#-------------------------------------------------------------------------------
# IBPNEW 2012
# Code to generate the .dat file for input into the ADMB model
# Uses Lowestoft format input files
# David Miller, some code adapted from FLR-project
# Date: 3 Oct 2012
#-------------------------------------------------------------------------------

# NOTE(review): clearing the workspace here is a side effect on the caller's
# session; kept only because these assessment scripts are run as stand-alone
# jobs that expect a clean environment.
rm(list=ls())

# FLR and assessment packages
# install.packages(repos="http://flr-project.org/R")
library(FLCore); library(mgcv)
library(FLAssess)
library(stockassessment)
library(FLEDA); library(splines)
library(scales); library(gplots); library(grid); library(gridExtra); library(latticeExtra)
library(sas7bdat)
library(TMB); library(FLSAM)

# Set paths to folders (paste0() replaces the original paste(..., sep="")).
Path     <- "D:/Repository/Turbot/assessment runs/"
dataPath <- paste0(Path, "Lowestoft files/")
# NOTE(review): the original used "trial_runs_2017" (lower case) for outPath
# while codePath used "Trial_runs_2017"; harmless on Windows' case-insensitive
# file system but normalised here for case-sensitive ones.
outPath  <- paste0(Path, "Trial_runs_2017/Output/")
codePath <- paste0(Path, "Trial_runs_2017/")

## Source methods/functions (expected to define `stock` and `indices`)
source(paste0(codePath, "03a_setupStockIndices.r"))

run  <- "lowWeightAge1"
sens <- ""

### ------------------------------------------------------------------------------------------------------
### 2. Read and process assessment input data
### ------------------------------------------------------------------------------------------------------
# Trim the tuning series: index 1 to ages 1-6 from 2004 on, index 2 from 1991
# on, index 3 unchanged.
indices <- FLIndices(list(window(trim(indices[[1]],age=1:6),start=2004),window(indices[[2]],start=1991),indices[[3]]))

### ------------------------------------------------------------------------------------------------------
### 3. Setup data structure for SAM assessment
### ------------------------------------------------------------------------------------------------------
TUR     <- stock
TUR.tun <- indices
TUR.ctrl <- FLSAM.control(TUR,TUR.tun)
# Fishing-mortality states: separate state per age 0-6, oldest three ages bound.
TUR.ctrl@states["catch",] <- c(0:6,rep(7,3))
TUR.ctrl@cor.F <- 2
# Catchability bindings per fleet; the +101/+201/+301 offsets keep parameter
# numbers unique across fleets.
TUR.ctrl@catchabilities["SNS",ac(1:6)] <- c(0:2,rep(3,3)) + 101
TUR.ctrl@catchabilities["BTS-ISIS",ac(1:7)] <- c(0,0,1,1,rep(2,3)) + 201
TUR.ctrl@catchabilities["NL_LPUE",ac(1)] <- 0 + 301
# F random-walk variances and observation variances (same offset convention).
TUR.ctrl@f.vars["catch",] <- c(0,1,2,2,3,3,3,4,4,4)
TUR.ctrl@logN.vars[] <- c(0,rep(1,9))
TUR.ctrl@obs.vars["catch",] <- c(0,1,2,2,3,3,4,4,4,4) + 101
TUR.ctrl@obs.vars["SNS",ac(1:6)] <- c(0,0,1,2,3,3) + 201
TUR.ctrl@obs.vars["BTS-ISIS",ac(1:7)] <- c(0,0,0,1,2,3,3) + 301
TUR.ctrl@obs.vars["NL_LPUE",ac(1)] <- 0 + 401
# AR(1) correlation structure across ages for the SNS observations only.
TUR.ctrl@cor.obs[] <- NA
TUR.ctrl@cor.obs["SNS",1:5] <- c(0,rep(1,4))
TUR.ctrl@cor.obs.Flag[2] <- af("AR")
TUR.ctrl@biomassTreat[4] <- 2
TUR.ctrl <- update(TUR.ctrl)
# Down-weight the first catch observation (this is the "lowWeightAge1" run).
TUR.ctrl@obs.weight[] <- 1; TUR.ctrl@obs.weight[1,1] <- 1/10

### ------------------------------------------------------------------------------------------------------
### 4. Run assessment
### ------------------------------------------------------------------------------------------------------
TUR.sam <- FLSAM(TUR,TUR.tun,TUR.ctrl)
# Skip residual computation in the 7-peel retrospective to save time.
TUR.ctrl@residuals <- FALSE; TUR.sam@control@residuals <- FALSE
TUR.retro <- retro(TUR,TUR.tun,TUR.ctrl,retro=7,base.assess=TUR.sam)

### ------------------------------------------------------------------------------------------------------
### 5. Diagnostics
### ------------------------------------------------------------------------------------------------------
source(file.path(codePath,"03b_runDiagnostics.r"))
| /assessment runs/Trial_runs_2017/03s_lowWeightAge1Run.r | no_license | ices-eg/wg_IBPTur.27.4 | R | false | false | 3,679 | r | #-------------------------------------------------------------------------------
# IBPNEW 2012
# Code to generate the .dat file for input into the ADMB model
# Uses Lowestoft format input files
# David Miller, some code adapted from FLR-project
# Date: 3 Oct 2012
#-------------------------------------------------------------------------------
rm(list=ls())
# FLR
# install.packages(repos="http://flr-project.org/R")
library(FLCore);library(mgcv)
library(FLAssess);
library(stockassessment)
library(FLEDA); library(splines);
library(scales); library(gplots);library(grid); library(gridExtra); library(latticeExtra)
library(sas7bdat)
library(TMB); library(FLSAM)
# Set paths to folders
Path <- "D:/Repository/Turbot/assessment runs/"
dataPath <- paste(Path,"Lowestoft files/",sep="")
outPath <- paste(Path,"trial_runs_2017/Output/",sep="")
codePath <- paste(Path,"Trial_runs_2017/",sep="")
## Source methods/functions
source(paste(codePath,"03a_setupStockIndices.r",sep=""))
run <- "lowWeightAge1"
sens <- ""
### ------------------------------------------------------------------------------------------------------
### 2. Read and process assessment input data
### ------------------------------------------------------------------------------------------------------
indices <- FLIndices(list(window(trim(indices[[1]],age=1:6),start=2004),window(indices[[2]],start=1991),indices[[3]]))
### ------------------------------------------------------------------------------------------------------
### 3. Setup data structure for SAM assessment
### ------------------------------------------------------------------------------------------------------
TUR <- stock
TUR.tun <- indices
TUR.ctrl <- FLSAM.control(TUR,TUR.tun)
TUR.ctrl@states["catch",] <- c(0:6,rep(7,3))
TUR.ctrl@cor.F <- 2
TUR.ctrl@catchabilities["SNS",ac(1:6)] <- c(0:2,rep(3,3)) + 101
TUR.ctrl@catchabilities["BTS-ISIS",ac(1:7)] <- c(0,0,1,1,rep(2,3)) + 201
TUR.ctrl@catchabilities["NL_LPUE",ac(1)] <- 0 + 301
TUR.ctrl@f.vars["catch",] <- c(0,1,2,2,3,3,3,4,4,4)
TUR.ctrl@logN.vars[] <- c(0,rep(1,9))
TUR.ctrl@obs.vars["catch",] <- c(0,1,2,2,3,3,4,4,4,4) + 101
TUR.ctrl@obs.vars["SNS",ac(1:6)] <- c(0,0,1,2,3,3) + 201
TUR.ctrl@obs.vars["BTS-ISIS",ac(1:7)] <- c(0,0,0,1,2,3,3) + 301
TUR.ctrl@obs.vars["NL_LPUE",ac(1)] <- 0 + 401
TUR.ctrl@cor.obs[] <- NA
TUR.ctrl@cor.obs["SNS",1:5] <- c(0,rep(1,4))
TUR.ctrl@cor.obs.Flag[2] <- af("AR")
TUR.ctrl@biomassTreat[4] <- 2
TUR.ctrl <- update(TUR.ctrl)
TUR.ctrl@obs.weight[] <- 1; TUR.ctrl@obs.weight[1,1] <- 1/10
### ------------------------------------------------------------------------------------------------------
### 4. Run assessment
### ------------------------------------------------------------------------------------------------------
TUR.sam <- FLSAM(TUR,TUR.tun,TUR.ctrl)
TUR.ctrl@residuals <- FALSE; TUR.sam@control@residuals <- FALSE
TUR.retro <- retro(TUR,TUR.tun,TUR.ctrl,retro=7,base.assess=TUR.sam)
### ------------------------------------------------------------------------------------------------------
### 5. Diagnostics
### ------------------------------------------------------------------------------------------------------
source(file.path(codePath,"03b_runDiagnostics.r"))
|
## NOTE(review): this file is knitr::purl() output from an .Rmd (the
## "## ----label----" markers are the original chunk headers).  Edit the
## source .Rmd, not this generated script.
## ----options, echo=FALSE, warning=FALSE, message=FALSE-------------------
options(width=120)
# NOTE(review): opts_chunk$set() requires knitr to be attached; it is in scope
# when this code runs inside a knitr render, but fails if the purled script is
# sourced directly without library(knitr).
opts_chunk$set(comment=NA, fig.width=6, fig.height=5, size='tiny', out.width='0.6\\textwidth', fig.align='center', message=FALSE)
## ----libraries, message=FALSE, warning=FALSE, echo=FALSE-----------------
library("dplyr")
library("ggplot2")
library("gridExtra")
# library("xtable")
# library("Sleuth3")
## ----set_seed, echo=FALSE------------------------------------------------
set.seed(2)
## ----echo=FALSE----------------------------------------------------------
tomato = structure(list(Variety = structure(c(1L, 1L, 1L, 1L, 1L, 1L,
1L, 1L, 1L, 1L, 1L, 1L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L,
2L, 2L, 3L, 3L, 3L, 3L, 3L, 3L, 3L, 3L, 3L, 3L, 3L, 3L), .Label = c("A",
"B", "C"), class = "factor"), Density = c(10L, 10L, 10L, 20L,
20L, 20L, 30L, 30L, 30L, 40L, 40L, 40L, 10L, 10L, 10L, 20L, 20L,
20L, 30L, 30L, 30L, 40L, 40L, 40L, 10L, 10L, 10L, 20L, 20L, 20L,
30L, 30L, 30L, 40L, 40L, 40L), Yield = c(7.9, 9.2, 10.5, 11.2,
12.8, 13.3, 12.1, 12.6, 14, 9.1, 10.8, 12.5, 8.1, 8.6, 10.1,
11.5, 12.7, 13.7, 13.7, 14.4, 15.4, 11.3, 12.5, 14.5, 15.3, 16.1,
17.5, 16.6, 18.5, 19.2, 18, 20.8, 21, 17.2, 18.4, 18.9)), .Names = c("Variety",
"Density", "Yield"), class = "data.frame", row.names = c(NA,
-36L))
tomato$Variety = relevel(tomato$Variety, ref="C")
ggplot(tomato, aes(x=Density, y=Yield, color=Variety)) + geom_jitter(height=0, width=0.1) + theme_bw()
## ------------------------------------------------------------------------
sm = tomato %>%
group_by(Variety, Density) %>%
summarize(n = n(),
mean = mean(Yield),
sd = sd(Yield))
sm
## ------------------------------------------------------------------------
ggplot(sm, aes(x=Density, y=mean, col=Variety)) + geom_line() + labs(y="Mean Yield") + theme_bw()
## ----echo=TRUE-----------------------------------------------------------
tomato$Density = factor(tomato$Density)
m = lm(Yield~Variety*Density, tomato)
anova(m)
## ----echo=TRUE-----------------------------------------------------------
library(emmeans)
emmeans(m, pairwise~Variety)
## ----echo=TRUE-----------------------------------------------------------
emmeans(m, pairwise~Density)
## ----echo=TRUE-----------------------------------------------------------
emmeans(m, pairwise~Variety*Density)
## ------------------------------------------------------------------------
tomato_unbalanced = tomato[-19,]
ggplot(tomato_unbalanced, aes(x=Density, y=Yield, color=Variety)) + geom_jitter(height=0, width=0.1) + theme_bw()
## ------------------------------------------------------------------------
sm_unbalanced = tomato_unbalanced %>%
group_by(Variety, Density) %>%
summarize(n = n(),
mean = mean(Yield),
sd = sd(Yield))
sm_unbalanced
## ----echo=TRUE-----------------------------------------------------------
m = lm(Yield~Variety*Density, tomato_unbalanced)
anova(m)
## ----echo=TRUE-----------------------------------------------------------
emmeans(m, pairwise~Variety)
## ----echo=TRUE-----------------------------------------------------------
emmeans(m, pairwise~Density)
## ----echo=TRUE-----------------------------------------------------------
emmeans(m, pairwise~Variety*Density)
## ------------------------------------------------------------------------
tomato_incomplete = tomato %>%
filter(!(Variety == "B" & Density == 30)) %>%
mutate(VarietyDensity = paste0(Variety,Density))
ggplot(tomato_incomplete, aes(x=Density, y=Yield, color=Variety)) + geom_jitter(height=0, width=0.1) + theme_bw()
## ------------------------------------------------------------------------
sm_incomplete = tomato_incomplete %>%
group_by(Variety, Density) %>%
summarize(n = n(),
mean = mean(Yield),
sd = sd(Yield))
sm_incomplete
## ----echo=TRUE-----------------------------------------------------------
m <- lm(Yield ~ Variety*Density, data=tomato_incomplete)
anova(m)
## ----echo=TRUE-----------------------------------------------------------
m = lm(Yield~Variety:Density, tomato_incomplete)
anova(m)
## ----echo=TRUE-----------------------------------------------------------
# Note the -1 in order to construct the contrast
m = lm(Yield ~ VarietyDensity, tomato_incomplete)
em <- emmeans(m, ~ VarietyDensity)
contrast(em, method = list(
# A10 A20 A30 A40 B10 B20 B40 C10 C20 C30 C40
"C-B" = c( 0, 0, 0, 0, -1, -1, -1, 1, 1, 0, 1)/3,
"C-A" = c( -1, -1, -1, -1, 0, 0, 0, 1, 1, 1, 1)/4,
"B-A" = c( -1, -1, 0, -1, 1, 1, 1, 0, 0, 0, 0)/3)) %>%
confint
## ----echo=TRUE-----------------------------------------------------------
m = lm(Yield~Variety:Density, tomato_incomplete)
emmeans(m, pairwise~Variety:Density)
# We could have used the VarietyDensity model, but this looks nicer
## ----echo=FALSE----------------------------------------------------------
ggplot(tomato, aes(x=Density, y=Yield, color=Variety)) + geom_jitter(height=0, width=0.1) + theme_bw()
## ----fig.width=10,out.width='0.9\\textwidth',echo=FALSE------------------
tomato = structure(list(Variety = structure(c(1L, 1L, 1L, 1L, 1L, 1L,
1L, 1L, 1L, 1L, 1L, 1L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L,
2L, 2L, 3L, 3L, 3L, 3L, 3L, 3L, 3L, 3L, 3L, 3L, 3L, 3L), .Label = c("A",
"B", "C"), class = "factor"), Density = c(10L, 10L, 10L, 20L,
20L, 20L, 30L, 30L, 30L, 40L, 40L, 40L, 10L, 10L, 10L, 20L, 20L,
20L, 30L, 30L, 30L, 40L, 40L, 40L, 10L, 10L, 10L, 20L, 20L, 20L,
30L, 30L, 30L, 40L, 40L, 40L), Yield = c(7.9, 9.2, 10.5, 11.2,
12.8, 13.3, 12.1, 12.6, 14, 9.1, 10.8, 12.5, 8.1, 8.6, 10.1,
11.5, 12.7, 13.7, 13.7, 14.4, 15.4, 11.3, 12.5, 14.5, 15.3, 16.1,
17.5, 16.6, 18.5, 19.2, 18, 20.8, 21, 17.2, 18.4, 18.9)), .Names = c("Variety",
"Density", "Yield"), class = "data.frame", row.names = c(NA,
-36L))
tomato$Variety = relevel(tomato$Variety, ref="C")
g1 = ggplot(tomato, aes(x=Density, y=Yield)) + geom_jitter(height=0, width=0.1) +
stat_smooth(method="lm", formula=y~x+I(x^2), se=FALSE, color="black") +
labs(title="No variety") + theme_bw()
# Need to construct the parallel curves
lines = with(tomato,expand.grid(Density=seq(min(Density),max(Density),length=41),
Variety=levels(Variety)))
lines$Yield <- predict(lm(Yield~Density+I(Density^2)+Variety, tomato),lines)
g2 = ggplot(tomato, aes(x=Density, y=Yield, color=Variety)) + geom_jitter(height=0, width=0.1) +
geom_line(data=lines) + labs(title="Parallel curves") + theme_bw()
g3 = ggplot(tomato, aes(x=Density, y=Yield, color=Variety)) + geom_jitter(height=0, width=0.1) +
stat_smooth(method="lm", formula=y~x+I(x^2), se=FALSE) +
labs(title="Independent curves") + theme_bw()
grid.arrange(g1,g2,g3, ncol=3)
## ------------------------------------------------------------------------
summary(lm(Yield~Density+I(Density^2), tomato))
## ------------------------------------------------------------------------
summary(lm(Yield~Density+I(Density^2) + Variety, tomato))
## ------------------------------------------------------------------------
summary(lm(Yield~Density*Variety+I(Density^2)*Variety, tomato))
## ----out.width='0.6\\textwidth', echo=FALSE------------------------------
set.seed(20121204)
opar = par(mar=rep(0,4))
plot(0,0, type="n", axes=F,
xlab='', ylab='', xlim=c(0.5,6.5), ylim=c(0.5,6.5))
segments(1:7-.5, .5, 1:7-.5, 6.5)
segments(.5, 1:7-.5, 6.5, 1:7-.5)
trts = rep(paste(rep(c("A","B","C"),each=4), rep(seq(10,40,by=10), 3), sep=""),3)
text(rep(1:6, each=6), rep(1:6, 6), sample(trts))
par(opar)
## ----out.width='0.6\\textwidth', echo=FALSE------------------------------
set.seed(20121204)
opar = par(mar=rep(0,4))
plot(0,0, type="n", axes=F,
xlab='', ylab='', xlim=c(0,8.5), ylim=c(0,7.5))
segments(1:9-.5, .5, 1:9-.5, 6.5)
for (i in c(.5, 3.5, 6.5)) segments(i, 1:7-.5, i+2, 1:7-.5)
trts = paste(rep(c("A","B","C"),each=4), rep(seq(10,40,by=10), 3), sep="")
for (i in c(1, 4, 7)) text(rep(c(i,i+1), each=2), rep(1:6, 2), sample(trts))
text(c(1.5,4.5,7.5), 0, paste("Block", 1:3))
par(opar)
## ----out.width='0.4\\textwidth', fig.width=4, fig.height=3, echo=FALSE----
set.seed(20121204)
opar = par(mar=rep(0,4))
plot(0,0, type="n", axes=F,
xlab='', ylab='', xlim=c(0,5.5), ylim=c(0,4))
segments(1:6-.5, .5, 1:6-.5, 3.5)
for (i in c(.5, 3.5)) segments(i, 1:4-.5, i+2, 1:4-.5)
trts = rep(c("A","B","C"),each=2)
for (i in c(1, 4)) text(rep(c(i,i+1), each=3), rep(1:3, 2), sample(trts))
text(c(1,2,4,5), .3, paste("Block", 1:2))
text(c(1.5,4.5), 3.7, c("Blocked","Unblocked"))
par(opar)
| /courses/stat587Eng/slides/Regression/R09-Two-way_ANOVA/R09-Two-way_ANOVA.R | no_license | ChenghaoDing/jarad.github.com | R | false | false | 8,606 | r | ## ----options, echo=FALSE, warning=FALSE, message=FALSE-------------------
options(width=120)
opts_chunk$set(comment=NA, fig.width=6, fig.height=5, size='tiny', out.width='0.6\\textwidth', fig.align='center', message=FALSE)
## ----libraries, message=FALSE, warning=FALSE, echo=FALSE-----------------
library("dplyr")
library("ggplot2")
library("gridExtra")
# library("xtable")
# library("Sleuth3")
## ----set_seed, echo=FALSE------------------------------------------------
set.seed(2)
## ----echo=FALSE----------------------------------------------------------
tomato = structure(list(Variety = structure(c(1L, 1L, 1L, 1L, 1L, 1L,
1L, 1L, 1L, 1L, 1L, 1L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L,
2L, 2L, 3L, 3L, 3L, 3L, 3L, 3L, 3L, 3L, 3L, 3L, 3L, 3L), .Label = c("A",
"B", "C"), class = "factor"), Density = c(10L, 10L, 10L, 20L,
20L, 20L, 30L, 30L, 30L, 40L, 40L, 40L, 10L, 10L, 10L, 20L, 20L,
20L, 30L, 30L, 30L, 40L, 40L, 40L, 10L, 10L, 10L, 20L, 20L, 20L,
30L, 30L, 30L, 40L, 40L, 40L), Yield = c(7.9, 9.2, 10.5, 11.2,
12.8, 13.3, 12.1, 12.6, 14, 9.1, 10.8, 12.5, 8.1, 8.6, 10.1,
11.5, 12.7, 13.7, 13.7, 14.4, 15.4, 11.3, 12.5, 14.5, 15.3, 16.1,
17.5, 16.6, 18.5, 19.2, 18, 20.8, 21, 17.2, 18.4, 18.9)), .Names = c("Variety",
"Density", "Yield"), class = "data.frame", row.names = c(NA,
-36L))
tomato$Variety = relevel(tomato$Variety, ref="C")
ggplot(tomato, aes(x=Density, y=Yield, color=Variety)) + geom_jitter(height=0, width=0.1) + theme_bw()
## ------------------------------------------------------------------------
sm = tomato %>%
group_by(Variety, Density) %>%
summarize(n = n(),
mean = mean(Yield),
sd = sd(Yield))
sm
## ------------------------------------------------------------------------
ggplot(sm, aes(x=Density, y=mean, col=Variety)) + geom_line() + labs(y="Mean Yield") + theme_bw()
## ----echo=TRUE-----------------------------------------------------------
tomato$Density = factor(tomato$Density)
m = lm(Yield~Variety*Density, tomato)
anova(m)
## ----echo=TRUE-----------------------------------------------------------
library(emmeans)
emmeans(m, pairwise~Variety)
## ----echo=TRUE-----------------------------------------------------------
emmeans(m, pairwise~Density)
## ----echo=TRUE-----------------------------------------------------------
emmeans(m, pairwise~Variety*Density)
## ------------------------------------------------------------------------
tomato_unbalanced = tomato[-19,]
ggplot(tomato_unbalanced, aes(x=Density, y=Yield, color=Variety)) + geom_jitter(height=0, width=0.1) + theme_bw()
## ------------------------------------------------------------------------
sm_unbalanced = tomato_unbalanced %>%
group_by(Variety, Density) %>%
summarize(n = n(),
mean = mean(Yield),
sd = sd(Yield))
sm_unbalanced
## ----echo=TRUE-----------------------------------------------------------
m = lm(Yield~Variety*Density, tomato_unbalanced)
anova(m)
## ----echo=TRUE-----------------------------------------------------------
emmeans(m, pairwise~Variety)
## ----echo=TRUE-----------------------------------------------------------
emmeans(m, pairwise~Density)
## ----echo=TRUE-----------------------------------------------------------
emmeans(m, pairwise~Variety*Density)
## ------------------------------------------------------------------------
tomato_incomplete = tomato %>%
filter(!(Variety == "B" & Density == 30)) %>%
mutate(VarietyDensity = paste0(Variety,Density))
ggplot(tomato_incomplete, aes(x=Density, y=Yield, color=Variety)) + geom_jitter(height=0, width=0.1) + theme_bw()
## ------------------------------------------------------------------------
sm_incomplete = tomato_incomplete %>%
group_by(Variety, Density) %>%
summarize(n = n(),
mean = mean(Yield),
sd = sd(Yield))
sm_incomplete
## ----echo=TRUE-----------------------------------------------------------
m <- lm(Yield ~ Variety*Density, data=tomato_incomplete)
anova(m)
## ----echo=TRUE-----------------------------------------------------------
m = lm(Yield~Variety:Density, tomato_incomplete)
anova(m)
## ----echo=TRUE-----------------------------------------------------------
# Note the -1 in order to construct the contrast
m = lm(Yield ~ VarietyDensity, tomato_incomplete)
em <- emmeans(m, ~ VarietyDensity)
contrast(em, method = list(
# A10 A20 A30 A40 B10 B20 B40 C10 C20 C30 C40
"C-B" = c( 0, 0, 0, 0, -1, -1, -1, 1, 1, 0, 1)/3,
"C-A" = c( -1, -1, -1, -1, 0, 0, 0, 1, 1, 1, 1)/4,
"B-A" = c( -1, -1, 0, -1, 1, 1, 1, 0, 0, 0, 0)/3)) %>%
confint
## ----echo=TRUE-----------------------------------------------------------
m = lm(Yield~Variety:Density, tomato_incomplete)
emmeans(m, pairwise~Variety:Density)
# We could have used the VarietyDensity model, but this looks nicer
## ----echo=FALSE----------------------------------------------------------
ggplot(tomato, aes(x=Density, y=Yield, color=Variety)) + geom_jitter(height=0, width=0.1) + theme_bw()
## ----fig.width=10,out.width='0.9\\textwidth',echo=FALSE------------------
tomato = structure(list(Variety = structure(c(1L, 1L, 1L, 1L, 1L, 1L,
1L, 1L, 1L, 1L, 1L, 1L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L,
2L, 2L, 3L, 3L, 3L, 3L, 3L, 3L, 3L, 3L, 3L, 3L, 3L, 3L), .Label = c("A",
"B", "C"), class = "factor"), Density = c(10L, 10L, 10L, 20L,
20L, 20L, 30L, 30L, 30L, 40L, 40L, 40L, 10L, 10L, 10L, 20L, 20L,
20L, 30L, 30L, 30L, 40L, 40L, 40L, 10L, 10L, 10L, 20L, 20L, 20L,
30L, 30L, 30L, 40L, 40L, 40L), Yield = c(7.9, 9.2, 10.5, 11.2,
12.8, 13.3, 12.1, 12.6, 14, 9.1, 10.8, 12.5, 8.1, 8.6, 10.1,
11.5, 12.7, 13.7, 13.7, 14.4, 15.4, 11.3, 12.5, 14.5, 15.3, 16.1,
17.5, 16.6, 18.5, 19.2, 18, 20.8, 21, 17.2, 18.4, 18.9)), .Names = c("Variety",
"Density", "Yield"), class = "data.frame", row.names = c(NA,
-36L))
tomato$Variety = relevel(tomato$Variety, ref="C")
g1 = ggplot(tomato, aes(x=Density, y=Yield)) + geom_jitter(height=0, width=0.1) +
stat_smooth(method="lm", formula=y~x+I(x^2), se=FALSE, color="black") +
labs(title="No variety") + theme_bw()
# Need to construct the parallel curves
lines = with(tomato,expand.grid(Density=seq(min(Density),max(Density),length=41),
Variety=levels(Variety)))
lines$Yield <- predict(lm(Yield~Density+I(Density^2)+Variety, tomato),lines)
g2 = ggplot(tomato, aes(x=Density, y=Yield, color=Variety)) + geom_jitter(height=0, width=0.1) +
geom_line(data=lines) + labs(title="Parallel curves") + theme_bw()
g3 = ggplot(tomato, aes(x=Density, y=Yield, color=Variety)) + geom_jitter(height=0, width=0.1) +
stat_smooth(method="lm", formula=y~x+I(x^2), se=FALSE) +
labs(title="Independent curves") + theme_bw()
grid.arrange(g1,g2,g3, ncol=3)
## ------------------------------------------------------------------------
summary(lm(Yield~Density+I(Density^2), tomato))
## ------------------------------------------------------------------------
summary(lm(Yield~Density+I(Density^2) + Variety, tomato))
## ------------------------------------------------------------------------
summary(lm(Yield~Density*Variety+I(Density^2)*Variety, tomato))
## ----out.width='0.6\\textwidth', echo=FALSE------------------------------
set.seed(20121204)
opar = par(mar=rep(0,4))
plot(0,0, type="n", axes=F,
xlab='', ylab='', xlim=c(0.5,6.5), ylim=c(0.5,6.5))
segments(1:7-.5, .5, 1:7-.5, 6.5)
segments(.5, 1:7-.5, 6.5, 1:7-.5)
trts = rep(paste(rep(c("A","B","C"),each=4), rep(seq(10,40,by=10), 3), sep=""),3)
text(rep(1:6, each=6), rep(1:6, 6), sample(trts))
par(opar)
## ----out.width='0.6\\textwidth', echo=FALSE------------------------------
set.seed(20121204)
opar = par(mar=rep(0,4))
plot(0,0, type="n", axes=F,
xlab='', ylab='', xlim=c(0,8.5), ylim=c(0,7.5))
segments(1:9-.5, .5, 1:9-.5, 6.5)
for (i in c(.5, 3.5, 6.5)) segments(i, 1:7-.5, i+2, 1:7-.5)
trts = paste(rep(c("A","B","C"),each=4), rep(seq(10,40,by=10), 3), sep="")
for (i in c(1, 4, 7)) text(rep(c(i,i+1), each=2), rep(1:6, 2), sample(trts))
text(c(1.5,4.5,7.5), 0, paste("Block", 1:3))
par(opar)
## ----out.width='0.4\\textwidth', fig.width=4, fig.height=3, echo=FALSE----
set.seed(20121204)
opar = par(mar=rep(0,4))
plot(0,0, type="n", axes=F,
xlab='', ylab='', xlim=c(0,5.5), ylim=c(0,4))
segments(1:6-.5, .5, 1:6-.5, 3.5)
for (i in c(.5, 3.5)) segments(i, 1:4-.5, i+2, 1:4-.5)
trts = rep(c("A","B","C"),each=2)
for (i in c(1, 4)) text(rep(c(i,i+1), each=3), rep(1:3, 2), sample(trts))
text(c(1,2,4,5), .3, paste("Block", 1:2))
text(c(1.5,4.5), 3.7, c("Blocked","Unblocked"))
par(opar)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/H4PackagesOfoegbuKingsley-package.R
\docType{package}
\name{H4PackagesOfoegbuKingsley-package}
\alias{H4PackagesOfoegbuKingsley}
\alias{H4PackagesOfoegbuKingsley-package}
\title{H4PackagesOfoegbuKingsley: Creating a Package}
\description{
Template package created for Homework 4 (H4) to demonstrate building and documenting an R package.
}
\author{
\strong{Maintainer}: Kingsley Ofoegbu \email{kd6889a@student.american.edu} (\href{https://orcid.org/YOUR-ORCID-ID}{ORCID})
}
\keyword{internal}
| /man/H4PackagesOfoegbuKingsley-package.Rd | permissive | Khayy/NewPackageHomework | R | false | true | 512 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/H4PackagesOfoegbuKingsley-package.R
\docType{package}
\name{H4PackagesOfoegbuKingsley-package}
\alias{H4PackagesOfoegbuKingsley}
\alias{H4PackagesOfoegbuKingsley-package}
\title{H4PackagesOfoegbuKingsley: Creating a Package}
\description{
What the package does (one paragraph).
}
\author{
\strong{Maintainer}: Kingsley Ofoegbu \email{kd6889a@student.american.edu} (\href{https://orcid.org/YOUR-ORCID-ID}{ORCID})
}
\keyword{internal}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/waffle.R
\name{waffle}
\alias{waffle}
\title{Composition Waffle Chart.}
\usage{
waffle(data, title = NULL, subtitle = NULL, caption = NULL)
}
\arguments{
\item{data}{input data: a vector (or single column) of categories to plot, e.g. \code{mpg$class}}

\item{title}{plot title}

\item{subtitle}{plot subtitle}

\item{caption}{plot caption}
}
\value{
An object of class \code{ggplot}
}
\description{
waffle function will draw Waffle Chart for Composition analysis.
}
\examples{
plot<- waffle(data=mpg$class,title = "Title",caption="caption")
plot
}
| /man/waffle.Rd | permissive | HeeseokMoon/ggedachart | R | false | true | 566 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/waffle.R
\name{waffle}
\alias{waffle}
\title{Composition Waffle Chart.}
\usage{
waffle(data, title = NULL, subtitle = NULL, caption = NULL)
}
\arguments{
\item{data}{input data.frame}
\item{title}{input data.frame}
\item{subtitle}{date variable}
\item{caption}{date variable}
}
\value{
An object of class \code{ggplot}
}
\description{
waffle function will draw Waffle Chart for Composition analysis.
}
\examples{
plot<- waffle(data=mpg$class,title = "Title",caption="caption")
plot
}
|
#' Aggregate the assays over a grouping variable of the samples with a summary function.
#' @rdname mp_aggregate-methods
#' @param .data MPSE object, required.
#' @param .abundance the column name of the abundance to aggregate; default is Abundance.
#' @param .group the column name of the sample meta-data to aggregate by, required.
#' @param fun a function to compute the summary statistics; default is sum.
#' @param keep_colData logical, whether to keep the sample meta-data (nested per
#' level of \code{.group}, which becomes the new sample name); default is TRUE.
#' @param ... additional parameters, see also \code{\link[stats]{aggregate}}.
#' @return a new MPSE object whose assay columns are the levels of \code{.group}.
#' @export
#' @examples
#' \dontrun{
#' data(mouse.time.mpse)
#' newmpse <- mouse.time.mpse %>%
#'            mp_aggregate(.group = time)
#' newmpse
#' }
setGeneric("mp_aggregate", function(.data, .abundance, .group, fun=sum, keep_colData=TRUE, ...)standardGeneric("mp_aggregate"))
# Workhorse for mp_aggregate(): collapse the per-sample abundance assay into
# one column per level of `.group`, summarised with `fun`.
.internal_mp_aggregate <- function(.data, .abundance, .group, fun=sum, keep_colData=TRUE, ...){
    # Capture the column arguments for tidy evaluation; fall back to the
    # default "Abundance" assay when the caller omits .abundance.
    .abundance <- rlang::enquo(.abundance)
    .group <- rlang::enquo(.group)
    if (rlang::quo_is_missing(.abundance)){
        .abundance <- as.symbol("Abundance")
    }
    # Extract the assay with samples as tibble rows (rownames become "Sample").
    assayda <- .data %>%
             mp_extract_assays(.abundance=!!.abundance, byRow=FALSE) %>%
             tibble::as_tibble(rownames="Sample")
    # Sample meta-data; keep only the sample id plus the grouping column.
    sampleda <- .data %>% mp_extract_sample()
    sampleda1 <- sampleda %>% select(!!rlang::sym("Sample"), !!.group)
    # Tag each assay row with its group label, then drop the sample id so
    # aggregate() sees only the grouping column and the feature columns.
    assayda %<>%
        left_join(sampleda1, by="Sample") %>%
        dplyr::select(-!!rlang::sym("Sample"))
    colData(.data) <- NULL
    # Aggregate every feature column by group: formula ". ~ <group>".
    fma <- as.formula(paste0(". ~", rlang::as_name(.group)))
    assayda <- stats::aggregate(fma, data=assayda, FUN=fun, ...)
    # Transpose back to features-in-rows / groups-in-columns orientation.
    assayda %<>%
        tibble::column_to_rownames(var=rlang::as_name(.group)) %>%
        t()
    # Rebuild an MPSE, carrying over trees, reference sequences and row data.
    newda <- MPSE(assays=list(Abundance=assayda))
    taxatree(newda) <- taxatree(.data)
    otutree(newda) <- otutree(.data)
    refsequence(newda) <- refsequence(.data)
    SummarizedExperiment::rowData(newda) <- SummarizedExperiment::rowData(.data)
    if (keep_colData){
        # Preserve the original per-sample meta-data by nesting it under the
        # new group-level "Sample" names (old ids kept as .Old.Sample).
        sampleda %<>% dplyr::rename(.Old.Sample=!!rlang::sym("Sample"), Sample=!!.group) %>%
            .internal_nest_tibble(columns="Sample")
        # Only attach colData when something besides the Sample column remains.
        if (ncol(sampleda)>1){
            colData(newda) <- sampleda %>%
                              tibble::column_to_rownames(var="Sample") %>%
                              S4Vectors::DataFrame(check.names=FALSE)
        }
    }
    return (newda)
}
# Register the MPSE method: dispatch mp_aggregate() on MPSE objects to the
# internal implementation above.
#' @rdname mp_aggregate-methods
#' @aliases mp_aggregate,MPSE
#' @exportMethod mp_aggregate
setMethod("mp_aggregate", signature(.data="MPSE"), .internal_mp_aggregate)
# Nest every column of `x` except those named in `columns`: each remaining
# column name is passed to tidyr::nest() as <name> = <name>, producing one
# list-column per original column.
.internal_nest_tibble <- function(x, columns){
    nm <- colnames(x)
    nm %<>% stats::setNames(nm)
    nm <- nm[!nm %in% columns]
    # Turn the named character vector into a named list of column names,
    # the argument shape do.call(tidyr::nest, ...) expects.
    nm %<>% lapply(., function(x)x)
    x <- do.call(tidyr::nest, c(list(x), nm))
    # Flatten back any nested column whose elements are all single distinct
    # rows (nesting added no information there).
    x <- check_single_nrow_in_nest(x, columns)
    return(x)
}
# Un-nest list-columns of `da` whose nested elements each contain exactly one
# distinct row, turning them back into plain vectors. Columns named in
# `columns` are never touched.
check_single_nrow_in_nest <- function(da, columns){
    # Identify candidate columns.
    # NOTE(review): apply() coerces `da` to a matrix of list cells here, and
    # nrow(unique(i)) assumes each nested element is data-frame-like; confirm
    # inputs are always tibbles produced by .internal_nest_tibble().
    indnm <- da %>%
             apply(., 2, function(x)all(lapply(x, function(i)nrow(unique(i))==1) %>% unlist()))
    indnm <- indnm[indnm] %>% names()
    indnm <- indnm[!indnm %in% columns]
    if (length(indnm)>0){
        # Replace each such list-column with the unique value per element.
        for( i in indnm){
            da %<>% dplyr::mutate(!!rlang::sym(i):=as.vector(unlist(lapply(!!rlang::sym(i), function(x)unique(x)))))
        }
    }
    return(da)
}
| /R/method-mp_aggregate.R | no_license | YuLab-SMU/MicrobiotaProcess | R | false | false | 3,398 | r | #' aggregate the assays with the specific group of sample and fun.
#' @rdname mp_aggregate-methods
#' @param .data MPSE object, required
#' @param .abundance the column names of abundance, default is Abundance.
#' @param .group the column names of sample meta-data, required
#' @param fun a function to compute the summary statistics, default is sum.
#' @param keep_colData logical whether to keep the sample meta-data with \code{.group} as row names,
#' default is TRUE.
#' @param ... additional parameters, see also \code{\link[stats]{aggregate}}.
#' @return a new object with .group as column names in assays
#' @export
#' @examples
#' \dontrun{
#' data(mouse.time.mpse)
#' newmpse <- mouse.time.mpse %>%
#' mp_aggregate(.group = time)
#' newmpse
#' }
setGeneric("mp_aggregate", function(.data, .abundance, .group, fun=sum, keep_colData=TRUE, ...)standardGeneric("mp_aggregate"))
.internal_mp_aggregate <- function(.data, .abundance, .group, fun=sum, keep_colData=TRUE, ...){
.abundance <- rlang::enquo(.abundance)
.group <- rlang::enquo(.group)
if (rlang::quo_is_missing(.abundance)){
.abundance <- as.symbol("Abundance")
}
assayda <- .data %>%
mp_extract_assays(.abundance=!!.abundance, byRow=FALSE) %>%
tibble::as_tibble(rownames="Sample")
sampleda <- .data %>% mp_extract_sample()
sampleda1 <- sampleda %>% select(!!rlang::sym("Sample"), !!.group)
assayda %<>%
left_join(sampleda1, by="Sample") %>%
dplyr::select(-!!rlang::sym("Sample"))
colData(.data) <- NULL
fma <- as.formula(paste0(". ~", rlang::as_name(.group)))
assayda <- stats::aggregate(fma, data=assayda, FUN=fun, ...)
assayda %<>%
tibble::column_to_rownames(var=rlang::as_name(.group)) %>%
t()
newda <- MPSE(assays=list(Abundance=assayda))
taxatree(newda) <- taxatree(.data)
otutree(newda) <- otutree(.data)
refsequence(newda) <- refsequence(.data)
SummarizedExperiment::rowData(newda) <- SummarizedExperiment::rowData(.data)
if (keep_colData){
sampleda %<>% dplyr::rename(.Old.Sample=!!rlang::sym("Sample"), Sample=!!.group) %>%
.internal_nest_tibble(columns="Sample")
if (ncol(sampleda)>1){
colData(newda) <- sampleda %>%
tibble::column_to_rownames(var="Sample") %>%
S4Vectors::DataFrame(check.names=FALSE)
}
}
return (newda)
}
#' @rdname mp_aggregate-methods
#' @aliases mp_aggregate,MPSE
#' @exportMethod mp_aggregate
setMethod("mp_aggregate", signature(.data="MPSE"), .internal_mp_aggregate)
.internal_nest_tibble <- function(x, columns){
nm <- colnames(x)
nm %<>% stats::setNames(nm)
nm <- nm[!nm %in% columns]
nm %<>% lapply(., function(x)x)
x <- do.call(tidyr::nest, c(list(x), nm))
x <- check_single_nrow_in_nest(x, columns)
return(x)
}
check_single_nrow_in_nest <- function(da, columns){
indnm <- da %>%
apply(., 2, function(x)all(lapply(x, function(i)nrow(unique(i))==1) %>% unlist()))
indnm <- indnm[indnm] %>% names()
indnm <- indnm[!indnm %in% columns]
if (length(indnm)>0){
for( i in indnm){
da %<>% dplyr::mutate(!!rlang::sym(i):=as.vector(unlist(lapply(!!rlang::sym(i), function(x)unique(x)))))
}
}
return(da)
}
|
# SVM classification of forest-fire size category (forestfires data set).
# NOTE(review): hard-coded setwd() makes the script non-portable.
setwd("F://R//Rfiles")
forestfiles <- read.csv("forestfires (1).csv")
View(forestfiles)
# Distribution of the burned area (attach() removed -- it was used only for
# this one call and pollutes the search path; use $ instead).
table(forestfiles$area)
# Standardise the numeric predictors in columns 3-11; the first two columns
# stay categorical and are dropped before modelling below.
forestfiles[, c(3:11)] <- scale(forestfiles[, c(3:11)])
View(forestfiles)
str(forestfiles)
# Stratified 75/25 train/test split on the class label.
# NOTE(review): no set.seed() before createDataPartition(), so the split (and
# the accuracies quoted below) are not reproducible between runs.
library(caret)
datas <- createDataPartition(forestfiles$size_category, p = 0.75, list = FALSE)
Train_data <- forestfiles[datas, ]
Test_data <- forestfiles[-datas, ]
View(Train_data)
Train_data <- Train_data[, -c(1, 2)]   # drop the two categorical columns
View(Train_data)
Test_data <- Test_data[, -c(1, 2)]
# Model 1: linear-kernel SVM (e1071).
library(e1071)
model1 <- svm(Train_data$size_category ~ ., data = Train_data, kernel = "linear")
summary(model1)
# Column 31 is assumed to be size_category (the label) -- TODO confirm against
# the csv's column layout.
pred <- predict(model1, newdata = Test_data[, -31])
table(pred, Test_data$size_category)
mean(pred == Test_data$size_category)  # accuracy; ~0.8984 in the original run
# Model 2: kernlab ksvm with the ANOVA RBF kernel.
library(kernlab)
model2 <- ksvm(Train_data$size_category ~ ., data = Train_data, kernel = "anovadot")
summary(model2)
pred2 <- predict(model2, newdata = Test_data[, -31])
mean(pred2 == Test_data$size_category)  # ~0.92: anovadot gives the best model
# install.packages("ElemStatLearn")  # run once interactively, not every run
library(ElemStatLearn)
# Presumably staging data for a decision-boundary plot; `<-` replaces the
# original top-level `=` assignment.
set <- Train_data
| /forestfiresdummy.R | no_license | ganeshbalajiai/RCodeSupportVectorMachines | R | false | false | 1,237 | r | setwd("F://R//Rfiles")
## SVM classification of forest-fire size ("size_category").
## NOTE(review): attach() is fragile; prefer explicit data$column access.
forestfiles <- read.csv("forestfires (1).csv")
View(forestfiles)
attach(forestfiles)
table(area)
#after converting the categorical variable to a factor variable,
#normalize the other numerical columns
forestfiles[,c(3:11)] <- scale(forestfiles[,c(3:11)])
View(forestfiles)
str(forestfiles)
#partition of data: 75% train / 25% test, stratified on size_category
library(caret)
datas <- createDataPartition(forestfiles$size_category, p = 0.75, list = FALSE)
Train_data <- forestfiles[datas,]
Test_data <- forestfiles[-datas,]
View(Train_data)
# drop the first two columns before modelling
Train_data <- Train_data[,-c(1,2)]
View(Train_data)
Test_data <- Test_data[,-c(1,2)]
#building model 1: linear-kernel SVM (e1071)
library(e1071)
model1 <- svm(Train_data$size_category~., data = Train_data, kernel = "linear")
summary(model1)
pred <- predict(model1, newdata = Test_data[,-31])
table(pred, Test_data$size_category)
mean(pred == Test_data$size_category) # 0.8984
# model 2: ANOVA RBF kernel (kernlab)
library(kernlab)
model2 <- ksvm(Train_data$size_category~., data = Train_data, kernel = "anovadot")
summary(model2)
pred2 <- predict(model2, newdata = Test_data[,-31])
mean(pred2 == Test_data$size_category) #0.92
#anovadot makes the best model
# NOTE(review): install.packages() inside a script re-installs on every run.
install.packages("ElemStatLearn")
library(ElemStatLearn)
set = Train_data
|
##########################################################################################
################## Module gestion de table, creation d'une matrice de confusion #######
##########################################################################################
# Points abordes :
# - creer un data frame
# - manipuler un data frame
# - ecrire fichier CSV, TXT
#############################################################a#############################
# Derniere mise e jour 26/08/2015
# remi.dannunzio@fao.org
##########################################################################################
##########################################################################################
################## Options de base, paquets
##########################################################################################
## Training script: basic table handling in R (vectors, data frames, CSV export).
options(stringsAsFactors=FALSE)
setwd("/media/xubuntu/data_ofgt/")
### Create a vector: the "<-" and ":" functions
vecteur <- 0:20
### Repeat elements: the "rep" function
rep("bonjour",3)
### Create a vector: the "c()" function
code_1990 <- c(0,1,0,1,rep(0,17))
code_2000 <- rep(0,21)
code_2015 <- code_2000
code_2000[10]<-1
code_2015[c(7,8,10,17,19)]<-1
### Create a table: the "data.frame()" function
df <- data.frame(vecteur)
### Bind vectors as columns: the "cbind()" function
df <- cbind(vecteur,code_1990)
df <- cbind(df,code_2000)
df <- cbind(df,code_2015)
### Show the structure of an object: the "str" function
str(df)
### Change the type of an object: the "as" functions
df <- as.data.frame(df)
str(df)
### Show the top of a table: the "head" function
head(df)
head(df,2)
### Show the column names of a table: the "names" function
names(df)
### Rename a column of a table
names(df)[1] <- "classe_origine"
### Concatenate elements: the "paste" function
paste("je commence "," R",sep="avec")
paste("classe",vecteur,sep="")
### Add a column to a table
df$colonne_nouvelle <- paste("classe",vecteur,sep="")
### Extract a column from a table: the "$" operator
df2 <- df$classe_origine
df2
### Test a condition
df2 == vecteur
### Delete an object: the "rm" function
rm(df2)
### Length of a vector/table: the "length" function
length(df$classe_origine)
### Class of a vector/table: the "class" function
class(df$colonne_nouvelle)
class(df$classe_origine)
### Extract an element / a row / a column: the "[,]" operator
df[5,]
df[df$classe_origine > 10,]
df[,"classe_origine"]
df[df$classe_origine == 13,]
### Find unique values: the "unique" function
unique(df$code_1990)
### Show the levels of a variable: the "levels" function
levels(df$code_1990)
### Change the type of a variable: the "as.XXXXX" functions
### NB: several nested function calls; indentation is automatic
(legend <- levels(as.factor(df$code_1990)
)
)
### Count elements per column: the "table" function
table(df$code_1990)
table(df$code_1990,df$code_2000)
### Create a sub-dataset
out <- df[,1:4]
### Export the results to CSV
write.csv(file="data/table/classes.csv",out,row.names=F)
| /modules_R/module_1_table/module1_fichier_table.R | no_license | lecrabe/training_geospatial_rci | R | false | false | 3,142 | r | ##########################################################################################
################## Module gestion de table, creation d'une matrice de confusion #######
##########################################################################################
# Points abordes :
# - creer un data frame
# - manipuler un data frame
# - ecrire fichier CSV, TXT
#############################################################a#############################
# Derniere mise e jour 26/08/2015
# remi.dannunzio@fao.org
##########################################################################################
##########################################################################################
################## Options de base, paquets
##########################################################################################
## Training script: basic table handling in R (vectors, data frames, CSV export).
options(stringsAsFactors=FALSE)
setwd("/media/xubuntu/data_ofgt/")
### Create a vector: the "<-" and ":" functions
vecteur <- 0:20
### Repeat elements: the "rep" function
rep("bonjour",3)
### Create a vector: the "c()" function
code_1990 <- c(0,1,0,1,rep(0,17))
code_2000 <- rep(0,21)
code_2015 <- code_2000
code_2000[10]<-1
code_2015[c(7,8,10,17,19)]<-1
### Create a table: the "data.frame()" function
df <- data.frame(vecteur)
### Bind vectors as columns: the "cbind()" function
df <- cbind(vecteur,code_1990)
df <- cbind(df,code_2000)
df <- cbind(df,code_2015)
### Show the structure of an object: the "str" function
str(df)
### Change the type of an object: the "as" functions
df <- as.data.frame(df)
str(df)
### Show the top of a table: the "head" function
head(df)
head(df,2)
### Show the column names of a table: the "names" function
names(df)
### Rename a column of a table
names(df)[1] <- "classe_origine"
### Concatenate elements: the "paste" function
paste("je commence "," R",sep="avec")
paste("classe",vecteur,sep="")
### Add a column to a table
df$colonne_nouvelle <- paste("classe",vecteur,sep="")
### Extract a column from a table: the "$" operator
df2 <- df$classe_origine
df2
### Test a condition
df2 == vecteur
### Delete an object: the "rm" function
rm(df2)
### Length of a vector/table: the "length" function
length(df$classe_origine)
### Class of a vector/table: the "class" function
class(df$colonne_nouvelle)
class(df$classe_origine)
### Extract an element / a row / a column: the "[,]" operator
df[5,]
df[df$classe_origine > 10,]
df[,"classe_origine"]
df[df$classe_origine == 13,]
### Find unique values: the "unique" function
unique(df$code_1990)
### Show the levels of a variable: the "levels" function
levels(df$code_1990)
### Change the type of a variable: the "as.XXXXX" functions
### NB: several nested function calls; indentation is automatic
(legend <- levels(as.factor(df$code_1990)
)
)
### Count elements per column: the "table" function
table(df$code_1990)
table(df$code_1990,df$code_2000)
### Create a sub-dataset
out <- df[,1:4]
### Export the results to CSV
write.csv(file="data/table/classes.csv",out,row.names=F)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/factors_single.R
\name{factors_single}
\alias{factors_single}
\alias{factors_single.CMF}
\alias{factors_single.CMF_implicit}
\alias{factors_single.ContentBased}
\alias{factors_single.OMF_explicit}
\alias{factors_single.OMF_implicit}
\title{Calculate latent factors for a new user}
\usage{
factors_single(model, ...)
\method{factors_single}{CMF}(
model,
X = NULL,
X_col = NULL,
X_val = NULL,
U = NULL,
U_col = NULL,
U_val = NULL,
U_bin = NULL,
weight = NULL,
output_bias = FALSE,
...
)
\method{factors_single}{CMF_implicit}(
model,
X = NULL,
X_col = NULL,
X_val = NULL,
U = NULL,
U_col = NULL,
U_val = NULL,
...
)
\method{factors_single}{ContentBased}(model, U = NULL, U_col = NULL, U_val = NULL, ...)
\method{factors_single}{OMF_explicit}(
model,
X = NULL,
X_col = NULL,
X_val = NULL,
U = NULL,
U_col = NULL,
U_val = NULL,
weight = NULL,
output_bias = FALSE,
output_A = FALSE,
exact = FALSE,
...
)
\method{factors_single}{OMF_implicit}(
model,
X = NULL,
X_col = NULL,
X_val = NULL,
U = NULL,
U_col = NULL,
U_val = NULL,
output_A = FALSE,
...
)
}
\arguments{
\item{model}{A collective matrix factorization model from this package - see
\link{fit_models} for details.}
\item{...}{Not used.}
\item{X}{New `X` data, either as a numeric vector (class `numeric`), or as
a sparse vector from package `Matrix` (class `dsparseVector`). If the `X` to
which the model was fit was a `data.frame`, the column/item indices will have
been reindexed internally, and the numeration can be found under
`model$info$item_mapping`. Alternatively, can instead pass the column indices
and values and let the model reindex them (see `X_col` and `X_val`).
Should pass at most one of `X` or `X_col`+`X_val`.}
\item{X_col}{New `X` data in sparse vector format, with `X_col` denoting the
items/columns which are not missing. If the `X` to which the model was fit was
a `data.frame`, here should pass IDs matching to the second column of that `X`,
which will be reindexed internally. Otherwise, should have column indices with
numeration starting at 1 (passed as an integer vector).
Should pass at most one of `X` or `X_col`+`X_val`.}
\item{X_val}{New `X` data in sparse vector format, with `X_val` denoting the
associated values to each entry in `X_col`
(should be a numeric vector of the same length as `X_col`).
Should pass at most one of `X` or `X_col`+`X_val`.}
\item{U}{New `U` data, either as a numeric vector (class `numeric`), or as a
sparse vector from package `Matrix` (class `dsparseVector`). Alternatively,
if `U` is sparse, can instead pass the indices of the non-missing columns
and their values separately (see `U_col`).
Should pass at most one of `U` or `U_col`+`U_val`.}
\item{U_col}{New `U` data in sparse vector format, with `U_col` denoting the
attributes/columns which are not missing. Should have numeration starting at 1
(should be an integer vector).
Should pass at most one of `U` or `U_col`+`U_val`.}
\item{U_val}{New `U` data in sparse vector format, with `U_val` denoting the
associated values to each entry in `U_col`
(should be a numeric vector of the same length as `U_col`).
Should pass at most one of `U` or `U_col`+`U_val`.}
\item{U_bin}{Binary columns of `U` on which a sigmoid transformation will be
applied. Should be passed as a numeric vector. Note that `U` and `U_bin` are
not mutually exclusive.}
\item{weight}{(Only for the explicit-feedback models)
Associated weight to each non-missing observation in `X`. Must have the same
number of entries as `X` - that is, if passing a dense vector of length `n`,
`weight` should be a numeric vector of length `n` too, if passing a sparse
vector, should have a length corresponding to the number of non-missing elements.}
\item{output_bias}{Whether to also return the user bias determined by the model
given the data in `X`.}
\item{output_A}{Whether to return the raw `A` factors (the free offset).}
\item{exact}{(In the `OMF_explicit` model)
Whether to calculate `A` and `Am` with the regularization applied
to `A` instead of to `Am` (if using the L-BFGS method, this is how the model
was fit). This is usually a slower procedure.
Only relevant when passing `X` data.}
}
\value{
If passing `output_bias=FALSE`, `output_A=FALSE`, and in the
implicit-feedback models, will return a vector with the obtained latent factors.
If passing any of the earlier options, will return a list with the following
entries: \itemize{
\item `factors`, which will contain the obtained factors for this new user.
\item `bias`, which will contain the obtained bias for this new user
(if passing `output_bias=TRUE`).
\item `A` (if passing `output_A=TRUE`), which will contain the raw `A` vector
(which is added to the factors determined from user attributes in order to
obtain the factorization parameters).
}
}
\description{
Determine latent factors for a new user, given either `X` data
(a.k.a. "warm-start"), or `U` data (a.k.a. "cold-start"), or both.
For example usage, see the main section \link{fit_models}.
}
\details{
Note that, regardless of whether the model was fit with the L-BFGS or
ALS method with CG or Cholesky solver, the new factors will be determined through the
Cholesky method or through the precomputed matrices (e.g. a simple matrix-vector multiply
for the `ContentBased` model), unless passing `U_bin` in which case they will be
determined through the same L-BFGS method with which the model was fit.
}
\seealso{
\link{factors} \link{topN_new}
}
| /man/factors_single.Rd | permissive | pandey2000/cmfrec | R | false | true | 5,580 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/factors_single.R
\name{factors_single}
\alias{factors_single}
\alias{factors_single.CMF}
\alias{factors_single.CMF_implicit}
\alias{factors_single.ContentBased}
\alias{factors_single.OMF_explicit}
\alias{factors_single.OMF_implicit}
\title{Calculate latent factors for a new user}
\usage{
factors_single(model, ...)
\method{factors_single}{CMF}(
model,
X = NULL,
X_col = NULL,
X_val = NULL,
U = NULL,
U_col = NULL,
U_val = NULL,
U_bin = NULL,
weight = NULL,
output_bias = FALSE,
...
)
\method{factors_single}{CMF_implicit}(
model,
X = NULL,
X_col = NULL,
X_val = NULL,
U = NULL,
U_col = NULL,
U_val = NULL,
...
)
\method{factors_single}{ContentBased}(model, U = NULL, U_col = NULL, U_val = NULL, ...)
\method{factors_single}{OMF_explicit}(
model,
X = NULL,
X_col = NULL,
X_val = NULL,
U = NULL,
U_col = NULL,
U_val = NULL,
weight = NULL,
output_bias = FALSE,
output_A = FALSE,
exact = FALSE,
...
)
\method{factors_single}{OMF_implicit}(
model,
X = NULL,
X_col = NULL,
X_val = NULL,
U = NULL,
U_col = NULL,
U_val = NULL,
output_A = FALSE,
...
)
}
\arguments{
\item{model}{A collective matrix factorization model from this package - see
\link{fit_models} for details.}
\item{...}{Not used.}
\item{X}{New `X` data, either as a numeric vector (class `numeric`), or as
a sparse vector from package `Matrix` (class `dsparseVector`). If the `X` to
which the model was fit was a `data.frame`, the column/item indices will have
been reindexed internally, and the numeration can be found under
`model$info$item_mapping`. Alternatively, can instead pass the column indices
and values and let the model reindex them (see `X_col` and `X_val`).
Should pass at most one of `X` or `X_col`+`X_val`.}
\item{X_col}{New `X` data in sparse vector format, with `X_col` denoting the
items/columns which are not missing. If the `X` to which the model was fit was
a `data.frame`, here should pass IDs matching to the second column of that `X`,
which will be reindexed internally. Otherwise, should have column indices with
numeration starting at 1 (passed as an integer vector).
Should pass at most one of `X` or `X_col`+`X_val`.}
\item{X_val}{New `X` data in sparse vector format, with `X_val` denoting the
associated values to each entry in `X_col`
(should be a numeric vector of the same length as `X_col`).
Should pass at most one of `X` or `X_col`+`X_val`.}
\item{U}{New `U` data, either as a numeric vector (class `numeric`), or as a
sparse vector from package `Matrix` (class `dsparseVector`). Alternatively,
if `U` is sparse, can instead pass the indices of the non-missing columns
and their values separately (see `U_col`).
Should pass at most one of `U` or `U_col`+`U_val`.}
\item{U_col}{New `U` data in sparse vector format, with `U_col` denoting the
attributes/columns which are not missing. Should have numeration starting at 1
(should be an integer vector).
Should pass at most one of `U` or `U_col`+`U_val`.}
\item{U_val}{New `U` data in sparse vector format, with `U_val` denoting the
associated values to each entry in `U_col`
(should be a numeric vector of the same length as `U_col`).
Should pass at most one of `U` or `U_col`+`U_val`.}
\item{U_bin}{Binary columns of `U` on which a sigmoid transformation will be
applied. Should be passed as a numeric vector. Note that `U` and `U_bin` are
not mutually exclusive.}
\item{weight}{(Only for the explicit-feedback models)
Associated weight to each non-missing observation in `X`. Must have the same
number of entries as `X` - that is, if passing a dense vector of length `n`,
`weight` should be a numeric vector of length `n` too, if passing a sparse
vector, should have a length corresponding to the number of non-missing elements.}
\item{output_bias}{Whether to also return the user bias determined by the model
given the data in `X`.}
\item{output_A}{Whether to return the raw `A` factors (the free offset).}
\item{exact}{(In the `OMF_explicit` model)
Whether to calculate `A` and `Am` with the regularization applied
to `A` instead of to `Am` (if using the L-BFGS method, this is how the model
was fit). This is usually a slower procedure.
Only relevant when passing `X` data.}
}
\value{
If passing `output_bias=FALSE`, `output_A=FALSE`, and in the
implicit-feedback models, will return a vector with the obtained latent factors.
If passing any of the earlier options, will return a list with the following
entries: \itemize{
\item `factors`, which will contain the obtained factors for this new user.
\item `bias`, which will contain the obtained bias for this new user
(if passing `output_bias=TRUE`).
\item `A` (if passing `output_A=TRUE`), which will contain the raw `A` vector
(which is added to the factors determined from user attributes in order to
obtain the factorization parameters).
}
}
\description{
Determine latent factors for a new user, given either `X` data
(a.k.a. "warm-start"), or `U` data (a.k.a. "cold-start"), or both.
For example usage, see the main section \link{fit_models}.
}
\details{
Note that, regardless of whether the model was fit with the L-BFGS or
ALS method with CG or Cholesky solver, the new factors will be determined through the
Cholesky method or through the precomputed matrices (e.g. a simple matrix-vector multiply
for the `ContentBased` model), unless passing `U_bin` in which case they will be
determined through the same L-BFGS method with which the model was fit.
}
\seealso{
\link{factors} \link{topN_new}
}
|
#' Function generates a base dataset from ImmuneSpace
#'
#' Pulls the requested assay's original view from the ImmuneSpace
#' connection and keeps only the rows belonging to the requested studies.
#'
#' @param assay assay name in ImmuneSpace
#' @param con ImmuneSpaceR connection object
#' @param studies list of ImmuneSpace studies to use in filtering
#' @return the assay dataset restricted to \code{studies}
#' @export
#'
getImmuneResponseData <- function(assay, con, studies){
  dt <- con$getDataset(assay, original_view = TRUE)
  # Return the filtered table visibly; the original ended on an
  # assignment, whose value is returned invisibly.
  dt[dt$study_accession %in% studies, ]
}
#' Filter immdata list elements by age filter
#'
#' Each element of \code{immdata} is subset to donors whose
#' \code{age_imputed} falls in [ages[1], ages[2]) or, when two ranges are
#' given, in [ages[1], ages[2]) or [ages[3], ages[4]).
#'
#' @param immdata list of assay data.table(s)
#' @param ages age cutoffs, either one set (length 2) or two sets (length 4)
#' @return list of filtered data.table(s), in the same order as \code{immdata}
#' @export
#'
filterImmdataByAge <- function(immdata, ages){
  # Validate the documented contract up front instead of failing with an
  # opaque subscript error inside the loop.
  if (!length(ages) %in% c(2L, 4L)) {
    stop("`ages` must contain one (length 2) or two (length 4) sets of cutoffs",
         call. = FALSE)
  }
  # Return the lapply() result directly; the original assigned it to a
  # local, which made the return value invisible.
  lapply(immdata, function(dt){
    if (length(ages) == 2) {
      # single age window: [lower, upper)
      dt[dt$age_imputed >= ages[[1]] & dt$age_imputed < ages[[2]]]
    } else {
      # union of two age windows
      dt[(dt$age_imputed >= ages[[1]] & dt$age_imputed < ages[[2]]) |
           (dt$age_imputed >= ages[[3]] & dt$age_imputed < ages[[4]])]
    }
  })
}
#' Generate a single response call data.table from multiple assays
#'
#' Combines the per-assay tables, keeps only the response-call columns,
#' de-duplicates, and resolves participants measured by several assays by
#' preferring hai, then neut_ab_titer.
#'
#' @param immdata_age_group list of assay data.table(s)
#' @return a data.table with exactly one row per participant
#' @export
#'
selectResponsesToUse <- function(immdata_age_group){
  tmp <- rbindlist(immdata_age_group, fill = TRUE)
  # fixed set of demographic / response-call columns to retain
  colsToUse <- c(
    "participant_id", "study_accession", "arm_accession", "cohort",
    "race", "ethnicity", "gender", "age_imputed",
    "vaccine", "vaccine_type", "pathogen", "assay", "adjuvant",
    "MFC", "maxRBA",
    "maxStrain_MFC", "maxStrain_RBA",
    "ImmResp_baseline_value_MFC", "ImmResp_baseline_timepoint_MFC",
    "ImmResp_postVax_value_MFC", "ImmResp_postVax_timepoint_MFC",
    "ImmResp_baseline_value_RBA", "ImmResp_baseline_timepoint_RBA",
    "ImmResp_postVax_value_RBA", "ImmResp_postVax_timepoint_RBA"
  )
  # also keep any percentile-style columns matching "_p<digits>"
  dynamicCols <- grep("_p\\d+$", colnames(tmp), value = TRUE)
  colsToUse <- c(colsToUse, dynamicCols)
  tmp <- tmp[, ..colsToUse]
  tmp <- tmp[ !duplicated(tmp) ] # after rm biosample id and other cols, mostly dupes
  # count how many assays each participant appears in (data.table := by group)
  tmp <- tmp[ , numAssays := .N, by = participant_id ]
  # select assay to use: hai wins, then neut_ab_titer for the remainder
  singleAssay <- tmp[ numAssays == 1 ]
  needFiltering <- tmp[ numAssays > 1 ]
  haiSubs <- needFiltering[ assay == "hai" ]
  nabSubs <- needFiltering[ assay == "neut_ab_titer" &
                              !participant_id %in% unique(haiSubs$participant_id) ]
  res <- rbindlist(list(singleAssay, haiSubs, nabSubs))
  # sanity check: filtering must leave one row per participant
  if( length(res$participant_id) != length(unique(res$participant_id)) ){
    stop("filtering not done correctly")
  }
  return(res)
}
| /R/immuneResponsePreProcessing.R | no_license | RGLab/ImmuneSignatures2 | R | false | false | 2,542 | r | #' Function generates a base dataset from ImmuneSpace
#'
#' @param assay assay name in ImmuneSpace
#' @param con ImmuneSpaceR connection object
#' @param studies list of ImmuneSpace studies to use in filtering
#' @export
#'
getImmuneResponseData <- function(assay, con, studies){
dt <- con$getDataset(assay, original_view = TRUE)
dt <- dt[ dt$study_accession %in% studies, ]
}
#' Filter immdata list elements by age filter
#'
#' @param immdata list of assay data.table(s)
#' @param ages age cutoffs, either one or two sets
#' @export
#'
filterImmdataByAge <- function(immdata, ages){
  one_range <- length(ages) == 2
  subset_by_age <- function(tbl){
    # first window is always [ages[1], ages[2])
    in_first <- tbl$age_imputed >= ages[[1]] & tbl$age_imputed < ages[[2]]
    if (one_range) {
      tbl[in_first]
    } else {
      # second window [ages[3], ages[4]); keep rows in either window
      in_second <- tbl$age_imputed >= ages[[3]] & tbl$age_imputed < ages[[4]]
      tbl[in_first | in_second]
    }
  }
  # Final assignment mirrors the original's invisible return of the list.
  filtered <- lapply(immdata, subset_by_age)
}
#' Generate a single response call data.table from multiple assays
#'
#' Binds the per-assay tables, keeps the response-call columns, removes
#' duplicates, and keeps one assay per participant (hai preferred over
#' neut_ab_titer).
#'
#' @param immdata_age_group list of assay data.table(s)
#' @return a data.table with one row per participant
#' @export
#'
selectResponsesToUse <- function(immdata_age_group){
  tmp <- rbindlist(immdata_age_group, fill = TRUE)
  # fixed response-call / demographic columns to retain
  colsToUse <- c(
    "participant_id", "study_accession", "arm_accession", "cohort",
    "race", "ethnicity", "gender", "age_imputed",
    "vaccine", "vaccine_type", "pathogen", "assay", "adjuvant",
    "MFC", "maxRBA",
    "maxStrain_MFC", "maxStrain_RBA",
    "ImmResp_baseline_value_MFC", "ImmResp_baseline_timepoint_MFC",
    "ImmResp_postVax_value_MFC", "ImmResp_postVax_timepoint_MFC",
    "ImmResp_baseline_value_RBA", "ImmResp_baseline_timepoint_RBA",
    "ImmResp_postVax_value_RBA", "ImmResp_postVax_timepoint_RBA"
  )
  # plus any percentile-style "_p<digits>" columns present in the data
  dynamicCols <- grep("_p\\d+$", colnames(tmp), value = TRUE)
  colsToUse <- c(colsToUse, dynamicCols)
  tmp <- tmp[, ..colsToUse]
  tmp <- tmp[ !duplicated(tmp) ] # after rm biosample id and other cols, mostly dupes
  # per-participant assay count (data.table group-wise :=)
  tmp <- tmp[ , numAssays := .N, by = participant_id ]
  # select assay to use
  singleAssay <- tmp[ numAssays == 1 ]
  needFiltering <- tmp[ numAssays > 1 ]
  haiSubs <- needFiltering[ assay == "hai" ]
  nabSubs <- needFiltering[ assay == "neut_ab_titer" &
                              !participant_id %in% unique(haiSubs$participant_id) ]
  res <- rbindlist(list(singleAssay, haiSubs, nabSubs))
  # sanity check: exactly one row per participant must remain
  if( length(res$participant_id) != length(unique(res$participant_id)) ){
    stop("filtering not done correctly")
  }
  return(res)
}
|
% Generated by roxygen2 (4.0.1): do not edit by hand
\name{LinearizedSVRTrain}
\alias{LinearizedSVRTrain}
\title{LinearizedSVRTrain}
\usage{
LinearizedSVRTrain(X, Y, C = 1, epsilon = 0.01, nump = floor(sqrt(N)),
ktype = rbfdot, kpar, prototypes = c("kmeans", "random"),
clusterY = FALSE, epsilon.up = epsilon, epsilon.down = epsilon,
expectile = NULL, scale = TRUE, sigest = sigma.est)
}
\arguments{
\item{X}{matrix of examples, one example per row.}
\item{Y}{vector of target values. Must be the same length as the number of rows in \code{X}.}
\item{C}{cost of constraints violation}
\item{epsilon}{tolerance of termination criterion for optimization}
\item{nump}{number of prototypes by which to represent each example in \code{X}}
\item{ktype}{kernel-generating function, typically from the \pkg{kernlab} package}
\item{kpar}{a list of any parameters necessary for \code{ktype}. See Details.}
\item{prototypes}{the method by which prototypes will be chosen}
\item{clusterY}{whether to cluster \code{X} and \code{Y} jointly
when using \code{prototypes="kmeans"}. Otherwise \code{X} is
clustered without influence from \code{Y}.}
\item{epsilon.up}{allows you to use a different setting for
\code{epsilon} in the positive direction.}
\item{epsilon.down}{allows you to use a different setting for
\code{epsilon} in the negative direction.}
\item{expectile}{if non-null, do expectile regression using the
given expectile value. Currently uses the \code{expectreg}
package.}
\item{scale}{a boolean value indicating whether \code{X} and \code{Y} should
be normalized (to zero-mean and unit-variance) before learning.}
\item{sigest}{if the kernel expects a \code{sigma} parameter and none is
provided in \code{kpar}, this parameter specifies a function to use to
compute it.}
}
\value{
a model object that can later be used as the first
argument for the \code{predict()} method.
}
\description{
Train a prototype-based Linearized Support-Vector Regression model
}
\details{
This function trains a new LinearizedSVR model based on \code{X}
and \code{Y}. See \link{LinearizedSVR-package} for an explanation
of how such models are defined.
}
\examples{
dat <- rbind(data.frame(y=2, x1=rnorm(500, 1), x2=rnorm(500, 1)),
data.frame(y=1, x1=rnorm(500,-1), x2=rnorm(500,-1)))
mod <- LinearizedSVRTrain(X=as.matrix(dat[-1]), Y=dat$y, nump=6)
res <- predict(mod, newdata=as.matrix(dat[-1]))
plot(x2 ~ x1, dat, col=c("red","green")[1+(res>1.5)], pch=c(3,20)[dat$y])
}
\seealso{
LinearizedSVR-package
}
| /man/LinearizedSVRTrain.Rd | no_license | cran/LinearizedSVR | R | false | false | 2,596 | rd | % Generated by roxygen2 (4.0.1): do not edit by hand
\name{LinearizedSVRTrain}
\alias{LinearizedSVRTrain}
\title{LinearizedSVRTrain}
\usage{
LinearizedSVRTrain(X, Y, C = 1, epsilon = 0.01, nump = floor(sqrt(N)),
ktype = rbfdot, kpar, prototypes = c("kmeans", "random"),
clusterY = FALSE, epsilon.up = epsilon, epsilon.down = epsilon,
expectile = NULL, scale = TRUE, sigest = sigma.est)
}
\arguments{
\item{X}{matrix of examples, one example per row.}
\item{Y}{vector of target values. Must be the same length as the number of rows in \code{X}.}
\item{C}{cost of constraints violation}
\item{epsilon}{tolerance of termination criterion for optimization}
\item{nump}{number of prototypes by which to represent each example in \code{X}}
\item{ktype}{kernel-generating function, typically from the \pkg{kernlab} package}
\item{kpar}{a list of any parameters necessary for \code{ktype}. See Details.}
\item{prototypes}{the method by which prototypes will be chosen}
\item{clusterY}{whether to cluster \code{X} and \code{Y} jointly
when using \code{prototypes="kmeans"}. Otherwise \code{X} is
clustered without influence from \code{Y}.}
\item{epsilon.up}{allows you to use a different setting for
\code{epsilon} in the positive direction.}
\item{epsilon.down}{allows you to use a different setting for
\code{epsilon} in the negative direction.}
\item{expectile}{if non-null, do expectile regression using the
given expectile value. Currently uses the \code{expectreg}
package.}
\item{scale}{a boolean value indicating whether \code{X} and \code{Y} should
be normalized (to zero-mean and unit-variance) before learning.}
\item{sigest}{if the kernel expects a \code{sigma} parameter and none is
provided in \code{kpar}, this parameter specifies a function to use to
compute it.}
}
\value{
a model object that can later be used as the first
argument for the \code{predict()} method.
}
\description{
Train a prototype-based Linearized Support-Vector Regression model
}
\details{
This function trains a new LinearizedSVR model based on \code{X}
and \code{Y}. See \link{LinearizedSVR-package} for an explanation
of how such models are defined.
}
\examples{
dat <- rbind(data.frame(y=2, x1=rnorm(500, 1), x2=rnorm(500, 1)),
data.frame(y=1, x1=rnorm(500,-1), x2=rnorm(500,-1)))
mod <- LinearizedSVRTrain(X=as.matrix(dat[-1]), Y=dat$y, nump=6)
res <- predict(mod, newdata=as.matrix(dat[-1]))
plot(x2 ~ x1, dat, col=c("red","green")[1+(res>1.5)], pch=c(3,20)[dat$y])
}
\seealso{
LinearizedSVR-package
}
|
# 주사위 (1,2,3,4,5,6) 4번 던져서 나오는 숫자의 합 x에 대한 확률
library(prob)
rolldie(5)
S <- rolldie(4)
dim(S)
str(S)
x <- apply(S, 1, sum)
x
x.freq <- table(x)
length(x.freq)
x.freq <- x.freq / length(x)
x.freq
plot(x.freq, type="h")
# temp <- matrix(c(1,2,3,4), nrow=2)
# temp
# apply(temp,1,sum)
# 이산확률변수 -> 이산확률분포
# 50개 제품중 8개가 불량이 있는 상자로부터
# 10개의 제품을 랜덤 샘플링시 발견되는 불량 개수 x의 확률분포는?
npop <- 50
nsamp1 <- 10
ndef <- 8
d <- choose(npop,nsamp1)
freq <- choose(ndef, 0:nsamp1) * choose(npop-ndef, nsamp1-(0:nsamp1))
freq
fx <- freq /d
fx
plot(0:10,fx,type="h")
# 여덟명이 각각의 모자를 들고 모였는데, 갑자기 정전이 되서 아무 모자나 들고 집으로 감
# 자기 자신의 모자를 들고 간 신사의 수를 x라고 할때, 확률변수 x의 확률은?
options(stringsAsFactors = F)
hats <- LETTERS[1:8]
S <- urnsamples(hats, size=8, ordered = T)
str(S)
dim(S)
rowN <- nrow(S)
ncol(S)
table(S)
checkFunc <- function(x){sum(x==hats)}
X <- apply(S,1,checkFunc)
X.freq2 <- table(X)
X.prob2 <- round(X.freq2/rowN,6)
sum(X.prob2)
plot(X.prob2, type="h")
#주사위 3개를 던짐. 짝수의 개수
S <- rolldie(3)
rowN <- nrow(S)
S
rowN
checkFunc <- function(x) sum(1-x%%2)
Y <- apply(S,1,checkFunc)
table(Y)
# 주사위를 두번 던지는 시행.
# 눈의 최대치 X, 눈의 최소치 Y
# Z=XY 의 기대값은?
S1 <- rolldie(2)
str(S1)
X1 <- apply(S1, 1, max)
table(X1)
X2 <- apply(S1, 1, min)
table(X2)
temp <- table(X1,X2)/nrow(S1)
temp
class(temp)
XY <- (1:6 %o% 1:6)
XY
(as.vector(XY)) %*% as.vector(XY)
S3 <- tosscoin(4)
S3
nrow(S3)
#앞면 뒷면 갯수를 세는 함수
countH <- function(x) sum(x=='H')
countT <- function(x) sum(x=='T')
# 확률 변수 변수 생성
X3 <- apply(S3, 1, countH)
Y3 <- apply(S3, 1, countT)
V3 <- Y3 -X3
W3 <- abs(V3)
par(mfrow=c(2,3))
plot(X3,Y3)
plot(X3,V3)
plot(X3,W3)
plot(Y3,W3)
plot(V3,W3)
# 평균, 중앙값(mean, median)
# 주사위 4개 던졌을때
# list <- 합, 평균, 최대치, 최소치
S5 <- rolldie(4)
N5 <- nrow(S5)
X5_sum <- apply(S5, 1, sum)
X5_mean <- apply(S5, 1, mean)
# 불량률이 0.03인 공정에서 20개의 표본을 추출하여 검사하고 발견한
# 불량 개수를 X 라 할때, X의 확률분포 함수, 2개의 불량률이 발견될 확률.
dbinom(0:2, 20, 0.03)
# 정규분포와 관련 분포
# 1. 기대값 중심으로 대칭이며, 중심위치는 엎어놓은 종 모양의 분포
# dnorm(), pnorm(), qnorm(), rnorm()
# 표준정규분표 기대값 = 0, 표준편차 =1
# x축 -7 ~ 7
x <- (-140:140)/20
dnorm(x, 0, 1)
dnorm(x, 0, 2)
dnorm(x, 2, 1)
dnorm(x, 2, 2)
data <- matrix(c(dnorm(x, 0, 1), dnorm(x, 0, 2), dnorm(x, 2, 1), dnorm(x, 2, 2)),
ncol=4,
byrow = F)
data
par(mfrow=c(2,2))
plot(x, data[,1], type="l")
plot(x, data[,2], type="l")
plot(x, data[,3], type="l")
plot(x, data[,4], type="l")
segments(0,0,0,max(data[,1]), lty=2,col=4)
# 확률변수 x가 N(2,2^2)을 따를때, 구간 P(-1<x<4)를 구하시오.
x3 <- (-140:140)/70
x3
mu3 <- 2; sig3 <- 2; a3<- -1; b3<- 4
fx3 <- matrix(c(dnorm(x3,0,1),
dnorm(x3,mu3,sig3)),
ncol=2, byrow=F)
fx3
px3 <- pnorm(4, mu3, sig3); px3
px4 <- pnorm(-1, mu3, sig3); px4
px3 - px4
plot(x3, fx3[,2], type='l')
x5 <- c(a3, seq(a3, b3, 0.01),b3)
# 1. 성공확률이 0.2인 n회의 시행에서 나온 성공횟수를 Xn이라 할때, n=10,20,50
# 각각에 대해 Xn의 확률 분포함수를 그래프로 출력하라. (dbinom)
x10 <- 0:10; x20 <- 0:20; x50 <- 0:50
par(mfrow=c(1,3))
plot(x10, dbinom(x10,10,0.2), type="h")
plot(x20, dbinom(x20,20,0.2), type="h")
plot(x50, dbinom(x50,50,0.2), type="h")
# 2. 성공확률이 20%이고 200개의 단위로 구성된 모집단에서 비복원추출한 n개의
# 표본에서 나온 성공회수를 Xn이라고 할때, n=10,20,50 각각에 대해 Xn의
# 확률분포함수를 그래프로 출력하시오.(dhyper)
x10 <- 0:10; x20 <- 0:20; x50 <- 0:50
plot(x10, dhyper(x10, 40, 160, 10), type="h")
plot(x20, dhyper(x20, 40, 160, 20), type="h")
plot(x50, dhyper(x50, 40, 160, 50), type="h")
# 3.단위 제품 당 평균 결점수가 3개인 n개의 단위 제품에서 나온 결점수를 Xn이라고
# 할때, n=2,5,10 각각에 대해 Xn의 확률 분포 함수를 그래프로 출력하라.(dpois)
x2 <- 0:12; x5 <- 0:30; x10 <- 0:60
par(mfrow=c(1,3))
plot(x2, dpois(x2, 6), type="h")
plot(x5, dpois(x5, 15), type="h")
# Central limit theorem demo: load plotting helpers and the daily max-temperature data.
library(ggplot2)
# easyGgplot2 lives only on GitHub, so it must be installed with
# devtools::install_github(); install.packages("kassambara/easyGgplot2") fails
# because "kassambara/easyGgplot2" is a GitHub user/repo slug, not a CRAN
# package name. Guard both installs so they do not re-run on every execution.
if (!requireNamespace("devtools", quietly = TRUE)) install.packages("devtools")
if (!requireNamespace("easyGgplot2", quietly = TRUE)) devtools::install_github("kassambara/easyGgplot2")
library(devtools)
library(easyGgplot2)
library(data.table)
# Read the station temperature file and label its columns.
data <- data.table(read.table("ME_2765tmax.txt", header=F))
str(data)
class(data)
colnames(data) <- c("StateID","YearDay","Year","Month","MonthDay","MaxTemp")
colnames(data)
.
| /181010test.R | permissive | EnteLee/RPractice | R | false | false | 4,919 | r | # 주사위 (1,2,3,4,5,6) 4번 던져서 나오는 숫자의 합 x에 대한 확률
# Probability of the sum x of the faces when rolling four dice.
library(prob)
rolldie(5)  # NOTE(review): result is discarded (7776 rows echoed interactively); the exercise uses 4 dice -- possibly a leftover
S <- rolldie(4)
dim(S)
str(S)
# Sum of the four faces for each of the 1296 outcomes of rolldie(4).
x <- apply(S, 1, sum)
x
x.freq <- table(x)
length(x.freq)
# Convert counts to relative frequencies (probabilities) and plot the pmf.
x.freq <- x.freq / length(x)
x.freq
plot(x.freq, type="h")
# temp <- matrix(c(1,2,3,4), nrow=2)
# temp
# apply(temp,1,sum)
# Discrete random variable -> discrete probability distribution.
# From a box of 50 items of which 8 are defective, 10 items are sampled at
# random; what is the distribution of the number of defectives x found?
# (Hypergeometric pmf, computed from first principles with choose().)
npop <- 50
nsamp1 <- 10
ndef <- 8
d <- choose(npop,nsamp1)
freq <- choose(ndef, 0:nsamp1) * choose(npop-ndef, nsamp1-(0:nsamp1))
freq
fx <- freq /d
fx
plot(0:10,fx,type="h")
# Eight gentlemen each bring a hat; the lights go out and everyone walks home
# with a random hat. Let x be the number of gentlemen who took their own hat;
# what is the distribution of x? (The classic hat-matching problem.)
options(stringsAsFactors = F)
hats <- LETTERS[1:8]
# All 8! ordered assignments of hats to owners.
S <- urnsamples(hats, size=8, ordered = T)
str(S)
dim(S)
rowN <- nrow(S)
ncol(S)
table(S)
# Count, per permutation, how many positions match the owner's own hat.
checkFunc <- function(x){sum(x==hats)}
X <- apply(S,1,checkFunc)
X.freq2 <- table(X)
X.prob2 <- round(X.freq2/rowN,6)
sum(X.prob2)
plot(X.prob2, type="h")
# Roll three dice; count the even faces.
S <- rolldie(3)
rowN <- nrow(S)
S
rowN
# 1 - x %% 2 is 1 for an even face and 0 for an odd one.
checkFunc <- function(x) sum(1-x%%2)
Y <- apply(S,1,checkFunc)
table(Y)
# Roll a die twice; X = larger face, Y = smaller face. Find E[Z] for Z = X*Y.
S1 <- rolldie(2)
str(S1)
X1 <- apply(S1, 1, max)
table(X1)
X2 <- apply(S1, 1, min)
table(X2)
# Joint probability table of (max, min): each of the 36 outcomes has mass 1/36.
temp <- table(X1,X2)/nrow(S1)
temp
class(temp)
# XY[i, j] = i * j, the value of Z when max = i and min = j.
XY <- (1:6 %o% 1:6)
XY
# E[Z] = sum over (i, j) of z * P(max = i, min = j).
# Fix: the original computed XY %*% XY (the sum of squared products), which
# ignores the joint probabilities entirely and is not the expected value.
EZ <- sum(XY * temp)
EZ
# Toss a coin four times; build derived random variables from the sample space.
S3 <- tosscoin(4)
S3
nrow(S3)
# Functions counting heads ('H') and tails ('T') in one outcome.
countH <- function(x) sum(x=='H')
countT <- function(x) sum(x=='T')
# Create the random variables: X3 = heads, Y3 = tails, V3 = T - H, W3 = |V3|.
X3 <- apply(S3, 1, countH)
Y3 <- apply(S3, 1, countT)
V3 <- Y3 -X3
W3 <- abs(V3)
# Pairwise scatterplots on a 2x3 grid.
par(mfrow=c(2,3))
plot(X3,Y3)
plot(X3,V3)
plot(X3,W3)
plot(Y3,W3)
plot(V3,W3)
# Mean and median
# When rolling four dice,
# compute per outcome: sum, mean, max, min
# NOTE(review): only the sum and mean are computed below; max and min are missing.
S5 <- rolldie(4)
N5 <- nrow(S5)
X5_sum <- apply(S5, 1, sum)
X5_mean <- apply(S5, 1, mean)
# Defect rate 0.03, sample of 20: pmf values of the defect count X for 0..2
# (X ~ Binomial(20, 0.03)).
dbinom(0:2, 20, 0.03)
# The normal distribution and related functions
# 1. Symmetric about its mean; bell-shaped curve
# dnorm(), pnorm(), qnorm(), rnorm()
# Standard normal: mean = 0, standard deviation = 1
# x-axis from -7 to 7
x <- (-140:140)/20
dnorm(x, 0, 1)
dnorm(x, 0, 2)
dnorm(x, 2, 1)
dnorm(x, 2, 2)
# Densities of N(0,1), N(0,4), N(2,1), N(2,4), one per column.
data <- matrix(c(dnorm(x, 0, 1), dnorm(x, 0, 2), dnorm(x, 2, 1), dnorm(x, 2, 2)),
ncol=4,
byrow = F)
data
par(mfrow=c(2,2))
plot(x, data[,1], type="l")
plot(x, data[,2], type="l")
plot(x, data[,3], type="l")
plot(x, data[,4], type="l")
segments(0,0,0,max(data[,1]), lty=2,col=4)
# If x ~ N(2, 2^2), find P(-1 < x < 4).
# NOTE(review): the grid spans only [-2, 2]; it does not cover (-1, 4) -- /20
# (range [-7, 7]) may have been intended, confirm.
x3 <- (-140:140)/70
x3
mu3 <- 2; sig3 <- 2; a3<- -1; b3<- 4
fx3 <- matrix(c(dnorm(x3,0,1),
dnorm(x3,mu3,sig3)),
ncol=2, byrow=F)
fx3
# Interval probability as a difference of CDF values.
px3 <- pnorm(4, mu3, sig3); px3
px4 <- pnorm(-1, mu3, sig3); px4
px3 - px4
plot(x3, fx3[,2], type='l')
x5 <- c(a3, seq(a3, b3, 0.01),b3)  # NOTE(review): unused -- probably intended for shading the interval
# 1. Successes in n trials with p = 0.2: plot the pmf of Xn for n = 10, 20, 50. (dbinom)
x10 <- 0:10; x20 <- 0:20; x50 <- 0:50
par(mfrow=c(1,3))
plot(x10, dbinom(x10,10,0.2), type="h")
plot(x20, dbinom(x20,20,0.2), type="h")
plot(x50, dbinom(x50,50,0.2), type="h")
# 2. Sampling n without replacement from 200 units containing 40 successes:
#    plot the pmf of Xn for n = 10, 20, 50. (dhyper)
x10 <- 0:10; x20 <- 0:20; x50 <- 0:50
plot(x10, dhyper(x10, 40, 160, 10), type="h")
plot(x20, dhyper(x20, 40, 160, 20), type="h")
plot(x50, dhyper(x50, 40, 160, 50), type="h")
# 3. With an average of 3 defects per unit, let Xn be the number of defects in
#    n units (Poisson with mean 3n); plot the pmf of Xn for n = 2, 5, 10. (dpois)
x2 <- 0:12; x5 <- 0:30; x10 <- 0:60
par(mfrow=c(1,3))
plot(x2, dpois(x2, 6), type="h")
plot(x5, dpois(x5, 15), type="h")
plot(x10, dpois(x10, 30), type="h")  # fix: the n = 10 panel (mean 30) was missing; x10 was defined but unused
# Central limit theorem demo: load plotting helpers and the daily max-temperature data.
library(ggplot2)
# easyGgplot2 lives only on GitHub, so it must be installed with
# devtools::install_github(); install.packages("kassambara/easyGgplot2") fails
# because "kassambara/easyGgplot2" is a GitHub user/repo slug, not a CRAN
# package name. Guard both installs so they do not re-run on every execution.
if (!requireNamespace("devtools", quietly = TRUE)) install.packages("devtools")
if (!requireNamespace("easyGgplot2", quietly = TRUE)) devtools::install_github("kassambara/easyGgplot2")
library(devtools)
library(easyGgplot2)
library(data.table)
# Read the station temperature file and label its columns.
data <- data.table(read.table("ME_2765tmax.txt", header=F))
str(data)
class(data)
colnames(data) <- c("StateID","YearDay","Year","Month","MonthDay","MaxTemp")
colnames(data)
.
|
### Table 1 ----
# Weighted mean hourly wages (East vs West) plus group sizes for each grouping
# variable. The identical summarise() block was pasted eight times in the
# original; it is factored into one tidy-eval helper here.
# NOTE(review): {{ }} embracing requires dplyr with rlang >= 0.4.
summarise_by <- function(dat, var) {
  dat %>%
    group_by(Variable = {{ var }}) %>%
    summarise('Mean East' = wtd.mean(hwage[region2 == 'East'], pb040[region2 == 'East']),
              'Mean West' = wtd.mean(hwage[region2 == 'West'], pb040[region2 == 'West']),
              'N' = n(),
              'N(East)/N' = length(hwage[region2 == 'East']) / n())
}
# Row order must match the group_rows() indices below (female first, contract
# last); the original achieved the same order by repeatedly prepending with
# bind_rows().
tab1 <- bind_rows(summarise_by(hundat, female),
                  summarise_by(hundat, educ),
                  summarise_by(hundat, union),
                  summarise_by(hundat, urban),
                  summarise_by(hundat, isco),
                  summarise_by(hundat, sector),
                  summarise_by(hundat, firmsize),
                  summarise_by(hundat, contract))
# Render the pooled-sample table as a grouped LaTeX booktabs table.
tab1 <- kable(tab1,
              'latex',
              caption = "Pooled Sample nach Datenfilterung und Stundenlohnunterschiede zwischen Westen und Osten",
              booktabs = T,
              escape = F,
              col.names = c("Variable", linebreak(c("Mean hwage\nEast", "Mean hwage\nWest"), align = "r"), "N", "N(East)/N")) %>%
  kable_styling(font_size = 10, full_width = F) %>%
  group_rows("Sex", 1, 2) %>%
  group_rows("Education", 3, 5) %>%
  group_rows("Union", 6, 7) %>%
  group_rows("Urbanisation", 8, 9) %>%
  group_rows("ISCO", 10, 11) %>%
  group_rows("Sector", 12, 14) %>%
  group_rows("Firmsize", 15, 16) %>%
  group_rows("Contract", 17, 18) %>%
  footnote(general = 'EU-SILC 2005-2017, eigene Berechnungen.',
           footnote_as_chunk = T,
           general_title = "Quelle:")
### Table 2 & Table 3 ----
# Both tables reshape an indicator data set (long -> wide over survey years)
# and render the identical kable layout; only the year column name, the income
# concept level names and the caption differ, so build them with one helper.
# The original also contained the no-op lines `tab2$rb010 = tab2$rb010` and
# `tab3$pb010 = tab3$pb010`, which are dropped here.
make_indicator_table <- function(dat, year_col, concept_levels, caption) {
  tab <- melt(dat, id = c(year_col, 'stat'))
  # Give the year column a fixed name so spread() can refer to it.
  names(tab)[names(tab) == year_col] <- 'year'
  tab <- spread(tab, year, value)
  # Fix the display order of the summary statistics and income concepts.
  tab$stat <- factor(tab$stat, levels = c('Mean', 'Q50', 'Gini', 'P80/P20', 'Top10'))
  tab$variable <- factor(tab$variable,
                         levels = concept_levels,
                         labels = c('Factor', 'Nation', 'Disposable'))
  tab <- tab[order(tab$stat, tab$variable), ]
  tab %>%
    select(-stat) %>%
    kable('latex',
          booktabs = T,
          caption = caption,
          row.names = FALSE,
          col.names = c("", c(2005:2017))) %>%
    kable_styling(latex_options = 'scale_down') %>%
    group_rows("Mean", 1, 3) %>%
    group_rows("Median", 4, 6) %>%
    group_rows("Gini", 7, 9) %>%
    group_rows("P80/P20", 10, 12) %>%
    group_rows("Top10", 13, 15) %>%
    footnote(general = 'EU-SILC 2005-2017, eigene Berechnungen.',
             footnote_as_chunk = T,
             general_title = "Quelle:") %>%
    landscape()
}
### Table 2
tab2 <- make_indicator_table(indicators_p1, 'rb010',
                             c('pretax_factor_eq', 'pretax_nation_eq', 'posttax_disp_eq'),
                             "Ungleichheitsindikatoren nach Eurostat")
### Table 3
tab3 <- make_indicator_table(indicators_p2, 'pb010',
                             c('pretax_factor_20', 'pretax_nation_20', 'posttax_disp_20'),
                             "Ungleichheitsindikatoren nach WID World")
### Table 4 & Table 5 ----
# Both decomposition tables apply the identical rounding/reshaping/kable
# pipeline to a results data frame; only the input and the caption differ.
# Also fixes the misspelled row-group label 'Diffrence' -> 'Difference' and
# drops the no-op line `tab$year = tab$year` from the original.
make_decomposition_table <- function(res, caption) {
  tab <- res
  tab$lower <- round(tab$lower, digits = 4)
  tab$upper <- round(tab$upper, digits = 4)
  tab$value <- round(tab$value, digits = 4)
  # NOTE(review): joining the bounds with "-" is ambiguous when a bound is
  # negative (e.g. "-0.1--0.2"); consider sep = " to " or brackets -- confirm.
  tab <- unite(tab, CI, lower, upper, sep = "-")
  tab <- melt(tab, id = c('year', 'stat'))
  tab <- spread(tab, year, value)
  tab$variable <- factor(tab$variable,
                         levels = c('value', 'CI'),
                         labels = c('Estimate', 'CI'))
  tab %>%
    select(-stat) %>%
    kable('latex',
          booktabs = T,
          caption = caption,
          row.names = FALSE,
          col.names = c("", c(2006:2017))) %>%
    kable_styling(latex_options = 'scale_down') %>%
    group_rows('Difference (D)', 1, 2) %>%
    group_rows('Endowment (X)', 3, 4) %>%
    group_rows('Price (P)', 5, 6) %>%
    footnote(general = 'EU-SILC 2005-2017, eigene Berechnungen.',
             footnote_as_chunk = T,
             general_title = "Quelle:") %>%
    landscape()
}
### Table 4
tab4 <- make_decomposition_table(results2, "Dekomposition - Einfaches Model")
### Table 5
tab5 <- make_decomposition_table(results, "Dekomposition - Erweitertes Model")
# Table 1: weighted mean hourly wages (East vs West) plus group sizes for each
# grouping variable. The identical summarise() block was pasted eight times in
# the original; it is factored into one tidy-eval helper here.
# NOTE(review): {{ }} embracing requires dplyr with rlang >= 0.4.
summarise_by <- function(dat, var) {
  dat %>%
    group_by(Variable = {{ var }}) %>%
    summarise('Mean East' = wtd.mean(hwage[region2 == 'East'], pb040[region2 == 'East']),
              'Mean West' = wtd.mean(hwage[region2 == 'West'], pb040[region2 == 'West']),
              'N' = n(),
              'N(East)/N' = length(hwage[region2 == 'East']) / n())
}
# Row order must match the group_rows() indices below (female first, contract
# last); the original achieved the same order by repeatedly prepending with
# bind_rows().
tab1 <- bind_rows(summarise_by(hundat, female),
                  summarise_by(hundat, educ),
                  summarise_by(hundat, union),
                  summarise_by(hundat, urban),
                  summarise_by(hundat, isco),
                  summarise_by(hundat, sector),
                  summarise_by(hundat, firmsize),
                  summarise_by(hundat, contract))
# Render the pooled-sample table as a grouped LaTeX booktabs table.
tab1 <- kable(tab1,
              'latex',
              caption = "Pooled Sample nach Datenfilterung und Stundenlohnunterschiede zwischen Westen und Osten",
              booktabs = T,
              escape = F,
              col.names = c("Variable", linebreak(c("Mean hwage\nEast", "Mean hwage\nWest"), align = "r"), "N", "N(East)/N")) %>%
  kable_styling(font_size = 10, full_width = F) %>%
  group_rows("Sex", 1, 2) %>%
  group_rows("Education", 3, 5) %>%
  group_rows("Union", 6, 7) %>%
  group_rows("Urbanisation", 8, 9) %>%
  group_rows("ISCO", 10, 11) %>%
  group_rows("Sector", 12, 14) %>%
  group_rows("Firmsize", 15, 16) %>%
  group_rows("Contract", 17, 18) %>%
  footnote(general = 'EU-SILC 2005-2017, eigene Berechnungen.',
           footnote_as_chunk = T,
           general_title = "Quelle:")
### Table 2 & Table 3 ----
# Both tables reshape an indicator data set (long -> wide over survey years)
# and render the identical kable layout; only the year column name, the income
# concept level names and the caption differ, so build them with one helper.
# The original also contained the no-op lines `tab2$rb010 = tab2$rb010` and
# `tab3$pb010 = tab3$pb010`, which are dropped here.
make_indicator_table <- function(dat, year_col, concept_levels, caption) {
  tab <- melt(dat, id = c(year_col, 'stat'))
  # Give the year column a fixed name so spread() can refer to it.
  names(tab)[names(tab) == year_col] <- 'year'
  tab <- spread(tab, year, value)
  # Fix the display order of the summary statistics and income concepts.
  tab$stat <- factor(tab$stat, levels = c('Mean', 'Q50', 'Gini', 'P80/P20', 'Top10'))
  tab$variable <- factor(tab$variable,
                         levels = concept_levels,
                         labels = c('Factor', 'Nation', 'Disposable'))
  tab <- tab[order(tab$stat, tab$variable), ]
  tab %>%
    select(-stat) %>%
    kable('latex',
          booktabs = T,
          caption = caption,
          row.names = FALSE,
          col.names = c("", c(2005:2017))) %>%
    kable_styling(latex_options = 'scale_down') %>%
    group_rows("Mean", 1, 3) %>%
    group_rows("Median", 4, 6) %>%
    group_rows("Gini", 7, 9) %>%
    group_rows("P80/P20", 10, 12) %>%
    group_rows("Top10", 13, 15) %>%
    footnote(general = 'EU-SILC 2005-2017, eigene Berechnungen.',
             footnote_as_chunk = T,
             general_title = "Quelle:") %>%
    landscape()
}
### Table 2
tab2 <- make_indicator_table(indicators_p1, 'rb010',
                             c('pretax_factor_eq', 'pretax_nation_eq', 'posttax_disp_eq'),
                             "Ungleichheitsindikatoren nach Eurostat")
### Table 3
tab3 <- make_indicator_table(indicators_p2, 'pb010',
                             c('pretax_factor_20', 'pretax_nation_20', 'posttax_disp_20'),
                             "Ungleichheitsindikatoren nach WID World")
### Table 4 & Table 5 ----
# Both decomposition tables apply the identical rounding/reshaping/kable
# pipeline to a results data frame; only the input and the caption differ.
# Also fixes the misspelled row-group label 'Diffrence' -> 'Difference' and
# drops the no-op line `tab$year = tab$year` from the original.
make_decomposition_table <- function(res, caption) {
  tab <- res
  tab$lower <- round(tab$lower, digits = 4)
  tab$upper <- round(tab$upper, digits = 4)
  tab$value <- round(tab$value, digits = 4)
  # NOTE(review): joining the bounds with "-" is ambiguous when a bound is
  # negative (e.g. "-0.1--0.2"); consider sep = " to " or brackets -- confirm.
  tab <- unite(tab, CI, lower, upper, sep = "-")
  tab <- melt(tab, id = c('year', 'stat'))
  tab <- spread(tab, year, value)
  tab$variable <- factor(tab$variable,
                         levels = c('value', 'CI'),
                         labels = c('Estimate', 'CI'))
  tab %>%
    select(-stat) %>%
    kable('latex',
          booktabs = T,
          caption = caption,
          row.names = FALSE,
          col.names = c("", c(2006:2017))) %>%
    kable_styling(latex_options = 'scale_down') %>%
    group_rows('Difference (D)', 1, 2) %>%
    group_rows('Endowment (X)', 3, 4) %>%
    group_rows('Price (P)', 5, 6) %>%
    footnote(general = 'EU-SILC 2005-2017, eigene Berechnungen.',
             footnote_as_chunk = T,
             general_title = "Quelle:") %>%
    landscape()
}
### Table 4
tab4 <- make_decomposition_table(results2, "Dekomposition - Einfaches Model")
### Table 5
tab5 <- make_decomposition_table(results, "Dekomposition - Erweitertes Model")
## cachematrix.R: a pair of functions that cache the inverse of a matrix so
## that repeated inversions of the same matrix are computed only once.
## makeCacheMatrix() builds the caching wrapper; cacheSolve() uses it.
## makeCacheMatrix: build a "cache-aware matrix", a list of four closures that
## share the matrix `x` and its lazily computed inverse `inv`:
##   set(y)          replace the stored matrix and invalidate the cached inverse
##   get()           return the stored matrix
##   setinv(inverse) store a computed inverse (called by cacheSolve)
##   getinv()        return the cached inverse, or NULL if none is stored yet
## Fixes the original header `makeCacheMatrix(): <- function ...`, which is a
## syntax error in R ("():" is not a valid assignment target).
makeCacheMatrix <- function(x = matrix()) {
  inv <- NULL
  set <- function(y) {
    x <<- y
    inv <<- NULL  # any cached inverse belongs to the old matrix
  }
  get <- function() x
  setinv <- function(inverse) inv <<- inverse
  getinv <- function() inv
  list(set = set, get = get, setinv = setinv, getinv = getinv)
}
## cacheSolve: return the inverse of the matrix wrapped by a makeCacheMatrix
## object `x`, computing it at most once. A previously stored inverse is
## reused (announced via message()); otherwise the inverse is computed with
## solve() and cached back onto `x`. Extra arguments are forwarded to solve().
cacheSolve <- function(x, ...) {
  cached <- x$getinv()
  if (is.null(cached)) {
    # Cache miss: invert the stored matrix and remember the result.
    source_matrix <- x$get()
    cached <- solve(source_matrix, ...)
    x$setinv(cached)
  } else {
    message("getting from cache")
  }
  cached
}
| /cachematrix.R | no_license | RomanJohnson/ProgrammingAssignment2 | R | false | false | 663 | r | ## Put comments here that give an overall description of what your
## functions do: makeCacheMatrix() wraps a matrix with getter/setter closures
## that also store its inverse; cacheSolve() computes the inverse once and
## then reuses the cached value on subsequent calls.
## makeCacheMatrix: build a "cache-aware matrix", a list of four closures that
## share the matrix `x` and its lazily computed inverse `inv`:
##   set(y)          replace the stored matrix and invalidate the cached inverse
##   get()           return the stored matrix
##   setinv(inverse) store a computed inverse (called by cacheSolve)
##   getinv()        return the cached inverse, or NULL if none is stored yet
## Fixes the original header `makeCacheMatrix(): <- function ...`, which is a
## syntax error in R ("():" is not a valid assignment target).
makeCacheMatrix <- function(x = matrix()) {
  inv <- NULL
  set <- function(y) {
    x <<- y
    inv <<- NULL  # any cached inverse belongs to the old matrix
  }
  get <- function() x
  setinv <- function(inverse) inv <<- inverse
  getinv <- function() inv
  list(set = set, get = get, setinv = setinv, getinv = getinv)
}
## cacheSolve: return the inverse of the matrix wrapped by a makeCacheMatrix
## object `x`, computing it at most once. A previously stored inverse is
## reused (announced via message()); otherwise the inverse is computed with
## solve() and cached back onto `x`. Extra arguments are forwarded to solve().
cacheSolve <- function(x, ...) {
  cached <- x$getinv()
  if (is.null(cached)) {
    # Cache miss: invert the stored matrix and remember the result.
    source_matrix <- x$get()
    cached <- solve(source_matrix, ...)
    x$setinv(cached)
  } else {
    message("getting from cache")
  }
  cached
}
|
# Auto-extracted example code from the MOTE package help page for
# d.dep.t.diff() ("d for Dependent t with SD Difference Scores Denominator",
# per the Title field below).
library(MOTE)
### Name: d.dep.t.diff
### Title: d for Dependent t with SD Difference Scores Denominator
### Aliases: d.dep.t.diff
### Keywords: dependent effect size, t-test
### ** Examples
#The following example is derived from the "dept_data" dataset included
#in the MOTE library.
#In a study to test the effects of science fiction movies on people's
#belief in the supernatural, seven people completed a measure of belief
#in the supernatural before and after watching a popular science fiction movie.
#Higher scores indicated higher levels of belief. The mean difference score was 1.14,
#while the standard deviation of the difference scores was 2.12.
#You can type in the numbers directly as shown below,
#or refer to your dataset within the function.
# Three equivalent calls: named arguments, positional arguments, and values
# computed from the bundled dept_data data set.
d.dep.t.diff(mdiff = 1.14, sddiff = 2.12, n = 7, a = .05)
d.dep.t.diff(1.14, 2.12, 7, .05)
d.dep.t.diff(mdiff = mean(dept_data$before - dept_data$after),
sddiff = sd(dept_data$before - dept_data$after),
n = length(dept_data$before),
a = .05)
#The mean measure of belief on the pretest was 5.57, with a standard
#deviation of 1.99. The posttest scores appeared lower (M = 4.43, SD = 2.88)
#but the dependent t-test was not significant using alpha = .05,
#t(7) = 1.43, p = .203, d_z = 0.54. The effect size was a medium
#effect suggesting that the movie may have influenced belief
#in the supernatural.
| /data/genthat_extracted_code/MOTE/examples/d.dep.t.diff.Rd.R | no_license | surayaaramli/typeRrh | R | false | false | 1,437 | r | library(MOTE)
# Auto-extracted example code from the MOTE package help page for
# d.dep.t.diff() ("d for Dependent t with SD Difference Scores Denominator",
# per the Title field below).
### Name: d.dep.t.diff
### Title: d for Dependent t with SD Difference Scores Denominator
### Aliases: d.dep.t.diff
### Keywords: dependent effect size, t-test
### ** Examples
#The following example is derived from the "dept_data" dataset included
#in the MOTE library.
#In a study to test the effects of science fiction movies on people's
#belief in the supernatural, seven people completed a measure of belief
#in the supernatural before and after watching a popular science fiction movie.
#Higher scores indicated higher levels of belief. The mean difference score was 1.14,
#while the standard deviation of the difference scores was 2.12.
#You can type in the numbers directly as shown below,
#or refer to your dataset within the function.
# Three equivalent calls: named arguments, positional arguments, and values
# computed from the bundled dept_data data set.
d.dep.t.diff(mdiff = 1.14, sddiff = 2.12, n = 7, a = .05)
d.dep.t.diff(1.14, 2.12, 7, .05)
d.dep.t.diff(mdiff = mean(dept_data$before - dept_data$after),
sddiff = sd(dept_data$before - dept_data$after),
n = length(dept_data$before),
a = .05)
#The mean measure of belief on the pretest was 5.57, with a standard
#deviation of 1.99. The posttest scores appeared lower (M = 4.43, SD = 2.88)
#but the dependent t-test was not significant using alpha = .05,
#t(7) = 1.43, p = .203, d_z = 0.54. The effect size was a medium
#effect suggesting that the movie may have influenced belief
#in the supernatural.
|
#### Fitting YAMS to hald pike data ####
# author: Kim Whoriskey and Henrik Baktoft
# NOTE(review): setwd() to a personal path and rm(list=ls()) make the script
# non-portable and wipe the caller's workspace; consider removing for reuse.
setwd("~/Desktop/yams")
rm(list=ls())
# Packages and project helper scripts.
library(data.table)
library(ggplot2)
library(yaps)
library(TMB)
library(dplyr)
library(gridExtra)
library(mgcv)
library(sp)
source('yapsfunctions.R')
source('issmfunctions.R')
source("simtrack.R")
# Discrete colour palette (fmf is not referenced later in this script).
fmf <- wesanderson::wes_palette("FantasticFox1", type = "discrete")
# TMB likelihoods: compile and load the SSM and HMM objective functions.
compile("yaps_ssm.cpp")
dyn.load(dynlib("yaps_ssm"))
compile("yaps_hmm.cpp")
dyn.load(dynlib("yaps_hmm"))
# bring in sync data to extract hydro positions
sync <- readRDS('data/sync.RDS')
hydros <- data.table::data.table(sync$pl$TRUE_H)
colnames(hydros) <- c('hx','hy','hz')
# bring in time of arrival data for model fitting (see yams_prepdata.R)
toalist <- readRDS('output/toalist.RDS')
# extract toa and burst interval vector
toa <- toalist$toa
bivec <- toalist$seq
# 20,000 pings, so we have to split the data up
# manually input rbi_min and _max, which are 10 and 30 for this dataset
# burst interval mins and maxes
rbi_min <- 10
rbi_max <- 30
########################################################
### run all of the mods for the pike
# first split the data into five groups
# NOTE(review): rep(1:5, each=5000) has length 25,000, but the trailing
# comment suggests 32,539 rows; if nrow(toa) != 25,000, split() will recycle
# the factor with a warning -- confirm the sizes match.
modidx <- rep(1:5, each=5000) # 32539
toamats <- split(data.table(toa), factor(modidx)) %>% lapply(as.matrix)
bivecs <- split(bivec, factor(modidx))
# going to run models four times
# have starting values for hmm at log(1), thats svhmm1
# have starting values of hmm to match ssm, ie at log(60), svhmm60
# each of these for two states and three states, m2 vs m3
# all done with a timescale of 60
# The four fits below differed only in the number of HMM states (m), the
# starting value used for logdstart, and the output file; everything else
# (data, control arguments, timescale 60) was copied four times. Factor the
# shared call into one helper and run it over the four configurations.
# This also fixes a silent no-op: `paste('finished mod', i)` inside a for loop
# is never printed (auto-printing does not apply inside loops), so the
# progress report was lost; message() actually emits it. The result list is
# preallocated instead of grown, and is returned invisibly in addition to
# being saved (the original only left the last `mods` in the global env).
fit_and_save <- function(m, dstart_value, outfile) {
  mods <- vector("list", length(toamats))
  for (i in seq_along(toamats)) {
    mods[[i]] <- tryCatch(
      fit_issm(inp_init = getmodinp(hydros,
                                    toamats[[i]], sdInits = 1,
                                    rbi_min = rbi_min, rbi_max = rbi_max, biTable = bivecs[[i]],
                                    m = 1, b = NULL, timescale = 60, dstart = 60),
               inp_ssm = getmodinp(hydros,
                                   toamats[[i]], sdInits = 1,
                                   rbi_min = rbi_min, rbi_max = rbi_max, biTable = bivecs[[i]],
                                   m = m, b = NULL, timescale = 60, dstart = 60),
               maxsteps = 10,
               m = m,
               logdstart = rep(log(dstart_value), m),  # HMM starting values, one per state
               fixsteps = TRUE,
               allowfc = TRUE,
               jiggle_fc = 0.01,
               initssmargs = list(inner_control = list(maxit = 1500),
                                  mapping = list(working_A = factor(matrix(NA)))),
               ssmargs = list(inner_control = list(maxit = 1500),
                              mapping = list(working_A = factor(matrix(NA, nrow = m, ncol = m))),
                              optimizer = 'nlminb'),
               timescalehmm = 60,
               setinitstatdist = 1),
      error = function(e) e)  # keep the error object so one failed chunk does not stop the run
    message('finished mod ', i)
  }
  saveRDS(mods, file = outfile)
  invisible(mods)
}
# 2 states, hmm at log(1)
fit_and_save(m = 2, dstart_value = 1, outfile = "output/pikemodsm2ts60svhmm1.RDS")
# 2 states, hmm at log(60)
fit_and_save(m = 2, dstart_value = 60, outfile = "output/pikemodsm2ts60svhmm60.RDS")
# 3 states, hmm at log(1)
fit_and_save(m = 3, dstart_value = 1, outfile = "output/pikemodsm3ts60svhmm1.RDS")
# 3 states, hmm at log(60)
fit_and_save(m = 3, dstart_value = 60, outfile = "output/pikemodsm3ts60svhmm60.RDS")
| /yams_pike.R | no_license | kimwhoriskey/yams | R | false | false | 8,838 | r | #### Fitting YAMS to hald pike data ####
# author: Kim Whoriskey and Henrik Baktoft
# NOTE(review): setwd() to a personal path and rm(list=ls()) make the script
# non-portable and wipe the caller's workspace; consider removing for reuse.
setwd("~/Desktop/yams")
rm(list=ls())
# Packages and project helper scripts.
library(data.table)
library(ggplot2)
library(yaps)
library(TMB)
library(dplyr)
library(gridExtra)
library(mgcv)
library(sp)
source('yapsfunctions.R')
source('issmfunctions.R')
source("simtrack.R")
# Discrete colour palette (fmf is not referenced later in this script).
fmf <- wesanderson::wes_palette("FantasticFox1", type = "discrete")
# TMB likelihoods: compile and load the SSM and HMM objective functions.
compile("yaps_ssm.cpp")
dyn.load(dynlib("yaps_ssm"))
compile("yaps_hmm.cpp")
dyn.load(dynlib("yaps_hmm"))
# bring in sync data to extract hydro positions
sync <- readRDS('data/sync.RDS')
hydros <- data.table::data.table(sync$pl$TRUE_H)
colnames(hydros) <- c('hx','hy','hz')
# bring in time of arrival data for model fitting (see yams_prepdata.R)
toalist <- readRDS('output/toalist.RDS')
# extract toa and burst interval vector
toa <- toalist$toa
bivec <- toalist$seq
# 20,000 pings, so we have to split the data up
# manually input rbi_min and _max, which are 10 and 30 for this dataset
# burst interval mins and maxes
rbi_min <- 10
rbi_max <- 30
########################################################
### run all of the mods for the pike
# first split the data into five groups
# NOTE(review): rep(1:5, each=5000) has length 25,000, but the trailing
# comment suggests 32,539 rows; if nrow(toa) != 25,000, split() will recycle
# the factor with a warning -- confirm the sizes match.
modidx <- rep(1:5, each=5000) # 32539
toamats <- split(data.table(toa), factor(modidx)) %>% lapply(as.matrix)
bivecs <- split(bivec, factor(modidx))
# going to run models four times
# have starting values for hmm at log(1), thats svhmm1
# have starting values of hmm to match ssm, ie at log(60), svhmm60
# each of these for two states and three states, m2 vs m3
# all done with a timescale of 60
# The four fits below differed only in the number of HMM states (m), the
# starting value used for logdstart, and the output file; everything else
# (data, control arguments, timescale 60) was copied four times. Factor the
# shared call into one helper and run it over the four configurations.
# This also fixes a silent no-op: `paste('finished mod', i)` inside a for loop
# is never printed (auto-printing does not apply inside loops), so the
# progress report was lost; message() actually emits it. The result list is
# preallocated instead of grown, and is returned invisibly in addition to
# being saved (the original only left the last `mods` in the global env).
fit_and_save <- function(m, dstart_value, outfile) {
  mods <- vector("list", length(toamats))
  for (i in seq_along(toamats)) {
    mods[[i]] <- tryCatch(
      fit_issm(inp_init = getmodinp(hydros,
                                    toamats[[i]], sdInits = 1,
                                    rbi_min = rbi_min, rbi_max = rbi_max, biTable = bivecs[[i]],
                                    m = 1, b = NULL, timescale = 60, dstart = 60),
               inp_ssm = getmodinp(hydros,
                                   toamats[[i]], sdInits = 1,
                                   rbi_min = rbi_min, rbi_max = rbi_max, biTable = bivecs[[i]],
                                   m = m, b = NULL, timescale = 60, dstart = 60),
               maxsteps = 10,
               m = m,
               logdstart = rep(log(dstart_value), m),  # HMM starting values, one per state
               fixsteps = TRUE,
               allowfc = TRUE,
               jiggle_fc = 0.01,
               initssmargs = list(inner_control = list(maxit = 1500),
                                  mapping = list(working_A = factor(matrix(NA)))),
               ssmargs = list(inner_control = list(maxit = 1500),
                              mapping = list(working_A = factor(matrix(NA, nrow = m, ncol = m))),
                              optimizer = 'nlminb'),
               timescalehmm = 60,
               setinitstatdist = 1),
      error = function(e) e)  # keep the error object so one failed chunk does not stop the run
    message('finished mod ', i)
  }
  saveRDS(mods, file = outfile)
  invisible(mods)
}
# 2 states, hmm at log(1)
fit_and_save(m = 2, dstart_value = 1, outfile = "output/pikemodsm2ts60svhmm1.RDS")
# 2 states, hmm at log(60)
fit_and_save(m = 2, dstart_value = 60, outfile = "output/pikemodsm2ts60svhmm60.RDS")
# 3 states, hmm at log(1)
fit_and_save(m = 3, dstart_value = 1, outfile = "output/pikemodsm3ts60svhmm1.RDS")
# 3 states, hmm at log(60)
fit_and_save(m = 3, dstart_value = 60, outfile = "output/pikemodsm3ts60svhmm60.RDS")
|
# Build a CCInx ligand-receptor interaction network from "1vsAll" differential
# expression results stored under `path`.
#
# path:     project directory containing a DIFFERENTIAL_EXPRESSION/1vsAll tree.
# test:     character vector of DE test names to read, or NULL for all available.
# clusters: character vector of cluster names to include in the network.
# species:  "mouse" or "human" (mapped to the ID prefix CCInx expects).
# Returns the network object produced by CCInx::BuildCCInx().
run_CCInx <- function(path, test, clusters, species) {
  assertthat::assert_that(assertthat::is.dir(path))
  assertthat::assert_that(assertthat::is.readable(path))
  assertthat::assert_that(is.character(clusters))
  # Output directory for CCInx results (created up front).
  fs::dir_create(fs::path(path, "INTERACTION", "CCInx"))
  species <- switch(species,
    mouse = "mmusculus",
    human = "hsapiens"
  )
  DE_type <- "1vsAll"
  # Differential expression must have been run with this DE_type beforehand.
  DE_dir <- fs::path(path, "DIFFERENTIAL_EXPRESSION", DE_type)
  if (!fs::dir_exists(DE_dir)) {
    rlang::abort(stringr::str_c("Differential expression was not run. Please run differential expression with DE_type set to ", DE_type))
  }
  available_test <- fs::path_file(fs::dir_ls(DE_dir))
  if (is.null(test)) {
    # No explicit request: use every test found on disk.
    test <- available_test
  } else {
    assertthat::assert_that(is.character(test))
  }
  if (any(!test %in% available_test)) {
    wrong_test <- test[!test %in% available_test]
    test <- setdiff(test, wrong_test)
    warning("The following test are not available, they will be ignored :\n", stringr::str_c(" - ", wrong_test, collapse = "\n"))
  }
  # Read one test's marker table and keep only the columns CCInx needs.
  # Rows are not filtered on p-value so downstream plots keep that information.
  read_one_test <- function(tst) {
    marker_file <- fs::path(path, "DIFFERENTIAL_EXPRESSION", DE_type, tst, paste(tst, DE_type, "markers", sep = "_"), ext = "txt")
    markers <- readr::read_table2(marker_file, col_types = test_cols_spec(tst))
    dplyr::select(markers, gene, logFC, adj.P.Val, tested_cluster)
  }
  DE_data <- purrr::map_df(test, read_one_test)
  # Keep each gene at most once per cluster.
  DE_data <- dplyr::distinct(DE_data, gene, tested_cluster, .keep_all = TRUE)
  # Validate the requested clusters against what the DE results contain.
  available_clust <- as.character(unique(DE_data[["tested_cluster"]]))
  if (!all(clusters %in% available_clust)) {
    rlang::abort(stringr::str_c(
      "One or more of the specified cluster does not exist in differential analysis results.\n",
      "Available clusters are : ",
      stringr::str_c(available_clust, collapse = ",")
    ))
  }
  # One data frame per cluster (gene names as rownames), restricted to the
  # clusters requested by the caller.
  GeneStatList <- purrr::map(
    purrr::set_names(available_clust, available_clust),
    function(cl) tibble::column_to_rownames(dplyr::filter(DE_data, tested_cluster == cl), "gene")
  )
  GeneStatList <- GeneStatList[names(GeneStatList) %in% clusters]
  interaction_network <- CCInx::BuildCCInx(GeneStatList, GeneMagnitude = "logFC", GeneStatistic = "adj.P.Val", Species = species)
  return(interaction_network)
}
# Return the readr column specification matching a differential-expression
# test's marker file, so the marker tables parse with stable column types.
#
# test: name of the DE test (e.g. "wilcox", "t", "EdgeR_LRT", "Limma_trend").
# Returns a readr::cols() specification, or NULL for an unknown test
# (readr then falls back to guessing the column types).
test_cols_spec <- function(test) {
  # Tests produced by Seurat's FindMarkers-style output share one layout.
  seurat_tests <- c("wilcox", "bimod", "t", "poisson", "negbinom", "LR", "MAST", "roc", "DEseq2")
  # FIX: `any(set == test)` depended on `==` recycling; `%in%` states the
  # membership test directly and is robust for a length-1 `test`.
  if (test %in% seurat_tests) {
    readr::cols(
      p_val = readr::col_number(),
      logFC = readr::col_number(),
      pct.1 = readr::col_number(),
      pct.2 = readr::col_number(),
      adj.P.Val = readr::col_number(),
      tested_cluster = readr::col_factor(),
      control_cluster = readr::col_factor(),
      gene = readr::col_character(),
      min.pct = readr::col_number(),
      cluster_conditions = readr::col_character()
    )
  } else if (stringr::str_detect(test, "EdgeR")) {
    readr::cols(
      logFC = readr::col_number(),
      logCPM = readr::col_number(),
      F = readr::col_number(),
      adj.P.Val = readr::col_number(),
      FDR = readr::col_number(),
      tested_cluster = readr::col_factor(),
      control_cluster = readr::col_factor(),
      gene = readr::col_character(),
      min.pct = readr::col_number(),
      cluster_conditions = readr::col_character()
    )
  } else if (stringr::str_detect(test, "Limma")) {
    readr::cols(
      logFC = readr::col_number(),
      AveExpr = readr::col_number(),
      t = readr::col_number(),
      P.Value = readr::col_number(),
      adj.P.Val = readr::col_number(),
      B = readr::col_number(),
      tested_cluster = readr::col_factor(),
      control_cluster = readr::col_factor(),
      gene = readr::col_character(),
      min.pct = readr::col_number(),
      cluster_conditions = readr::col_character()
    )
  } else {
    # Unknown test: return NULL explicitly (same as the previous implicit
    # fall-through) so readr guesses the column types.
    NULL
  }
}
# path <- "/home/aurel/Nextcloud/Documents/Cours/M2_GENIOMHE/Stage/Project/NF1-MPNST/NF1-MPNST_20190212/"
#
# DE_type <- "1vsAll"
# test <- c("t", "bimod") # as many as you want
# Result visualization
# library(shiny)
# CCInx::ViewCCInx(test_ccinx)
| /Interaction/test_CCInx.R | no_license | abeaude/M2_report | R | false | false | 4,317 | r | run_CCInx <- function(path, test, clusters, species) {
assertthat::assert_that(assertthat::is.dir(path))
assertthat::assert_that(assertthat::is.readable(path))
assertthat::assert_that(is.character(clusters))
fs::path(path, "INTERACTION", "CCInx") %>% fs::dir_create()
species <- switch(species,
mouse = "mmusculus",
human = "hsapiens"
)
DE_type <- "1vsAll"
# check if DE_folder exist
dir_exist <- fs::path(path, "DIFFERENTIAL_EXPRESSION", DE_type) %>% fs::dir_exists()
if (!dir_exist) {
stringr::str_c("Differential expression was not run. Please run differential expression with DE_type set to ", DE_type) %>%
rlang::abort()
}
available_test <- fs::path(path, "DIFFERENTIAL_EXPRESSION", DE_type) %>%
fs::dir_ls() %>%
fs::path_file()
if (!is.null(test)) {
assertthat::assert_that(is.character(test))
} else {
test <- available_test
}
if (any(!test %in% available_test)) {
wrong_test <- test[!test %in% available_test]
test <- setdiff(test, wrong_test)
warning("The following test are not available, they will be ignored :\n", stringr::str_c(" - ", wrong_test, collapse = "\n"))
}
# read the different test
DE_data <- purrr::map_df(test, ~ fs::path(path, "DIFFERENTIAL_EXPRESSION", DE_type, .x, paste(.x, DE_type, "markers", sep = "_"), ext = "txt") %>%
readr::read_table2(col_types = test_cols_spec(.x)) %>%
dplyr::select(gene, logFC, adj.P.Val, tested_cluster)) # not filtering allow information in plot on p.value
# select unique rows based on genes and cluster
DE_data %<>% dplyr::distinct(gene, tested_cluster, .keep_all = TRUE)
# check clusters
# if clusters is missing
available_clust <- DE_data[["tested_cluster"]] %>%
unique() %>%
as.character()
wrong_clust <- !all(clusters %in% available_clust)
if (wrong_clust) {
stringr::str_c(
"One or more of the specified cluster does not exist in differential analysis results.\n",
"Available clusters are : ",
stringr::str_c(available_clust, collapse = ",")
) %>%
rlang::abort()
}
# make it a named list by cluster
GeneStatList <- available_clust %>%
purrr::set_names(., nm = .) %>%
purrr::map(~ dplyr::filter(DE_data, tested_cluster == .) %>% tibble::column_to_rownames("gene")) %>%
purrr::keep(names(.) %in% clusters) # only the requested cluster by the user
interaction_network <- CCInx::BuildCCInx(GeneStatList, GeneMagnitude = "logFC", GeneStatistic = "adj.P.Val", Species = species)
return(interaction_network)
}
test_cols_spec <- function(test) {
if (any(c("wilcox", "bimod", "t", "poisson", "negbinom", "LR", "MAST", "roc", "DEseq2") == test)) {
readr::cols(
p_val = readr::col_number(),
logFC = readr::col_number(),
pct.1 = readr::col_number(),
pct.2 = readr::col_number(),
adj.P.Val = readr::col_number(),
tested_cluster = readr::col_factor(),
control_cluster = readr::col_factor(),
gene = readr::col_character(),
min.pct = readr::col_number(),
cluster_conditions = readr::col_character()
)
} else if (stringr::str_detect(test, "EdgeR")) {
readr::cols(
logFC = readr::col_number(),
logCPM = readr::col_number(),
F = readr::col_number(),
adj.P.Val = readr::col_number(),
FDR = readr::col_number(),
tested_cluster = readr::col_factor(),
control_cluster = readr::col_factor(),
gene = readr::col_character(),
min.pct = readr::col_number(),
cluster_conditions = readr::col_character()
)
} else if (stringr::str_detect(test, "Limma")) {
readr::cols(
logFC = readr::col_number(),
AveExpr = readr::col_number(),
t = readr::col_number(),
P.Value = readr::col_number(),
adj.P.Val = readr::col_number(),
B = readr::col_number(),
tested_cluster = readr::col_factor(),
control_cluster = readr::col_factor(),
gene = readr::col_character(),
min.pct = readr::col_number(),
cluster_conditions = readr::col_character()
)
}
}
# path <- "/home/aurel/Nextcloud/Documents/Cours/M2_GENIOMHE/Stage/Project/NF1-MPNST/NF1-MPNST_20190212/"
#
# DE_type <- "1vsAll"
# test <- c("t", "bimod") # as many as you want
# Result visualization
# library(shiny)
# CCInx::ViewCCInx(test_ccinx)
|
library(shiny)
library(ggplot2)
# Plot a t density (df = 10) with the tail region(s) beyond the observed
# statistic shaded, the statistic labelled, and the tail probability annotated.
#
# section:         "upper", "lower", or "both" -- which tail(s) to shade.
# q1:              the observed t statistic marking the shading boundary.
# label.quantiles: retained for backward compatibility; currently unused.
# dist:            label used in the probability annotation (e.g. "T").
# xlab:            x-axis label.
# Returns a ggplot object.
plotAreaT <- function(section = "upper", q1 = -1.5, label.quantiles=TRUE, dist = "T", xlab=" ") {
  # ggplot2 is attached at the top of this file; no require() needed here.
  x <- seq(-4, 4, by = 0.01)
  deg_free <- 10  # degrees of freedom of the t distribution being drawn
  data <- data.frame(x = x, density = dt(x, deg_free))
  p <- ggplot(data, aes(x, y = density)) + geom_line() + xlab(xlab)
  quantile1 <- annotate("text", label = paste("t* =", q1), x = q1, y = 0, size = 8, colour = "black")
  quantile2 <- NULL  # second t* label, set only in the two-tailed case (NULL layers are ignored)
  if (section == "upper") {
    shade <- subset(data, x > q1)
    area <- pt(q1, deg_free, lower.tail = FALSE)
    p_value <- annotate("text", label = paste("P(", toupper(dist), ">", q1, ") = ",
                                              round(area, 4)), x = 2.9, y = 0.3, size = 8, colour = "black")
  } else if (section == "lower") {
    shade <- subset(data, x < q1)
    area <- pt(q1, deg_free)
    p_value <- annotate("text", label = paste("P(", toupper(dist), "<", q1, ") = ",
                                              round(area, 4)), x = 2.9, y = 0.3, size = 8, colour = "black")
  } else if (section == "both") {
    upper_tail <- subset(data, x > abs(q1))
    # FIX: fill/alpha are constants, so they belong OUTSIDE aes(); mapping them
    # inside aes() treats "blue" as a data value, giving the wrong fill colour
    # and forcing the legend-suppression hack below.
    p <- p + geom_ribbon(data = upper_tail, aes(ymin = 0, ymax = density), fill = "blue", alpha = 0.4)
    shade <- subset(data, x < -abs(q1))
    area <- pt(abs(q1), deg_free, lower.tail = FALSE) + pt(-abs(q1), deg_free)
    p_value <- annotate("text", label = paste("2*P(", toupper(dist), ">", abs(q1), ") = ",
                                              round(area, 4)), x = 2.9, y = 0.3, size = 8, colour = "black")
    quantile2 <- annotate("text", label = paste("t* =", -q1), x = -q1, y = 0, size = 8, colour = "black")
  }
  p + p_value + quantile1 + quantile2 +
    geom_ribbon(data = shade, aes(ymin = 0, ymax = density), fill = "blue", alpha = 0.4) +
    theme(legend.position = "none")
}
# Shiny server: renders the t-distribution tail plot driven by the UI inputs
# `type` (tail selection) and `q1` (observed t statistic).
shinyServer(function(input, output, session) {
output$plot1 <- renderPlot({
#Koshke is the man -- http://stackoverflow.com/questions/14313285/ggplot2-theme-with-no-axes-or-grid
# grid is needed for grid.draw() below.
library(grid)
p <- plotAreaT(section=input$type, q1=input$q1)
# Blank out all lines, text and titles so only the plot panel remains.
p <- p + theme(line = element_blank(),
text = element_blank(),
title = element_blank())
# Build the gtable for the plot, locate the "panel" cell in its layout, and
# draw only that cell (the axis/margin cells are cropped away).
gt <- ggplot_gtable(ggplot_build(p))
ge <- subset(gt$layout, name == "panel")
print(grid.draw(gt[ge$t:ge$b, ge$l:ge$r]))
})
})
}) | /repos/apps-master/shiny/apps/testing/server.R | permissive | babiato/flaskapp1 | R | false | false | 2,368 | r | library(shiny)
library(ggplot2)
plotAreaT <- function(section = "upper", q1 = -1.5, label.quantiles=TRUE, dist = "T", xlab=" ") {
require(ggplot2)
x <- seq(-4, 4, by = 0.01)
df <- 10
data <- data.frame(x = x, density = dt(x, df))
p <- ggplot(data, aes(x, y=density)) + geom_line() + xlab(xlab)
quantile1 <- annotate("text", label = paste("t* =", q1), x = q1, y = 0, size = 8, colour = "black")
quantile2 <- NULL
if (section == "upper") {
section <- subset(data, x > q1)
area <- pt(q1, df, lower.tail = FALSE)
p_value <- annotate("text", label = paste("P(", toupper(dist), ">", q1, ") = ",
round(area, 4)), x = 2.9, y = 0.3, size = 8, colour = "black")
} else if (section == "lower") {
section <- subset(data, x < q1)
area <- pt(q1, df)
p_value <- annotate("text", label = paste("P(", toupper(dist), "<", q1, ") = ",
round(area, 4)), x = 2.9, y = 0.3, size = 8, colour = "black")
} else if (section == "both"){
section1 <- subset(data, x > abs(q1))
p <- p + geom_ribbon(data=section1, aes(ymin=0, ymax=density, fill="blue", alpha=.4))
section <- subset(data, x < -abs(q1))
area <- pt(abs(q1), df, lower.tail = FALSE) + pt(-abs(q1), df)
p_value <- annotate("text", label = paste("2*P(", toupper(dist), ">", abs(q1), ") = ",
round(area, 4)), x = 2.9, y = 0.3, size = 8, colour = "black")
quantile2 <- annotate("text", label = paste("t* =", -q1), x = -q1, y = 0, size = 8, colour = "black")
}
p + p_value + quantile1 + quantile2 + #geom_vline(xintercept = 0, color = "blue") + annotate("text", label = "0", x = 0, y = 0, size = 5) +
geom_ribbon(data=section, aes(ymin=0, ymax=density, fill="blue", alpha=.4))+theme(legend.position = "none")
}
shinyServer(function(input, output, session) {
output$plot1 <- renderPlot({
#Koshke is the man -- http://stackoverflow.com/questions/14313285/ggplot2-theme-with-no-axes-or-grid
library(grid)
p <- plotAreaT(section=input$type, q1=input$q1)
p <- p + theme(line = element_blank(),
text = element_blank(),
title = element_blank())
gt <- ggplot_gtable(ggplot_build(p))
ge <- subset(gt$layout, name == "panel")
print(grid.draw(gt[ge$t:ge$b, ge$l:ge$r]))
})
}) |
% Generated by roxygen2 (4.0.1): do not edit by hand
\name{create_file_structure}
\alias{create_file_structure}
\alias{within_file_structure}
\title{Create a file structure for tests.}
\usage{
create_file_structure(files, expr, dir)
within_file_structure(files, expr, dir)
}
\arguments{
\item{files}{character or list or NULL. A nested file structure. The names of the
list will decide the names of the files, and the terminal nodes should be
strings which will populate the file bodies. One can also specify a
character (for example, \code{files = c('a','b','c')} will create three
files with those filenames). By default, \code{files = NULL} in which case
simply an empty directory will be created.}
\item{expr}{expression. An expression to evaluate within which the files
should exist, but outside of which the files should be unlinked. If
missing, the directory of the will be returned. Otherwise, the
value obtained in this expression will be returned.}
\item{dir}{character. The directory in which to create the files. If
missing, a temporary directory will be created instead using
the built-in \code{tempfile()} helper.}
}
\value{
the directory in which this file structure exists, if \code{expr}
is missing. If \code{expr} was provided, its return value will be
returned instead.
}
\description{
A helper function for creating hierarchical file structures
when, e.g., testing functions which rely on presence of files.
\code{within_file_structure} is an alias for \code{create_file_structure} that requires the \code{expr} argument.
}
\details{
For example, when files need to be present for a function we are testing,
it would be very cumbersome to create these files manually. Instead, we can
do the following:
\code{test_dir <- create_file_structure(list(dir1 = list('file1',
file2.r = 'print("Sample R code")'), file3.csv = "a,b,c\n1,2,3"))}
with the return value being a test directory containing these structured files.
An additional feature is that expressions can be evaluated within the scope
of the hierarchical files existing, with the files getting deleted after
the expression executes:
\code{create_file_structure(list(a = "hello\nworld"),
cat(readLines(file.path(tempdir, 'a'))[[2]]))}
The above will print \code{"world"}. (\code{tempdir} is set automatically
within the scope of the expression to the directory that was created for
the temporary files.)
}
\examples{
\dontrun{
library(testthat)
test_dir <- create_file_structure(list(test = 'blah', 'test2'))
# Now test_dir is the location of a directory containing a file 'test'
# with the string 'blah' and an empty file 'test2'.
test_dir <- create_file_structure(list(alphabet = as.list(LETTERS)))
# Now test_dir is the location of a directory containing a subdirectory
# 'alphabet' with the files 'A', 'B', ..., 'Z' (all empty).
test_dir <- create_file_structure(list(a = 'hello'), {
cat(readLines(file.path(tempdir, 'a')))
})
}
library(testthat)
expect_output(within_file_structure(list(a = 'hello'), {
cat(readLines(file.path(tempdir, 'a')))
}), 'hello')
# The above will create a directory with a file named "a" containing the string
# 'hello', print it by reading the file, and then unlink the directory.
}
| /man/create_file_structure.Rd | no_license | arturochian/testthatsomemore | R | false | false | 3,192 | rd | % Generated by roxygen2 (4.0.1): do not edit by hand
\name{create_file_structure}
\alias{create_file_structure}
\alias{within_file_structure}
\title{Create a file structure for tests.}
\usage{
create_file_structure(files, expr, dir)
within_file_structure(files, expr, dir)
}
\arguments{
\item{files}{character or list or NULL. A nested file structure. The names of the
list will decide the names of the files, and the terminal nodes should be
strings which will populate the file bodies. One can also specify a
character (for example, \code{files = c('a','b','c')} will create three
files with those filenames). By default, \code{files = NULL} in which case
simply an empty directory will be created.}
\item{expr}{expression. An expression to evaluate within which the files
should exist, but outside of which the files should be unlinked. If
missing, the directory of the will be returned. Otherwise, the
value obtained in this expression will be returned.}
\item{dir}{character. The directory in which to create the files. If
missing, a temporary directory will be created instead using
the built-in \code{tempfile()} helper.}
}
\value{
the directory in which this file structure exists, if \code{expr}
is not missing. If \code{expr} was provided, its return value will be
returned instead.
}
\description{
A helper function for creating hierarchical file structures
when, e.g., testing functions which rely on presence of files.
An alias for create_file_structure that only allows expressions.
}
\details{
For example, when files need to be present for a function we are testing,
it would be very cumbersome to create these files manually. Instead, we can
do the following:
\code{test_dir <- create_file_structure(list(dir1 = list('file1',
file2.r = 'print("Sample R code")'), file3.csv = "a,b,c\n1,2,3"))}
with the return value being a test directory containing these structured files.
An additional feature is that expressions can be evaluated within the scope
of the hierarchical files existing, with the files getting deleted after
the expression executes:
\code{create_file_structure(list(a = "hello\nworld"),
cat(readLines(file.path(tempdir, 'a'))[[2]]))}
The above will print \code{"world"}. (\code{tempdir} is set automatically
within the scope of the expression to the directory that was created for
the temporary files.)
}
\examples{
\dontrun{
library(testthat)
test_dir <- create_file_structure(list(test = 'blah', 'test2'))
# Now test_dir is the location of a directory containing a file 'test'
# with the string 'blah' and an empty file 'test2'.
test_dir <- create_file_structure(list(alphabet = as.list(LETTERS)))
# Now test_dir is the location of a directory containing a subdirectory
# 'alphabet' with the files 'A', 'B', ..., 'Z' (all empty).
test_dir <- create_file_structure(list(a = 'hello'), {
cat(readLines(file.path(tempdir, 'a')))
})
}
library(testthat)
expect_output(within_file_structure(list(a = 'hello'), {
cat(readLines(file.path(tempdir, 'a')))
}), 'hello')
# The above will create a directory with a file named "a" containing the string
# 'hello', print it by reading the file, and then unlink the directory.
}
|
### Yule-Walker method
# Solve the Yule-Walker equations by the Levinson-Durbin order recursion.
#
# acvs: autocovariance sequence, acvs[1] = lag 0.
# p:    AR model order to fit (defaults to length(acvs) - 1; must be >= 1).
# Returns a list with:
#   coeffs    - AR(p) coefficients phi_1, ..., phi_p
#   innov_var - innovation (prediction error) variance at order p
#   pev       - prediction error variances for orders 0, ..., p
#   pacs      - partial autocorrelations (reflection coefficients), orders 1..p
#   blpc      - best linear predictor coefficients for each order 1..p
yule_walker_algorithm_given_acvs <- function(acvs, p = length(acvs) - 1)
{
  coeff_history <- vector(mode = "list", length = p)
  # Order 1 solution: phi_1 = rho_1.
  current <- acvs[2] / acvs[1]
  pred_err <- rep(acvs[1], p + 1)
  coeff_history[[1]] <- current
  partials <- rep(current, p)
  pred_err[2] <- pred_err[1] * (1 - current^2)
  if (p > 1)
  {
    for (ord in 2:p)
    {
      previous <- current
      current <- rep(0, ord)
      # Reflection coefficient (order-ord partial autocorrelation).
      refl <- (acvs[ord + 1] - sum(previous * acvs[ord:2])) / pred_err[ord]
      current[ord] <- refl
      # Update the lower-order coefficients.
      current[1:(ord - 1)] <- previous - refl * rev(previous)
      coeff_history[[ord]] <- current
      partials[ord] <- refl
      pred_err[ord + 1] <- pred_err[ord] * (1 - refl^2)
    }
  }
  list(coeffs = current,
       innov_var = pred_err[p + 1],
       pev = pred_err,
       pacs = partials,
       blpc = coeff_history)
}
| /R-code/yule_walker_algorithm_given_acvs.R | no_license | dmn001/sauts | R | false | false | 828 | r | ### Yule-Walker method
yule_walker_algorithm_given_acvs <- function(acvs, p=length(acvs)-1)
{
blpc <- vector(mode="list", length=p)
phis <- acvs[2]/acvs[1]
pev <- rep(acvs[1],p+1)
blpc[[1]] <- phis
pacs <- rep(phis,p)
pev[2] <- pev[1]*(1-phis^2)
if(p > 1)
{
for(k in 2:p)
{
old_phis <- phis
phis <- rep(0,k)
## compute kth order pacs (reflection coefficient)
phis[k] <- (acvs[k+1] - sum(old_phis*acvs[k:2]))/pev[k]
phis[1:(k-1)] <- old_phis - phis[k]*rev(old_phis)
blpc[[k]] <- phis
pacs[k] <- phis[k]
pev[k+1] <- pev[k]*(1-phis[k]^2)
}
}
return(list(coeffs=phis,
innov_var=pev[p+1],
pev=pev,pacs=pacs,
blpc=blpc))
}
|
# CMort LA Pollution and Temperature Study
# ARIMA 1 MLR with Cor Errors (no lag, no seasonal categorical variable)
#EDA
library(tidyverse)
library(GGally)
library(astsa)
library(tswge)
CM = read.csv(file.choose(),header = TRUE)
head(CM)
ggpairs(CM[2:4]) #matrix of scatter plots
#forecast Particles: seasonal (s=52) difference, then check the residual series
plotts.sample.wge(CM$part) #freq near .0192 (annual)
CM_52 = artrans.wge(CM$part, c(rep(0,51),1))
plotts.sample.wge(CM_52) #looks like some low freq?
aic5.wge(CM_52) #picks ARMA(2,1) assume stationary
aic5.wge(CM_52,type = "bic") #picks ARMA(0,0)
ljung.wge(CM_52)$pval #FTR Ho
ljung.wge(CM_52, K = 48)$pval #FTR Ho
#Going with white noise despite peak at 0 in Spec D.
#est = est.arma.wge(CM_52, p = 3, q = 2)
#CM_52_AR2_MA1 = artrans.wge(CM_52,est$phi)
predsPart = fore.aruma.wge(CM$part,s = 52, n.ahead = 20)
plot(predsPart$f, type = "l")
# FIX: this is the particulate series, not temperature -- y label corrected.
plot(seq(1,508,1), CM$part, type = "l",xlim = c(0,528), ylab = "Particulates", main = "20 Week Particulate Forecast")
lines(seq(509,528,1), predsPart$f, type = "l", col = "red")
#forecast Temp: same seasonal-difference-and-check procedure
plotts.sample.wge(CM$temp) #freq near .0192 (annual)
CM_52 = artrans.wge(CM$temp, c(rep(0,51),1))
plotts.sample.wge(CM_52) #looks like some low freq?
aic5.wge(CM_52) #picks ARMA(0,0)
aic5.wge(CM_52,type = "bic") #picks ARMA(0,0)
ljung.wge(CM_52)$pval
ljung.wge(CM_52, K = 48)$pval #barely rejects
acf(CM_52,lag.max = 48) # acf looks consistent with white noise
predsTemp = fore.aruma.wge(CM$temp,s = 52, n.ahead = 20)
plot(predsTemp$f, type = "l")
plot(seq(1,508,1), CM$temp, type = "l",xlim = c(0,528), ylab = "Temperature", main = "20 Week Temperature Forecast")
lines(seq(509,528,1), predsTemp$f, type = "l", col = "red")
# Model cmort based on predicted part and temp using MLR with Cor Erros
#assuming data is loaded in dataframe CM
ksfit = lm(cmort~temp+part+Week, data = CM)
phi = aic.wge(ksfit$residuals) #AR(2)
fit = arima(CM$cmort, order = c(phi$p,0,phi$q), seasonal = list(order = c(1,0,0), period = 52), xreg = cbind(CM$temp, CM$part, CM$Week))
# Check for whiteness of residuals
acf(fit$residuals)
ljung.wge(fit$residuals) # pval = .059
ljung.wge(fit$residuals, K = 48) # pval = .004
#load the forecasted Part and Temp in a data frame
next20 = data.frame(temp = predsTemp$f, part = predsPart$f, Week = seq(509,528,1))
#get predictions
predsCMort = predict(fit,newxreg = next20)
#plot next 20 cmort wrt time
plot(seq(1,508,1), CM$cmort, type = "l",xlim = c(0,528), ylab = "Cardiac Mortality", main = "20 Week Cardiac Mortality Forecast")
lines(seq(509,528,1), predsCMort$pred, type = "l", col = "red")
#Find ASE Need to forecast last 30 of known series.
# Hold out the last 30 weeks, refit on the first 478, and score the forecast.
CMsmall = CM[1:478,]
ksfit = lm(cmort~temp+part+Week, data = CMsmall)
phi = aic.wge(ksfit$residuals)
fit = arima(CMsmall$cmort,order = c(phi$p,0,0), seasonal = list(order = c(1,0,0), period = 52), xreg = cbind(CMsmall$temp, CMsmall$part, CMsmall$Week))
# NOTE(review): the column is named CMWeek here but Week elsewhere; predict()
# consumes newxreg positionally so this still runs, but the name should be
# Week for consistency.
last30 = data.frame(temp = CM$temp[479:508], part = CM$part[479:508], CMWeek = seq(479,508,1))
#get predictions
predsCMort = predict(fit,newxreg = last30)
ASE = mean((CM$cmort[479:508] - predsCMort$pred)^2)
ASE
plot(seq(1,508,1), CM$cmort, type = "l",xlim = c(0,528), ylab = "Cardiac Mortality", main = "Last 30 Week Cardiac Mortality Forecast")
lines(seq(479,508,1), predsCMort$pred, type = "l", col = "red")
#Find ASE Need to forecast last 30 of known series. With temp lag 1
CMsmall = CM[1:478,]
CMsmall$temp_1 = dplyr::lag(CMsmall$temp,1)
CM$temp_1 = dplyr::lag(CM$temp,1)
ksfit = lm(cmort~temp_1+part+Week, data = CMsmall)
phi = aic.wge(ksfit$residuals)
# NOTE(review): the lm above uses the lagged temp_1, but this xreg passes the
# UNLAGGED CMsmall$temp, while last30 below supplies the lagged series --
# confirm which regressor was intended (lagging here would introduce an NA in
# row 1 of the xreg, which may be why temp was used).
fit = arima(CMsmall$cmort,order = c(phi$p,0,0), seasonal = list(order = c(1,0,0), period = 52), xreg = cbind(CMsmall$temp, CMsmall$part, CMsmall$Week))
last30 = data.frame(temp = CM$temp_1[479:508], part = CM$part[479:508], Week = seq(479,508,1))
#get predictions
predsCMort = predict(fit,newxreg = last30)
plot(seq(1,508,1), CM$cmort, type = "l",xlim = c(0,528), ylab = "Cardiac Mortality", main = "Last 30 Week Cardiac Mortality Forecast")
lines(seq(479,508,1), predsCMort$pred, type = "l", col = "red")
ASE = mean((CM$cmort[479:508] - predsCMort$pred)^2)
ASE
######################################################################
#ARIMA 2: attempt at categorical variable for week but arima takes only continuous variables
CM = read.csv(file.choose(),header = TRUE)
head(CM)
#forecast Particles (seasonal s=52 difference, then check residuals)
px = plotts.sample.wge(CM$part) #freq near .0192 (annual)
CM_52 = artrans.wge(CM$part, c(rep(0,51),1))
px = plotts.sample.wge(CM_52) #looks like some low freq?
aic5.wge(CM_52) #picks ARMA(2,1) assume stationary
aic5.wge(CM_52,type = "bic") #picks ARMA(0,0)
ljung.wge(CM_52)$pval #FTR Ho
ljung.wge(CM_52, K = 48)$pval #FTR Ho
#Going with white noise despite peak at 0 in Spec D.
#est = est.arma.wge(CM_52, p = 3, q = 2)
#CM_52_AR2_MA1 = artrans.wge(CM_52,est$phi)
predsPart = fore.aruma.wge(CM$part,s = 52, n.ahead = 20)
#forecast Temp
px = plotts.sample.wge(CM$temp) #freq near .0192 (annual)
CM_52 = artrans.wge(CM$temp, c(rep(0,51),1))
px = plotts.sample.wge(CM_52) #looks like some low freq?
aic5.wge(CM_52) #picks ARMA(0,0)
aic5.wge(CM_52,type = "bic") #picks ARMA(0,0)
ljung.wge(CM_52)$pval
ljung.wge(CM_52, K = 48)$pval #barely rejects
acf(CM_52,lag.max = 48) # acf looks consistent with white noise
predsTemp = fore.aruma.wge(CM$temp,s = 52, n.ahead = 20)
# Model cmort based on predicted part and temp using MLR with Cor Erros
#assuming data is loaded in dataframe CM
CM$FWeek = as.factor(CM$Week%%52)
# NOTE(review): `form` is assigned twice; the second assignment overwrites the
# first, so only the explicit-variable formula is ever used.
form = ~ . # Since we are using all variables in the dataframe
form = ~ cmort+temp+part+Week+FWeek # or explicitly add variables to use
# Expand FWeek into dummy columns so arima() can take it as numeric regressors.
CMexpanded = model.matrix(form, data = CM) %>%
as_tibble() %>%
dplyr::select(-"(Intercept)")
# colnames(CMexpanded)
ksfit = lm(cmort~., data = CMexpanded)
summary(ksfit)
## Evaluate Residuals
px = tswge::plotts.sample.wge(ksfit$residuals)
## Looks stationary AR(1) ,ay be appropriate
aic_resids = aic.wge(ksfit$residuals) # Picks AR(2)
print(aic_resids)
fit = arima(CMexpanded$cmort,order = c(aic_resids$p,0,aic_resids$q), xreg = CMexpanded %>% dplyr::select(-cmort))
print(fit)
AIC(fit) #AIC = 3088.997
# Check for whiteness of residuals
acf(fit$residuals)
ljung.wge(fit$residuals) # pval = .0808
ljung.wge(fit$residuals, K = 48) # pval = 0.123916
#load the forecasted Part and Temp in a data frame
next20 = data.frame(temp = predsTemp$f, part = predsPart$f, Week = seq(509,528,1), FWeek = factor(seq(509,528,1)%%52, levels = levels(CM$FWeek)))
form = ~ temp + part + Week + FWeek # Remove the dependent variable 'cmort'
next20expanded = model.matrix(form, data = next20) %>%
as_tibble() %>%
dplyr::select(-"(Intercept)")
# colnames(next20expanded)
#get predictions
predsCMort = predict(fit,newxreg = next20expanded) #creates error because of factor
#plot next 20 cmort wrt time
plot(seq(1,508,1), CM$cmort, type = "l",xlim = c(0,528), ylab = "Cardiac Mortality", main = "20 Week Cardiac Mortality Forecast")
lines(seq(509,528,1), predsCMort$pred, type = "l", col = "red")
#####################################
#predict residuals manually (workaround for the predict() error above:
# forecast the lm trend and the AR residual process separately and add them)
plotts.sample.wge(ksfit$residuals)
phi = aic.wge(ksfit$residuals)
resids = fore.arma.wge(ksfit$residuals,phi = phi$phi,n.ahead = 20)
#predict trend manually
# NOTE(review): ksfit was fit on CMexpanded (dummy columns) but newdata is
# next20 (raw FWeek factor) -- confirm predict() resolves the columns as
# intended here.
preds = predict(ksfit, newdata = next20)
predsFinal = preds + resids$f
#plot next 20 cmort wrt time
plot(seq(1,508,1), CM$cmort, type = "l",xlim = c(0,528), ylab = "Cardiac Mortality", main = "20 Week Cardiac Mortality Forecast")
lines(seq(509,528,1), predsFinal, type = "l", col = "red")
#Find ASE Need to forecast last 30 of known series.
CMsmall = CM[1:478,]
ksfit = lm(cmort~temp+part+Week+FWeek, data = CMsmall)
phi = aic.wge(ksfit$residuals)
# NOTE(review): cbind() coerces the FWeek factor to its integer codes, so the
# "categorical" week enters the ARIMA as a single numeric regressor here.
fit = arima(CMsmall$cmort,order = c(phi$p,0,0), seasonal = list(order = c(1,0,0), period = 52), xreg = cbind(CMsmall$temp, CMsmall$part, CMsmall$Week, CMsmall$FWeek))
AIC(fit) #AIC = 2972
# This is actually cheating. We should forecast the temp and part as before.
last30 = data.frame(temp = CM$temp[479:508], part = CM$part[479:508], Week = seq(479,508,1), FWeek = as.factor(seq(479,508,1)%%52))
#get predictions
predsCMort = predict(fit,newxreg = last30)
#predict residuals manually
plotts.sample.wge(ksfit$residuals)
phi = aic.wge(ksfit$residuals)
resids = fore.arma.wge(ksfit$residuals,phi = phi$phi,n.ahead = 30)
#predict trend manually
preds = predict(ksfit, newdata = last30)
predsFinal = preds + resids$f
plot(seq(1,508,1), CM$cmort, type = "l",xlim = c(0,528), ylab = "Cardiac Mortality", main = "20 Week Cardiac Mortality Forecast")
lines(seq(479,508,1), predsFinal, type = "l", col = "red")
ASE = mean((CM$cmort[479:508] - predsFinal)^2,na.rm = TRUE)
ASE
#ARIMA 3: categorical variable
#With Lagged Temp
library(dplyr)
#Lag Temperature 1 (CCF below suggests cmort responds to temp with a 1-week lag)
ccf(CM$temp,CM$cmort)
CM$temp1 = dplyr::lag(CM$temp,1)
#forecast Particles (seasonal s=52 difference, then check residuals)
plotts.sample.wge(CM$part) #freq near .0192 (annual)
CM_52 = artrans.wge(CM$part, c(rep(0,51),1))
plotts.sample.wge(CM_52) #looks like some low freq?
aic5.wge(CM_52) #picks ARMA(2,1) assume stationary
aic5.wge(CM_52,type = "bic") #picks ARMA(0,0)
ljung.wge(CM_52)$pval #FTR Ho
ljung.wge(CM_52, K = 48)$pval #FTR Ho
#Going with white noise despite peak at 0 in Spec D.
#est = est.arma.wge(CM_52, p = 3, q = 2)
#CM_52_AR2_MA1 = artrans.wge(CM_52,est$phi)
predsPart = fore.aruma.wge(CM$part,s = 52, n.ahead = 20)
#forecast Temp
plotts.sample.wge(CM$temp) #freq near .0192 (annual)
CM_52 = artrans.wge(CM$temp, c(rep(0,51),1))
plotts.sample.wge(CM_52) #looks like some low freq?
aic5.wge(CM_52) #picks ARMA(0,0)
aic5.wge(CM_52,type = "bic") #picks ARMA(0,0)
ljung.wge(CM_52)$pval
ljung.wge(CM_52, K = 48)$pval #barely rejects
acf(CM_52,lag.max = 48) # acf looks consistent with white noise
predsTemp = fore.aruma.wge(CM$temp,s = 52, n.ahead = 20)
# Model cmort based on predicted part and temp using MLR with Cor Erros
#assuming data is loaded in dataframe CM
CM$FWeek = as.factor(CM$Week%%52)
ksfit = lm(cmort~temp1+part+Week+FWeek, data = CM)
phi = aic.wge(ksfit$residuals)
# NOTE(review): cbind() coerces the FWeek factor to its integer codes, so the
# "categorical" week enters the ARIMA as a single numeric regressor here.
fit = arima(CM$cmort,order = c(phi$p,0,0), xreg = cbind(CM$temp1, CM$part, CM$Week, CM$FWeek))
AIC(fit) #AIC = 3151
# Check for whiteness of residuals
acf(fit$residuals)
ljung.wge(fit$residuals) # pval = .066
ljung.wge(fit$residuals, K = 48) # pval = .0058
# Lag the forecasted temperature to match the temp1 regressor.
predsTemp$f1 = dplyr::lag(predsTemp$f,1)
#load the forecasted Part and Temp in a data frame
next20 = data.frame(temp1 = predsTemp$f1, part = predsPart$f, Week = seq(509,528,1), FWeek = as.factor(seq(509,528,1)%%52))
#get predictions
predsCMort = predict(fit,newxreg = next20) #creates error because of factor
#predict residuals manually (workaround: forecast the lm trend and the AR
# residual process separately and add them)
plotts.sample.wge(ksfit$residuals)
phi = aic.wge(ksfit$residuals)
resids = fore.arma.wge(ksfit$residuals,phi = phi$phi,n.ahead = 20)
#predict trend manually
preds = predict(ksfit, newdata = next20)
predsFinal = preds + resids$f
#plot next 20 cmort wrt time
# NOTE(review): this plots the bare `cmort` object (astsa's built-in series),
# not CM$cmort -- it only works because library(astsa) is attached; presumably
# CM$cmort was intended.
plot(seq(1,508,1), cmort, type = "l",xlim = c(0,528), ylab = "Cardiac Mortality", main = "20 Week Cardiac Mortality Forecast")
lines(seq(509,528,1), predsFinal, type = "l", col = "red")
#Find ASE Need to forecast last 30 of known series.
# Start at row 2 because temp1 is NA in row 1 after lagging.
CMsmall = CM[2:478,]
ksfit = lm(cmort~temp1+part+Week+FWeek, data = CMsmall)
phi = aic.wge(ksfit$residuals)
fit = arima(CMsmall$cmort,order = c(phi$p,0,0), seasonal = list(order = c(1,0,0), period = 52), xreg = cbind(CMsmall$temp1, CMsmall$part, CMsmall$Week, CMsmall$FWeek))
last30 = data.frame(temp1 = CM$temp_1[479:508], part = CM$part[479:508], Week = seq(479,508,1), FWeek = as.factor(seq(479,508,1)%%52))
#get predictions
predsCMort = predict(fit,newxreg = last30) #Showing Error ... why we have to predict resids manually (next step)
#predict residuals manually
plotts.sample.wge(ksfit$residuals)
phi = aic.wge(ksfit$residuals)
resids = fore.arma.wge(ksfit$residuals,phi = phi$phi,n.ahead = 30)
#predict trend manually
preds = predict(ksfit, newdata = last30)
predsFinal = preds + resids$f
plot(seq(1,508,1), CM$cmort, type = "l",xlim = c(0,528), ylab = "Cardiac Mortality", main = "20 Week Cardiac Mortality Forecast")
lines(seq(479,508,1), predsFinal, type = "l", col = "red")
ASE = mean((CM$cmort[479:508] - predsFinal)^2,na.rm = TRUE)
ASE
#ARIMA 4: categorical variable
#With Lagged Temp and Part
library(dplyr)
#Lag Temperature 1
# ccf suggests cmort responds to temp with a 1-week delay.
ccf(CM$temp,CM$cmort)
CM$temp1 = dplyr::lag(CM$temp,1)
#Lag Particles lag 7 on particles
ccf(CM$part,CM$cmort)
# BUG FIX: the original lagged CM$temp here, so "part7" was actually
# temperature; the comment and the ccf above show particulates were intended.
CM$part7 = dplyr::lag(CM$part,7)
#forecast Particles
# Exogenous-series forecasts: both part and temp are modeled as purely
# seasonal (s = 52) after the 52-week difference looks like white noise.
plotts.sample.wge(CM$part) #freq near .0192 (annual)
CM_52 = artrans.wge(CM$part, c(rep(0,51),1))
plotts.sample.wge(CM_52) #looks like some low freq?
aic5.wge(CM_52) #picks ARMA(2,1) assume stationary
aic5.wge(CM_52,type = "bic") #picks ARMA(0,0)
ljung.wge(CM_52)$pval #FTR Ho
ljung.wge(CM_52, K = 48)$pval #FTR Ho
#Going with white noise despite peak at 0 in Spec D.
#est = est.arma.wge(CM_52, p = 3, q = 2)
#CM_52_AR2_MA1 = artrans.wge(CM_52,est$phi)
predsPart = fore.aruma.wge(CM$part,s = 52, n.ahead = 20)
#forecast Temp
plotts.sample.wge(CM$temp) #freq near .0192 (annual)
CM_52 = artrans.wge(CM$temp, c(rep(0,51),1))
plotts.sample.wge(CM_52) #looks like some low freq?
aic5.wge(CM_52) #picks ARMA(0,0)
aic5.wge(CM_52,type = "bic") #picks ARMA(0,0)
ljung.wge(CM_52)$pval
ljung.wge(CM_52, K = 48)$pval #barely rejects
acf(CM_52,lag.max = 48) # acf looks consistent with white noise
predsTemp = fore.aruma.wge(CM$temp,s = 52, n.ahead = 20)
# Model cmort based on predicted part and temp using MLR with Cor Erros
#assuming data is loaded in dataframe CM
# ARIMA 4 fit: lag-1 temp and lag-7 particulates as regressors.
CM$FWeek = as.factor(CM$Week%%52)
ksfit = lm(cmort~temp1+part7+Week+FWeek, data = CM)
AIC(ksfit)
phi = aic.wge(ksfit$residuals)
# NOTE(review): cbind() coerces FWeek to integer codes (single numeric
# regressor), not to 51 dummy columns.
fit = arima(CM$cmort,order = c(phi$p,0,0), xreg = cbind(CM$temp1, CM$part7, CM$Week, CM$FWeek))
AIC(fit) #AIC = 3151
# Check for whiteness of residuals
acf(fit$residuals)
ljung.wge(fit$residuals) # pval = .066
ljung.wge(fit$residuals, K = 48) # pval = .0058
# Shift the exogenous forecasts to match the lags used in the model.
predsTemp$f1 = dplyr::lag(predsTemp$f,1)
predsPart$f1 = dplyr::lag(predsPart$f,7)
#load the forecasted Part and Temp in a data frame
# BUG FIX: the original referenced `predsPart7$f`, an object that does not
# exist, and named the column `part` although the lm formula uses `part7`.
# Use the lag-7 shifted forecast computed above under the correct name.
next20 = data.frame(temp1 = predsTemp$f1, part7 = predsPart$f1, Week = seq(509,528,1), FWeek = as.factor(seq(509,528,1)%%52))
#get predictions
predsCMort = predict(fit,newxreg = next20) #creates error because of factor
#predict residuals manually
plotts.sample.wge(ksfit$residuals)
phi = aic.wge(ksfit$residuals)
resids = fore.arma.wge(ksfit$residuals,phi = phi$phi,n.ahead = 20)
#predict trend manually
preds = predict(ksfit, newdata = next20)
# Final forecast = lm trend + AR forecast of the correlated errors.
predsFinal = preds + resids$f
#plot next 20 cmort wrt time
# BUG FIX: plot referenced the bare name `cmort` (requires attach(CM));
# reference the column explicitly.
plot(seq(1,508,1), CM$cmort, type = "l",xlim = c(0,528), ylab = "Cardiac Mortality", main = "20 Week Cardiac Mortality Forecast")
lines(seq(509,528,1), predsFinal, type = "l", col = "red")
####### ASE #######
#Find ASE Need to forecast last 30 of known series.
# Holdout: refit ARIMA 4 on weeks 2-478, forecast 479-508, compute ASE.
CMsmall = CM[2:478,]
ksfit = lm(cmort~temp1+part7+Week+FWeek, data = CMsmall)
summary(ksfit)
AIC(ksfit)
phi = aic.wge(ksfit$residuals)
fit = arima(CMsmall$cmort,order = c(phi$p,0,0), seasonal = list(order = c(1,0,0), period = 52),
xreg = cbind(CMsmall$temp1, CMsmall$part7, CMsmall$Week, CMsmall$FWeek))
AIC(fit)
# NOTE(review): the holdout uses observed temp1/part7 values, so this is a
# conditional (known-regressor) ASE, not an ex-ante forecast ASE.
last30 = data.frame(temp1 = CM$temp1[479:508], part7 = CM$part7[479:508], Week = seq(479,508,1), FWeek = as.factor(seq(479,508,1)%%52))
#get predictions
predsCMort = predict(fit,newxreg = last30) #Showing Error ... why we have to predict resids manually (next step)
#predict residuals manually
plotts.sample.wge(ksfit$residuals)
phi = aic.wge(ksfit$residuals)
resids = fore.arma.wge(ksfit$residuals,phi = phi$phi,n.ahead = 30)
#predict trend manually
preds = predict(ksfit, newdata = last30)
predsFinal = preds + resids$f
plot(seq(1,508,1), CM$cmort, type = "l",xlim = c(0,528), ylab = "Cardiac Mortality", main = "20 Week Cardiac Mortality Forecast")
lines(seq(479,508,1), predsFinal, type = "l", col = "red")
ASE = mean((CM$cmort[479:508] - predsFinal)^2,na.rm = TRUE)
ASE
############ VAR MODELS ##########################
#VAR Model 1 Forecasts Seasonally Differenced Data
#Difference all series to make them stationary (assumptoin of VAR)
# Doesn't have to be white... just stationary
library(vars)
CM = read.csv(file.choose(),header = TRUE)
# 52-week (seasonal) difference of each series: d_t = X_t - X_{t-52}.
CM_52 = artrans.wge(CM$cmort,c(rep(0,51),1))
Part_52 = artrans.wge(CM$part,c(rep(0,51),1))
Temp_52 = artrans.wge(CM$temp,c(rep(0,51),1))
#VARSelect on Differenced Data chooses 2
VARselect(cbind(CM_52, Part_52, Temp_52),lag.max = 10, type = "both")
#VAR with p = 2
CMortDiffVAR = VAR(cbind(CM_52, Part_52, Temp_52),type = "both",p = 2)
preds=predict(CMortDiffVAR,n.ahead=20)
#We have predicted differences .... calculate actual cardiac mortalities
# Undifference: X_t = d_t + X_{t-52}.  Forecast horizon is weeks 509-528, so
# the anchors are weeks 457-476.
# BUG FIX: the original used CM$cmort[456:475], one week too early; the
# correct t-52 alignment (cf. the last-52 ASE section, which uses 405:456)
# is 457:476.
startingPoints = CM$cmort[457:476]
# Shift forecast, lower and upper bounds by the same anchors.
CMortForcasts = preds$fcst$CM_52[,1:3] + startingPoints
#Plot
dev.off()
plot(seq(1,508,1), CM$cmort, type = "l",xlim = c(0,528), ylab = "Cardiac Mortality", main = "20 Week Cardiac Mortality Forecast")
lines(seq(509,528,1), as.data.frame(CMortForcasts)$fcst, type = "l", col = "red")
#Find ASE using last 30
# Relies on CMsmall holding the training sample through week 478
# (set earlier in the script); forecasts weeks 479-508.
CM_52 = artrans.wge(CMsmall$cmort,c(rep(0,51),1))
Part_52 = artrans.wge(CMsmall$part,c(rep(0,51),1))
Temp_52 = artrans.wge(CMsmall$temp,c(rep(0,51),1))
VARselect(cbind(CM_52, Part_52, Temp_52),lag.max = 10, type = "both")
CMortDiffVAR = VAR(cbind(CM_52, Part_52, Temp_52),type = "both",p = 2)
preds=predict(CMortDiffVAR,n.ahead=30)
# Undifference: X_t = d_t + X_{t-52}; for weeks 479-508 the anchors are
# weeks 427-456.
# BUG FIX: the original used CM$cmort[428:457] (off by one week).
startingPoints = CM$cmort[427:456]
CMortForcasts = preds$fcst$CM_52[,1:3] + startingPoints
dev.off()
plot(seq(1,508,1), CM$cmort, type = "l",xlim = c(0,508), ylab = "Cardiac Mortality", main = "20 Week Cardiac Mortality Forecast")
lines(seq(479,508,1), CMortForcasts[,1], type = "l", col = "red")
ASE = mean((CM$cmort[479:508] - CMortForcasts[,1])^2)
ASE
#Find ASE using last 52
# Train on weeks 1-456, forecast 457-508; anchors 405:456 are the correct
# t-52 values for the undifferencing step below.
CMsmall = CM[1:456,] # 456 = 508-52
CM_52 = artrans.wge(CMsmall$cmort,c(rep(0,51),1))
Part_52 = artrans.wge(CMsmall$part,c(rep(0,51),1))
Temp_52 = artrans.wge(CMsmall$temp,c(rep(0,51),1))
VARselect(cbind(CM_52, Part_52, Temp_52),lag.max = 10, type = "both")
CMortDiffVAR = VAR(cbind(CM_52, Part_52, Temp_52),type = "both",p = 2)
preds=predict(CMortDiffVAR,n.ahead=52)
startingPoints = CM$cmort[405:456]
CMortForcasts = preds$fcst$CM_52[,1:3] + startingPoints
dev.off()
plot(seq(1,508,1), CM$cmort, type = "l",xlim = c(0,508), ylab = "Cardiac Mortality", main = "20 Week Cardiac Mortality Forecast")
lines(seq(457,508,1), CMortForcasts[,1], type = "l", col = "red")
ASE = mean((CM$cmort[457:508] - CMortForcasts[,1])^2)
ASE
#VAR Model 2 Forecasts Seasonal Dummy
# Instead of seasonal differencing, model the raw series with season = 52
# dummy variables inside VAR().
CM = read.csv(file.choose(),header = TRUE)
#VARSelect on Seasonal Data chooses 2
VARselect(cbind(CM$cmort, CM$part, CM$temp),lag.max = 10, season = 52, type = "both")
#VAR with p = 2
# cbind() without names labels the series y1, y2, y3 -- hence preds$fcst$y1.
CMortVAR = VAR(cbind(CM$cmort, CM$part, CM$temp),season = 52, type = "both",p = 2)
preds=predict(CMortVAR,n.ahead=20)
#Plot
plot(seq(1,508,1), CM$cmort, type = "l",xlim = c(0,528), ylab = "Cardiac Mortality", main = "20 Week Cardiac Mortality Forecast")
lines(seq(509,528,1), preds$fcst$y1[,1], type = "l", col = "red")
#Find ASE using last 30
CMsmall = CM[1:478,]
VARselect(cbind(CMsmall$cmort, CMsmall$part, CMsmall$temp),lag.max = 10, season = 52, type = "both")
CMortVAR = VAR(cbind(CMsmall$cmort, CMsmall$part, CMsmall$temp),season = 52, type = "both",p = 2)
preds=predict(CMortVAR,n.ahead=30)
#Plot
plot(seq(1,508,1), CM$cmort, type = "l",xlim = c(0,508), ylab = "Cardiac Mortality", main = "20 Week Cardiac Mortality Forecast")
lines(seq(479,508,1), preds$fcst$y1[,1], type = "l", col = "red")
ASE = mean((CM$cmort[479:508] - preds$fcst$y1[,1])^2)
ASE
#Find ASE using last 52
CMsmall = CM[1:456,] # 456 = 508-52
VARselect(cbind(CMsmall$cmort[2:456], CMsmall$part[2:456], CMsmall$temp[2:456]),lag.max = 10, season = 52, type = "both")
CMortVAR = VAR(cbind(CMsmall$cmort[2:456], CMsmall$part[2:456], CMsmall$temp[2:456]),season = 52, type = "both",p = 2)
preds=predict(CMortVAR,n.ahead=52)
#Plot
plot(seq(1,508,1), CM$cmort, type = "l",xlim = c(0,508), ylab = "Cardiac Mortality", main = "20 Week Cardiac Mortality Forecast")
lines(seq(457,508,1), preds$fcst$y1[,1], type = "l", col = "red")
ASE = mean((CM$cmort[457:508] - preds$fcst$y1[,1])^2)
ASE
#VAR Model 3 seasonal with Lag 1 Temp
CM = read.csv(file.choose(),header = TRUE)
CM$temp_1 = dplyr::lag(CM$temp,1)
ggpairs(CM[,-7])
# Fit on the full sample; row 1 is dropped only because temp_1 is NA there.
# BUG FIX: the original subset [2:479] trained only through week 479 yet
# plotted the 20-step forecast as weeks 509-528; use [2:508] so the forecast
# origin is the last observed week (508).
VARselect(cbind(CM$cmort[2:508], CM$part[2:508], CM$temp_1[2:508]),lag.max = 10, season = 52, type = "both")
#VAR with p = 2
CMortVAR = VAR(cbind(CM$cmort[2:508], CM$part[2:508], CM$temp_1[2:508]),season = 52, type = "both",p = 2)
preds=predict(CMortVAR,n.ahead=20)
#Plot
plot(seq(1,508,1), CM$cmort, type = "l",xlim = c(0,528), ylab = "Cardiac Mortality", main = "20 Week Cardiac Mortality Forecast")
lines(seq(509,528,1), preds$fcst$y1[,1], type = "l", col = "red")
#Find ASE using last 30
# Train on weeks 2-478 (row 1 dropped for the lag-1 NA), forecast 479-508.
CMsmall = CM[1:479,]
VARselect(cbind(CMsmall$cmort[2:478], CMsmall$part[2:478], CMsmall$temp_1[2:478]),lag.max = 10, season = 52, type = "both")
CMortVAR = VAR(cbind(CMsmall$cmort[2:478], CMsmall$part[2:478], CMsmall$temp_1[2:478]),season = 52, type = "both",p = 2)
preds=predict(CMortVAR,n.ahead=30)
#Plot
plot(seq(1,508,1), CM$cmort, type = "l",xlim = c(0,508), ylab = "Cardiac Mortality", main = "20 Week Cardiac Mortality Forecast")
lines(seq(479,508,1), preds$fcst$y1[,1], type = "l", col = "red")
ASE = mean((CM$cmort[479:508] - preds$fcst$y1[,1])^2)
ASE
#Find ASE using last 52
# Train on weeks 2-456, forecast 457-508.
CMsmall = CM[1:456,] # 456 = 508-52
VARselect(cbind(CMsmall$cmort[2:456], CMsmall$part[2:456], CMsmall$temp_1[2:456]),lag.max = 10, season = 52, type = "both")
CMortVAR = VAR(cbind(CMsmall$cmort[2:456], CMsmall$part[2:456], CMsmall$temp_1[2:456]),season = 52, type = "both",p = 2)
preds=predict(CMortVAR,n.ahead=52)
#Plot
plot(seq(1,508,1), CM$cmort, type = "l",xlim = c(0,508), ylab = "Cardiac Mortality", main = "20 Week Cardiac Mortality Forecast")
lines(seq(457,508,1), preds$fcst$y1[,1], type = "l", col = "red")
ASE = mean((CM$cmort[457:508] - preds$fcst$y1[,1])^2)
ASE
#Sandbox 1 s = 52
# Scratch section: repeats the purely seasonal (s = 52) forecasts of part
# and temp used as exogenous inputs below.
#forecast Particles
plotts.sample.wge(CM$part) #freq near .0192 (annual)
CM_52 = artrans.wge(CM$part, c(rep(0,51),1))
plotts.sample.wge(CM_52) #looks like some low freq?
aic5.wge(CM_52) #picks ARMA(2,1) assume stationary
aic5.wge(CM_52,type = "bic") #picks ARMA(0,0)
ljung.wge(CM_52)$pval #FTR Ho
ljung.wge(CM_52, K = 48)$pval #FTR Ho
#Going with white noise despite peak at 0 in Spec D.
#est = est.arma.wge(CM_52, p = 3, q = 2)
#CM_52_AR2_MA1 = artrans.wge(CM_52,est$phi)
predsPart = fore.aruma.wge(CM$part,s = 52, n.ahead = 20)
#forecast Temp
plotts.sample.wge(CM$temp) #freq near .0192 (annual)
CM_52 = artrans.wge(CM$temp, c(rep(0,51),1))
plotts.sample.wge(CM_52) #looks like some low freq?
aic5.wge(CM_52) #picks ARMA(0,0)
aic5.wge(CM_52,type = "bic") #picks ARMA(0,0)
ljung.wge(CM_52)$pval
ljung.wge(CM_52, K = 48)$pval #barely rejects
acf(CM_52,lag.max = 48) # acf looks consistent with white noise
predsTemp = fore.aruma.wge(CM$temp,s = 52, n.ahead = 20)
# Model cmort based on predicted part and temp using MLR with Cor Erros
#assuming data is loaded in dataframe CM
# Sandbox: seasonal (0,1,0)_52 ARIMA with temp/part/Week as regressors.
ksfit = lm(cmort~temp+part+Week, data = CM)
phi = aic.wge(ksfit$residuals)
# CLEANUP: removed attach(CM) -- attaching masks the data frame's columns in
# the global search path and is a well-known source of bugs; reference the
# columns explicitly instead (behavior unchanged).
fit = arima(CM$cmort,order = c(phi$p,0,0), seasonal = list(order = c(0,1,0), period = 52),xreg = cbind(CM$temp, CM$part, CM$Week))
AIC(fit)
# Check for whiteness of residuals
acf(fit$residuals)
ljung.wge(fit$residuals) # pval = .059
ljung.wge(fit$residuals, K = 48) # pval = .004
#load the forecasted Part and Temp in a data frame
next20 = data.frame(temp = predsTemp$f, part = predsPart$f, Week = seq(509,528,1))
#get predictions
predsCMort = predict(fit,newxreg = next20)
#plot next 20 cmort wrt time
plot(seq(1,508,1), CM$cmort, type = "l",xlim = c(0,528), ylab = "Cardiac Mortality", main = "20 Week Cardiac Mortality Forecast")
lines(seq(509,528,1), predsCMort$pred, type = "l", col = "red")
# Univariate
# Baseline: seasonal-naive (s = 52) forecasts of cmort alone; lastn = TRUE
# forecasts the final n.ahead observations for ASE comparison.
foreCmort = fore.aruma.wge(CM$cmort,s = 52, n.ahead = 30, lastn = TRUE, limits = FALSE)
ASE = mean((CM$cmort[479:508] - foreCmort$f)^2)
ASE
# Univariate ASE 50 back
foreCmort = fore.aruma.wge(CM$cmort,s = 52, n.ahead = 52, lastn = TRUE, limits = FALSE)
ASE = mean((CM$cmort[457:508] - foreCmort$f)^2)
ASE
#Show the effect of "type = {"const", trend", "both", "none")
# Demo on the vars package's built-in Canada data, then the same four
# deterministic-term options with temp as an exogenous regressor.
data(Canada)
VAR(Canada, p = 2, type = "const")
VAR(Canada, p = 2, type = "trend")
VAR(Canada, p = 2, type = "both")
VAR(Canada, p = 2, type = "none")
VAR(cbind(CM_52, Part_52),type = "const",p = 2, exogen = Temp_52)
VAR(cbind(CM_52, Part_52),type = "trend",p = 2, exogen = Temp_52)
VAR(cbind(CM_52, Part_52),type = "both",p = 2, exogen = Temp_52)
VAR(cbind(CM_52, Part_52),type = "none",p = 2, exogen = Temp_52)
| /Code/week12_Cardiac Mortality Forecast.R | no_license | anhnguyendepocen/DS6373_TimeSeries | R | false | false | 24,662 | r | # CMort LA Pollution and Temperature Study
# ARIMA 1 MLR with Cor Errors (no lag, no seasonl categorical variable)
#EDA
library(tidyverse)
library(GGally)
library(astsa)
library(tswge)
CM = read.csv(file.choose(),header = TRUE)
head(CM)
ggpairs(CM[2:4]) #matrix of scatter plots
#forecast Particles
plotts.sample.wge(CM$part) #freq near .0192 (annual)
CM_52 = artrans.wge(CM$part, c(rep(0,51),1))
plotts.sample.wge(CM_52) #looks like some low freq?
aic5.wge(CM_52) #picks ARMA(2,1) assume stationary
aic5.wge(CM_52,type = "bic") #picks ARMA(0,0)
ljung.wge(CM_52)$pval #FTR Ho
ljung.wge(CM_52, K = 48)$pval #FTR Ho
#Going with white noise despite peak at 0 in Spec D.
#est = est.arma.wge(CM_52, p = 3, q = 2)
#CM_52_AR2_MA1 = artrans.wge(CM_52,est$phi)
predsPart = fore.aruma.wge(CM$part,s = 52, n.ahead = 20)
plot(predsPart$f, type = "l")
# BUG FIX: this plot shows particulates (CM$part), but the original y-axis
# label said "Temperature" (copy-paste from the temp plot below).
plot(seq(1,508,1), CM$part, type = "l",xlim = c(0,528), ylab = "Particulates", main = "20 Week Particulate Forecast")
lines(seq(509,528,1), predsPart$f, type = "l", col = "red")
#forecast Temp
plotts.sample.wge(CM$temp) #freq near .0192 (annual)
CM_52 = artrans.wge(CM$temp, c(rep(0,51),1))
plotts.sample.wge(CM_52) #looks like some low freq?
aic5.wge(CM_52) #picks ARMA(0,0)
aic5.wge(CM_52,type = "bic") #picks ARMA(0,0)
ljung.wge(CM_52)$pval
ljung.wge(CM_52, K = 48)$pval #barely rejects
acf(CM_52,lag.max = 48) # acf looks consistent with white noise
predsTemp = fore.aruma.wge(CM$temp,s = 52, n.ahead = 20)
plot(predsTemp$f, type = "l")
plot(seq(1,508,1), CM$temp, type = "l",xlim = c(0,528), ylab = "Temperature", main = "20 Week Temperature Forecast")
lines(seq(509,528,1), predsTemp$f, type = "l", col = "red")
# Model cmort based on predicted part and temp using MLR with Cor Erros
#assuming data is loaded in dataframe CM
# ARIMA 1: contemporaneous temp/part plus a linear Week trend, with an AR
# error model chosen by AIC on the lm residuals.
ksfit = lm(cmort~temp+part+Week, data = CM)
phi = aic.wge(ksfit$residuals) #AR(2)
fit = arima(CM$cmort, order = c(phi$p,0,phi$q), seasonal = list(order = c(1,0,0), period = 52), xreg = cbind(CM$temp, CM$part, CM$Week))
# Check for whiteness of residuals
acf(fit$residuals)
ljung.wge(fit$residuals) # pval = .059
ljung.wge(fit$residuals, K = 48) # pval = .004
#load the forecasted Part and Temp in a data frame
next20 = data.frame(temp = predsTemp$f, part = predsPart$f, Week = seq(509,528,1))
#get predictions
predsCMort = predict(fit,newxreg = next20)
#plot next 20 cmort wrt time
plot(seq(1,508,1), CM$cmort, type = "l",xlim = c(0,528), ylab = "Cardiac Mortality", main = "20 Week Cardiac Mortality Forecast")
lines(seq(509,528,1), predsCMort$pred, type = "l", col = "red")
#Find ASE Need to forecast last 30 of known series.
CMsmall = CM[1:478,]
ksfit = lm(cmort~temp+part+Week, data = CMsmall)
phi = aic.wge(ksfit$residuals)
fit = arima(CMsmall$cmort,order = c(phi$p,0,0), seasonal = list(order = c(1,0,0), period = 52), xreg = cbind(CMsmall$temp, CMsmall$part, CMsmall$Week))
# NOTE(review): third column is named CMWeek (likely a typo for Week); this
# is harmless here because predict.Arima uses newxreg positionally as a
# matrix, but confirm the intended name.
last30 = data.frame(temp = CM$temp[479:508], part = CM$part[479:508], CMWeek = seq(479,508,1))
#get predictions
predsCMort = predict(fit,newxreg = last30)
ASE = mean((CM$cmort[479:508] - predsCMort$pred)^2)
ASE
plot(seq(1,508,1), CM$cmort, type = "l",xlim = c(0,528), ylab = "Cardiac Mortality", main = "Last 30 Week Cardiac Mortality Forecast")
lines(seq(479,508,1), predsCMort$pred, type = "l", col = "red")
#Find ASE Need to forecast last 30 of known series. With temp lag 1
# BUG FIX: the original regressed cmort on temp_1 in the lm but then passed
# CMsmall$temp (unlagged) to arima's xreg, and the lag-induced NA in row 1
# would corrupt the xreg matrix.  Build the lag once on CM, drop the NA row
# from the training sample, and use temp_1 consistently.
CM$temp_1 = dplyr::lag(CM$temp,1)
CMsmall = CM[2:478,]  # weeks 2-478; row 1 dropped because temp_1[1] is NA
ksfit = lm(cmort~temp_1+part+Week, data = CMsmall)
phi = aic.wge(ksfit$residuals)
fit = arima(CMsmall$cmort,order = c(phi$p,0,0), seasonal = list(order = c(1,0,0), period = 52), xreg = cbind(CMsmall$temp_1, CMsmall$part, CMsmall$Week))
# Holdout regressors for weeks 479-508 (observed values, lag applied).
last30 = data.frame(temp_1 = CM$temp_1[479:508], part = CM$part[479:508], Week = seq(479,508,1))
#get predictions
predsCMort = predict(fit,newxreg = last30)
plot(seq(1,508,1), CM$cmort, type = "l",xlim = c(0,528), ylab = "Cardiac Mortality", main = "Last 30 Week Cardiac Mortality Forecast")
lines(seq(479,508,1), predsCMort$pred, type = "l", col = "red")
ASE = mean((CM$cmort[479:508] - predsCMort$pred)^2)
ASE
######################################################################
#ARIMA 2: attempt at categorical variable for week but arima takes only continuous variables
# This section expands the FWeek factor into explicit 0/1 dummy columns with
# model.matrix so arima's numeric xreg can represent the seasonal categories.
CM = read.csv(file.choose(),header = TRUE)
head(CM)
#forecast Particles
px = plotts.sample.wge(CM$part) #freq near .0192 (annual)
CM_52 = artrans.wge(CM$part, c(rep(0,51),1))
px = plotts.sample.wge(CM_52) #looks like some low freq?
aic5.wge(CM_52) #picks ARMA(2,1) assume stationary
aic5.wge(CM_52,type = "bic") #picks ARMA(0,0)
ljung.wge(CM_52)$pval #FTR Ho
ljung.wge(CM_52, K = 48)$pval #FTR Ho
#Going with white noise despite peak at 0 in Spec D.
#est = est.arma.wge(CM_52, p = 3, q = 2)
#CM_52_AR2_MA1 = artrans.wge(CM_52,est$phi)
predsPart = fore.aruma.wge(CM$part,s = 52, n.ahead = 20)
#forecast Temp
px = plotts.sample.wge(CM$temp) #freq near .0192 (annual)
CM_52 = artrans.wge(CM$temp, c(rep(0,51),1))
px = plotts.sample.wge(CM_52) #looks like some low freq?
aic5.wge(CM_52) #picks ARMA(0,0)
aic5.wge(CM_52,type = "bic") #picks ARMA(0,0)
ljung.wge(CM_52)$pval
ljung.wge(CM_52, K = 48)$pval #barely rejects
acf(CM_52,lag.max = 48) # acf looks consistent with white noise
predsTemp = fore.aruma.wge(CM$temp,s = 52, n.ahead = 20)
# Model cmort based on predicted part and temp using MLR with Cor Erros
#assuming data is loaded in dataframe CM
CM$FWeek = as.factor(CM$Week%%52)
form = ~ . # Since we are using all variables in the dataframe
form = ~ cmort+temp+part+Week+FWeek # or explicitly add variables to use
# Expand FWeek into dummy columns; drop the intercept column so arima can
# add its own mean term.
CMexpanded = model.matrix(form, data = CM) %>%
as_tibble() %>%
dplyr::select(-"(Intercept)")
# colnames(CMexpanded)
ksfit = lm(cmort~., data = CMexpanded)
summary(ksfit)
## Evaluate Residuals
px = tswge::plotts.sample.wge(ksfit$residuals)
## Looks stationary AR(1) ,ay be appropriate
aic_resids = aic.wge(ksfit$residuals) # Picks AR(2)
print(aic_resids)
fit = arima(CMexpanded$cmort,order = c(aic_resids$p,0,aic_resids$q), xreg = CMexpanded %>% dplyr::select(-cmort))
print(fit)
AIC(fit) #AIC = 3088.997
# Check for whiteness of residuals
acf(fit$residuals)
ljung.wge(fit$residuals) # pval = .0808
ljung.wge(fit$residuals, K = 48) # pval = 0.123916
#load the forecasted Part and Temp in a data frame
# factor(..., levels = levels(CM$FWeek)) keeps the full 52-level set so
# model.matrix below produces the same dummy columns as the training data.
next20 = data.frame(temp = predsTemp$f, part = predsPart$f, Week = seq(509,528,1), FWeek = factor(seq(509,528,1)%%52, levels = levels(CM$FWeek)))
form = ~ temp + part + Week + FWeek # Remove the dependent variable 'cmort'
next20expanded = model.matrix(form, data = next20) %>%
as_tibble() %>%
dplyr::select(-"(Intercept)")
# colnames(next20expanded)
#get predictions
predsCMort = predict(fit,newxreg = next20expanded) #creates error because of factor
#plot next 20 cmort wrt time
plot(seq(1,508,1), CM$cmort, type = "l",xlim = c(0,528), ylab = "Cardiac Mortality", main = "20 Week Cardiac Mortality Forecast")
lines(seq(509,528,1), predsCMort$pred, type = "l", col = "red")
#####################################
#predict residuals manually
# Forecast the lm residuals with an AR model and add them to the lm trend.
plotts.sample.wge(ksfit$residuals)
phi = aic.wge(ksfit$residuals)
resids = fore.arma.wge(ksfit$residuals,phi = phi$phi,n.ahead = 20)
#predict trend manually
# BUG FIX: ksfit was fit on the model.matrix-expanded data (CMexpanded), so
# predict() needs the expanded dummy columns; the original passed the raw
# next20 data frame, whose columns do not match the fitted terms.
preds = predict(ksfit, newdata = next20expanded)
predsFinal = preds + resids$f
#plot next 20 cmort wrt time
plot(seq(1,508,1), CM$cmort, type = "l",xlim = c(0,528), ylab = "Cardiac Mortality", main = "20 Week Cardiac Mortality Forecast")
lines(seq(509,528,1), predsFinal, type = "l", col = "red")
#Find ASE Need to forecast last 30 of known series.
# Holdout: refit on weeks 1-478, forecast 479-508, ASE on the holdout.
CMsmall = CM[1:478,]
ksfit = lm(cmort~temp+part+Week+FWeek, data = CMsmall)
phi = aic.wge(ksfit$residuals)
# NOTE(review): cbind() coerces FWeek to integer codes here (one numeric
# column), unlike the model.matrix dummy expansion used above.
fit = arima(CMsmall$cmort,order = c(phi$p,0,0), seasonal = list(order = c(1,0,0), period = 52), xreg = cbind(CMsmall$temp, CMsmall$part, CMsmall$Week, CMsmall$FWeek))
AIC(fit) #AIC = 2972
# This is actually cheating. We should forecast the temp and part as before.
last30 = data.frame(temp = CM$temp[479:508], part = CM$part[479:508], Week = seq(479,508,1), FWeek = as.factor(seq(479,508,1)%%52))
#get predictions
predsCMort = predict(fit,newxreg = last30)
#predict residuals manually
plotts.sample.wge(ksfit$residuals)
phi = aic.wge(ksfit$residuals)
resids = fore.arma.wge(ksfit$residuals,phi = phi$phi,n.ahead = 30)
#predict trend manually
preds = predict(ksfit, newdata = last30)
predsFinal = preds + resids$f
plot(seq(1,508,1), CM$cmort, type = "l",xlim = c(0,528), ylab = "Cardiac Mortality", main = "20 Week Cardiac Mortality Forecast")
lines(seq(479,508,1), predsFinal, type = "l", col = "red")
ASE = mean((CM$cmort[479:508] - predsFinal)^2,na.rm = TRUE)
ASE
#ARIMA 3: categorical variable
#With Lagged Temp
library(dplyr)
#Lag Temperature 1
# ccf suggests cmort responds to temperature with a 1-week delay.
ccf(CM$temp,CM$cmort)
CM$temp1 = dplyr::lag(CM$temp,1)
#forecast Particles
plotts.sample.wge(CM$part) #freq near .0192 (annual)
CM_52 = artrans.wge(CM$part, c(rep(0,51),1))
plotts.sample.wge(CM_52) #looks like some low freq?
aic5.wge(CM_52) #picks ARMA(2,1) assume stationary
aic5.wge(CM_52,type = "bic") #picks ARMA(0,0)
ljung.wge(CM_52)$pval #FTR Ho
ljung.wge(CM_52, K = 48)$pval #FTR Ho
#Going with white noise despite peak at 0 in Spec D.
#est = est.arma.wge(CM_52, p = 3, q = 2)
#CM_52_AR2_MA1 = artrans.wge(CM_52,est$phi)
predsPart = fore.aruma.wge(CM$part,s = 52, n.ahead = 20)
#forecast Temp
plotts.sample.wge(CM$temp) #freq near .0192 (annual)
CM_52 = artrans.wge(CM$temp, c(rep(0,51),1))
plotts.sample.wge(CM_52) #looks like some low freq?
aic5.wge(CM_52) #picks ARMA(0,0)
aic5.wge(CM_52,type = "bic") #picks ARMA(0,0)
ljung.wge(CM_52)$pval
ljung.wge(CM_52, K = 48)$pval #barely rejects
acf(CM_52,lag.max = 48) # acf looks consistent with white noise
predsTemp = fore.aruma.wge(CM$temp,s = 52, n.ahead = 20)
# Model cmort based on predicted part and temp using MLR with Cor Erros
#assuming data is loaded in dataframe CM
# ARIMA 3 fit: lag-1 temp, part, Week trend, weekly dummies in the lm;
# AR error order chosen by AIC on the lm residuals.
CM$FWeek = as.factor(CM$Week%%52)
ksfit = lm(cmort~temp1+part+Week+FWeek, data = CM)
phi = aic.wge(ksfit$residuals)
# NOTE(review): cbind() coerces FWeek to integer codes (single numeric
# regressor), not dummy columns.
fit = arima(CM$cmort,order = c(phi$p,0,0), xreg = cbind(CM$temp1, CM$part, CM$Week, CM$FWeek))
AIC(fit) #AIC = 3151
# Check for whiteness of residuals
acf(fit$residuals)
ljung.wge(fit$residuals) # pval = .066
ljung.wge(fit$residuals, K = 48) # pval = .0058
# Forecast cmort for weeks 509-528 using the seasonal-naive temp/part
# forecasts, a manual AR forecast of the lm residuals, and the lm trend.
# NOTE(review): lag() puts an NA in the first element of temp1, so the first
# trend prediction is NA -- confirm whether week 509 should be dropped.
predsTemp$f1 = dplyr::lag(predsTemp$f,1)
#load the forecasted Part and Temp in a data frame
next20 = data.frame(temp1 = predsTemp$f1, part = predsPart$f, Week = seq(509,528,1), FWeek = as.factor(seq(509,528,1)%%52))
#get predictions
predsCMort = predict(fit,newxreg = next20) #creates error because of factor
#predict residuals manually
plotts.sample.wge(ksfit$residuals)
phi = aic.wge(ksfit$residuals)
resids = fore.arma.wge(ksfit$residuals,phi = phi$phi,n.ahead = 20)
#predict trend manually
preds = predict(ksfit, newdata = next20)
# Final forecast = deterministic trend + forecast of the correlated errors.
predsFinal = preds + resids$f
#plot next 20 cmort wrt time
# BUG FIX: the original plotted the bare name `cmort` (requires attach(CM));
# reference the column explicitly.
plot(seq(1,508,1), CM$cmort, type = "l",xlim = c(0,528), ylab = "Cardiac Mortality", main = "20 Week Cardiac Mortality Forecast")
lines(seq(509,528,1), predsFinal, type = "l", col = "red")
#Find ASE Need to forecast last 30 of known series.
# Holdout: refit on weeks 2-478 (row 1 dropped for the lag-1 NA), forecast
# weeks 479-508 as trend + AR residual forecast, then compute ASE.
CMsmall = CM[2:478,]
ksfit = lm(cmort~temp1+part+Week+FWeek, data = CMsmall)
phi = aic.wge(ksfit$residuals)
fit = arima(CMsmall$cmort,order = c(phi$p,0,0), seasonal = list(order = c(1,0,0), period = 52), xreg = cbind(CMsmall$temp1, CMsmall$part, CMsmall$Week, CMsmall$FWeek))
# Holdout uses the observed regressor values (conditional ASE).
last30 = data.frame(temp1 = CM$temp1[479:508], part = CM$part[479:508], Week = seq(479,508,1), FWeek = as.factor(seq(479,508,1)%%52))
#get predictions
predsCMort = predict(fit,newxreg = last30) #Showing Error ... why we have to predict resids manually (next step)
#predict residuals manually
plotts.sample.wge(ksfit$residuals)
phi = aic.wge(ksfit$residuals)
resids = fore.arma.wge(ksfit$residuals,phi = phi$phi,n.ahead = 30)
#predict trend manually
preds = predict(ksfit, newdata = last30)
predsFinal = preds + resids$f
plot(seq(1,508,1), CM$cmort, type = "l",xlim = c(0,528), ylab = "Cardiac Mortality", main = "20 Week Cardiac Mortality Forecast")
lines(seq(479,508,1), predsFinal, type = "l", col = "red")
ASE = mean((CM$cmort[479:508] - predsFinal)^2,na.rm = TRUE)
ASE
#ARIMA 4: categorical variable
#With Lagged Temp and Part
library(dplyr)
#Lag Temperature 1
# ccf suggests cmort responds to temp with a 1-week delay.
ccf(CM$temp,CM$cmort)
CM$temp1 = dplyr::lag(CM$temp,1)
#Lag Particles lag 7 on particles
ccf(CM$part,CM$cmort)
# BUG FIX: the original lagged CM$temp here, so "part7" was actually
# temperature; the comment and the ccf above show particulates were intended.
CM$part7 = dplyr::lag(CM$part,7)
#forecast Particles
# Seasonal-naive (s = 52) forecasts of the exogenous series, reused below.
plotts.sample.wge(CM$part) #freq near .0192 (annual)
CM_52 = artrans.wge(CM$part, c(rep(0,51),1))
plotts.sample.wge(CM_52) #looks like some low freq?
aic5.wge(CM_52) #picks ARMA(2,1) assume stationary
aic5.wge(CM_52,type = "bic") #picks ARMA(0,0)
ljung.wge(CM_52)$pval #FTR Ho
ljung.wge(CM_52, K = 48)$pval #FTR Ho
#Going with white noise despite peak at 0 in Spec D.
#est = est.arma.wge(CM_52, p = 3, q = 2)
#CM_52_AR2_MA1 = artrans.wge(CM_52,est$phi)
predsPart = fore.aruma.wge(CM$part,s = 52, n.ahead = 20)
#forecast Temp
plotts.sample.wge(CM$temp) #freq near .0192 (annual)
CM_52 = artrans.wge(CM$temp, c(rep(0,51),1))
plotts.sample.wge(CM_52) #looks like some low freq?
aic5.wge(CM_52) #picks ARMA(0,0)
aic5.wge(CM_52,type = "bic") #picks ARMA(0,0)
ljung.wge(CM_52)$pval
ljung.wge(CM_52, K = 48)$pval #barely rejects
acf(CM_52,lag.max = 48) # acf looks consistent with white noise
predsTemp = fore.aruma.wge(CM$temp,s = 52, n.ahead = 20)
# Model cmort based on predicted part and temp using MLR with Cor Erros
#assuming data is loaded in dataframe CM
# ARIMA 4 fit: lag-1 temp and lag-7 particulates as regressors.
CM$FWeek = as.factor(CM$Week%%52)
ksfit = lm(cmort~temp1+part7+Week+FWeek, data = CM)
AIC(ksfit)
phi = aic.wge(ksfit$residuals)
# NOTE(review): cbind() coerces FWeek to integer codes, not dummy columns.
fit = arima(CM$cmort,order = c(phi$p,0,0), xreg = cbind(CM$temp1, CM$part7, CM$Week, CM$FWeek))
AIC(fit) #AIC = 3151
# Check for whiteness of residuals
acf(fit$residuals)
ljung.wge(fit$residuals) # pval = .066
ljung.wge(fit$residuals, K = 48) # pval = .0058
# Shift the exogenous forecasts to match the lags used in the model.
predsTemp$f1 = dplyr::lag(predsTemp$f,1)
predsPart$f1 = dplyr::lag(predsPart$f,7)
#load the forecasted Part and Temp in a data frame
# BUG FIX: the original referenced `predsPart7$f` (undefined object) and
# named the column `part` although the lm formula uses `part7`; use the
# lag-7 shifted forecast built above under the correct name.
next20 = data.frame(temp1 = predsTemp$f1, part7 = predsPart$f1, Week = seq(509,528,1), FWeek = as.factor(seq(509,528,1)%%52))
#get predictions
predsCMort = predict(fit,newxreg = next20) #creates error because of factor
#predict residuals manually
plotts.sample.wge(ksfit$residuals)
phi = aic.wge(ksfit$residuals)
resids = fore.arma.wge(ksfit$residuals,phi = phi$phi,n.ahead = 20)
#predict trend manually
preds = predict(ksfit, newdata = next20)
# Final forecast = lm trend + AR forecast of the correlated errors.
predsFinal = preds + resids$f
#plot next 20 cmort wrt time
# BUG FIX: plot referenced the bare name `cmort` (requires attach(CM)).
plot(seq(1,508,1), CM$cmort, type = "l",xlim = c(0,528), ylab = "Cardiac Mortality", main = "20 Week Cardiac Mortality Forecast")
lines(seq(509,528,1), predsFinal, type = "l", col = "red")
####### ASE #######
#Find ASE Need to forecast last 30 of known series.
# Holdout: refit ARIMA 4 on weeks 2-478, forecast 479-508, compute ASE.
CMsmall = CM[2:478,]
ksfit = lm(cmort~temp1+part7+Week+FWeek, data = CMsmall)
summary(ksfit)
AIC(ksfit)
phi = aic.wge(ksfit$residuals)
fit = arima(CMsmall$cmort,order = c(phi$p,0,0), seasonal = list(order = c(1,0,0), period = 52),
xreg = cbind(CMsmall$temp1, CMsmall$part7, CMsmall$Week, CMsmall$FWeek))
AIC(fit)
# Holdout uses observed temp1/part7 values (conditional ASE).
last30 = data.frame(temp1 = CM$temp1[479:508], part7 = CM$part7[479:508], Week = seq(479,508,1), FWeek = as.factor(seq(479,508,1)%%52))
#get predictions
predsCMort = predict(fit,newxreg = last30) #Showing Error ... why we have to predict resids manually (next step)
#predict residuals manually
plotts.sample.wge(ksfit$residuals)
phi = aic.wge(ksfit$residuals)
resids = fore.arma.wge(ksfit$residuals,phi = phi$phi,n.ahead = 30)
#predict trend manually
preds = predict(ksfit, newdata = last30)
predsFinal = preds + resids$f
plot(seq(1,508,1), CM$cmort, type = "l",xlim = c(0,528), ylab = "Cardiac Mortality", main = "20 Week Cardiac Mortality Forecast")
lines(seq(479,508,1), predsFinal, type = "l", col = "red")
ASE = mean((CM$cmort[479:508] - predsFinal)^2,na.rm = TRUE)
ASE
############ VAR MODELS ##########################
#VAR Model 1 Forecasts Seasonally Differenced Data
#Difference all series to make them stationary (assumptoin of VAR)
# Doesn't have to be white... just stationary
library(vars)
CM = read.csv(file.choose(),header = TRUE)
# 52-week (seasonal) difference of each series: d_t = X_t - X_{t-52}.
CM_52 = artrans.wge(CM$cmort,c(rep(0,51),1))
Part_52 = artrans.wge(CM$part,c(rep(0,51),1))
Temp_52 = artrans.wge(CM$temp,c(rep(0,51),1))
#VARSelect on Differenced Data chooses 2
VARselect(cbind(CM_52, Part_52, Temp_52),lag.max = 10, type = "both")
#VAR with p = 2
CMortDiffVAR = VAR(cbind(CM_52, Part_52, Temp_52),type = "both",p = 2)
preds=predict(CMortDiffVAR,n.ahead=20)
#We have predicted differences .... calculate actual cardiac mortalities
# Undifference: X_t = d_t + X_{t-52}.  Forecast horizon is weeks 509-528, so
# the anchors are weeks 457-476.
# BUG FIX: the original used CM$cmort[456:475], one week too early; compare
# the last-52 ASE section, which correctly anchors at 405:456.
startingPoints = CM$cmort[457:476]
# Shift forecast, lower and upper bounds by the same anchors.
CMortForcasts = preds$fcst$CM_52[,1:3] + startingPoints
#Plot
dev.off()
plot(seq(1,508,1), CM$cmort, type = "l",xlim = c(0,528), ylab = "Cardiac Mortality", main = "20 Week Cardiac Mortality Forecast")
lines(seq(509,528,1), as.data.frame(CMortForcasts)$fcst, type = "l", col = "red")
#Find ASE using last 30
# Relies on CMsmall holding the training sample through week 478
# (set earlier in the script); forecasts weeks 479-508.
CM_52 = artrans.wge(CMsmall$cmort,c(rep(0,51),1))
Part_52 = artrans.wge(CMsmall$part,c(rep(0,51),1))
Temp_52 = artrans.wge(CMsmall$temp,c(rep(0,51),1))
VARselect(cbind(CM_52, Part_52, Temp_52),lag.max = 10, type = "both")
CMortDiffVAR = VAR(cbind(CM_52, Part_52, Temp_52),type = "both",p = 2)
preds=predict(CMortDiffVAR,n.ahead=30)
# Undifference: X_t = d_t + X_{t-52}; for weeks 479-508 the anchors are
# weeks 427-456.
# BUG FIX: the original used CM$cmort[428:457] (off by one week).
startingPoints = CM$cmort[427:456]
CMortForcasts = preds$fcst$CM_52[,1:3] + startingPoints
dev.off()
plot(seq(1,508,1), CM$cmort, type = "l",xlim = c(0,508), ylab = "Cardiac Mortality", main = "20 Week Cardiac Mortality Forecast")
lines(seq(479,508,1), CMortForcasts[,1], type = "l", col = "red")
ASE = mean((CM$cmort[479:508] - CMortForcasts[,1])^2)
ASE
#Find ASE using last 52
# Train on weeks 1-456, forecast 457-508; anchors 405:456 are the correct
# t-52 values for the undifferencing step below.
CMsmall = CM[1:456,] # 456 = 508-52
CM_52 = artrans.wge(CMsmall$cmort,c(rep(0,51),1))
Part_52 = artrans.wge(CMsmall$part,c(rep(0,51),1))
Temp_52 = artrans.wge(CMsmall$temp,c(rep(0,51),1))
VARselect(cbind(CM_52, Part_52, Temp_52),lag.max = 10, type = "both")
CMortDiffVAR = VAR(cbind(CM_52, Part_52, Temp_52),type = "both",p = 2)
preds=predict(CMortDiffVAR,n.ahead=52)
startingPoints = CM$cmort[405:456]
CMortForcasts = preds$fcst$CM_52[,1:3] + startingPoints
dev.off()
plot(seq(1,508,1), CM$cmort, type = "l",xlim = c(0,508), ylab = "Cardiac Mortality", main = "20 Week Cardiac Mortality Forecast")
lines(seq(457,508,1), CMortForcasts[,1], type = "l", col = "red")
ASE = mean((CM$cmort[457:508] - CMortForcasts[,1])^2)
ASE
#VAR Model 2 Forecasts Seasonal Dummy
# Model the raw series with season = 52 dummies inside VAR() rather than
# seasonally differencing.
CM = read.csv(file.choose(),header = TRUE)
#VARSelect on Seasonal Data chooses 2
VARselect(cbind(CM$cmort, CM$part, CM$temp),lag.max = 10, season = 52, type = "both")
#VAR with p = 2
# cbind() without names labels the series y1, y2, y3 -- hence preds$fcst$y1.
CMortVAR = VAR(cbind(CM$cmort, CM$part, CM$temp),season = 52, type = "both",p = 2)
preds=predict(CMortVAR,n.ahead=20)
#Plot
plot(seq(1,508,1), CM$cmort, type = "l",xlim = c(0,528), ylab = "Cardiac Mortality", main = "20 Week Cardiac Mortality Forecast")
lines(seq(509,528,1), preds$fcst$y1[,1], type = "l", col = "red")
#Find ASE using last 30
CMsmall = CM[1:478,]
VARselect(cbind(CMsmall$cmort, CMsmall$part, CMsmall$temp),lag.max = 10, season = 52, type = "both")
CMortVAR = VAR(cbind(CMsmall$cmort, CMsmall$part, CMsmall$temp),season = 52, type = "both",p = 2)
preds=predict(CMortVAR,n.ahead=30)
#Plot
plot(seq(1,508,1), CM$cmort, type = "l",xlim = c(0,508), ylab = "Cardiac Mortality", main = "20 Week Cardiac Mortality Forecast")
lines(seq(479,508,1), preds$fcst$y1[,1], type = "l", col = "red")
ASE = mean((CM$cmort[479:508] - preds$fcst$y1[,1])^2)
ASE
#Find ASE using last 52
CMsmall = CM[1:456,] # 456 = 508-52
VARselect(cbind(CMsmall$cmort[2:456], CMsmall$part[2:456], CMsmall$temp[2:456]),lag.max = 10, season = 52, type = "both")
CMortVAR = VAR(cbind(CMsmall$cmort[2:456], CMsmall$part[2:456], CMsmall$temp[2:456]),season = 52, type = "both",p = 2)
preds=predict(CMortVAR,n.ahead=52)
#Plot
plot(seq(1,508,1), CM$cmort, type = "l",xlim = c(0,508), ylab = "Cardiac Mortality", main = "20 Week Cardiac Mortality Forecast")
lines(seq(457,508,1), preds$fcst$y1[,1], type = "l", col = "red")
ASE = mean((CM$cmort[457:508] - preds$fcst$y1[,1])^2)
ASE
#VAR Model 3 seasonal with Lag 1 Temp
# Same seasonal-dummy VAR but temperature enters lagged one week;
# rows start at index 2 because dplyr::lag() makes the first row NA.
CM = read.csv(file.choose(),header = TRUE)
CM$temp_1 = dplyr::lag(CM$temp,1)
ggpairs(CM[,-7])
VARselect(cbind(CM$cmort[2:479], CM$part[2:479], CM$temp_1[2:479]),lag.max = 10, season = 52, type = "both")
#VAR with p = 2
CMortVAR = VAR(cbind(CM$cmort[2:479], CM$part[2:479], CM$temp_1[2:479]),season = 52, type = "both",p = 2)
preds=predict(CMortVAR,n.ahead=20)
#Plot
plot(seq(1,508,1), CM$cmort, type = "l",xlim = c(0,528), ylab = "Cardiac Mortality", main = "20 Week Cardiac Mortality Forecast")
lines(seq(509,528,1), preds$fcst$y1[,1], type = "l", col = "red")
#Find ASE using last 30
CMsmall = CM[1:479,]
VARselect(cbind(CMsmall$cmort[2:478], CMsmall$part[2:478], CMsmall$temp_1[2:478]),lag.max = 10, season = 52, type = "both")
CMortVAR = VAR(cbind(CMsmall$cmort[2:478], CMsmall$part[2:478], CMsmall$temp_1[2:478]),season = 52, type = "both",p = 2)
preds=predict(CMortVAR,n.ahead=30)
#Plot
plot(seq(1,508,1), CM$cmort, type = "l",xlim = c(0,508), ylab = "Cardiac Mortality", main = "20 Week Cardiac Mortality Forecast")
lines(seq(479,508,1), preds$fcst$y1[,1], type = "l", col = "red")
ASE = mean((CM$cmort[479:508] - preds$fcst$y1[,1])^2)
ASE
#Find ASE using last 52
CMsmall = CM[1:456,] # 456 = 508-52
VARselect(cbind(CMsmall$cmort[2:456], CMsmall$part[2:456], CMsmall$temp_1[2:456]),lag.max = 10, season = 52, type = "both")
CMortVAR = VAR(cbind(CMsmall$cmort[2:456], CMsmall$part[2:456], CMsmall$temp_1[2:456]),season = 52, type = "both",p = 2)
preds=predict(CMortVAR,n.ahead=52)
#Plot
plot(seq(1,508,1), CM$cmort, type = "l",xlim = c(0,508), ylab = "Cardiac Mortality", main = "20 Week Cardiac Mortality Forecast")
lines(seq(457,508,1), preds$fcst$y1[,1], type = "l", col = "red")
ASE = mean((CM$cmort[457:508] - preds$fcst$y1[,1])^2)
ASE
#Sandbox 1 s = 52
# Exploratory univariate modeling of the exogenous series: difference out the
# annual (s = 52) cycle, check residual whiteness, then forecast 20 ahead.
#forecast Particles
plotts.sample.wge(CM$part) #freq near .0192 (annual)
CM_52 = artrans.wge(CM$part, c(rep(0,51),1))
plotts.sample.wge(CM_52) #looks like some low freq?
aic5.wge(CM_52) #picks ARMA(2,1) assume stationary
aic5.wge(CM_52,type = "bic") #picks ARMA(0,0)
ljung.wge(CM_52)$pval #FTR Ho
ljung.wge(CM_52, K = 48)$pval #FTR Ho
#Going with white noise despite peak at 0 in Spec D.
#est = est.arma.wge(CM_52, p = 3, q = 2)
#CM_52_AR2_MA1 = artrans.wge(CM_52,est$phi)
predsPart = fore.aruma.wge(CM$part,s = 52, n.ahead = 20)
#forecast Temp
plotts.sample.wge(CM$temp) #freq near .0192 (annual)
CM_52 = artrans.wge(CM$temp, c(rep(0,51),1))
plotts.sample.wge(CM_52) #looks like some low freq?
aic5.wge(CM_52) #picks ARMA(0,0)
aic5.wge(CM_52,type = "bic") #picks ARMA(0,0)
ljung.wge(CM_52)$pval
ljung.wge(CM_52, K = 48)$pval #barely rejects
acf(CM_52,lag.max = 48) # acf looks consistent with white noise
predsTemp = fore.aruma.wge(CM$temp,s = 52, n.ahead = 20)
# Model cmort based on predicted part and temp using MLR with Cor Erros
#assuming data is loaded in dataframe CM
# Fit an OLS to get residuals, pick an AR order for them, then refit as a
# regression with ARMA errors and a seasonal (s = 52) difference.
ksfit = lm(cmort~temp+part+Week, data = CM)
phi = aic.wge(ksfit$residuals)
attach(CM)
fit = arima(cmort,order = c(phi$p,0,0), seasonal = list(order = c(0,1,0), period = 52),xreg = cbind(temp, part, Week))
AIC(fit)
# Check for whiteness of residuals
acf(fit$residuals)
ljung.wge(fit$residuals) # pval = .059
ljung.wge(fit$residuals, K = 48) # pval = .004
#load the forecasted Part and Temp in a data frame
# xreg forecasts come from the univariate sandbox models above
next20 = data.frame(temp = predsTemp$f, part = predsPart$f, Week = seq(509,528,1))
#get predictions
predsCMort = predict(fit,newxreg = next20)
#plot next 20 cmort wrt time
plot(seq(1,508,1), cmort, type = "l",xlim = c(0,528), ylab = "Cardiac Mortality", main = "20 Week Cardiac Mortality Forecast")
lines(seq(509,528,1), predsCMort$pred, type = "l", col = "red")
# Univariate
# Baseline: purely seasonal forecast (s = 52) scored on the last 30 weeks
foreCmort = fore.aruma.wge(CM$cmort,s = 52, n.ahead = 30, lastn = TRUE, limits = FALSE)
ASE = mean((CM$cmort[479:508] - foreCmort$f)^2)
ASE
# Univariate ASE 52 back
foreCmort = fore.aruma.wge(CM$cmort,s = 52, n.ahead = 52, lastn = TRUE, limits = FALSE)
ASE = mean((CM$cmort[457:508] - foreCmort$f)^2)
ASE
# Show the effect of type = c("const", "trend", "both", "none")
data(Canada)
VAR(Canada, p = 2, type = "const")
VAR(Canada, p = 2, type = "trend")
VAR(Canada, p = 2, type = "both")
VAR(Canada, p = 2, type = "none")
# Same comparison with temperature as an exogenous regressor
VAR(cbind(CM_52, Part_52),type = "const",p = 2, exogen = Temp_52)
VAR(cbind(CM_52, Part_52),type = "trend",p = 2, exogen = Temp_52)
VAR(cbind(CM_52, Part_52),type = "both",p = 2, exogen = Temp_52)
VAR(cbind(CM_52, Part_52),type = "none",p = 2, exogen = Temp_52)
|
########### Prereqs ###########
# Load packages, read the peptide/protein tables, and parse the instrument
# column names ("JVE.<ptid>.<timept>.<replicate>.File.Area.s.") into a pheno
# table mapping each replicate column to patient / time-point / replicate.
# Begin always run:
options(stringsAsFactors=FALSE,scipen=600)
library(tidyverse)
library(gridExtra)
library(emmeans)
library(multcomp)
library(corrplot)
# Ensembl library()
# End always run
setwd("~/gdrive/AthroProteomics/data")
peptides<-read.csv('peptide_table_20180514.csv')
proteins<-read.csv('protein_table_20180514.csv')
# Locate the sample-name fragment between "JVE." and ".File.Area.s."
reg1<-regexpr("JVE.",text=names(peptides))
reg2<-regexpr(".File.Area.s.",text=names(peptides))
reg1DF<-data.frame(include=reg1>0,start=as.integer(reg1)+attr(reg1,"match.length"),
stop=as.integer(reg2)-1L)
names1<-substr(names(peptides),start=reg1DF$start,stop=reg1DF$stop)
names2<-str_split(names1,"\\.",simplify=TRUE)
pheno<-data.frame(oldName=names(peptides)[names1!=""],ptid=names2[names1!="",1],
timept=names2[names1!="",2],replicate=as.integer(names2[names1!="",3]))
# Replace unwieldy instrument names with rep_1 ... rep_n in both tables
pheno$newName<-paste0("rep_",1L:length(pheno$oldName))
names(peptides)[match(pheno$oldName,names(peptides))]<-pheno$newName
names(proteins)[match(pheno$oldName,names(proteins))]<-pheno$newName
# Unique biological sample = patient + time-point
pheno$uSamp<-paste(pheno$ptid,pheno$timept,sep="_")
# Phenotype data:
# Attach clinical group labels (sCAD vs MI subtypes) to the pheno table.
groups<-read.csv(file="~/gdrive/Athro/groups_20180515.csv")
groups$ptid<-as.character(groups$ptid)
# Default to sCAD; override with the MI subtype when one is recorded
groups$Group<-"sCAD"
groups$Group[!is.na(groups$MIGroup)]<-groups$MIGroup[!is.na(groups$MIGroup)]
pheno<-pheno %>% left_join(groups)
# Remove 2010_T0 because there are 4 replicates!
peptides<-peptides[,!(colnames(peptides) %in% pheno$newName[pheno$uSamp=="2010_T0"])]
pheno<-pheno %>% filter(uSamp != "2010_T0")
# Reshape a peptide intensity table to long format: one row per peptide x
# replicate, joined to sample phenotype (`pheno`) and group (`groups`) data,
# with a GroupTime factor and a sequential replicate ID (uID) for plotting.
# NOTE(review): despite the name, this returns LONG (gathered) data.
# Relies on globals `pheno` and `groups`; uses superseded tidyr::gather().
makeWideFun<-function(data){
# Drop annotation columns, keep Name + intensity columns, then melt
peptidesL<-data %>% dplyr::select(-Quality.Score,-(Parent.Protein:Percent.Files.With.Good.Quant)) %>%
gather(key="rep",value="Intensity",-Name)
peptidesL<-pheno %>% left_join(groups) %>% left_join(peptidesL,by=c("newName"="rep"))
# Group x time-point factor with a fixed level order for plot legends
peptidesL$GroupTime<-paste(peptidesL$Group,peptidesL$timept,sep=".")
peptidesL$GroupTime<-factor(peptidesL$GroupTime,
levels=c("sCAD.FU","sCAD.T0","Type 1.FU","Type 1.T0","Type 2.FU","Type 2.T0",
"Indeterminate.FU","Indeterminate.T0"))
peptidesL<-peptidesL %>% arrange(GroupTime,ptid,replicate)
# Unique replicate ID in sorted order, used as the boxplot x-axis
temp1<-peptidesL %>% dplyr::select(GroupTime,ptid,replicate) %>% unique()
temp1$uID<-as.factor(1L:nrow(temp1))
peptidesL<-temp1 %>% left_join(peptidesL)
return(peptidesL)
}
# Export peptide sequences for protein query:
# Strip modification tags like [Carboxymethyl] to get bare sequences.
pepSeqs<-peptides$Name
pepSeqs<-gsub("(\\[.*?\\])","",pepSeqs)
pepSeqsStr<-paste(pepSeqs,collapse=",")
# write.table(pepSeqsStr,file="pepSeqsStr.txt",quote=FALSE,row.names=FALSE)
# Export other peptide annotation:
pepAnno<-peptides %>% dplyr::select(Name,Quality.Score,ParentProtein.FullName,Use.For.Quant,
Percent.Files.With.Good.Quant)
# save(pepAnno,file="pepAnno.RData")
setwd("~/gdrive/AthroProteomics/")
########### Normalization ###########
# Build the intensity matrix (replicate columns only), impute zeros with each
# column's smallest positive value, and keep both raw (m00/peptides00) and
# log2 (m0/peptides) versions for the normalization comparisons below.
m0<-as.matrix(peptides[,grepl("rep",names(peptides))])
# Minimum value imputation:
mins<-apply(m0,2,function(x) min(x[x>0]))
for(i in 1:ncol(m0)){
# Values below 1e-6 are treated as zero/missing
m0[,i][m0[,i]<1e-6]<-mins[i]
}
# Log-transform:
m00<-m0
peptides00<-peptides
peptides00[,grepl("rep",names(peptides00))]<-m00
m0<-log2(m0)
peptides[,grepl("rep",names(peptides))]<-m0
# Boxplots of un-normalized log intensities per replicate
peptidesL<-makeWideFun(peptides)
# png(file="Plots/peptideNoNorm.png",height=5,width=10,units="in",res=300)
p0<-ggplot(data=peptidesL,aes(x=uID,group=newName,color=GroupTime,y=Intensity))+
geom_boxplot()+theme_bw()+xlab("")+ylab("Intensity (log scale)")+
theme(axis.text.x=element_blank())
show(p0)
# dev.off()
# Columnwise total intensity normalization:
# Scale each replicate so its total intensity equals the grand mean total.
cSums<-apply(m00,2,sum)
cFac<-cSums/mean(cSums)
m1<-m00
for(i in 1:ncol(m1)) m1[,i]<-m1[,i]/cFac[i]
m1b<-m1
# Median normalization
# Additionally equalize the per-column medians (applied on top of TI).
cMed<-apply(m1,2,median)
cFac2<-cMed/mean(cMed)
for(i in 1:ncol(m1b)) m1b[,i]<-m1b[,i]/cFac2[i]
peptides1b<-peptides1<-peptides00
peptides1[,grepl("rep",names(peptides1))]<-log2(m1)
peptides1b[,grepl("rep",names(peptides1b))]<-log2(m1b)
peptidesL1<-makeWideFun(peptides1)
peptidesL1b<-makeWideFun(peptides1b)
# png(file="Plots/peptideColNorm.png",height=5,width=10,units="in",res=300)
p1<-ggplot(data=peptidesL1,aes(x=uID,group=newName,color=GroupTime,y=Intensity))+
geom_boxplot()+theme_bw()+xlab("")+ylab("Intensity (log scale)")+
theme(axis.text.x=element_blank())
show(p1)
# dev.off()
# png(file="Plots/peptideColMedNorm.png",height=5,width=10,units="in",res=300)
p1b<-ggplot(data=peptidesL1b,aes(x=uID,group=newName,color=GroupTime,y=Intensity))+
geom_boxplot()+theme_bw()+xlab("")+ylab("Intensity (log scale)")+
theme(axis.text.x=element_blank())
show(p1b)
# dev.off()
# png(file="Plots/peptideNoneVColNorm.png",height=10,width=10,units="in",res=300)
grid.arrange(p0,p1,nrow=2)
# dev.off()
# Quantile normalization:
# Each remaining method produces a peptidesN table + boxplot pN for comparison.
m2<-preprocessCore::normalize.quantiles(m0)
peptides2<-peptides
peptides2[,grepl("rep",names(peptides2))]<-m2
peptidesL2<-makeWideFun(peptides2)
# png(file="Plots/peptideQuantNorm.png",height=5,width=10,units="in",res=300)
p2<-ggplot(data=peptidesL2,aes(x=uID,group=newName,color=GroupTime,y=Intensity))+
geom_boxplot()+theme_bw()+xlab("")+ylab("Intensity (log scale)")+
theme(axis.text.x=element_blank())
show(p2)
# dev.off()
# Cyclic loess:
m3<-limma::normalizeCyclicLoess(m0)
peptides3<-peptides
peptides3[,grepl("rep",names(peptides3))]<-m3
peptidesL3<-makeWideFun(peptides3)
# png(file="Plots/peptideFastCyclicLoess.png",height=5,width=10,units="in",res=300)
p3<-ggplot(data=peptidesL3,aes(x=uID,group=newName,color=GroupTime,y=Intensity))+
geom_boxplot()+theme_bw()+xlab("")+ylab("Intensity (log scale)")+
theme(axis.text.x=element_blank())
show(p3)
# dev.off()
# Cyclic loess bGal only:
# Weight the loess fit entirely on beta-galactosidase (P00722) spike-in
# peptides with near-complete quantification.
bGalWeights<-as.numeric(grepl("P00722",peptides$Parent.Protein) &
peptides$Percent.Files.With.Good.Quant>.99)
m4<-limma::normalizeCyclicLoess(m0,weights = bGalWeights)
peptides4<-peptides
peptides4[,grepl("rep",names(peptides4))]<-m4
peptidesL4<-makeWideFun(peptides4)
# png(file="Plots/peptideFastCyclicLoessbGalOnly.png",height=5,width=10,units="in",res=300)
p4<-ggplot(data=peptidesL4,aes(x=uID,group=newName,color=GroupTime,y=Intensity))+
geom_boxplot()+theme_bw()+xlab("")+ylab("Intensity (log scale)")+
theme(axis.text.x=element_blank())
show(p4)
# dev.off()
# Cyclic loess half & half bGal only:
# Give every peptide weight .2 but cap bGal peptides at 1 (bGal-heavy mix).
bGalWeights2<-bGalWeights+.2
bGalWeights2<-ifelse(bGalWeights2>1,1.0,bGalWeights2)
m5<-limma::normalizeCyclicLoess(m0,weights = bGalWeights2)
peptides5<-peptides
peptides5[,grepl("rep",names(peptides5))]<-m5
peptidesL5<-makeWideFun(peptides5)
# png(file="Plots/peptideFastCyclicLoessbGalHeavy.png",height=5,width=10,units="in",res=300)
p5<-ggplot(data=peptidesL5,aes(x=uID,group=newName,color=GroupTime,y=Intensity))+
geom_boxplot()+theme_bw()+xlab("")+ylab("Intensity (log scale)")+
theme(axis.text.x=element_blank())
show(p5)
# dev.off()
# MAD:
m6<-limma::normalizeMedianAbsValues(m0)
peptides6<-peptides
peptides6[,grepl("rep",names(peptides6))]<-m6
peptidesL6<-makeWideFun(peptides6)
# png(file="Plots/peptideMAD.png",height=5,width=10,units="in",res=300)
p6<-ggplot(data=peptidesL6,aes(x=uID,group=newName,color=GroupTime,y=Intensity))+
geom_boxplot()+theme_bw()+xlab("")+ylab("Intensity (log scale)")+
theme(axis.text.x=element_blank())
show(p6)
# dev.off()
# Beta gal:
# Evaluate a normalization method on the beta-galactosidase (P00722) spike-in
# peptides: compute per-peptide CVs across replicates (on the raw scale) and
# draw two spaghetti plots (all bGal peptides; a fixed random sample of 8).
# `method` labels the CV column and the plot file names. Relies on global
# data having log2 intensities in the rep_* columns.
bGalFun<-function(data,method){
# betaGal data:
bGal<-data %>% filter(grepl("P00722",ParentProtein.FullName) &
Percent.Files.With.Good.Quant>.99)
# Fixed seed so the same 8 peptides are sampled for every method
set.seed(3)
bGalShort<-sample(bGal$Name,8)
bGalL<-bGal %>% dplyr::select(-Quality.Score,-(Parent.Protein:Percent.Files.With.Good.Quant)) %>%
gather(key="rep",value="Intensity",-Name)
bGalLNames<-bGalL %>% dplyr::select(Name) %>% unique()
bGalLNames$id<-as.factor(1:nrow(bGalLNames))
bGalL<-bGalLNames %>% left_join(bGalL)
# CVs:
# 2**Intensity undoes the log2 transform before computing sd/mean
cv<-bGalL %>% group_by(Name) %>% summarize(cv=sd(2**Intensity)/mean(2**Intensity)*100)
names(cv)[names(cv)=="cv"]<-paste0(method,"CV")
# Plots:
fName<-paste0("Plots/bGalPeptides_",method,"_Full.png")
# png(filename=fName,height=5,width=8,units="in",res=300)
bGalp<-ggplot(bGalL,aes(x=rep,y=Intensity,group=Name,color=id))+
geom_line()+ylim(23,32)+theme_bw()+xlab("Replicate")+ylab("Intensity (log scale)")
print(bGalp)
# dev.off()
fName2<-paste0("Plots/bGalPeptides_",method,"_Samp.png")
# png(filename=fName2,height=5,width=8,units="in",res=300)
bGalp2<-ggplot(bGalL %>% filter(Name %in% bGalShort),
aes(x=rep,y=Intensity,group=Name,color=id))+
geom_line()+ylim(26,31.75)+theme_bw()+xlab("Replicate")+ylab("Intensity (log scale)")
print(bGalp2)
# dev.off()
return(cv)
}
# One CV column per normalization method; [,-1] drops the duplicated
# Name column from every call after the first.
bGalCVs<-cbind(
bGalFun(data=peptides,method="none"),
bGalFun(data=peptides1,method="columnTI"),
bGalFun(data=peptides1b,method="columnTIMed")[,-1],
bGalFun(peptides2,method="Quant")[,-1],
bGalFun(peptides3,method="FCycLoess")[,-1],
bGalFun(peptides4,method="FCycLoessbGal")[,-1],
bGalFun(peptides5,method="FCycLoessbGalLight")[,-1],
bGalFun(peptides6,method="MAD")[,-1]
)
# write.csv(bGalCVs,file="bGalCVs.csv",row.names=FALSE)
########### Un log-transform ###########
# Return every normalized table to the raw intensity scale (2^x) and set
# peptide names as rownames for matrix-style subsetting later.
peptides1[,grepl("rep_",names(peptides1))]<-
2**peptides1[,grepl("rep_",names(peptides1))]
peptides2[,grepl("rep_",names(peptides2))]<-
2**peptides2[,grepl("rep_",names(peptides2))]
peptides3[,grepl("rep_",names(peptides3))]<-
2**peptides3[,grepl("rep_",names(peptides3))]
peptides4[,grepl("rep_",names(peptides4))]<-
2**peptides4[,grepl("rep_",names(peptides4))]
peptides5[,grepl("rep_",names(peptides5))]<-
2**peptides5[,grepl("rep_",names(peptides5))]
peptides6[,grepl("rep_",names(peptides6))]<-
2**peptides6[,grepl("rep_",names(peptides6))]
########### Rownames ###########
rownames(peptides00)<-peptides00$Name
rownames(peptides1)<-peptides1$Name
rownames(peptides2)<-peptides2$Name
rownames(peptides3)<-peptides3$Name
rownames(peptides4)<-peptides4$Name
rownames(peptides5)<-peptides5$Name
rownames(peptides6)<-peptides6$Name
########### My beta-gal protein normalization ###########
# Select well-quantified E. coli beta-gal (P00722) peptides (pin's "use for
# quant" set vs unmodified-only set) and pull their intensities from each
# normalized table for a protein-level CV comparison.
load("~/gdrive/AthroProteomics/data/pepAnno2.RData")
pinEcoli<-pepAnno2 %>% filter(grepl("P00722",proteins) & goodQuant>.99
& pinUseQuant=="Yes")
# Unmodified peptides only: exclude sequences carrying [modification] tags
myEColi<-pepAnno2 %>% filter(grepl("P00722",proteins) & goodQuant>.99
& !grepl("(\\[.*?\\])",pepSeq))
myEColiIntensPep00<-peptides00[peptides00$Name %in% myEColi$pepSeq,
grepl("rep_",names(peptides00))]
myEColiIntensPep1<-peptides1[peptides1$Name %in% myEColi$pepSeq,
grepl("rep_",names(peptides1))]
myEColiIntensPep2<-peptides2[peptides2$Name %in% myEColi$pepSeq,
grepl("rep_",names(peptides2))]
myEColiIntensPep3<-peptides3[peptides3$Name %in% myEColi$pepSeq,
grepl("rep_",names(peptides3))]
myEColiIntensPep4<-peptides4[peptides4$Name %in% myEColi$pepSeq,
grepl("rep_",names(peptides4))]
myEColiIntensPep5<-peptides5[peptides5$Name %in% myEColi$pepSeq,
grepl("rep_",names(peptides5))]
myEColiIntensPep6<-peptides6[peptides6$Name %in% myEColi$pepSeq,
grepl("rep_",names(peptides6))]
# Coefficient of variation (sd/mean) of the per-column totals of `data`.
# Used to compare normalization methods by the spread of total beta-gal
# intensity across replicate columns.
#
# @param data numeric matrix or data.frame (rows = peptides, cols = replicates).
# @return scalar CV of the column sums (as a proportion, not a percent).
cvFun1<-function(data){
  # Compute the column sums once (original evaluated apply() twice)
  colTotals<-apply(data,2,sum)
  return(sd(colTotals)/mean(colTotals))
}
# Protein-level CV for each normalization technique (row order matches the
# myEColiIntensPep* objects above).
bGalProtCVs<-
data.frame(technique=c("None","Column TI","Quantile","CyclicLoess","CyclicLoessBGal",
"CyclicLoessBGalLt","MAD"),
cv=c(cvFun1(myEColiIntensPep00),cvFun1(myEColiIntensPep1),
cvFun1(myEColiIntensPep2),cvFun1(myEColiIntensPep3),
cvFun1(myEColiIntensPep4),cvFun1(myEColiIntensPep5),
cvFun1(myEColiIntensPep6)))
# write.csv(bGalProtCVs,file="bGalProtCVs.csv",row.names=FALSE)
########### Beta gal peptide correlations ###########
# Append a "Total" row (column sums) and plot peptide-peptide correlations
# across replicates for the column-TI (1) and bGal-heavy loess (5) tables.
myEColiIntensPep1<-rbind(myEColiIntensPep1,apply(myEColiIntensPep1,2,sum))
rownames(myEColiIntensPep1)[nrow(myEColiIntensPep1)]<-"Total"
cor1<-cor(t(myEColiIntensPep1))
# png("Plots/cor1.png",height=6,width=6,units="in",res=300)
corrplot(cor1,type="upper",tl.cex=.4,is.corr=FALSE,cl.lim=c(-.4,1))
# dev.off()
myEColiIntensPep5<-rbind(myEColiIntensPep5,apply(myEColiIntensPep5,2,sum))
rownames(myEColiIntensPep5)[nrow(myEColiIntensPep5)]<-"Total"
cor5<-cor(t(myEColiIntensPep5))
# png("Plots/cor5.png",height=6,width=6,units="in",res=300)
corrplot(cor5,type="upper",tl.cex=.4,is.corr=FALSE,cl.lim=c(-.4,1))
# dev.off()
########### Combine injections ###########
# Average the technical-replicate injections of each unique sample, then
# row-wise median-center and log2-transform the combined intensities.
#
# Relies on the global `pheno` data.frame for the replicate -> sample map
# (columns `uSamp` and `newName`).
#
# @param Names character vector of peptide names, one per row of `data`.
# @param data  data.frame whose `pheno$newName` columns hold raw intensities.
# @return data.frame with a `Name` column plus one log2 median-centered
#   `rep_<uSamp>` column per unique sample.
combFun<-function(Names,data){
  # Mean across injections per sample, built without cbind-in-a-loop growth
  uSamps<-unique(pheno$uSamp)
  sampMeans<-lapply(uSamps,function(uSamp){
    apply(data[,pheno$newName[pheno$uSamp==uSamp]],1,mean)
  })
  names(sampMeans)<-paste0("rep_",uSamps)
  df2<-cbind(data.frame(Name=Names),as.data.frame(sampMeans))
  # Median scale & log-transform: divide each row by its own median, then log2
  # (vectorized; a length-nrow vector divides a data.frame row-wise)
  repCols<-grepl("rep",names(df2))
  meds<-apply(df2[,repCols],1,median)
  df2[,repCols]<-log2(df2[,repCols]/meds)
  return(df2)
}
# Combined (injection-averaged, median-centered, log2) peptide table used by
# all downstream analyses; built from the column-TI-normalized peptides1.
pep1<-combFun(Names=peptides1$Name,data=peptides1)
# Major cleanup:
# Drop intermediate normalization objects and helpers to free memory; only
# pep1, pheno, groups, and pepAnno2 are needed from here on.
rm(bGalCVs,bGalProtCVs,cor1,cor5,m0,m00,m1,m1b,m2,m3,m4,m5,m6,
myEColi,myEColiIntensPep00,myEColiIntensPep1,myEColiIntensPep2,
myEColiIntensPep3,myEColiIntensPep4,myEColiIntensPep5,myEColiIntensPep6,
names2,p0,p1,p1b,p2,p3,p4,p5,p6,peptides,peptides00,peptides1b,
peptides2,peptides3,peptides4,peptides5,peptides6,peptidesL,peptidesL1b,
peptidesL2,peptidesL3,peptidesL4,peptidesL5,peptidesL6,proteins,
reg1DF,bGalWeights,bGalWeights2,cFac,cFac2,cMed,cSums,i,mins,names1,
pepSeqs,pepSeqsStr,reg1,reg2,bGalFun,combFun,cvFun1,makeWideFun,
pinEcoli)
########### Peptide difference at baseline ###########
# For every peptide: one-way ANOVA of T0 intensity on Group (sCAD / Type 1 /
# Type 2; Indeterminate excluded), storing group means, pairwise estimates,
# and unadjusted pairwise p-values.
pepDF<-pep1 %>% gather(key="rep",value="Intensity",-Name)
# rep names are "rep_<ptid>_<timept>"; fields 2 and 3 after splitting on "_"
pepDF$ptid<-str_split(pepDF$rep,"_",simplify=TRUE)[,2]
pepDF$timept<-str_split(pepDF$rep,"_",simplify=TRUE)[,3]
pepDF<-pepDF %>% left_join(groups)
unqPep<-unique(pepDF$Name)
# Preallocated results frame, one row per peptide
pepDFT0Res<-data.frame(unqPep=unqPep,T0_sCAD=NA,T0_Type1=NA,T0_Type2=NA,
T0_Anova=NA,T0_Type1_sCAD=NA,T0_Type2_sCAD=NA,T0_Type1_Type2=NA,
T0_Type1_sCAD_p=NA,T0_Type2_sCAD_p=NA,T0_Type1_Type2_p=NA)
for(i in 1:length(unqPep)){
# Linear Model
lm1<-lm(Intensity~Group,data=pepDF %>%
filter(Name==unqPep[i] & Group!="Indeterminate" & timept=="T0"))
# Overall T0 ANOVA:
lm1FStat<-summary(lm1)$fstatistic
pepDFT0Res$T0_Anova[i]<-pf(lm1FStat[1],lm1FStat[2],lm1FStat[3],lower.tail=FALSE)
# T0 Means:
lm1Emmeans<-as.data.frame(emmeans(lm1,~Group))
pepDFT0Res$T0_sCAD[i]<-lm1Emmeans$emmean[lm1Emmeans$Group=="sCAD"]
pepDFT0Res$T0_Type1[i]<-lm1Emmeans$emmean[lm1Emmeans$Group=="Type 1"]
pepDFT0Res$T0_Type2[i]<-lm1Emmeans$emmean[lm1Emmeans$Group=="Type 2"]
# Pairwise T0 (sign flipped so estimates are MI-minus-sCAD):
lm1Pairs<-as.data.frame(pairs(emmeans(lm1,~Group),adjust="none"))
pepDFT0Res$T0_Type1_sCAD[i]<-
(-lm1Pairs$estimate[lm1Pairs$contrast=="sCAD - Type 1"])
pepDFT0Res$T0_Type2_sCAD[i]<-
(-lm1Pairs$estimate[lm1Pairs$contrast=="sCAD - Type 2"])
pepDFT0Res$T0_Type1_Type2[i]<-
(lm1Pairs$estimate[lm1Pairs$contrast=="Type 1 - Type 2"])
# Pairwise T0 p-value (unadjusted)
pepDFT0Res$T0_Type1_sCAD_p[i]<-
(lm1Pairs$p.value[lm1Pairs$contrast=="sCAD - Type 1"])
pepDFT0Res$T0_Type2_sCAD_p[i]<-
(lm1Pairs$p.value[lm1Pairs$contrast=="sCAD - Type 2"])
pepDFT0Res$T0_Type1_Type2_p[i]<-
(lm1Pairs$p.value[lm1Pairs$contrast=="Type 1 - Type 2"])
print(i)
}
pepDFT0Res<-pepDFT0Res %>% left_join(pepAnno2,by=c("unqPep"="pepSeq"))
pepDFT0ResGood<-pepDFT0Res %>%
filter(goodQuant>.8 & T0_Type1_sCAD_p<.1 & T0_Type1_Type2_p<.1)
########### Peptide Temporal Difference analysis ###########
# Same per-peptide ANOVA as above, but on the within-patient change
# d = T0 - FU (presentation minus follow-up).
pepDFw<-pepDF %>% dplyr::select(-rep) %>%
tidyr::spread(key="timept",value="Intensity")
pepDFw$d<-pepDFw$T0-pepDFw$FU
pepDFDRes<-data.frame(unqPep=unqPep,D_sCAD=NA,D_Type1=NA,D_Type2=NA,
D_Anova=NA,D_Type1_sCAD=NA,D_Type2_sCAD=NA,D_Type1_Type2=NA,
D_Type1_sCAD_p=NA,D_Type2_sCAD_p=NA,D_Type1_Type2_p=NA)
for(i in 1:length(unqPep)){
# Linear Model
lm1<-lm(d~Group,data=pepDFw %>%
filter(Name==unqPep[i] & Group!="Indeterminate"))
# Overall ANOVA on the change score:
lm1FStat<-summary(lm1)$fstatistic
pepDFDRes$D_Anova[i]<-pf(lm1FStat[1],lm1FStat[2],lm1FStat[3],lower.tail=FALSE)
# Time-point Means:
lm1Emmeans<-as.data.frame(emmeans(lm1,~Group))
pepDFDRes$D_sCAD[i]<-lm1Emmeans$emmean[lm1Emmeans$Group=="sCAD"]
pepDFDRes$D_Type1[i]<-lm1Emmeans$emmean[lm1Emmeans$Group=="Type 1"]
pepDFDRes$D_Type2[i]<-lm1Emmeans$emmean[lm1Emmeans$Group=="Type 2"]
# Pairwise D (sign flipped so estimates are MI-minus-sCAD):
lm1Pairs<-as.data.frame(pairs(emmeans(lm1,~Group),adjust="none"))
pepDFDRes$D_Type1_sCAD[i]<-
(-lm1Pairs$estimate[lm1Pairs$contrast=="sCAD - Type 1"])
pepDFDRes$D_Type2_sCAD[i]<-
(-lm1Pairs$estimate[lm1Pairs$contrast=="sCAD - Type 2"])
pepDFDRes$D_Type1_Type2[i]<-
(lm1Pairs$estimate[lm1Pairs$contrast=="Type 1 - Type 2"])
# Pairwise D p-value (unadjusted)
pepDFDRes$D_Type1_sCAD_p[i]<-
(lm1Pairs$p.value[lm1Pairs$contrast=="sCAD - Type 1"])
pepDFDRes$D_Type2_sCAD_p[i]<-
(lm1Pairs$p.value[lm1Pairs$contrast=="sCAD - Type 2"])
pepDFDRes$D_Type1_Type2_p[i]<-
(lm1Pairs$p.value[lm1Pairs$contrast=="Type 1 - Type 2"])
print(i)
}
pepDFDRes<-pepDFDRes %>% left_join(pepAnno2,by=c("unqPep"="pepSeq"))
pepDFDResGood<-pepDFDRes %>%
filter(goodQuant>.8 & D_Type1_sCAD_p<.1 & D_Type1_Type2_p<.1)
rm(lm1,lm1Emmeans,lm1Pairs,i,lm1FStat,unqPep)
########### Protein Aggregation ###########
# Map peptides to proteins: for each protein accession, collect its unique
# (protN==1), decently quantified (goodQuant>.3) peptides, then average those
# peptides' pep1 rows to form a protein abundance per sample.
protList<-paste(pepAnno2$proteins,collapse=";")
protList<-unique(unlist(str_split(protList,";")))
Prot<-data.frame(prot=protList,lvl=NA,nPep=NA,peps=NA)
for(prot in Prot$prot){
peps<-pepAnno2$pepSeq[grepl(prot,pepAnno2$proteins,fixed=TRUE) & pepAnno2$protN==1 &
pepAnno2$goodQuant>.3]
pepsAll<-pepAnno2$pepSeq[grepl(prot,pepAnno2$proteins,fixed=TRUE)]
if(length(peps)>0){
Prot$peps[Prot$prot==prot]<-paste(peps,collapse=";")
Prot$nPep[Prot$prot==prot]<-length(peps)
}
# NOTE(review): pepsAll/nPepAll columns are created on the fly by indexed
# $<- assignment inside the loop (not declared above) -- confirm intended.
if(length(pepsAll)>0){
Prot$pepsAll[Prot$prot==prot]<-paste(pepsAll,collapse=";")
Prot$nPepAll[Prot$prot==prot]<-length(pepsAll)
}
}
Prot0<-Prot
# Proteins with no usable peptides are set aside in ProtOut
ProtOut<-Prot[is.na(Prot$nPep),]
Prot<-Prot[!is.na(Prot$nPep),]
# Calculate mean to make the protein abundances
pepsInProtList<-list()
for(i in 1:nrow(Prot)){
pepsInProt<-unlist(str_split(Prot$peps[i],";"))
pepsInProtDF<-pep1[pep1$Name %in% pepsInProt,]
pepsInProtDF$Name<-NULL
# Column means (per sample), transposed twice to get a 1-column matrix
pepsInProtDF<-t(t(apply(pepsInProtDF,2,mean)))
pepsInProtDF<-as.data.frame(pepsInProtDF)
names(pepsInProtDF)[1]<-"value"
pepsInProtDF$prot<-Prot$prot[i]
pepsInProtDF$rep<-rownames(pepsInProtDF)
pepsInProtList[[i]]<-pepsInProtDF
}
prots<-do.call("rbind",pepsInProtList)
# Add sample annotation:
prots$rep<-gsub("rep_","",prots$rep)
prots<-prots %>% left_join(pheno %>% dplyr::select(uSamp,Group,ptid,timept)
%>% unique(),
by=c("rep"="uSamp"))
unqProts<-unique(prots$prot)
########### Join prot data to peptide data ###########
# For each peptide, count the other peptides mapping to the same protein(s)
# and summarize that peptide's correlation with them across samples.
# Baseline abundances:
pepDFT0Res$OtherPepGood<-pepDFT0Res$OtherPepTotal<-pepDFT0Res$otherCor<-pepDFT0Res$otherGoodCor<-NA
for(i in 1:nrow(pepDFT0Res)){
tempProts<-pepDFT0Res$proteins[i]
tempProts<-unlist(str_split(tempProts,";"))
# Keep the UniProt accession (field 2 of sp|ACC|NAME), isoform suffix dropped
tempProts<-str_split(str_split(tempProts,"\\|",simplify=TRUE)[,2],
"-",simplify=TRUE)[,1]
allPepsDF<-pepDFT0Res[grepl(paste(tempProts,collapse="|"),
pepDFT0Res$proteins),]
allPeps<-unique(allPepsDF$unqPep)
allPepsGood<-unique(allPepsDF[allPepsDF$goodQuant>.3,]$unqPep)
pepDFT0Res$OtherPepTotal[i]<-length(allPeps)
pepDFT0Res$OtherPepGood[i]<-length(allPepsGood)
# Correlation analysis:
mat1<-as.matrix(pep1[pep1$Name %in% allPeps,names(pep1)!="Name"])
mat2<-as.matrix(pep1[pep1$Name %in% allPepsGood,names(pep1)!="Name"])
corMat1<-cor(t(mat1))
corMat2<-cor(t(mat2))
# MEAN correlation with the other peptides of the same protein(s)
pepDFT0Res$otherCor[i]<-mean(corMat1[rownames(corMat1)!=pepDFT0Res$unqPep[i],
colnames(corMat1)==pepDFT0Res$unqPep[i]])
pepDFT0Res$otherGoodCor[i]<-mean(corMat2[rownames(corMat2)!=pepDFT0Res$unqPep[i],
colnames(corMat2)==pepDFT0Res$unqPep[i]])
print(i)
}
# Change across time:
pepDFDRes$OtherPepGood<-pepDFDRes$OtherPepTotal<-pepDFDRes$otherCor<-pepDFDRes$otherGoodCor<-NA
for(i in 1:nrow(pepDFDRes)){
tempProts<-pepDFDRes$proteins[i]
tempProts<-unlist(str_split(tempProts,";"))
tempProts<-str_split(str_split(tempProts,"\\|",simplify=TRUE)[,2],
"-",simplify=TRUE)[,1]
allPepsDF<-pepDFDRes[grepl(paste(tempProts,collapse="|"),
pepDFDRes$proteins),]
allPeps<-unique(allPepsDF$unqPep)
allPepsGood<-unique(allPepsDF[allPepsDF$goodQuant>.3,]$unqPep)
pepDFDRes$OtherPepTotal[i]<-length(allPeps)
pepDFDRes$OtherPepGood[i]<-length(allPepsGood)
# Correlation analysis:
mat1<-as.matrix(pep1[pep1$Name %in% allPeps,names(pep1)!="Name"])
mat2<-as.matrix(pep1[pep1$Name %in% allPepsGood,names(pep1)!="Name"])
corMat1<-cor(t(mat1))
corMat2<-cor(t(mat2))
# NOTE(review): this loop uses MEDIAN where the T0 loop used MEAN --
# confirm the asymmetry is intentional.
pepDFDRes$otherCor[i]<-median(corMat1[rownames(corMat1)!=pepDFDRes$unqPep[i],
colnames(corMat1)==pepDFDRes$unqPep[i]])
pepDFDRes$otherGoodCor[i]<-median(corMat2[rownames(corMat2)!=pepDFDRes$unqPep[i],
colnames(corMat2)==pepDFDRes$unqPep[i]])
print(i)
}
# Join:
# Combine baseline (T0) and change (D) results into one table per peptide.
pepDFRes<-pepDFT0Res %>% full_join(
pepDFDRes %>% dplyr::select("unqPep","D_sCAD","D_Type1","D_Type2","D_Anova",
"D_Type1_sCAD","D_Type2_sCAD","D_Type1_Type2",
"D_Type1_sCAD_p","D_Type2_sCAD_p","D_Type1_Type2_p"),
by=c("unqPep"))
########### Add peptide criteria ###########
# Criteria 1: Fold change across time in Type 1 (ln-scale) > 1;
# Diff between sCAD and Type 1 fold change across time (ln-scale) > 0.75
# pepDFRes$crit1 <- abs(pepDFRes$D_Type1 > 1) & abs(pepDFRes$D_Type1_sCAD > .75)
# New Criteria 1: Type 1 vs sCAD change differs at p < .05
pepDFRes$crit1<-pepDFRes$D_Type1_sCAD_p<.05
# Criteria 2: If >1 other peptide from protein or family, correlation > .5?
pepDFRes$crit2 <- FALSE
pepDFRes$crit2[(pepDFRes$OtherPepTotal > 1 & pepDFRes$otherCor > .5) |
(pepDFRes$OtherPepTotal ==1)] <- TRUE
# Criteria 3: No missed cleavages
# NOTE(review): this tests miss == 1, not miss == 0 -- confirm the coding of
# the `miss` annotation (1 may mean "no missed cleavages" here).
pepDFRes$crit3 <- pepDFRes$miss == 1
# Criteria 4: No oxidized Met
pepDFRes$crit4 <- !grepl("M\\[Oxid\\]",pepDFRes$unqPep)
# Criteria 5: If #1-#4
pepDFRes$crit5 <- pepDFRes$crit1 & pepDFRes$crit2 & pepDFRes$crit3 & pepDFRes$crit4
# Criteria 6: If different between type 1 MI and sCAD at T0:
# pepDFRes$crit6 <- abs(pepDFRes$T0_Type1_sCAD)>.5
# New Criteria 6:
pepDFRes$crit6<-pepDFRes$T0_Type1_sCAD_p<.1
# Criteria 7: Criteria 5 and 6:
pepDFRes$crit7 <- pepDFRes$crit5 & pepDFRes$crit6
# Export peptide results:
# write.csv(pepDFRes,"pepDFResV2.csv")
########### Protein level analysis ###########
# Baseline
# Same ANOVA / emmeans structure as the peptide T0 loop, applied to the
# aggregated protein abundances in `prots`.
protDFT0Res<-data.frame(prot=unqProts,T0_sCAD=NA,T0_Type1=NA,T0_Type2=NA,
T0_Anova=NA,T0_Type1_sCAD=NA,T0_Type2_sCAD=NA,T0_Type1_Type2=NA,
T0_Type1_sCAD_p=NA,T0_Type2_sCAD_p=NA,T0_Type1_Type2_p=NA)
for(i in 1:nrow(protDFT0Res)){
# Linear Model
lm1<-lm(value~Group,data=prots %>%
filter(prot==protDFT0Res$prot[i] & Group!="Indeterminate" & timept=="T0"))
# Overall T0 ANOVA:
lm1FStat<-summary(lm1)$fstatistic
protDFT0Res$T0_Anova[i]<-pf(lm1FStat[1],lm1FStat[2],lm1FStat[3],lower.tail=FALSE)
# T0 Means:
lm1Emmeans<-as.data.frame(emmeans(lm1,~Group))
protDFT0Res$T0_sCAD[i]<-lm1Emmeans$emmean[lm1Emmeans$Group=="sCAD"]
protDFT0Res$T0_Type1[i]<-lm1Emmeans$emmean[lm1Emmeans$Group=="Type 1"]
protDFT0Res$T0_Type2[i]<-lm1Emmeans$emmean[lm1Emmeans$Group=="Type 2"]
# Pairwise T0 (sign flipped so estimates are MI-minus-sCAD):
lm1Pairs<-as.data.frame(pairs(emmeans(lm1,~Group),adjust="none"))
protDFT0Res$T0_Type1_sCAD[i]<-
(-lm1Pairs$estimate[lm1Pairs$contrast=="sCAD - Type 1"])
protDFT0Res$T0_Type2_sCAD[i]<-
(-lm1Pairs$estimate[lm1Pairs$contrast=="sCAD - Type 2"])
protDFT0Res$T0_Type1_Type2[i]<-
(lm1Pairs$estimate[lm1Pairs$contrast=="Type 1 - Type 2"])
# Pairwise T0 p-value (unadjusted)
protDFT0Res$T0_Type1_sCAD_p[i]<-
(lm1Pairs$p.value[lm1Pairs$contrast=="sCAD - Type 1"])
protDFT0Res$T0_Type2_sCAD_p[i]<-
(lm1Pairs$p.value[lm1Pairs$contrast=="sCAD - Type 2"])
protDFT0Res$T0_Type1_Type2_p[i]<-
(lm1Pairs$p.value[lm1Pairs$contrast=="Type 1 - Type 2"])
print(i)
}
############ Protein Change across time ############
# ANOVA of the within-patient protein change d = T0 - FU on Group.
protsW<-prots %>% dplyr::select(-rep) %>%
tidyr::spread(key="timept",value="value")
protsW$d<-protsW$T0-protsW$FU
protDFDRes<-data.frame(prot=unqProts,D_sCAD=NA,D_Type1=NA,D_Type2=NA,
D_Anova=NA,D_Type1_sCAD=NA,D_Type2_sCAD=NA,D_Type1_Type2=NA,
D_Type1_sCAD_p=NA,D_Type2_sCAD_p=NA,D_Type1_Type2_p=NA)
for(i in 1:nrow(protDFDRes)){
# Linear Model
lm1<-lm(d~Group,data=protsW %>%
filter(prot==protDFDRes$prot[i] & Group!="Indeterminate"))
# Overall ANOVA on the change score:
lm1FStat<-summary(lm1)$fstatistic
protDFDRes$D_Anova[i]<-pf(lm1FStat[1],lm1FStat[2],lm1FStat[3],lower.tail=FALSE)
# D Means:
lm1Emmeans<-as.data.frame(emmeans(lm1,~Group))
protDFDRes$D_sCAD[i]<-lm1Emmeans$emmean[lm1Emmeans$Group=="sCAD"]
protDFDRes$D_Type1[i]<-lm1Emmeans$emmean[lm1Emmeans$Group=="Type 1"]
protDFDRes$D_Type2[i]<-lm1Emmeans$emmean[lm1Emmeans$Group=="Type 2"]
# Pairwise D (sign flipped so estimates are MI-minus-sCAD):
lm1Pairs<-as.data.frame(pairs(emmeans(lm1,~Group),adjust="none"))
protDFDRes$D_Type1_sCAD[i]<-
(-lm1Pairs$estimate[lm1Pairs$contrast=="sCAD - Type 1"])
protDFDRes$D_Type2_sCAD[i]<-
(-lm1Pairs$estimate[lm1Pairs$contrast=="sCAD - Type 2"])
protDFDRes$D_Type1_Type2[i]<-
(lm1Pairs$estimate[lm1Pairs$contrast=="Type 1 - Type 2"])
# Pairwise D p-value (unadjusted)
protDFDRes$D_Type1_sCAD_p[i]<-
(lm1Pairs$p.value[lm1Pairs$contrast=="sCAD - Type 1"])
protDFDRes$D_Type2_sCAD_p[i]<-
(lm1Pairs$p.value[lm1Pairs$contrast=="sCAD - Type 2"])
protDFDRes$D_Type1_Type2_p[i]<-
(lm1Pairs$p.value[lm1Pairs$contrast=="Type 1 - Type 2"])
print(i)
}
# Save
rm(corMat1,corMat2,lm1,lm1Emmeans,lm1FStat,lm1Pairs,mat1,mat2,allPeps,
allPepsGood,i,peps,pepsAll,pepsInProt,prot,protList,tempProts,unqProts)
save.image(file="working_20181007.RData")
########### Peptide plots ###########
# Select peptides passing criterion 7 (plus all von Willebrand factor,
# P04275, peptides) and attach human-readable protein titles for plotting.
load(file="working_20181007.RData")
sigPeps<-pepDFRes$unqPep[pepDFRes$crit7 & pepDFRes$length<25 & pepDFRes$goodQuant > .05]
sigPeps2<-pepDFRes$unqPep[grepl("P04275",pepDFRes$proteins)]
sigPeps<-c(sigPeps,sigPeps2)
pepsSig<-pepDFRes[pepDFRes$unqPep %in% sigPeps,]
# Hand-curated peptide -> protein title lookup (two columns, row-wise)
customTitles<-matrix(c(
"NSEEFAAAMSR","Apolipoprotein B-100",
"YYTYLIMNK","Complement C3",
"C[Carboxymethyl]EWETPEGC[Carboxymethyl]EQVLTGK","C4b-binding protein alpha chain",
"LQQVLHAGSGPC[Carboxymethyl]LPHLLSR","Alpha-2-antiplasmin",
"LC[Carboxymethyl]MGSGLNLC[Carboxymethyl]EPNNK","Serotransferrin",
"KPVAFSDYIHPVC[Carboxymethyl]LPDR","Prothrombin",
"YEITTIHNLFR","Heparin cofactor 2",
"EVVADSVWVDVK","Complement C3",
"TVMVNIENPEGIPVK","Complement C3",
"TSSFALNLPTLPEVK","Apolipoprotein B-100",
"NFVASHIANILNSEELDIQDLK","Apolipoprotein B-100",
"YTIAALLSPYSYSTTAVVTNPK","Transthyretin",
"EALQGVGDMGR","Serum amyloid A-4 protein",
"EWFSETFQK","Apolipoprotein C-I",
"NIQEYLSILTDPDGK","Apolipoprotein B-100",
"SGSSTASWIQNVDTK","Apolipoprotein B-100",
"AGPHC[Carboxymethyl]PTAQLIATLK","Platelet Factor 4",
"AASGTTGTYQEWK","Apolipoprotein B-100",
"GTHC[Carboxymethyl]NQVEVIATLK","Platelet basic protein",
"LIVAMSSWLQK","Apolipoprotein B-100",
"HIQNIDIQHLAGK","Apolipoprotein B-100",
"EDVYVVGTVLR","C4b-binding protein alpha chain",
"EELC[Carboxymethyl]TMFIR","Apolipoprotein B-100",
"HITSLEVIK","Platelet factor 4",
"SLMLHYEFLQR","Complement component C8 beta chain",
"ALVEQGFTVPEIK","Apolipoprotein B-100",
"AVLTIDEK","Alpha-1-antitrypsin",
"LC[Carboxymethyl]GC[Carboxymethyl]LELLLQFDQK","RUN and FYVE domain-containing protein 4",
"NIQSLEVIGK","Platelet basic protein",
"ATAVVDGAFK","Peroxiredoxin-2",
"VIVIPVGIGPHANLK","von Willebrand factor",
"YTLFQIFSK","von Willebrand factor"
),
ncol=2,byrow=TRUE)
customTitles<-as.data.frame(customTitles)
names(customTitles)<-c("unqPep","Title")
pepsSig<-pepsSig %>% left_join(customTitles)
# Plot function:
# Spaghetti plot of one significant peptide: intensity vs time-point, one
# line per patient, faceted by group (Indeterminate excluded). `iter` indexes
# into the global `pepsSig`; ylower/yupper set the y-axis limits.
# Relies on globals `pepDF` and `pepsSig`.
pFun1<-function(iter,ylower,yupper){
temp1<-pepDF[pepDF$Name==pepsSig$unqPep[iter] & pepDF$Group != "Indeterminate",]
ggplot(temp1,aes(timept,Intensity,color=Group,group=ptid))+
geom_point()+geom_line()+ylim(ylower,yupper)+theme_bw()+facet_grid(~Group)+
ggtitle(pepsSig$unqPep[iter],
subtitle=paste0(pepsSig$Title[iter],". cor = ",round(pepsSig$otherCor[iter],3)))+
xlab("Time-point")+ylab("Abundance")+
theme(plot.title = element_text(hjust = 0.5),
plot.subtitle = element_text(hjust = 0.5))
}
pepsSig<-pepsSig %>% arrange(Title,unqPep)
# pFun1(1,-2,2)
# pFun1(2,-3,3)
pFun1(3,-1.5,1.75)
pFun1(4,-3,3)
pFun1(5,-3,4)
pFun1(6,-1.25,2.1)
pFun1(7,-2,2.25)
pFun1(8,-1,1.25)
pFun1(9,-1.5,3)
pFun1(10,-1.1,1.5)
pFun1(11,-3,3)
pFun1(12,-1.1,1.3)
pFun1(13,-1.2,1)
# pFun1(14,-3,3)
# pFun1(15,-3,3)
# pFun1(16,-3,3)
# pFun1(17,-3,3)
# pFun1(18,-3,3)
# pFun1(19,-3,3)
pFun1(20,-1.1,1)
# pFun1(21,-3,3)
pFun1(22,-7.5,5.25)
pFun1(23,-2,3)
pFun1(24,-6.5,5.25)
pFun1(25,-5,5)
pFun1(26,-.9,.7)
# pFun1(27,-3,3)
# pFun1(28,-3,3)
# pFun1(29,-3,3)
pFun1(30,-1.2,1)
pFun1(31,-5,3)
pFun1(32,-4.5,3.2)
########### Peptide pull protein analysis ###########
setwd("~/gdrive/AthroProteomics")
load(file="working_20180821.RData")
# Which proteins:
str_split(pepDFRes$proteins,";") | /analysis.R | no_license | trainorp/atheroProteomics | R | false | false | 29,793 | r | ########### Prereqs ###########
# Begin always run:
options(stringsAsFactors=FALSE,scipen=600)
library(tidyverse)
library(gridExtra)
library(emmeans)
library(multcomp)
library(corrplot)
# Ensembl library()
# End always run
setwd("~/gdrive/AthroProteomics/data")
peptides<-read.csv('peptide_table_20180514.csv')
proteins<-read.csv('protein_table_20180514.csv')
reg1<-regexpr("JVE.",text=names(peptides))
reg2<-regexpr(".File.Area.s.",text=names(peptides))
reg1DF<-data.frame(include=reg1>0,start=as.integer(reg1)+attr(reg1,"match.length"),
stop=as.integer(reg2)-1L)
names1<-substr(names(peptides),start=reg1DF$start,stop=reg1DF$stop)
names2<-str_split(names1,"\\.",simplify=TRUE)
pheno<-data.frame(oldName=names(peptides)[names1!=""],ptid=names2[names1!="",1],
timept=names2[names1!="",2],replicate=as.integer(names2[names1!="",3]))
pheno$newName<-paste0("rep_",1L:length(pheno$oldName))
names(peptides)[match(pheno$oldName,names(peptides))]<-pheno$newName
names(proteins)[match(pheno$oldName,names(proteins))]<-pheno$newName
pheno$uSamp<-paste(pheno$ptid,pheno$timept,sep="_")
# Phenotype data:
groups<-read.csv(file="~/gdrive/Athro/groups_20180515.csv")
groups$ptid<-as.character(groups$ptid)
groups$Group<-"sCAD"
groups$Group[!is.na(groups$MIGroup)]<-groups$MIGroup[!is.na(groups$MIGroup)]
pheno<-pheno %>% left_join(groups)
# Remove 2010_T0 because there are 4 replicates!
peptides<-peptides[,!(colnames(peptides) %in% pheno$newName[pheno$uSamp=="2010_T0"])]
pheno<-pheno %>% filter(uSamp != "2010_T0")
# Wide data:
makeWideFun <- function(data) {
  # Reshape a wide peptide table (one `rep_*` intensity column per injection)
  # into long format and attach sample annotation from the globally defined
  # `pheno` and `groups` data frames.
  #
  # data: data frame with a `Name` column, per-injection intensity columns,
  #   and the annotation columns dropped in the first select().
  # Returns: long data frame, one row per peptide x injection, ordered by
  #   group/time-point, with a per-injection factor `uID`.
  peptidesL <- data %>%
    dplyr::select(-Quality.Score, -(Parent.Protein:Percent.Files.With.Good.Quant)) %>%
    gather(key = "rep", value = "Intensity", -Name)
  peptidesL <- pheno %>% left_join(groups) %>%
    left_join(peptidesL, by = c("newName" = "rep"))
  # Combined group/time-point factor with a fixed display order for plotting.
  peptidesL$GroupTime <- paste(peptidesL$Group, peptidesL$timept, sep = ".")
  peptidesL$GroupTime <- factor(peptidesL$GroupTime,
    levels = c("sCAD.FU", "sCAD.T0", "Type 1.FU", "Type 1.T0",
               "Type 2.FU", "Type 2.T0",
               "Indeterminate.FU", "Indeterminate.T0"))
  peptidesL <- peptidesL %>% arrange(GroupTime, ptid, replicate)
  # Sequential ID per unique injection. seq_len() instead of 1L:nrow():
  # 1:n yields c(1, 0) when n == 0, which would mislabel an empty table.
  temp1 <- peptidesL %>% dplyr::select(GroupTime, ptid, replicate) %>% unique()
  temp1$uID <- as.factor(seq_len(nrow(temp1)))
  temp1 %>% left_join(peptidesL)
}
# Export peptide sequences for protein query:
pepSeqs<-peptides$Name
pepSeqs<-gsub("(\\[.*?\\])","",pepSeqs)
pepSeqsStr<-paste(pepSeqs,collapse=",")
# write.table(pepSeqsStr,file="pepSeqsStr.txt",quote=FALSE,row.names=FALSE)
# Export other peptide annotation:
pepAnno<-peptides %>% dplyr::select(Name,Quality.Score,ParentProtein.FullName,Use.For.Quant,
Percent.Files.With.Good.Quant)
# save(pepAnno,file="pepAnno.RData")
setwd("~/gdrive/AthroProteomics/")
########### Normalization ###########
m0<-as.matrix(peptides[,grepl("rep",names(peptides))])
# Minimum value imputation:
mins<-apply(m0,2,function(x) min(x[x>0]))
for(i in 1:ncol(m0)){
m0[,i][m0[,i]<1e-6]<-mins[i]
}
# Log-transform:
m00<-m0
peptides00<-peptides
peptides00[,grepl("rep",names(peptides00))]<-m00
m0<-log2(m0)
peptides[,grepl("rep",names(peptides))]<-m0
peptidesL<-makeWideFun(peptides)
# png(file="Plots/peptideNoNorm.png",height=5,width=10,units="in",res=300)
p0<-ggplot(data=peptidesL,aes(x=uID,group=newName,color=GroupTime,y=Intensity))+
geom_boxplot()+theme_bw()+xlab("")+ylab("Intensity (log scale)")+
theme(axis.text.x=element_blank())
show(p0)
# dev.off()
# Columnwise total intensity normalization:
cSums<-apply(m00,2,sum)
cFac<-cSums/mean(cSums)
m1<-m00
for(i in 1:ncol(m1)) m1[,i]<-m1[,i]/cFac[i]
m1b<-m1
# Median normalization
cMed<-apply(m1,2,median)
cFac2<-cMed/mean(cMed)
for(i in 1:ncol(m1b)) m1b[,i]<-m1b[,i]/cFac2[i]
peptides1b<-peptides1<-peptides00
peptides1[,grepl("rep",names(peptides1))]<-log2(m1)
peptides1b[,grepl("rep",names(peptides1b))]<-log2(m1b)
peptidesL1<-makeWideFun(peptides1)
peptidesL1b<-makeWideFun(peptides1b)
# png(file="Plots/peptideColNorm.png",height=5,width=10,units="in",res=300)
p1<-ggplot(data=peptidesL1,aes(x=uID,group=newName,color=GroupTime,y=Intensity))+
geom_boxplot()+theme_bw()+xlab("")+ylab("Intensity (log scale)")+
theme(axis.text.x=element_blank())
show(p1)
# dev.off()
# png(file="Plots/peptideColMedNorm.png",height=5,width=10,units="in",res=300)
p1b<-ggplot(data=peptidesL1b,aes(x=uID,group=newName,color=GroupTime,y=Intensity))+
geom_boxplot()+theme_bw()+xlab("")+ylab("Intensity (log scale)")+
theme(axis.text.x=element_blank())
show(p1b)
# dev.off()
# png(file="Plots/peptideNoneVColNorm.png",height=10,width=10,units="in",res=300)
grid.arrange(p0,p1,nrow=2)
# dev.off()
# Quantile normalization:
m2<-preprocessCore::normalize.quantiles(m0)
peptides2<-peptides
peptides2[,grepl("rep",names(peptides2))]<-m2
peptidesL2<-makeWideFun(peptides2)
# png(file="Plots/peptideQuantNorm.png",height=5,width=10,units="in",res=300)
p2<-ggplot(data=peptidesL2,aes(x=uID,group=newName,color=GroupTime,y=Intensity))+
geom_boxplot()+theme_bw()+xlab("")+ylab("Intensity (log scale)")+
theme(axis.text.x=element_blank())
show(p2)
# dev.off()
# Cyclic loess:
m3<-limma::normalizeCyclicLoess(m0)
peptides3<-peptides
peptides3[,grepl("rep",names(peptides3))]<-m3
peptidesL3<-makeWideFun(peptides3)
# png(file="Plots/peptideFastCyclicLoess.png",height=5,width=10,units="in",res=300)
p3<-ggplot(data=peptidesL3,aes(x=uID,group=newName,color=GroupTime,y=Intensity))+
geom_boxplot()+theme_bw()+xlab("")+ylab("Intensity (log scale)")+
theme(axis.text.x=element_blank())
show(p3)
# dev.off()
# Cyclic loess bGal only:
bGalWeights<-as.numeric(grepl("P00722",peptides$Parent.Protein) &
peptides$Percent.Files.With.Good.Quant>.99)
m4<-limma::normalizeCyclicLoess(m0,weights = bGalWeights)
peptides4<-peptides
peptides4[,grepl("rep",names(peptides4))]<-m4
peptidesL4<-makeWideFun(peptides4)
# png(file="Plots/peptideFastCyclicLoessbGalOnly.png",height=5,width=10,units="in",res=300)
p4<-ggplot(data=peptidesL4,aes(x=uID,group=newName,color=GroupTime,y=Intensity))+
geom_boxplot()+theme_bw()+xlab("")+ylab("Intensity (log scale)")+
theme(axis.text.x=element_blank())
show(p4)
# dev.off()
# Cyclic loess half & half bGal only:
bGalWeights2<-bGalWeights+.2
bGalWeights2<-ifelse(bGalWeights2>1,1.0,bGalWeights2)
m5<-limma::normalizeCyclicLoess(m0,weights = bGalWeights2)
peptides5<-peptides
peptides5[,grepl("rep",names(peptides5))]<-m5
peptidesL5<-makeWideFun(peptides5)
# png(file="Plots/peptideFastCyclicLoessbGalHeavy.png",height=5,width=10,units="in",res=300)
p5<-ggplot(data=peptidesL5,aes(x=uID,group=newName,color=GroupTime,y=Intensity))+
geom_boxplot()+theme_bw()+xlab("")+ylab("Intensity (log scale)")+
theme(axis.text.x=element_blank())
show(p5)
# dev.off()
# MAD:
m6<-limma::normalizeMedianAbsValues(m0)
peptides6<-peptides
peptides6[,grepl("rep",names(peptides6))]<-m6
peptidesL6<-makeWideFun(peptides6)
# png(file="Plots/peptideMAD.png",height=5,width=10,units="in",res=300)
p6<-ggplot(data=peptidesL6,aes(x=uID,group=newName,color=GroupTime,y=Intensity))+
geom_boxplot()+theme_bw()+xlab("")+ylab("Intensity (log scale)")+
theme(axis.text.x=element_blank())
show(p6)
# dev.off()
# Beta gal:
bGalFun <- function(data, method) {
  # QC on the beta-galactosidase (E. coli, accession P00722) spike-in
  # peptides for one normalisation method: computes per-peptide CVs across
  # injections and draws line plots of the intensity profiles.
  #
  # data: peptide table carrying ParentProtein.FullName /
  #   Percent.Files.With.Good.Quant annotation plus log2 `rep_*` columns.
  # method: short label used to tag the CV column and the plot file names.
  # Returns: data frame with `Name` and one "<method>CV" column (CV in %).
  bGal <- data %>% filter(grepl("P00722", ParentProtein.FullName) &
                            Percent.Files.With.Good.Quant > .99)
  # Fixed seed so the 8-peptide subsample is identical across methods,
  # making the "_Samp" plots comparable.
  set.seed(3)
  bGalShort <- sample(bGal$Name, 8)
  bGalL <- bGal %>%
    dplyr::select(-Quality.Score, -(Parent.Protein:Percent.Files.With.Good.Quant)) %>%
    gather(key = "rep", value = "Intensity", -Name)
  bGalLNames <- bGalL %>% dplyr::select(Name) %>% unique()
  # seq_len() instead of 1:nrow(): safe if the filter ever matches nothing.
  bGalLNames$id <- as.factor(seq_len(nrow(bGalLNames)))
  bGalL <- bGalLNames %>% left_join(bGalL)
  # CVs on the raw intensity scale, hence the 2**Intensity back-transform.
  cv <- bGalL %>% group_by(Name) %>%
    summarize(cv = sd(2**Intensity) / mean(2**Intensity) * 100)
  names(cv)[names(cv) == "cv"] <- paste0(method, "CV")
  # Plot 1: all spike-in peptides across injections.
  fName <- paste0("Plots/bGalPeptides_", method, "_Full.png")
  # png(filename=fName,height=5,width=8,units="in",res=300)
  bGalp <- ggplot(bGalL, aes(x = rep, y = Intensity, group = Name, color = id)) +
    geom_line() + ylim(23, 32) + theme_bw() +
    xlab("Replicate") + ylab("Intensity (log scale)")
  print(bGalp)
  # dev.off()
  # Plot 2: the fixed 8-peptide subsample only.
  fName2 <- paste0("Plots/bGalPeptides_", method, "_Samp.png")
  # png(filename=fName2,height=5,width=8,units="in",res=300)
  bGalp2 <- ggplot(bGalL %>% filter(Name %in% bGalShort),
                   aes(x = rep, y = Intensity, group = Name, color = id)) +
    geom_line() + ylim(26, 31.75) + theme_bw() +
    xlab("Replicate") + ylab("Intensity (log scale)")
  print(bGalp2)
  # dev.off()
  cv
}
bGalCVs<-cbind(
bGalFun(data=peptides,method="none"),
bGalFun(data=peptides1,method="columnTI")[,-1],
bGalFun(data=peptides1b,method="columnTIMed")[,-1],
bGalFun(peptides2,method="Quant")[,-1],
bGalFun(peptides3,method="FCycLoess")[,-1],
bGalFun(peptides4,method="FCycLoessbGal")[,-1],
bGalFun(peptides5,method="FCycLoessbGalLight")[,-1],
bGalFun(peptides6,method="MAD")[,-1]
)
# write.csv(bGalCVs,file="bGalCVs.csv",row.names=FALSE)
########### Un log-transform ###########
peptides1[,grepl("rep_",names(peptides1))]<-
2**peptides1[,grepl("rep_",names(peptides1))]
peptides2[,grepl("rep_",names(peptides2))]<-
2**peptides2[,grepl("rep_",names(peptides2))]
peptides3[,grepl("rep_",names(peptides3))]<-
2**peptides3[,grepl("rep_",names(peptides3))]
peptides4[,grepl("rep_",names(peptides4))]<-
2**peptides4[,grepl("rep_",names(peptides4))]
peptides5[,grepl("rep_",names(peptides5))]<-
2**peptides5[,grepl("rep_",names(peptides5))]
peptides6[,grepl("rep_",names(peptides6))]<-
2**peptides6[,grepl("rep_",names(peptides6))]
########### Rownames ###########
rownames(peptides00)<-peptides00$Name
rownames(peptides1)<-peptides1$Name
rownames(peptides2)<-peptides2$Name
rownames(peptides3)<-peptides3$Name
rownames(peptides4)<-peptides4$Name
rownames(peptides5)<-peptides5$Name
rownames(peptides6)<-peptides6$Name
########### My beta-gal protein normalization ###########
load("~/gdrive/AthroProteomics/data/pepAnno2.RData")
pinEcoli<-pepAnno2 %>% filter(grepl("P00722",proteins) & goodQuant>.99
& pinUseQuant=="Yes")
myEColi<-pepAnno2 %>% filter(grepl("P00722",proteins) & goodQuant>.99
& !grepl("(\\[.*?\\])",pepSeq))
myEColiIntensPep00<-peptides00[peptides00$Name %in% myEColi$pepSeq,
grepl("rep_",names(peptides00))]
myEColiIntensPep1<-peptides1[peptides1$Name %in% myEColi$pepSeq,
grepl("rep_",names(peptides1))]
myEColiIntensPep2<-peptides2[peptides2$Name %in% myEColi$pepSeq,
grepl("rep_",names(peptides2))]
myEColiIntensPep3<-peptides3[peptides3$Name %in% myEColi$pepSeq,
grepl("rep_",names(peptides3))]
myEColiIntensPep4<-peptides4[peptides4$Name %in% myEColi$pepSeq,
grepl("rep_",names(peptides4))]
myEColiIntensPep5<-peptides5[peptides5$Name %in% myEColi$pepSeq,
grepl("rep_",names(peptides5))]
myEColiIntensPep6<-peptides6[peptides6$Name %in% myEColi$pepSeq,
grepl("rep_",names(peptides6))]
cvFun1 <- function(data) {
  # Coefficient of variation of the per-column (per-sample) total
  # intensities: sd/mean of the column sums.
  #
  # data: numeric matrix or data frame, one column per sample.
  # Returns: a single dimensionless ratio.
  #
  # colSums() replaces the two identical apply(data, 2, sum) calls the
  # original made (the same column sums were computed twice).
  totals <- colSums(data)
  sd(totals) / mean(totals)
}
bGalProtCVs<-
data.frame(technique=c("None","Column TI","Quantile","CyclicLoess","CyclicLoessBGal",
"CyclicLoessBGalLt","MAD"),
cv=c(cvFun1(myEColiIntensPep00),cvFun1(myEColiIntensPep1),
cvFun1(myEColiIntensPep2),cvFun1(myEColiIntensPep3),
cvFun1(myEColiIntensPep4),cvFun1(myEColiIntensPep5),
cvFun1(myEColiIntensPep6)))
# write.csv(bGalProtCVs,file="bGalProtCVs.csv",row.names=FALSE)
########### Beta gal peptide correlations ###########
myEColiIntensPep1<-rbind(myEColiIntensPep1,apply(myEColiIntensPep1,2,sum))
rownames(myEColiIntensPep1)[nrow(myEColiIntensPep1)]<-"Total"
cor1<-cor(t(myEColiIntensPep1))
# png("Plots/cor1.png",height=6,width=6,units="in",res=300)
corrplot(cor1,type="upper",tl.cex=.4,is.corr=FALSE,cl.lim=c(-.4,1))
# dev.off()
myEColiIntensPep5<-rbind(myEColiIntensPep5,apply(myEColiIntensPep5,2,sum))
rownames(myEColiIntensPep5)[nrow(myEColiIntensPep5)]<-"Total"
cor5<-cor(t(myEColiIntensPep5))
# png("Plots/cor5.png",height=6,width=6,units="in",res=300)
corrplot(cor5,type="upper",tl.cex=.4,is.corr=FALSE,cl.lim=c(-.4,1))
# dev.off()
########### Combine injections ###########
combFun <- function(Names, data) {
  # Average the technical-replicate injections of each unique sample
  # (global pheno$uSamp) into one column, then median-scale each row and
  # log2-transform.
  #
  # Names: peptide identifiers, one per row of `data`.
  # data: data frame containing the per-injection intensity columns named
  #   in the global `pheno$newName`.
  # Returns: data frame with `Name` plus one `rep_<uSamp>` column per
  #   unique sample, on the log2 median-centred scale.
  # Output dataframe:
  df2 <- data.frame(Name = Names)
  # Combine injections: row-wise mean of each sample's injection columns.
  for (uSamp in unique(pheno$uSamp)) {
    df1 <- data.frame(value = apply(data[, pheno$newName[pheno$uSamp == uSamp]], 1, mean))
    names(df1)[names(df1) == "value"] <- paste0("rep_", uSamp)
    df2 <- cbind(df2, df1)
  }
  # Median scale & log-transform. The column mask is computed once instead
  # of on every loop iteration, and the per-row loop is replaced by a
  # vectorised division: a data frame divided by a length-nrow vector
  # recycles the vector down each column, i.e. divides row i by meds[i].
  repCols <- grepl("rep", names(df2))
  meds <- apply(df2[, repCols], 1, median)
  df2[, repCols] <- log2(df2[, repCols] / meds)
  df2
}
pep1<-combFun(Names=peptides1$Name,data=peptides1)
# Major cleanup:
rm(bGalCVs,bGalProtCVs,cor1,cor5,m0,m00,m1,m1b,m2,m3,m4,m5,m6,
myEColi,myEColiIntensPep00,myEColiIntensPep1,myEColiIntensPep2,
myEColiIntensPep3,myEColiIntensPep4,myEColiIntensPep5,myEColiIntensPep6,
names2,p0,p1,p1b,p2,p3,p4,p5,p6,peptides,peptides00,peptides1b,
peptides2,peptides3,peptides4,peptides5,peptides6,peptidesL,peptidesL1b,
peptidesL2,peptidesL3,peptidesL4,peptidesL5,peptidesL6,proteins,
reg1DF,bGalWeights,bGalWeights2,cFac,cFac2,cMed,cSums,i,mins,names1,
pepSeqs,pepSeqsStr,reg1,reg2,bGalFun,combFun,cvFun1,makeWideFun,
pinEcoli)
########### Peptide difference at baseline ###########
pepDF<-pep1 %>% gather(key="rep",value="Intensity",-Name)
pepDF$ptid<-str_split(pepDF$rep,"_",simplify=TRUE)[,2]
pepDF$timept<-str_split(pepDF$rep,"_",simplify=TRUE)[,3]
pepDF<-pepDF %>% left_join(groups)
unqPep<-unique(pepDF$Name)
pepDFT0Res<-data.frame(unqPep=unqPep,T0_sCAD=NA,T0_Type1=NA,T0_Type2=NA,
T0_Anova=NA,T0_Type1_sCAD=NA,T0_Type2_sCAD=NA,T0_Type1_Type2=NA,
T0_Type1_sCAD_p=NA,T0_Type2_sCAD_p=NA,T0_Type1_Type2_p=NA)
for(i in 1:length(unqPep)){
# Linear Model
lm1<-lm(Intensity~Group,data=pepDF %>%
filter(Name==unqPep[i] & Group!="Indeterminate" & timept=="T0"))
# Overall T0 ANOVA:
lm1FStat<-summary(lm1)$fstatistic
pepDFT0Res$T0_Anova[i]<-pf(lm1FStat[1],lm1FStat[2],lm1FStat[3],lower.tail=FALSE)
# T0 Means:
lm1Emmeans<-as.data.frame(emmeans(lm1,~Group))
pepDFT0Res$T0_sCAD[i]<-lm1Emmeans$emmean[lm1Emmeans$Group=="sCAD"]
pepDFT0Res$T0_Type1[i]<-lm1Emmeans$emmean[lm1Emmeans$Group=="Type 1"]
pepDFT0Res$T0_Type2[i]<-lm1Emmeans$emmean[lm1Emmeans$Group=="Type 2"]
# Pairwise T0:
lm1Pairs<-as.data.frame(pairs(emmeans(lm1,~Group),adjust="none"))
pepDFT0Res$T0_Type1_sCAD[i]<-
(-lm1Pairs$estimate[lm1Pairs$contrast=="sCAD - Type 1"])
pepDFT0Res$T0_Type2_sCAD[i]<-
(-lm1Pairs$estimate[lm1Pairs$contrast=="sCAD - Type 2"])
pepDFT0Res$T0_Type1_Type2[i]<-
(lm1Pairs$estimate[lm1Pairs$contrast=="Type 1 - Type 2"])
# Pairwise T0 p-value
pepDFT0Res$T0_Type1_sCAD_p[i]<-
(lm1Pairs$p.value[lm1Pairs$contrast=="sCAD - Type 1"])
pepDFT0Res$T0_Type2_sCAD_p[i]<-
(lm1Pairs$p.value[lm1Pairs$contrast=="sCAD - Type 2"])
pepDFT0Res$T0_Type1_Type2_p[i]<-
(lm1Pairs$p.value[lm1Pairs$contrast=="Type 1 - Type 2"])
print(i)
}
pepDFT0Res<-pepDFT0Res %>% left_join(pepAnno2,by=c("unqPep"="pepSeq"))
pepDFT0ResGood<-pepDFT0Res %>%
filter(goodQuant>.8 & T0_Type1_sCAD_p<.1 & T0_Type1_Type2_p<.1)
########### Peptide Temporal Difference analysis ###########
pepDFw<-pepDF %>% dplyr::select(-rep) %>%
tidyr::spread(key="timept",value="Intensity")
pepDFw$d<-pepDFw$T0-pepDFw$FU
pepDFDRes<-data.frame(unqPep=unqPep,D_sCAD=NA,D_Type1=NA,D_Type2=NA,
D_Anova=NA,D_Type1_sCAD=NA,D_Type2_sCAD=NA,D_Type1_Type2=NA,
D_Type1_sCAD_p=NA,D_Type2_sCAD_p=NA,D_Type1_Type2_p=NA)
for(i in 1:length(unqPep)){
# Linear Model
lm1<-lm(d~Group,data=pepDFw %>%
filter(Name==unqPep[i] & Group!="Indeterminate"))
# Overall T0 ANOVA:
lm1FStat<-summary(lm1)$fstatistic
pepDFDRes$D_Anova[i]<-pf(lm1FStat[1],lm1FStat[2],lm1FStat[3],lower.tail=FALSE)
# Time-point Means:
lm1Emmeans<-as.data.frame(emmeans(lm1,~Group))
pepDFDRes$D_sCAD[i]<-lm1Emmeans$emmean[lm1Emmeans$Group=="sCAD"]
pepDFDRes$D_Type1[i]<-lm1Emmeans$emmean[lm1Emmeans$Group=="Type 1"]
pepDFDRes$D_Type2[i]<-lm1Emmeans$emmean[lm1Emmeans$Group=="Type 2"]
# Pairwise D:
lm1Pairs<-as.data.frame(pairs(emmeans(lm1,~Group),adjust="none"))
pepDFDRes$D_Type1_sCAD[i]<-
(-lm1Pairs$estimate[lm1Pairs$contrast=="sCAD - Type 1"])
pepDFDRes$D_Type2_sCAD[i]<-
(-lm1Pairs$estimate[lm1Pairs$contrast=="sCAD - Type 2"])
pepDFDRes$D_Type1_Type2[i]<-
(lm1Pairs$estimate[lm1Pairs$contrast=="Type 1 - Type 2"])
# Pairwise T0 p-value
pepDFDRes$D_Type1_sCAD_p[i]<-
(lm1Pairs$p.value[lm1Pairs$contrast=="sCAD - Type 1"])
pepDFDRes$D_Type2_sCAD_p[i]<-
(lm1Pairs$p.value[lm1Pairs$contrast=="sCAD - Type 2"])
pepDFDRes$D_Type1_Type2_p[i]<-
(lm1Pairs$p.value[lm1Pairs$contrast=="Type 1 - Type 2"])
print(i)
}
pepDFDRes<-pepDFDRes %>% left_join(pepAnno2,by=c("unqPep"="pepSeq"))
pepDFDResGood<-pepDFDRes %>%
filter(goodQuant>.8 & D_Type1_sCAD_p<.1 & D_Type1_Type2_p<.1)
rm(lm1,lm1Emmeans,lm1Pairs,i,lm1FStat,unqPep)
########### Protein Aggregation ###########
protList<-paste(pepAnno2$proteins,collapse=";")
protList<-unique(unlist(str_split(protList,";")))
Prot<-data.frame(prot=protList,lvl=NA,nPep=NA,peps=NA)
for(prot in Prot$prot){
peps<-pepAnno2$pepSeq[grepl(prot,pepAnno2$proteins,fixed=TRUE) & pepAnno2$protN==1 &
pepAnno2$goodQuant>.3]
pepsAll<-pepAnno2$pepSeq[grepl(prot,pepAnno2$proteins,fixed=TRUE)]
if(length(peps)>0){
Prot$peps[Prot$prot==prot]<-paste(peps,collapse=";")
Prot$nPep[Prot$prot==prot]<-length(peps)
}
if(length(pepsAll)>0){
Prot$pepsAll[Prot$prot==prot]<-paste(pepsAll,collapse=";")
Prot$nPepAll[Prot$prot==prot]<-length(pepsAll)
}
}
Prot0<-Prot
ProtOut<-Prot[is.na(Prot$nPep),]
Prot<-Prot[!is.na(Prot$nPep),]
# Calculate mean to make the protein abundances
pepsInProtList<-list()
for(i in 1:nrow(Prot)){
pepsInProt<-unlist(str_split(Prot$peps[i],";"))
pepsInProtDF<-pep1[pep1$Name %in% pepsInProt,]
pepsInProtDF$Name<-NULL
pepsInProtDF<-t(t(apply(pepsInProtDF,2,mean)))
pepsInProtDF<-as.data.frame(pepsInProtDF)
names(pepsInProtDF)[1]<-"value"
pepsInProtDF$prot<-Prot$prot[i]
pepsInProtDF$rep<-rownames(pepsInProtDF)
pepsInProtList[[i]]<-pepsInProtDF
}
prots<-do.call("rbind",pepsInProtList)
# Add sample annotation:
prots$rep<-gsub("rep_","",prots$rep)
prots<-prots %>% left_join(pheno %>% dplyr::select(uSamp,Group,ptid,timept)
%>% unique(),
by=c("rep"="uSamp"))
unqProts<-unique(prots$prot)
########### Join prot data to peptide data ###########
# Baseline abundances:
pepDFT0Res$OtherPepGood<-pepDFT0Res$OtherPepTotal<-pepDFT0Res$otherCor<-pepDFT0Res$otherGoodCor<-NA
for(i in 1:nrow(pepDFT0Res)){
tempProts<-pepDFT0Res$proteins[i]
tempProts<-unlist(str_split(tempProts,";"))
tempProts<-str_split(str_split(tempProts,"\\|",simplify=TRUE)[,2],
"-",simplify=TRUE)[,1]
allPepsDF<-pepDFT0Res[grepl(paste(tempProts,collapse="|"),
pepDFT0Res$proteins),]
allPeps<-unique(allPepsDF$unqPep)
allPepsGood<-unique(allPepsDF[allPepsDF$goodQuant>.3,]$unqPep)
pepDFT0Res$OtherPepTotal[i]<-length(allPeps)
pepDFT0Res$OtherPepGood[i]<-length(allPepsGood)
# Correlation analysis:
mat1<-as.matrix(pep1[pep1$Name %in% allPeps,names(pep1)!="Name"])
mat2<-as.matrix(pep1[pep1$Name %in% allPepsGood,names(pep1)!="Name"])
corMat1<-cor(t(mat1))
corMat2<-cor(t(mat2))
pepDFT0Res$otherCor[i]<-mean(corMat1[rownames(corMat1)!=pepDFT0Res$unqPep[i],
colnames(corMat1)==pepDFT0Res$unqPep[i]])
pepDFT0Res$otherGoodCor[i]<-mean(corMat2[rownames(corMat2)!=pepDFT0Res$unqPep[i],
colnames(corMat2)==pepDFT0Res$unqPep[i]])
print(i)
}
# Change across time:
pepDFDRes$OtherPepGood<-pepDFDRes$OtherPepTotal<-pepDFDRes$otherCor<-pepDFDRes$otherGoodCor<-NA
for(i in 1:nrow(pepDFDRes)){
tempProts<-pepDFDRes$proteins[i]
tempProts<-unlist(str_split(tempProts,";"))
tempProts<-str_split(str_split(tempProts,"\\|",simplify=TRUE)[,2],
"-",simplify=TRUE)[,1]
allPepsDF<-pepDFDRes[grepl(paste(tempProts,collapse="|"),
pepDFDRes$proteins),]
allPeps<-unique(allPepsDF$unqPep)
allPepsGood<-unique(allPepsDF[allPepsDF$goodQuant>.3,]$unqPep)
pepDFDRes$OtherPepTotal[i]<-length(allPeps)
pepDFDRes$OtherPepGood[i]<-length(allPepsGood)
# Correlation analysis:
mat1<-as.matrix(pep1[pep1$Name %in% allPeps,names(pep1)!="Name"])
mat2<-as.matrix(pep1[pep1$Name %in% allPepsGood,names(pep1)!="Name"])
corMat1<-cor(t(mat1))
corMat2<-cor(t(mat2))
pepDFDRes$otherCor[i]<-median(corMat1[rownames(corMat1)!=pepDFDRes$unqPep[i],
colnames(corMat1)==pepDFDRes$unqPep[i]])
pepDFDRes$otherGoodCor[i]<-median(corMat2[rownames(corMat2)!=pepDFDRes$unqPep[i],
colnames(corMat2)==pepDFDRes$unqPep[i]])
print(i)
}
# Join:
pepDFRes<-pepDFT0Res %>% full_join(
pepDFDRes %>% dplyr::select("unqPep","D_sCAD","D_Type1","D_Type2","D_Anova",
"D_Type1_sCAD","D_Type2_sCAD","D_Type1_Type2",
"D_Type1_sCAD_p","D_Type2_sCAD_p","D_Type1_Type2_p"),
by=c("unqPep"))
########### Add peptide criteria ###########
# Criteria 1: Fold change across time in Type 1 (ln-scale) > 1;
# Diff between sCAD and Type 1 fold change across time (ln-scale) > 0.75
# pepDFRes$crit1 <- abs(pepDFRes$D_Type1 > 1) & abs(pepDFRes$D_Type1_sCAD > .75)
# New Criteria 1:
pepDFRes$crit1<-pepDFRes$D_Type1_sCAD_p<.05
# Criteria 2: If >1 other peptide from protein or family, correlation > .5?
pepDFRes$crit2 <- FALSE
pepDFRes$crit2[(pepDFRes$OtherPepTotal > 1 & pepDFRes$otherCor > .5) |
(pepDFRes$OtherPepTotal ==1)] <- TRUE
# Criteria 3: No missed cleavages
pepDFRes$crit3 <- pepDFRes$miss == 1
# Criteria 4: No oxidized Met
pepDFRes$crit4 <- !grepl("M\\[Oxid\\]",pepDFRes$unqPep)
# Criteria 5: If #1-#4
pepDFRes$crit5 <- pepDFRes$crit1 & pepDFRes$crit2 & pepDFRes$crit3 & pepDFRes$crit4
# Criteria 6: If different between type 1 MI and sCAD at T0:
# pepDFRes$crit6 <- abs(pepDFRes$T0_Type1_sCAD)>.5
# New Criteria 6:
pepDFRes$crit6<-pepDFRes$T0_Type1_sCAD_p<.1
# Criteria 7: Criteria 5 and 6:
pepDFRes$crit7 <- pepDFRes$crit5 & pepDFRes$crit6
# Export peptide results:
# write.csv(pepDFRes,"pepDFResV2.csv")
########### Protein level analysis ###########
# Baseline
protDFT0Res<-data.frame(prot=unqProts,T0_sCAD=NA,T0_Type1=NA,T0_Type2=NA,
T0_Anova=NA,T0_Type1_sCAD=NA,T0_Type2_sCAD=NA,T0_Type1_Type2=NA,
T0_Type1_sCAD_p=NA,T0_Type2_sCAD_p=NA,T0_Type1_Type2_p=NA)
for(i in 1:nrow(protDFT0Res)){
# Linear Model
lm1<-lm(value~Group,data=prots %>%
filter(prot==protDFT0Res$prot[i] & Group!="Indeterminate" & timept=="T0"))
# Overall T0 ANOVA:
lm1FStat<-summary(lm1)$fstatistic
protDFT0Res$T0_Anova[i]<-pf(lm1FStat[1],lm1FStat[2],lm1FStat[3],lower.tail=FALSE)
# T0 Means:
lm1Emmeans<-as.data.frame(emmeans(lm1,~Group))
protDFT0Res$T0_sCAD[i]<-lm1Emmeans$emmean[lm1Emmeans$Group=="sCAD"]
protDFT0Res$T0_Type1[i]<-lm1Emmeans$emmean[lm1Emmeans$Group=="Type 1"]
protDFT0Res$T0_Type2[i]<-lm1Emmeans$emmean[lm1Emmeans$Group=="Type 2"]
# Pairwise T0:
lm1Pairs<-as.data.frame(pairs(emmeans(lm1,~Group),adjust="none"))
protDFT0Res$T0_Type1_sCAD[i]<-
(-lm1Pairs$estimate[lm1Pairs$contrast=="sCAD - Type 1"])
protDFT0Res$T0_Type2_sCAD[i]<-
(-lm1Pairs$estimate[lm1Pairs$contrast=="sCAD - Type 2"])
protDFT0Res$T0_Type1_Type2[i]<-
(lm1Pairs$estimate[lm1Pairs$contrast=="Type 1 - Type 2"])
# Pairwise T0 p-value
protDFT0Res$T0_Type1_sCAD_p[i]<-
(lm1Pairs$p.value[lm1Pairs$contrast=="sCAD - Type 1"])
protDFT0Res$T0_Type2_sCAD_p[i]<-
(lm1Pairs$p.value[lm1Pairs$contrast=="sCAD - Type 2"])
protDFT0Res$T0_Type1_Type2_p[i]<-
(lm1Pairs$p.value[lm1Pairs$contrast=="Type 1 - Type 2"])
print(i)
}
############ Protein Change across time ############
protsW<-prots %>% dplyr::select(-rep) %>%
tidyr::spread(key="timept",value="value")
protsW$d<-protsW$T0-protsW$FU
protDFDRes<-data.frame(prot=unqProts,D_sCAD=NA,D_Type1=NA,D_Type2=NA,
D_Anova=NA,D_Type1_sCAD=NA,D_Type2_sCAD=NA,D_Type1_Type2=NA,
D_Type1_sCAD_p=NA,D_Type2_sCAD_p=NA,D_Type1_Type2_p=NA)
for(i in 1:nrow(protDFDRes)){
# Linear Model
lm1<-lm(d~Group,data=protsW %>%
filter(prot==protDFDRes$prot[i] & Group!="Indeterminate"))
# Overall T0 ANOVA:
lm1FStat<-summary(lm1)$fstatistic
protDFDRes$D_Anova[i]<-pf(lm1FStat[1],lm1FStat[2],lm1FStat[3],lower.tail=FALSE)
# D Means:
lm1Emmeans<-as.data.frame(emmeans(lm1,~Group))
protDFDRes$D_sCAD[i]<-lm1Emmeans$emmean[lm1Emmeans$Group=="sCAD"]
protDFDRes$D_Type1[i]<-lm1Emmeans$emmean[lm1Emmeans$Group=="Type 1"]
protDFDRes$D_Type2[i]<-lm1Emmeans$emmean[lm1Emmeans$Group=="Type 2"]
# Pairwise D:
lm1Pairs<-as.data.frame(pairs(emmeans(lm1,~Group),adjust="none"))
protDFDRes$D_Type1_sCAD[i]<-
(-lm1Pairs$estimate[lm1Pairs$contrast=="sCAD - Type 1"])
protDFDRes$D_Type2_sCAD[i]<-
(-lm1Pairs$estimate[lm1Pairs$contrast=="sCAD - Type 2"])
protDFDRes$D_Type1_Type2[i]<-
(lm1Pairs$estimate[lm1Pairs$contrast=="Type 1 - Type 2"])
# Pairwise D p-value
protDFDRes$D_Type1_sCAD_p[i]<-
(lm1Pairs$p.value[lm1Pairs$contrast=="sCAD - Type 1"])
protDFDRes$D_Type2_sCAD_p[i]<-
(lm1Pairs$p.value[lm1Pairs$contrast=="sCAD - Type 2"])
protDFDRes$D_Type1_Type2_p[i]<-
(lm1Pairs$p.value[lm1Pairs$contrast=="Type 1 - Type 2"])
print(i)
}
# Save
rm(corMat1,corMat2,lm1,lm1Emmeans,lm1FStat,lm1Pairs,mat1,mat2,allPeps,
allPepsGood,i,peps,pepsAll,pepsInProt,prot,protList,tempProts,unqProts)
save.image(file="working_20181007.RData")
########### Peptide plots ###########
load(file="working_20181007.RData")
sigPeps<-pepDFRes$unqPep[pepDFRes$crit7 & pepDFRes$length<25 & pepDFRes$goodQuant > .05]
sigPeps2<-pepDFRes$unqPep[grepl("P04275",pepDFRes$proteins)]
sigPeps<-c(sigPeps,sigPeps2)
pepsSig<-pepDFRes[pepDFRes$unqPep %in% sigPeps,]
customTitles<-matrix(c(
"NSEEFAAAMSR","Apolipoprotein B-100",
"YYTYLIMNK","Complement C3",
"C[Carboxymethyl]EWETPEGC[Carboxymethyl]EQVLTGK","C4b-binding protein alpha chain",
"LQQVLHAGSGPC[Carboxymethyl]LPHLLSR","Alpha-2-antiplasmin",
"LC[Carboxymethyl]MGSGLNLC[Carboxymethyl]EPNNK","Serotransferrin",
"KPVAFSDYIHPVC[Carboxymethyl]LPDR","Prothrombin",
"YEITTIHNLFR","Heparin cofactor 2",
"EVVADSVWVDVK","Complement C3",
"TVMVNIENPEGIPVK","Complement C3",
"TSSFALNLPTLPEVK","Apolipoprotein B-100",
"NFVASHIANILNSEELDIQDLK","Apolipoprotein B-100",
"YTIAALLSPYSYSTTAVVTNPK","Transthyretin",
"EALQGVGDMGR","Serum amyloid A-4 protein",
"EWFSETFQK","Apolipoprotein C-I",
"NIQEYLSILTDPDGK","Apolipoprotein B-100",
"SGSSTASWIQNVDTK","Apolipoprotein B-100",
"AGPHC[Carboxymethyl]PTAQLIATLK","Platelet Factor 4",
"AASGTTGTYQEWK","Apolipoprotein B-100",
"GTHC[Carboxymethyl]NQVEVIATLK","Platelet basic protein",
"LIVAMSSWLQK","Apolipoprotein B-100",
"HIQNIDIQHLAGK","Apolipoprotein B-100",
"EDVYVVGTVLR","C4b-binding protein alpha chain",
"EELC[Carboxymethyl]TMFIR","Apolipoprotein B-100",
"HITSLEVIK","Platelet factor 4",
"SLMLHYEFLQR","Complement component C8 beta chain",
"ALVEQGFTVPEIK","Apolipoprotein B-100",
"AVLTIDEK","Alpha-1-antitrypsin",
"LC[Carboxymethyl]GC[Carboxymethyl]LELLLQFDQK","RUN and FYVE domain-containing protein 4",
"NIQSLEVIGK","Platelet basic protein",
"ATAVVDGAFK","Peroxiredoxin-2",
"VIVIPVGIGPHANLK","von Willebrand factor",
"YTLFQIFSK","von Willebrand factor"
),
ncol=2,byrow=TRUE)
customTitles<-as.data.frame(customTitles)
names(customTitles)<-c("unqPep","Title")
pepsSig<-pepsSig %>% left_join(customTitles)
# Plot function:
pFun1 <- function(iter, ylower, yupper) {
  # Spaghetti plot of one significant peptide's abundance over time,
  # faceted by clinical group. `iter` indexes a row of the global
  # `pepsSig`; `ylower`/`yupper` set the y-axis limits.
  pep <- pepsSig$unqPep[iter]
  sub_txt <- paste0(pepsSig$Title[iter], ". cor = ", round(pepsSig$otherCor[iter], 3))
  # Rows of the global long table `pepDF` for this peptide, dropping the
  # "Indeterminate" group.
  plot_dat <- pepDF[pepDF$Name == pep & pepDF$Group != "Indeterminate", ]
  ggplot(plot_dat, aes(timept, Intensity, color = Group, group = ptid)) +
    geom_point() +
    geom_line() +
    ylim(ylower, yupper) +
    theme_bw() +
    facet_grid(~Group) +
    ggtitle(pep, subtitle = sub_txt) +
    xlab("Time-point") +
    ylab("Abundance") +
    theme(plot.title = element_text(hjust = 0.5),
          plot.subtitle = element_text(hjust = 0.5))
}
pepsSig<-pepsSig %>% arrange(Title,unqPep)
# pFun1(1,-2,2)
# pFun1(2,-3,3)
pFun1(3,-1.5,1.75)
pFun1(4,-3,3)
pFun1(5,-3,4)
pFun1(6,-1.25,2.1)
pFun1(7,-2,2.25)
pFun1(8,-1,1.25)
pFun1(9,-1.5,3)
pFun1(10,-1.1,1.5)
pFun1(11,-3,3)
pFun1(12,-1.1,1.3)
pFun1(13,-1.2,1)
# pFun1(14,-3,3)
# pFun1(15,-3,3)
# pFun1(16,-3,3)
# pFun1(17,-3,3)
# pFun1(18,-3,3)
# pFun1(19,-3,3)
pFun1(20,-1.1,1)
# pFun1(21,-3,3)
pFun1(22,-7.5,5.25)
pFun1(23,-2,3)
pFun1(24,-6.5,5.25)
pFun1(25,-5,5)
pFun1(26,-.9,.7)
# pFun1(27,-3,3)
# pFun1(28,-3,3)
# pFun1(29,-3,3)
pFun1(30,-1.2,1)
pFun1(31,-5,3)
pFun1(32,-4.5,3.2)
########### Peptide pull protein analysis ###########
setwd("~/gdrive/AthroProteomics")
load(file="working_20180821.RData")
# Which proteins:
str_split(pepDFRes$proteins,";") |
# One-time setup — uncomment to install the required packages:
# install.packages("jpeg",dependencies = TRUE)
# install.packages("ggplot2",dependencies = TRUE)
library(jpeg)     # readJPEG(): decodes a JPEG file into a numeric array
library(ggplot2)  # plotting
# File names of the images to segment (read from the working directory).
img1 = "image1.jpg"
img2 = "image2.jpg"
img3 = "image3.jpg"
img4 = "image4.jpg"
img5 = "image5.jpg"
images <- c(img1, img2, img3, img4, img5)
# Debugging aids kept for reference: a single-image run and an echo check.
# images <- c("image1.jpg")
# print(images)
# For every image: flatten pixels into an (x, y, R, G, B) data frame,
# show the original, k-means cluster the RGB values, and write the
# cluster-coloured image to Clustered_Images/ as a JPEG.
for(img in images){
  # Keep the file name before `img` is reassigned to the pixel array below.
  img_name <- img
  img <- readJPEG(img)
  # print(img)
  dimensions <- dim(img)  # c(height, width, channels)
  # One row per pixel; y counts down so the plot is not vertically flipped.
  img_rgb <- data.frame(
    x = rep(seq_len(dimensions[2]), each = dimensions[1]),
    y = rep(dimensions[1]:1, dimensions[2]),
    R = as.vector(img[,,1]),
    G = as.vector(img[,,2]),
    B = as.vector(img[,,3])
  )
  # BUG FIX: inside a for loop a ggplot object is not auto-printed, so the
  # original-image plot was built and silently discarded; print() renders it.
  print(
    ggplot(data = img_rgb, aes(x = x, y = y)) +
      geom_point(colour = rgb(img_rgb[c("R", "G", "B")])) +
      labs(title = "Original Image") +
      xlab("x") +
      ylab("y")
  )
  k <- 2  # number of colour clusters
  kmeans_img <- kmeans(img_rgb[, c("R", "G", "B")], centers = k, iter.max = 5, nstart = 3)
  # Hex colour of each pixel's cluster centroid.
  clustered_img <- rgb(kmeans_img$centers[kmeans_img$cluster, ])
  diag_path <- file.path(getwd(), "Clustered_Images",
                         paste(k, "-clustered-", img_name, ".jpeg", sep = ""))
  jpeg(file = diag_path)
  # BUG FIX: the title previously pasted `img` — at this point the whole
  # pixel ARRAY, not the file name — producing a huge garbage character
  # vector as the title; use img_name instead.
  pl <- ggplot(data = img_rgb, aes(x = x, y = y)) +
    geom_point(colour = clustered_img) +
    labs(title = paste(k, "color cluster on ", img_name)) +
    xlab("x") +
    ylab("y")
  plot(pl)
  dev.off()
}
| /K-Means & Clustering Images/Part-III/ImageSeg.R | no_license | SravaniLingam/Machine-Learning | R | false | false | 1,384 | r | # install.packages("jpeg",dependencies = TRUE)
# install.packages("ggplot2",dependencies = TRUE)
library(jpeg)
library(ggplot2)
img1 = "image1.jpg"
img2 = "image2.jpg"
img3 = "image3.jpg"
img4 = "image4.jpg"
img5 = "image5.jpg"
images <- c(img1, img2, img3, img4, img5)
# images <- c("image1.jpg")
# print(images)
# For every image: flatten pixels into an (x, y, R, G, B) data frame,
# show the original, k-means cluster the RGB values, and write the
# cluster-coloured image to Clustered_Images/ as a JPEG.
for(img in images){
  # Keep the file name before `img` is reassigned to the pixel array below.
  img_name <- img
  img <- readJPEG(img)
  # print(img)
  dimensions <- dim(img)  # c(height, width, channels)
  # One row per pixel; y counts down so the plot is not vertically flipped.
  img_rgb <- data.frame(
    x = rep(seq_len(dimensions[2]), each = dimensions[1]),
    y = rep(dimensions[1]:1, dimensions[2]),
    R = as.vector(img[,,1]),
    G = as.vector(img[,,2]),
    B = as.vector(img[,,3])
  )
  # BUG FIX: inside a for loop a ggplot object is not auto-printed, so the
  # original-image plot was built and silently discarded; print() renders it.
  print(
    ggplot(data = img_rgb, aes(x = x, y = y)) +
      geom_point(colour = rgb(img_rgb[c("R", "G", "B")])) +
      labs(title = "Original Image") +
      xlab("x") +
      ylab("y")
  )
  k <- 2  # number of colour clusters
  kmeans_img <- kmeans(img_rgb[, c("R", "G", "B")], centers = k, iter.max = 5, nstart = 3)
  # Hex colour of each pixel's cluster centroid.
  clustered_img <- rgb(kmeans_img$centers[kmeans_img$cluster, ])
  diag_path <- file.path(getwd(), "Clustered_Images",
                         paste(k, "-clustered-", img_name, ".jpeg", sep = ""))
  jpeg(file = diag_path)
  # BUG FIX: the title previously pasted `img` — at this point the whole
  # pixel ARRAY, not the file name — producing a huge garbage character
  # vector as the title; use img_name instead.
  pl <- ggplot(data = img_rgb, aes(x = x, y = y)) +
    geom_point(colour = clustered_img) +
    labs(title = paste(k, "color cluster on ", img_name)) +
    xlab("x") +
    ylab("y")
  plot(pl)
  dev.off()
}
|
library(radiant.data)
### Name: make_train
### Title: Generate a variable used to select a training sample
### Aliases: make_train
### ** Examples
make_train(.5, 10)
| /data/genthat_extracted_code/radiant.data/examples/make_train.Rd.R | no_license | surayaaramli/typeRrh | R | false | false | 176 | r | library(radiant.data)
### Name: make_train
### Title: Generate a variable used to select a training sample
### Aliases: make_train
### ** Examples
make_train(.5, 10)
|
#' An ecological niche model created with Maxent
#'
#' A RasterLayer containing an ecological niche model for a tick
#' (*Amblyomma americanum*).
#'
#' @format A RasterLayer with 150 rows, 249 columns, and 37350 cells:
#' \describe{
#' \item{Suitability}{suitability, in probability values.}
#' }
#'
#' @source \url{https://kuscholarworks.ku.edu/handle/1808/26376}
#'
#' @name sp_model
#'
#' @examples
#' model <- raster::raster(system.file("extdata", "sp_model.tif",
#' package = "rangemap"))
#'
#' raster::plot(model)
NULL
#' A set of environmental variables for examples
#'
#' A RasterStack containing four bioclimatic variables downloaded from the
#' WorldClim database 1.4.
#'
#' @format A RasterStack with 180 rows, 218 columns, 39240 cells, and 4 layers:
#' \describe{
#' \item{variables.1}{bio5.}
#' \item{variables.2}{bio6.}
#' \item{variables.3}{bio13.}
#' \item{variables.4}{bio14.}
#' }
#'
#' @source \url{https://www.worldclim.org/data/v1.4/worldclim14.html}
#'
#' @name variables
#'
#' @examples
#' vars <- raster::stack(system.file("extdata", "variables.tif",
#' package = "rangemap"))
#' names(vars) <- c("bio5", "bio6", "bio13", "bio14")
#'
#' raster::plot(vars)
NULL
| /R/raster_doc.R | no_license | claununez/rangemap | R | false | false | 1,271 | r | #' An ecological niche model created with Maxent
#'
#' A RasterLayer containing an ecological niche model for a tick
#' (*Amblyomma americanum*).
#'
#' @format A RasterLayer with 150 rows, 249 columns, and 37350 cells:
#' \describe{
#' \item{Suitability}{suitability, in probability values.}
#' }
#'
#' @source \url{https://kuscholarworks.ku.edu/handle/1808/26376}
#'
#' @name sp_model
#'
#' @examples
#' model <- raster::raster(system.file("extdata", "sp_model.tif",
#' package = "rangemap"))
#'
#' raster::plot(model)
NULL
#' A set of environmental variables for examples
#'
#' A RasterStack containing four bioclimatic variables downloaded from the
#' WorldClim database 1.4.
#'
#' @format A RasterStack with 180 rows, 218 columns, 39240 cells, and 4 layers:
#' \describe{
#' \item{variables.1}{bio5.}
#' \item{variables.2}{bio6.}
#' \item{variables.3}{bio13.}
#' \item{variables.4}{bio14.}
#' }
#'
#' @source \url{https://www.worldclim.org/data/v1.4/worldclim14.html}
#'
#' @name variables
#'
#' @examples
#' vars <- raster::stack(system.file("extdata", "variables.tif",
#' package = "rangemap"))
#' names(vars) <- c("bio5", "bio6", "bio13", "bio14")
#'
#' raster::plot(vars)
NULL
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data.R
\docType{data}
\name{peace}
\alias{peace}
\title{Data on Public Support for War in a Sample of US Respondents}
\format{
A data frame with 1,273 rows and 17 columns: \describe{
\item{threatc}{number of adverse events respondents considered probable if the US did not engage in war}
\item{ally}{a dummy variable indicating whether the country had signed a military alliance with the US}
\item{trade}{a dummy variable indicating whether the country had high levels of trade with the US}
\item{h1}{an index measuring respondent's attitude toward militarism}
\item{i1}{an index measuring respondent's attitude toward internationalism}
\item{p1}{an index measuring respondent's identification with the Republican party}
\item{e1}{an index measuring respondent's attitude toward ethnocentrism}
\item{r1}{an index measuring respondent's attitude toward religiosity}
\item{male}{a dummy variable indicating whether the respondent is male}
\item{white}{a dummy variable indicating whether the respondent is white}
\item{age}{respondent's age}
\item{ed4}{respondent's education with categories ranging from high school or less to postgraduate degree}
\item{democ}{a dummy variable indicating whether the country was a democracy}
\item{strike}{a measure of support for war on a five-point scale}
\item{cost}{number of negative consequences anticipated if the US engaged in war}
\item{successc}{whether the respondent thought the operation would succeed. 0: less than 50-50 chance of working even in the short run; 1: efficacious only in the short run; 2: successful both in the short and long run}
\item{immoral}{a dummy variable indicating whether respondents thought it would be morally wrong to strike the country}
}
}
\usage{
peace
}
\description{
A dataset containing 17 variables on the views of 1,273 US adults about their support
for war against countries that were hypothetically developing nuclear weapons. The data include
several variables on the country's features and respondents' demographic and attitudinal characteristics
(Tomz and Weeks 2013; Zhou and Wodtke 2020).
}
\references{
Tomz, Michael R., and Jessica L. P. Weeks. 2013. Public Opinion and the Democratic Peace.
The American Political Science Review 107(4):849-65.
Zhou, Xiang, and Geoffrey T. Wodtke. 2020. Residual Balancing:
A Method of Constructing Weights for Marginal Structural Models. Political Analysis 28(4):487-506.
}
\keyword{datasets}
| /man/peace.Rd | no_license | xiangzhou09/rbw | R | false | true | 2,539 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data.R
\docType{data}
\name{peace}
\alias{peace}
\title{Data on Public Support for War in a Sample of US Respondents}
\format{
A data frame with 1,273 rows and 17 columns: \describe{
\item{threatc}{number of adverse events respondents considered probable if the US did not engage in war}
\item{ally}{a dummy variable indicating whether the country had signed a military alliance with the US}
\item{trade}{a dummy variable indicating whether the country had high levels of trade with the US}
\item{h1}{an index measuring respondent's attitude toward militarism}
\item{i1}{an index measuring respondent's attitude toward internationalism}
\item{p1}{an index measuring respondent's identification with the Republican party}
\item{e1}{an index measuring respondent's attitude toward ethnocentrism}
\item{r1}{an index measuring respondent's attitude toward religiosity}
\item{male}{a dummy variable indicating whether the respondent is male}
\item{white}{a dummy variable indicating whether the respondent is white}
\item{age}{respondent's age}
\item{ed4}{respondent's education with categories ranging from high school or less to postgraduate degree}
\item{democ}{a dummy variable indicating whether the country was a democracy}
\item{strike}{a measure of support for war on a five-point scale}
\item{cost}{number of negative consequences anticipated if the US engaged in war}
\item{successc}{whether the respondent thought the operation would succeed. 0: less than 50-50 chance of working even in the short run; 1: efficacious only in the short run; 2: successful both in the short and long run}
\item{immoral}{a dummy variable indicating whether respondents thought it would be morally wrong to strike the country}
}
}
\usage{
peace
}
\description{
A dataset containing 17 variables on the views of 1,273 US adults about their support
for war against countries that were hypothetically developing nuclear weapons. The data include
several variables on the country's features and respondents' demographic and attitudinal characteristics
(Tomz and Weeks 2013; Zhou and Wodtke 2020).
}
\references{
Tomz, Michael R., and Jessica L. P. Weeks. 2013. Public Opinion and the Democratic Peace.
The American Political Science Review 107(4):849-65.
Zhou, Xiang, and Geoffrey T. Wodtke. 2020. Residual Balancing:
A Method of Constructing Weights for Marginal Structural Models. Political Analysis 28(4):487-506.
}
\keyword{datasets}
|
# Build a list of indicator ("U") matrices, one per column of `mat`.
#
# Each element is IndMatrix() applied to a column, with its column names
# tagged as "<factor>(<level>)".  The result is a named list of class
# "Umatrix".
#
# Args:
#   mat  a matrix (or data frame) with named columns of factor codes.
# Returns: a named list of indicator matrices, class "Umatrix".
generateu <- function(mat) {
  factor_names <- colnames(mat)
  r <- length(factor_names)
  # Preallocate a list of length r.  (The original `list(r)` made a
  # length-1 list containing the number r and relied on `[[<-` to grow
  # it; also renamed the local away from `names` to avoid shadowing
  # base::names.)
  U <- vector("list", r)
  # seq_len(r) is safe when mat has zero columns (1:r would give c(1, 0)).
  for (i in seq_len(r)) {
    U[[i]] <- IndMatrix(mat[, i])
    colnames(U[[i]]) <- paste(factor_names[i], "(", colnames(U[[i]]), ")", sep = "")
  }
  names(U) <- factor_names
  class(U) <- "Umatrix"
  U
}
| /minque/R/generateu.R | no_license | ingted/R-Examples | R | false | false | 265 | r | generateu <-
function(mat){
names=colnames(mat)
r=length(names)
U=list(r)
for(i in 1:r){
U[[i]]=IndMatrix(mat[,i])
colnames(U[[i]])=paste(names[i],"(",colnames(U[[i]]),")",sep="")
}
names(U)=names
class(U)="Umatrix"
return(U)
}
|
#' Create flow diagram of model
#'
#' Draws a fixed path diagram: two latent variables (large circles), two
#' covariates (squares, bottom row) and three observed responses (squares,
#' top row), connected by arrows for the modelled paths.  Output goes to
#' the active graphics device; no value is returned.
#'
#' @author Christopher J. Brown
#' @rdname create_model_diagram
#' @export
create_model_diagram <- function(){
	par(mar = c(rep(0.5, 4)))
	# Blank unit-square canvas: no axes, labels or bounding box.
	plot(0,0, xlim = c(0,1), ylim = c(0,1), type = 'n', xlab = '', ylab = '', xaxt = 'n', yaxt = 'n', bty = 'n')
	# Latent variables as large open circles (default pch = 1).
	points(0.25, 0.5, cex = 20)
	points(0.75, 0.5, cex = 20)
	# Covariates (bottom row) and responses (top row) as open squares (pch = 0).
	points(0.25, 0.1, cex = 15, pch = 0)
	points(0.55, 0.1, cex = 15, pch = 0)
	points(0.25, 0.9, cex = 15, pch = 0)
	points(0.5, 0.9, cex = 15, pch = 0)
	points(0.75, 0.9, cex = 15, pch = 0)
	# Shared arrow styling: line width, arrowhead length, colour.
	alwd <- 2
	alen <- 0.2
	acol <- 'grey'
	# Paths from latent variable 2 (grey) to the three response squares.
	arrows(0.75, 0.65, 0.25, 0.79, lwd = alwd, len = alen, col = acol)
	arrows(0.75, 0.65, 0.55, 0.79, lwd = alwd, len = alen, col = acol)
	arrows(0.75, 0.65, 0.75, 0.79, lwd = alwd, len = alen, col = acol)
	# Paths from latent variable 1 (black) to the three response squares.
	arrows(0.25, 0.65, 0.25, 0.79, lwd = alwd, len = alen)
	arrows(0.25, 0.65, 0.55, 0.79, lwd = alwd, len = alen)
	arrows(0.25, 0.65, 0.75, 0.79, lwd = alwd, len = alen)
	# Paths from the two covariates into latent variable 1.
	arrows(0.25, 0.21, 0.25, 0.35, lwd = alwd, len = alen)
	arrows(0.55, 0.21, 0.28, 0.35, lwd = alwd, len = alen)
	# Node labels ('\n' breaks long labels over several lines).
	text(0.25, 0.1, paste('Distance to','\n','nearest sediment','\n','source'), cex = 0.8)
	text(0.55, 0.1, 'Flow strength')
	text(0.25, 0.5, 'Latent variable 1')
	text(0.75, 0.5, 'Latent variable 2')
	text(0.25, 0.89, 'Turf algae', font = 3)
	text(0.5, 0.89, paste('Branching','\n','coral'), font = 3)
	text(0.75, 0.89, 'Silt', font = 3)
}
| /R/create_model_diagram.R | no_license | cbrown5/BenthicLatent | R | false | false | 1,387 | r | #' Create flow diagram of model
#'
#' @author Christopher J. Brown
#' @rdname create_model_diagram
#' @export
create_model_diagram <- function(){
	par(mar = c(rep(0.5, 4)))
	# Blank unit-square canvas: no axes, labels or bounding box.
	plot(0,0, xlim = c(0,1), ylim = c(0,1), type = 'n', xlab = '', ylab = '', xaxt = 'n', yaxt = 'n', bty = 'n')
	# Latent variables as large open circles (default pch = 1).
	points(0.25, 0.5, cex = 20)
	points(0.75, 0.5, cex = 20)
	# Covariates (bottom row) and responses (top row) as open squares (pch = 0).
	points(0.25, 0.1, cex = 15, pch = 0)
	points(0.55, 0.1, cex = 15, pch = 0)
	points(0.25, 0.9, cex = 15, pch = 0)
	points(0.5, 0.9, cex = 15, pch = 0)
	points(0.75, 0.9, cex = 15, pch = 0)
	# Shared arrow styling: line width, arrowhead length, colour.
	alwd <- 2
	alen <- 0.2
	acol <- 'grey'
	# Paths from latent variable 2 (grey) to the three response squares.
	arrows(0.75, 0.65, 0.25, 0.79, lwd = alwd, len = alen, col = acol)
	arrows(0.75, 0.65, 0.55, 0.79, lwd = alwd, len = alen, col = acol)
	arrows(0.75, 0.65, 0.75, 0.79, lwd = alwd, len = alen, col = acol)
	# Paths from latent variable 1 (black) to the three response squares.
	arrows(0.25, 0.65, 0.25, 0.79, lwd = alwd, len = alen)
	arrows(0.25, 0.65, 0.55, 0.79, lwd = alwd, len = alen)
	arrows(0.25, 0.65, 0.75, 0.79, lwd = alwd, len = alen)
	# Paths from the two covariates into latent variable 1.
	arrows(0.25, 0.21, 0.25, 0.35, lwd = alwd, len = alen)
	arrows(0.55, 0.21, 0.28, 0.35, lwd = alwd, len = alen)
	# Node labels ('\n' breaks long labels over several lines).
	text(0.25, 0.1, paste('Distance to','\n','nearest sediment','\n','source'), cex = 0.8)
	text(0.55, 0.1, 'Flow strength')
	text(0.25, 0.5, 'Latent variable 1')
	text(0.75, 0.5, 'Latent variable 2')
	text(0.25, 0.89, 'Turf algae', font = 3)
	text(0.5, 0.89, paste('Branching','\n','coral'), font = 3)
	text(0.75, 0.89, 'Silt', font = 3)
}
|
\name{bowlerPerfForecast}
\alias{bowlerPerfForecast}
\title{
Forecast the bowler performance based on past performances using Holt-Winters forecasting
}
\description{
This function forecasts the performance of the bowler based on past performances using HoltWinters
forecasting model
}
\usage{
bowlerPerfForecast(file, name = "A Googly")
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{file}{
This is the <bowler>.csv file obtained with an initial getPlayerData()
}
\item{name}{
Name of the bowler
}
}
\details{
More details can be found in my short video tutorial in Youtube
https://www.youtube.com/watch?v=q9uMPFVsXsI
}
\value{
None
}
\references{
http://www.espncricinfo.com/ci/content/stats/index.html\cr
https://gigadom.wordpress.com/
}
\author{
Tinniam V Ganesh
}
\note{
Maintainer: Tinniam V Ganesh <tvganesh.85@gmail.com>
}
%% ~Make other sections like Warning with \section{Warning }{....} ~
\seealso{
\code{\link{bowlerEconRate}},
\code{\link{bowlerMovingAverage}},
\code{\link{bowlerContributionWonLost}}
}
\examples{
# Get or use the <bowler>.csv obtained with getPlayerData()
# a <- getPlayerData(30176,file="kumble.csv",type="batting", homeOrAway=c(1,2),result=c(1,2,4))
bowlerPerfForecast("../cricketr/data/kumble.csv","Anil Kumble")
}
% Add one or more standard keywords, see file 'KEYWORDS' in the
% R documentation directory.
\keyword{ ~kwd1 }
\keyword{ ~kwd2 }% __ONLY ONE__ keyword per line
| /man/bowlerPerfForecast.Rd | no_license | tvganesh/pkg | R | false | false | 1,449 | rd | \name{bowlerPerfForecast}
\alias{bowlerPerfForecast}
\title{
Forecast the bowler performance based on past performances using Holt-Winters forecasting
}
\description{
This function forecasts the performance of the bowler based on past performances using HoltWinters
forecasting model
}
\usage{
bowlerPerfForecast(file, name = "A Googly")
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{file}{
This is the <bowler>.csv file obtained with an initial getPlayerData()
}
\item{name}{
Name of the bowler
}
}
\details{
More details can be found in my short video tutorial in Youtube
https://www.youtube.com/watch?v=q9uMPFVsXsI
}
\value{
None
}
\references{
http://www.espncricinfo.com/ci/content/stats/index.html\cr
https://gigadom.wordpress.com/
}
\author{
Tinniam V Ganesh
}
\note{
Maintainer: Tinniam V Ganesh <tvganesh.85@gmail.com>
}
%% ~Make other sections like Warning with \section{Warning }{....} ~
\seealso{
\code{\link{bowlerEconRate}},
\code{\link{bowlerMovingAverage}},
\code{\link{bowlerContributionWonLost}}
}
\examples{
# Get or use the <bowler>.csv obtained with getPlayerData()
# a <- getPlayerData(30176,file="kumble.csv",type="batting", homeOrAway=c(1,2),result=c(1,2,4))
bowlerPerfForecast("../cricketr/data/kumble.csv","Anil Kumble")
}
% Add one or more standard keywords, see file 'KEYWORDS' in the
% R documentation directory.
\keyword{ ~kwd1 }
\keyword{ ~kwd2 }% __ONLY ONE__ keyword per line
|
# Simulate data from the Friedman (1991) benchmark function
#   f(x) = 10 sin(pi x1 x2) + 20 (x3 - 0.5)^2 + 10 x4 + 5 x5
# with 10 uniform covariates (columns 6-10 are pure noise), optionally adding
# random effects (`ranef`), a binary treatment with constant effect tau = 5
# plus potential outcomes (`causal`), and a probit-link binary response
# (`binary`).
#
# NOTE: set.seed(99) is called inside the function, so repeated calls with
# the same arguments return identical data.
#
# Returns a list always containing x, sigma and y; depending on the flags it
# also carries the decomposed means (mu.bart / mu.fixef / mu.ranef and their
# .0/.1 potential-outcome versions), grouping vectors g.1/g.2 with their
# variance components, and tau, z, y.0, y.1.
generateFriedmanData <- function(n, ranef = FALSE, causal = FALSE, binary = FALSE) {
  # Friedman's five-term test function.
  f <- function(x)
    10 * sin(pi * x[,1] * x[,2]) + 20 * (x[,3] - 0.5)^2 + 10 * x[,4] + 5 * x[,5]
  set.seed(99)
  sigma <- 1.0
  x <- matrix(runif(n * 10), n, 10)
  mu <- f(x)
  result <- list(x = x, sigma = sigma)
  if (ranef) {
    # Two crossed grouping factors: g.1 (5 levels) gets a correlated random
    # intercept + random slope on x4; g.2 (8 levels) a random intercept only.
    n.g.1 <- 5L
    n.g.2 <- 8L
    # within() evaluates the block with result's elements and the enclosing
    # locals in scope, and adds every name assigned here to `result`.
    result <- within(result, {
      g.1 <- sample(n.g.1, n, replace = TRUE)
      Sigma.b.1 <- matrix(c(1.5^2, .2, .2, 1^2), 2)
      R.b <- chol(Sigma.b.1)
      # Draw (intercept, slope) pairs with covariance Sigma.b.1.
      b.1 <- matrix(rnorm(2 * n.g.1), n.g.1) %*% R.b
      rm(R.b)
      g.2 <- sample(n.g.2, n, replace = TRUE)
      Sigma.b.2 <- as.matrix(1.2)
      b.2 <- rnorm(n.g.2, 0, sqrt(Sigma.b.2))
      # Decompose the mean: linear fixed part (10 * x4), remaining nonlinear
      # ("bart") part, and the random-effect contribution.
      mu.fixef <- x[,4] * 10
      mu.bart <- mu - mu.fixef
      mu.ranef <- b.1[g.1,1] + x[,4] * b.1[g.1,2] + b.2[g.2]
      mu <- mu + mu.ranef
    })
  } else {
    # Same decomposition without random effects; these stay local and are
    # only folded into `result` by the within() blocks below.
    mu.fixef <- x[,4] * 10
    mu.bart <- mu - mu.fixef
  }
  if (causal) {
    result <- within(result, {
      tau <- 5                 # constant treatment effect
      z <- rbinom(n, 1, 0.2)   # treatment assignment, P(z = 1) = 0.2
    })
    if (ranef) {
      result <- within(result, {
        # Potential-outcome means: treatment shifts only the fixed part.
        mu.fixef.0 <- mu.fixef
        mu.fixef.1 <- mu.fixef.0 + tau
        mu.bart.0 <- mu.bart.1 <- mu.bart
        mu.ranef.0 <- mu.ranef.1 <- mu.ranef
        mu.0 <- mu.bart.0 + mu.fixef.0 + mu.ranef.0
        mu.1 <- mu.bart.1 + mu.fixef.1 + mu.ranef.1
      })
    } else {
      result <- within(result, {
        mu.fixef.0 <- mu.fixef
        mu.fixef.1 <- mu.fixef.0 + tau
        mu.bart.0 <- mu.bart.1 <- mu.bart
        mu.0 <- mu.bart.0 + mu.fixef.0
        mu.1 <- mu.bart.1 + mu.fixef.1
      })
    }
    if (binary) {
      result <- within(result, {
        # Standardise the latent means before the probit draw.  The scale
        # divisor uses qnorm(0.15) — presumably to tie the latent spread to
        # a ~15% tail probability; note qnorm(0.15) < 0, so the sign of the
        # standardised means is flipped.  TODO confirm intent.
        loc <- mean(c(mu.0, mu.1))
        scale <- sd(c(mu.0, mu.1)) / qnorm(0.15)
        mu.0 <- (mu.0 - loc) / scale
        mu.1 <- (mu.1 - loc) / scale
        mu.fixef.0 <- (mu.fixef.0 - loc) / scale
        mu.fixef.1 <- (mu.fixef.1 - loc) / scale
        mu.bart.0 <- mu.bart.0 / scale
        mu.bart.1 <- mu.bart.1 / scale
        if (ranef) {
          mu.ranef.0 <- mu.ranef.0 / scale
          mu.ranef.1 <- mu.ranef.1 / scale
        }
        rm(loc, scale)
        y.0 <- rbinom(n, 1L, pnorm(mu.0))
        y.1 <- rbinom(n, 1L, pnorm(mu.1))
        # Observed outcome under consistency: y = z*y1 + (1 - z)*y0.
        y <- y.1 * z + y.0 * (1 - z)
      })
    } else {
      result <- within(result, {
        y.0 <- mu.0 + rnorm(n, 0, sigma)
        y.1 <- mu.1 + rnorm(n, 0, sigma)
        y <- y.1 * z + y.0 * (1 - z)
      })
    }
    # Drop the overall-mean fields; only the .0/.1 decompositions are kept.
    result$mu <- NULL
    result$mu.fixef <- NULL
    result$mu.ranef <- NULL
  } else {
    if (binary) {
      result <- within(result, {
        # Same standardisation as above, using the single mean vector.
        loc <- mean(mu)
        scale <- sd(mu) / qnorm(0.15)
        mu <- (mu - loc) / scale
        mu.fixef <- (mu.fixef - loc) / scale
        mu.bart <- mu.bart / scale
        if (ranef)
          mu.ranef <- mu.ranef / scale
        rm(loc, scale)
        y <- rbinom(n, 1L, pnorm(mu))
      })
    } else {
      result <- within(result, {
        y <- mu + rnorm(n, 0, sigma)
      })
    }
  }
  result
}
| /inst/common/friedmanData.R | no_license | cran/bartCause | R | false | false | 3,137 | r | generateFriedmanData <- function(n, ranef = FALSE, causal = FALSE, binary = FALSE) {
f <- function(x)
10 * sin(pi * x[,1] * x[,2]) + 20 * (x[,3] - 0.5)^2 + 10 * x[,4] + 5 * x[,5]
set.seed(99)
sigma <- 1.0
x <- matrix(runif(n * 10), n, 10)
mu <- f(x)
result <- list(x = x, sigma = sigma)
if (ranef) {
n.g.1 <- 5L
n.g.2 <- 8L
result <- within(result, {
g.1 <- sample(n.g.1, n, replace = TRUE)
Sigma.b.1 <- matrix(c(1.5^2, .2, .2, 1^2), 2)
R.b <- chol(Sigma.b.1)
b.1 <- matrix(rnorm(2 * n.g.1), n.g.1) %*% R.b
rm(R.b)
g.2 <- sample(n.g.2, n, replace = TRUE)
Sigma.b.2 <- as.matrix(1.2)
b.2 <- rnorm(n.g.2, 0, sqrt(Sigma.b.2))
mu.fixef <- x[,4] * 10
mu.bart <- mu - mu.fixef
mu.ranef <- b.1[g.1,1] + x[,4] * b.1[g.1,2] + b.2[g.2]
mu <- mu + mu.ranef
})
} else {
mu.fixef <- x[,4] * 10
mu.bart <- mu - mu.fixef
}
if (causal) {
result <- within(result, {
tau <- 5
z <- rbinom(n, 1, 0.2)
})
if (ranef) {
result <- within(result, {
mu.fixef.0 <- mu.fixef
mu.fixef.1 <- mu.fixef.0 + tau
mu.bart.0 <- mu.bart.1 <- mu.bart
mu.ranef.0 <- mu.ranef.1 <- mu.ranef
mu.0 <- mu.bart.0 + mu.fixef.0 + mu.ranef.0
mu.1 <- mu.bart.1 + mu.fixef.1 + mu.ranef.1
})
} else {
result <- within(result, {
mu.fixef.0 <- mu.fixef
mu.fixef.1 <- mu.fixef.0 + tau
mu.bart.0 <- mu.bart.1 <- mu.bart
mu.0 <- mu.bart.0 + mu.fixef.0
mu.1 <- mu.bart.1 + mu.fixef.1
})
}
if (binary) {
result <- within(result, {
loc <- mean(c(mu.0, mu.1))
scale <- sd(c(mu.0, mu.1)) / qnorm(0.15)
mu.0 <- (mu.0 - loc) / scale
mu.1 <- (mu.1 - loc) / scale
mu.fixef.0 <- (mu.fixef.0 - loc) / scale
mu.fixef.1 <- (mu.fixef.1 - loc) / scale
mu.bart.0 <- mu.bart.0 / scale
mu.bart.1 <- mu.bart.1 / scale
if (ranef) {
mu.ranef.0 <- mu.ranef.0 / scale
mu.ranef.1 <- mu.ranef.1 / scale
}
rm(loc, scale)
y.0 <- rbinom(n, 1L, pnorm(mu.0))
y.1 <- rbinom(n, 1L, pnorm(mu.1))
y <- y.1 * z + y.0 * (1 - z)
})
} else {
result <- within(result, {
y.0 <- mu.0 + rnorm(n, 0, sigma)
y.1 <- mu.1 + rnorm(n, 0, sigma)
y <- y.1 * z + y.0 * (1 - z)
})
}
result$mu <- NULL
result$mu.fixef <- NULL
result$mu.ranef <- NULL
} else {
if (binary) {
result <- within(result, {
loc <- mean(mu)
scale <- sd(mu) / qnorm(0.15)
mu <- (mu - loc) / scale
mu.fixef <- (mu.fixef - loc) / scale
mu.bart <- mu.bart / scale
if (ranef)
mu.ranef <- mu.ranef / scale
rm(loc, scale)
y <- rbinom(n, 1L, pnorm(mu))
})
} else {
result <- within(result, {
y <- mu + rnorm(n, 0, sigma)
})
}
}
result
}
|
/man/point.est.crMh.Rd | no_license | DistanceDevelopment/WiSP | R | false | false | 3,042 | rd | ||
# R for Everyone, ch 7, Statistical Graphics
# install 'ggplot2', 'lubridate' ****
# NOTE(review): library() is preferred over require() for loading; require()
# returns FALSE instead of erroring when the package is missing.
# Base graphics
require(ggplot2)
data()
# Explore ggplot2's diamonds dataset.
diamonds
head(diamonds)
dim(diamonds)
summary(diamonds)
# base histogram
hist(diamonds$carat, main="Carat Histogram", xlab="Carat")
# base scatterplot (formula interface, then x/y vectors)
plot(price~carat, data=diamonds)
plot(diamonds$carat,diamonds$price)
# boxplot
boxplot(diamonds$carat)
# ggplot2 histogram and density
ggplot(data=diamonds) + geom_histogram(aes(x=carat))
ggplot(data=diamonds) + geom_density(aes(x=carat), fill="grey50")
# ggplot2 scatterplot, colour mapping and faceting
ggplot(data=diamonds, aes(x=carat,y=price)) + geom_point()
g <- ggplot(diamonds, aes(x=carat,y=price))
g + geom_point(aes(color=color))
g + geom_point(aes(color=color)) + facet_wrap(~color)
g + geom_point(aes(color=color)) + facet_grid(cut~clarity)
ggplot(diamonds, aes(x=carat)) + geom_histogram() + facet_wrap(~color)
# Boxplots and violin plots; layer order controls drawing order.
ggplot(diamonds, aes(y=carat, x=1)) + geom_boxplot()
ggplot(diamonds, aes(y=carat, x=cut)) + geom_boxplot()
ggplot(diamonds, aes(y=carat, x=cut)) + geom_violin()
ggplot(diamonds, aes(y=carat, x=cut)) + geom_point() + geom_violin()
ggplot(diamonds, aes(y=carat, x=cut)) + geom_violin() + geom_point()
# Line chart of population over time.
ggplot(economics, aes(x=date,y=pop)) + geom_line()
require(lubridate)
head(economics)
# Derive year and (labelled) month columns from the date.
economics$year <- year(economics$date)
head(economics)
economics$month <- month(economics$date, label=TRUE)
head(economics)
# Subset to the post-2000 rows.
econ2000 <- economics[which(economics$year > 2000),]
dim(econ2000)
require(scales)
# One line per year; comma-formatted y axis.
g <- ggplot(econ2000, aes(x=month,y=pop))
g <- g + geom_line(aes(color=factor(year), group=year))
g <- g + scale_color_discrete(name="Year")
g <- g + scale_y_continuous(labels=comma)
g <- g + labs(title="Population Growth",x="Month",y="Population")
g
# themes
require(ggthemes)
g2 <- ggplot(diamonds, aes(x=carat,y=price)) + geom_point(aes(color=color))
g2 + theme_economist() + scale_colour_economist()
g2 + theme_excel() + scale_colour_excel()
g2 + theme_tufte()
g2 + theme_wsj()
| /Desktop/R/rfe_ch7.R | no_license | Thanatat-CPE0621/Todolist | R | false | false | 1,981 | r | # R for Everyone, ch 7, Statistical Graphics
# install 'ggplot2', 'lubridate' ****
# NOTE(review): library() is preferred over require() for loading; require()
# returns FALSE instead of erroring when the package is missing.
# Base graphics
require(ggplot2)
data()
# Explore ggplot2's diamonds dataset.
diamonds
head(diamonds)
dim(diamonds)
summary(diamonds)
# base histogram
hist(diamonds$carat, main="Carat Histogram", xlab="Carat")
# base scatterplot (formula interface, then x/y vectors)
plot(price~carat, data=diamonds)
plot(diamonds$carat,diamonds$price)
# boxplot
boxplot(diamonds$carat)
# ggplot2 histogram and density
ggplot(data=diamonds) + geom_histogram(aes(x=carat))
ggplot(data=diamonds) + geom_density(aes(x=carat), fill="grey50")
# ggplot2 scatterplot, colour mapping and faceting
ggplot(data=diamonds, aes(x=carat,y=price)) + geom_point()
g <- ggplot(diamonds, aes(x=carat,y=price))
g + geom_point(aes(color=color))
g + geom_point(aes(color=color)) + facet_wrap(~color)
g + geom_point(aes(color=color)) + facet_grid(cut~clarity)
ggplot(diamonds, aes(x=carat)) + geom_histogram() + facet_wrap(~color)
# Boxplots and violin plots; layer order controls drawing order.
ggplot(diamonds, aes(y=carat, x=1)) + geom_boxplot()
ggplot(diamonds, aes(y=carat, x=cut)) + geom_boxplot()
ggplot(diamonds, aes(y=carat, x=cut)) + geom_violin()
ggplot(diamonds, aes(y=carat, x=cut)) + geom_point() + geom_violin()
ggplot(diamonds, aes(y=carat, x=cut)) + geom_violin() + geom_point()
# Line chart of population over time.
ggplot(economics, aes(x=date,y=pop)) + geom_line()
require(lubridate)
head(economics)
# Derive year and (labelled) month columns from the date.
economics$year <- year(economics$date)
head(economics)
economics$month <- month(economics$date, label=TRUE)
head(economics)
# Subset to the post-2000 rows.
econ2000 <- economics[which(economics$year > 2000),]
dim(econ2000)
require(scales)
# One line per year; comma-formatted y axis.
g <- ggplot(econ2000, aes(x=month,y=pop))
g <- g + geom_line(aes(color=factor(year), group=year))
g <- g + scale_color_discrete(name="Year")
g <- g + scale_y_continuous(labels=comma)
g <- g + labs(title="Population Growth",x="Month",y="Population")
g
# themes
require(ggthemes)
g2 <- ggplot(diamonds, aes(x=carat,y=price)) + geom_point(aes(color=color))
g2 + theme_economist() + scale_colour_economist()
g2 + theme_excel() + scale_colour_excel()
g2 + theme_tufte()
g2 + theme_wsj()
|
## This project contains two functions 1) makeCacheMatrix (creates a special "matrix" object that can cache its inverse)
## and 2) cacheSolve (computes the inverse of the special "matrix" returned by makeCacheMatrix).
## The makeCacheMatrix function creates a special "matrix" which is really a list containing
#other functions to perform the following tasks:
# 1) Set the value of the matrix
# 2) Get the value if the matrix
# 3) Set the inverse of the matrix
# 4) Get the inverse of the matrix
# Wrap a matrix in a caching container.
#
# Returns a list of four accessor closures (set/get/setinverse/getinverse)
# that share the enclosing environment, so a computed inverse can be stored
# alongside the matrix and reused.  Replacing the matrix clears the cache.
makeCacheMatrix <- function(x = matrix()) {
  cached_inverse <- NULL

  # Store a new matrix and invalidate any previously cached inverse.
  replace_matrix <- function(new_matrix) {
    x <<- new_matrix
    cached_inverse <<- NULL
  }

  fetch_matrix <- function() x

  store_inverse <- function(inverse) {
    cached_inverse <<- inverse
  }

  fetch_inverse <- function() cached_inverse

  # Public interface: the element names are part of the contract.
  list(
    set = replace_matrix,
    get = fetch_matrix,
    setinverse = store_inverse,
    getinverse = fetch_inverse
  )
}
##cacheSolve function calculates the inverse of the special “matrix” created with the makeCacheMatrix function.
#It first checks to see if the inverse of the matrix has already been calculated.
# Inverse of a cached matrix made by makeCacheMatrix().
#
# Returns the cached inverse when one is available (emitting a message);
# otherwise computes solve() on the stored matrix, caches the result via
# x$setinverse(), and returns it.  Extra arguments are passed to solve().
cacheSolve <- function(x, ...) {
  cached <- x$getinverse()
  if (is.null(cached)) {
    # Cache miss: invert the stored matrix and remember the result.
    fresh <- solve(x$get(), ...)
    x$setinverse(fresh)
    return(fresh)
  }
  message("getting cached data")
  cached
}
| /cachematrix.R | no_license | felipao-skecht/ProgrammingAssignment2 | R | false | false | 1,859 | r | ## This project contains two functions 1) makeCacheMatrix (creates a special "matrix" object that can cache its inverse)
## and 2) cacheSolve (computes the inverse of the special "matrix" returned by makeCacheMatrix).
## The makeCacheMatrix function creates a special "matrix" which is really a list containing
#other functions to perform the following tasks:
# 1) Set the value of the matrix
# 2) Get the value if the matrix
# 3) Set the inverse of the matrix
# 4) Get the inverse of the matrix
makeCacheMatrix<- function(x=matrix()) {
inv<-NULL
# 1) Set the value of the matrix
set <- function(y) {
x<<-y
inv<<-NULL
}
# 2) Get the value if the matrix
get<- function() x
# 3) Set the inverse of the matrix
setinverse<-function(inverse) inv <<- inverse
# 4) Get the inverse of the matrix
getinverse <- function() inv
list(set=set,get=get,setinverse=setinverse,getinverse=getinverse)
}
##cacheSolve function calculates the inverse of the special “matrix” created with the makeCacheMatrix function.
#It first checks to see if the inverse of the matrix has already been calculated.
cacheSolve <- function(x, ...) {
#Return a matrix that is the inverse of x
inv <- x$getinverse()
#In this case, it gets the inverse from the cache and skips the computation. If not, it calculates the inverse of
#the data and sets the inverse matrix in the cache via the setinverse function.
if(!is.null(inv)) {
message("getting cached data")
return(inv)
}
data <- x$get()
inv <- solve(data, ...)
x$setinverse(inv)
inv
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/agree.coeff2.r
\name{kappa2.table}
\alias{kappa2.table}
\title{Kappa coefficient for 2 raters}
\usage{
kappa2.table(ratings, weights = identity.weights(1:ncol(ratings)),
conflev = 0.95, N = Inf)
}
\arguments{
\item{ratings}{A square table of ratings (assume no missing ratings).}
\item{weights}{An optional matrix that contains the weights used in the weighted analysis.}
\item{conflev}{An optional confidence level for confidence intervals. The default value is the traditional 0.95.}
\item{N}{An optional population size. The default value is infinity.}
}
\value{
A data frame containing the following 5 variables: coeff.name coeff.val coeff.se coeff.ci coeff.pval.
}
\description{
Kappa coefficient for 2 raters
}
| /man/kappa2.table.Rd | no_license | hapu66/irrCAC | R | false | true | 801 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/agree.coeff2.r
\name{kappa2.table}
\alias{kappa2.table}
\title{Kappa coefficient for 2 raters}
\usage{
kappa2.table(ratings, weights = identity.weights(1:ncol(ratings)),
conflev = 0.95, N = Inf)
}
\arguments{
\item{ratings}{A square table of ratings (assume no missing ratings).}
\item{weights}{An optional matrix that contains the weights used in the weighted analysis.}
\item{conflev}{An optional confidence level for confidence intervals. The default value is the traditional 0.95.}
\item{N}{An optional population size. The default value is infinity.}
}
\value{
A data frame containing the following 5 variables: coeff.name coeff.val coeff.se coeff.ci coeff.pval.
}
\description{
Kappa coefficient for 2 raters
}
|
# The function plotKMeans returns a plot of clustered data using k-means
#
# Author: Kaihua Liu
###########################################################################################################
source('LSDD.R')
source('LSDDsegmentation.R')
# Merge undersized segments into a neighbouring segment.
#
# Walks the segments in `segResults` (columns segStart/segEnd, row indices
# into `data`) in order and merges every segment of length <= `throttle`
# rows into its left or right neighbour, choosing the side whose
# distribution looks most similar according to LSDDCompare().
# `segLSDDPars` supplies the per-variable sigma/lambda used by LSDD.
# Returns a data frame of the surviving segment boundaries.
segMerge <- function(data, segResults, segLSDDPars, throttle = 100){
  # Accumulates the merged segment table.
  newSeg <- data.frame()
  # TRUE when segment i was absorbed into segment i+1, so iteration i+1
  # must be skipped (its rows are already covered by the emitted segment).
  mergedRight <- FALSE
  for(i in 1:nrow(segResults)){
    # check if previous seg was merged to the right
    if(mergedRight == TRUE) {
      mergedRight <- FALSE
      next
    }
    # Segment is below the minimum size: it must be merged away.
    if(segResults[i, "segEnd"] - segResults[i, "segStart"] + 1 <= throttle){
      # The first/last segment can only merge in one direction, so the
      # LSDD comparison is skipped.
      if(i == 1){
        mergeDirection <- 'right'
      }
      else if(i == nrow(segResults)){
        mergeDirection <- 'left'
      }
      # use LSDD fast function to calculate similarity
      else {
        mergeDirection <- LSDDCompare(
          L = segResults[i-1, ],
          G = segResults[i, ],
          R = segResults[i+1, ],
          data = data,
          segLSDDPars = segLSDDPars
        )
      }
      if(mergeDirection == "left"){
        # Merge left: extend the last accepted segment to cover this one.
        newSeg[nrow(newSeg), "segEnd"] <- segResults[i, "segEnd"]
      }else{
        # Merge right: emit one combined segment spanning i and i+1, and
        # flag i+1 as consumed.
        newSeg <- rbind(newSeg,
          list(
            "segStart" = segResults[i,"segStart"],
            "segEnd" = segResults[i+1, "segEnd"]
          )
        )
        # skip seg to be deal in next loop
        mergedRight <- TRUE
      }
    }
    # Segment is long enough: keep as-is.
    else{
      newSeg <- rbind(newSeg, segResults[i,])
    }
  }
  newSeg <- unique(newSeg)
  return(newSeg)
}
#######################################
######### LSDDCompare ##########
#######################################
#
# |------------|-----|-----------|
# L G R
# Compare Gap with two different time sequences
# And return the one with higher similarity
#
#######################################
# Input:
#
# L, G, R:three time sequences to deal with. list("segStart" = , "segEnd" = )
# G is the gap (seg size below the minimum)
#
# data: dataset
#
# segLSDDPars: sigma and lambdas matrices
#######################################
# Output:
#
# "left"/"right", the side to merge
LSDDCompare <- function(L, G, R, data, segLSDDPars){
  sum_LSDD_G2L <- 0
  sum_LSDD_G2R <- 0
  # LSDD needs at least 2 data points per sample; if any of the three
  # segments covers a single point the comparison is skipped (both sums
  # stay 0) and the tie-break below merges G towards the shorter side.
  if(
    G$segEnd - G$segStart > 1
    && L$segEnd - L$segStart > 1
    && R$segEnd - R$segStart > 1
  ){
    for(i in 1:ncol(data)){
      # Each variable is handed to LSDD as a 1 x n matrix.
      Gmatrix <- t(matrix(data[G$segStart:G$segEnd, i]))
      Lmatrix <- t(matrix(data[L$segStart:L$segEnd, i]))
      Rmatrix <- t(matrix(data[R$segStart:R$segEnd, i]))
      # cat(dim(Gmatrix), dim(Lmatrix), dim(Rmatrix))
      # Accumulate the density difference between G and each neighbour
      # across all variables, using the tuned per-variable parameters.
      sum_LSDD_G2L <- sum(sum_LSDD_G2L, LSDDfast(
        X1 = Gmatrix,
        X2 = Lmatrix,
        sigma = segLSDDPars[i, "sigma"], #sigma
        lambda = segLSDDPars[i, "lambda"] #lambda
      ))
      sum_LSDD_G2R <- sum(sum_LSDD_G2R, LSDDfast(
        X1 = Gmatrix,
        X2 = Rmatrix,
        sigma = segLSDDPars[i, "sigma"], #sigma
        lambda = segLSDDPars[i, "lambda"] #lambda
      ))
    }
  }
  # cat("Similary to left is: ", sum_LSDD_G2L, "\n")
  # cat("Similary to right is: ", sum_LSDD_G2R, "\n")
  # A smaller LSDD sum means more similar distributions, so merge towards
  # the side with the smaller accumulated value.
  if(sum_LSDD_G2R < sum_LSDD_G2L){
    result <- 'right'
  }else if(sum_LSDD_G2L < sum_LSDD_G2R){
    result <- 'left'
  }else{
    # Tie (or comparison skipped): merge with the shorter neighbour.
    if(L$segEnd - L$segStart < R$segEnd - R$segStart){
      result <- 'left'
    }
    else{
      result <- 'right'
    }
  }
  # cat("Merge to ", result, "\n")
  result
}
####### test #######
# data <- data.frame( read.csv('./data/test5000.csv'))
# data <- data[-1]
# segResults <- data.frame( read.csv('./data/segs5000.csv'))
# segLSDDPars <- data.frame( read.csv('./data/pars5000.csv'))
# segMerge(data = data, segResults = segResults, segLSDDPars = segLSDDPars, throttle = 100)
| /segMerge.R | no_license | lkaihua/Bipeline | R | false | false | 4,547 | r | # The function plotKMeans returns a plot of clustered data using k-means
#
# Author: Kaihua Liu
###########################################################################################################
source('LSDD.R')
source('LSDDsegmentation.R')
# Merge undersized segments into a neighbouring segment.
#
# Walks the segments in `segResults` (columns segStart/segEnd, row indices
# into `data`) in order and merges every segment of length <= `throttle`
# rows into its left or right neighbour, choosing the side whose
# distribution looks most similar according to LSDDCompare().
# `segLSDDPars` supplies the per-variable sigma/lambda used by LSDD.
# Returns a data frame of the surviving segment boundaries.
segMerge <- function(data, segResults, segLSDDPars, throttle = 100){
  # Accumulates the merged segment table.
  newSeg <- data.frame()
  # TRUE when segment i was absorbed into segment i+1, so iteration i+1
  # must be skipped (its rows are already covered by the emitted segment).
  mergedRight <- FALSE
  for(i in 1:nrow(segResults)){
    # check if previous seg was merged to the right
    if(mergedRight == TRUE) {
      mergedRight <- FALSE
      next
    }
    # Segment is below the minimum size: it must be merged away.
    if(segResults[i, "segEnd"] - segResults[i, "segStart"] + 1 <= throttle){
      # The first/last segment can only merge in one direction, so the
      # LSDD comparison is skipped.
      if(i == 1){
        mergeDirection <- 'right'
      }
      else if(i == nrow(segResults)){
        mergeDirection <- 'left'
      }
      # use LSDD fast function to calculate similarity
      else {
        mergeDirection <- LSDDCompare(
          L = segResults[i-1, ],
          G = segResults[i, ],
          R = segResults[i+1, ],
          data = data,
          segLSDDPars = segLSDDPars
        )
      }
      if(mergeDirection == "left"){
        # Merge left: extend the last accepted segment to cover this one.
        newSeg[nrow(newSeg), "segEnd"] <- segResults[i, "segEnd"]
      }else{
        # Merge right: emit one combined segment spanning i and i+1, and
        # flag i+1 as consumed.
        newSeg <- rbind(newSeg,
          list(
            "segStart" = segResults[i,"segStart"],
            "segEnd" = segResults[i+1, "segEnd"]
          )
        )
        # skip seg to be deal in next loop
        mergedRight <- TRUE
      }
    }
    # Segment is long enough: keep as-is.
    else{
      newSeg <- rbind(newSeg, segResults[i,])
    }
  }
  newSeg <- unique(newSeg)
  return(newSeg)
}
#######################################
######### LSDDCompare ##########
#######################################
#
# |------------|-----|-----------|
# L G R
# Compare Gap with two different time sequences
# And return the one with higher similarity
#
#######################################
# Input:
#
# L, G, R:three time sequences to deal with. list("segStart" = , "segEnd" = )
# G is the gap (seg size below the minimum)
#
# data: dataset
#
# segLSDDPars: sigma and lambdas matrices
#######################################
# Output:
#
# "left"/"right", the side to merge
# Decide whether gap segment G should merge into its left (L) or right (R)
# neighbour, using column-wise LSDD; returns "left" or "right".
# A smaller LSDD total is treated as "more similar", so the merge goes toward
# the neighbour with the smaller total.
LSDDCompare <- function(L, G, R, data, segLSDDPars){
  gap_len   <- G$segEnd - G$segStart
  left_len  <- L$segEnd - L$segStart
  right_len <- R$segEnd - R$segStart
  # Running LSDD totals of the gap against each neighbour.
  sim_to_left  <- 0
  sim_to_right <- 0
  # LSDD needs more than one data point per segment.  When any segment is
  # too short, both totals stay 0 and the tie-break below decides.
  if (gap_len > 1 && left_len > 1 && right_len > 1) {
    for (j in seq_len(ncol(data))) {
      # Each segment's values for column j as a 1 x n matrix (LSDDfast's
      # expected input shape).
      gap_seg   <- t(matrix(data[G$segStart:G$segEnd, j]))
      left_seg  <- t(matrix(data[L$segStart:L$segEnd, j]))
      right_seg <- t(matrix(data[R$segStart:R$segEnd, j]))
      sig <- segLSDDPars[j, "sigma"]
      lam <- segLSDDPars[j, "lambda"]
      sim_to_left  <- sum(sim_to_left,  LSDDfast(X1 = gap_seg, X2 = left_seg,
                                                 sigma = sig, lambda = lam))
      sim_to_right <- sum(sim_to_right, LSDDfast(X1 = gap_seg, X2 = right_seg,
                                                 sigma = sig, lambda = lam))
    }
  }
  if (sim_to_right < sim_to_left) {
    "right"
  } else if (sim_to_left < sim_to_right) {
    "left"
  } else if (left_len < right_len) {
    # Exact tie (rare): merge into the shorter neighbour.
    "left"
  } else {
    "right"
  }
}
####### test #######
# data <- data.frame( read.csv('./data/test5000.csv'))
# data <- data[-1]
# segResults <- data.frame( read.csv('./data/segs5000.csv'))
# segLSDDPars <- data.frame( read.csv('./data/pars5000.csv'))
# segMerge(data = data, segResults = segResults, segLSDDPars = segLSDDPars, throttle = 100)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/xmu.R
\name{xmu_summary_RAM_group_parameters}
\alias{xmu_summary_RAM_group_parameters}
\title{Order and group the parameters in a RAM summary}
\usage{
xmu_summary_RAM_group_parameters(
model,
paramTable,
means = FALSE,
residuals = FALSE
)
}
\arguments{
\item{model}{the model containing the parameters.}
\item{paramTable}{The parameter table.}
\item{means}{Whether to show the means (FALSE)}
\item{residuals}{Whether to show the residuals (FALSE)}
}
\value{
\itemize{
\item Sorted parameter table
}
}
\description{
Makes understanding complex model output easier by grouping parameters by type: residuals, latent variance, factor loading, etc.
}
\examples{
\dontrun{
data(demoOneFactor)
manifests = names(demoOneFactor)
m1 = umxRAM("One Factor", data = demoOneFactor,
umxPath("G", to = manifests),
umxPath(v.m. = manifests),
umxPath(v1m0 = "G")
)
tmp = umxSummary(m1, means=FALSE, residuals = FALSE)
xmu_summary_RAM_group_parameters(m1, paramTable = tmp, means= FALSE, residuals= FALSE)
}
}
\seealso{
\itemize{
\item \code{\link[=umxSummary]{umxSummary()}}
}
Other xmu internal not for end user:
\code{\link{umxModel}()},
\code{\link{umxRenameMatrix}()},
\code{\link{umx_APA_pval}()},
\code{\link{umx_fun_mean_sd}()},
\code{\link{umx_get_bracket_addresses}()},
\code{\link{umx_make}()},
\code{\link{umx_standardize}()},
\code{\link{umx_string_to_algebra}()},
\code{\link{umx}},
\code{\link{xmuHasSquareBrackets}()},
\code{\link{xmuLabel_MATRIX_Model}()},
\code{\link{xmuLabel_Matrix}()},
\code{\link{xmuLabel_RAM_Model}()},
\code{\link{xmuMI}()},
\code{\link{xmuMakeDeviationThresholdsMatrices}()},
\code{\link{xmuMakeOneHeadedPathsFromPathList}()},
\code{\link{xmuMakeTwoHeadedPathsFromPathList}()},
\code{\link{xmuMaxLevels}()},
\code{\link{xmuMinLevels}()},
\code{\link{xmuPropagateLabels}()},
\code{\link{xmuRAM2Ordinal}()},
\code{\link{xmuTwinSuper_Continuous}()},
\code{\link{xmuTwinSuper_NoBinary}()},
\code{\link{xmuTwinUpgradeMeansToCovariateModel}()},
\code{\link{xmu_CI_merge}()},
\code{\link{xmu_CI_stash}()},
\code{\link{xmu_DF_to_mxData_TypeCov}()},
\code{\link{xmu_PadAndPruneForDefVars}()},
\code{\link{xmu_bracket_address2rclabel}()},
\code{\link{xmu_cell_is_on}()},
\code{\link{xmu_check_levels_identical}()},
\code{\link{xmu_check_needs_means}()},
\code{\link{xmu_check_variance}()},
\code{\link{xmu_clean_label}()},
\code{\link{xmu_data_missing}()},
\code{\link{xmu_data_swap_a_block}()},
\code{\link{xmu_describe_data_WLS}()},
\code{\link{xmu_dot_make_paths}()},
\code{\link{xmu_dot_make_residuals}()},
\code{\link{xmu_dot_maker}()},
\code{\link{xmu_dot_move_ranks}()},
\code{\link{xmu_dot_rank_str}()},
\code{\link{xmu_extract_column}()},
\code{\link{xmu_get_CI}()},
\code{\link{xmu_lavaan_process_group}()},
\code{\link{xmu_make_TwinSuperModel}()},
\code{\link{xmu_make_bin_cont_pair_data}()},
\code{\link{xmu_make_mxData}()},
\code{\link{xmu_match.arg}()},
\code{\link{xmu_name_from_lavaan_str}()},
\code{\link{xmu_path2twin}()},
\code{\link{xmu_path_regex}()},
\code{\link{xmu_print_algebras}()},
\code{\link{xmu_rclabel_2_bracket_address}()},
\code{\link{xmu_safe_run_summary}()},
\code{\link{xmu_set_sep_from_suffix}()},
\code{\link{xmu_show_fit_or_comparison}()},
\code{\link{xmu_simplex_corner}()},
\code{\link{xmu_standardize_ACEcov}()},
\code{\link{xmu_standardize_ACEv}()},
\code{\link{xmu_standardize_ACE}()},
\code{\link{xmu_standardize_CP}()},
\code{\link{xmu_standardize_IP}()},
\code{\link{xmu_standardize_RAM}()},
\code{\link{xmu_standardize_SexLim}()},
\code{\link{xmu_standardize_Simplex}()},
\code{\link{xmu_start_value_list}()},
\code{\link{xmu_starts}()},
\code{\link{xmu_twin_add_WeightMatrices}()},
\code{\link{xmu_twin_check}()},
\code{\link{xmu_twin_get_var_names}()},
\code{\link{xmu_twin_make_def_means_mats_and_alg}()},
\code{\link{xmu_twin_upgrade_selDvs2SelVars}()}
}
\concept{xmu internal not for end user}
| /man/xmu_summary_RAM_group_parameters.Rd | no_license | MATA62N/umx | R | false | true | 3,956 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/xmu.R
\name{xmu_summary_RAM_group_parameters}
\alias{xmu_summary_RAM_group_parameters}
\title{Order and group the parameters in a RAM summary}
\usage{
xmu_summary_RAM_group_parameters(
model,
paramTable,
means = FALSE,
residuals = FALSE
)
}
\arguments{
\item{model}{the model containing the parameters.}
\item{paramTable}{The parameter table.}
\item{means}{Whether to show the means (FALSE)}
\item{residuals}{Whether to show the residuals (FALSE)}
}
\value{
\itemize{
\item Sorted parameter table
}
}
\description{
Makes understanding complex model output easier by grouping parameters by type: residuals, latent variance, factor loading, etc.
}
\examples{
\dontrun{
data(demoOneFactor)
manifests = names(demoOneFactor)
m1 = umxRAM("One Factor", data = demoOneFactor,
umxPath("G", to = manifests),
umxPath(v.m. = manifests),
umxPath(v1m0 = "G")
)
tmp = umxSummary(m1, means=FALSE, residuals = FALSE)
xmu_summary_RAM_group_parameters(m1, paramTable = tmp, means= FALSE, residuals= FALSE)
}
}
\seealso{
\itemize{
\item \code{\link[=umxSummary]{umxSummary()}}
}
Other xmu internal not for end user:
\code{\link{umxModel}()},
\code{\link{umxRenameMatrix}()},
\code{\link{umx_APA_pval}()},
\code{\link{umx_fun_mean_sd}()},
\code{\link{umx_get_bracket_addresses}()},
\code{\link{umx_make}()},
\code{\link{umx_standardize}()},
\code{\link{umx_string_to_algebra}()},
\code{\link{umx}},
\code{\link{xmuHasSquareBrackets}()},
\code{\link{xmuLabel_MATRIX_Model}()},
\code{\link{xmuLabel_Matrix}()},
\code{\link{xmuLabel_RAM_Model}()},
\code{\link{xmuMI}()},
\code{\link{xmuMakeDeviationThresholdsMatrices}()},
\code{\link{xmuMakeOneHeadedPathsFromPathList}()},
\code{\link{xmuMakeTwoHeadedPathsFromPathList}()},
\code{\link{xmuMaxLevels}()},
\code{\link{xmuMinLevels}()},
\code{\link{xmuPropagateLabels}()},
\code{\link{xmuRAM2Ordinal}()},
\code{\link{xmuTwinSuper_Continuous}()},
\code{\link{xmuTwinSuper_NoBinary}()},
\code{\link{xmuTwinUpgradeMeansToCovariateModel}()},
\code{\link{xmu_CI_merge}()},
\code{\link{xmu_CI_stash}()},
\code{\link{xmu_DF_to_mxData_TypeCov}()},
\code{\link{xmu_PadAndPruneForDefVars}()},
\code{\link{xmu_bracket_address2rclabel}()},
\code{\link{xmu_cell_is_on}()},
\code{\link{xmu_check_levels_identical}()},
\code{\link{xmu_check_needs_means}()},
\code{\link{xmu_check_variance}()},
\code{\link{xmu_clean_label}()},
\code{\link{xmu_data_missing}()},
\code{\link{xmu_data_swap_a_block}()},
\code{\link{xmu_describe_data_WLS}()},
\code{\link{xmu_dot_make_paths}()},
\code{\link{xmu_dot_make_residuals}()},
\code{\link{xmu_dot_maker}()},
\code{\link{xmu_dot_move_ranks}()},
\code{\link{xmu_dot_rank_str}()},
\code{\link{xmu_extract_column}()},
\code{\link{xmu_get_CI}()},
\code{\link{xmu_lavaan_process_group}()},
\code{\link{xmu_make_TwinSuperModel}()},
\code{\link{xmu_make_bin_cont_pair_data}()},
\code{\link{xmu_make_mxData}()},
\code{\link{xmu_match.arg}()},
\code{\link{xmu_name_from_lavaan_str}()},
\code{\link{xmu_path2twin}()},
\code{\link{xmu_path_regex}()},
\code{\link{xmu_print_algebras}()},
\code{\link{xmu_rclabel_2_bracket_address}()},
\code{\link{xmu_safe_run_summary}()},
\code{\link{xmu_set_sep_from_suffix}()},
\code{\link{xmu_show_fit_or_comparison}()},
\code{\link{xmu_simplex_corner}()},
\code{\link{xmu_standardize_ACEcov}()},
\code{\link{xmu_standardize_ACEv}()},
\code{\link{xmu_standardize_ACE}()},
\code{\link{xmu_standardize_CP}()},
\code{\link{xmu_standardize_IP}()},
\code{\link{xmu_standardize_RAM}()},
\code{\link{xmu_standardize_SexLim}()},
\code{\link{xmu_standardize_Simplex}()},
\code{\link{xmu_start_value_list}()},
\code{\link{xmu_starts}()},
\code{\link{xmu_twin_add_WeightMatrices}()},
\code{\link{xmu_twin_check}()},
\code{\link{xmu_twin_get_var_names}()},
\code{\link{xmu_twin_make_def_means_mats_and_alg}()},
\code{\link{xmu_twin_upgrade_selDvs2SelVars}()}
}
\concept{xmu internal not for end user}
|
\docType{class}
\name{AllPosible_MV}
\alias{AllPosible_MV}
\alias{R6_AllPosible_MV}
\title{AllPosible_MV KEEL Preprocess Algorithm}
\description{
AllPosible_MV Preprocess Algorithm from KEEL.
}
\usage{
AllPosible_MV(train, test)
}
\arguments{
\item{train}{Train dataset as a data.frame object}
\item{test}{Test dataset as a data.frame object}
}
\value{
A data.frame with the preprocessed data for both \code{train} and \code{test} datasets.
}
\examples{
#data_train <- RKEEL::loadKeelDataset("car_train")
#data_test <- RKEEL::loadKeelDataset("car_test")
#Create algorithm
#algorithm <- RKEEL::AllPosible_MV(data_train, data_test)
#Run algorithm
#algorithm$run()
#See results
#algorithm$preprocessed_test
}
\keyword{preprocess}
| /RKEEL/man/AllPosible-MV.Rd | no_license | i02momuj/RKEEL | R | false | false | 732 | rd | \docType{class}
\name{AllPosible_MV}
\alias{AllPosible_MV}
\alias{R6_AllPosible_MV}
\title{AllPosible_MV KEEL Preprocess Algorithm}
\description{
AllPosible_MV Preprocess Algorithm from KEEL.
}
\usage{
AllPosible_MV(train, test)
}
\arguments{
\item{train}{Train dataset as a data.frame object}
\item{test}{Test dataset as a data.frame object}
}
\value{
A data.frame with the preprocessed data for both \code{train} and \code{test} datasets.
}
\examples{
#data_train <- RKEEL::loadKeelDataset("car_train")
#data_test <- RKEEL::loadKeelDataset("car_test")
#Create algorithm
#algorithm <- RKEEL::AllPosible_MV(data_train, data_test)
#Run algorithm
#algorithm$run()
#See results
#algorithm$preprocessed_test
}
\keyword{preprocess}
|
Sys.setenv("PKG_CXXFLAGS"="-std=c++11")
## required so that armadillo uses c++11 for gamma random number generation
library(MCMCpack)
library(truncnorm) ## rtruncnorm() is faster than rtnorm() in the msm package
library(ars)
library(compiler)
library(aster)
library(Rcpp)
library(RcppArmadillo)
source("overflowmcmc.R")
sourceCpp("overflowmcmcC.cpp")
# Thin R wrapper around the C++ sampler overtrendmcmcCPP().
#
# Unpacks the starting values, random-walk tuning settings, prior
# hyperparameters and data from their respective lists, coerces the numeric
# pieces with as.numeric() (the C++ signature shown below expects doubles /
# arma vectors), and forwards everything to overtrendmcmcCPP() together with
# the R random-number generators it calls back into (rtruncnorm, rktnb,
# rbetarejC).
#
# Args:
#   niter:   number of MCMC iterations.
#   data:    list with elements tobs, m, M, D.
#   initial: list of starting values: tau, sigma, gam, phi, N, Dbar, alpha, beta.
#   prior:   list of hyperparameters: vsigma, ssigma, aphi, bphi, mugamma,
#            sig2gamma, Dbars, aalpha, balpha, abeta, bbeta.
#   rwsds:   list of random-walk sds and adaptation bookkeeping.
#
# Returns: the List produced by overtrendmcmcCPP() (the MCMC output).
overtrendmcmcCwrap <- function(niter, data, initial, prior, rwsds){
  ## Initial values
  tau <- as.numeric(c(initial$tau))
  nblocks <- length(tau)   # NOTE(review): computed but never passed to the C++ call.
  sigma <- as.numeric(initial$sigma)
  gam <- as.numeric(c(initial$gam))
  phi <- as.numeric(initial$phi)
  N <- as.numeric(c(initial$N))
  Dbar <- as.numeric(c(initial$Dbar))
  alpha <- as.numeric(initial$alpha)
  beta <- as.numeric(initial$beta)
  ## Random-walk sds and adaptation bookkeeping
  taulogrwsd <- as.numeric(rwsds$taulogrwsd)
  gamlogrwsds <- as.numeric(rwsds$gamlogrwsds)
  alphalogrwsd <- as.numeric(rwsds$alphalogrwsd)
  rwc <- rwsds$rwc
  H <- rwsds$H
  tune <- (rwsds$tune)*1   # coerce a logical tune flag to 0/1 for the C++ int argument
  lowtarget <- rwsds$lowtarget
  hightarget <- rwsds$hightarget
  tauchol <- rwsds$tauchol
  ## Prior hyperparameters (list names mugamma/sig2gamma map to mugam/sig2gam)
  vsigma <- prior$vsigma
  ssigma <- prior$ssigma
  aphi <- prior$aphi
  bphi <- prior$bphi
  mugam <- prior$mugamma
  sig2gam <- prior$sig2gamma
  Dbars <- prior$Dbars
  aalpha <- prior$aalpha
  balpha <- prior$balpha
  abeta <- prior$abeta
  bbeta <- prior$bbeta
  ## Data
  tobs <- data$tobs
  m <- data$m
  M <- data$M
  D <- data$D
  # Argument order must match the overtrendmcmcCPP signature documented below.
  out <- overtrendmcmcCPP(niter, tobs, D, M, m, tau, sigma, gam, phi, N, Dbar, alpha, beta,
                          vsigma, ssigma, aphi, bphi, mugam, sig2gam, Dbars, aalpha, balpha,
                          abeta, bbeta, taulogrwsd, gamlogrwsds, alphalogrwsd,
                          rwc, H, tune, lowtarget, hightarget, tauchol, rtruncnorm, rktnb,
                          rbetarejC)
  return(out)
}
## List overtrendmcmcC(int niter, arma::vec tobs, arma::vec D, arma::vec M, arma::vec m,
## arma::vec tau, double sigma, arma::vec gam, double phi, arma::vec N,
## arma::vec Dbar, double alpha, double beta,
## double vsigma, double ssigma, double aphi, double bphi,
## arma::vec mugam, arma::vec sig2gam, arma::vec Dbars, double aalpha,
## double balpha, double abeta, double bbeta,
## double taulogrwsd, arma::vec gamlogrwsds, double alphalogrwsd,
## double rwc, int H, int tune,
## arma::vec lowtarget, arma::vec hightarget, arma::mat tauchol,
## Function rtruncnorm, Function rktnb, Function rbetarejC){
| /model/old/overflowmcmcCwrap.R | no_license | simpsonm/bitcointrans | R | false | false | 2,465 | r | Sys.setenv("PKG_CXXFLAGS"="-std=c++11")
## required so that armadillo uses c++11 for gamma random number generation
library(MCMCpack)
library(truncnorm) ## rtruncnorm() is faster than rtnorm() in the msm package
library(ars)
library(compiler)
library(aster)
library(Rcpp)
library(RcppArmadillo)
source("overflowmcmc.R")
sourceCpp("overflowmcmcC.cpp")
overtrendmcmcCwrap <- function(niter, data, initial, prior, rwsds){
## Initial values
tau <- as.numeric(c(initial$tau))
nblocks <- length(tau)
sigma <- as.numeric(initial$sigma)
gam <- as.numeric(c(initial$gam))
phi <- as.numeric(initial$phi)
N <- as.numeric(c(initial$N))
Dbar <- as.numeric(c(initial$Dbar))
alpha <- as.numeric(initial$alpha)
beta <- as.numeric(initial$beta)
## RW sds and bookkeeping
taulogrwsd <- as.numeric(rwsds$taulogrwsd)
gamlogrwsds <- as.numeric(rwsds$gamlogrwsds)
alphalogrwsd <- as.numeric(rwsds$alphalogrwsd)
rwc <- rwsds$rwc
H <- rwsds$H
tune <- (rwsds$tune)*1
lowtarget <- rwsds$lowtarget
hightarget <- rwsds$hightarget
tauchol <- rwsds$tauchol
## Priors
vsigma <- prior$vsigma
ssigma <- prior$ssigma
aphi <- prior$aphi
bphi <- prior$bphi
mugam <- prior$mugamma
sig2gam <- prior$sig2gamma
Dbars <- prior$Dbars
aalpha <- prior$aalpha
balpha <- prior$balpha
abeta <- prior$abeta
bbeta <- prior$bbeta
## Data
tobs <- data$tobs
m <- data$m
M <- data$M
D <- data$D
out <- overtrendmcmcCPP(niter, tobs, D, M, m, tau, sigma, gam, phi, N, Dbar, alpha, beta,
vsigma, ssigma, aphi, bphi, mugam, sig2gam, Dbars, aalpha, balpha,
abeta, bbeta, taulogrwsd, gamlogrwsds, alphalogrwsd,
rwc, H, tune, lowtarget, hightarget, tauchol, rtruncnorm, rktnb,
rbetarejC)
return(out)
}
## List overtrendmcmcC(int niter, arma::vec tobs, arma::vec D, arma::vec M, arma::vec m,
## arma::vec tau, double sigma, arma::vec gam, double phi, arma::vec N,
## arma::vec Dbar, double alpha, double beta,
## double vsigma, double ssigma, double aphi, double bphi,
## arma::vec mugam, arma::vec sig2gam, arma::vec Dbars, double aalpha,
## double balpha, double abeta, double bbeta,
## double taulogrwsd, arma::vec gamlogrwsds, double alphalogrwsd,
## double rwc, int H, int tune,
## arma::vec lowtarget, arma::vec hightarget, arma::mat tauchol,
## Function rtruncnorm, Function rktnb, Function rbetarejC){
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/datasets.R
\docType{data}
\name{geexex}
\alias{geexex}
\title{Dataset used to illustrate Stefanski and Boos examples.}
\format{
a dataset with 9 variables and 100 observations
\itemize{
\item{Y1}{ rnorm(mean = 5, sd = 4)}
\item{Y2}{ rnorm(mean = 2, sd = 1)}
\item{X1}{ rgamma(shape =5)}
\item{Y3}{ 2 + 3*X1 + 1*rnorm(0, 1)}
\item{W1}{ X1 + 0.25 * rnorm(0, 1)}
\item{Z1}{ 2 + 1.5*X1 + 1*rnorm(0, 1)}
\item{X2}{ 0 for first 50 observation, 1 for rest}
\item{Y4}{ 0.1 + 0.1*X1 + 0.5*X2 + rnorm(0, 1)}
\item{Y5}{ rbinom(prob = plogis(0.1 + 0.1*X1 + 0.5*X2))}
}
}
\description{
The data used to illustrate examples 1-9 of Stefanski and Boos (2002).
}
\references{
Stefanski, L. A., & Boos, D. D. (2002). The calculus of m-estimation. The American Statistician, 56(1), 29-38.
}
\keyword{datasets}
| /man/geexex.Rd | permissive | bsaul/geex | R | false | true | 897 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/datasets.R
\docType{data}
\name{geexex}
\alias{geexex}
\title{Dataset used to illustrate Stefanski and Boos examples.}
\format{
a dataset with 9 variables and 100 observations
\itemize{
\item{Y1}{ rnorm(mean = 5, sd = 4)}
\item{Y2}{ rnorm(mean = 2, sd = 1)}
\item{X1}{ rgamma(shape =5)}
\item{Y3}{ 2 + 3*X1 + 1*rnorm(0, 1)}
\item{W1}{ X1 + 0.25 * rnorm(0, 1)}
\item{Z1}{ 2 + 1.5*X1 + 1*rnorm(0, 1)}
\item{X2}{ 0 for first 50 observation, 1 for rest}
\item{Y4}{ 0.1 + 0.1*X1 + 0.5*X2 + rnorm(0, 1)}
\item{Y5}{ rbinom(prob = plogis(0.1 + 0.1*X1 + 0.5*X2))}
}
}
\description{
The data used to illustrate examples 1-9 of Stefanski and Boos (2002).
}
\references{
Stefanski, L. A., & Boos, D. D. (2002). The calculus of m-estimation. The American Statistician, 56(1), 29-38.
}
\keyword{datasets}
|
# DataUtil.r
# Functions to get data for selection and analysis.
# From Micah Altman 20170726
# Adapted RBLlandau 20171002
library(tidyverse)
# H A C K S
# Absurd hack in here to make lifem print reasonably.
# Increase the penalty for printing in scientific
# (exponential) format, and limit the number of digits
# so that there are no places after the radix point.
options("scipen"=100, "digits"=1)
# End of absurd hack.
# Enable printing wide tables.
options("width"=120)
# Infix string concatenation: "a" %+% "b" yields "ab" (via paste0).
`%+%` <- function(lhs, rhs) {
  paste0(lhs, rhs)
}
# U T I L I T Y F U N C T I O N S
# Better functions for summarizing the loss data.
# These are more robust than just mean, and more broadly estimated
# than median.
# midmean is just a trimmed mean of the middle half of the sample.
# trimean is a less efficient but easier trimmed central measure,
# and was a JWT favorite.
# trimmedmean is a 10% trimmed mean, i.e., mean of the middle 80% of the data.
# Tukey's trimean: weighted average of the five-number summary,
# 0.25 * lower hinge + 0.5 * median + 0.25 * upper hinge.
trimean <- function(vect) {
  q <- fivenum(vect)
  0.25 * q[2] + 0.5 * q[3] + 0.25 * q[4]
}
# Midmean: trimmed mean of the middle half of the sample (25% off each end),
# ignoring NAs.
midmean <- function(vect) {
  mean(vect, trim = 0.25, na.rm = TRUE)
}
# 10% trimmed mean: mean of the middle 80% of the sample, ignoring NAs.
trimmedmean <- function(vect) {
  mean(vect, trim = 0.10, na.rm = TRUE)
}
# Select only the dataframe rows with N copies.
# Keep only the rows of dfIn whose 'copies' column equals nCopies.
fnSelectCopies <- function(dfIn, nCopies) {
  keep <- dfIn$copies == nCopies
  dfIn[keep, ]
}
# Plot-safe transforms.
# safelog() shifts by one before taking log10, so safelog(0) == 0 and
# axis labels stay sensible when the data contain zeros.
safelog <- function(x) log10(x + 1)
# safe() nudges values slightly away from zero.
safe <- function(x) x + 0.001
# Shock tabulation functions.
# Tabulate midmean loss percentage for one shock scenario.
#
# Filters `input` to rows with more than one copy that match the requested
# shock frequency / impact / duration, then cross-tabulates mdmlosspct with
# copies down the rows and lifem across the columns.
#
# Side effect: stashes the filtered rows in the global variable
# `res.shocktdat` via assign() -- NOTE(review): global-environment side
# effect; confirm downstream code still relies on it before removing.
#
# NOTE(review): dcast() is not defined in this file and none of the
# libraries loaded here provide it (presumably reshape2 or data.table,
# attached elsewhere) -- verify.
fntShockTab <- function(input, freq, impact, duration) {
  # Keep only replicated-copy rows matching this shock scenario.
  res1 <- input[input$copies>1 &
                input$shockfreq==freq &
                input$shockimpact==impact &
                input$shockmaxlife==duration,]
  assign("res.shocktdat", res1, envir=globalenv())
  #print(paste("res.shockdat",length(res.shockdat$copies)))
  restab <- dcast(res1, copies~lifem, value.var="mdmlosspct")
  return(restab)
}
# f n d f G e t G i a n t D a t a
# Load every "Giant*.txt" results file, merge them, and aggregate loss
# percentages per parameter combination.
#
# Args:
#   dir.string: directory to list for Giant*.txt files ("" means "./").
#
# Returns: a grouped summary data.frame with one row per parameter
# combination, containing mdmlosspct (midmean docslost/docstotal, as a
# rounded percentage) and the group size n.
fndfGetGiantData <- function(dir.string)
{
  # L O A D   F I L E S
  # Load & merge all files in directory.
  if (dir.string == "") {dir.string <- "./"}
  filesList <- grep(pattern="^Giant.*\\.txt$", dir(dir.string),
                    perl=TRUE, value = TRUE)
  # !!! dir() returns bare file names, not qualified paths, so read.table
  # !!! below only works when dir.string is the current directory.
  sims.df.ls <- lapply( filesList,
                        function(x)read.table(x,
                                              header=TRUE,sep=" ", na.strings="nolinefound")
  )   # "nolinefound" marks missing parameter entries and becomes NA
  sims.merged.df <- bind_rows(sims.df.ls)
  # Set up namesets for dependent, independent, ignored vars.
  # Many of the columns are run-tracking metadata, not analysis inputs.
  allVarNames <- colnames(sims.merged.df)
  ignoreVarNames <- c("timestamp","seed","walltime","cputime","logfilename",
                      "logfilesize","instructionfilename","todaysdatetime","extractorversion",
                      "cpuinfo","mongoid","audittype")
  resultVarNames <- c("docslost","docsrepairedmajority","docsrepairedminority",
                      "lost","nshocks","nglitches","deadserversactive","deadserversall",
                      "docsokay","auditmajority","auditminority","auditlost","auditrepairs",
                      "auditcycles")
  coreResultVarNames <- c("docslost","docsrepairedmajority",
                          "docsrepairedminority")
  # Parameters = everything that is neither a result nor ignored metadata.
  paramVarNames <- setdiff(setdiff(allVarNames, resultVarNames), ignoreVarNames)
  testVarNames <- c("copies", "lifem")
  nonIgnoreVarNames <- setdiff(allVarNames, ignoreVarNames)
  # Select, group, and aggregate.
  sims.selected.df <- sims.merged.df[nonIgnoreVarNames]
  # NOTE(review): eval(parse(...)) builds the group_by call from a string;
  # dplyr's across()/all_of() would do this without string evaluation.
  gp_sims.merged<-eval(parse(text=paste("group_by(sims.selected.df,",
                                        paste(collapse=",", paramVarNames),")")))
  results <- summarise(gp_sims.merged,
                       mdmlosspct=round(midmean(docslost/docstotal)*100,1), n=n())
  # NOTE(review): selectedresults is computed but never used or returned.
  selectedresults <- results[which(results[["copies"]]!=1),]
  return(results)
}
# f n d f G e t S u b s e t D a t a
# Project `results` down to the named columns, preserving the data.frame type.
fndfGetSubsetData <- function(results, lColumnNames) {
  results[lColumnNames]
}
# f n d f G e t A u d i t D a t a
# Columns of interest for audit analyses, restricted to rows with
# lifem >= 2 (labelled "plausible" in the original).
# ("audittype" should eventually be included, but it is not in the data yet.)
fndfGetAuditData <- function(results) {
  audit_cols <- c("copies", "lifem", "mdmlosspct",
                  "auditfrequency", "auditsegments",
                  "docsize", "shelfsize")
  narrow <- results[audit_cols]
  narrow[narrow$lifem >= 2, ]
}
# f n d f G e t S h o c k D a t a
# Keep only the columns relevant to shock analyses.
fndfGetShockData <- function(results) {
  shock_cols <- c("copies", "lifem", "mdmlosspct",
                  "shockfreq", "shockimpact", "shockmaxlife", "shockspan")
  results[shock_cols]
}
# f n d f G e t G l i t c h D a t a
# Keep only the columns relevant to glitch analyses.
fndfGetGlitchData <- function(results) {
  glitch_cols <- c("copies", "lifem", "mdmlosspct",
                   "glitchfreq", "glitchspan", "glitchimpact", "glitchmaxlife",
                   "docsize", "shelfsize")
  results[glitch_cols]
}
# f n d f G e t D o c s i z e D a t a
# NOTE(review): apparent unfinished stub.  The `results` argument is ignored
# and the function returns (invisibly) only the vector of column names built
# by the assignment below.  Also "auditfreq" disagrees with the
# "auditfrequency" column name used in fndfGetAuditData() -- confirm intent
# before completing this.
fndfGetDocsizeData <- function(results)
{
  lNamesIWant <- c("copies","lifem","mdmlosspct",
                   "auditfreq","auditsegments",
                   #"audittype", too, needs fixed
                   "docsize","shelfsize")
}
# f n d f G e t S h e l f s i z e D a t a
# NOTE(review): apparent unfinished stub, same shape as fndfGetDocsizeData():
# `results` is ignored and only the column-name vector is (invisibly)
# returned.  "auditfreq" also disagrees with the "auditfrequency" column
# name used in fndfGetAuditData() -- confirm intent before completing this.
fndfGetShelfsizeData <- function(results)
{
  lNamesIWant <- c("copies","lifem","mdmlosspct",
                   "auditfreq","auditsegments",
                   #"audittype", too, needs fixed
                   "docsize","shelfsize")
}
# Edit history:
# 20171002 RBL Copy from Altman original ExploreGiantOutputs.r.
# Add functions to select data for specific analyses.
# 20171103 RBL Start on audit work.
# TODO: Add audittype to the extracted data in GiantOutput files,
# and then we can use it here.
# 20171129 RBL Add audittype to ignore column list.
# Add 10% trimmed mean as a stat.
# 20171211 RBL Narrow the list of columns for shock data.
#
#
#END
| /veryoldpictures/shocks/low/span1/1yr/DataUtil.r | permissive | MIT-Informatics/PreservationSimulation | R | false | false | 6,519 | r | # DataUtil.r
# Functions to get data for selection and analysis.
# From Micah Altman 20170726
# Adapted RBLlandau 20171002
library(tidyverse)
# H A C K S
# Absurd hack in here to make lifem print reasonably.
# Increase the penalty for printing in scientific
# (exponential) format, and limit the number of digits
# so that there are no places after the radix point.
options("scipen"=100, "digits"=1)
# End of absurd hack.
# Enable printing wide tables.
options("width"=120)
# Add an infix concatenation operator for strings.
`%+%` <- function(a,b) paste0(a,b)
# U T I L I T Y F U N C T I O N S
# Better functions for summarizing the loss data.
# These are more robust than just mean, and more broadly estimated
# than median.
# midmean is just a trimmed mean of the middle half of the sample.
# trimean is a less efficient but easier trimmed central measure,
# and was a JWT favorite.
# trimmedmean is a 10% trimmed mean, i.e., mean of the middle 80% of the data.
trimean <- function(vect)
{ foo <- fivenum(vect);
ans <- 0.5*foo[3] + 0.25*foo[2] + 0.25*foo[4];
ans
}
midmean <- function(vect)
{ ans <- mean(vect, trim=0.25, na.rm=TRUE);
ans
}
trimmedmean <- function(vect)
{ ans <- mean(vect, trim=0.10, na.rm=TRUE);
ans
}
# Select only the dataframe rows with N copies.
fnSelectCopies <- function(dfIn, nCopies)
{ dfIn[dfIn$copies==nCopies,]
}
# Safe functions for plotting.
# Note that safelog(0) returns 0, as needed for sensible axis labeling.
safelog <- function(x) {return(log10(x+1))}
safe <- function(x) {return(x+0.001)}
# Shock tabulation functions.
fntShockTab <- function(input, freq, impact, duration) {
res1 <- input[input$copies>1 &
input$shockfreq==freq &
input$shockimpact==impact &
input$shockmaxlife==duration,]
assign("res.shocktdat", res1, envir=globalenv())
#print(paste("res.shockdat",length(res.shockdat$copies)))
restab <- dcast(res1, copies~lifem, value.var="mdmlosspct")
return(restab)
}
# f n d f G e t G i a n t D a t a
fndfGetGiantData <- function(dir.string)
{
# L O A D F I L E S
# Load & merge all files in directory
if (dir.string == "") {dir.string <- "./"}
filesList <- grep(pattern="^Giant.*\\.txt$", dir(dir.string),
perl=TRUE, value = TRUE)
# !!! I need to get fully qualified names out of grep here. How?
# !!! As it is, works only for current directory.
sims.df.ls <- lapply( filesList,
function(x)read.table(x,
header=TRUE,sep=" ", na.strings="nolinefound")
) # NOTE MISSING PARAMETER ENTRIES in na.strings
sims.merged.df <- bind_rows(sims.df.ls)
# Set up namesets for dependent, independent, ignored vars.
# Many of the columns are not relevant to data analysis, only tracking.
allVarNames <- colnames(sims.merged.df)
ignoreVarNames <- c("timestamp","seed","walltime","cputime","logfilename",
"logfilesize","instructionfilename","todaysdatetime","extractorversion",
"cpuinfo","mongoid","audittype")
resultVarNames <- c("docslost","docsrepairedmajority","docsrepairedminority",
"lost","nshocks","nglitches","deadserversactive","deadserversall",
"docsokay","auditmajority","auditminority","auditlost","auditrepairs",
"auditcycles")
coreResultVarNames <- c("docslost","docsrepairedmajority",
"docsrepairedminority")
paramVarNames <- setdiff(setdiff(allVarNames, resultVarNames), ignoreVarNames)
testVarNames <- c("copies", "lifem")
nonIgnoreVarNames <- setdiff(allVarNames, ignoreVarNames)
# Select, group, and aggregate.
sims.selected.df <- sims.merged.df[nonIgnoreVarNames]
gp_sims.merged<-eval(parse(text=paste("group_by(sims.selected.df,",
paste(collapse=",", paramVarNames),")")))
results <- summarise(gp_sims.merged,
mdmlosspct=round(midmean(docslost/docstotal)*100,1), n=n())
selectedresults <- results[which(results[["copies"]]!=1),]
return(results)
}
# f n d f G e t S u b s e t D a t a
fndfGetSubsetData <- function(results, lColumnNames)
{
subset <- results[lColumnNames]
return(subset)
}
# f n d f G e t A u d i t D a t a
fndfGetAuditData <- function(results)
{
# Specific to audit analyses:
lNamesIWant <- c("copies","lifem","mdmlosspct",
"auditfrequency","auditsegments",
#"audittype", too, needs fixed, but is not in data!
"docsize","shelfsize")
results.narrow <- fndfGetSubsetData(results, lNamesIWant)
results.plausible <- results.narrow[results.narrow$lifem>=2,]
return(results.plausible)
}
# f n d f G e t S h o c k D a t a
fndfGetShockData <- function(results)
{
# Specific to shock analyses:
lNamesIWant <- c("copies","lifem","mdmlosspct",
"shockfreq","shockimpact","shockmaxlife","shockspan"
)
shockresults <- results[lNamesIWant]
return(shockresults)
}
# f n d f G e t G l i t c h D a t a
fndfGetGlitchData <- function(results)
{
# Specific to shock analyses:
lNamesIWant <- c("copies","lifem","mdmlosspct",
"glitchfreq","glitchspan","glitchimpact","glitchmaxlife",
"docsize","shelfsize")
shockresults <- results[lNamesIWant]
return(shockresults)
}
# f n d f G e t D o c s i z e D a t a
fndfGetDocsizeData <- function(results)
{
lNamesIWant <- c("copies","lifem","mdmlosspct",
"auditfreq","auditsegments",
#"audittype", too, needs fixed
"docsize","shelfsize")
}
# f n d f G e t S h e l f s i z e D a t a
fndfGetShelfsizeData <- function(results)
{
lNamesIWant <- c("copies","lifem","mdmlosspct",
"auditfreq","auditsegments",
#"audittype", too, needs fixed
"docsize","shelfsize")
}
# Edit history:
# 20171002 RBL Copy from Altman original ExploreGiantOutputs.r.
# Add functions to select data for specific analyses.
# 20171103 RBL Start on audit work.
# TODO: Add audittype to the extracted data in GiantOutput files,
# and then we can use it here.
# 20171129 RBL Add audittype to ignore column list.
# Add 10% trimmed mean as a stat.
# 20171211 RBL Narrow the list of columns for shock data.
#
#
#END
|
## Network graph of a netmeta object: nodes are treatments, edges are
## observed direct comparisons.  Supports a 2-D base-graphics plot and a
## 3-D plot; edge thickness can encode the number of studies, standard
## errors, or inverse-variance weights, and multi-arm studies can be
## shaded as coloured polygons.
##
## NOTE(review): relies on helpers not visible here (meta:::setchar,
## meta:::is.installed.package, setseq, stress) and, for dim = "3d",
## on rgl-style functions (plot3d, points3d, text3d, lines3d,
## triangles3d) being available -- confirm against package imports.
netgraph <- function(x, seq = x$seq,
                     labels = rownames(x$TE.fixed),
                     cex = 1, col = "slateblue", offset = 0.0175,
                     scale = 1.10,
                     plastic, thickness, lwd = 5, lwd.min = lwd / 2.5, lwd.max = lwd * 4,
                     dim = "2d",
                     ##
                     highlight = NULL, col.highlight = "red2",
                     lwd.highlight = lwd, highlight.split = ":",
                     ##
                     multiarm = any(x$narms > 2),
                     col.multiarm = NULL,
                     alpha.transparency = 0.5,
                     ##
                     points = FALSE, col.points = "red",
                     cex.points = 1, pch.points = 20,
                     ##
                     start.layout = ifelse(dim == "2d", "circle", "eigen"),
                     eig1 = 2, eig2 = 3, eig3 = 4,
                     iterate,
                     tol = 0.0001, maxit = 500, allfigures = FALSE,
                     A.matrix = x$A.matrix,
                     N.matrix = sign(A.matrix),
                     ##
                     xpos = NULL, ypos = NULL, zpos = NULL,
                     ...) {
  ## --- Argument checking and normalisation ---
  if (!inherits(x, "netmeta"))
    stop("Argument 'x' must be an object of class \"netmeta\"")
  dim <- meta:::setchar(dim, c("2d", "3d"))
  is_2d <- dim == "2d"
  is_3d <- !is_2d
  ##
  start.layout <- meta:::setchar(start.layout, c("eigen", "prcomp", "circle", "random"))
  ##
  if (!missing(seq) & is.null(seq))
    stop("Argument 'seq' must be not NULL.")
  ##
  if (!missing(labels) & is.null(labels))
    stop("Argument 'labels' must be not NULL.")
  ## Defaults for 'iterate' / 'plastic' depend on the chosen layout:
  ## the plastic (tube-like) edge look is only the default for the
  ## non-iterated 2-D circle layout.
  if (missing(iterate))
    iterate <- ifelse(start.layout == "circle", FALSE, TRUE)
  if (missing(plastic))
    if (start.layout == "circle" & iterate == FALSE & is_2d)
      plastic <- TRUE
    else
      plastic <- FALSE
  ## Decide how edge thickness is encoded ('thick'); 'thickness' may be a
  ## keyword, a logical, or a user-supplied weight matrix matching A.matrix.
  if (missing(thickness)) {
    if (start.layout == "circle" & iterate == FALSE & plastic == TRUE) {
      thick <- "se.fixed"
      thickness <- "se.fixed"
    }
    else {
      thick <- "equal"
      thickness <- "equal"
    }
  }
  else {
    if (!is.matrix(thickness)) {
      if (length(thickness) == 1 & is.character(thickness))
        thick <- meta:::setchar(thickness,
                                c("equal", "number.of.studies",
                                  "se.fixed", "se.random", "w.fixed", "w.random"))
      ##
      else if (length(thickness) == 1 & is.logical(thickness)) {
        if (thickness)
          thick <- "se.fixed"
        else
          thick <- "equal"
      }
    }
    else {
      ## A thickness matrix must match A.matrix in shape and dimnames.
      if ((dim(thickness)[1] != dim(A.matrix)[1]) |
          (dim(thickness)[2] != dim(A.matrix)[2]))
        stop("Dimension of argument 'A.matrix' and 'thickness' are different.")
      if (is.null(dimnames(thickness)))
        stop("Matrix 'thickness' must have row and column names identical to argument 'A.matrix'.")
      else {
        if (any(rownames(thickness) != rownames(A.matrix)))
          stop("Row names of matrix 'thickness' must be identical to argument 'A.matrix'.")
        if (any(colnames(thickness) != colnames(A.matrix)))
          stop("Column names of matrix 'thickness' must be identical to argument 'A.matrix'.")
      }
      ##
      W.matrix <- thickness
      thick <- "matrix"
    }
  }
  if (allfigures & is_3d) {
    warning("Argument 'allfigures' set to FALSE for 3-D network plot.")
    allfigures <- FALSE
  }
  ## 'seq' (treatment ordering) only matters for the fixed circle layout;
  ## otherwise the natural order of 'labels' is used.
  if (is.null(seq) | !(start.layout == "circle" & iterate == FALSE)) {
    seq1 <- 1:length(labels)
    if (!missing(seq) & !is.null(seq) & (is.null(xpos) & is.null(ypos)))
      warning("Argument 'seq' only considered if start.layout=\"circle\" and iterate=FALSE.")
  }
  else {
    rn <- rownames(x$TE.fixed)
    seq1 <- charmatch(setseq(seq, rn), rn)
  }
  ## Reorder all treatment-indexed objects consistently.
  ##
  A.matrix <- A.matrix[seq1, seq1]
  N.matrix <- N.matrix[seq1, seq1]
  ##
  if (thick == "matrix")
    W.matrix <- W.matrix[seq1, seq1]
  ##
  labels <- labels[seq1]
  A.sign <- sign(A.matrix)
  ## Compute node coordinates (via stress()) unless the caller supplied
  ## xpos/ypos (and zpos for 3-D) explicitly.
  if ((is_2d & (is.null(xpos) & is.null(ypos))) |
      (is_3d & (is.null(xpos) & is.null(ypos) & is.null(zpos)))) {
    stressdata <- stress(x,
                         A.matrix = A.matrix,
                         N.matrix = N.matrix,
                         ##
                         dim = dim,
                         start.layout = start.layout,
                         iterate = iterate,
                         eig1 = eig1, eig2 = eig2, eig3 = eig3,
                         tol = tol,
                         maxit = maxit,
                         ##
                         allfigures = allfigures,
                         ##
                         seq = seq,
                         ##
                         labels = labels,
                         cex = cex,
                         col = col,
                         offset = offset,
                         scale = scale,
                         ##
                         plastic = plastic,
                         thickness = thickness,
                         lwd = lwd,
                         lwd.min = lwd.min,
                         lwd.max = lwd.max,
                         ##
                         highlight = highlight,
                         col.highlight = col.highlight,
                         lwd.highlight = lwd.highlight,
                         highlight.split = highlight.split,
                         ## multiarm
                         col.multiarm = col.multiarm,
                         alpha.transparency = alpha.transparency,
                         ##
                         points = points, col.points = col.points,
                         cex.points = cex.points, pch.points = pch.points,
                         ##
                         ...)
    ##
    xpos <- stressdata$x
    ypos <- stressdata$y
    if (is_3d)
      zpos <- stressdata$z
  }
  if (allfigures)
    return(invisible(NULL))
  n <- dim(A.matrix)[1]
  ## Half-width of the (square) plotting region, scaled slightly beyond
  ## the extreme coordinates.
  d <- scale * max(abs(c(min(c(xpos, ypos), na.rm = TRUE),
                         max(c(xpos, ypos), na.rm = TRUE))))
  ## Generate dataset for plotting
  ## NOTE(review): 'seq' is stored in pd unreordered while the coordinates
  ## and labels were reordered by seq1 above -- verify this is intended.
  ##
  if (is_2d)
    pd <- data.frame(xpos, ypos, labels, seq)
  else
    pd <- data.frame(xpos, ypos, zpos, labels, seq)
  ##
  ## adj1/adj2(/adj3) pick the text-justification per quadrant so labels
  ## are drawn away from the network centre.
  pd$adj1 <- NA
  pd$adj2 <- NA
  pd$adj3 <- NA
  ##
  pd$adj1[pd$xpos >= 0] <- 0
  pd$adj1[pd$xpos < 0] <- 1
  ##
  pd$adj2[pd$ypos > 0] <- 0
  pd$adj2[pd$ypos <= 0] <- 1
  ##
  if (!is_2d) {
    pd$adj3[pd$zpos > 0] <- 0
    pd$adj3[pd$zpos <= 0] <- 1
  }
  ##
  ## Convert the relative label offset into plot units.
  offset <- offset * 2 * d
  ##
  if (is_2d) {
    pd$xpos.labels <- pd$xpos - offset + 2 * (pd$adj1 == 0) * offset
    pd$ypos.labels <- pd$ypos - offset + 2 * (pd$adj2 == 0) * offset
  }
  else {
    pd$xpos.labels <- pd$xpos
    pd$ypos.labels <- pd$ypos
    pd$zpos.labels <- pd$zpos
  }
  ##
  ## Define coloured regions for multi-arm studies
  ##
  if (multiarm) {
    ## Collect the studies with more than two arms (largest first).
    td1 <- data.frame(studies = x$studies, narms = x$narms)
    td1 <- td1[rev(order(td1$narms)), ]
    td1 <- td1[td1$narms > 2, ]
    multiarm.studies <- td1$studies
    ##
    n.multi <- length(multiarm.studies)
    ##
    missing.col.multiarm <- missing(col.multiarm)
    ##
    if (missing.col.multiarm | is.null(col.multiarm)) {
      ## Check for R package colorspace & use various gray values if
      ## not installed packages
      if (!any(as.data.frame(installed.packages())$Package == "colorspace"))
        col.polygon <- grDevices::rainbow(n.multi, alpha = alpha.transparency)
      else
        col.polygon <- colorspace::sequential_hcl(n.multi, alpha = alpha.transparency)
    }
    else {
      ##
      ## col.multiarm may be a palette function; dispatch by the textual
      ## name of the function the caller passed in.
      if (is.function(col.multiarm)) {
        mcname <- deparse(substitute(col.multiarm))
        ##
        csfun <- function(fcall, fname) {
          is.cs <- length(grep(fname, fcall)) > 0
          if (is.cs)
            meta:::is.installed.package("colorspace")
          is.cs
        }
        ##
        if (csfun(mcname, "rainbow_hcl"))
          col.polygon <- colorspace::rainbow_hcl(n.multi, start = 240, end = 60, alpha = alpha.transparency)
        else if (csfun(mcname, "sequential_hcl"))
          col.polygon <- colorspace::sequential_hcl(n.multi, alpha = alpha.transparency)
        else if (csfun(mcname, "diverge_hcl"))
          col.polygon <- colorspace::diverge_hcl(n.multi, alpha = alpha.transparency)
        else if (csfun(mcname, "heat_hcl"))
          col.polygon <- colorspace::heat_hcl(n.multi, alpha = alpha.transparency)
        else if (csfun(mcname, "terrain_hcl"))
          col.polygon <- colorspace::terrain_hcl(n.multi, alpha = alpha.transparency)
        else if (csfun(mcname, "diverge_hsv"))
          col.polygon <- colorspace::diverge_hsv(n.multi, alpha = alpha.transparency)
        else if (csfun(mcname, "choose_palette")) {
          fcolm <- colorspace::choose_palette(n = n.multi)
          col.polygon <- fcolm(n = n.multi)
        }
        else
          col.polygon <- sapply(n.multi, col.multiarm, alpha = alpha.transparency)
        ##
        if (csfun(mcname, "sequential_hcl") |
            csfun(mcname, "diverge_hcl") |
            csfun(mcname, "heat_hcl"))
          col.polygon <- rev(col.polygon)
      }
    }
    ##
    ## Alternatively, col.multiarm may be an explicit colour vector.
    if (!missing.col.multiarm & is.character(col.multiarm)) {
      if (length(col.multiarm) > 1 & length(col.multiarm) != n.multi)
        stop("Length of argument 'col.multiarm' must be equal to one or the number of multi-arm studies: ", n.multi)
      col.polygon <- col.multiarm
    }
  }
  ##
  ## Define line width
  ## (W.matrix holds the per-edge line width, scaled into [lwd.min, lwd.max].)
  ##
  if (thick == "number.of.studies") {
    W.matrix <- lwd.max * A.matrix / max(A.matrix)
    W.matrix[W.matrix < lwd.min & W.matrix != 0] <- lwd.min
  }
  else if (thick == "equal") {
    W.matrix <- lwd * A.sign
  }
  else if (thick == "se.fixed") {
    IV.matrix <- x$seTE.direct.fixed[seq1, seq1]
    IV.matrix[is.infinite(IV.matrix)] <- NA
    W.matrix <- lwd.max * min(IV.matrix, na.rm = TRUE) / IV.matrix
    W.matrix[W.matrix < lwd.min & W.matrix != 0] <- lwd.min
  }
  else if (thick == "se.random") {
    IV.matrix <- x$seTE.direct.random[seq1, seq1]
    IV.matrix[is.infinite(IV.matrix)] <- NA
    W.matrix <- lwd.max * min(IV.matrix, na.rm = TRUE) / IV.matrix
    W.matrix[W.matrix < lwd.min & W.matrix != 0] <- lwd.min
  }
  else if (thick == "w.fixed") {
    IV.matrix <- 1 / x$seTE.direct.fixed[seq1, seq1]^2
    IV.matrix[is.infinite(IV.matrix)] <- NA
    W.matrix <- lwd.max * IV.matrix / max(IV.matrix, na.rm = TRUE)
    W.matrix[W.matrix < lwd.min & W.matrix != 0] <- lwd.min
  }
  else if (thick == "w.random") {
    IV.matrix <- 1 / x$seTE.direct.random[seq1, seq1]^2
    IV.matrix[is.infinite(IV.matrix)] <- NA
    W.matrix <- lwd.max * IV.matrix / max(IV.matrix, na.rm = TRUE)
    W.matrix[W.matrix < lwd.min & W.matrix != 0] <- lwd.min
  }
  else if (thick == "matrix") {
    W.matrix[is.infinite(W.matrix)] <- NA
    if (min(W.matrix[W.matrix != 0], na.rm = TRUE) == max(W.matrix[W.matrix != 0], na.rm = TRUE))
      W.matrix <- lwd * W.matrix
    else
      W.matrix <- lwd.max * W.matrix / max(W.matrix, na.rm = TRUE)
    W.matrix[W.matrix < lwd.min & W.matrix != 0] <- lwd.min
  }
  ##
  ##
  ## Plot graph
  ##
  ##
  range <- c(-d, d)
  ##
  if (is_2d) {
    oldpar <- par(xpd = TRUE, pty = "s")
    on.exit(par(oldpar))
    ##
    ## Empty square canvas; everything is drawn on top of it.
    plot(xpos, ypos,
         xlim = range, ylim = range,
         type = "n", axes = FALSE, bty = "n",
         xlab = "", ylab = "",
         ...)
    ##
    ## Add coloured regions for multi-arm studies
    ##
    if (multiarm) {
      ##
      if (n.multi > 0) {
        multiarm.labels <- vector("list", n.multi)
        if (length(col.polygon) == 1)
          col.polygon <- rep(col.polygon, n.multi)
        for (i in 1:n.multi) {
          treat1 <- x$treat1[x$studlab %in% multiarm.studies[i]]
          treat2 <- x$treat2[x$studlab %in% multiarm.studies[i]]
          multiarm.labels[[i]] <- sort(unique(c(treat2, treat1)))
          ##
          pdm <- pd[pd$seq %in% multiarm.labels[[i]], ]
          if (nrow(pdm) == 0)
            pdm <- pd[pd$labels %in% multiarm.labels[[i]], ]
          ##
          ## Clockwise ordering of polygon coordinates
          ##
          polysort <- function(x, y) {
            xnorm <- (x - mean(x)) / sd(x) # Normalise coordinate x
            ynorm <- (y - mean(y)) / sd(y) # Normalise coordinate y
            r <- sqrt(xnorm^2 + ynorm^2) # Calculate polar coordinates
            cosphi <- xnorm / r
            sinphi <- ynorm / r
            s <- as.numeric(sinphi > 0) # Define angles to lie in [0, 2 * pi]
            phi <- acos(cosphi)
            alpha <- s * phi + (1 - s) * (2 * pi - phi)
            ##
            res <- order(alpha)
            res
          }
          ##
          pdm <- pdm[polysort(pdm$xpos, pdm$ypos), ]
          ##
          polygon(pdm$xpos, pdm$ypos,
                  col = col.polygon[i], border = NA)
        }
      }
    }
    ##
    ## Draw lines
    ## "Plastic" look: each edge is over-drawn 30 times with shrinking
    ## width and lightening greys to fake a rounded-tube appearance.
    ##
    if (plastic) {
      n.plastic <- 30
      lwd.multiply <- rep(NA, n.plastic)
      cols <- rep("", n.plastic)
      j <- 0
      for (i in n.plastic:1) {
        j <- j + 1
        lwd.multiply[j] <- sin(pi * i / 2 / n.plastic)
        cols[j] <- paste("gray", round(100 * (1 - i / n.plastic)), sep = "")
      }
    }
    else {
      lwd.multiply <- 1
      cols <- col
    }
    ##
    for (n.plines in 1:length(lwd.multiply)) {
      for (i in 1:(n - 1)) {
        for (j in (i + 1):n) {
          if (A.sign[i, j] > 0) {
            lines(c(xpos[i], xpos[j]), c(ypos[i], ypos[j]),
                  lwd = W.matrix[i, j] * lwd.multiply[n.plines],
                  col = cols[n.plines])
          }
        }
      }
    }
    ##
    ## Add highlighted comparisons
    ##
    if (!is.null(highlight)) {
      for (high in highlight) {
        highs <- unlist(strsplit(high, split = highlight.split))
        if (length(highs) != 2)
          stop("Wrong format for argument 'highlight' (see helpfile of plotgraph command).")
        ##
        if (sum(pd$labels %in% highs) != 2)
          stop(paste("Argument 'highlight' must contain two of the following values (separated by \":\"):\n  ",
                     paste(paste("'", pd$labels, "'", sep = ""),
                           collapse = " - "), sep = ""))
        ##
        pdh <- pd[pd$labels %in% highs, ]
        ##
        if (is_2d) {
          lines(pdh$xpos, pdh$ypos,
                lwd = W.matrix[labels == highs[1], labels == highs[2]],
                col = col.highlight)
        }
      }
    }
    ##
    ## Add points for labels
    ##
    if (points)
      points(xpos, ypos,
             pch = pch.points, cex = cex.points, col = col.points)
    ##
    ## Print treatment labels
    ##
    if (!is.null(labels))
      for (i in 1:n)
        text(pd$xpos.labels[i], pd$ypos.labels[i],
             labels = pd$labels[i],
             cex = cex,
             adj = c(pd$adj1[i], pd$adj2[i]))
  }
  else {
    ## --- 3-D branch ---
    plot3d(xpos, ypos, zpos,
           size = 10, col = col.points, cex = cex.points,
           axes = FALSE, box = FALSE,
           xlab = "", ylab = "", zlab = "")
    ##
    ## Add points for labels
    ##
    if (points)
      points3d(xpos, ypos, zpos,
               pch = pch.points, cex = cex.points, col = col.points)
    ##
    ## Print treatment labels
    ##
    if (!is.null(labels))
      for (i in 1:n)
        text3d(pd$xpos.labels[i], pd$ypos.labels[i], pd$zpos.labels[i],
               texts = pd$labels[i],
               cex = cex,
               adj = c(pd$adj1[i], pd$adj2[i]))
    ##
    ## Add highlighted comparisons
    ##
    if (!is.null(highlight)) {
      for (high in highlight) {
        highs <- unlist(strsplit(high, split = highlight.split))
        if (length(highs) != 2)
          stop("Wrong format for argument 'highlight' (see helpfile of plotgraph command).")
        ##
        if (sum(pd$labels %in% highs) != 2)
          stop(paste("Argument 'highlight' must contain two of the following values (separated by \":\"):\n  ",
                     paste(paste("'", pd$labels, "'", sep = ""),
                           collapse = " - "), sep = ""))
        ##
        pdh <- pd[pd$labels %in% highs, ]
        ##
        ## Coordinates scaled by (1 + 1e-4) -- presumably so the highlight
        ## renders just outside the base edge instead of coinciding with
        ## it; confirm against rendering behaviour.
        lines3d(pdh$xpos*(1+1e-4), pdh$ypos*(1+1e-4), pdh$zpos*(1+1e-4),
                lwd = W.matrix[labels == highs[1], labels == highs[2]],
                col = col.highlight)
      }
    }
    ##
    ## Add coloured regions for multi-arm studies
    ##
    if (multiarm) {
      ##
      morethan3 <- FALSE
      ##
      if (n.multi > 0) {
        multiarm.labels <- vector("list", n.multi)
        if (length(col.polygon) == 1)
          col.polygon <- rep(col.polygon, n.multi)
        for (i in 1:n.multi) {
          treat1 <- x$treat1[x$studlab %in% multiarm.studies[i]]
          treat2 <- x$treat2[x$studlab %in% multiarm.studies[i]]
          multiarm.labels[[i]] <- sort(unique(c(treat2, treat1)))
          ##
          pdm <- pd[pd$seq %in% multiarm.labels[[i]], ]
          if (nrow(pdm) == 0)
            pdm <- pd[pd$labels %in% multiarm.labels[[i]], ]
          ## Only three-arm studies can be drawn as a 3-D triangle.
          if (nrow(pdm) == 3)
            triangles3d(pdm$xpos, pdm$ypos, pdm$zpos,
                        col = col.polygon[i])
          else
            morethan3 <- TRUE
        }
      }
      if (morethan3)
        warning("Multi-arm studies with more than three treatments not shown in 3-D plot.")
    }
    ##
    ## Draw lines
    ##
    for (i in 1:(n - 1)) {
      for (j in (i + 1):n) {
        if (A.sign[i, j] > 0) {
          lines3d(c(xpos[i], xpos[j]), c(ypos[i], ypos[j]), c(zpos[i], zpos[j]),
                  lwd = W.matrix[i, j],
                  col = col)
        }
      }
    }
  }
  invisible(NULL)
}
| /netmeta/R/netgraph.R | no_license | ingted/R-Examples | R | false | false | 17,779 | r | netgraph <- function(x, seq = x$seq,
labels = rownames(x$TE.fixed),
cex = 1, col = "slateblue", offset = 0.0175,
scale = 1.10,
plastic, thickness, lwd = 5, lwd.min = lwd / 2.5, lwd.max = lwd * 4,
dim = "2d",
##
highlight = NULL, col.highlight = "red2",
lwd.highlight = lwd, highlight.split = ":",
##
multiarm = any(x$narms > 2),
col.multiarm = NULL,
alpha.transparency = 0.5,
##
points = FALSE, col.points = "red",
cex.points = 1, pch.points = 20,
##
start.layout = ifelse(dim == "2d", "circle", "eigen"),
eig1 = 2, eig2 = 3, eig3 = 4,
iterate,
tol = 0.0001, maxit = 500, allfigures = FALSE,
A.matrix = x$A.matrix,
N.matrix = sign(A.matrix),
##
xpos = NULL, ypos = NULL, zpos = NULL,
...) {
if (!inherits(x, "netmeta"))
stop("Argument 'x' must be an object of class \"netmeta\"")
dim <- meta:::setchar(dim, c("2d", "3d"))
is_2d <- dim == "2d"
is_3d <- !is_2d
##
start.layout <- meta:::setchar(start.layout, c("eigen", "prcomp", "circle", "random"))
##
if (!missing(seq) & is.null(seq))
stop("Argument 'seq' must be not NULL.")
##
if (!missing(labels) & is.null(labels))
stop("Argument 'labels' must be not NULL.")
if (missing(iterate))
iterate <- ifelse(start.layout == "circle", FALSE, TRUE)
if (missing(plastic))
if (start.layout == "circle" & iterate == FALSE & is_2d)
plastic <- TRUE
else
plastic <- FALSE
if (missing(thickness)) {
if (start.layout == "circle" & iterate == FALSE & plastic == TRUE) {
thick <- "se.fixed"
thickness <- "se.fixed"
}
else {
thick <- "equal"
thickness <- "equal"
}
}
else {
if (!is.matrix(thickness)) {
if (length(thickness) == 1 & is.character(thickness))
thick <- meta:::setchar(thickness,
c("equal", "number.of.studies",
"se.fixed", "se.random", "w.fixed", "w.random"))
##
else if (length(thickness) == 1 & is.logical(thickness)) {
if (thickness)
thick <- "se.fixed"
else
thick <- "equal"
}
}
else {
if ((dim(thickness)[1] != dim(A.matrix)[1]) |
(dim(thickness)[2] != dim(A.matrix)[2]))
stop("Dimension of argument 'A.matrix' and 'thickness' are different.")
if (is.null(dimnames(thickness)))
stop("Matrix 'thickness' must have row and column names identical to argument 'A.matrix'.")
else {
if (any(rownames(thickness) != rownames(A.matrix)))
stop("Row names of matrix 'thickness' must be identical to argument 'A.matrix'.")
if (any(colnames(thickness) != colnames(A.matrix)))
stop("Column names of matrix 'thickness' must be identical to argument 'A.matrix'.")
}
##
W.matrix <- thickness
thick <- "matrix"
}
}
if (allfigures & is_3d) {
warning("Argument 'allfigures' set to FALSE for 3-D network plot.")
allfigures <- FALSE
}
if (is.null(seq) | !(start.layout == "circle" & iterate == FALSE)) {
seq1 <- 1:length(labels)
if (!missing(seq) & !is.null(seq) & (is.null(xpos) & is.null(ypos)))
warning("Argument 'seq' only considered if start.layout=\"circle\" and iterate=FALSE.")
}
else {
rn <- rownames(x$TE.fixed)
seq1 <- charmatch(setseq(seq, rn), rn)
}
##
A.matrix <- A.matrix[seq1, seq1]
N.matrix <- N.matrix[seq1, seq1]
##
if (thick == "matrix")
W.matrix <- W.matrix[seq1, seq1]
##
labels <- labels[seq1]
A.sign <- sign(A.matrix)
if ((is_2d & (is.null(xpos) & is.null(ypos))) |
(is_3d & (is.null(xpos) & is.null(ypos) & is.null(zpos)))) {
stressdata <- stress(x,
A.matrix = A.matrix,
N.matrix = N.matrix,
##
dim = dim,
start.layout = start.layout,
iterate = iterate,
eig1 = eig1, eig2 = eig2, eig3 = eig3,
tol = tol,
maxit = maxit,
##
allfigures = allfigures,
##
seq = seq,
##
labels = labels,
cex = cex,
col = col,
offset = offset,
scale = scale,
##
plastic = plastic,
thickness = thickness,
lwd = lwd,
lwd.min = lwd.min,
lwd.max = lwd.max,
##
highlight = highlight,
col.highlight = col.highlight,
lwd.highlight = lwd.highlight,
highlight.split = highlight.split,
## multiarm
col.multiarm = col.multiarm,
alpha.transparency = alpha.transparency,
##
points = points, col.points = col.points,
cex.points = cex.points, pch.points = pch.points,
##
...)
##
xpos <- stressdata$x
ypos <- stressdata$y
if (is_3d)
zpos <- stressdata$z
}
if (allfigures)
return(invisible(NULL))
n <- dim(A.matrix)[1]
d <- scale * max(abs(c(min(c(xpos, ypos), na.rm = TRUE),
max(c(xpos, ypos), na.rm = TRUE))))
## Generate dataset for plotting
##
if (is_2d)
pd <- data.frame(xpos, ypos, labels, seq)
else
pd <- data.frame(xpos, ypos, zpos, labels, seq)
##
pd$adj1 <- NA
pd$adj2 <- NA
pd$adj3 <- NA
##
pd$adj1[pd$xpos >= 0] <- 0
pd$adj1[pd$xpos < 0] <- 1
##
pd$adj2[pd$ypos > 0] <- 0
pd$adj2[pd$ypos <= 0] <- 1
##
if (!is_2d) {
pd$adj3[pd$zpos > 0] <- 0
pd$adj3[pd$zpos <= 0] <- 1
}
##
offset <- offset * 2 * d
##
if (is_2d) {
pd$xpos.labels <- pd$xpos - offset + 2 * (pd$adj1 == 0) * offset
pd$ypos.labels <- pd$ypos - offset + 2 * (pd$adj2 == 0) * offset
}
else {
pd$xpos.labels <- pd$xpos
pd$ypos.labels <- pd$ypos
pd$zpos.labels <- pd$zpos
}
##
## Define coloured regions for multi-arm studies
##
if (multiarm) {
td1 <- data.frame(studies = x$studies, narms = x$narms)
td1 <- td1[rev(order(td1$narms)), ]
td1 <- td1[td1$narms > 2, ]
multiarm.studies <- td1$studies
##
n.multi <- length(multiarm.studies)
##
missing.col.multiarm <- missing(col.multiarm)
##
if (missing.col.multiarm | is.null(col.multiarm)) {
## Check for R package colorspace & use various gray values if
## not installed packages
if (!any(as.data.frame(installed.packages())$Package == "colorspace"))
col.polygon <- grDevices::rainbow(n.multi, alpha = alpha.transparency)
else
col.polygon <- colorspace::sequential_hcl(n.multi, alpha = alpha.transparency)
}
else {
##
if (is.function(col.multiarm)) {
mcname <- deparse(substitute(col.multiarm))
##
csfun <- function(fcall, fname) {
is.cs <- length(grep(fname, fcall)) > 0
if (is.cs)
meta:::is.installed.package("colorspace")
is.cs
}
##
if (csfun(mcname, "rainbow_hcl"))
col.polygon <- colorspace::rainbow_hcl(n.multi, start = 240, end = 60, alpha = alpha.transparency)
else if (csfun(mcname, "sequential_hcl"))
col.polygon <- colorspace::sequential_hcl(n.multi, alpha = alpha.transparency)
else if (csfun(mcname, "diverge_hcl"))
col.polygon <- colorspace::diverge_hcl(n.multi, alpha = alpha.transparency)
else if (csfun(mcname, "heat_hcl"))
col.polygon <- colorspace::heat_hcl(n.multi, alpha = alpha.transparency)
else if (csfun(mcname, "terrain_hcl"))
col.polygon <- colorspace::terrain_hcl(n.multi, alpha = alpha.transparency)
else if (csfun(mcname, "diverge_hsv"))
col.polygon <- colorspace::diverge_hsv(n.multi, alpha = alpha.transparency)
else if (csfun(mcname, "choose_palette")) {
fcolm <- colorspace::choose_palette(n = n.multi)
col.polygon <- fcolm(n = n.multi)
}
else
col.polygon <- sapply(n.multi, col.multiarm, alpha = alpha.transparency)
##
if (csfun(mcname, "sequential_hcl") |
csfun(mcname, "diverge_hcl") |
csfun(mcname, "heat_hcl"))
col.polygon <- rev(col.polygon)
}
}
##
if (!missing.col.multiarm & is.character(col.multiarm)) {
if (length(col.multiarm) > 1 & length(col.multiarm) != n.multi)
stop("Length of argument 'col.multiarm' must be equal to one or the number of multi-arm studies: ", n.multi)
col.polygon <- col.multiarm
}
}
##
## Define line width
##
if (thick == "number.of.studies") {
W.matrix <- lwd.max * A.matrix / max(A.matrix)
W.matrix[W.matrix < lwd.min & W.matrix != 0] <- lwd.min
}
else if (thick == "equal") {
W.matrix <- lwd * A.sign
}
else if (thick == "se.fixed") {
IV.matrix <- x$seTE.direct.fixed[seq1, seq1]
IV.matrix[is.infinite(IV.matrix)] <- NA
W.matrix <- lwd.max * min(IV.matrix, na.rm = TRUE) / IV.matrix
W.matrix[W.matrix < lwd.min & W.matrix != 0] <- lwd.min
}
else if (thick == "se.random") {
IV.matrix <- x$seTE.direct.random[seq1, seq1]
IV.matrix[is.infinite(IV.matrix)] <- NA
W.matrix <- lwd.max * min(IV.matrix, na.rm = TRUE) / IV.matrix
W.matrix[W.matrix < lwd.min & W.matrix != 0] <- lwd.min
}
else if (thick == "w.fixed") {
IV.matrix <- 1 / x$seTE.direct.fixed[seq1, seq1]^2
IV.matrix[is.infinite(IV.matrix)] <- NA
W.matrix <- lwd.max * IV.matrix / max(IV.matrix, na.rm = TRUE)
W.matrix[W.matrix < lwd.min & W.matrix != 0] <- lwd.min
}
else if (thick == "w.random") {
IV.matrix <- 1 / x$seTE.direct.random[seq1, seq1]^2
IV.matrix[is.infinite(IV.matrix)] <- NA
W.matrix <- lwd.max * IV.matrix / max(IV.matrix, na.rm = TRUE)
W.matrix[W.matrix < lwd.min & W.matrix != 0] <- lwd.min
}
else if (thick == "matrix") {
W.matrix[is.infinite(W.matrix)] <- NA
if (min(W.matrix[W.matrix != 0], na.rm = TRUE) == max(W.matrix[W.matrix != 0], na.rm = TRUE))
W.matrix <- lwd * W.matrix
else
W.matrix <- lwd.max * W.matrix / max(W.matrix, na.rm = TRUE)
W.matrix[W.matrix < lwd.min & W.matrix != 0] <- lwd.min
}
##
##
## Plot graph
##
##
range <- c(-d, d)
##
if (is_2d) {
oldpar <- par(xpd = TRUE, pty = "s")
on.exit(par(oldpar))
##
plot(xpos, ypos,
xlim = range, ylim = range,
type = "n", axes = FALSE, bty = "n",
xlab = "", ylab = "",
...)
##
## Add coloured regions for multi-arm studies
##
if (multiarm) {
##
if (n.multi > 0) {
multiarm.labels <- vector("list", n.multi)
if (length(col.polygon) == 1)
col.polygon <- rep(col.polygon, n.multi)
for (i in 1:n.multi) {
treat1 <- x$treat1[x$studlab %in% multiarm.studies[i]]
treat2 <- x$treat2[x$studlab %in% multiarm.studies[i]]
multiarm.labels[[i]] <- sort(unique(c(treat2, treat1)))
##
pdm <- pd[pd$seq %in% multiarm.labels[[i]], ]
if (nrow(pdm) == 0)
pdm <- pd[pd$labels %in% multiarm.labels[[i]], ]
##
## Clockwise ordering of polygon coordinates
##
polysort <- function(x, y) {
xnorm <- (x - mean(x)) / sd(x) # Normalise coordinate x
ynorm <- (y - mean(y)) / sd(y) # Normalise coordinate y
r <- sqrt(xnorm^2 + ynorm^2) # Calculate polar coordinates
cosphi <- xnorm / r
sinphi <- ynorm / r
s <- as.numeric(sinphi > 0) # Define angles to lie in [0, 2 * pi]
phi <- acos(cosphi)
alpha <- s * phi + (1 - s) * (2 * pi - phi)
##
res <- order(alpha)
res
}
##
pdm <- pdm[polysort(pdm$xpos, pdm$ypos), ]
##
polygon(pdm$xpos, pdm$ypos,
col = col.polygon[i], border = NA)
}
}
}
##
## Draw lines
##
if (plastic) {
n.plastic <- 30
lwd.multiply <- rep(NA, n.plastic)
cols <- rep("", n.plastic)
j <- 0
for (i in n.plastic:1) {
j <- j + 1
lwd.multiply[j] <- sin(pi * i / 2 / n.plastic)
cols[j] <- paste("gray", round(100 * (1 - i / n.plastic)), sep = "")
}
}
else {
lwd.multiply <- 1
cols <- col
}
##
for (n.plines in 1:length(lwd.multiply)) {
for (i in 1:(n - 1)) {
for (j in (i + 1):n) {
if (A.sign[i, j] > 0) {
lines(c(xpos[i], xpos[j]), c(ypos[i], ypos[j]),
lwd = W.matrix[i, j] * lwd.multiply[n.plines],
col = cols[n.plines])
}
}
}
}
##
## Add highlighted comparisons
##
if (!is.null(highlight)) {
for (high in highlight) {
highs <- unlist(strsplit(high, split = highlight.split))
if (length(highs) != 2)
stop("Wrong format for argument 'highlight' (see helpfile of plotgraph command).")
##
if (sum(pd$labels %in% highs) != 2)
stop(paste("Argument 'highlight' must contain two of the following values (separated by \":\"):\n ",
paste(paste("'", pd$labels, "'", sep = ""),
collapse = " - "), sep = ""))
##
pdh <- pd[pd$labels %in% highs, ]
##
if (is_2d) {
lines(pdh$xpos, pdh$ypos,
lwd = W.matrix[labels == highs[1], labels == highs[2]],
col = col.highlight)
}
}
}
##
## Add points for labels
##
if (points)
points(xpos, ypos,
pch = pch.points, cex = cex.points, col = col.points)
##
## Print treatment labels
##
if (!is.null(labels))
for (i in 1:n)
text(pd$xpos.labels[i], pd$ypos.labels[i],
labels = pd$labels[i],
cex = cex,
adj = c(pd$adj1[i], pd$adj2[i]))
}
else {
plot3d(xpos, ypos, zpos,
size = 10, col = col.points, cex = cex.points,
axes = FALSE, box = FALSE,
xlab = "", ylab = "", zlab = "")
##
## Add points for labels
##
if (points)
points3d(xpos, ypos, zpos,
pch = pch.points, cex = cex.points, col = col.points)
##
## Print treatment labels
##
if (!is.null(labels))
for (i in 1:n)
text3d(pd$xpos.labels[i], pd$ypos.labels[i], pd$zpos.labels[i],
texts = pd$labels[i],
cex = cex,
adj = c(pd$adj1[i], pd$adj2[i]))
##
## Add highlighted comparisons
##
if (!is.null(highlight)) {
for (high in highlight) {
highs <- unlist(strsplit(high, split = highlight.split))
if (length(highs) != 2)
stop("Wrong format for argument 'highlight' (see helpfile of plotgraph command).")
##
if (sum(pd$labels %in% highs) != 2)
stop(paste("Argument 'highlight' must contain two of the following values (separated by \":\"):\n ",
paste(paste("'", pd$labels, "'", sep = ""),
collapse = " - "), sep = ""))
##
pdh <- pd[pd$labels %in% highs, ]
##
lines3d(pdh$xpos*(1+1e-4), pdh$ypos*(1+1e-4), pdh$zpos*(1+1e-4),
lwd = W.matrix[labels == highs[1], labels == highs[2]],
col = col.highlight)
}
}
##
## Add coloured regions for multi-arm studies
##
if (multiarm) {
##
morethan3 <- FALSE
##
if (n.multi > 0) {
multiarm.labels <- vector("list", n.multi)
if (length(col.polygon) == 1)
col.polygon <- rep(col.polygon, n.multi)
for (i in 1:n.multi) {
treat1 <- x$treat1[x$studlab %in% multiarm.studies[i]]
treat2 <- x$treat2[x$studlab %in% multiarm.studies[i]]
multiarm.labels[[i]] <- sort(unique(c(treat2, treat1)))
##
pdm <- pd[pd$seq %in% multiarm.labels[[i]], ]
if (nrow(pdm) == 0)
pdm <- pd[pd$labels %in% multiarm.labels[[i]], ]
if (nrow(pdm) == 3)
triangles3d(pdm$xpos, pdm$ypos, pdm$zpos,
col = col.polygon[i])
else
morethan3 <- TRUE
}
}
if (morethan3)
warning("Multi-arm studies with more than three treatments not shown in 3-D plot.")
}
##
## Draw lines
##
for (i in 1:(n - 1)) {
for (j in (i + 1):n) {
if (A.sign[i, j] > 0) {
lines3d(c(xpos[i], xpos[j]), c(ypos[i], ypos[j]), c(zpos[i], zpos[j]),
lwd = W.matrix[i, j],
col = col)
}
}
}
}
invisible(NULL)
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/sort_p.R
\name{sort_p.tbl_regression}
\alias{sort_p.tbl_regression}
\title{Sort variables in table by ascending p-values}
\usage{
\method{sort_p}{tbl_regression}(x, ...)
}
\arguments{
\item{x}{An object created using \code{tbl_regression} function}
\item{...}{Not used}
}
\value{
A \code{tbl_regression} object
}
\description{
Sort variables in tables created by \link{tbl_regression} by ascending p-values
}
\section{Example Output}{
\if{html}{\figure{tbl_lm_sort_p_ex.png}{options: width=50\%}}
}
\examples{
tbl_lm_sort_p_ex <-
glm(response ~ trt + grade, trial, family = binomial(link = "logit")) \%>\%
tbl_regression(exponentiate = TRUE) \%>\%
sort_p()
}
\seealso{
Other tbl_regression tools: \code{\link{add_global_p.tbl_regression}},
\code{\link{add_nevent.tbl_regression}},
\code{\link{bold_italicize_labels_levels}},
\code{\link{bold_p.tbl_regression}},
\code{\link{bold_p.tbl_stack}},
\code{\link{inline_text.tbl_regression}},
\code{\link{modify_header}}, \code{\link{tbl_merge}},
\code{\link{tbl_regression}}, \code{\link{tbl_stack}}
}
\author{
Karissa Whiting
}
\concept{tbl_regression tools}
| /man/sort_p.tbl_regression.Rd | permissive | Glewando/gtsummary | R | false | true | 1,205 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/sort_p.R
\name{sort_p.tbl_regression}
\alias{sort_p.tbl_regression}
\title{Sort variables in table by ascending p-values}
\usage{
\method{sort_p}{tbl_regression}(x, ...)
}
\arguments{
\item{x}{An object created using \code{tbl_regression} function}
\item{...}{Not used}
}
\value{
A \code{tbl_regression} object
}
\description{
Sort variables in tables created by \link{tbl_regression} by ascending p-values
}
\section{Example Output}{
\if{html}{\figure{tbl_lm_sort_p_ex.png}{options: width=50\%}}
}
\examples{
tbl_lm_sort_p_ex <-
glm(response ~ trt + grade, trial, family = binomial(link = "logit")) \%>\%
tbl_regression(exponentiate = TRUE) \%>\%
sort_p()
}
\seealso{
Other tbl_regression tools: \code{\link{add_global_p.tbl_regression}},
\code{\link{add_nevent.tbl_regression}},
\code{\link{bold_italicize_labels_levels}},
\code{\link{bold_p.tbl_regression}},
\code{\link{bold_p.tbl_stack}},
\code{\link{inline_text.tbl_regression}},
\code{\link{modify_header}}, \code{\link{tbl_merge}},
\code{\link{tbl_regression}}, \code{\link{tbl_stack}}
}
\author{
Karissa Whiting
}
\concept{tbl_regression tools}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/empirical.controls.R
\name{empirical.controls}
\alias{empirical.controls}
\title{A function for estimating the probability that each gene is an empirical control}
\usage{
empirical.controls(dat, mod, mod0 = NULL, n.sv, B = 5,
type = c("norm", "counts"))
}
\arguments{
\item{dat}{The transformed data matrix with the variables in rows and samples in columns}
\item{mod}{The model matrix being used to fit the data}
\item{mod0}{The null model being compared when fitting the data}
\item{n.sv}{The number of surrogate variables to estimate}
\item{B}{The number of iterations of the irwsva algorithm to perform}
\item{type}{If type is norm then standard irwsva is applied, if type is counts, then the moderated log transform is applied first}
}
\value{
pcontrol A vector of probabilities that each gene is a control.
}
\description{
This function uses the iteratively reweighted surrogate variable analysis approach
to estimate the probability that each gene is an empirical control.
}
\examples{
library(bladderbatch)
data(bladderdata)
dat <- bladderEset[1:5000,]
pheno = pData(dat)
edata = exprs(dat)
mod = model.matrix(~as.factor(cancer), data=pheno)
n.sv = num.sv(edata,mod,method="leek")
pcontrol <- empirical.controls(edata,mod,mod0=NULL,n.sv=n.sv,type="norm")
}
| /man/empirical.controls.Rd | no_license | DongyueXie/sva-devel | R | false | true | 1,351 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/empirical.controls.R
\name{empirical.controls}
\alias{empirical.controls}
\title{A function for estimating the probability that each gene is an empirical control}
\usage{
empirical.controls(dat, mod, mod0 = NULL, n.sv, B = 5,
type = c("norm", "counts"))
}
\arguments{
\item{dat}{The transformed data matrix with the variables in rows and samples in columns}
\item{mod}{The model matrix being used to fit the data}
\item{mod0}{The null model being compared when fitting the data}
\item{n.sv}{The number of surrogate variables to estimate}
\item{B}{The number of iterations of the irwsva algorithm to perform}
\item{type}{If type is norm then standard irwsva is applied, if type is counts, then the moderated log transform is applied first}
}
\value{
pcontrol A vector of probabilities that each gene is a control.
}
\description{
This function uses the iteratively reweighted surrogate variable analysis approach
to estimate the probability that each gene is an empirical control.
}
\examples{
library(bladderbatch)
data(bladderdata)
dat <- bladderEset[1:5000,]
pheno = pData(dat)
edata = exprs(dat)
mod = model.matrix(~as.factor(cancer), data=pheno)
n.sv = num.sv(edata,mod,method="leek")
pcontrol <- empirical.controls(edata,mod,mod0=NULL,n.sv=n.sv,type="norm")
}
|
#Day 16
# AoC 2017 day 16: read the comma-separated dance moves and split each into
# its move letter (`moves`) and its "/"-separated arguments (`args`).
setwd("C:/Users/David.simons/Documents/advent of code")
dance <- unlist(strsplit(readLines("day16.txt"), ","))
moves <- substr(dance, 1, 1)  # first character = move type: s(pin)/x(change)/p(artner)
args <- strsplit(substr(dance, 2, nchar(dance)), "/")  # remainder = move arguments
#part 1 ----
# Apply one complete round of the dance to the vector of programs.
# Reads the parsed instructions from the globals `moves` (single-letter
# move codes) and `args` (the "/"-split move arguments).
danceRound <- function(progs){
  n_progs <- length(progs)
  for (k in seq_along(moves)) {
    arg <- args[[k]]
    if (moves[k] == "s") {
      # Spin: rotate the last `shift` programs to the front.
      shift <- as.integer(arg)
      progs <- progs[c((n_progs-shift+1):n_progs, 1:(n_progs-shift))]
    } else if (moves[k] == "x") {
      # Exchange: swap the programs at the two given 0-based positions.
      swap_at <- as.integer(arg) + 1
      progs[swap_at] <- progs[rev(swap_at)]
    } else if (moves[k] == "p") {
      # Partner: swap the two named programs wherever they currently stand.
      progs[match(arg, progs)] <- rev(arg)
    }
  }
  progs
}
# Part 1: one dance round on programs a-p; print the final arrangement.
progs <- danceRound(letters[1:16])
print(do.call(paste0, as.list(progs)))
#part 2 ----
#try it but see if there's a loop anywhere
progs <- letters[1:16]
history <- do.call(paste0, as.list(progs))
# Dance until an arrangement repeats: the sequence of arrangements is then
# periodic, so 1e9 rounds reduce to 1e9 modulo the cycle length.
for (round in seq(1e9)) {
  progs <- danceRound(progs)
  arrangement <- do.call(paste0, as.list(progs))
  if (arrangement %in% history) {
    print(sprintf("Repeat found after moves %d and %d", match(arrangement, history) - 1, round))
    break
  } else history <- c(history, arrangement)
}
#therefore only need to go dance 1e9 %% 60 times (and already recorded in our history)
# NOTE(review): the cycle length 60 is hard-coded from this input's observed
# repeat -- confirm it matches the value printed by the loop above.
print(history[1e9%%60 + 1])
| /day16.R | no_license | d-sci/Advent-of-Code-2017 | R | false | false | 1,274 | r | #Day 16
# AoC 2017 day 16: read the comma-separated dance moves and split each into
# its move letter (`moves`) and its "/"-separated arguments (`args`).
setwd("C:/Users/David.simons/Documents/advent of code")
dance <- unlist(strsplit(readLines("day16.txt"), ","))
moves <- substr(dance, 1, 1)  # first character = move type: s(pin)/x(change)/p(artner)
args <- strsplit(substr(dance, 2, nchar(dance)), "/")  # remainder = move arguments
#part 1 ----
# Apply one complete round of the dance to the vector of programs.
# Reads the parsed instructions from the globals `moves` (single-letter
# move codes) and `args` (the "/"-split move arguments).
danceRound <- function(progs){
  n_progs <- length(progs)
  for (k in seq_along(moves)) {
    arg <- args[[k]]
    if (moves[k] == "s") {
      # Spin: rotate the last `shift` programs to the front.
      shift <- as.integer(arg)
      progs <- progs[c((n_progs-shift+1):n_progs, 1:(n_progs-shift))]
    } else if (moves[k] == "x") {
      # Exchange: swap the programs at the two given 0-based positions.
      swap_at <- as.integer(arg) + 1
      progs[swap_at] <- progs[rev(swap_at)]
    } else if (moves[k] == "p") {
      # Partner: swap the two named programs wherever they currently stand.
      progs[match(arg, progs)] <- rev(arg)
    }
  }
  progs
}
# Part 1: one dance round on programs a-p; print the final arrangement.
progs <- danceRound(letters[1:16])
print(do.call(paste0, as.list(progs)))
#part 2 ----
#try it but see if there's a loop anywhere
progs <- letters[1:16]
history <- do.call(paste0, as.list(progs))
# Dance until an arrangement repeats: the sequence of arrangements is then
# periodic, so 1e9 rounds reduce to 1e9 modulo the cycle length.
for (round in seq(1e9)) {
  progs <- danceRound(progs)
  arrangement <- do.call(paste0, as.list(progs))
  if (arrangement %in% history) {
    print(sprintf("Repeat found after moves %d and %d", match(arrangement, history) - 1, round))
    break
  } else history <- c(history, arrangement)
}
#therefore only need to go dance 1e9 %% 60 times (and already recorded in our history)
# NOTE(review): the cycle length 60 is hard-coded from this input's observed
# repeat -- confirm it matches the value printed by the loop above.
print(history[1e9%%60 + 1])
|
# R Script to interpolate model variable onto pressure
# grid and generate zonal mean.
# Takes input's:
# "var", "nc1" and "pres" from calling script
# Alex Archibald, February, 2012
# This version also includes a call to a routine to do a
# 2d interpolation of the model field (lon, lat) to the obs
# grid (i.e. N96-N48 transform)
source("/home/ata27/R/interp2d.R")
# Arguments --------------------------------------------------------------------------------------- #
pmax <- length(pres)
# extract model co-ords
lonp <- get.var.ncdf(nc1, "longitude")
latp <- get.var.ncdf(nc1, "latitude")
levp <- get.var.ncdf(nc1, "hybrid_ht")
timep <- get.var.ncdf(nc1, "t")
lonv <- get.var.ncdf(nc1, "longitude")
latv <- get.var.ncdf(nc1, "latitude")
levv <- get.var.ncdf(nc1, "hybrid_ht")
timev <- get.var.ncdf(nc1, "t")
# check that the two variables have the same lengths
if ( length(lonp) == length(lonv) ) xmax <- length(lonp) else print("Longitudes not equal")
if ( length(latp) == length(latv) ) ymax <- length(latp) else print("Latitudes not equal")
if ( length(levp) == length(levv) ) zmax <- length(levp) else print("Levels not equal")
if ( length(timep) == length(timev) ) tmax <- length(timep) else print("Times not equal")
# set counters to NULL
it <- NULL; iy <- NULL; ix <- NULL; ip <- NULL
i <- NULL; j <- NULL
# set variables
p1 <- NULL; p2 <- NULL; zm <- FALSE
# hard code the N48/Obs co-ords
# (overrides the xmax/ymax derived above so the output matches the obs grid)
xmax <- 96; ymax <- 73
# create empty arrays to fill with data
newvv <- array(as.numeric(NA), dim=c(ymax,pmax,tmax))
pp <- array(as.numeric(NA), dim=c(ymax,zmax,tmax))
vv <- array(as.numeric(NA), dim=c(ymax,zmax,tmax))
pp.rgd2<- array(as.numeric(NA), dim=c(xmax,ymax,zmax,tmax))
vv.rgd2<- array(as.numeric(NA), dim=c(xmax,ymax,zmax,tmax))
# check for dimension mismatches
source(paste(script.dir, "/interp_error_checks.R", sep=""))
# extract the model variables and regrid onto the same grid as the obs
vv.rgd <- get.var.ncdf( nc1,var)
for (i in 1:length(levv) ) {
 for (j in 1:length(timev) ) {
vv.rgd2[,,i,j] <- interp2d(vv.rgd[,,i,j], newx=xmax, newy=ymax)
} }
pp.rgd <- get.var.ncdf( nc1,"p")
for (i in 1:length(levv) ) {
 for (j in 1:length(timev) ) {
pp.rgd2[,,i,j] <- interp2d(pp.rgd[,,i,j], newx=xmax, newy=ymax)
} }
# Main code here -------------------------------------------------------------------------------------- #
# loop over all time steps
print ("start loop over pressure")
for (it in 1:tmax) {
print (paste("Time step: ",it,sep=""))
# read pressure and variable
# NOTE(review): apply() over c(2,3) collapses longitude (the zonal mean), but
# `[,,,it:1]` also averages over time steps 1..it -- confirm a cumulative time
# mean is intended rather than the single step `[,,,it]`.
pp <- pp.rgd2[,,,it:1]
pp <- apply(pp,c(2,3),mean)
print(str(pp))
vv <- vv.rgd2[,,,it:1]
vv <- apply(vv,c(2,3),mean)
print(str(vv))
# loop over longitude and latitude
for (iy in 1:ymax) {
# loop over pressure
for (ip in 1:pmax) {
# determine the interval, loop over model levels and interpolate linear in log(p)
ptarget = log(pres[ip])
for (iz in 2:zmax) {
p1=log(pp[iy,iz-1]/100.) # NOTE conversion to hPa
p2=log(pp[iy,iz ]/100.)
if ( ptarget <= p1 ) {
if ( ptarget >= p2 ) {
newvv[iy,ip,it] = vv[iy,iz-1] + ( ( (vv[iy,iz] - vv[iy,iz-1] )/(p2-p1))*(ptarget-p1) )
} # end if
} # end if/while
}# end do model level loop
} # end do pressure loop
} # end do latitude loop
} # end do loop over time
# end of main code ----------------------------------------------------------------------------------- #
| /interpolate_zm_n96_eval.R | no_license | paultgriffiths/R_pyle | R | false | false | 3,514 | r | # R Script to interpolate model variable onto pressure
# grid and generate zonal mean.
# Takes input's:
# "var", "nc1" and "pres" from calling script
# Alex Archibald, February, 2012
# This version also includes a call to a routine to do a
# 2d interpolation of the model field (lon, lat) to the obs
# grid (i.e. N96-N48 transform)
source("/home/ata27/R/interp2d.R")
# Arguments --------------------------------------------------------------------------------------- #
pmax <- length(pres)
# extract model co-ords
lonp <- get.var.ncdf(nc1, "longitude")
latp <- get.var.ncdf(nc1, "latitude")
levp <- get.var.ncdf(nc1, "hybrid_ht")
timep <- get.var.ncdf(nc1, "t")
lonv <- get.var.ncdf(nc1, "longitude")
latv <- get.var.ncdf(nc1, "latitude")
levv <- get.var.ncdf(nc1, "hybrid_ht")
timev <- get.var.ncdf(nc1, "t")
# check that the two variables have the same lengths
if ( length(lonp) == length(lonv) ) xmax <- length(lonp) else print("Longitudes not equal")
if ( length(latp) == length(latv) ) ymax <- length(latp) else print("Latitudes not equal")
if ( length(levp) == length(levv) ) zmax <- length(levp) else print("Levels not equal")
if ( length(timep) == length(timev) ) tmax <- length(timep) else print("Times not equal")
# set counters to NULL
it <- NULL; iy <- NULL; ix <- NULL; ip <- NULL
i <- NULL; j <- NULL
# set variables
p1 <- NULL; p2 <- NULL; zm <- FALSE
# hard code the N48/Obs co-ords
# (overrides the xmax/ymax derived above so the output matches the obs grid)
xmax <- 96; ymax <- 73
# create empty arrays to fill with data
newvv <- array(as.numeric(NA), dim=c(ymax,pmax,tmax))
pp <- array(as.numeric(NA), dim=c(ymax,zmax,tmax))
vv <- array(as.numeric(NA), dim=c(ymax,zmax,tmax))
pp.rgd2<- array(as.numeric(NA), dim=c(xmax,ymax,zmax,tmax))
vv.rgd2<- array(as.numeric(NA), dim=c(xmax,ymax,zmax,tmax))
# check for dimension mismatches
source(paste(script.dir, "/interp_error_checks.R", sep=""))
# extract the model variables and regrid onto the same grid as the obs
vv.rgd <- get.var.ncdf( nc1,var)
for (i in 1:length(levv) ) {
 for (j in 1:length(timev) ) {
vv.rgd2[,,i,j] <- interp2d(vv.rgd[,,i,j], newx=xmax, newy=ymax)
} }
pp.rgd <- get.var.ncdf( nc1,"p")
for (i in 1:length(levv) ) {
 for (j in 1:length(timev) ) {
pp.rgd2[,,i,j] <- interp2d(pp.rgd[,,i,j], newx=xmax, newy=ymax)
} }
# Main code here -------------------------------------------------------------------------------------- #
# loop over all time steps
print ("start loop over pressure")
for (it in 1:tmax) {
print (paste("Time step: ",it,sep=""))
# read pressure and variable
# NOTE(review): apply() over c(2,3) collapses longitude (the zonal mean), but
# `[,,,it:1]` also averages over time steps 1..it -- confirm a cumulative time
# mean is intended rather than the single step `[,,,it]`.
pp <- pp.rgd2[,,,it:1]
pp <- apply(pp,c(2,3),mean)
print(str(pp))
vv <- vv.rgd2[,,,it:1]
vv <- apply(vv,c(2,3),mean)
print(str(vv))
# loop over longitude and latitude
for (iy in 1:ymax) {
# loop over pressure
for (ip in 1:pmax) {
# determine the interval, loop over model levels and interpolate linear in log(p)
ptarget = log(pres[ip])
for (iz in 2:zmax) {
p1=log(pp[iy,iz-1]/100.) # NOTE conversion to hPa
p2=log(pp[iy,iz ]/100.)
if ( ptarget <= p1 ) {
if ( ptarget >= p2 ) {
newvv[iy,ip,it] = vv[iy,iz-1] + ( ( (vv[iy,iz] - vv[iy,iz-1] )/(p2-p1))*(ptarget-p1) )
} # end if
} # end if/while
}# end do model level loop
} # end do pressure loop
} # end do latitude loop
} # end do loop over time
# end of main code ----------------------------------------------------------------------------------- #
|
# Exercise 3: writing and executing functions
# Define a function `add_three` that takes a single argument and
# returns a value 3 greater than the input
# Return the given value increased by three.
add_three <- function(value) {
  3 + value
}
# Create a variable `ten` that is the result of passing 7 to your `add_three`
# function
ten <- add_three(7)  # 7 + 3 = 10
# Define a function `imperial_to_metric` that takes in two arguments: a number
# of feet and a number of inches
# The function should return the equivalent length in meters
# Convert an imperial length (feet plus inches) to metres.
imperial_to_metric <- function(feet, inches) {
  total_inches <- feet * 12 + inches
  total_inches * 0.0254  # 1 inch = 0.0254 m exactly
}
# Create a variable `height_in_meters` by passing your height in imperial to the
# `imperial_to_metric` function
height_in_meters <- imperial_to_metric(5, 11)  # 5 ft 11 in = 1.8034 m
| /chapter-06-exercises/exercise-3/exercise.R | permissive | akoltko-1763595/book-exercises | R | false | false | 728 | r | # Exercise 3: writing and executing functions
# Define a function `add_three` that takes a single argument and
# returns a value 3 greater than the input
# Return the given value increased by three.
add_three <- function(value) {
  3 + value
}
# Create a variable `ten` that is the result of passing 7 to your `add_three`
# function
ten <- add_three(7)  # 7 + 3 = 10
# Define a function `imperial_to_metric` that takes in two arguments: a number
# of feet and a number of inches
# The function should return the equivalent length in meters
# Convert an imperial length (feet plus inches) to metres.
imperial_to_metric <- function(feet, inches) {
  total_inches <- feet * 12 + inches
  total_inches * 0.0254  # 1 inch = 0.0254 m exactly
}
# Create a variable `height_in_meters` by passing your height in imperial to the
# `imperial_to_metric` function
height_in_meters <- imperial_to_metric(5, 11)  # 5 ft 11 in = 1.8034 m
|
#' @title Extract JSON
#' @description Adds a column `col_new` to a data frame by parsing the JSON
#'   string in each row of its `col` column and extracting a named key
#'   (currently the empty string ""); rows whose JSON lacks the key yield NA.
#' @param df A dataframe with a character column `col` containing JSON strings.
#' @return The input dataframe with an additional `col_new` column.
#' @export
extract_json <- \(df){
    # check_key: value stored under `key` in the parsed JSON, or NA if absent.
    # NOTE(review): fromJSON() is parsed twice per row, and ifelse() on a
    # single-element list is fragile -- confirm intended semantics.
    check_key = \(dict, key) ifelse(key %in% names(fromJSON(dict)), fromJSON(dict)[key], NA)
    df %>%
    mutate(
        col_new = unlist(future_map(col, ~ check_key(.x, "")))
    )
} | /R/extract_json.r | no_license | gfleetwood/sansor | R | false | false | 427 | r | #' @title Extract JSON
#' @description Adds a column `col_new` to a data frame by parsing the JSON
#'   string in each row of its `col` column and extracting a named key
#'   (currently the empty string ""); rows whose JSON lacks the key yield NA.
#' @param df A dataframe with a character column `col` containing JSON strings.
#' @return The input dataframe with an additional `col_new` column.
#' @export
extract_json <- \(df){
    # check_key: value stored under `key` in the parsed JSON, or NA if absent.
    # NOTE(review): fromJSON() is parsed twice per row, and ifelse() on a
    # single-element list is fragile -- confirm intended semantics.
    check_key = \(dict, key) ifelse(key %in% names(fromJSON(dict)), fromJSON(dict)[key], NA)
    df %>%
    mutate(
        col_new = unlist(future_map(col, ~ check_key(.x, "")))
    )
} |
#' # [EXP_76](https://github.com/JoshuaHarris391/Lab_Notebook_Cunliffe_2018/blob/master/Experimental_Log/Logs/EXP_76.md) obtaining primer query sequences
#' Primers flanking the two sgRNA target sites from EXP_73 will be determined by first obtaining a query sequence for each target site to put through to primer BLAST
#'
#' ## Installing package
# source("http://bioconductor.org/biocLite.R")
# biocLite("Biostrings")
# biocLite("seqinr")
#' ## loading Packages
#+ message=FALSE
library('Biostrings')
library('seqinr')
# library('rstudioapi')
library('tidyverse')
#' ## Setting WD
# setwd('~/Dropbox/Research/PhD/Experiments/EXP_76/EXP_76_Fn14_Primer_Design/scripts/')
#' ## Reading in fasta files
#' These sequences contain 1kB upstream and 1kb downstream of the first and last exon
#' Sequence includes introns and 4 exons
#' fasta file was obtained from UCSC genome browser. [link to entry](https://genome.ucsc.edu/cgi-bin/hgGene?hgg_gene=ENST00000326577.8&hgg_prot=ENST00000326577.8&hgg_chrom=chr16&hgg_start=3020311&hgg_end=3022383&hgg_type=knownGene&db=hg38&hgsid=712319617_zMV9zkvqyZxeyCewSf3qflc4CiR1)
#'
#' #### Whole Sequence (including 1kB up and downstream)
Fn14_whole_seq_fasta <- read.fasta(file = "../sequence/TNFRSF12A_Whole_Seq.fa",
                                   seqtype = 'DNA',
                                   as.string = TRUE)
Fn14_whole_seq_fasta <- Fn14_whole_seq_fasta[[1]]
Fn14_whole_seq_fasta %>% print()
#' #### Creating character string
Fn14_whole_seq_char <- Fn14_whole_seq_fasta %>% as.character()
Fn14_whole_seq_char %>% class() %>% print()
#' #### Converting to DNAstring
Fn14_whole_seq_dnastring <- DNAString(Fn14_whole_seq_char)
Fn14_whole_seq_dnastring %>% class() %>% print()
#' ## Sequences by Region
#' Note, first and last region are not part of the Fn14 gene
Fn14_Region_seq <- read.fasta(file = "../sequence/TNFRSF12A_Region_Seq.fa",
                              seqtype = 'DNA',
                              as.string = TRUE)
for (i in 1:length(Fn14_Region_seq)) {
  Fn14_Region_seq[[i]] %>% head() %>% print()
}
#' ## Loading in sgRNA target sequences
#' Sequences are 5' to 3'
sgRNA_1 <- 'CGGGCGCAGGACGTGCACTA'
sgRNA_2 <- 'AGCTTGGCTCCCGCCGCGTC'
#' *sgRNA_2 targets the 3' UTR on the antisense strand, therefore it needs to be converted to the reverse complement
#'
#' ## Converting sgRNA_2 to rev comp
sgRNA_2 <- DNAString(sgRNA_2) %>%
  reverseComplement() %>%
  as.character()
print(sgRNA_2)
#' #### Match position for sgRNA_1 to Fn14 whole sequence
sgRNA_1_match <- matchPattern(sgRNA_1, Fn14_whole_seq_dnastring)
print(sgRNA_1_match)
#' #### Match position for sgRNA_2 to Fn14 whole sequence
sgRNA_2_match <- matchPattern(as.character(sgRNA_2), Fn14_whole_seq_dnastring)
print(sgRNA_2_match)
#' # Printing query sequence and printing match sequence
#' ## sgRNA_1 sequence
sgRNA_1 %>%
  as.character() %>%
  print()
#' #### Fn14 whole sequence lookup
Fn14_whole_seq_dnastring[start(sgRNA_1_match) : end(sgRNA_1_match)] %>%
  as.character() %>%
  print()
#' #### Testing for a match
# NOTE(review): %in% and %>% share precedence and associate left-to-right, so
# the trailing `%>% as.character()` applies to the logical result, not to
# sgRNA_1 -- confirm this prints the intended value.
Fn14_whole_seq_dnastring[start(sgRNA_1_match) : end(sgRNA_1_match)] %>% as.character() %in% sgRNA_1 %>% as.character()
#' ## sgRNA_2 sequence
sgRNA_2 %>%
  as.character() %>%
  print()
#' #### Fn14 whole sequence lookup
Fn14_whole_seq_dnastring[start(sgRNA_2_match) : end(sgRNA_2_match)] %>%
  as.character() %>%
  print()
#' #### Testing for a match
# NOTE(review): same %in%/%>% precedence caveat as for sgRNA_1 above.
Fn14_whole_seq_dnastring[start(sgRNA_2_match) : end(sgRNA_2_match)] %>% as.character() %in% sgRNA_2 %>% as.character()
#' # Creating a query sequence for Fn14 primer blast
#' Query sequences will have 'buffer_bp' number of base pairs upstream of the 5' end and 'buffer_bp' number of base pairs downstream from the 3' end of the matching sgRNA sequence
#'
# Defining number of buffer base pairs
buffer_bp <- 800
#' ## sgRNA_1 Primer Query Sequence
EXP_76_sgRNA_1_Query <- Fn14_whole_seq_dnastring[(start(sgRNA_1_match)-buffer_bp) : end(sgRNA_1_match+buffer_bp)] %>%
  as.character()
print(EXP_76_sgRNA_1_Query)
print(nchar(EXP_76_sgRNA_1_Query))
#' ## sgRNA_2 Primer Query Sequence
EXP_76_sgRNA_2_Query <- Fn14_whole_seq_dnastring[(start(sgRNA_2_match)-buffer_bp) : end(sgRNA_2_match+buffer_bp)] %>%
  as.character()
print(EXP_76_sgRNA_2_Query)
print(nchar(EXP_76_sgRNA_2_Query))
#' ## CRISPR-Cas9 cleavage sites
#' Base pair position is relative to the query sequence
sgRNA_1_match_query <- matchPattern(sgRNA_1, EXP_76_sgRNA_1_Query)
paste("EXP_76_sgRNA_1_Query Clevage site is between base pairs:", end(sgRNA_1_match_query), "and", end(sgRNA_1_match_query)+1)
sgRNA_2_match_query <- matchPattern(sgRNA_2, EXP_76_sgRNA_2_Query)
paste("EXP_76_sgRNA_2_Query Clevage site is between base pairs:", end(sgRNA_2_match_query), "and", end(sgRNA_2_match_query)+1)
#' ## Exporting query sequences as fasta files
write.fasta(EXP_76_sgRNA_1_Query, "EXP_76_sgRNA_1_Query", "../output/EXP_76_sgRNA_1_Query.fa", as.string = TRUE)
write.fasta(EXP_76_sgRNA_2_Query, "EXP_76_sgRNA_2_Query", "../output/EXP_76_sgRNA_2_Query.fa", as.string = TRUE)
| /scripts/Primer_Query_Sequence.R | no_license | JoshuaHarris391/EXP_76_Fn14_Primer_Design | R | false | false | 5,063 | r | #' # [EXP_76](https://github.com/JoshuaHarris391/Lab_Notebook_Cunliffe_2018/blob/master/Experimental_Log/Logs/EXP_76.md) obtaining primer query sequences
#' Primers flanking the two sgRNA target sites from EXP_73 will be determined by first obtaining a query sequence for each target site to put through to primer BLAST
#'
#' ## Installing package
# source("http://bioconductor.org/biocLite.R")
# biocLite("Biostrings")
# biocLite("seqinr")
#' ## loading Packages
#+ message=FALSE
library('Biostrings')
library('seqinr')
# library('rstudioapi')
library('tidyverse')
#' ## Setting WD
# setwd('~/Dropbox/Research/PhD/Experiments/EXP_76/EXP_76_Fn14_Primer_Design/scripts/')
#' ## Reading in fasta files
#' These sequences contain 1kB upstream and 1kb downstream of the first and last exon
#' Sequence includes introns and 4 exons
#' fasta file was obtained from UCSC genome browser. [link to entry](https://genome.ucsc.edu/cgi-bin/hgGene?hgg_gene=ENST00000326577.8&hgg_prot=ENST00000326577.8&hgg_chrom=chr16&hgg_start=3020311&hgg_end=3022383&hgg_type=knownGene&db=hg38&hgsid=712319617_zMV9zkvqyZxeyCewSf3qflc4CiR1)
#'
#' #### Whole Sequence (including 1kB up and downstream)
Fn14_whole_seq_fasta <- read.fasta(file = "../sequence/TNFRSF12A_Whole_Seq.fa",
                                   seqtype = 'DNA',
                                   as.string = TRUE)
Fn14_whole_seq_fasta <- Fn14_whole_seq_fasta[[1]]
Fn14_whole_seq_fasta %>% print()
#' #### Creating character string
Fn14_whole_seq_char <- Fn14_whole_seq_fasta %>% as.character()
Fn14_whole_seq_char %>% class() %>% print()
#' #### Converting to DNAstring
Fn14_whole_seq_dnastring <- DNAString(Fn14_whole_seq_char)
Fn14_whole_seq_dnastring %>% class() %>% print()
#' ## Sequences by Region
#' Note, first and last region are not part of the Fn14 gene
Fn14_Region_seq <- read.fasta(file = "../sequence/TNFRSF12A_Region_Seq.fa",
                              seqtype = 'DNA',
                              as.string = TRUE)
for (i in 1:length(Fn14_Region_seq)) {
  Fn14_Region_seq[[i]] %>% head() %>% print()
}
#' ## Loading in sgRNA target sequences
#' Sequences are 5' to 3'
sgRNA_1 <- 'CGGGCGCAGGACGTGCACTA'
sgRNA_2 <- 'AGCTTGGCTCCCGCCGCGTC'
#' *sgRNA_2 targets the 3' UTR on the antisense strand, therefore it needs to be converted to the reverse complement
#'
#' ## Converting sgRNA_2 to rev comp
sgRNA_2 <- DNAString(sgRNA_2) %>%
  reverseComplement() %>%
  as.character()
print(sgRNA_2)
#' #### Match position for sgRNA_1 to Fn14 whole sequence
sgRNA_1_match <- matchPattern(sgRNA_1, Fn14_whole_seq_dnastring)
print(sgRNA_1_match)
#' #### Match position for sgRNA_2 to Fn14 whole sequence
sgRNA_2_match <- matchPattern(as.character(sgRNA_2), Fn14_whole_seq_dnastring)
print(sgRNA_2_match)
#' # Printing query sequence and printing match sequence
#' ## sgRNA_1 sequence
sgRNA_1 %>%
  as.character() %>%
  print()
#' #### Fn14 whole sequence lookup
Fn14_whole_seq_dnastring[start(sgRNA_1_match) : end(sgRNA_1_match)] %>%
  as.character() %>%
  print()
#' #### Testing for a match
# NOTE(review): %in% and %>% share precedence and associate left-to-right, so
# the trailing `%>% as.character()` applies to the logical result, not to
# sgRNA_1 -- confirm this prints the intended value.
Fn14_whole_seq_dnastring[start(sgRNA_1_match) : end(sgRNA_1_match)] %>% as.character() %in% sgRNA_1 %>% as.character()
#' ## sgRNA_2 sequence
sgRNA_2 %>%
  as.character() %>%
  print()
#' #### Fn14 whole sequence lookup
Fn14_whole_seq_dnastring[start(sgRNA_2_match) : end(sgRNA_2_match)] %>%
  as.character() %>%
  print()
#' #### Testing for a match
# NOTE(review): same %in%/%>% precedence caveat as for sgRNA_1 above.
Fn14_whole_seq_dnastring[start(sgRNA_2_match) : end(sgRNA_2_match)] %>% as.character() %in% sgRNA_2 %>% as.character()
#' # Creating a query sequence for Fn14 primer blast
#' Query sequences will have 'buffer_bp' number of base pairs upstream of the 5' end and 'buffer_bp' number of base pairs downstream from the 3' end of the matching sgRNA sequence
#'
# Defining number of buffer base pairs
buffer_bp <- 800
#' ## sgRNA_1 Primer Query Sequence
EXP_76_sgRNA_1_Query <- Fn14_whole_seq_dnastring[(start(sgRNA_1_match)-buffer_bp) : end(sgRNA_1_match+buffer_bp)] %>%
  as.character()
print(EXP_76_sgRNA_1_Query)
print(nchar(EXP_76_sgRNA_1_Query))
#' ## sgRNA_2 Primer Query Sequence
EXP_76_sgRNA_2_Query <- Fn14_whole_seq_dnastring[(start(sgRNA_2_match)-buffer_bp) : end(sgRNA_2_match+buffer_bp)] %>%
  as.character()
print(EXP_76_sgRNA_2_Query)
print(nchar(EXP_76_sgRNA_2_Query))
#' ## CRISPR-Cas9 cleavage sites
#' Base pair position is relative to the query sequence
sgRNA_1_match_query <- matchPattern(sgRNA_1, EXP_76_sgRNA_1_Query)
paste("EXP_76_sgRNA_1_Query Clevage site is between base pairs:", end(sgRNA_1_match_query), "and", end(sgRNA_1_match_query)+1)
sgRNA_2_match_query <- matchPattern(sgRNA_2, EXP_76_sgRNA_2_Query)
paste("EXP_76_sgRNA_2_Query Clevage site is between base pairs:", end(sgRNA_2_match_query), "and", end(sgRNA_2_match_query)+1)
#' ## Exporting query sequences as fasta files
write.fasta(EXP_76_sgRNA_1_Query, "EXP_76_sgRNA_1_Query", "../output/EXP_76_sgRNA_1_Query.fa", as.string = TRUE)
write.fasta(EXP_76_sgRNA_2_Query, "EXP_76_sgRNA_2_Query", "../output/EXP_76_sgRNA_2_Query.fa", as.string = TRUE)
|
#' Calculate the Profile Likelihood
#'
#' Calculate the negative log profile likelihood given the covariance
#' parameters (not including sigma2). The regression coefficients beta and
#' the variance sigma2 are profiled out analytically after whitening with
#' the Cholesky factor of the correlation matrix.
#'
#' @param theta0 initial covariance parameters (not including sigma2); the
#'   number of elements used depends on \code{CovStructure}
#' @param CovStructure covariance structure code, 1-6: 1 separable
#'   exponential, 2-3 nonseparable forms, 4-6 nonstationary variants that
#'   also use \code{loctim}
#' @param loctim matrix of coordinates (columns: x, y, t); only used when
#'   \code{CovStructure} is 4-6
#' @param Ds the distance matrix of location
#' @param Dt the distance matrix of time
#' @param Xprime transformed X, Xprime = (I-S)X
#' @param Zprime transformed Z, Zprime = (I-S)Z
#' @param Sigma unused; retained for backward compatibility
#' @return the value of the negative log profile likelihood (a 1x1 matrix)
#' @export
log.profile = function(theta0,CovStructure,loctim,Ds,Dt,Xprime,Zprime,Sigma=NULL) {
  P = nrow(Xprime)
  # Build the correlation matrix psi for the requested structure; theta0[1]
  # acts as a nugget-type parameter, the rest are range/decay parameters.
  if (CovStructure == 1) {
    psi = (1-theta0[1])*exp(-Ds/theta0[2]-Dt/theta0[3])
    diag(psi) = 1 }
  else if (CovStructure == 2) {
    psi = (1-theta0[1])/(theta0[2]*(Dt)^2 + 1)*exp(- theta0[3]*Ds^2/(theta0[2]*(Dt)^2 + 1))
    diag(psi) = 1 }
  else if (CovStructure == 3) {
    psi <- (1-theta0[1])/(theta0[2]^2*(Dt)^2+1)^(3/2)*exp(-theta0[3]*Ds)
    diag(psi) = 1 }
  else if (CovStructure == 4) {
    DD1 = theta0[4]*loctim[,3] + 1
    psi <- DD1%*%t(DD1)/(theta0[2]^2*(Dt)^2+1)^(3/2)*exp(-theta0[3]*Ds)
    diag(psi) = diag(DD1%*% t(DD1))+theta0[1] }
  else if (CovStructure == 5) {
    DD1 = theta0[4]*loctim[,3]+theta0[5]*loctim[,1]+theta0[6]*loctim[,2] + 1
    psi <- DD1%*%t(DD1)/(theta0[2]^2*(Dt)^2+1)^(3/2)*exp(-theta0[3]*Ds)
    diag(psi) = diag(DD1%*% t(DD1))+theta0[1] }
  else if (CovStructure == 6) {
    # NOTE(review): this branch reads a global `truncate.t` (spline knot) --
    # confirm the caller defines it before structure 6 is used.
    DD1 =1 + theta0[4]*loctim[,3]+theta0[5]*loctim[,3]^2+
      theta0[6]*loctim[,3]^3+theta0[7]*(ifelse((loctim[,3]-truncate.t)>0,(loctim[,3]-truncate.t),0))^3
    psi <- DD1%*%t(DD1)/(theta0[2]^2*(Dt)^2+1)^(3/2)*exp(-theta0[3]*Ds)
    diag(psi) = diag(DD1%*% t(DD1)) +theta0[1] }
  # Bug fix: valid codes are 1-6, not 1-3 as the old message claimed.
  else stop("Please provide a valid covariance structure identification code (1-6).")
  # Whiten both sides with the Cholesky factor: psi = t(U) %*% U.
  U = base::chol(psi)
  sx = solve(t(U),Xprime)
  sz = solve(t(U),Zprime)
  # GLS estimate of beta, residuals in the whitened space, profiled variance.
  beta = solve(t(sx)%*%sx,t(sx)%*%sz)
  ress = (sz - sx%*%beta)
  sigma2 = t(ress)%*%ress/P
  # Negative log profile likelihood (up to an additive constant).
  log.prof = P/2*log(sigma2) + sum(log(diag(U))) + P/2
  return(log.prof)
}
| /R/CalculateProfile.R | no_license | liujl93/STplm | R | false | false | 2,636 | r | #' Calculate the Profile Likelihood
#'
#' Calculate the negative log profile likelihood given the covariance
#' parameters (not including sigma2). The regression coefficients beta and
#' the variance sigma2 are profiled out analytically after whitening with
#' the Cholesky factor of the correlation matrix.
#'
#' @param theta0 initial covariance parameters (not including sigma2); the
#'   number of elements used depends on \code{CovStructure}
#' @param CovStructure covariance structure code, 1-6: 1 separable
#'   exponential, 2-3 nonseparable forms, 4-6 nonstationary variants that
#'   also use \code{loctim}
#' @param loctim matrix of coordinates (columns: x, y, t); only used when
#'   \code{CovStructure} is 4-6
#' @param Ds the distance matrix of location
#' @param Dt the distance matrix of time
#' @param Xprime transformed X, Xprime = (I-S)X
#' @param Zprime transformed Z, Zprime = (I-S)Z
#' @param Sigma unused; retained for backward compatibility
#' @return the value of the negative log profile likelihood (a 1x1 matrix)
#' @export
log.profile = function(theta0,CovStructure,loctim,Ds,Dt,Xprime,Zprime,Sigma=NULL) {
  P = nrow(Xprime)
  # Build the correlation matrix psi for the requested structure; theta0[1]
  # acts as a nugget-type parameter, the rest are range/decay parameters.
  if (CovStructure == 1) {
    psi = (1-theta0[1])*exp(-Ds/theta0[2]-Dt/theta0[3])
    diag(psi) = 1 }
  else if (CovStructure == 2) {
    psi = (1-theta0[1])/(theta0[2]*(Dt)^2 + 1)*exp(- theta0[3]*Ds^2/(theta0[2]*(Dt)^2 + 1))
    diag(psi) = 1 }
  else if (CovStructure == 3) {
    psi <- (1-theta0[1])/(theta0[2]^2*(Dt)^2+1)^(3/2)*exp(-theta0[3]*Ds)
    diag(psi) = 1 }
  else if (CovStructure == 4) {
    DD1 = theta0[4]*loctim[,3] + 1
    psi <- DD1%*%t(DD1)/(theta0[2]^2*(Dt)^2+1)^(3/2)*exp(-theta0[3]*Ds)
    diag(psi) = diag(DD1%*% t(DD1))+theta0[1] }
  else if (CovStructure == 5) {
    DD1 = theta0[4]*loctim[,3]+theta0[5]*loctim[,1]+theta0[6]*loctim[,2] + 1
    psi <- DD1%*%t(DD1)/(theta0[2]^2*(Dt)^2+1)^(3/2)*exp(-theta0[3]*Ds)
    diag(psi) = diag(DD1%*% t(DD1))+theta0[1] }
  else if (CovStructure == 6) {
    # NOTE(review): this branch reads a global `truncate.t` (spline knot) --
    # confirm the caller defines it before structure 6 is used.
    DD1 =1 + theta0[4]*loctim[,3]+theta0[5]*loctim[,3]^2+
      theta0[6]*loctim[,3]^3+theta0[7]*(ifelse((loctim[,3]-truncate.t)>0,(loctim[,3]-truncate.t),0))^3
    psi <- DD1%*%t(DD1)/(theta0[2]^2*(Dt)^2+1)^(3/2)*exp(-theta0[3]*Ds)
    diag(psi) = diag(DD1%*% t(DD1)) +theta0[1] }
  # Bug fix: valid codes are 1-6, not 1-3 as the old message claimed.
  else stop("Please provide a valid covariance structure identification code (1-6).")
  # Whiten both sides with the Cholesky factor: psi = t(U) %*% U.
  U = base::chol(psi)
  sx = solve(t(U),Xprime)
  sz = solve(t(U),Zprime)
  # GLS estimate of beta, residuals in the whitened space, profiled variance.
  beta = solve(t(sx)%*%sx,t(sx)%*%sz)
  ress = (sz - sx%*%beta)
  sigma2 = t(ress)%*%ress/P
  # Negative log profile likelihood (up to an additive constant).
  log.prof = P/2*log(sigma2) + sum(log(diag(U))) + P/2
  return(log.prof)
}
|
## -------------------------------------------------------------------------
# Take average weighted methylation level of feature across bio replicates
## -------------------------------------------------------------------------
library(sqldf)  # NOTE(review): sqldf does not appear to be used below
library(readr)
library(doBy)
library(dplyr)
library(reshape2)
# Make one file covering all samples
# NOTE(review): the pattern is glob-style but list.files() expects a regex
# (e.g. "weighted_meth\\.txt$") -- confirm it matches the intended files.
file.list = list.files(("./"),pattern="*weighted_meth.txt")
# Read one tab-separated weighted-methylation file with a header row.
read_file1 <- function(x){
  read_delim(x, "\t", escape_double = F, col_names = T, trim_ws = T)
}
samples <- lapply(file.list, read_file1)
# Make one dataframe for each population
# (assumes files 1-6 are female and 7-12 are male -- TODO confirm ordering)
females <- samples[1:6]
males <- samples[7:12]
for(i in seq_along(females)){
  females[[i]]$sex <- "female"
}
females_all <- as.data.frame(bind_rows(females))
# Mean weighted methylation per feature across the female replicates.
females_merged <- summaryBy(weightedMeth ~ chr + feature + gene_id + start + end +
                              cpg_count + sex, data = females_all, FUN=mean)
for(i in seq_along(males)){
  males[[i]]$sex <- "male"
}
males_all <- as.data.frame(bind_rows(males))
males_merged <- summaryBy(weightedMeth ~ chr + feature + gene_id + start + end +
                            cpg_count + sex, data = males_all, FUN=mean)
all_data <- rbind(females_merged, males_merged)
# Reshape long -> wide: one mean-weighted-methylation column per sex.
all_data2 <- dcast(all_data, chr + feature + gene_id + start + end +
                     cpg_count ~ sex, value.var = "weightedMeth.mean")
write.table(all_data2, file="Dcitri_weighted_meth_annotation_by_sex.txt", col.names = T,
            row.names = F, quote = F, sep = "\t")
| /Differential_methylation/13_mean_weighted_meth_by_sex.R | no_license | MooHoll/Asian_Psyllid_Methylation | R | false | false | 1,499 | r | ## -------------------------------------------------------------------------
# Take average weighted methylation level of feature across bio replicates
## -------------------------------------------------------------------------
library(sqldf)  # NOTE(review): sqldf does not appear to be used below
library(readr)
library(doBy)
library(dplyr)
library(reshape2)
# Make one file covering all samples
# NOTE(review): the pattern is glob-style but list.files() expects a regex
# (e.g. "weighted_meth\\.txt$") -- confirm it matches the intended files.
file.list = list.files(("./"),pattern="*weighted_meth.txt")
# Read one tab-separated weighted-methylation file with a header row.
read_file1 <- function(x){
  read_delim(x, "\t", escape_double = F, col_names = T, trim_ws = T)
}
samples <- lapply(file.list, read_file1)
# Make one dataframe for each population
# (assumes files 1-6 are female and 7-12 are male -- TODO confirm ordering)
females <- samples[1:6]
males <- samples[7:12]
for(i in seq_along(females)){
  females[[i]]$sex <- "female"
}
females_all <- as.data.frame(bind_rows(females))
# Mean weighted methylation per feature across the female replicates.
females_merged <- summaryBy(weightedMeth ~ chr + feature + gene_id + start + end +
                              cpg_count + sex, data = females_all, FUN=mean)
for(i in seq_along(males)){
  males[[i]]$sex <- "male"
}
males_all <- as.data.frame(bind_rows(males))
males_merged <- summaryBy(weightedMeth ~ chr + feature + gene_id + start + end +
                            cpg_count + sex, data = males_all, FUN=mean)
all_data <- rbind(females_merged, males_merged)
# Reshape long -> wide: one mean-weighted-methylation column per sex.
all_data2 <- dcast(all_data, chr + feature + gene_id + start + end +
                     cpg_count ~ sex, value.var = "weightedMeth.mean")
write.table(all_data2, file="Dcitri_weighted_meth_annotation_by_sex.txt", col.names = T,
            row.names = F, quote = F, sep = "\t")
|
## load library
library(googleAnalyticsR)
library(dplyr)
## source the authentication file
# options.R is expected to set the auth options used by googleAnalyticsR.
source("options.R")
## get account info
account_list <- google_analytics_account_list()
# account_main <- account_list %>% filter(viewName == "All Web Site Data")
## get another set of accounts
# Manually verified list of views; Profile.ID is coerced to character so it
# can be matched against the API's viewId.
account_curated <- read.csv("account_list_verified.csv",stringsAsFactors = F) %>%
  mutate(
    Profile.ID = as.character(Profile.ID)
  ) %>%
  unique()
## find common profile ids in account_curated and account_list
account_common <- account_list %>%
  semi_join(account_curated,c("viewId"="Profile.ID"))
account_common <- merge(account_curated,account_common,by.x = "Profile.ID",by.y = "viewId") %>%
  select(-accountId,-UA.number,-Url,-accountName)
account_common_focus <- account_common %>%
  select(Profile.ID,webPropertyName,Brand,Super.Category,Sub.Category,Market,Region.Market)
write.csv(account_common_focus,"account_common_focus.csv",row.names = F)
# Pull weekly users/sessions/bounces per medium and country for every curated
# view, accumulating the results row-wise in `df1`.
# Bug fix: the error handler previously did `flag = FALSE`, which only created
# a variable local to the handler, so `finally` still saw flag == TRUE after a
# failed call and re-appended the previous iteration's `tmp`. `<<-` now
# updates the flag in the enclosing environment, and an exists() guard skips
# the case where the very first call fails before `tmp` was ever created.
for(i in 1:nrow(account_common_focus))
{
  print(i)
  flag <- TRUE
  errTrp <- tryCatch({
    tmp <- google_analytics_4(viewId = account_common_focus$Profile.ID[i],
                              date_range = c("2016-01-03","2016-10-29"),
                              metrics = c("users","sessions","bounces"),
                              dimensions = c("week","medium","country"),
                              max = -1 )
  }, warning = function(w){
    print("warning")
  }, error = function(e){
    print("error")
    flag <<- FALSE
  }, finally = {
    if(flag && exists("tmp"))
    {
      # tmp$ProfileName <- paste0(account_common_focus$accountName[i],": ",account_common_focus$webPropertyName[i])
      tmp$ViewID <- account_common_focus$Profile.ID[i]
      if(i == 1){
        df1 <- tmp
      }else if(i > 1 && length(tmp) > 1){
        df1 <- rbind(df1,tmp)
        print(paste0("Total Rows: ",nrow(df1)))
      }
    }
  })
}
## remove temp variables
# NOTE(review): rm() errors if the very first API call failed and `tmp` was
# never created -- confirm that case cannot occur here.
rm(errTrp,tmp)
## save the result
write.csv(df1,"allBrands_medium_weekly_curated.csv",row.names = F)
| /01_data_fetch_ga.r | no_license | engti/unilever-reporting | R | false | false | 2,565 | r | ## load library
## load required packages
library(googleAnalyticsR)
library(dplyr)
## source the authentication file
# options.R is expected to configure GA credentials/options — not shown here.
source("options.R")
## get account info
account_list <- google_analytics_account_list()
# account_main <- account_list %>% filter(viewName == "All Web Site Data")
## get another set of accounts
# Manually verified view list; Profile.ID is coerced to character so it can
# be matched against the API's viewId, and duplicate rows are dropped.
account_curated <- read.csv("account_list_verified.csv",stringsAsFactors = F) %>%
mutate(
Profile.ID = as.character(Profile.ID)
) %>%
unique()
## find common profile ids in account_curated and account_list
# semi_join keeps only the API views that appear in the curated list.
account_common <- account_list %>%
semi_join(account_curated,c("viewId"="Profile.ID"))
# Merge curated metadata onto the matching views, dropping redundant columns.
account_common <- merge(account_curated,account_common,by.x = "Profile.ID",by.y = "viewId") %>%
select(-accountId,-UA.number,-Url,-accountName)
account_common_focus <- account_common %>%
select(Profile.ID,webPropertyName,Brand,Super.Category,Sub.Category,Market,Region.Market)
# Persist the focused account list for inspection / later runs.
write.csv(account_common_focus,"account_common_focus.csv",row.names = F)
## set up the analytics call
## Each google_analytics_4() call is wrapped in tryCatch() because the API
## sometimes fails. The original version set `flag = FALSE` inside the error
## handler, which only created a handler-local variable, so the `finally`
## block could append a stale `tmp` after a warning, or crash with
## "object 'tmp' not found" when the very first call failed. A handler now
## simply returns NULL and the view is skipped.
for(i in seq_len(nrow(account_common_focus)))
{
  print(i)
  tmp <- tryCatch(
    google_analytics_4(viewId = account_common_focus$Profile.ID[i],
                       date_range = c("2016-01-03","2016-10-29"),
                       metrics = c("users","sessions","bounces"),
                       dimensions = c("week","medium","country"),
                       max = -1 ),
    warning = function(w){
      print("warning")
      NULL
    },
    error = function(e){
      print("error")
      NULL
    })
  if(!is.null(tmp) && length(tmp) > 1)
  {
    # Tag each row with the view it came from before accumulating.
    tmp$ViewID <- account_common_focus$Profile.ID[i]
    if(!exists("df1")){
      df1 <- tmp
    }else{
      df1 <- rbind(df1,tmp)
      print(paste0("Total Rows: ",nrow(df1)))
    }
  }
}
## remove temp variables
rm(tmp)
## save the result (guarded in case every API call failed)
if(exists("df1")){
  write.csv(df1,"allBrands_medium_weekly_curated.csv",row.names = F)
}else{
  warning("No Google Analytics data was retrieved; nothing written.", call. = FALSE)
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/bioRad.R
\name{readvp.table}
\alias{readvp.table}
\title{Read vertical profiles from vol2bird stdout}
\usage{
readvp.table(file, radar, wavelength = "C")
}
\arguments{
\item{file}{A text file containing the standard output (stdout) generated by vol2bird}
\item{radar}{string containing a radar identifier}
\item{wavelength}{radar wavelength in cm, or one of 'C' or 'S' for C-band and S-band radar, respectively}
}
\value{
an object inheriting from class "\code{vpts}", see \link{vpts} for details
}
\description{
Read vertical profiles from vol2bird stdout
}
\examples{
# locate example file:
VPtable <- system.file("extdata", "VPtable.txt", package="bioRad")
# load time series:
ts=readvp.table(VPtable,radar="KBGM", wavelength='S')
ts
}
| /man/readvp.table.Rd | permissive | macheng94/bioRad | R | false | true | 819 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/bioRad.R
\name{readvp.table}
\alias{readvp.table}
\title{Read vertical profiles from vol2bird stdout}
\usage{
readvp.table(file, radar, wavelength = "C")
}
\arguments{
\item{file}{A text file containing the standard output (stdout) generated by vol2bird}
\item{radar}{string containing a radar identifier}
\item{wavelength}{radar wavelength in cm, or one of 'C' or 'S' for C-band and S-band radar, respectively}
}
\value{
an object inheriting from class "\code{vpts}", see \link{vpts} for details
}
\description{
Read vertical profiles from vol2bird stdout
}
\examples{
# locate example file:
VPtable <- system.file("extdata", "VPtable.txt", package="bioRad")
# load time series:
ts=readvp.table(VPtable,radar="KBGM", wavelength='S')
ts
}
|
% Generated by roxygen2 (4.0.1): do not edit by hand
\name{SNF}
\alias{SNF}
\alias{SNF.dynamic.mcmc}
\alias{SNF.static.maxLik}
\alias{SNF.static.mcmc}
\title{SNF}
\usage{
SNF(data, method = c("static.maxLik", "static.mcmc", "dynamic.mcmc"), ...)
SNF.static.maxLik(data, ...)
SNF.static.mcmc(data, m = 1000, last_estimation, ...)
SNF.dynamic.mcmc(m, data, last_estimation, update_tau = TRUE, tau = 0.005)
}
\arguments{
\item{data}{data}
\item{method}{Estimation method, either "static.maxLik","static.mcmc","dynamic.mcmc". Default is "static.maxLik"}
\item{m}{m}
\item{last_estimation}{last_estimation}
\item{update_tau}{update_tau}
\item{tau}{tau}
\item{...}{Other arguments passed on to the underlying methods.}
}
\value{
SNF object
}
\description{
Strategy Network Formation
}
\author{
TszKin Julian Chan \email{ctszkin@gmail.com}
}
| /simmen/man/SNF.Rd | no_license | ctszkin/simmen | R | false | false | 809 | rd | % Generated by roxygen2 (4.0.1): do not edit by hand
\name{SNF}
\alias{SNF}
\alias{SNF.dynamic.mcmc}
\alias{SNF.static.maxLik}
\alias{SNF.static.mcmc}
\title{SNF}
\usage{
SNF(data, method = c("static.maxLik", "static.mcmc", "dynamic.mcmc"), ...)
SNF.static.maxLik(data, ...)
SNF.static.mcmc(data, m = 1000, last_estimation, ...)
SNF.dynamic.mcmc(m, data, last_estimation, update_tau = TRUE, tau = 0.005)
}
\arguments{
\item{data}{data}
\item{method}{Estimation method, either "static.maxLik","static.mcmc","dynamic.mcmc". Default is "static.maxLik"}
\item{m}{m}
\item{last_estimation}{last_estimation}
\item{update_tau}{update_tau}
\item{tau}{tau}
\item{...}{Other arguments passed on to the underlying methods.}
}
\value{
SNF object
}
\description{
Strategy Network Formation
}
\author{
TszKin Julian Chan \email{ctszkin@gmail.com}
}
|
# --- Setup: libraries and raw data -------------------------------------------
# NOTE(review): keras is attached but does not appear to be used in the
# visible code — confirm before removing.
library(tidyr)
library(tidyverse)
library(caret)
library(h2o)
library(lubridate)
library(keras)
library(chron)
library(magrittr)
# read_csv() comes from readr (tidyverse); paths are relative to the
# working directory.
train <- read_csv('train.csv')
test <- read_csv('test.csv')
sample <- read_csv('sample_submission.csv')
summary(train)
# Flag the origin of each row, then stack train and test so feature
# engineering below is applied identically to both.
train$is_train <- 1
test$is_train <- 0
# The target column is absent in test; add it as NA so bind_rows() aligns.
test$amount_spent_per_room_night_scaled <- NA
full <- bind_rows(train,test)
# Quick per-column NA audit (printed only, not stored).
sapply(train,function(x)sum(is.na(x)))
sapply(test,function(x)sum(is.na(x)))
# --- Date parsing and advance-booking cleanup --------------------------------
# Dates arrive as "dd/mm/yy" strings; advance_booking is the lead time in days
# between booking and check-in. The long id strings are truncated to 10 chars.
full <- full %>% mutate(booking_date=as.Date(booking_date,"%d/%m/%y"),
checkin_date=as.Date(checkin_date,"%d/%m/%y"),
checkout_date=as.Date(checkout_date,"%d/%m/%y"),
advance_booking = as.numeric(checkin_date-booking_date),
resort_id=str_trunc(resort_id,10),
memberid=str_trunc(memberid,10))
#advance bookings
# Exploratory: inspect members whose booking date falls after check-in
# (negative advance_booking). View() is interactive-only.
full %>% group_by(memberid) %>% mutate(d=n(),r=sum(amount_spent_per_room_night_scaled)) %>%
ungroup() %>% filter(advance_booking<0) %>% View()
#total of 14 members. here we cannot remove these users id because,
#there are 2 users in test with negative advance booking date.
#take either same day booking, 15 days mode and 33 days median
full %>% group_by(advance_booking) %>% count(sort=TRUE)
#since 0 is 3rd highest, lets take same day booking
# Negative lead times are rewritten as same-day bookings.
full <- full %>% mutate(booking_date=case_when(advance_booking<0~checkin_date,
TRUE~booking_date),
advance_booking=case_when(advance_booking<0~0,
TRUE~advance_booking))
# Calendar features; is.weekend() comes from chron.
full <- full %>% mutate(booking_month=month(booking_date),booking_day=day(booking_date),
checkin_month=month(checkin_date),checkin_day=day(checkin_date),
booking_d=weekdays(booking_date),checkin_d=weekdays(checkin_date),
checkout_d=weekdays(checkout_date),checkin_quarter=quarter(checkin_date),
checkin_weekend=is.weekend(checkin_date))
# --- Party-size and stay-length features -------------------------------------
full %>% group_by(total_pax) %>% count(sort=TRUE)
full %>% filter(total_pax>11) %>% View()
# Bucket party size: 2 -> couple, 3-4 -> family, everything else -> others.
full <- full %>% mutate(pax=case_when(total_pax>4~'others',
total_pax<2~'others',
total_pax==2~'couple',
TRUE~'family'))
full %>% group_by(pax) %>% count(sort=TRUE)
full %>% ggplot(aes(factor(roomnights),advance_booking))+geom_boxplot()
full %>% group_by(roomnights) %>% count(sort=TRUE)
full %>% filter(roomnights<1) %>% View()
#cleaning the room night stay
# Non-positive roomnights are replaced by the checkout - checkin span.
full <- full %>% mutate(roomnights=case_when(roomnights<1~as.integer(checkout_date-checkin_date),
TRUE~roomnights))
# Bucket stay length into four ordered categories.
full <- full %>% mutate(room_stay=case_when(roomnights<=2~'VShort',
roomnights>2&roomnights<=4~'Normal',
roomnights>4&roomnights<8~'Week',
roomnights>7~'Long'))
full %>% group_by(room_stay) %>% count(sort=TRUE)
full %>% ggplot(aes(season_holidayed_code,fill=factor(checkin_quarter)))+geom_bar(stat='count')
# --- Season imputation and booking-lead buckets ------------------------------
# Most frequent season per check-in quarter (computed but the imputation
# below hard-codes the values rather than joining this table).
top_season_quaterly <- full %>% filter(!is.na(season_holidayed_code)) %>% group_by(checkin_quarter,season_holidayed_code) %>%
tally() %>% top_n(1)
# Impute missing season with a fixed value per check-in quarter.
# NOTE(review): quarter 1 is not covered, so Q1 NAs remain — confirm intended.
full <- full %>% mutate(season_holidayed_code=case_when(
is.na(season_holidayed_code)&checkin_quarter==2~as.integer(2),
is.na(season_holidayed_code)&checkin_quarter==3~as.integer(4),
is.na(season_holidayed_code)&checkin_quarter==4~as.integer(2),
TRUE~season_holidayed_code))
# Bucket booking lead time (days) into four categories.
full <- full %>% mutate(adv_booking=case_when(advance_booking<=2~'Sudden',
advance_booking>2&advance_booking<=15~'Fortnite',
advance_booking>15&advance_booking<=31~'Amonth',
advance_booking>31~"Long Booking"))
# Exploratory: mean spend per lead-time bucket (train rows only).
full %>% filter(!is.na(amount_spent_per_room_night_scaled))%>%
group_by(adv_booking) %>% summarise(mean(amount_spent_per_room_night_scaled)) %>% View()
# Inspect the distribution of guests' home states. (The original group_by()
# call carried a stray trailing comma, i.e. an empty argument that rlang
# silently dropped; removed for clarity.)
full %>% group_by(state_code_residence) %>% count(sort=TRUE)
full %>% ggplot(aes(factor(state_code_residence),fill=factor(cluster_code)))+
geom_bar(stat='count')
# Impute missing residence state with 8 — presumably the modal state from the
# count above; TODO confirm.
full <- full %>% mutate(state_code_residence=replace_na(state_code_residence,8))
#now lets get the count
# --- Per-member aggregate features -------------------------------------------
# Min/max of party size, stay length, visit count etc. per member, computed
# across the combined train+test table.
full <- full %>%
group_by(memberid) %>% mutate(max_total_pax=max(total_pax),min_total_pax=min(total_pax),
max_roomnight=max(roomnights),min_roomnight=min(roomnights),
total_visit=n(),max_adult=max(numberofadults),
min_adult=min(numberofadults),max_children=max(numberofchildren),
min_children=min(numberofchildren)) %>% ungroup()
# Same aggregates per (member, resort region, resort type).
# NOTE(review): `min_adult_rrc` is assigned twice below; the first assignment
# (max(numberofadults)) was presumably meant to be `max_adult_rrc`, and the
# later duplicate overwrites it, so the max-adults feature is silently lost.
# Renaming it would add a column and shift the hard-coded numeric column
# indices used later in `X <- colnames(full)[c(...)]`, so fix both together.
full <- full %>% group_by(memberid,resort_region_code,resort_type_code) %>%
mutate(max_roomnight_rrc=max(roomnights),min_roomnight_rrc=min(roomnights),total_visit_rrc=n(),
min_adult_rrc=max(numberofadults),min_adult_rrc=min(numberofadults),
max_children_rrc=max(numberofchildren),min_children_rrc=min(numberofchildren)) %>% ungroup()
# --- Train-only aggregate encodings ------------------------------------------
# Three lookup tables of spend statistics, each over a different grouping and
# computed on training rows only, later left-joined back onto `full`.
# NOTE(review): min/max/mean are each divided by the group size — unusual for
# target encoding; presumably a deliberate frequency-weighted signal. Confirm.
prac <- full %>% filter(is_train==1) %>% group_by(pax,room_stay,adv_booking,checkin_d) %>%
summarize(prac_spent=n(),
prac_min=min(amount_spent_per_room_night_scaled)/prac_spent,
prac_max=max(amount_spent_per_room_night_scaled)/prac_spent,
prac_avg=mean(amount_spent_per_room_night_scaled)/prac_spent) %>% ungroup()
cmpr <- full %>% filter(is_train==1) %>%
group_by(cluster_code,member_age_buckets,pax,room_type_booked_code) %>%
summarise(cmpr_spent=n(),
cmpr_min=min(amount_spent_per_room_night_scaled)/cmpr_spent,
cmpr_max=max(amount_spent_per_room_night_scaled)/cmpr_spent,
cmpr_avg=mean(amount_spent_per_room_night_scaled)/cmpr_spent) %>% ungroup()
sssr <- full %>% filter(is_train==1) %>%
group_by(season_holidayed_code,state_code_residence,state_code_resort,reservationstatusid_code) %>%
summarise(sssr_spent=n(),
sssr_min=min(amount_spent_per_room_night_scaled)/sssr_spent,
sssr_max=max(amount_spent_per_room_night_scaled)/sssr_spent,
sssr_avg=mean(amount_spent_per_room_night_scaled)/sssr_spent) %>% ungroup()
# #lets get mode details on memeber
# full %>% group_by(memberid) %>% count(sort=TRUE)
# full %>% group_by(memberid,persontravellingid) %>% count(sort=TRUE)
#
# full %>% ggplot(aes(factor(resort_region_code),amount_spent_per_room_night_scaled))+geom_boxplot()
#
# full %>% ggplot(aes(factor(main_product_code),amount_spent_per_room_night_scaled))+geom_boxplot()
#
# full %>% filter(total_pax>12) %>% group_by(memberid) %>% count(sort=TRUE)
# full %>% ggplot(aes(factor(main_product_code),total_pax))+geom_boxplot()
# full %>% ggplot(aes(factor(channel_code),total_pax))+geom_boxplot()
#
# full %>% filter(total_pax>15)%>% group_by(main_product_code,total_pax) %>% count()
#
#
# full %>% group_by(resort_id,checkin_month) %>% count(sort=TRUE)
# full %>% group_by(resort_id,season_holidayed_code) %>% count(sort=TRUE)
#
# train %>% ggplot(aes(resort_region_code))+geom_bar(stat='count')
# train %>% ggplot(aes(resort_type_code))+geom_bar(stat='count')
# train %>% ggplot(aes(season_holidayed_code))+geom_bar(stat='count')
#
# train %>% group_by(season_holidayed_code) %>% count(sort=TRUE)
#
# full %>% filter(is_train==1) %>% ggplot(aes(resort_region_code,fill=factor(cluster_code)))+
# geom_bar(stat="count")
#
# full %>% filter(is_train==1) %>% ggplot(aes(resort_type_code,fill=factor(cluster_code)))+
# geom_bar(stat="count")
# full %>% filter(is_train==1) %>% ggplot(aes(resort_type_code,fill=factor(resort_region_code)))+
# geom_bar(stat="count")
#
# full %>% filter(is_train==1) %>% ggplot(aes(cluster_code,fill=factor(state_code_resort)))+
# geom_bar(stat="count")
#
# train %>% group_by(resort_id) %>% count(sort = TRUE)
# --- Join encodings back and convert categoricals to factors -----------------
# left_join() without an explicit `by` matches on all shared columns.
full <- full %>% left_join(cmpr)
full <- full %>% left_join(sssr) %>% left_join(.,prac)
# Categorical id/code columns become factors for the models below.
full <- full %>% mutate(channel_code=factor(channel_code),
main_product_code = factor(main_product_code),
persontravellingid=factor(persontravellingid),
resort_region_code=factor(resort_region_code),
resort_type_code=factor(resort_type_code),
season_holidayed_code=factor(season_holidayed_code),
state_code_residence=factor(state_code_residence),
state_code_resort=factor(state_code_resort),
room_type_booked_code=factor(room_type_booked_code),
member_age_buckets=factor(member_age_buckets),
cluster_code=factor(cluster_code),memberid=factor(memberid),
reservationstatusid_code = factor(reservationstatusid_code))
colls <- c('booking_month','booking_day','checkin_month','checkin_day','booking_d','checkin_d',
'checkout_d','checkin_quarter','checkin_weekend','pax','room_stay','adv_booking')
# funs() was deprecated in dplyr 0.8; use a lambda list instead.
full %<>% mutate_at(colls,list(~ factor(.)))
# --- Feature selection and h2o frames ----------------------------------------
# Predictor/target columns are picked by hard-coded position, which is brittle
# if any upstream step adds or removes a column — NOTE(review).
X <- colnames(full)[c(5:16,18,19,21,22,26:53,55:57,59:61,63:65)]
Y <- colnames(train)[c(24)]
str(full[,X])
# Alternative predictor subsets (defined but not used by the models below).
X1 <- X[c(13,14,15,16,18,20,22:53)]
X2 <- X[c(27:53)]
# Split back into train/test using the flag added earlier.
tr<- full %>% filter(is_train==1)
te <- full %>% filter(is_train==0)
# Start a local h2o cluster and upload both frames.
h2o.init()
tr <- as.h2o(tr[c(X,Y)])
te <- as.h2o(te[c(X)])
# --- Model training and blended submission -----------------------------------
# NOTE(review): `almname` is built but never used below.
almname <- paste('ak_h2o_automl',format(Sys.time(),"%d%H%M%S"),sep = '_')
# AutoML leaderboard capped at 10 models, selected on RMSE.
autoML <- h2o.automl(X,Y,training_frame = tr,seed=2019,stopping_metric=c("RMSE"),max_models = 10)
autoML
# Ridge regression (alpha = 0) with lambda search and 5-fold CV.
glmML <- h2o.glm(X,Y,training_frame = tr,seed=2019,nfolds = 5,family = "gaussian",lambda_search = T,
standardize = F,remove_collinear_columns = T,alpha = 0)
glmML
save(glmML,file="glmmlv3.rda")
# GBM: 400 trees, depth 8, very small learning rate, 0.6 row/column sampling.
g2<-h2o.gbm(X,Y,training_frame = tr,ntrees = 400
,score_tree_interval = 50 #,nfolds = 4,stopping_rounds = 4,stopping_tolerance = 0
,learn_rate = 0.001,max_depth = 8,sample_rate = 0.6,col_sample_rate = 0.6
,model_id = "g2")
g2
save(g2, file="gbmv2.rda")
# Predict on the test frame and pull results back into plain R vectors.
aml_test_pred <- h2o.predict(autoML,te)
prediction <- as.vector(aml_test_pred)
glm_pred <- h2o.predict(glmML,te)
glm_pred <- as.vector(glm_pred)
g2_pred <- h2o.predict(g2,te)
g2_pred <- as.vector(g2_pred)
# Fixed-weight blend: 55% GBM, 40% AutoML, 5% GLM.
sample$amount_spent_per_room_night_scaled <- g2_pred*0.55+prediction*0.4+glm_pred*0.05
# Timestamped submission file name ("%s" = seconds since the epoch).
filename <- paste('ak_ensemble_4',format(Sys.time(),"%Y%m%d%H%M%s"),sep = '_')
write.csv(sample,paste0(filename,'.csv',collapse = ''),row.names = FALSE)
| /ClubMahindra/FinalModel_AK.R | no_license | aka7h/Analytics-Hackathon | R | false | false | 11,034 | r | library(tidyr)
library(tidyverse)
library(caret)
library(h2o)
library(lubridate)
library(keras)
library(chron)
library(magrittr)
train <- read_csv('train.csv')
test <- read_csv('test.csv')
sample <- read_csv('sample_submission.csv')
summary(train)
train$is_train <- 1
test$is_train <- 0
test$amount_spent_per_room_night_scaled <- NA
full <- bind_rows(train,test)
sapply(train,function(x)sum(is.na(x)))
sapply(test,function(x)sum(is.na(x)))
full <- full %>% mutate(booking_date=as.Date(booking_date,"%d/%m/%y"),
checkin_date=as.Date(checkin_date,"%d/%m/%y"),
checkout_date=as.Date(checkout_date,"%d/%m/%y"),
advance_booking = as.numeric(checkin_date-booking_date),
resort_id=str_trunc(resort_id,10),
memberid=str_trunc(memberid,10))
#advance bookings
full %>% group_by(memberid) %>% mutate(d=n(),r=sum(amount_spent_per_room_night_scaled)) %>%
ungroup() %>% filter(advance_booking<0) %>% View()
#total of 14 members. here we cannot remove these users id because,
#there are 2 users in test with negative advance booking date.
#take either same day booking, 15 days mode and 33 days median
full %>% group_by(advance_booking) %>% count(sort=TRUE)
#since 0 is 3rd highest, lets take same day booking
full <- full %>% mutate(booking_date=case_when(advance_booking<0~checkin_date,
TRUE~booking_date),
advance_booking=case_when(advance_booking<0~0,
TRUE~advance_booking))
full <- full %>% mutate(booking_month=month(booking_date),booking_day=day(booking_date),
checkin_month=month(checkin_date),checkin_day=day(checkin_date),
booking_d=weekdays(booking_date),checkin_d=weekdays(checkin_date),
checkout_d=weekdays(checkout_date),checkin_quarter=quarter(checkin_date),
checkin_weekend=is.weekend(checkin_date))
full %>% group_by(total_pax) %>% count(sort=TRUE)
full %>% filter(total_pax>11) %>% View()
full <- full %>% mutate(pax=case_when(total_pax>4~'others',
total_pax<2~'others',
total_pax==2~'couple',
TRUE~'family'))
full %>% group_by(pax) %>% count(sort=TRUE)
full %>% ggplot(aes(factor(roomnights),advance_booking))+geom_boxplot()
full %>% group_by(roomnights) %>% count(sort=TRUE)
full %>% filter(roomnights<1) %>% View()
#cleaning the room night stay
full <- full %>% mutate(roomnights=case_when(roomnights<1~as.integer(checkout_date-checkin_date),
TRUE~roomnights))
full <- full %>% mutate(room_stay=case_when(roomnights<=2~'VShort',
roomnights>2&roomnights<=4~'Normal',
roomnights>4&roomnights<8~'Week',
roomnights>7~'Long'))
full %>% group_by(room_stay) %>% count(sort=TRUE)
full %>% ggplot(aes(season_holidayed_code,fill=factor(checkin_quarter)))+geom_bar(stat='count')
top_season_quaterly <- full %>% filter(!is.na(season_holidayed_code)) %>% group_by(checkin_quarter,season_holidayed_code) %>%
tally() %>% top_n(1)
full <- full %>% mutate(season_holidayed_code=case_when(
is.na(season_holidayed_code)&checkin_quarter==2~as.integer(2),
is.na(season_holidayed_code)&checkin_quarter==3~as.integer(4),
is.na(season_holidayed_code)&checkin_quarter==4~as.integer(2),
TRUE~season_holidayed_code))
full <- full %>% mutate(adv_booking=case_when(advance_booking<=2~'Sudden',
advance_booking>2&advance_booking<=15~'Fortnite',
advance_booking>15&advance_booking<=31~'Amonth',
advance_booking>31~"Long Booking"))
full %>% filter(!is.na(amount_spent_per_room_night_scaled))%>%
group_by(adv_booking) %>% summarise(mean(amount_spent_per_room_night_scaled)) %>% View()
# Inspect the distribution of guests' home states. (The original group_by()
# call carried a stray trailing comma, i.e. an empty argument that rlang
# silently dropped; removed for clarity.)
full %>% group_by(state_code_residence) %>% count(sort=TRUE)
full %>% ggplot(aes(factor(state_code_residence),fill=factor(cluster_code)))+
geom_bar(stat='count')
# Impute missing residence state with 8 — presumably the modal state from the
# count above; TODO confirm.
full <- full %>% mutate(state_code_residence=replace_na(state_code_residence,8))
#now lets get the count
# --- Per-member aggregate features -------------------------------------------
# Min/max of party size, stay length, visit count etc. per member, computed
# across the combined train+test table.
full <- full %>%
group_by(memberid) %>% mutate(max_total_pax=max(total_pax),min_total_pax=min(total_pax),
max_roomnight=max(roomnights),min_roomnight=min(roomnights),
total_visit=n(),max_adult=max(numberofadults),
min_adult=min(numberofadults),max_children=max(numberofchildren),
min_children=min(numberofchildren)) %>% ungroup()
# Same aggregates per (member, resort region, resort type).
# NOTE(review): `min_adult_rrc` is assigned twice below; the first assignment
# (max(numberofadults)) was presumably meant to be `max_adult_rrc`, and the
# later duplicate overwrites it, so the max-adults feature is silently lost.
# Renaming it would add a column and shift the hard-coded numeric column
# indices used later in `X <- colnames(full)[c(...)]`, so fix both together.
full <- full %>% group_by(memberid,resort_region_code,resort_type_code) %>%
mutate(max_roomnight_rrc=max(roomnights),min_roomnight_rrc=min(roomnights),total_visit_rrc=n(),
min_adult_rrc=max(numberofadults),min_adult_rrc=min(numberofadults),
max_children_rrc=max(numberofchildren),min_children_rrc=min(numberofchildren)) %>% ungroup()
prac <- full %>% filter(is_train==1) %>% group_by(pax,room_stay,adv_booking,checkin_d) %>%
summarize(prac_spent=n(),
prac_min=min(amount_spent_per_room_night_scaled)/prac_spent,
prac_max=max(amount_spent_per_room_night_scaled)/prac_spent,
prac_avg=mean(amount_spent_per_room_night_scaled)/prac_spent) %>% ungroup()
cmpr <- full %>% filter(is_train==1) %>%
group_by(cluster_code,member_age_buckets,pax,room_type_booked_code) %>%
summarise(cmpr_spent=n(),
cmpr_min=min(amount_spent_per_room_night_scaled)/cmpr_spent,
cmpr_max=max(amount_spent_per_room_night_scaled)/cmpr_spent,
cmpr_avg=mean(amount_spent_per_room_night_scaled)/cmpr_spent) %>% ungroup()
sssr <- full %>% filter(is_train==1) %>%
group_by(season_holidayed_code,state_code_residence,state_code_resort,reservationstatusid_code) %>%
summarise(sssr_spent=n(),
sssr_min=min(amount_spent_per_room_night_scaled)/sssr_spent,
sssr_max=max(amount_spent_per_room_night_scaled)/sssr_spent,
sssr_avg=mean(amount_spent_per_room_night_scaled)/sssr_spent) %>% ungroup()
# #lets get mode details on memeber
# full %>% group_by(memberid) %>% count(sort=TRUE)
# full %>% group_by(memberid,persontravellingid) %>% count(sort=TRUE)
#
# full %>% ggplot(aes(factor(resort_region_code),amount_spent_per_room_night_scaled))+geom_boxplot()
#
# full %>% ggplot(aes(factor(main_product_code),amount_spent_per_room_night_scaled))+geom_boxplot()
#
# full %>% filter(total_pax>12) %>% group_by(memberid) %>% count(sort=TRUE)
# full %>% ggplot(aes(factor(main_product_code),total_pax))+geom_boxplot()
# full %>% ggplot(aes(factor(channel_code),total_pax))+geom_boxplot()
#
# full %>% filter(total_pax>15)%>% group_by(main_product_code,total_pax) %>% count()
#
#
# full %>% group_by(resort_id,checkin_month) %>% count(sort=TRUE)
# full %>% group_by(resort_id,season_holidayed_code) %>% count(sort=TRUE)
#
# train %>% ggplot(aes(resort_region_code))+geom_bar(stat='count')
# train %>% ggplot(aes(resort_type_code))+geom_bar(stat='count')
# train %>% ggplot(aes(season_holidayed_code))+geom_bar(stat='count')
#
# train %>% group_by(season_holidayed_code) %>% count(sort=TRUE)
#
# full %>% filter(is_train==1) %>% ggplot(aes(resort_region_code,fill=factor(cluster_code)))+
# geom_bar(stat="count")
#
# full %>% filter(is_train==1) %>% ggplot(aes(resort_type_code,fill=factor(cluster_code)))+
# geom_bar(stat="count")
# full %>% filter(is_train==1) %>% ggplot(aes(resort_type_code,fill=factor(resort_region_code)))+
# geom_bar(stat="count")
#
# full %>% filter(is_train==1) %>% ggplot(aes(cluster_code,fill=factor(state_code_resort)))+
# geom_bar(stat="count")
#
# train %>% group_by(resort_id) %>% count(sort = TRUE)
# --- Join encodings back and convert categoricals to factors -----------------
# left_join() without an explicit `by` matches on all shared columns.
full <- full %>% left_join(cmpr)
full <- full %>% left_join(sssr) %>% left_join(.,prac)
# Categorical id/code columns become factors for the models below.
full <- full %>% mutate(channel_code=factor(channel_code),
main_product_code = factor(main_product_code),
persontravellingid=factor(persontravellingid),
resort_region_code=factor(resort_region_code),
resort_type_code=factor(resort_type_code),
season_holidayed_code=factor(season_holidayed_code),
state_code_residence=factor(state_code_residence),
state_code_resort=factor(state_code_resort),
room_type_booked_code=factor(room_type_booked_code),
member_age_buckets=factor(member_age_buckets),
cluster_code=factor(cluster_code),memberid=factor(memberid),
reservationstatusid_code = factor(reservationstatusid_code))
colls <- c('booking_month','booking_day','checkin_month','checkin_day','booking_d','checkin_d',
'checkout_d','checkin_quarter','checkin_weekend','pax','room_stay','adv_booking')
# funs() was deprecated in dplyr 0.8; use a lambda list instead.
full %<>% mutate_at(colls,list(~ factor(.)))
X <- colnames(full)[c(5:16,18,19,21,22,26:53,55:57,59:61,63:65)]
Y <- colnames(train)[c(24)]
str(full[,X])
X1 <- X[c(13,14,15,16,18,20,22:53)]
X2 <- X[c(27:53)]
tr<- full %>% filter(is_train==1)
te <- full %>% filter(is_train==0)
h2o.init()
tr <- as.h2o(tr[c(X,Y)])
te <- as.h2o(te[c(X)])
almname <- paste('ak_h2o_automl',format(Sys.time(),"%d%H%M%S"),sep = '_')
autoML <- h2o.automl(X,Y,training_frame = tr,seed=2019,stopping_metric=c("RMSE"),max_models = 10)
autoML
glmML <- h2o.glm(X,Y,training_frame = tr,seed=2019,nfolds = 5,family = "gaussian",lambda_search = T,
standardize = F,remove_collinear_columns = T,alpha = 0)
glmML
save(glmML,file="glmmlv3.rda")
g2<-h2o.gbm(X,Y,training_frame = tr,ntrees = 400
,score_tree_interval = 50 #,nfolds = 4,stopping_rounds = 4,stopping_tolerance = 0
,learn_rate = 0.001,max_depth = 8,sample_rate = 0.6,col_sample_rate = 0.6
,model_id = "g2")
g2
save(g2, file="gbmv2.rda")
aml_test_pred <- h2o.predict(autoML,te)
prediction <- as.vector(aml_test_pred)
glm_pred <- h2o.predict(glmML,te)
glm_pred <- as.vector(glm_pred)
g2_pred <- h2o.predict(g2,te)
g2_pred <- as.vector(g2_pred)
sample$amount_spent_per_room_night_scaled <- g2_pred*0.55+prediction*0.4+glm_pred*0.05
filename <- paste('ak_ensemble_4',format(Sys.time(),"%Y%m%d%H%M%s"),sep = '_')
write.csv(sample,paste0(filename,'.csv',collapse = ''),row.names = FALSE)
|
% Generated by roxygen2 (4.0.1): do not edit by hand
\name{vennplot}
\alias{vennplot}
\title{vennplot}
\usage{
vennplot(Sets, by = "gplots")
}
\arguments{
\item{Sets}{a list of object, can be vector or GRanges object}
\item{by}{one of gplots or Vennerable}
}
\value{
Venn plot that summarizes the overlap of peaks
from different experiments or gene annotations from
different peak files.
}
\description{
Plot the overlap of a list of objects
}
\examples{
require(TxDb.Hsapiens.UCSC.hg19.knownGene)
txdb <- TxDb.Hsapiens.UCSC.hg19.knownGene
peakfile <- system.file("extdata", "sample_peaks.txt", package="ChIPseeker")
peakAnno <- annotatePeak(peakfile, TranscriptDb=txdb)
peakAnnoList <- lapply(1:3, function(i) peakAnno[sample(1:length(peakAnno), 100),])
names(peakAnnoList) <- paste("peak", 1:3, sep="_")
genes= lapply(peakAnnoList, function(i) unlist(i$geneId))
vennplot(genes)
}
\author{
G Yu
}
| /2X/2.14/ChIPseeker/man/vennplot.Rd | no_license | GuangchuangYu/bioc-release | R | false | false | 897 | rd | % Generated by roxygen2 (4.0.1): do not edit by hand
\name{vennplot}
\alias{vennplot}
\title{vennplot}
\usage{
vennplot(Sets, by = "gplots")
}
\arguments{
\item{Sets}{a list of object, can be vector or GRanges object}
\item{by}{one of gplots or Vennerable}
}
\value{
Venn plot that summarizes the overlap of peaks
from different experiments or gene annotations from
different peak files.
}
\description{
Plot the overlap of a list of objects
}
\examples{
require(TxDb.Hsapiens.UCSC.hg19.knownGene)
txdb <- TxDb.Hsapiens.UCSC.hg19.knownGene
peakfile <- system.file("extdata", "sample_peaks.txt", package="ChIPseeker")
peakAnno <- annotatePeak(peakfile, TranscriptDb=txdb)
peakAnnoList <- lapply(1:3, function(i) peakAnno[sample(1:length(peakAnno), 100),])
names(peakAnnoList) <- paste("peak", 1:3, sep="_")
genes= lapply(peakAnnoList, function(i) unlist(i$geneId))
vennplot(genes)
}
\author{
G Yu
}
|
a109e44b7976499aef07aced76364af4 dungeon_i10-m5-u10-v0.pddl_planlen=174.qdimacs 36021 97353 | /code/dcnf-ankit-optimized/Results/QBFLIB-2018/A1/Database/Kronegger-Pfandler-Pichler/dungeon/dungeon_i10-m5-u10-v0.pddl_planlen=174/dungeon_i10-m5-u10-v0.pddl_planlen=174.R | no_license | arey0pushpa/dcnf-autarky | R | false | false | 91 | r | a109e44b7976499aef07aced76364af4 dungeon_i10-m5-u10-v0.pddl_planlen=174.qdimacs 36021 97353 |
# Choose your own! Wines
# Script to accompany the final report on my Capstone project
# Author: Christian Mast
# Date: 23.6.2020
# Date Source:
# Original Owners:
# Forina, M. et al, PARVUS -
# An Extendible Package for Data Exploration, Classification and Correlation.
# Institute of Pharmaceutical and Food Analysis and Technologies, Via Brigata Salerno, 16147 Genoa, Italy
#
# Donor:
# Stefan Aeberhard, email: stefan '@' coral.cs.jcu.edu.au
# Installing packages if required, then attaching them.
# The original `if (!require(pkg)) install.packages(pkg)` pattern installs a
# missing package but never attaches it, so a fresh machine would still fail
# at the first library call; loading explicitly after the install fixes that.
pkgs <- c("tidyverse", "ggplot2", "caret", "data.table", "corrplot",
          "randomForest", "rpart", "Rborist", "kernlab")
for (pkg in pkgs) {
  if (!requireNamespace(pkg, quietly = TRUE)) {
    install.packages(pkg, repos = "http://cran.us.r-project.org")
  }
  library(pkg, character.only = TRUE)
}
# Getting the data
# Define the urls
wine_data_url <- "https://archive.ics.uci.edu/ml/machine-learning-databases/wine/wine.data"
wine_names_url <- "https://archive.ics.uci.edu/ml/machine-learning-databases/wine/wine.names"
# Download into the working directory. Plain relative paths are portable;
# the original ".\\" prefix is Windows-only and would create a file literally
# named ".\wine-data.csv" on other platforms.
download.file(wine_data_url, "wine-data.csv")
download.file(wine_names_url, "wine-names.txt")
# wine.data ships without a header row; the 14 columns are named below.
wines <- read.csv("wine-data.csv", header = FALSE, sep = ",")
#Adding column names - for the ease of life, this is done manually
#"OD280/OD315 of diluted wines" was shortened to OD280.OD315
#"Nonflavanoid phenols" was shortened to Nonflav. phenols
#Slashes and spaces were removed to make rpart happy - thx to Stackoverflow for the tip!
# Winery (1-3) is the class label; the remaining 13 columns are chemical
# measurements.
colnames(wines) <- c("Winery", "Alcohol", "Malic_acid",
"Ash", "Alcalinity_of_ash", "Magnesium",
"Total_phenols", "Flavanoids",
"Nonflav._phenols", "Proanthocyanins",
"Color_intensity", "Hue",
"OD280.OD315", "Proline")
#Show the first 6 rows of the data frame
head(wines)
#Show Min, 1st Quartile, Median, Mean, 3rd quartile and max of wines
summary(wines)
# Create a grid of histograms, one facet per variable (gather() reshapes the
# data frame to long variable/value pairs first).
gather(wines, variable, value) %>%
ggplot (aes(value)) +
geom_histogram(bins=18)+
facet_wrap(~variable, ncol=4, scales="free")+
ggtitle("Histograms of variables in 'wines' data frame")+
xlab("Values")+
ylab("Count")
#Removing winery, then calling corrplot()
# Lower-triangle correlation plot of the 13 measurement columns.
wines %>% subset(select=-Winery) %>%
cor(.) %>%
corrplot(tl.col="black",
tl.cex=0.7,
number.cex=0.6,
title="Correlation Plot",
type="lower",
addCoef.col="black",mar=c(0,0,1,0))
#scaling wines, then rebuilding the winery column
# scale() centers/standardises every column, including Winery; the class label
# is then restored as a factor. NOTE(review): scaling is fitted on the full
# data before the train/test split, a mild form of leakage — confirm intended.
scaled_wines <- as.data.frame(scale(wines))
scaled_wines$Winery <- as.factor(wines$Winery)
# Set a seed to get reproducible train & test sets
set.seed(101, sample.kind = "Rounding")
# 50/50 stratified split on the class label; rows in `index` form the test set.
index <- createDataPartition(scaled_wines$Winery, p = 0.5, list = FALSE)
train_set <- scaled_wines[-index, ]
test_set <- scaled_wines[+index, ]
# Train a knn model, tuning the number of neighbours over odd k in 3..31
fit_knn <- train(Winery ~.,
method="knn",
tuneGrid = data.frame(k=seq(3,31,2)),
data = train_set)
# Output the fit results
fit_knn
# Plot the fit results to show the best k
plot(fit_knn)
# Create a prediction, calculate the confusion matrix and print the accuracy
prediction_knn <- predict(fit_knn, test_set)
cm_knn <- confusionMatrix(prediction_knn, test_set$Winery)
cat("Accuracy of the knn model:", cm_knn$overall["Accuracy"],"\n\n")
# Train a classification tree (rpart) model, tuning the complexity parameter cp
fit_rpart <- train(Winery ~.,
method="rpart",
tuneGrid = data.frame(cp = seq(0.0, 0.05, len = 16)),
data = train_set)
# Output the fit results
fit_rpart
# Plot the fit results to show the best cp
plot(fit_rpart)
# Create a prediction, calculate the confusion matrix and print the accuracy
prediction_rpart <- predict(fit_rpart, test_set)
cm_rpart <- confusionMatrix(prediction_rpart, test_set$Winery)
cat("Accuracy of the rpart model:", cm_rpart$overall["Accuracy"],"\n\n")
#create a plot of the classification tree and add text labels
plot(fit_rpart$finalModel, margin = 0.1, main="Classification tree")
text(fit_rpart$finalModel, cex = 0.75, pos=3, offset=0)
#Create a randomForest fit and plot it to show the number of trees needed later on
# NOTE(review): fit_rf is only plotted; the tuned forest used for prediction
# is the caret/Rborist model trained below.
fit_rf <- randomForest(Winery~., data = train_set)
plot(fit_rf, main="Error of randomForest vs number of trees")
# Train a Rborist (random forest) model with 100 trees, tuning minNode
fit_rb <- train(Winery ~.,
method="Rborist",
tuneGrid = data.frame(predFixed=2, minNode=seq(3,9,1)),
nTree=100,
data = train_set)
# Output the fit results
fit_rb
# Plot the fit results to show the best minNode
plot(fit_rb)
# Create a prediction, calculate the confusion matrix and print the accuracy
prediction_rb <- predict(fit_rb, test_set)
cm_rb <- confusionMatrix(prediction_rb, test_set$Winery)
cat("Accuracy of the Rborist model:", cm_rb$overall["Accuracy"],"\n\n")
# Train a "Support Vector Machine with linear Kernel" model, tuning cost C
fit_svml <- train(Winery ~.,
method="svmLinear",
tuneGrid = expand.grid(C = c(0.001, 0.01, 0.1, 1, 10)),
data = train_set)
# Output the fit results
fit_svml
# Create a prediction, calculate the confusion matrix and print the accuracy
prediction_svml <- predict(fit_svml, test_set)
cm_svml <- confusionMatrix(prediction_svml, test_set$Winery)
cat("Accuracy of the svmLinear model:", cm_svml$overall["Accuracy"],"\n\n")
#Convert predictions to numbers, calculate the average
# as.numeric() on a factor yields the level index (1..3 here); the three
# models' indices are averaged and rounded back to a class. The rpart model
# is not part of the blend — presumably deliberate; confirm.
# Pinning the factor levels to those of the truth column (instead of the
# original as.factor()) keeps confusionMatrix() from failing when some class
# is never predicted.
prediction_ensemble <- factor(round((
  as.numeric(prediction_knn)+
  as.numeric(prediction_rb)+
  as.numeric(prediction_svml))/3),
  levels = levels(test_set$Winery))
# Calculate the confusion matrix and print the accuracy
cm_ensemble <- confusionMatrix(prediction_ensemble, test_set$Winery)
cat("Accuracy of the Ensemble:", cm_ensemble$overall["Accuracy"],"\n\n")
| /CYO-wine-script.r | no_license | CokeSpirit/ChoseYourOwn | R | false | false | 6,413 | r | # Choose your own! Wines
# Script to accompany the final report on my Capstone project
# Author: Christian Mast
# Date: 23.6.2020
# Date Source:
# Original Owners:
# Forina, M. et al, PARVUS -
# An Extendible Package for Data Exploration, Classification and Correlation.
# Institute of Pharmaceutical and Food Analysis and Technologies, Via Brigata Salerno, 16147 Genoa, Italy
#
# Donor:
# Stefan Aeberhard, email: stefan '@' coral.cs.jcu.edu.au
# Installing packages if required, then attaching them.
# The original `if (!require(pkg)) install.packages(pkg)` pattern installs a
# missing package but never attaches it, so a fresh machine would still fail
# at the first library call; loading explicitly after the install fixes that.
pkgs <- c("tidyverse", "ggplot2", "caret", "data.table", "corrplot",
          "randomForest", "rpart", "Rborist", "kernlab")
for (pkg in pkgs) {
  if (!requireNamespace(pkg, quietly = TRUE)) {
    install.packages(pkg, repos = "http://cran.us.r-project.org")
  }
  library(pkg, character.only = TRUE)
}
# Getting the data
# Define the urls
wine_data_url <- "https://archive.ics.uci.edu/ml/machine-learning-databases/wine/wine.data"
wine_names_url <- "https://archive.ics.uci.edu/ml/machine-learning-databases/wine/wine.names"
# Download into the working directory. Plain relative paths are portable;
# the original ".\\" prefix is Windows-only and would create a file literally
# named ".\wine-data.csv" on other platforms.
download.file(wine_data_url, "wine-data.csv")
download.file(wine_names_url, "wine-names.txt")
# wine.data ships without a header row; the 14 columns are named below.
wines <- read.csv("wine-data.csv", header = FALSE, sep = ",")
#Adding column names - for the ease of life, this is done manually
#"OD280/OD315 of diluted wines" was shortened to OD280.OD315
#"Nonflavanoid phenols" was shortened to Nonflav. phenols
#Slashes and spaces were removed to make rpart happy - thx to Stackoverflow for the tip!
colnames(wines) <- c("Winery", "Alcohol", "Malic_acid",
"Ash", "Alcalinity_of_ash", "Magnesium",
"Total_phenols", "Flavanoids",
"Nonflav._phenols", "Proanthocyanins",
"Color_intensity", "Hue",
"OD280.OD315", "Proline")
#Show the first 6 rows of the data frame
head(wines)
#Show Min, 1st Quartile, Median, Mean, 3rd quartile and max of wines
summary(wines)
# Create a grid of histograms, one facet per variable, with free scales
# because the variables live on very different ranges.
gather(wines, variable, value) %>%
ggplot (aes(value)) +
geom_histogram(bins=18)+
facet_wrap(~variable, ncol=4, scales="free")+
ggtitle("Histograms of variables in 'wines' data frame")+
xlab("Values")+
ylab("Count")
#Removing winery (the class label), then calling corrplot() on the
#pairwise correlation matrix of the remaining numeric predictors.
wines %>% subset(select=-Winery) %>%
cor(.) %>%
corrplot(tl.col="black",
tl.cex=0.7,
number.cex=0.6,
title="Correlation Plot",
type="lower",
addCoef.col="black",mar=c(0,0,1,0))
#scaling wines, then rebuilding the winery column: scale() also standardizes
#the Winery column, but that column is immediately overwritten with the
#original labels as a factor, so only the predictors stay standardized.
scaled_wines <- as.data.frame(scale(wines))
scaled_wines$Winery <- as.factor(wines$Winery)
# Set a seed to get reproducible train & test sets
set.seed(101, sample.kind = "Rounding")
# 50/50 stratified split on the class label; note the partition indices go to
# the TEST set and their complement to the TRAIN set.
index <- createDataPartition(scaled_wines$Winery, p = 0.5, list = FALSE)
train_set <- scaled_wines[-index, ]
test_set <- scaled_wines[+index, ]
# Train a knn model, tuning k over the odd values 3..31
fit_knn <- train(Winery ~.,
method="knn",
tuneGrid = data.frame(k=seq(3,31,2)),
data = train_set)
# Output the fit results
fit_knn
# Plot the fit results to show the best k
plot(fit_knn)
# Create a prediction, calculate the confusion matrix and print the accuracy
prediction_knn <- predict(fit_knn, test_set)
cm_knn <- confusionMatrix(prediction_knn, test_set$Winery)
cat("Accuracy of the knn model:", cm_knn$overall["Accuracy"],"\n\n")
# Train an rpart classification-tree model, tuning the complexity
# parameter cp over 16 values in [0, 0.05]
fit_rpart <- train(Winery ~.,
method="rpart",
tuneGrid = data.frame(cp = seq(0.0, 0.05, len = 16)),
data = train_set)
# Output the fit results
fit_rpart
# Plot the fit results to show the best cp
plot(fit_rpart)
# Create a prediction, calculate the confusion matrix and print the accuracy
prediction_rpart <- predict(fit_rpart, test_set)
cm_rpart <- confusionMatrix(prediction_rpart, test_set$Winery)
cat("Accuracy of the rpart model:", cm_rpart$overall["Accuracy"],"\n\n")
#create a plot of the classification tree and add text labels
plot(fit_rpart$finalModel, margin = 0.1, main="Classification tree")
text(fit_rpart$finalModel, cex = 0.75, pos=3, offset=0)
#Create a randomForest fit and plot its error curve to choose the number of
#trees used by the Rborist model below (nTree=100)
fit_rf <- randomForest(Winery~., data = train_set)
plot(fit_rf, main="Error of randomForest vs number of trees")
# Train a Rborist (random forest) model with 100 trees, tuning minNode
# over 3..9 with predFixed fixed at 2
fit_rb <- train(Winery ~.,
method="Rborist",
tuneGrid = data.frame(predFixed=2, minNode=seq(3,9,1)),
nTree=100,
data = train_set)
# Output the fit results
fit_rb
# Plot the fit results to show the best minNode
plot(fit_rb)
# Create a prediction, calculate the confusion matrix and print the accuracy
prediction_rb <- predict(fit_rb, test_set)
cm_rb <- confusionMatrix(prediction_rb, test_set$Winery)
cat("Accuracy of the Rborist model:", cm_rb$overall["Accuracy"],"\n\n")
# Train a "Support Vector Machine with linear Kernel" model, tuning the
# cost parameter C over five orders of magnitude
fit_svml <- train(Winery ~.,
method="svmLinear",
tuneGrid = expand.grid(C = c(0.001, 0.01, 0.1, 1, 10)),
data = train_set)
# Output the fit results
fit_svml
# Create a prediction, calculate the confusion matrix and print the accuracy
prediction_svml <- predict(fit_svml, test_set)
cm_svml <- confusionMatrix(prediction_svml, test_set$Winery)
cat("Accuracy of the svmLinear model:", cm_svml$overall["Accuracy"],"\n\n")
#Convert predictions to numbers, calculate the average
# NOTE(review): averaging the integer codes of the factor predictions assumes
# all three models emit factors with the same three levels in the same order,
# so codes 1..3 line up; round() then snaps the mean to the nearest class.
# A majority vote would avoid that assumption -- confirm level ordering.
prediction_ensemble <- as.factor(round((
as.numeric(prediction_knn)+
as.numeric(prediction_rb)+
as.numeric(prediction_svml))/3))
# Calculate the confusion matrix and print the accuracy
cm_ensemble <- confusionMatrix(prediction_ensemble, test_set$Winery)
cat("Accuracy of the Ensemble:", cm_ensemble$overall["Accuracy"],"\n\n")
|
glean.prelim <-
function(choice = 1)
{
### Purpose:- ALCM data collection: reshape the preliminary bioassay data
###   (prelim.df) into the list structure expected by downstream plotting.
### ----------------------------------------------------------------------
### Modified from:-
### ----------------------------------------------------------------------
### Arguments:- choice: currently unused inside this function.
### NOTE(review): `prelim.df` is not an argument -- it must already exist in
###   the enclosing/global environment when this function is called.
### `df.sort` and `make.id` are project-local helpers, not base R.
### ----------------------------------------------------------------------
### Author:- Patrick Connolly, Date:- 10 Apr 2013, 13:29
### ----------------------------------------------------------------------
### Revisions:-
## Treat missing ethyl-formate doses as zero, then drop column 4
prelim.df <- within(prelim.df, Efppm[is.na(Efppm)] <- 0)
prelim.df <- prelim.df[, -4]
## ALCM records: single lifestage "diap"
alcm.df <- prelim.df[prelim.df$Species == "ALCM",]
alcm.df$Lifestage = "diap"
## Latania scale: abbreviate species name and sort by dose/lifestage
latania.df <- prelim.df[prelim.df$Species == "Latania scale",]
latania.df$Species <- "LS"
latania.df <- df.sort(latania.df, c("Efppm", "Lifestage"))
## Mealybug: collapse lifestages and sum counts per dose.
## NOTE(review): assigning aggregate() output columns onto unique() rows
## assumes both produce rows in the same Efppm order -- verify.
mb.df <- prelim.df[prelim.df$Species == "Mealybug",]
mb.df$Lifestage <- "all" ## add lifestages
mb.sum <- unique(mb.df[, 1:5])
mb.sum$Live <- aggregate(Live~ Efppm, data = mb.df, sum)$Live
mb.sum$Dead <- aggregate(Dead~ Efppm, data = mb.df, sum)$Dead
mb.sum$Total <-aggregate(Total~ Efppm, data = mb.df, sum)$Total
## names(mb.sum) <- names(mb.df)
thrip.df <- prelim.df[prelim.df$Species == "Onion thrips",]
thrip.df <- df.sort(thrip.df, c("Efppm", "Lifestage"))
## San Jose scale: same collapse-and-sum treatment as mealybug
sjs.df <- prelim.df[prelim.df$Species == "San Jose",]
sjs.df$Lifestage <- "all"
sjs.sum <- unique(sjs.df[, 1:5])
sjs.sum$Live <- aggregate(Live~ Efppm, data = sjs.df, sum)$Live
sjs.sum$Dead <- aggregate(Dead~ Efppm, data = sjs.df, sum)$Dead
sjs.sum$Total <- aggregate(Total~ Efppm, data = sjs.df, sum)$Total
## names(sjs.sum) <- names(sjs.df)
## add into one dataframe
use.df <- rbind(alcm.df, latania.df, mb.sum, thrip.df, sjs.sum)
## attach(alcm.df)
## on.exit(detach("alcm.df"))
## Build the plotting inputs: group ids, legend labels, titles
idset <- with(use.df, make.id(Efppm))
cutx <- NULL
leg.brief <- with(use.df, unique(paste(Species, Lifestage)))
maint <- "Mortality of various in ethyl formate"
xlabels <- c(0, 0)
xaxtitle <- "Dose (ppm)"
## Return value: a list of vectors consumed by the downstream plot routine
with(use.df,
list(id = idset, times = Efppm, total = unlist(Dead) + unlist(Live),
dead = Dead,
cutx = cutx, offset = 0, xaxtitle = xaxtitle, maint = maint,
legend = leg.brief, xlabels = xlabels, takelog = FALSE))
}
| /.tmp/hrapgc.glean.prelim.R | no_license | Tuxkid/PNZ_EF | R | false | false | 2,293 | r | glean.prelim <-
function(choice = 1)
{
### Purpose:- ALCM data collection: reshape the preliminary bioassay data
###   (prelim.df) into the list structure expected by downstream plotting.
### ----------------------------------------------------------------------
### Modified from:-
### ----------------------------------------------------------------------
### Arguments:- choice: currently unused inside this function.
### NOTE(review): `prelim.df` is not an argument -- it must already exist in
###   the enclosing/global environment when this function is called.
### `df.sort` and `make.id` are project-local helpers, not base R.
### ----------------------------------------------------------------------
### Author:- Patrick Connolly, Date:- 10 Apr 2013, 13:29
### ----------------------------------------------------------------------
### Revisions:-
## Treat missing ethyl-formate doses as zero, then drop column 4
prelim.df <- within(prelim.df, Efppm[is.na(Efppm)] <- 0)
prelim.df <- prelim.df[, -4]
## ALCM records: single lifestage "diap"
alcm.df <- prelim.df[prelim.df$Species == "ALCM",]
alcm.df$Lifestage = "diap"
## Latania scale: abbreviate species name and sort by dose/lifestage
latania.df <- prelim.df[prelim.df$Species == "Latania scale",]
latania.df$Species <- "LS"
latania.df <- df.sort(latania.df, c("Efppm", "Lifestage"))
## Mealybug: collapse lifestages and sum counts per dose.
## NOTE(review): assigning aggregate() output columns onto unique() rows
## assumes both produce rows in the same Efppm order -- verify.
mb.df <- prelim.df[prelim.df$Species == "Mealybug",]
mb.df$Lifestage <- "all" ## add lifestages
mb.sum <- unique(mb.df[, 1:5])
mb.sum$Live <- aggregate(Live~ Efppm, data = mb.df, sum)$Live
mb.sum$Dead <- aggregate(Dead~ Efppm, data = mb.df, sum)$Dead
mb.sum$Total <-aggregate(Total~ Efppm, data = mb.df, sum)$Total
## names(mb.sum) <- names(mb.df)
thrip.df <- prelim.df[prelim.df$Species == "Onion thrips",]
thrip.df <- df.sort(thrip.df, c("Efppm", "Lifestage"))
## San Jose scale: same collapse-and-sum treatment as mealybug
sjs.df <- prelim.df[prelim.df$Species == "San Jose",]
sjs.df$Lifestage <- "all"
sjs.sum <- unique(sjs.df[, 1:5])
sjs.sum$Live <- aggregate(Live~ Efppm, data = sjs.df, sum)$Live
sjs.sum$Dead <- aggregate(Dead~ Efppm, data = sjs.df, sum)$Dead
sjs.sum$Total <- aggregate(Total~ Efppm, data = sjs.df, sum)$Total
## names(sjs.sum) <- names(sjs.df)
## add into one dataframe
use.df <- rbind(alcm.df, latania.df, mb.sum, thrip.df, sjs.sum)
## attach(alcm.df)
## on.exit(detach("alcm.df"))
## Build the plotting inputs: group ids, legend labels, titles
idset <- with(use.df, make.id(Efppm))
cutx <- NULL
leg.brief <- with(use.df, unique(paste(Species, Lifestage)))
maint <- "Mortality of various in ethyl formate"
xlabels <- c(0, 0)
xaxtitle <- "Dose (ppm)"
## Return value: a list of vectors consumed by the downstream plot routine
with(use.df,
list(id = idset, times = Efppm, total = unlist(Dead) + unlist(Live),
dead = Dead,
cutx = cutx, offset = 0, xaxtitle = xaxtitle, maint = maint,
legend = leg.brief, xlabels = xlabels, takelog = FALSE))
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/get_admissions.R
\name{get_admissions}
\alias{get_admissions}
\title{Get Hospital Admissions}
\usage{
get_admissions(
keep_vars = "new_adm",
level = "trust",
release_date = Sys.Date(),
mapping,
geo_names
)
}
\arguments{
\item{keep_vars}{Character string, defaulting to "new_adm" (first-time COVID-19 hospital admissions). Defines which variables to keep from the raw data. Other supported options are: "all_adm" (for all COVID-19 hospital admissions), and "all_bed" (for all COVID-19 beds occupied). Multiple values allowed.}
\item{level}{Character string, defaulting to "trust". Defines the level of aggregation
at which to return the data. Other supported options are "utla" for UTLA level admissions
or "ltla" for LTLA level admissions.}
\item{release_date}{Date, release date of data to download. Will automatically find
the Thursday prior to the date specified.}
\item{mapping}{A data.frame containing geo_code, trust_code, p_geo and p_trust.}
\item{geo_names}{A data.frame containing \code{geo_code} and \code{geo_name}. Used to
assign meaningful names to geographies.}
}
\value{
A data.frame of daily admissions and/or bed occupancy data, reported at the Trust, LTLA or UTLA level. Note that new admissions ("new_adm") are called "admissions" in the data.frame to be consistent with a previous version of this function.
}
\description{
Downloads hospital admissions by Hospital trust using
\code{download_trust_data} and then optionally aggregates to either LTLA or UTLA
level. This can be done either with the built in mapping or a user supplied mapping.
}
| /man/get_admissions.Rd | permissive | epiforecasts/covid19.nhs.data | R | false | true | 1,651 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/get_admissions.R
\name{get_admissions}
\alias{get_admissions}
\title{Get Hospital Admissions}
\usage{
get_admissions(
keep_vars = "new_adm",
level = "trust",
release_date = Sys.Date(),
mapping,
geo_names
)
}
\arguments{
\item{keep_vars}{Character string, defaulting to "new_adm" (first-time COVID-19 hospital admissions). Defines which variables to keep from the raw data. Other supported options are: "all_adm" (for all COVID-19 hospital admissions), and "all_bed" (for all COVID-19 beds occupied). Multiple values allowed.}
\item{level}{Character string, defaulting to "trust". Defines the level of aggregation
at which to return the data. Other supported options are "utla" for UTLA level admissions
or "ltla" for LTLA level admissions.}
\item{release_date}{Date, release date of data to download. Will automatically find
the Thursday prior to the date specified.}
\item{mapping}{A data.frame containing geo_code, trust_code, p_geo and p_trust.}
\item{geo_names}{A data.frame containing \code{geo_code} and \code{geo_name}. Used to
assign meaningful names to geographies.}
}
\value{
A data.frame of daily admissions and/or bed occupancy data, reported at the Trust, LTLA or UTLA level. Note that new admissions ("new_adm") are called "admissions" in the data.frame to be consistent with a previous version of this function.
}
\description{
Downloads hospital admissions by Hospital trust using
\code{download_trust_data} and then optionally aggregates to either LTLA or UTLA
level. This can be done either with the built in mapping or a user supplied mapping.
}
|
#' Run Seurat Integration
#'
#' Run batch correction (integration) over a list of Seurat objects, followed by:
#' 1) stashing of batches in metadata 'batch'
#' 2) clustering with resolution 0.2 to 2.0 in increments of 0.2
#' 3) saving to <proj_dir>/output/sce/<feature>_seu_<suffix>.rds
#'
#' @param seu_list A list of Seurat objects to integrate.
#' @param feature Feature type label (e.g. "gene"); passed to the cell-cycle
#'   and percent-mito annotation helpers.
#' @param resolution Clustering resolution(s) passed to \code{seurat_cluster}.
#' @param suffix A suffix to be appended to the file saved in the output dir.
#'   Not referenced inside this function body -- presumably consumed by a
#'   caller or via \code{...}; TODO confirm.
#' @param algorithm Clustering algorithm passed to \code{seurat_cluster}
#'   (default 1).
#' @param organism Organism identifier. Not referenced directly in this
#'   function body -- TODO confirm whether downstream helpers read it via
#'   \code{...}.
#' @param annotate_cell_cycle Logical; if TRUE, score cell-cycle phases.
#' @param annotate_percent_mito Logical; if TRUE, annotate mitochondrial
#'   percentage in the metadata.
#' @param ... Additional arguments forwarded to the seurat_* helper functions.
#'
#' @return The integrated, clustered, marker- and pathway-annotated Seurat
#'   object.
#' @export
#'
#'
#' @examples
seurat_integration_pipeline <- function(seu_list, feature, resolution, suffix = '', algorithm = 1, organism, annotate_cell_cycle = FALSE, annotate_percent_mito = FALSE, ...) {
integrated_seu <- seurat_integrate(seu_list, ...)
# cluster merged seurat objects
integrated_seu <- seurat_cluster(integrated_seu, resolution = resolution, algorithm = algorithm, ...)
# per-cluster marker genes and enriched pathways
integrated_seu <- find_all_markers(integrated_seu)
integrated_seu <- getEnrichedPathways(integrated_seu)
# add read count column
integrated_seu <- add_read_count_col(integrated_seu)
# annotate cell cycle scoring to seurat objects
if (annotate_cell_cycle){
integrated_seu <- annotate_cell_cycle(integrated_seu, feature, ...)
}
# annotate mitochondrial percentage in seurat metadata
if (annotate_percent_mito){
integrated_seu <- add_percent_mito(integrated_seu, feature, ...)
}
#annotate excluded cells
# integrated_seu <- annotate_excluded(integrated_seu, excluded_cells)
return(integrated_seu)
}
#' Run Seurat Pipeline
#'
#' Preprocess, cluster and reduce dimensions for a single Seurat object.
#'
#' @param seu A Seurat object.
#' @param feature Feature type label (default "gene"); passed to the
#'   cell-cycle and percent-mito annotation helpers.
#' @param resolution Clustering resolution (default 0.6).
#' @param reduction Dimensionality reduction to compute and cluster on
#'   (default "pca").
#' @param annotate_cell_cycle Logical; if TRUE (default), score cell-cycle
#'   phases.
#' @param annotate_percent_mito Logical; if TRUE (default), annotate
#'   mitochondrial percentage in the metadata.
#' @param ... Additional arguments forwarded to the seurat_* helper functions.
#'
#' @return The processed, clustered and annotated Seurat object.
#' @export
#'
#' @examples
seurat_pipeline <- function(seu, feature = "gene", resolution=0.6, reduction = "pca", annotate_cell_cycle = TRUE, annotate_percent_mito = TRUE, ...){
  # FIX: use TRUE rather than the reassignable shorthand T
  seu <- seurat_preprocess(seu, scale = TRUE, ...)
  # dimensionality reduction (PCA by default), then clustering on it
  seu <- seurat_reduce_dimensions(seu, check_duplicates = FALSE, reduction = reduction, ...)
  seu <- seurat_cluster(seu = seu, resolution = resolution, reduction = reduction, ...)
  # per-cluster marker genes and enriched pathways
  seu <- find_all_markers(seu, resolution = resolution, reduction = reduction)
  seu <- getEnrichedPathways(seu)
  # annotate low read count category in seurat metadata
  seu <- seuratTools::add_read_count_col(seu)
  # annotate cell cycle scoring to seurat objects
  if (annotate_cell_cycle){
    seu <- annotate_cell_cycle(seu, feature, ...)
  }
  # annotate mitochondrial percentage in seurat metadata
  if (annotate_percent_mito){
    seu <- add_percent_mito(seu, feature, ...)
  }
  return(seu)
}
| /R/pipeline.R | permissive | mitsingh/seuratTools | R | false | false | 2,552 | r | #' Run Seurat Integration
#'
#' Run batch correction (integration) over a list of Seurat objects, followed by:
#' 1) stashing of batches in metadata 'batch'
#' 2) clustering with resolution 0.2 to 2.0 in increments of 0.2
#' 3) saving to <proj_dir>/output/sce/<feature>_seu_<suffix>.rds
#'
#' @param seu_list A list of Seurat objects to integrate.
#' @param feature Feature type label (e.g. "gene"); passed to the cell-cycle
#'   and percent-mito annotation helpers.
#' @param resolution Clustering resolution(s) passed to \code{seurat_cluster}.
#' @param suffix A suffix to be appended to the file saved in the output dir.
#'   Not referenced inside this function body -- presumably consumed by a
#'   caller or via \code{...}; TODO confirm.
#' @param algorithm Clustering algorithm passed to \code{seurat_cluster}
#'   (default 1).
#' @param organism Organism identifier. Not referenced directly in this
#'   function body -- TODO confirm whether downstream helpers read it via
#'   \code{...}.
#' @param annotate_cell_cycle Logical; if TRUE, score cell-cycle phases.
#' @param annotate_percent_mito Logical; if TRUE, annotate mitochondrial
#'   percentage in the metadata.
#' @param ... Additional arguments forwarded to the seurat_* helper functions.
#'
#' @return The integrated, clustered, marker- and pathway-annotated Seurat
#'   object.
#' @export
#'
#'
#' @examples
seurat_integration_pipeline <- function(seu_list, feature, resolution, suffix = '', algorithm = 1, organism, annotate_cell_cycle = FALSE, annotate_percent_mito = FALSE, ...) {
integrated_seu <- seurat_integrate(seu_list, ...)
# cluster merged seurat objects
integrated_seu <- seurat_cluster(integrated_seu, resolution = resolution, algorithm = algorithm, ...)
# per-cluster marker genes and enriched pathways
integrated_seu <- find_all_markers(integrated_seu)
integrated_seu <- getEnrichedPathways(integrated_seu)
# add read count column
integrated_seu <- add_read_count_col(integrated_seu)
# annotate cell cycle scoring to seurat objects
if (annotate_cell_cycle){
integrated_seu <- annotate_cell_cycle(integrated_seu, feature, ...)
}
# annotate mitochondrial percentage in seurat metadata
if (annotate_percent_mito){
integrated_seu <- add_percent_mito(integrated_seu, feature, ...)
}
#annotate excluded cells
# integrated_seu <- annotate_excluded(integrated_seu, excluded_cells)
return(integrated_seu)
}
#' Run Seurat Pipeline
#'
#' Preprocess, cluster and reduce dimensions for a single Seurat object.
#'
#' @param seu A Seurat object.
#' @param feature Feature type label (default "gene"); passed to the
#'   cell-cycle and percent-mito annotation helpers.
#' @param resolution Clustering resolution (default 0.6).
#' @param reduction Dimensionality reduction to compute and cluster on
#'   (default "pca").
#' @param annotate_cell_cycle Logical; if TRUE (default), score cell-cycle
#'   phases.
#' @param annotate_percent_mito Logical; if TRUE (default), annotate
#'   mitochondrial percentage in the metadata.
#' @param ... Additional arguments forwarded to the seurat_* helper functions.
#'
#' @return The processed, clustered and annotated Seurat object.
#' @export
#'
#' @examples
seurat_pipeline <- function(seu, feature = "gene", resolution=0.6, reduction = "pca", annotate_cell_cycle = TRUE, annotate_percent_mito = TRUE, ...){
  # FIX: use TRUE rather than the reassignable shorthand T
  seu <- seurat_preprocess(seu, scale = TRUE, ...)
  # dimensionality reduction (PCA by default), then clustering on it
  seu <- seurat_reduce_dimensions(seu, check_duplicates = FALSE, reduction = reduction, ...)
  seu <- seurat_cluster(seu = seu, resolution = resolution, reduction = reduction, ...)
  # per-cluster marker genes and enriched pathways
  seu <- find_all_markers(seu, resolution = resolution, reduction = reduction)
  seu <- getEnrichedPathways(seu)
  # annotate low read count category in seurat metadata
  seu <- seuratTools::add_read_count_col(seu)
  # annotate cell cycle scoring to seurat objects
  if (annotate_cell_cycle){
    seu <- annotate_cell_cycle(seu, feature, ...)
  }
  # annotate mitochondrial percentage in seurat metadata
  if (annotate_percent_mito){
    seu <- add_percent_mito(seu, feature, ...)
  }
  return(seu)
}
|
Kruskal-Wallis rank sum test
data: ARRAY and categs
Kruskal-Wallis chi-squared = 59.482, df = 3, p-value = 7.585e-13
AMPDEA BCEIBEA CVEA3
BCEIBEA 2.1e-07 - -
CVEA3 0.99 4.8e-08 -
HHCORandomLPNORM 1.9e-06 0.98 4.6e-07
| /MaFMethodology/R/cec/IGD/10/kruskaloutput.R | no_license | fritsche/hhcopreliminaryresults | R | false | false | 286 | r |
Kruskal-Wallis rank sum test
data: ARRAY and categs
Kruskal-Wallis chi-squared = 59.482, df = 3, p-value = 7.585e-13
AMPDEA BCEIBEA CVEA3
BCEIBEA 2.1e-07 - -
CVEA3 0.99 4.8e-08 -
HHCORandomLPNORM 1.9e-06 0.98 4.6e-07
|
#import libraries
library(dplyr)
library(imputeTS)
library(tidyverse)
library(ggplot2)
library(reshape)
library(GGally)
library(forecast)
#import the dataset to the workspace
dataset.df <- read.csv("googleplaystore.csv")
#Clean data, convert from factor to num type
# NOTE(review): as.numeric(Reviews) on a factor (R < 4.0 default) yields the
# factor level codes, not the review counts -- confirm R version or wrap with
# as.character() first, as done for Installs and Price below.
data.clean <- dataset.df %>%
mutate(
# Eliminate some characters to transform Installs to numeric
Installs = gsub("\\+", "", as.character(Installs)),
Installs = as.numeric(gsub(",", "", Installs)),
# Eliminate M to transform Size to numeric
Size = gsub("M", "", Size),
# Replace cells with k to 0 since it is < 1MB
Size = ifelse(grepl("k", Size), 0, as.numeric(Size)),
# Transform reviews to numeric
Reviews = as.numeric(Reviews),
# Remove currency symbol from Price, change it to numeric
Price = as.numeric(gsub("\\$", "", as.character(Price))),
# Replace "Varies with device" to NA since it is unknown
Min.Android.Ver = gsub("Varies with device", NA, Android.Ver),
# Keep only version number to 1 decimal
Min.Android.Ver = as.numeric(substr(Min.Android.Ver, start = 1, stop = 3)),
# Drop old Android version column
Android.Ver = NULL
)
#check if there are any duplicate records
nrow(data.clean %>% distinct())
#Omit duplicate records
data.clean <- data.clean %>% distinct()
#Replace NA or missing values with mean imputation.
# NOTE(review): na.mean() is the legacy imputeTS name; imputeTS >= 3.0
# renamed it na_mean() -- verify the installed package version.
data.clean$Rating <- na.mean(data.clean$Rating)
data.clean$Size <- na.mean(data.clean$Size)
#check the missing values
sum(is.na(data.clean$Reviews))
#Descriptive statistics
summary(data.clean)
#BoxPlot: installs (log10) by content rating; outliers highlighted in red
ggplot(data.clean,aes(x=Content.Rating, y=log10(Installs))) + scale_y_continuous("Installs, log10-scaling") +
geom_boxplot(outlier.colour = "red")+
geom_point()
#BoxPlot: installs (log10) by app type (free vs paid).
# FIX: the y axis shows Installs, so label it as such; the original label
# said "Type, log10-scaling", a copy-paste error from the x aesthetic.
ggplot(data.clean,aes(x=Type, y=log10(Installs))) + scale_y_continuous("Installs, log10-scaling") +
geom_boxplot(outlier.colour = "red")+
geom_point()
#copy dataframe and standardize on that dataframe
data.final <- data.clean
#create dummy variables (one-hot columns) for Type, Content.Rating, Category
dummy_type <- as.data.frame(model.matrix(~ 0 + Type, data=data.final))
dummy_Content.Rating <- as.data.frame(model.matrix(~ 0 + Content.Rating, data=data.final))
dummy_Category <- as.data.frame(model.matrix(~ 0 + Category, data=data.final))
# NOTE(review): the positional column drops below (-7, -8, -2, -12, -34)
# silently depend on the exact column order of data.clean -- fragile; verify
# against the column layout if the input schema ever changes.
data.final <- cbind(data.final[,-7], dummy_type[,])
data.final <- cbind(data.final[,-8], dummy_Content.Rating[,])
data.final <- cbind(data.final[,-2], dummy_Category[,])
data.final <- data.final[,-12]#drop one dummy type (reference level)
data.final <- data.final[,-12]#drop one dummy content rating (reference level)
data.final <- data.final[,-34]#drop one dummy category (reference level)
#standardize data (scaling itself is currently disabled, see below)
options(scipen=999, digits = 5)
#data.final[,c(3,4,5,6,7,12,13,14,15,16,17)] <- scale(data.final[,c(3,4,5,6,7,12,13,14,15,16,17)])
#choose data for Linear modelling: drop App name and other non-model columns
data.final.new <- data.final[-c(1,8,9,10)]
data.final.new <- data.final.new[-c(6)]
#Heat map between various predictors and target variable;
#reshape::melt turns the correlation matrix into X1/X2/value triples
cor.mat <- round(cor(data.final.new),2)
heatmap(as.matrix(cor.mat),Colv = NA,Rowv = NA)
melted.cor.mat <- melt(cor.mat)
ggplot(melted.cor.mat, aes(x = X1, y = X2, fill = value)) +
geom_tile() +
geom_text(aes(x = X1, y = X2, label = value))
# ----- Model 1: linear regression of Rating on all predictors -----
# select variables for regression
selected.var <- data.final.new
# partition data: 60% train / 40% validation
set.seed(3) # set seed for reproducing the partition
numberOfRows <- nrow(selected.var)
train.index <- sample(numberOfRows, numberOfRows*0.6)
train.df <- selected.var[train.index, ]
valid.df <- selected.var[-train.index, ]
# lm() with . after ~ uses all remaining columns of train.df as predictors.
data.final.lm <- lm(Rating ~ ., data = train.df)
# use options() to ensure numbers are not displayed in scientific notation.
options(scipen = 999)
summary(data.final.lm)
# use predict() to make predictions on the validation set.
data.final.lm.pred <- predict(data.final.lm, valid.df)
options(scipen=999, digits = 5)
# show the first 20 predictions alongside actual ratings and residuals
some.residuals <- valid.df$Rating[1:20] - data.final.lm.pred[1:20]
data.frame("Predicted" = data.final.lm.pred[1:20], "Actual" = valid.df$Rating[1:20],
"Residual" = some.residuals)
#accuracy of the model with all the predictors
accuracy(data.final.lm.pred, valid.df$Rating)
plot(data.final.lm)
# FIX: corrected the misspelled axis label "Predcited"
plot(data.final.lm.pred, valid.df$Rating,xlab = "Actual", ylab = "Predicted")
# use step() to run stepwise regression.
# model - 1 Using backward regression
data.final.lm.backward <- step(data.final.lm, direction = "backward")
summary(data.final.lm.backward) # Which variables were dropped?
data.final.lm.backward.pred <- predict(data.final.lm.backward, valid.df)
accuracy(data.final.lm.backward.pred, valid.df$Rating)
# model 1 - using forward regression
# create model with no predictors (intercept only)
data.final.lm.null <- lm(Rating~1, data = train.df)
# use step() to run forward regression.
data.final.lm.forward <- step(data.final.lm.null, scope=list(lower=data.final.lm.null, upper=data.final.lm), direction = "forward")
summary(data.final.lm.forward) # Which variables were added?
data.final.lm.forward.pred <- predict(data.final.lm.forward, valid.df)
accuracy(data.final.lm.forward.pred, valid.df$Rating)
# model 1 - using stepwise
# use step() to run stepwise regression.
data.final.lm.stepwise <- step(data.final.lm, direction = "both")
summary(data.final.lm.stepwise) # Which variables were dropped/added?
data.final.lm.stepwise.pred <- predict(data.final.lm.stepwise, valid.df)
# BUG FIX: the original called accuracy() on the undefined object
# `data.final.lm.step.pred`; the predictions are in data.final.lm.stepwise.pred.
accuracy(data.final.lm.stepwise.pred, valid.df$Rating)
# coefficients of the selected stepwise model
round(coefficients(data.final.lm.stepwise),5)
#Second model: Rating ~ Reviews + Size + CategoryFAMILY + Price +
#              Content.RatingTeen + TypeFree
#(comment corrected: the formula uses CategoryFAMILY, not CategoryGame)
data.final.lm2 <- lm(Rating ~ Reviews+Size+CategoryFAMILY+Price+Content.RatingTeen+TypeFree, data = train.df)
# use options() to ensure numbers are not displayed in scientific notation.
options(scipen = 999)
summary(data.final.lm2)
# use predict() to make predictions on the validation set.
data.final.lm.pred2 <- predict(data.final.lm2, valid.df)
options(scipen=999, digits = 5)
# first 20 predictions alongside actual ratings and residuals
some.residuals <- valid.df$Rating[1:20] - data.final.lm.pred2[1:20]
data.frame("Predicted" = data.final.lm.pred2[1:20], "Actual" = valid.df$Rating[1:20],
"Residual" = some.residuals)
#accuracy of the model
accuracy(data.final.lm.pred2, valid.df$Rating)
plot(data.final.lm2)
# FIX: corrected the misspelled axis label "Predcited"
plot(data.final.lm.pred2, valid.df$Rating,xlab = "Actual", ylab = "Predicted")
# use step() to run stepwise regression.
# model - 2 Using backward regression
data.final.lm.backward2 <- step(data.final.lm2, direction = "backward")
summary(data.final.lm.backward2) # Which variables were dropped?
data.final.lm.backward2.pred <- predict(data.final.lm.backward2, valid.df)
accuracy(data.final.lm.backward2.pred, valid.df$Rating)
# model 2 - using stepwise
# use step() to run stepwise regression.
data.final.lm.stepwise2 <- step(data.final.lm2, direction = "both")
summary(data.final.lm.stepwise2) # Which variables were dropped/added?
data.final.lm.stepwise2.pred <- predict(data.final.lm.stepwise2, valid.df)
accuracy(data.final.lm.stepwise2.pred, valid.df$Rating)
#Third model: Rating ~ Reviews + Size + CategoryGAME + Price +
#             Content.RatingTeen + TypeFree
#(comment corrected to match the actual formula below)
data.final.lm3 <- lm(Rating ~ Reviews+Size+CategoryGAME+Price+Content.RatingTeen+TypeFree, data = train.df)
# use options() to ensure numbers are not displayed in scientific notation.
options(scipen = 999)
summary(data.final.lm3)
# use predict() to make predictions on the validation set.
data.final.lm.pred3 <- predict(data.final.lm3, valid.df)
options(scipen=999, digits = 5)
# BUG FIX: residuals must use THIS model's predictions (pred3); the original
# subtracted model 1's predictions (data.final.lm.pred), making the Residual
# column inconsistent with the Predicted column shown below.
some.residuals <- valid.df$Rating[1:20] - data.final.lm.pred3[1:20]
data.frame("Predicted" = data.final.lm.pred3[1:20], "Actual" = valid.df$Rating[1:20],
"Residual" = some.residuals)
#accuracy of the model
accuracy(data.final.lm.pred3, valid.df$Rating)
coefficients(data.final.lm3)
plot(data.final.lm3)
#plot(data.final.lm.pred3, valid.df$Rating, xlab = "Actual", ylab = "Predicted")
# use step() to run stepwise regression.
# model - 3 Using backward regression
data.final.lm.backward3 <- step(data.final.lm3, direction = "backward")
summary(data.final.lm.backward3) # Which variables were dropped?
data.final.lm.backward3.pred <- predict(data.final.lm.backward3, valid.df)
accuracy(data.final.lm.backward3.pred, valid.df$Rating)
# model 3 - using stepwise
# use step() to run stepwise regression.
data.final.lm.stepwise3 <- step(data.final.lm3, direction = "both")
summary(data.final.lm.stepwise3) # Which variables were dropped/added?
data.final.lm.stepwise3.pred <- predict(data.final.lm.stepwise3, valid.df)
accuracy(data.final.lm.stepwise3.pred, valid.df$Rating)
#Fourth model: Rating ~ Reviews + Size + CategoryGAME + Price +
#              Content.RatingEveryone + TypeFree
#(stray "Third model" comments removed; corrected to match the formula)
data.final.lm4 <- lm(Rating ~ Reviews+Size+CategoryGAME+Price+Content.RatingEveryone+TypeFree, data = train.df)
#CategoryFAMILY+CategoryART_AND_DESIGN
# use options() to ensure numbers are not displayed in scientific notation.
options(scipen = 999)
summary(data.final.lm4)
# use predict() to make predictions on the validation set.
data.final.lm.pred4 <- predict(data.final.lm4, valid.df)
options(scipen=999, digits = 5)
# BUG FIX: residuals must use THIS model's predictions (pred4); the original
# subtracted model 1's predictions (data.final.lm.pred), making the Residual
# column inconsistent with the Predicted column shown below.
some.residuals <- valid.df$Rating[1:20] - data.final.lm.pred4[1:20]
data.frame("Predicted" = data.final.lm.pred4[1:20], "Actual" = valid.df$Rating[1:20],
"Residual" = some.residuals)
#accuracy of the model
accuracy(data.final.lm.pred4, valid.df$Rating)
plot(data.final.lm4)
# FIX: corrected the misspelled axis label "Predcited"
plot(data.final.lm.pred4, valid.df$Rating, xlab = "Actual", ylab = "Predicted")
# use step() to run stepwise regression.
# model - 4 Using backward regression
data.final.lm.backward4 <- step(data.final.lm4, direction = "backward")
summary(data.final.lm.backward4) # Which variables were dropped?
data.final.lm.backward4.pred <- predict(data.final.lm.backward4, valid.df)
accuracy(data.final.lm.backward4.pred, valid.df$Rating)
# model 4 - using stepwise
# use step() to run stepwise regression.
data.final.lm.stepwise4 <- step(data.final.lm4, direction = "both")
summary(data.final.lm.stepwise4) # Which variables were dropped/added?
data.final.lm.stepwise4.pred <- predict(data.final.lm.stepwise4, valid.df)
accuracy(data.final.lm.stepwise4.pred, valid.df$Rating)
| /ProjectFinal.R | no_license | neelimayeddula/Googlereviewanalysis | R | false | false | 10,958 | r | #import libraries
library(dplyr)
library(imputeTS)
library(tidyverse)
library(ggplot2)
library(reshape)
library(GGally)
library(forecast)
#import the dataset to the workspace
dataset.df <- read.csv("googleplaystore.csv")
#Clean data, convert from factor to num type
# NOTE(review): as.numeric(Reviews) on a factor (R < 4.0 default) yields the
# factor level codes, not the review counts -- confirm R version or wrap with
# as.character() first, as done for Installs and Price below.
data.clean <- dataset.df %>%
mutate(
# Eliminate some characters to transform Installs to numeric
Installs = gsub("\\+", "", as.character(Installs)),
Installs = as.numeric(gsub(",", "", Installs)),
# Eliminate M to transform Size to numeric
Size = gsub("M", "", Size),
# Replace cells with k to 0 since it is < 1MB
Size = ifelse(grepl("k", Size), 0, as.numeric(Size)),
# Transform reviews to numeric
Reviews = as.numeric(Reviews),
# Remove currency symbol from Price, change it to numeric
Price = as.numeric(gsub("\\$", "", as.character(Price))),
# Replace "Varies with device" to NA since it is unknown
Min.Android.Ver = gsub("Varies with device", NA, Android.Ver),
# Keep only version number to 1 decimal
Min.Android.Ver = as.numeric(substr(Min.Android.Ver, start = 1, stop = 3)),
# Drop old Android version column
Android.Ver = NULL
)
#check if there are any duplicate records
nrow(data.clean %>% distinct())
#Omit duplicate records
data.clean <- data.clean %>% distinct()
#Replace NA or missing values with mean imputation.
# NOTE(review): na.mean() is the legacy imputeTS name; imputeTS >= 3.0
# renamed it na_mean() -- verify the installed package version.
data.clean$Rating <- na.mean(data.clean$Rating)
data.clean$Size <- na.mean(data.clean$Size)
#check the missing values
sum(is.na(data.clean$Reviews))
#Descriptive statistics
summary(data.clean)
#BoxPlot: installs (log10) by content rating; outliers highlighted in red
ggplot(data.clean,aes(x=Content.Rating, y=log10(Installs))) + scale_y_continuous("Installs, log10-scaling") +
geom_boxplot(outlier.colour = "red")+
geom_point()
#BoxPlot: installs (log10) by app type (free vs paid).
# FIX: the y axis shows Installs, so label it as such; the original label
# said "Type, log10-scaling", a copy-paste error from the x aesthetic.
ggplot(data.clean,aes(x=Type, y=log10(Installs))) + scale_y_continuous("Installs, log10-scaling") +
geom_boxplot(outlier.colour = "red")+
geom_point()
#copy dataframe and standardize on that dataframe
data.final <- data.clean
#create dummy variables (one-hot columns) for Type, Content.Rating, Category
dummy_type <- as.data.frame(model.matrix(~ 0 + Type, data=data.final))
dummy_Content.Rating <- as.data.frame(model.matrix(~ 0 + Content.Rating, data=data.final))
dummy_Category <- as.data.frame(model.matrix(~ 0 + Category, data=data.final))
# NOTE(review): the positional column drops below (-7, -8, -2, -12, -34)
# silently depend on the exact column order of data.clean -- fragile; verify
# against the column layout if the input schema ever changes.
data.final <- cbind(data.final[,-7], dummy_type[,])
data.final <- cbind(data.final[,-8], dummy_Content.Rating[,])
data.final <- cbind(data.final[,-2], dummy_Category[,])
data.final <- data.final[,-12]#drop one dummy type (reference level)
data.final <- data.final[,-12]#drop one dummy content rating (reference level)
data.final <- data.final[,-34]#drop one dummy category (reference level)
#standardize data (scaling itself is currently disabled, see below)
options(scipen=999, digits = 5)
#data.final[,c(3,4,5,6,7,12,13,14,15,16,17)] <- scale(data.final[,c(3,4,5,6,7,12,13,14,15,16,17)])
#choose data for Linear modelling: drop App name and other non-model columns
data.final.new <- data.final[-c(1,8,9,10)]
data.final.new <- data.final.new[-c(6)]
#Heat map between various predictors and target variable;
#reshape::melt turns the correlation matrix into X1/X2/value triples
cor.mat <- round(cor(data.final.new),2)
heatmap(as.matrix(cor.mat),Colv = NA,Rowv = NA)
melted.cor.mat <- melt(cor.mat)
ggplot(melted.cor.mat, aes(x = X1, y = X2, fill = value)) +
geom_tile() +
geom_text(aes(x = X1, y = X2, label = value))
# ----- Model 1: linear regression of Rating on all predictors -----
# select variables for regression
selected.var <- data.final.new
# partition data: 60% train / 40% validation
set.seed(3) # set seed for reproducing the partition
numberOfRows <- nrow(selected.var)
train.index <- sample(numberOfRows, numberOfRows*0.6)
train.df <- selected.var[train.index, ]
valid.df <- selected.var[-train.index, ]
# lm() with . after ~ uses all remaining columns of train.df as predictors.
data.final.lm <- lm(Rating ~ ., data = train.df)
# use options() to ensure numbers are not displayed in scientific notation.
options(scipen = 999)
summary(data.final.lm)
# use predict() to make predictions on the validation set.
data.final.lm.pred <- predict(data.final.lm, valid.df)
options(scipen=999, digits = 5)
# show the first 20 predictions alongside actual ratings and residuals
some.residuals <- valid.df$Rating[1:20] - data.final.lm.pred[1:20]
data.frame("Predicted" = data.final.lm.pred[1:20], "Actual" = valid.df$Rating[1:20],
"Residual" = some.residuals)
#accuracy of the model with all the predictors
accuracy(data.final.lm.pred, valid.df$Rating)
plot(data.final.lm)
# FIX: corrected the misspelled axis label "Predcited"
plot(data.final.lm.pred, valid.df$Rating,xlab = "Actual", ylab = "Predicted")
# use step() to run stepwise regression.
# model - 1 Using backward regression
data.final.lm.backward <- step(data.final.lm, direction = "backward")
summary(data.final.lm.backward) # Which variables were dropped?
data.final.lm.backward.pred <- predict(data.final.lm.backward, valid.df)
accuracy(data.final.lm.backward.pred, valid.df$Rating)
# model 1 - using forward regression
# create model with no predictors (intercept only)
data.final.lm.null <- lm(Rating~1, data = train.df)
# use step() to run forward regression.
data.final.lm.forward <- step(data.final.lm.null, scope=list(lower=data.final.lm.null, upper=data.final.lm), direction = "forward")
summary(data.final.lm.forward) # Which variables were added?
data.final.lm.forward.pred <- predict(data.final.lm.forward, valid.df)
accuracy(data.final.lm.forward.pred, valid.df$Rating)
# model 1 - using stepwise
# use step() to run stepwise regression.
data.final.lm.stepwise <- step(data.final.lm, direction = "both")
summary(data.final.lm.stepwise) # Which variables were dropped/added?
data.final.lm.stepwise.pred <- predict(data.final.lm.stepwise, valid.df)
# BUG FIX: the original called accuracy() on the undefined object
# `data.final.lm.step.pred`; the predictions are in data.final.lm.stepwise.pred.
accuracy(data.final.lm.stepwise.pred, valid.df$Rating)
# coefficients of the selected stepwise model
round(coefficients(data.final.lm.stepwise),5)
#Second model Reviews+Size+CategoryGame
# use lm() to run a linear regression of Ratings on all predictors in the
# use . after ~ to include all the remaining columns in train.df as predictors.
data.final.lm2 <- lm(Rating ~ Reviews+Size+CategoryFAMILY+Price+Content.RatingTeen+TypeFree, data = train.df)
# use options() to ensure numbers are not displayed in scientific notation.
options(scipen = 999)
summary(data.final.lm2)
# use predict() to make predictions on a new set.
data.final.lm.pred2 <- predict(data.final.lm2, valid.df)
options(scipen=999, digits = 5)
some.residuals <- valid.df$Rating[1:20] - data.final.lm.pred2[1:20]
data.frame("Predicted" = data.final.lm.pred2[1:20], "Actual" = valid.df$Rating[1:20],
"Residual" = some.residuals)
#accurcy of the model with all the predictors
accuracy(data.final.lm.pred2, valid.df$Rating)
plot(data.final.lm2)
plot(data.final.lm.pred2, valid.df$Rating,xlab = "Actual", ylab = "Predcited")
# use step() to run stepwise regression.
# model - 2 Using backward regression
data.final.lm.backward2 <- step(data.final.lm2, direction = "backward")
summary(data.final.lm.backward2) # Which variables were dropped?
data.final.lm.backward2.pred <- predict(data.final.lm.backward2, valid.df)
accuracy(data.final.lm.backward2.pred, valid.df$Rating)
# model 2 - using stepwise
# use step() to run stepwise regression.
data.final.lm.stepwise2 <- step(data.final.lm2, direction = "both")
summary(data.final.lm.stepwise2) # Which variables were dropped/added?
data.final.lm.stepwise2.pred <- predict(data.final.lm.stepwise2, valid.df)
accuracy(data.final.lm.stepwise2.pred, valid.df$Rating)
#Third model Reviews+Size+CategoryGame+CategorySPORTS+Price+CategoryTravel_and_local
# use lm() to run a linear regression of Ratings on all predictors in the
# use . after ~ to include all the remaining columns in train.df as predictors.
data.final.lm3 <- lm(Rating ~ Reviews+Size+CategoryGAME+Price+Content.RatingTeen+TypeFree, data = train.df)
# use options() to ensure numbers are not displayed in scientific notation.
options(scipen = 999)
summary(data.final.lm3)
# use predict() to make predictions on a new set.
data.final.lm.pred3 <- predict(data.final.lm3, valid.df)
options(scipen=999, digits = 5)
some.residuals <- valid.df$Rating[1:20] - data.final.lm.pred[1:20]
data.frame("Predicted" = data.final.lm.pred3[1:20], "Actual" = valid.df$Rating[1:20],
"Residual" = some.residuals)
#accurcy of the model with all the predictors
accuracy(data.final.lm.pred3, valid.df$Rating)
coefficients(data.final.lm3)
plot(data.final.lm3)
#plot(data.final.lm.pred2, valid.df$Rating, xlab = "Actual", ylab = "Predcited")
# use step() to run stepwise regression.
# model - 3 Using backward regression
data.final.lm.backward3 <- step(data.final.lm3, direction = "backward")
summary(data.final.lm.backward3) # Which variables were dropped?
data.final.lm.backward3.pred <- predict(data.final.lm.backward3, valid.df)
accuracy(data.final.lm.backward3.pred, valid.df$Rating)
# model 3 - using stepwise
# use step() to run stepwise regression.
data.final.lm.stepwise3 <- step(data.final.lm3, direction = "both")
summary(data.final.lm.stepwise3) # Which variables were dropped/added?
data.final.lm.stepwise3.pred <- predict(data.final.lm.stepwise3, valid.df)
accuracy(data.final.lm.stepwise3.pred, valid.df$Rating)
#Fourth Model
#Third model Reviews+Size+CategoryGame+CategorySPORTS+Price+CategoryTravel_and_local
# use lm() to run a linear regression of Ratings on all predictors in the
# use . after ~ to include all the remaining columns in train.df as predictors.
data.final.lm4 <- lm(Rating ~ Reviews+Size+CategoryGAME+Price+Content.RatingEveryone+TypeFree, data = train.df)
#CategoryFAMILY+CategoryART_AND_DESIGN
# use options() to ensure numbers are not displayed in scientific notation.
options(scipen = 999)
summary(data.final.lm4)
# use predict() to make predictions on a new set.
data.final.lm.pred4 <- predict(data.final.lm4, valid.df)
options(scipen=999, digits = 5)
some.residuals <- valid.df$Rating[1:20] - data.final.lm.pred[1:20]
data.frame("Predicted" = data.final.lm.pred4[1:20], "Actual" = valid.df$Rating[1:20],
"Residual" = some.residuals)
#accurcay of the model with all the predictors
accuracy(data.final.lm.pred4, valid.df$Rating)
plot(data.final.lm4)
plot(data.final.lm.pred4, valid.df$Rating, xlab = "Actual", ylab = "Predcited")
# use step() to run stepwise regression.
# model - 4 Using backward regression
data.final.lm.backward4 <- step(data.final.lm4, direction = "backward")
summary(data.final.lm.backward4) # Which variables were dropped?
data.final.lm.backward4.pred <- predict(data.final.lm.backward4, valid.df)
accuracy(data.final.lm.backward4.pred, valid.df$Rating)
# model 4 - using stepwise
# use step() to run stepwise regression.
data.final.lm.stepwise4 <- step(data.final.lm4, direction = "both")
summary(data.final.lm.stepwise4) # Which variables were dropped/added?
data.final.lm.stepwise4.pred <- predict(data.final.lm.stepwise4, valid.df)
accuracy(data.final.lm.stepwise4.pred, valid.df$Rating)
|
library(caTools)
library(caret)
library(naivebayes)
train <- cd[indices, ]
test <- cd[!indices, ]
train$flag<-as.factor(train$flag)
test$flag<-as.factor(test$flag)
nb <- naive_bayes(flag ~ ., train)
summary(nb)
predict(nb, test[,-1], type = "class")
ControlParamteres <- trainControl(method = "cv",
number = 5,
laplace = 0,
usekernel=TRUE,
usepoisson = TRUE,
search = "grid"
)
parametersGrid <- expand.grid(eta = 0.1,
colsample_bytree=c(0.5,0.7),
max_depth=c(50,100),
nrounds=100,
gamma=1,
min_child_weight=2,subsample = c(0.5, 0.6))
train$flag <- as.factor(ifelse(train$flag == 1, "Yes", "No"))
test$flag <- as.factor(ifelse(test$flag == 1, "Yes", "No"))
library(doParallel)
library(foreach)
### Register parallel backend
cl <- makeCluster(detectCores())
registerDoParallel(cl)
getDoParWorkers()
modelxgboost <- caret::train(flag~.,
data = train,
method = "naive_bayes",
trControl = ControlParamteres,
tuneGrid=parametersGrid)
stopCluster(cl)
modelxgboost
prediction<-stats::predict(modelxgboost,test[,-1],positive="Yes")
test_conf <- caret::confusionMatrix(data=as.factor(prediction), reference=as.factor(test$flag),positive="Yes")
test_conf
#Accuracy : 0.7912
#Sensitivity : 0.7143
#Specificity : 0.8571
library(ROCR)
library(Comp2ROC)
test_cutoff_churn <- ifelse(prediction=="Yes",1,0)
test_actual_churn <- ifelse(test$flag=="Yes",1,0)
modeltests(test_cutoff_churn , test_actual_churn)
# KS Statistics Score: 0.57
# Area Under ROC Curve: 0.79
# GINI: 0.57
#F1 Score
round(F1_Score(test_actual_churn, test_cutoff_churn),digits = 2)
#0.82
# Lift & Gain Chart
actual_response <- factor(test_actual_churn)
test_cutoff_churn<-factor(test_cutoff_churn)
Churn_decile = lift(actual_response, test_cutoff_churn, groups = 10)
Churn_decile
#6 9 3 34 81.0 1.35
| /naive_bayes-gridsearch.R | no_license | yogesh-ds/msc | R | false | false | 2,296 | r | library(caTools)
library(caret)
library(naivebayes)
train <- cd[indices, ]
test <- cd[!indices, ]
train$flag<-as.factor(train$flag)
test$flag<-as.factor(test$flag)
nb <- naive_bayes(flag ~ ., train)
summary(nb)
predict(nb, test[,-1], type = "class")
ControlParamteres <- trainControl(method = "cv",
number = 5,
laplace = 0,
usekernel=TRUE,
usepoisson = TRUE,
search = "grid"
)
parametersGrid <- expand.grid(eta = 0.1,
colsample_bytree=c(0.5,0.7),
max_depth=c(50,100),
nrounds=100,
gamma=1,
min_child_weight=2,subsample = c(0.5, 0.6))
train$flag <- as.factor(ifelse(train$flag == 1, "Yes", "No"))
test$flag <- as.factor(ifelse(test$flag == 1, "Yes", "No"))
library(doParallel)
library(foreach)
### Register parallel backend
cl <- makeCluster(detectCores())
registerDoParallel(cl)
getDoParWorkers()
modelxgboost <- caret::train(flag~.,
data = train,
method = "naive_bayes",
trControl = ControlParamteres,
tuneGrid=parametersGrid)
stopCluster(cl)
modelxgboost
prediction<-stats::predict(modelxgboost,test[,-1],positive="Yes")
test_conf <- caret::confusionMatrix(data=as.factor(prediction), reference=as.factor(test$flag),positive="Yes")
test_conf
#Accuracy : 0.7912
#Sensitivity : 0.7143
#Specificity : 0.8571
library(ROCR)
library(Comp2ROC)
test_cutoff_churn <- ifelse(prediction=="Yes",1,0)
test_actual_churn <- ifelse(test$flag=="Yes",1,0)
modeltests(test_cutoff_churn , test_actual_churn)
# KS Statistics Score: 0.57
# Area Under ROC Curve: 0.79
# GINI: 0.57
#F1 Score
round(F1_Score(test_actual_churn, test_cutoff_churn),digits = 2)
#0.82
# Lift & Gain Chart
actual_response <- factor(test_actual_churn)
test_cutoff_churn<-factor(test_cutoff_churn)
Churn_decile = lift(actual_response, test_cutoff_churn, groups = 10)
Churn_decile
#6 9 3 34 81.0 1.35
|
# -*- tab-width:2;indent-tabs-mode:t;show-trailing-whitespace:t;rm-trailing-spaces:t -*-
# vi: set ts=2 noet:
#
# (c) Copyright Rosetta Commons Member Institutions.
# (c) This file is part of the Rosetta software suite and is made available under license.
# (c) The Rosetta software is developed by the contributing members of the Rosetta Commons.
# (c) For more information, see http://www.rosettacommons.org. Questions about this can be
# (c) addressed to University of Washington UW TechTransfer, email: license@u.washington.edu.
library(reshape2)
library(ggplot2)
source("../../plots/hbonds/hbond_geo_dim_scales.R")
feature_analyses <- c(feature_analyses, methods::new("FeaturesAnalysis",
id = "AHD_cdf_75",
author = "Matthew O'Meara",
brief_description = "",
feature_reporter_dependencies = c("HBondFeatures"),
run=function(self, sample_sources, output_dir, output_formats){
sele <-"
SELECT
geom.cosAHD,
acc.HBChemType AS acc_chem_type,
don.HBChemType AS don_chem_type
FROM
hbonds AS hb,
hbond_geom_coords AS geom,
hbond_sites AS don,
hbond_sites AS acc,
hbond_sites_pdb AS don_pdb,
hbond_sites_pdb AS acc_pdb
WHERE
geom.struct_id = hb.struct_id AND geom.hbond_id = hb.hbond_id AND
don.struct_id = hb.struct_id AND don.site_id = hb.don_id AND
acc.struct_id = hb.struct_id AND acc.site_id = hb.acc_id AND
don_pdb.struct_id = hb.struct_id AND don_pdb.site_id = hb.don_id AND
don_pdb.heavy_atom_temperature < 30 AND
acc_pdb.struct_id = hb.struct_id AND acc_pdb.site_id = hb.acc_id AND
acc_pdb.heavy_atom_temperature < 30 AND
ABS(don.resNum - acc.resNum) > 4;"
f <- query_sample_sources(sample_sources, sele)
f <- transform(f,
don_chem_type_name = don_chem_type_name_linear(don_chem_type),
acc_chem_type_name = acc_chem_type_name_linear(acc_chem_type),
AHD=acos(cosAHD))
f <- na.omit(f, method="r")
ref_ss <- sample_sources[sample_sources$reference, "sample_source"]
if(length(ref_ss) != 1) {
stop("ERROR: This analysis script requires a single reference sample source")
}
new_ss <- sample_sources[!sample_sources$reference,"sample_source"]
cdf.don <- compute_quantiles(f[f$acc_chem_type != "hbacc_PBA",], c("sample_source", "don_chem_type_name"), "AHD")
names(cdf.don)[2] <- "chem_type_name"
cdf.acc <- compute_quantiles(f[f$don_chem_type != "hbdon_PBA",], c("sample_source", "acc_chem_type_name"), "AHD")
names(cdf.acc)[2] <- "chem_type_name"
cdf.chem <- rbind(cdf.don, cdf.acc)
cdf.chem$quantiles <- cdf.chem$quantiles * 180/pi
t <- dcast(cdf.chem, chem_type_name + probs ~ sample_source, value="quantiles")
table_id <- "AHD_cdf_don_or_acc_chem_type"
table_title <- "A-H-D Angle containing 75% of H-Bonds (Degrees)\nB-Factor < 30, SeqSep > 4, SC-Partner"
save_tables(self,
t, table_id,
sample_sources, output_dir, output_formats,
caption=table_title, caption.placement="top")
##########################
cdf.ref <- cdf.chem[cdf.chem$sample_source == ref_ss,c("sample_source", "chem_type_name", "quantiles")]
names(cdf.ref)[1] <- "ref_sample_source"
names(cdf.ref)[3] <- "ref_quantile"
cdf.new <- cdf.chem[cdf.chem$sample_source %in% new_ss,c("sample_source", "chem_type_name", "quantiles")]
names(cdf.new)[1] <- "new_sample_source"
names(cdf.new)[3] <- "new_quantile"
cdf.chem_ref_new <- merge(cdf.new, cdf.ref)
plot_id <- "AHD_cdf_don_or_acc_chem_type_qq"
p <- ggplot(data=cdf.chem_ref_new) + theme_bw() +
geom_abline(slope=1) +
geom_point(aes(x=ref_quantile, y=new_quantile, colour=new_sample_source)) +
coord_equal(ratio=1) +
scale_x_continuous(
paste("A-H-D Angle (Degrees), Ref: ", ref_ss, sep="")) +
scale_y_continuous(paste("A-H-D Angle Candidate", sep="")) +
scale_colour_discrete("") +
ggtitle("A-H-D Angle Containing 75% of H-Bonds\n\nB-Factor < 30, SeqSep > 4, SC-Partner")
if(length(new_ss) <= 3){
p <- p + theme(legend.position="bottom", legend.direction="horizontal")
}
alt_output_formats <- transform(output_formats, width=height*.65)
save_plots(self, plot_id, sample_sources, output_dir, alt_output_formats)
plot_id <- "AHD_cdf_don_or_acc_chem_type_qq_text"
p <- ggplot(data=cdf.chem_ref_new) + theme_bw() +
geom_abline(slope=1) +
geom_text(aes(x=ref_quantile, y=new_quantile, colour=new_sample_source, label=chem_type_name), size=2) +
# coord_equal(ratio=1) +
scale_x_continuous(
paste("A-H-D Angle (Degrees), Ref: ", ref_ss, sep="")) +
scale_y_continuous(paste("A-H-D Angle Candidate", sep="")) +
scale_colour_discrete("") +
ggtitle("A-H-D Angle Containing 75% of H-Bonds\n\nB-Factor < 30, SeqSep > 4, SC-Partner")
if(length(new_ss) <= 3){
p <- p + theme(legend.position="bottom", legend.direction="horizontal")
}
#alt_output_formats <- transform(output_formats, width=height*.65)
save_plots(self, plot_id, sample_sources, output_dir, output_formats)
#
#######################################
#
#modes.all <- estimate_primary_modes_1d(f, c("sample_source", "don_chem_type_name", "acc_chem_type_name"), "AHdist")
#
#t <- dcast(modes.all, don_chem_type_name + acc_chem_type_name ~ sample_source, value="primary_mode")
#
#ref_ss <- sample_sources[sample_sources$reference, "sample_source"]
#if(length(ref_ss) != 1) {
# stop("ERROR: This analysis script requires a single reference sample source")
#}
#new_ss <- sample_sources[!sample_sources$reference,"sample_source"]
#
#modes.all.ref <- modes.all[modes.all$sample_source == ref_ss,]
#names(modes.all.ref)[1] <- "ref_sample_source"
#names(modes.all.ref)[4] <- "ref_primary_mode"
#modes.all.new <- modes.all[modes.all$sample_source %in% new_ss,]
#names(modes.all.new)[1] <- "new_sample_source"
#names(modes.all.new)[4] <- "new_primary_mode"
#modes.all <- merge(modes.all.new, modes.all.ref)
#
#plot_id <- "AHdist_primary_mode_by_don_acc_chem_type"
#p <- ggplot(data=modes.all) + theme_bw() +
# geom_abline(slope=1) +
# geom_point(aes(x=ref_primary_mode, y=new_primary_mode, colour=new_sample_source)) +
# coord_equal(ratio=1) +
# scale_x_continuous(
# paste("A-H Length (A), Ref: ", ref_ss, sep="")) +
# scale_y_continuous(paste("A-H Length (A) Candidate", sep="")) +
# scale_colour_discrete("") +
# ggtitle("A-H Lengths: Reference vs Candidate Sample Source\nGrouped by Donor and Acceptor Chemical Types")
#
#if(length(new_ss) <= 3){
# p <- p + theme(legend.position="bottom", legend.direction="horizontal")
#}
#
#alt_output_formats <- transform(output_formats, width=height*.8)
#save_plots(self, plot_id, sample_sources, output_dir, alt_output_formats)
#
#
#plot_id <- "AHdist_primary_mode_by_don_acc_chem_type_text"
#p <- ggplot(data=modes.all) + theme_bw() +
# geom_abline(slope=1) +
# geom_text(aes(x=ref_primary_mode, y=new_primary_mode, colour=new_sample_source, label=interaction(don_chem_type_name, acc_chem_type_name, sep="\n")), size=2) +
# coord_equal(ratio=1) +
# scale_x_continuous(
# paste("A-H Length (A), Ref: ", ref_ss, sep="")) +
# scale_y_continuous(paste("A-H Length (A) Candidate", sep="")) +
# scale_colour_discrete("") +
# ggtitle("A-H Lengths: Reference vs Candidate Sample Source\nGrouped by Donor and Acceptor Chemical Types")
#
#if(length(new_ss) <= 3){
# p <- p + theme(legend.position="bottom", legend.direction="horizontal")
#}
#
#alt_output_formats <- transform(output_formats, width=height*.8)
#save_plots(self, plot_id, sample_sources, output_dir, alt_output_formats)
})) # end FeaturesAnalysis
| /inst/scripts/analysis/statistics/hbonds/AHD_cdf_75.R | no_license | momeara/RosettaFeatures | R | false | false | 7,276 | r | # -*- tab-width:2;indent-tabs-mode:t;show-trailing-whitespace:t;rm-trailing-spaces:t -*-
# vi: set ts=2 noet:
#
# (c) Copyright Rosetta Commons Member Institutions.
# (c) This file is part of the Rosetta software suite and is made available under license.
# (c) The Rosetta software is developed by the contributing members of the Rosetta Commons.
# (c) For more information, see http://www.rosettacommons.org. Questions about this can be
# (c) addressed to University of Washington UW TechTransfer, email: license@u.washington.edu.
library(reshape2)
library(ggplot2)
source("../../plots/hbonds/hbond_geo_dim_scales.R")
feature_analyses <- c(feature_analyses, methods::new("FeaturesAnalysis",
id = "AHD_cdf_75",
author = "Matthew O'Meara",
brief_description = "",
feature_reporter_dependencies = c("HBondFeatures"),
run=function(self, sample_sources, output_dir, output_formats){
sele <-"
SELECT
geom.cosAHD,
acc.HBChemType AS acc_chem_type,
don.HBChemType AS don_chem_type
FROM
hbonds AS hb,
hbond_geom_coords AS geom,
hbond_sites AS don,
hbond_sites AS acc,
hbond_sites_pdb AS don_pdb,
hbond_sites_pdb AS acc_pdb
WHERE
geom.struct_id = hb.struct_id AND geom.hbond_id = hb.hbond_id AND
don.struct_id = hb.struct_id AND don.site_id = hb.don_id AND
acc.struct_id = hb.struct_id AND acc.site_id = hb.acc_id AND
don_pdb.struct_id = hb.struct_id AND don_pdb.site_id = hb.don_id AND
don_pdb.heavy_atom_temperature < 30 AND
acc_pdb.struct_id = hb.struct_id AND acc_pdb.site_id = hb.acc_id AND
acc_pdb.heavy_atom_temperature < 30 AND
ABS(don.resNum - acc.resNum) > 4;"
f <- query_sample_sources(sample_sources, sele)
f <- transform(f,
don_chem_type_name = don_chem_type_name_linear(don_chem_type),
acc_chem_type_name = acc_chem_type_name_linear(acc_chem_type),
AHD=acos(cosAHD))
f <- na.omit(f, method="r")
ref_ss <- sample_sources[sample_sources$reference, "sample_source"]
if(length(ref_ss) != 1) {
stop("ERROR: This analysis script requires a single reference sample source")
}
new_ss <- sample_sources[!sample_sources$reference,"sample_source"]
cdf.don <- compute_quantiles(f[f$acc_chem_type != "hbacc_PBA",], c("sample_source", "don_chem_type_name"), "AHD")
names(cdf.don)[2] <- "chem_type_name"
cdf.acc <- compute_quantiles(f[f$don_chem_type != "hbdon_PBA",], c("sample_source", "acc_chem_type_name"), "AHD")
names(cdf.acc)[2] <- "chem_type_name"
cdf.chem <- rbind(cdf.don, cdf.acc)
cdf.chem$quantiles <- cdf.chem$quantiles * 180/pi
t <- dcast(cdf.chem, chem_type_name + probs ~ sample_source, value="quantiles")
table_id <- "AHD_cdf_don_or_acc_chem_type"
table_title <- "A-H-D Angle containing 75% of H-Bonds (Degrees)\nB-Factor < 30, SeqSep > 4, SC-Partner"
save_tables(self,
t, table_id,
sample_sources, output_dir, output_formats,
caption=table_title, caption.placement="top")
##########################
cdf.ref <- cdf.chem[cdf.chem$sample_source == ref_ss,c("sample_source", "chem_type_name", "quantiles")]
names(cdf.ref)[1] <- "ref_sample_source"
names(cdf.ref)[3] <- "ref_quantile"
cdf.new <- cdf.chem[cdf.chem$sample_source %in% new_ss,c("sample_source", "chem_type_name", "quantiles")]
names(cdf.new)[1] <- "new_sample_source"
names(cdf.new)[3] <- "new_quantile"
cdf.chem_ref_new <- merge(cdf.new, cdf.ref)
plot_id <- "AHD_cdf_don_or_acc_chem_type_qq"
p <- ggplot(data=cdf.chem_ref_new) + theme_bw() +
geom_abline(slope=1) +
geom_point(aes(x=ref_quantile, y=new_quantile, colour=new_sample_source)) +
coord_equal(ratio=1) +
scale_x_continuous(
paste("A-H-D Angle (Degrees), Ref: ", ref_ss, sep="")) +
scale_y_continuous(paste("A-H-D Angle Candidate", sep="")) +
scale_colour_discrete("") +
ggtitle("A-H-D Angle Containing 75% of H-Bonds\n\nB-Factor < 30, SeqSep > 4, SC-Partner")
if(length(new_ss) <= 3){
p <- p + theme(legend.position="bottom", legend.direction="horizontal")
}
alt_output_formats <- transform(output_formats, width=height*.65)
save_plots(self, plot_id, sample_sources, output_dir, alt_output_formats)
plot_id <- "AHD_cdf_don_or_acc_chem_type_qq_text"
p <- ggplot(data=cdf.chem_ref_new) + theme_bw() +
geom_abline(slope=1) +
geom_text(aes(x=ref_quantile, y=new_quantile, colour=new_sample_source, label=chem_type_name), size=2) +
# coord_equal(ratio=1) +
scale_x_continuous(
paste("A-H-D Angle (Degrees), Ref: ", ref_ss, sep="")) +
scale_y_continuous(paste("A-H-D Angle Candidate", sep="")) +
scale_colour_discrete("") +
ggtitle("A-H-D Angle Containing 75% of H-Bonds\n\nB-Factor < 30, SeqSep > 4, SC-Partner")
if(length(new_ss) <= 3){
p <- p + theme(legend.position="bottom", legend.direction="horizontal")
}
#alt_output_formats <- transform(output_formats, width=height*.65)
save_plots(self, plot_id, sample_sources, output_dir, output_formats)
#
#######################################
#
#modes.all <- estimate_primary_modes_1d(f, c("sample_source", "don_chem_type_name", "acc_chem_type_name"), "AHdist")
#
#t <- dcast(modes.all, don_chem_type_name + acc_chem_type_name ~ sample_source, value="primary_mode")
#
#ref_ss <- sample_sources[sample_sources$reference, "sample_source"]
#if(length(ref_ss) != 1) {
# stop("ERROR: This analysis script requires a single reference sample source")
#}
#new_ss <- sample_sources[!sample_sources$reference,"sample_source"]
#
#modes.all.ref <- modes.all[modes.all$sample_source == ref_ss,]
#names(modes.all.ref)[1] <- "ref_sample_source"
#names(modes.all.ref)[4] <- "ref_primary_mode"
#modes.all.new <- modes.all[modes.all$sample_source %in% new_ss,]
#names(modes.all.new)[1] <- "new_sample_source"
#names(modes.all.new)[4] <- "new_primary_mode"
#modes.all <- merge(modes.all.new, modes.all.ref)
#
#plot_id <- "AHdist_primary_mode_by_don_acc_chem_type"
#p <- ggplot(data=modes.all) + theme_bw() +
# geom_abline(slope=1) +
# geom_point(aes(x=ref_primary_mode, y=new_primary_mode, colour=new_sample_source)) +
# coord_equal(ratio=1) +
# scale_x_continuous(
# paste("A-H Length (A), Ref: ", ref_ss, sep="")) +
# scale_y_continuous(paste("A-H Length (A) Candidate", sep="")) +
# scale_colour_discrete("") +
# ggtitle("A-H Lengths: Reference vs Candidate Sample Source\nGrouped by Donor and Acceptor Chemical Types")
#
#if(length(new_ss) <= 3){
# p <- p + theme(legend.position="bottom", legend.direction="horizontal")
#}
#
#alt_output_formats <- transform(output_formats, width=height*.8)
#save_plots(self, plot_id, sample_sources, output_dir, alt_output_formats)
#
#
#plot_id <- "AHdist_primary_mode_by_don_acc_chem_type_text"
#p <- ggplot(data=modes.all) + theme_bw() +
# geom_abline(slope=1) +
# geom_text(aes(x=ref_primary_mode, y=new_primary_mode, colour=new_sample_source, label=interaction(don_chem_type_name, acc_chem_type_name, sep="\n")), size=2) +
# coord_equal(ratio=1) +
# scale_x_continuous(
# paste("A-H Length (A), Ref: ", ref_ss, sep="")) +
# scale_y_continuous(paste("A-H Length (A) Candidate", sep="")) +
# scale_colour_discrete("") +
# ggtitle("A-H Lengths: Reference vs Candidate Sample Source\nGrouped by Donor and Acceptor Chemical Types")
#
#if(length(new_ss) <= 3){
# p <- p + theme(legend.position="bottom", legend.direction="horizontal")
#}
#
#alt_output_formats <- transform(output_formats, width=height*.8)
#save_plots(self, plot_id, sample_sources, output_dir, alt_output_formats)
})) # end FeaturesAnalysis
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/binomial.R
\name{bin_mode}
\alias{bin_mode}
\title{bin_mode}
\usage{
bin_mode(trials, prob)
}
\arguments{
\item{trials}{the number of (fixed) trials}
\item{prob}{the probability of success on each trial}
}
\value{
the mode of the binomial distribution
}
\description{
calculates the mode of the binomial distribution
}
\examples{
bin_mode(10, 0.3)
}
| /binomial/man/bin_mode.Rd | no_license | stat133-sp19/hw-stat133-ycycchen | R | false | true | 429 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/binomial.R
\name{bin_mode}
\alias{bin_mode}
\title{bin_mode}
\usage{
bin_mode(trials, prob)
}
\arguments{
\item{trials}{the number of (fixed) trials}
\item{prob}{the probability of success on each trial}
}
\value{
the mode of the binomial distribution
}
\description{
calculates the mode of the binomial distribution
}
\examples{
bin_mode(10, 0.3)
}
|
library(raster)
library(sp)
reprojectAndCrop <- function(raster, boundary, epsg, resolution) {
# crs in proj.4 format
coordSys <- paste("+init=epsg:", epsg, sep = "")
## crop for 30 m resolution to reduce needed computation resources
boundaries_reproj <- spTransform(boundary, crs(raster))
ext <- extent(boundaries_reproj)
raster_cropped <- crop(raster, c(ext[1]-500, ext[2]+500, ext[3]-500, ext[4]+500))
# reproject
reprojectedRaster <- projectRaster(raster_cropped, res=resolution, crs = coordSys, method = 'ngb')
return(reprojectedRaster)
}
makeBinary <- function(ghsl, threshold) {
# determine binary threshold
class.m <- c(0, threshold, 0, threshold, 100, 1)
rcl.m <- matrix(class.m, ncol = 3, byrow = T)
# reclassify
ghsl_threshold <- reclassify(ghsl, rcl.m)
return(ghsl_threshold)
}
getChange <- function(ghsl_early, ghsl_late, boundary, epsg, resolution, threshold) {
# reproject, crop and use threshold on ghsl data
ghsl_e_crop_bin <- makeBinary(reprojectAndCrop(ghsl_early, boundary, epsg, resolution), threshold)
ghsl_l_crop_bin <- makeBinary(reprojectAndCrop(ghsl_late, boundary, epsg, resolution), threshold)
# find built-up change
builtUpChange <- (ghsl_l_crop_bin - ghsl_e_crop_bin)
plot(builtUpChange)
return(builtUpChange)
}
getChangeFromMultitemp <- function(ghsl, boundary, epsg, resolution) {
ghsl_crop <- reprojectAndCrop(ghsl, boundary, epsg, resolution)
# 3-4: changed from 1990 to 2014 -> 1
# 2: not built up in any epoch -> 0
# 0, 1, 5, 6: no data, water, built up before
class.m <- c(0, 1, NA, # 0-1
1, 2, 0, # 2
2, 4, 1, # 3-4
4, 6, NA) # 5-6
rcl.m <- matrix(class.m, ncol = 3, byrow = T)
# reclassify
ghsl_changed <- reclassify(ghsl_crop, rcl.m, include.lowest = T)
plot(ghsl_changed)
return(ghsl_changed)
}
DNtoPercentage <- function(DN) {
# convert pixel values to radians
rad <- (acos(DN/250))
# convert radians to percentage slope
percentage <- (tan(rad)*100)
return(percentage)
}
citySlopeAsPercentage <- function(slope_raster, boundary, epsg) {
# clip and reproject slope dataset
slope_reprojected <- reprojectAndCrop(slope_raster, boundary, epsg, 25)
# convert to percentage
slope_per <- DNtoPercentage(slope_reprojected)
plot(slope_per)
return(slope_per)
}
reclassify_landuse <- function(landuse_raster, year = 1990) {
# 1: artificial
# 2: crop
# 3: pasture
# 4: forest
# 5: open / bare land
# 6: water
if(year == 2000) {
class.m <- c(111, 142, 1,
211, 223, 2,
241, 244, 2,
231, 231, 3,
311, 313, 4,
323, 324, 4,
321, 322, 5,
331, 335, 5,
411, 422, 5,
423, 523, 6)
} else {
class.m <- c(1, 11, 1,
12, 17, 2,
19, 22, 2,
18, 18, 3,
23, 25, 4,
28, 29, 4,
26, 27, 5,
30, 34, 5,
35, 38, 5,
39, 44, 6)
}
rcl.m <- matrix(class.m, ncol = 3, byrow = T)
# +1 in the 'to' column to include this value (interval will be open on the right, to be closed left)
rcl.m[,2] <- rcl.m[,2]+1
# reclassify
landuse_reclassified <- reclassify(landuse_raster, rcl.m, include.lowest=T, right=FALSE)
return(landuse_reclassified)
}
cropAndReclassify_landuse <- function(landuse, boundary, epsg, resolution) {
landuse_crop <- reprojectAndCrop(landuse, boundary, epsg, resolution)
landuse_recl <- reclassify_landuse(landuse_crop)
plot(landuse_recl)
return(landuse_recl)
}
calc_dist_raster <- function(osm, boundaries, resolution, epsg) {
# reproject vector data
coordSys <- paste("+init=epsg:", epsg, sep = "")
osm_reproj <- spTransform(osm, coordSys)
boundaries_reproj <- spTransform(boundaries, coordSys)
# get and increase extent of osm (or other vector) data
ext <- extent(c(extent(boundaries_reproj)[1]-5000, extent(boundaries_reproj)[2]+5000,extent(boundaries_reproj)[3]-5000,extent(boundaries_reproj)[4]+5000))
# create raster template
raster_template <- raster(ext, resolution = resolution, crs = coordSys)
# rasterize vector data
rasterized <- rasterize(osm_reproj, raster_template, field = 1)
# calculate euclidean distances
distances <- distance(rasterized)
city_dist <- reprojectAndCrop(distances, boundaries, epsg, resolution)
plot(city_dist)
return(city_dist)
}
calc_builtup_density <- function(ghsl_30m, boundary, epsg, window_size) {
ghsl_reprojected <- reprojectAndCrop(ghsl_30m, boundary, epsg, 30)
### reclassify to (not) built-up
# 5-6: built before 1990
# 0-4: not built-up berfore 1990
class.m <- c(0, 4, 0,
4, 6, 1)
rcl.m <- matrix(class.m, ncol = 3, byrow = T)
ghsl_changed <- reclassify(ghsl_reprojected, rcl.m, include.lowest = T)
# count cells within 7 x 7 window
builtupCells <- focal(ghsl_changed, w=matrix(1, nc=window_size, nr=window_size))
builtupDensity <- (builtupCells/(window_size*window_size-1))*100
plot(builtupDensity)
return(builtupDensity)
}
# Assemble the predictor stack for urban-growth modelling: the observed
# built-up change layer (response) plus ten candidate driver layers, all
# resampled onto the grid of the change raster and clipped to the boundary.
#
# ghsl_30m: multitemporal GHSL built-up raster (used at 30 m).
# epsg: EPSG code of the target projected CRS.
# boundary: Spatial* polygon of the study area.
# ghsl_pop: GHSL population raster (used at 250 m).
# builtupDens_windowSizw: focal window size (cells) for the built-up density
#   layer. (The typo in the name is kept for backward compatibility.)
# slope, landuse: input rasters; road .. airport: vector layers that are
#   turned into distance rasters at 120 m.
# Returns a RasterStack with named layers, cropped and masked to 'boundary'.
create_stack <- function(ghsl_30m, epsg, boundary, ghsl_pop, builtupDens_windowSizw, slope, landuse, road, primary_road, river, train_stations, city_center, airport) {
  # Response layer: cells that became built-up between epochs (30 m grid).
  change <- getChangeFromMultitemp(ghsl_30m, boundary, epsg, 30)
  # Candidate drivers, each produced at its own working resolution.
  builtup_density <- calc_builtup_density(ghsl_30m, boundary, epsg, builtupDens_windowSizw)
  pop_density <- reprojectAndCrop(ghsl_pop, boundary, epsg, 250)
  slope <- citySlopeAsPercentage(slope, boundary, epsg)
  landuse <- cropAndReclassify_landuse(landuse, boundary, epsg, 100)
  dist_mRoad <- calc_dist_raster(road, boundary, 120, epsg)
  dist_pRoad <- calc_dist_raster(primary_road, boundary, 120, epsg)
  dist_river <- calc_dist_raster(river, boundary, 120, epsg)
  dist_train <- calc_dist_raster(train_stations, boundary, 120, epsg)
  dist_center <- calc_dist_raster(city_center, boundary, 120, epsg)
  dist_airport <- calc_dist_raster(airport, boundary, 120, epsg)
  # Resample every driver onto the grid of 'change' so they can be stacked.
  # Land use is categorical, hence nearest-neighbour resampling.
  builtup_density <- projectRaster(builtup_density, change)
  pop_density <- projectRaster(pop_density, change)
  slope <- projectRaster(slope, change)
  landuse <- projectRaster(landuse, change, method = 'ngb')
  dist_mRoad <- projectRaster(dist_mRoad, change)
  dist_pRoad <- projectRaster(dist_pRoad, change)
  dist_river <- projectRaster(dist_river, change)
  dist_train <- projectRaster(dist_train, change)
  dist_center <- projectRaster(dist_center, change)
  dist_airport <- projectRaster(dist_airport, change)
  change_stack <- stack(change, builtup_density, pop_density, slope, landuse, dist_mRoad, dist_pRoad, dist_river, dist_train, dist_center, dist_airport)
  # Crop and mask to the boundary, reprojected into the target CRS.
  coordSys <- paste("+init=epsg:", epsg, sep = "")
  boundaries_reproj <- spTransform(boundary, coordSys)
  change_stack <- crop(change_stack, boundaries_reproj)
  change_stack <- mask(change_stack, boundaries_reproj)
  names(change_stack) <- c("change", "built_dens", "pop_dens", "slope", "landuse", "mRoads_dist", "pRoads_dist", "river_dist", "train_dist", "center_dist", "airport_dist")
  return(change_stack)
}
| /preparation_functions.R | no_license | jsten07/BachelorThesis | R | false | false | 7,303 | r | library(raster)
library(sp)
# Crop a raster to the study-area boundary (plus a margin) in the raster's
# native CRS, then reproject it to the target EPSG at the given resolution.
# Cropping first keeps the expensive reprojection cheap.
#
# raster: input Raster* layer.
# boundary: Spatial* polygon of the study area.
# epsg: EPSG code of the target CRS.
# resolution: target cell size in target-CRS units.
# buffer: margin added around the boundary extent before cropping, in units
#   of the *input raster's* CRS (default 500, matching the previous
#   hard-coded value). NOTE(review): for a lon/lat input raster this is
#   500 degrees, i.e. effectively no crop — confirm intended for geographic
#   inputs.
# Returns the cropped, reprojected raster. Nearest-neighbour resampling is
# used for all inputs — also continuous ones — as in the original.
reprojectAndCrop <- function(raster, boundary, epsg, resolution, buffer = 500) {
  # Target CRS in proj.4 form.
  coordSys <- paste0("+init=epsg:", epsg)
  # Crop in the raster's own CRS so only the area of interest is reprojected.
  boundaries_reproj <- spTransform(boundary, crs(raster))
  ext <- extent(boundaries_reproj)
  raster_cropped <- crop(raster, c(ext[1] - buffer, ext[2] + buffer,
                                   ext[3] - buffer, ext[4] + buffer))
  projectRaster(raster_cropped, res = resolution, crs = coordSys, method = 'ngb')
}
# Reduce a GHSL layer to a 0/1 mask: values below the threshold become 0
# (not built-up), values from the threshold up to 100 become 1 (built-up).
# Boundary handling follows reclassify()'s default interval semantics.
makeBinary <- function(ghsl, threshold) {
  bins <- matrix(
    c(0, threshold, 0,
      threshold, 100, 1),
    ncol = 3, byrow = TRUE
  )
  reclassify(ghsl, bins)
}
# Built-up change between two GHSL epochs: both rasters are reprojected onto
# the same grid and binarised with the given threshold; their difference is
# 1 where a cell became built-up between the early and late epoch.
# Returns the change raster; also plots it as a side effect.
getChange <- function(ghsl_early, ghsl_late, boundary, epsg, resolution, threshold) {
  early_bin <- makeBinary(reprojectAndCrop(ghsl_early, boundary, epsg, resolution), threshold)
  late_bin <- makeBinary(reprojectAndCrop(ghsl_late, boundary, epsg, resolution), threshold)
  builtUpChange <- late_bin - early_bin
  plot(builtUpChange)
  builtUpChange
}
# Derive a binary built-up-change layer from the multitemporal GHSL raster:
# after crop/reprojection the epoch classes are recoded so that 1 marks
# cells that became built-up between 1990 and 2014, 0 marks cells never
# built-up, and all other classes become NA.
#
# ghsl: multitemporal GHSL raster (epoch classes 0-6).
# boundary: Spatial* polygon of the study area.
# epsg / resolution: target CRS code and cell size for reprojectAndCrop().
# Returns the reclassified raster; also plots it as a side effect.
getChangeFromMultitemp <- function(ghsl, boundary, epsg, resolution) {
  ghsl_crop <- reprojectAndCrop(ghsl, boundary, epsg, resolution)
  # GHSL epoch classes:
  # 3-4: changed from 1990 to 2014 -> 1
  # 2: not built up in any epoch -> 0
  # 0, 1, 5, 6: no data, water, built up before -> NA
  class.m <- c(0, 1, NA, # 0-1
               1, 2, 0, # 2
               2, 4, 1, # 3-4
               4, 6, NA) # 5-6
  rcl.m <- matrix(class.m, ncol = 3, byrow = T)
  # reclassify; include.lowest keeps the very first class boundary (0)
  ghsl_changed <- reclassify(ghsl_crop, rcl.m, include.lowest = T)
  plot(ghsl_changed)
  return(ghsl_changed)
}
# Decode slope raster DN values into percent slope. The DN encodes the
# cosine of the slope angle scaled by 250, so the angle is acos(DN / 250)
# and percent slope is tan(angle) * 100. Vectorised over 'DN'.
DNtoPercentage <- function(DN) {
  slope_angle <- acos(DN / 250)
  tan(slope_angle) * 100
}
# Slope of the study area in percent: reproject/crop the slope raster onto
# a 25 m grid, then decode its DN values into percent slope.
# Returns the percent-slope raster; also plots it as a side effect.
citySlopeAsPercentage <- function(slope_raster, boundary, epsg) {
  slope_per <- DNtoPercentage(reprojectAndCrop(slope_raster, boundary, epsg, 25))
  plot(slope_per)
  slope_per
}
# Collapse detailed land-cover codes into six broad classes:
# 1 artificial, 2 crop, 3 pasture, 4 forest, 5 open/bare land, 6 water.
#
# landuse_raster: land-cover raster; the code legend depends on 'year'.
# year: selects the legend. 2000 uses three-digit codes (these look like
#   CORINE level-3 class codes — confirm against the data source); any
#   other value (default 1990) uses the 1-44 grid-code legend.
# Returns the reclassified raster.
reclassify_landuse <- function(landuse_raster, year = 1990) {
  # target classes:
  # 1: artificial
  # 2: crop
  # 3: pasture
  # 4: forest
  # 5: open / bare land
  # 6: water
  if(year == 2000) {
    class.m <- c(111, 142, 1,
                 211, 223, 2,
                 241, 244, 2,
                 231, 231, 3,
                 311, 313, 4,
                 323, 324, 4,
                 321, 322, 5,
                 331, 335, 5,
                 411, 422, 5,
                 423, 523, 6)
  } else {
    class.m <- c(1, 11, 1,
                 12, 17, 2,
                 19, 22, 2,
                 18, 18, 3,
                 23, 25, 4,
                 28, 29, 4,
                 26, 27, 5,
                 30, 34, 5,
                 35, 38, 5,
                 39, 44, 6)
  }
  rcl.m <- matrix(class.m, ncol = 3, byrow = T)
  # +1 in the 'to' column to include this value (interval will be open on the right, to be closed left)
  rcl.m[,2] <- rcl.m[,2]+1
  # reclassify with left-closed, right-open intervals (right=FALSE)
  landuse_reclassified <- reclassify(landuse_raster, rcl.m, include.lowest=T, right=FALSE)
  return(landuse_reclassified)
}
# Crop/reproject a land-cover raster and collapse its codes into the six
# broad classes of reclassify_landuse().
#
# landuse: land-cover raster.
# boundary / epsg / resolution: passed through to reprojectAndCrop().
# year: legend year forwarded to reclassify_landuse() (default 1990, the
#   previous implicit behaviour; pass 2000 for the three-digit legend,
#   which was unreachable through this wrapper before).
# Returns the reclassified raster; also plots it as a side effect.
cropAndReclassify_landuse <- function(landuse, boundary, epsg, resolution, year = 1990) {
  landuse_crop <- reprojectAndCrop(landuse, boundary, epsg, resolution)
  landuse_recl <- reclassify_landuse(landuse_crop, year = year)
  plot(landuse_recl)
  return(landuse_recl)
}
# Rasterise a vector layer and compute, for every cell, the Euclidean
# distance (in CRS units) to the nearest feature.
#
# osm: Spatial* vector layer (roads, rivers, points of interest, ...).
# boundaries: Spatial* polygon of the study area.
# resolution: cell size of the distance raster, in target-CRS units.
# epsg: EPSG code of the target (projected) CRS.
# Returns the distance raster clipped to the study area; also plots it.
calc_dist_raster <- function(osm, boundaries, resolution, epsg) {
  # reproject vector data into the target CRS
  coordSys <- paste("+init=epsg:", epsg, sep = "")
  osm_reproj <- spTransform(osm, coordSys)
  boundaries_reproj <- spTransform(boundaries, coordSys)
  # pad the boundary extent by 5000 CRS units so features just outside the
  # study area still influence distances near the edge
  ext <- extent(c(extent(boundaries_reproj)[1]-5000, extent(boundaries_reproj)[2]+5000,extent(boundaries_reproj)[3]-5000,extent(boundaries_reproj)[4]+5000))
  # create raster template
  raster_template <- raster(ext, resolution = resolution, crs = coordSys)
  # rasterize vector data: feature cells get value 1, all others NA
  rasterized <- rasterize(osm_reproj, raster_template, field = 1)
  # distance() fills each NA cell with the distance to the nearest non-NA cell
  distances <- distance(rasterized)
  city_dist <- reprojectAndCrop(distances, boundaries, epsg, resolution)
  plot(city_dist)
  return(city_dist)
}
# Share of already built-up (pre-1990) cells in a moving window, in percent.
#
# ghsl_30m: multitemporal GHSL raster, reprojected here onto a 30 m grid.
# boundary / epsg: study area and target CRS for reprojectAndCrop().
# window_size: side length (in cells) of the square focal window.
# Returns the density raster; also plots it as a side effect.
calc_builtup_density <- function(ghsl_30m, boundary, epsg, window_size) {
  ghsl_reprojected <- reprojectAndCrop(ghsl_30m, boundary, epsg, 30)
  ### reclassify to (not) built-up
  # 5-6: built before 1990 -> 1
  # 0-4: not built-up before 1990 -> 0
  class.m <- c(0, 4, 0,
               4, 6, 1)
  rcl.m <- matrix(class.m, ncol = 3, byrow = T)
  ghsl_changed <- reclassify(ghsl_reprojected, rcl.m, include.lowest = T)
  # count built-up cells within a window_size x window_size window
  # (focal() sums over the full window, including the centre cell)
  builtupCells <- focal(ghsl_changed, w=matrix(1, nc=window_size, nr=window_size))
  # NOTE(review): the denominator excludes the centre cell
  # (window_size^2 - 1) while the focal sum includes it, so a fully
  # built-up neighbourhood scores slightly above 100 — confirm intended.
  builtupDensity <- (builtupCells/(window_size*window_size-1))*100
  plot(builtupDensity)
  return(builtupDensity)
}
# Build the modelling stack: built-up change (response) plus ten driver
# layers, aligned onto the change grid and clipped to the study boundary.
# (Parameter name 'builtupDens_windowSizw' is kept as-is for compatibility.)
create_stack <- function(ghsl_30m, epsg, boundary, ghsl_pop, builtupDens_windowSizw, slope, landuse, road, primary_road, river, train_stations, city_center, airport) {
  # Response: cells turned built-up between epochs, on a 30 m grid.
  change <- getChangeFromMultitemp(ghsl_30m, boundary, epsg, 30)
  # Drivers in their final stacking order; the list names double as the
  # layer names of the resulting stack.
  drivers <- list(
    built_dens = calc_builtup_density(ghsl_30m, boundary, epsg, builtupDens_windowSizw),
    pop_dens = reprojectAndCrop(ghsl_pop, boundary, epsg, 250),
    slope = citySlopeAsPercentage(slope, boundary, epsg),
    landuse = cropAndReclassify_landuse(landuse, boundary, epsg, 100),
    mRoads_dist = calc_dist_raster(road, boundary, 120, epsg),
    pRoads_dist = calc_dist_raster(primary_road, boundary, 120, epsg),
    river_dist = calc_dist_raster(river, boundary, 120, epsg),
    train_dist = calc_dist_raster(train_stations, boundary, 120, epsg),
    center_dist = calc_dist_raster(city_center, boundary, 120, epsg),
    airport_dist = calc_dist_raster(airport, boundary, 120, epsg)
  )
  # Resample every driver onto the grid of 'change'; land use is
  # categorical and therefore uses nearest-neighbour resampling.
  aligned <- lapply(names(drivers), function(layer_name) {
    if (layer_name == "landuse") {
      projectRaster(drivers[[layer_name]], change, method = 'ngb')
    } else {
      projectRaster(drivers[[layer_name]], change)
    }
  })
  change_stack <- stack(c(list(change), aligned))
  # Crop and mask to the boundary, reprojected into the target CRS.
  coordSys <- paste("+init=epsg:", epsg, sep = "")
  boundaries_reproj <- spTransform(boundary, coordSys)
  change_stack <- mask(crop(change_stack, boundaries_reproj), boundaries_reproj)
  names(change_stack) <- c("change", names(drivers))
  return(change_stack)
}
|
library(data.table)

# One-off extraction of the raw data (kept commented out):
# unzip("exdata_data_household_power_consumption.zip")

# Read the full dataset; "?" marks missing values in the raw file.
# Note: na.strings = "?" on its own is unreliable — a documented fread
# flaw, see:
# http://r.789695.n4.nabble.com/fread-coercing-to-character-when-seeing-NA-td4677209.html
power <- fread("household_power_consumption.txt", na.strings = c("?"))

# Keep only the two days of interest (dates are stored as d/m/yyyy strings).
psub <- subset(power, Date %in% c("1/2/2007", "2/2/2007"))

# The "?" markers can make fread read the column as character; force numeric.
psub$Global_active_power <- as.numeric(psub$Global_active_power)

# Draw the histogram straight into a 480x480 PNG.
png("plot1.png", width = 480, height = 480)
hist(psub$Global_active_power, col = "red",
     xlab = "Global Active Power (kilowatts)",
     main = "Global Active Power")
dev.off()
| /plot1.R | no_license | bjurban/ExData_Plotting1 | R | false | false | 738 | r | library(data.table)
# Unzip Data (one-off; uncomment on first run)
# unzip("exdata_data_household_power_consumption.zip")
# Read Data; "?" marks missing values in the raw file
power <- fread("household_power_consumption.txt", na.strings=c("?"))
# Note: na.strings ="?" does not work. This is a documented flaw in fread, see:
# http://r.789695.n4.nabble.com/fread-coercing-to-character-when-seeing-NA-td4677209.html
# Subset Data to the two days of interest (dates stored as d/m/yyyy strings)
psub <- subset(power, Date %in% c("1/2/2007", "2/2/2007"))
# Coerce Data to Numeric (fread may have read it as character, see note above)
psub$Global_active_power <- as.numeric(psub$Global_active_power)
# Plot the histogram straight to a 480x480 PNG file
png("plot1.png", width=480, height=480)
with(psub,
     hist(Global_active_power, col="red",
          xlab="Global Active Power (kilowatts)",
          main="Global Active Power"))
dev.off()
|
## ---- include = FALSE---------------------------------------------------------
knitr::opts_chunk$set(
collapse = TRUE,
comment = "#>"
)
## ---- eval = FALSE------------------------------------------------------------
# devtools::install_github("ipeaGIT/r5r", subdir = "r-package")
#
## ---- message = FALSE---------------------------------------------------------
library(r5r)
library(sf)
library(data.table)
library(ggplot2)
library(mapview)
## -----------------------------------------------------------------------------
data_path <- system.file("extdata", package = "r5r")
list.files(data_path)
## -----------------------------------------------------------------------------
points <- fread(system.file("extdata/poa_hexgrid.csv", package = "r5r"))
points <- points[ c(sample(1:nrow(points), 10, replace=TRUE)), ]
head(points)
## ---- message = FALSE, eval = FALSE-------------------------------------------
# options(java.parameters = "-Xmx2G")
## ---- message = FALSE, eval = FALSE-------------------------------------------
# # Indicate the path where OSM and GTFS data are stored
# r5r_core <- setup_r5(data_path = data_path, verbose = FALSE)
#
## ---- message = FALSE, eval = FALSE-------------------------------------------
# # calculate a travel time matrix
# ttm <- travel_time_matrix(r5r_core = r5r_core,
# origins = points,
# destinations = points,
# departure_datetime = lubridate::as_datetime("2019-03-20 14:00:00",
# tz = "America/Sao_Paulo"),
# mode = c("WALK", "TRANSIT"),
# max_walk_dist = 5000,
# max_trip_duration = 120,
# verbose = FALSE)
#
# head(ttm)
## ----ttm head, echo = FALSE, message = FALSE----------------------------------
knitr::include_graphics(system.file("img", "vig_output_ttm.png", package="r5r"))
## ---- message = FALSE, eval = FALSE-------------------------------------------
# # inputs
# points <- read.csv(file.path(data_path, "poa_points_of_interest.csv"))
# origins <- points[10,]
# destinations <- points[12,]
# mode = c("WALK", "TRANSIT")
# max_walk_dist <- 10000
# departure_datetime <- lubridate::as_datetime("2019-03-20 14:00:00",
# tz = "America/Sao_Paulo")
#
# df <- detailed_itineraries(r5r_core = r5r_core,
# origins,
# destinations,
# mode,
# departure_datetime,
# max_walk_dist,
# shortest_path = FALSE,
# verbose = FALSE)
#
# head(df)
## ----detailed head, echo = FALSE, message = FALSE-----------------------------
knitr::include_graphics(system.file("img", "vig_output_detailed.png", package="r5r"))
## ---- message = FALSE, eval = FALSE-------------------------------------------
# # extract OSM network
# street_net <- street_network_to_sf(r5r_core)
#
# # plot
# ggplot() +
# geom_sf(data = street_net$edges, color='gray85') +
# geom_sf(data = df, aes(color=mode)) +
# facet_wrap(.~option) +
# theme_void()
#
## ----ggplot2 output, echo = FALSE, message = FALSE----------------------------
knitr::include_graphics(system.file("img", "vig_detailed_ggplot.png", package="r5r"))
## ---- message = FALSE, eval = FALSE-------------------------------------------
# mapviewOptions(platform = 'leafgl')
# mapview(df, zcol = 'option')
#
## ----mapview output, echo = FALSE, message = FALSE----------------------------
knitr::include_graphics(system.file("img", "vig_detailed_mapview.png", package="r5r"))
| /r-package/doc/intro_to_r5r.R | no_license | juliafagundescoc/r5r | R | false | false | 3,820 | r | ## ---- include = FALSE---------------------------------------------------------
knitr::opts_chunk$set(
collapse = TRUE,
comment = "#>"
)
## ---- eval = FALSE------------------------------------------------------------
# devtools::install_github("ipeaGIT/r5r", subdir = "r-package")
#
## ---- message = FALSE---------------------------------------------------------
library(r5r)
library(sf)
library(data.table)
library(ggplot2)
library(mapview)
## -----------------------------------------------------------------------------
data_path <- system.file("extdata", package = "r5r")
list.files(data_path)
## -----------------------------------------------------------------------------
points <- fread(system.file("extdata/poa_hexgrid.csv", package = "r5r"))
points <- points[ c(sample(1:nrow(points), 10, replace=TRUE)), ]
head(points)
## ---- message = FALSE, eval = FALSE-------------------------------------------
# options(java.parameters = "-Xmx2G")
## ---- message = FALSE, eval = FALSE-------------------------------------------
# # Indicate the path where OSM and GTFS data are stored
# r5r_core <- setup_r5(data_path = data_path, verbose = FALSE)
#
## ---- message = FALSE, eval = FALSE-------------------------------------------
# # calculate a travel time matrix
# ttm <- travel_time_matrix(r5r_core = r5r_core,
# origins = points,
# destinations = points,
# departure_datetime = lubridate::as_datetime("2019-03-20 14:00:00",
# tz = "America/Sao_Paulo"),
# mode = c("WALK", "TRANSIT"),
# max_walk_dist = 5000,
# max_trip_duration = 120,
# verbose = FALSE)
#
# head(ttm)
## ----ttm head, echo = FALSE, message = FALSE----------------------------------
knitr::include_graphics(system.file("img", "vig_output_ttm.png", package="r5r"))
## ---- message = FALSE, eval = FALSE-------------------------------------------
# # inputs
# points <- read.csv(file.path(data_path, "poa_points_of_interest.csv"))
# origins <- points[10,]
# destinations <- points[12,]
# mode = c("WALK", "TRANSIT")
# max_walk_dist <- 10000
# departure_datetime <- lubridate::as_datetime("2019-03-20 14:00:00",
# tz = "America/Sao_Paulo")
#
# df <- detailed_itineraries(r5r_core = r5r_core,
# origins,
# destinations,
# mode,
# departure_datetime,
# max_walk_dist,
# shortest_path = FALSE,
# verbose = FALSE)
#
# head(df)
## ----detailed head, echo = FALSE, message = FALSE-----------------------------
knitr::include_graphics(system.file("img", "vig_output_detailed.png", package="r5r"))
## ---- message = FALSE, eval = FALSE-------------------------------------------
# # extract OSM network
# street_net <- street_network_to_sf(r5r_core)
#
# # plot
# ggplot() +
# geom_sf(data = street_net$edges, color='gray85') +
# geom_sf(data = df, aes(color=mode)) +
# facet_wrap(.~option) +
# theme_void()
#
## ----ggplot2 output, echo = FALSE, message = FALSE----------------------------
knitr::include_graphics(system.file("img", "vig_detailed_ggplot.png", package="r5r"))
## ---- message = FALSE, eval = FALSE-------------------------------------------
# mapviewOptions(platform = 'leafgl')
# mapview(df, zcol = 'option')
#
## ----mapview output, echo = FALSE, message = FALSE----------------------------
knitr::include_graphics(system.file("img", "vig_detailed_mapview.png", package="r5r"))
|
# Scalars of the basic types
s1 <- 10
s2 <- "Hello"
s3 <- FALSE
s4 <- 1 - 3i

# Vectors via c(); c() also concatenates existing vectors
v1 <- c(3, 10, 12)
v2 <- c("Taylor", "Hyuna")
v3 <- c(TRUE, FALSE, TRUE)
v4 <- c(v1, 20, 30)

# Sequences with the colon operator (step is always 1)
v1 <- 1:5
v2 <- 5:1
v3 <- -3.3:5

# seq() and sequence(); sequence(0) yields an empty vector
v1 <- seq(from = 1, to = 5, by = 1)
v5 <- sequence(10)
v4 <- sequence(0)

# rep(): 'times' vs 'each' (identical for a length-1 input)
v1 <- rep("a", times = 5)
v1 <- rep("a", each = 5)
v2 <- rep(c("a", "b"), times = 5, each = 5)

# Named vectors and subsetting
v1 <- c(20, 46, 51)
names(v1) <- c("Hyuna", "Maria", "Taeguen")
v1
weight <- c(11, 12, 13, 14)
weight[1:3]
weight[c(1, 2)]

# Factors: plain, with explicit levels/labels, and ordered
gender <- c("f", "m", "f", "f")
gender
gender_factor <- factor(gender)
gender_factor
levels(gender_factor)
gender_factor2 <- factor(gender, levels = c("f", "m"), labels = c("female", "male"))
gender_factor2
gender_factor3 <- factor(gender, ordered = TRUE)
gender_factor3

# Matrix construction: by row/column binding and via matrix()
v1 <- 1:3
v2 <- 4:6
m1 <- rbind(v1, v2)
m2 <- cbind(v1, v2)
m2
m3 <- matrix(1:4, nrow = 2, ncol = 2)
m3
m4 <- matrix(1:4, nrow = 2, ncol = 2, byrow = TRUE)
m4

# matrix() fills column-wise by default
v1 <- 1:5
m1 <- matrix(1:6, nrow = 2, ncol = 3)
| /rbind,cbind,matrix.R | no_license | Hyuna13/Rstudio | R | false | false | 903 | r | s1 = 10
# Scalars of the basic types
s2 = "Hello"
s3 = FALSE
s4 = 1 - 3i
# Vectors via c(); c() also concatenates existing vectors
v1 = c(3, 10 ,12)
v2 = c("Taylor", "Hyuna")
v3 = c(TRUE, FALSE, TRUE)
v4 = c(v1 ,20, 30)
# Sequences with the colon operator (step is always 1)
v1 = 1:5
v2 = 5:1
v3 = -3.3:5
# seq() and sequence(); sequence(0) yields an empty vector
v1 = seq(from=1, to=5, by=1)
v5 = sequence(10)
v4 = sequence(0)
# rep(): 'times' vs 'each' (identical for a length-1 input)
v1 = rep("a", times=5)
v1 = rep("a", each=5)
v2 = rep(c("a", "b"), times=5, each=5)
# Named vectors and subsetting
v1 = c(20, 46, 51)
names(v1) = c("Hyuna", "Maria", "Taeguen")
v1
weight = c(11,12,13,14)
weight[1:3]
weight[c(1,2)]
# Factors: plain, with explicit levels/labels, and ordered
gender = c('f','m','f','f')
gender
gender_factor = factor(gender)
gender_factor
levels(gender_factor)
gender_factor2 = factor(gender, levels = c('f','m'), labels=c('female','male'))
gender_factor2
gender_factor3 = factor(gender, ordered=TRUE)
gender_factor3
# Matrix construction: by row/column binding and via matrix()
v1 = 1:3
v2 = 4:6
m1 = rbind(v1, v2)
m2 = cbind(v1, v2)
m2
m3 = matrix(1:4, nrow=2, ncol=2)
m3
m4 = matrix(1:4, nrow=2, ncol=2, byrow=TRUE)
m4
# matrix() fills column-wise by default
v1 =1:5
m1 = matrix(1:6, nrow=2, ncol=3)
|
# For each monitor file "001.csv".."332.csv" in 'directory', compute the
# correlation between sulfate and nitrate, keeping only monitors whose
# number of completely observed rows exceeds 'threshold'.
#
# directory: character scalar — folder containing the monitor CSV files.
# threshold: minimum count of complete cases required (default 0); the
#            comparison is strict (>), matching the original behaviour.
# Returns an unrounded numeric vector (numeric(0) if no monitor qualifies).
corr <- function(directory, threshold = 0) {
  ids <- 1:332
  paths <- file.path(directory, sprintf("%03d.csv", ids))
  per_monitor <- lapply(paths, function(path) {
    monitor <- read.csv(path)
    # Completeness is judged across *all* columns (as before), while the
    # correlation itself only drops rows with NA in the two used columns.
    if (sum(complete.cases(monitor)) > threshold) {
      cor(monitor[["sulfate"]], monitor[["nitrate"]], use = "complete.obs")
    } else {
      NULL  # excluded monitor; dropped by unlist() below
    }
  })
  # unlist() drops the NULLs; as.numeric() turns an all-NULL result into
  # numeric(0) instead of NULL, matching the original empty return.
  as.numeric(unlist(per_monitor))
} | /corr.R | no_license | arjun-gndu-2003/rcourseera | R | false | false | 812 | r | corr <- function(directory, threshold = 0) {
## 'directory' is a character vector of length 1 indicating
## the location of the CSV files
## 'threshold' is a numeric vector of length 1 indicating the
## number of completely observed observations (on all
## variables) required to compute the correlation between
## nitrate and sulfate; the default is 0
## Return a numeric vector of correlations
## NOTE: Do not round the result!
# build "<directory>/001.csv" .. "<directory>/332.csv"
id<-1:332
path<-paste(directory,"/",sprintf("%03d",id),".csv",sep="")
crr<-vector(mode="numeric",length = 0)
for (i in 1:length(id))
{
temp<-read.csv(path[i])
# complete cases across *all* columns, not just the two correlated ones
nobs=nrow(temp[complete.cases(temp),])
# strictly greater than the threshold
if (nobs>threshold)
{
# NOTE(review): growing 'crr' with c() in a loop is O(n^2); acceptable for
# 332 files, but preallocation or lapply() would scale better.
crr<-c(crr,cor(temp["sulfate"],temp["nitrate"],use="complete.obs"))
}
}
return(crr)
}
library(pdftools)
library(tidyverse)
library(tabulizer)
library(haven)
library(geojsonio)
library(sf)
# Proof of concept pdf scraping
# extract table sections chosen for extract_areas only include data within "Governorates" section excluding the line with "Governorates"
# File is available in google docs https://drive.google.com/file/d/1jJwf9u04yqiG6GYfIVaBJJqLPXGyobtu/view?usp=sharing
pdf_file <- "Iraq/Iraq 2018 MICS SFR English Volume I - 22 Sep 20.pdf"
wealth_index_quantiles <- extract_areas(pdf_file, pages = 51)
# wealth_index_quantiles <- extract_tables(pdf_file, pages = 51)
wealth_index_quantiles <- wealth_index_quantiles[[1]]
wealth_index_quantiles <- as.data.frame(wealth_index_quantiles)
wealth_index_quantiles <- wealth_index_quantiles %>%
rename(poorest = V2,
second = V3,
middle = V4,
fourth = V5,
richest = V6,
total_wealth = V7,
number_of_household_members = V8,
governorates = V1) %>%
filter_all(all_vars(!is.na(.)))
woman_literacy <- as.data.frame(extract_areas(pdf_file, pages = 60)[[1]])
woman_literacy <- woman_literacy %>%
rename(governorates = V1,
pre_primary_literate = V2,
pre_primary_illiterate = V3,
primary_literate = V4,
primary_illiterate = V5,
lower_secondary_literate = V6,
secondary_or_higher_literate = V7,
total_woman_literacy = V8,
total_percentage_woman_literate = V9,
number_of_women_15_to_49_years = V10
)
childhood_mortality <- as.data.frame(extract_areas(pdf_file, pages = 86)[[1]])
childhood_mortality <- childhood_mortality %>%
rename(
governorates = V1,
neonatal_mortality_rate = V2,
post_natal_mortality_rate = V3,
infant_mortality_rate = V4,
child_mortality_rate = V5,
under_5_mortality_rate = V6
)
# Combine the three scraped tables into one governorate-level data frame.
# "Kerbala" is dropped — presumably a stray duplicate spelling left over
# from the PDF extraction ("Karbalah" below is the row that is kept) —
# TODO confirm against the source tables.
df <- wealth_index_quantiles %>%
  full_join(woman_literacy, by = "governorates") %>%
  full_join(childhood_mortality, by = "governorates") %>%
  filter(governorates != "Kerbala") %>%
  # Harmonise MICS governorate spellings with the geoBoundaries ADM1
  # shapeName values used for the spatial join further below.
  mutate(
    governorates = case_when(
      governorates == "Duhok" ~ "Dohuk",
      governorates == "Nainawa" ~ "Nineveh",
      governorates == "Sulaimaniya" ~ "Sulaymaniyah",
      governorates == "Diala" ~ "Diyala",
      governorates == "Anbar" ~ "Al Anbar",
      governorates == "Karbalah" ~ "Karbala",
      governorates == "Salahaddin" ~ "Saladin",
      governorates == "Qadissiyah" ~ "Al-Qadisiyah",
      governorates == "Muthana" ~ "Muthanna",
      governorates == "Thiqar" ~ "Dhi Qar",
      governorates == "Missan" ~ "Maysan",
      TRUE ~ governorates
    )
  )
write.csv(df, "Iraq/subset_mics.csv")
# From MakeCountryBoundaries.R --------------------------------------------
worldadm1<-topojson_read("https://www.geoboundaries.org/data/geoBoundariesCGAZ-3_0_0/ADM1/simplifyRatio_25/geoBoundariesCGAZ_ADM1.topojson")
IRQ1<-worldadm1%>%
filter(shapeGroup=="IRQ")
IRQ1 <- IRQ1 %>%
left_join(df, by = c("shapeName" = "governorates")) # %>%
# pivot_longer(
# cols = !!(names(df)[2:length(names(df))]),
# names_to = "variable",
# values_to = "measure"
# ) %>%
# mutate(
# var_type = case_when(
# variable %in% names(woman_literacy) ~ "woman_literacy",
# variable %in% names(wealth_index_quantiles) ~ "wealth_index_quantiles",
# variable %in% names(childhood_mortality) ~ "childhood_mortality"
# )
# )
ggplot(IRQ1) +
geom_sf(aes(fill = as.numeric(total_percentage_woman_literate))) +
ggtitle("Total Percentage of Literate Woman") +
labs(fill = "% Womany Literate")
ggplot(IRQ1) +
geom_sf(aes(fill = as.numeric(poorest))) +
ggtitle("% of Goverorate in Poorest Wealth Quinitile") +
labs(fill = "% in Poorest Quintile")
ggplot(IRQ1) +
geom_sf(aes(fill = as.numeric(neonatal_mortality_rate))) +
ggtitle("Neonatal mortality") +
labs(fill = "Neonatal mortality rate")
# woman <- read_sav("Iraq/wm.sav")
# child <- read_sav("Iraq/ch.sav")
# births <- read_sav("Iraq/bh.sav")
# womanchild <- read_sav("Iraq/fs.sav")
# female_mut <- read_sav("Iraq/fg.sav")
# household <- read_sav("Iraq/hh.sav")
# members <- read_sav("Iraq/hl.sav")
# mort <- read_sav("Iraq/mm.sav")
| /Scripts/mics_survey_scraping_iraq.R | permissive | luigi-0/Mar21-humanitarian-data | R | false | false | 4,185 | r | library(pdftools)
library(tidyverse)
library(tabulizer)
library(haven)
library(geojsonio)
library(sf)
# Proof of concept pdf scraping
# extract table sections chosen for extract_areas only include data within "Governorates" section excluding the line with "Governorates"
# File is available in google docs https://drive.google.com/file/d/1jJwf9u04yqiG6GYfIVaBJJqLPXGyobtu/view?usp=sharing
pdf_file <- "Iraq/Iraq 2018 MICS SFR English Volume I - 22 Sep 20.pdf"
wealth_index_quantiles <- extract_areas(pdf_file, pages = 51)
# wealth_index_quantiles <- extract_tables(pdf_file, pages = 51)
wealth_index_quantiles <- wealth_index_quantiles[[1]]
wealth_index_quantiles <- as.data.frame(wealth_index_quantiles)
wealth_index_quantiles <- wealth_index_quantiles %>%
rename(poorest = V2,
second = V3,
middle = V4,
fourth = V5,
richest = V6,
total_wealth = V7,
number_of_household_members = V8,
governorates = V1) %>%
filter_all(all_vars(!is.na(.)))
woman_literacy <- as.data.frame(extract_areas(pdf_file, pages = 60)[[1]])
woman_literacy <- woman_literacy %>%
rename(governorates = V1,
pre_primary_literate = V2,
pre_primary_illiterate = V3,
primary_literate = V4,
primary_illiterate = V5,
lower_secondary_literate = V6,
secondary_or_higher_literate = V7,
total_woman_literacy = V8,
total_percentage_woman_literate = V9,
number_of_women_15_to_49_years = V10
)
childhood_mortality <- as.data.frame(extract_areas(pdf_file, pages = 86)[[1]])
childhood_mortality <- childhood_mortality %>%
rename(
governorates = V1,
neonatal_mortality_rate = V2,
post_natal_mortality_rate = V3,
infant_mortality_rate = V4,
child_mortality_rate = V5,
under_5_mortality_rate = V6
)
df <- wealth_index_quantiles %>%
full_join(woman_literacy, by = "governorates") %>%
full_join(childhood_mortality, by = "governorates") %>%
filter(governorates != "Kerbala") %>%
mutate(
governorates = case_when(
governorates == "Duhok" ~ "Dohuk",
governorates == "Nainawa" ~ "Nineveh",
governorates == "Sulaimaniya" ~ "Sulaymaniyah",
governorates == "Diala" ~ "Diyala",
governorates == "Anbar" ~ "Al Anbar",
governorates == "Karbalah" ~ "Karbala",
governorates == "Salahaddin" ~ "Saladin",
governorates == "Qadissiyah" ~ "Al-Qadisiyah",
governorates == "Muthana" ~ "Muthanna",
governorates == "Thiqar" ~ "Dhi Qar",
governorates == "Missan" ~ "Maysan",
TRUE ~ governorates
)
)
write.csv(df, "Iraq/subset_mics.csv")
# From MakeCountryBoundaries.R --------------------------------------------
worldadm1<-topojson_read("https://www.geoboundaries.org/data/geoBoundariesCGAZ-3_0_0/ADM1/simplifyRatio_25/geoBoundariesCGAZ_ADM1.topojson")
IRQ1<-worldadm1%>%
filter(shapeGroup=="IRQ")
IRQ1 <- IRQ1 %>%
left_join(df, by = c("shapeName" = "governorates")) # %>%
# pivot_longer(
# cols = !!(names(df)[2:length(names(df))]),
# names_to = "variable",
# values_to = "measure"
# ) %>%
# mutate(
# var_type = case_when(
# variable %in% names(woman_literacy) ~ "woman_literacy",
# variable %in% names(wealth_index_quantiles) ~ "wealth_index_quantiles",
# variable %in% names(childhood_mortality) ~ "childhood_mortality"
# )
# )
ggplot(IRQ1) +
geom_sf(aes(fill = as.numeric(total_percentage_woman_literate))) +
ggtitle("Total Percentage of Literate Woman") +
labs(fill = "% Womany Literate")
ggplot(IRQ1) +
geom_sf(aes(fill = as.numeric(poorest))) +
ggtitle("% of Goverorate in Poorest Wealth Quinitile") +
labs(fill = "% in Poorest Quintile")
ggplot(IRQ1) +
geom_sf(aes(fill = as.numeric(neonatal_mortality_rate))) +
ggtitle("Neonatal mortality") +
labs(fill = "Neonatal mortality rate")
# woman <- read_sav("Iraq/wm.sav")
# child <- read_sav("Iraq/ch.sav")
# births <- read_sav("Iraq/bh.sav")
# womanchild <- read_sav("Iraq/fs.sav")
# female_mut <- read_sav("Iraq/fg.sav")
# household <- read_sav("Iraq/hh.sav")
# members <- read_sav("Iraq/hl.sav")
# mort <- read_sav("Iraq/mm.sav")
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/eval_forecasts.R
\name{eval_forecasts}
\alias{eval_forecasts}
\title{Evaluate forecasts}
\usage{
eval_forecasts(
true_values,
predictions,
prediction_type = "probabilistic",
outcome_type = "integer",
metrics = NULL,
output = "df"
)
}
\arguments{
\item{true_values}{A vector with the true observed values of size n}
\item{predictions}{a list of appropriate predictions. Every
item in the list corresponds to the predictions made by one model.
Appropriate dimensions and input types are:
\itemize{
\item for probabilistic integer and continuous forecasts: a matrix or
data.frame of size nxN of predictive samples. n (number of rows) being
the number of observed values to predict and N (number of columns) the
number of Monte Carlo samples
\item for probabilistic binary forecasts: a vector of length n that gives
the probability that the corresponding element in \code{true_values}
will be equal to one.
\item for all point estimates: a vector of size n with predictions for the
corresponding entries of \code{true_values}}}
\item{prediction_type}{Type of the prediction made. Can be either
"probabilistic" or "point" for point predictions.}
\item{outcome_type}{type of the variable to be predicted. Can be either
"integer" or "continuous" or "binary".}
\item{metrics}{what metrics to include. Currently not used, as all metrics
are displayed}
\item{output}{specify the format of the output. Can be either "df" (returns
a single data.frame) or anything else (returns a list of data.frames)}
}
\value{
output option "df" returns a single data.frame with the prediction
results.
Rownames of the data.frame correspond to the metric applied for the scoring.
\code{mean} and \code{sd} are the mean and standard deviations of the scores
achieved by the predictions for every single value of \code{true_values}.
Only in the case of the \code{\link{pit}}, \code{mean} and \code{sd} return
the mean and standard deviation of the Replicates of the Randomised PIT.
If everything else than "df" is specified, the above results are returned
as a list of data.frames for the different metrics.
}
\description{
The function \code{eval_forecasts} is a wrapper that provides
an interface to lower-level functions. It can be used to assess the goodness
of probabilistic or point forecasts to continuous, integer-valued or
binary variables. The lower-level functions accessed are:
\enumerate{
\item \code{\link{eval_forecasts_prob_int}}
\item \code{\link{eval_forecasts_prob_cont}}
\item \code{\link{eval_forecasts_prob_bin}}
\item \code{\link{eval_forecasts_point_int}}
\item \code{\link{eval_forecasts_point_cont}}
\item \code{\link{eval_forecasts_point_bin}}
}
}
\references{
Funk S, Camacho A, Kucharski AJ, Lowe R, Eggo RM, Edmunds WJ
(2019) Assessing the performance of real-time epidemic forecasts: A
case study of Ebola in the Western Area region of Sierra Leone, 2014-15.
PLoS Comput Biol 15(2): e1006785.
\url{https://doi.org/10.1371/journal.pcbi.1006785}
}
\author{
Nikos Bosse \email{nikosbosse@gmail.com}
}
| /man/eval_forecasts.Rd | permissive | laasousa/scoringutils | R | false | true | 3,126 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/eval_forecasts.R
\name{eval_forecasts}
\alias{eval_forecasts}
\title{Evaluate forecasts}
\usage{
eval_forecasts(
true_values,
predictions,
prediction_type = "probabilistic",
outcome_type = "integer",
metrics = NULL,
output = "df"
)
}
\arguments{
\item{true_values}{A vector with the true observed values of size n}
\item{predictions}{a list of appropriate predictions. Every
item in the list corresponds to the predictions made by one model.
Appropriate dimensions and input types are:
\itemize{
\item for probabilistic integer and continuous forecasts: a matrix or
data.frame of size nxN of predictive samples. n (number of rows) being
the number of observed values to predict and N (number of columns) the
number of Monte Carlo samples
\item for probabilistic binary forecasts: a vector of length n that gives
the probability that the corresponding element in \code{true_values}
will be equal to one.
\item for all point estimates: a vector of size n with predictions for the
corresponding entries of \code{true_values}}}
\item{prediction_type}{Type of the prediction made. Can be either
"probabilistic" or "point" for point predictions.}
\item{outcome_type}{type of the variable to be predicted. Can be either
"integer" or "continuous" or "binary".}
\item{metrics}{what metrics to include. Currently not used, as all metrics
are displayed}
\item{output}{specify the format of the output. Can be either "df" (returns
a single data.frame) or anything else (returns a list of data.frames)}
}
\value{
output option "df" returns a single data.frame with the prediction
results.
Rownames of the data.frame correspond to the metric applied for the scoring.
\code{mean} and \code{sd} are the mean and standard deviations of the scores
achieved by the predictions for every single value of \code{true_values}.
Only in the case of the \code{\link{pit}}, \code{mean} and \code{sd} return
the mean and standard deviation of the Replicates of the Randomised PIT.
If everything else than "df" is specified, the above results are returned
as a list of data.frames for the different metrics.
}
\description{
The function \code{eval_forecasts} is a wrapper that provides
an interface to lower-level functions. It can be used to assess the goodness
of probabilistic or point forecasts to continuous, integer-valued or
binary variables. The lower-level functions accessed are:
\enumerate{
\item \code{\link{eval_forecasts_prob_int}}
\item \code{\link{eval_forecasts_prob_cont}}
\item \code{\link{eval_forecasts_prob_bin}}
\item \code{\link{eval_forecasts_point_int}}
\item \code{\link{eval_forecasts_point_cont}}
\item \code{\link{eval_forecasts_point_bin}}
}
}
\references{
Funk S, Camacho A, Kucharski AJ, Lowe R, Eggo RM, Edmunds WJ
(2019) Assessing the performance of real-time epidemic forecasts: A
case study of Ebola in the Western Area region of Sierra Leone, 2014-15.
PLoS Comput Biol 15(2): e1006785.
\url{https://doi.org/10.1371/journal.pcbi.1006785}
}
\author{
Nikos Bosse \email{nikosbosse@gmail.com}
}
|
\name{rp.univar}
\alias{rp.univar}
\title{Descriptive Statistics}
\usage{
rp.univar(x, subset = NULL, fn, na.rm = TRUE, ...)
}
\arguments{
\item{x}{a numeric variable to be summarised}
\item{subset}{an expression that evaluates to logical
vector (defaults to \code{NULL}, in which case the
function specified in \code{fn} is applied on a vector)}
\item{fn}{a function or a function name to be applied on
a variable or its subset}
\item{na.rm}{a logical value indicating whether NA's
should be removed (defaults to \code{TRUE})}
\item{...}{additional arguments for function specified in
\code{fn}}
}
\value{
a numeric
}
\description{
This function operates only on vectors or their subsets,
by calculating a descriptive statistic specified in
\code{fn} argument. Yielded result is rounded to 3
decimal places by default (which can be changed by
passing an integer to \code{decimals} argument).
}
| /man/rp.univar.Rd | no_license | ramnathv/rapport | R | false | false | 936 | rd | \name{rp.univar}
\alias{rp.univar}
\title{Descriptive Statistics}
\usage{
rp.univar(x, subset = NULL, fn, na.rm = TRUE, ...)
}
\arguments{
\item{x}{a numeric variable to be summarised}
\item{subset}{an expression that evaluates to logical
vector (defaults to \code{NULL}, in which case the
function specified in \code{fn} is applied on a vector)}
\item{fn}{a function or a function name to be applied on
a variable or its subset}
\item{na.rm}{a logical value indicating whether NA's
should be removed (defaults to \code{TRUE})}
\item{...}{additional arguments for function specified in
\code{fn}}
}
\value{
a numeric
}
\description{
This function operates only on vectors or their subsets,
by calculating a descriptive statistic specified in
\code{fn} argument. Yielded result is rounded to 3
decimal places by default (which can be changed by
passing an integer to \code{decimals} argument).
}
|
# Resolve a one-dimensional subscript into integer positions.
#
# value - the subscript: numeric, logical, or character vector (or NULL)
# n     - number of elements being subscripted
# names - element names to match character subscripts against
# get   - TRUE for a read (unknown names are errors), FALSE for a write
#         (unknown names are appended after position `n`)
#
# Returns an integer index vector (named for character subscripts),
# NULL for a missing/NULL subscript.
arg_subscript <- function(value, n, names, get)
{
    ## A missing or NULL subscript selects nothing.
    if (missing(value) || is.null(value)) {
        return(NULL)
    }

    ## Only vector (rank-0/1) subscripts are permitted.
    rank <- length(dim(value))
    if (rank >= 2) {
        stop(sprintf("subscript is a rank-%.0f array", rank))
    }

    ## Strip classes but keep element names, so classed inputs
    ## (e.g. factors, dates) are handled as their underlying base type.
    if (is.object(value)) {
        lbl <- names(value)
        if (is.numeric(value)) {
            value <- as.numeric(value)
        } else if (is.logical(value)) {
            value <- as.logical(value)
        } else {
            value <- as.character(value)
        }
        names(value) <- lbl
    }

    ## Numeric / logical subscripts are resolved by the C helper.
    if (!is.character(value)) {
        return(.Call(rframe_subscript, value, n, names, get))
    }

    ## Character subscripts: translate names to positions; 0 marks
    ## names with no match.
    idx <- match(value, names, 0L)

    ## Choose labels for the result: for reads, prefer the names
    ## attached to the subscript, falling back to the subscript values
    ## for empty/NA entries; for writes, always use the values.
    if (get) {
        lbl <- names(value)
        if (is.null(lbl)) {
            lbl <- value
        } else {
            blank <- is.na(lbl) | !nzchar(lbl)
            lbl[blank] <- value[blank]
        }
    } else {
        lbl <- value
    }
    names(idx) <- lbl

    misses <- which(idx == 0L)
    nmiss <- length(misses)
    if (nmiss > 0) {
        if (get) {
            ## Reads may not reference unknown names; report the first.
            first <- value[[misses[[1]]]]
            if (is.na(first)) {
                stop("unknown name <NA>")
            } else {
                stop(sprintf("unknown name \"%s\"", first))
            }
        } else {
            ## Writes append unknown names after the existing `n`
            ## positions; repeated new names map to the same slot.
            fresh <- (n + 1L):(n + nmiss)
            labels <- value[misses]
            idx[misses] <- fresh[match(labels, labels, 0)]
        }
    }
    idx
}
# Resolve a row subscript for a dataset. A record-valued subscript is
# matched against the key columns; anything else is delegated to
# arg_subscript(). When `get` is TRUE, the keys describing the selected
# rows are attached to the result as the "keys" attribute.
arg_row_subscript <- function(value, n, keys, get)
{
    matched <- NULL  # keys carried by a record-style subscript

    if (is.record(value)) {
        ## Select rows by key rather than by position.
        value <- as.dataset(value)
        matched <- keys(value)
        value <- rowid(keys, value)
        if (anyNA(value)) {
            bad <- which(is.na(value))[[1]]
            stop(sprintf("unknown key (row subscript %.0f)", bad))
        }
    } else {
        value <- arg_subscript(value, n, NULL, get)
    }

    if (!get) {
        return(value)
    }

    if (!is.null(matched)) {
        keys <- matched
    } else if (!is.null(keys)) {
        keys <- keys[value, , drop = FALSE]
        ## Duplicated row selections get a copy-number column so the
        ## resulting keys stay unique.
        if (anyDuplicated(value)) {
            keys <- append_copy_num(keys, n, value)
        }
        keys <- as.keyset(keys)
    }
    attr(value, "keys") <- keys
    value
}
# Append a copy-number column named "#" to `x`.
#
# x    - a list-like container of key columns (list, data.frame, keyset)
# nkey - historical size of the per-key counter buffer; retained for
#        interface compatibility (the vectorized form below no longer
#        needs it)
# id   - integer vector of key indices, one per row of the selection
#
# The i-th copy number is the running count of occurrences of id[i] so
# far (1 for the first appearance). Returns `x` with the extra column.
#
# The original per-element loop carried a "TODO: implement in C?" note;
# stats::ave with FUN = seq_along computes the same running counts in
# vectorized form (ave preserves the order of appearance within groups).
append_copy_num <- function(x, nkey, id)
{
    if (length(id) > 0L) {
        newkey <- as.integer(ave(seq_along(id), id, FUN = seq_along))
    } else {
        newkey <- integer(0)
    }
    names <- names(x)
    if (is.null(names)) {
        names <- character(length(x))
    }
    x[[length(x) + 1L]] <- newkey
    names(x) <- c(names, "#")
    x
}
| /R/arg-subscript.R | permissive | patperry/r-frame | R | false | false | 2,864 | r | arg_subscript <- function(value, n, names, get)
{
    # A missing or NULL subscript selects nothing.
    if (missing(value) || is.null(value)) {
        return(NULL)
    }
    # Only vector (rank-0/1) subscripts are permitted.
    r <- length(dim(value))
    if (r >= 2) {
        stop(sprintf("subscript is a rank-%.0f array", r))
    }
    # Strip classes but keep element names, so classed inputs
    # (e.g. factors) are handled as their underlying base type.
    if (is.object(value)) {
        vnames <- names(value)
        if (is.numeric(value)) {
            value <- as.numeric(value)
        } else if (is.logical(value)) {
            value <- as.logical(value)
        } else {
            value <- as.character(value)
        }
        names(value) <- vnames
    }
    if (!is.character(value)) {
        # Numeric/logical subscripts are resolved by the project's C
        # routine (rframe_subscript); presumably it validates bounds --
        # not visible from here.
        value <- .Call(rframe_subscript, value, n, names, get)
    } else {
        # Character subscripts: translate names to positions; 0 marks
        # names with no match.
        index <- match(value, names, 0L)
        if (get) {
            # For reads, prefer the subscript's own names as labels,
            # falling back to the values for empty/NA entries.
            vnames <- names(value)
            if (is.null(vnames)) {
                vnames <- value
            } else {
                empty <- is.na(vnames) | !nzchar(vnames)
                vnames[empty] <- value[empty]
            }
        } else {
            vnames <- value
        }
        names(index) <- vnames
        new <- which(index == 0L)
        nnew <- length(new)
        if (nnew > 0) {
            if (get) {
                # Reads may not reference unknown names; report the first.
                i <- new[[1]]
                vi <- value[[i]]
                if (is.na(vi)) {
                    stop("unknown name <NA>")
                } else {
                    stop(sprintf("unknown name \"%s\"", vi))
                }
            } else {
                # Writes append unknown names after position n; repeated
                # new names map to the same fresh slot.
                inew <- (n + 1L):(n + nnew)
                vnew <- value[new]
                index[new] <- inew[match(vnew, vnew, 0)] # handle duplicates
            }
        }
        value <- index
    }
    value
}
# Resolve a row subscript for a dataset. A record-valued subscript is
# matched against the key columns; anything else is delegated to
# arg_subscript(). When `get` is TRUE, the keys describing the selected
# rows are attached to the result as the "keys" attribute.
arg_row_subscript <- function(value, n, keys, get)
{
    # Record subscripts select rows by key rather than by position.
    if (is.record(value)) {
        value <- as.dataset(value)
        keys2 <- keys(value)
        # rowid() presumably maps each subscript key to its row number
        # in `keys`, with NA for keys that do not match -- confirm
        # against the project's rowid() definition.
        value <- rowid(keys, value)
        if (anyNA(value)) {
            i <- which(is.na(value))[[1]]
            stop(sprintf("unknown key (row subscript %.0f)", i))
        }
    } else {
        keys2 <- NULL
        value <- arg_subscript(value, n, NULL, get)
    }
    if (get) {
        if (!is.null(keys2)) {
            keys <- keys2
        } else if (!is.null(keys)) {
            keys <- keys[value, , drop = FALSE]
            # Duplicated row selections get a copy-number column so the
            # resulting keys stay unique.
            if (anyDuplicated(value)) {
                keys <- append_copy_num(keys, n, value)
            }
            keys <- as.keyset(keys)
        }
        attr(value, "keys") <- keys
    }
    value
}
# Append a copy-number column named "#" to `x`.
#
# x    - a list-like container of key columns (list, data.frame, keyset)
# nkey - historical size of the per-key counter buffer; retained for
#        interface compatibility (the vectorized form below no longer
#        needs it)
# id   - integer vector of key indices, one per row of the selection
#
# The i-th copy number is the running count of occurrences of id[i] so
# far (1 for the first appearance). Returns `x` with the extra column.
#
# The original per-element loop carried a "TODO: implement in C?" note;
# stats::ave with FUN = seq_along computes the same running counts in
# vectorized form (ave preserves the order of appearance within groups).
append_copy_num <- function(x, nkey, id)
{
    if (length(id) > 0L) {
        newkey <- as.integer(ave(seq_along(id), id, FUN = seq_along))
    } else {
        newkey <- integer(0)
    }
    names <- names(x)
    if (is.null(names)) {
        names <- character(length(x))
    }
    x[[length(x) + 1L]] <- newkey
    names(x) <- c(names, "#")
    x
}
|
##### Defining global objects####
# Global setup for the Shiny app: loads packages, reads the bycatch
# CSVs, normalises labels, and builds the choice vectors for the UI.
# source functions
source("2_load_libraries.R")
library(tidyverse)
library(plotly)
library(d3heatmap)
library(fields)
library(shinyBS)
library(markdown)
library(scales)
# Master bycatch table: drop unused columns, keep rows that are
# complete in columns 6, 9, 10 (positional indexing -- fragile if the
# CSV layout changes), and add a constant NUM.FISH column used later
# when counting fisheries.
master=read.csv("data/All_bycatch_data_2010_2015.csv") %>% select(-c(CV,FOOTNOTE.S.,FISHERY.TYPE.GENERAL,FISHERY.TYPE.SPECIFIC)) %>% .[complete.cases(.[,c(6,9,10)]),] %>% mutate(NUM.FISH=rep(1,nrow(.)))
# master_raw=read.csv("data/All_bycatch_data_2010_2015.csv") %>% select(-c(CV,FOOTNOTE.S.,FISHERY.TYPE.GENERAL,FISHERY.TYPE.SPECIFIC)) %>% .[complete.cases(.[,c(6,9,10)]),] %>% mutate(NUM.FISH=rep(1,nrow(.)))
# Cell-wise label normalisation across the whole table.
master[master=="Pot"]<-"pot"
master[master=="NW"]<-"WC"
master[master=="SW"]<-"WC"
# cleaning for resubmission
master[master=="combined gears"]<-"longline gears"
master=master %>% mutate(FISHERY=gsub("West Coast Mid-Water Trawl for Whiting","West Coast Mid-Water Trawl for Hake",FISHERY)) %>% mutate(FISHERY=gsub("Oregon/California Pink Shrimp","Washington/Oregon/California Pink Shrimp",FISHERY))
# cleaning for resubmission
master_extra_raw=read.csv("data/All_bycatch_data_2010_2015.csv")%>% mutate(YEAR=replace_na(YEAR,replace="2006-2010"))
master_extra_raw[master_extra_raw=="combined gears"]<-"longline gears"
master_extra_raw=master_extra_raw %>% mutate(FISHERY=gsub("West Coast Mid-Water Trawl for Whiting","West Coast Mid-Water Trawl for Hake",FISHERY)) %>% mutate(FISHERY=gsub("Oregon/California Pink Shrimp","Washington/Oregon/California Pink Shrimp",FISHERY))
### code to split mammals by year ####
# a=master %>% filter(GROUP=="marine mammal") %>% filter(UNIT=="INDIVIDUAL")
# new=list()
# for(i in 1:nrow(a)){
#   print(i)
#   if(nchar(as.character(a$YEAR[i]))>4){
#   b=strsplit(as.character(a$YEAR[i]),"-")
#   c=lapply(b,function(x)paste0(x,"-01-01"))
#   d=interval(c[[1]][1],c[[1]][2])
#   e=time_length(d,unit="year")+1
#   bycatch=a$TOTAL.FISHERY.BYCATCH.MM[i]/e
#   f=a %>% slice(rep(i,each=e))
#   f$TOTAL.FISHERY.BYCATCH.MM=bycatch
#   f$YEAR=seq(b[[1]][1],b[[1]][2])
#   new[[length(new)+1]] <- f
#   }
# }
#
# test=do.call("rbind",new)
# other=master%>% filter(GROUP=="marine mammal") %>% filter(UNIT=="INDIVIDUAL") %>% filter(nchar(as.character(YEAR))==4)
# final=rbind(test,other)
# write.csv(final,"data/mammals_by_year.csv",row.names = F)
#####
# Marine mammal records pre-split to one row per year (produced by the
# commented code above).
mammals=read.csv("data/mammals_by_year.csv")
rbi=read.csv("data/SummaryData_December2019_AllFisheryYears_AnalysisExport.csv")
# Choice vectors for the UI controls; "Don't filter" is the no-op option.
group=unique(master$GROUP)%>% .[complete.cases(.)]
year=c(2010,2011,2012,2013,2014,2015)
region=as.factor(master$REGION) %>% unique()
fishery=unique(master$FISHERY)%>% .[complete.cases(.)] %>% as.character() %>% sort()
fishery=c("Don't filter",fishery)
species=unique(master$SCIENTIFIC.NAME) %>% .[complete.cases(.)] %>% as.character()%>% sort()
species=c("Don't filter",species)
gear=unique(master$FISHERY.TYPE)%>% .[complete.cases(.)] %>% as.character()%>% sort()
gear=c("Don't filter",gear)
# Dashboard layout: header, sidebar (one menu item per tab plus
# conditional controls), and body (one tabItem per menu item).
# FIX: the IUCN birds-and-turtles slider label read "bids and turtles";
# corrected to "birds and turtles" (matches the ESA slider wording).
ui <- dashboardPage(skin = "black",
                    dashboardHeader(
                      title = "National Bycatch Database Explorer",
                      titleWidth = 350
                    ),
                    dashboardSidebar(
                      width = 280,
                      sidebarMenu(id = 'sidebarmenu',
                                  # Species-group tab: choose how to subdivide the bar charts.
                                  menuItem("Visualize by species group", tabName='species',icon=icon("fish")),
                                  conditionalPanel("input.sidebarmenu ==='species'",
                                                   #checkboxInput("sp_region", "Subdivide by region",value=FALSE),
                                                   radioButtons(inputId="choice_sp", label="How would you like to subdivide the data?", selected = "Don't subdivide",
                                                                choices=c("Region","Gear type", "Don't subdivide"))),
                                  #checkboxInput("sp_region", "Subdivide by fishing type",value=FALSE)),
                                  # Gear-type tab: axis scaling, subdivision, and metric choice.
                                  menuItem("Visualize by gear type", tabName='fishing',icon=icon("ship",lib='font-awesome')),
                                  conditionalPanel("input.sidebarmenu ==='fishing'",
                                                   checkboxInput("Free_y", "Fixed y axis scale",value=T),
                                                   radioButtons(inputId="choice_gear", label="How would you like to subdivide the data?", selected = "Don't subdivide",
                                                                choices=c("Region", "Don't subdivide")),
                                                   radioButtons(inputId="choice_metric", label="What metric would you like to see?", selected = "FISHERY.BYCATCH.RATIO",
                                                                choices=c("Bycatch ratio"="FISHERY.BYCATCH.RATIO",
                                                                          "Total catch"="TOTAL.CATCH",
                                                                          "Total landings"="TOTAL.FISHERY.LANDINGS",
                                                                          "Number of fisheries"="NUM.FISH"))),
                                  # RBI tab: display choice plus one weighting slider per criterion.
                                  menuItem("Relative Bycatch Index",tabName = 'rbi',icon=icon("award")),
                                  conditionalPanel("input.sidebarmenu==='rbi'",
                                                   bsButton("q1", label = "", icon = icon("question"), style = "info", size = "extra-small"),
                                                   bsPopover(id = "q1", title = "",
                                                             content = "Unlike other taxonomic groups bycatch impacts of a fishery on marine mammals was only represented by MMPA weighting, therefore our default ranking doubled the weighting of the MMPA category relative to the other criteria. You can adjust the slider to see how changing the criteria weightings influences the final RBI of each fishery in each year.",
                                                             placement = "right",
                                                             trigger = "hover",
                                                             options = list(container = "body")),
                                                   radioButtons("display","Select display metric",choices = list("Relative Bycatch Index","Inter-criteria variance"),width = "100%",selected = "Relative Bycatch Index"),
                                                   conditionalPanel("input.display ==='Relative Bycatch Index'",
                                                                    # MMPA defaults to 2 (see popover text above).
                                                                    sliderInput("mmpa","Adjust MMPA weighting",min=1,max=5,step=1,value=2),
                                                                    # shinyBS::bsTooltip("mmpa", "The wait times will be broken into this many equally spaced bins",
                                                                    #             "right", options = list(container = "body"))
                                                                    sliderInput("TB_lbs","Adjust Total Bycatch (lbs) weighting",min=1,max=5,step=1,value=1),
                                                                    sliderInput("TB_indv","Adjust Total Bycatch (indv) weighting",min=1,max=5,step=1,value=1),
                                                                    sliderInput("BR","Adjust Bycatch Ratio weighting",min=1,max=5,step=1,value=1),
                                                                    sliderInput("ESA_n","Adjust ESA (#) weighting",min=1,max=5,step=1,value=1),
                                                                    sliderInput("ESA_lbs","Adjust ESA (lbs) weighting",min=1,max=5,step=1,value=1),
                                                                    sliderInput("ESA_bt","Adjust ESA (birds and turtles) weighting",min=1,max=5,step=1,value=1),
                                                                    sliderInput("IUCN_n","Adjust IUCN (#) weighting",min=1,max=5,step=1,value=1),
                                                                    sliderInput("IUCN_lbs","Adjust IUCN (lbs) weighting",min=1,max=5,step=1,value=1),
                                                                    sliderInput("IUCN_bt","Adjust IUCN (birds and turtles) weighting",min=1,max=5,step=1,value=1),
                                                                    sliderInput("Tier","Adjust Tier weighting",min=1,max=5,step=1,value=1),
                                                                    sliderInput("CV","Adjust CV weighting",min=1,max=5,step=1,value=1)
                                                   )),
                                  # Raw-data tab: filter and download controls.
                                  menuItem("Explore raw data", tabName='raw',icon=icon("database",lib='font-awesome')),
                                  conditionalPanel("input.sidebarmenu ==='raw'",
                                                   selectInput("raw_species","Filter species",species,width = "100%"),
                                                   selectInput("raw_fishery","Filter fishery",fishery,width = "100%"),
                                                   selectInput("raw_gear","Filter gear",gear,width = "100%"),
                                                   div(style="text-align:center",downloadButton("downloadDataF", label = h6(style="color:black","Download dataset"))),
                                                   div(style="text-align:center",downloadButton("downloadDataM", label = h6(style="color:black","Download metadata")))
                                  )#,
                                  # div(style="text-align:center",url <- a(tags$span(style="color:dodgerblue",h4("Read the paper")), href="https://media.giphy.com/media/qaoutfIYJYxr2/source.gif"))
                      )),
                    dashboardBody(
                      tabItems(
                        tabItem(tabName = "rbi",
                                fluidRow(
                                  column(h4(style="text-align:center;","This app explores relative bycatch performance in US fisheries with bycatch estimates published in the National Bycatch Report."),width = 12),
                                  column(h5(""),width=1,plotOutput("scale",height = '800px'),style = "background-color:white;"),
                                  column(h5(""),width=10,d3heatmapOutput("heatmap",height = '800px'),style = "background-color:white;",
                                         absolutePanel(draggable=F,top = 300, left = 0, right = 0,tags$div(h2(style="background-color:white;opacity:0.6;text-align:center;color:red;padding:0px;border-radius: 0px;transform: rotate(45deg); ",tags$b(tags$em("EXPLORATORY ONLY. Output does not necessarily align with results in Savoca et al.")))))),
                                  # column(h5(""),width=1,plotOutput("scale_SD",height = '800px'),style = "background-color:white;"),
                                  # column(h5("Inter-criteria variance"),width=5,d3heatmapOutput("heatmap_SD",height = '800px'),style = "background-color:white;"),
                                  column(h6(style="font-style: italic;","App developed by Heather Welch (UCSC/NOAA)"),width = 12)
                                  # absolutePanel(div(style="text-align:center;color:red;padding:0px;border-radius: 0px; ",tags$b(tags$em("placeholder"))),draggable=T,top=350, right=50)
                                  # absolutePanel(draggable=T,top = 0, left = 0, right = 0,div(style="padding: 8px; border-bottom: 1px solid #CCC; background: #FFFFEE;",HTML(markdownToHTML(fragment.only=TRUE,text="placeholder"))))
                                )),
                        tabItem(tabName = "species",
                                fluidRow(
                                  column(h4(style="text-align:center;","This app explores relative bycatch performance in US fisheries with bycatch estimates published in the National Bycatch Report."),width = 12),
                                  column(h5("Fish and invertebrates"),width=4,plotOutput("Fish")),
                                  column(h5("Mammals"),width=4,plotOutput("Mammals")),
                                  column(h5("Seabirds and sea turtles"),width=4,plotOutput("SBST")),
                                  column(h6(style="font-style: italic;","App developed by Heather Welch (UCSC/NOAA)"),width = 12)
                                )),
                        tabItem(tabName = "fishing",
                                fluidRow(
                                  column(h4(style="text-align:center;","This app explores relative bycatch performance in US fisheries with bycatch estimates published in the National Bycatch Report."),width = 12),
                                  column(h5(""),width=12,plotOutput("gear_ll",height = '800px'))),
                                column(h6(style="font-style: italic;","App developed by Heather Welch (UCSC/NOAA)"),width = 12)
                        ),
                        tabItem(tabName = "raw",
                                fluidRow(
                                  column(h4(style="text-align:center;","This app explores relative bycatch performance in US fisheries with bycatch estimates published in the National Bycatch Report."),width = 12),
                                  column(h5(""),width=12,DT::dataTableOutput("rawTable")),
                                  column(h6(style="font-style: italic;","App developed by Heather Welch (UCSC/NOAA)"),width = 12)
                                ))
                      ))
)
# Server-side logic: builds the RBI heatmap, its legend, the per-group
# and per-gear bar charts, and the raw-data table from the globals
# loaded above. (Function body continues beyond this chunk.)
server <- shinyServer(function(input, output,session) {
output$heatmap<-renderD3heatmap({
if(input$display=="Relative Bycatch Index"){
a=rbi %>% mutate(mean_criteria = apply(.[,37:48],1,function(x) weighted.mean(x,w=c(input$TB_lbs,input$TB_indv,input$BR,input$ESA_n,input$ESA_lbs,input$ESA_bt,input$IUCN_n,input$IUCN_lbs,input$IUCN_bt,input$mmpa,input$Tier,input$CV),na.rm=T)))#Here 'w' refers to the weights.
# a=rbi %>% mutate(mean_criteria = apply(.[,29:40],1,function(x) weighted.mean(x,w=c(rep(1,9),input$mmpa,rep(1,2)),na.rm=T)))#Here 'w' refers to the weights.
# a=rbi %>% mutate(mean_criteria = apply(.[,29:40],1,function(x) weighted.mean(x,w=c(rep(1,9),2,rep(1,2)),na.rm=T)))# delete this before launching
q=a %>% select(Year,mean_criteria,Fishery) %>% spread(Year,mean_criteria) %>% mutate(Fishery=as.character(Fishery)) %>% arrange(desc(Fishery))
rownames(q)=q$Fishery
q=q %>% .[,2:ncol(.)]
d3heatmap(q, na.rm=T,Rowv = FALSE, Colv=FALSE, colors=c("#053061" ,"#2166AC", "#4393C3", "#D1E5F0" , "#FDDBC7", "#F4A582" ,"#D6604D" ,"#B2182B","#B2182B","#67001F"),
xlab=w,
show_grid=F, yaxis_width=400,show_color_legend=T,na_color="white",row_side_palette=c("#053061" ,"#2166AC", "#4393C3", "#D1E5F0" , "#FDDBC7", "#F4A582" ,"#D6604D" ,"#B2182B","#B2182B","#67001F")
)
}
else if(input$display=="Inter-criteria variance"){
a=rbi %>% mutate(mean_criteria = apply(.[,37:48],1,function(x) weighted.mean(x,w=c(rep(1,9),2,rep(1,2)),na.rm=T)))# delete this before launching
q=rbi %>% select(Year,criteria_sd,Fishery)%>% mutate(criteria_sd=criteria_sd^2) %>% spread(Year,criteria_sd) %>% mutate(Fishery=as.character(Fishery)) %>% arrange(desc(Fishery))
rownames(q)=q$Fishery
q=q %>% .[,2:ncol(.)]
d3heatmap(q, na.rm=T,Rowv = FALSE, Colv=FALSE, colors=c("#440154FF", "#31688EFF" ,"#35B779FF", "#FDE725FF"),
xlab=w,
show_grid=F, yaxis_width=400,show_color_legend=T,na_color="white",row_side_palette=c("#440154FF", "#31688EFF" ,"#35B779FF", "#FDE725FF")
)
}
})
  # Colour-bar legend drawn next to the heatmap with base graphics
  # (fields::image.plot). Uses the same palette as the corresponding
  # heatmap branch; breaks are hard-coded (0-.51 for RBI, up to .31 for
  # variance) rather than derived from the data -- presumably tuned to
  # the published dataset; confirm if the data change.
  output$scale<-renderPlot({
    if(input$display=="Relative Bycatch Index"){
      # par(mar=c(1,.1,.1,.1))
      # NOTE(review): `a` is recomputed here but only the fixed breaks
      # below are used for the legend.
      a=rbi %>% mutate(mean_criteria = apply(.[,37:48],1,function(x) weighted.mean(x,w=c(input$TB_lbs,input$TB_indv,input$BR,input$ESA_n,input$ESA_lbs,input$ESA_bt,input$IUCN_n,input$IUCN_lbs,input$IUCN_bt,input$mmpa,input$Tier,input$CV),na.rm=T)))#Here 'w' refers to the weights.
      col.pal <- colorRampPalette(c("#053061" ,"#2166AC", "#4393C3", "#D1E5F0" , "#FDDBC7", "#F4A582" ,"#D6604D" ,"#B2182B","#B2182B","#67001F"))
      ncolors <- 100
      #breaks <- seq(min(a$mean_criteria,na.rm = T),max(a$mean_criteria,na.rm = T),,ncolors+1)
      breaks <- seq(0,.51,,ncolors+1)
      # Midpoints of the colour bins.
      levs <- breaks[-1] - diff(breaks)/2
      # image(x=levs, y=1, z=as.matrix(levs), col=col.pal(ncolors), breaks=breaks, ylab="", xlab="", yaxt="n")
      par(mar=c(.1,.1,.1,.1))
      image.plot(x=levs, y=1,smallplot= c(0,.2,.2,1), z=as.matrix(levs), legend.only = TRUE,col=col.pal(ncolors), breaks=breaks, ylab="", xlab="", yaxt="n",axis.args = list(cex.axis = .6))
    }
    else if(input$display=="Inter-criteria variance"){
      # Viridis-like palette matching the variance heatmap.
      col.pal <- colorRampPalette(c("#440154FF", "#31688EFF" ,"#35B779FF", "#FDE725FF"))
      ncolors <- 100
      breaks <- seq(min((rbi$criteria_sd)^2,na.rm = T),max(.31,na.rm = T),,ncolors+1)
      levs <- breaks[-1] - diff(breaks)/2
      # image(x=levs, y=1, z=as.matrix(levs), col=col.pal(ncolors), breaks=breaks, ylab="", xlab="", yaxt="n")
      par(mar=c(.1,.1,.1,.1))
      image.plot(x=levs, y=1,smallplot= c(0,.2,.2,1), z=as.matrix(levs), legend.only = TRUE,col=col.pal(ncolors), breaks=breaks, ylab="", xlab="", yaxt="n",axis.args = list(cex.axis = .6))
    }
  })
# output$heatmap_SD<-renderD3heatmap({
# a=rbi %>% mutate(mean_criteria = apply(.[,29:40],1,function(x) weighted.mean(x,w=c(input$TB_lbs,input$TB_indv,input$BR,input$ESA_n,input$ESA_lbs,input$ESA_bt,input$IUCN_n,input$IUCN_lbs,input$IUCN_bt,input$mmpa,input$Tier,input$CV),na.rm=T)))#Here 'w' refers to the weights.
# # a=rbi %>% mutate(mean_criteria = apply(.[,29:40],1,function(x) weighted.mean(x,w=c(rep(1,9),input$mmpa,rep(1,2)),na.rm=T)))#Here 'w' refers to the weights.
# # a=rbi %>% mutate(mean_criteria = apply(.[,29:40],1,function(x) weighted.mean(x,w=c(rep(1,9),2,rep(1,2)),na.rm=T)))# delete this before launching
#
# q=a %>% select(Year,mean_criteria,Fishery) %>% spread(Year,mean_criteria) %>% mutate(Fishery=as.character(Fishery)) %>% arrange(desc(Fishery))
# rownames(q)=q$Fishery
# q=q %>% .[,2:ncol(.)]
#
# d3heatmap(q, na.rm=T,Rowv = FALSE, Colv=FALSE, colors=c("#053061" ,"#2166AC", "#4393C3", "#D1E5F0" , "#FDDBC7", "#F4A582" ,"#D6604D" ,"#B2182B","#B2182B","#67001F"),
# xlab=w,
# show_grid=F, yaxis_width=400,show_color_legend=T,na_color="white",row_side_palette=c("#053061" ,"#2166AC", "#4393C3", "#D1E5F0" , "#FDDBC7", "#F4A582" ,"#D6604D" ,"#B2182B","#B2182B","#67001F")
#
# )
# })
# output$scale_SD<-renderPlot({
# # par(mar=c(1,.1,.1,.1))
# # a=rbi %>% mutate(mean_criteria = apply(.[,29:40],1,function(x) weighted.mean(x,w=c(rep(1,9),input$mmpa,rep(1,2)),na.rm=T)))
# col.pal <- colorRampPalette(c("#440154FF", "#31688EFF" ,"#35B779FF", "#FDE725FF"))
# ncolors <- 100
# breaks <- seq(min((rbi$criteria_sd)^2,na.rm = T),max(.31,na.rm = T),,ncolors+1)
# levs <- breaks[-1] - diff(breaks)/2
# # image(x=levs, y=1, z=as.matrix(levs), col=col.pal(ncolors), breaks=breaks, ylab="", xlab="", yaxt="n")
# par(mar=c(.1,.1,.1,.1))
# image.plot(x=levs, y=1,smallplot= c(0,.2,.2,1), z=as.matrix(levs), legend.only = TRUE,col=col.pal(ncolors), breaks=breaks, ylab="", xlab="", yaxt="n",axis.args = list(cex.axis = .6))
#
# })
# output$placeholder=renderText({
# "test"
# })
  # Bar chart of total fish + invertebrate bycatch (lbs) per year,
  # optionally subdivided by region or gear type. Only one of the three
  # `if` branches runs per render (choice_sp is a single string).
  # Bycatch is averaged within fishery first, then summed per year.
  output$Fish<-renderPlot({
    value=input$choice_sp
    if(value=="Don't subdivide"){
      a=master %>% filter(GROUP=="invertebrate"|GROUP=="fish") %>% filter(YEAR==2010|YEAR==2011|YEAR==2012|YEAR==2013|YEAR==2014|YEAR==2015) %>% filter(UNIT=="POUND") %>%
        group_by(YEAR,FISHERY) %>% summarise(newcol=mean(TOTAL.FISHERY.BYCATCH.FISH.INVERT)) %>% group_by(YEAR) %>% summarise(newcol=sum(newcol))
      # Log scale on y -- fish/invert totals span orders of magnitude.
      b=ggplot(a) +geom_bar(aes(x=YEAR,y=newcol),stat="identity")+ theme_bw()+
        theme(panel.grid.major = element_blank(),
              panel.grid.minor = element_blank(),
              strip.background = element_blank(),
              panel.border = element_rect(colour = "black"))+ylab("Total bycatch (lbs)")+xlab("Year")+ scale_y_log10(labels = scales::comma)
      b
    }
    if(value=="Region"){
      a=master %>% filter(GROUP=="invertebrate"|GROUP=="fish") %>% filter(YEAR==2010|YEAR==2011|YEAR==2012|YEAR==2013|YEAR==2014|YEAR==2015) %>% filter(UNIT=="POUND") %>%
        group_by(YEAR,FISHERY,REGION) %>% summarise(newcol=mean(TOTAL.FISHERY.BYCATCH.FISH.INVERT)) %>% group_by(YEAR,REGION) %>% summarise(newcol=sum(newcol))
      b=ggplot(a) +geom_bar(aes(x=YEAR,y=newcol,fill=REGION),stat="identity", position = position_dodge())+ theme_bw()+
        theme(panel.grid.major = element_blank(),
              panel.grid.minor = element_blank(),
              strip.background = element_blank(),
              panel.border = element_rect(colour = "black"))+ylab("Total bycatch (lbs)")+xlab("Year")+
        scale_fill_manual("",values=c("AK"="#7489ff","PI"="#c2c700","SE"="#00683b","WC"="#b45300","NE"="#afcf9d"),labels=c("AK"="Alaska","PI"="Pacific Islands","SE"="Southeast","WC"="Westcoast","NE"="Northeast"))+ scale_y_continuous(labels = comma)
    }
    if(value=="Gear type"){
      a=master %>% filter(GROUP=="invertebrate"|GROUP=="fish") %>% filter(YEAR==2010|YEAR==2011|YEAR==2012|YEAR==2013|YEAR==2014|YEAR==2015) %>% filter(UNIT=="POUND") %>%
        group_by(YEAR,FISHERY,FISHERY.TYPE) %>% summarise(newcol=mean(TOTAL.FISHERY.BYCATCH.FISH.INVERT)) %>% group_by(YEAR,FISHERY.TYPE) %>% summarise(newcol=sum(newcol))
      b=ggplot(a) +geom_bar(aes(x=YEAR,y=newcol,fill=FISHERY.TYPE),stat="identity", position = position_dodge())+ theme_bw()+
        theme(panel.grid.major = element_blank(),
              panel.grid.minor = element_blank(),
              strip.background = element_blank(),
              panel.border = element_rect(colour = "black"))+ylab("Total bycatch (lbs)")+xlab("Year")+
        scale_fill_manual("",values=c("jig"="#9f7bb2","dredge"="#7dac33","gillnet"="#c64f79","line"="#93ccaf","longline"="#8e97ee","pot"="#59663e","seine"="#ffca33","trawl"="#c5703f","troll"="#4d304b"))+ scale_y_continuous(labels = comma)
    }
    # Last expression: the plot built by whichever branch matched.
    b
  })
  # Bar chart of total marine mammal bycatch (individuals) per year,
  # from the pre-split `mammals` table; same subdivision logic as
  # output$Fish (mean within fishery, then sum per year).
  output$Mammals<-renderPlot({
    value=input$choice_sp
    if(value=="Don't subdivide"){
      a=mammals %>% filter(YEAR==2010|YEAR==2011|YEAR==2012|YEAR==2013|YEAR==2014|YEAR==2015) %>%
        group_by(YEAR,FISHERY) %>% summarise(newcol=mean(TOTAL.FISHERY.BYCATCH.MM)) %>% group_by(YEAR) %>% summarise(newcol=sum(newcol))
      b=ggplot(a) +geom_bar(aes(x=YEAR,y=newcol),stat="identity")+ theme_bw()+
        theme(panel.grid.major = element_blank(),
              panel.grid.minor = element_blank(),
              strip.background = element_blank(),
              panel.border = element_rect(colour = "black"))+ylab("Total bycatch (individuals)")+xlab("Year")+ scale_y_continuous(labels = comma)
    }
    if(value=="Region"){
      a=mammals %>% filter(YEAR==2010|YEAR==2011|YEAR==2012|YEAR==2013|YEAR==2014|YEAR==2015) %>%
        group_by(YEAR,FISHERY,REGION) %>% summarise(newcol=mean(TOTAL.FISHERY.BYCATCH.MM)) %>% group_by(YEAR,REGION) %>% summarise(newcol=sum(newcol))
      b=ggplot(a) +geom_bar(aes(x=YEAR,y=newcol,fill=REGION),stat="identity", position = position_dodge())+ theme_bw()+
        theme(panel.grid.major = element_blank(),
              panel.grid.minor = element_blank(),
              strip.background = element_blank(),
              panel.border = element_rect(colour = "black"))+ylab("Total bycatch (individuals)")+xlab("Year")+
        scale_fill_manual("",values=c("AK"="#7489ff","PI"="#c2c700","SE"="#00683b","WC"="#b45300","NE"="#afcf9d"),labels=c("AK"="Alaska","PI"="Pacific Islands","SE"="Southeast","WC"="Westcoast","NE"="Northeast"))+ scale_y_continuous(labels = comma)
    }
    if(value=="Gear type"){
      a=mammals %>% filter(YEAR==2010|YEAR==2011|YEAR==2012|YEAR==2013|YEAR==2014|YEAR==2015) %>%
        group_by(YEAR,FISHERY,FISHERY.TYPE) %>% summarise(newcol=mean(TOTAL.FISHERY.BYCATCH.MM)) %>% group_by(YEAR,FISHERY.TYPE) %>% summarise(newcol=sum(newcol))
      b=ggplot(a) +geom_bar(aes(x=YEAR,y=newcol,fill=FISHERY.TYPE),stat="identity", position = position_dodge())+ theme_bw()+
        theme(panel.grid.major = element_blank(),
              panel.grid.minor = element_blank(),
              strip.background = element_blank(),
              panel.border = element_rect(colour = "black"))+ylab("Total bycatch (individuals)")+xlab("Year")+
        scale_fill_manual("",values=c("jig"="#9f7bb2","dredge"="#7dac33","gillnet"="#c64f79","line"="#93ccaf","longline"="#8e97ee","pot"="#59663e","seine"="#ffca33","trawl"="#c5703f","troll"="#4d304b"))+ scale_y_continuous(labels = comma)
    }
    b
  })
  # Bar chart of total seabird and sea turtle bycatch (individuals)
  # per year; same subdivision logic as output$Fish and output$Mammals.
  output$SBST<-renderPlot({
    value=input$choice_sp
    if(value=="Don't subdivide"){
      a=master %>% filter(GROUP=="seabird"|GROUP=="sea turtle") %>% filter(YEAR==2010|YEAR==2011|YEAR==2012|YEAR==2013|YEAR==2014|YEAR==2015)%>% filter(UNIT=="INDIVIDUAL") %>%
        group_by(YEAR,FISHERY) %>% summarise(newcol=mean(TOTAL.FISHERY.BYCATCH.SBST)) %>% group_by(YEAR) %>% summarise(newcol=sum(newcol))
      b=ggplot(a) +geom_bar(aes(x=YEAR,y=newcol),stat="identity")+ theme_bw()+
        theme(panel.grid.major = element_blank(),
              panel.grid.minor = element_blank(),
              strip.background = element_blank(),
              panel.border = element_rect(colour = "black"))+ylab("Total bycatch (individuals)")+xlab("Year")+ scale_y_continuous(labels = comma)
    }
    if(value=="Region"){
      a=master %>% filter(GROUP=="seabird"|GROUP=="sea turtle") %>% filter(YEAR==2010|YEAR==2011|YEAR==2012|YEAR==2013|YEAR==2014|YEAR==2015)%>% filter(UNIT=="INDIVIDUAL") %>%
        group_by(YEAR,FISHERY,REGION) %>% summarise(newcol=mean(TOTAL.FISHERY.BYCATCH.SBST)) %>% group_by(YEAR,REGION) %>% summarise(newcol=sum(newcol))
      b=ggplot(a) +geom_bar(aes(x=YEAR,y=newcol,fill=REGION),stat="identity", position = position_dodge())+ theme_bw()+
        theme(panel.grid.major = element_blank(),
              panel.grid.minor = element_blank(),
              strip.background = element_blank(),
              panel.border = element_rect(colour = "black"))+ylab("Total bycatch (individuals)")+xlab("Year")+
        scale_fill_manual("",values=c("AK"="#7489ff","PI"="#c2c700","SE"="#00683b","WC"="#b45300","NE"="#afcf9d"),labels=c("AK"="Alaska","PI"="Pacific Islands","SE"="Southeast","WC"="Westcoast","NE"="Northeast"))+ scale_y_continuous(labels = comma)
    }
    if(value=="Gear type"){
      a=master %>% filter(GROUP=="seabird"|GROUP=="sea turtle") %>% filter(YEAR==2010|YEAR==2011|YEAR==2012|YEAR==2013|YEAR==2014|YEAR==2015)%>% filter(UNIT=="INDIVIDUAL") %>%
        group_by(YEAR,FISHERY,FISHERY.TYPE) %>% summarise(newcol=mean(TOTAL.FISHERY.BYCATCH.SBST)) %>% group_by(YEAR,FISHERY.TYPE) %>% summarise(newcol=sum(newcol))
      b=ggplot(a) +geom_bar(aes(x=YEAR,y=newcol,fill=FISHERY.TYPE),stat="identity", position = position_dodge())+ theme_bw()+
        theme(panel.grid.major = element_blank(),
              panel.grid.minor = element_blank(),
              strip.background = element_blank(),
              panel.border = element_rect(colour = "black"))+ylab("Total bycatch (individuals)")+xlab("Year")+
        scale_fill_manual("",values=c("jig"="#9f7bb2","dredge"="#7dac33","gillnet"="#c64f79","line"="#93ccaf","longline"="#8e97ee","pot"="#59663e","seine"="#ffca33","trawl"="#c5703f","troll"="#4d304b"))+ scale_y_continuous(labels = comma)
    }
    b
  })
metric=reactive({
a=input$choice_metric
b=grep(a,colnames(master),value=T)
return(b)
})
  # Gear-type panel: one faceted bar chart per selected metric
  # (bycatch ratio, total landings, total catch, or number of
  # fisheries), optionally subdivided by region. Exactly one metric
  # branch and one subdivision branch run per render; the final
  # `if(input$Free_y...)` overrides the facet scaling when the
  # "Fixed y axis scale" box is checked.
  output$gear_ll<-renderPlot({
    value=input$choice_gear
    if(metric()=="FISHERY.BYCATCH.RATIO"){
      # Mean bycatch ratio per gear type (and optionally region).
      a=master %>% filter(YEAR==2010|YEAR==2011|YEAR==2012|YEAR==2013|YEAR==2014|YEAR==2015) %>%
        group_by(YEAR,FISHERY.TYPE) %>% summarise(newcol=mean(FISHERY.BYCATCH.RATIO,na.rm=T))
      aa=master %>% filter(YEAR==2010|YEAR==2011|YEAR==2012|YEAR==2013|YEAR==2014|YEAR==2015) %>%
        group_by(YEAR,FISHERY.TYPE,REGION) %>% summarise(newcol=mean(FISHERY.BYCATCH.RATIO,na.rm=T))
      if(value=="Don't subdivide"){
        b=ggplot(a) +geom_bar(aes(x=YEAR,y=newcol),stat="identity")+facet_wrap(~FISHERY.TYPE,scales = "free_y")+ theme_bw() +
          theme(panel.grid.major = element_blank(),
                panel.grid.minor = element_blank(),
                strip.background = element_blank(),
                panel.border = element_rect(colour = "black"))+ylab("Bycatch ratio")+xlab("Year")
      }
      if(value=="Region"){
        b=ggplot(aa) +geom_bar(aes(x=YEAR,y=newcol,fill=REGION),stat="identity", position = position_dodge())+facet_wrap(~FISHERY.TYPE,scales = "free_y")+ theme_bw() +
          theme(panel.grid.major = element_blank(),
                panel.grid.minor = element_blank(),
                strip.background = element_blank(),
                panel.border = element_rect(colour = "black"))+ylab("Bycatch ratio")+xlab("Year")+
          scale_fill_manual("",values=c("AK"="#7489ff","PI"="#c2c700","SE"="#00683b","WC"="#b45300","NE"="#afcf9d"),labels=c("AK"="Alaska","PI"="Pacific Islands","SE"="Southeast","WC"="Westcoast","NE"="Northeast"))
      }
    }
    if(metric()=="TOTAL.FISHERY.LANDINGS"){
      # Summed landings per gear type; log y-axis for the wide range.
      a=master %>% filter(YEAR==2010|YEAR==2011|YEAR==2012|YEAR==2013|YEAR==2014|YEAR==2015) %>%
        group_by(YEAR,FISHERY.TYPE) %>% summarise(newcol=sum(TOTAL.FISHERY.LANDINGS,na.rm=T))
      aa=master %>% filter(YEAR==2010|YEAR==2011|YEAR==2012|YEAR==2013|YEAR==2014|YEAR==2015) %>%
        group_by(YEAR,FISHERY.TYPE,REGION) %>% summarise(newcol=sum(TOTAL.FISHERY.LANDINGS,na.rm=T))
      if(value=="Don't subdivide"){
        options(scipen=10000)
        b=ggplot(a) +geom_bar(aes(x=YEAR,y=newcol),stat="identity")+facet_wrap(~FISHERY.TYPE,scales = "free_y")+ theme_bw() +
          theme(panel.grid.major = element_blank(),
                panel.grid.minor = element_blank(),
                strip.background = element_blank(),
                panel.border = element_rect(colour = "black"))+ylab("Total landings")+xlab("Year")+scale_y_log10(labels = scales::comma)
      }
      if(value=="Region"){
        options(scipen=10000)
        b=ggplot(aa) +geom_bar(aes(x=YEAR,y=newcol,fill=REGION),stat="identity", position = position_dodge())+facet_wrap(~FISHERY.TYPE,scales = "free_y")+ theme_bw() +
          theme(panel.grid.major = element_blank(),
                panel.grid.minor = element_blank(),
                strip.background = element_blank(),
                panel.border = element_rect(colour = "black"))+ylab("Total landings")+xlab("Year")+scale_y_log10(labels = scales::comma)+
          scale_fill_manual("",values=c("AK"="#7489ff","PI"="#c2c700","SE"="#00683b","WC"="#b45300","NE"="#afcf9d"),labels=c("AK"="Alaska","PI"="Pacific Islands","SE"="Southeast","WC"="Westcoast","NE"="Northeast"))
      }
    }
    if(metric()=="TOTAL.CATCH"){
      # Summed total catch per gear type; log y-axis as above.
      a=master %>% filter(YEAR==2010|YEAR==2011|YEAR==2012|YEAR==2013|YEAR==2014|YEAR==2015) %>%
        group_by(YEAR,FISHERY.TYPE) %>% summarise(newcol=sum(TOTAL.CATCH,na.rm=T))
      aa=master %>% filter(YEAR==2010|YEAR==2011|YEAR==2012|YEAR==2013|YEAR==2014|YEAR==2015) %>%
        group_by(YEAR,FISHERY.TYPE,REGION) %>% summarise(newcol=sum(TOTAL.CATCH,na.rm=T))
      if(value=="Don't subdivide"){
        options(scipen=10000)
        b=ggplot(a) +geom_bar(aes(x=YEAR,y=newcol),stat="identity")+facet_wrap(~FISHERY.TYPE,scales = "free_y")+ theme_bw() +
          theme(panel.grid.major = element_blank(),
                panel.grid.minor = element_blank(),
                strip.background = element_blank(),
                panel.border = element_rect(colour = "black"))+ylab("Total catch")+xlab("Year")+scale_y_log10(labels = scales::comma)
      }
      if(value=="Region"){
        options(scipen=10000)
        b=ggplot(aa) +geom_bar(aes(x=YEAR,y=newcol,fill=REGION),stat="identity", position = position_dodge())+facet_wrap(~FISHERY.TYPE,scales = "free_y")+ theme_bw() +
          theme(panel.grid.major = element_blank(),
                panel.grid.minor = element_blank(),
                strip.background = element_blank(),
                panel.border = element_rect(colour = "black"))+ylab("Total catch")+xlab("Year")+scale_y_log10(labels = scales::comma)+
          scale_fill_manual("",values=c("AK"="#7489ff","PI"="#c2c700","SE"="#00683b","WC"="#b45300","NE"="#afcf9d"),labels=c("AK"="Alaska","PI"="Pacific Islands","SE"="Southeast","WC"="Westcoast","NE"="Northeast"))
      }
    }
    if(metric()=="NUM.FISH"){
      # Count distinct fisheries per gear type (and optionally region).
      a=master %>% filter(YEAR==2010|YEAR==2011|YEAR==2012|YEAR==2013|YEAR==2014|YEAR==2015) %>%
        group_by(YEAR,FISHERY.TYPE,FISHERY) %>% summarise(newcol=n()) %>% distinct() %>% group_by(YEAR,FISHERY.TYPE)%>% summarise(newcol=n())
      aa=master %>% filter(YEAR==2010|YEAR==2011|YEAR==2012|YEAR==2013|YEAR==2014|YEAR==2015) %>%
        group_by(YEAR,FISHERY.TYPE,REGION,FISHERY) %>% summarise(newcol=n()) %>% distinct() %>% group_by(YEAR,FISHERY.TYPE,REGION)%>% summarise(newcol=n())
      if(value=="Don't subdivide"){
        options(scipen=10000)
        b=ggplot(a) +geom_bar(aes(x=YEAR,y=newcol),stat="identity")+facet_wrap(~FISHERY.TYPE,scales = "free_y")+ theme_bw() +
          theme(panel.grid.major = element_blank(),
                panel.grid.minor = element_blank(),
                strip.background = element_blank(),
                panel.border = element_rect(colour = "black"))+ylab("Number of fisheries")+xlab("Year")
      }
      if(value=="Region"){
        options(scipen=10000)
        b=ggplot(aa) +geom_bar(aes(x=YEAR,y=newcol,fill=REGION),stat="identity", position = position_dodge())+facet_wrap(~FISHERY.TYPE,scales = "free_y")+ theme_bw() +
          theme(panel.grid.major = element_blank(),
                panel.grid.minor = element_blank(),
                strip.background = element_blank(),
                panel.border = element_rect(colour = "black"))+ylab("Number of fisheries")+xlab("Year")+
          scale_fill_manual("",values=c("AK"="#7489ff","PI"="#c2c700","SE"="#00683b","WC"="#b45300","NE"="#afcf9d"),labels=c("AK"="Alaska","PI"="Pacific Islands","SE"="Southeast","WC"="Westcoast","NE"="Northeast"))
      }
    }
    # Checkbox "Fixed y axis scale": replace the free_y facets above.
    if(input$Free_y==T){
      b=b+facet_wrap(~FISHERY.TYPE,scales = "fixed")
    }
    b
  })
# }, height = function() {
# session$clientData$output_gear_ll_width
# })
# Raw-data table for the "Explore raw data" tab: master_extra_raw
# filtered by the three sidebar dropdowns (species, fishery, gear).
# The sentinel choice "Don't filter" leaves that dimension untouched.
output$rawTable <- DT::renderDataTable({
tbl <- master_extra_raw
if (input$raw_species != "Don't filter") {
tbl <- tbl %>% filter(SCIENTIFIC.NAME == input$raw_species)
}
if (input$raw_fishery != "Don't filter") {
tbl <- tbl %>% filter(FISHERY == input$raw_fishery)
}
if (input$raw_gear != "Don't filter") {
tbl <- tbl %>% filter(FISHERY.TYPE == input$raw_gear)
}
# scrollX: the raw table is wide, allow horizontal scrolling.
datatable(tbl, options = list(scrollX = TRUE))
})
# Reactive copy of the raw table honoring the same three sidebar
# filters as the displayed table; feeds the CSV download handler.
filtered_data <- reactive({
out <- master_extra_raw
if (input$raw_species != "Don't filter") {
out <- out %>% filter(SCIENTIFIC.NAME == input$raw_species)
}
if (input$raw_fishery != "Don't filter") {
out <- out %>% filter(FISHERY == input$raw_fishery)
}
if (input$raw_gear != "Don't filter") {
out <- out %>% filter(FISHERY.TYPE == input$raw_gear)
}
out
})
# Download handler: serves the currently filtered dataset as a CSV.
output$downloadDataF <- downloadHandler(
  filename = function() {
    # paste0() is the idiomatic form of paste(..., sep = "").
    paste0("National_Bycatch_Database", ".csv")
  },
  content = function(file) {
    # Write the reactive filtered table into the temp path Shiny provides.
    write.csv(filtered_data(), file, row.names = FALSE)
  })
# Download handler: serves the static metadata PDF bundled with the app.
# The download filename is fixed; content is a straight file copy.
output$downloadDataM <- downloadHandler(
filename = "US_Bycatch_analysis_Raw_data_metadata.pdf",
content = function(file) {
# Copy the bundled PDF into the temp path Shiny provides.
file.copy("data/US_Bycatch_analysis_Raw_data_metadata.pdf", file)
})
})
shinyApp(ui = ui, server = server)
| /NBR_bycatch_explorer/app_V9.R | no_license | mssavoca/DOM_fisheries-analysis | R | false | false | 36,191 | r | ##### Defining global objects####
# source functions
source("2_load_libraries.R")
library(tidyverse)
library(plotly)
library(d3heatmap)
library(fields)
library(shinyBS)
library(markdown)
library(scales)
master=read.csv("data/All_bycatch_data_2010_2015.csv") %>% select(-c(CV,FOOTNOTE.S.,FISHERY.TYPE.GENERAL,FISHERY.TYPE.SPECIFIC)) %>% .[complete.cases(.[,c(6,9,10)]),] %>% mutate(NUM.FISH=rep(1,nrow(.)))
# master_raw=read.csv("data/All_bycatch_data_2010_2015.csv") %>% select(-c(CV,FOOTNOTE.S.,FISHERY.TYPE.GENERAL,FISHERY.TYPE.SPECIFIC)) %>% .[complete.cases(.[,c(6,9,10)]),] %>% mutate(NUM.FISH=rep(1,nrow(.)))
master[master=="Pot"]<-"pot"
master[master=="NW"]<-"WC"
master[master=="SW"]<-"WC"
# cleaning for resubmission
master[master=="combined gears"]<-"longline gears"
master=master %>% mutate(FISHERY=gsub("West Coast Mid-Water Trawl for Whiting","West Coast Mid-Water Trawl for Hake",FISHERY)) %>% mutate(FISHERY=gsub("Oregon/California Pink Shrimp","Washington/Oregon/California Pink Shrimp",FISHERY))
# cleaning for resubmission
master_extra_raw=read.csv("data/All_bycatch_data_2010_2015.csv")%>% mutate(YEAR=replace_na(YEAR,replace="2006-2010"))
master_extra_raw[master_extra_raw=="combined gears"]<-"longline gears"
master_extra_raw=master_extra_raw %>% mutate(FISHERY=gsub("West Coast Mid-Water Trawl for Whiting","West Coast Mid-Water Trawl for Hake",FISHERY)) %>% mutate(FISHERY=gsub("Oregon/California Pink Shrimp","Washington/Oregon/California Pink Shrimp",FISHERY))
### code to split mammals by year ####
# a=master %>% filter(GROUP=="marine mammal") %>% filter(UNIT=="INDIVIDUAL")
# new=list()
# for(i in 1:nrow(a)){
# print(i)
# if(nchar(as.character(a$YEAR[i]))>4){
# b=strsplit(as.character(a$YEAR[i]),"-")
# c=lapply(b,function(x)paste0(x,"-01-01"))
# d=interval(c[[1]][1],c[[1]][2])
# e=time_length(d,unit="year")+1
# bycatch=a$TOTAL.FISHERY.BYCATCH.MM[i]/e
# f=a %>% slice(rep(i,each=e))
# f$TOTAL.FISHERY.BYCATCH.MM=bycatch
# f$YEAR=seq(b[[1]][1],b[[1]][2])
# new[[length(new)+1]] <- f
# }
# }
#
# test=do.call("rbind",new)
# other=master%>% filter(GROUP=="marine mammal") %>% filter(UNIT=="INDIVIDUAL") %>% filter(nchar(as.character(YEAR))==4)
# final=rbind(test,other)
# write.csv(final,"data/mammals_by_year.csv",row.names = F)
#####
# Supplementary tables and dropdown choice vectors.
# Mammal bycatch expanded to one row per year (built by the script above).
mammals=read.csv("data/mammals_by_year.csv")
# Relative Bycatch Index criteria table (December 2019 export).
rbi=read.csv("data/SummaryData_December2019_AllFisheryYears_AnalysisExport.csv")
group=unique(master$GROUP)%>% .[complete.cases(.)]
year=c(2010,2011,2012,2013,2014,2015)
region=as.factor(master$REGION) %>% unique()
# UI dropdown choices: each gets a leading "Don't filter" sentinel option.
fishery=unique(master$FISHERY)%>% .[complete.cases(.)] %>% as.character() %>% sort()
fishery=c("Don't filter",fishery)
species=unique(master$SCIENTIFIC.NAME) %>% .[complete.cases(.)] %>% as.character()%>% sort()
species=c("Don't filter",species)
gear=unique(master$FISHERY.TYPE)%>% .[complete.cases(.)] %>% as.character()%>% sort()
gear=c("Don't filter",gear)
# ---- UI ---------------------------------------------------------------
# shinydashboard layout: header, sidebar with four menu items (each with
# controls shown only while its tab is active), and one tabItem per menu.
ui <- dashboardPage(skin = "black",
dashboardHeader(
title = "National Bycatch Database Explorer",
titleWidth = 350
),
# Sidebar: menu plus per-tab conditional control panels.
dashboardSidebar(
width = 280,
sidebarMenu(id = 'sidebarmenu',
menuItem("Visualize by species group", tabName='species',icon=icon("fish")),
conditionalPanel("input.sidebarmenu ==='species'",
#checkboxInput("sp_region", "Subdivide by region",value=FALSE),
radioButtons(inputId="choice_sp", label="How would you like to subdivide the data?", selected = "Don't subdivide",
choices=c("Region","Gear type", "Don't subdivide"))),
#checkboxInput("sp_region", "Subdivide by fishing type",value=FALSE)),
menuItem("Visualize by gear type", tabName='fishing',icon=icon("ship",lib='font-awesome')),
conditionalPanel("input.sidebarmenu ==='fishing'",
checkboxInput("Free_y", "Fixed y axis scale",value=T),
radioButtons(inputId="choice_gear", label="How would you like to subdivide the data?", selected = "Don't subdivide",
choices=c("Region", "Don't subdivide")),
radioButtons(inputId="choice_metric", label="What metric would you like to see?", selected = "FISHERY.BYCATCH.RATIO",
choices=c("Bycatch ratio"="FISHERY.BYCATCH.RATIO",
"Total catch"="TOTAL.CATCH",
"Total landings"="TOTAL.FISHERY.LANDINGS",
"Number of fisheries"="NUM.FISH"))),
menuItem("Relative Bycatch Index",tabName = 'rbi',icon=icon("award")),
conditionalPanel("input.sidebarmenu==='rbi'",
bsButton("q1", label = "", icon = icon("question"), style = "info", size = "extra-small"),
bsPopover(id = "q1", title = "",
content = "Unlike other taxonomic groups bycatch impacts of a fishery on marine mammals was only represented by MMPA weighting, therefore our default ranking doubled the weighting of the MMPA category relative to the other criteria. You can adjust the slider to see how changing the criteria weightings influences the final RBI of each fishery in each year.",
placement = "right",
trigger = "hover",
options = list(container = "body")),
radioButtons("display","Select display metric",choices = list("Relative Bycatch Index","Inter-criteria variance"),width = "100%",selected = "Relative Bycatch Index"),
# One weighting slider per RBI criterion; MMPA defaults to 2 (see popover).
conditionalPanel("input.display ==='Relative Bycatch Index'",
sliderInput("mmpa","Adjust MMPA weighting",min=1,max=5,step=1,value=2),
# shinyBS::bsTooltip("mmpa", "The wait times will be broken into this many equally spaced bins",
# "right", options = list(container = "body"))
sliderInput("TB_lbs","Adjust Total Bycatch (lbs) weighting",min=1,max=5,step=1,value=1),
sliderInput("TB_indv","Adjust Total Bycatch (indv) weighting",min=1,max=5,step=1,value=1),
sliderInput("BR","Adjust Bycatch Ratio weighting",min=1,max=5,step=1,value=1),
sliderInput("ESA_n","Adjust ESA (#) weighting",min=1,max=5,step=1,value=1),
sliderInput("ESA_lbs","Adjust ESA (lbs) weighting",min=1,max=5,step=1,value=1),
sliderInput("ESA_bt","Adjust ESA (birds and turtles) weighting",min=1,max=5,step=1,value=1),
sliderInput("IUCN_n","Adjust IUCN (#) weighting",min=1,max=5,step=1,value=1),
sliderInput("IUCN_lbs","Adjust IUCN (lbs) weighting",min=1,max=5,step=1,value=1),
sliderInput("IUCN_bt","Adjust IUCN (bids and turtles) weighting",min=1,max=5,step=1,value=1),
sliderInput("Tier","Adjust Tier weighting",min=1,max=5,step=1,value=1),
sliderInput("CV","Adjust CV weighting",min=1,max=5,step=1,value=1)
)),
menuItem("Explore raw data", tabName='raw',icon=icon("database",lib='font-awesome')),
conditionalPanel("input.sidebarmenu ==='raw'",
selectInput("raw_species","Filter species",species,width = "100%"),
selectInput("raw_fishery","Filter fishery",fishery,width = "100%"),
selectInput("raw_gear","Filter gear",gear,width = "100%"),
div(style="text-align:center",downloadButton("downloadDataF", label = h6(style="color:black","Download dataset"))),
div(style="text-align:center",downloadButton("downloadDataM", label = h6(style="color:black","Download metadata")))
)#,
# div(style="text-align:center",url <- a(tags$span(style="color:dodgerblue",h4("Read the paper")), href="https://media.giphy.com/media/qaoutfIYJYxr2/source.gif"))
)),
# Body: one tabItem per sidebar menu entry.
dashboardBody(
tabItems(
tabItem(tabName = "rbi",
fluidRow(
column(h4(style="text-align:center;","This app explores relative bycatch performance in US fisheries with bycatch estimates published in the National Bycatch Report."),width = 12),
column(h5(""),width=1,plotOutput("scale",height = '800px'),style = "background-color:white;"),
column(h5(""),width=10,d3heatmapOutput("heatmap",height = '800px'),style = "background-color:white;",
absolutePanel(draggable=F,top = 300, left = 0, right = 0,tags$div(h2(style="background-color:white;opacity:0.6;text-align:center;color:red;padding:0px;border-radius: 0px;transform: rotate(45deg); ",tags$b(tags$em("EXPLORATORY ONLY. Output does not necessarily align with results in Savoca et al.")))))),
# column(h5(""),width=1,plotOutput("scale_SD",height = '800px'),style = "background-color:white;"),
# column(h5("Inter-criteria variance"),width=5,d3heatmapOutput("heatmap_SD",height = '800px'),style = "background-color:white;"),
column(h6(style="font-style: italic;","App developed by Heather Welch (UCSC/NOAA)"),width = 12)
# absolutePanel(div(style="text-align:center;color:red;padding:0px;border-radius: 0px; ",tags$b(tags$em("placeholder"))),draggable=T,top=350, right=50)
# absolutePanel(draggable=T,top = 0, left = 0, right = 0,div(style="padding: 8px; border-bottom: 1px solid #CCC; background: #FFFFEE;",HTML(markdownToHTML(fragment.only=TRUE,text="placeholder"))))
)),
tabItem(tabName = "species",
fluidRow(
column(h4(style="text-align:center;","This app explores relative bycatch performance in US fisheries with bycatch estimates published in the National Bycatch Report."),width = 12),
column(h5("Fish and invertebrates"),width=4,plotOutput("Fish")),
column(h5("Mammals"),width=4,plotOutput("Mammals")),
column(h5("Seabirds and sea turtles"),width=4,plotOutput("SBST")),
column(h6(style="font-style: italic;","App developed by Heather Welch (UCSC/NOAA)"),width = 12)
)),
tabItem(tabName = "fishing",
fluidRow(
column(h4(style="text-align:center;","This app explores relative bycatch performance in US fisheries with bycatch estimates published in the National Bycatch Report."),width = 12),
column(h5(""),width=12,plotOutput("gear_ll",height = '800px'))),
column(h6(style="font-style: italic;","App developed by Heather Welch (UCSC/NOAA)"),width = 12)
),
tabItem(tabName = "raw",
fluidRow(
column(h4(style="text-align:center;","This app explores relative bycatch performance in US fisheries with bycatch estimates published in the National Bycatch Report."),width = 12),
column(h5(""),width=12,DT::dataTableOutput("rawTable")),
column(h6(style="font-style: italic;","App developed by Heather Welch (UCSC/NOAA)"),width = 12)
))
))
)
server <- shinyServer(function(input, output,session) {
# Fishery x year heatmap with two display modes:
#  * "Relative Bycatch Index": the 12 normalized criteria columns (37:48)
#    are combined with a weighted mean whose weights come from the sidebar
#    sliders (MMPA defaults to 2).
#  * "Inter-criteria variance": the published per-fishery criteria SD,
#    squared to a variance.
output$heatmap <- renderD3heatmap({
  if (input$display == "Relative Bycatch Index") {
    a = rbi %>% mutate(mean_criteria = apply(.[, 37:48], 1, function(x)
      weighted.mean(x, w = c(input$TB_lbs, input$TB_indv, input$BR,
                             input$ESA_n, input$ESA_lbs, input$ESA_bt,
                             input$IUCN_n, input$IUCN_lbs, input$IUCN_bt,
                             input$mmpa, input$Tier, input$CV), na.rm = TRUE)))
    # Wide matrix: one row per fishery (alphabetical, reversed), one column per year.
    q = a %>% select(Year, mean_criteria, Fishery) %>%
      spread(Year, mean_criteria) %>%
      mutate(Fishery = as.character(Fishery)) %>% arrange(desc(Fishery))
    rownames(q) = q$Fishery
    q = q %>% .[, 2:ncol(.)]
    d3heatmap(q, na.rm = TRUE, Rowv = FALSE, Colv = FALSE,
              colors = c("#053061", "#2166AC", "#4393C3", "#D1E5F0", "#FDDBC7", "#F4A582", "#D6604D", "#B2182B", "#B2182B", "#67001F"),
              # Fix: the original passed xlab=w, but `w` is undefined in this
              # file and would error if evaluated; columns are years.
              xlab = "Year",
              show_grid = FALSE, yaxis_width = 400, show_color_legend = TRUE,
              na_color = "white",
              row_side_palette = c("#053061", "#2166AC", "#4393C3", "#D1E5F0", "#FDDBC7", "#F4A582", "#D6604D", "#B2182B", "#B2182B", "#67001F"))
  }
  else if (input$display == "Inter-criteria variance") {
    # Variance mode ignores the sliders; square the published SD column.
    q = rbi %>% select(Year, criteria_sd, Fishery) %>%
      mutate(criteria_sd = criteria_sd^2) %>%
      spread(Year, criteria_sd) %>%
      mutate(Fishery = as.character(Fishery)) %>% arrange(desc(Fishery))
    rownames(q) = q$Fishery
    q = q %>% .[, 2:ncol(.)]
    d3heatmap(q, na.rm = TRUE, Rowv = FALSE, Colv = FALSE,
              colors = c("#440154FF", "#31688EFF", "#35B779FF", "#FDE725FF"),
              xlab = "Year",
              show_grid = FALSE, yaxis_width = 400, show_color_legend = TRUE,
              na_color = "white",
              row_side_palette = c("#440154FF", "#31688EFF", "#35B779FF", "#FDE725FF"))
  }
})
# Color-bar legend drawn beside the heatmap; it must use the same palette
# and break range as output$heatmap for the selected display mode.
output$scale<-renderPlot({
if(input$display=="Relative Bycatch Index"){
# par(mar=c(1,.1,.1,.1))
# NOTE(review): `a` is computed here but never used in this branch.
a=rbi %>% mutate(mean_criteria = apply(.[,37:48],1,function(x) weighted.mean(x,w=c(input$TB_lbs,input$TB_indv,input$BR,input$ESA_n,input$ESA_lbs,input$ESA_bt,input$IUCN_n,input$IUCN_lbs,input$IUCN_bt,input$mmpa,input$Tier,input$CV),na.rm=T)))#Here 'w' refers to the weights.
col.pal <- colorRampPalette(c("#053061" ,"#2166AC", "#4393C3", "#D1E5F0" , "#FDDBC7", "#F4A582" ,"#D6604D" ,"#B2182B","#B2182B","#67001F"))
ncolors <- 100
#breaks <- seq(min(a$mean_criteria,na.rm = T),max(a$mean_criteria,na.rm = T),,ncolors+1)
# Fixed 0-0.51 range so the legend stays stable across weighting choices.
breaks <- seq(0,.51,,ncolors+1)
levs <- breaks[-1] - diff(breaks)/2
# image(x=levs, y=1, z=as.matrix(levs), col=col.pal(ncolors), breaks=breaks, ylab="", xlab="", yaxt="n")
par(mar=c(.1,.1,.1,.1))
image.plot(x=levs, y=1,smallplot= c(0,.2,.2,1), z=as.matrix(levs), legend.only = TRUE,col=col.pal(ncolors), breaks=breaks, ylab="", xlab="", yaxt="n",axis.args = list(cex.axis = .6))
}
else if(input$display=="Inter-criteria variance"){
col.pal <- colorRampPalette(c("#440154FF", "#31688EFF" ,"#35B779FF", "#FDE725FF"))
ncolors <- 100
# Upper bound pinned at 0.31 (max(.31, na.rm=T) is just .31).
breaks <- seq(min((rbi$criteria_sd)^2,na.rm = T),max(.31,na.rm = T),,ncolors+1)
levs <- breaks[-1] - diff(breaks)/2
# image(x=levs, y=1, z=as.matrix(levs), col=col.pal(ncolors), breaks=breaks, ylab="", xlab="", yaxt="n")
par(mar=c(.1,.1,.1,.1))
image.plot(x=levs, y=1,smallplot= c(0,.2,.2,1), z=as.matrix(levs), legend.only = TRUE,col=col.pal(ncolors), breaks=breaks, ylab="", xlab="", yaxt="n",axis.args = list(cex.axis = .6))
}
})
# output$heatmap_SD<-renderD3heatmap({
# a=rbi %>% mutate(mean_criteria = apply(.[,29:40],1,function(x) weighted.mean(x,w=c(input$TB_lbs,input$TB_indv,input$BR,input$ESA_n,input$ESA_lbs,input$ESA_bt,input$IUCN_n,input$IUCN_lbs,input$IUCN_bt,input$mmpa,input$Tier,input$CV),na.rm=T)))#Here 'w' refers to the weights.
# # a=rbi %>% mutate(mean_criteria = apply(.[,29:40],1,function(x) weighted.mean(x,w=c(rep(1,9),input$mmpa,rep(1,2)),na.rm=T)))#Here 'w' refers to the weights.
# # a=rbi %>% mutate(mean_criteria = apply(.[,29:40],1,function(x) weighted.mean(x,w=c(rep(1,9),2,rep(1,2)),na.rm=T)))# delete this before launching
#
# q=a %>% select(Year,mean_criteria,Fishery) %>% spread(Year,mean_criteria) %>% mutate(Fishery=as.character(Fishery)) %>% arrange(desc(Fishery))
# rownames(q)=q$Fishery
# q=q %>% .[,2:ncol(.)]
#
# d3heatmap(q, na.rm=T,Rowv = FALSE, Colv=FALSE, colors=c("#053061" ,"#2166AC", "#4393C3", "#D1E5F0" , "#FDDBC7", "#F4A582" ,"#D6604D" ,"#B2182B","#B2182B","#67001F"),
# xlab=w,
# show_grid=F, yaxis_width=400,show_color_legend=T,na_color="white",row_side_palette=c("#053061" ,"#2166AC", "#4393C3", "#D1E5F0" , "#FDDBC7", "#F4A582" ,"#D6604D" ,"#B2182B","#B2182B","#67001F")
#
# )
# })
# output$scale_SD<-renderPlot({
# # par(mar=c(1,.1,.1,.1))
# # a=rbi %>% mutate(mean_criteria = apply(.[,29:40],1,function(x) weighted.mean(x,w=c(rep(1,9),input$mmpa,rep(1,2)),na.rm=T)))
# col.pal <- colorRampPalette(c("#440154FF", "#31688EFF" ,"#35B779FF", "#FDE725FF"))
# ncolors <- 100
# breaks <- seq(min((rbi$criteria_sd)^2,na.rm = T),max(.31,na.rm = T),,ncolors+1)
# levs <- breaks[-1] - diff(breaks)/2
# # image(x=levs, y=1, z=as.matrix(levs), col=col.pal(ncolors), breaks=breaks, ylab="", xlab="", yaxt="n")
# par(mar=c(.1,.1,.1,.1))
# image.plot(x=levs, y=1,smallplot= c(0,.2,.2,1), z=as.matrix(levs), legend.only = TRUE,col=col.pal(ncolors), breaks=breaks, ylab="", xlab="", yaxt="n",axis.args = list(cex.axis = .6))
#
# })
# output$placeholder=renderText({
# "test"
# })
# Fish & invertebrate bycatch (pounds) per year, optionally subdivided
# by region or gear type (input$choice_sp).
output$Fish <- renderPlot({
split_by <- input$choice_sp
# Shared subset: fish + invertebrates, study years, pound-based records.
fi <- master %>%
  filter(GROUP == "invertebrate" | GROUP == "fish") %>%
  filter(YEAR %in% c(2010, 2011, 2012, 2013, 2014, 2015)) %>%
  filter(UNIT == "POUND")
bw <- theme_bw() +
  theme(panel.grid.major = element_blank(),
        panel.grid.minor = element_blank(),
        strip.background = element_blank(),
        panel.border = element_rect(colour = "black"))
if (split_by == "Don't subdivide") {
  dat <- fi %>% group_by(YEAR, FISHERY) %>%
    summarise(newcol = mean(TOTAL.FISHERY.BYCATCH.FISH.INVERT)) %>%
    group_by(YEAR) %>% summarise(newcol = sum(newcol))
  plt <- ggplot(dat) +
    geom_bar(aes(x = YEAR, y = newcol), stat = "identity") + bw +
    ylab("Total bycatch (lbs)") + xlab("Year") +
    scale_y_log10(labels = scales::comma)
}
if (split_by == "Region") {
  dat <- fi %>% group_by(YEAR, FISHERY, REGION) %>%
    summarise(newcol = mean(TOTAL.FISHERY.BYCATCH.FISH.INVERT)) %>%
    group_by(YEAR, REGION) %>% summarise(newcol = sum(newcol))
  plt <- ggplot(dat) +
    geom_bar(aes(x = YEAR, y = newcol, fill = REGION),
             stat = "identity", position = position_dodge()) + bw +
    ylab("Total bycatch (lbs)") + xlab("Year") +
    scale_fill_manual("",values=c("AK"="#7489ff","PI"="#c2c700","SE"="#00683b","WC"="#b45300","NE"="#afcf9d"),labels=c("AK"="Alaska","PI"="Pacific Islands","SE"="Southeast","WC"="Westcoast","NE"="Northeast")) +
    scale_y_continuous(labels = comma)
}
if (split_by == "Gear type") {
  dat <- fi %>% group_by(YEAR, FISHERY, FISHERY.TYPE) %>%
    summarise(newcol = mean(TOTAL.FISHERY.BYCATCH.FISH.INVERT)) %>%
    group_by(YEAR, FISHERY.TYPE) %>% summarise(newcol = sum(newcol))
  plt <- ggplot(dat) +
    geom_bar(aes(x = YEAR, y = newcol, fill = FISHERY.TYPE),
             stat = "identity", position = position_dodge()) + bw +
    ylab("Total bycatch (lbs)") + xlab("Year") +
    scale_fill_manual("",values=c("jig"="#9f7bb2","dredge"="#7dac33","gillnet"="#c64f79","line"="#93ccaf","longline"="#8e97ee","pot"="#59663e","seine"="#ffca33","trawl"="#c5703f","troll"="#4d304b")) +
    scale_y_continuous(labels = comma)
}
plt
})
# Marine mammal bycatch (individuals) per year, from the per-year
# expanded mammal table; optional subdivision by region or gear type.
output$Mammals <- renderPlot({
split_by <- input$choice_sp
mm <- mammals %>% filter(YEAR %in% c(2010, 2011, 2012, 2013, 2014, 2015))
bw <- theme_bw() +
  theme(panel.grid.major = element_blank(),
        panel.grid.minor = element_blank(),
        strip.background = element_blank(),
        panel.border = element_rect(colour = "black"))
if (split_by == "Don't subdivide") {
  dat <- mm %>% group_by(YEAR, FISHERY) %>%
    summarise(newcol = mean(TOTAL.FISHERY.BYCATCH.MM)) %>%
    group_by(YEAR) %>% summarise(newcol = sum(newcol))
  plt <- ggplot(dat) +
    geom_bar(aes(x = YEAR, y = newcol), stat = "identity") + bw +
    ylab("Total bycatch (individuals)") + xlab("Year") +
    scale_y_continuous(labels = comma)
}
if (split_by == "Region") {
  dat <- mm %>% group_by(YEAR, FISHERY, REGION) %>%
    summarise(newcol = mean(TOTAL.FISHERY.BYCATCH.MM)) %>%
    group_by(YEAR, REGION) %>% summarise(newcol = sum(newcol))
  plt <- ggplot(dat) +
    geom_bar(aes(x = YEAR, y = newcol, fill = REGION),
             stat = "identity", position = position_dodge()) + bw +
    ylab("Total bycatch (individuals)") + xlab("Year") +
    scale_fill_manual("",values=c("AK"="#7489ff","PI"="#c2c700","SE"="#00683b","WC"="#b45300","NE"="#afcf9d"),labels=c("AK"="Alaska","PI"="Pacific Islands","SE"="Southeast","WC"="Westcoast","NE"="Northeast")) +
    scale_y_continuous(labels = comma)
}
if (split_by == "Gear type") {
  dat <- mm %>% group_by(YEAR, FISHERY, FISHERY.TYPE) %>%
    summarise(newcol = mean(TOTAL.FISHERY.BYCATCH.MM)) %>%
    group_by(YEAR, FISHERY.TYPE) %>% summarise(newcol = sum(newcol))
  plt <- ggplot(dat) +
    geom_bar(aes(x = YEAR, y = newcol, fill = FISHERY.TYPE),
             stat = "identity", position = position_dodge()) + bw +
    ylab("Total bycatch (individuals)") + xlab("Year") +
    scale_fill_manual("",values=c("jig"="#9f7bb2","dredge"="#7dac33","gillnet"="#c64f79","line"="#93ccaf","longline"="#8e97ee","pot"="#59663e","seine"="#ffca33","trawl"="#c5703f","troll"="#4d304b")) +
    scale_y_continuous(labels = comma)
}
plt
})
# Seabird and sea turtle bycatch (individuals) per year, optional
# subdivision by region or gear type.
output$SBST <- renderPlot({
split_by <- input$choice_sp
# Shared subset: seabirds + sea turtles, study years, individual counts.
st <- master %>%
  filter(GROUP == "seabird" | GROUP == "sea turtle") %>%
  filter(YEAR %in% c(2010, 2011, 2012, 2013, 2014, 2015)) %>%
  filter(UNIT == "INDIVIDUAL")
bw <- theme_bw() +
  theme(panel.grid.major = element_blank(),
        panel.grid.minor = element_blank(),
        strip.background = element_blank(),
        panel.border = element_rect(colour = "black"))
if (split_by == "Don't subdivide") {
  dat <- st %>% group_by(YEAR, FISHERY) %>%
    summarise(newcol = mean(TOTAL.FISHERY.BYCATCH.SBST)) %>%
    group_by(YEAR) %>% summarise(newcol = sum(newcol))
  plt <- ggplot(dat) +
    geom_bar(aes(x = YEAR, y = newcol), stat = "identity") + bw +
    ylab("Total bycatch (individuals)") + xlab("Year") +
    scale_y_continuous(labels = comma)
}
if (split_by == "Region") {
  dat <- st %>% group_by(YEAR, FISHERY, REGION) %>%
    summarise(newcol = mean(TOTAL.FISHERY.BYCATCH.SBST)) %>%
    group_by(YEAR, REGION) %>% summarise(newcol = sum(newcol))
  plt <- ggplot(dat) +
    geom_bar(aes(x = YEAR, y = newcol, fill = REGION),
             stat = "identity", position = position_dodge()) + bw +
    ylab("Total bycatch (individuals)") + xlab("Year") +
    scale_fill_manual("",values=c("AK"="#7489ff","PI"="#c2c700","SE"="#00683b","WC"="#b45300","NE"="#afcf9d"),labels=c("AK"="Alaska","PI"="Pacific Islands","SE"="Southeast","WC"="Westcoast","NE"="Northeast")) +
    scale_y_continuous(labels = comma)
}
if (split_by == "Gear type") {
  dat <- st %>% group_by(YEAR, FISHERY, FISHERY.TYPE) %>%
    summarise(newcol = mean(TOTAL.FISHERY.BYCATCH.SBST)) %>%
    group_by(YEAR, FISHERY.TYPE) %>% summarise(newcol = sum(newcol))
  plt <- ggplot(dat) +
    geom_bar(aes(x = YEAR, y = newcol, fill = FISHERY.TYPE),
             stat = "identity", position = position_dodge()) + bw +
    ylab("Total bycatch (individuals)") + xlab("Year") +
    scale_fill_manual("",values=c("jig"="#9f7bb2","dredge"="#7dac33","gillnet"="#c64f79","line"="#93ccaf","longline"="#8e97ee","pot"="#59663e","seine"="#ffca33","trawl"="#c5703f","troll"="#4d304b")) +
    scale_y_continuous(labels = comma)
}
plt
})
# Map the sidebar metric choice (already an exact column name such as
# "TOTAL.CATCH") to the matching column of `master`.
metric = reactive({
  choice <- input$choice_metric
  # fixed = TRUE: the choice strings contain "." which grep would otherwise
  # treat as a regex wildcard and could match additional columns, making
  # metric() length > 1 and breaking the == comparisons downstream.
  grep(choice, colnames(master), value = TRUE, fixed = TRUE)
})
# Gear-type panel: one facet per gear type, metric chosen in the sidebar
# (bycatch ratio, total landings, total catch, or number of fisheries),
# optionally split by region. input$Free_y switches facets to a fixed
# y scale.
output$gear_ll <- renderPlot({
split_by <- input$choice_gear
met <- metric()
recent <- master %>% filter(YEAR %in% c(2010, 2011, 2012, 2013, 2014, 2015))
bw <- theme_bw() +
  theme(panel.grid.major = element_blank(),
        panel.grid.minor = element_blank(),
        strip.background = element_blank(),
        panel.border = element_rect(colour = "black"))
region_fill <- scale_fill_manual("",values=c("AK"="#7489ff","PI"="#c2c700","SE"="#00683b","WC"="#b45300","NE"="#afcf9d"),labels=c("AK"="Alaska","PI"="Pacific Islands","SE"="Southeast","WC"="Westcoast","NE"="Northeast"))
if (met == "FISHERY.BYCATCH.RATIO") {
  a <- recent %>% group_by(YEAR, FISHERY.TYPE) %>%
    summarise(newcol = mean(FISHERY.BYCATCH.RATIO, na.rm = TRUE))
  aa <- recent %>% group_by(YEAR, FISHERY.TYPE, REGION) %>%
    summarise(newcol = mean(FISHERY.BYCATCH.RATIO, na.rm = TRUE))
  if (split_by == "Don't subdivide") {
    b <- ggplot(a) + geom_bar(aes(x = YEAR, y = newcol), stat = "identity") +
      facet_wrap(~FISHERY.TYPE, scales = "free_y") + bw +
      ylab("Bycatch ratio") + xlab("Year")
  }
  if (split_by == "Region") {
    b <- ggplot(aa) + geom_bar(aes(x = YEAR, y = newcol, fill = REGION), stat = "identity", position = position_dodge()) +
      facet_wrap(~FISHERY.TYPE, scales = "free_y") + bw +
      ylab("Bycatch ratio") + xlab("Year") + region_fill
  }
}
if (met == "TOTAL.FISHERY.LANDINGS") {
  options(scipen = 10000)  # avoid scientific notation on the axis
  a <- recent %>% group_by(YEAR, FISHERY.TYPE) %>%
    summarise(newcol = sum(TOTAL.FISHERY.LANDINGS, na.rm = TRUE))
  aa <- recent %>% group_by(YEAR, FISHERY.TYPE, REGION) %>%
    summarise(newcol = sum(TOTAL.FISHERY.LANDINGS, na.rm = TRUE))
  if (split_by == "Don't subdivide") {
    b <- ggplot(a) + geom_bar(aes(x = YEAR, y = newcol), stat = "identity") +
      facet_wrap(~FISHERY.TYPE, scales = "free_y") + bw +
      ylab("Total landings") + xlab("Year") +
      scale_y_log10(labels = scales::comma)
  }
  if (split_by == "Region") {
    b <- ggplot(aa) + geom_bar(aes(x = YEAR, y = newcol, fill = REGION), stat = "identity", position = position_dodge()) +
      facet_wrap(~FISHERY.TYPE, scales = "free_y") + bw +
      ylab("Total landings") + xlab("Year") +
      scale_y_log10(labels = scales::comma) + region_fill
  }
}
if (met == "TOTAL.CATCH") {
  options(scipen = 10000)
  a <- recent %>% group_by(YEAR, FISHERY.TYPE) %>%
    summarise(newcol = sum(TOTAL.CATCH, na.rm = TRUE))
  aa <- recent %>% group_by(YEAR, FISHERY.TYPE, REGION) %>%
    summarise(newcol = sum(TOTAL.CATCH, na.rm = TRUE))
  if (split_by == "Don't subdivide") {
    b <- ggplot(a) + geom_bar(aes(x = YEAR, y = newcol), stat = "identity") +
      facet_wrap(~FISHERY.TYPE, scales = "free_y") + bw +
      ylab("Total catch") + xlab("Year") +
      scale_y_log10(labels = scales::comma)
  }
  if (split_by == "Region") {
    b <- ggplot(aa) + geom_bar(aes(x = YEAR, y = newcol, fill = REGION), stat = "identity", position = position_dodge()) +
      facet_wrap(~FISHERY.TYPE, scales = "free_y") + bw +
      ylab("Total catch") + xlab("Year") +
      scale_y_log10(labels = scales::comma) + region_fill
  }
}
if (met == "NUM.FISH") {
  options(scipen = 10000)
  # Count distinct fisheries per year and gear type (and region).
  a <- recent %>% group_by(YEAR, FISHERY.TYPE, FISHERY) %>%
    summarise(newcol = n()) %>% distinct() %>%
    group_by(YEAR, FISHERY.TYPE) %>% summarise(newcol = n())
  aa <- recent %>% group_by(YEAR, FISHERY.TYPE, REGION, FISHERY) %>%
    summarise(newcol = n()) %>% distinct() %>%
    group_by(YEAR, FISHERY.TYPE, REGION) %>% summarise(newcol = n())
  if (split_by == "Don't subdivide") {
    b <- ggplot(a) + geom_bar(aes(x = YEAR, y = newcol), stat = "identity") +
      facet_wrap(~FISHERY.TYPE, scales = "free_y") + bw +
      ylab("Number of fisheries") + xlab("Year")
  }
  if (split_by == "Region") {
    b <- ggplot(aa) + geom_bar(aes(x = YEAR, y = newcol, fill = REGION), stat = "identity", position = position_dodge()) +
      facet_wrap(~FISHERY.TYPE, scales = "free_y") + bw +
      ylab("Number of fisheries") + xlab("Year") + region_fill
  }
}
# "Fixed y axis scale" checkbox replaces the per-facet free scale.
if (input$Free_y == TRUE) {
  b <- b + facet_wrap(~FISHERY.TYPE, scales = "fixed")
}
b
})
# }, height = function() {
# session$clientData$output_gear_ll_width
# })
## Raw-data table, restricted by the same species/fishery/gear selectors as
## the CSV download. Reuses the filtered_data() reactive so the filtering
## logic lives in exactly one place (the previous version re-implemented the
## three filters inline and also assigned unused locals `fish` / `sp`).
output$rawTable <- DT::renderDataTable({
  datatable(filtered_data(), options = list(scrollX = TRUE))
})
## Reactive: master_extra_raw restricted by the three selectors
## (species, fishery, gear). A selector set to "Don't filter" is a no-op.
## Shared by the raw-data table and the CSV download handler.
filtered_data = reactive({
  dat <- master_extra_raw
  no_filter <- "Don't filter"
  if (!identical(input$raw_species, no_filter)) {
    dat <- dat %>% filter(SCIENTIFIC.NAME == input$raw_species)
  }
  if (!identical(input$raw_fishery, no_filter)) {
    dat <- dat %>% filter(FISHERY == input$raw_fishery)
  }
  if (!identical(input$raw_gear, no_filter)) {
    dat <- dat %>% filter(FISHERY.TYPE == input$raw_gear)
  }
  dat
})
## Download the currently filtered raw data as a CSV file.
output$downloadDataF <- downloadHandler(
  filename = function() {
    # was paste("National_Bycatch_Database", ".csv", sep = "") -- both parts
    # are constants, so just return the literal name
    "National_Bycatch_Database.csv"
  },
  content = function(file) {
    write.csv(filtered_data(), file, row.names = FALSE)
  })
## Serve the static metadata PDF shipped with the app (no computation; the
## content function simply copies the bundled file into the download target).
output$downloadDataM <- downloadHandler(
  filename = "US_Bycatch_analysis_Raw_data_metadata.pdf",
  content = function(file) {
    file.copy("data/US_Bycatch_analysis_Raw_data_metadata.pdf", file)
  })
})
shinyApp(ui = ui, server = server)
|
#' Launch precisely Shiny app
#'
#' `launch_precisely_app()` starts the Shiny application bundled with the
#' precisely package, which calculates and plots precision, sample size, and
#' upper limit calculations. The call blocks until the app is closed.
#'
#' @export
launch_precisely_app <- function() {
  path <- system.file("shiny_app", "precisely", package = "precisely")
  if (!nzchar(path)) {
    # system.file() returns "" when the app directory is not installed
    stop("Shiny app not found. Try re-installing `precisely`.", call. = FALSE)
  }
  shiny::runApp(path, display.mode = "normal")
}
| /R/shiny_precisely.R | permissive | malcolmbarrett/precisely | R | false | false | 445 | r | #' Launch precisely Shiny app
#'
#' `launch_precisely_app()` launches a Shiny app to calculate and plot
#' precision, sample size, and upper limit calculations.
#'
#' @export
launch_precisely_app <- function() {
  app_dir <- system.file("shiny_app", "precisely", package = "precisely")
  # system.file() returns "" when the installed package lacks the app dir
  if (app_dir == "") {
    stop("Shiny app not found. Try re-installing `precisely`.", call. = FALSE)
  }
  # blocks until the user closes the app
  shiny::runApp(app_dir, display.mode = "normal")
}
### test subsample
### LU decomposition and singular subsamples handling
require(robustbase)
source(system.file("xtraR/subsample-fns.R", package = "robustbase", mustWork=TRUE))
## instead of relying on system.file("test-tools-1.R", package="Matrix"):
source(system.file("xtraR/test-tools.R", package = "robustbase")) # assert.EQ(), showProc.time() ..
options(nwarnings = 4e4)
cat("doExtras:", doExtras <- robustbase:::doExtras(),"\n")
showProc.time()
A <- rbind(c(0.001, 1),
c(1, 2))
set.seed(11)
str(sa <- tstSubsample(A))
A <- rbind(c(3, 17, 10),
c(2, 4, -2),
c(6, 18, 12))
tstSubsample(A)
## test some random matrix
set.seed(1002)
A <- matrix(rnorm(100), 10)
tstSubsample(A)
## test singular matrix handling
A <- rbind(c(1, 0, 0),
c(0, 1, 0),
c(0, 1, 0),
c(0, 0, 1))
tstSubsample(A)
## test subsample with mts > 0
data <- data.frame(y = rnorm(9), expand.grid(A = letters[1:3], B = letters[1:3]))
x <- model.matrix(y ~ ., data)
y <- data$y
## this should produce a warning and return status == 2
showSys.time(z <- Rsubsample(x, y, mts=2))
stopifnot(z$status == 2)
## test equilibration
## columns only
X <- rbind(c(1e-7, 1e-10),
c(2 , 0.2))
y <- 1:2
tstSubsample(t(X), y)
## rows only
X <- rbind(c(1e-7, 1e+10),
c(2 , 0.2))
y <- 1:2
tstSubsample(X, y)
## both
X <- rbind(c(1e-7, 2 ),
c(1e10, 2e12))
y <- 1:2
tstSubsample(X, y)
showProc.time()
## test real data example
data(possumDiv)## 151 * 9; the last two variables are factors
with(possumDiv, table(eucalyptus, aspect))
mf <- model.frame(Diversity ~ .^2, possumDiv)
X <- model.matrix(mf, possumDiv)
y <- model.response(mf)
stopifnot(qr(X)$rank == ncol(X))
## this used to fail: different pivots in step 37
str(s1 <- tstSubsample(X, y))
s2 <- tstSubsample(X / max(abs(X)), y / max(abs(X)))
s3 <- tstSubsample(X * 2^-50, y * 2^-50)
## all components *BUT* x, y, lu, Dr, Dc, rowequ, colequ :
nm <- names(s1); nm <- nm[is.na(match(nm, c("x","y","lu", "Dr", "Dc", "rowequ", "colequ")))]
stopifnot(all.equal(s1[nm], s2[nm], tolerance=1e-10),
all.equal(s1[nm], s3[nm], tolerance=1e-10))
showProc.time()
set.seed(10)
nsing <- sum(replicate(if(doExtras) 200 else 20, tstSubsampleSing(X, y)))
stopifnot(nsing == 0)
showProc.time()
## test example with many categorical predictors - 2 different random seeds:
set.seed(10) ; r1 <- lmrob(Diversity ~ .^2 , data = possumDiv, cov="none")
set.seed(108); r2 <- lmrob(Diversity ~ .^2 , data = possumDiv, cov="none")# lmrob.S() failed
(i1 <- r1$init) # print(<lmrob.S>)
(i2 <- r2$init) # ... and they are "somewhat" close:
## fixed: i2 previously copied r1$init, so the two inits compared here were
## identical by construction and the comparison was vacuous
stopifnot(all.equal(r1[names(r1) != "init.S"],
r2[names(r2) != "init.S"], tol = 0.40))
c1 <- coef(r1)
c2 <- coef(r2)
relD <- (c1-c2)*2/(c1+c2)
xCf <- which(abs(relD) >= 10)
stopifnot(exprs = {
identical(xCf, c(`Bark:aspectSW-NW` = 46L))
all.equal(c1[-xCf], c2[-xCf], tol = 0.35) # 0.3418
sign(c1[-xCf]) == sign(c2[-xCf])
})
showProc.time()
## investigate problematic subsample:
idc <- 1 + c(140, 60, 12, 13, 89, 90, 118, 80, 17, 134, 59, 94, 36,
43, 46, 93, 107, 62, 57, 116, 11, 45, 35, 38, 120, 34, 29,
33, 147, 105, 115, 92, 61, 91, 104, 141, 138, 129, 130, 84,
119, 132, 6, 135, 112, 16, 67, 41, 102, 76, 111, 82, 148, 24,
131, 10, 96, 0, 87, 21, 127, 56, 124)
rc <- lm(Diversity ~ .^2 , data = possumDiv, subset = idc)
X <- model.matrix(rc)
y <- possumDiv$Diversity[idc]
tstSubsample(X, y)## have different pivots ... could not find non-singular
lu <- LU.gaxpy(t(X))
stopifnot(lu$sing)
zc <- Rsubsample(X, y)
stopifnot(zc$status > 0)
## column 52 is linearly dependent and should have been discarded
## qr(t(X))$pivot
image(as(round(zc$lu - (lu$L + lu$U - diag(nrow(lu$U))), 10), "Matrix"))
image(as( sign(zc$lu) - sign(lu$L + lu$U - diag(nrow(lu$U))), "Matrix"))
showProc.time()
## test equilibration
## colequ only
X <- matrix(c(1e-7, 2, 1e-10, 0.2), 2)
y <- 1:2
tstSubsample(t(X), y)
## rowequ only
X <- matrix(c(1e-7, 2, 1e10, 0.2), 2)
y <- 1:2
tstSubsample(X, y)
## both
X <- matrix(c(1e-7, 1e10, 2, 2e12), 2)
y <- 1:2
tstSubsample(X, y)
showProc.time()
### real data, see MM's ~/R/MM/Pkg-ex/robustbase/hedlmrob.R
## close to singular cov():
attach(system.file("external", "d1k27.rda", package="robustbase", mustWork=TRUE))
fm1 <- lmrob(y ~ a + I(a^2) + tf + I(tf^2) + A + I(A^2) + . , data = d1k27)
## ^^^^^ gave error, earlier, now with a warning -- use ".vcov.w"
## --> cov = ".vcov.w"
fm2 <- lmrob(y ~ a + I(a^2) + tf + I(tf^2) + A + I(A^2) + . , data = d1k27,
cov = ".vcov.w", trace = TRUE)
showProc.time()# 2.77
if(doExtras) withAutoprint({##---------------------------------------------------------
## Q: does it change to use numeric instead of binary factors ?
## A: not really ..
d1k.n <- d1k27
d1k.n[-(1:5)] <- lapply(d1k27[,-(1:5)], as.numeric)
fm1.n <- lmrob(y ~ a + I(a^2) + tf + I(tf^2) + A + I(A^2) + . , data = d1k.n)
fm2.n <- lmrob(y ~ a + I(a^2) + tf + I(tf^2) + A + I(A^2) + . , data = d1k.n,
cov = ".vcov.w", trace = 2)
summary(weights(fm1, type="robustness"))
hist(weights(fm1, type="robustness"), main="robustness weights of fm1")
rug(weights(fm1, type="robustness"))
showProc.time()## 2.88
##
fmc <- lm (y ~ poly(a,2)-a + poly(tf, 2)-tf + poly(A, 2)-A + . , data = d1k27)
summary(fmc)
## -> has NA's for 'a, tf, A' --- bad that it did *not* work to remove them
nform <- update(formula(fm1), ~ .
+poly(A,2) -A -I(A^2)
+poly(a,2) -a -I(a^2)
+poly(tf,2) -tf -I(tf^2))
fm1. <- lmrob(nform, data = d1k27)# now w/o warning !? !!
fm2. <- lmrob(nform, data = d1k27, cov = ".vcov.w", trace = TRUE)
## now lmrob takes care of NA coefficients automatically
lmrob(y ~ poly(a,2)-a + poly(tf, 2)-tf + poly(A, 2)-A + . , data = d1k27)
showProc.time() ## 4.24
}) ## only if(doExtras) -----------------------------------------------------
## test exact fit property
set.seed(20)
data <- data.frame(y=c(rep.int(0, 20), round(100*rnorm(5))),
group=rep(letters[1:5], each=5))
x <- model.matrix(y ~ group, data)
(ini <- lmrob.S(x, data$y, lmrob.control()))
(ret <- lmrob(y ~ group, data))
summary(ret)
showProc.time() ## 4.24
##--- continuous x -- exact fit -- inspired by Thomas Mang's real data example
## Build a small (n = 9) x/y test data frame on x = -3..3 (0 and 1 doubled)
## with y = x + 5 exactly, except the rows indexed by `iN`, which are shifted
## up by `dN` -- i.e. an "exact fit" design with length(iN) outliers.
##
## @param iN integer row indices (in 1..9) to perturb; 1 <= length(iN) <= 5.
## @param dN numeric offsets added to y[iN]; defaults to 1:length(iN).
## @return data.frame(x, y) with 9 rows.
mkD9 <- function(iN, dN = seq_len(m)) {
  ## `m` must be assigned before dN's default is forced; the original hid the
  ## assignment inside stopifnot() via `->`, relying on argument evaluation
  ## order -- make it explicit instead. seq_len(m) == 1:m for the m >= 1
  ## enforced below, and is also safe against the 1:0 footgun.
  m <- length(iN)
  stopifnot(m == length(dN), 1 <= m, m <= 5,
            iN == as.integer(iN), is.numeric(dN), !is.na(dN))
  x <- c(-3:0, 0:1, 1:3) # {n=9; sorted; x= 0, 1 are "doubled"}
  y <- x + 5
  y[iN] <- y[iN] + dN
  data.frame(x, y)
}
## Seed the RNG via set.seed(...) and return the resulting .Random.seed state,
## so the full generator state can be passed as lmrob.control(seed = ...).
mkRS <- function(...) { set.seed(...); .Random.seed }
d <- mkD9(c(1L,3:4, 7L))
rs2 <- mkRS(2)
Se <- tryCatch(error = identity,
with(d, lmrob.S(cbind(1,x), y, lmrob.control("KS2014", seed=rs2))))
## gave DGELS rank error {for lmrob.c+wg..}
if(inherits(Se, "error")) {
cat("Caught ")
print(Se)
} else withAutoprint({ ## no error
coef(Se)
stopifnot(coef(Se) == c(5, 1)) # was (0 0)
residuals(Se) # was == y ---- FIXME
})
## try 100 different seeds
repS <- lapply(1:100, function(ii) tryCatch(error = identity,
with(d, lmrob.S(cbind(1,x), y, lmrob.control("KS2014", seed = mkRS(ii))))))
if(FALSE)
## was
str(unique(repS))## ==> 100 times the same error
## now completely different: *all* returned properly
str(cfS <- t(sapply(repS, coef))) # all numeric -- not *one* error --
## even all the *same* (5 1) solution:
(ucfS <- unique(cfS))
stopifnot(identical(ucfS, array(c(5, 1), dim = 1:2, dimnames = list(NULL, c("", "x")))))
## *Not* "KS2014" but the defaults works *all the time* (!)
repS0 <- lapply(1:100, function(ii) tryCatch(error = identity,
with(d, lmrob.S(cbind(1,x), y, lmrob.control(seed = mkRS(ii))))))
summary(warnings())
## 100 identical warnings:
## In lmrob.S(cbind(1, x), y, lmrob.control(seed = mkRS(ii))) :
## S-estimated scale == 0: Probably exact fit; check your data
str(cfS0 <- t(sapply(repS0, coef))) # all numeric -- not *one* error
## even all the same *and* the same as "KS2014"
(ucfS0 <- unique(cfS0))
stopifnot(nrow(ucfS0) == 1L,
ucfS0 == c(5,1))
d9L <- list(
mkD9(c(1L,3L, 5L, 7L))
, mkD9(c(1L,3L, 8:9))
, mkD9(2L*(1:4))
)
if(doExtras) {
sfsmisc::mult.fig(length(d9L)); invisible(lapply(d9L, function(d) plot(y ~ x, data=d)))
}
## Fit a robust regression to `dat` (needs columns x, y) and optionally plot it.
##   meth = "S"  -> lmrob.S() S-estimator on the design cbind(1, x)
##   meth = "MM" -> full lmrob() MM-estimator via the formula interface
## Errors are captured and returned (instead of thrown), so callers can
## lapply() this over several data sets without aborting on a failing fit.
dorob <- function(dat, control=lmrob.control(...), meth = c("S", "MM"),
                  doPl=interactive(), cex=1, ...) {
  meth <- match.arg(meth)
  stopifnot(is.data.frame(dat), c("x","y") %in% names(dat), is.list(control))
  if(doPl) plot(y ~ x, data=dat) ## with(dat, n.plot(x, y, cex=cex))
  ## tryCatch(error = identity): an error condition becomes the return value
  ans <- tryCatch(error = identity,
                  switch(meth
                       , "S" = with(dat, lmrob.S(cbind(1,x), y, control))
                       , "MM"= lmrob(y ~ x, data = dat, control=control)
                       , stop("invalid 'meth'")))
  if(!doPl)
    return(ans)
  ## else: annotate the open plot with the fitted line, or the error message
  if(!inherits(ans, "error")) {
    abline(coef(ans))
  } else { # error
    mtext(paste(paste0("lmrob.", meth), "Error:", conditionMessage(ans)))
  }
  invisible(ans)
}
## a bad case -- much better new robustbase >= 0.99-0
Se <- dorob(d9L[[1]], lmrob.control("KS2014", mkRS(2), trace.lev=4))
## was really bad -- ended returning coef = (0 0); fitted == 0, residuals == 0 !!
if(doExtras) sfsmisc::mult.fig(length(d9L))
r0 <- lapply(d9L, dorob, seed=rs2, doPl=doExtras) # 3 x ".. exact fit" warning
if(doExtras) print(r0)
## back to 3 identical fits: (5 1)
(cf0 <- sapply(r0, coef))
stopifnot(cf0 == c(5,1))
if(doExtras) sfsmisc::mult.fig(length(d9L))
### Here, all 3 were "0-models"
r14 <- lapply(d9L, dorob, control=lmrob.control("KS2014", seed=rs2), doPl=doExtras)
## --> 3 (identical) warnings: In lmrob.S(cbind(1, x), y, control) :#
## S-estimated scale == 0: Probably exact fit; check your data
## now *does* plot
if(doExtras) print(r14)
## all 3 are "identical"
(cf14 <- sapply(r14, coef))
identical(cf0, cf14) # see TRUE; test a bit less:
stopifnot(all.equal(cf0, cf14, tol=1e-15))
## use "large n"
ctrl.LRG.n <- lmrob.control("KS2014", seed=rs2, trace.lev = if(doExtras) 2 else 1, # 3: too much (for now),
nResample = 60,
fast.s.large.n = 7, n.group = 3, groups = 2)
rLrg.n <- lapply(d9L, \(d) lmrob.S(cbind(1,d$x), d$y, ctrl.LRG.n))
summary(warnings())
sapply(rLrg.n, coef)
## currently ... .... really would want always (5 1)
## [,1] [,2] [,3]
## [1,] 5 5 7.333333
## [2,] 1 1 1.666667
## ==> use lmrob() instead of lmrob.S():
mm0 <- lapply(d9L, dorob, meth = "MM", seed=rs2, doPl=doExtras) # looks all fine -- no longer: error in [[3]]
if(doExtras) print(mm0)
## now, the 3rd one errors (on Linux, not on M1 mac!)
(cm0 <- sapply(mm0, function(.) if(inherits(.,"error")) noquote(paste("Caught", as.character(.))) else coef(.)))
## no longer needed
c0.12 <- rbind(`(Intercept)` = c(5.7640215, 6.0267156),
x = c(0.85175883, 1.3823841))
if(is.list(cm0)) { ## after error {was on Linux+Win, not on M1 mac}:
## NB: This does *not* happen on Macbuilder -- there the result it cf = (5 1) !!
stopifnot(all.equal(tol = 1e-8, # seen 4.4376e-9
c0.12, simplify2array(cm0[1:2])))
print(cm0[[3]])
## FIXME?: Caught Error in eigen(ret, symmetric = TRUE): infinite or missing values in 'x'\n
} else if(is.matrix(cm0)) { # when no error happened
k <- ncol(cm0)
stopifnot(all.equal(tol = 1e-8, rbind(`(Intercept)` = rep(5,k), "x" = rep(1,k)), cm0))
} else warning("not yet encountered this case {and it should not happen}")
se3 <- lmrob(y ~ x, data=d9L[[3]], init = r0[[3]], seed=rs2, trace.lev=6)
if(doExtras) sfsmisc::mult.fig(length(d9L))
### Here, all 3 were "0-models"
## now, have 3 *different* cases {with this seed}
## [1] : init fails (-> r14[[1]] above)
## [2] : init s=0, b=(5,1) .. but residuals(),fitted() wrong
## [3] : init s=0, b=(5,1) ..*and* residuals(),fitted() are good
cm14 <- lapply(d9L, dorob, meth = "MM", control=lmrob.control("KS2014", seed=rs2), doPl=doExtras)
## now, first is error; for others, coef = (5, 1) are correct:
stopifnot(exprs = {
sapply(cm14[-1], coef) == c(5,1)
sapply(cm14[-1], sigma) == 0
})
m2 <- cm14[[2]]
summary(m2) # prints quite nicely; and this is perfect (for scale=0), too:
## {residual != 0 <==> weights = 0}:
cbind(rwgt = weights(m2, "rob"), res = residuals(m2), fit = fitted(m2), y = d9L[[2]][,"y"])
sapply(cm14, residuals) ## now, [2] is good; [3] still wrong - FIXME
sapply(cm14, fitted)
sapply(cm14, weights, "robust")## [2]: 0 1 0 1 1 1 1 0 0; [3]: all 0
## (unfinished ... do *test* once we've checked platform consistency)
summary(warnings())
showProc.time()
| /tests/subsample.R | no_license | cran/robustbase | R | false | false | 12,864 | r | ### test subsample
### LU decomposition and singular subsamples handling
require(robustbase)
source(system.file("xtraR/subsample-fns.R", package = "robustbase", mustWork=TRUE))
## instead of relying on system.file("test-tools-1.R", package="Matrix"):
source(system.file("xtraR/test-tools.R", package = "robustbase")) # assert.EQ(), showProc.time() ..
options(nwarnings = 4e4)
cat("doExtras:", doExtras <- robustbase:::doExtras(),"\n")
showProc.time()
A <- rbind(c(0.001, 1),
c(1, 2))
set.seed(11)
str(sa <- tstSubsample(A))
A <- rbind(c(3, 17, 10),
c(2, 4, -2),
c(6, 18, 12))
tstSubsample(A)
## test some random matrix
set.seed(1002)
A <- matrix(rnorm(100), 10)
tstSubsample(A)
## test singular matrix handling
A <- rbind(c(1, 0, 0),
c(0, 1, 0),
c(0, 1, 0),
c(0, 0, 1))
tstSubsample(A)
## test subsample with mts > 0
data <- data.frame(y = rnorm(9), expand.grid(A = letters[1:3], B = letters[1:3]))
x <- model.matrix(y ~ ., data)
y <- data$y
## this should produce a warning and return status == 2
showSys.time(z <- Rsubsample(x, y, mts=2))
stopifnot(z$status == 2)
## test equilibration
## columns only
X <- rbind(c(1e-7, 1e-10),
c(2 , 0.2))
y <- 1:2
tstSubsample(t(X), y)
## rows only
X <- rbind(c(1e-7, 1e+10),
c(2 , 0.2))
y <- 1:2
tstSubsample(X, y)
## both
X <- rbind(c(1e-7, 2 ),
c(1e10, 2e12))
y <- 1:2
tstSubsample(X, y)
showProc.time()
## test real data example
data(possumDiv)## 151 * 9; the last two variables are factors
with(possumDiv, table(eucalyptus, aspect))
mf <- model.frame(Diversity ~ .^2, possumDiv)
X <- model.matrix(mf, possumDiv)
y <- model.response(mf)
stopifnot(qr(X)$rank == ncol(X))
## this used to fail: different pivots in step 37
str(s1 <- tstSubsample(X, y))
s2 <- tstSubsample(X / max(abs(X)), y / max(abs(X)))
s3 <- tstSubsample(X * 2^-50, y * 2^-50)
## all components *BUT* x, y, lu, Dr, Dc, rowequ, colequ :
nm <- names(s1); nm <- nm[is.na(match(nm, c("x","y","lu", "Dr", "Dc", "rowequ", "colequ")))]
stopifnot(all.equal(s1[nm], s2[nm], tolerance=1e-10),
all.equal(s1[nm], s3[nm], tolerance=1e-10))
showProc.time()
set.seed(10)
nsing <- sum(replicate(if(doExtras) 200 else 20, tstSubsampleSing(X, y)))
stopifnot(nsing == 0)
showProc.time()
## test example with many categorical predictors - 2 different random seeds:
set.seed(10) ; r1 <- lmrob(Diversity ~ .^2 , data = possumDiv, cov="none")
set.seed(108); r2 <- lmrob(Diversity ~ .^2 , data = possumDiv, cov="none")# lmrob.S() failed
(i1 <- r1$init) # print(<lmrob.S>)
(i2 <- r2$init) # ... and they are "somewhat" close:
## fixed: i2 previously copied r1$init, so the two inits compared here were
## identical by construction and the comparison was vacuous
stopifnot(all.equal(r1[names(r1) != "init.S"],
r2[names(r2) != "init.S"], tol = 0.40))
c1 <- coef(r1)
c2 <- coef(r2)
relD <- (c1-c2)*2/(c1+c2)
xCf <- which(abs(relD) >= 10)
stopifnot(exprs = {
identical(xCf, c(`Bark:aspectSW-NW` = 46L))
all.equal(c1[-xCf], c2[-xCf], tol = 0.35) # 0.3418
sign(c1[-xCf]) == sign(c2[-xCf])
})
showProc.time()
## investigate problematic subsample:
idc <- 1 + c(140, 60, 12, 13, 89, 90, 118, 80, 17, 134, 59, 94, 36,
43, 46, 93, 107, 62, 57, 116, 11, 45, 35, 38, 120, 34, 29,
33, 147, 105, 115, 92, 61, 91, 104, 141, 138, 129, 130, 84,
119, 132, 6, 135, 112, 16, 67, 41, 102, 76, 111, 82, 148, 24,
131, 10, 96, 0, 87, 21, 127, 56, 124)
rc <- lm(Diversity ~ .^2 , data = possumDiv, subset = idc)
X <- model.matrix(rc)
y <- possumDiv$Diversity[idc]
tstSubsample(X, y)## have different pivots ... could not find non-singular
lu <- LU.gaxpy(t(X))
stopifnot(lu$sing)
zc <- Rsubsample(X, y)
stopifnot(zc$status > 0)
## column 52 is linearly dependent and should have been discarded
## qr(t(X))$pivot
image(as(round(zc$lu - (lu$L + lu$U - diag(nrow(lu$U))), 10), "Matrix"))
image(as( sign(zc$lu) - sign(lu$L + lu$U - diag(nrow(lu$U))), "Matrix"))
showProc.time()
## test equilibration
## colequ only
X <- matrix(c(1e-7, 2, 1e-10, 0.2), 2)
y <- 1:2
tstSubsample(t(X), y)
## rowequ only
X <- matrix(c(1e-7, 2, 1e10, 0.2), 2)
y <- 1:2
tstSubsample(X, y)
## both
X <- matrix(c(1e-7, 1e10, 2, 2e12), 2)
y <- 1:2
tstSubsample(X, y)
showProc.time()
### real data, see MM's ~/R/MM/Pkg-ex/robustbase/hedlmrob.R
## close to singular cov():
attach(system.file("external", "d1k27.rda", package="robustbase", mustWork=TRUE))
fm1 <- lmrob(y ~ a + I(a^2) + tf + I(tf^2) + A + I(A^2) + . , data = d1k27)
## ^^^^^ gave error, earlier, now with a warning -- use ".vcov.w"
## --> cov = ".vcov.w"
fm2 <- lmrob(y ~ a + I(a^2) + tf + I(tf^2) + A + I(A^2) + . , data = d1k27,
cov = ".vcov.w", trace = TRUE)
showProc.time()# 2.77
if(doExtras) withAutoprint({##---------------------------------------------------------
## Q: does it change to use numeric instead of binary factors ?
## A: not really ..
d1k.n <- d1k27
d1k.n[-(1:5)] <- lapply(d1k27[,-(1:5)], as.numeric)
fm1.n <- lmrob(y ~ a + I(a^2) + tf + I(tf^2) + A + I(A^2) + . , data = d1k.n)
fm2.n <- lmrob(y ~ a + I(a^2) + tf + I(tf^2) + A + I(A^2) + . , data = d1k.n,
cov = ".vcov.w", trace = 2)
summary(weights(fm1, type="robustness"))
hist(weights(fm1, type="robustness"), main="robustness weights of fm1")
rug(weights(fm1, type="robustness"))
showProc.time()## 2.88
##
fmc <- lm (y ~ poly(a,2)-a + poly(tf, 2)-tf + poly(A, 2)-A + . , data = d1k27)
summary(fmc)
## -> has NA's for 'a, tf, A' --- bad that it did *not* work to remove them
nform <- update(formula(fm1), ~ .
+poly(A,2) -A -I(A^2)
+poly(a,2) -a -I(a^2)
+poly(tf,2) -tf -I(tf^2))
fm1. <- lmrob(nform, data = d1k27)# now w/o warning !? !!
fm2. <- lmrob(nform, data = d1k27, cov = ".vcov.w", trace = TRUE)
## now lmrob takes care of NA coefficients automatically
lmrob(y ~ poly(a,2)-a + poly(tf, 2)-tf + poly(A, 2)-A + . , data = d1k27)
showProc.time() ## 4.24
}) ## only if(doExtras) -----------------------------------------------------
## test exact fit property
set.seed(20)
data <- data.frame(y=c(rep.int(0, 20), round(100*rnorm(5))),
group=rep(letters[1:5], each=5))
x <- model.matrix(y ~ group, data)
(ini <- lmrob.S(x, data$y, lmrob.control()))
(ret <- lmrob(y ~ group, data))
summary(ret)
showProc.time() ## 4.24
##--- continuous x -- exact fit -- inspired by Thomas Mang's real data example
## Build a small (n = 9) x/y test data frame on x = -3..3 (0 and 1 doubled)
## with y = x + 5 exactly, except the rows indexed by `iN`, which are shifted
## up by `dN` -- i.e. an "exact fit" design with length(iN) outliers.
##
## @param iN integer row indices (in 1..9) to perturb; 1 <= length(iN) <= 5.
## @param dN numeric offsets added to y[iN]; defaults to 1:length(iN).
## @return data.frame(x, y) with 9 rows.
mkD9 <- function(iN, dN = seq_len(m)) {
  ## `m` must be assigned before dN's default is forced; the original hid the
  ## assignment inside stopifnot() via `->`, relying on argument evaluation
  ## order -- make it explicit instead. seq_len(m) == 1:m for the m >= 1
  ## enforced below, and is also safe against the 1:0 footgun.
  m <- length(iN)
  stopifnot(m == length(dN), 1 <= m, m <= 5,
            iN == as.integer(iN), is.numeric(dN), !is.na(dN))
  x <- c(-3:0, 0:1, 1:3) # {n=9; sorted; x= 0, 1 are "doubled"}
  y <- x + 5
  y[iN] <- y[iN] + dN
  data.frame(x, y)
}
## Seed the RNG via set.seed(...) and return the resulting .Random.seed state,
## so the full generator state can be passed as lmrob.control(seed = ...).
mkRS <- function(...) { set.seed(...); .Random.seed }
d <- mkD9(c(1L,3:4, 7L))
rs2 <- mkRS(2)
Se <- tryCatch(error = identity,
with(d, lmrob.S(cbind(1,x), y, lmrob.control("KS2014", seed=rs2))))
## gave DGELS rank error {for lmrob.c+wg..}
if(inherits(Se, "error")) {
cat("Caught ")
print(Se)
} else withAutoprint({ ## no error
coef(Se)
stopifnot(coef(Se) == c(5, 1)) # was (0 0)
residuals(Se) # was == y ---- FIXME
})
## try 100 different seeds
repS <- lapply(1:100, function(ii) tryCatch(error = identity,
with(d, lmrob.S(cbind(1,x), y, lmrob.control("KS2014", seed = mkRS(ii))))))
if(FALSE)
## was
str(unique(repS))## ==> 100 times the same error
## now completely different: *all* returned properly
str(cfS <- t(sapply(repS, coef))) # all numeric -- not *one* error --
## even all the *same* (5 1) solution:
(ucfS <- unique(cfS))
stopifnot(identical(ucfS, array(c(5, 1), dim = 1:2, dimnames = list(NULL, c("", "x")))))
## *Not* "KS2014" but the defaults works *all the time* (!)
repS0 <- lapply(1:100, function(ii) tryCatch(error = identity,
with(d, lmrob.S(cbind(1,x), y, lmrob.control(seed = mkRS(ii))))))
summary(warnings())
## 100 identical warnings:
## In lmrob.S(cbind(1, x), y, lmrob.control(seed = mkRS(ii))) :
## S-estimated scale == 0: Probably exact fit; check your data
str(cfS0 <- t(sapply(repS0, coef))) # all numeric -- not *one* error
## even all the same *and* the same as "KS2014"
(ucfS0 <- unique(cfS0))
stopifnot(nrow(ucfS0) == 1L,
ucfS0 == c(5,1))
d9L <- list(
mkD9(c(1L,3L, 5L, 7L))
, mkD9(c(1L,3L, 8:9))
, mkD9(2L*(1:4))
)
if(doExtras) {
sfsmisc::mult.fig(length(d9L)); invisible(lapply(d9L, function(d) plot(y ~ x, data=d)))
}
## Fit a robust regression to `dat` (needs columns x, y) and optionally plot it.
##   meth = "S"  -> lmrob.S() S-estimator on the design cbind(1, x)
##   meth = "MM" -> full lmrob() MM-estimator via the formula interface
## Errors are captured and returned (instead of thrown), so callers can
## lapply() this over several data sets without aborting on a failing fit.
dorob <- function(dat, control=lmrob.control(...), meth = c("S", "MM"),
                  doPl=interactive(), cex=1, ...) {
  meth <- match.arg(meth)
  stopifnot(is.data.frame(dat), c("x","y") %in% names(dat), is.list(control))
  if(doPl) plot(y ~ x, data=dat) ## with(dat, n.plot(x, y, cex=cex))
  ## tryCatch(error = identity): an error condition becomes the return value
  ans <- tryCatch(error = identity,
                  switch(meth
                       , "S" = with(dat, lmrob.S(cbind(1,x), y, control))
                       , "MM"= lmrob(y ~ x, data = dat, control=control)
                       , stop("invalid 'meth'")))
  if(!doPl)
    return(ans)
  ## else: annotate the open plot with the fitted line, or the error message
  if(!inherits(ans, "error")) {
    abline(coef(ans))
  } else { # error
    mtext(paste(paste0("lmrob.", meth), "Error:", conditionMessage(ans)))
  }
  invisible(ans)
}
## a bad case -- much better new robustbase >= 0.99-0
Se <- dorob(d9L[[1]], lmrob.control("KS2014", mkRS(2), trace.lev=4))
## was really bad -- ended returning coef = (0 0); fitted == 0, residuals == 0 !!
if(doExtras) sfsmisc::mult.fig(length(d9L))
r0 <- lapply(d9L, dorob, seed=rs2, doPl=doExtras) # 3 x ".. exact fit" warning
if(doExtras) print(r0)
## back to 3 identical fits: (5 1)
(cf0 <- sapply(r0, coef))
stopifnot(cf0 == c(5,1))
if(doExtras) sfsmisc::mult.fig(length(d9L))
### Here, all 3 were "0-models"
r14 <- lapply(d9L, dorob, control=lmrob.control("KS2014", seed=rs2), doPl=doExtras)
## --> 3 (identical) warnings: In lmrob.S(cbind(1, x), y, control) :#
## S-estimated scale == 0: Probably exact fit; check your data
## now *does* plot
if(doExtras) print(r14)
## all 3 are "identical"
(cf14 <- sapply(r14, coef))
identical(cf0, cf14) # see TRUE; test a bit less:
stopifnot(all.equal(cf0, cf14, tol=1e-15))
## use "large n"
ctrl.LRG.n <- lmrob.control("KS2014", seed=rs2, trace.lev = if(doExtras) 2 else 1, # 3: too much (for now),
nResample = 60,
fast.s.large.n = 7, n.group = 3, groups = 2)
rLrg.n <- lapply(d9L, \(d) lmrob.S(cbind(1,d$x), d$y, ctrl.LRG.n))
summary(warnings())
sapply(rLrg.n, coef)
## currently ... .... really would want always (5 1)
## [,1] [,2] [,3]
## [1,] 5 5 7.333333
## [2,] 1 1 1.666667
## ==> use lmrob() instead of lmrob.S():
mm0 <- lapply(d9L, dorob, meth = "MM", seed=rs2, doPl=doExtras) # looks all fine -- no longer: error in [[3]]
if(doExtras) print(mm0)
## now, the 3rd one errors (on Linux, not on M1 mac!)
(cm0 <- sapply(mm0, function(.) if(inherits(.,"error")) noquote(paste("Caught", as.character(.))) else coef(.)))
## no longer needed
c0.12 <- rbind(`(Intercept)` = c(5.7640215, 6.0267156),
x = c(0.85175883, 1.3823841))
if(is.list(cm0)) { ## after error {was on Linux+Win, not on M1 mac}:
## NB: This does *not* happen on Macbuilder -- there the result it cf = (5 1) !!
stopifnot(all.equal(tol = 1e-8, # seen 4.4376e-9
c0.12, simplify2array(cm0[1:2])))
print(cm0[[3]])
## FIXME?: Caught Error in eigen(ret, symmetric = TRUE): infinite or missing values in 'x'\n
} else if(is.matrix(cm0)) { # when no error happened
k <- ncol(cm0)
stopifnot(all.equal(tol = 1e-8, rbind(`(Intercept)` = rep(5,k), "x" = rep(1,k)), cm0))
} else warning("not yet encountered this case {and it should not happen}")
se3 <- lmrob(y ~ x, data=d9L[[3]], init = r0[[3]], seed=rs2, trace.lev=6)
if(doExtras) sfsmisc::mult.fig(length(d9L))
### Here, all 3 were "0-models"
## now, have 3 *different* cases {with this seed}
## [1] : init fails (-> r14[[1]] above)
## [2] : init s=0, b=(5,1) .. but residuals(),fitted() wrong
## [3] : init s=0, b=(5,1) ..*and* residuals(),fitted() are good
cm14 <- lapply(d9L, dorob, meth = "MM", control=lmrob.control("KS2014", seed=rs2), doPl=doExtras)
## now, first is error; for others, coef = (5, 1) are correct:
stopifnot(exprs = {
sapply(cm14[-1], coef) == c(5,1)
sapply(cm14[-1], sigma) == 0
})
m2 <- cm14[[2]]
summary(m2) # prints quite nicely; and this is perfect (for scale=0), too:
## {residual != 0 <==> weights = 0}:
cbind(rwgt = weights(m2, "rob"), res = residuals(m2), fit = fitted(m2), y = d9L[[2]][,"y"])
sapply(cm14, residuals) ## now, [2] is good; [3] still wrong - FIXME
sapply(cm14, fitted)
sapply(cm14, weights, "robust")## [2]: 0 1 0 1 1 1 1 0 0; [3]: all 0
## (unfinished ... do *test* once we've checked platform consistency)
summary(warnings())
showProc.time()
|
context("checkbox_suffixes.R")
load(test_path("testdata", "RedcapProject_RedcapTestApi.Rdata"))
purgeProject(rcon, purge_all = TRUE)
rcon$flush_all()
restoreProject(RedcapProject_RedcapTestApi,
rcon)
CheckboxMetaData <- exportMetaData(rcon)
CheckboxMetaData <- CheckboxMetaData[CheckboxMetaData$field_name %in% c("prereq_checkbox"), ]
# For the purpose of testing, we are going to add a couple more options to these meta data
# Doing it this way allows us to add tests for any code/label mapping without having to
# alter the testing database.
CheckboxMetaData$select_choices_or_calculations <-
paste0(CheckboxMetaData$select_choices_or_calculations,
" | lowercase, Lowercase code | mixedCase, Mixed case code | 12ab, alpha, numeric | use_underscore, Use an underscore")
# Verifies that checkbox_suffixes() expands a checkbox field into one
# "<field>___<code>" variable name per choice (codes lower-cased, alphanumeric
# and underscore codes kept), paired with labels of the form
# "<field label>: <choice label>".
test_that(
  "Checkbox suffixes are correctly generated",
  {
    expect_equal(
      checkbox_suffixes(fields = c("prereq_checkbox"),
                        meta_data = CheckboxMetaData),
      list(name_suffix = c(prereq_checkbox1 = "prereq_checkbox___1",
                           prereq_checkbox2 = "prereq_checkbox___2",
                           prereq_checkbox3 = "prereq_checkbox___abc",
                           prereq_checkbox4 = "prereq_checkbox___4",
                           prereq_checkbox5 = "prereq_checkbox___lowercase",
                           prereq_checkbox6 = "prereq_checkbox___mixedcase",
                           prereq_checkbox7 = "prereq_checkbox___12ab",
                           prereq_checkbox8 = "prereq_checkbox___use_underscore"),
           label_suffix = c(prereq_checkbox1 = "Pre-requisite as a checkbox: Checkbox1",
                            prereq_checkbox2 = "Pre-requisite as a checkbox: Checkbox2",
                            prereq_checkbox3 = "Pre-requisite as a checkbox: CheckboxABC",
                            prereq_checkbox4 = "Pre-requisite as a checkbox: Do not use in branching logic",
                            prereq_checkbox5 = "Pre-requisite as a checkbox: Lowercase code",
                            prereq_checkbox6 = "Pre-requisite as a checkbox: Mixed case code",
                            prereq_checkbox7 = "Pre-requisite as a checkbox: alpha, numeric",
                            prereq_checkbox8 = "Pre-requisite as a checkbox: Use an underscore"))
    )
  }
)
| /tests/testthat/test-checkbox_suffixes.R | no_license | cran/redcapAPI | R | false | false | 2,369 | r | context("checkbox_suffixes.R")
load(test_path("testdata", "RedcapProject_RedcapTestApi.Rdata"))
purgeProject(rcon, purge_all = TRUE)
rcon$flush_all()
restoreProject(RedcapProject_RedcapTestApi,
rcon)
CheckboxMetaData <- exportMetaData(rcon)
CheckboxMetaData <- CheckboxMetaData[CheckboxMetaData$field_name %in% c("prereq_checkbox"), ]
# For the purpose of testing, we are going to add a couple more options to these meta data
# Doing it this way allows us to add tests for any code/label mapping without having to
# alter the testing database.
CheckboxMetaData$select_choices_or_calculations <-
paste0(CheckboxMetaData$select_choices_or_calculations,
" | lowercase, Lowercase code | mixedCase, Mixed case code | 12ab, alpha, numeric | use_underscore, Use an underscore")
test_that(
"Checkbox suffixes are correctly generated",
{
expect_equal(
checkbox_suffixes(fields = c("prereq_checkbox"),
meta_data = CheckboxMetaData),
list(name_suffix = c(prereq_checkbox1 = "prereq_checkbox___1",
prereq_checkbox2 = "prereq_checkbox___2",
prereq_checkbox3 = "prereq_checkbox___abc",
prereq_checkbox4 = "prereq_checkbox___4",
prereq_checkbox5 = "prereq_checkbox___lowercase",
prereq_checkbox6 = "prereq_checkbox___mixedcase",
prereq_checkbox7 = "prereq_checkbox___12ab",
prereq_checkbox8 = "prereq_checkbox___use_underscore"),
label_suffix = c(prereq_checkbox1 = "Pre-requisite as a checkbox: Checkbox1",
prereq_checkbox2 = "Pre-requisite as a checkbox: Checkbox2",
prereq_checkbox3 = "Pre-requisite as a checkbox: CheckboxABC",
prereq_checkbox4 = "Pre-requisite as a checkbox: Do not use in branching logic",
prereq_checkbox5 = "Pre-requisite as a checkbox: Lowercase code",
prereq_checkbox6 = "Pre-requisite as a checkbox: Mixed case code",
prereq_checkbox7 = "Pre-requisite as a checkbox: alpha, numeric",
prereq_checkbox8 = "Pre-requisite as a checkbox: Use an underscore"))
)
}
)
|
## plot4.R -- draw the 2x2 panel of household power-consumption plots for
## 2007-02-01/02 and save it to plot4.png (480x480 px). Expects the file
## household_power_consumption.txt (';'-separated, "?" for missing) in the
## working directory.
library(graphics)
#Define headers
header <- read.table("household_power_consumption.txt", nrows = 1, sep = ";")
#Read table with all column values read as characters
HHPwConsump <- read.table("household_power_consumption.txt", header = FALSE, sep = ";", skip = 1, colClasses = rep("character", 9))
#Set headers as column names
colnames(HHPwConsump) <- unlist(header)
#Find and subset relevant data (dates are stored as d/m/Y strings)
HHPwConsump_sub <- HHPwConsump[which(HHPwConsump$Date == "1/2/2007" | HHPwConsump$Date == "2/2/2007"), ]
#For columns that should be numeric, replace "?" (for missing data) with NA
HHPwConsump_sub[, 3:9][HHPwConsump_sub[, 3:9] == "?"] <- NA
#For columns that should be numeric, set their value type to numeric
# (unlist flattens column-major and assignment refills column-major, so every
#  value maps back to its original cell)
HHPwConsump_sub[, 3:9] <- as.numeric(unlist(HHPwConsump_sub[, 3:9]))
#Concatenate Date and Time columns for simplicity
DateTime <- paste(HHPwConsump_sub$Date, HHPwConsump_sub$Time, sep = " ")
#Change Date/Time data from character representation to a POSIXlt object
DataTimes <- strptime(DateTime, "%d/%m/%Y %H:%M:%S")
#Create png file of the plot
png(filename = "plot4.png", width = 480, height = 480, units = "px")
#Set graphical parameters such that more than one graph can appear in a plot
# (mfcol fills column-wise: top-left, bottom-left, top-right, bottom-right)
par(mfcol = c(2, 2))
#Top-left: Global Active Power over time (same as course plot 2)
plot(DataTimes, HHPwConsump_sub$Global_active_power, type = "l", xlab = "", ylab = "Global Active Power")
#Bottom-left: the three sub-metering series (same as course plot 3)
plot(DataTimes, HHPwConsump_sub$Sub_metering_1, type = "l", ylab = "Energy sub metering", xlab = "", col = "black")
lines(DataTimes, HHPwConsump_sub$Sub_metering_2, col = "red")
lines(DataTimes, HHPwConsump_sub$Sub_metering_3, col = "blue")
legend("topright", legend = c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"), lty = 1, col = c("black", "red", "blue"), bty = "n")
#Top-right: Voltage over time
plot(DataTimes, HHPwConsump_sub$Voltage, type = "l", xlab = "datetime", ylab = "Voltage")
#Bottom-right: Global reactive power over time
plot(DataTimes, HHPwConsump_sub$Global_reactive_power, type = "l", xlab = "datetime", ylab = "Global_reactive_power")
dev.off() | /plot4.R | no_license | sng0616/ExData_Plotting1 | R | false | false | 1,975 | r | library(graphics)
# Pull the column names from the first row of the raw data file.
col_names <- read.table("household_power_consumption.txt", nrows = 1, sep = ";")

# Load the remaining rows, forcing every field to character for now so the
# missing-value marker "?" survives until it is handled explicitly below.
power_raw <- read.table("household_power_consumption.txt",
                        header = FALSE, sep = ";", skip = 1,
                        colClasses = rep("character", 9))
colnames(power_raw) <- unlist(col_names)

# Restrict to the two dates of interest.
keep <- which(power_raw$Date == "1/2/2007" | power_raw$Date == "2/2/2007")
power_feb <- power_raw[keep, ]

# The file encodes missing measurements as "?"; convert those to NA and then
# coerce the measurement columns (3 through 9) to numeric.
power_feb[, 3:9][power_feb[, 3:9] == "?"] <- NA
power_feb[, 3:9] <- as.numeric(unlist(power_feb[, 3:9]))

# Build timestamps from the Date and Time columns (POSIXlt via strptime).
stamp_text <- paste(power_feb$Date, power_feb$Time, sep = " ")
stamps <- strptime(stamp_text, "%d/%m/%Y %H:%M:%S")

# Draw four panels into plot4.png; mfcol fills column-wise.
png(filename = "plot4.png", width = 480, height = 480, units = "px")
par(mfcol = c(2, 2))

# Global active power over time.
plot(stamps, power_feb$Global_active_power, type = "l",
     xlab = "", ylab = "Global Active Power")

# The three sub-metering series with a legend.
plot(stamps, power_feb$Sub_metering_1, type = "l",
     ylab = "Energy sub metering", xlab = "", col = "black")
lines(stamps, power_feb$Sub_metering_2, col = "red")
lines(stamps, power_feb$Sub_metering_3, col = "blue")
legend("topright",
       legend = c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"),
       lty = 1, col = c("black", "red", "blue"), bty = "n")

# Voltage over time.
plot(stamps, power_feb$Voltage, type = "l",
     xlab = "datetime", ylab = "Voltage")

# Global reactive power over time.
plot(stamps, power_feb$Global_reactive_power, type = "l",
     xlab = "datetime", ylab = "Global_reactive_power")

dev.off()
\name{is.basis}
\alias{is.basis}
\title{
Confirm Object is Class "Basisfd"
}
\description{
Check that an argument is a basis object.
}
\usage{
is.basis(basisobj)
}
\arguments{
\item{basisobj}{
an object to be checked.
}
}
\value{
a logical value:
\code{TRUE} if the class is correct, \code{FALSE} otherwise.
}
\seealso{
\code{\link{is.fd}},
\code{\link{is.fdPar}},
\code{\link{is.Lfd}}
}
% docclass is function
\keyword{smooth}
| /man/is.basis.Rd | no_license | cran/fda | R | false | false | 432 | rd | \name{is.basis}
\alias{is.basis}
\title{
Confirm Object is Class "Basisfd"
}
\description{
Check that an argument is a basis object.
}
\usage{
is.basis(basisobj)
}
\arguments{
\item{basisobj}{
an object to be checked.
}
}
\value{
a logical value:
\code{TRUE} if the class is correct, \code{FALSE} otherwise.
}
\seealso{
\code{\link{is.fd}},
\code{\link{is.fdPar}},
\code{\link{is.Lfd}}
}
% docclass is function
\keyword{smooth}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.