content large_stringlengths 0 6.46M | path large_stringlengths 3 331 | license_type large_stringclasses 2 values | repo_name large_stringlengths 5 125 | language large_stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 4 6.46M | extension large_stringclasses 75 values | text stringlengths 0 6.46M |
|---|---|---|---|---|---|---|---|---|---|
# Run Nsim replicate trial simulations and accumulate operating
# characteristics across simulations.
# Requires (from the calling environment / scenario setup): Npart, scen_num,
# iat_mean, ptox, nsched, and the per-trial script onesim.R.
all_best_sched <- all_assn_sched <- all_obs_dlt <- all_ind_assn <- NULL
rmse <- NULL
Nsim <- 1000
for (s in 1:Nsim)
{
tryCatch(
{
# Progress message every 5% of simulations
if (s%%(Nsim/20)==0) cat("On simulation ",s,"...\n" ,sep="")
# Per-simulation seed so each replicate is reproducible
set.seed(121667+s)
#Vector of DLT probability thresholds for all participants
U <- runif(Npart, 0, 1)
# Runs one trial; defines best, assn_tbl, obs_dlt, assn, stop_trial, p3_post
source("onesim.R")
if (stop_trial)
cat("Simulation", s, "stopped - all schedules too toxic!\n")
all_best_sched[s] <- best
all_assn_sched <- rbind(all_assn_sched, assn_tbl)
all_obs_dlt <- rbind(all_obs_dlt, obs_dlt)
all_ind_assn <- rbind(all_ind_assn, assn)
# FIX: root-mean-square error squares the element-wise errors before
# averaging; the previous sqrt(mean(d)^2) computed |mean(d)| instead.
rmse <- c(rmse, sqrt(mean((p3_post - ptox[,3])^2)))
},
error=function(e){cat("ERROR :",conditionMessage(e), "\n")})
}
# Proportion of simulations selecting each schedule; prepending 1:nsched pads
# never-selected schedules into the table, and the -1 removes that padding.
best_sched <- (table(c(1:nsched, all_best_sched))-1)/Nsim
assn_sched <- apply(all_assn_sched, 2, mean)/Npart
ind_assn <- apply(all_ind_assn, 2, mean)
simdir <- "/Users/tombraun/Desktop/Sim Results/"
if (!dir.exists(simdir)) dir.create(simdir)
save.image(paste0(simdir, "scen", scen_num, iat_mean, "d.RData"))
| /CRM-SUP/one_scen.R | no_license | tombraun1216/CRM-with-Step-Up-Dosing | R | false | false | 1,092 | r | #Run simulations
# Run Nsim replicate trial simulations and accumulate operating characteristics.
# NOTE(review): relies on objects defined elsewhere (Npart, scen_num, iat_mean,
# ptox, nsched) and on the per-trial script onesim.R -- confirm they are in scope.
all_best_sched <- all_assn_sched <- all_obs_dlt <- all_ind_assn <- NULL
rmse <- NULL
Nsim <- 1000
for (s in 1:Nsim)
{
tryCatch(
{
# Progress message every 5% of simulations
if (s%%(Nsim/20)==0) cat("On simulation ",s,"...\n" ,sep="")
# Per-simulation seed for reproducibility
set.seed(121667+s)
#Vector of DLT probability thresholds for all participants
U <- runif(Npart, 0, 1)
# onesim.R runs one trial and defines best, assn_tbl, obs_dlt, assn,
# stop_trial and p3_post (among others)
source("onesim.R")
if (stop_trial)
cat("Simulation", s, "stopped - all schedules too toxic!\n")
all_best_sched[s] <- best
all_assn_sched <- rbind(all_assn_sched, assn_tbl)
all_obs_dlt <- rbind(all_obs_dlt, obs_dlt)
all_ind_assn <- rbind(all_ind_assn, assn)
# NOTE(review): sqrt(mean(d)^2) equals |mean(d)|, not RMSE; RMSE would be
# sqrt(mean((p3_post - ptox[,3])^2)) -- confirm which is intended
rmse <- c(rmse, sqrt(mean(p3_post-ptox[,3])^2))
},
error=function(e){cat("ERROR :",conditionMessage(e), "\n")})
}
# Proportion of simulations selecting each schedule; the 1:nsched prefix pads
# never-selected schedules into the table, and the -1 removes that padding
best_sched <- (table(c(1:nsched, all_best_sched))-1)/Nsim
assn_sched <- apply(all_assn_sched, 2, mean)/Npart
ind_assn <- apply(all_ind_assn, 2, mean)
simdir <- "/Users/tombraun/Desktop/Sim Results/"
if (!dir.exists(simdir)) dir.create(simdir)
save.image(paste(simdir, "scen", scen_num, iat_mean, "d.RData", sep=""))
|
# Life-expectancy setup for Contra Costa cities: joins 2018 deaths to
# population denominators and writes LE estimates for Richmond and San Pablo.
# NOTE(review): the first setwd() is immediately overridden by the second and
# looks like a leftover from another machine -- consider removing it.
setwd("C:/Users/ldiemoz/Documents/le")
setwd("/mnt/projects/FusionData/0.CCB/myUpstream/lifeTables/Contra Costa")
library("readxl")
library("dplyr")
library("readr")
deaths <- read_xlsx("test.xlsx") %>%
mutate(year=2018)
# Recode sex labels to one-letter codes; anything unexpected becomes NA
nxCity <- read_csv('ContraCosta_pop.csv') %>%
mutate(sex = ifelse(sex == "Total", "T",
ifelse(sex == "Male", "M",
ifelse(sex == "Female","F",NA)))) %>%
filter(year==2018) %>%
mutate(Nx=Nx*5)
# Nx*5: presumably expands person-years to a 5-year window -- TODO confirm
mxContraCosta <- full_join(nxCity, deaths, by = c("year", "agell", "ageul", "GEOID", "sex", "raceCode"))
# Defines calculateLE()
source("calculate_le_function.R")
# agell note: must be numeric
le.richmond <- calculateLE( mxContraCosta %>% arrange(agell) %>% filter(GEOID == "Richmond") )
le.san_pablo <- calculateLE( mxContraCosta %>% arrange(agell) %>% filter(GEOID == "San Pablo") )
write_csv(bind_rows(le.richmond,le.san_pablo),"ccle.csv")
| /myUpstream/lifeTables/Contra Costa/contra_costa_le_setup.R | no_license | mcSamuelDataSci/CACommunityBurden | R | false | false | 948 | r | setwd("C:/Users/ldiemoz/Documents/le")
# Life-expectancy setup for Contra Costa cities: joins 2018 deaths to
# population denominators and writes LE estimates for Richmond and San Pablo.
setwd("/mnt/projects/FusionData/0.CCB/myUpstream/lifeTables/Contra Costa")
library("readxl")
library("dplyr")
library("readr")
deaths <- read_xlsx("test.xlsx") %>%
mutate(year=2018)
# Recode sex labels to one-letter codes; anything unexpected becomes NA
nxCity <- read_csv('ContraCosta_pop.csv') %>%
mutate(sex = ifelse(sex == "Total", "T",
ifelse(sex == "Male", "M",
ifelse(sex == "Female","F",NA)))) %>%
filter(year==2018) %>%
mutate(Nx=Nx*5)
# Nx*5: presumably expands person-years to a 5-year window -- TODO confirm
mxContraCosta <- full_join(nxCity, deaths, by = c("year", "agell", "ageul", "GEOID", "sex", "raceCode"))
# Defines calculateLE()
source("calculate_le_function.R")
# agell note: must be numeric
le.richmond <- calculateLE( mxContraCosta %>% arrange(agell) %>% filter(GEOID == "Richmond") )
le.san_pablo <- calculateLE( mxContraCosta %>% arrange(agell) %>% filter(GEOID == "San Pablo") )
write_csv(bind_rows(le.richmond,le.san_pablo),"ccle.csv")
|
#'Stripplot of observed and imputed data
#'
#'Plotting methods for imputed data using \pkg{lattice}.
#'\code{stripplot} produces one-dimensional
#'scatterplots. The function
#'automatically separates the observed and imputed data. The
#'functions extend the usual features of \pkg{lattice}.
#'
#'The argument \code{na.groups} may be used to specify (combinations of)
#'missingness in any of the variables. The argument \code{groups} can be used
#'to specify groups based on the variable values themselves. Only one of both
#'may be active at the same time. When both are specified, \code{na.groups}
#'takes precedence over \code{groups}.
#'
#'Use the \code{subset} and \code{na.groups} together to plots parts of the
#'data. For example, select the first imputed data set by by
#'\code{subset=.imp==1}.
#'
#'Graphical parameters like \code{col}, \code{pch} and \code{cex} can be
#'specified in the arguments list to alter the plotting symbols. If
#'\code{length(col)==2}, the color specification to define the observed and
#'missing groups. \code{col[1]} is the color of the 'observed' data,
#'\code{col[2]} is the color of the missing or imputed data. A convenient color
#'choice is \code{col=mdc(1:2)}, a transparent blue color for the observed
#'data, and a transparent red color for the imputed data. A good choice is
#'\code{col=mdc(1:2), pch=20, cex=1.5}. These choices can be set for the
#'duration of the session by running \code{mice.theme()}.
#'
#'@aliases stripplot
#'@param x A \code{mids} object, typically created by \code{mice()} or
#'\code{mice.mids()}.
#'@param data Formula that selects the data to be plotted. This argument
#'follows the \pkg{lattice} rules for \emph{formulas}, describing the primary
#'variables (used for the per-panel display) and the optional conditioning
#'variables (which define the subsets plotted in different panels) to be used
#'in the plot.
#'
#'The formula is evaluated on the complete data set in the \code{long} form.
#'Legal variable names for the formula include \code{names(x$data)} plus the
#'two administrative factors \code{.imp} and \code{.id}.
#'
#'\bold{Extended formula interface:} The primary variable terms (both the LHS
#'\code{y} and RHS \code{x}) may consist of multiple terms separated by a
#'\sQuote{+} sign, e.g., \code{y1 + y2 ~ x | a * b}. This formula would be
#'taken to mean that the user wants to plot both \code{y1 ~ x | a * b} and
#'\code{y2 ~ x | a * b}, but with the \code{y1 ~ x} and \code{y2 ~ x} in
#'\emph{separate panels}. This behavior differs from standard \pkg{lattice}.
#'\emph{Only combine terms of the same type}, i.e. only factors or only
#'numerical variables. Mixing numerical and categorical data occasionally
#'produces odd labeling of the vertical axis.
#'
#'For convenience, in \code{stripplot()} and \code{bwplot} the formula
#'\code{y~.imp} may be abbreviated as \code{y}. This applies only to a single
#'\code{y}, and does not (yet) work for \code{y1+y2~.imp}.
#'
#'@param na.groups An expression evaluating to a logical vector indicating
#'which two groups are distinguished (e.g. using different colors) in the
#'display. The environment in which this expression is evaluated in the
#'response indicator \code{is.na(x$data)}.
#'
#'The default \code{na.group = NULL} contrasts the observed and missing data
#'in the LHS \code{y} variable of the display, i.e. groups created by
#'\code{is.na(y)}. The expression \code{y} creates the groups according to
#'\code{is.na(y)}. The expression \code{y1 & y2} creates groups by
#'\code{is.na(y1) & is.na(y2)}, and \code{y1 | y2} creates groups as
#'\code{is.na(y1) | is.na(y2)}, and so on.
#'@param groups This is the usual \code{groups} arguments in \pkg{lattice}. It
#'differs from \code{na.groups} because it evaluates in the completed data
#'\code{data.frame(complete(x, "long", inc=TRUE))} (as usual), whereas
#'\code{na.groups} evaluates in the response indicator. See
#'\code{\link{xyplot}} for more details. When both \code{na.groups} and
#'\code{groups} are specified, \code{na.groups} takes precedence, and
#'\code{groups} is ignored.
#'@param theme A named list containing the graphical parameters. The default
#'function \code{mice.theme} produces a short list of default colors, line
#'width, and so on. The extensive list may be obtained from
#'\code{trellis.par.get()}. Global graphical parameters like \code{col} or
#'\code{cex} in high-level calls are still honored, so first experiment with
#'the global parameters. Many setting consists of a pair. For example,
#'\code{mice.theme} defines two symbol colors. The first is for the observed
#'data, the second for the imputed data. The theme settings only exist during
#'the call, and do not affect the trellis graphical parameters.
#'@param jitter.data See \code{\link[lattice:panel.xyplot]{panel.xyplot}}.
#'@param horizontal See \code{\link[lattice:xyplot]{xyplot}}.
#'@param as.table See \code{\link[lattice:xyplot]{xyplot}}.
#'@param panel See \code{\link{xyplot}}.
#'@param default.prepanel See \code{\link[lattice:xyplot]{xyplot}}.
#'@param outer See \code{\link[lattice:xyplot]{xyplot}}.
#'@param allow.multiple See \code{\link[lattice:xyplot]{xyplot}}.
#'@param drop.unused.levels See \code{\link[lattice:xyplot]{xyplot}}.
#'@param subscripts See \code{\link[lattice:xyplot]{xyplot}}.
#'@param subset See \code{\link[lattice:xyplot]{xyplot}}.
#'@param \dots Further arguments, usually not directly processed by the
#'high-level functions documented here, but instead passed on to other
#'functions.
#'@return The high-level functions documented here, as well as other high-level
#'Lattice functions, return an object of class \code{"trellis"}. The
#'\code{\link[lattice:update.trellis]{update}} method can be used to
#'subsequently update components of the object, and the
#'\code{\link[lattice:print.trellis]{print}} method (usually called by default)
#'will plot it on an appropriate plotting device.
#'@note The first two arguments (\code{x} and \code{data}) are reversed
#'compared to the standard Trellis syntax implemented in \pkg{lattice}. This
#'reversal was necessary in order to benefit from automatic method dispatch.
#'
#'In \pkg{mice} the argument \code{x} is always a \code{mids} object, whereas
#'in \pkg{lattice} the argument \code{x} is always a formula.
#'
#'In \pkg{mice} the argument \code{data} is always a formula object, whereas in
#'\pkg{lattice} the argument \code{data} is usually a data frame.
#'
#'All other arguments have identical interpretation.
#'
#'@author Stef van Buuren
#'@seealso \code{\link{mice}}, \code{\link{xyplot}}, \code{\link{densityplot}},
#'\code{\link{bwplot}}, \code{\link{lattice}} for an overview of the
#'package, as well as \code{\link[lattice:stripplot]{stripplot}},
#'\code{\link[lattice:panel.stripplot]{panel.stripplot}},
#'\code{\link[lattice:print.trellis]{print.trellis}},
#'\code{\link[lattice:trellis.par.set]{trellis.par.set}}
#'@references Sarkar, Deepayan (2008) \emph{Lattice: Multivariate Data
#'Visualization with R}, Springer.
#'
#'van Buuren S and Groothuis-Oudshoorn K (2011). \code{mice}: Multivariate
#'Imputation by Chained Equations in \code{R}. \emph{Journal of Statistical
#'Software}, \bold{45}(3), 1-67. \url{http://www.jstatsoft.org/v45/i03/}
#'@keywords hplot
#'@examples
#'imp <- mice(boys, maxit=1)
#'
#'### stripplot, all numerical variables
#'\dontrun{stripplot(imp)}
#'
#'### same, but with improved display
#'\dontrun{stripplot(imp, col=c("grey",mdc(2)),pch=c(1,20))}
#'
#'### distribution per imputation of height, weight and bmi
#'### labeled by their own missingness
#'\dontrun{stripplot(imp, hgt+wgt+bmi~.imp, cex=c(2,4), pch=c(1,20),jitter=FALSE,
#'layout=c(3,1))}
#'
#'### same, but labeled with the missingness of wgt (just four cases)
#'\dontrun{stripplot(imp, hgt+wgt+bmi~.imp, na=wgt, cex=c(2,4), pch=c(1,20),jitter=FALSE,
#'layout=c(3,1))}
#'
#'### distribution of age and height, labeled by missingness in height
#'### most height values are missing for those around
#'### the age of two years
#'### some additional missings occur in region WEST
#'\dontrun{stripplot(imp, age+hgt~.imp|reg, hgt, col=c(hcl(0,0,40,0.2), mdc(2)),pch=c(1,20))}
#'
#'### heavily jittered relation between two categorical variables
#'### labeled by missingness of gen
#'### aggregated over all imputed data sets
#'\dontrun{stripplot(imp, gen~phb, factor=2, cex=c(8,1), hor=TRUE)}
#'
#'### circle fun
#'stripplot(imp, gen~.imp, na = wgt, factor = 2, cex = c(8.6),
#' hor = FALSE, outer = TRUE, scales = "free", pch = c(1,19))
#'
#'@method stripplot mids
#'@export
stripplot.mids <- function(x,
data,
na.groups = NULL,
groups = NULL,
as.table = TRUE,
theme = mice.theme(),
allow.multiple = TRUE,
outer = TRUE,
drop.unused.levels = lattice::lattice.getOption("drop.unused.levels"),
panel = lattice::lattice.getOption("panel.stripplot"),
default.prepanel = lattice::lattice.getOption("prepanel.default.stripplot"),
jitter.data = TRUE,
horizontal = FALSE,
...,
subscripts = TRUE,
subset = TRUE)
{
## Capture the unevaluated call: several arguments (na.groups, groups,
## subset, data) are interpreted non-standardly below via substitute()/eval()
call <- match.call()
if (!is.mids(x)) stop("Argument 'x' must be a 'mids' object")
## unpack data and response indicator
## cd: completed data in long format (includes the original, unimputed data);
## r : logical response indicator, TRUE where the original data are missing
cd <- data.frame(complete(x, "long", include=TRUE))
r <- as.data.frame(is.na(x$data))
## evaluate na.group in response indicator
nagp <- eval(expr=substitute(na.groups), envir=r, enclos=parent.frame())
if (is.expression(nagp)) nagp <- eval(expr=nagp, envir=r, enclos=parent.frame())
## evaluate groups in imputed data
ngp <- eval(expr=substitute(groups), envir=cd, enclos=parent.frame())
if (is.expression(ngp)) ngp <- eval(expr=ngp, envir=cd, enclos=parent.frame())
groups <- ngp
## evaluate subset in imputed data
ss <- eval(expr=substitute(subset), envir=cd, enclos=parent.frame())
if (is.expression(ss)) ss <- eval(expr=ss, envir=cd, enclos=parent.frame())
subset <- ss
## evaluate further arguments before parsing
dots <- list(...)
args <- list(panel = panel,
default.prepanel = default.prepanel,
allow.multiple = allow.multiple,
outer = outer,
drop.unused.levels = drop.unused.levels,
subscripts = subscripts,
as.table = as.table,
jitter.data = jitter.data,
horizontal = horizontal)
## create formula if not given (in call$data !)
## The first two columns of cd are the administrative .imp and .id factors
vnames <- names(cd)[-seq_len(2)]
allfactors <- unlist(lapply(cd,is.factor))[-seq_len(2)]
if (missing(data)) {
## default display: every non-factor variable against the imputation number
vnames <- vnames[!allfactors]
formula <- as.formula(paste0(paste0(vnames,collapse="+"),"~.imp"))
} else {
## pad abbreviated formula
## an argument without "~" is shorthand for <y> ~ .imp
abbrev <- ! any(grepl("~", call$data))
if (abbrev) {
if (length(call$data)>1) stop("Cannot pad extended formula.")
else formula <- as.formula(paste(call$data,"~.imp",sep=""))
} else {
formula <- data
}
}
## determine the y-variables
form <- lattice::latticeParseFormula(model=formula, data=cd, subset = subset,
groups = groups, multiple = allow.multiple,
outer = outer, subscripts = TRUE,
drop = drop.unused.levels)
ynames <- unlist(lapply(strsplit(form$left.name," \\+ "), rm.whitespace)) ## Jul2011
xnames <- unlist(lapply(strsplit(form$right.name," \\+ "), rm.whitespace)) ## Jul2011
## calculate selection vector gp
## gp distinguishes observed vs missing points (or applies the user's groups);
## each y-variable contributes one copy per data set (m imputations + observed)
nona <- is.null(call$na.groups)
if (!is.null(call$groups) && nona) gp <- call$groups
else {
if (nona) {
na.df <- r[, ynames, drop=FALSE]
gp <- unlist(lapply(na.df, rep, x$m+1))
} else {
gp <- rep(nagp, length(ynames)*(x$m+1))
}
}
## change axis defaults of extended formula interface
if (is.null(call$xlab) && !is.na(match(".imp",xnames))) {
dots$xlab <- ""
if (length(xnames)==1) dots$xlab <- "Imputation number"
}
if (is.null(call$ylab)) {
args$ylab <- ""
if (length(ynames)==1) args$ylab <- ynames
}
if (is.null(call$scales)) {
args$scales <- list()
if (length(ynames)>1)
args$scales <- list(x=list(relation="free"), y=list(relation="free"))
}
## ready
args <- c(x=formula, data=list(cd),
groups=list(gp),
args, dots, subset=call$subset)
## go
## Dispatch to lattice's stripplot, then apply the mice theme for this plot
tp <- do.call("stripplot", args)
tp <- update(tp, par.settings = theme)
return(tp)
}
| /R/stripplot.r | no_license | moreno-betancur/mice | R | false | false | 12,987 | r | #'Stripplot of observed and imputed data
#'
#'Plotting methods for imputed data using \pkg{lattice}.
#'\code{stripplot} produces one-dimensional
#'scatterplots. The function
#'automatically separates the observed and imputed data. The
#'functions extend the usual features of \pkg{lattice}.
#'
#'The argument \code{na.groups} may be used to specify (combinations of)
#'missingness in any of the variables. The argument \code{groups} can be used
#'to specify groups based on the variable values themselves. Only one of both
#'may be active at the same time. When both are specified, \code{na.groups}
#'takes precedence over \code{groups}.
#'
#'Use the \code{subset} and \code{na.groups} together to plots parts of the
#'data. For example, select the first imputed data set by by
#'\code{subset=.imp==1}.
#'
#'Graphical parameters like \code{col}, \code{pch} and \code{cex} can be
#'specified in the arguments list to alter the plotting symbols. If
#'\code{length(col)==2}, the color specification to define the observed and
#'missing groups. \code{col[1]} is the color of the 'observed' data,
#'\code{col[2]} is the color of the missing or imputed data. A convenient color
#'choice is \code{col=mdc(1:2)}, a transparent blue color for the observed
#'data, and a transparent red color for the imputed data. A good choice is
#'\code{col=mdc(1:2), pch=20, cex=1.5}. These choices can be set for the
#'duration of the session by running \code{mice.theme()}.
#'
#'@aliases stripplot
#'@param x A \code{mids} object, typically created by \code{mice()} or
#'\code{mice.mids()}.
#'@param data Formula that selects the data to be plotted. This argument
#'follows the \pkg{lattice} rules for \emph{formulas}, describing the primary
#'variables (used for the per-panel display) and the optional conditioning
#'variables (which define the subsets plotted in different panels) to be used
#'in the plot.
#'
#'The formula is evaluated on the complete data set in the \code{long} form.
#'Legal variable names for the formula include \code{names(x$data)} plus the
#'two administrative factors \code{.imp} and \code{.id}.
#'
#'\bold{Extended formula interface:} The primary variable terms (both the LHS
#'\code{y} and RHS \code{x}) may consist of multiple terms separated by a
#'\sQuote{+} sign, e.g., \code{y1 + y2 ~ x | a * b}. This formula would be
#'taken to mean that the user wants to plot both \code{y1 ~ x | a * b} and
#'\code{y2 ~ x | a * b}, but with the \code{y1 ~ x} and \code{y2 ~ x} in
#'\emph{separate panels}. This behavior differs from standard \pkg{lattice}.
#'\emph{Only combine terms of the same type}, i.e. only factors or only
#'numerical variables. Mixing numerical and categorical data occasionally
#'produces odd labeling of the vertical axis.
#'
#'For convenience, in \code{stripplot()} and \code{bwplot} the formula
#'\code{y~.imp} may be abbreviated as \code{y}. This applies only to a single
#'\code{y}, and does not (yet) work for \code{y1+y2~.imp}.
#'
#'@param na.groups An expression evaluating to a logical vector indicating
#'which two groups are distinguished (e.g. using different colors) in the
#'display. The environment in which this expression is evaluated in the
#'response indicator \code{is.na(x$data)}.
#'
#'The default \code{na.group = NULL} contrasts the observed and missing data
#'in the LHS \code{y} variable of the display, i.e. groups created by
#'\code{is.na(y)}. The expression \code{y} creates the groups according to
#'\code{is.na(y)}. The expression \code{y1 & y2} creates groups by
#'\code{is.na(y1) & is.na(y2)}, and \code{y1 | y2} creates groups as
#'\code{is.na(y1) | is.na(y2)}, and so on.
#'@param groups This is the usual \code{groups} arguments in \pkg{lattice}. It
#'differs from \code{na.groups} because it evaluates in the completed data
#'\code{data.frame(complete(x, "long", inc=TRUE))} (as usual), whereas
#'\code{na.groups} evaluates in the response indicator. See
#'\code{\link{xyplot}} for more details. When both \code{na.groups} and
#'\code{groups} are specified, \code{na.groups} takes precedence, and
#'\code{groups} is ignored.
#'@param theme A named list containing the graphical parameters. The default
#'function \code{mice.theme} produces a short list of default colors, line
#'width, and so on. The extensive list may be obtained from
#'\code{trellis.par.get()}. Global graphical parameters like \code{col} or
#'\code{cex} in high-level calls are still honored, so first experiment with
#'the global parameters. Many setting consists of a pair. For example,
#'\code{mice.theme} defines two symbol colors. The first is for the observed
#'data, the second for the imputed data. The theme settings only exist during
#'the call, and do not affect the trellis graphical parameters.
#'@param jitter.data See \code{\link[lattice:panel.xyplot]{panel.xyplot}}.
#'@param horizontal See \code{\link[lattice:xyplot]{xyplot}}.
#'@param as.table See \code{\link[lattice:xyplot]{xyplot}}.
#'@param panel See \code{\link{xyplot}}.
#'@param default.prepanel See \code{\link[lattice:xyplot]{xyplot}}.
#'@param outer See \code{\link[lattice:xyplot]{xyplot}}.
#'@param allow.multiple See \code{\link[lattice:xyplot]{xyplot}}.
#'@param drop.unused.levels See \code{\link[lattice:xyplot]{xyplot}}.
#'@param subscripts See \code{\link[lattice:xyplot]{xyplot}}.
#'@param subset See \code{\link[lattice:xyplot]{xyplot}}.
#'@param \dots Further arguments, usually not directly processed by the
#'high-level functions documented here, but instead passed on to other
#'functions.
#'@return The high-level functions documented here, as well as other high-level
#'Lattice functions, return an object of class \code{"trellis"}. The
#'\code{\link[lattice:update.trellis]{update}} method can be used to
#'subsequently update components of the object, and the
#'\code{\link[lattice:print.trellis]{print}} method (usually called by default)
#'will plot it on an appropriate plotting device.
#'@note The first two arguments (\code{x} and \code{data}) are reversed
#'compared to the standard Trellis syntax implemented in \pkg{lattice}. This
#'reversal was necessary in order to benefit from automatic method dispatch.
#'
#'In \pkg{mice} the argument \code{x} is always a \code{mids} object, whereas
#'in \pkg{lattice} the argument \code{x} is always a formula.
#'
#'In \pkg{mice} the argument \code{data} is always a formula object, whereas in
#'\pkg{lattice} the argument \code{data} is usually a data frame.
#'
#'All other arguments have identical interpretation.
#'
#'@author Stef van Buuren
#'@seealso \code{\link{mice}}, \code{\link{xyplot}}, \code{\link{densityplot}},
#'\code{\link{bwplot}}, \code{\link{lattice}} for an overview of the
#'package, as well as \code{\link[lattice:stripplot]{stripplot}},
#'\code{\link[lattice:panel.stripplot]{panel.stripplot}},
#'\code{\link[lattice:print.trellis]{print.trellis}},
#'\code{\link[lattice:trellis.par.set]{trellis.par.set}}
#'@references Sarkar, Deepayan (2008) \emph{Lattice: Multivariate Data
#'Visualization with R}, Springer.
#'
#'van Buuren S and Groothuis-Oudshoorn K (2011). \code{mice}: Multivariate
#'Imputation by Chained Equations in \code{R}. \emph{Journal of Statistical
#'Software}, \bold{45}(3), 1-67. \url{http://www.jstatsoft.org/v45/i03/}
#'@keywords hplot
#'@examples
#'imp <- mice(boys, maxit=1)
#'
#'### stripplot, all numerical variables
#'\dontrun{stripplot(imp)}
#'
#'### same, but with improved display
#'\dontrun{stripplot(imp, col=c("grey",mdc(2)),pch=c(1,20))}
#'
#'### distribution per imputation of height, weight and bmi
#'### labeled by their own missingness
#'\dontrun{stripplot(imp, hgt+wgt+bmi~.imp, cex=c(2,4), pch=c(1,20),jitter=FALSE,
#'layout=c(3,1))}
#'
#'### same, but labeled with the missingness of wgt (just four cases)
#'\dontrun{stripplot(imp, hgt+wgt+bmi~.imp, na=wgt, cex=c(2,4), pch=c(1,20),jitter=FALSE,
#'layout=c(3,1))}
#'
#'### distribution of age and height, labeled by missingness in height
#'### most height values are missing for those around
#'### the age of two years
#'### some additional missings occur in region WEST
#'\dontrun{stripplot(imp, age+hgt~.imp|reg, hgt, col=c(hcl(0,0,40,0.2), mdc(2)),pch=c(1,20))}
#'
#'### heavily jittered relation between two categorical variables
#'### labeled by missingness of gen
#'### aggregated over all imputed data sets
#'\dontrun{stripplot(imp, gen~phb, factor=2, cex=c(8,1), hor=TRUE)}
#'
#'### circle fun
#'stripplot(imp, gen~.imp, na = wgt, factor = 2, cex = c(8.6),
#' hor = FALSE, outer = TRUE, scales = "free", pch = c(1,19))
#'
#'@method stripplot mids
#'@export
stripplot.mids <- function(x,
data,
na.groups = NULL,
groups = NULL,
as.table = TRUE,
theme = mice.theme(),
allow.multiple = TRUE,
outer = TRUE,
drop.unused.levels = lattice::lattice.getOption("drop.unused.levels"),
panel = lattice::lattice.getOption("panel.stripplot"),
default.prepanel = lattice::lattice.getOption("prepanel.default.stripplot"),
jitter.data = TRUE,
horizontal = FALSE,
...,
subscripts = TRUE,
subset = TRUE)
{
## Capture the unevaluated call: several arguments (na.groups, groups,
## subset, data) are interpreted non-standardly below via substitute()/eval()
call <- match.call()
if (!is.mids(x)) stop("Argument 'x' must be a 'mids' object")
## unpack data and response indicator
## cd: completed data in long format (includes the original, unimputed data);
## r : logical response indicator, TRUE where the original data are missing
cd <- data.frame(complete(x, "long", include=TRUE))
r <- as.data.frame(is.na(x$data))
## evaluate na.group in response indicator
nagp <- eval(expr=substitute(na.groups), envir=r, enclos=parent.frame())
if (is.expression(nagp)) nagp <- eval(expr=nagp, envir=r, enclos=parent.frame())
## evaluate groups in imputed data
ngp <- eval(expr=substitute(groups), envir=cd, enclos=parent.frame())
if (is.expression(ngp)) ngp <- eval(expr=ngp, envir=cd, enclos=parent.frame())
groups <- ngp
## evaluate subset in imputed data
ss <- eval(expr=substitute(subset), envir=cd, enclos=parent.frame())
if (is.expression(ss)) ss <- eval(expr=ss, envir=cd, enclos=parent.frame())
subset <- ss
## evaluate further arguments before parsing
dots <- list(...)
args <- list(panel = panel,
default.prepanel = default.prepanel,
allow.multiple = allow.multiple,
outer = outer,
drop.unused.levels = drop.unused.levels,
subscripts = subscripts,
as.table = as.table,
jitter.data = jitter.data,
horizontal = horizontal)
## create formula if not given (in call$data !)
## The first two columns of cd are the administrative .imp and .id factors
vnames <- names(cd)[-seq_len(2)]
allfactors <- unlist(lapply(cd,is.factor))[-seq_len(2)]
if (missing(data)) {
## default display: every non-factor variable against the imputation number
vnames <- vnames[!allfactors]
formula <- as.formula(paste0(paste0(vnames,collapse="+"),"~.imp"))
} else {
## pad abbreviated formula
## an argument without "~" is shorthand for <y> ~ .imp
abbrev <- ! any(grepl("~", call$data))
if (abbrev) {
if (length(call$data)>1) stop("Cannot pad extended formula.")
else formula <- as.formula(paste(call$data,"~.imp",sep=""))
} else {
formula <- data
}
}
## determine the y-variables
form <- lattice::latticeParseFormula(model=formula, data=cd, subset = subset,
groups = groups, multiple = allow.multiple,
outer = outer, subscripts = TRUE,
drop = drop.unused.levels)
ynames <- unlist(lapply(strsplit(form$left.name," \\+ "), rm.whitespace)) ## Jul2011
xnames <- unlist(lapply(strsplit(form$right.name," \\+ "), rm.whitespace)) ## Jul2011
## calculate selection vector gp
## gp distinguishes observed vs missing points (or applies the user's groups);
## each y-variable contributes one copy per data set (m imputations + observed)
nona <- is.null(call$na.groups)
if (!is.null(call$groups) && nona) gp <- call$groups
else {
if (nona) {
na.df <- r[, ynames, drop=FALSE]
gp <- unlist(lapply(na.df, rep, x$m+1))
} else {
gp <- rep(nagp, length(ynames)*(x$m+1))
}
}
## change axis defaults of extended formula interface
if (is.null(call$xlab) && !is.na(match(".imp",xnames))) {
dots$xlab <- ""
if (length(xnames)==1) dots$xlab <- "Imputation number"
}
if (is.null(call$ylab)) {
args$ylab <- ""
if (length(ynames)==1) args$ylab <- ynames
}
if (is.null(call$scales)) {
args$scales <- list()
if (length(ynames)>1)
args$scales <- list(x=list(relation="free"), y=list(relation="free"))
}
## ready
args <- c(x=formula, data=list(cd),
groups=list(gp),
args, dots, subset=call$subset)
## go
## Dispatch to lattice's stripplot, then apply the mice theme for this plot
tp <- do.call("stripplot", args)
tp <- update(tp, par.settings = theme)
return(tp)
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/user-agent-style-sheets.R
\docType{data}
\name{html4_user_agent_css}
\alias{html4_user_agent_css}
\title{HTML4 user-agent css}
\format{
An object of class \code{list} of length 120.
}
\usage{
html4_user_agent_css
}
\description{
HTML4 user-agent css
}
\keyword{datasets}
| /man/html4_user_agent_css.Rd | permissive | coolbutuseless/cssparser | R | false | true | 349 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/user-agent-style-sheets.R
\docType{data}
\name{html4_user_agent_css}
\alias{html4_user_agent_css}
\title{HTML4 user-agent css}
\format{
An object of class \code{list} of length 120.
}
\usage{
html4_user_agent_css
}
\description{
HTML4 user-agent css
}
\keyword{datasets}
|
# Fit a linear model of measured execution time on configuration options for
# the pngtastic ColorCounter feature-wise measurements; print coefficients.
feature_wise <- read.csv("src/main/resources/evaluation/programs/java/pngtasticColorCounter/r/feature_wise.csv")
# Main-effects-only model (no interaction terms)
model <- lm(time~FREQTHRESHOLD+DISTTHRESHOLD+TIMEOUT+LOGLEVEL+MINALPHA, data = feature_wise)
coef(model)
| /src/main/resources/evaluation/programs/java/pngtasticColorCounter/r/feature_wise.R | permissive | miguelvelezmj25/ConfigCrusher | R | false | false | 218 | r | feature_wise <- read.csv("src/main/resources/evaluation/programs/java/pngtasticColorCounter/r/feature_wise.csv")
# Main-effects-only linear model of measured time on configuration options
# (feature_wise is read from CSV above); print the fitted coefficients.
model <- lm(time~FREQTHRESHOLD+DISTTHRESHOLD+TIMEOUT+LOGLEVEL+MINALPHA, data = feature_wise)
coef(model)
|
# Shiny server comparing a static ggplot boxplot with an interactive plotly
# version of the same plot. Column choices come from the UI inputs
# input$x, input$y and input$color.
library(shiny)
library(plotly)
library(ggplot2)
data(mtcars)
# Treat the discrete-valued columns as factors so they work as boxplot groups
factor_vars <- c("cyl", "vs", "am", "gear", "carb")
for (var in factor_vars) {
  mtcars[, var] <- as.factor(mtcars[[var]])
}
server <- shinyServer(function(input, output) {
  output$vanilla_ggplot <- renderPlot({
    # The .data pronoun replaces the deprecated aes_string() for
    # string-named columns
    ggplot(mtcars) +
      aes(.data[[input$x]], .data[[input$y]], fill = .data[[input$color]]) +
      geom_boxplot()
  })
  output$interactive_ggplotly <- renderPlotly({
    # p (renamed from `plot`, which shadowed base::plot) is built statically,
    # then converted; boxmode = "group" places fill groups side by side
    p <- ggplot(mtcars, aes(.data[[input$x]], .data[[input$y]], fill = .data[[input$color]])) +
      geom_boxplot()
    ggplotly(p) %>%
      layout(boxmode = "group")
  })
})
| /plotly-demo/server.R | permissive | thomas-neitmann/shiny-demo-apps | R | false | false | 607 | r | library(shiny)
library(plotly)
library(ggplot2)
data(mtcars)
# Treat the discrete-valued columns as factors so they work as boxplot groups
factor_vars <- c("cyl", "vs", "am", "gear", "carb")
for (var in factor_vars) {
mtcars[, var] <- as.factor(mtcars[[var]])
}
# Shiny server: static ggplot boxplot vs interactive plotly version.
# NOTE(review): aes_string() is deprecated in current ggplot2; the .data
# pronoun (aes(.data[[input$x]], ...)) is the supported replacement.
server <- shinyServer(function(input, output) {
output$vanilla_ggplot <- renderPlot({
ggplot(mtcars) +
aes_string(input$x, input$y, fill = input$color) +
geom_boxplot()
})
output$interactive_ggplotly <- renderPlotly({
# Build the static plot, then convert; boxmode = "group" keeps fill groups
# side by side rather than overlaid
plot <- ggplot(mtcars, aes_string(input$x, input$y, fill = input$color)) +
geom_boxplot()
ggplotly(plot) %>%
layout(boxmode = "group")
})
})
|
#### BUTTONS
########################################## BUttons
### first button simply returns the name of the button pushed
###### and the clicks and picks
# NEXT: button handler -- advance to the next batch of files.
# nh: handle/metadata object passed through to getrdpix() (not inspected here)
# g : list of shared GUI state (clicks in zloc/zenclick, selection in sel);
#     updated and returned for the caller to keep
NEXT<-function(nh, g)
{
##### BUTTONDOC:NEXT:'Next BATCH of FILES'
# If the user clicked on traces before pushing the button, record those
# picks via getrdpix(); otherwise just report which button was pushed
if(g$zenclick>1)
{
rd = getrdpix(g$zloc, g$zenclick, g$sel, nh)
}
else
{
rd=list(PUSHED="NEXT")
}
# Signal the event loop to stop the current plotting loop; reset click state
g$action = "break"
g$rd = rd
g$zloc = list(x=NULL, y=NULL)
invisible(list(global.vars=g) )
}
# PREV: button handler -- go back to the previous batch of files.
# Mirrors NEXT: records trace picks if any clicks preceded the button press,
# otherwise reports the pushed button; then breaks the event loop.
PREV<-function(nh, g)
{
##### BUTTONDOC:PREV:'Previous BATCH of FILES'
if(g$zenclick>1)
{
rd = getrdpix(g$zloc, g$zenclick, g$sel, nh)
}
else
{
rd=list(PUSHED="PREV")
}
g$action = "break"
g$rd = rd
g$zloc = list(x=NULL, y=NULL)
invisible(list(global.vars=g))
}
# HALF: button handler -- shift the display by half a window.
# Records trace picks if any clicks preceded the button press, otherwise
# reports the pushed button; then breaks the event loop.
HALF<-function(nh, g)
{
##### BUTTONDOC:HALF:'Shift Half a window'
if(g$zenclick>1)
{
rd = getrdpix(g$zloc, g$zenclick, g$sel, nh)
}
else
{
rd=list(PUSHED="HALF")
}
g$action = "break"
g$zloc = list(x=NULL, y=NULL)
g$rd = rd
invisible(list(global.vars=g))
}
# CENTER: button handler -- center the display window.
# Records trace picks if any clicks preceded the button press, otherwise
# reports the pushed button; then breaks the event loop.
CENTER<-function(nh, g)
{
##### BUTTONDOC:CENTER:'Center a window'
if (g$zenclick > 1) {
rd = getrdpix(g$zloc, g$zenclick, g$sel, nh)
}
else {
rd = list(PUSHED = "CENTER")
}
g$action = "break"
g$rd = rd
g$zloc = list(x = NULL, y = NULL)
invisible(list(global.vars = g))
}
# MARK: button handler -- mark a trace.
# Records trace picks if any clicks preceded the button press, otherwise
# reports the pushed button; then breaks the event loop.
MARK<-function(nh, g)
{
##### BUTTONDOC:MARK:'Mark a trace'
if(g$zenclick>1)
{
rd = getrdpix(g$zloc, g$zenclick, g$sel, nh)
}
else
{
rd=list(PUSHED="MARK")
}
g$action = "break"
g$rd = rd
g$zloc = list(x=NULL, y=NULL)
invisible(list(global.vars=g))
}
##########################################
DOC <- function(nh, g)
{
##### BUTTONDOC:DOC:'Show documentation'
  ## Pop up the help listing for the current set of button labels.
  PICK.DOC(g$BLABS)
  g$action <- "replot"
  g$zloc <- list(x = NULL, y = NULL)
  invisible(list(global.vars = g))
}
##########################################
REFRESH <- function(nh, g)
{
##### BUTTONDOC:REFRESH:'Refresh screen'
  ## Mark the last selection with dashed grey verticals, then reset the
  ## selection to the full extent of the current plot region.
  u <- par("usr")
  L <- length(g$sloc$x)
  if (L > 1) {
    abline(v = g$sloc$x[c(L - 1, L)], col = gray(0.8), lty = 2)
  }
  g$sloc <- list(x = u[1:2], y = u[3:4])
  g$zloc <- list(x = NULL, y = NULL)
  g$action <- "replot"
  invisible(list(global.vars = g))
}
#######
RESTORE <- function(nh, g)
{
##### BUTTONDOC:RESTORE:'Restore from zoom'
  ## Clear the zoom window (g$WIN) so the next replot shows the full trace.
  u <- par("usr")
  L <- length(g$sloc$x)
  ## NOTE(review): the original author flagged the abline below as
  ## wrong/ineffective; it is kept unchanged for behavioural parity.
  if (L > 1) {
    abline(v = g$sloc$x[c(L - 1, L)], col = gray(0.8), lty = 2)
  }
  g$sloc <- list(x = u[1:2], y = u[3:4])
  g$zloc <- list(x = NULL, y = NULL)
  g$WIN <- NULL
  g$action <- "replot"
  invisible(list(global.vars = g))
}
#######
ZOOM.out <- function(nh, g)
{
##### BUTTONDOC:ZOOM.out:'Zoom out'
  ## Widen the current x-window by 30% of its width on each side.
  u <- par("usr")
  DX <- 0.3 * (u[2] - u[1])
  g$WIN <- c(u[1] - DX, u[2] + DX)
  g$zloc <- list(x = NULL, y = NULL)
  g$action <- "replot"
  invisible(list(global.vars = g))
}
ZOOM.in <- function(nh, g)
{
##### BUTTONDOC:ZOOM.in:'Zoom in'
  ## Zoom to the window defined by the last two trace clicks; if the user
  ## did not click a window, shrink the current window by 30% on each side.
  zenclick = length(g$zloc$x)
  if (zenclick >= 3)
    {
      ## BUG FIX: the original indexed with the stale g$zenclick
      ## (n1 = g$zenclick - 2), which need not match the click count just
      ## computed from g$zloc and could be NULL; use the local count.
      n1 = zenclick - 2
      pwin = sort(g$zloc$x[c(n1, n1 + 1)])
      g$WIN = pwin
    }
  else
    {
      u = par("usr")
      DX = (u[2] - u[1]) * 0.3
      zloc = list(x = c(u[1] + DX, u[2] - DX))
      g$WIN = zloc$x
    }
  g$zloc = list(x = NULL, y = NULL)
  g$action = "replot"
  invisible(list(global.vars = g))
}
#######
LEFT <- function(nh, g)
{
##### BUTTONDOC:LEFT:'Shift Left'
  ## Pan the window left by 30% of its current width.
  u <- par("usr")
  DX <- 0.3 * (u[2] - u[1])
  g$WIN <- c(u[1], u[2]) - DX
  g$zloc <- list(x = NULL, y = NULL)
  g$action <- "replot"
  invisible(list(global.vars = g))
}
RIGHT <- function(nh, g)
{
##### BUTTONDOC:RIGHT:'Shift Right'
  ## Pan the window right by 30% of its current width.
  u <- par("usr")
  DX <- 0.3 * (u[2] - u[1])
  g$WIN <- c(u[1], u[2]) + DX
  g$zloc <- list(x = NULL, y = NULL)
  g$action <- "replot"
  invisible(list(global.vars = g))
}
#######
SCALE <- function(nh, g)
{
##### BUTTONDOC:SCALE:'Toggle Scale by trace/window'
  ## Flip the scaling mode flag between its two states, 1 and 2.
  g$ScaleFACT <- if (g$ScaleFACT == 1) 2 else 1
  g$action <- "replot"
  g$zloc <- list(x = NULL, y = NULL)
  invisible(list(global.vars = g))
}
########################################
Xwin <- function(nh, g)
{
##### BUTTONDOC:Xwin:'Delete all windows except main'
  ## Close every open graphics device except the main swig window, then
  ## make the main window the active device again.
  ALLdevs = dev.list()
  ww = ALLdevs[which(g$MAINdev != ALLdevs)]
  ## BUG FIX: the original looped over 1:length(ww), which evaluates to
  ## c(1, 0) when there is nothing to close and then calls dev.off(NA);
  ## seq_along() correctly yields an empty loop in that case.
  for (i in seq_along(ww))
    {
      dev.off(which = ww[i])
    }
  dev.set(g$MAINdev)
  g$zloc = list(x = NULL, y = NULL)
  g$action = "donothing"
  invisible(list(global.vars = g))
}
########################################
PSEL <- function(nh, g)
{
##### BUTTONDOC:PSEL:'Pick trace Sta/COMP to show'
  ## Let the user choose which station/component traces to display, then
  ## reset the time origin to the start of the first selected trace.
  sel <- SELSTA(nh, sel = g$sel, newdev = TRUE, STAY = FALSE)
  ## Note: g$du is computed from the PREVIOUS selection, matching the
  ## original code.
  g$du <- 1 / length(nh$dt[g$sel])
  isel <- sel[1]
  ## Absolute start time of the first selected trace within its day.
  g$Torigin <- list(
    jd = nh$info$jd[isel],
    hr = nh$info$hr[isel],
    mi = nh$info$mi[isel],
    sec = nh$info$sec[isel] + nh$info$msec[isel] / 1000 +
      nh$info$t1[isel] - nh$info$off[isel])
  g$sel <- sel
  g$STNS <- nh$STNS[sel]
  g$COMPS <- nh$COMPS[sel]
  g$zloc <- list(x = NULL, y = NULL)
  g$action <- "replot"
  invisible(list(global.vars = g))
}
#######################################
#### this needs work
FLIP <- function(nh, g)
{
##### BUTTONDOC:FLIP:'Flip selected trace'
  ## Reverse the polarity (multiply by -1) of every clicked trace.
  zenclick <- length(g$zloc$x)
  if (zenclick > 1) {
    nc <- 1:(zenclick - 1)
    ## Map click y-positions to trace panel indices (panels stack top-down).
    ypick <- length(g$sel) - floor(length(g$sel) * g$zloc$y[nc])
    ipick <- unique(g$sel[ypick])
    cat("FLIP: pwig POLARITY REVERSED: "); cat(ipick, sep = " "); cat("\n")
    for (jtr in ipick) {
      nh$JSTR[[jtr]] <- (-1) * nh$JSTR[[jtr]]
    }
  } else {
    cat("FLIP: No traces selected: Try Again"); cat("\n")
  }
  g$zloc <- list(x = NULL, y = NULL)
  g$action <- "replace"
  invisible(list(NH = nh, global.vars = g))
}
########################
PTS <- function(nh, g)
{
##### BUTTONDOC:PTS:'Show sample points'
  ## Toggle plotting of individual sample points on the traces.
  g$pts <- !g$pts
  g$zloc <- list(x = NULL, y = NULL)
  g$action <- "replot"
  invisible(list(global.vars = g))
}
FILT<-function(nh, g)
{
## Interactively choose a filter (choosfilt) and apply it to the selected
## traces.  Returns NH = filtered (or untouched) trace structure plus the
## updated globals; g$action tells the caller what to do next:
## "replace" (show filtered data), "revert" (restore raw), or "replot".
##### BUTTONDOC:FILT:'Filter trace'
### print( data.frame(g$filters) )
Fdef = choosfilt(thefilts=g$filters, ncol=5)
if(!is.null(Fdef))
{
if(Fdef$type=="None")
{
## "None" chosen: drop the subtitle and revert to raw data (early return).
dev.set( g$MAINdev)
g$SUBTIT = NA
g$action = "revert"
KF = nh
return(list(global.vars=g))
}
else
{
### g$SUBTIT = paste(Fdef$type,Fdef$fl, Fdef$fh, sep=" ")
## Stamp the subtitle with the filter band and apply the filter.
g$SUBTIT = filterstamp(Fdef$fl, Fdef$fh, Fdef$type)
g$action = "replace"
KF = FILT.SEISN(nh, sel = g$sel, FILT=Fdef)
}
### X11()
}
else
{
## Chooser cancelled: keep the data unchanged and just replot.
g$action = "replot"
KF = nh
}
g$zloc = list(x=NULL, y=NULL)
dev.set( g$MAINdev)
invisible(list(NH=KF, global.vars=g))
}
UNFILT <- function(nh, g)
{
##### BUTTONDOC:UNFILT:'Unfilter traces'
  ## Drop the filter subtitle and ask the caller to revert to the raw data.
  dev.set(g$MAINdev)
  g$SUBTIT <- NA
  g$zloc <- list(x = NULL, y = NULL)
  g$action <- "revert"
  invisible(list(global.vars = g))
}
#########################
fspread<-function(nh, g)
{
## Pick a time window on one trace (two clicks + the button click, so
## zenclick >= 3) and display a filter-bank "spread" plot of that segment
## via FILT.spread in a new graphics device.
##### BUTTONDOC:fspread:'do a filter spread on selection'
### click on a trace panel and do a filter spread
zenclick = length(g$zloc$x)
if(zenclick>=3)
{
## Map the last click's y-position to a trace panel index.
ypick = length(g$sel)-floor(length(g$sel)*g$zloc$y[zenclick-1])
ipick = g$sel[ypick]
print(paste(sep=' ',"fspread", ypick, nh$info$name[ ipick]))
famp = nh$JSTR[[ipick]]
pwin = sort(c(g$zloc$x[zenclick-2], g$zloc$x[zenclick-1]))
ex = seq(from=nh$info$t1[ipick], by=nh$info$dt[ipick], length.out=length(famp) )
temp = famp[ ex > pwin[1] & ex <pwin[2]]
#### Xamp = -1*temp
smallex = ex[ ex > pwin[1] & ex <pwin[2]]
## Absolute start time (s) of the window, including msec and offsets.
asec = nh$info$sec[ipick]+nh$info$msec[ipick]/1000+nh$info$t1[ipick]-nh$info$off[ipick]+pwin[1]
spaz = recdate( nh$info$jd[ipick], nh$info$hr[ipick], nh$info$mi[ipick], asec, nh$info$yr[ipick] )
spaz$yr = as.integer(nh$info$yr[ipick])
MODAY = getmoday(spaz$jd, spaz$yr)
TP = list(yr=spaz$yr[1], jd=spaz$jd, mo=MODAY$mo,
dom= MODAY$dom ,hr=spaz$hr, mi=spaz$mi, sec=spaz$sec )
dst = dateStamp(TP)
titl = paste(nh$STNS[ipick], nh$COMPS[ipick], dst)
## Fixed filter bank: common low corner, increasing high corners (Hz).
fh=c(1/20, 1/10, 1/5, .5, 1, 2, 3)
fl=rep(1/100, times=length(fh) )
dev.new(width=14, height=10)
## Trim 20% off each end of the window for the spread display.
jex = range(smallex)
jr = jex[2] - jex[1]
j10 = jr*0.2
jwin = c(jex[1]+j10, jex[2]-j10)
# jwin = NULL
FILT.spread(smallex, temp, nh$dt[ipick], fl = fl, fh = fh, sfact = 1, WIN = jwin, PLOT = TRUE, TIT =titl , TAPER = 0.1, POSTTAPER=NULL)
dev.set(g$MAINdev)
g$zloc = list(x=NULL, y=NULL)
g$action="donothing"
invisible(list(global.vars=g))
}
else
{
cat("XTR WARNING: no window or trace has been selected:", sep="\n")
RETX=NULL
g$zloc = list(x=NULL, y=NULL)
g$action="donothing"
invisible(list(global.vars=g))
}
}
SPEC<-function(nh, g)
{
## Display multitaper spectra (MTM.drive) of one or more clicked windows.
## Clicks are consumed in pairs: each pair defines a window on a trace.
##### BUTTONDOC:SPEC:'Display Spectrum'
nclick = length(g$zloc$x)
if(nclick>=3)
{
nc = 1:(nclick-1)
lnc = length(nc)
## Map click y-positions to trace panel indices.
ypick = length(g$sel)-floor(length(g$sel)*g$zloc$y[nc])
ipick = g$sel[ypick]
### print(paste(sep=' ',ypick, NH$info$name[ ipick]))
print(ipick)
## Odd indices start each click pair.
i1 = seq(from=1, to=max(nc), by=2)
i1 = i1[i1<max(nc)]
amp = list()
dees = list()
stamps = list()
speccol = vector()
ni = 0
for(ipix in i1)
{
pwin = sort(c(g$zloc$x[ipix], g$zloc$x[ipix+1]))
print(c(ipix, pwin))
kpix = ipick[ipix]
famp = nh$JSTR[[kpix]]
ex = seq(from=nh$info$t1[kpix], by=nh$info$dt[kpix], length.out=length(famp) )
temp = famp[ ex > pwin[1] & ex <pwin[2]]
## Skip windows containing NA samples.
if(any(is.na(temp)))
{
print(paste("getting NA in trace",kpix, nh$STNS[kpix],nh$COMPS[kpix],pwin[1], pwin[2] ))
next
}
ni = ni +1
## Demean each segment before the spectral estimate.
amp[[ni]] = temp-mean(temp)
dees[ni] = nh$dt[kpix]
speccol[ni] = g$pcols[kpix]
ftime = Zdate(nh$info, kpix, pwin[1])
psta = nh$STNS[kpix]
pcomp = nh$COMPS[kpix]
STAMP = paste(sep=" ", psta, pcomp, ftime)
stamps[ni] = STAMP
}
print(stamps)
a = list(y=amp, dt=dees, stamps=stamps)
if(length(a$y)>0)
{
dev.new(width=10, height=10)
## Frequency band: 0.1 Hz up to a third of the sampling rate.
f1 = 0.1
f2 = floor(0.33*(1/nh$dt[ipick]))
### oop=par(no.readonly = TRUE)
### par(mfrow=c(length(a$y), 1) )
### for(io in 1:length(a$y)) plot(a$y[[io]], type='l')
### par(oop)
### readline("type in something")
MTM.drive(a, f1, f2[1], COL=speccol, PLOT=TRUE)
}
dev.set(g$MAINdev)
}
else
{
cat("SPEC WARNING: no window or trace has been selected:", sep="\n")
}
g$zloc = list(x=NULL, y=NULL)
g$action="donothing"
invisible(list(global.vars=g))
}
WWIN<-function(nh, g)
{
## Plot each clicked window as its own time-series in a new device,
## waiting for a locator click between plots.  Clicks are consumed in
## pairs, each pair defining a window on one trace.
##### BUTTONDOC:WWIN:'Window'
nclick = length(g$zloc$x)
if(nclick>=3)
{
nc = 1:(nclick-1)
lnc = length(nc)
## Map click y-positions to trace panel indices.
ypick = length(g$sel)-floor(length(g$sel)*g$zloc$y[nc])
ipick = g$sel[ypick]
### print(paste(sep=' ',ypick, NH$info$name[ ipick]))
print(ipick)
## Odd indices start each click pair.
i1 = seq(from=1, to=max(nc), by=2)
i1 = i1[i1<max(nc)]
amp = list()
dees = list()
stamps = list()
speccol = vector()
ni = 0
for(ipix in i1)
{
pwin = sort(c(g$zloc$x[ipix], g$zloc$x[ipix+1]))
print(c(ipix, pwin))
kpix = ipick[ipix]
famp = nh$JSTR[[kpix]]
ex = seq(from=nh$info$t1[kpix], by=nh$info$dt[kpix], length.out=length(famp) )
temp = famp[ ex > pwin[1] & ex <pwin[2]]
## Skip windows containing NA samples.
if(any(is.na(temp)))
{
print(paste("getting NA in trace",kpix, nh$STNS[kpix],nh$COMPS[kpix],pwin[1], pwin[2] ))
next
}
ni = ni +1
amp[[ni]] = temp-mean(temp)
dees[ni] = nh$dt[kpix]
speccol[ni] = g$pcols[kpix]
ftime = Zdate(nh$info, kpix, pwin[1])
psta = nh$STNS[kpix]
pcomp = nh$COMPS[kpix]
STAMP = paste(sep=" ", psta, pcomp, ftime)
stamps[ni] = STAMP
}
dev.new(width=10, height=10)
## One plot per extracted window; locator(1) pauses until a click.
for(i in 1:ni) {
y = amp[[i]]
len = length(amp[[i]])
print(c(i, len, dees[[i]]))
xt = seq(from=0, by=dees[[i]], length=len)
plot(xt , amp[[i]],main=stamps[[i]], type='l');
locator(1) }
dev.set(g$MAINdev)
}
g$zloc = list(x=NULL, y=NULL)
g$action="donothing"
invisible(list(global.vars=g))
}
##########################
SGRAM<-function(nh, g)
{
## Display a spectrogram (SPECT.drive) of a clicked window; with only the
## button click available, fall back to the current display window g$WIN.
##### BUTTONDOC:SGRAM:'Spectrogram'
zenclick = length(g$zloc$x)
if(zenclick>=2)
{
if(zenclick==2)
{
## Single pick: use the full current window.
ypick = length(g$sel)-floor(length(g$sel)*g$zloc$y[zenclick-1])
ipick = g$sel[ypick]
### print(paste(sep=' ',ypick, NH$info$name[ipick]))
pwin = g$WIN
}
else
{
## Two picks: use the window they bracket.
ypick = length(g$sel)-floor(length(g$sel)*g$zloc$y[zenclick-1])
ipick = g$sel[ypick]
### print(paste(sep=' ',ypick, NH$info$name[ ipick]))
pwin = sort(c(g$zloc$x[zenclick-2], g$zloc$x[zenclick-1]))
}
## Remember the window so a later buttonless call can reuse it.
g$LASTwin = pwin
### print(paste(sep=" ", "DOING SGRAM Nclick, ipick, pwin", Nclick, ipick, pwin))
famp = nh$JSTR[[ipick]]
ex = seq(from=nh$info$t1[ipick], by=nh$info$dt[ipick], length.out=length(famp))
temp = famp[ ex > pwin[1] & ex <pwin[2]]
Xamp = temp-mean(temp)
# ftime = Zdate(nh$info, g$sel[ypick], pwin[1])
ftime = ghstamp(nh, sel=g$sel[ypick], WIN=pwin )
print(paste(sep=" ",min(ex), max(ex)))
print(paste(sep=" ",pwin[1], pwin[2]))
print(paste(sep=" ", ipick, length(famp),length(temp),length(Xamp), nh$dt[ipick],ftime))
SPECT.drive(Xamp, DT=nh$dt[ipick], STAMP=ftime)
### plotevol(DEV, log=1, fl=0, fh=15, col=rainbow(50))
}
else
{
pwin = g$LASTwin
ypick = 1
ipick = g$sel[1]
cat("SGRAM WARNING: no window or trace has been selected:" , sep="\n")
}
dev.set(g$MAINdev)
g$zloc = list(x=NULL, y=NULL)
g$action="donothing"
invisible(list(global.vars=g))
}
WLET<-function(nh, g)
{
## Display a wavelet transform (wlet.drive) of a clicked window; with only
## the button click available, fall back to the display window g$WIN.
##### BUTTONDOC:WLET:'Wavelet Transform'
zenclick = length(g$zloc$x)
if(zenclick>=2)
{
if(zenclick==2)
{
## Single pick: use the full current window.
ypick = length(g$sel)-floor(length(g$sel)*g$zloc$y[zenclick-1])
ipick = g$sel[ypick]
### print(paste(sep=' ',ypick, NH$info$name[ipick]))
pwin = g$WIN
}
else
{
## Two picks: use the window they bracket.
ypick = length(g$sel)-floor(length(g$sel)*g$zloc$y[zenclick-1])
ipick = g$sel[ypick]
### print(paste(sep=' ',ypick, NH$info$name[ ipick]))
pwin = sort(c(g$zloc$x[zenclick-2], g$zloc$x[zenclick-1]))
}
## Remember the window so a later buttonless call can reuse it.
g$LASTwin = pwin
### print(paste(sep=" ", "DOING SGRAM Nclick, ipick, pwin", Nclick, ipick, pwin))
famp = nh$JSTR[[ipick]]
ex = seq(from=nh$info$t1[ipick], by=nh$info$dt[ipick], length.out=length(famp))
temp = famp[ ex > pwin[1] & ex <pwin[2]]
Xamp = temp-mean(temp)
## ftime = Zdate(nh$info, g$sel[ypick], pwin[1])
ftime = ghstamp(nh, sel=g$sel[ypick], WIN=pwin )
wlet.drive(Xamp, nh$dt[ipick], STAMP=ftime)
### plotevol(DEV, log=1, fl=0, fh=15, col=rainbow(50))
}
else
{
pwin = g$LASTwin
ypick = 1
ipick = g$sel[1]
cat("WLET WARNING: no window or trace has been selected:" , sep="\n")
}
dev.set(g$MAINdev)
g$zloc = list(x=NULL, y=NULL)
g$action="donothing"
invisible(list(global.vars=g))
}
XTR<-function(nh, g)
{
## Extract the clicked window of a single trace and return it (RETX) to
## the caller with g$action = "exit"; requires two window clicks plus the
## button click (zenclick >= 3).
##### BUTTONDOC:XTR:'Extract single trace'
zenclick = length(g$zloc$x)
if(zenclick>=3)
{
## Map the last click's y-position to a trace panel index.
ypick = length(g$sel)-floor(length(g$sel)*g$zloc$y[zenclick-1])
ipick = g$sel[ypick]
print(paste(sep=' ',"EXTRACT", ypick, nh$info$name[ ipick]))
famp = nh$JSTR[[ipick]]
pwin = sort(c(g$zloc$x[zenclick-2], g$zloc$x[zenclick-1]))
ex = seq(from=nh$info$t1[ipick], by=nh$info$dt[ipick], length.out=length(famp) )
temp = famp[ ex > pwin[1] & ex <pwin[2]]
#### Xamp = -1*temp
smallex = ex[ ex > pwin[1] & ex <pwin[2]]
## Absolute start time (s) of the window, including msec and offsets.
asec = nh$info$sec[ipick]+nh$info$msec[ipick]/1000+nh$info$t1[ipick]-nh$info$off[ipick]+pwin[1]
spaz = recdate( nh$info$jd[ipick], nh$info$hr[ipick], nh$info$mi[ipick], asec, nh$info$yr[ipick] )
spaz$yr = as.integer(nh$info$yr[ipick])
MODAY = getmoday(spaz$jd, spaz$yr)
TP = list(yr=spaz$yr, jd=spaz$jd, mo=MODAY$mo,
dom= MODAY$dom ,hr=spaz$hr, mi=spaz$mi, sec=spaz$sec )
RETX = list(but="RET", x=smallex, y=temp, dt=nh$dt[ipick], STNS=nh$STNS[ipick],
COMPS=nh$COMPS[ipick], fname=nh$info$name[ipick] , TIMEpick=TP, mark=TRUE, deltat=nh$dt[ipick] )
g$zloc = list(x=NULL, y=NULL)
g$action="exit"
invisible(list(RETX = RETX, global.vars=g))
}
else
{
cat("XTR WARNING: no window or trace has been selected:", sep="\n")
RETX=NULL
g$zloc = list(x=NULL, y=NULL)
g$action="donothing"
invisible(list(global.vars=g))
}
}
########################################
Pinfo<-function(nh, g)
{
## Print detailed timing information for the current picks to the console:
## pick times, differences relative to the earliest pick, and ready-to-paste
## R snippets (orgtim/stns/comps/tims vectors and a scan() template).
##### BUTTONDOC:Pinfo:'Pick information'
zenclick = length(g$zloc$x)
if(zenclick>=2)
{
### NSEL = length(nh$dt[g$sel])
### du = 1/NSEL
## Convert raw clicks to legitimate (on-panel) picks.
kix = legitpix(g$sel, g$zloc, zenclick)
ypick = kix$ypick
ppick = kix$ppick
dpick = c(0, diff(ppick))
ipick = g$sel[ypick]
m = match(g$STNS[ipick],g$UNIsta)
### jj = floor(( g$zloc$y[zenclick-1])/du)
## Absolute time (s) of the last pick within its trace.
asec = nh$info$sec[ipick]+nh$info$msec[ipick]/1000+nh$info$t1[ipick]-nh$info$off[ipick]+ppick[zenclick-1]
print(paste(sep=" ", "PICK=",
nh$info$yr[ipick], nh$info$jd[ipick], nh$info$hr[ipick],
nh$info$mi[ipick], asec, "sta=", nh$STNS[ipick], "comp=", nh$COMPS[ipick] ))
print(ppick)
## pstas = paste(nh$STNS[ipick], nh$COMPS[ipick], sep=".")
rd = getrdpix(g$zloc, zenclick, g$sel, nh)
## Approximate fractional-year times, used only to find the earliest pick.
RDtmes = rd$yr+rd$jd/366+rd$hr/(366*24)+rd$mi/(366*24*60)+rd$sec/(366*24*3600)
wearliest = which.min(RDtmes)
PAS = paste(sep="_", "Jtim(", rd$jd[wearliest], ", hr=" , rd$hr[wearliest] ,
", mi=", rd$mi[wearliest], ",sec=", rd$sec[wearliest], ")")
## Seconds from the earliest pick to every other pick.
DEEtimes = YRsecdif(
rd$jd[wearliest],rd$hr[wearliest],rd$mi[wearliest], rd$sec[wearliest],
rd$jd, rd$hr, rd$mi, rd$sec, rd$yr[wearliest], rd$yr)
apickorg = paste(sep=",", rd$yr[wearliest], rd$jd[wearliest],rd$hr[wearliest],rd$mi[wearliest], rd$sec[wearliest])
## pstas = nh$STNS[ipick]
apstas = paste(sep="", '"', paste(rd$stn, collapse='","'), '"')
## pcomps =nh$COMPS[ipick]
apcomps = paste(sep="", '"', paste(rd$comp, collapse='","'), '"')
cat("", sep="\n")
cat("", sep="\n")
cat("##################", sep="\n")
cat( paste(sep=" ", "orgtim=c(", apickorg , ")") , sep="\n")
cat("", sep="\n")
cat( paste(sep=" ", "stns=c(", apstas, ")") , sep="\n")
cat( paste(sep=" ", "comps=c(", apcomps, ")") , sep="\n")
cat( paste(sep=" ", "tims=c(", paste(DEEtimes, collapse=","), ")") , sep="\n")
cat("", sep="\n")
cat("##################", sep="\n")
cat("", sep="\n")
cat("Time Differences between picks:", sep="\n")
cat(paste(dpick), sep="\n")
cat("", sep="\n")
#### print(zloc$y[1:(zenclick-1)])
#### print(ypick)
#### print(ipick)
cat("##################", sep="\n")
cat("rd = scan(file='', what=list(jd=0,hr=0,mi=0,sec=0,yr=0,stn='',comp=''))" , sep="\n")
write.table(file="", data.frame(rd), row.names =FALSE, col.names =FALSE )
cat(" ", sep="\n")
cat("GMT TIME: ", sep="\n")
showdatetime(rd)
cat(" ", sep="\n")
PAS = paste(sep=" ", "Jtim(", rd$jd, ", hr=" , rd$hr , ", mi=", rd$mi, ",sec=", rd$sec, ")")
cat("", sep="\n")
cat(PAS, sep="\n")
## Optionally repeat the listing in local time if a timezone shift is set.
if(!is.null(nh$TZ))
{
rdlocal = recdate(jd=rd$jd, hr=rd$hr+nh$TZ, mi=rd$mi, sec=rd$sec , yr=rd$yr)
cat(" ", sep="\n")
cat(paste(sep=" ", "LOCAL TIMES, SHIFT=", nh$TZ) , sep="\n")
showdatetime(rdlocal, AMPM=TRUE)
}
}
else
{
cat("Pinfo WARNING: no pick or trace has been selected:", sep="\n")
}
g$zloc = list(x=NULL, y=NULL)
g$action="donothing"
invisible(list(global.vars=g))
}
#################################
#################################
TSHIFT<-function(nh, g)
{
## Compute per-trace time shifts so that the picked traces line up on the
## first pick, store them in g$ASHIFT, and echo a paste-able "kshift" list
## to the console.  With no picks, the shifts are reset to g$SHIFT.ORIG.
##### BUTTONDOC:TSHIFT:'Shift traces to line up with first pick'
zenclick = length(g$zloc$x)
if(zenclick>=2)
{
## Convert raw clicks to legitimate (on-panel) picks.
kix = legitpix(g$sel, g$zloc, zenclick)
ypick = kix$ypick
ppick = kix$ppick
dpick = c(0, diff(ppick))
ipick = g$sel[ypick]
print(paste(nh$STNS[ipick], nh$COMPS[ipick]))
## Shift of each picked trace relative to the first picked trace.
tshft = rep(0,times=length(nh$STNS))
tshft[ipick] = ppick-ppick[1]
## print(data.frame(list(sta=nh$STNS, comp=nh$COMPS, tshft=tshft)))
print(data.frame(list(sta=nh$STNS[ipick] , comp=nh$COMPS[ipick] , tshft=tshft[ipick] )))
Tshift = list(name = nh$STNS[ipick], t=tshft[ipick])
cat(file = "","\n\n")
nam = "kshift"
cat(file = "", paste(sep = "", nam, "=list()") )
cat(file = "","\n")
cat(file = "", paste(sep = "", nam, "$name=c(\"", paste(format(Tshift$name), collapse = "\",\""), "\")"), fill = TRUE)
cat(file = "", paste(sep = "", nam, "$t=c(", paste(format(Tshift$t), collapse = ","), ")"), fill = TRUE)
cat(file = "","\n")
g$ASHIFT = tshft
g$BLAHSHIFT = tshft
}
else
{
g$ASHIFT = g$SHIFT.ORIG
}
g$zloc = list(x=NULL, y=NULL)
g$action = "replot"
invisible(list(NH=nh, global.vars=g))
}
#################################
#################################
#################################
RMS<-function(nh, g)
{
## Compute the demeaned root-mean-square amplitude of each pair of picks
## (each pair brackets a window on one trace) and print the results,
## followed by a paste-able scan() block for downstream use.
##### BUTTONDOC:RMS:'Root Mean Square of selection'
zenclick = length(g$zloc$x)
sel = g$sel
if(zenclick>=2)
{
## Convert raw clicks to legitimate (on-panel) picks.
kix = legitpix(g$sel, g$zloc, zenclick)
ypick = kix$ypick
ppick = kix$ppick
myinfo = list(yr=nh$info$yr, jd=nh$info$jd, hr=nh$info$hr, mi=nh$info$mi, sec=rep(0, times=length(nh$info$mi)))
if(length(ypick)>0)
{ ############ length(ypick) proceed only if have legitimate picks
ipick = sel[ypick]
npick = length(ypick)
## Picks are consumed in pairs: (1,2), (3,4), ...
pairseq = seq(from=1, to=npick-1, by=2)
##### Output1 = vector(length=length(pairseq))
Output2 = vector(length=length(pairseq))
for(iz in pairseq)
{ ############### loop over pairs of picks
i1 = ipick[iz]
################ this is the time in sec from the beginning of the trace
asec = nh$info$sec[i1]+nh$info$msec[i1]/1000+nh$info$t1[i1]-nh$info$off[i1]+ppick[iz]
## NOTE(review): with a single pick the window defaults to 5 s, but the
## which.time selection below still indexes ppick[iz+1] - confirm intent.
if(npick<2)
{
bsec = asec+5
}
else
{
iz1 = ipick[iz+1]
bsec = nh$info$sec[iz1]+nh$info$msec[iz1]/1000+nh$info$t1[iz1]-nh$info$off[iz1]+ppick[iz+1]
}
rsig1 = nh$JSTR[[i1]]
t1 = seq(from=0, length=length(rsig1), by=nh$dt[i1])
which.time = which( t1>ppick[iz] & t1< ppick[iz+1] )
rwhich = range(which.time)
rsig = rsig1[ which.time ]
## Demean the windowed signal before computing the RMS.
rsig = rsig-mean(rsig)
rms = sqrt( mean( rsig^2 ))
cat(paste(sep=" ", "#########", iz, i1, format(ppick[iz]), format(ppick[iz+1]),
format(asec) , format(bsec), length(rsig), format(rms) ), sep="\n" )
Output2[iz] = paste(sep=" ",
nh$STNS[i1],
nh$COMPS[i1] ,
myinfo$yr[i1],
myinfo$jd[i1],
myinfo$hr[i1],
myinfo$mi[i1],
format(asec),
format(bsec),
format(rms))
dur = diff(c(asec, bsec) )
if(is.null(dur)) dur = 0
#### g$WPX = pickhandler(i1=i1, ppick=ppick[iz], kzap=kzap, err=NA, ycol=ycol, NPX=g$NPX, g$WPX, nh)
#### g$NADDPIX = g$NADDPIX+1
#### g$NPX = g$NPX+1
#### Nn = names(g$WPX)
#### g$WPX =rbind(g$WPX, rep(NA, length(Nn)))
}
## Echo a scan()-ready listing of all the window RMS values.
cat("############", sep="\n")
cat( "OUTrms =scan(file=\"\", what=list(stn=\"\", comp=\"\", yr=0, jd=0, hr=0, mi=0, t1=0, t2=0, rms=0))", sep="\n" )
for(iz in pairseq)
{
cat(Output2[iz], sep="\n")
}
cat("\n" )
cat("######", sep="\n")
}
else
{
print("not enough legitimate picks, need at least 2 or more")
}
}
else
{
print("not enough legitimate picks, need at least 2 or more")
}
g$zloc = list(x=NULL, y=NULL)
g$action = "donothing"
invisible(list(global.vars=g))
}
###############################
LocStyle <- function(nh, g)
{
##### BUTTONDOC:LocStyle:'choose the locator style for picking in swig'
  ## Pop up a chooser and record the selected locator drawing style code
  ## in g$ilocstyle (-1 = points ... 3 = segs+long-abline).
  g$ilocstyle = -1
  inum = c(-1, 0, 1, 2, 3)
  achoice = c("points", "abline", "segs(default)", "segs+abline", "segs+long-abline")
  P2 = RPMG::chooser(achoice, ncol = 5, nsel = 1, newdev = TRUE, STAY = FALSE,
                     cols = rgb(1, .7, .7), main = "", pch = 21, cex = 3,
                     col = 'red', bg = 'blue')
  i = which(P2 == achoice)
  g$ilocstyle = inum[i]
  ## BUG FIX: the original ended this sequence with a bare `g$iloc`
  ## expression, which evaluates and discards a (nonexistent) element and
  ## does nothing; it has been removed.
  g$action = "donothing"
  invisible(list(NH = nh, global.vars = g))
}
#### BUTTONS
########################################## BUttons
### first button simply returns the name of the button pushed
###### and the clicks and picks
NEXT <- function(nh, g)
{
##### BUTTONDOC:NEXT:'Next BATCH of FILES'
  ## If clicks were made before the button press, convert them to picks;
  ## otherwise just report which button was pushed.
  rd <- if (g$zenclick > 1) {
    getrdpix(g$zloc, g$zenclick, g$sel, nh)
  } else {
    list(PUSHED = "NEXT")
  }
  g$action <- "break"
  g$rd <- rd
  g$zloc <- list(x = NULL, y = NULL)
  invisible(list(global.vars = g))
}
PREV <- function(nh, g)
{
##### BUTTONDOC:PREV:'Previous BATCH of FILES'
  ## If clicks were made before the button press, convert them to picks;
  ## otherwise just report which button was pushed.
  rd <- if (g$zenclick > 1) {
    getrdpix(g$zloc, g$zenclick, g$sel, nh)
  } else {
    list(PUSHED = "PREV")
  }
  g$action <- "break"
  g$rd <- rd
  g$zloc <- list(x = NULL, y = NULL)
  invisible(list(global.vars = g))
}
HALF <- function(nh, g)
{
##### BUTTONDOC:HALF:'Shift Half a window'
  ## If clicks were made before the button press, convert them to picks;
  ## otherwise just report which button was pushed.
  rd <- if (g$zenclick > 1) {
    getrdpix(g$zloc, g$zenclick, g$sel, nh)
  } else {
    list(PUSHED = "HALF")
  }
  g$action <- "break"
  g$zloc <- list(x = NULL, y = NULL)
  g$rd <- rd
  invisible(list(global.vars = g))
}
CENTER <- function(nh, g)
{
##### BUTTONDOC:CENTER:'Center a window'
  ## If clicks were made before the button press, convert them to picks;
  ## otherwise just report which button was pushed.
  rd <- if (g$zenclick > 1) {
    getrdpix(g$zloc, g$zenclick, g$sel, nh)
  } else {
    list(PUSHED = "CENTER")
  }
  g$action <- "break"
  g$rd <- rd
  g$zloc <- list(x = NULL, y = NULL)
  invisible(list(global.vars = g))
}
MARK <- function(nh, g)
{
##### BUTTONDOC:MARK:'Mark a trace'
  ## If clicks were made before the button press, convert them to picks;
  ## otherwise just report which button was pushed.
  rd <- if (g$zenclick > 1) {
    getrdpix(g$zloc, g$zenclick, g$sel, nh)
  } else {
    list(PUSHED = "MARK")
  }
  g$action <- "break"
  g$rd <- rd
  g$zloc <- list(x = NULL, y = NULL)
  invisible(list(global.vars = g))
}
##########################################
DOC <- function(nh, g)
{
##### BUTTONDOC:DOC:'Show documentation'
  ## Pop up the help listing for the current set of button labels.
  PICK.DOC(g$BLABS)
  g$action <- "replot"
  g$zloc <- list(x = NULL, y = NULL)
  invisible(list(global.vars = g))
}
##########################################
REFRESH <- function(nh, g)
{
##### BUTTONDOC:REFRESH:'Refresh screen'
  ## Mark the last selection with dashed grey verticals, then reset the
  ## selection to the full extent of the current plot region.
  u <- par("usr")
  L <- length(g$sloc$x)
  if (L > 1) {
    abline(v = g$sloc$x[c(L - 1, L)], col = gray(0.8), lty = 2)
  }
  g$sloc <- list(x = u[1:2], y = u[3:4])
  g$zloc <- list(x = NULL, y = NULL)
  g$action <- "replot"
  invisible(list(global.vars = g))
}
#######
RESTORE <- function(nh, g)
{
##### BUTTONDOC:RESTORE:'Restore from zoom'
  ## Clear the zoom window (g$WIN) so the next replot shows the full trace.
  u <- par("usr")
  L <- length(g$sloc$x)
  ## NOTE(review): the original author flagged the abline below as
  ## wrong/ineffective; it is kept unchanged for behavioural parity.
  if (L > 1) {
    abline(v = g$sloc$x[c(L - 1, L)], col = gray(0.8), lty = 2)
  }
  g$sloc <- list(x = u[1:2], y = u[3:4])
  g$zloc <- list(x = NULL, y = NULL)
  g$WIN <- NULL
  g$action <- "replot"
  invisible(list(global.vars = g))
}
#######
ZOOM.out <- function(nh, g)
{
##### BUTTONDOC:ZOOM.out:'Zoom out'
  ## Widen the current x-window by 30% of its width on each side.
  u <- par("usr")
  DX <- 0.3 * (u[2] - u[1])
  g$WIN <- c(u[1] - DX, u[2] + DX)
  g$zloc <- list(x = NULL, y = NULL)
  g$action <- "replot"
  invisible(list(global.vars = g))
}
ZOOM.in <- function(nh, g)
{
##### BUTTONDOC:ZOOM.in:'Zoom in'
  ## Zoom to the window defined by the last two trace clicks; if the user
  ## did not click a window, shrink the current window by 30% on each side.
  zenclick = length(g$zloc$x)
  if (zenclick >= 3)
    {
      ## BUG FIX: the original indexed with the stale g$zenclick
      ## (n1 = g$zenclick - 2), which need not match the click count just
      ## computed from g$zloc and could be NULL; use the local count.
      n1 = zenclick - 2
      pwin = sort(g$zloc$x[c(n1, n1 + 1)])
      g$WIN = pwin
    }
  else
    {
      u = par("usr")
      DX = (u[2] - u[1]) * 0.3
      zloc = list(x = c(u[1] + DX, u[2] - DX))
      g$WIN = zloc$x
    }
  g$zloc = list(x = NULL, y = NULL)
  g$action = "replot"
  invisible(list(global.vars = g))
}
#######
LEFT <- function(nh, g)
{
##### BUTTONDOC:LEFT:'Shift Left'
  ## Pan the window left by 30% of its current width.
  u <- par("usr")
  DX <- 0.3 * (u[2] - u[1])
  g$WIN <- c(u[1], u[2]) - DX
  g$zloc <- list(x = NULL, y = NULL)
  g$action <- "replot"
  invisible(list(global.vars = g))
}
RIGHT <- function(nh, g)
{
##### BUTTONDOC:RIGHT:'Shift Right'
  ## Pan the window right by 30% of its current width.
  u <- par("usr")
  DX <- 0.3 * (u[2] - u[1])
  g$WIN <- c(u[1], u[2]) + DX
  g$zloc <- list(x = NULL, y = NULL)
  g$action <- "replot"
  invisible(list(global.vars = g))
}
#######
SCALE <- function(nh, g)
{
##### BUTTONDOC:SCALE:'Toggle Scale by trace/window'
  ## Flip the scaling mode flag between its two states, 1 and 2.
  g$ScaleFACT <- if (g$ScaleFACT == 1) 2 else 1
  g$action <- "replot"
  g$zloc <- list(x = NULL, y = NULL)
  invisible(list(global.vars = g))
}
########################################
Xwin <- function(nh, g)
{
##### BUTTONDOC:Xwin:'Delete all windows except main'
  ## Close every open graphics device except the main swig window, then
  ## make the main window the active device again.
  ALLdevs = dev.list()
  ww = ALLdevs[which(g$MAINdev != ALLdevs)]
  ## BUG FIX: the original looped over 1:length(ww), which evaluates to
  ## c(1, 0) when there is nothing to close and then calls dev.off(NA);
  ## seq_along() correctly yields an empty loop in that case.
  for (i in seq_along(ww))
    {
      dev.off(which = ww[i])
    }
  dev.set(g$MAINdev)
  g$zloc = list(x = NULL, y = NULL)
  g$action = "donothing"
  invisible(list(global.vars = g))
}
########################################
PSEL <- function(nh, g)
{
##### BUTTONDOC:PSEL:'Pick trace Sta/COMP to show'
  ## Let the user choose which station/component traces to display, then
  ## reset the time origin to the start of the first selected trace.
  sel <- SELSTA(nh, sel = g$sel, newdev = TRUE, STAY = FALSE)
  ## Note: g$du is computed from the PREVIOUS selection, matching the
  ## original code.
  g$du <- 1 / length(nh$dt[g$sel])
  isel <- sel[1]
  ## Absolute start time of the first selected trace within its day.
  g$Torigin <- list(
    jd = nh$info$jd[isel],
    hr = nh$info$hr[isel],
    mi = nh$info$mi[isel],
    sec = nh$info$sec[isel] + nh$info$msec[isel] / 1000 +
      nh$info$t1[isel] - nh$info$off[isel])
  g$sel <- sel
  g$STNS <- nh$STNS[sel]
  g$COMPS <- nh$COMPS[sel]
  g$zloc <- list(x = NULL, y = NULL)
  g$action <- "replot"
  invisible(list(global.vars = g))
}
#######################################
#### this needs work
FLIP <- function(nh, g)
{
##### BUTTONDOC:FLIP:'Flip selected trace'
  ## Reverse the polarity (multiply by -1) of every clicked trace.
  zenclick <- length(g$zloc$x)
  if (zenclick > 1) {
    nc <- 1:(zenclick - 1)
    ## Map click y-positions to trace panel indices (panels stack top-down).
    ypick <- length(g$sel) - floor(length(g$sel) * g$zloc$y[nc])
    ipick <- unique(g$sel[ypick])
    cat("FLIP: pwig POLARITY REVERSED: "); cat(ipick, sep = " "); cat("\n")
    for (jtr in ipick) {
      nh$JSTR[[jtr]] <- (-1) * nh$JSTR[[jtr]]
    }
  } else {
    cat("FLIP: No traces selected: Try Again"); cat("\n")
  }
  g$zloc <- list(x = NULL, y = NULL)
  g$action <- "replace"
  invisible(list(NH = nh, global.vars = g))
}
########################
PTS <- function(nh, g)
{
##### BUTTONDOC:PTS:'Show sample points'
  ## Toggle plotting of individual sample points on the traces.
  g$pts <- !g$pts
  g$zloc <- list(x = NULL, y = NULL)
  g$action <- "replot"
  invisible(list(global.vars = g))
}
FILT<-function(nh, g)
{
## Interactively choose a filter (choosfilt) and apply it to the selected
## traces.  Returns NH = filtered (or untouched) trace structure plus the
## updated globals; g$action tells the caller what to do next:
## "replace" (show filtered data), "revert" (restore raw), or "replot".
##### BUTTONDOC:FILT:'Filter trace'
### print( data.frame(g$filters) )
Fdef = choosfilt(thefilts=g$filters, ncol=5)
if(!is.null(Fdef))
{
if(Fdef$type=="None")
{
## "None" chosen: drop the subtitle and revert to raw data (early return).
dev.set( g$MAINdev)
g$SUBTIT = NA
g$action = "revert"
KF = nh
return(list(global.vars=g))
}
else
{
### g$SUBTIT = paste(Fdef$type,Fdef$fl, Fdef$fh, sep=" ")
## Stamp the subtitle with the filter band and apply the filter.
g$SUBTIT = filterstamp(Fdef$fl, Fdef$fh, Fdef$type)
g$action = "replace"
KF = FILT.SEISN(nh, sel = g$sel, FILT=Fdef)
}
### X11()
}
else
{
## Chooser cancelled: keep the data unchanged and just replot.
g$action = "replot"
KF = nh
}
g$zloc = list(x=NULL, y=NULL)
dev.set( g$MAINdev)
invisible(list(NH=KF, global.vars=g))
}
UNFILT <- function(nh, g)
{
##### BUTTONDOC:UNFILT:'Unfilter traces'
  ## Drop the filter subtitle and ask the caller to revert to the raw data.
  dev.set(g$MAINdev)
  g$SUBTIT <- NA
  g$zloc <- list(x = NULL, y = NULL)
  g$action <- "revert"
  invisible(list(global.vars = g))
}
#########################
fspread<-function(nh, g)
{
## Pick a time window on one trace (two clicks + the button click, so
## zenclick >= 3) and display a filter-bank "spread" plot of that segment
## via FILT.spread in a new graphics device.
##### BUTTONDOC:fspread:'do a filter spread on selection'
### click on a trace panel and do a filter spread
zenclick = length(g$zloc$x)
if(zenclick>=3)
{
## Map the last click's y-position to a trace panel index.
ypick = length(g$sel)-floor(length(g$sel)*g$zloc$y[zenclick-1])
ipick = g$sel[ypick]
print(paste(sep=' ',"fspread", ypick, nh$info$name[ ipick]))
famp = nh$JSTR[[ipick]]
pwin = sort(c(g$zloc$x[zenclick-2], g$zloc$x[zenclick-1]))
ex = seq(from=nh$info$t1[ipick], by=nh$info$dt[ipick], length.out=length(famp) )
temp = famp[ ex > pwin[1] & ex <pwin[2]]
#### Xamp = -1*temp
smallex = ex[ ex > pwin[1] & ex <pwin[2]]
## Absolute start time (s) of the window, including msec and offsets.
asec = nh$info$sec[ipick]+nh$info$msec[ipick]/1000+nh$info$t1[ipick]-nh$info$off[ipick]+pwin[1]
spaz = recdate( nh$info$jd[ipick], nh$info$hr[ipick], nh$info$mi[ipick], asec, nh$info$yr[ipick] )
spaz$yr = as.integer(nh$info$yr[ipick])
MODAY = getmoday(spaz$jd, spaz$yr)
TP = list(yr=spaz$yr[1], jd=spaz$jd, mo=MODAY$mo,
dom= MODAY$dom ,hr=spaz$hr, mi=spaz$mi, sec=spaz$sec )
dst = dateStamp(TP)
titl = paste(nh$STNS[ipick], nh$COMPS[ipick], dst)
## Fixed filter bank: common low corner, increasing high corners (Hz).
fh=c(1/20, 1/10, 1/5, .5, 1, 2, 3)
fl=rep(1/100, times=length(fh) )
dev.new(width=14, height=10)
## Trim 20% off each end of the window for the spread display.
jex = range(smallex)
jr = jex[2] - jex[1]
j10 = jr*0.2
jwin = c(jex[1]+j10, jex[2]-j10)
# jwin = NULL
FILT.spread(smallex, temp, nh$dt[ipick], fl = fl, fh = fh, sfact = 1, WIN = jwin, PLOT = TRUE, TIT =titl , TAPER = 0.1, POSTTAPER=NULL)
dev.set(g$MAINdev)
g$zloc = list(x=NULL, y=NULL)
g$action="donothing"
invisible(list(global.vars=g))
}
else
{
cat("XTR WARNING: no window or trace has been selected:", sep="\n")
RETX=NULL
g$zloc = list(x=NULL, y=NULL)
g$action="donothing"
invisible(list(global.vars=g))
}
}
SPEC<-function(nh, g)
{
## Display multitaper spectra (MTM.drive) of one or more clicked windows.
## Clicks are consumed in pairs: each pair defines a window on a trace.
##### BUTTONDOC:SPEC:'Display Spectrum'
nclick = length(g$zloc$x)
if(nclick>=3)
{
nc = 1:(nclick-1)
lnc = length(nc)
## Map click y-positions to trace panel indices.
ypick = length(g$sel)-floor(length(g$sel)*g$zloc$y[nc])
ipick = g$sel[ypick]
### print(paste(sep=' ',ypick, NH$info$name[ ipick]))
print(ipick)
## Odd indices start each click pair.
i1 = seq(from=1, to=max(nc), by=2)
i1 = i1[i1<max(nc)]
amp = list()
dees = list()
stamps = list()
speccol = vector()
ni = 0
for(ipix in i1)
{
pwin = sort(c(g$zloc$x[ipix], g$zloc$x[ipix+1]))
print(c(ipix, pwin))
kpix = ipick[ipix]
famp = nh$JSTR[[kpix]]
ex = seq(from=nh$info$t1[kpix], by=nh$info$dt[kpix], length.out=length(famp) )
temp = famp[ ex > pwin[1] & ex <pwin[2]]
## Skip windows containing NA samples.
if(any(is.na(temp)))
{
print(paste("getting NA in trace",kpix, nh$STNS[kpix],nh$COMPS[kpix],pwin[1], pwin[2] ))
next
}
ni = ni +1
## Demean each segment before the spectral estimate.
amp[[ni]] = temp-mean(temp)
dees[ni] = nh$dt[kpix]
speccol[ni] = g$pcols[kpix]
ftime = Zdate(nh$info, kpix, pwin[1])
psta = nh$STNS[kpix]
pcomp = nh$COMPS[kpix]
STAMP = paste(sep=" ", psta, pcomp, ftime)
stamps[ni] = STAMP
}
print(stamps)
a = list(y=amp, dt=dees, stamps=stamps)
if(length(a$y)>0)
{
dev.new(width=10, height=10)
## Frequency band: 0.1 Hz up to a third of the sampling rate.
f1 = 0.1
f2 = floor(0.33*(1/nh$dt[ipick]))
### oop=par(no.readonly = TRUE)
### par(mfrow=c(length(a$y), 1) )
### for(io in 1:length(a$y)) plot(a$y[[io]], type='l')
### par(oop)
### readline("type in something")
MTM.drive(a, f1, f2[1], COL=speccol, PLOT=TRUE)
}
dev.set(g$MAINdev)
}
else
{
cat("SPEC WARNING: no window or trace has been selected:", sep="\n")
}
g$zloc = list(x=NULL, y=NULL)
g$action="donothing"
invisible(list(global.vars=g))
}
WWIN<-function(nh, g)
{
## Plot each clicked window as its own time-series in a new device,
## waiting for a locator click between plots.  Clicks are consumed in
## pairs, each pair defining a window on one trace.
##### BUTTONDOC:WWIN:'Window'
nclick = length(g$zloc$x)
if(nclick>=3)
{
nc = 1:(nclick-1)
lnc = length(nc)
## Map click y-positions to trace panel indices.
ypick = length(g$sel)-floor(length(g$sel)*g$zloc$y[nc])
ipick = g$sel[ypick]
### print(paste(sep=' ',ypick, NH$info$name[ ipick]))
print(ipick)
## Odd indices start each click pair.
i1 = seq(from=1, to=max(nc), by=2)
i1 = i1[i1<max(nc)]
amp = list()
dees = list()
stamps = list()
speccol = vector()
ni = 0
for(ipix in i1)
{
pwin = sort(c(g$zloc$x[ipix], g$zloc$x[ipix+1]))
print(c(ipix, pwin))
kpix = ipick[ipix]
famp = nh$JSTR[[kpix]]
ex = seq(from=nh$info$t1[kpix], by=nh$info$dt[kpix], length.out=length(famp) )
temp = famp[ ex > pwin[1] & ex <pwin[2]]
## Skip windows containing NA samples.
if(any(is.na(temp)))
{
print(paste("getting NA in trace",kpix, nh$STNS[kpix],nh$COMPS[kpix],pwin[1], pwin[2] ))
next
}
ni = ni +1
amp[[ni]] = temp-mean(temp)
dees[ni] = nh$dt[kpix]
speccol[ni] = g$pcols[kpix]
ftime = Zdate(nh$info, kpix, pwin[1])
psta = nh$STNS[kpix]
pcomp = nh$COMPS[kpix]
STAMP = paste(sep=" ", psta, pcomp, ftime)
stamps[ni] = STAMP
}
dev.new(width=10, height=10)
## One plot per extracted window; locator(1) pauses until a click.
for(i in 1:ni) {
y = amp[[i]]
len = length(amp[[i]])
print(c(i, len, dees[[i]]))
xt = seq(from=0, by=dees[[i]], length=len)
plot(xt , amp[[i]],main=stamps[[i]], type='l');
locator(1) }
dev.set(g$MAINdev)
}
g$zloc = list(x=NULL, y=NULL)
g$action="donothing"
invisible(list(global.vars=g))
}
##########################
SGRAM<-function(nh, g)
{
## Display a spectrogram (SPECT.drive) of a clicked window; with only the
## button click available, fall back to the current display window g$WIN.
##### BUTTONDOC:SGRAM:'Spectrogram'
zenclick = length(g$zloc$x)
if(zenclick>=2)
{
if(zenclick==2)
{
## Single pick: use the full current window.
ypick = length(g$sel)-floor(length(g$sel)*g$zloc$y[zenclick-1])
ipick = g$sel[ypick]
### print(paste(sep=' ',ypick, NH$info$name[ipick]))
pwin = g$WIN
}
else
{
## Two picks: use the window they bracket.
ypick = length(g$sel)-floor(length(g$sel)*g$zloc$y[zenclick-1])
ipick = g$sel[ypick]
### print(paste(sep=' ',ypick, NH$info$name[ ipick]))
pwin = sort(c(g$zloc$x[zenclick-2], g$zloc$x[zenclick-1]))
}
## Remember the window so a later buttonless call can reuse it.
g$LASTwin = pwin
### print(paste(sep=" ", "DOING SGRAM Nclick, ipick, pwin", Nclick, ipick, pwin))
famp = nh$JSTR[[ipick]]
ex = seq(from=nh$info$t1[ipick], by=nh$info$dt[ipick], length.out=length(famp))
temp = famp[ ex > pwin[1] & ex <pwin[2]]
Xamp = temp-mean(temp)
# ftime = Zdate(nh$info, g$sel[ypick], pwin[1])
ftime = ghstamp(nh, sel=g$sel[ypick], WIN=pwin )
print(paste(sep=" ",min(ex), max(ex)))
print(paste(sep=" ",pwin[1], pwin[2]))
print(paste(sep=" ", ipick, length(famp),length(temp),length(Xamp), nh$dt[ipick],ftime))
SPECT.drive(Xamp, DT=nh$dt[ipick], STAMP=ftime)
### plotevol(DEV, log=1, fl=0, fh=15, col=rainbow(50))
}
else
{
pwin = g$LASTwin
ypick = 1
ipick = g$sel[1]
cat("SGRAM WARNING: no window or trace has been selected:" , sep="\n")
}
dev.set(g$MAINdev)
g$zloc = list(x=NULL, y=NULL)
g$action="donothing"
invisible(list(global.vars=g))
}
WLET<-function(nh, g)
{
##### BUTTONDOC:WLET:'Wavelet Transform'
## Button callback: compute and display a wavelet transform of the
## selected trace segment.  Same click/window logic as SGRAM, but the
## windowed segment is passed to wlet.drive() instead of SPECT.drive().
## nh: seismic data list; g: GUI state.  Returns the state invisibly.
zenclick = length(g$zloc$x)
if(zenclick>=2)
{
if(zenclick==2)
{
## One trace click: reuse the current global window g$WIN.
ypick = length(g$sel)-floor(length(g$sel)*g$zloc$y[zenclick-1])
ipick = g$sel[ypick]
### print(paste(sep=' ',ypick, NH$info$name[ipick]))
pwin = g$WIN
}
else
{
## Three or more clicks: the last two x positions bracket the window.
ypick = length(g$sel)-floor(length(g$sel)*g$zloc$y[zenclick-1])
ipick = g$sel[ypick]
### print(paste(sep=' ',ypick, NH$info$name[ ipick]))
pwin = sort(c(g$zloc$x[zenclick-2], g$zloc$x[zenclick-1]))
}
g$LASTwin = pwin  # remember window for later clickless invocations
### print(paste(sep=" ", "DOING SGRAM Nclick, ipick, pwin", Nclick, ipick, pwin))
famp = nh$JSTR[[ipick]]
## Build the trace time axis, window it, and demean the segment.
ex = seq(from=nh$info$t1[ipick], by=nh$info$dt[ipick], length.out=length(famp))
temp = famp[ ex > pwin[1] & ex <pwin[2]]
Xamp = temp-mean(temp)
## ftime = Zdate(nh$info, g$sel[ypick], pwin[1])
ftime = ghstamp(nh, sel=g$sel[ypick], WIN=pwin )
## Launch the interactive wavelet-transform display.
wlet.drive(Xamp, nh$dt[ipick], STAMP=ftime)
### plotevol(DEV, log=1, fl=0, fh=15, col=rainbow(50))
}
else
{
## No usable clicks: warn only.
## NOTE(review): pwin/ypick/ipick assigned here are never used afterwards.
pwin = g$LASTwin
ypick = 1
ipick = g$sel[1]
cat("WLET WARNING: no window or trace has been selected:" , sep="\n")
}
dev.set(g$MAINdev)  # return focus to the main plotting device
g$zloc = list(x=NULL, y=NULL)  # clear stored clicks
g$action="donothing"
invisible(list(global.vars=g))
}
XTR<-function(nh, g)
{
##### BUTTONDOC:XTR:'Extract single trace'
## Button callback: extract the windowed portion of one trace and return
## it (with an absolute time stamp) to the caller, exiting the GUI loop.
## Requires at least 3 clicks: two to bracket the window, one on a trace.
zenclick = length(g$zloc$x)
if(zenclick>=3)
{
ypick = length(g$sel)-floor(length(g$sel)*g$zloc$y[zenclick-1])
ipick = g$sel[ypick]
print(paste(sep=' ',"EXTRACT", ypick, nh$info$name[ ipick]))
famp = nh$JSTR[[ipick]]
pwin = sort(c(g$zloc$x[zenclick-2], g$zloc$x[zenclick-1]))
## Trace time axis; keep only samples (and times) inside the window.
ex = seq(from=nh$info$t1[ipick], by=nh$info$dt[ipick], length.out=length(famp) )
temp = famp[ ex > pwin[1] & ex <pwin[2]]
#### Xamp = -1*temp
smallex = ex[ ex > pwin[1] & ex <pwin[2]]
## Absolute second of the window start: trace start time corrected for
## the per-trace offset, plus the window position.
asec = nh$info$sec[ipick]+nh$info$msec[ipick]/1000+nh$info$t1[ipick]-nh$info$off[ipick]+pwin[1]
## recdate() normalizes jd/hr/mi/sec; getmoday() converts day-of-year
## to month/day-of-month for the time stamp.
spaz = recdate( nh$info$jd[ipick], nh$info$hr[ipick], nh$info$mi[ipick], asec, nh$info$yr[ipick] )
spaz$yr = as.integer(nh$info$yr[ipick])
MODAY = getmoday(spaz$jd, spaz$yr)
TP = list(yr=spaz$yr, jd=spaz$jd, mo=MODAY$mo,
dom= MODAY$dom ,hr=spaz$hr, mi=spaz$mi, sec=spaz$sec )
## RETX is the payload handed back to the invoking application.
RETX = list(but="RET", x=smallex, y=temp, dt=nh$dt[ipick], STNS=nh$STNS[ipick],
COMPS=nh$COMPS[ipick], fname=nh$info$name[ipick] , TIMEpick=TP, mark=TRUE, deltat=nh$dt[ipick] )
g$zloc = list(x=NULL, y=NULL)
g$action="exit"  # signal the GUI loop to terminate and return RETX
invisible(list(RETX = RETX, global.vars=g))
}
else
{
## Not enough clicks to define a window and a trace: warn and continue.
cat("XTR WARNING: no window or trace has been selected:", sep="\n")
RETX=NULL
g$zloc = list(x=NULL, y=NULL)
g$action="donothing"
invisible(list(global.vars=g))
}
}
########################################
Pinfo<-function(nh, g)
{
##### BUTTONDOC:Pinfo:'Pick information'
## Button callback: print detailed information about the current picks —
## absolute times, station/component lists, inter-pick time differences —
## formatted as paste-able R code for downstream use.
## nh: seismic data list; g: GUI state.  Console output only.
zenclick = length(g$zloc$x)
if(zenclick>=2)
{
### NSEL = length(nh$dt[g$sel])
### du = 1/NSEL
## legitpix() maps raw clicks to legitimate (trace row, pick time) pairs.
kix = legitpix(g$sel, g$zloc, zenclick)
ypick = kix$ypick
ppick = kix$ppick
dpick = c(0, diff(ppick))  # time differences between successive picks
ipick = g$sel[ypick]
## NOTE(review): m is computed but never used below.
m = match(g$STNS[ipick],g$UNIsta)
### jj = floor(( g$zloc$y[zenclick-1])/du)
## Absolute second of the most recent pick on its trace.
asec = nh$info$sec[ipick]+nh$info$msec[ipick]/1000+nh$info$t1[ipick]-nh$info$off[ipick]+ppick[zenclick-1]
print(paste(sep=" ", "PICK=",
nh$info$yr[ipick], nh$info$jd[ipick], nh$info$hr[ipick],
nh$info$mi[ipick], asec, "sta=", nh$STNS[ipick], "comp=", nh$COMPS[ipick] ))
print(ppick)
## pstas = paste(nh$STNS[ipick], nh$COMPS[ipick], sep=".")
## Full date/time records (one per pick) for all clicked traces.
rd = getrdpix(g$zloc, zenclick, g$sel, nh)
## Approximate fractional-year times, used only to find the earliest pick.
RDtmes = rd$yr+rd$jd/366+rd$hr/(366*24)+rd$mi/(366*24*60)+rd$sec/(366*24*3600)
wearliest = which.min(RDtmes)
PAS = paste(sep="_", "Jtim(", rd$jd[wearliest], ", hr=" , rd$hr[wearliest] ,
", mi=", rd$mi[wearliest], ",sec=", rd$sec[wearliest], ")")
## Seconds of each pick relative to the earliest pick.
DEEtimes = YRsecdif(
rd$jd[wearliest],rd$hr[wearliest],rd$mi[wearliest], rd$sec[wearliest],
rd$jd, rd$hr, rd$mi, rd$sec, rd$yr[wearliest], rd$yr)
apickorg = paste(sep=",", rd$yr[wearliest], rd$jd[wearliest],rd$hr[wearliest],rd$mi[wearliest], rd$sec[wearliest])
## pstas = nh$STNS[ipick]
apstas = paste(sep="", '"', paste(rd$stn, collapse='","'), '"')
## pcomps =nh$COMPS[ipick]
apcomps = paste(sep="", '"', paste(rd$comp, collapse='","'), '"')
## Emit the picks as ready-to-source R assignments.
cat("", sep="\n")
cat("", sep="\n")
cat("##################", sep="\n")
cat( paste(sep=" ", "orgtim=c(", apickorg , ")") , sep="\n")
cat("", sep="\n")
cat( paste(sep=" ", "stns=c(", apstas, ")") , sep="\n")
cat( paste(sep=" ", "comps=c(", apcomps, ")") , sep="\n")
cat( paste(sep=" ", "tims=c(", paste(DEEtimes, collapse=","), ")") , sep="\n")
cat("", sep="\n")
cat("##################", sep="\n")
cat("", sep="\n")
cat("Time Differences between picks:", sep="\n")
cat(paste(dpick), sep="\n")
cat("", sep="\n")
#### print(zloc$y[1:(zenclick-1)])
#### print(ypick)
#### print(ipick)
## Emit the raw pick records in a format readable back with scan().
cat("##################", sep="\n")
cat("rd = scan(file='', what=list(jd=0,hr=0,mi=0,sec=0,yr=0,stn='',comp=''))" , sep="\n")
write.table(file="", data.frame(rd), row.names =FALSE, col.names =FALSE )
cat(" ", sep="\n")
cat("GMT TIME: ", sep="\n")
showdatetime(rd)
cat(" ", sep="\n")
PAS = paste(sep=" ", "Jtim(", rd$jd, ", hr=" , rd$hr , ", mi=", rd$mi, ",sec=", rd$sec, ")")
cat("", sep="\n")
cat(PAS, sep="\n")
## If a time-zone shift is attached to the data, also show local times.
if(!is.null(nh$TZ))
{
rdlocal = recdate(jd=rd$jd, hr=rd$hr+nh$TZ, mi=rd$mi, sec=rd$sec , yr=rd$yr)
cat(" ", sep="\n")
cat(paste(sep=" ", "LOCAL TIMES, SHIFT=", nh$TZ) , sep="\n")
showdatetime(rdlocal, AMPM=TRUE)
}
}
else
{
cat("Pinfo WARNING: no pick or trace has been selected:", sep="\n")
}
g$zloc = list(x=NULL, y=NULL)  # clear stored clicks
g$action="donothing"
invisible(list(global.vars=g))
}
#################################
#################################
TSHIFT<-function(nh, g)
{
##### BUTTONDOC:TSHIFT:'Shift traces to line up with first pick'
## Align the picked traces on the first pick: each picked trace gets a
## time offset (pick time minus first pick time) stored in g$ASHIFT, and
## a paste-able "kshift" assignment is echoed to the console.
## With fewer than two clicks, the original shifts are restored instead.
nclicks = length(g$zloc$x)
if (nclicks >= 2)
{
  picks = legitpix(g$sel, g$zloc, nclicks)
  trace.rows = picks$ypick
  pick.times = picks$ppick
  traces = g$sel[trace.rows]
  print(paste(nh$STNS[traces], nh$COMPS[traces]))
  ## Zero shift everywhere except the picked traces, which are shifted
  ## relative to the first pick.
  shifts = rep(0, times = length(nh$STNS))
  shifts[traces] = pick.times - pick.times[1]
  ## print(data.frame(list(sta=nh$STNS, comp=nh$COMPS, tshft=shifts)))
  print(data.frame(list(sta = nh$STNS[traces], comp = nh$COMPS[traces], tshft = shifts[traces])))
  shift.info = list(name = nh$STNS[traces], t = shifts[traces])
  ## Echo the shifts as an R list assignment that can be pasted back in.
  cat(file = "", "\n\n")
  label = "kshift"
  cat(file = "", paste0(label, "=list()"))
  cat(file = "", "\n")
  name.line = paste0(label, "$name=c(\"", paste(format(shift.info$name), collapse = "\",\""), "\")")
  time.line = paste0(label, "$t=c(", paste(format(shift.info$t), collapse = ","), ")")
  cat(file = "", name.line, fill = TRUE)
  cat(file = "", time.line, fill = TRUE)
  cat(file = "", "\n")
  g$ASHIFT = shifts
  g$BLAHSHIFT = shifts
}
else
{
  ## No picks: revert to the shifts the session started with.
  g$ASHIFT = g$SHIFT.ORIG
}
g$zloc = list(x = NULL, y = NULL)  # clear stored clicks
g$action = "replot"                # redraw with the new shifts applied
invisible(list(NH = nh, global.vars = g))
}
#################################
#################################
#################################
RMS<-function(nh, g)
{
##### BUTTONDOC:RMS:'Root Mean Square of selection'
## Button callback: compute the demeaned RMS amplitude between pairs of
## picks (pick 1-2, 3-4, ...) and print the results both as a log and as
## a scan()-able table.  nh: seismic data list; g: GUI state.
zenclick = length(g$zloc$x)
sel = g$sel
if(zenclick>=2)
{
kix = legitpix(g$sel, g$zloc, zenclick)
ypick = kix$ypick
ppick = kix$ppick
## Per-trace date fields with seconds zeroed (used only for printing).
myinfo = list(yr=nh$info$yr, jd=nh$info$jd, hr=nh$info$hr, mi=nh$info$mi, sec=rep(0, times=length(nh$info$mi)))
if(length(ypick)>0)
{ ############ length(ypick) proceed only if have legitimate picks
ipick = sel[ypick]
npick = length(ypick)
## Picks are consumed in consecutive pairs: iz and iz+1.
pairseq = seq(from=1, to=npick-1, by=2)
##### Output1 = vector(length=length(pairseq))
Output2 = vector(length=length(pairseq))
for(iz in pairseq)
{ ############### loop over pairs of picks
i1 = ipick[iz]
################ this is the time in sec from the beginning of the trace
asec = nh$info$sec[i1]+nh$info$msec[i1]/1000+nh$info$t1[i1]-nh$info$off[i1]+ppick[iz]
if(npick<2)
{
## Single pick: default to a 5-second window after it.
## NOTE(review): with npick < 2 pairseq is empty, so this branch looks
## unreachable — confirm intent.
bsec = asec+5
}
else
{
iz1 = ipick[iz+1]
bsec = nh$info$sec[iz1]+nh$info$msec[iz1]/1000+nh$info$t1[iz1]-nh$info$off[iz1]+ppick[iz+1]
}
rsig1 = nh$JSTR[[i1]]
## Sample times relative to trace start; window between the two picks.
## NOTE(review): with an odd pick count the last pair uses ppick[iz+1]=NA,
## yielding an empty window — verify against intended usage.
t1 = seq(from=0, length=length(rsig1), by=nh$dt[i1])
which.time = which( t1>ppick[iz] & t1< ppick[iz+1] )
rwhich = range(which.time)
rsig = rsig1[ which.time ]
rsig = rsig-mean(rsig)  # demean before computing RMS
rms = sqrt( mean( rsig^2 ))
cat(paste(sep=" ", "#########", iz, i1, format(ppick[iz]), format(ppick[iz+1]),
format(asec) , format(bsec), length(rsig), format(rms) ), sep="\n" )
## One output row per pair: station, component, date fields, window, RMS.
Output2[iz] = paste(sep=" ",
nh$STNS[i1],
nh$COMPS[i1] ,
myinfo$yr[i1],
myinfo$jd[i1],
myinfo$hr[i1],
myinfo$mi[i1],
format(asec),
format(bsec),
format(rms))
dur = diff(c(asec, bsec) )
if(is.null(dur)) dur = 0
#### g$WPX = pickhandler(i1=i1, ppick=ppick[iz], kzap=kzap, err=NA, ycol=ycol, NPX=g$NPX, g$WPX, nh)
#### g$NADDPIX = g$NADDPIX+1
#### g$NPX = g$NPX+1
#### Nn = names(g$WPX)
#### g$WPX =rbind(g$WPX, rep(NA, length(Nn)))
}
## Emit the results in a format readable back with scan().
cat("############", sep="\n")
cat( "OUTrms =scan(file=\"\", what=list(stn=\"\", comp=\"\", yr=0, jd=0, hr=0, mi=0, t1=0, t2=0, rms=0))", sep="\n" )
for(iz in pairseq)
{
cat(Output2[iz], sep="\n")
}
cat("\n" )
cat("######", sep="\n")
}
else
{
print("not enough legitimate picks, need at least 2 or more")
}
}
else
{
print("not enough legitimate picks, need at least 2 or more")
}
g$zloc = list(x=NULL, y=NULL)  # clear stored clicks
g$action = "donothing"
invisible(list(global.vars=g))
}
###############################
LocStyle<-function(nh, g)
{
##### BUTTONDOC:LocStyle:'choose the locator style for picking in swig'
### Let the user choose the locator (pick-marker) style used in swig.
## nh: seismic data list (passed through unchanged); g: GUI state.
## Sets g$ilocstyle to one of -1, 0, 1, 2, 3 and returns invisibly.
g$ilocstyle = -1  # fallback value if the chooser is cancelled
inum= c(-1, 0, 1, 2, 3)
achoice = c("points", "abline", "segs(default)", "segs+abline", "segs+long-abline")
P2 = RPMG::chooser(achoice, ncol=5, nsel=1, newdev=TRUE, STAY=FALSE,
cols =rgb(1, .7, .7) , main="" , pch=21, cex=3, col='red' , bg='blue' )
i = which(P2==achoice)
## Guard against an empty/cancelled selection: without it, which() returns
## integer(0) and g$ilocstyle would silently become numeric(0), breaking
## later comparisons.  (A stray no-op expression `g$iloc` was removed.)
if (length(i) == 1) g$ilocstyle = inum[i]
g$action = "donothing"
invisible(list(NH=nh, global.vars=g))
}
|
#' Write Bioinfo RMD
#'
#' Write Bioinformatics RMD workflow.
#'
#' @inheritParams bioinfo_rmd_contrasts
#' @details If need to remove a sample, rerun with new \code{input.files}.
#' @export
#' @examples
#' \donttest{
#' bioinfo_rmd_contrasts_voom(filename="new_analysis", input.files = c("counts.csv", "pheno.csv"),
#' contr.v='c(treat="treat-control")')
#' }
bioinfo_rmd_contrasts_voom <- function(filename, local.path=NULL, data.desc="Gene expression",
                                       input.files, data.nas=TRUE, min.npergrp=3, grp.var="grp",
                                       covars=NULL, aw.model=paste0("~0+", grp.var), use_aw=TRUE, use_trend=FALSE,
                                       contr.v, limma.model=NULL, row.type="gene", gmt_abbrev=c('reactome', 'tft'),
                                       gmt_prefix=c('c2.cp.reactome', 'c3.tft.gtrd')){
  ## Derive the project name and human-readable report title from the filename.
  proj.nm <- sub("analyze_", "", filename)
  yaml.title <- gsub("_", " ", proj.nm)
  yh <- yaml_header(yaml.title=yaml.title)
  if (is.null(local.path)) local.path <- getwd()
  ## Map the mounted drive path to the network share recorded in the report.
  net.path <- sub("B:/", "J:/cores/bioinformatics/", local.path)
  local.path <- sub("B:/", "", local.path)
  sc <- setup_chunk(path=local.path)
  dt <- data_txt(input.files = input.files, path=net.path)
  rd <- read_data_chunk(input.files=input.files, data.logged=TRUE)
  ## Assemble the report as an ordered list of named text/code blocks.
  blocks <- list(yaml=yh, setup=sc, data=dt, read=rd)
  blocks[["feat_filt"]] <- feat_filt_voom_chunk(min.npergrp=min.npergrp, row.type = row.type)
  blocks[["norm"]] <- norm_voom_chunk(proj.nm=proj.nm, voom.model=aw.model, path=net.path, use_aw=use_aw)
  blocks[["bp"]] <- boxplot_chunk(elist=TRUE)
  blocks[["pca"]] <- pca_chunk(grp.var=grp.var, proj.nm=proj.nm, covars=covars, elst=TRUE)
  ## A third input file is taken to be a feature annotation table.
  ## (was: ifelse(length(input.files) >= 3, TRUE, FALSE) — redundant scalar ifelse)
  use_annot <- length(input.files) >= 3
  #don't use aw since already in elst
  blocks[["lc"]] <- limma_contrasts_chunk(grp.var=grp.var, contr.v=contr.v, path=net.path, proj.nm=proj.nm,
                                          limma.model=limma.model, use_aw=FALSE, use_trend=use_trend,
                                          use_annot=use_annot, row.type=row.type, elst=TRUE)
  blocks[["fp"]] <- feature_plots_chunk(grp.var=grp.var, path=net.path, proj.nm=proj.nm, contr.v=contr.v,
                                        use_annot=use_annot, elst=TRUE)
  blocks[["rc"]] <- roast_contrasts_chunk(grp.var=grp.var, path=net.path, elst=TRUE, use_aw=FALSE)
  blocks[["check"]] <- check_chunk()
  blocks[["session"]] <- session_chunk()
  blocks[["refs"]] <- "# References"
  #i want text, but not yaml or code, to skip a line after each \n. Easiest to add "" to text.
  write_blocks(filename=paste0(filename, "_0"), blocks = blocks)
}
| /R/bioinfo_rmd_contrasts_voom.R | permissive | jdreyf/bioinformd | R | false | false | 2,635 | r | #' Write Bioinfo RMD
#'
#' Write Bioinformatics RMD workflow.
#'
#' @inheritParams bioinfo_rmd_contrasts
#' @details If need to remove a sample, rerun with new \code{input.files}.
#' @export
#' @examples
#' \donttest{
#' bioinfo_rmd_contrasts_voom(filename="new_analysis", input.files = c("counts.csv", "pheno.csv"),
#' contr.v='c(treat="treat-control")')
#' }
bioinfo_rmd_contrasts_voom <- function(filename, local.path=NULL, data.desc="Gene expression",
                                       input.files, data.nas=TRUE, min.npergrp=3, grp.var="grp",
                                       covars=NULL, aw.model=paste0("~0+", grp.var), use_aw=TRUE, use_trend=FALSE,
                                       contr.v, limma.model=NULL, row.type="gene", gmt_abbrev=c('reactome', 'tft'),
                                       gmt_prefix=c('c2.cp.reactome', 'c3.tft.gtrd')){
  ## Derive the project name and human-readable report title from the filename.
  proj.nm <- sub("analyze_", "", filename)
  yaml.title <- gsub("_", " ", proj.nm)
  yh <- yaml_header(yaml.title=yaml.title)
  if (is.null(local.path)) local.path <- getwd()
  ## Map the mounted drive path to the network share recorded in the report.
  net.path <- sub("B:/", "J:/cores/bioinformatics/", local.path)
  local.path <- sub("B:/", "", local.path)
  sc <- setup_chunk(path=local.path)
  dt <- data_txt(input.files = input.files, path=net.path)
  rd <- read_data_chunk(input.files=input.files, data.logged=TRUE)
  ## Assemble the report as an ordered list of named text/code blocks.
  blocks <- list(yaml=yh, setup=sc, data=dt, read=rd)
  blocks[["feat_filt"]] <- feat_filt_voom_chunk(min.npergrp=min.npergrp, row.type = row.type)
  blocks[["norm"]] <- norm_voom_chunk(proj.nm=proj.nm, voom.model=aw.model, path=net.path, use_aw=use_aw)
  blocks[["bp"]] <- boxplot_chunk(elist=TRUE)
  blocks[["pca"]] <- pca_chunk(grp.var=grp.var, proj.nm=proj.nm, covars=covars, elst=TRUE)
  ## A third input file is taken to be a feature annotation table.
  ## (was: ifelse(length(input.files) >= 3, TRUE, FALSE) — redundant scalar ifelse)
  use_annot <- length(input.files) >= 3
  #don't use aw since already in elst
  blocks[["lc"]] <- limma_contrasts_chunk(grp.var=grp.var, contr.v=contr.v, path=net.path, proj.nm=proj.nm,
                                          limma.model=limma.model, use_aw=FALSE, use_trend=use_trend,
                                          use_annot=use_annot, row.type=row.type, elst=TRUE)
  blocks[["fp"]] <- feature_plots_chunk(grp.var=grp.var, path=net.path, proj.nm=proj.nm, contr.v=contr.v,
                                        use_annot=use_annot, elst=TRUE)
  blocks[["rc"]] <- roast_contrasts_chunk(grp.var=grp.var, path=net.path, elst=TRUE, use_aw=FALSE)
  blocks[["check"]] <- check_chunk()
  blocks[["session"]] <- session_chunk()
  blocks[["refs"]] <- "# References"
  #i want text, but not yaml or code, to skip a line after each \n. Easiest to add "" to text.
  write_blocks(filename=paste0(filename, "_0"), blocks = blocks)
}
|
################
# Median Abundance Analysis #
################
# Analyzing the landscape factors that effect rare bee species abundance in
# New Jersey forest fragments. Done at three different landscape scales (1000m,
# 500m, 300m) to see which one creates the best (adjusted R squared) model.
# Done using a LASSO approach where n simulations were conducted in order to account
# for random variation in model results due to random cv selection, and to create
# a distribution of values for coefficients.
## Set up environment: set seed, packages, functions
## NOTE(review): rm(list=ls()) in a script is discouraged (wipes the user's
## workspace but not loaded packages/options); consider removing.
rm(list=ls())
set.seed(123)
## L'Ecuyer-CMRG generator gives reproducible parallel RNG streams (mclapply).
RNGkind("L'Ecuyer-CMRG")
library(readr)
library(plyr)
library(dplyr)
library(AER)
library(MASS)
library(car)
library(mpath)
library(cbar)
library(psych)
library(stargazer)
library(xtable)
library(ape)
library(caret)
library(glmnet)
library(corrplot)
library(tibble)
library(tidyr)
library(scales)
library(cowplot)
library(grid)
library(gridExtra)
## Leave one core free for the OS/user.
library(parallel); no_cores <- detectCores() - 1
cor.mtest <- function(mat, ...) {
  ## Pairwise correlation-test p-values for the columns of `mat`.
  ##
  ## Args:
  ##   mat: numeric matrix (or object coercible via as.matrix); columns are
  ##        variables.
  ##   ...: passed on to stats::cor.test (e.g. method = "spearman").
  ## Returns a symmetric ncol x ncol matrix of p-values with a zero
  ## diagonal and row/column names taken from colnames(mat).
  mat <- as.matrix(mat)
  n <- ncol(mat)
  p.mat <- matrix(NA_real_, n, n, dimnames = list(colnames(mat), colnames(mat)))
  diag(p.mat) <- 0
  ## seq_len() (rather than 1:(n - 1)) keeps the loop empty when n <= 1,
  ## where the original 1:(n - 1) construct would index out of bounds.
  for (i in seq_len(n - 1)) {
    for (j in (i + 1):n) {
      p.mat[i, j] <- p.mat[j, i] <- cor.test(mat[, i], mat[, j], ...)$p.value
    }
  }
  p.mat
}
output.function.poisson <- function(cv_size){
## Fit one cross-validated Poisson LASSO (glmnet) and summarize it.
## cv_size: number of CV folds passed to cv.glmnet.
## Relies on objects from the enclosing script environment: X (predictor
## matrix), y (response vector), lambdas_to_try (lambda grid) and
## site.dists.inv (inverse-distance matrix for the Moran's I test).
## Returns list(coefficients df, R-squared, adjusted R-squared, Moran's I p).
fit <- cv.glmnet(X,y, alpha = 1, standardize = TRUE, lambda = lambdas_to_try, family = "poisson", nfolds = cv_size)
# coefficients from lambda min
coef.included <- as.data.frame(as.matrix(coef(fit, s = fit$lambda.min)))
coef.included <- coef.included %>% rownames_to_column(var = "Variable")
names(coef.included)[2] <- "Value"
# adjusted R sq
## Pseudo R-squared on the response scale at lambda.min.
yhat <- predict(fit, X, s = fit$lambda.min, type = "response")
sst <- sum((y - mean(y))^2)
ssr <- sum((y - yhat)^2)
rsq <- 1 - (ssr / sst)
## Count nonzero coefficients, excluding the intercept.
number.coefs <- coef(fit,s = "lambda.min")[1:nrow(coef(fit, s = "lambda.min"))]
number.coefs <- length(number.coefs[number.coefs!=0]) - 1
## NOTE(review): 29 is hard-coded here — presumably the number of sites
## (nrow(X)); confirm and consider using nrow(X) instead.
adj.rsq <- 1-(((1-rsq)*(29-1))/(29-number.coefs-1))
# spatial autocorrelation
## Moran's I on the response-scale residuals (one-sided "greater" test).
res <- y - yhat
fit.i <- Moran.I(as.vector(res), site.dists.inv, alternative = "greater")
#return(list(differences, adj.rsq, AICc))
return(list(coef.included, rsq, adj.rsq, fit.i$p.value))
}
## Read in data
# Directory will depend on personal computer vs cluster computing
if(getwd() == "/Users/Zoe/Desktop/Bee_Project/Analysis1"){
abun <- read_csv("Outputs/abundance_metrics_col.csv")
effort <- read_csv("Outputs/effort_col.csv")
names(effort)[3] <- "Effort"
sites <- read.csv("~/Desktop/Bee_Project/Analysis1/Outputs/col_rae_sites.csv", stringsAsFactors = F)
preds_300m <- read.csv("~/Desktop/Bee_Project/Analysis1/GIS/Analysis1_predictors_300m_updated_units.csv", stringsAsFactors = F)
preds_500m <- read.csv("~/Desktop/Bee_Project/Analysis1/GIS/Analysis1_predictors_500m_updated_units.csv", stringsAsFactors = F)
preds_1km <- read.csv("~/Desktop/Bee_Project/Analysis1/GIS/Analysis1_predictors_1km_updated_units.csv", stringsAsFactors = F)
} else{
setwd("/home/zvolenec/bees")
abun <- read_csv("abundance_metrics_col.csv")
effort <- read_csv("effort_col.csv")
names(effort)[3] <- "Effort"
sites <- read.csv("col_rae_sites.csv", stringsAsFactors = F)
preds_300m <- read.csv("Analysis1_predictors_300m_updated_units.csv", stringsAsFactors = F)
preds_500m <- read.csv("Analysis1_predictors_500m_updated_units.csv", stringsAsFactors = F)
preds_1km <- read.csv("Analysis1_predictors_1km_updated_units.csv", stringsAsFactors = F)
}
# Get together inverse distance matrix for spatial autocorrelation tests
## One row per unique site; Euclidean distances on lon/lat coordinates.
## NOTE(review): Euclidean distance on raw lon/lat ignores projection —
## acceptable for a small region, but confirm.
sites <- merge(sites, abun, by = "Unit_Nm")
sites <- sites[!duplicated(sites[1]),]
site.dists <- as.matrix(dist(cbind(sites$Longitude, sites$Latitude),
method = "euclidean"))
site.dists.inv <- 1/site.dists
## Zero the diagonal so a site does not weight itself in Moran's I.
diag(site.dists.inv) <- 0
###################
##### Models ######
###################
# Look at data distribution
plot(density(abun$Rare_Abundance)); shapiro.test(abun$Rare_Abundance)
#### 1km ####
## Get variables in order
# Create dataframe with predictors / response variable
data.1km <- abun[c(2,3)] %>% left_join(preds_1km, by = "Unit_Nm") %>%
left_join(effort[c(2,3)], by = "Unit_Nm") %>% left_join(sites[c(1,7)], by = "Unit_Nm") %>%
dplyr::select(-Unit_Nm)
# Group land use predictors by their Anderson land use subsection
## Truncate land-use column names to their 3-character prefix, then sum
## columns sharing a prefix; merged-away columns are renamed "delete".
colnames(data.1km)[6:71] <- substr(colnames(data.1km[6:71]), 1, 3)
for(i in 6:70){
for(j in (i+1):71){
if(colnames(data.1km)[i] == colnames(data.1km)[j]){
data.1km[j]<- data.1km[i] + data.1km[j]
colnames(data.1km)[i] <- "delete"
}
else{
next
}
}
}
data.1km <- data.1km[,-which(names(data.1km) %in% "delete")]
# Look at univariate relationships between response /predictors
for(i in ncol(data.1km):2){
pred <- names(data.1km)[i]
plot(Rare_Abundance ~ get(pred, data.1km), data = data.1km, xlab = pred)
}
## Take out correlated predictors
# Regularized regression can handle correlated predictors, but want to choose the
# predictors that would be selected
data.1km.cleaned <- data.1km
predictors.1km <- data.1km.cleaned[-1]
p.mat.1km <- cor.mtest(predictors.1km)
cor.1km <- cor(predictors.1km)
## Blank out correlations below |0.85| so the plot shows only strong pairs.
cor.1km[abs(cor.1km) < 0.85 ] = 0
corrplot(cor.1km, type="lower",
p.mat = p.mat.1km, sig.level = 0.05,insig = "blank",method="number",number.cex=0.7)
## Manually drop one member of each highly correlated pair.
data.1km.cleaned <- data.1km.cleaned[-grep("^Perimeter_km$",colnames(data.1km.cleaned))]
data.1km.cleaned <- data.1km.cleaned[-grep("SDI_LU12",colnames(data.1km.cleaned))]
data.1km.cleaned <- data.1km.cleaned[-grep("^SDI_TYPE12_Anthro$",colnames(data.1km.cleaned))]
data.1km.cleaned <- data.1km.cleaned[-grep("^Number_Houses$",colnames(data.1km.cleaned))]
data.1km.cleaned <- data.1km.cleaned[-grep("^SDI_TYPE12_Nat$",colnames(data.1km.cleaned))]
data.1km.cleaned <- data.1km.cleaned[-grep("^LDI$",colnames(data.1km.cleaned))]
## Run Lasso
## n = number of repeated 5-fold CV fits to average over.
lambdas_to_try <- c(0,10^seq(-3, 2, length.out = 1000))
n <- 10000
y <- data.1km.cleaned %>% dplyr::select(Rare_Abundance) %>% as.matrix()
X <- data.1km.cleaned %>% dplyr::select(-Rare_Abundance) %>% as.matrix()
set.seed(123)
## NOTE(review): the PSOCK cluster `cl` is never used — mclapply forks and
## ignores it, and without mc.cores it uses getOption("mc.cores", 2L), not
## no_cores.  Confirm intended parallelism.
cl <- makeCluster(no_cores)
m.1km <- mclapply(rep(5, n), output.function.poisson,
mc.set.seed = T); stopCluster(cl)
# Get coefficients summary
# http://www.haowang.pw/blog/Poisson-Coefficient-Interpretation/
# https://bookdown.org/roback/bookdown-bysh/ch-poissonreg.html
coefficients.1km <- data.frame()
for(i in 1:n){
coefficients.1km <- rbind(coefficients.1km,m.1km[[i]][[1]])
}
## Per-variable summary of coefficient values across the n repeated fits.
coef.1km.summ <- coefficients.1km %>% group_by(Variable) %>%
summarize(Avg = mean(Value), SD = sd(Value), Min = min(Value), Max = max(Value),
Conf.low = sd(Value) * (-1.96/sqrt(n)), Conf.high = sd(Value) * (1.96/sqrt(n)),
Included = sum(Value != 0)) %>% arrange(desc(Included))
# Get R sq summary
rsq.1km <- c()
for(i in 1:n){
rsq.1km[i] <- m.1km[[i]][[2]]
}
summary(rsq.1km)
# Get adj R sq summary
adj.rsq.1km <- c()
for(i in 1:n){
adj.rsq.1km[i] <- m.1km[[i]][[3]]
}
summary(adj.rsq.1km)
# Get moran'i summary
i.p.1km <- c()
for(i in 1:n){
i.p.1km[i] <- m.1km[[i]][[4]]
}
summary(i.p.1km)
#### 500m ####
## Get variables in order
## Same pipeline as the 1km scale, using the 500m-buffer predictors.
data.500m <- abun[c(2,3)] %>% left_join(preds_500m, by = "Unit_Nm") %>%
left_join(effort[c(2,3)], by = "Unit_Nm") %>% left_join(sites[c(1,7)], by = "Unit_Nm") %>%
dplyr::select(-Unit_Nm)
## Collapse land-use columns sharing a 3-character prefix into one column.
colnames(data.500m)[6:66] <- substr(colnames(data.500m[6:66]), 1, 3)
for(i in 6:65){
for(j in (i+1):66){
if(colnames(data.500m)[i] == colnames(data.500m)[j]){
data.500m[j]<- data.500m[i] + data.500m[j]
colnames(data.500m)[i] <- "delete"
}
else{
next
}
}
}
data.500m <- data.500m[,-which(names(data.500m) %in% "delete")]
# Univariate response/predictor plots.
for(i in ncol(data.500m):2){
pred <- names(data.500m)[i]
plot(Rare_Abundance ~ get(pred, data.500m), data = data.500m, xlab = pred)
}
## Take out correlated predictors
data.500m.cleaned <- data.500m
predictors.500m <- data.500m.cleaned[-1]
p.mat.500m <- cor.mtest(predictors.500m)
cor.500m <- cor(predictors.500m)
cor.500m[abs(cor.500m) < 0.85 ] = 0
corrplot(cor.500m, type="lower",
p.mat = p.mat.500m, sig.level = 0.05,insig = "blank",method="number",number.cex=0.7)
## Manually drop one member of each highly correlated pair.
data.500m.cleaned <- data.500m.cleaned[-grep("^Perimeter_km$",colnames(data.500m.cleaned))]
data.500m.cleaned <- data.500m.cleaned[-grep("SDI_LU12",colnames(data.500m.cleaned))]
data.500m.cleaned <- data.500m.cleaned[-grep("^SDI_TYPE12_Anthro$",colnames(data.500m.cleaned))]
data.500m.cleaned <- data.500m.cleaned[-grep("^Number_Houses$",colnames(data.500m.cleaned))]
data.500m.cleaned <- data.500m.cleaned[-grep("^SDI_TYPE12_Nat$",colnames(data.500m.cleaned))]
data.500m.cleaned <- data.500m.cleaned[-grep("^MEAN_PATCH$",colnames(data.500m.cleaned))]
## Run Lasso
lambdas_to_try <- c(0,10^seq(-3, 2, length.out = 1000))
n <- 10000
y <- data.500m.cleaned %>% dplyr::select(Rare_Abundance) %>% as.matrix()
X <- data.500m.cleaned %>% dplyr::select(-Rare_Abundance) %>% as.matrix()
set.seed(123)
## NOTE(review): the PSOCK cluster `cl` is not used by mclapply (see 1km).
cl <- makeCluster(no_cores)
m.500m <- mclapply(rep(5, n), output.function.poisson,
mc.set.seed = T); stopCluster(cl)
# Get coefficients summary
coefficients.500m <- data.frame()
for(i in 1:n){
coefficients.500m <- rbind(coefficients.500m,m.500m[[i]][[1]])
}
coef.500m.summ <- coefficients.500m %>% group_by(Variable) %>%
summarize(Avg = mean(Value), SD = sd(Value), Min = min(Value), Max = max(Value),
Conf.low = sd(Value) * (-1.96/sqrt(n)), Conf.high = sd(Value) * (1.96/sqrt(n)),
Included = sum(Value != 0)) %>% arrange(desc(Included))
# Get R sq summary
rsq.500m <- c()
for(i in 1:n){
rsq.500m[i] <- m.500m[[i]][[2]]
}
summary(rsq.500m)
# Get adj R sq summary
adj.rsq.500m <- c()
for(i in 1:n){
adj.rsq.500m[i] <- m.500m[[i]][[3]]
}
summary(adj.rsq.500m)
# Get moran'i summary
i.p.500m <- c()
for(i in 1:n){
i.p.500m[i] <- m.500m[[i]][[4]]
}
summary(i.p.500m)
#### 300m ####
## Get variables in order
## Same pipeline as the 1km scale, using the 300m-buffer predictors.
data.300m <- abun[c(2,3)] %>% left_join(preds_300m, by = "Unit_Nm") %>%
left_join(effort[c(2,3)], by = "Unit_Nm") %>% left_join(sites[c(1,7)], by = "Unit_Nm") %>%
dplyr::select(-Unit_Nm)
## Collapse land-use columns sharing a 3-character prefix into one column.
colnames(data.300m)[6:64] <- substr(colnames(data.300m[6:64]), 1, 3)
for(i in 6:63){
for(j in (i+1):64){
if(colnames(data.300m)[i] == colnames(data.300m)[j]){
data.300m[j]<- data.300m[i] + data.300m[j]
colnames(data.300m)[i] <- "delete"
}
else{
next
}
}
}
data.300m <- data.300m[,-which(names(data.300m) %in% "delete")]
# Univariate response/predictor plots.
for(i in ncol(data.300m):2){
pred <- names(data.300m)[i]
plot(Rare_Abundance ~ get(pred, data.300m), data = data.300m, xlab = pred)
}
## Take out correlated predictors
data.300m.cleaned <- data.300m
predictors.300m <- data.300m.cleaned[-1]
p.mat.300m <- cor.mtest(predictors.300m)
cor.300m <- cor(predictors.300m)
cor.300m[abs(cor.300m) < 0.85 ] = 0
corrplot(cor.300m, type="lower",
p.mat = p.mat.300m, sig.level = 0.05,insig = "blank",method="number",number.cex=0.7)
## Manually drop one member of each highly correlated pair.
data.300m.cleaned <- data.300m.cleaned[-grep("^Perimeter_km$",colnames(data.300m.cleaned))]
data.300m.cleaned <- data.300m.cleaned[-grep("SDI_LU12",colnames(data.300m.cleaned))]
data.300m.cleaned <- data.300m.cleaned[-grep("^SDI_TYPE12_Anthro$",colnames(data.300m.cleaned))]
data.300m.cleaned <- data.300m.cleaned[-grep("^MEAN_PATCH$",colnames(data.300m.cleaned))]
data.300m.cleaned <- data.300m.cleaned[-grep("^Number_Houses$",colnames(data.300m.cleaned))]
data.300m.cleaned <- data.300m.cleaned[-grep("^SDI_TYPE12_Nat$",colnames(data.300m.cleaned))]
## Run Lasso
lambdas_to_try <- c(0,10^seq(-3, 2, length.out = 1000))
n <- 10000
y <- data.300m.cleaned %>% dplyr::select(Rare_Abundance) %>% as.matrix()
X <- data.300m.cleaned %>% dplyr::select(-Rare_Abundance) %>% as.matrix()
set.seed(123)
## NOTE(review): the PSOCK cluster `cl` is not used by mclapply (see 1km).
cl <- makeCluster(no_cores)
m.300m <- mclapply(rep(5, n), output.function.poisson,
mc.set.seed = T); stopCluster(cl)
# Get coefficients summary
coefficients.300m <- data.frame()
for(i in 1:n){
coefficients.300m <- rbind(coefficients.300m,m.300m[[i]][[1]])
}
coef.300m.summ <- coefficients.300m %>% group_by(Variable) %>%
summarize(Avg = mean(Value), SD = sd(Value), Min = min(Value), Max = max(Value),
Conf.low = sd(Value) * (-1.96/sqrt(n)), Conf.high = sd(Value) * (1.96/sqrt(n)),
Included = sum(Value != 0)) %>% arrange(desc(Included))
# Get R sq summary
rsq.300m <- c()
for(i in 1:n){
rsq.300m[i] <- m.300m[[i]][[2]]
}
summary(rsq.300m)
# Get adj R sq summary
adj.rsq.300m <- c()
for(i in 1:n){
adj.rsq.300m[i] <- m.300m[[i]][[3]]
}
summary(adj.rsq.300m)
# Get moran'i summary
i.p.300m <- c()
for(i in 1:n){
i.p.300m[i] <- m.300m[[i]][[4]]
}
summary(i.p.300m)
#### Save output ####
## Persist the raw simulation results; destination depends on whether the
## script is running on the local machine or the cluster.
if(getwd() == "/Users/Zoe/Desktop/Bee_Project/Analysis1"){
saveRDS(m.1km, "Bee_Ecology_Models/abundance_1km.rds")
saveRDS(m.500m, "Bee_Ecology_Models/abundance_500m.rds")
saveRDS(m.300m, "Bee_Ecology_Models/abundance_300m.rds")
} else{
saveRDS(m.1km, file = "/scratch/gpfs/zvolenec/abundance_1km.rds")
saveRDS(m.500m, file = "/scratch/gpfs/zvolenec/abundance_500m.rds")
saveRDS(m.300m, file = "/scratch/gpfs/zvolenec/abundance_300m.rds")
}
| /rare_bee_species_abundance_models.R | no_license | zo33/Code_samples | R | false | false | 13,485 | r | ################
# Median Abundance Analysis #
################
# Analyzing the landscape factors that effect rare bee species abundance in
# New Jersey forest fragments. Done at three different landscape scales (1000m,
# 500m, 300m) to see which one creates the best (adjusted R squared) model.
# Done using a LASSO approach where n simulations were conducted in order to account
# for random variation in model results due to random cv selection, and to create
# a distribution of values for coefficients.
## Set up environment: set seed, packages, functions
## (Duplicated copy of the setup block earlier in this dump.)
## NOTE(review): rm(list=ls()) in a script is discouraged; consider removing.
rm(list=ls())
set.seed(123)
## L'Ecuyer-CMRG generator gives reproducible parallel RNG streams (mclapply).
RNGkind("L'Ecuyer-CMRG")
library(readr)
library(plyr)
library(dplyr)
library(AER)
library(MASS)
library(car)
library(mpath)
library(cbar)
library(psych)
library(stargazer)
library(xtable)
library(ape)
library(caret)
library(glmnet)
library(corrplot)
library(tibble)
library(tidyr)
library(scales)
library(cowplot)
library(grid)
library(gridExtra)
## Leave one core free for the OS/user.
library(parallel); no_cores <- detectCores() - 1
cor.mtest <- function(mat, ...) {
  ## Pairwise correlation-test p-values for the columns of `mat`.
  ##
  ## Args:
  ##   mat: numeric matrix (or object coercible via as.matrix); columns are
  ##        variables.
  ##   ...: passed on to stats::cor.test (e.g. method = "spearman").
  ## Returns a symmetric ncol x ncol matrix of p-values with a zero
  ## diagonal and row/column names taken from colnames(mat).
  mat <- as.matrix(mat)
  n <- ncol(mat)
  p.mat <- matrix(NA_real_, n, n, dimnames = list(colnames(mat), colnames(mat)))
  diag(p.mat) <- 0
  ## seq_len() (rather than 1:(n - 1)) keeps the loop empty when n <= 1,
  ## where the original 1:(n - 1) construct would index out of bounds.
  for (i in seq_len(n - 1)) {
    for (j in (i + 1):n) {
      p.mat[i, j] <- p.mat[j, i] <- cor.test(mat[, i], mat[, j], ...)$p.value
    }
  }
  p.mat
}
output.function.poisson <- function(cv_size){
## Fit one cross-validated Poisson LASSO (glmnet) and summarize it.
## (Duplicated copy of the function earlier in this dump.)
## cv_size: number of CV folds passed to cv.glmnet.  Relies on script-level
## objects X, y, lambdas_to_try and site.dists.inv.
## Returns list(coefficients df, R-squared, adjusted R-squared, Moran's I p).
fit <- cv.glmnet(X,y, alpha = 1, standardize = TRUE, lambda = lambdas_to_try, family = "poisson", nfolds = cv_size)
# coefficients from lambda min
coef.included <- as.data.frame(as.matrix(coef(fit, s = fit$lambda.min)))
coef.included <- coef.included %>% rownames_to_column(var = "Variable")
names(coef.included)[2] <- "Value"
# adjusted R sq
yhat <- predict(fit, X, s = fit$lambda.min, type = "response")
sst <- sum((y - mean(y))^2)
ssr <- sum((y - yhat)^2)
rsq <- 1 - (ssr / sst)
number.coefs <- coef(fit,s = "lambda.min")[1:nrow(coef(fit, s = "lambda.min"))]
number.coefs <- length(number.coefs[number.coefs!=0]) - 1
## NOTE(review): 29 is hard-coded — presumably nrow(X); confirm.
adj.rsq <- 1-(((1-rsq)*(29-1))/(29-number.coefs-1))
# spatial autocorrelation
res <- y - yhat
fit.i <- Moran.I(as.vector(res), site.dists.inv, alternative = "greater")
#return(list(differences, adj.rsq, AICc))
return(list(coef.included, rsq, adj.rsq, fit.i$p.value))
}
## Read in data
# Directory will depend on personal computer vs cluster computing
# (laptop branch first, cluster branch second; same files either way).
if(getwd() == "/Users/Zoe/Desktop/Bee_Project/Analysis1"){
abun <- read_csv("Outputs/abundance_metrics_col.csv")
effort <- read_csv("Outputs/effort_col.csv")
names(effort)[3] <- "Effort"
sites <- read.csv("~/Desktop/Bee_Project/Analysis1/Outputs/col_rae_sites.csv", stringsAsFactors = F)
preds_300m <- read.csv("~/Desktop/Bee_Project/Analysis1/GIS/Analysis1_predictors_300m_updated_units.csv", stringsAsFactors = F)
preds_500m <- read.csv("~/Desktop/Bee_Project/Analysis1/GIS/Analysis1_predictors_500m_updated_units.csv", stringsAsFactors = F)
preds_1km <- read.csv("~/Desktop/Bee_Project/Analysis1/GIS/Analysis1_predictors_1km_updated_units.csv", stringsAsFactors = F)
} else{
setwd("/home/zvolenec/bees")
abun <- read_csv("abundance_metrics_col.csv")
effort <- read_csv("effort_col.csv")
names(effort)[3] <- "Effort"
sites <- read.csv("col_rae_sites.csv", stringsAsFactors = F)
preds_300m <- read.csv("Analysis1_predictors_300m_updated_units.csv", stringsAsFactors = F)
preds_500m <- read.csv("Analysis1_predictors_500m_updated_units.csv", stringsAsFactors = F)
preds_1km <- read.csv("Analysis1_predictors_1km_updated_units.csv", stringsAsFactors = F)
}
# Get together inverse distance matrix for spatial autocorrelation tests
# One row per site: drop duplicate Unit_Nm rows created by the merge.
sites <- merge(sites, abun, by = "Unit_Nm")
sites <- sites[!duplicated(sites[1]),]
# NOTE(review): Euclidean distance on raw lon/lat degrees -- presumably fine
# for a small study region, but confirm great-circle distance is not needed.
site.dists <- as.matrix(dist(cbind(sites$Longitude, sites$Latitude),
method = "euclidean"))
site.dists.inv <- 1/site.dists
diag(site.dists.inv) <- 0
###################
##### Models ######
###################
# Look at data distribution
# Shapiro-Wilk check on the raw abundances (motivates the Poisson family below).
plot(density(abun$Rare_Abundance)); shapiro.test(abun$Rare_Abundance)
#### 1km ####
## Get variables in order
# Create dataframe with predictors / response variable
data.1km <- abun[c(2,3)] %>% left_join(preds_1km, by = "Unit_Nm") %>%
left_join(effort[c(2,3)], by = "Unit_Nm") %>% left_join(sites[c(1,7)], by = "Unit_Nm") %>%
dplyr::select(-Unit_Nm)
# Group land use predictors by their Anderson land use subsection
# Truncate land-use names to their 3-character subsection code, then sum any
# columns that now share a name; the first of each matching pair is renamed
# "delete" and dropped once the loop finishes.
colnames(data.1km)[6:71] <- substr(colnames(data.1km[6:71]), 1, 3)
for(i in 6:70){
for(j in (i+1):71){
if(colnames(data.1km)[i] == colnames(data.1km)[j]){
data.1km[j]<- data.1km[i] + data.1km[j]
colnames(data.1km)[i] <- "delete"
}
else{
next
}
}
}
data.1km <- data.1km[,-which(names(data.1km) %in% "delete")]
# Look at univariate relationships between response /predictors
for(i in ncol(data.1km):2){
pred <- names(data.1km)[i]
plot(Rare_Abundance ~ get(pred, data.1km), data = data.1km, xlab = pred)
}
## Take out correlated predictors
# Regularized regression can handle correlated predictors, but want to choose the
# predictors that would be selected
data.1km.cleaned <- data.1km
predictors.1km <- data.1km.cleaned[-1]
p.mat.1km <- cor.mtest(predictors.1km)
cor.1km <- cor(predictors.1km)
# Blank out correlations below |0.85| so the plot highlights only strong pairs.
cor.1km[abs(cor.1km) < 0.85 ] = 0
corrplot(cor.1km, type="lower",
p.mat = p.mat.1km, sig.level = 0.05,insig = "blank",method="number",number.cex=0.7)
# Manual removals chosen from the corrplot above (one member of each strong pair).
data.1km.cleaned <- data.1km.cleaned[-grep("^Perimeter_km$",colnames(data.1km.cleaned))]
data.1km.cleaned <- data.1km.cleaned[-grep("SDI_LU12",colnames(data.1km.cleaned))]
data.1km.cleaned <- data.1km.cleaned[-grep("^SDI_TYPE12_Anthro$",colnames(data.1km.cleaned))]
data.1km.cleaned <- data.1km.cleaned[-grep("^Number_Houses$",colnames(data.1km.cleaned))]
data.1km.cleaned <- data.1km.cleaned[-grep("^SDI_TYPE12_Nat$",colnames(data.1km.cleaned))]
data.1km.cleaned <- data.1km.cleaned[-grep("^LDI$",colnames(data.1km.cleaned))]
## Run Lasso
# Globals consumed by output.function.poisson(); note that y, X and
# lambdas_to_try are overwritten again by the 500m and 300m sections below.
lambdas_to_try <- c(0,10^seq(-3, 2, length.out = 1000))
n <- 10000
y <- data.1km.cleaned %>% dplyr::select(Rare_Abundance) %>% as.matrix()
X <- data.1km.cleaned %>% dplyr::select(-Rare_Abundance) %>% as.matrix()
set.seed(123)
# NOTE(review): mclapply() does not use the PSOCK cluster from makeCluster();
# it forks, and without an explicit mc.cores it defaults to
# getOption("mc.cores", 2L) -- so no_cores is never actually applied here.
cl <- makeCluster(no_cores)
m.1km <- mclapply(rep(5, n), output.function.poisson,
mc.set.seed = T); stopCluster(cl)
# Get coefficients summary
# http://www.haowang.pw/blog/Poisson-Coefficient-Interpretation/
# https://bookdown.org/roback/bookdown-bysh/ch-poissonreg.html
coefficients.1km <- data.frame()
for(i in 1:n){
coefficients.1km <- rbind(coefficients.1km,m.1km[[i]][[1]])
}
# NOTE(review): Conf.low/Conf.high are +/- half-widths of a 95% CI of the mean
# (not centred on Avg) -- confirm downstream use expects half-widths.
coef.1km.summ <- coefficients.1km %>% group_by(Variable) %>%
summarize(Avg = mean(Value), SD = sd(Value), Min = min(Value), Max = max(Value),
Conf.low = sd(Value) * (-1.96/sqrt(n)), Conf.high = sd(Value) * (1.96/sqrt(n)),
Included = sum(Value != 0)) %>% arrange(desc(Included))
# Get raw R sq summary (element [[2]] of each simulation)
rsq.1km <- c()
for(i in 1:n){
rsq.1km[i] <- m.1km[[i]][[2]]
}
summary(rsq.1km)
# Get adj R sq summary
adj.rsq.1km <- c()
for(i in 1:n){
adj.rsq.1km[i] <- m.1km[[i]][[3]]
}
summary(adj.rsq.1km)
# Get moran'i summary
# (p-values of Moran's I on residuals; small values flag spatial autocorrelation)
i.p.1km <- c()
for(i in 1:n){
i.p.1km[i] <- m.1km[[i]][[4]]
}
summary(i.p.1km)
#### 500m ####
## Get variables in order
# Same pipeline as the 1km section, at the 500m landscape scale
# (column index bounds differ because this scale has fewer land-use columns).
data.500m <- abun[c(2,3)] %>% left_join(preds_500m, by = "Unit_Nm") %>%
left_join(effort[c(2,3)], by = "Unit_Nm") %>% left_join(sites[c(1,7)], by = "Unit_Nm") %>%
dplyr::select(-Unit_Nm)
colnames(data.500m)[6:66] <- substr(colnames(data.500m[6:66]), 1, 3)
for(i in 6:65){
for(j in (i+1):66){
if(colnames(data.500m)[i] == colnames(data.500m)[j]){
data.500m[j]<- data.500m[i] + data.500m[j]
colnames(data.500m)[i] <- "delete"
}
else{
next
}
}
}
data.500m <- data.500m[,-which(names(data.500m) %in% "delete")]
for(i in ncol(data.500m):2){
pred <- names(data.500m)[i]
plot(Rare_Abundance ~ get(pred, data.500m), data = data.500m, xlab = pred)
}
## Take out correlated predictors
data.500m.cleaned <- data.500m
predictors.500m <- data.500m.cleaned[-1]
p.mat.500m <- cor.mtest(predictors.500m)
cor.500m <- cor(predictors.500m)
cor.500m[abs(cor.500m) < 0.85 ] = 0
corrplot(cor.500m, type="lower",
p.mat = p.mat.500m, sig.level = 0.05,insig = "blank",method="number",number.cex=0.7)
# Manual removals chosen from the corrplot above.
data.500m.cleaned <- data.500m.cleaned[-grep("^Perimeter_km$",colnames(data.500m.cleaned))]
data.500m.cleaned <- data.500m.cleaned[-grep("SDI_LU12",colnames(data.500m.cleaned))]
data.500m.cleaned <- data.500m.cleaned[-grep("^SDI_TYPE12_Anthro$",colnames(data.500m.cleaned))]
data.500m.cleaned <- data.500m.cleaned[-grep("^Number_Houses$",colnames(data.500m.cleaned))]
data.500m.cleaned <- data.500m.cleaned[-grep("^SDI_TYPE12_Nat$",colnames(data.500m.cleaned))]
data.500m.cleaned <- data.500m.cleaned[-grep("^MEAN_PATCH$",colnames(data.500m.cleaned))]
## Run Lasso
# Overwrites the shared globals (lambdas_to_try, y, X) used by
# output.function.poisson(), so this section must run after the 1km summaries.
lambdas_to_try <- c(0,10^seq(-3, 2, length.out = 1000))
n <- 10000
y <- data.500m.cleaned %>% dplyr::select(Rare_Abundance) %>% as.matrix()
X <- data.500m.cleaned %>% dplyr::select(-Rare_Abundance) %>% as.matrix()
set.seed(123)
# NOTE(review): as in the 1km section, mclapply() ignores this PSOCK cluster.
cl <- makeCluster(no_cores)
m.500m <- mclapply(rep(5, n), output.function.poisson,
mc.set.seed = T); stopCluster(cl)
# Get coefficients summary
coefficients.500m <- data.frame()
for(i in 1:n){
coefficients.500m <- rbind(coefficients.500m,m.500m[[i]][[1]])
}
coef.500m.summ <- coefficients.500m %>% group_by(Variable) %>%
summarize(Avg = mean(Value), SD = sd(Value), Min = min(Value), Max = max(Value),
Conf.low = sd(Value) * (-1.96/sqrt(n)), Conf.high = sd(Value) * (1.96/sqrt(n)),
Included = sum(Value != 0)) %>% arrange(desc(Included))
# Get raw R sq summary (element [[2]] of each simulation)
rsq.500m <- c()
for(i in 1:n){
rsq.500m[i] <- m.500m[[i]][[2]]
}
summary(rsq.500m)
# Get adj R sq summary
adj.rsq.500m <- c()
for(i in 1:n){
adj.rsq.500m[i] <- m.500m[[i]][[3]]
}
summary(adj.rsq.500m)
# Get moran'i summary
i.p.500m <- c()
for(i in 1:n){
i.p.500m[i] <- m.500m[[i]][[4]]
}
summary(i.p.500m)
#### 300m ####
## Get variables in order
# Same pipeline as the 1km/500m sections, at the 300m landscape scale.
data.300m <- abun[c(2,3)] %>% left_join(preds_300m, by = "Unit_Nm") %>%
left_join(effort[c(2,3)], by = "Unit_Nm") %>% left_join(sites[c(1,7)], by = "Unit_Nm") %>%
dplyr::select(-Unit_Nm)
colnames(data.300m)[6:64] <- substr(colnames(data.300m[6:64]), 1, 3)
for(i in 6:63){
for(j in (i+1):64){
if(colnames(data.300m)[i] == colnames(data.300m)[j]){
data.300m[j]<- data.300m[i] + data.300m[j]
colnames(data.300m)[i] <- "delete"
}
else{
next
}
}
}
data.300m <- data.300m[,-which(names(data.300m) %in% "delete")]
for(i in ncol(data.300m):2){
pred <- names(data.300m)[i]
plot(Rare_Abundance ~ get(pred, data.300m), data = data.300m, xlab = pred)
}
## Take out correlated predictors
data.300m.cleaned <- data.300m
predictors.300m <- data.300m.cleaned[-1]
p.mat.300m <- cor.mtest(predictors.300m)
cor.300m <- cor(predictors.300m)
cor.300m[abs(cor.300m) < 0.85 ] = 0
corrplot(cor.300m, type="lower",
p.mat = p.mat.300m, sig.level = 0.05,insig = "blank",method="number",number.cex=0.7)
# Manual removals chosen from the corrplot above.
data.300m.cleaned <- data.300m.cleaned[-grep("^Perimeter_km$",colnames(data.300m.cleaned))]
data.300m.cleaned <- data.300m.cleaned[-grep("SDI_LU12",colnames(data.300m.cleaned))]
data.300m.cleaned <- data.300m.cleaned[-grep("^SDI_TYPE12_Anthro$",colnames(data.300m.cleaned))]
data.300m.cleaned <- data.300m.cleaned[-grep("^MEAN_PATCH$",colnames(data.300m.cleaned))]
data.300m.cleaned <- data.300m.cleaned[-grep("^Number_Houses$",colnames(data.300m.cleaned))]
data.300m.cleaned <- data.300m.cleaned[-grep("^SDI_TYPE12_Nat$",colnames(data.300m.cleaned))]
## Run Lasso
# Overwrites the shared globals again -- must run after the 500m summaries.
lambdas_to_try <- c(0,10^seq(-3, 2, length.out = 1000))
n <- 10000
y <- data.300m.cleaned %>% dplyr::select(Rare_Abundance) %>% as.matrix()
X <- data.300m.cleaned %>% dplyr::select(-Rare_Abundance) %>% as.matrix()
set.seed(123)
# NOTE(review): as above, mclapply() ignores this PSOCK cluster.
cl <- makeCluster(no_cores)
m.300m <- mclapply(rep(5, n), output.function.poisson,
mc.set.seed = T); stopCluster(cl)
# Get coefficients summary
coefficients.300m <- data.frame()
for(i in 1:n){
coefficients.300m <- rbind(coefficients.300m,m.300m[[i]][[1]])
}
coef.300m.summ <- coefficients.300m %>% group_by(Variable) %>%
summarize(Avg = mean(Value), SD = sd(Value), Min = min(Value), Max = max(Value),
Conf.low = sd(Value) * (-1.96/sqrt(n)), Conf.high = sd(Value) * (1.96/sqrt(n)),
Included = sum(Value != 0)) %>% arrange(desc(Included))
# Get raw R sq summary (element [[2]] of each simulation)
rsq.300m <- c()
for(i in 1:n){
rsq.300m[i] <- m.300m[[i]][[2]]
}
summary(rsq.300m)
# Get adj R sq summary
adj.rsq.300m <- c()
for(i in 1:n){
adj.rsq.300m[i] <- m.300m[[i]][[3]]
}
summary(adj.rsq.300m)
# Get moran'i summary
i.p.300m <- c()
for(i in 1:n){
i.p.300m[i] <- m.300m[[i]][[4]]
}
summary(i.p.300m)
#### Save output ####
# Persist the full simulation lists (per-run coefficient tables, R sq values
# and Moran's I p-values); laptop path vs. cluster scratch path.
if(getwd() == "/Users/Zoe/Desktop/Bee_Project/Analysis1"){
saveRDS(m.1km, "Bee_Ecology_Models/abundance_1km.rds")
saveRDS(m.500m, "Bee_Ecology_Models/abundance_500m.rds")
saveRDS(m.300m, "Bee_Ecology_Models/abundance_300m.rds")
} else{
saveRDS(m.1km, file = "/scratch/gpfs/zvolenec/abundance_1km.rds")
saveRDS(m.500m, file = "/scratch/gpfs/zvolenec/abundance_500m.rds")
saveRDS(m.300m, file = "/scratch/gpfs/zvolenec/abundance_300m.rds")
}
|
# ---- Load and reshape MSData1 -------------------------------------------------
# Reads the per-security panel (semicolon-separated CSV) and builds one wide
# matrix per measure (rows = observations, columns = securities kept), plus
# sector/currency dummy matrices for the first 801 securities.
setwd("~/MyFile")  # working directory -- adjust per machine (was: "Dossier emplacement a definir")
MSData1 = read.csv(file = "MSData1.csv",
                   header = TRUE, sep = ";")
# Fix: the original read `unique(MSData$secID)`, but the data frame is named
# MSData1, so the script aborted with "object 'MSData' not found".
stockname = unique(MSData1$secID)
# Per-security series, keyed by secID
Price_list = list()    # expected returns ("return" column)
Price_list2 = list()   # realized returns ("return4" column)
toUSD_list = list()    # FX conversion factors to USD
sector_list = list()   # NOTE(review): never filled below -- apparently unused
Mdv_list = list()      # daily volume
Mdv2_list = list()     # matched volume
# Wide matrices, one column appended per security kept
expreturn_series = NULL
return_series = NULL
toUSD = NULL
Mdv = NULL
Mdv2 = NULL
# NOTE(review): 12 rows allocated but only sectors 1:11 are filled -- row 12 stays 0.
sector_matrix = matrix(0, nrow = 12, ncol = 801)
currency_matrix = matrix(0, nrow = 2, ncol = 801)  # row 1 = EUR, row 2 = GBP
for (i in stockname)
{
  index_stock = (MSData1$secID == i)
  Price_list[[i]] = MSData1$return[index_stock]
  Price_list2[[i]] = MSData1$return4[index_stock]
  toUSD_list[[i]] = MSData1$toUSD[index_stock]
  Mdv_list[[i]] = MSData1$DailyVolume[index_stock]
  Mdv2_list[[i]] = MSData1$matched[index_stock]
  # Keep only securities with a complete 19-observation history
  if (length(Price_list[[i]]) == 19)
  {
    # NOTE(review): `i` is a secID value used both as a position (<= 801) and
    # as a row index into MSData1 -- this only works if secID equals the row
    # number of the security's first record; confirm against the data file.
    if (i <= 801 && MSData1$currency[i] == "EUR") {
      currency_matrix[1, i] = 1}
    if (i <= 801 && MSData1$currency[i] == "GBP") {
      currency_matrix[2, i] = 1}
    # Sector codes come in multiples of 5 (5, 10, ..., 55)
    for (j in 1:11) {
      if (i <= 801 && MSData1$sector[i] == 5 * j) {
        sector_matrix[j, i] = 1}
    }
    expreturn_series = cbind(expreturn_series, as.matrix(Price_list[[i]]))
    return_series = cbind(return_series, as.matrix(Price_list2[[i]]))
    toUSD = cbind(toUSD, as.matrix(toUSD_list[[i]]))
    Mdv = cbind(Mdv, as.matrix(Mdv_list[[i]]))
    Mdv2 = cbind(Mdv2, as.matrix(Mdv2_list[[i]]))
  }
}
# Quick distribution checks on daily volume
max(Mdv)
min(Mdv)
mean(Mdv)
# NOTE(review): `dataVol` is not defined anywhere in this script -- this line
# errors as written, and `data` is never used afterwards; looks like a leftover.
data=dataVol
# Flatten the volume matrices into plain vectors for the regression and plot.
Matched_Volume=as.vector(cbind(as.matrix(Mdv2,ncol=1)))
Median_daily_Volume=as.vector(cbind(as.matrix(Mdv,ncol=1)))
dreg=data.frame(Matched_Volume=Matched_Volume,
MDV=Median_daily_Volume)
# Scatter plot with an OLS fit line.
# NOTE(review): ggplot2 is used here but never attached with library(ggplot2).
reg <- ggplot(data = dreg, aes(x = Matched_Volume, y = MDV)) +
geom_smooth(method = "lm", se=FALSE, color="black", formula = y ~ x) +
geom_point()
reg
# Build a plotmath label "y = a + b.x, r2 = ..." for the regression of the
# fixed columns MDV on Matched_Volume in `dreg`. Returns a single character
# string suitable for geom_text(..., parse = TRUE).
lm_eqn = function(dreg){
  fit <- lm(MDV ~ Matched_Volume, data = dreg)
  coefs <- coef(fit)
  # Values substituted into the plotmath template below
  vals <- list(a  = format(coefs[1], digits = 2),
               b  = format(coefs[2], digits = 2),
               r2 = format(summary(fit)$r.squared, digits = 3))
  eq <- substitute(italic(y) == a + b %.% italic(x) * "," ~ ~italic(r)^2 ~ "=" ~ r2,
                   vals)
  as.character(as.expression(eq))
}
# Annotate the scatter plot with the fitted-equation label (parsed as plotmath).
reg1 = reg + geom_text(aes(x = 10^2, y = 4*10^10, label = lm_eqn(dreg)), parse = TRUE)
# NOTE(review): `reg` is re-bound here from a ggplot object to an lm fit --
# confusing reuse of one name for two unrelated objects.
reg=lm(formula=Matched_Volume~Median_daily_Volume)
summary(reg)
# NOTE(review): 0.2412 looks like a hard-coded slope estimate used to form a
# standardized coefficient -- better recomputed from coef(reg) than hard-coded.
0.2412*sqrt(var(Median_daily_Volume)/var(Matched_Volume))
# NOTE(review): `datadVol` and `datamVol` are undefined -- this plot errors as
# written; presumably Median_daily_Volume and Matched_Volume were intended.
plot(datadVol,datamVol)
abline(reg=reg)
| /regMVMDV.R | no_license | githubfun/pms | R | false | false | 2,428 | r | setwd("~/MyFile") #Dossier emplacement ? d?finir
MSData1=read.csv(file="MSData1.csv",
header=TRUE ,sep=";")
stockname = unique(MSData$secID)
Price_list = list()
Price_list2 = list()
toUSD_list=list()
sector_list=list()
Mdv_list=list()
Mdv2_list=list()
expreturn_series=NULL
return_series=NULL
toUSD=NULL
Mdv=NULL
Mdv2=NULL
sector_matrix=matrix(0,nrow=12,ncol=801)
currency_matrix=matrix(0,nrow=2,ncol=801)
for (i in stockname)
{
index_stock = (MSData1$secID == i)
Price_list[[i]] = MSData1$return[index_stock]
Price_list2[[i]] = MSData1$return4[index_stock]
toUSD_list[[i]]= MSData1$toUSD[index_stock]
Mdv_list[[i]]= MSData1$DailyVolume[index_stock]
Mdv2_list[[i]]= MSData1$matched[index_stock]
if( length(Price_list[[i]]) == 19)
{
if(i<=801 && MSData1$currency[i]=="EUR"){
currency_matrix[1,i]=1}
if(i<=801 && MSData1$currency[i]=="GBP"){
currency_matrix[2,i]=1}
for(j in 1:11){
if(i<=801 && MSData1$sector[i]==5*j){
sector_matrix[j,i]=1}
}
expreturn_series=cbind(expreturn_series,as.matrix(Price_list[[i]]))
return_series=cbind(return_series,as.matrix(Price_list2[[i]]))
toUSD=cbind(toUSD,as.matrix(toUSD_list[[i]]))
Mdv=cbind(Mdv,as.matrix(Mdv_list[[i]]))
Mdv2=cbind(Mdv2,as.matrix(Mdv2_list[[i]]))
}
}
max(Mdv)
min(Mdv)
mean(Mdv)
data=dataVol
Matched_Volume=as.vector(cbind(as.matrix(Mdv2,ncol=1)))
Median_daily_Volume=as.vector(cbind(as.matrix(Mdv,ncol=1)))
dreg=data.frame(Matched_Volume=Matched_Volume,
MDV=Median_daily_Volume)
reg <- ggplot(data = dreg, aes(x = Matched_Volume, y = MDV)) +
geom_smooth(method = "lm", se=FALSE, color="black", formula = y ~ x) +
geom_point()
reg
lm_eqn = function(dreg){
m = lm(MDV ~ Matched_Volume, dreg);
eq <- substitute(italic(y) == a + b %.% italic(x)*","~~italic(r)^2~"="~r2,
list(a = format(coef(m)[1], digits = 2),
b = format(coef(m)[2], digits = 2),
r2 = format(summary(m)$r.squared, digits = 3)))
as.character(as.expression(eq));
}
reg1 = reg + geom_text(aes(x = 10^2, y = 4*10^10, label = lm_eqn(dreg)), parse = TRUE)
reg=lm(formula=Matched_Volume~Median_daily_Volume)
summary(reg)
0.2412*sqrt(var(Median_daily_Volume)/var(Matched_Volume))
plot(datadVol,datamVol)
abline(reg=reg)
|
# To install a package, do install.packages("nameofpackage") where the name of the package is in quotes.
# It should then connect and allow you to choose the mirror you prefer to download packages from.
# Repeat for installing the five packages below and you should be good to go.
# We could turn this into a shiny app or just go deeper in a directed analysis. - Hans Thompson
library(jsonlite)   # fromJSON()
library(leaflet)    # interactive map
library(stringr)    # str_split_fixed()
library(lubridate)  # ymd_hms(), hour()
library(ggplot2)    # fix: ggplot()/aes()/geom_bar() were used below without ever attaching ggplot2
# Pull the People Mover request log from the Code for Anchorage API.
url <- "http://bus.codeforanchorage.org/db.json"
# get the json and convert to a data frame
json_data <- fromJSON(url)$requests
# format the date object (timestamps interpreted as Anchorage local time)
json_data$date <- ymd_hms(json_data$date, tz = "America/Anchorage")
# show requests over time
ggplot(data = json_data, aes(x = date)) + geom_bar() + ggtitle("People Mover Real Time Requests") +
  ylab("Number of Requests")
# show requests by hour
ggplot(data = json_data, aes(x = hour(date))) + geom_bar() + ggtitle("People Mover Real Time Requests") +
  ylab("Number of Requests") + xlab("Hour of the Day")
# Inputs longer than 20 characters are assumed to be "lat, lon" pairs;
# NOTE(review): confirm that format -- shorter free-text requests are dropped here.
lat_lon <- str_split_fixed(json_data[nchar(json_data$input) > 20,]$input, ", ", 2)
lat_lon <- data.frame(lat = as.numeric(lat_lon[,1]), lon = as.numeric(lat_lon[,2]))
# Map out the requests that are coded with latitude and longitude.
leaflet() %>% addTiles() %>%
  setView(-149.885, 61.181, zoom = 11) %>% addCircles(lat_lon$lon, lat_lon$lat, radius = 5, color = "#ff0000")
| /analysis.r | no_license | codeforanchorage/bus-request-loggeR | R | false | false | 1,384 | r | # To install a package, do install.packages("nameofpackage") where the name of the package is in quotes.
# It should then connect and allow you to choose the mirror you prefer to download packages from.
# Repeat for installing the four packages below and you should be good to go.
# We could turn this into a shiny app or just go deeper in a directed analysis. - Hans Thompson
library(jsonlite)
library(leaflet)
library(stringr)
library(lubridate)
url <- "http://bus.codeforanchorage.org/db.json"
#get the json and convert to a data frame
json_data <- fromJSON(url)$requests
#format the date object
json_data$date <- ymd_hms(json_data$date, tz = "America/Anchorage")
#show requests over time
ggplot(data = json_data, aes(x = date)) + geom_bar() + ggtitle("People Mover Real Time Requests") +
ylab("Number of Requests")
#show requests by hour
ggplot(data = json_data, aes(x = hour(date))) + geom_bar() + ggtitle("People Mover Real Time Requests") +
ylab("Number of Requests") + xlab("Hour of the Day")
lat_lon <- str_split_fixed(json_data[nchar(json_data$input) > 20,]$input, ", ", 2)
lat_lon <- data.frame(lat = as.numeric(lat_lon[,1]), lon = as.numeric(lat_lon[,2]))
#Map out the requests that are coded with latitude and longitude.
leaflet() %>% addTiles() %>%
setView(-149.885, 61.181, zoom = 11) %>% addCircles(lat_lon$lon, lat_lon$lat, radius = 5, color = "#ff0000")
|
# This script takes data input from two different users in two sheets of an excel
# workbook and prints a list of rows that are not identical, for review by the user.
# Output is a data frame of row and column of a mismatch.
# Notes: - row order matters, i.e. data sheets must be entered in same order
# - worksheets to be compared must be the first two worksheets in the workbook
# - name of worksheets does not matter
# - prints "worksheets identical" if two worksheets are identical
library(openxlsx)
# ===============================================================================
# Functions
# Compare the first two worksheets of an excel workbook cell by cell.
#
# Args:
#   excel_file: path to an .xlsx workbook whose first two sheets hold the
#               double-entered data; row order must match between sheets.
# Returns:
#   A data frame with one row per mismatching cell: the excel row number
#   (data row + 1 to account for the header) and the column name.
#   Prints "Worksheets identical" when the two sheets match exactly.
compare_worksheets = function(excel_file) {
  # load data from excel workbook
  ws1 = read.xlsx(excel_file, sheet = 1, colNames = TRUE, na.strings = '')
  ws2 = read.xlsx(excel_file, sheet = 2, colNames = TRUE, na.strings = '')
  # if the two worksheets are identical, exit function
  if (identical(ws1,ws2)) {
    print('Worksheets identical')
  }
  # otherwise, compare the sheets row by row
  else {
    # Fix: the row count was length(ws1$month), which silently became 0
    # (comparing nothing) for any workbook without a "month" column;
    # nrow() works for any sheet.
    num_rows = nrow(ws1)
    mismatches = list()  # one data frame per mismatching row; bound once below
    for (curr_row in seq_len(num_rows)) {
      # compare the row as character vectors so mixed column types line up
      v1 = as.character(as.vector(ws1[curr_row,]))  # row from worksheet 1
      v2 = as.character(as.vector(ws2[curr_row,]))  # row from worksheet 2
      if (!identical(v1,v2)) {
        # names of the columns whose cells differ in this row
        differs = vapply(seq_along(v1),
                         function(n) !identical(v1[n], v2[n]),
                         logical(1))
        col_error = colnames(ws1)[differs]
        # curr_row + 1 converts the data row to the excel row (skips header)
        mismatches[[length(mismatches) + 1]] = data.frame(row = curr_row + 1,
                                                          column = col_error)
      }
    }
    unmatched = do.call(rbind, mismatches)
    if (is.null(unmatched)) {
      # sheets differed (e.g. in attributes) but no cell-level mismatch found
      unmatched = data.frame(row = c(), column = c())
    }
    return(unmatched)
  }
}
| /DataCleaningScripts/compare_raw_data.r | permissive | eastonwhite/PortalData | R | false | false | 2,279 | r | # This script takes data input from two different users in two sheets of an excel
# workbook and prints a list of rows that are not identical, for review by the user.
# Output is a data frame of row and column of a mismatch.
# Notes: - row order matters, i.e. data sheets must be entered in same order
# - worksheets to be compared must be the first two worksheets in the workbook
# - name of worksheets does not matter
# - prints "worksheets identical" if two worksheets are identical
library(openxlsx)
# ===============================================================================
# Functions
compare_worksheets = function(excel_file) {
# this function compares the two excel worksheet to identify inconsistencies
# load data from excel workbook
ws1 = read.xlsx(excel_file, sheet = 1, colNames = TRUE, na.strings = '')
ws2 = read.xlsx(excel_file, sheet = 2, colNames = TRUE, na.strings = '')
# if the two worksheets are identical, exit function
if (identical(ws1,ws2)) {
print('Worksheets identical')
}
# otherwise, loop through rows one at a time
else {
unmatched = data.frame(row = c(),column = c()) # empty data frame for storing output
num_rows = length(ws1$month)
curr_row = 1
while (curr_row<=num_rows) {
v1 = as.character(as.vector(ws1[curr_row,])) # extract row from worksheet 1
v2 = as.character(as.vector(ws2[curr_row,])) # extract row from worksheet 2
# if the two versions of the row are not identical
if (!identical(v1,v2)) {
# loop through each element in the row
col_error = vector()
for (n in seq(length(v1))) {
if (!identical(v1[n],v2[n])) {
# add the column name to output vector
col_error = append(col_error,colnames(ws1)[n])
}
}
# append row and column info to output data frame (curr_row+1 to skip header in excel file)
unmatched = rbind(unmatched,data.frame(row = curr_row+1,column = col_error))
}
curr_row = curr_row+1 # increment index and continue loop
}
return(unmatched)
}
}
|
#' @title Resource Frequency
#'
#' @description Analyses the frequency of resources at different levels of analysis.
#'
#' @details
#' Argument \code{level} has the following options:
#' \itemize{
#' \item At \code{"log"} level, summary statistics show the number of times a resource executes an activity in the complete log.
#' \item On \code{"case"} level, summary statistics of the frequency of resources can be used to get a better view on the
#' variance between the different cases, to get an insight into the number of different resources working on each case together
#' with the number of activities a resource executes per case.
#' \item On \code{"activity"} level, the resource frequency states how many different resources are executing a specific
#' activity in the complete log.
#' \item On \code{"resource"} level, this metric simply shows the absolute and relative frequency of occurrences of each
#' resource in the complete log.
#' \item On \code{"resource-activity"} level, the absolute and relative number of times each resource-activity combination
#' occurs in the complete log can be calculated. Two different relative numbers are provided here, one from the resource
#' perspective and one from the activity perspective. At the resource perspective, the denominator is the total number of
#' executions by the resource under consideration. At the activity perspective, the denominator is the total number of
#' occurrences of the activity under consideration.
#' }
#'
#' @inherit end_activities params
#' @inherit activity_frequency params references seealso return
#'
#' @seealso \code{\link{resource_involvement}}
#'
#' @family metrics
#'
#' @concept metrics_organizational
#'
#' @export resource_frequency
# S3 generic: dispatches on the class of `log` (eventlog, grouped_eventlog,
# activitylog, grouped_activitylog); see the roxygen block above for the
# meaning of each level of analysis.
resource_frequency <- function(log,
                               level = c("log", "case", "activity", "resource", "resource-activity"),
                               append = deprecated(),
                               append_column = NULL,
                               sort = TRUE,
                               eventlog = deprecated()) {
	UseMethod("resource_frequency")
}
#' @describeIn resource_frequency Computes the resource frequency for an \code{\link[bupaR]{eventlog}}.
#' @export
resource_frequency.eventlog <- function(log,
                                        level = c("log", "case", "activity", "resource", "resource-activity"),
                                        append = deprecated(),
                                        append_column = NULL,
                                        sort = TRUE,
                                        eventlog = deprecated()) {

	# The soft-deprecated `eventlog` argument still wins over `log` when given.
	if(lifecycle::is_present(eventlog)) {
		lifecycle::deprecate_warn(when = "0.9.0",
								  what = "resource_frequency(eventlog)",
								  with = "resource_frequency(log)")
		log <- eventlog
	}
	append <- lifecycle_warning_append(append)
	level <- rlang::arg_match(level)

	# Dummy binding so the NSE column in arrange() passes R CMD check.
	absolute <- NULL

	# Default column to append depends on the level of analysis.
	if(is.null(append_column)) {
		append_column <- switch(level,
								case = "median",
								activity = "median",
								resource = "absolute",
								"resource-activity" = "absolute",
								"NA")
	}

	# Dispatch to the level-specific worker and compute the metric.
	calc <- switch(level,
				   log = resource_frequency_log,
				   case = resource_frequency_case,
				   activity = resource_frequency_activity,
				   resource = resource_frequency_resource,
				   "resource-activity" = resource_frequency_resource_activity)
	output <- calc(log = log)

	# Frequency tables are reported most-frequent first unless sorting is off.
	if(level %in% c("resource", "resource-activity") && sort) {
		output <- arrange(output, -absolute)
	}

	n_digits <- switch(level, resource = 2, "resource-activity" = 3, 9)
	return_metric(log, output, level, append, append_column,
				  "resource_frequency", n_digits)
}
#' @describeIn resource_frequency Computes the resource frequency for a \code{\link[bupaR]{grouped_eventlog}}.
#' @export
resource_frequency.grouped_eventlog <- function(log,
                                                level = c("log", "case", "activity", "resource", "resource-activity"),
                                                append = deprecated(),
                                                append_column = NULL,
                                                sort = TRUE,
                                                eventlog = deprecated()) {
	# Resolve the deprecated `eventlog` and `append` arguments with a warning.
	log <- lifecycle_warning_eventlog(log, eventlog)
	append <- lifecycle_warning_append(append)
	level <- rlang::arg_match(level)
	# Dummy binding so the NSE column in arrange() passes R CMD check.
	absolute <- NULL
	# Default appended column depends on the level of analysis.
	if(is.null(append_column)) {
		# Fix: use TRUE (not the re-assignable shorthand T) as the case_when
		# fallback, consistent with the eventlog method above.
		append_column <- case_when(level == "case" ~ "median",
								   level == "resource" ~ "absolute",
								   level == "resource-activity"~"absolute",
								   level == "activity"~"median",
								   TRUE ~ "NA")
	}
	FUN <- switch(level,
				  log = resource_frequency_log,
				  case = resource_frequency_case,
				  activity = resource_frequency_activity,
				  resource = resource_frequency_resource,
				  "resource-activity" = resource_frequency_resource_activity)
	# Apply the level-specific worker within each group of the grouped log.
	output <- bupaR:::apply_grouped_fun(log, FUN, .ignore_groups = FALSE, .keep_groups = FALSE, .returns_log = FALSE)
	# Report most-frequent first for the resource-based levels unless disabled.
	if(level %in% c("resource", "resource-activity") && sort) {
		output %>%
			arrange(-absolute) -> output
	}
	return_metric(log, output, level, append, append_column, "resource_frequency", ifelse(level == "resource", 2,
																						  ifelse(level == "resource-activity", 3,9)))
}
#' @describeIn resource_frequency Computes the resource frequency for an \code{\link[bupaR]{activitylog}}.
#' @export
resource_frequency.activitylog <- function(log,
                                           level = c("log", "case", "activity", "resource", "resource-activity"),
                                           append = deprecated(),
                                           append_column = NULL,
                                           sort = TRUE,
                                           eventlog = deprecated()) {

	# Resolve deprecated arguments first so their warnings fire exactly once here.
	log <- lifecycle_warning_eventlog(log, eventlog)
	append <- lifecycle_warning_append(append)
	level <- rlang::arg_match(level)

	# An activity log is handled by converting it to an event log and
	# delegating to the eventlog method.
	event_data <- bupaR::to_eventlog(log)
	resource_frequency.eventlog(event_data,
								level = level,
								append = append,
								append_column = append_column,
								sort = sort)
}
#' @describeIn resource_frequency Computes the resource frequency for a \code{\link[bupaR]{grouped_activitylog}}.
#' @export
resource_frequency.grouped_activitylog <- function(log,
                                                   level = c("log", "case", "activity", "resource", "resource-activity"),
                                                   append = deprecated(),
                                                   append_column = NULL,
                                                   sort = TRUE,
                                                   eventlog = deprecated()) {

	# Resolve deprecated arguments first so their warnings fire exactly once here.
	log <- lifecycle_warning_eventlog(log, eventlog)
	append <- lifecycle_warning_append(append)
	level <- rlang::arg_match(level)

	# A grouped activity log is handled by converting it to a grouped event log
	# and delegating to the grouped_eventlog method.
	event_data <- bupaR::to_eventlog(log)
	resource_frequency.grouped_eventlog(event_data,
										level = level,
										append = append,
										append_column = append_column,
										sort = sort)
}
| /R/resource_frequency.R | no_license | cran/edeaR | R | false | false | 6,666 | r | #' @title Resource Frequency
#'
#' @description Analyses the frequency of resources at different levels of analysis.
#'
#' @details
#' Argument \code{level} has the following options:
#' \itemize{
#' \item At \code{"log"} level, summary statistics show the number of times a resource executes an activity in the complete log.
#' \item On \code{"case"} level, summary statistics of the frequency of resources can be used to get a better view on the
#' variance between the different cases, to get an insight into the number of different resources working on each case together
#' with the number of activities a resource executes per case.
#' \item On \code{"activity"} level, the resource frequency states how many different resources are executing a specific
#' activity in the complete log.
#' \item On \code{"resource"} level, this metric simply shows the absolute and relative frequency of occurrences of each
#' resource in the complete log.
#' \item On \code{"resource-activity"} level, the absolute and relative number of times each resource-activity combination
#' occurs in the complete log can be calculated. Two different relative numbers are provided here, one from the resource
#' perspective and one from the activity perspective. At the resource perspective, the denominator is the total number of
#' executions by the resource under consideration. At the activity perspective, the denominator is the total number of
#' occurrences of the activity under consideration.
#' }
#'
#' @inherit end_activities params
#' @inherit activity_frequency params references seealso return
#'
#' @seealso \code{\link{resource_involvement}}
#'
#' @family metrics
#'
#' @concept metrics_organizational
#'
#' @export resource_frequency
resource_frequency <- function(log,
level = c("log", "case", "activity", "resource", "resource-activity"),
append = deprecated(),
append_column = NULL,
sort = TRUE,
eventlog = deprecated()) {
UseMethod("resource_frequency")
}
#' @describeIn resource_frequency Computes the resource frequency for an \code{\link[bupaR]{eventlog}}.
#' @export
resource_frequency.eventlog <- function(log,
level = c("log", "case", "activity", "resource", "resource-activity"),
append = deprecated(),
append_column = NULL,
sort = TRUE,
eventlog = deprecated()) {
if(lifecycle::is_present(eventlog)) {
lifecycle::deprecate_warn(
when = "0.9.0",
what = "resource_frequency(eventlog)",
with = "resource_frequency(log)")
log <- eventlog
}
append <- lifecycle_warning_append(append)
level <- rlang::arg_match(level)
absolute <- NULL
if(is.null(append_column)) {
append_column <- case_when(level == "case" ~ "median",
level == "resource" ~ "absolute",
level == "resource-activity"~"absolute",
level == "activity"~"median",
TRUE ~ "NA")
}
FUN <- switch(level,
log = resource_frequency_log,
case = resource_frequency_case,
activity = resource_frequency_activity,
resource = resource_frequency_resource,
"resource-activity" = resource_frequency_resource_activity)
output <- FUN(log = log)
if(level %in% c("resource", "resource-activity") && sort) {
output %>%
arrange(-absolute) -> output
}
return_metric(log, output, level, append, append_column, "resource_frequency", ifelse(level == "resource", 2,
ifelse(level == "resource-activity", 3,9)))
}
#' @describeIn resource_frequency Computes the resource frequency for a \code{\link[bupaR]{grouped_eventlog}}.
#' @export
resource_frequency.grouped_eventlog <- function(log,
                                                level = c("log", "case", "activity", "resource", "resource-activity"),
                                                append = deprecated(),
                                                append_column = NULL,
                                                sort = TRUE,
                                                eventlog = deprecated()) {

  # Resolve deprecated arguments and validate the requested level.
  log <- lifecycle_warning_eventlog(log, eventlog)
  append <- lifecycle_warning_append(append)
  level <- rlang::arg_match(level)

  # Placeholder so R CMD check does not flag the NSE column used in arrange().
  absolute <- NULL

  # Default column to append to the log depends on the analysis level.
  if(is.null(append_column)) {
    append_column <- case_when(level == "case" ~ "median",
                               level == "resource" ~ "absolute",
                               level == "resource-activity" ~ "absolute",
                               level == "activity" ~ "median",
                               TRUE ~ "NA")  # TRUE (not bare T) so the default branch cannot be masked
  }

  # Dispatch to the level-specific metric implementation.
  FUN <- switch(level,
                log = resource_frequency_log,
                case = resource_frequency_case,
                activity = resource_frequency_activity,
                resource = resource_frequency_resource,
                "resource-activity" = resource_frequency_resource_activity)

  # Apply the metric within each group of the grouped event log.
  output <- bupaR:::apply_grouped_fun(log, FUN, .ignore_groups = FALSE, .keep_groups = FALSE, .returns_log = FALSE)

  # Optionally sort resource-level output by descending frequency.
  if(level %in% c("resource", "resource-activity") && sort) {
    output %>%
      arrange(-absolute) -> output
  }

  return_metric(log, output, level, append, append_column, "resource_frequency", ifelse(level == "resource", 2,
                                                                                        ifelse(level == "resource-activity", 3, 9)))
}
#' @describeIn resource_frequency Computes the resource frequency for an \code{\link[bupaR]{activitylog}}.
#' @export
resource_frequency.activitylog <- function(log,
                                           level = c("log", "case", "activity", "resource", "resource-activity"),
                                           append = deprecated(),
                                           append_column = NULL,
                                           sort = TRUE,
                                           eventlog = deprecated()) {
  # Resolve deprecated arguments before delegating.
  log <- lifecycle_warning_eventlog(log, eventlog)
  append <- lifecycle_warning_append(append)
  level <- rlang::arg_match(level)

  # An activity log is handled by converting it to an event log and
  # delegating to the eventlog method with the same arguments.
  converted_log <- bupaR::to_eventlog(log)
  resource_frequency.eventlog(converted_log,
                              level = level,
                              append = append,
                              append_column = append_column,
                              sort = sort)
}
#' @describeIn resource_frequency Computes the resource frequency for a \code{\link[bupaR]{grouped_activitylog}}.
#' @export
resource_frequency.grouped_activitylog <- function(log,
                                                   level = c("log", "case", "activity", "resource", "resource-activity"),
                                                   append = deprecated(),
                                                   append_column = NULL,
                                                   sort = TRUE,
                                                   eventlog = deprecated()) {
  # Resolve deprecated arguments before delegating.
  log <- lifecycle_warning_eventlog(log, eventlog)
  append <- lifecycle_warning_append(append)
  level <- rlang::arg_match(level)

  # A grouped activity log is handled by converting it to a grouped event
  # log and delegating to the grouped_eventlog method.
  converted_log <- bupaR::to_eventlog(log)
  resource_frequency.grouped_eventlog(converted_log,
                                      level = level,
                                      append = append,
                                      append_column = append_column,
                                      sort = sort)
}
|
# Multi-variable time-dependent ROC: draws a 1-year ROC curve for every marker
# column (columns 3..ncol) of the input survival table into a single PDF.
library(survival)
library(survminer)
library(timeROC)

# Input: tab-separated table with row names; expects columns "futime" (days)
# and "fustat" (event indicator), followed by one column per marker.
inputFile="input1.txt"
outFile="ROC.pdf"

# NOTE(review): machine-specific working directory - adjust before running.
setwd("C:\\Users\\13321\\OneDrive\\桌面\\cox3\\18.multiROC")

rt=read.table(inputFile, header=TRUE, sep="\t", check.names=FALSE, row.names=1)
rt[,"futime"]=rt[,"futime"]/365   # follow-up time: days -> years

# One colour per marker column.
bioCol=rainbow(ncol(rt)-2)

# Draw all curves in one device; the original duplicated the i=3 iteration
# outside the loop - folded into a single loop with add=(i > 3).
aucText=c()
pdf(file=outFile,width=6,height=6)
for (i in 3:ncol(rt)) {
  ROC_rt=timeROC(T=rt$futime, delta=rt$fustat, marker=rt[,i],
                 cause=1, weighting='aalen', times=c(1), ROC=TRUE)
  # The first marker creates the plot; later markers are overlaid (add=TRUE).
  plot(ROC_rt, time=1, col=bioCol[i-2], title=FALSE, lwd=2, add=(i > 3))
  aucText=c(aucText, paste0(colnames(rt)[i], ", AUC=", sprintf("%.3f", ROC_rt$AUC[2])))
  if (i == 3) abline(0, 1)  # diagonal reference line, drawn once
}
legend("bottomright", aucText, lwd=2, bty="n", col=bioCol[1:(ncol(rt)-2)])
dev.off()

###### Video source: https://ke.biowolf.cn
###### Website (label garbled by encoding in the original): https://www.biowolf.cn/
###### WeChat official account (label garbled): biowolf_cn
###### Email (label garbled): biowolf@foxmail.com
###### Phone (label garbled): 18520221056
| /prediction mdel/13/bioR44.multiVarTimeROC.R | no_license | suer12/FIO-FAM-lncRNA | R | false | false | 1,204 | r |
# Multi-variable time-dependent ROC: draws a 1-year ROC curve for every marker
# column (columns 3..ncol) of the input survival table into a single PDF.
library(survival)
library(survminer)
library(timeROC)
# Input: tab-separated table with row names; expects columns "futime" (days)
# and "fustat" (event indicator), followed by one column per marker.
inputFile="input1.txt"
outFile="ROC.pdf"
# NOTE(review): machine-specific working directory - adjust before running.
setwd("C:\\Users\\13321\\OneDrive\\桌面\\cox3\\18.multiROC")
rt=read.table(inputFile, header=T, sep="\t", check.names=F, row.names=1)
# Convert follow-up time from days to years.
rt[,"futime"]=rt[,"futime"]/365
# colors: one per marker column
bioCol=rainbow(ncol(rt)-2)
# plotting
aucText=c()
pdf(file=outFile,width=6,height=6)
# First marker (column 3) creates the plot; the loop below overlays the rest.
i=3
ROC_rt=timeROC(T=rt$futime,delta=rt$fustat,marker=rt[,i],cause=1,weighting='aalen',times=c(1),ROC=TRUE)
plot(ROC_rt,time=1,col=bioCol[i-2],title=FALSE,lwd=2)
aucText=c(paste0(colnames(rt)[i],", AUC=",sprintf("%.3f",ROC_rt$AUC[2])))
abline(0,1)
for(i in 4:ncol(rt)){
ROC_rt=timeROC(T=rt$futime,delta=rt$fustat,marker=rt[,i],cause=1,weighting='aalen',times=c(1),ROC=TRUE)
plot(ROC_rt,time=1,col=bioCol[i-2],title=FALSE,lwd=2,add=TRUE)
aucText=c(aucText,paste0(colnames(rt)[i],", AUC=",sprintf("%.3f",ROC_rt$AUC[2])))
}
legend("bottomright", aucText,lwd=2,bty="n",col=bioCol[1:(ncol(rt)-2)])
dev.off()
###### Video source: https://ke.biowolf.cn
###### Website (label garbled by encoding in the original): https://www.biowolf.cn/
###### WeChat official account (label garbled): biowolf_cn
###### Email (label garbled): biowolf@foxmail.com
###### Phone (label garbled): 18520221056
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/cog.R
\name{as_cognostics}
\alias{as_cognostics}
\title{Cast a data frame as a cognostics data frame}
\usage{
as_cognostics(x, cond_cols, key_col = NULL, cog_desc = NULL)
}
\arguments{
\item{x}{a data frame}
\item{cond_cols}{the column name(s) that comprise the conditioning variables}
\item{key_col}{the column name that indicates the panel key}
\item{cog_desc}{an optional named list of descriptions for the cognostics columns}
}
\description{
Cast a data frame as a cognostics data frame
}
| /man/as_cognostics.Rd | no_license | timelyportfolio/trelliscopejs | R | false | true | 575 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/cog.R
\name{as_cognostics}
\alias{as_cognostics}
\title{Cast a data frame as a cognostics data frame}
\usage{
as_cognostics(x, cond_cols, key_col = NULL, cog_desc = NULL)
}
\arguments{
\item{x}{a data frame}
\item{cond_cols}{the column name(s) that comprise the conditioning variables}
\item{key_col}{the column name that indicates the panel key}
\item{cog_desc}{an optional named list of descriptions for the cognostics columns}
}
\description{
Cast a data frame as a cognostics data frame
}
|
#' Get data of German polls from wahlrecht.de.
#'
#' This is the main function to parse XML files from Wahlrecht.de to extract polling data.
#' It catches all XML elements that can be found.
#' Not all regions have XML available, their data can't be accessed with this function.
#' @param region string, all available regions are documented in the readme
#' @return dataframe
#' @examples
#' germanpolls(region = "de")
#' germanpolls(region = "by")
#' @importFrom RCurl url.exists
#' @export
germanpolls <- function(region = "de") {
  if(region == "de") {
    df <- get_data_from_xml_de()
  } else if (region == "eu") {
    # No XML feed exists for the EU election; point the user at the HTML page.
    # message() (not print) so status text goes to stderr and is not mistaken
    # for a return value.
    message("No XML yet, please go to http://www.wahlrecht.de/umfragen/europawahl.htm")
    return(invisible(NULL))
  } else {
    url <- paste0("http://www.wahlrecht.de/umfragen/xml/land_", region, ".xml")
    if(!RCurl::url.exists(url)) {
      message("No XML, sorry")
      return(invisible(NULL))
    }
    df <- get_data_from_xml_laender(url)
  }
  # Explicitly return the parsed data frame; the original relied on the
  # invisible value of the last assignment (or of print()) as the result.
  df
}
| /R/germanpolls.R | no_license | cutterkom/germanpolls | R | false | false | 929 | r | #' Get data of German polls from wahlrecht.de.
#'
#' This is the main function to parse XML files from Wahlrecht.de to extract polling data.
#' It catches all XML elements that can be found.
#' Not all regions have XML available, their data can't be accessed with this function.
#' @param region string, all available regions are documented in the readme
#' @return dataframe
#' @examples
#' germanpolls(region = "de")
#' germanpolls(region = "by")
#' @importFrom RCurl url.exists
#' @export
germanpolls <- function(region = "de") {
  if(region == "de") {
    df <- get_data_from_xml_de()
  } else if (region == "eu") {
    # No XML feed exists for the EU election; point the user at the HTML page.
    # message() (not print) so status text goes to stderr and is not mistaken
    # for a return value.
    message("No XML yet, please go to http://www.wahlrecht.de/umfragen/europawahl.htm")
    return(invisible(NULL))
  } else {
    url <- paste0("http://www.wahlrecht.de/umfragen/xml/land_", region, ".xml")
    if(!RCurl::url.exists(url)) {
      message("No XML, sorry")
      return(invisible(NULL))
    }
    df <- get_data_from_xml_laender(url)
  }
  # Explicitly return the parsed data frame; the original relied on the
  # invisible value of the last assignment (or of print()) as the result.
  df
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/chart_amOHLC.R
\name{amOHLC}
\alias{amOHLC}
\title{Plotting OHLC chart}
\usage{
amOHLC(
data,
xlab = "",
ylab = "",
horiz = FALSE,
zoom = TRUE,
positiveColor = "#7f8da9",
negativeColor = "#db4c3c",
names = c("low", "open", "close", "high"),
dataDateFormat = NULL,
minPeriod = ifelse(!is.null(dataDateFormat), "DD", ""),
...
)
}
\arguments{
\item{data}{\code{data.frame}, dataframe with at least 5 columns :
category, open (numeric), close (numeric), low (numeric),
high (numeric).}
\item{xlab}{\code{character}, label for x-axis.}
\item{ylab}{\code{character}, label for y-axis.}
\item{horiz}{\code{logical}, TRUE for an horizontal chart, FALSE for a vertical one}
\item{zoom}{\code{logical}, default set to TRUE : a cursor is added to the chart.}
\item{positiveColor}{\code{character}, color for positive values (in hexadecimal).}
\item{negativeColor}{\code{character}, color for negative values (in hexadecimal).}
\item{names}{\code{character}, names for the tooltip. Default to c("low", "open", "close", "high").}
\item{dataDateFormat}{\code{character}, default set to NULL. Even if your chart parses dates,
you can pass them as strings in your dataframe -
all you need to do is to set data date format and the chart will parse dates to date objects.
Check this page for available formats.
Please note that two-digit years (YY) as well as literal month names (MMM) are NOT supported in this setting.}
\item{minPeriod}{\code{character}, specifies the shortest period of your data.
This should be set only if dataDateFormat is not 'NULL'.
Possible period values:
fff - milliseconds, ss - seconds, mm - minutes, hh - hours, DD - days, MM - months, YYYY - years.
It's also possible to supply a number for increments, i.e. '15mm'
which will instruct the chart that your data is supplied in 15 minute increments.}
\item{...}{see \code{\link{amOptions}} for more options.}
}
\description{
amOHLC computes an OHLC chart of the given value.
}
\examples{
data("data_candleStick2")
amOHLC(data = data_candleStick2)
\dontrun{
# Other examples available which can be time consuming depending on your configuration.
if (requireNamespace("pipeR", quietly = TRUE)) {
require(pipeR)
# Change colors
amOHLC(data = data_candleStick2, positiveColor = "green", negativeColor = "red")
# Naming the axes
amOHLC(data = data_candleStick2, xlab = "categories", ylab = "values") \%>>\% setChartCursor()
# Rotate the labels for x axis
amOHLC(data = data_candleStick2, labelRotation = 90)
# Change names
amOHLC(data = data_candleStick2, names = c("min", "begin", "end", "max")) \%>>\% setChartCursor()
# Use amOptions
amOHLC(data = data_candleStick2, zoom = FALSE)
}
}
}
\references{
See online documentation \url{https://datastorm-open.github.io/introduction_ramcharts/}
and \link{amChartsAPI}
}
\seealso{
\link{amOptions}, \link{amBarplot}, \link{amBoxplot}, \link{amHist}, \link{amPie},
\link{amPlot}, \link{amTimeSeries}, \link{amStockMultiSet}, \link{amBullet}, \link{amRadar},
\link{amWind}, \link{amFunnel}, \link{amAngularGauge}, \link{amSolidGauge}, \link{amMekko},
\link{amCandlestick}, \link{amFloatingBar}, \link{amOHLC}, \link{amWaterfall}
}
| /man/amOHLC.Rd | no_license | datastorm-open/rAmCharts | R | false | true | 3,260 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/chart_amOHLC.R
\name{amOHLC}
\alias{amOHLC}
\title{Plotting OHLC chart}
\usage{
amOHLC(
data,
xlab = "",
ylab = "",
horiz = FALSE,
zoom = TRUE,
positiveColor = "#7f8da9",
negativeColor = "#db4c3c",
names = c("low", "open", "close", "high"),
dataDateFormat = NULL,
minPeriod = ifelse(!is.null(dataDateFormat), "DD", ""),
...
)
}
\arguments{
\item{data}{\code{data.frame}, dataframe with at least 5 columns :
category, open (numeric), close (numeric), low (numeric),
high (numeric).}
\item{xlab}{\code{character}, label for x-axis.}
\item{ylab}{\code{character}, label for y-axis.}
\item{horiz}{\code{logical}, TRUE for an horizontal chart, FALSE for a vertical one}
\item{zoom}{\code{logical}, default set to TRUE : a cursor is added to the chart.}
\item{positiveColor}{\code{character}, color for positive values (in hexadecimal).}
\item{negativeColor}{\code{character}, color for negative values (in hexadecimal).}
\item{names}{\code{character}, names for the tooltip. Default to c("low", "open", "close", "high").}
\item{dataDateFormat}{\code{character}, default set to NULL. Even if your chart parses dates,
you can pass them as strings in your dataframe -
all you need to do is to set data date format and the chart will parse dates to date objects.
Check this page for available formats.
Please note that two-digit years (YY) as well as literal month names (MMM) are NOT supported in this setting.}
\item{minPeriod}{\code{character}, specifies the shortest period of your data.
This should be set only if dataDateFormat is not 'NULL'.
Possible period values:
fff - milliseconds, ss - seconds, mm - minutes, hh - hours, DD - days, MM - months, YYYY - years.
It's also possible to supply a number for increments, i.e. '15mm'
which will instruct the chart that your data is supplied in 15 minute increments.}
\item{...}{see \code{\link{amOptions}} for more options.}
}
\description{
amOHLC computes an OHLC chart of the given value.
}
\examples{
data("data_candleStick2")
amOHLC(data = data_candleStick2)
\dontrun{
# Other examples available which can be time consuming depending on your configuration.
if (requireNamespace("pipeR", quietly = TRUE)) {
require(pipeR)
# Change colors
amOHLC(data = data_candleStick2, positiveColor = "green", negativeColor = "red")
# Naming the axes
amOHLC(data = data_candleStick2, xlab = "categories", ylab = "values") \%>>\% setChartCursor()
# Rotate the labels for x axis
amOHLC(data = data_candleStick2, labelRotation = 90)
# Change names
amOHLC(data = data_candleStick2, names = c("min", "begin", "end", "max")) \%>>\% setChartCursor()
# Use amOptions
amOHLC(data = data_candleStick2, zoom = FALSE)
}
}
}
\references{
See online documentation \url{https://datastorm-open.github.io/introduction_ramcharts/}
and \link{amChartsAPI}
}
\seealso{
\link{amOptions}, \link{amBarplot}, \link{amBoxplot}, \link{amHist}, \link{amPie},
\link{amPlot}, \link{amTimeSeries}, \link{amStockMultiSet}, \link{amBullet}, \link{amRadar},
\link{amWind}, \link{amFunnel}, \link{amAngularGauge}, \link{amSolidGauge}, \link{amMekko},
\link{amCandlestick}, \link{amFloatingBar}, \link{amOHLC}, \link{amWaterfall}
}
|
% Generated by roxygen2 (4.0.2): do not edit by hand
\name{mongo.create}
\alias{mongo.create}
\title{Create an object of class "mongo"}
\usage{
mongo.create(host = "127.0.0.1", name = "", username = "",
password = "", db = "admin", timeout = 0L)
}
\arguments{
\item{host}{(string vector) A list of hosts/ports to which to connect. If a
port is not given, 27017 is used. Seperate ports from the IP address by
colon, like "120.0.0.1:12345".}
\item{name}{(string) The name of the replset to which to connect. If name ==
"" (the default), the hosts are tried one by one until a connection is made.
Otherwise, name must be the name of the replset and the given hosts are
assumed to be seeds of the replset. Each of these is connected to and
queried in turn until one reports that it is a master. This master is then
queried for a list of hosts and these are in turn connected to and verified
as belonging to the given replset name. When one of these reports that it
is a master, that connection is used to form the actual connection as
returned.}
\item{username}{(string) The username to be used for authentication
purposes. The default username of "" indicates that no user authentication
is to be performed by the initial connect.}
\item{password}{(string) The password corresponding to the given username.}
\item{db}{(string) The name of the database upon which to authenticate the
given username and password. If authentication fails, the connection is
disconnected, but mongo.get.err() will not indicate an error.}
\item{timeout}{(as.integer) The number of milliseconds to wait before timing
out of a network operation. The default (0L) indicates no timeout.}
}
\value{
If successful, a mongo object for use in subsequent database
operations; otherwise, mongo.get.err() may be called on the returned mongo
object to see why it failed.
}
\description{
Connect to a MongoDB server or replset and return an object of class "mongo"
used for further communication over the connection.
}
\details{
All parameters are stored as attributes of the returned mongo object. Note
that these attributes only reflect the initial parameters. Only the external
data pointed to by the "mongo" attribute actually changes if, for example,
mongo.timeout is called after the initial call to \code{mongo.create}.
}
\examples{
mongo <- mongo.create()
\dontrun{
mongo <- mongo.create("192.168.0.3")}
}
\seealso{
\link{mongo},\cr \code{\link{mongo.is.connected}},\cr
\code{\link{mongo.disconnect}},\cr \code{\link{mongo.reconnect}},\cr
\code{\link{mongo.get.err}},\cr \code{\link{mongo.get.primary}},\cr
\code{\link{mongo.get.hosts}},\cr \code{\link{mongo.get.socket}},\cr
\code{\link{mongo.set.timeout}},\cr \code{\link{mongo.get.timeout}}.
}
| /man/mongo.create.Rd | no_license | agnaldodasilva/rmongodb | R | false | false | 2,746 | rd | % Generated by roxygen2 (4.0.2): do not edit by hand
\name{mongo.create}
\alias{mongo.create}
\title{Create an object of class "mongo"}
\usage{
mongo.create(host = "127.0.0.1", name = "", username = "",
password = "", db = "admin", timeout = 0L)
}
\arguments{
\item{host}{(string vector) A list of hosts/ports to which to connect. If a
port is not given, 27017 is used. Seperate ports from the IP address by
colon, like "120.0.0.1:12345".}
\item{name}{(string) The name of the replset to which to connect. If name ==
"" (the default), the hosts are tried one by one until a connection is made.
Otherwise, name must be the name of the replset and the given hosts are
assumed to be seeds of the replset. Each of these is connected to and
queried in turn until one reports that it is a master. This master is then
queried for a list of hosts and these are in turn connected to and verified
as belonging to the given replset name. When one of these reports that it
is a master, that connection is used to form the actual connection as
returned.}
\item{username}{(string) The username to be used for authentication
purposes. The default username of "" indicates that no user authentication
is to be performed by the initial connect.}
\item{password}{(string) The password corresponding to the given username.}
\item{db}{(string) The name of the database upon which to authenticate the
given username and password. If authentication fails, the connection is
disconnected, but mongo.get.err() will not indicate an error.}
\item{timeout}{(as.integer) The number of milliseconds to wait before timing
out of a network operation. The default (0L) indicates no timeout.}
}
\value{
If successful, a mongo object for use in subsequent database
operations; otherwise, mongo.get.err() may be called on the returned mongo
object to see why it failed.
}
\description{
Connect to a MongoDB server or replset and return an object of class "mongo"
used for further communication over the connection.
}
\details{
All parameters are stored as attributes of the returned mongo object. Note
that these attributes only reflect the initial parameters. Only the external
data pointed to by the "mongo" attribute actually changes if, for example,
mongo.timeout is called after the initial call to \code{mongo.create}.
}
\examples{
mongo <- mongo.create()
\dontrun{
mongo <- mongo.create("192.168.0.3")}
}
\seealso{
\link{mongo},\cr \code{\link{mongo.is.connected}},\cr
\code{\link{mongo.disconnect}},\cr \code{\link{mongo.reconnect}},\cr
\code{\link{mongo.get.err}},\cr \code{\link{mongo.get.primary}},\cr
\code{\link{mongo.get.hosts}},\cr \code{\link{mongo.get.socket}},\cr
\code{\link{mongo.set.timeout}},\cr \code{\link{mongo.get.timeout}}.
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/paragraph2vec.R
\name{write.paragraph2vec}
\alias{write.paragraph2vec}
\title{Save a paragraph2vec model to disk}
\usage{
write.paragraph2vec(x, file)
}
\arguments{
\item{x}{an object of class \code{paragraph2vec} or \code{paragraph2vec_trained} as returned by \code{\link{paragraph2vec}}}
\item{file}{the path to the file where to store the model}
}
\value{
invisibly a logical if the resulting file exists and has been written on your hard disk
}
\description{
Save a paragraph2vec model as a binary file to disk
}
\examples{
\dontshow{if(require(tokenizers.bpe))\{}
library(tokenizers.bpe)
data(belgium_parliament, package = "tokenizers.bpe")
x <- subset(belgium_parliament, language \%in\% "french")
x <- subset(x, nchar(text) > 0 & txt_count_words(text) < 1000)
\donttest{
model <- paragraph2vec(x = x, type = "PV-DM", dim = 100, iter = 20)
model <- paragraph2vec(x = x, type = "PV-DBOW", dim = 100, iter = 20)
}
\dontshow{
model <- paragraph2vec(x = head(x, 5),
type = "PV-DM", dim = 5, iter = 1, min_count = 0)
}
path <- "mymodel.bin"
\dontshow{
path <- tempfile(pattern = "paragraph2vec", fileext = ".bin")
}
write.paragraph2vec(model, file = path)
model <- read.paragraph2vec(file = path)
vocab <- summary(model, type = "vocabulary", which = "docs")
vocab <- summary(model, type = "vocabulary", which = "words")
embedding <- as.matrix(model, which = "docs")
embedding <- as.matrix(model, which = "words")
\dontshow{
file.remove(path)
}
\dontshow{\} # End of main if statement running only if the required packages are installed}
}
\seealso{
\code{\link{paragraph2vec}}
}
| /man/write.paragraph2vec.Rd | no_license | abhisaw/doc2vec | R | false | true | 1,687 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/paragraph2vec.R
\name{write.paragraph2vec}
\alias{write.paragraph2vec}
\title{Save a paragraph2vec model to disk}
\usage{
write.paragraph2vec(x, file)
}
\arguments{
\item{x}{an object of class \code{paragraph2vec} or \code{paragraph2vec_trained} as returned by \code{\link{paragraph2vec}}}
\item{file}{the path to the file where to store the model}
}
\value{
invisibly a logical if the resulting file exists and has been written on your hard disk
}
\description{
Save a paragraph2vec model as a binary file to disk
}
\examples{
\dontshow{if(require(tokenizers.bpe))\{}
library(tokenizers.bpe)
data(belgium_parliament, package = "tokenizers.bpe")
x <- subset(belgium_parliament, language \%in\% "french")
x <- subset(x, nchar(text) > 0 & txt_count_words(text) < 1000)
\donttest{
model <- paragraph2vec(x = x, type = "PV-DM", dim = 100, iter = 20)
model <- paragraph2vec(x = x, type = "PV-DBOW", dim = 100, iter = 20)
}
\dontshow{
model <- paragraph2vec(x = head(x, 5),
type = "PV-DM", dim = 5, iter = 1, min_count = 0)
}
path <- "mymodel.bin"
\dontshow{
path <- tempfile(pattern = "paragraph2vec", fileext = ".bin")
}
write.paragraph2vec(model, file = path)
model <- read.paragraph2vec(file = path)
vocab <- summary(model, type = "vocabulary", which = "docs")
vocab <- summary(model, type = "vocabulary", which = "words")
embedding <- as.matrix(model, which = "docs")
embedding <- as.matrix(model, which = "words")
\dontshow{
file.remove(path)
}
\dontshow{\} # End of main if statement running only if the required packages are installed}
}
\seealso{
\code{\link{paragraph2vec}}
}
|
# The purpose of this project is to demonstrate your ability to collect, work
# with, and clean a data set. The goal is to prepare tidy data that can be used
# for later analysis. You will be graded by your peers on a series of yes/no
# questions related to the project. You will be required to submit:
# 1) a tidy data set as described below,
# 2) a link to a Github repository with your script for performing the analysis,
# and 3) a code book that describes the variables, the data, and any
# transformations or work that you performed to clean up the data called
# CodeBook.md.
# You should also include a README.md in the repo with your scripts. This repo
# explains how all of the scripts work and how they are connected.
# One of the most exciting areas in all of data science right now is wearable
# computing - see for example this article . Companies like Fitbit, Nike, and
# Jawbone Up are racing to develop the most advanced algorithms to attract new
# users. The data linked to from the course website represent data collected from
# the accelerometers from the Samsung Galaxy S smartphone. A full description is
# available at the site where the data was obtained:
#
# http://archive.ics.uci.edu/ml/datasets/Human+Activity+Recognition+Using+
# Smartphones
#
# Here are the data for the project:
#
# https://d396qusza40orc.cloudfront.net/getdata%2Fprojectfiles%2FUCI%20HAR%
# 20Dataset.zip
# You should create one R script called run_analysis.R that does the following.
#####################################################################################
#1. Merges the training and the test sets to create one data set.
# ---- Step 1: read the raw UCI HAR files and merge train/test into one table ----
# NOTE(review): setwd() with a machine-specific absolute path makes this script
# non-portable - adjust the paths before running on another machine.
options(stringsAsFactors=FALSE)
setwd("E:/DataScientist/DataScientistToolbox/DataScience/datasciencecoursera/getdata-projectfiles-UCI HAR Dataset/")
directory3='UCI HAR Dataset'
files3 <- list.files(directory3, full.names=TRUE)
files3 # character class and mode
# [1] "UCI HAR Dataset/activity_labels.txt"
# [2] "UCI HAR Dataset/features.txt"
# [3] "UCI HAR Dataset/features_info.txt"
# [4] "UCI HAR Dataset/README.txt"
# [5] "UCI HAR Dataset/test"
# [6] "UCI HAR Dataset/train"
#read.table()
# Activity code -> label lookup table (6 rows).
activity_labels <-read.table(files3[1],as.is=TRUE,colClasses="character",blank.lines.skip=TRUE)
#features<-read.table(files3[2],as.is=TRUE,colClasses="character",blank.lines.skip=TRUE)
# 'data.frame': 6 obs. of 2 variables:
# $ V1: int 1 2 3 4 5 6
# $ V2: chr "WALKING" "WALKING_UPSTAIRS" "WALKING_DOWNSTAIRS" "SITTING" ...
# Feature names for the 561 measurement columns.
# NOTE(review): reads a manually converted features.CSV rather than the
# shipped features.txt - confirm the CSV matches the original feature list.
features<-read.csv("UCI HAR Dataset/features.CSV",header=FALSE)
setwd("E:/DataScientist/DataScientistToolbox/DataScience/datasciencecoursera/getdata-projectfiles-UCI HAR Dataset/UCI HAR Dataset/")
directory1='train'
directory2='test'
files <- list.files(directory1, full.names=TRUE)
files
# [1] "train/Inertial Signals"
# [2] "train/subject_train.csv"
# [3] "train/subject_train.txt"
# [4] "train/X_train.csv"
# [5] "train/X_train.txt"
# [6] "train/y_train.csv"
# [7] "train/y_train.txt"
files2 <- list.files(directory2, full.names=TRUE)
files2
# [1] "test/Inertial Signals"
# [2] "test/subject_test.txt"
# [3] "test/X_test.csv"
# [4] "test/X_test.txt"
# [5] "test/y_test.csv"
# [6] "test/y_test.txt"
#read.table()
# NOTE(review): files are addressed by position in the directory listing;
# adding or removing a file silently shifts these indices - verify the paths.
subject_train<-read.table(files[3])
X_train<-read.table(files[5])
y_train<-read.table(files[7])
subject_test<-read.table(files2[2])
X_test<-read.table(files2[4])
y_test<-read.table(files2[6])
# combine X_train with X_test 7352 rows by 561 col with 2947 rows by 561 col
XX<-rbind(X_train,X_test) # 10299obs by 561 variables
# combine subject_train with subject_test 7352 by 1, 2947 by 1
ss<-rbind(subject_train,subject_test) # 10299obs by 1 variable
# combine y_train with y_test 7352 by 1, 2947 by 1
yy<-rbind(y_train,y_test) # 10299 obs by 1 variable
# add features to XX along 561 as colnames
#fxx<-rbind(features,xx) # don't use do colnames(xx) <- features
head(features)
# V1 V2
# 1 1 tBodyAcc-mean()-X
# 2 2 tBodyAcc-mean()-Y
# 3 3 tBodyAcc-mean()-Z
# 4 4 tBodyAcc-std()-X
# 5 5 tBodyAcc-std()-Y
# 6 6 tBodyAcc-std()-Z
features[1:10,2]
colnames(XX) <- features[,2]
head(XX)
names(XX)
# [1] "tBodyAcc-mean()-X"
# [2] "tBodyAcc-mean()-Y"
# [3] "tBodyAcc-mean()-Z"
# [4] "tBodyAcc-std()-X"
# [5] "tBodyAcc-std()-Y"
# [6] "tBodyAcc-std()-Z"
# [7] "tBodyAcc-mad()-X"
# [8] "tBodyAcc-mad()-Y"
# [9] "tBodyAcc-mad()-Z"
# [10] "tBodyAcc-max()-X"
# add activity to yy along 6 factors split by factors
# merge activity with yy using 1-6 activity code
head(yy)
head(activity_labels)
# NOTE(review): merge() does not preserve row order - after this call yy is
# sorted by activity code, so its rows may no longer line up one-to-one with
# XX and ss when cbind()-ed below. Verify, or use match()/a keyed lookup that
# keeps the original row order.
yy<-merge(yy,activity_labels,by.x="V1",by.y="V1")
head(yy)
# V1 V2
# 1 1 WALKING
# 2 1 WALKING
# 3 1 WALKING
# 4 1 WALKING
# 5 1 WALKING
# 6 1 WALKING
str(yy)
colnames(yy)<-c("actcode","activity")
############# Now we have XX , yy, and ss
dim(XX) # 10299 by 561
dim(yy) # 10299 by 1
dim(ss) # 10299 by 1
# cbind() them
data <- cbind(XX,ss,yy) # 10299obs by 564 variables
str(data)
names(data) # keep only measurements of mean and stdev amongst measurements
# col 562 from ss does not have column heading
unique(data[,562]) # ids 1- 30 rename subject id
names(data)<-sub("V1",'subjectid',names(data)) # worked as expected
#########################################################################
#2. Extracts only the measurements on the mean and stdev for each measurement.
# Used search feature of notepad++ to find which variable names had mean or std
# NOTE(review): hard-coded column indices are brittle; selecting with
# grepl("mean|std", names(data)) would survive changes in the feature list.
data2<-cbind(data[,1:6],data[,41:46],data[,81:86],data[,121:126],data[,161:166])
data2_1<-cbind(data[,201:202],data[,214:215],data[,227:228],data[,240:241],data[,253:254])
data2_2<-cbind(data[,266:271],data[,294:296],data[,345:350],data[,373:375])
data2_3<-cbind(data[,424:429],data[,452:454],data[,503:504],data[,513],data[,516:517])
# data[,513] colname = "fBodyAccMag-meanFreq()", data[,539]="fBodyBodyGyroMag-meanFreq()"
# NOTE(review): single-column selections such as data[,513] drop to a vector,
# so those columns lose their original names in the cbind() result (they get
# deparsed names) - verify the column names of data2_3/data2_4 downstream.
data2_4<-cbind(data[,526],data[,529:530],data[,539],data[,542:543],data[,552],data[,555:564])
# data[,526] "fBodyBodyAccJerkMag-meanFreq()", data[,552]= "fBodyBodyGyroJerkMag-meanFreq()"
data3<-cbind(data2,data2_1,data2_2,data2_3,data2_4)
# 10299 obs by 69 variables
str(data3)
names(data3)
#############################################################################
#3. Uses descriptive activity names to name the activities in the data set
# columns 70, 73, 76, 79 should be dropped as variable name is not included
# make a note if the meanFreq information is needed later(the names were recorded above incase they are needed to be included later)
# Keeping 'actcode' and 'activity" since 'actcode' is numeric and
# 'activity' is character
# [1] "WALKING"
# [2] "WALKING_UPSTAIRS"
# [3] "WALKING_DOWNSTAIRS"
# [4] "SITTING"
# [5] "STANDING"
# [6] "LAYING"
# NOTE(review): data3[71:72] (no comma) is data-frame *column* selection,
# equivalent to data3[,71:72] here - intentional, but easy to misread.
data3 <-cbind(data3[,1:69],data3[71:72],data3[74:75],data3[77:78],data3[80:89])
# 10299 obs by 83 variables
names(data3)
# These names are descriptive enough. They can not be confused.
###########################################################################
#4. Appropriately labels the data set with descriptive variable names.
# example gsub("-|\\()", "", "fbody-body-gyro-jerck-mag-sd()")
# Strip "-", "()", "(" and ")" from the variable names.
names(data3)<-gsub("-|\\()|\\(|\\)","",names(data3))
#names(data3)<-gsub("-","",names(data3))
#names(data3)
names(data3)<-tolower(names(data3))
names(data3)
# Replace any "," in names with "with".
names(data3)<-gsub("\\,","with",names(data3))
names(data3)<-gsub("bodybody","body",names(data3))
# seems to be a typo after checking featuresinfo.txt file
names(data3)
###### Decision to keep tname and fname format as this has to do with
# Time Domain and Frequency Domain ideas in Fourier Analysis
# the t and f have enough information.
#######################################################
# put subjectid, activity, actcode in front of dataset
data4<-cbind(data3[,83:85],data3[,1:82])
names(data4)
save(data4,file= "./data4.RData") # saved as an R object
#######################################################################
########### data4 is my official data set after steps 1-4 of project
#5. Creates a second, independent tidy data set with the average of each
# variable for each activity and each subject.
# clear environment , clear console
## restore the saved values to the user's workspace
setwd("E:/DataScientist/DataScientistToolbox/DataScience/datasciencecoursera/getdata-projectfiles-UCI HAR Dataset/UCI HAR Dataset/")
load("data4.RData", .GlobalEnv)
names(data4)
# Split off the measurement columns and the three id columns for inspection.
measures<-data4[,4:85]
by1<-data4[,1]
by2<-data4[,2]
by3<-data4[,3]
length(by1) #10299
length(by2)
length(by3)
unique(by1) # 1-30
unique(by2)
unique(by3)
# [1] "WALKING" "WALKING_UPSTAIRS"
# [3] "WALKING_DOWNSTAIRS" "SITTING"
# [5] "STANDING" "LAYING"
# not satisfied with aggregates output will use tapply with two factors
# and loop over the 79 variables.
# data5<-data.structure()
# for(i in 4:85){
# data5[,i]<-tapply(data4[,i],INDEX=list(by1,by2),FUN='mean',simplify=TRUE)
# }
library(reshape2)
library(plyr)
# Melt to long form: one row per (subject, activity, variable, value).
data4melt <- melt(data4,id.vars=c("subjectid","actcode","activity"),
variable.name = "measurement_variables",
value.name = "measurement_value")
str(data4melt)
# 'data.frame': 844518 obs. of 5 variables:
# $ subjectid : int 1 1 1 1 1 1 1 1 1 1 ...
# $ actcode : int 1 1 1 1 1 1 1 1 1 1 ...
# $ activity : chr "WALKING" "WALKING" "WALKING" "WALKING" ...
# $ measurement_variables: Factor w/ 82 levels "tbodyaccmeanx",..: 1 1 1 1 1 1 1 1 1 1 ...
# $ measurement_value : num 0.289 0.278 0.28 0.279 0.277 ...
dim(data4melt)
#[1] 844518 5 Long Form
# Cast back to wide form, averaging each variable per (subject, activity).
data5<-dcast(data4melt, subjectid+actcode+activity~measurement_variables,fun.aggregate=mean)
dim(data5)
#[1] 35 85
# NOTE(review): 30 subjects x 6 activities should give 180 rows; the recorded
# "35" above looks inconsistent with that - re-run and verify.
# data5 is the "Tidy Data Set"
save(data5,file= "./data5.RData")
load("data5.RData", .GlobalEnv)
names(data5)
str(data5)
# Please upload the tidy data set created in step 5 of the instructions. Please
# upload your data set as a txt file created with write.table() using
# row.name=FALSE (do not cut and paste a dataset directly into the text box, as
# this may cause errors saving your submission).
write.table(data5,file= "./data5.txt",row.names=FALSE)
| /run_analysis.R | permissive | dxander/WearableComputingDataCleaningProject | R | false | false | 10,519 | r | # The purpose of this project is to demonstrate your ability to collect, work
# with, and clean a data set. The goal is to prepare tidy data that can be used
# for later analysis. You will be graded by your peers on a series of yes/no
# questions related to the project. You will be required to submit:
# 1) a tidy data set as described below,
# 2) a link to a Github repository with your script for performing the analysis,
# and 3) a code book that describes the variables, the data, and any
# transformations or work that you performed to clean up the data called
# CodeBook.md.
# You should also include a README.md in the repo with your scripts. This repo
# explains how all of the scripts work and how they are connected.
# One of the most exciting areas in all of data science right now is wearable
# computing - see for example this article . Companies like Fitbit, Nike, and
# Jawbone Up are racing to develop the most advanced algorithms to attract new
# users. The data linked to from the course website represent data collected from
# the accelerometers from the Samsung Galaxy S smartphone. A full description is
# available at the site where the data was obtained:
#
# http://archive.ics.uci.edu/ml/datasets/Human+Activity+Recognition+Using+
# Smartphones
#
# Here are the data for the project:
#
# https://d396qusza40orc.cloudfront.net/getdata%2Fprojectfiles%2FUCI%20HAR%
# 20Dataset.zip
# Step 1: Merge the training and the test sets to create one data set.
#####################################################################################
options(stringsAsFactors=FALSE)
# NOTE(review): hard-coded absolute setwd() makes this script machine-specific;
# consider relative paths instead. Left unchanged here.
setwd("E:/DataScientist/DataScientistToolbox/DataScience/datasciencecoursera/getdata-projectfiles-UCI HAR Dataset/")
directory3='UCI HAR Dataset'
files3 <- list.files(directory3, full.names=TRUE)
files3 # character vector of top-level paths in the dataset directory
# Expected listing (from an interactive run):
# [1] "UCI HAR Dataset/activity_labels.txt"
# [2] "UCI HAR Dataset/features.txt"
# [3] "UCI HAR Dataset/features_info.txt"
# [4] "UCI HAR Dataset/README.txt"
# [5] "UCI HAR Dataset/test"
# [6] "UCI HAR Dataset/train"
# Activity code -> activity name lookup (6 rows: WALKING, ..., LAYING).
activity_labels <-read.table(files3[1],as.is=TRUE,colClasses="character",blank.lines.skip=TRUE)
#features<-read.table(files3[2],as.is=TRUE,colClasses="character",blank.lines.skip=TRUE)
# Interactive str() of activity_labels showed:
# 'data.frame': 6 obs. of 2 variables:
# $ V1: int 1 2 3 4 5 6   (NOTE(review): colClasses="character" should make
#                          this chr, not int -- output captured from an
#                          earlier run; verify if it matters downstream)
# $ V2: chr "WALKING" "WALKING_UPSTAIRS" "WALKING_DOWNSTAIRS" "SITTING" ...
# NOTE(review): reads a CSV copy of features.txt that the author created by
# hand; the repo as distributed only ships features.txt.
features<-read.csv("UCI HAR Dataset/features.CSV",header=FALSE)
setwd("E:/DataScientist/DataScientistToolbox/DataScience/datasciencecoursera/getdata-projectfiles-UCI HAR Dataset/UCI HAR Dataset/")
directory1='train'
directory2='test'
files <- list.files(directory1, full.names=TRUE)
files
# Expected listing -- note read calls below index into this by position:
# [1] "train/Inertial Signals"
# [2] "train/subject_train.csv"
# [3] "train/subject_train.txt"
# [4] "train/X_train.csv"
# [5] "train/X_train.txt"
# [6] "train/y_train.csv"
# [7] "train/y_train.txt"
files2 <- list.files(directory2, full.names=TRUE)
files2
# [1] "test/Inertial Signals"
# [2] "test/subject_test.txt"
# [3] "test/X_test.csv"
# [4] "test/X_test.txt"
# [5] "test/y_test.csv"
# [6] "test/y_test.txt"
# NOTE(review): positional indexing (files[3], files[5], ...) silently breaks
# if extra files appear in the directories; matching by name would be safer.
subject_train<-read.table(files[3])
X_train<-read.table(files[5])
y_train<-read.table(files[7])
subject_test<-read.table(files2[2])
X_test<-read.table(files2[4])
y_test<-read.table(files2[6])
# Stack train on top of test; the same train-then-test order must be used for
# all three objects so rows stay aligned.
XX<-rbind(X_train,X_test) # 10299 obs by 561 variables (measurements)
ss<-rbind(subject_train,subject_test) # 10299 obs by 1 variable (subject id)
yy<-rbind(y_train,y_test) # 10299 obs by 1 variable (activity code)
# Use the second column of features as XX's column names (do not rbind it).
#fxx<-rbind(features,xx) # don't use do colnames(xx) <- features
head(features)
# V1 V2
# 1 1 tBodyAcc-mean()-X
# 2 2 tBodyAcc-mean()-Y
# 3 3 tBodyAcc-mean()-Z
# 4 4 tBodyAcc-std()-X
# 5 5 tBodyAcc-std()-Y
# 6 6 tBodyAcc-std()-Z
features[1:10,2]
# NOTE(review): features.txt contains duplicated names (e.g. the bandsEnergy
# variables), so XX ends up with non-unique column names; harmless for the
# positional subsetting used later, but worth knowing.
colnames(XX) <- features[,2]
head(XX)
names(XX)
# First few expected names:
# [1] "tBodyAcc-mean()-X"
# [2] "tBodyAcc-mean()-Y"
# [3] "tBodyAcc-mean()-Z"
# [4] "tBodyAcc-std()-X"
# [5] "tBodyAcc-std()-Y"
# [6] "tBodyAcc-std()-Z"
# [7] "tBodyAcc-mad()-X"
# [8] "tBodyAcc-mad()-Y"
# [9] "tBodyAcc-mad()-Z"
# [10] "tBodyAcc-max()-X"
# Attach descriptive activity names to the activity codes in yy.
head(yy)
head(activity_labels)
# BUG FIX: the original used merge(yy, activity_labels, ...), but merge()
# sorts its result by the join key and therefore destroys the row order of
# yy. Since yy is cbind()-ed positionally with XX and ss below, the activity
# labels would no longer line up with their measurement rows. A match()-based
# lookup returns one label per row of yy IN THE ORIGINAL ORDER.
yy$V2 <- activity_labels$V2[match(yy$V1, activity_labels$V1)]
head(yy)
# Expected:
# V1 V2
# 1 5 STANDING   (order of y_train preserved)
str(yy)
colnames(yy)<-c("actcode","activity")
############# Now we have XX , yy, and ss, all 10299 rows in the same order
dim(XX) # 10299 by 561
dim(yy) # 10299 by 2 (actcode, activity)
dim(ss) # 10299 by 1
# Bind them column-wise into one data set.
data <- cbind(XX,ss,yy) # 10299 obs by 564 variables
str(data)
names(data)
# Column 562 (from ss) kept the default name "V1"; rename it to subjectid.
# sub() replaces only the first match, which is exactly that column.
unique(data[,562]) # subject ids 1-30
names(data)<-sub("V1",'subjectid',names(data))
#########################################################################
# Step 2: Extract only the measurements on the mean and standard deviation
# for each measurement.
# The column index ranges below were found by searching the variable names
# for "mean"/"std" in a text editor.
# NOTE(review): hard-coded indices are fragile -- they silently select the
# wrong columns if the feature list ever changes; grep("mean|std",
# names(data)) would be self-maintaining. Left as-is to preserve the exact
# column choice (which deliberately includes some meanFreq columns).
data2<-cbind(data[,1:6],data[,41:46],data[,81:86],data[,121:126],data[,161:166])
data2_1<-cbind(data[,201:202],data[,214:215],data[,227:228],data[,240:241],data[,253:254])
data2_2<-cbind(data[,266:271],data[,294:296],data[,345:350],data[,373:375])
data2_3<-cbind(data[,424:429],data[,452:454],data[,503:504],data[,513],data[,516:517])
# data[,513] colname = "fBodyAccMag-meanFreq()", data[,539]="fBodyBodyGyroMag-meanFreq()"
data2_4<-cbind(data[,526],data[,529:530],data[,539],data[,542:543],data[,552],data[,555:564])
# data[,526] "fBodyBodyAccJerkMag-meanFreq()", data[,552]= "fBodyBodyGyroJerkMag-meanFreq()"
data3<-cbind(data2,data2_1,data2_2,data2_3,data2_4)
# Result: 10299 obs by 89 variables (trimmed further in the next step).
str(data3)
names(data3)
#############################################################################
# Step 3: Use descriptive activity names to name the activities in the data
# set. The activity names brought in from activity_labels are already
# self-describing:
# [1] "WALKING"
# [2] "WALKING_UPSTAIRS"
# [3] "WALKING_DOWNSTAIRS"
# [4] "SITTING"
# [5] "STANDING"
# [6] "LAYING"
# Columns 70, 73, 76, 79 are dropped because cbind()-ing single columns above
# lost their names ("data[,513]" etc.); the meanFreq names were recorded in
# the comments of step 2 in case they are needed later.
# Both 'actcode' (numeric) and 'activity' (character) are kept.
data3 <-cbind(data3[,1:69],data3[71:72],data3[74:75],data3[77:78],data3[80:89])
# 10299 obs by 85 variables (comment in an earlier draft said 83)
names(data3)
###########################################################################
# Step 4: Appropriately label the data set with descriptive variable names.
# Strip "-", "()", "(" and ")" from the names, e.g.
# "fBody-body-gyro-jerk-mag-sd()" -> "fbodybodygyrojerkmagsd".
# NOTE(review): the order of these gsub/tolower calls matters; do not reorder.
names(data3)<-gsub("-|\\()|\\(|\\)","",names(data3))
#names(data3)<-gsub("-","",names(data3))
#names(data3)
names(data3)<-tolower(names(data3))
names(data3)
# Replace commas (from names like "angle(X,gravityMean)") with "with".
names(data3)<-gsub("\\,","with",names(data3))
# "BodyBody" is a known typo in the original features_info.txt; collapse it.
names(data3)<-gsub("bodybody","body",names(data3))
names(data3)
###### Decision: keep the leading t/f prefixes -- they distinguish the
# time-domain from the frequency-domain (Fourier) signals and carry enough
# information on their own.
#######################################################
# Put subjectid, activity, actcode in front of the data set.
data4<-cbind(data3[,83:85],data3[,1:82])
names(data4)
save(data4,file= "./data4.RData") # saved as an R object
#######################################################################
########### data4 is the official data set after steps 1-4 of the project.
# Step 5: Create a second, independent tidy data set with the average of each
# variable for each activity and each subject.
# (The environment is assumed cleared here; restore data4 from disk.)
setwd("E:/DataScientist/DataScientistToolbox/DataScience/datasciencecoursera/getdata-projectfiles-UCI HAR Dataset/UCI HAR Dataset/")
load("data4.RData", .GlobalEnv)
names(data4)
# Exploratory sanity checks on the grouping variables (not used later).
measures<-data4[,4:85]
by1<-data4[,1]
by2<-data4[,2]
by3<-data4[,3]
length(by1) #10299
length(by2)
length(by3)
unique(by1) # subject ids 1-30
unique(by2)
unique(by3)
# [1] "WALKING" "WALKING_UPSTAIRS"
# [3] "WALKING_DOWNSTAIRS" "SITTING"
# [5] "STANDING" "LAYING"
# aggregate() output was unsatisfying; a tapply loop over the 82 measurement
# columns was considered and abandoned in favour of reshape2 melt/dcast:
# data5<-data.structure()
# for(i in 4:85){
# data5[,i]<-tapply(data4[,i],INDEX=list(by1,by2),FUN='mean',simplify=TRUE)
# }
library(reshape2)
library(plyr)
# Melt to long form: one row per (subject, activity, variable, value).
data4melt <- melt(data4,id.vars=c("subjectid","actcode","activity"),
variable.name = "measurement_variables",
value.name = "measurement_value")
str(data4melt)
# 'data.frame': 844518 obs. of 5 variables:
# $ subjectid : int 1 1 1 1 1 1 1 1 1 1 ...
# $ actcode : int 1 1 1 1 1 1 1 1 1 1 ...
# $ activity : chr "WALKING" "WALKING" "WALKING" "WALKING" ...
# $ measurement_variables: Factor w/ 82 levels "tbodyaccmeanx",..: 1 1 1 1 1 1 1 1 1 1 ...
# $ measurement_value : num 0.289 0.278 0.28 0.279 0.277 ...
dim(data4melt)
#[1] 844518 5 -- long form (10299 rows x 82 measurement columns)
# Cast back to wide form, averaging each variable per subject+activity.
data5<-dcast(data4melt, subjectid+actcode+activity~measurement_variables,fun.aggregate=mean)
dim(data5)
# NOTE(review): the captured output "[1] 35 85" looks like it came from a
# partial run; with all 30 subjects x 6 activities the expected shape is
# 180 x 85. Verify on a full run.
# data5 is the "Tidy Data Set".
save(data5,file= "./data5.RData")
load("data5.RData", .GlobalEnv)
names(data5)
str(data5)
# Submission requires a txt file written with write.table(row.names=FALSE).
write.table(data5,file= "./data5.txt",row.names=FALSE)
|
##### JF EM DADOS - Coronavirus cases and deaths ######
# Metadata source, Juiz de Fora city hall:
# https://covid19.pjf.mg.gov.br/arquivos/obitos_covid19_jf.pdf
# Author: Marcello Filgueiras
library(tidyverse)
# This is the per-death "metadata" base for COVID deaths in Juiz de Fora.
# The city only publishes totals in XLS; the case-by-case records live in
# this PDF, one death per line. It is far from tidy:
# - line separators vary between ";", "." and nothing at all;
# - within a line, column separators vary between ".", "," and a space.
# -------------------- IMPORTING ----------------------------------------------------
library(pdftools)
url <- "https://covid19.pjf.mg.gov.br/arquivos/obitos_covid19_jf.pdf"
# One row per PDF page: the page text plus its page number (pag is used later
# to impute missing years).
obitos_diarios_pjf_raw <- tibble(
texto= pdftools::pdf_text(url),
pag= 1:pdf_length(url))
# To inspect a page interactively, pipe through pull() (optionally slice()):
obitos_diarios_pjf_raw %>% #slice(1) %>%
pull(texto)
# Separating lines --------------------------------------------------------
# Each record begins with a sequence number: "1.", "2.", ..., "100." ...
# Splitting on "\n" alone breaks long records that wrap onto a second line,
# and "\d+\." alone also matches dot-separated dates inside sentences.
# Splitting on newline + digits + "." only matches the start of a new record.
# NOTE(review): the comment in the original mentioned "\r\n\d+\." but the
# code uses "\n\d+\. " (with a trailing space); the code's pattern is what
# actually runs.
obitos_diarios_pjf_separado<- obitos_diarios_pjf_raw %>%
mutate(texto= str_split(texto, "\\n\\d+\\. "))%>%
unnest(texto)
# The full frame was printed to check for duplicated records, e.g.:
#obitos_diarios_pjf_separado %>%# slice(1000:1578) %>%
#pull(texto)
# -------------- TIDYING ----------------------------------------------------
# Each record should yield 4 columns: gender, age, date of death, comorbidities.
# All four are messy:
# - gender: six spellings ("idoso", "idosa", "masculino", "feminino",
#   "homem", "mulher");
# - age: "dd anos";
# - date: dd/mm/yyyy, dd/mm/yy, dd/mm, "dd do <month>" and even
#   "last Sunday, 22";
# - comorbidities: "Sem Comorbidade Relatada"/NA when absent, otherwise a
#   free-text list after "Comorbidade:".
# Field ORDER also varies between records (sometimes date before
# comorbidities, sometimes after; one record even has five fields), so a
# plain separate() on a delimiter cannot work. Instead each variable is
# pulled out of the raw text with str_extract().
# NOTE(review): the statement order inside this mutate() is significant --
# later comorbidade steps consume the output of earlier ones. Do not reorder.
obitos_diarios_pjf_clean<- obitos_diarios_pjf_separado %>%
# Strip the leading record number ("NN."); record 1304 lacked its dot and
# is removed explicitly. Lowercase everything for uniform matching.
mutate(texto=str_remove(texto, "^\\d+\\.|1304") %>%
str_to_lower(),
# Collapse PDF line-wrap residue and repeated whitespace.
texto= str_squish(texto)) %>%
#str_remove_all(texto, "\\r\\n") %>%
# str_squish(texto) %>%
# Gender always comes first, so a separate() works for it.
tidyr::separate(col=texto,
into= c("genero", "texto"),
sep = ",|\\.| ",
extra= "merge")%>%
# A second separate() for age failed (extra columns; stillborn records),
# hence str_extract below instead:
#mutate(texto= str_trim(texto)) %>%
#tidyr::separate(col=texto,
#into= c("idade", "texto"),
#sep = ",|\\.| ",
#extra= "merge")
# Extract age, treating stillborn/one-day-old babies as age 0.
mutate(idade = case_when(
genero== "natimorto" | str_detect(texto,"natimorto") ~ "0", # stillborn -> age 0
str_detect(texto,"01 dia") ~ "0", # one day old -> age 0
TRUE ~ str_extract(texto, "\\d+"))%>% # age is the first number in the text
as.numeric(),
# Date of death, in any of the observed formats:
data_obito = str_extract(texto, paste("\\d+/\\d+(/\\d+)*", # dd/mm, dd/mm/yy, dd/mm/yyyy
"\\d{4,4}", # ddmm with no separator
"\\d{1,2} de \\w*", # day number + month spelled out
"morreu.*\\d+", # "died last Sunday, dd"
sep = "|")),
# Comorbidities: everything after "comorbidade(s)" or "fator de risco",
# or the literal "sem comorbidade" marker.
comorbidade= case_when(
str_detect(texto,"sem comorbidade") ~ "sem comorbidade", # explicitly none
TRUE ~ str_extract(texto, "comorbidade(s)*.+|fator de risco(s)*.+")),
# Some records list comorbidities BEFORE the death sentence; drop
# everything from "obito" onward so only the disease list remains.
comorbidade = str_remove(comorbidade, paste("[oó]bito.+" , # trailing death sentence
"óbito$", # bare trailing "obito"
sep= "|")),
# Strip the "comorbidade(s):" / "fator de risco:" prefixes themselves.
comorbidade = str_remove(comorbidade, "comorbidade(s)*(:|,)|comorbidade(s)*/fator de risco:|fator de risco:") %>%
str_squish(),
# False NAs: three records listed diseases without the "comorbidade:"
# marker, so the extract above missed them; recover them by hand.
comorbidade = ifelse(is.na(comorbidade),
str_extract(texto, "has, dnc, ca|dcc, dm|imunodeficiência, outra pneumopatia crônica"),
as.character(comorbidade)),
# Drop trailing punctuation left over from the line separators.
comorbidade = str_remove(comorbidade, "(\\.|,|;)$"),
# Age band, much finer than the source's "idoso"/"nao idoso".
faixa_etaria = cut(idade,
breaks = c(-1,20,40,60,80,101),
labels = c("Menos de 20", "20 a 40", "40 a 60",
"60 a 80", "Mais de 80")))
# Standardizing columns ----------------------------------------------------
# With one variable per column, the next step is to standardize the values
# within each column.
# DISEASES: the records mix abbreviations and full names (DNC = chronic
# neurological disease, DRC = chronic kidney disease, etc.); all
# abbreviations are expanded to their full Portuguese names below.
# DATES: formats include dd/mm/yy, dd/mm/yyyy, dd/mm, d/mm, dd/m, ddmm,
# "dd de <month>" and "died last Sunday, dd"; the goal is a yyyy-mm-dd Date.
# Missing months/years are later filled using the page order of the PDF.
# GENDER: two genders spelled six-plus ways; inspect with:
table(obitos_diarios_pjf_clean$genero)
# Lookup vectors used to collapse the gender spellings.
genero_feminino <- c( "feminino", "feminina", "idosa", "isosa", "mulher")
genero_masculino <- c( "homem", "idoso", "isoso", "masculino", "natimorto")
# Standardize gender and comorbidities.
obitos_diarios_pjf_tidy_gen_com <- obitos_diarios_pjf_clean %>%
# The raw text column has been fully mined; drop it.
select(!texto) %>%
# Step 1 - standardize GENDER.
mutate(genero= case_when(
genero %in% genero_feminino ~ "feminino",
genero %in% genero_masculino ~ "masculino",
TRUE ~ as.character(genero)),
# Step 2 - standardize COMORBIDITIES.
# 'comorbidade' (original) is kept alongside 'comorbidades' (modified)
# as a control column while cleaning.
# NOTE(review): this chain is strictly order-dependent (e.g. "dac" must
# be expanded before the bare "da," rule); do not reorder lines.
comorbidades= str_remove(comorbidade, "severa descompensada e não tratada|;l"),
comorbidades= str_replace_all(comorbidades, "drc\\b|irc", "doença renal crônica"),
comorbidades= str_replace_all(comorbidades, "dialítico|dialitica", "dialitico"),
comorbidades= str_replace_all(comorbidades, "dc(c)*", "doença cardiovascular crônica"),
comorbidades= str_replace_all(comorbidades, "dm|diabetes mel{1,2}itus", "diabetes"),
comorbidades= str_replace_all(comorbidades, "has|hás|hs|had\\b|hipertensão arterial$", "hipertensão arterial sistêmica"),
comorbidades= str_replace_all(comorbidades, "dpoc", "doença pulmonar obstrutiva crônica"),
comorbidades= str_replace_all(comorbidades, "\\b[cç]a\\b( de)*|paciente oncológico|doença oncológica", "câncer"),
comorbidades= str_replace_all(comorbidades, "câncer útero|útero","câncer útero"),
comorbidades= str_replace_all(comorbidades, "dnc", "doença neurológica crônica"),
comorbidades= str_replace_all(comorbidades, "dhc", "doença hepática crônica"),
comorbidades= str_replace_all(comorbidades, "outra pneumopatia", "pneumopatia"),
comorbidades= str_replace_all(comorbidades, "iam", "infarto agudo do miocárdio"),
comorbidades= str_replace_all(comorbidades, "\\bave", "acidente vascular encefálico"),
comorbidades= str_replace_all(comorbidades, "cardiopata", "cardiopatia"),
comorbidades= str_replace_all(comorbidades, "marcapasso","marca-passo"),
comorbidades= str_replace_all(comorbidades, "imunosupressão","imunossupressão"),
comorbidades= str_replace_all(comorbidades, "\\bfa\\b", "fibrilação atrial"),
comorbidades= str_replace_all(comorbidades, "\\btu\\b", "tumor"),
comorbidades= str_replace_all(comorbidades, "\\bave\\b", "acidente vascular encefálico"),
comorbidades= str_replace_all(comorbidades, "avc", "acidente vascular cerebral"),
comorbidades= str_replace_all(comorbidades, "histórico de avc", "acidente vascular cerebral prévio"),
comorbidades= str_replace_all(comorbidades, "dvc", "doença venosa crônica"),
comorbidades= str_replace_all(comorbidades, "tep", "tromboembolismo pulmonar"),
comorbidades= str_replace_all(comorbidades, "tb( pulmonar)*", "tuberculose pulmonar"),
comorbidades= str_replace_all(comorbidades, "etilismo", "etilista"),
comorbidades= str_replace_all(comorbidades, "hpb", "hiperplasia prostática benigna"),
comorbidades= str_replace_all(comorbidades, "usuário de droga", "drogadição"),
comorbidades= str_replace_all(comorbidades, "puérpera","puerpério"),
comorbidades= str_replace_all(comorbidades, "tce","traumatismo cranioencefálico"),
comorbidades= str_replace_all(comorbidades, "imunosupressão","imunossupressão"),
comorbidades= str_replace_all(comorbidades, "pti","púrpura trombocitopênica idiopática"),
comorbidades= str_replace_all(comorbidades, "drge","doença do refluxo esofágico"),
comorbidades= str_replace_all(comorbidades, "eplepsia|convulsão|crise convulsiva","epilepsia"),
comorbidades= str_replace_all(comorbidades, "transtorno bipolar","bipolar"),
comorbidades= str_replace_all(comorbidades, "transtorno depressivo","depressão"),
comorbidades= str_replace_all(comorbidades, "síndrome demência","demência"),
comorbidades= str_replace_all(comorbidades, "transplantado","transplante"),
comorbidades= str_replace_all(comorbidades, "\\bic(c)*\\b","insuficiência cardiáca"),
comorbidades= str_replace_all(comorbidades, "dac|coronariopata|coranopata","doença arterial coronariana"),
# A separate rule for the bare "da," typo: folding it into the "dac" rule
# as "da(c)*" would stop "dac" itself from matching fully.
comorbidades= str_replace_all(comorbidades, "da,","doença arterial coronariana,"),
comorbidades= str_replace_all(comorbidades, "deficicência","deficiência"),
comorbidades= str_replace_all(comorbidades, "transplantado","transplante"),
comorbidades= str_replace_all(comorbidades, "dsp","intoxicação diarreica por molusco"),
comorbidades= str_replace_all(comorbidades, "dlp|dislepidemia","dislipidemia"),
comorbidades= str_replace_all(comorbidades,
paste("hiperplasia benigna próstata",
"hiperplasia prostática benigna",
sep= "|"),
"hiperplasia de próstata"),
# Normalize in-field delimiters: " e " (and) and "," both become "/".
comorbidades= str_replace_all(comorbidades, " e |,", "/"),
# Remove parenthesised abbreviations such as "(dcc)". A generic
# str_remove_all(comorbidades, "\\(.+\\)") was tried first but the greedy
# match ate everything BETWEEN two parenthesised groups on the same line,
# so each observed case is removed explicitly instead:
comorbidades= str_remove_all(comorbidades, paste("\\(diabetes\\)",
"\\(doença cardiovascular crônica\\)",
"\\(doença pulmonar obstrutiva crônica\\)",
"\\(hipertensão arterial sistêmica\\)",
"\\(alzheimer\\)",
"\\(doença renal crônica\\)",
"\\(doença neurológica crônica\\)",
sep= "|")),
# All comorbidities are now "/"-separated, ready for unnesting.
comorbidades = str_squish(comorbidades),
# One record had two comorbidities with no separator at all; fix by hand.
comorbidades = str_replace(comorbidades,
pattern = "doença cardiovascular crônica marca-passo",
replacement = "doença cardiovascular crônica/marca-passo"),
# str_squish does not touch spaces around slashes; normalize them here.
comorbidades= str_replace_all(comorbidades, " / | /|/ ", "/"),
# Normalize the "none reported" cases, including NA.
comorbidades = str_replace(comorbidades, "comorbidades", "sem comorbidade"),
comorbidades = tidyr::replace_na(comorbidades, "sem comorbidade"))
# Gender column is now tidy: compare the value counts before and after.
table(obitos_diarios_pjf_clean$genero)
table(obitos_diarios_pjf_tidy_gen_com$genero)
# Explode the "/"-separated comorbidity strings into one row per comorbidity
# so the standardization of that column can be inspected.
obitos_diarios_pjf_tidy_comorbidade_separado <-
  obitos_diarios_pjf_tidy_gen_com %>%
  tidyr::separate_rows(comorbidades, sep = "/")
# Comorbidities column tidy! Inspect with the table() below.
# Within reason, every spelling of the same disease now maps to one value.
# The level of specialization is still uneven: some diseases sit in broad
# groups (e.g. chronic cardiovascular disease) while others that could fold
# into those groups stay specific (heart failure, coronary artery disease).
# Since the right granularity depends on the analysis goal, the specific
# entries were deliberately NOT collapsed into their parent groups.
table(obitos_diarios_pjf_tidy_comorbidade_separado$comorbidades)
# Standardizing DATES - step 1: convert month names written out in Portuguese
# to their two-digit month numbers.
# BUG FIX: the original helper called dplyr::mutate() on a plain character
# vector, with every replacement bound to the same argument name `x=` -- it
# errored at runtime (the author noted it "unfortunately did not work").
# This version is a plain vectorized base-R replacer with the same name and
# signature, so it can also back the inline pipeline below.
#
# @param x Character vector (e.g. "15 de março"). NA elements stay NA.
# @return  `x` with every Portuguese month name replaced by its zero-padded
#          month number (e.g. "15 de março" -> "15 de 03").
nomeMes_to_nMes <- function(x){
  meses <- c("janeiro"  = "01", "fevereiro" = "02", "março"    = "03",
             "abril"    = "04", "maio"      = "05", "junho"    = "06",
             "julho"    = "07", "agosto"    = "08", "setembro" = "09",
             "outubro"  = "10", "novembro"  = "11", "dezembro" = "12")
  # fixed = TRUE: month names are literal strings, not regex patterns.
  for (nome in names(meses)) {
    x <- gsub(nome, meses[[nome]], x, fixed = TRUE)
  }
  x
}
# Inline version of the month-name conversion: replace months written out in
# Portuguese with their numbers, then normalize the remaining textual forms.
obitos_diarios_pjf_tidy_datas_extenso<- obitos_diarios_pjf_tidy_gen_com %>%
mutate(
data_obito= stringr::str_replace(data_obito, "janeiro", "01"),
data_obito=stringr::str_replace(data_obito , "fevereiro", "02"),
data_obito=stringr::str_replace(data_obito , "março", "03"),
data_obito=stringr::str_replace(data_obito , "abril", "04"),
data_obito=stringr::str_replace(data_obito , "maio", "05"),
data_obito=stringr::str_replace(data_obito , "junho", "06"),
data_obito=stringr::str_replace(data_obito , "julho", "07"),
data_obito=stringr::str_replace(data_obito , "agosto", "08"),
data_obito=stringr::str_replace(data_obito , "setembro", "09"),
data_obito=stringr::str_replace(data_obito , "outubro", "10"),
data_obito=stringr::str_replace(data_obito , "novembro", "11"),
data_obito=stringr::str_replace(data_obito , "dezembro", "12"),
# "dd de mm" -> "dd/mm"
data_obito=stringr::str_replace(data_obito , " de ", "/"),
# Remaining special cases: two-digit years, the "died last Sunday, dd"
# phrasing, and ddmm dates with no delimiter at all.
# NOTE(review): case_when stops at the first matching rule, so a value can
# get at most one of these fixes per pass.
data_obito= case_when(
str_detect(data_obito, "/21\\b") ~ str_replace(data_obito,"/21\\b","/2021"),
str_detect(data_obito, "/20\\b") ~ str_replace(data_obito,"/20\\b","/2020"),
str_detect(data_obito, "morreu") ~ str_extract(data_obito, "\\d+"),
# Four leading digits with no delimiter ("ddmm"): rebuild as "dd / mm"
# by pasting the first two digits and the last two with a slash.
str_detect(data_obito, "^\\d{4,4}") ~ paste(str_extract(data_obito, "\\d{2,2}"),"/",str_extract(data_obito, "\\d{2,2}\\b")),
TRUE ~ data_obito))
# Dates - step 2: handle dates with missing digits (d/mm/yyyy, dd/m/yyyy) and
# missing months/years. The date is split into three columns (dia, mes, ano)
# to simplify the cleaning; they are re-united afterwards.
# Missing parts are imputed from the PDF page number (pag): the document is
# ordered by day, so a record's page pins down its month/year.
# (Author note: RStudio had trouble running this paragraph with Ctrl+Enter;
# select the whole block before running.)
obitos_diarios_pjf_tidy_datas_numero<- obitos_diarios_pjf_tidy_datas_extenso %>%
mutate( data_obito = str_split(data_obito, "/")) %>%
unnest_wider(data_obito) %>%
rename("dia"= "...1", "mes"= "...2", "ano" = "...3" ) %>%
# Zero-pad single-digit days and months (across() was tried and abandoned).
mutate(dia = case_when(
str_length(dia) == 1 ~ paste(0, dia, sep= ""),
TRUE ~ as.character(dia)),
mes = case_when(
str_length(mes) == 1 ~ paste(0, mes, sep= ""),
is.na(mes) & pag == 9 ~ "11", # page 9 records are November
TRUE ~ as.character(mes)),
mes= str_replace(mes,"00","01"),
ano = case_when( # impute missing years from the page number + month
pag == 31 & dia == 23 ~ str_replace(ano, "2021","2020"), # one record dated 23/12/2021 by mistake
ano == "2020" ~ as.character(ano), # already complete -- case_when order matters
ano == "2021" ~ as.character(ano), # already complete -- case_when order matters
pag <= 13 ~ "2020", # everything up to page 13 is 2020
pag >= 24 ~ "2021", # everything from page 24 on is 2021
pag >= 14 & pag <= 23 & str_detect(mes, "12") ~ "2020", # pages 14-23, December -> 2020
pag >= 14 & pag <= 23 & str_detect(mes, "01|02|03") ~ "2021" # pages 14-23, Jan-Mar -> 2021
))
# Final assembly: re-unite dia/mes/ano, parse as a Date, and drop the
# comorbidity control column.
obitos_diarios_pjf_tidy <- obitos_diarios_pjf_tidy_datas_numero %>%
  # Trim stray whitespace column by column (the across() form behaved
  # inconsistently for the author; explicit calls are unambiguous).
  mutate(dia = str_squish(dia),
         mes = str_squish(mes),
         ano = str_squish(ano)) %>%
  # Collapse the three parts into "dd-mm-yyyy" ...
  unite("data_obito", dia:ano, sep = "-") %>%
  # ... and parse into a proper Date object.
  mutate(data_obito = lubridate::dmy(data_obito)) %>%
  select(!comorbidade) %>% # drop the unstandardized control column
  as_tibble()
# We have a tidy table!
# - an rds/xlsx-ready base instead of a hard-to-work-with PDF;
# - one variable per column: genero, idade, data_obito, comorbidades;
# - each variable standardized: two gender values instead of six spellings;
#   comorbidities under full names (not insider abbreviations), separated by
#   a delimiter ready for unnesting;
# - idade and pag numeric, data_obito a Date -- tidy as Mr Hadley asks.
# Filters and classifications ----------------------------------------------------
# Age-band column (same breaks used during cleaning).
obitos_diarios_pjf_fx_etaria <- obitos_diarios_pjf_tidy %>%
  mutate(faixa_etaria = cut(idade,
                            breaks = c(-1,20,40,60,80,101),
                            labels = c("Menos de 20", "20 a 40", "40 a 60",
                                       "60 a 80", "Mais de 80")))
# Monthly death counts split at age 60 (over/under), one column per band.
obito_60 <- obitos_diarios_pjf_fx_etaria %>%
  mutate(sessenta_anos = case_when(idade %in% c(0:60) ~ "Menos de 60",
                                   idade %in% c(61:120) ~ "Mais de 60")) %>%
  mutate(ano_mes = str_sub(data_obito, 1, 7)) %>%   # "yyyy-mm" of the Date
  count(ano_mes, sessenta_anos) %>%
  # BUG FIX: id_cols used to also select `sessenta_anos`, which is the
  # names_from column; recent tidyr versions reject that. Only `ano_mes`
  # identifies a row of the wide table.
  pivot_wider(id_cols = ano_mes,
              names_from = sessenta_anos,
              values_from = n)
# BUG FIX: the export used to be the final step of the pipeline above, so
# `obito_60` was assigned the file path returned by rio::export() rather
# than the table itself. Keep the side effect out of the assignment.
rio::export(obito_60, "obito60.csv")
getwd()
# Death counts per comorbidity (one row per comorbidity mention).
numero_de_comorbidades<- obitos_diarios_pjf_tidy_comorbidade_separado %>%
  count(comorbidades)%>%
  arrange(desc(n))%>%
  mutate(total_de_obitos= nrow(obitos_diarios_pjf_tidy),   # denominator: total deaths, not mentions
         percentual = n/total_de_obitos,
         # BUG FIX: was 1:nrow(numero_de_comorbidades), which references the
         # object while it is still being built (it does not exist yet).
         posicao= row_number(),
         dez_mais= case_when(
           posicao <= 10 ~ comorbidades,            # keep the ten most frequent
           posicao >= 11 ~ "Outras Comorbidades"))  # lump the rest
# Summary: the ten most frequent comorbidities plus an "others" bucket.
numero_de_comorbidades %>%
  group_by(dez_mais)%>%
  tally(n)%>%
  # BUG FIX: was arrange(desc(como)) -- `como` is undefined; tally() names
  # its output column `n`.
  arrange(desc(n))
writexl::write_xlsx(numero_de_comorbidades,
                    path= "municipal/dados_diarios/numero_de_comorbidades.xlsx")
# (A stray top-level `slice(1:10)` was removed here: called without data it
# can only error.)
numero_de_comorbidades%>%
  slice(1:10) %>%
  ggplot(aes(y=comorbidades, x= percentual)) + geom_col()
# "Models" ----------------------------------------------------
# Deaths per month grouped by age band (wide: one column per band).
obitos_diarios_pjf_total_por_mes<- obitos_diarios_pjf_tidy_datas_numero %>%
mutate(faixa_etaria = cut(idade,
breaks = c(-1,20,40,60,80,101),
labels = c("Menos de 20", "20 a 40", "40 a 60",
"60 a 80", "Mais de 80")))%>%
mutate(across(c(dia:ano), str_squish)) %>%
group_by(ano, faixa_etaria) %>%
count(mes)%>%
# Rebuild a month-level Date for ordering and plotting.
mutate(mes_ano= paste(mes,ano,sep= "-") %>%
lubridate::my()) %>%
arrange(mes_ano)%>%
#group_by(mes_ano)%>%
pivot_wider(names_from = faixa_etaria, values_from= n)
# Deaths per day with 7- and 14-day trailing moving averages.
# as_tsibble(index = data_obito) makes the series explicitly daily;
# slide_index_dbl averages over calendar days (.before = 6 -> 7-day window),
# so gaps in the calendar are handled correctly.
obitos_diarios_pjf_total<- obitos_diarios_pjf_fx_etaria %>%
# group_by(data_obito) %>%
count(data_obito)%>%
tsibble::as_tsibble(index=data_obito)%>%
mutate(media_movel_mortes_7dias= slider::slide_index_dbl(.i = data_obito,
.x = n,
.f = mean,
.before = 6),
media_movel_mortes_14dias = slider::slide_index_dbl(.i = data_obito,
.x = n,
.f = mean,
.before = 13))%>%
arrange(data_obito)
# Visualização ----------------------------------------------------
#Mortes por Mês Por Faixa Etária
obitos_diarios_pjf_total_por_mes %>%
ggplot(aes(x=mes_ano, y= n, fill=faixa_etaria)) + geom_col ( colour= "black") +
labs(title = "Mortes por Covid em Juiz de Fora por Faixa Etária - PJF",
subtitle = "Percentual de mortes por Cada Faixa Etária em cada mês",
caption= "Fonte: Prefeitura de Juiz de Fora - Elaboração do Gráfico e Faxina de Dados: JF em Dados") +
scale_y_continuous(name = "Nº de Mortes") + xlab(label= "Data da Ocorrência do Óbito") +
theme_classic() + theme( plot.title = element_text(size=18, face="bold" ))
obitos_diarios_pjf_total_por_mes %>%
ggplot(aes(x=mes_ano, y= n, fill=faixa_etaria)) + geom_area(position = "fill", colour= "black") +
labs(title = "Mortes por Covid em Juiz de Fora por Faixa Etária - PJF",
subtitle = "Percentual de mortes por Cada Faixa Etária em cada mês",
caption= "Fonte: Prefeitura de Juiz de Fora - Elaboração do Gráfico e Faxina de Dados: JF em Dados") +
scale_y_continuous(name = "Porcentagem de mortes por mês") + xlab(label= "Data da Ocorrência do Óbito") +
theme_classic() + theme( plot.title = element_text(size=18, face="bold" ))
#Mortes Diárias com Média Móvel
# Daily death bars with the 7-day (red) and 14-day (blue) rolling means on top.
# The x aesthetic is inherited by both line layers; layer order keeps the
# lines drawn over the columns, exactly as before.
obitos_diarios_pjf_total %>%
  ggplot(aes(x = data_obito, y = n)) +
  geom_col() +
  geom_line(aes(y = media_movel_mortes_7dias),
            colour = "red", show.legend = TRUE, size = 1.2, alpha = 0.8) +
  geom_line(aes(y = media_movel_mortes_14dias),
            colour = "blue", show.legend = TRUE, size = 1.2, alpha = 0.8) +
  labs(title = "Nº Diário de Mortes por Coronavírus em Juiz de Fora - PJF",
       subtitle = "Em Vermelho, média movel dos últimos 7 dias. Em Azul, média móvel dos últimos 14 dias.",
       caption = "Fonte: Prefeitura de Juiz de Fora - Elaboração do Gráfico e Faxina de Dados: JF em Dados") +
  scale_y_continuous(name = "Nº de Mortes") +
  xlab(label = "Data da Ocorrência do Óbito") +
  theme_classic() +
  theme(plot.title = element_text(size = 18, face = "bold"))
# Exportação ----------------------------------------------------
# Persist the monthly (wide) table as CSV and XLSX, and the daily
# moving-average table as XLSX. file.path() builds the same "/"-separated
# relative paths as the original string literals.
rio::export(
  obitos_diarios_pjf_total_por_mes,
  file = file.path("municipal", "dados_diarios", "obitos_diarios_pjf_fx_etaria.csv")
)
writexl::write_xlsx(
  obitos_diarios_pjf_total_por_mes,
  path = file.path("municipal", "dados_diarios", "obitos_diarios_fx_etaria.xlsx")
)
writexl::write_xlsx(
  obitos_diarios_pjf_total,
  path = file.path("municipal", "dados_diarios", "obitos_diarios_media_movel.xlsx")
)
| /dados_diarios/dados_diarios_pjf.r | no_license | jfemdados/covid19_pjf | R | false | false | 28,744 | r | ##### JF EM DADOS - Coronavírus Casos e Óbitos ######
#Metadados prefeitura de Juiz de Fora - https://covid19.pjf.mg.gov.br/arquivos/obitos_covid19_jf.pdf
# Autor: Marcello Filgueiras
library(tidyverse)
#Trata-se da base de "metadados" dos óbitos de covid em Juiz de Fora.
# Só há divulgação total de mortos em XLS, e não mostram a evolução deles, apenas o último número. Os dados permenorizados estão nesse pdf.
# Não se trata de uma base tidy. Como será demonstrado ao longo do código.
#A primeira questão é o formato em PDF, que dificulta o trabalho.
# Cada linha é um caso.
# A estrutura parece de um csv, mas está longe disso.
# O separador de cada linha as vezes é um ";" "." ou até em alguns casos casos, nada "".
# Dentro de cada linha, o separador de colunas as vezes é ".", "," ou apenas um espaço.
# -------------------- IMPORTING ----------------------------------------------------
library(pdftools)
url <- "https://covid19.pjf.mg.gov.br/arquivos/obitos_covid19_jf.pdf"
obitos_diarios_pjf_raw <- tibble(
texto= pdftools::pdf_text(url),
pag= 1:pdf_length(url))
#Se quiser verificar como estava cada página, é possível verificar por esse table.
# colquei o # para possiblitar o Ctrl + Shift + Enter
obitos_diarios_pjf_raw %>% #slice(1) %>%
pull(texto)
# Separando Linhas --------------------------------------------------------
#No documento inicial, cada linha deveria ser um paciente. Foi feito apenas uma separação com "\\n" no início.
#Mas linhas muito grandes que passam da margem eram divididas em duas linhas, representando duas linhas do mesmo caso.
#Assim optei pela separação de cada linha pelos números iniciais, que são "1.", "2.", "3.", "100." ...
#Mas "d+\\." também pegava também datas que estavam separadas por pontos nas frases
#por isso, optei pela divisão de cada caso por "\\r\\n\\d+\\."
#Ela sempre pega o Enter de uma nova linha, que se for um novo óbito e terá logo após números e um ponto.
#Separando Cada Linha do PDF que corresponde a um caso
obitos_diarios_pjf_separado<- obitos_diarios_pjf_raw %>%
mutate(texto= str_split(texto, "\\n\\d+\\. "))%>%
unnest(texto)
#Aqui printei todo o DF para conferir se não havia duplicidade de linhas
# Novamente, para verificar rodar o código abaixo.
# colquei o # para possiblitar o Ctrl + Shift + Enter
#obitos_diarios_pjf_separado %>%# slice(1000:1578) %>%
#pull(texto)
# -------------- TIDYING ----------------------------------------------------
# Em tese teríamos 4 colunas, c( "genero", "idade", "data_do_obito" e "comorbidades")
#Temos problemas em todas essas colunas:
#gênero: Não só dois, temos 6 gêneros: "idoso", "idosa", "masculino", "feminino", "homem" e "mulher"
#idade: "dd anos"
#data: temos dd/mm/aaaa, dd/mm/aa, dd/mm, "dd do mes" e incríveis "ultimo domingo, 22"
#comorbidade: Se não tem nenhuma, está "Sem Comorbidade Relatada" ou NA. Se tem, começa com "Comorbidade: nome da doença..."
# mas simplesmente separar essas colunas pelo delimitador não é possível.
# Ora estamos com a ordem de c( "genero", "idade", "data_do_obito" e "comorbidades")
# ora estamos com a ordem de c( "genero", "idade", "data_do_obito" e "comorbidades")
# Temos inclusive um caso de 5 colunas: se_é_idoso, genero, idade, data_do_obito e comorbidades
# como "idoso, masculino, 88 anos. óbito em 05/07/2020. comorbidades:dcc, etc."
# Portanto, como primeiro passo para transformar isso em uma tabela tidy:
# Vamos separar uma variável por coluna, buscando cada uma dessas informações com str_extract.
# Separando Colunas -------------------------------------------------------
obitos_diarios_pjf_clean<- obitos_diarios_pjf_separado %>%
#Limpando algarismos que começam no inicio do texto e são seguidos por "." . "1304 não tinha ponto e ficou para trás;
mutate(texto=str_remove(texto, "^\\d+\\.|1304") %>%
str_to_lower(),
#Retirando alguns resíduos do PDF que restaram na linha separada
texto= str_squish(texto)) %>%
#str_remove_all(texto, "\\r\\n") %>%
# str_squish(texto) %>%
#o gênero está na ordem, então aqui um separate funciona.
tidyr::separate(col=texto,
into= c("genero", "texto"),
sep = ",|\\.| ",
extra= "merge")%>%
# Separando idade. Vai dar problema com uma coluna a mais e casos de natimortos.
#mutate(texto= str_trim(texto)) %>%
#tidyr::separate(col=texto,
#into= c("idade", "texto"),
#sep = ",|\\.| ",
#extra= "merge")
# Portanto,Buscando Idade com str_extract e padronizando a idade de bebês;
mutate(idade = case_when(
genero== "natimorto" | str_detect(texto,"natimorto") ~ "0", # ( idade de bebês = 0 ) Triste =/
str_detect(texto,"01 dia") ~ "0", # (idade de bebês = 0) =/ Triste
TRUE ~ str_extract(texto, "\\d+"))%>% #Extraí os primeiros algarismos. Como idade está na frente, não há problema
as.numeric(),
#buscando data do obito em varios formatos
data_obito = str_extract(texto, paste("\\d+/\\d+(/\\d+)*", #dd/mm e dd/mm/yy e dd/mm/yyyy
"\\d{4,4}", #ddmm (sem separador)
"\\d{1,2} de \\w*", #dia em número e mês por extenso
"morreu.*\\d+", #"morreu no último domingo,dd"
sep = "|")),
#Buscando comorbidades, buscando tudo que vem depois de "comorbidades" ou "fator de risco"
comorbidade= case_when(
str_detect(texto,"sem comorbidade") ~ "sem comorbidade", #buscando o que nao tem comorbidade
TRUE ~ str_extract(texto, "comorbidade(s)*.+|fator de risco(s)*.+")),
#Em algumas linhas, a ordem é "data_do_obito","comorbidades". Essa regex acima, nessas linhasm pega somente as comorbidades portanto.
#Entretanto, algumas linhas estão na ordem "comorbidades" e depois "óbito".
comorbidade = str_remove(comorbidade, paste("[oó]bito.+" , #nesses casos, mandei tirar tudo que vem depois de óbito,
"óbito$", #para ficar somente comoribdadescomorbidades
sep= "|")),
comorbidade = str_remove(comorbidade, "comorbidade(s)*(:|,)|comorbidade(s)*/fator de risco:|fator de risco:") %>% #tendo certeza que tudo vem depois é comorbidade, mandei tirar as iniciais de comorbidade
str_squish(),
#Lidandos Falsos NA. Não indicavam o separador "comorbidade:". Vieram somente a doença.
# Por isso vieram nulos, mas não eram NAs.
#Fiz na Mão
comorbidade = ifelse(is.na(comorbidade),
str_extract(texto, "has, dnc, ca|dcc, dm|imunodeficiência, outra pneumopatia crônica"),
as.character(comorbidade)),
#Retirando finais de linhas que ainda continham separadores
comorbidade = str_remove(comorbidade, "(\\.|,|;)$"),
#Por fim, criando a coluna faixa etária, muito mais delimitada que "idoso" ou "não idoso"
faixa_etaria = cut(idade,
breaks = c(-1,20,40,60,80,101),
labels = c("Menos de 20", "20 a 40", "40 a 60",
"60 a 80", "Mais de 80")))
# Padronizando Colunas ----------------------------------------------------
# De posse de uma variável por coluna, o próximo passo é padronizar a forma com que elas aparecem ao longo das coluna
# Nas DOENÇAS, temos várias abreviações e vários nomes completos
#Sigla DNC é Doença Neural Crônica, DRC é Doença Renal Crônica etc.
# As siglas foram todas passadas para seus nomes completos para serem mais facilmente entendidos.
# No campo das DATAS, temos vários padrões:
# Em números, temos "dd/mm/yy", "dd/mm/yyyy", "dd/mm", "d/mm", "dd/m" e "ddmm".
# Por extenso, "dd de mes por extenso" e incríveis "morreu no último domingo, dd".
# A meta é passar tudo para yyyy-mm-dd como objeto de data.
# nos números faltantes sem meses e sem anos, como a base é organizad por dia,
# a linha de cima provavelmente é o mesmo mês do registro. Nesse caso, farei um fill("down")
# para lidar com as faltas
# no Gênero, temos dois gêneros com várias caligrafias diferentes para masculino e feminino, que você pode ver pela função abaixo.
table(obitos_diarios_pjf_clean$genero)
# Assim, esses dois vetores foram criados para usar na hora de padronizar as colunas de genero
genero_feminino <- c( "feminino", "feminina", "idosa", "isosa", "mulher")
genero_masculino <- c( "homem", "idoso", "isoso", "masculino", "natimorto")
# Começando os trabalhos de padronização de variáveis
#Padronizando Gênero e Comorbidades
obitos_diarios_pjf_tidy_gen_com <- obitos_diarios_pjf_clean %>%
#Com as variáveis já separadas, podemos retirar a coluna original do texto.
select(!texto) %>%
#1º Passo - Padronizando GÊNERO
mutate(genero= case_when(
genero %in% genero_feminino ~ "feminino",
genero %in% genero_masculino ~ "masculino",
TRUE ~ as.character(genero)),
#2º Passo - Padronizando COMORBIDADES.
#Criei coluna comorbidade (original) e comorbidades (modificada) para ter controle do que fazia
#Acho que as colunas são autoexplicativas de qual conteúdo da mesma variável doença, despadronizada, passada doença, padronizada
comorbidades= str_remove(comorbidade, "severa descompensada e não tratada|;l"),
comorbidades= str_replace_all(comorbidades, "drc\\b|irc", "doença renal crônica"),
comorbidades= str_replace_all(comorbidades, "dialítico|dialitica", "dialitico"),
comorbidades= str_replace_all(comorbidades, "dc(c)*", "doença cardiovascular crônica"),
comorbidades= str_replace_all(comorbidades, "dm|diabetes mel{1,2}itus", "diabetes"),
comorbidades= str_replace_all(comorbidades, "has|hás|hs|had\\b|hipertensão arterial$", "hipertensão arterial sistêmica"),
comorbidades= str_replace_all(comorbidades, "dpoc", "doença pulmonar obstrutiva crônica"),
comorbidades= str_replace_all(comorbidades, "\\b[cç]a\\b( de)*|paciente oncológico|doença oncológica", "câncer"),
comorbidades= str_replace_all(comorbidades, "câncer útero|útero","câncer útero"),
comorbidades= str_replace_all(comorbidades, "dnc", "doença neurológica crônica"),
comorbidades= str_replace_all(comorbidades, "dhc", "doença hepática crônica"),
comorbidades= str_replace_all(comorbidades, "outra pneumopatia", "pneumopatia"),
comorbidades= str_replace_all(comorbidades, "iam", "infarto agudo do miocárdio"),
comorbidades= str_replace_all(comorbidades, "\\bave", "acidente vascular encefálico"),
comorbidades= str_replace_all(comorbidades, "cardiopata", "cardiopatia"),
comorbidades= str_replace_all(comorbidades, "marcapasso","marca-passo"),
comorbidades= str_replace_all(comorbidades, "imunosupressão","imunossupressão"),
comorbidades= str_replace_all(comorbidades, "\\bfa\\b", "fibrilação atrial"),
comorbidades= str_replace_all(comorbidades, "\\btu\\b", "tumor"),
comorbidades= str_replace_all(comorbidades, "\\bave\\b", "acidente vascular encefálico"),
comorbidades= str_replace_all(comorbidades, "avc", "acidente vascular cerebral"),
comorbidades= str_replace_all(comorbidades, "histórico de avc", "acidente vascular cerebral prévio"),
comorbidades= str_replace_all(comorbidades, "dvc", "doença venosa crônica"),
comorbidades= str_replace_all(comorbidades, "tep", "tromboembolismo pulmonar"),
comorbidades= str_replace_all(comorbidades, "tb( pulmonar)*", "tuberculose pulmonar"),
comorbidades= str_replace_all(comorbidades, "etilismo", "etilista"),
comorbidades= str_replace_all(comorbidades, "hpb", "hiperplasia prostática benigna"),
comorbidades= str_replace_all(comorbidades, "usuário de droga", "drogadição"),
comorbidades= str_replace_all(comorbidades, "puérpera","puerpério"),
comorbidades= str_replace_all(comorbidades, "tce","traumatismo cranioencefálico"),
comorbidades= str_replace_all(comorbidades, "imunosupressão","imunossupressão"),
comorbidades= str_replace_all(comorbidades, "pti","púrpura trombocitopênica idiopática"),
comorbidades= str_replace_all(comorbidades, "drge","doença do refluxo esofágico"),
comorbidades= str_replace_all(comorbidades, "eplepsia|convulsão|crise convulsiva","epilepsia"),
comorbidades= str_replace_all(comorbidades, "transtorno bipolar","bipolar"),
comorbidades= str_replace_all(comorbidades, "transtorno depressivo","depressão"),
comorbidades= str_replace_all(comorbidades, "síndrome demência","demência"),
comorbidades= str_replace_all(comorbidades, "transplantado","transplante"),
comorbidades= str_replace_all(comorbidades, "\\bic(c)*\\b","insuficiência cardiáca"),
comorbidades= str_replace_all(comorbidades, "dac|coronariopata|coranopata","doença arterial coronariana"),
comorbidades= str_replace_all(comorbidades, "da,","doença arterial coronariana,"),#se colocasse da(c)* na linha acima, quando fosse pegar dac, só pegaria dac, por isso optei por uma nova linha
comorbidades= str_replace_all(comorbidades, "deficicência","deficiência"),
comorbidades= str_replace_all(comorbidades, "transplantado","transplante"),
comorbidades= str_replace_all(comorbidades, "dsp","intoxicação diarreica por molusco"),
comorbidades= str_replace_all(comorbidades, "dlp|dislepidemia","dislipidemia"),
comorbidades= str_replace_all(comorbidades,
paste("hiperplasia benigna próstata",
"hiperplasia prostática benigna",
sep= "|"),
"hiperplasia de próstata"),
#arrumando delimitadores dentro da coluna comorbidades " e " e "," por "/"
comorbidades= str_replace_all(comorbidades, " e |,", "/"),
#a intenção era retirar tudo que estava dentro de parentêses geralmente a "sigla(explicação da sigla)"
# mas comorbidades= str_remove_all(comorbidades, "\\(.+\\)"), quando havia mais de dois parenteses por linhas, removia tudo entre elas.
# ex: "doença cardiovascular crônica (dcc), diabetes melitus (dm)" retivara não só cada grupo desse, mas tudo como uma regex "(dcc.*dm)"
# Assim, fiz na mão, retirando cada um dos casos entre parênteses que apareceram:
comorbidades= str_remove_all(comorbidades, paste("\\(diabetes\\)",
"\\(doença cardiovascular crônica\\)",
"\\(doença pulmonar obstrutiva crônica\\)",
"\\(hipertensão arterial sistêmica\\)",
"\\(alzheimer\\)",
"\\(doença renal crônica\\)",
"\\(doença neurológica crônica\\)",
sep= "|")),
#Todas as Comorbidades estão separadas por "," prontas para sofrerem unnest. Finalizando, retirando espaços.
comorbidades = str_squish(comorbidades),
#um caso em que não havia separado nenhum, foi necessário colocar na mão
comorbidades = str_replace(comorbidades,
pattern = "doença cardiovascular crônica marca-passo",
replacement = "doença cardiovascular crônica/marca-passo"),
#retirando espaços depois de barras, str squish nao pegou esse caso
comorbidades= str_replace_all(comorbidades, " / | /|/ ", "/"),
#Arrumando NA e Sem Comorbidades
comorbidades = str_replace(comorbidades, "comorbidades", "sem comorbidade"),
comorbidades = tidyr::replace_na(comorbidades, "sem comorbidade"))
#Coluna Gênero Tidy! Mesma variável de uma só forma
table(obitos_diarios_pjf_clean$genero)
table(obitos_diarios_pjf_tidy_gen_com$genero)
#Dando Unnest nas comorbidades para verificar padronização dessa coluna
obitos_diarios_pjf_tidy_comorbidade_separado <- obitos_diarios_pjf_tidy_gen_com %>%
mutate(comorbidades= str_split(comorbidades, "/")) %>%
unnest(comorbidades)
# Coluna Comorbidades Tidy!
#Pode se verificar abaixo no table()
# Dentro da possibilidades, busquei colocar tudo da mesma doença com nomes diferente com nome da mesma variável.
# Porém falta padronização quanto ao nível de especialização.
# Algumas doenças estão em grandes grupos, como doença cardiovascular crônica,
# mas algumas, que poderiam estar nesses grandes grupos, estão muito especificadas como insuficiência cardíaca e doença arterial coronariana, etc...
# Como a necessidade de mais especialização ou menos varia com o objetivo, optei por não colocar todos casos mais especializados em gênero.
table(obitos_diarios_pjf_tidy_comorbidade_separado$comorbidades)
#Padronizado DATAS - 1º Passo: Passando as datas que estão escritas por extenso para número.
#Tentei fazer essa função para lidar com números por extenso, infelizmente não funcionou.
#' Replace Portuguese month names in a character vector with zero-padded
#' month numbers ("janeiro" -> "01", ..., "dezembro" -> "12").
#'
#' BUG FIX: the original body called dplyr::mutate() with no data frame, so
#' the function always errored (the author noted it "não funcionou"). This
#' version uses base R only. Like stringr::str_replace(), sub() replaces
#' only the first occurrence of each month name per element.
#'
#' @param x Character vector possibly containing month names.
#' @return `x` with each month name replaced by its two-digit number.
nomeMes_to_nMes <- function(x) {
  meses <- c(
    janeiro = "01", fevereiro = "02", "março" = "03", abril = "04",
    maio = "05", junho = "06", julho = "07", agosto = "08",
    setembro = "09", outubro = "10", novembro = "11", dezembro = "12"
  )
  for (nome in names(meses)) {
    # fixed = TRUE: plain text match, no regex surprises
    x <- sub(nome, meses[[nome]], x, fixed = TRUE)
  }
  x
}
#terá que ser por vez no meio do código. Passando as datas que estão escritas por extenso para número.
obitos_diarios_pjf_tidy_datas_extenso<- obitos_diarios_pjf_tidy_gen_com %>%
mutate(
data_obito= stringr::str_replace(data_obito, "janeiro", "01"),
data_obito=stringr::str_replace(data_obito , "fevereiro", "02"),
data_obito=stringr::str_replace(data_obito , "março", "03"),
data_obito=stringr::str_replace(data_obito , "abril", "04"),
data_obito=stringr::str_replace(data_obito , "maio", "05"),
data_obito=stringr::str_replace(data_obito , "junho", "06"),
data_obito=stringr::str_replace(data_obito , "julho", "07"),
data_obito=stringr::str_replace(data_obito , "agosto", "08"),
data_obito=stringr::str_replace(data_obito , "setembro", "09"),
data_obito=stringr::str_replace(data_obito , "outubro", "10"),
data_obito=stringr::str_replace(data_obito , "novembro", "11"),
data_obito=stringr::str_replace(data_obito , "dezembro", "12"),
data_obito=stringr::str_replace(data_obito , " de ", "/"),
# retirando o ultimo caso por extenso, de "morreu no ultimo domingo" e anos yy para yyyy
data_obito= case_when(
str_detect(data_obito, "/21\\b") ~ str_replace(data_obito,"/21\\b","/2021"),
str_detect(data_obito, "/20\\b") ~ str_replace(data_obito,"/20\\b","/2020"),
str_detect(data_obito, "morreu") ~ str_extract(data_obito, "\\d+"),
# Padronizando casos de datas ddmm sem parentes ou qualquer delimitandor entre.
#Se encontrar quatro números, contínuos, no início da coluna, sem delimitador entre eles,
# faça um paste colocando um delimitardor entre os dois primeiros e os dois últimos
str_detect(data_obito, "^\\d{4,4}") ~ paste(str_extract(data_obito, "\\d{2,2}"),"/",str_extract(data_obito, "\\d{2,2}\\b")),
TRUE ~ data_obito))
# 2º passo data - Lidando com Datas com algum algarismo faltante (d/mm/aaaa ou dd/m/aaaa)
#Separei as Datas em três colunas difrentes "dia", "mes", "ano" para facilitar na faxina.
#Com todos os numeros arrumados, juntei tudo. com unite.
#OBS: O Rstudio está com problema na hora de rodar. Ctrl+Enter só vai até metade desse parágrafo. Tem que selecionar tudo para rodar.
obitos_diarios_pjf_tidy_datas_numero<- obitos_diarios_pjf_tidy_datas_extenso %>%
mutate( data_obito = str_split(data_obito, "/")) %>%
unnest_wider(data_obito) %>%
rename("dia"= "...1", "mes"= "...2", "ano" = "...3" ) %>%
#arrumando meses e dias com apenas um algarismo (dd/m/yyyy, d/mm/yyyy e d/m/yyyy) #Tentei com across não funcionou
mutate(dia = case_when(
str_length(dia) == 1 ~ paste(0, dia, sep= ""),
TRUE ~ as.character(dia)),
mes = case_when(
str_length(mes) == 1 ~ paste(0, mes, sep= ""),
is.na(mes) & pag == 9 ~ "11",
TRUE ~ as.character(mes)),
mes= str_replace(mes,"00","01"),
ano = case_when(#arrumando anos faltantes com auxílio da pagina do documento e o mês que ela se encontra
pag == 31 & dia == 23 ~ str_replace(ano, "2021","2020"), #um caso em que a data está 23/12/2021.
ano == "2020" ~ as.character(ano), #Como a ordem importa no case_when, apenas para não acionar nos casos que já estão completos
ano == "2021" ~ as.character(ano), #Como a ordem importa no case_when, apenas para não acionar nos casos que já estão completos
pag <= 13 ~ "2020", #todas as linhas acima da página 13 são 2020
pag >= 24 ~ "2021", #todas as linhas abaixo da página 24 são 2021
pag >= 14 & pag <= 23 & str_detect(mes, "12") ~ "2020", #o que estiver entre 14 e 23 e for mês 12 é 2020
pag >= 14 & pag <= 23 & str_detect(mes, "01|02|03") ~ "2021" #o que estiver entre 14 e 23 e for mês 1,2 ou 3 é 2021
))
#Ultimos passos antes do tidy final. Unindo para transformar datas character em dates, com lubridate
# Final tidy table: collapse the dia/mes/ano parts into one Date column and
# drop the scratch `comorbidade` column (the cleaned `comorbidades` stays).
obitos_diarios_pjf_tidy <- obitos_diarios_pjf_tidy_datas_numero %>%
  mutate(across(c(dia:ano), str_squish)) %>%          # trim stray whitespace first
  unite("data_obito", dia:ano, sep = "-") %>%          # "dd-mm-yyyy" as character
  mutate(data_obito = lubridate::dmy(data_obito)) %>%  # parse into a Date object
  select(!comorbidade) %>%
  as_tibble()
# Temos uma tabela Tidy!
# Temos uma base em rds ou xlsx pronta para ser utilizada, não em pdf, que dificulta o trabalho.
# Temos uma variável por coluna "genero", "idade", "data_obito" e "comorbidades".
# Cada valor dessa variável específica está padronizado de uma só forma:
# Temos dois gêneros padronizados em masculino e feminino, não 6 palavras que diziam a mesma coisa.
# Temos todas as comorbidades em seu nome completo e não em siglas que só são reconhecíveis por quem é da área, separadas em "," para operações com unnest.
# Temos idade e páginas em numéricos,datas em formato date como Mr Hadley pede.
# Temos portanto uma base tidy!
# filtros e Classificações ----------------------------------------------------
#Obtendo Faixa Etária
# Add an age-band factor to the tidy table. The lower break of -1 keeps
# age 0 (stillborn cases, coded as 0 earlier in the script) inside
# "Menos de 20"; 101 covers the oldest observed ages.
obitos_diarios_pjf_fx_etaria <- obitos_diarios_pjf_tidy %>%
  mutate(
    faixa_etaria = cut(
      idade,
      breaks = c(-1, 20, 40, 60, 80, 101),
      labels = c("Menos de 20", "20 a 40", "40 a 60", "60 a 80", "Mais de 80")
    )
  )
#Criando mais e menos de 60 para um novo gráfico
# Monthly death counts split at age 60, pivoted wide and written to obito60.csv.
# NOTE(review): ages 0-60 fall in "Menos de 60" (60 itself included) and 61-120
# in "Mais de 60"; any age outside 0-120 becomes NA here -- confirm intended.
# NOTE(review): because the pipe ends in rio::export(), `obito_60` stores the
# export call's return value, not the summary table -- confirm this is deliberate.
obito_60 <- obitos_diarios_pjf_fx_etaria %>%
  mutate(sessenta_anos = case_when(idade %in% c(0:60) ~ "Menos de 60",
                                   idade %in% c(61:120) ~ "Mais de 60")) %>%
  mutate(ano_mes = str_sub(data_obito, 1,7)) %>%  # "yyyy-mm" key from the Date
  group_by(ano_mes, sessenta_anos) %>%
  count(sessenta_anos) %>%
  pivot_wider(id_cols = c('ano_mes', 'sessenta_anos'),
              names_from = sessenta_anos,
              values_from = n) %>%
  rio::export("obito60.csv")
getwd()
#obtendo nº de comorbidades
# Frequency table of individual comorbidities among all deaths, with each
# comorbidity's share of total deaths and a top-10 grouping label.
# BUG FIX: `posicao = 1:nrow(numero_de_comorbidades)` referenced the object
# being defined, so it only worked if a stale copy already existed in the
# session; row_number() ranks rows within the pipeline itself.
numero_de_comorbidades <- obitos_diarios_pjf_tidy_comorbidade_separado %>%
  count(comorbidades) %>%
  arrange(desc(n)) %>%
  mutate(total_de_obitos = nrow(obitos_diarios_pjf_tidy),  # denominator: all deaths
         percentual = n / total_de_obitos,
         posicao = row_number(),                           # rank after arrange(desc(n))
         dez_mais = case_when(
           posicao <= 10 ~ comorbidades,
           posicao >= 11 ~ "Outras Comorbidades"))
# Total deaths for each of the ten most frequent comorbidities, with the
# rest lumped into "Outras Comorbidades".
# BUG FIX: after tally() the count column is named `n`; the original
# arrange(desc(como)) referenced a non-existent column and errored.
numero_de_comorbidades %>%
  group_by(dez_mais) %>%
  tally(n) %>%
  arrange(desc(n))
# Export the comorbidity table and plot the ten most frequent comorbidities
# by their share of total deaths.
writexl::write_xlsx(numero_de_comorbidades,
                    path = "municipal/dados_diarios/numero_de_comorbidades.xlsx")
# BUG FIX: removed a stray standalone `slice(1:10)` left over from editing;
# called without a data argument it errors at run time.
numero_de_comorbidades %>%
  slice(1:10) %>%          # top ten comorbidities (table is sorted by n desc)
  ggplot(aes(y = comorbidades, x = percentual)) +
  geom_col()
# "Modelos" ----------------------------------------------------
# Mortes por Mês agrupado por Faixa Etária
# Monthly death counts by age band, pivoted wide (one column per band).
obitos_diarios_pjf_total_por_mes<- obitos_diarios_pjf_tidy_datas_numero %>%
  # Bucket ages; the -1 lower break keeps age 0 inside "Menos de 20".
  mutate(faixa_etaria = cut(idade,
                            breaks = c(-1,20,40,60,80,101),
                            labels = c("Menos de 20", "20 a 40", "40 a 60",
                                       "60 a 80", "Mais de 80")))%>%
  mutate(across(c(dia:ano), str_squish)) %>%  # trim whitespace in the date parts
  group_by(ano, faixa_etaria) %>%
  count(mes)%>%                               # deaths per month within year/band
  mutate(mes_ano= paste(mes,ano,sep= "-") %>%
           lubridate::my()) %>%               # parse "mm-yyyy" into a Date
  arrange(mes_ano)%>%
  #group_by(mes_ano)%>%
  pivot_wider(names_from = faixa_etaria, values_from= n)
# NOTE(review): after pivot_wider the columns `faixa_etaria` and `n` no longer
# exist; downstream plots that map them must pivot back to long format first.
# Numero de mortes Por dia com Média Móvel
# Daily death counts as a time series, with 7- and 14-day rolling means.
# as_tsibble() enforces an ordered daily index; arranging before the rolling
# means is equivalent to the original arrange-at-the-end (slide_index_dbl
# windows on the index values, so the results are identical either way).
obitos_diarios_pjf_total <- obitos_diarios_pjf_fx_etaria %>%
  count(data_obito) %>%
  tsibble::as_tsibble(index = data_obito) %>%
  arrange(data_obito) %>%
  mutate(
    media_movel_mortes_7dias = slider::slide_index_dbl(
      .i = data_obito, .x = n, .f = mean, .before = 6
    ),
    media_movel_mortes_14dias = slider::slide_index_dbl(
      .i = data_obito, .x = n, .f = mean, .before = 13
    )
  )
# Visualização ----------------------------------------------------
#Mortes por Mês Por Faixa Etária
# Monthly death counts stacked by age band.
# BUG FIX: `obitos_diarios_pjf_total_por_mes` was pivoted wide (one column per
# age band), so the `faixa_etaria` and `n` columns mapped in aes() no longer
# exist; reshape back to long format before plotting.
# BUG FIX: the subtitle was copied from the percentage (geom_area) chart;
# this chart shows counts, so it now says so.
obitos_diarios_pjf_total_por_mes %>%
  pivot_longer(cols = !c(ano, mes, mes_ano),
               names_to = "faixa_etaria", values_to = "n",
               values_drop_na = TRUE) %>%
  ggplot(aes(x = mes_ano, y = n, fill = faixa_etaria)) +
  geom_col(colour = "black") +
  labs(title = "Mortes por Covid em Juiz de Fora por Faixa Etária - PJF",
       subtitle = "Número de mortes de cada faixa etária em cada mês",
       caption = "Fonte: Prefeitura de Juiz de Fora - Elaboração do Gráfico e Faxina de Dados: JF em Dados") +
  scale_y_continuous(name = "Nº de Mortes") +
  xlab(label = "Data da Ocorrência do Óbito") +
  theme_classic() +
  theme(plot.title = element_text(size = 18, face = "bold"))
# Share of monthly deaths contributed by each age band (stacked to 100%).
# BUG FIX: `obitos_diarios_pjf_total_por_mes` was pivoted wide (one column per
# age band), so the `faixa_etaria` and `n` columns mapped in aes() no longer
# exist; reshape back to long format before plotting.
obitos_diarios_pjf_total_por_mes %>%
  pivot_longer(cols = !c(ano, mes, mes_ano),
               names_to = "faixa_etaria", values_to = "n",
               values_drop_na = TRUE) %>%
  ggplot(aes(x = mes_ano, y = n, fill = faixa_etaria)) +
  geom_area(position = "fill", colour = "black") +
  labs(title = "Mortes por Covid em Juiz de Fora por Faixa Etária - PJF",
       subtitle = "Percentual de mortes por Cada Faixa Etária em cada mês",
       caption = "Fonte: Prefeitura de Juiz de Fora - Elaboração do Gráfico e Faxina de Dados: JF em Dados") +
  scale_y_continuous(name = "Porcentagem de mortes por mês") +
  xlab(label = "Data da Ocorrência do Óbito") +
  theme_classic() +
  theme(plot.title = element_text(size = 18, face = "bold"))
#Mortes Diárias com Média Móvel
# Daily death bars with the 7-day (red) and 14-day (blue) rolling means on top.
# The x aesthetic is inherited by both line layers; layer order keeps the
# lines drawn over the columns, exactly as before.
obitos_diarios_pjf_total %>%
  ggplot(aes(x = data_obito, y = n)) +
  geom_col() +
  geom_line(aes(y = media_movel_mortes_7dias),
            colour = "red", show.legend = TRUE, size = 1.2, alpha = 0.8) +
  geom_line(aes(y = media_movel_mortes_14dias),
            colour = "blue", show.legend = TRUE, size = 1.2, alpha = 0.8) +
  labs(title = "Nº Diário de Mortes por Coronavírus em Juiz de Fora - PJF",
       subtitle = "Em Vermelho, média movel dos últimos 7 dias. Em Azul, média móvel dos últimos 14 dias.",
       caption = "Fonte: Prefeitura de Juiz de Fora - Elaboração do Gráfico e Faxina de Dados: JF em Dados") +
  scale_y_continuous(name = "Nº de Mortes") +
  xlab(label = "Data da Ocorrência do Óbito") +
  theme_classic() +
  theme(plot.title = element_text(size = 18, face = "bold"))
# Exportação ----------------------------------------------------
# Persist the monthly (wide) table as CSV and XLSX, and the daily
# moving-average table as XLSX. file.path() builds the same "/"-separated
# relative paths as the original string literals.
rio::export(
  obitos_diarios_pjf_total_por_mes,
  file = file.path("municipal", "dados_diarios", "obitos_diarios_pjf_fx_etaria.csv")
)
writexl::write_xlsx(
  obitos_diarios_pjf_total_por_mes,
  path = file.path("municipal", "dados_diarios", "obitos_diarios_fx_etaria.xlsx")
)
writexl::write_xlsx(
  obitos_diarios_pjf_total,
  path = file.path("municipal", "dados_diarios", "obitos_diarios_media_movel.xlsx")
)
|
library(here)
library(here)
library(dplyr)
library(tidyverse)
library(here)
library(dplyr)
library(tidyverse)
library(MazamaCoreUtils)
library(rnaturalearth)
library(terra)
library(leaflet)
#install.packages ("leaflet")
sf_use_s2(FALSE)
#-----
#Reading in Data
wcr <- read.csv(here("data", "WCR.csv"))
wcr <- read.csv(here("WCRpermitBiOp_allregns_all_years__7Jan2022.xlsx - WCRpermitBiOp_allregns_all_year.csv"))
#-----
#Filtering Data
wcr_rev <- wcr %>%
filter(PermitStatus == "Issued")
filter(PermitStatus == "Issued") #Issued Permits
wcr_rev1 <- wcr_rev %>%
filter(DateIssued >"2012-01-01")
filter(DateIssued >"2012-01-01") #Permits from last 10 years
wcr_rev2 <- wcr_rev1 %>%
filter(DateExpired >= Sys.Date()) #DateField >= Sys.Date() puts it to the date of the system
filter(DateExpired >= Sys.Date()) #DateField >= Sys.Date() puts it to the date of the system #Current Permits
wcr_rev3 <- wcr_rev2 %>%
filter(ResultCode == c("NMFS 10a1A Salmon","4d", "NMFS BiOp DTA", "Tribal 4d"))
filter(ResultCode == c("NMFS 10a1A Salmon","4d", "NMFS BiOp DTA", "Tribal 4d")) #Filtering for relevant permit type
#-----
#Recoding Data
wcr_rev4 <- wcr_rev3 %>%
mutate(LifeStage = recode(LifeStage,
"Smolt" = "Juvenile",
"Fry" = "Juvenile",
"Larvae" = "Juvenile",
"Subadult" = "Adult"))
"Subadult" = "Adult")) #
wcr_rev5 <- wcr_rev4 %>%
mutate(CommonName = recode(CommonName,
@@ -36,29 +45,116 @@ wcr_rev5 <- wcr_rev4 %>%
"Rockfish, Bocaccio" = "bocaccio",
"Rockfish, Yelloweye" = "yelloweye rockfish"))
#------
# Splitting up datasets
adults <- wcr_rev5 %>%
filter(LifeStage == "Adult")
filter(LifeStage == "Adult") #Adult Dataset
juvenile <- wcr_rev5 %>%
filter(LifeStage == "Juvenile")
filter(LifeStage == "Juvenile") # Juvenile Dataset
#------
#Splitting species by run
unique(wcr_spatial$Population)
wcr_spatial %>%
group_by(Population) %>%
mutate( Sp_Run = CommonName, by = Population)
#------
#Practice mapping with leaflet
states <- geojsonio::geojson_read("https://rstudio.github.io/leaflet/json/us-states.geojson", what = "sp")
m <- leaflet(states) %>%
setView(-96, 37.8, 4) %>%
addProviderTiles("MapBox", options = providerTileOptions(
id = "mapbox.light",
accessToken = Sys.getenv('MAPBOX_ACCESS_TOKEN')))
bins <- c(0, 10, 20, 50, 100, 200, 500, 1000, Inf)
pal <- colorBin("YlOrRd", domain = states$density, bins = bins)
m %>% addPolygons(fillColor = ~pal(density),
weight = 2,
opacity = 1,
color = "white",
dashArray = "3",
fillOpacity = 0.7,
highlightOptions = highlightOptions(
weight = 5,
color = "#666",
dashArray = "",
fillOpacity = 0.7,
bringToFront = TRUE))
outline <- quakes[chull(quakes$long, quakes$lat),]
map <- leaflet(quakes) %>%
# Base groups
addTiles(group = "OSM (default)") %>%
addProviderTiles(providers$Stamen.Toner, group = "Toner") %>%
addProviderTiles(providers$Stamen.TonerLite, group = "Toner Lite") %>%
# Overlay groups
addCircles(~long, ~lat, ~10^mag/5, stroke = F, group = "Quakes") %>%
addPolygons(data = outline, lng = ~long, lat = ~lat,
fill = F, weight = 2, color = "#FFFFCC", group = "Outline") %>%
# Layers control
addLayersControl(
baseGroups = c("OSM (default)", "Toner", "Toner Lite"),
overlayGroups = c("Quakes", "Outline"),
options = layersControlOptions(collapsed = FALSE)
)
map
#------
#Actual Mapping with leaflet
#outline <- wcr_spatial[chull(wcr_spatial$long, wcr_spatial$lat),]
# map1 <- leaflet(wcr_spatial) %>%
# # Base groups
# addTiles(group = "PermitStatus(default)") %>%
# addProviderTiles(providers$CommonName, group = "Species") %>%
# addProviderTiles(providers$ResultCode, group = "Permit Type") %>%
# # Overlay groups
# addCircles(~longitude, ~latitude, ~10^mag/5, stroke = F, group = "Species") %>%
# addPolygons(data = outline, lng = ~long, lat = ~lat,
# fill = F, weight = 2, color = "#FFFFCC", group = "Outline") %>%
# # Layers control
# addLayersControl(
# baseGroups = c("PermitStatus(default)", "Species", "Permit Type"),
# overlayGroups = c("Species", "Permit Type"),
# options = layersControlOptions(collapsed = FALSE)
# )
#^^ WIP DOES NOT WORK
#Following code shows leaflet map of active permits WIP !!!
wcr_leaf <- leaflet(wcr_spatial) %>%
addTiles(group = "PermitStatus(default)") %>%
setView(-96, 37.8, 4) %>%
addProviderTiles("MapBox", options = providerTileOptions(
id = "mapbox.light",
accessToken = Sys.getenv('MAPBOX_ACCESS_TOKEN')))
wcr_leaf %>% addPolygons()
#Density of active permits reflected by darker polygons
#------
#Mapping Ind Mort
st_layers(dsn = here("data//WBD_National_GDB", "WBD_National_GDB.gdb"))
st_layers(dsn = here("data", "WBD_National_GDB", "WBD_National_GDB.gdb"))
wbd.hucs <- read_sf(dsn = here("data/WBD_National_GDB", "WBD_National_GDB.gdb"), layer = "WBDHU8")
wbd.hucs <- read_sf(dsn = here("data", "WBD_National_GDB", "WBD_National_GDB.gdb"), layer = "WBDHU8")
wbd.hucs$huc8 <- as.double(wbd.hucs$huc8)
state.bound <- read_sf(here("data/cb_2018_us_state_20m", "cb_2018_us_state_20m.shp"))
state.bound <- read_sf(here("data", "cb_2018_us_state_20m", "cb_2018_us_state_20m.shp"))
wcr.bound <- state.bound %>%
filter(NAME == "Washington" | NAME == "Oregon" |
NAME == "California" | NAME == "Idaho")
wcr_spatial <- right_join(x = wbd.hucs, y = adults, by = c("huc8" = "HUCNumber")) #always have to pick adults or juveniles, never both on same page
wcr_spatialJ <-right_join(x = wbd.hucs, y = juvenile, by = c("huc8" = "HUCNumber"))
#unique(wcr_spatial$CommonName)
# all adults
#sockeye
spatial_sockeye <- wcr_spatial %>%
filter(CommonName == "sockeye salmon") %>%
filter(huc8 != c(99999999, NA))
sockeye_mort <- spatial_sockeye %>%
group_by(huc8) %>%
summarize(sum = sum(IndMort, na.rm = T))
WA.hucs <- wbd.hucs %>%
filter(states %in% "WA")
hucs <- wbd.hucs %>%
filter(states %in% c("WA", "ID", "CA", "OR", "CA,OR"))
sockeye_plot <- ggplot() +
geom_sf(data = wcr.bound, fill = "ivory") +
geom_sf(data = sockeye_mort, aes(fill = sum)) +
theme_void()
ggsave(device = "pdf", here("output", "sockeye_plot.pdf"))
#coho
spatial_coho <- wcr_spatial %>%
filter(CommonName == "coho salmon") %>%
filter(huc8 != c(99999999, NA))
coho_mort <- spatial_coho %>%
group_by(huc8) %>%
summarize(sum = sum(IndMort, na.rm = T))
coho_plot <- ggplot() +
geom_sf(data = wcr.bound, fill = "ivory") +
geom_sf(data = coho_mort, aes(fill = sum)) +
theme_void()
ggsave(device = "pdf", here("output", "coho_plot.pdf"))
#steelhead
spatial_steelhead <- wcr_spatial %>%
filter(CommonName == "steelhead") %>%
filter(huc8 != c(99999999, NA))
steelhead_mort <- spatial_steelhead %>%
group_by(huc8) %>%
summarize(sum = sum(IndMort, na.rm = T))
steelhead_plot <- ggplot() +
geom_sf(data = wcr.bound, fill = "ivory") +
geom_sf(data = steelhead_mort, aes(fill = sum)) +
theme_void()
ggsave(device = "pdf", here("output", "steelhead_plot.pdf"))
#chinook
spatial_chinook <- wcr_spatial %>%
filter(CommonName == "Chinook salmon") %>%
filter(huc8 != c(99999999, NA))
chinook_mort <- spatial_chinook %>%
group_by(huc8) %>%
summarize(sum = sum(IndMort, na.rm = T))
chinook_plot <- ggplot() +
geom_sf(data = wcr.bound, fill = "ivory") +
geom_sf(data = chinook_mort, aes(fill = sum)) +
theme_void()
ggsave(device = "pdf", here("output", "chinook_plot.pdf"))
#canary
spatial_canary <- wcr_spatial %>%
filter(CommonName == "canary rockfish") %>%
filter(huc8 != c(99999999, NA))
canary_mort <- spatial_canary %>%
group_by(huc8) %>%
summarize(sum = sum(IndMort, na.rm = T))
canary_plot <- ggplot() +
geom_sf(data = wcr.bound, fill = "ivory") +
geom_sf(data = canary_mort, aes(fill = sum)) +
theme_void()
ggsave(device = "pdf", here("output", "canary_plot.pdf"))
#boccacio
spatial_bocc <- wcr_spatial %>%
filter(CommonName == "bocaccio") %>%
filter(huc8 != c(99999999, NA))
bocc_mort <- spatial_bocc %>%
group_by(huc8) %>%
summarize(sum = sum(IndMort, na.rm = T))
bocc_plot <- ggplot() +
geom_sf(data = wcr.bound, fill = "ivory") +
geom_sf(data = bocc_mort, aes(fill = sum)) +
theme_void()
ggsave(device = "pdf", here("output", "bocc_plot.pdf"))
#yelloweye
spatial_yelloweye <- wcr_spatial %>%
filter(CommonName == "yelloweye rockfish")
#filter(huc8 != c(99999999, NA))
yelloweye_mort <- spatial_yelloweye %>%
group_by(huc8) %>%
summarize(sum = sum(IndMort, na.rm = T))
WA.hucs <- wbd.hucs %>%
filter(states %in% "WA")
yelloweye_plot <- ggplot() +
geom_sf(data = wcr.bound, fill = "ivory") +
geom_sf(data = yelloweye_mort, aes(fill = sum)) +
theme_void()
ggsave(device = "pdf", here("output", "yelloweye_plot.pdf"))
#green sturgeon
spatial_sturgeon <- wcr_spatial %>%
filter(CommonName == "green sturgeon") %>%
filter(huc8 != c(99999999, NA))
sturgeon_mort <- spatial_sturgeon %>%
group_by(huc8) %>%
summarize(sum = sum(IndMort, na.rm = T))
WA.hucs <- wbd.hucs %>%
filter(states %in% "WA")
sturgeon_plot <- ggplot() +
geom_sf(data = wcr.bound, fill = "ivory") +
geom_sf(data = sturgeon_mort, aes(fill = sum)) +
theme_void()
ggsave(device = "pdf", here("output", "sturegon_plot.pdf"))
#eulachon
spatial_eulachon <- wcr_spatial %>%
filter(CommonName == "eulachon") %>%
filter(huc8 != c(99999999, NA))
eulachon_mort <- spatial_eulachon %>%
group_by(huc8) %>%
summarize(sum = sum(IndMort, na.rm = T))
WA.hucs <- wbd.hucs %>%
filter(states %in% "WA")
eulachon_plot <- ggplot() +
geom_sf(data = wcr.bound, fill = "ivory") +
geom_sf(data = eulachon_mort, aes(fill = sum)) +
theme_void()
ggsave(device = "pdf", here("output", "eulachon_plot.pdf"))
#chum
spatial_chum <- wcr_spatial %>%
filter(CommonName == "chum salmon") %>%
filter(huc8 != c(99999999, NA))
chum_mort <- spatial_chum %>%
group_by(huc8) %>%
summarize(sum = sum(IndMort, na.rm = T))
chum_plot <- ggplot() +
geom_sf(data = wcr.bound, fill = "ivory") +
geom_sf(data = chum_mort, aes(fill = sum)) +
theme_void()
ggsave(device = "pdf", here("output", "chum_plot.pdf"))
#------
#Mapping Juveniles
#chinook j
spatial_chinookJ <- wcr_spatialJ %>%
filter(CommonName == "Chinook salmon") %>%
filter(huc8 != c(99999999, NA))
chinookJ_mort <- spatial_chinookJ %>%
group_by(huc8) %>%
summarize(sum = sum(IndMort, na.rm = T))
chinookJ_plot <- ggplot() +
geom_sf(data = wcr.bound, fill = "ivory") +
geom_sf(data = chinookJ_mort, aes(fill = sum)) +
theme_void()
ggsave(device = "pdf", here("output", "chinookJ_plot.pdf"))
#get_root_filenum <- function(file_list){
#return(unique(str_extract(file_list, "[^- | //s]+")))
#}
#us state map with different geometries of the states
#huc data
#------
# Name points / Geom Points
wa<- data.frame(long = -120.7401, lat = 47.7511, city = "Washington")
ca <- data.frame(long = -119.4179, lat = 36.7783, city = "California")
id <- data.frame(long = -114.7420, lat = 44.0682, city = "Idaho")
or <- data.frame(long =-120.5542, lat = 43.8041, city = "Oregon")
| /code/deprecated/Capstone Data.R | no_license | rory-spurr/ESAPermitsCapstone | R | false | false | 11,407 | r | library(here)
library(here)
library(dplyr)
library(tidyverse)
library(here)
library(dplyr)
library(tidyverse)
library(MazamaCoreUtils)
library(rnaturalearth)
library(terra)
library(leaflet)
#install.packages ("leaflet")
sf_use_s2(FALSE)
#-----
#Reading in Data
wcr <- read.csv(here("data", "WCR.csv"))
wcr <- read.csv(here("WCRpermitBiOp_allregns_all_years__7Jan2022.xlsx - WCRpermitBiOp_allregns_all_year.csv"))
#-----
#Filtering Data
wcr_rev <- wcr %>%
filter(PermitStatus == "Issued")
filter(PermitStatus == "Issued") #Issued Permits
wcr_rev1 <- wcr_rev %>%
filter(DateIssued >"2012-01-01")
filter(DateIssued >"2012-01-01") #Permits from last 10 years
wcr_rev2 <- wcr_rev1 %>%
filter(DateExpired >= Sys.Date()) #DateField >= Sys.Date() puts it to the date of the system
filter(DateExpired >= Sys.Date()) #DateField >= Sys.Date() puts it to the date of the system #Current Permits
wcr_rev3 <- wcr_rev2 %>%
filter(ResultCode == c("NMFS 10a1A Salmon","4d", "NMFS BiOp DTA", "Tribal 4d"))
filter(ResultCode == c("NMFS 10a1A Salmon","4d", "NMFS BiOp DTA", "Tribal 4d")) #Filtering for relevant permit type
#-----
#Recoding Data
wcr_rev4 <- wcr_rev3 %>%
mutate(LifeStage = recode(LifeStage,
"Smolt" = "Juvenile",
"Fry" = "Juvenile",
"Larvae" = "Juvenile",
"Subadult" = "Adult"))
"Subadult" = "Adult")) #
wcr_rev5 <- wcr_rev4 %>%
mutate(CommonName = recode(CommonName,
@@ -36,29 +45,116 @@ wcr_rev5 <- wcr_rev4 %>%
"Rockfish, Bocaccio" = "bocaccio",
"Rockfish, Yelloweye" = "yelloweye rockfish"))
#------
# Splitting up datasets
adults <- wcr_rev5 %>%
filter(LifeStage == "Adult")
filter(LifeStage == "Adult") #Adult Dataset
juvenile <- wcr_rev5 %>%
filter(LifeStage == "Juvenile")
filter(LifeStage == "Juvenile") # Juvenile Dataset
#------
#Splitting species by run
unique(wcr_spatial$Population)
wcr_spatial %>%
group_by(Population) %>%
mutate( Sp_Run = CommonName, by = Population)
#------
#Practice mapping with leaflet
states <- geojsonio::geojson_read("https://rstudio.github.io/leaflet/json/us-states.geojson", what = "sp")
m <- leaflet(states) %>%
setView(-96, 37.8, 4) %>%
addProviderTiles("MapBox", options = providerTileOptions(
id = "mapbox.light",
accessToken = Sys.getenv('MAPBOX_ACCESS_TOKEN')))
bins <- c(0, 10, 20, 50, 100, 200, 500, 1000, Inf)
pal <- colorBin("YlOrRd", domain = states$density, bins = bins)
m %>% addPolygons(fillColor = ~pal(density),
weight = 2,
opacity = 1,
color = "white",
dashArray = "3",
fillOpacity = 0.7,
highlightOptions = highlightOptions(
weight = 5,
color = "#666",
dashArray = "",
fillOpacity = 0.7,
bringToFront = TRUE))
outline <- quakes[chull(quakes$long, quakes$lat),]
map <- leaflet(quakes) %>%
# Base groups
addTiles(group = "OSM (default)") %>%
addProviderTiles(providers$Stamen.Toner, group = "Toner") %>%
addProviderTiles(providers$Stamen.TonerLite, group = "Toner Lite") %>%
# Overlay groups
addCircles(~long, ~lat, ~10^mag/5, stroke = F, group = "Quakes") %>%
addPolygons(data = outline, lng = ~long, lat = ~lat,
fill = F, weight = 2, color = "#FFFFCC", group = "Outline") %>%
# Layers control
addLayersControl(
baseGroups = c("OSM (default)", "Toner", "Toner Lite"),
overlayGroups = c("Quakes", "Outline"),
options = layersControlOptions(collapsed = FALSE)
)
map
#------
#Actual Mapping with leaflet
#outline <- wcr_spatial[chull(wcr_spatial$long, wcr_spatial$lat),]
# map1 <- leaflet(wcr_spatial) %>%
# # Base groups
# addTiles(group = "PermitStatus(default)") %>%
# addProviderTiles(providers$CommonName, group = "Species") %>%
# addProviderTiles(providers$ResultCode, group = "Permit Type") %>%
# # Overlay groups
# addCircles(~longitude, ~latitude, ~10^mag/5, stroke = F, group = "Species") %>%
# addPolygons(data = outline, lng = ~long, lat = ~lat,
# fill = F, weight = 2, color = "#FFFFCC", group = "Outline") %>%
# # Layers control
# addLayersControl(
# baseGroups = c("PermitStatus(default)", "Species", "Permit Type"),
# overlayGroups = c("Species", "Permit Type"),
# options = layersControlOptions(collapsed = FALSE)
# )
#^^ WIP DOES NOT WORK
#Following code shows leaflet map of active permits WIP !!!
wcr_leaf <- leaflet(wcr_spatial) %>%
addTiles(group = "PermitStatus(default)") %>%
setView(-96, 37.8, 4) %>%
addProviderTiles("MapBox", options = providerTileOptions(
id = "mapbox.light",
accessToken = Sys.getenv('MAPBOX_ACCESS_TOKEN')))
wcr_leaf %>% addPolygons()
#Density of active permits reflected by darker polygons
#------
#Mapping Ind Mort
st_layers(dsn = here("data//WBD_National_GDB", "WBD_National_GDB.gdb"))
st_layers(dsn = here("data", "WBD_National_GDB", "WBD_National_GDB.gdb"))
wbd.hucs <- read_sf(dsn = here("data/WBD_National_GDB", "WBD_National_GDB.gdb"), layer = "WBDHU8")
wbd.hucs <- read_sf(dsn = here("data", "WBD_National_GDB", "WBD_National_GDB.gdb"), layer = "WBDHU8")
wbd.hucs$huc8 <- as.double(wbd.hucs$huc8)
state.bound <- read_sf(here("data/cb_2018_us_state_20m", "cb_2018_us_state_20m.shp"))
state.bound <- read_sf(here("data", "cb_2018_us_state_20m", "cb_2018_us_state_20m.shp"))
wcr.bound <- state.bound %>%
filter(NAME == "Washington" | NAME == "Oregon" |
NAME == "California" | NAME == "Idaho")
wcr_spatial <- right_join(x = wbd.hucs, y = adults, by = c("huc8" = "HUCNumber")) #always have to pick adults or juveniles, never both on same page
wcr_spatialJ <-right_join(x = wbd.hucs, y = juvenile, by = c("huc8" = "HUCNumber"))
#unique(wcr_spatial$CommonName)
# all adults
#sockeye
spatial_sockeye <- wcr_spatial %>%
filter(CommonName == "sockeye salmon") %>%
filter(huc8 != c(99999999, NA))
sockeye_mort <- spatial_sockeye %>%
group_by(huc8) %>%
summarize(sum = sum(IndMort, na.rm = T))
WA.hucs <- wbd.hucs %>%
filter(states %in% "WA")
hucs <- wbd.hucs %>%
filter(states %in% c("WA", "ID", "CA", "OR", "CA,OR"))
sockeye_plot <- ggplot() +
geom_sf(data = wcr.bound, fill = "ivory") +
geom_sf(data = sockeye_mort, aes(fill = sum)) +
theme_void()
ggsave(device = "pdf", here("output", "sockeye_plot.pdf"))
#coho
spatial_coho <- wcr_spatial %>%
filter(CommonName == "coho salmon") %>%
filter(huc8 != c(99999999, NA))
coho_mort <- spatial_coho %>%
group_by(huc8) %>%
summarize(sum = sum(IndMort, na.rm = T))
coho_plot <- ggplot() +
geom_sf(data = wcr.bound, fill = "ivory") +
geom_sf(data = coho_mort, aes(fill = sum)) +
theme_void()
ggsave(device = "pdf", here("output", "coho_plot.pdf"))
#steelhead
spatial_steelhead <- wcr_spatial %>%
filter(CommonName == "steelhead") %>%
filter(huc8 != c(99999999, NA))
steelhead_mort <- spatial_steelhead %>%
group_by(huc8) %>%
summarize(sum = sum(IndMort, na.rm = T))
steelhead_plot <- ggplot() +
geom_sf(data = wcr.bound, fill = "ivory") +
geom_sf(data = steelhead_mort, aes(fill = sum)) +
theme_void()
ggsave(device = "pdf", here("output", "steelhead_plot.pdf"))
#chinook
spatial_chinook <- wcr_spatial %>%
filter(CommonName == "Chinook salmon") %>%
filter(huc8 != c(99999999, NA))
chinook_mort <- spatial_chinook %>%
group_by(huc8) %>%
summarize(sum = sum(IndMort, na.rm = T))
chinook_plot <- ggplot() +
geom_sf(data = wcr.bound, fill = "ivory") +
geom_sf(data = chinook_mort, aes(fill = sum)) +
theme_void()
ggsave(device = "pdf", here("output", "chinook_plot.pdf"))
#canary
spatial_canary <- wcr_spatial %>%
filter(CommonName == "canary rockfish") %>%
filter(huc8 != c(99999999, NA))
canary_mort <- spatial_canary %>%
group_by(huc8) %>%
summarize(sum = sum(IndMort, na.rm = T))
canary_plot <- ggplot() +
geom_sf(data = wcr.bound, fill = "ivory") +
geom_sf(data = canary_mort, aes(fill = sum)) +
theme_void()
ggsave(device = "pdf", here("output", "canary_plot.pdf"))
#boccacio
spatial_bocc <- wcr_spatial %>%
filter(CommonName == "bocaccio") %>%
filter(huc8 != c(99999999, NA))
bocc_mort <- spatial_bocc %>%
group_by(huc8) %>%
summarize(sum = sum(IndMort, na.rm = T))
bocc_plot <- ggplot() +
geom_sf(data = wcr.bound, fill = "ivory") +
geom_sf(data = bocc_mort, aes(fill = sum)) +
theme_void()
ggsave(device = "pdf", here("output", "bocc_plot.pdf"))
#yelloweye
spatial_yelloweye <- wcr_spatial %>%
filter(CommonName == "yelloweye rockfish")
#filter(huc8 != c(99999999, NA))
yelloweye_mort <- spatial_yelloweye %>%
group_by(huc8) %>%
summarize(sum = sum(IndMort, na.rm = T))
WA.hucs <- wbd.hucs %>%
filter(states %in% "WA")
yelloweye_plot <- ggplot() +
geom_sf(data = wcr.bound, fill = "ivory") +
geom_sf(data = yelloweye_mort, aes(fill = sum)) +
theme_void()
ggsave(device = "pdf", here("output", "yelloweye_plot.pdf"))
#green sturgeon
spatial_sturgeon <- wcr_spatial %>%
filter(CommonName == "green sturgeon") %>%
filter(huc8 != c(99999999, NA))
sturgeon_mort <- spatial_sturgeon %>%
group_by(huc8) %>%
summarize(sum = sum(IndMort, na.rm = T))
WA.hucs <- wbd.hucs %>%
filter(states %in% "WA")
sturgeon_plot <- ggplot() +
geom_sf(data = wcr.bound, fill = "ivory") +
geom_sf(data = sturgeon_mort, aes(fill = sum)) +
theme_void()
ggsave(device = "pdf", here("output", "sturegon_plot.pdf"))
#eulachon
spatial_eulachon <- wcr_spatial %>%
filter(CommonName == "eulachon") %>%
filter(huc8 != c(99999999, NA))
eulachon_mort <- spatial_eulachon %>%
group_by(huc8) %>%
summarize(sum = sum(IndMort, na.rm = T))
WA.hucs <- wbd.hucs %>%
filter(states %in% "WA")
eulachon_plot <- ggplot() +
geom_sf(data = wcr.bound, fill = "ivory") +
geom_sf(data = eulachon_mort, aes(fill = sum)) +
theme_void()
ggsave(device = "pdf", here("output", "eulachon_plot.pdf"))
#chum
spatial_chum <- wcr_spatial %>%
filter(CommonName == "chum salmon") %>%
filter(huc8 != c(99999999, NA))
chum_mort <- spatial_chum %>%
group_by(huc8) %>%
summarize(sum = sum(IndMort, na.rm = T))
chum_plot <- ggplot() +
geom_sf(data = wcr.bound, fill = "ivory") +
geom_sf(data = chum_mort, aes(fill = sum)) +
theme_void()
ggsave(device = "pdf", here("output", "chum_plot.pdf"))
#------
#Mapping Juveniles
#chinook j
spatial_chinookJ <- wcr_spatialJ %>%
filter(CommonName == "Chinook salmon") %>%
filter(huc8 != c(99999999, NA))
chinookJ_mort <- spatial_chinookJ %>%
group_by(huc8) %>%
summarize(sum = sum(IndMort, na.rm = T))
chinookJ_plot <- ggplot() +
geom_sf(data = wcr.bound, fill = "ivory") +
geom_sf(data = chinookJ_mort, aes(fill = sum)) +
theme_void()
ggsave(device = "pdf", here("output", "chinookJ_plot.pdf"))
#get_root_filenum <- function(file_list){
#return(unique(str_extract(file_list, "[^- | //s]+")))
#}
#us state map with different geometries of the states
#huc data
#------
# Name points / Geom Points
wa<- data.frame(long = -120.7401, lat = 47.7511, city = "Washington")
ca <- data.frame(long = -119.4179, lat = 36.7783, city = "California")
id <- data.frame(long = -114.7420, lat = 44.0682, city = "Idaho")
or <- data.frame(long =-120.5542, lat = 43.8041, city = "Oregon")
|
## ---- include = FALSE----------------------------------------------------
knitr::opts_chunk$set(
collapse = TRUE,
comment = "#>"
)
library(ggplot2)
## ----eval = F------------------------------------------------------------
# install.packages("horseshoe")
## ----setup---------------------------------------------------------------
library(horseshoe)
## ----fig.width = 6, fig.height= 5----------------------------------------
tau.values <- c(0.005, 0.05, 0.5)
y.values <- seq(-5, 5, length = 100)
df <- data.frame(tau = rep(tau.values, each = length(y.values)),
y = rep(y.values, 3),
post.mean = c(HS.post.mean(y.values, tau = tau.values[1], Sigma2=1),
HS.post.mean(y.values, tau = tau.values[2], Sigma2=1),
HS.post.mean(y.values, tau = tau.values[3], Sigma2=1)) )
ggplot(data = df, aes(x = y, y = post.mean, group = tau, color = factor(tau))) +
geom_line(size = 1.5) +
scale_color_brewer(palette="Dark2") +
geom_abline(lty = 2) + geom_hline(yintercept = 0, colour = "grey") +
theme_classic() + ylab("") + labs(color = "Tau") +
ggtitle("Horseshoe posterior mean for three values of tau")
## ----fig.width = 6, fig.height= 4----------------------------------------
df <- data.frame(index = 1:50,
truth <- c(rep(5, 10), rep(0, 40)),
y <- truth + rnorm(50) #observations
)
ggplot(data = df, aes(x = index, y = truth)) +
geom_point(size = 2) +
geom_point(aes(x = index, y = y), size = 2, col = "blue") +
theme_classic() + ylab("") +
ggtitle("Black = truth, Blue = observations")
## ------------------------------------------------------------------------
(tau.est <- HS.MMLE(df$y, Sigma2 = 1))
## ----fig.width = 6, fig.height= 4----------------------------------------
post.mean <- HS.post.mean(df$y, tau.est, 1)
df$post.mean <- post.mean
ggplot(data = df, aes(x = index, y = truth)) +
geom_point(size = 2) +
geom_point(aes(x = index, y = y), size = 2, col = "blue") +
theme_classic() + ylab("") +
geom_point(aes(x = index, y = post.mean), size = 2, col = "red") +
ggtitle("Black = truth, Blue = observations, Red = estimates")
## ---- results = 'hide'---------------------------------------------------
hs.object <- HS.normal.means(df$y, method.tau = "truncatedCauchy", method.sigma = "Jeffreys")
## ----fig.width = 6, fig.height= 4----------------------------------------
df$post.mean.full <- hs.object$BetaHat
ggplot(data = df, aes(x = index, y = truth)) +
geom_point(size = 2) +
geom_point(aes(x = index, y = y), size = 2, col = "blue") +
theme_classic() + ylab("") +
geom_point(aes(x = index, y = post.mean.full), size = 2, col = "red") +
ggtitle("Black = truth, Blue = observations, Red = estimates")
## ----fig.width = 6, fig.height= 4----------------------------------------
df$lower.CI <- hs.object$LeftCI
df$upper.CI <- hs.object$RightCI
ggplot(data = df, aes(x = index, y = truth)) +
geom_point(size = 2) +
theme_classic() + ylab("") +
geom_point(aes(x = index, y = post.mean.full), size = 2, col = "red") +
geom_errorbar(aes(ymin = lower.CI, ymax = upper.CI), width = .1, col = "red") +
ggtitle("Black = truth, Red = estimates with 95% credible intervals")
## ------------------------------------------------------------------------
df$selected.CI <- HS.var.select(hs.object, df$y, method = "intervals")
## ----fig.width = 6, fig.height= 4----------------------------------------
ggplot(data = df, aes(x = index, y = truth)) +
geom_point(size = 2) +
theme_classic() + ylab("") +
geom_point(aes(x = index, y = post.mean.full, col = factor(selected.CI)),
size = 2) +
geom_errorbar(aes(ymin = lower.CI, ymax = upper.CI, col = factor(selected.CI)),
width = .1) +
theme(legend.position="none") +
ggtitle("Black = truth, Blue = selected as signal, Red = selected as noise")
## ----fig.width = 6, fig.height= 4----------------------------------------
df$selected.thres <- HS.var.select(hs.object, df$y, method = "threshold")
ggplot(data = df, aes(x = index, y = truth)) +
geom_point(size = 2) +
theme_classic() + ylab("") +
geom_point(aes(x = index, y = post.mean.full, col = factor(selected.thres)),
size = 2) +
geom_errorbar(aes(ymin = lower.CI, ymax = upper.CI, col = factor(selected.thres)),
width = .1) +
theme(legend.position="none") +
ggtitle("Black = truth, Blue = selected as signal, Red = selected as noise")
## ------------------------------------------------------------------------
X <- matrix(rnorm(50*100), 50)
beta <- c(rep(6, 10), rep(0, 90))
y <- X %*% beta + rnorm(50)
## ----fig.width = 6, fig.height= 4----------------------------------------
hs.object <- horseshoe(y, X, method.tau = "truncatedCauchy", method.sigma ="Jeffreys")
df <- data.frame(index = 1:100,
truth = beta,
post.mean = hs.object$BetaHat,
lower.CI <- hs.object$LeftCI,
upper.CI <- hs.object$RightCI
)
ggplot(data = df, aes(x = index, y = truth)) +
geom_point(size = 2) +
theme_classic() + ylab("") +
geom_point(aes(x = index, y = post.mean), size = 2, col = "red") +
geom_errorbar(aes(ymin = lower.CI, ymax = upper.CI), width = .1, col = "red") +
ggtitle("Black = truth, Red = estimates with 95% credible intervals")
## ------------------------------------------------------------------------
df$selected.CI <- HS.var.select(hs.object, df$y, method = "intervals")
## ----fig.width = 6, fig.height= 4----------------------------------------
ggplot(data = df, aes(x = index, y = truth)) +
geom_point(size = 2) +
theme_classic() + ylab("") +
geom_point(aes(x = index, y = post.mean, col = factor(selected.CI)),
size = 2) +
geom_errorbar(aes(ymin = lower.CI, ymax = upper.CI, col = factor(selected.CI)),
width = .1) +
theme(legend.position="none") +
ggtitle("Black = truth, Blue = selected as signal, Red = selected as noise")
| /inst/doc/horseshoe-vignette.R | no_license | cran/horseshoe | R | false | false | 6,103 | r | ## ---- include = FALSE----------------------------------------------------
knitr::opts_chunk$set(
collapse = TRUE,
comment = "#>"
)
library(ggplot2)
## ----eval = F------------------------------------------------------------
# install.packages("horseshoe")
## ----setup---------------------------------------------------------------
library(horseshoe)
## ----fig.width = 6, fig.height= 5----------------------------------------
tau.values <- c(0.005, 0.05, 0.5)
y.values <- seq(-5, 5, length = 100)
df <- data.frame(tau = rep(tau.values, each = length(y.values)),
y = rep(y.values, 3),
post.mean = c(HS.post.mean(y.values, tau = tau.values[1], Sigma2=1),
HS.post.mean(y.values, tau = tau.values[2], Sigma2=1),
HS.post.mean(y.values, tau = tau.values[3], Sigma2=1)) )
ggplot(data = df, aes(x = y, y = post.mean, group = tau, color = factor(tau))) +
geom_line(size = 1.5) +
scale_color_brewer(palette="Dark2") +
geom_abline(lty = 2) + geom_hline(yintercept = 0, colour = "grey") +
theme_classic() + ylab("") + labs(color = "Tau") +
ggtitle("Horseshoe posterior mean for three values of tau")
## ----fig.width = 6, fig.height= 4----------------------------------------
df <- data.frame(index = 1:50,
truth <- c(rep(5, 10), rep(0, 40)),
y <- truth + rnorm(50) #observations
)
ggplot(data = df, aes(x = index, y = truth)) +
geom_point(size = 2) +
geom_point(aes(x = index, y = y), size = 2, col = "blue") +
theme_classic() + ylab("") +
ggtitle("Black = truth, Blue = observations")
## ------------------------------------------------------------------------
(tau.est <- HS.MMLE(df$y, Sigma2 = 1))
## ----fig.width = 6, fig.height= 4----------------------------------------
post.mean <- HS.post.mean(df$y, tau.est, 1)
df$post.mean <- post.mean
ggplot(data = df, aes(x = index, y = truth)) +
geom_point(size = 2) +
geom_point(aes(x = index, y = y), size = 2, col = "blue") +
theme_classic() + ylab("") +
geom_point(aes(x = index, y = post.mean), size = 2, col = "red") +
ggtitle("Black = truth, Blue = observations, Red = estimates")
## ---- results = 'hide'---------------------------------------------------
hs.object <- HS.normal.means(df$y, method.tau = "truncatedCauchy", method.sigma = "Jeffreys")
## ----fig.width = 6, fig.height= 4----------------------------------------
df$post.mean.full <- hs.object$BetaHat
ggplot(data = df, aes(x = index, y = truth)) +
geom_point(size = 2) +
geom_point(aes(x = index, y = y), size = 2, col = "blue") +
theme_classic() + ylab("") +
geom_point(aes(x = index, y = post.mean.full), size = 2, col = "red") +
ggtitle("Black = truth, Blue = observations, Red = estimates")
## ----fig.width = 6, fig.height= 4----------------------------------------
df$lower.CI <- hs.object$LeftCI
df$upper.CI <- hs.object$RightCI
ggplot(data = df, aes(x = index, y = truth)) +
geom_point(size = 2) +
theme_classic() + ylab("") +
geom_point(aes(x = index, y = post.mean.full), size = 2, col = "red") +
geom_errorbar(aes(ymin = lower.CI, ymax = upper.CI), width = .1, col = "red") +
ggtitle("Black = truth, Red = estimates with 95% credible intervals")
## ------------------------------------------------------------------------
df$selected.CI <- HS.var.select(hs.object, df$y, method = "intervals")
## ----fig.width = 6, fig.height= 4----------------------------------------
ggplot(data = df, aes(x = index, y = truth)) +
geom_point(size = 2) +
theme_classic() + ylab("") +
geom_point(aes(x = index, y = post.mean.full, col = factor(selected.CI)),
size = 2) +
geom_errorbar(aes(ymin = lower.CI, ymax = upper.CI, col = factor(selected.CI)),
width = .1) +
theme(legend.position="none") +
ggtitle("Black = truth, Blue = selected as signal, Red = selected as noise")
## ----fig.width = 6, fig.height= 4----------------------------------------
df$selected.thres <- HS.var.select(hs.object, df$y, method = "threshold")
ggplot(data = df, aes(x = index, y = truth)) +
geom_point(size = 2) +
theme_classic() + ylab("") +
geom_point(aes(x = index, y = post.mean.full, col = factor(selected.thres)),
size = 2) +
geom_errorbar(aes(ymin = lower.CI, ymax = upper.CI, col = factor(selected.thres)),
width = .1) +
theme(legend.position="none") +
ggtitle("Black = truth, Blue = selected as signal, Red = selected as noise")
## ------------------------------------------------------------------------
X <- matrix(rnorm(50*100), 50)
beta <- c(rep(6, 10), rep(0, 90))
y <- X %*% beta + rnorm(50)
## ----fig.width = 6, fig.height= 4----------------------------------------
hs.object <- horseshoe(y, X, method.tau = "truncatedCauchy", method.sigma ="Jeffreys")
df <- data.frame(index = 1:100,
truth = beta,
post.mean = hs.object$BetaHat,
lower.CI <- hs.object$LeftCI,
upper.CI <- hs.object$RightCI
)
ggplot(data = df, aes(x = index, y = truth)) +
geom_point(size = 2) +
theme_classic() + ylab("") +
geom_point(aes(x = index, y = post.mean), size = 2, col = "red") +
geom_errorbar(aes(ymin = lower.CI, ymax = upper.CI), width = .1, col = "red") +
ggtitle("Black = truth, Red = estimates with 95% credible intervals")
## ------------------------------------------------------------------------
df$selected.CI <- HS.var.select(hs.object, df$y, method = "intervals")
## ----fig.width = 6, fig.height= 4----------------------------------------
ggplot(data = df, aes(x = index, y = truth)) +
geom_point(size = 2) +
theme_classic() + ylab("") +
geom_point(aes(x = index, y = post.mean, col = factor(selected.CI)),
size = 2) +
geom_errorbar(aes(ymin = lower.CI, ymax = upper.CI, col = factor(selected.CI)),
width = .1) +
theme(legend.position="none") +
ggtitle("Black = truth, Blue = selected as signal, Red = selected as noise")
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/route53domains_operations.R
\name{route53domains_delete_tags_for_domain}
\alias{route53domains_delete_tags_for_domain}
\title{This operation deletes the specified tags for a domain}
\usage{
route53domains_delete_tags_for_domain(DomainName, TagsToDelete)
}
\arguments{
\item{DomainName}{[required] The domain for which you want to delete one or more tags.}
\item{TagsToDelete}{[required] A list of tag keys to delete.}
}
\description{
This operation deletes the specified tags for a domain.
}
\details{
All tag operations are eventually consistent; subsequent operations
might not immediately represent all issued operations.
}
\section{Request syntax}{
\preformatted{svc$delete_tags_for_domain(
DomainName = "string",
TagsToDelete = list(
"string"
)
)
}
}
\keyword{internal}
| /paws/man/route53domains_delete_tags_for_domain.Rd | permissive | johnnytommy/paws | R | false | true | 865 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/route53domains_operations.R
\name{route53domains_delete_tags_for_domain}
\alias{route53domains_delete_tags_for_domain}
\title{This operation deletes the specified tags for a domain}
\usage{
route53domains_delete_tags_for_domain(DomainName, TagsToDelete)
}
\arguments{
\item{DomainName}{[required] The domain for which you want to delete one or more tags.}
\item{TagsToDelete}{[required] A list of tag keys to delete.}
}
\description{
This operation deletes the specified tags for a domain.
}
\details{
All tag operations are eventually consistent; subsequent operations
might not immediately represent all issued operations.
}
\section{Request syntax}{
\preformatted{svc$delete_tags_for_domain(
DomainName = "string",
TagsToDelete = list(
"string"
)
)
}
}
\keyword{internal}
|
library(shiny)
shinyUI( fluidPage(
headerPanel("Next Word Prediction App"
),
sidebarPanel(
h3("Introducton"),
p("This application predicts the next possible word in a phrase or sentence. To use it, simply type word(s) on the text field on the screen
and up to 4 possible next words will display in buttons below the field. Click on your intended match to add it to the field."),
p("The application uses natural language processing, namely, n-grams, Markov model, and Katz's back-off model to perform text prediction."),
p("This N-gram Word Predictor was developed as a capstone project to complete the Johns Hopkins data science Course.")
),
mainPanel(
h3("Input"),
textInput("inputTxt", "Type in word(s) below:", width = "90%"),
uiOutput("words"),
br(),
wellPanel(
h4("Details"),
HTML("<p> Github Repository <a href='https://github.com/enrique1790/Data-Science-Captsone-Project' target='_blank'>https://github.com/enrique1790/Data-Science-Captsone-Project</a></p>"),
HTML("<p> Slide Deck Presentation <a href='http://rpubs.com/enrique1790/444617' target='_blank'>http://rpubs.com/enrique1790/444617</a></p>"),
h4("Author:"),
p("Enrique Estrada")
)
)
))
| /ui.R | no_license | 28kingb/Peer-graded-Assignment-Milestone-Report | R | false | false | 1,252 | r | library(shiny)
shinyUI( fluidPage(
headerPanel("Next Word Prediction App"
),
sidebarPanel(
h3("Introducton"),
p("This application predicts the next possible word in a phrase or sentence. To use it, simply type word(s) on the text field on the screen
and up to 4 possible next words will display in buttons below the field. Click on your intended match to add it to the field."),
p("The application uses natural language processing, namely, n-grams, Markov model, and Katz's back-off model to perform text prediction."),
p("This N-gram Word Predictor was developed as a capstone project to complete the Johns Hopkins data science Course.")
),
mainPanel(
h3("Input"),
textInput("inputTxt", "Type in word(s) below:", width = "90%"),
uiOutput("words"),
br(),
wellPanel(
h4("Details"),
HTML("<p> Github Repository <a href='https://github.com/enrique1790/Data-Science-Captsone-Project' target='_blank'>https://github.com/enrique1790/Data-Science-Captsone-Project</a></p>"),
HTML("<p> Slide Deck Presentation <a href='http://rpubs.com/enrique1790/444617' target='_blank'>http://rpubs.com/enrique1790/444617</a></p>"),
h4("Author:"),
p("Enrique Estrada")
)
)
))
|
GlmnetBinomialQuadOrthMultiSubset.FinalTrainTestEvalAICc <-
function(Species, VariableNamesIn, SubsetVariableNumber, TotPres, VariableSubsetsIn, PresenceDat.df, PseudoabsenceDat.df, BackgroundDat.df, kfoldgrpp, kfoldgrpa, polycoefsall.df, cvglmnetfoldsin, alphain, OutDirectIn, FunctDirectIn..., SetRunIDIn, CVGlmnetRuns, Output, DataSetType) {
setwd(OutDirectIn)
library(foreach)
library(doParallel)
#FunctDirectIn <- "C:/Users/James/Documents/R/win-library/"
# Make sure VariableNames is a data frame
VariableNamesIn <- data.frame(VariableNamesIn, stringsAsFactors=FALSE)
#
RowNames.df <- data.frame(rownames(VariableSubsetsIn), stringsAsFactors=FALSE) # Save row names to use in output
SubsetSize.mat <- matrix(c("Singlets", "Doublets", "Triplets", "Quartets", "Quintets", "Sextets", "Septets", "Octets", "Nonets",
"Dectets", "Undectets", "Duodectets","Tredectets", "Quattuordectets", "Quindectets", "Sexdectets", "Septendectets", "Octodectets", "Novemdectets",
"Vigetets", "Unvigetets", "Duovigetets", "Trevigetets", "Quattuorvigetets", "Quinvigetets", "Sexvigetets", "Septenvigetets", "Octovigetet",
"Novemvigetets", "Trigetets", "Untrigetets", "Duotrigetets", "Tretrigetets", "Quottuortrigetets", "Quintrigetets",
"Sextrigetets", "Septentrigetets", "Octotrigetets", "Novemtrigetets", "Quadragetets", "Unquadragetets", "Duoquadragetets", "Trequadragetets",
"Quattuorquadragetets", "Quinquadragetets", "Sexquadragetets", "Octoquadragetets", "Octoquadragetets", "Novemquadragetets", "Quinquagetets",
"Unquinquagetets", "Duoquinquagetets", "Trequinguagetets", "Quattuorquinquagetets", "Quinquinquagetets",
"Sexquinquagetets", "Septenquinquagetets", "Octoquinquagetets", "Novemquinquagetets", "Sexagetets"), ncol=1, nrow=60, byrow=TRUE, dimnames=list(c
(seq(1:60)), c("Subset")))
SubsetSize.df <- as.data.frame(SubsetSize.mat, stringsAsFactors=FALSE)
Subset <- SubsetSize.df[SubsetVariableNumber,]
if(is.na(Subset)) {
Subset <- ""
}
TotVars <- nrow(VariableNamesIn)
# Set default value for CVGlmnetRuns if not entered (number runs of cv.glmnet)
if(missing(DataSetType)) { DataSetType="" }
if(missing(CVGlmnetRuns)) { CVGlmnetRuns=1 }
#Output=FALSE
if(missing(Output)) { Output=TRUE }
if(missing(SetRunIDIn)) { SetRunIDIn="" }
#
ModelType1 <- paste("GlmnetBinomialQuad", Subset, sep="")
Model <- paste("GlmnetBinomialQuad for", SubsetVariableNumber, "Feature Subset")
tail(VariableSubsetsIn)
#VariableSubsetsIn[2998,]
# Register cluster for running parallel with doParallel pkg
# Use a minimum of one core or the number of available cores minus one
cores <- detectCores() # detect cores running
workcores <- max(1, cores-1)
cl <- makeCluster(workcores) # make cluster of workcores
#cl <- makeCluster(1)
registerDoParallel(cl) # register cluster
getDoParWorkers() # check clusters registered
#registerDoSEQ()
#
if(nrow(VariableSubsetsIn)<2) {
ivec <- 1
} else {
ivec <- c(seq(1, nrow(VariableSubsetsIn), 1))
}
#ivec <- c(1,2,3)
#ivec <- c(1,2,3,4)
#ivec <- c(1)
###
# Set up foreach to output to ten different list elements corresponding to
# the two Old World and New World sets of five evaluation statistics
## Designate final training data and add count column as factor
GlmnetPresTrainData <- PresenceDat.df[kfoldgrpp==3,]
GlmnetPresTrainData$Count <- 1
head(GlmnetPresTrainData)
#str(GlmnetPresTrainData)
GlmnetAbsTrainData <- PseudoabsenceDat.df[kfoldgrpp==3,]
GlmnetAbsTrainData$Count <- 0
##############################################################
t1 <- Sys.time()
#VariablesSubsets <- VariableKeepSubsets
GlmnetSubsetEvalStats <- foreach(i=ivec, .combine='rbind') %dopar% {
library(dismo)
library(raster)
library(glmnet)
#i=1
## Use function to round up from .5 from http://stackoverflow.com/questions/12688717/round-up-from-5-in-r
## Round2: round `x` half AWAY from zero to `n` decimal places.
## Base round() uses banker's rounding (round(0.5) == 0), so this helper
## scales, adds 0.5, truncates, and rescales instead. The sign is split
## off first so that negative values mirror positive ones exactly.
## Vectorized over `x`. Source idea:
## http://stackoverflow.com/questions/12688717/round-up-from-5-in-r
Round2 <- function(x, n) {
scale <- 10^n
magnitude <- trunc(abs(x) * scale + 0.5) / scale
sign(x) * magnitude
}
#
## Define function to predict matrix with Glmnet
GLMBinomialResp_PredictMatrix <-
function(CoefficientsIn, xData.dfIn) {
#CoefficientsIn <- AvgCoefficients.df
#xDataIn <- PresAbsDat.df
# Define function makeglm.R: Creates a "fake" glm object with specific coefficients that you can use for predicting without fitting a model first
# https://gist.github.com/MrFlick/ae299d8f3760f02de6bf
##
# makeglm: build a "fake" glm/lm object with user-supplied coefficients so
# that predict() can be called without ever fitting a model.
# (Adapted from https://gist.github.com/MrFlick/ae299d8f3760f02de6bf)
#   formula - model formula; its terms define the coefficient layout.
#   ...     - coefficient values: an unnamed argument is taken as the
#             intercept; named arguments map to term names (a named vector
#             value expands to factor-level coefficient names).
#   family  - a family object, family function, or family name string;
#             when supplied the result is classed c("glm","lm").
#   data    - optional data frame used to record factor levels and data
#             classes on the terms object (needed for predict()).
makeglm <- function(formula, ..., family, data=NULL) {
dots <- list(...)
out<-list()
tt <- terms(formula, data=data)
if(!is.null(data)) {
# Record xlevels/dataClasses from `data` so predict() can rebuild the
# model frame; drop the response column from the variable list first.
mf <- model.frame(tt, data)
vn <- sapply(attr(tt, "variables")[-1], deparse)
if((yvar <- attr(tt, "response"))>0)
vn <- vn[-yvar]
xlvl <- lapply(data[vn], function(x) if (is.factor(x))
levels(x)
else if (is.character(x))
levels(as.factor(x))
else
NULL)
attr(out, "xlevels") <- xlvl[!vapply(xlvl,is.null,NA)]
# NOTE(review): stats:::.MFclass is an unexported internal -- may break
# across R versions; confirm against the installed R release.
attr(tt, "dataClasses") <- sapply(data[vn], stats:::.MFclass)
}
out$terms <- tt
coef <- numeric(0)
# Requires at least two ... arguments (e.g. intercept + one term), all
# reachable by name or position.
stopifnot(length(dots)>1 & !is.null(names(dots)))
for(i in seq_along(dots)) {
if((n<-names(dots)[i]) != "") {
v <- dots[[i]]
if(!is.null(names(v))) {
# Named vector: expand to per-level names, e.g. "cyl6", "cyl8".
coef[paste0(n, names(v))] <- v
} else {
stopifnot(length(v)==1)
coef[n] <- v
}
} else {
# Unnamed argument supplies the intercept.
coef["(Intercept)"] <- dots[[i]]
}
}
out$coefficients <- coef
out$rank <- length(coef)
if (!missing(family)) {
# Accept a family object, a family-generating function, or its name.
# NOTE(review): class(family) == "..." would be safer as inherits();
# left as-is since family objects have a length-1 class here.
out$family <- if (class(family) == "family") {
family
} else if (class(family) == "function") {
family()
} else if (class(family) == "character") {
get(family)()
} else {
stop(paste("invalid family class:", class(family)))
}
# Minimal dummy slots so predict.glm() does not error; deviance/AIC
# values are placeholders and must not be interpreted.
out$qr <- list(pivot=seq_len(out$rank))
out$deviance <- 1
out$null.deviance <- 1
out$aic <- 1
class(out) <- c("glm","lm")
} else {
# WARNING(review): this no-family branch references undefined objects
# (`dd`, `out$mf`) and would fail if reached; the caller in this file
# always passes family=binomial, so it is effectively dead code.
class(out) <- "lm"
out$fitted.values <- predict(out, newdata=dd)
out$residuals <- out$mf[attr(tt, "response")] - out$fitted.values
out$df.residual <- nrow(data) - out$rank
out$model <- data
#QR doesn't work
}
out
}
###################################################
setwd(OutDirectIn)
##
# Retrieve variables in regression
VarNamesUsed <- as.vector(rownames(CoefficientsIn)[2:nrow(CoefficientsIn)])
# Make sure input data matches VarNamesUsed
head(xData.dfIn)
nrow(xData.dfIn)
xData.dfIn <- subset(xData.dfIn, select=VarNamesUsed)
# Replace "^" with "P"
colnames(xData.dfIn) <- gsub("\\^2", "P2", colnames(xData.dfIn))
## Prepare to construct regression formula from CoefficientsIn
Coefficients.mat1 <- t(as.matrix(CoefficientsIn))
Coefficients.df <- data.frame(Coefficients.mat1, stringsAsFactors=FALSE)
colnames(Coefficients.df) <- colnames(Coefficients.mat1)
colnames(Coefficients.df) <- gsub("\\^2", "P2", colnames(Coefficients.df))
#str(Coefficients.df)
## Create dummy fake training data for binomial GLM to use in makeglm function
## for assigning coefficients to GLM model
FakeYTrainData <- sample(0:1, nrow(xData.dfIn), replace=T)
# Prepare GLM formula for makeglm
fla <- paste("FakeYTrainData ~", paste(colnames(Coefficients.df[2:ncol(Coefficients.df)]), collapse=" + "))
###########
## Create character representation of coefficients for makeglm function including intercept and variables with coefficients
CoefficientsVector <- c(seq(1:ncol(Coefficients.df)))
for(i in CoefficientsVector) {
#i=2
if(i==1) {
CoefficientsVector[i] <- paste(Coefficients.df[1,1])
} else {
CoefficientsVector[i] <- paste0(colnames(Coefficients.df)[i], "=", Coefficients.df[1,i])
}
}
###########
CoefForm <- paste(CoefficientsVector, collapse=", ")
## Use makeglm function to calculate dummy GLM and assign coefficients
GLMBinomialModel <- eval(parse(text = paste("makeglm(as.formula(fla), family=binomial, data=xData.dfIn,", CoefForm, ")")))
## Use GLM model with assigned coefficients to predict input matrix data values
GLMscore.df <- data.frame("GLMScore" = predict(GLMBinomialModel, newdata=xData.dfIn, type="response"))
head(GLMscore.df)
##
return(GLMscore.df)
}
##############################################################################
###############
# Split VarNames of VariableSubsetsIn into separate variables
#str(VariableSubsets)
VariableNamesSel <- c(unlist(VariableSubsetsIn[i,]))
# Split VarNames of VariableSubsetsIn into separate variables
if(grepl("-",VariableNamesSel)) {
VarNames <- unlist(strsplit(VariableNamesSel, "-"))
} else {
VarNames <- unlist(VariableNamesSel)
}
SubsetVarNum <- length(VarNames)
###
## Loop through different kfold groups with equal prevalence (% presence data)
## and save coefficients and lambda.min
CoefficientsList <- list()
ModelStatsList <- list()
ModelStats.df <- data.frame(t(as.matrix(c(0,0,0,0))))
colnames(ModelStats.df) <- c("ModelNo", "deviance.min", "lambda.min", "AICc2")
######################
ta <- Sys.time()
for(q in 1:CVGlmnetRuns) {
#q=1
# Create kfold groups with equal prevalence (percent presence) for
# presence and absence portions of PresAbsDatTrain.df
NTrainkfoldgrpp <- kfold(GlmnetPresTrainData, cvglmnetfoldsin)
NTrainkfoldgrpa <- kfold(GlmnetAbsTrainData, cvglmnetfoldsin)
# Join presence and absence data and join kfold groups
PresAbsTrainDat.df <- rbind(GlmnetPresTrainData, GlmnetAbsTrainData)
nrow(PresAbsTrainDat.df)
###
NTrainkfoldgrppa <- c(NTrainkfoldgrpp, NTrainkfoldgrpa)
length(NTrainkfoldgrppa)
#
x <- as.matrix(PresAbsTrainDat.df[,c(1:ncol(PresAbsTrainDat.df)-1)])
nrow(x)
head(x)
# Subset by environmental variables to be used in model from VariableSubset
x <- x[, c(VarNames), drop=FALSE]
x <- apply(x, 2, as.numeric)
head(x)
#str(x1)
################
# Specify model training data
xTrainData.df <- x
head(xTrainData.df)
nrow(xTrainData.df)
######
## Add orthogonalized quadratic versions for main effect variables (no interaction)
xTrainDataQ.df <- cbind(xTrainData.df, xTrainData.df)
colcount <- 1
for(n in 1:ncol(xTrainData.df)) {
#n=3
# Use poly function to obtain orthoganolized main effect and squared quadratic values
# Keep only columns for poly function coefficients with variable name
polyvar <- colnames(xTrainData.df)[n]
polycoefs.df <- polycoefsall.df[ , grepl(polyvar, names(polycoefsall.df))]
# Transfer coefficients to properly formatted list for poly function
polycoeflist <- list()
polycoeflist$alpha <- polycoefs.df[1:2,1]
polycoeflist$norm2 <- polycoefs.df[,2]
###
xTrainDataQ.df[,colcount:(colcount+1)] <- poly(xTrainData.df[,n], 2, coefs=polycoeflist)
head(xTrainDataQ.df)
colnames(xTrainDataQ.df)[colcount:(colcount+1)] <- c(colnames(xTrainData.df)[n], paste0(colnames(xTrainData.df)[n], "P2"))
colcount <- colcount + 2
}
head(xTrainDataQ.df)
xTrainDataQ <- as.matrix(xTrainDataQ.df)
head(xTrainDataQ)
nrow(xTrainDataQ)
#####################
# Identify response variable for training data
yTrainData <- PresAbsTrainDat.df$Count
length(yTrainData)
#
# Run cv glmnet binomial model for response probability of presence using cross validation
# to select the model coefficients with the minimum deviance (default)
#
cv_glmnetBinomialModel = cv.glmnet(xTrainDataQ, yTrainData, foldid=NTrainkfoldgrppa, family = "binomial", nfolds = cvglmnetfoldsin, alpha=alphain)
# Retrieve coefficients from selected Binomial model
s_in = "lambda.min"
Coefficients.df1 <- data.frame(as.matrix(coef(cv_glmnetBinomialModel, s=s_in)), stringsAsFactors=FALSE)
colnames(Coefficients.df1) <- "Coefficients"
#str(cv_glmnetBinomialModel)
lambda.min <- cv_glmnetBinomialModel$lambda.min
deviance.min <- min(cv_glmnetBinomialModel$cvm)
####################################################
###
# Caculate AICc using formula from Johnny Heineken at https://stats.stackexchange.com/questions/25817/is-it-possible-to-calculate-aic-and-bic-for-lasso-regression-models
n <- cv_glmnetBinomialModel$glmnet.fit$nobs
## cv.glmnet deviance is divided by number of observation (see https://stackoverflow.com/questions/43468665/poisson-deviance-glmnet), so need to multiply by n
deviance.fit <- n*(cv_glmnetBinomialModel$cvm[match(cv_glmnetBinomialModel$lambda.min, cv_glmnetBinomialModel$lambda)])
fit.nulldev <- cv_glmnetBinomialModel$glmnet.fit$nulldev
k <- cv_glmnetBinomialModel$glmnet.fit$df[match(cv_glmnetBinomialModel$lambda.min, cv_glmnetBinomialModel$lambda)]
tLL <- fit.nulldev - deviance.fit
AICc2 <- -tLL+2*k+2*k*(k+1)/(n-k-1)
###
ModelStats.df[1,1] <- q
ModelStats.df[1,2] <- deviance.min
ModelStats.df[1,3] <- lambda.min
ModelStats.df[1,4] <- AICc2
CoefficientsList[[q]] <- Coefficients.df1
ModelStatsList[[q]] <- ModelStats.df
}
tb <- Sys.time()
difftime(tb, ta, units = "mins")
#############################
#
ModelStatsResults.df <- do.call(rbind,ModelStatsList)
ModelStatsResults.df <- ModelStatsResults.df[order(ModelStatsResults.df$deviance.min),]
nrow(ModelStatsResults.df)
CoefficientSets.df <- do.call(cbind, CoefficientsList)
AvgCoefficients.df1 <- data.frame(apply(CoefficientSets.df,1,mean))
colnames(AvgCoefficients.df1) <- "Coefficients"
# Retrieve variable names
VarNamesGLM <- data.frame(rownames(AvgCoefficients.df1), stringsAsFactors=FALSE)
# Join variable names with coefficients
AvgCoefficients.df <- cbind(VarNamesGLM, AvgCoefficients.df1)
colnames(AvgCoefficients.df) <- c("VarNames", "Coefficients")
# Remove Variables with coefficient of zero
AvgCoefficients.df1 <- AvgCoefficients.df[!AvgCoefficients.df$Coefficients==0,]
# Remove VarNames column
AvgCoefficients.df <- AvgCoefficients.df1[, 2, drop=FALSE]
#
AICc2 <- mean(ModelStatsResults.df$AICc2)
## If there are no coefficients return NA for this iteration
# GlmnetSubsetEvalL <- list()
# if(nrow(AvgCoefficients.df)==1) {
# k <- 2
# GlmnetSubsetEvalL[[k]] <- c(rep(NA,14))
# return(GlmnetSubsetEvalL)
# }
#####################################################
#Save coefficients
#setwd(OutDirectIn)
#write.table(AvgCoefficients.df, file=paste("Coefficients", ModelType1, "_", "TrainSetEnsAvg", KeepModels, "of", CVGlmnetRuns, "_", SubsetVariableNumber, "Var_", Loop, ".csv", sep=""), sep=",", col.names=NA)
#
VarNamesUsed <- as.vector(rownames(AvgCoefficients.df)[2:nrow(AvgCoefficients.df)])
VarsUsed <- gsub("P2", "", VarNamesUsed)
VarsUsed <- unique(VarsUsed)
nenvars <- length(VarsUsed)
if(nenvars > 1) {
EnvVarsUsed <- paste0(VarsUsed, collapse="-")
} else if(nenvars ==1) {
EnvVarsUsed <- VarsUsed
} else {
EnvVarsUsed <- " "
}
##
#checkModel <- readRDS("cv_glmnetBinomialQuadModel.rds")
#coef(checkModel, s= "lambda.min")
#VariableSubsets[2998,]
#####################################
## Calculate AICc_bg with point values
# Obtain number of parameters in Glmnet model
nparams <- (nrow(AvgCoefficients.df) - 1)
#
## From ENMeval Package documentation: AICc is the Akaike Information Criterion corrected for small
## sample sizes calculated as: (2 * K - 2 * logLikelihood) + (2 * K) * (K + 1)=(n - K - 1)
## where K is the number of parameters in the model (i.e., number of non-zero parameters in Glmnet
## lambda file) and n is the number of occurrence localities.
## The logLikelihood is sum(log(vals/total))
## vals is vector of Glmnet raw values at occurence localities
## total is the sum of Glmnet raw values across the entire study area
##
head(PresAbsTrainDat.df)
nrow(PresAbsTrainDat.df)
######
xPresTrainData.df1 <- PresAbsTrainDat.df[1:nrow(GlmnetPresTrainData),]
nrow(xPresTrainData.df1)
# Subset PresTestData by VarNames
xPresData.dfIn <- xPresTrainData.df1[, c(VarsUsed), drop=FALSE]
head(xPresData.dfIn)
nrow(xPresData.dfIn)
#####################
## Add orthogonalized quadratic versions for main effect variables (no interaction)
##
xPresDataQ.dfIn <- cbind(xPresData.dfIn, xPresData.dfIn)
colcount <- 1
for(m in 1:ncol(xPresData.dfIn)) {
#m=3
# Use poly function to obtain orthoganolized main effect and squared quadratic values
# Keep only columns for poly function coefficients with variable name
polyvar <- colnames(xPresData.dfIn)[m]
polycoefs.df <- polycoefsall.df[ , grepl(polyvar, names(polycoefsall.df))]
# Transfer coefficients to properly formatted list for poly function
polycoeflist <- list()
polycoeflist$alpha <- polycoefs.df[1:2,1]
polycoeflist$norm2 <- polycoefs.df[,2]
###
xPresDataQ.dfIn[,colcount:(colcount+1)] <- poly(xPresData.dfIn[,m], 2, coefs=polycoeflist)
head(xPresDataQ.dfIn)
colnames(xPresDataQ.dfIn)[colcount:(colcount+1)] <- c(colnames(xPresData.dfIn)[m], paste0(colnames(xPresData.dfIn)[m], "P2"))
colcount <- colcount + 2
}
head(xPresDataQ.dfIn)
# Select only terms used in ensemble model
xDataQ.dfIn <- xPresDataQ.dfIn[, c(VarNamesUsed), drop=FALSE]
head(xDataQ.dfIn)
#####################
# Obtain values for training presence data output by glmnet run
CoefficientsIn <- AvgCoefficients.df
presvals.df <- GLMBinomialResp_PredictMatrix(CoefficientsIn, xDataQ.dfIn)
#str(xTrainData)
# Obtain values for background data output by Glmnet run
# Select only terms used in ensemble model
xData.dfIn <- BackgroundDat.df[, c(VarsUsed), drop=FALSE]
head(xData.dfIn)
#####################
## Add orthogonalized quadratic versions for main effect variables (no interaction)
##
xDataQ.dfIn <- cbind(xData.dfIn, xData.dfIn)
colcount <- 1
for(m in 1:ncol(xData.dfIn)) {
#m=3
# Use poly function to obtain orthoganolized main effect and squared quadratic values
# Keep only columns for poly function coefficients with variable name
polyvar <- colnames(xData.dfIn)[m]
polycoefs.df <- polycoefsall.df[ , grepl(polyvar, names(polycoefsall.df))]
# Transfer coefficients to properly formatted list for poly function
polycoeflist <- list()
polycoeflist$alpha <- polycoefs.df[1:2,1]
polycoeflist$norm2 <- polycoefs.df[,2]
###
xDataQ.dfIn[,colcount:(colcount+1)] <- poly(xData.dfIn[,m], 2, coefs=polycoeflist)
head(xDataQ.dfIn)
colnames(xDataQ.dfIn)[colcount:(colcount+1)] <- c(colnames(xData.dfIn)[m], paste0(colnames(xData.dfIn)[m], "P2"))
colcount <- colcount + 2
}
head(xDataQ.dfIn)
# Select only terms used in ensemble model
xDataQ.dfIn <- xDataQ.dfIn[, c(VarNamesUsed), drop=FALSE]
head(xDataQ.dfIn)
#####################
# Obtain values for training presence/absence data output by glmnet run
CoefficientsIn <- AvgCoefficients.df
backgroundvals.df <- GLMBinomialResp_PredictMatrix(CoefficientsIn, xDataQ.dfIn)
head(backgroundvals.df)
## Join together presence and background GLMscores
PresBckgrndData.df <- rbind(presvals.df, backgroundvals.df)
# Change any values of zero to 0.001
PresBckgrndData.df[,1][PresBckgrndData.df[,1]==0] <- 0.001
## Sum all values
SumVal <- apply(PresBckgrndData.df,2,sum)
## Divide all values by SumVal
SumValDivFunc <- function(x) {x/SumVal}
PresBckgrndDataRAW.df <- data.frame(apply(PresBckgrndData.df,2,SumValDivFunc))
head(PresBckgrndDataRAW.df)
nrow(PresBckgrndDataRAW.df)
# Keep values for presence data as vector
vals <- PresBckgrndDataRAW.df[1:nrow(GlmnetPresTrainData),1]
head(vals)
n <- length(vals) # number of occurence localities
# Keep values for background data as vector
backgroundvals <- PresBckgrndDataRAW.df[(nrow(GlmnetPresTrainData)+1):nrow(PresBckgrndDataRAW.df),1]
length(backgroundvals)
# total is sum of GLM raw values across entire study area, includes background and occurrence localities
# Calculate sum of all values
totalocc <- sum(vals) # sum from occurrence localities
totalbg <- sum(backgroundvals) # sum from background localities
total <- totalocc + totalbg # grand total sum
#
logLikelihood <- sum(log(vals/total))
K <- nparams
AICc_bg <- -1*((2*K - 2*logLikelihood) + (2*K)*(K+1)/(n-K-1))
NumDVars <- K
###
#############################################################################
########### Evaluate model using training and test data
GlmnetSubsetEvalL <- list()
TestDataTypes <- c("FinalTrain", "FinalTest")
for(TestDataType in TestDataTypes) {
#TestDataType="FinalTrain"
if(TestDataType=="FinalTrain") {
k <- 3
} else {
k <- 4
}
# Specify model testing data
GlmnetPresTestData <- PresenceDat.df[kfoldgrpp == k, ]
head(GlmnetPresTestData)
nrow(GlmnetPresTestData)
###
GlmnetAbsTestData <- PseudoabsenceDat.df[kfoldgrpa == k, ]
########################
### Predict model values for test matrix to get values for test presence and absence points
## First presence points
setwd(OutDirectIn)
head(GlmnetPresTestData)
# Subset PresTestData by VarNames
xData.dfIn <- GlmnetPresTestData[, c(VarsUsed), drop=FALSE]
head(xData.dfIn)
#####################
## Add orthogonalized quadratic versions for main effect variables (no interaction)
##
xDataQ.dfIn <- cbind(xData.dfIn, xData.dfIn)
colcount <- 1
for(m in 1:ncol(xData.dfIn)) {
#m=3
# Use poly function to obtain orthoganolized main effect and squared quadratic values
# Keep only columns for poly function coefficients with variable name
polyvar <- colnames(xData.dfIn)[m]
polycoefs.df <- polycoefsall.df[ , grepl(polyvar, names(polycoefsall.df))]
# Transfer coefficients to properly formatted list for poly function
polycoeflist <- list()
polycoeflist$alpha <- polycoefs.df[1:2,1]
polycoeflist$norm2 <- polycoefs.df[,2]
###
xDataQ.dfIn[,colcount:(colcount+1)] <- poly(xData.dfIn[,m], 2, coefs=polycoeflist)
head(xDataQ.dfIn)
colnames(xDataQ.dfIn)[colcount:(colcount+1)] <- c(colnames(xData.dfIn)[m], paste0(colnames(xData.dfIn)[m], "P2"))
colcount <- colcount + 2
}
head(xDataQ.dfIn)
# Select only terms used in ensemble model
xDataQ.dfIn <- xDataQ.dfIn[, c(VarNamesUsed), drop=FALSE]
head(xDataQ.dfIn)
#####################
CoefficientsIn <- AvgCoefficients.df
prespred.df <- GLMBinomialResp_PredictMatrix(CoefficientsIn, xDataQ.dfIn)
colnames(prespred.df) <- "GlmnetScore"
head(prespred.df)
## Then absence points
head(GlmnetAbsTestData)
# Subset AbsTestData by VarNames
xData.dfIn <- GlmnetAbsTestData[, c(VarsUsed), drop=FALSE]
head(xData.dfIn)
#
#####################
## Add orthogonalized quadratic versions for main effect variables (no interaction)
##
xDataQ.dfIn <- cbind(xData.dfIn, xData.dfIn)
colcount <- 1
for(m in 1:ncol(xData.dfIn)) {
#m=3
# Use poly function to obtain orthoganolized main effect and squared quadratic values
# Keep only columns for poly function coefficients with variable name
polyvar <- colnames(xData.dfIn)[m]
polycoefs.df <- polycoefsall.df[ , grepl(polyvar, names(polycoefsall.df))]
# Transfer coefficients to properly formatted list for poly function
polycoeflist <- list()
polycoeflist$alpha <- polycoefs.df[1:2,1]
polycoeflist$norm2 <- polycoefs.df[,2]
###
xDataQ.dfIn[,colcount:(colcount+1)] <- poly(xData.dfIn[,m], 2, coefs=polycoeflist)
head(xDataQ.dfIn)
colnames(xDataQ.dfIn)[colcount:(colcount+1)] <- c(colnames(xData.dfIn)[m], paste0(colnames(xData.dfIn)[m], "P2"))
colcount <- colcount + 2
}
head(xDataQ.dfIn)
# Select only terms used in ensemble model
xDataQ.dfIn <- xDataQ.dfIn[, c(VarNamesUsed), drop=FALSE]
head(xDataQ.dfIn)
#####################
abspred.df <- GLMBinomialResp_PredictMatrix(CoefficientsIn, xDataQ.dfIn)
colnames(abspred.df) <- "GlmnetScore"
#############################################################################################
# This section evaluates the Glmnet model using the PresenceAbsence package
#############################################################################################
#### Create a dataset with model predictions for presence and absence points
# Use extracted prediction values for presence and absence points for each of three
# models previously calculated in loop
#
library(gtools)
## Create directory of output for class pair run
#setwd(paste("C:/Users/JLTracy/Documents/R/win-library/3.0/10minClimIntEvalSTB1000", "/", output2, sep=""))
# For presence data, assign a column of "1" under the name "OBSERVED" to indicate presence data
# and assign the model results a name "GlmnetScoreN" where N is the name of the rep
# Also assign a column "id" for the row numbers to use in merging the data frames later
presa.df <- data.frame(c(rep(1, nrow(prespred.df))))
names(prespred.df) <- c("Glmnet")
names(presa.df) <- c("OBSERVED")
pres.df <- data.frame(cbind(id=1:nrow(presa.df), presa.df, prespred.df))
nrow(pres.df)
# Repeat above process with absence data, but assign "OBSERVED" a value of 0
absa.df <- data.frame(c(rep(0, nrow(abspred.df))))
names(abspred.df) <- c("Glmnet")
names(absa.df) <- c("OBSERVED")
abs.df <- data.frame(cbind(id=1:nrow(absa.df), absa.df, abspred.df))
# For each model output, merge presence and absence data using "id' column as guide when all=TRUE
# NOTE: PresenceAbsence package cannot handle several models at one time if the sample sizes differ
# so have to analyze each model output separately
presabspred <- rbind(pres.df, abs.df)
tail(presabspred)
head(presabspred)
# Drop the id column used in merging for each dataset
presabspred$id <- NULL
# Make a column of data with the species name with same number of rows as data from each model
SPECIES <- data.frame(c(rep(Species, nrow(presabspred))))
names(SPECIES) <- c("SPECIES")
# Make final dataset SPDATA by putting together SPECIES with extracted environmental data.
SPDATA <- data.frame(SPECIES, presabspred)
head(SPDATA)
#SPDATA[100:160,]
################################################################################
### Run this block of code to evaluate model results with PresenceAbsence package
################################################################################
library(PresenceAbsence)
#setwd(paste("C:/Users/JLTracy/Documents/R/win-library/3.0/10minClimIntEvalSTB1000", "/", output2, sep=""))
#starttime <- Sys.time()
#### FOR OLD WORLD DATA EVALUATION STATISTICS
### Define variables for later use.
accurun <- list()
accusum <- matrix(data=NA, ncol=11, nrow=1, byrow=TRUE, dimnames=list(NULL, c("MaxTSS", "Specificity_maxTSS", "Sensitivity_maxTSS", "AUC", "MaxKappa", "ThresholdMaxTSS", "AICc_bg", "AICc2", "NumDVars", "NumEnVars", "EnvVarsUsed")))
species <- as.character(unique(SPDATA$SPECIES))
model.names <- as.character(names(SPDATA)[-c(1, 2)])
N.models <- ncol(SPDATA) - 2
N.sp <- length(species)
N.obs <- length(SPDATA$SPECIES[SPDATA$SPECIES == species[1]])
Obs.prev <- table(SPDATA$SPECIES, SPDATA$OBSERVED)[, 2]/N.obs
Obs.prev <- Round2(Obs.prev, 2)
### Mainly just run this code
graphics.off()
sp <- 1
# Read in dataset for loop
DATA <- SPDATA[SPDATA$SPECIES == species[sp], ]
head(DATA)
#
# To assess accuracy per threshold, use limited threshold available for
# model based upon number of environmental layers in model
# ("NumGrids")
#NumGrids <- max(40, SubsetVarNum)
#PossThresholds <- seq(1/NumGrids,1,length=NumGrids)
PossThresholds <- 100
#accu <- data.frame(presence.absence.accuracy(SPDATA, which.model = 1, threshold = PossThresholds, st.dev=FALSE))
# accu <- presence.absence.accuracy(DATA, which.model = 1, threshold = c(0.90, 0.91, 0.92, 0.93, 0.94, 0.95, 0.96, 0.975, 0.98, 0.99, 0.999999))
#accu <- presence.absence.accuracy(DATA, which.model = 1, threshold = 100, st.dev=FALSE)
# print(paste("Species:", species[sp], "Model:", model.names)) not used
accu <- data.frame(presence.absence.accuracy(DATA, which.model = 1, threshold = 100, st.dev=FALSE))
# print(paste("Species:", species[sp], "Model:", model.names)) not used
head(accu)
maxSSS <- data.frame(accu$sensitivity + accu$specificity)
names(maxSSS) <- c("maxSSS")
head(maxSSS)
TSS <- data.frame(accu$sensitivity + accu$specificity - 1)
names(TSS) <- c("TSS")
accurun <- data.frame(accu, maxSSS, TSS)
head(accurun)
accurun$Conditions <- paste("Glmnet", Subset, sep="")
maxKappa <- max(accurun$Kappa)
maxTSS <- max(accurun$TSS)
AUC <- max(accurun$AUC)
# Find and average thresholds at TSS = maxTSS. In the case of tied optimal
# thresholds, we select the mean threshold producing maximum TSS following
# (Freeman and Moisen 2008). But, in the case of discrete thresholds as found
# in envelope models, if the mean optimal threshold does not represent an
# actual discrete threshold, we select the nearest discrete threshold to the
# mean among 3 or more thresholds, or the smaller of two adjacent discrete thresholds.
ThresholdsMaxTSS <- accurun$threshold[which(accurun$TSS == maxTSS)]
ThresholdMaxTSS <- mean(ThresholdsMaxTSS)
#ThresholdMaxTSSM <- mean(ThresholdsMaxTSS)
## Following commented code for envelope score
#if (length(ThresholdsMaxTSS) < 3) {
#ThresholdMaxTSS <- min(ThresholdsMaxTSS)
#} else { ThresholdMaxTSS <- PossThresholds[which(abs(PossThresholds - ThresholdMaxTSSM)== min(abs(PossThresholds - ThresholdMaxTSSM)))]
#}
# Calculate specificity and sensitivity at maxTSS
Specificity_maxTSS <- accurun$specificity[which(accurun$TSS == maxTSS)]
Specificity_maxTSS <- mean(Specificity_maxTSS)
Sensitivity_maxTSS <- accurun$sensitivity[which(accurun$TSS == maxTSS)]
Sensitivity_maxTSS <- mean(Sensitivity_maxTSS)
#CheckTSS <- Specificity_maxTSS + Sensitivity_maxTSS - 1 # should equal maxTSS
accusum[1,1] <- max(maxTSS, 0.0001)
accusum[1,2] <- max(Specificity_maxTSS, 0.0001)
accusum[1,3] <- max(Sensitivity_maxTSS, 0.0001)
accusum[1,4] <- max(AUC, 0.0001)
accusum[1,5] <- max(maxKappa, 0.0001)
accusum[1,6] <- max(mean(ThresholdMaxTSS), 0.0001)
accusum[1,7] <- AICc_bg
accusum[1,8] <- AICc2
accusum[1,9] <- NumDVars
accusum[1,10] <- nenvars
accusum[1,11] <- EnvVarsUsed
# Save Threshold value and multiply by 1000 for grid calibration
ThresholdK <- (max(ThresholdMaxTSS, 0.0001))*1000
###############################
#endtime <- Sys.time()
#durtime <- endtime - starttime
# Save evaluation statistics to .csv file
#setwd(paste("C:/Users/JLTracy/Documents/R/win-library/3.0/10minClimIntEvalSTB1000", "/", output2, sep=""))
accusum.df <- data.frame(accusum, stringsAsFactors=FALSE)
accusum.df$Model <- ModelType1
#
# Save variable matrix coordinates and Old World evaluation statistics to a vector
GlmnetSubsetEvalL[[k]] <- c(VariableNamesSel, accusum.df$EnvVarsUsed, SubsetVariableNumber, TestDataType, accusum.df$MaxTSS, accusum.df$MaxKappa, accusum.df$AUC, accusum.df$Specificity_maxTSS, accusum.df$Sensitivity_maxTSS, accusum.df$ThresholdMaxTSS, accusum.df$AICc_bg, accusum.df$AICc2, accusum.df$NumDVars, accusum.df$NumEnVars)
#
}
return(GlmnetSubsetEvalL)
}
###############################################################################
t2 <- Sys.time()
difftime(t2, t1, units = "mins")
###
#GlmnetSubsetEvalStats <- GlmnetSubsetEvalL
GlmnetSubsetEvalStats.df <- data.frame(do.call(rbind, GlmnetSubsetEvalStats), stringsAsFactors=FALSE)
head(GlmnetSubsetEvalStats.df)
tail(GlmnetSubsetEvalStats.df)
nrow(GlmnetSubsetEvalStats.df)
ncol(GlmnetSubsetEvalStats.df)
colnames(GlmnetSubsetEvalStats.df) <- c("VarNames", "EnvVarsUsed", "SubsetVariableNumber", "DataType", "TSS", "Kappa", "AUC", "Spec", "Sens", "ThreshMxTSS", "AICc_bg", "AICc2", "NumDVars", "NumEnVars")
# # Omit any rows with NA values
# GlmnetSubsetEvalStats.df <- na.omit(GlmnetSubsetEvalStats.df)
# any(is.na(GlmnetSubsetEvalStats.df))
# # Keep only NumberModSets rows
# GlmnetSubsetEvalStats.df <- GlmnetSubsetEvalStats.df[1:NumberModSets,]
rownames(GlmnetSubsetEvalStats.df) <- c(seq(1:nrow(GlmnetSubsetEvalStats.df)))
#str(GlmnetSubsetEvalStats.df1)
# Save output
# Convert second column and fourth through 11th columns from character to numeric
GlmnetSubsetEvalStats.df[,c(3,5:14)] <- sapply(GlmnetSubsetEvalStats.df[,c(3,5:14)], function(x) as.numeric(as.character(x)))
if(Output==TRUE) {
setwd(OutDirectIn)
Sets <- nrow(GlmnetSubsetEvalStats.df)/2
if(DataSetType!="") {
if(SetRunIDIn!="") {
write.table(GlmnetSubsetEvalStats.df, file=paste0(Species, "GlmnetResults_TrainTest_", TotVars, "TotVars_", SubsetVariableNumber, "Vars_", DataSetType, "_", Sets, "_", SetRunIDIn, ".csv"), sep=",", col.names=NA)
} else {
write.table(GlmnetSubsetEvalStats.df, file=paste0(Species, "GlmnetResults_TrainTest_", TotVars, "TotVars_", SubsetVariableNumber, "Vars_", DataSetType, "_", Sets, ".csv"), sep=",", col.names=NA)
}
} else {
if(SetRunIDIn!="") {
write.table(GlmnetSubsetEvalStats.df, file=paste0(Species, "GlmnetResults_FinalTrainTest_", TotVars, "TotVars_", SubsetVariableNumber, "Vars_", Sets, "_", SetRunIDIn, ".csv"), sep=",", col.names=NA)
} else {
write.table(GlmnetSubsetEvalStats.df, file=paste0(Species, "GlmnetResults_FinalTrainTest_", TotVars, "TotVars_", SubsetVariableNumber, "Vars_", Sets, ".csv"), sep=",", col.names=NA)
}
}
out5 <- paste("\n cvglmnetfolds: ", cvglmnetfoldsin, " alpha: ", alphain, " CVGlmnetRuns: ", CVGlmnetRuns)
cat(out5, file=paste(Species, "GlmnetWrapperResultsSummary_", TotVars, "TotVars_", ".txt", sep=""), sep="\n", append=TRUE)
}
#
stopCluster(cl)
registerDoSEQ()
closeAllConnections()
gc()
# Delete temp directories created for individual Glmnet runs
if(TempDir!="") { unlink(TempDir, recursive=TRUE) }
#
return(GlmnetSubsetEvalStats.df)
}
| /Glmnet/RCode/Functions/GlmnetBinomialQuadOrthMultiSubset_FinalTrainTestEvalAICc_Function.R | no_license | jamesltracy/RSFSA_R | R | false | false | 36,968 | r | GlmnetBinomialQuadOrthMultiSubset.FinalTrainTestEvalAICc <-
function(Species, VariableNamesIn, SubsetVariableNumber, TotPres, VariableSubsetsIn, PresenceDat.df, PseudoabsenceDat.df, BackgroundDat.df, kfoldgrpp, kfoldgrpa, polycoefsall.df, cvglmnetfoldsin, alphain, OutDirectIn, FunctDirectIn..., SetRunIDIn, CVGlmnetRuns, Output, DataSetType) {
setwd(OutDirectIn)
library(foreach)
library(doParallel)
#FunctDirectIn <- "C:/Users/James/Documents/R/win-library/"
# Make sure VariableNames is a data frame
VariableNamesIn <- data.frame(VariableNamesIn, stringsAsFactors=FALSE)
#
RowNames.df <- data.frame(rownames(VariableSubsetsIn), stringsAsFactors=FALSE) # Save row names to use in output
SubsetSize.mat <- matrix(c("Singlets", "Doublets", "Triplets", "Quartets", "Quintets", "Sextets", "Septets", "Octets", "Nonets",
"Dectets", "Undectets", "Duodectets","Tredectets", "Quattuordectets", "Quindectets", "Sexdectets", "Septendectets", "Octodectets", "Novemdectets",
"Vigetets", "Unvigetets", "Duovigetets", "Trevigetets", "Quattuorvigetets", "Quinvigetets", "Sexvigetets", "Septenvigetets", "Octovigetet",
"Novemvigetets", "Trigetets", "Untrigetets", "Duotrigetets", "Tretrigetets", "Quottuortrigetets", "Quintrigetets",
"Sextrigetets", "Septentrigetets", "Octotrigetets", "Novemtrigetets", "Quadragetets", "Unquadragetets", "Duoquadragetets", "Trequadragetets",
"Quattuorquadragetets", "Quinquadragetets", "Sexquadragetets", "Octoquadragetets", "Octoquadragetets", "Novemquadragetets", "Quinquagetets",
"Unquinquagetets", "Duoquinquagetets", "Trequinguagetets", "Quattuorquinquagetets", "Quinquinquagetets",
"Sexquinquagetets", "Septenquinquagetets", "Octoquinquagetets", "Novemquinquagetets", "Sexagetets"), ncol=1, nrow=60, byrow=TRUE, dimnames=list(c
(seq(1:60)), c("Subset")))
SubsetSize.df <- as.data.frame(SubsetSize.mat, stringsAsFactors=FALSE)
Subset <- SubsetSize.df[SubsetVariableNumber,]
if(is.na(Subset)) {
Subset <- ""
}
TotVars <- nrow(VariableNamesIn)
# Set default value for CVGlmnetRuns if not entered (number runs of cv.glmnet)
if(missing(DataSetType)) { DataSetType="" }
if(missing(CVGlmnetRuns)) { CVGlmnetRuns=1 }
#Output=FALSE
if(missing(Output)) { Output=TRUE }
if(missing(SetRunIDIn)) { SetRunIDIn="" }
#
ModelType1 <- paste("GlmnetBinomialQuad", Subset, sep="")
Model <- paste("GlmnetBinomialQuad for", SubsetVariableNumber, "Feature Subset")
tail(VariableSubsetsIn)
#VariableSubsetsIn[2998,]
# Register cluster for running parallel with doParallel pkg
# Use a minimum of one core or the number of available cores minus one
cores <- detectCores() # detect cores running
workcores <- max(1, cores-1)
cl <- makeCluster(workcores) # make cluster of workcores
#cl <- makeCluster(1)
registerDoParallel(cl) # register cluster
getDoParWorkers() # check clusters registered
#registerDoSEQ()
#
if(nrow(VariableSubsetsIn)<2) {
ivec <- 1
} else {
ivec <- c(seq(1, nrow(VariableSubsetsIn), 1))
}
#ivec <- c(1,2,3)
#ivec <- c(1,2,3,4)
#ivec <- c(1)
###
# Set up foreach to output to ten different list elements corresponding to
# the two Old World and New World sets of five evaluation statistics
## Designate final training data and add count column as factor
GlmnetPresTrainData <- PresenceDat.df[kfoldgrpp==3,]
GlmnetPresTrainData$Count <- 1
head(GlmnetPresTrainData)
#str(GlmnetPresTrainData)
GlmnetAbsTrainData <- PseudoabsenceDat.df[kfoldgrpp==3,]
GlmnetAbsTrainData$Count <- 0
##############################################################
t1 <- Sys.time()
#VariablesSubsets <- VariableKeepSubsets
GlmnetSubsetEvalStats <- foreach(i=ivec, .combine='rbind') %dopar% {
library(dismo)
library(raster)
library(glmnet)
#i=1
## Use function to round up from .5 from http://stackoverflow.com/questions/12688717/round-up-from-5-in-r
Round2 <- function(x, n) {
  # Round to n decimal places with halves going away from zero
  # (base round() rounds halves to even). Vectorized over x.
  # Shift the digits to keep left of the decimal point, add 0.5 and
  # truncate toward zero, then undo the scaling and restore the sign.
  shifted <- abs(x) * 10^n
  sign(x) * trunc(shifted + 0.5) / 10^n
}
#
## Define function to predict matrix with Glmnet
GLMBinomialResp_PredictMatrix <-
function(CoefficientsIn, xData.dfIn) {
#CoefficientsIn <- AvgCoefficients.df
#xDataIn <- PresAbsDat.df
# Define function makeglm.R: Creates a "fake" glm object with specific coefficients that you can use for predicting without fitting a model first
# https://gist.github.com/MrFlick/ae299d8f3760f02de6bf
##
# makeglm: build a "fake" fitted glm object whose coefficients are supplied
# directly by the caller, so stats::predict() can be used without fitting a
# model first (source: https://gist.github.com/MrFlick/ae299d8f3760f02de6bf).
# - formula/data define the terms and factor levels used for prediction.
# - named arguments in `...` become coefficients (vector-valued arguments
#   expand to name+element-name); unnamed arguments are taken as the
#   intercept.
makeglm <- function(formula, ..., family, data=NULL) {
dots <- list(...)
out<-list()
# Capture the terms object from the formula (and data, if supplied)
tt <- terms(formula, data=data)
if(!is.null(data)) {
mf <- model.frame(tt, data)
vn <- sapply(attr(tt, "variables")[-1], deparse)
if((yvar <- attr(tt, "response"))>0)
vn <- vn[-yvar]
# Record factor levels per predictor so predict() builds the model
# matrix consistently with the (fake) training data
xlvl <- lapply(data[vn], function(x) if (is.factor(x))
levels(x)
else if (is.character(x))
levels(as.factor(x))
else
NULL)
attr(out, "xlevels") <- xlvl[!vapply(xlvl,is.null,NA)]
attr(tt, "dataClasses") <- sapply(data[vn], stats:::.MFclass)
}
out$terms <- tt
coef <- numeric(0)
# Require at least two `...` arguments with a names attribute present
stopifnot(length(dots)>1 & !is.null(names(dots)))
# Assemble the coefficient vector: named args map to named coefficients;
# an unnamed arg is assigned to "(Intercept)"
for(i in seq_along(dots)) {
if((n<-names(dots)[i]) != "") {
v <- dots[[i]]
if(!is.null(names(v))) {
coef[paste0(n, names(v))] <- v
} else {
stopifnot(length(v)==1)
coef[n] <- v
}
} else {
coef["(Intercept)"] <- dots[[i]]
}
}
out$coefficients <- coef
out$rank <- length(coef)
if (!missing(family)) {
# Accept a family object, a family generator function, or a family name
out$family <- if (class(family) == "family") {
family
} else if (class(family) == "function") {
family()
} else if (class(family) == "character") {
get(family)()
} else {
stop(paste("invalid family class:", class(family)))
}
# Dummy slots so the object passes as a fitted glm for predict()
out$qr <- list(pivot=seq_len(out$rank))
out$deviance <- 1
out$null.deviance <- 1
out$aic <- 1
class(out) <- c("glm","lm")
} else {
class(out) <- "lm"
# NOTE(review): this lm branch references `dd` and `out$mf`, neither of
# which is defined here - it appears unused/broken in this script; the
# glm path above is the one exercised by GLMBinomialResp_PredictMatrix
out$fitted.values <- predict(out, newdata=dd)
out$residuals <- out$mf[attr(tt, "response")] - out$fitted.values
out$df.residual <- nrow(data) - out$rank
out$model <- data
#QR doesn't work
}
out
}
###################################################
setwd(OutDirectIn)
##
# Retrieve variables in regression
VarNamesUsed <- as.vector(rownames(CoefficientsIn)[2:nrow(CoefficientsIn)])
# Make sure input data matches VarNamesUsed
head(xData.dfIn)
nrow(xData.dfIn)
xData.dfIn <- subset(xData.dfIn, select=VarNamesUsed)
# Replace "^" with "P"
colnames(xData.dfIn) <- gsub("\\^2", "P2", colnames(xData.dfIn))
## Prepare to construct regression formula from CoefficientsIn
Coefficients.mat1 <- t(as.matrix(CoefficientsIn))
Coefficients.df <- data.frame(Coefficients.mat1, stringsAsFactors=FALSE)
colnames(Coefficients.df) <- colnames(Coefficients.mat1)
colnames(Coefficients.df) <- gsub("\\^2", "P2", colnames(Coefficients.df))
#str(Coefficients.df)
## Create dummy fake training data for binomial GLM to use in makeglm function
## for assigning coefficients to GLM model
FakeYTrainData <- sample(0:1, nrow(xData.dfIn), replace=T)
# Prepare GLM formula for makeglm
fla <- paste("FakeYTrainData ~", paste(colnames(Coefficients.df[2:ncol(Coefficients.df)]), collapse=" + "))
###########
## Create character representation of coefficients for makeglm function including intercept and variables with coefficients
CoefficientsVector <- c(seq(1:ncol(Coefficients.df)))
for(i in CoefficientsVector) {
#i=2
if(i==1) {
CoefficientsVector[i] <- paste(Coefficients.df[1,1])
} else {
CoefficientsVector[i] <- paste0(colnames(Coefficients.df)[i], "=", Coefficients.df[1,i])
}
}
###########
CoefForm <- paste(CoefficientsVector, collapse=", ")
## Use makeglm function to calculate dummy GLM and assign coefficients
GLMBinomialModel <- eval(parse(text = paste("makeglm(as.formula(fla), family=binomial, data=xData.dfIn,", CoefForm, ")")))
## Use GLM model with assigned coefficients to predict input matrix data values
GLMscore.df <- data.frame("GLMScore" = predict(GLMBinomialModel, newdata=xData.dfIn, type="response"))
head(GLMscore.df)
##
return(GLMscore.df)
}
##############################################################################
###############
# Split VarNames of VariableSubsetsIn into separate variables
#str(VariableSubsets)
VariableNamesSel <- c(unlist(VariableSubsetsIn[i,]))
# Split VarNames of VariableSubsetsIn into separate variables
if(grepl("-",VariableNamesSel)) {
VarNames <- unlist(strsplit(VariableNamesSel, "-"))
} else {
VarNames <- unlist(VariableNamesSel)
}
SubsetVarNum <- length(VarNames)
###
## Loop through different kfold groups with equal prevalence (% presence data)
## and save coefficients and lambda.min
CoefficientsList <- list()
ModelStatsList <- list()
ModelStats.df <- data.frame(t(as.matrix(c(0,0,0,0))))
colnames(ModelStats.df) <- c("ModelNo", "deviance.min", "lambda.min", "AICc2")
######################
ta <- Sys.time()
for(q in 1:CVGlmnetRuns) {
#q=1
# Create kfold groups with equal prevalence (percent presence) for
# presence and absence portions of PresAbsDatTrain.df
NTrainkfoldgrpp <- kfold(GlmnetPresTrainData, cvglmnetfoldsin)
NTrainkfoldgrpa <- kfold(GlmnetAbsTrainData, cvglmnetfoldsin)
# Join presence and absence data and join kfold groups
PresAbsTrainDat.df <- rbind(GlmnetPresTrainData, GlmnetAbsTrainData)
nrow(PresAbsTrainDat.df)
###
NTrainkfoldgrppa <- c(NTrainkfoldgrpp, NTrainkfoldgrpa)
length(NTrainkfoldgrppa)
#
x <- as.matrix(PresAbsTrainDat.df[,c(1:ncol(PresAbsTrainDat.df)-1)])
nrow(x)
head(x)
# Subset by environmental variables to be used in model from VariableSubset
x <- x[, c(VarNames), drop=FALSE]
x <- apply(x, 2, as.numeric)
head(x)
#str(x1)
################
# Specify model training data
xTrainData.df <- x
head(xTrainData.df)
nrow(xTrainData.df)
######
## Add orthogonalized quadratic versions for main effect variables (no interaction)
xTrainDataQ.df <- cbind(xTrainData.df, xTrainData.df)
colcount <- 1
for(n in 1:ncol(xTrainData.df)) {
#n=3
# Use poly function to obtain orthoganolized main effect and squared quadratic values
# Keep only columns for poly function coefficients with variable name
polyvar <- colnames(xTrainData.df)[n]
polycoefs.df <- polycoefsall.df[ , grepl(polyvar, names(polycoefsall.df))]
# Transfer coefficients to properly formatted list for poly function
polycoeflist <- list()
polycoeflist$alpha <- polycoefs.df[1:2,1]
polycoeflist$norm2 <- polycoefs.df[,2]
###
xTrainDataQ.df[,colcount:(colcount+1)] <- poly(xTrainData.df[,n], 2, coefs=polycoeflist)
head(xTrainDataQ.df)
colnames(xTrainDataQ.df)[colcount:(colcount+1)] <- c(colnames(xTrainData.df)[n], paste0(colnames(xTrainData.df)[n], "P2"))
colcount <- colcount + 2
}
head(xTrainDataQ.df)
xTrainDataQ <- as.matrix(xTrainDataQ.df)
head(xTrainDataQ)
nrow(xTrainDataQ)
#####################
# Identify response variable for training data
yTrainData <- PresAbsTrainDat.df$Count
length(yTrainData)
#
# Run cv glmnet binomial model for response probability of presence using cross validation
# to select the model coefficients with the minimum deviance (default)
#
cv_glmnetBinomialModel = cv.glmnet(xTrainDataQ, yTrainData, foldid=NTrainkfoldgrppa, family = "binomial", nfolds = cvglmnetfoldsin, alpha=alphain)
# Retrieve coefficients from selected Binomial model
s_in = "lambda.min"
Coefficients.df1 <- data.frame(as.matrix(coef(cv_glmnetBinomialModel, s=s_in)), stringsAsFactors=FALSE)
colnames(Coefficients.df1) <- "Coefficients"
#str(cv_glmnetBinomialModel)
lambda.min <- cv_glmnetBinomialModel$lambda.min
deviance.min <- min(cv_glmnetBinomialModel$cvm)
####################################################
###
# Caculate AICc using formula from Johnny Heineken at https://stats.stackexchange.com/questions/25817/is-it-possible-to-calculate-aic-and-bic-for-lasso-regression-models
n <- cv_glmnetBinomialModel$glmnet.fit$nobs
## cv.glmnet deviance is divided by number of observation (see https://stackoverflow.com/questions/43468665/poisson-deviance-glmnet), so need to multiply by n
deviance.fit <- n*(cv_glmnetBinomialModel$cvm[match(cv_glmnetBinomialModel$lambda.min, cv_glmnetBinomialModel$lambda)])
fit.nulldev <- cv_glmnetBinomialModel$glmnet.fit$nulldev
k <- cv_glmnetBinomialModel$glmnet.fit$df[match(cv_glmnetBinomialModel$lambda.min, cv_glmnetBinomialModel$lambda)]
tLL <- fit.nulldev - deviance.fit
AICc2 <- -tLL+2*k+2*k*(k+1)/(n-k-1)
###
ModelStats.df[1,1] <- q
ModelStats.df[1,2] <- deviance.min
ModelStats.df[1,3] <- lambda.min
ModelStats.df[1,4] <- AICc2
CoefficientsList[[q]] <- Coefficients.df1
ModelStatsList[[q]] <- ModelStats.df
}
tb <- Sys.time()
difftime(tb, ta, units = "mins")
#############################
#
ModelStatsResults.df <- do.call(rbind,ModelStatsList)
ModelStatsResults.df <- ModelStatsResults.df[order(ModelStatsResults.df$deviance.min),]
nrow(ModelStatsResults.df)
CoefficientSets.df <- do.call(cbind, CoefficientsList)
AvgCoefficients.df1 <- data.frame(apply(CoefficientSets.df,1,mean))
colnames(AvgCoefficients.df1) <- "Coefficients"
# Retrieve variable names
VarNamesGLM <- data.frame(rownames(AvgCoefficients.df1), stringsAsFactors=FALSE)
# Join variable names with coefficients
AvgCoefficients.df <- cbind(VarNamesGLM, AvgCoefficients.df1)
colnames(AvgCoefficients.df) <- c("VarNames", "Coefficients")
# Remove Variables with coefficient of zero
AvgCoefficients.df1 <- AvgCoefficients.df[!AvgCoefficients.df$Coefficients==0,]
# Remove VarNames column
AvgCoefficients.df <- AvgCoefficients.df1[, 2, drop=FALSE]
#
AICc2 <- mean(ModelStatsResults.df$AICc2)
## If there are no coefficients return NA for this iteration
# GlmnetSubsetEvalL <- list()
# if(nrow(AvgCoefficients.df)==1) {
# k <- 2
# GlmnetSubsetEvalL[[k]] <- c(rep(NA,14))
# return(GlmnetSubsetEvalL)
# }
#####################################################
#Save coefficients
#setwd(OutDirectIn)
#write.table(AvgCoefficients.df, file=paste("Coefficients", ModelType1, "_", "TrainSetEnsAvg", KeepModels, "of", CVGlmnetRuns, "_", SubsetVariableNumber, "Var_", Loop, ".csv", sep=""), sep=",", col.names=NA)
#
VarNamesUsed <- as.vector(rownames(AvgCoefficients.df)[2:nrow(AvgCoefficients.df)])
VarsUsed <- gsub("P2", "", VarNamesUsed)
VarsUsed <- unique(VarsUsed)
nenvars <- length(VarsUsed)
if(nenvars > 1) {
EnvVarsUsed <- paste0(VarsUsed, collapse="-")
} else if(nenvars ==1) {
EnvVarsUsed <- VarsUsed
} else {
EnvVarsUsed <- " "
}
##
#checkModel <- readRDS("cv_glmnetBinomialQuadModel.rds")
#coef(checkModel, s= "lambda.min")
#VariableSubsets[2998,]
#####################################
## Calculate AICc_bg with point values
# Obtain number of parameters in Glmnet model
nparams <- (nrow(AvgCoefficients.df) - 1)
#
## From ENMeval Package documentation: AICc is the Akaike Information Criterion corrected for small
## sample sizes calculated as: (2 * K - 2 * logLikelihood) + (2 * K) * (K + 1)=(n - K - 1)
## where K is the number of parameters in the model (i.e., number of non-zero parameters in Glmnet
## lambda file) and n is the number of occurrence localities.
## The logLikelihood is sum(log(vals/total))
## vals is vector of Glmnet raw values at occurence localities
## total is the sum of Glmnet raw values across the entire study area
##
head(PresAbsTrainDat.df)
nrow(PresAbsTrainDat.df)
######
xPresTrainData.df1 <- PresAbsTrainDat.df[1:nrow(GlmnetPresTrainData),]
nrow(xPresTrainData.df1)
# Subset PresTestData by VarNames
xPresData.dfIn <- xPresTrainData.df1[, c(VarsUsed), drop=FALSE]
head(xPresData.dfIn)
nrow(xPresData.dfIn)
#####################
## Add orthogonalized quadratic versions for main effect variables (no interaction)
##
xPresDataQ.dfIn <- cbind(xPresData.dfIn, xPresData.dfIn)
colcount <- 1
for(m in 1:ncol(xPresData.dfIn)) {
#m=3
# Use poly function to obtain orthoganolized main effect and squared quadratic values
# Keep only columns for poly function coefficients with variable name
polyvar <- colnames(xPresData.dfIn)[m]
polycoefs.df <- polycoefsall.df[ , grepl(polyvar, names(polycoefsall.df))]
# Transfer coefficients to properly formatted list for poly function
polycoeflist <- list()
polycoeflist$alpha <- polycoefs.df[1:2,1]
polycoeflist$norm2 <- polycoefs.df[,2]
###
xPresDataQ.dfIn[,colcount:(colcount+1)] <- poly(xPresData.dfIn[,m], 2, coefs=polycoeflist)
head(xPresDataQ.dfIn)
colnames(xPresDataQ.dfIn)[colcount:(colcount+1)] <- c(colnames(xPresData.dfIn)[m], paste0(colnames(xPresData.dfIn)[m], "P2"))
colcount <- colcount + 2
}
head(xPresDataQ.dfIn)
# Select only terms used in ensemble model
xDataQ.dfIn <- xPresDataQ.dfIn[, c(VarNamesUsed), drop=FALSE]
head(xDataQ.dfIn)
#####################
# Obtain values for training presence data output by glmnet run
CoefficientsIn <- AvgCoefficients.df
presvals.df <- GLMBinomialResp_PredictMatrix(CoefficientsIn, xDataQ.dfIn)
#str(xTrainData)
# Obtain values for background data output by Glmnet run
# Select only terms used in ensemble model
xData.dfIn <- BackgroundDat.df[, c(VarsUsed), drop=FALSE]
head(xData.dfIn)
#####################
## Add orthogonalized quadratic versions for main effect variables (no interaction)
##
xDataQ.dfIn <- cbind(xData.dfIn, xData.dfIn)
colcount <- 1
for(m in 1:ncol(xData.dfIn)) {
#m=3
# Use poly function to obtain orthoganolized main effect and squared quadratic values
# Keep only columns for poly function coefficients with variable name
polyvar <- colnames(xData.dfIn)[m]
polycoefs.df <- polycoefsall.df[ , grepl(polyvar, names(polycoefsall.df))]
# Transfer coefficients to properly formatted list for poly function
polycoeflist <- list()
polycoeflist$alpha <- polycoefs.df[1:2,1]
polycoeflist$norm2 <- polycoefs.df[,2]
###
xDataQ.dfIn[,colcount:(colcount+1)] <- poly(xData.dfIn[,m], 2, coefs=polycoeflist)
head(xDataQ.dfIn)
colnames(xDataQ.dfIn)[colcount:(colcount+1)] <- c(colnames(xData.dfIn)[m], paste0(colnames(xData.dfIn)[m], "P2"))
colcount <- colcount + 2
}
head(xDataQ.dfIn)
# Select only terms used in ensemble model
xDataQ.dfIn <- xDataQ.dfIn[, c(VarNamesUsed), drop=FALSE]
head(xDataQ.dfIn)
#####################
# Obtain values for training presence/absence data output by glmnet run
CoefficientsIn <- AvgCoefficients.df
backgroundvals.df <- GLMBinomialResp_PredictMatrix(CoefficientsIn, xDataQ.dfIn)
head(backgroundvals.df)
## Join together presence and background GLMscores
PresBckgrndData.df <- rbind(presvals.df, backgroundvals.df)
# Change any values of zero to 0.001
PresBckgrndData.df[,1][PresBckgrndData.df[,1]==0] <- 0.001
## Sum all values
SumVal <- apply(PresBckgrndData.df,2,sum)
## Divide all values by SumVal
SumValDivFunc <- function(x) {x/SumVal}
PresBckgrndDataRAW.df <- data.frame(apply(PresBckgrndData.df,2,SumValDivFunc))
head(PresBckgrndDataRAW.df)
nrow(PresBckgrndDataRAW.df)
# Keep values for presence data as vector
vals <- PresBckgrndDataRAW.df[1:nrow(GlmnetPresTrainData),1]
head(vals)
n <- length(vals) # number of occurence localities
# Keep values for background data as vector
backgroundvals <- PresBckgrndDataRAW.df[(nrow(GlmnetPresTrainData)+1):nrow(PresBckgrndDataRAW.df),1]
length(backgroundvals)
# total is sum of GLM raw values across entire study area, includes background and occurrence localities
# Calculate sum of all values
totalocc <- sum(vals) # sum from occurrence localities
totalbg <- sum(backgroundvals) # sum from background localities
total <- totalocc + totalbg # grand total sum
#
logLikelihood <- sum(log(vals/total))
K <- nparams
AICc_bg <- -1*((2*K - 2*logLikelihood) + (2*K)*(K+1)/(n-K-1))
NumDVars <- K
###
#############################################################################
########### Evaluate model using training and test data
GlmnetSubsetEvalL <- list()
TestDataTypes <- c("FinalTrain", "FinalTest")
for(TestDataType in TestDataTypes) {
#TestDataType="FinalTrain"
if(TestDataType=="FinalTrain") {
k <- 3
} else {
k <- 4
}
# Specify model testing data
GlmnetPresTestData <- PresenceDat.df[kfoldgrpp == k, ]
head(GlmnetPresTestData)
nrow(GlmnetPresTestData)
###
GlmnetAbsTestData <- PseudoabsenceDat.df[kfoldgrpa == k, ]
########################
### Predict model values for test matrix to get values for test presence and absence points
## First presence points
setwd(OutDirectIn)
head(GlmnetPresTestData)
# Subset PresTestData by VarNames
xData.dfIn <- GlmnetPresTestData[, c(VarsUsed), drop=FALSE]
head(xData.dfIn)
#####################
## Add orthogonalized quadratic versions for main effect variables (no interaction)
##
xDataQ.dfIn <- cbind(xData.dfIn, xData.dfIn)
colcount <- 1
for(m in 1:ncol(xData.dfIn)) {
#m=3
# Use poly function to obtain orthoganolized main effect and squared quadratic values
# Keep only columns for poly function coefficients with variable name
polyvar <- colnames(xData.dfIn)[m]
polycoefs.df <- polycoefsall.df[ , grepl(polyvar, names(polycoefsall.df))]
# Transfer coefficients to properly formatted list for poly function
polycoeflist <- list()
polycoeflist$alpha <- polycoefs.df[1:2,1]
polycoeflist$norm2 <- polycoefs.df[,2]
###
xDataQ.dfIn[,colcount:(colcount+1)] <- poly(xData.dfIn[,m], 2, coefs=polycoeflist)
head(xDataQ.dfIn)
colnames(xDataQ.dfIn)[colcount:(colcount+1)] <- c(colnames(xData.dfIn)[m], paste0(colnames(xData.dfIn)[m], "P2"))
colcount <- colcount + 2
}
head(xDataQ.dfIn)
# Select only terms used in ensemble model
xDataQ.dfIn <- xDataQ.dfIn[, c(VarNamesUsed), drop=FALSE]
head(xDataQ.dfIn)
#####################
CoefficientsIn <- AvgCoefficients.df
prespred.df <- GLMBinomialResp_PredictMatrix(CoefficientsIn, xDataQ.dfIn)
colnames(prespred.df) <- "GlmnetScore"
head(prespred.df)
## Then absence points
head(GlmnetAbsTestData)
# Subset AbsTestData by VarNames
xData.dfIn <- GlmnetAbsTestData[, c(VarsUsed), drop=FALSE]
head(xData.dfIn)
#
#####################
## Add orthogonalized quadratic versions for main effect variables (no interaction)
##
xDataQ.dfIn <- cbind(xData.dfIn, xData.dfIn)
colcount <- 1
for(m in 1:ncol(xData.dfIn)) {
#m=3
# Use poly function to obtain orthoganolized main effect and squared quadratic values
# Keep only columns for poly function coefficients with variable name
polyvar <- colnames(xData.dfIn)[m]
polycoefs.df <- polycoefsall.df[ , grepl(polyvar, names(polycoefsall.df))]
# Transfer coefficients to properly formatted list for poly function
polycoeflist <- list()
polycoeflist$alpha <- polycoefs.df[1:2,1]
polycoeflist$norm2 <- polycoefs.df[,2]
###
xDataQ.dfIn[,colcount:(colcount+1)] <- poly(xData.dfIn[,m], 2, coefs=polycoeflist)
head(xDataQ.dfIn)
colnames(xDataQ.dfIn)[colcount:(colcount+1)] <- c(colnames(xData.dfIn)[m], paste0(colnames(xData.dfIn)[m], "P2"))
colcount <- colcount + 2
}
head(xDataQ.dfIn)
# Select only terms used in ensemble model
xDataQ.dfIn <- xDataQ.dfIn[, c(VarNamesUsed), drop=FALSE]
head(xDataQ.dfIn)
#####################
abspred.df <- GLMBinomialResp_PredictMatrix(CoefficientsIn, xDataQ.dfIn)
colnames(abspred.df) <- "GlmnetScore"
#############################################################################################
# This section evaluates the Glmnet model using the PresenceAbsence package
#############################################################################################
#### Create a dataset with model predictions for presence and absence points
# Use extracted prediction values for presence and absence points for each of three
# models previously calculated in loop
#
library(gtools)
## Create directory of output for class pair run
#setwd(paste("C:/Users/JLTracy/Documents/R/win-library/3.0/10minClimIntEvalSTB1000", "/", output2, sep=""))
# For presence data, assign a column of "1" under the name "OBSERVED" to indicate presence data
# and assign the model results a name "GlmnetScoreN" where N is the name of the rep
# Also assign a column "id" for the row numbers to use in merging the data frames later
presa.df <- data.frame(c(rep(1, nrow(prespred.df))))
names(prespred.df) <- c("Glmnet")
names(presa.df) <- c("OBSERVED")
pres.df <- data.frame(cbind(id=1:nrow(presa.df), presa.df, prespred.df))
nrow(pres.df)
# Repeat above process with absence data, but assign "OBSERVED" a value of 0
absa.df <- data.frame(c(rep(0, nrow(abspred.df))))
names(abspred.df) <- c("Glmnet")
names(absa.df) <- c("OBSERVED")
abs.df <- data.frame(cbind(id=1:nrow(absa.df), absa.df, abspred.df))
# For each model output, merge presence and absence data using "id' column as guide when all=TRUE
# NOTE: PresenceAbsence package cannot handle several models at one time if the sample sizes differ
# so have to analyze each model output separately
presabspred <- rbind(pres.df, abs.df)
tail(presabspred)
head(presabspred)
# Drop the id column used in merging for each dataset
presabspred$id <- NULL
# Make a column of data with the species name with same number of rows as data from each model
SPECIES <- data.frame(c(rep(Species, nrow(presabspred))))
names(SPECIES) <- c("SPECIES")
# Make final dataset SPDATA by putting together SPECIES with extracted environmental data.
SPDATA <- data.frame(SPECIES, presabspred)
head(SPDATA)
#SPDATA[100:160,]
################################################################################
### Run this block of code to evaluate model results with PresenceAbsence package
################################################################################
library(PresenceAbsence)
#setwd(paste("C:/Users/JLTracy/Documents/R/win-library/3.0/10minClimIntEvalSTB1000", "/", output2, sep=""))
#starttime <- Sys.time()
#### FOR OLD WORLD DATA EVALUATION STATISTICS
### Define variables for later use.
accurun <- list()
accusum <- matrix(data=NA, ncol=11, nrow=1, byrow=TRUE, dimnames=list(NULL, c("MaxTSS", "Specificity_maxTSS", "Sensitivity_maxTSS", "AUC", "MaxKappa", "ThresholdMaxTSS", "AICc_bg", "AICc2", "NumDVars", "NumEnVars", "EnvVarsUsed")))
species <- as.character(unique(SPDATA$SPECIES))
model.names <- as.character(names(SPDATA)[-c(1, 2)])
N.models <- ncol(SPDATA) - 2
N.sp <- length(species)
N.obs <- length(SPDATA$SPECIES[SPDATA$SPECIES == species[1]])
Obs.prev <- table(SPDATA$SPECIES, SPDATA$OBSERVED)[, 2]/N.obs
Obs.prev <- Round2(Obs.prev, 2)
### Mainly just run this code
graphics.off()
sp <- 1
# Read in dataset for loop
DATA <- SPDATA[SPDATA$SPECIES == species[sp], ]
head(DATA)
#
# To assess accuracy per threshold, use limited threshold available for
# model based upon number of environmental layers in model
# ("NumGrids")
#NumGrids <- max(40, SubsetVarNum)
#PossThresholds <- seq(1/NumGrids,1,length=NumGrids)
PossThresholds <- 100
#accu <- data.frame(presence.absence.accuracy(SPDATA, which.model = 1, threshold = PossThresholds, st.dev=FALSE))
# accu <- presence.absence.accuracy(DATA, which.model = 1, threshold = c(0.90, 0.91, 0.92, 0.93, 0.94, 0.95, 0.96, 0.975, 0.98, 0.99, 0.999999))
#accu <- presence.absence.accuracy(DATA, which.model = 1, threshold = 100, st.dev=FALSE)
# print(paste("Species:", species[sp], "Model:", model.names)) not used
accu <- data.frame(presence.absence.accuracy(DATA, which.model = 1, threshold = 100, st.dev=FALSE))
# print(paste("Species:", species[sp], "Model:", model.names)) not used
head(accu)
maxSSS <- data.frame(accu$sensitivity + accu$specificity)
names(maxSSS) <- c("maxSSS")
head(maxSSS)
TSS <- data.frame(accu$sensitivity + accu$specificity - 1)
names(TSS) <- c("TSS")
accurun <- data.frame(accu, maxSSS, TSS)
head(accurun)
accurun$Conditions <- paste("Glmnet", Subset, sep="")
maxKappa <- max(accurun$Kappa)
maxTSS <- max(accurun$TSS)
AUC <- max(accurun$AUC)
# Find and average thresholds at TSS = maxTSS. In the case of tied optimal
# thresholds, we select the mean threshold producing maximum TSS following
# (Freeman and Moisen 2008). But, in the case of discrete thresholds as found
# in envelope models, if the mean optimal threshold does not represent an
# actual discrete threshold, we select the nearest discrete threshold to the
# mean among 3 or more thresholds, or the smaller of two adjacent discrete thresholds.
ThresholdsMaxTSS <- accurun$threshold[which(accurun$TSS == maxTSS)]
ThresholdMaxTSS <- mean(ThresholdsMaxTSS)
#ThresholdMaxTSSM <- mean(ThresholdsMaxTSS)
## Following commented code for envelope score
#if (length(ThresholdsMaxTSS) < 3) {
#ThresholdMaxTSS <- min(ThresholdsMaxTSS)
#} else { ThresholdMaxTSS <- PossThresholds[which(abs(PossThresholds - ThresholdMaxTSSM)== min(abs(PossThresholds - ThresholdMaxTSSM)))]
#}
# Calculate specificity and sensitivity at maxTSS
Specificity_maxTSS <- accurun$specificity[which(accurun$TSS == maxTSS)]
Specificity_maxTSS <- mean(Specificity_maxTSS)
Sensitivity_maxTSS <- accurun$sensitivity[which(accurun$TSS == maxTSS)]
Sensitivity_maxTSS <- mean(Sensitivity_maxTSS)
#CheckTSS <- Specificity_maxTSS + Sensitivity_maxTSS - 1 # should equal maxTSS
accusum[1,1] <- max(maxTSS, 0.0001)
accusum[1,2] <- max(Specificity_maxTSS, 0.0001)
accusum[1,3] <- max(Sensitivity_maxTSS, 0.0001)
accusum[1,4] <- max(AUC, 0.0001)
accusum[1,5] <- max(maxKappa, 0.0001)
accusum[1,6] <- max(mean(ThresholdMaxTSS), 0.0001)
accusum[1,7] <- AICc_bg
accusum[1,8] <- AICc2
accusum[1,9] <- NumDVars
accusum[1,10] <- nenvars
accusum[1,11] <- EnvVarsUsed
# Save Threshold value and multiply by 1000 for grid calibration
ThresholdK <- (max(ThresholdMaxTSS, 0.0001))*1000
###############################
#endtime <- Sys.time()
#durtime <- endtime - starttime
# Save evaluation statistics to .csv file
#setwd(paste("C:/Users/JLTracy/Documents/R/win-library/3.0/10minClimIntEvalSTB1000", "/", output2, sep=""))
accusum.df <- data.frame(accusum, stringsAsFactors=FALSE)
accusum.df$Model <- ModelType1
#
# Save variable matrix coordinates and Old World evaluation statistics to a vector
GlmnetSubsetEvalL[[k]] <- c(VariableNamesSel, accusum.df$EnvVarsUsed, SubsetVariableNumber, TestDataType, accusum.df$MaxTSS, accusum.df$MaxKappa, accusum.df$AUC, accusum.df$Specificity_maxTSS, accusum.df$Sensitivity_maxTSS, accusum.df$ThresholdMaxTSS, accusum.df$AICc_bg, accusum.df$AICc2, accusum.df$NumDVars, accusum.df$NumEnVars)
#
}
return(GlmnetSubsetEvalL)
}
###############################################################################
t2 <- Sys.time()
difftime(t2, t1, units = "mins")
###
#GlmnetSubsetEvalStats <- GlmnetSubsetEvalL
GlmnetSubsetEvalStats.df <- data.frame(do.call(rbind, GlmnetSubsetEvalStats), stringsAsFactors=FALSE)
head(GlmnetSubsetEvalStats.df)
tail(GlmnetSubsetEvalStats.df)
nrow(GlmnetSubsetEvalStats.df)
ncol(GlmnetSubsetEvalStats.df)
colnames(GlmnetSubsetEvalStats.df) <- c("VarNames", "EnvVarsUsed", "SubsetVariableNumber", "DataType", "TSS", "Kappa", "AUC", "Spec", "Sens", "ThreshMxTSS", "AICc_bg", "AICc2", "NumDVars", "NumEnVars")
# # Omit any rows with NA values
# GlmnetSubsetEvalStats.df <- na.omit(GlmnetSubsetEvalStats.df)
# any(is.na(GlmnetSubsetEvalStats.df))
# # Keep only NumberModSets rows
# GlmnetSubsetEvalStats.df <- GlmnetSubsetEvalStats.df[1:NumberModSets,]
rownames(GlmnetSubsetEvalStats.df) <- c(seq(1:nrow(GlmnetSubsetEvalStats.df)))
#str(GlmnetSubsetEvalStats.df1)
# Save output
# Convert second column and fourth through 11th columns from character to numeric
GlmnetSubsetEvalStats.df[,c(3,5:14)] <- sapply(GlmnetSubsetEvalStats.df[,c(3,5:14)], function(x) as.numeric(as.character(x)))
if(Output==TRUE) {
setwd(OutDirectIn)
Sets <- nrow(GlmnetSubsetEvalStats.df)/2
if(DataSetType!="") {
if(SetRunIDIn!="") {
write.table(GlmnetSubsetEvalStats.df, file=paste0(Species, "GlmnetResults_TrainTest_", TotVars, "TotVars_", SubsetVariableNumber, "Vars_", DataSetType, "_", Sets, "_", SetRunIDIn, ".csv"), sep=",", col.names=NA)
} else {
write.table(GlmnetSubsetEvalStats.df, file=paste0(Species, "GlmnetResults_TrainTest_", TotVars, "TotVars_", SubsetVariableNumber, "Vars_", DataSetType, "_", Sets, ".csv"), sep=",", col.names=NA)
}
} else {
if(SetRunIDIn!="") {
write.table(GlmnetSubsetEvalStats.df, file=paste0(Species, "GlmnetResults_FinalTrainTest_", TotVars, "TotVars_", SubsetVariableNumber, "Vars_", Sets, "_", SetRunIDIn, ".csv"), sep=",", col.names=NA)
} else {
write.table(GlmnetSubsetEvalStats.df, file=paste0(Species, "GlmnetResults_FinalTrainTest_", TotVars, "TotVars_", SubsetVariableNumber, "Vars_", Sets, ".csv"), sep=",", col.names=NA)
}
}
out5 <- paste("\n cvglmnetfolds: ", cvglmnetfoldsin, " alpha: ", alphain, " CVGlmnetRuns: ", CVGlmnetRuns)
cat(out5, file=paste(Species, "GlmnetWrapperResultsSummary_", TotVars, "TotVars_", ".txt", sep=""), sep="\n", append=TRUE)
}
#
stopCluster(cl)
registerDoSEQ()
closeAllConnections()
gc()
# Delete temp directories created for individual Glmnet runs
if(TempDir!="") { unlink(TempDir, recursive=TRUE) }
#
return(GlmnetSubsetEvalStats.df)
}
|
#------------------------------------------------------------------------------#
#                                RemoveEmailCmd                                #
#------------------------------------------------------------------------------#
#' RemoveEmailCmd
#'
#' \code{RemoveEmailCmd} Command for the RemoveEmail class.
#'
#' Command-pattern wrapper that encapsulates execution of a
#' \code{RemoveEmail} object: \code{execute(x)} constructs
#' \code{RemoveEmail$new(x)}, runs its \code{execute()} method, and returns
#' the result.
#'
#' @usage RemoveEmailCmd$new()
#'
#' @template textStudioParams
#' @template textStudioMethods
#' @template textStudioClasses
#' @template textStudioDesign
#'
#' @docType class
#' @author John James, \email{jjames@@dataScienceSalon.org}
#' @family TextStudio Classes
#' @export
RemoveEmailCmd <- R6::R6Class(
classname = "RemoveEmailCmd",
lock_objects = FALSE,
lock_class = FALSE,
inherit = TextStudio0,
public = list(
# Register this class's dependencies through the TextStudio0 base class,
# then return the object invisibly so $new() chains cleanly.
initialize = function() {
private$loadDependencies(name = 'RemoveEmailCmd')
invisible(self)
},
# Apply the RemoveEmail operation to x and return the transformed object.
execute = function(x) {
x <- RemoveEmail$new(x)$execute()
return(x)
}
)
)
| /R/RemoveEmailCmd.R | no_license | john-james-ai/NLPStudio-2.0 | R | false | false | 1,059 | r | #------------------------------------------------------------------------------#
# RemoveEmailCmd #
#------------------------------------------------------------------------------#
#' RemoveEmailCmd
#'
#' \code{RemoveEmailCmd} Command for the RemoveEmail class.
#'
#' Class that encapsulates the command to execute an object of the RemoveEmail
#' class
#'
#' @usage RemoveEmailCmd$new()
#'
#' @template textStudioParams
#' @template textStudioMethods
#' @template textStudioClasses
#' @template textStudioDesign
#'
#' @docType class
#' @author John James, \email{jjames@@dataScienceSalon.org}
#' @family TextStudio Classes
#' @export
# Command object that encapsulates execution of the RemoveEmail operation
# on a text object `x` (TextStudio command pattern).
RemoveEmailCmd <- R6::R6Class(
classname = "RemoveEmailCmd",
lock_objects = FALSE,
lock_class = FALSE,
inherit = TextStudio0,
public = list(
# Registers this command's dependencies with the framework;
# returns self invisibly so construction chains cleanly.
initialize = function() {
private$loadDependencies(name = 'RemoveEmailCmd')
invisible(self)
},
# Runs RemoveEmail on `x` and returns the transformed object.
execute = function(x) {
x <- RemoveEmail$new(x)$execute()
return(x)
}
)
)
|
## cerenkov_ml.R -- master script for tuning, training, and testing classifiers using R/parallel;
## it can run on a single multicore machine or using a PSOCKcluster of EC2 instances
##
## Author: Stephen Ramsey
##
## Packages required by this script:
## PRROC
##
## Packages conditionally required by this script:
## xgboost, ranger, dismo, Matrix, aws.ec2, pbapply
##
## Note: do not use this program with Ranger version 0.6.6 (stability issues); use Ranger 0.6.0 only
##
## If you want to use partial least squares classification for feature reduction, you also need:
## pls, methods
## How to run this script in EC2 using a single m4.16xlarge instance:
## (0) run a "m4.16xlarge" EC2 instance with the "CERENKOV_CLUSTER8" AMI
## (1) set "g_par$flag_create_ec2_instances = FALSE" below and "g_par$notify_by_text_msg = TRUE"
## (2) set "g_par$nthreads_per_process = 1"
## (3) scp "cerenkov_ml.R", "cerenkov_ml_base_functions.R", and the various required ".Rdata" files into the instance
## (4) ssh into the instance
## (5) bash$ nohup Rscript cerenkov_ml.R &
## (6) bash$ tail -f nohup.out
## How to run this script in EC2 using multiple worker nodes:
## (0) make sure you do not have any EC2 instances running (this script will use up your quota of 20 instances)
## (1) launch a m4.2xlarge instance using the "CERENKOV_CLUSTER8" AMI
## (2) assign the instance to security group PSOCKcluster
## (3) assign the instance to subnet CERENKOV_CLUSTER
## (4) optionally enable termination protection
## (5) under "add tags", name the instance "HEAD NODE"
## (6) go to step (3) in the "How to run this script in EC2 using a single m4.16xlarge instance" section above (where "instance" means the head node instance), and continue through step (6)
g_args <- commandArgs(trailingOnly=TRUE)
## ============================ define global parameters =============================
## Global run configuration for this analysis.  Read throughout the sourced
## cerenkov_ml_* scripts; edit here rather than downstream.
g_par <- list(
num_folds_cross_validation = 5, ## we are standardizing on 5-fold CV
num_cv_replications = 200, ## set to anywhere from 1--200, typically
flag_create_fork_cluster = TRUE, ## TRUE or FALSE
override_num_fork_processes = 64, ## for EC2, set to 64; for my MBP, set to 8
show_progress_bar = FALSE,
notify_by_text_msg = TRUE,
flag_locus_sampling = TRUE, ## set to false if you want SNP-level sampling
flag_xgb_importance = FALSE, ## if you set this to TRUE, make sure you set num_cv_replications=1 and num_folds_cross_validation=1
## seed comes from the first command-line argument when supplied, else 1337
random_number_seed = if (is.na(g_args[1])) 1337 else as.integer(g_args[1]),
nthreads_per_process = 1,
flag_randomize_classifier_order = FALSE,
flag_create_ec2_instances = FALSE, ## don't set this to true if you set "flag_create_fork_cluster" to true
analysis_label = "compare_with_gwava",
output_file_base_name = "cerenkov_ml_results",
parallel_use_load_balancing = TRUE,
debug_file_parallel="" ## set to empty string for production run (makes error text go to stdout)
)
## SNS topic used for job-status text notifications; NULL disables notification
g_par$aws_sns_topic_arn <- if (g_par$notify_by_text_msg) { "arn:aws:sns:us-west-2:315280700912:ramseylab" } else { NULL }
source("cerenkov_aws_functions.R") ## load functions used for AWS
source("cerenkov_ml_base_functions.R") ## load functions used for machine-learning
## ============================== load OSU feature data; check for problems with feature data =================================
print("loading OSU data")
load(file="features_cerenkov2_osu18.Rdata")
stopifnot(g_feature_matrix_is_OK(g_feat_cerenkov2_df))
library(Matrix)
g_feat_cerenkov2_matrix_sparse <- sparse.model.matrix(label ~ .-1, data=g_feat_cerenkov2_df)
g_snp_names <- rownames(g_feat_cerenkov2_df)
g_label_vec <- as.integer(as.character(g_feat_cerenkov2_df$label))
## ============================== load feature data; check for problems with feature data =================================
print("loading GWAVA data")
load(file="features_gwava_osu18.Rdata") ## creates an R object called "g_feat_gwava_df"
stopifnot(g_feature_matrix_is_OK(g_feat_gwava_df))
stopifnot(g_snp_names == rownames(g_feat_gwava_df))
stopifnot(g_feat_cerenkov2_df$label == g_feat_gwava_df$label)
g_feat_gwava_matrix_sparse <- sparse.model.matrix(label ~ .-1, data=g_feat_gwava_df)
## build a list of the feature matrices that we will need
g_classifier_feature_matrices_list <- list(
feat_cerenkov2_sparsematrix=g_feat_cerenkov2_matrix_sparse,
feat_GWAVA_sparsematrix=g_feat_gwava_matrix_sparse,
feat_GWAVA_df=g_feat_gwava_df[, which(names(g_feat_gwava_df) != "label")]
)
## ============================== run invariant setup code =================================
source("cerenkov_ml_run_setup.R")
## ============================== make closures for classifiers =================================
g_classifier_function_xgboost <- g_make_classifier_function_xgboost(p_nthread=g_par$nthreads_per_process,
g_get_perf_results,
p_feature_importance_type=NULL,
p_make_objective_function=function(...){"binary:logistic"},
p_case_group_ids=g_snp_locus_ids)
g_classifier_function_ranger <- g_make_classifier_function_ranger(p_nthread=g_par$nthreads_per_process,
p_get_perf_results=g_get_perf_results,
p_feature_importance_type="impurity")
g_classifier_functions_list <- list(
XGB=g_classifier_function_xgboost,
ranger=g_classifier_function_ranger
)
## ============================== assemble final list of feature matrices =================================
## free up memory
rm(g_feat_cerenkov2_df)
rm(g_feat_cerenkov2_matrix_sparse)
rm(g_feat_gwava_df)
rm(g_feat_gwava_matrix_sparse)
## ============================== make hyperparameter lists =================================
## ------------------ xgboost hyperparameter lists --------------
g_hyperparameter_xgb_gwava_best <- g_make_hyperparameter_grid_list(list(eta=c(0.15),
nrounds=c(40),
gamma=c(5),
lambda=c(10),
subsample=c(1),
# colsample_bytree=c(0.75, 0.85, 1.0),
base_score=g_class_count_frac_positive,
scale_pos_weight=c(1.0),
max_depth=c(7)))
g_hyperparameter_xgb_osu_best <- g_make_hyperparameter_grid_list(list(eta=c(0.1),
nrounds=c(40),
gamma=c(10),
lambda=c(1),
subsample=c(1),
# colsample_bytree=c(0.75, 0.85, 1.0),
base_score=g_class_count_frac_positive,
scale_pos_weight=c(1.0),
max_depth=c(7)))
## Build one classifier descriptor per hyperparameter set, all sharing the same
## feature matrix and result-set label.  Factors out the previously duplicated
## lapply() blocks for the OSU and GWAVA XGB classifier lists.
##   p_hyps:                list of hyperparameter lists (one per grid point)
##   p_feature_matrix_name: key into g_classifier_feature_matrices_list
##   p_set_name:            label recorded in the results tables
##   p_flag_importance:     if TRUE, use the importance-reporting XGB wrapper
g_make_xgb_classifier_list <- function(p_hyps, p_feature_matrix_name,
                                       p_set_name, p_flag_importance) {
    lapply(p_hyps, function(p_hyp) {
        list(classifier_feature_matrix_name=p_feature_matrix_name,
             ## plain if/else rather than ifelse(): the flag is a scalar logical
             classifier_function_name=if (p_flag_importance) "XGB_importance" else "XGB",
             classifier_hyperparameter_set_type_name="XGB",
             classifier_set_name=p_set_name,
             classifier_hyperparameter_list=p_hyp)
    })
}

## XGB classifiers on the OSU (CERENKOV2) sparse feature matrix
g_classifier_list_xgb_OSU <- g_make_xgb_classifier_list(g_hyperparameter_xgb_osu_best,
                                                        "feat_cerenkov2_sparsematrix",
                                                        "OSU_XGB",
                                                        g_par$flag_xgb_importance)

## XGB classifiers on the GWAVA sparse feature matrix
g_classifier_list_xgb_GWAVA <- g_make_xgb_classifier_list(g_hyperparameter_xgb_gwava_best,
                                                          "feat_GWAVA_sparsematrix",
                                                          "GWAVA_XGB",
                                                          g_par$flag_xgb_importance)
## Single ranger (random forest) classifier configured with the hyperparameters
## used here to reproduce the published GWAVA model (100 trees, mtry = 14),
## run on the GWAVA feature data frame.  probability=FALSE is deliberate --
## this file warns against probability=TRUE with ranger (memory issues).
g_classifier_list_gwava_published <- list(list(classifier_feature_matrix_name="feat_GWAVA_df",
classifier_function_name="ranger",
classifier_hyperparameter_set_type_name="ranger",
classifier_set_name="GWAVA_RF_published",
classifier_hyperparameter_list=list(probability=FALSE,
mtry=14,
num.trees=100,
weight_positive_class=1,
replace=TRUE,
sample.fraction=1)))
## ------------------ ranger hyperparameter lists --------------
## WARNING: do not set "probability=TRUE" for ranger; memory leak badness will result
### ==================== DO NOT DELETE THIS CODE; KEEP BECAUSE YOU WILL NEED IT LATER ===================
## g_hyperparameter_grid_list_ranger <- g_make_hyperparameter_grid_list(list(mtry=c(15, 20),
## num.trees=c(200, 300),
## probability=FALSE, ## do not set "probability=TRUE"
## weight_positive_class=1, #c(g_class_count_ratio_negative_to_positive, 1),
## replace=TRUE,
## sample.fraction=1))
### ==================== DO NOT DELETE THIS CODE; KEEP BECAUSE YOU WILL NEED IT LATER ===================
## ============================== assemble classifier list =================================
## TODO: make a "g_check_classifier_list" function that checks for incorrect classifier function
## names, incorrect feature matrix names, etc.
g_classifier_list <- c(
g_classifier_list_xgb_OSU,
g_classifier_list_xgb_GWAVA,
g_classifier_list_gwava_published
)
## ============================== run invariant machine-learning code =================================
source("cerenkov_ml_run_ml.R")
| /src/R/cerenkov_ml_compare_with_gwava.R | permissive | zheng-liu/cerenkov-python | R | false | false | 11,630 | r | ## cerenkov_ml.R -- master script for tuning, training, and testing classifiers using R/parallel;
## it can run on a single multicore machine or using a PSOCKcluster of EC2 instances
##
## Author: Stephen Ramsey
##
## Packages required by this script:
## PRROC
##
## Packages conditionally required by this script:
## xgboost, ranger, dismo, Matrix, aws.ec2, pbapply
##
## Note: do not use this program with Ranger version 0.6.6 (stability issues); use Ranger 0.6.0 only
##
## If you want to use partial least squares classification for feature reduction, you also need:
## pls, methods
## How to run this script in EC2 using a single m4.16xlarge instance:
## (0) run a "m4.16xlarge" EC2 instance with the "CERENKOV_CLUSTER8" AMI
## (1) set "g_par$flag_create_ec2_instances = FALSE" below and "g_par$notify_by_text_msg = TRUE"
## (2) set "g_par$nthreads_per_process = 1"
## (3) scp "cerenkov_ml.R", "cerenkov_ml_base_functions.R", and the various required ".Rdata" files into the instance
## (4) ssh into the instance
## (5) bash$ nohup Rscript cerenkov_ml.R &
## (6) bash$ tail -f nohup.out
## How to run this script in EC2 using multiple worker nodes:
## (0) make sure you do not have any EC2 instances running (this script will use up your quota of 20 instances)
## (1) launch a m4.2xlarge instance using the "CERENKOV_CLUSTER8" AMI
## (2) assign the instance to security group PSOCKcluster
## (3) assign the instance to subnet CERENKOV_CLUSTER
## (4) optionally enable termination protection
## (5) under "add tags", name the instance "HEAD NODE"
## (6) go to step (3) in the "How to run this script in EC2 using a single m4.16xlarge instance" section above (where "instance" means the head node instance), and continue through step (6)
g_args <- commandArgs(trailingOnly=TRUE)
## ============================ define global parameters =============================
g_par <- list(
num_folds_cross_validation = 5, ## we are standardizing on 5-fold CV
num_cv_replications = 200, ## set to anywhere from 1--200, typically
flag_create_fork_cluster = TRUE, ## TRUE or FALSE
override_num_fork_processes = 64, ## for EC2, set to 64; for my MBP, set to 8
show_progress_bar = FALSE,
notify_by_text_msg = TRUE,
flag_locus_sampling = TRUE, ## set to false if you want SNP-level sampling
flag_xgb_importance = FALSE, ## if you set this to TRUE, make sure you set num_cv_replications=1 and num_folds_cross_validation=1
random_number_seed = if (is.na(g_args[1])) 1337 else as.integer(g_args[1]),
nthreads_per_process = 1,
flag_randomize_classifier_order = FALSE,
flag_create_ec2_instances = FALSE, ## don't set this to true if you set "flag_create_fork_cluster" to true
analysis_label = "compare_with_gwava",
output_file_base_name = "cerenkov_ml_results",
parallel_use_load_balancing = TRUE,
debug_file_parallel="" ## set to empty string for production run (makes error text go to stdout)
)
g_par$aws_sns_topic_arn <- if (g_par$notify_by_text_msg) { "arn:aws:sns:us-west-2:315280700912:ramseylab" } else { NULL }
source("cerenkov_aws_functions.R") ## load functions used for AWS
source("cerenkov_ml_base_functions.R") ## load functions used for machine-learning
## ============================== load OSU feature data; check for problems with feature data =================================
print("loading OSU data")
load(file="features_cerenkov2_osu18.Rdata")
stopifnot(g_feature_matrix_is_OK(g_feat_cerenkov2_df))
library(Matrix)
g_feat_cerenkov2_matrix_sparse <- sparse.model.matrix(label ~ .-1, data=g_feat_cerenkov2_df)
g_snp_names <- rownames(g_feat_cerenkov2_df)
g_label_vec <- as.integer(as.character(g_feat_cerenkov2_df$label))
## ============================== load feature data; check for problems with feature data =================================
print("loading GWAVA data")
load(file="features_gwava_osu18.Rdata") ## creates an R object called "g_feat_gwava_df"
stopifnot(g_feature_matrix_is_OK(g_feat_gwava_df))
stopifnot(g_snp_names == rownames(g_feat_gwava_df))
stopifnot(g_feat_cerenkov2_df$label == g_feat_gwava_df$label)
g_feat_gwava_matrix_sparse <- sparse.model.matrix(label ~ .-1, data=g_feat_gwava_df)
## build a list of the feature matrices that we will need
g_classifier_feature_matrices_list <- list(
feat_cerenkov2_sparsematrix=g_feat_cerenkov2_matrix_sparse,
feat_GWAVA_sparsematrix=g_feat_gwava_matrix_sparse,
feat_GWAVA_df=g_feat_gwava_df[, which(names(g_feat_gwava_df) != "label")]
)
## ============================== run invariant setup code =================================
source("cerenkov_ml_run_setup.R")
## ============================== make closures for classifiers =================================
g_classifier_function_xgboost <- g_make_classifier_function_xgboost(p_nthread=g_par$nthreads_per_process,
g_get_perf_results,
p_feature_importance_type=NULL,
p_make_objective_function=function(...){"binary:logistic"},
p_case_group_ids=g_snp_locus_ids)
g_classifier_function_ranger <- g_make_classifier_function_ranger(p_nthread=g_par$nthreads_per_process,
p_get_perf_results=g_get_perf_results,
p_feature_importance_type="impurity")
g_classifier_functions_list <- list(
XGB=g_classifier_function_xgboost,
ranger=g_classifier_function_ranger
)
## ============================== assemble final list of feature matrices =================================
## free up memory
rm(g_feat_cerenkov2_df)
rm(g_feat_cerenkov2_matrix_sparse)
rm(g_feat_gwava_df)
rm(g_feat_gwava_matrix_sparse)
## ============================== make hyperparameter lists =================================
## ------------------ xgboost hyperparameter lists --------------
g_hyperparameter_xgb_gwava_best <- g_make_hyperparameter_grid_list(list(eta=c(0.15),
nrounds=c(40),
gamma=c(5),
lambda=c(10),
subsample=c(1),
# colsample_bytree=c(0.75, 0.85, 1.0),
base_score=g_class_count_frac_positive,
scale_pos_weight=c(1.0),
max_depth=c(7)))
g_hyperparameter_xgb_osu_best <- g_make_hyperparameter_grid_list(list(eta=c(0.1),
nrounds=c(40),
gamma=c(10),
lambda=c(1),
subsample=c(1),
# colsample_bytree=c(0.75, 0.85, 1.0),
base_score=g_class_count_frac_positive,
scale_pos_weight=c(1.0),
max_depth=c(7)))
g_classifier_list_xgb_OSU <- lapply(g_hyperparameter_xgb_osu_best,
function(p_hyp) {
list(classifier_feature_matrix_name="feat_cerenkov2_sparsematrix",
classifier_function_name=ifelse(g_par$flag_xgb_importance, "XGB_importance", "XGB"),
classifier_hyperparameter_set_type_name="XGB",
classifier_set_name="OSU_XGB",
classifier_hyperparameter_list=p_hyp)
})
g_classifier_list_xgb_GWAVA <- lapply(g_hyperparameter_xgb_gwava_best,
function(p_hyp) {
list(classifier_feature_matrix_name="feat_GWAVA_sparsematrix",
classifier_function_name=ifelse(g_par$flag_xgb_importance, "XGB_importance", "XGB"),
classifier_hyperparameter_set_type_name="XGB",
classifier_set_name="GWAVA_XGB",
classifier_hyperparameter_list=p_hyp)
})
g_classifier_list_gwava_published <- list(list(classifier_feature_matrix_name="feat_GWAVA_df",
classifier_function_name="ranger",
classifier_hyperparameter_set_type_name="ranger",
classifier_set_name="GWAVA_RF_published",
classifier_hyperparameter_list=list(probability=FALSE,
mtry=14,
num.trees=100,
weight_positive_class=1,
replace=TRUE,
sample.fraction=1)))
## ------------------ ranger hyperparameter lists --------------
## WARNING: do not set "probability=TRUE" for ranger; memory leak badness will result
### ==================== DO NOT DELETE THIS CODE; KEEP BECAUSE YOU WILL NEED IT LATER ===================
## g_hyperparameter_grid_list_ranger <- g_make_hyperparameter_grid_list(list(mtry=c(15, 20),
## num.trees=c(200, 300),
## probability=FALSE, ## do not set "probability=TRUE"
## weight_positive_class=1, #c(g_class_count_ratio_negative_to_positive, 1),
## replace=TRUE,
## sample.fraction=1))
### ==================== DO NOT DELETE THIS CODE; KEEP BECAUSE YOU WILL NEED IT LATER ===================
## ============================== assemble classifier list =================================
## TODO: make a "g_check_classifier_list" function that checks for incorrect classifier function
## names, incorrect feature matrix names, etc.
g_classifier_list <- c(
g_classifier_list_xgb_OSU,
g_classifier_list_xgb_GWAVA,
g_classifier_list_gwava_published
)
## ============================== run invariant machine-learning code =================================
source("cerenkov_ml_run_ml.R")
|
# R6 wrapper around an external device handle (e.g. "cpu" or "cuda:0").
# A Device is created either from a type/index pair or from an existing
# external pointer; all real work is delegated to the device_* bindings.
Device <- R6::R6Class(
  classname = "Device",
  public = list(
    pointer = NULL,

    # Build from an existing pointer, or from a type (+ optional index).
    # Exactly one of the two forms must be supplied, otherwise error.
    initialize = function(type = NULL, index = NULL, pointer = NULL) {
      if (!is.null(pointer)) {
        self$pointer <- pointer
      } else if (!is.null(type)) {
        self$pointer <- device_from_r(type, index)
      } else {
        stop("You must specify a type and a index (or a Device pointer)")
      }
    },

    # TRUE if the underlying device carries an explicit index.
    has_index = function() {
      device_has_index(self$pointer)
    },

    # TRUE for CUDA devices.
    is_cuda = function() {
      device_is_cuda(self$pointer)
    },

    # TRUE for CPU devices.
    is_cpu = function() {
      device_is_cpu(self$pointer)
    },

    # Set the device index in place; returns self invisibly for chaining.
    set_index = function(index) {
      device_set_index(self$pointer, index)
      invisible(self)
    },

    # Print as "type" or "type:index" (index appended only when non-negative).
    print = function() {
      label <- self$type
      idx <- self$index
      if (idx >= 0) {
        label <- paste0(label, ":", idx)
      }
      cat(label)
    }
  ),
  active = list(
    # Read or write the device index via the external bindings.
    index = function(x) {
      if (missing(x)) {
        get_device_index(self$pointer)
      } else {
        set_device_index(self$pointer, x)
      }
    },
    # Device type is read-only; assignment is an error.
    type = function(x) {
      if (missing(x)) {
        get_device_type(self$pointer)
      } else {
        stop("Can't change device type.", call. = FALSE)
      }
    }
  )
)
#' Create a Device
#'
#' @param type a device type, 'cuda' or 'cpu'.
#' @param index an index for the device (starting from 0). only used for 'cuda'
#' devices.
#'
#' @export
# Create a Device from a type string, optionally carrying the index inline
# (e.g. "cuda:0").  An explicitly supplied `index` takes precedence over the
# inline form.
#
# BUG FIX: the original called grepl(type, ":"), i.e. with the pattern and
# the string swapped, so "cuda:0" was never detected and the inline-index
# form silently constructed a device whose type was the whole string.
tch_device <- function(type, index = NULL) {
  if (grepl(":", type, fixed = TRUE) && is.null(index)) {
    type_index <- strsplit(type, ":", fixed = TRUE)[[1]]
    Device$new(type_index[1], as.integer(type_index[2]))
  } else {
    Device$new(type, index)
  }
}
# Equality operator for Device objects: delegates to the external
# device_equals binding on the two underlying handles.
`==.Device` <- function(e1, e2) {
device_equals(e1$pointer, e2$pointer)
}
| /R/device.R | permissive | dfalbel/torch | R | false | false | 1,699 | r | Device <- R6::R6Class(
classname = "Device",
public = list(
pointer = NULL,
initialize = function(type = NULL, index = NULL, pointer = NULL) {
if (!is.null(pointer))
self$pointer <- pointer
else if (!is.null(type))
self$pointer <- device_from_r(type, index)
else
stop("You must specify a type and a index (or a Device pointer)")
},
has_index = function() {
device_has_index(self$pointer)
},
is_cuda = function() {
device_is_cuda(self$pointer)
},
is_cpu = function() {
device_is_cpu(self$pointer)
},
set_index = function(index) {
device_set_index(self$pointer, index)
invisible(self)
},
print = function() {
out <- self$type
index <- self$index
if (index >= 0)
out <- paste0(out, ":", index)
cat(out)
}
),
active = list(
index = function(x) {
if (missing(x))
get_device_index(self$pointer)
else
set_device_index(self$pointer, x)
},
type = function(x) {
if (missing(x))
get_device_type(self$pointer)
else
stop("Can't change device type.", call. = FALSE)
}
)
)
#' Create a Device
#'
#' @param type a device type, 'cuda' or 'cpu'.
#' @param index an index for the device (starting from 0). only used for 'cuda'
#' devices.
#'
#' @export
# Create a Device from a type string, optionally carrying the index inline
# (e.g. "cuda:0").  An explicitly supplied `index` takes precedence over the
# inline form.
#
# BUG FIX: the original called grepl(type, ":"), i.e. with the pattern and
# the string swapped, so "cuda:0" was never detected and the inline-index
# form silently constructed a device whose type was the whole string.
tch_device <- function(type, index = NULL) {
  if (grepl(":", type, fixed = TRUE) && is.null(index)) {
    type_index <- strsplit(type, ":", fixed = TRUE)[[1]]
    Device$new(type_index[1], as.integer(type_index[2]))
  } else {
    Device$new(type, index)
  }
}
`==.Device` <- function(e1, e2) {
device_equals(e1$pointer, e2$pointer)
}
|
## Sample values for the chapter-6 example (data611), with the sample size.
## N is derived from the data itself so the two can never fall out of sync
## (the original hard-coded N <- 20 separately from the 20-element vector).
x <- c(3060, 2840, 1780, 3280, 3550, 2450, 2200,
       3070, 2100, 4100, 3630, 3060, 3280, 1870,
       2980, 3120, 2150, 3830, 4300, 1880)
N <- length(x)
| /data/scr_12/chap6/data611.R | no_license | yugitti/bayze | R | false | false | 131 | r | N<-20
x<-c(3060, 2840, 1780, 3280, 3550, 2450, 2200,
3070, 2100, 4100, 3630, 3060, 3280, 1870,
2980, 3120, 2150, 3830, 4300, 1880)
|
library(shiny)
library(BH)
library(rCharts)
require(markdown)
require(data.table)
library(dplyr)
library(DT)
## Top-level Shiny UI: a navbar app with three pages -- an exploration page
## (filters + data table + rCharts plots), a Brickset lookup page, and an
## About page rendered from markdown.
shinyUI(
navbarPage("LEGO Set Visualizer",
# multi-page user-interface that includes a navigation bar.
tabPanel("Explore the Data",
# Left-hand filter controls; outputs feed the server-side reactive subset.
sidebarPanel(
sliderInput("timeline",
"Timeline:",
min = 1950,
max = 2015,
value = c(1996, 2015)),
# NOTE(review): min = -1 presumably encodes sets with unknown piece
# counts in the dataset -- confirm against the data source.
sliderInput("pieces",
"Number of Pieces:",
min = -1,
max = 5922,
value = c(271, 2448)
),
#format = "####"),
uiOutput("themesControl"), # the id
actionButton(inputId = "clearAll",
label = "Clear selection",
icon = icon("square-o")),
actionButton(inputId = "selectAll",
label = "Select all",
icon = icon("check-square-o"))
),
mainPanel(
tabsetPanel(
# Data
tabPanel(p(icon("table"), "Dataset"),
dataTableOutput(outputId="dTable")
), # end of "Dataset" tab panel
# rCharts (nvd3) plots rendered by the server via showOutput()
tabPanel(p(icon("line-chart"), "Visualize the Data"),
h4('Number of Themes by Year', align = "center"),
showOutput("themesByYear", "nvd3"),
h4('Number of Pieces by Year', align = "center"),
h5('Please hover over each point to see the Set Name and ID.',
align ="center"),
showOutput("piecesByYear", "nvd3"),
h4('Number of Average Pieces by Year', align = "center"),
showOutput("piecesByYearAvg", "nvd3"),
h4('Number of Average Pieces by Theme', align = "center"),
showOutput("piecesByThemeAvg", "nvd3")
) # end of "Visualize the Data" tab panel
)
)
), # end of "Explore Dataset" tab panel
# Lookup page: builds a brickset.com URL for a typed set ID (two-step flow).
tabPanel(p(icon("search"), "LookUp on Brickset Website"),
mainPanel(
h4("The page popped-up is the LEGO set database on Brickset.com."),
h4("Step 1. Please type the Set ID below and press the 'Go!' button:"),
textInput(inputId="setid", label = "Input Set ID"),
#p('Output Set ID:'),
#textOutput('setid'),
actionButton("goButtonAdd", "Go!"),
h5('Output Address:'),
textOutput("address"),
p(""),
h4("Step 2. Please click the button below.
The link to the Set's page is being generated."),
p(""),
actionButton("goButtonDirect", "Generate Link Below!"),
p(""),
htmlOutput("inc"),
p("I was supposed to show you in an iframe below. However, it only
worked on localhost and has security issue after deployed to the cloud. Ooops...")
)
),
# Static about page from about.md
tabPanel("About",
mainPanel(
includeMarkdown("about.md")
)
)
)
)
| /project/ui.R | no_license | bianyin102938/Coursera-Developing-Data-Products | R | false | false | 3,659 | r | library(shiny)
library(BH)
library(rCharts)
require(markdown)
require(data.table)
library(dplyr)
library(DT)
shinyUI(
navbarPage("LEGO Set Visualizer",
# multi-page user-interface that includes a navigation bar.
tabPanel("Explore the Data",
sidebarPanel(
sliderInput("timeline",
"Timeline:",
min = 1950,
max = 2015,
value = c(1996, 2015)),
sliderInput("pieces",
"Number of Pieces:",
min = -1,
max = 5922,
value = c(271, 2448)
),
#format = "####"),
uiOutput("themesControl"), # the id
actionButton(inputId = "clearAll",
label = "Clear selection",
icon = icon("square-o")),
actionButton(inputId = "selectAll",
label = "Select all",
icon = icon("check-square-o"))
),
mainPanel(
tabsetPanel(
# Data
tabPanel(p(icon("table"), "Dataset"),
dataTableOutput(outputId="dTable")
), # end of "Dataset" tab panel
tabPanel(p(icon("line-chart"), "Visualize the Data"),
h4('Number of Themes by Year', align = "center"),
showOutput("themesByYear", "nvd3"),
h4('Number of Pieces by Year', align = "center"),
h5('Please hover over each point to see the Set Name and ID.',
align ="center"),
showOutput("piecesByYear", "nvd3"),
h4('Number of Average Pieces by Year', align = "center"),
showOutput("piecesByYearAvg", "nvd3"),
h4('Number of Average Pieces by Theme', align = "center"),
showOutput("piecesByThemeAvg", "nvd3")
) # end of "Visualize the Data" tab panel
)
)
), # end of "Explore Dataset" tab panel
tabPanel(p(icon("search"), "LookUp on Brickset Website"),
mainPanel(
h4("The page popped-up is the LEGO set database on Brickset.com."),
h4("Step 1. Please type the Set ID below and press the 'Go!' button:"),
textInput(inputId="setid", label = "Input Set ID"),
#p('Output Set ID:'),
#textOutput('setid'),
actionButton("goButtonAdd", "Go!"),
h5('Output Address:'),
textOutput("address"),
p(""),
h4("Step 2. Please click the button below.
The link to the Set's page is being generated."),
p(""),
actionButton("goButtonDirect", "Generate Link Below!"),
p(""),
htmlOutput("inc"),
p("I was supposed to show you in an iframe below. However, it only
worked on localhost and has security issue after deployed to the cloud. Ooops...")
)
),
tabPanel("About",
mainPanel(
includeMarkdown("about.md")
)
)
)
)
|
# Plot cumulative hazard estimates (mvna) for a multi-state model.
#   d:          event-history data with (at least) columns `to` and `delta`;
#               rows with delta == 0 are relabelled as censored ("cens")
#   mpl:        model specification from which the transition matrix is derived
#   return.nae: if TRUE, also return the fitted mvna object (otherwise NULL)
#   ...:        further arguments passed on to plot()
# NOTE(review): assumes state names are simply 1..ncol(tra) -- confirm against
# TraFromMpl()'s conventions.
plotnae <- function(d, mpl, return.nae = FALSE, ...){
# transition matrix implied by the model specification
tra <- TraFromMpl(mpl)
# delta == 0 marks censoring; which() also skips rows with NA delta here
d$to[which(d$delta == 0)] <- "cens"
nae <- mvna(data = d, state.names = 1:ncol(tra), tra = tra, cens.name = "cens")
plot(nae, bty = "n", ...)
if(return.nae){
return(nae)
}
} | /R/plotnae.R | no_license | cran/simMSM | R | false | false | 274 | r | plotnae <- function(d, mpl, return.nae = FALSE, ...){
tra <- TraFromMpl(mpl)
d$to[which(d$delta == 0)] <- "cens"
nae <- mvna(data = d, state.names = 1:ncol(tra), tra = tra, cens.name = "cens")
plot(nae, bty = "n", ...)
if(return.nae){
return(nae)
}
} |
#' Taxon data for calculating benthic productivity
#'
#' Taxon information contains conversion factors, lifestyle traits, and environmental information needed to calculate annual benthic productivity.
#'
#' @details
#' This matrix contains the data required for calculating benthic productivity using \link[BenthicPro]{BenthicPB} and is modified from \link[BenthicPro]{BenthProdExampleData}.
#' For more details, see \link[BenthicPro]{BenthicPB} and Thomas Brey's "Virtual Handbook on Population Dynamics" (\url{http://www.thomas-brey.de/science/virtualhandbook/spreadsheets/index.html})
#' @docType data
#' @keywords datasets
#' @format A matrix includes conversion factors, lifestyle traits, and environmental information.
#' @references
#' \itemize{Henrike Andresen and Thomas Brey (2018). BenthicPro: Benthic Energy Flow. R package version 1.0.}
#' \itemize{Brey, Thomas (2012) A multi-parameter artificial neural network model to estimate macrobenthic invertebrate productivity and production. Limnol. Oceanogr.: Methods 10, 2012, 581-589. DOI 10.4319/lom.2012.10.581}
#' @name mpr
#'
NULL
| /R/mpr.R | no_license | chihlinwei/bbbs | R | false | false | 1,085 | r | #' Taxon data for calculating benthic productivity
#'
#' Taxon information contains conversion factors, lifestyle traits, and environmental information needed to calculate annual benthic productivity.
#'
#' @details
#' This matrix contains the data required for calculating benthic productivity using \link[BenthicPro]{BenthicPB} and is modified from \link[BenthicPro]{BenthProdExampleData}.
#' For more details, see \link[BenthicPro]{BenthicPB} and Thomas Brey's "Virtual Handbook on Population Dynamics" (\url{http://www.thomas-brey.de/science/virtualhandbook/spreadsheets/index.html})
#' @docType data
#' @keywords datasets
#' @format A matrix includes conversion factors, lifestyle traits, and environmental information.
#' @references
#' \itemize{Henrike Andresen and Thomas Brey (2018). BenthicPro: Benthic Energy Flow. R package version 1.0.}
#' \itemize{Brey, Thomas (2012) A multi-parameter artificial neural network model to estimate macrobenthic invertebrate productivity and production. Limnol. Oceanogr.: Methods 10, 2012, 581-589. DOI 10.4319/lom.2012.10.581}
#' @name mpr
#'
NULL
|
# Unit tests for restrictSample2() on a categorical-multiple phenotype split
# across two array columns (pheno_0_0, pheno_0_1), plus one "other" field.
# NOTE(review): idxNA appears to be the row indices restrictSample2 marks for
# exclusion under the given inclusion strategy / positive value -- confirm
# against testCatMultiple.r.
library("testthat");
source("../testCatMultiple.r");
source("../replaceNaN.r")
# categorical multiple phenotype with two arrays
# -1 encodes a "no known value" (NK) code; NA is genuinely missing
pheno_0_0 <- c(1,2,-1,NA,1,2,-1,NA);
pheno_0_1 <- c(NA,-1,NA,NA,NA,-1,NA,NA)
other_0_0 <- c(1,1,1,1,NA,NA,-1,-1)
data <- cbind.data.frame(pheno_0_0,pheno_0_1,other_0_0)
colnames(data)[1] <- "pheno_0_0"
colnames(data)[2] <- "pheno_0_1"
colnames(data)[3] <- "xother_0_0"
####
# include ALL - pheno with NK values aren't included as -ve examples
idxNA <- restrictSample2('test1',data[,1:2], "ALL", 1)
expect_equal(sort(idxNA), c(2,3,6,7))
# here examples 2 and 6 aren't included because they correspond to the -ve class
idxNA <- restrictSample2('test1',data[,1:2], "ALL", 2)
expect_equal(sort(idxNA), c(3,7))
####
# include NO_NAN
idxNA <- restrictSample2('test1',data[,1:2], "NO_NAN", 1)
cat("\n")
expect_equal(sort(idxNA), c(2,3,4,6,7,8))
# here examples 2 and 6 aren't included because they correspond to the -ve class
idxNA <- restrictSample2('test1',data[,1:2], "NO_NAN", 2)
cat("\n")
expect_equal(sort(idxNA), c(3,4,7,8))
####
# include only those with (non missing) value for other field
idxNA <- restrictSample2('test1',data[,1:2], "other", 1)
cat("\n")
expect_equal(sort(idxNA), c(2,3,5,6,7,8))
#
idxNA <- restrictSample2('test1',data[,1:2], "other", 2)
cat("\n")
expect_equal(sort(idxNA), c(3,5,6,7,8))
## if cat mult is not numeric then can't have missing values
# same three strategies, but with character-coded phenotypes (no NK code)
pheno_0_0 <- c("A","B","C",NA,"A","B","C",NA);
pheno_0_1 <- c(NA,"C",NA,NA,NA,"C",NA,NA)
other_0_0 <- c("A","A","A","A",NA,NA,"C","C")
data <- cbind.data.frame(pheno_0_0,pheno_0_1,other_0_0)
colnames(data)[1] <- "pheno_0_0"
colnames(data)[2] <- "pheno_0_1"
colnames(data)[3] <- "xother_0_0"
idxNA <- restrictSample2('test1',data[,1:2], "ALL", "A")
expect_equal(idxNA, NULL)
idxNA <- restrictSample2('test1',data[,1:2], "NO_NAN", "A")
expect_equal(sort(idxNA), c(4,8))
idxNA <- restrictSample2('test1',data[,1:2], "other", "A")
expect_equal(sort(idxNA), c(5,6))
| /WAS/unittests/test_testCatMultiple.r | permissive | astheeggeggs/PHESANT | R | false | false | 1,976 | r | library("testthat");
source("../testCatMultiple.r");
source("../replaceNaN.r")

# Categorical-multiple phenotype spread over two array columns, plus a
# companion ("other") field; exercises restrictSample2's inclusion modes.
pheno_0_0 <- c(1, 2, -1, NA, 1, 2, -1, NA)
pheno_0_1 <- c(NA, -1, NA, NA, NA, -1, NA, NA)
other_0_0 <- c(1, 1, 1, 1, NA, NA, -1, -1)
data <- cbind.data.frame(pheno_0_0, pheno_0_1, other_0_0)
colnames(data) <- c("pheno_0_0", "pheno_0_1", "xother_0_0")

# Mode "ALL": phenotypes with NK (-1) values are not used as negative examples.
idxNA <- restrictSample2('test1', data[, 1:2], "ALL", 1)
expect_equal(sort(idxNA), c(2, 3, 6, 7))
# Examples 2 and 6 drop out because they belong to the negative class.
idxNA <- restrictSample2('test1', data[, 1:2], "ALL", 2)
expect_equal(sort(idxNA), c(3, 7))

# Mode "NO_NAN".
idxNA <- restrictSample2('test1', data[, 1:2], "NO_NAN", 1)
cat("\n")
expect_equal(sort(idxNA), c(2, 3, 4, 6, 7, 8))
# Again, 2 and 6 correspond to the negative class here.
idxNA <- restrictSample2('test1', data[, 1:2], "NO_NAN", 2)
cat("\n")
expect_equal(sort(idxNA), c(3, 4, 7, 8))

# Mode "other": keep only rows with a non-missing companion value.
idxNA <- restrictSample2('test1', data[, 1:2], "other", 1)
cat("\n")
expect_equal(sort(idxNA), c(2, 3, 5, 6, 7, 8))

idxNA <- restrictSample2('test1', data[, 1:2], "other", 2)
cat("\n")
expect_equal(sort(idxNA), c(3, 5, 6, 7, 8))

# Non-numeric categorical-multiple values cannot carry missing-value codes.
pheno_0_0 <- c("A", "B", "C", NA, "A", "B", "C", NA)
pheno_0_1 <- c(NA, "C", NA, NA, NA, "C", NA, NA)
other_0_0 <- c("A", "A", "A", "A", NA, NA, "C", "C")
data <- cbind.data.frame(pheno_0_0, pheno_0_1, other_0_0)
colnames(data) <- c("pheno_0_0", "pheno_0_1", "xother_0_0")
idxNA <- restrictSample2('test1', data[, 1:2], "ALL", "A")
expect_equal(idxNA, NULL)
idxNA <- restrictSample2('test1', data[, 1:2], "NO_NAN", "A")
expect_equal(sort(idxNA), c(4, 8))
idxNA <- restrictSample2('test1', data[, 1:2], "other", "A")
expect_equal(sort(idxNA), c(5, 6))
|
# Get_PROTO_Data.r
# Prototype plot script: summarise mid-mean document-loss percentages from the
# simulation results table and render a log-log line plot to sPlotFile.
# %+% is assumed to be a string-concatenation operator from PlotUtil.r.
source("../common/DataUtil.r")
library(ggplot2)
source("../common/PlotUtil.r")
# P A R A M S
sPlotFile <- "_PROTO_.png"
fnGroupBy <- function(dfIn) {group_by(dfIn, copies, lifem
, simlength
, auditfrequency)}
fnSubset <- function(dfIn) {subset(dfIn, copies==5 & lifem<=1000
& auditfrequency==10000)}
sTitleLine <- ( "With moderate auditing, in a peaceful world, "
%+% "five copies are nearly immortal"
%+% "\n"
%+% "\n(Annual total auditing, duration = 30 & 50 years)"
)
sLegendLabel <- "Length of\nSimulation\n(years)"
lLegendItemLabels <- c("30", "50")
sXLabel <- ("1MB sector half-life (megahours)"
%+% " (lower error rate =====>)")
sYLabel <- ("permanent document losses (%)")
# Also change summarize function and ggplot(color=...).
# G E T D A T A
# Get the data into the right form for these plots.
alldat.df <- fndfGetGiantDataRaw("")
newdat <- alldat.df %>% fnGroupBy() %>%
summarize(mdmlosspct=round(midmean(lost/docstotal)*100.0, 2), n=n()) %>%
fnSubset()
trows <- newdat
# P L O T D A T A
# Build the plot incrementally: log-log scales, thick lines with black points,
# then legend placement/appearance tweaks.
gp <- ggplot(data=trows
, aes(x=lifem,y=safe(mdmlosspct), color=factor(simlength))
)
gp <- gp + labs(color=sLegendLabel)
gp <- fnPlotLogScales(gp, x="YES", y="YES"
,xbreaks=c(2,5,10,100,1000)
,ybreaks=c(0.01,0.10,1.00)
)
gp <- gp + geom_line(
size=3
, show.legend=TRUE
)
gp <- gp + geom_point(data=trows
, size=6
, show.legend=TRUE
, color="black"
)
gp <- gp + theme(legend.position=c(0.8,0.7))
gp <- gp + theme(legend.background=element_rect(fill="lightgray",
size=0.5, linetype="solid"))
gp <- gp + theme(legend.key.size=unit(0.3, "in"))
gp <- gp + theme(legend.key.width=unit(0.6, "in"))
gp <- gp + theme(legend.text=element_text(size=16))
gp <- gp + theme(legend.title=element_text(size=14))
gp <- gp + scale_color_discrete(labels=lLegendItemLabels)
gp <- fnPlotTitles(gp
, titleline=sTitleLine
, xlabel=sXLabel
, ylabel=sYLabel
)
# Label the percentage lines out on the right side.
xlabelposition <- log10(800)
gp <- fnPlotPercentLine(gp, xloc=xlabelposition)
gp <- fnPlotMilleLine(gp, xloc=xlabelposition)
gp <- fnPlotSubMilleLine(gp, xloc=xlabelposition)
plot(gp)
fnPlotMakeFile(gp, sPlotFile)
# Unwind any remaining sink()s to close output files.
while (sink.number() > 0) {sink()}
| /pictures/common/Get_PROTO_Data.r | permissive | MIT-Informatics/PreservationSimulation | R | false | false | 2,713 | r | # Get_PROTO_Data.r
# Prototype plot: mid-mean document-loss percentage vs sector half-life,
# one line per simulation length, on log-log axes.
source("../common/DataUtil.r")
library(ggplot2)
source("../common/PlotUtil.r")

# ---- Parameters ----
plot_file <- "_PROTO_.png"
group_fn <- function(dfIn) {
  group_by(dfIn, copies, lifem, simlength, auditfrequency)
}
subset_fn <- function(dfIn) {
  subset(dfIn, copies == 5 & lifem <= 1000 & auditfrequency == 10000)
}
# %+% is the string-concatenation operator supplied by PlotUtil.r.
title_line <- ("With moderate auditing, in a peaceful world, "
  %+% "five copies are nearly immortal"
  %+% "\n"
  %+% "\n(Annual total auditing, duration = 30 & 50 years)")
legend_label <- "Length of\nSimulation\n(years)"
legend_item_labels <- c("30", "50")
x_label <- ("1MB sector half-life (megahours)"
  %+% " (lower error rate =====>)")
y_label <- ("permanent document losses (%)")
# Also change summarize function and ggplot(color=...).

# ---- Get data ----
alldat.df <- fndfGetGiantDataRaw("")
newdat <- alldat.df %>%
  group_fn() %>%
  summarize(mdmlosspct = round(midmean(lost / docstotal) * 100.0, 2), n = n()) %>%
  subset_fn()
trows <- newdat

# ---- Plot ----
plt <- ggplot(data = trows,
              aes(x = lifem, y = safe(mdmlosspct), color = factor(simlength)))
plt <- plt + labs(color = legend_label)
plt <- fnPlotLogScales(plt, x = "YES", y = "YES",
                       xbreaks = c(2, 5, 10, 100, 1000),
                       ybreaks = c(0.01, 0.10, 1.00))
plt <- plt + geom_line(size = 3, show.legend = TRUE)
plt <- plt + geom_point(data = trows, size = 6, show.legend = TRUE,
                        color = "black")
plt <- plt + theme(legend.position = c(0.8, 0.7))
plt <- plt + theme(legend.background = element_rect(fill = "lightgray",
                                                    size = 0.5, linetype = "solid"))
plt <- plt + theme(legend.key.size = unit(0.3, "in"))
plt <- plt + theme(legend.key.width = unit(0.6, "in"))
plt <- plt + theme(legend.text = element_text(size = 16))
plt <- plt + theme(legend.title = element_text(size = 14))
plt <- plt + scale_color_discrete(labels = legend_item_labels)
plt <- fnPlotTitles(plt,
                    titleline = title_line,
                    xlabel = x_label,
                    ylabel = y_label)

# Annotate the reference percentage lines near the right edge.
xlabelposition <- log10(800)
plt <- fnPlotPercentLine(plt, xloc = xlabelposition)
plt <- fnPlotMilleLine(plt, xloc = xlabelposition)
plt <- fnPlotSubMilleLine(plt, xloc = xlabelposition)
plot(plt)
fnPlotMakeFile(plt, plot_file)

# Unwind any remaining sink()s to close output files.
while (sink.number() > 0) {sink()}
|
###Om ganeshaaya namaha##
#####Parameters used for generating the model###
##Control- class_0
##Treatement- class_1
#rm(list=ls())
#rm(.Random.seed, envir=globalenv())
#time_init=proc.time()
print("New-Code")
# variables.RData supplies precomputed workspace objects used below
# (e.g. Npro_trt, Npep_trt, prot_pept_list_cntrl) -- TODO confirm contents.
load("variables.RData")
# Seed from wall clock + process id so concurrent runs get distinct streams.
set.seed( as.integer((as.double(Sys.time())*1000+Sys.getpid()) %% 2^31) )
# When 1, the "abundant" protein columns are rescaled to near zero further on.
norm_flag=0
#print(runif(n=100,min=0,max=100))
#####Please check with random seed###
#rm(list=ls())
#set.seed(101)
library(class)
library(boot)
library(MASS)
#library(caret)
#library(ISLR)
#library("Biostrings")
#setwd("/home/kashyap/Desktop/Masters_thesis_related/Codes")
time2=proc.time()
# Scale a numeric vector so its maximum becomes 1.
normalize <- function(x) x / max(x)
t_test_vals=function(c_pro_all)
{
  # Absolute two-sample (Welch) t statistic for every feature column of
  # c_pro_all, with the class label taken from the final column.
  n_feat=ncol(c_pro_all)-1
  grp=c_pro_all[,n_feat+1]
  stat_for=function(j){
    fit=t.test(c_pro_all[,j]~grp)
    unname(abs(fit$statistic))
  }
  vapply(seq_len(n_feat), stat_for, numeric(1))
}
# "Clear" the console by emitting 100 blank lines.
clrscr <- function() {
  cat(strrep("\n", 100))
}
diff_elem_removal=function(x,y){
  ##X is an array from which the elements which are there in y should be removed
  #
  # Fixed: the original collected matching positions and returned
  # x[-remov_array]; when NOTHING matched, remov_array was empty and
  # x[-numeric(0)] evaluates to x[numeric(0)], i.e. everything was removed
  # instead of nothing. It also errored on an empty y (1:0 loop plus if(NA)).
  # A vectorised membership test has neither edge case and preserves the
  # original behaviour in all other cases (order and duplicates of x kept).
  x[!(x %in% y)]
}
diff_elem_removal_total=function(p1,q1)
{
  # p1: list mapping each peptide to its protein ids.
  # q1: protein ids retained by the t-test screen.
  # Keep, per entry, only the ids that are present in q1; entries reduced to
  # nothing are dropped from the returned list.
  out=list()
  n_kept=0
  for(idx in seq_along(p1)){
    ids=p1[[idx]]
    absent=setdiff(ids,q1)
    if(length(absent)){
      kept_ids=diff_elem_removal(ids,absent)
      if(length(kept_ids)>0){
        n_kept=n_kept+1
        out[[n_kept]]=kept_ids
      }
    }
    else{
      # Every id survived the screen: keep the entry unchanged.
      n_kept=n_kept+1
      out[[n_kept]]=ids
    }
  }
  return(out)
}
#####This function is to reduce the prot_pept_list from random proteins to the ascending stuff ######
prot_pept_list_reduction=function(r1){
  # Renumber the protein ids appearing in the list-of-vectors r1 so that they
  # become consecutive integers 1..K, in order of first appearance (only
  # positive ids participate, matching the original ">0" filter).
  #
  # Replaces the original triple nested loop (quadratic in the number of ids,
  # with a fixed 100000-slot scratch buffer) by unlist()/unique()/match(),
  # which is linear and has no buffer-size assumption.
  all_ids=unlist(r1, use.names=FALSE)
  uniq_ids=unique(all_ids[all_ids>0])
  r2=r1
  for(i in seq_along(r1)){
    m=match(r1[[i]], uniq_ids)
    # Ids filtered out above (non-positive) keep their original value,
    # exactly as the original loop left them untouched.
    r2[[i]]=ifelse(is.na(m), r1[[i]], m)
  }
  return(r2)
}
#####Reduction of prot_pept_list ends######
######Function for the protein to peptide list starts####
# Euclidean (L2) norm of a numeric vector.
euc_norm <- function(x) sqrt(sum(x * x))
# FASTA file names for the protein/peptide maps (treatment and control share
# the same files here).
if(1){
pro_trt_file="prot_file1.fasta"
pep_trt_file="pept_file1.fasta"
}
pro_cntrl_file="prot_file1.fasta"
pep_cntrl_file="pept_file1.fasta"
if(1){
# Fraction of proteins carried into the analysis after the t-test ranking.
Npro_factor=0.03
#Npro_trt=fasta_file_to_array_conv(pro_trt_file,pep_trt_file,"pro")
#Npro_cntrl=fasta_file_to_array_conv(pro_cntrl_file,pep_cntrl_file,"pro")
Npro_for_analysis=ceiling(Npro_factor*Npro_trt)
#Npep_trt=fasta_file_to_array_conv(pro_trt_file,pep_trt_file,"pep")
#Npep_cntrl=fasta_file_to_array_conv(pro_cntrl_file,pep_cntrl_file,"pep")
}
# Weights of the Gaussian/exponential noise terms added to peptide signals.
noise_factor_gauss=1
noise_factor_exp=0
Npro=Npro_trt
#Npro_trt_test=60
#Npro_cntrl_test=60
#Npep_trt=ceiling(15*Npro_trt)
#Npep_cntrl=ceiling(15*Npro_cntrl)
Npep=Npep_trt
# Gamma-prior scale parameters for the two protein groups ("c" and "a",
# presumably low/high abundance -- confirm against the write-up).
theta_a=100
thetac_a=100
thetaa_a=10000000
number_of_samples_train_cntrl=10 #number of samples in each class
number_of_samples_train_trt=10
number_of_samples_test_cntrl=10 #number of test samples fr control
number_of_samples_test_trt=10 #number of test samples fr treatement
# Fold-change bounds and midpoint used when simulating differential expression.
amin=1.5
amax=1.6
al=1.55
fold_change=function(flag,amin,amax){ ##Please give amin and amax in just two decimal points.
  # One uniform draw from [amin, amax]; a truthy flag means over-expression
  # (return the draw), otherwise under-expression (return its reciprocal).
  draw=runif(1,amin,amax)
  if(flag) draw else 1/draw
}
# Redundant re-definition of euc_norm, identical to the one defined earlier.
euc_norm <- function(x) sqrt(sum(x^2))
M_cal=20 #Number of calibarations to be made in abc rejection algo.
#Generating the synthetic sample data S0####
###Only control sample used#####
###initial parameters as used in table 2
# Reference ("true") parameters used to simulate the observed dataset S0.
ka0=5
kc0=2
thetac0=100
thetaa0=10000000
theta0=100
phi0=0.4
al0=1.55
gamma_dist_a0=rgamma(1000,shape=ka0,scale=thetaa0)
gamma_dist_c0=rgamma(1000,shape=kc0,scale=thetac0)
gamma_l0=numeric(Npro_cntrl)
# First Npro_cntrl_c proteins draw from the "c" pool, the remaining 85%
# from the "a" pool.
Npro_cntrl_a=floor(0.85*Npro)
Npro_cntrl_c=Npro-Npro_cntrl_a
for(i in 1:(Npro_cntrl_c+Npro_cntrl_a)){
if(i<Npro_cntrl_c){
gamma_l0[i]=sample(gamma_dist_c0,1)
}
else{
gamma_l0[i]=sample(gamma_dist_a0,1)
}
}
mean_vec_cont=gamma_l0
mean_vec_cont0=gamma_l0
# Diagonal covariance: variance = phi0 * mean^2 (constant CV).
sig_matrix0=matrix(numeric(Npro*Npro),nrow=Npro)
sig_sq_vec0=phi0*mean_vec_cont*mean_vec_cont
for(i in 1:Npro){
sig_matrix0[i,i]=sig_sq_vec0[i]
}
c_pro_control0=mvrnorm(n=number_of_samples_train_cntrl,mu = mean_vec_cont,Sigma = sig_matrix0)
c_pro_treatement0=mvrnorm(n=number_of_samples_train_cntrl,mu = mean_vec_cont,Sigma = sig_matrix0)
if(norm_flag){
c_pro_control0[,Npro_cntrl_c:length(c_pro_control0[1,])]=runif(1,min=0,max=10e-6)*c_pro_control0[,Npro_cntrl_c:length(c_pro_control0[1,])]
c_pro_treatement0[,Npro_cntrl_c:length(c_pro_treatement0[1,])]=runif(1,min=0,max=10e-6)*c_pro_treatement0[,Npro_cntrl_c:length(c_pro_treatement0[1,])]
}
ratio_vec0=colMeans(c_pro_control0)/colMeans(c_pro_treatement0)
######For calculating the threshold for rejection sampling
# One random parameter draw whose distance to S0 sets the ABC tolerance.
k_rand=sample(160:240,1)/100
kc_rand=sample(160:240,1)/100
ka_rand=sample(400:600,1)/100
thetac_rand=sample(80:120,1)
thetaa_rand=sample(9e6:11e6,1)
# NOTE(review): 0.3:0.5 is just c(0.3), so phi_rand is always 0.3;
# probably runif(1, 0.3, 0.5) was intended -- confirm.
phi_rand=sample(0.3:0.5,1)
gamma_distc_rand=rgamma(1000,shape=kc_rand,scale=thetac_rand)
gamma_dista_rand=rgamma(1000,shape=ka_rand,scale=thetaa_rand)
gamma_rand=numeric(Npro_cntrl)
for(i in 1:(Npro_cntrl_c+Npro_cntrl_a)){
if(i<Npro_cntrl_c){
gamma_rand[i]=sample(gamma_distc_rand,1)
}
else{
gamma_rand[i]=sample(gamma_dista_rand,1)
}
}
mean_vec_cont_rand=gamma_rand
sig_matrix_rand=matrix(numeric(Npro_cntrl*Npro_cntrl),nrow=Npro_cntrl)
sig_sq_vec_rand=phi_rand*mean_vec_cont_rand*mean_vec_cont_rand
for(i in 1:Npro_cntrl){
sig_matrix_rand[i,i]=sig_sq_vec_rand[i]
}
c_pro_control_rand=mvrnorm(n=number_of_samples_train_cntrl,mu = mean_vec_cont_rand,Sigma = sig_matrix_rand)
c_pro_treatement_rand=mvrnorm(n=number_of_samples_train_cntrl,mu = mean_vec_cont_rand,Sigma = sig_matrix_rand)
if(norm_flag){
c_pro_control_rand[,Npro_cntrl_c:length(c_pro_control_rand[1,])]=runif(1,min=0,max=10e-6)*c_pro_control_rand[,Npro_cntrl_c:length(c_pro_control_rand[1,])]
c_pro_treatement_rand[,Npro_cntrl_c:length(c_pro_treatement_rand[1,])]=runif(1,min=0,max=10e-6)*c_pro_treatement_rand[,Npro_cntrl_c:length(c_pro_treatement_rand[1,])]
}
diff_rand_vec_cntrl=colMeans(c_pro_control_rand)-colMeans(c_pro_control0)
# NOTE(review): this Npro_factor-scaled threshold is overwritten a few lines
# below; only the unscaled value survives.
thresh_key_cntrl=1*Npro_factor*euc_norm(diff_rand_vec_cntrl)
#print(rgamma(100,shape=k_rand,scale=theta_rand))
#print(thresh_key_cntrl)
#print(Npro_factor)
# Dead code: the loop body never executes.
while(0){
x=1
}
print("ganganna")
thresh_key_cntrl=1*euc_norm(diff_rand_vec_cntrl)
fold_change_vec0=numeric(Npro)
print("goofy")
# With al0 as both bounds, fold_change degenerates to exactly al0 (or 1/al0):
# proteins whose control/treatment mean ratio exceeds 1 get over-expressed.
for(i in 1:Npro){
if(ratio_vec0[i]>1)
{
fold_change_vec0[i]=fold_change(1,al0,al0)
}
else{
fold_change_vec0[i]=fold_change(0,al0,al0)
}
}
mean_vec_treatement0=mean_vec_cont0*fold_change_vec0
mean_vec_treatement_rand=mean_vec_cont_rand*fold_change_vec0
# Regenerate the treatment samples with the fold changes applied.
c_pro_treatement0=mvrnorm(n=number_of_samples_train_trt,mu = mean_vec_treatement0,Sigma = sig_matrix0)
c_pro_treatement_rand=mvrnorm(n=number_of_samples_train_trt,mu = mean_vec_treatement_rand,Sigma = sig_matrix_rand)
if(norm_flag){
c_pro_treatement0[,Npro_cntrl_c:length(c_pro_treatement0[1,])]=runif(1,min=0,max=10e-6)*c_pro_treatement0[,Npro_cntrl_c:length(c_pro_treatement0[1,])]
c_pro_treatement_rand[,Npro_cntrl_c:length(c_pro_treatement_rand[1,])]=runif(1,min=0,max=10e-6)*c_pro_treatement_rand[,Npro_cntrl_c:length(c_pro_treatement_rand[1,])]
}
diff_rand_vec_trt=colMeans(c_pro_treatement_rand)-colMeans(c_pro_treatement0)
thresh_key_trt=euc_norm(diff_rand_vec_trt)
thresh_key_cntrl=euc_norm(diff_rand_vec_cntrl)
print("India")
# Accumulators for accepted ABC-rejection parameter draws.
k_list=list()
theta_list=list()
phi_list=list()
thetaa_list=list()
thetac_list=list()
ka_list=list()
kc_list=list()
count_1=0
###Synthetic sample data S0 done#####
###ABC-Rejection Sampling#########
norm_array=numeric(M_cal)
# ABC rejection: draw parameters from uniform priors, simulate a control
# dataset, and accept the draw when its column-mean distance to S0 is below
# thresh_key_cntrl.
for(j in 1:M_cal){
k=sample(160:240,1)/100
theta=sample(800:1200,1)
kc=sample(160:240,1)/100
ka=sample(450:550,1)/100
thetac=sample(80:120,1)
thetaa=sample(9e6:11e6,1)
#gamma_dist=rgamma(1000,shape=k,scale=theta_a)
gamma_dist_c=rgamma(1000,shape = kc,scale = thetac)
gamma_dist_a=rgamma(1000,shape = ka,scale = thetaa)
gamma_l=numeric(Npro_cntrl)
#for(i in 1:Npro_cntrl){
# gamma_l[i]=sample(gamma_dist,1)
#}
for(i in 1:(Npro_cntrl_c+Npro_cntrl_a)){
if(i<Npro_cntrl_c){
gamma_l[i]=sample(gamma_dist_c,1)
}
else{
gamma_l[i]=sample(gamma_dist_a,1)
}
}
mean_vec_cont=gamma_l
phi=runif(Npro,0.3,0.5) ###Defining the coefficient of variation
sig_matrix=matrix(numeric(Npro_cntrl*Npro_cntrl),nrow=Npro_cntrl)
sig_sq_vec=phi*mean_vec_cont*mean_vec_cont
for(i in 1:Npro_cntrl){
sig_matrix[i,i]=sig_sq_vec[i]
}
c_pro_control=mvrnorm(n=number_of_samples_train_cntrl,mu = mean_vec_cont,Sigma = sig_matrix)
if(norm_flag){
c_pro_control[,Npro_cntrl_c:length(c_pro_control[1,])]=runif(1,min=0,max=10e-6)*c_pro_control[,Npro_cntrl_c:length(c_pro_control[1,])]
}
diff_vec=colMeans(c_pro_control)-colMeans(c_pro_control0)
norm_array[j]=euc_norm(diff_vec)
# print(euc_norm(diff_vec))
#thresh_key=2*(median(norm_array)+min(norm_array))/3
if(norm_array[j]<thresh_key_cntrl){
#print(diff_vec)
count_1=count_1+1
k_list[count_1]=k
theta_list[count_1]=theta
# NOTE(review): phi is a length-Npro vector; `phi_list[count_1]=phi` stores
# only phi[1] (with a warning) -- confirm whether the mean was intended.
phi_list[count_1]=phi
ka_list[count_1]=ka
kc_list[count_1]=kc
thetaa_list[count_1]=thetaa
thetac_list[count_1]=thetac
}
}
# Point estimates ("_opt") = mean of the accepted ABC draws.
# NOTE(review): if no draw was accepted (count_1 == 0) the 1:length(...)
# loops index empty lists and error -- consider guarding.
k_vec=theta_vec=phi_vec=numeric(length(k_list))
ka_vec=kc_vec=thetaa_vec=thetac_vec=phi_vec=numeric(length(ka_list))
for(i in 1:length(k_vec)){
k_vec[i]=k_list[[i]]
theta_vec[i]=theta_list[[i]]
phi_vec[i]=phi_list[[i]]
}
print("french")
for(i in 1:length(ka_vec)){
ka_vec[i]=ka_list[[i]]
kc_vec[i]=kc_list[[i]]
thetaa_vec[i]=thetaa_list[[i]]
thetac_vec[i]=thetac_list[[i]]
}
k_opt=mean(k_vec)
theta_opt=mean(theta_vec)
phi_opt=mean(phi_vec)
ka_opt=mean(ka_vec)
kc_opt=mean(kc_vec)
thetaa_opt=mean(thetaa_vec)
thetac_opt=mean(thetac_vec)
#############ABC Rejection sampling done#######
##### Data from optimal parameters being generated#######
print("glucose")
# Draw protein means from gamma distributions at the ABC point estimates.
gamma_dist_opt=rgamma(1000,shape=k_opt,scale=theta_opt)
gamma_dist_a_opt=rgamma(1000,shape=ka_opt,scale=thetaa_opt)
gamma_dist_c_opt=rgamma(1000,shape=kc_opt,scale=thetac_opt)
gamma_l_opt=numeric(Npro_cntrl)
for(i in 1:(Npro_cntrl_c+Npro_cntrl_a)){
if(i<=Npro_cntrl_c){
gamma_l_opt[i]=sample(gamma_dist_c_opt,1)
}
else{
gamma_l_opt[i]=sample(gamma_dist_a_opt,1)
}
}
mean_vec_cont_opt=gamma_l_opt
# Diagonal covariance with variance = phi_opt * mean^2.
sig_matrix_opt=matrix(numeric(Npro*Npro),nrow=Npro)
sig_sq_vec_opt=phi_opt*mean_vec_cont_opt*mean_vec_cont_opt
for(i in 1:Npro_cntrl){
sig_matrix_opt[i,i]=sig_sq_vec_opt[i]
}
#####Generating dummy data till i understand the protein file###
flag_vec=numeric(Npro)
for(i in 1:Npro)
{
flag_vec[i]=sample(0:1,1)
}
####flag_vec is used to say if the protein is overexpressed or not #####
mean_vec_treatement1=gamma_l_opt
# NOTE(review): fold_change_vec is computed twice below and then discarded --
# the final `fold_change_vec=ratio_vec` overwrites both loops' results.
fold_change_vec=numeric(Npro)
for(i in 1:Npro){
fold_change_vec[i]=fold_change(flag_vec[i],amin,amax)
}
ratio_vec=colMeans(c_pro_treatement0)/colMeans(c_pro_control0)
for(i in 1:Npro){
if(ratio_vec[i]>1)
{
fold_change_vec[i]=fold_change(1,amin,amax)
}
else{
fold_change_vec[i]=fold_change(0,amin,amax)
}
}
fold_change_vec=ratio_vec
mean_vec_treatement=mean_vec_treatement1*fold_change_vec
####Generating random peptide control data###
c_pro_control=mvrnorm(n=number_of_samples_train_cntrl,mu = mean_vec_cont_opt,Sigma = sig_matrix_opt)
c_pro_treatement=mvrnorm(n=number_of_samples_train_trt,mu = mean_vec_treatement,Sigma = sig_matrix_opt)
if(norm_flag){
c_pro_control[,Npro_cntrl_c:length(c_pro_control[1,])]=runif(1,min=0,max=10e-6)*c_pro_control[,Npro_cntrl_c:length(c_pro_control[1,])]
c_pro_treatement[,Npro_cntrl_c:length(c_pro_treatement[1,])]=runif(1,min=0,max=10e-6)*c_pro_treatement[,Npro_cntrl_c:length(c_pro_treatement[1,])]
}
# Stack control (type 0) and treatment (type 1) samples into one data.frame.
l1=c_pro_control
l1=data.frame(cbind(l1,rep(0,number_of_samples_train_cntrl)))
colnames(l1)[Npro+1]="type"
l2=c_pro_treatement
l2=data.frame(cbind(l2,rep(1,number_of_samples_train_trt)))
colnames(l2)[Npro+1]="type"
#colnames(xlj_control)[Npro+1]="type"
c_pro_all=rbind(l1,l2)
c_pro_all$type=as.factor(c_pro_all$type)
# Inline copy of t_test_vals(): abs t statistic per protein column.
t_test_value_pro=numeric((length(c_pro_all[1,])-1))
len_t_test_pro=length(t_test_value_pro)
for(i in 1:length(t_test_value_pro)){
k1=t.test(c_pro_all[,i]~c_pro_all[,len_t_test_pro+1])
t_test_value_pro[i]=abs(k1$statistic)
}
# Keep only the Npro_for_analysis proteins with the largest |t|.
mean_vec_cont_opt=mean_vec_cont_opt[order(t_test_value_pro,decreasing = TRUE)]
mean_vec_cont_opt=mean_vec_cont_opt[1:Npro_for_analysis]
fold_change_vec=fold_change_vec[order(t_test_value_pro,decreasing = TRUE)]
fold_change_vec=fold_change_vec[1:Npro_for_analysis]
pro_pept_listed=1:Npro_cntrl
pro_list=pro_pept_listed[order(t_test_value_pro,decreasing=TRUE)][1:Npro_for_analysis]
# Shrink all dimension counters to the selected protein subset and rebuild
# the protein->peptide map against the new consecutive ids.
Npro=Npro_for_analysis
Npro_trt=Npro
Npro_cntrl=Npro
Npro_cntrl_a=floor(0.85*Npro)
Npro_cntrl_c=Npro-Npro_cntrl_a
Npep_trt=ceiling(1.5*Npro_trt)
Npep_cntrl=ceiling(1.5*Npro_cntrl)
Npep=Npep_trt
sig_matrix_opt=matrix(numeric(Npro*Npro),nrow=Npro)
sig_sq_vec_opt=phi_opt*mean_vec_cont_opt*mean_vec_cont_opt
###########prot_pept_file start#######
#prot_pept_list_cntrl=fasta_file_to_array_conv(pro_cntrl_file,pep_cntrl_file,1)
#prot_pept_list_trt=fasta_file_to_array_conv(pro_trt_file,pep_trt_file,1)
prot_pept_list_cntrl_new1=diff_elem_removal_total(prot_pept_list_cntrl,pro_list)
prot_pept_list_cntrl_new=prot_pept_list_reduction(prot_pept_list_cntrl_new1)
prot_pept_list_cntrl=prot_pept_list_cntrl_new
prot_pept_list_trt=prot_pept_list_cntrl
#print(prot_pept_list_cntrl)
Npep_cntrl=length(prot_pept_list_cntrl)
Npep_trt=Npep_cntrl
Npep=Npep_cntrl
#######Prot_pept_file_end############
for(i in 1:Npro){
sig_matrix_opt[i,i]=sig_sq_vec_opt[i]
}
#xlj_trt=data.frame(cbind(xlj_trt,rep(1,number_of_samples_train_trt)))
#print("gulshan")
mean_vec_treatement=mean_vec_cont_opt*fold_change_vec
#####For threshold generation####
# Recompute the acceptance threshold for the reduced protein set.
if(1){
k_rand=sample(160:240,1)/100
kc_rand=sample(160:240,1)/100
ka_rand=sample(400:600,1)/100
thetac_rand=sample(80:120,1)
thetaa_rand=sample(9e6:11e6,1)
# NOTE(review): 0.3:0.5 collapses to c(0.3), so phi_rand is always 0.3.
phi_rand=sample(0.3:0.5,1)
gamma_distc_rand=rgamma(1000,shape=kc_rand,scale=thetac_rand)
gamma_dista_rand=rgamma(1000,shape=ka_rand,scale=thetaa_rand)
gamma_rand=numeric(Npro_cntrl)
for(i in 1:(Npro_cntrl_c+Npro_cntrl_a)){
if(i<Npro_cntrl_c){
gamma_rand[i]=sample(gamma_distc_rand,1)
}
else{
gamma_rand[i]=sample(gamma_dista_rand,1)
}
}
mean_vec_cont_rand=gamma_rand
sig_matrix_rand=matrix(numeric(Npro_cntrl*Npro_cntrl),nrow=Npro_cntrl)
sig_sq_vec_rand=phi_rand*mean_vec_cont_rand*mean_vec_cont_rand
for(i in 1:Npro){
sig_matrix_rand[i,i]=sig_sq_vec_rand[i]
}
mean_vec_treatement_rand=mean_vec_cont_rand*fold_change_vec
#print(mean_vec_treatement_rand)
c_pro_control_rand=mvrnorm(n=number_of_samples_train_cntrl,mu = mean_vec_cont_rand,Sigma = sig_matrix_rand)
c_pro_treatement_rand=mvrnorm(n=number_of_samples_test_trt,mu = mean_vec_treatement_rand,Sigma = sig_matrix_rand)
if(norm_flag){
c_pro_control_rand[,Npro_cntrl_c:length(c_pro_control_rand[1,])]=runif(1,min=0,max=10e-6)*c_pro_control_rand[,Npro_cntrl_c:length(c_pro_control_rand[1,])]
c_pro_treatement_rand[,Npro_cntrl_c:length(c_pro_treatement_rand[1,])]=runif(1,min=0,max=10e-6)*c_pro_treatement_rand[,Npro_cntrl_c:length(c_pro_treatement_rand[1,])]
}
# NOTE(review): c_pro_control0 still has the pre-reduction column count; if
# the dimensions differ, this subtraction silently recycles -- confirm.
diff_rand_vec_cntrl=colMeans(c_pro_control_rand)-colMeans(c_pro_control0)
print(mean_vec_cont_rand)
thresh_key_cntrl=euc_norm(diff_rand_vec_cntrl)
print("lellina")
#print(thresh_key_cntrl)
}
#####Peptide generating function#####
# Simulate one peptide-level dataset (control + treatment) from protein-level
# means: proteins -> summed peptide signals (via prot_pept_list_cntrl) ->
# ionisation scaling (kappa * per-peptide efficiency) -> Gaussian (and
# optionally exponential) measurement noise -> roll-up back to protein-level
# abundances. Returns a data.frame with a trailing "type" column
# (0 = control, 1 = treatment) when train_test_flag is truthy; otherwise
# returns NULL invisibly.
# Relies on globals: sig_matrix_opt, norm_flag, Npro_cntrl_c, Npep, Npep_cntrl,
# Npep_trt, noise_factor_gauss, noise_factor_exp, Npro -- TODO confirm list.
peptide_generation1=function(mean_vec_cont_opt,fold_change_vec,phi_opt,Npro_cntrl,Npro_trt,prot_pept_list_cntrl,Npro_for_analysis,
prot_pept_list_cntrl_test,prot_pept_list_trt,prot_pept_list_trt_test,train_test_flag,number_of_samples_train_cntrl,number_of_samples_train_trt)
{
time1=proc.time()
#print(fold_change_vec)
#print(number_of_samples_train_cntrl)
#print("lellina23")
#print(thresh_key_cntrl)
mean_vec_treatement=mean_vec_cont_opt*fold_change_vec
#print("missile")
#print(mean_vec_treatement)
# Protein abundances: one multivariate-normal sample set per class.
c_pro_control=mvrnorm(n=number_of_samples_train_cntrl,mu = mean_vec_cont_opt,Sigma = sig_matrix_opt)
c_pro_treatement=mvrnorm(n=number_of_samples_train_trt,mu = mean_vec_treatement,Sigma = sig_matrix_opt)
#print("gokarna")
#print(length(c_pro_treatement[1,]))
#print(c_pro_treatement[,Npro_cntrl_c:length(c_pro_treatement[1,])])
#print("MOOCS")
if(norm_flag){
c_pro_control[,Npro_cntrl_c:length(c_pro_control[1,])]=runif(1,min=0,max=10e-6)*c_pro_control[,Npro_cntrl_c:length(c_pro_control[1,])]
c_pro_treatement[,Npro_cntrl_c:length(c_pro_treatement[1,])]=runif(1,min=0,max=10e-6)*c_pro_treatement[,Npro_cntrl_c:length(c_pro_treatement[1,])]
}
# Each peptide signal is the sum of the abundances of its parent proteins.
c_pep_control=matrix(numeric(Npep_cntrl*number_of_samples_train_cntrl),nrow = number_of_samples_train_cntrl)
c_pep_treatement=matrix(numeric(Npep_trt*number_of_samples_train_trt),nrow = number_of_samples_train_trt)
max_length_train=max(length(c_pep_control[1,]),length(c_pep_treatement[1,]))
for(j in 1:max_length_train){
#p3=sample(1:length(c_pro_control[1,]),sample(1:3,1))
#prot_pept_list_cntrl[[j]]=
p3=prot_pept_list_cntrl[[j]]
for(i in 1:length(c_pep_control[,1])){
#cat(i,"-",j,"-",p3,"\n")
for(k in 1:length(p3)){
c_pep_control[i,j]=c_pep_control[i,j]+c_pro_control[i,p3[k]]
c_pep_treatement[i,j]=c_pep_treatement[i,j]+c_pro_treatement[i,p3[k]]
}
}
}
#print(c_pep_control)
###print("grain")
####Generating random peptide treatement data###
# Set to 1 to print elapsed-time checkpoints while debugging.
checpoint_flag=0
###print("am out of this loop")
if(checpoint_flag){
print("checkpoint1")
print(proc.time()-time1)
}
# Expected observed intensity: peptide signal * kappa * random efficiency.
mu_matrix_cntrl=matrix(numeric(Npep_cntrl*number_of_samples_train_cntrl),nrow = number_of_samples_train_cntrl)
mu_matrix_trt=matrix(numeric(Npep_trt*number_of_samples_train_trt),nrow = number_of_samples_train_trt)
kappa=5
efficiency_vector=runif(Npep,0.1,1)
for(i in 1:length(mu_matrix_cntrl[,1])){
for(j in 1:length(mu_matrix_cntrl[1,])){
mu_matrix_cntrl[i,j]=c_pep_control[i,j]*kappa*efficiency_vector[j]
mu_matrix_trt[i,j]=c_pep_treatement[i,j]*kappa*efficiency_vector[j]
}
}
if(checpoint_flag){
print("checkpoint2")
print(proc.time()-time1)
}
###print("ginger")
# Noise variance model: alpha*mu^2 + beta*mu.
alpha=0.03
beta=3.6
var_vector_noisy_gaussian_cntrl=alpha*(mu_matrix_cntrl^2)+beta*mu_matrix_cntrl
var_vector_noisy_gaussian_trt=alpha*(mu_matrix_trt^2)+beta*mu_matrix_trt
total_vector_cntrl=matrix(numeric(Npep_cntrl*number_of_samples_train_cntrl),nrow = number_of_samples_train_cntrl)
total_vector_trt=matrix(numeric(Npep_cntrl*number_of_samples_train_trt),nrow = number_of_samples_train_trt)
if(checpoint_flag){
print("checkpoint2-a")
print(proc.time()-time1)
}
p1=matrix(numeric(Npep_cntrl*number_of_samples_train_cntrl),nrow = number_of_samples_train_cntrl)
p2=p1
# Disabled alternative noise generation path (kept for reference).
if(0){
for(i in 1:length(total_vector_cntrl[,1])){
for(j in 1:length(total_vector_cntrl[1,])){
p1[i,j]=mvrnorm(n=1,mu = 0, Sigma = abs(var_vector_noisy_gaussian_cntrl[i,j]))
p2[i,j]=mvrnorm(n=1,mu = 0, Sigma = abs(var_vector_noisy_gaussian_cntrl[i,j]))
}
}
total_vector_cntrl1=mu_matrix_cntrl+p1
total_vector_trt1=mu_matrix_trt+p2
}
if(checpoint_flag){
print("checkpoint2-a1")
print(proc.time()-time1)
}
# Observed intensities = expected + weighted Gaussian noise (+ optional
# exponential noise; noise_factor_exp is 0 in the current configuration).
for(i in 1:length(total_vector_cntrl[,1])){
#print("hello-1")
#print(length(total_vector_cntrl[1,]))
#print(length(total_vector_cntrl[,1]))
#print("hello-2")
for(j in 1:length(total_vector_cntrl[1,])){
#cat(i,j,"\n")
total_vector_cntrl[i,j]=mu_matrix_cntrl[i,j]+noise_factor_gauss*mvrnorm(n=1,mu = 0, Sigma = abs(var_vector_noisy_gaussian_cntrl[i,j]))+noise_factor_exp*rexp(1,rate = abs(mu_matrix_cntrl[i,j]))
total_vector_trt[i,j]=mu_matrix_trt[i,j]+noise_factor_gauss*mvrnorm(n=1,mu = 0, Sigma = abs(var_vector_noisy_gaussian_trt[i,j]))+noise_factor_exp*rexp(1,rate = abs(mu_matrix_trt[i,j]))#+
#total_vector_cntrl[i,j]=mu_matrix_cntrl[i,j]+noise_factor*mvrnorm(n=1,mu = 0, Sigma = abs(var_vector_noisy_gaussian_cntrl[i,j]))
#total_vector_trt[i,j]=mu_matrix_trt[i,j]+noise_factor*mvrnorm(n=1,mu = 0, Sigma = abs(var_vector_noisy_gaussian_trt[i,j]))
#total_vector_cntrl[,j]=mu_matrix_cntrl[,j]+noise_factor*mvrnorm(n=1,mu = 0, Sigma = abs(var_vector_noisy_gaussian_cntrl[1,j]))
#total_vector_trt[,j]=mu_matrix_trt[,j]+noise_factor*mvrnorm(n=1,mu = 0, Sigma = abs(var_vector_noisy_gaussian_trt[1,j]))
#total_vector_cntrl[i,]=mu_matrix_cntrl[i,]+noise_factor*mvrnorm(n=1,mu = 0, Sigma = abs(var_vector_noisy_gaussian_cntrl[i,1]))
#total_vector_trt[i,]=mu_matrix_trt[i,]+noise_factor*mvrnorm(n=1,mu = 0, Sigma = abs(var_vector_noisy_gaussian_trt[i,1]))
}
}
if(checpoint_flag){
print("checkpoint2-b")
print(proc.time()-time1)
}
#total_vector_cntrl=data.frame(cbind(total_vector_cntrl,numeric(50)))
#total_vector_trt=data.frame(cbind(total_vector_trt,numeric(50)+1))
#total_vector_cntrl$X31=factor(total_vector_cntrl$X31)
#total_vector_trt$X31=factor(total_vector_trt$X31)
###To calculate rolled up abundances###
#lolol
x_pro_control=matrix(numeric(Npro_cntrl*number_of_samples_train_cntrl),nrow = number_of_samples_train_cntrl)
x_pro_trt=matrix(numeric(Npro_trt*number_of_samples_train_trt),nrow = number_of_samples_train_trt)
# Invert the protein->peptide map into peptide->protein lists.
pept_to_prot_cntrl=list()
pept_to_prot_trt=list()
if(checpoint_flag){
print("checkpoint3")
print(proc.time()-time1)
}
for(i in 1:length(prot_pept_list_cntrl)){
for(j in 1:length(prot_pept_list_cntrl[i][[1]])){
###print(prot_pept_list_cntrl[i][[1]][j])
k1=prot_pept_list_cntrl[i][[1]][j]
###print(k1)
pept_to_prot_cntrl[k1][[1]][length(pept_to_prot_cntrl[k1][[1]])+1]=i
pept_to_prot_trt[k1][[1]][length(pept_to_prot_trt[k1][[1]])+1]=i
}
}
#print("||||||||")
#print(prot_pept_list_trt)
#print("||||||||")
###print("greek2")
###print("greek1")
###print("greek3")
if(checpoint_flag){
print("checkpoint4")
print(proc.time()-time1)
}
xlj_control=matrix(numeric(Npro_cntrl*number_of_samples_train_cntrl),nrow = number_of_samples_train_cntrl)
xlj_trt=matrix(numeric(Npro_trt*number_of_samples_train_trt),nrow = number_of_samples_train_trt)
#print("bozer")
#print(total_vector_cntrl)
###############################
# Patch unmapped (NULL) slots by copying a neighbour's mapping.
# NOTE(review): comparing a list slice to the string "NULL" relies on
# as.character(list(NULL)) == "NULL"; fragile but appears intentional.
for(i in 1:length(pept_to_prot_cntrl)){
if(pept_to_prot_cntrl[2]=="NULL"){
pept_to_prot_cntrl[2]=pept_to_prot_cntrl[3]
}
if(pept_to_prot_cntrl[1]=="NULL"){
pept_to_prot_cntrl[1]=pept_to_prot_cntrl[2]
}
if(pept_to_prot_cntrl[i]=="NULL"){
pept_to_prot_cntrl[i]=pept_to_prot_cntrl[1]
}
}
pept_to_prot_trt=pept_to_prot_cntrl
############################
#print("fine till here")
# Roll peptide intensities back up to protein level.
# NOTE(review): p4 is a scalar inside the k loop, so /length(p4) divides by 1
# every iteration -- an averaging step may have been intended; confirm.
for(j in 1:length(xlj_control[1,])){ #for all proteins
for(i in 1:length(xlj_control[,1])) { #for all samples
for(k in 1:length(pept_to_prot_cntrl[j][[1]])){
p4=pept_to_prot_cntrl[j][[1]][k]
#print(p4)
if(is.null(p4)){
p4=pept_to_prot_cntrl[1][[1]][1]
}
xlj_control[i,j]=xlj_control[i,j]+total_vector_cntrl[i,p4]
xlj_control[i,j]=xlj_control[i,j]/length(p4)
xlj_trt[i,j]=xlj_trt[i,j]+total_vector_trt[i,p4]
xlj_trt[i,j]=xlj_trt[i,j]/length(p4)
}
}
}
# print("bozer")
#print(xlj_trt)
###print("greekza")
########
if(checpoint_flag){
print("checkpoint5")
print(proc.time()-time1)
}
# Assemble the labelled data.frame: proteins as columns plus "type" label.
xlj_control=data.frame(xlj_control)
xlj_control=data.frame(cbind(xlj_control,numeric(number_of_samples_train_cntrl)))
colnames(xlj_control)[Npro+1]="type"
#xlj_control$type=as.factor(xlj_control$type)
xlj_trt=data.frame(xlj_trt)
xlj_trt=data.frame(cbind(xlj_trt,rep(1,number_of_samples_train_trt)))
colnames(xlj_trt)[Npro+1]="type"
#print("briggy")
#print(xlj_control_test)
#xlj_control$"type"=numeric(50)
#xlj_trt$"type"=(numeric(number_of_samples_train_trt)+1)
if(1){
xlj_control=data.frame(xlj_control)
xlj_trt=data.frame(xlj_trt)
xlj_all=rbind(xlj_control,xlj_trt)
xlj_all$type=as.factor(xlj_all$type)
}
t_test_value=numeric((length(xlj_all[1,])-1))
len_t_test=length(t_test_value)
xlj_all_orig=xlj_all
p11=xlj_all$type
#p22=xlj_all_test$type
# These assignments only touch local copies of the globals.
Npro=Npro_for_analysis
Npro_trt=Npro
Npro_cntrl=Npro
#print("kikiki")
#print(xlj_all_test)
if(checpoint_flag){
print("checkpoint6")
print(proc.time()-time1)
}
if(train_test_flag){
return(xlj_all)}
}
####xlj_all is the required data as per equation 12###
####xlj_all_test is the required test data####
# Training and test sets come from independent calls with the same parameters.
if(1){
xlj_all=peptide_generation1(mean_vec_cont_opt,fold_change_vec,phi_opt,Npro_cntrl,Npro_trt,prot_pept_list_cntrl,Npro_for_analysis,
prot_pept_list_cntrl_test,prot_pept_list_trt,prot_pept_list_trt_test,train_test_flag=1,number_of_samples_train_cntrl,number_of_samples_train_trt)
xlj_all_test=peptide_generation1(mean_vec_cont_opt,fold_change_vec,phi_opt,Npro_cntrl,Npro_trt,prot_pept_list_cntrl,Npro_for_analysis,
prot_pept_list_cntrl_test,prot_pept_list_trt,prot_pept_list_trt_test,train_test_flag=1,number_of_samples_test_cntrl,number_of_samples_test_trt)
#xlj_all_test= xlj_all_test[sample(nrow(xlj_all_test)),]
}
#1. LDA classifier#
####Add the mean and the gaussian vector####
#SNR=1/(alpha+(beta/alpha))
##print("MCMC algorithm starting")
#######Upamanyu's algorithm-3...The ABC-MCMC-algorithm#####
#####First three steps of ABC-MCMC#####
###gamma_0 is the gamma related to S^(0)_(0) in the paper. This is generated with k_opt and theta_opt
###gamma_dist_0 is the proper gamma distribution pertaining to S_(0) in the paper. Its generated with k_0 and theta_0.
#Step1: Sampling gamma and generating mean vectors##
gamma_dist_0=rgamma(100,shape=k_opt,scale=theta_opt)
gamma_dist_a_0=rgamma(1000,shape=ka_opt,scale=thetaa_opt)
gamma_dist_c_0=rgamma(1000,shape=kc_opt,scale=thetac_opt)
c_pro_control_0=c_pro_control0
c_pro_treatment_0=c_pro_treatement0
gamma_0=numeric(Npro)
for(i in 1:(Npro_cntrl_c+Npro_cntrl_a)){
if(i<=Npro_cntrl_c){
gamma_0[i]=sample(gamma_dist_c_0,1)
}
else{
gamma_0[i]=sample(gamma_dist_a_0,1)
}
}
mean_vec_cntrl_0_0=gamma_0
mean_vec_trt_0_0=gamma_0*fold_change_vec
sig_matrix_0_0=matrix(numeric(Npro*Npro),nrow=Npro)
sig_sq_vec_0_0=phi_opt*mean_vec_cntrl_0_0*mean_vec_cntrl_0_0
for(i in 1:Npro){
sig_matrix_0_0[i,i]=sig_sq_vec_0_0[i]
}
###Step2: Generating protein data
c_pro_control_0_0=mvrnorm(n=number_of_samples_train_cntrl,mu = mean_vec_cntrl_0_0,Sigma = sig_matrix_0_0)
c_pro_treatment_0_0=mvrnorm(n=number_of_samples_train_trt,mu = mean_vec_trt_0_0,Sigma = sig_matrix_0_0)
#print("goi")
# Distance of the proposal to the reference dataset, per class.
diff_vec_cntrl=colMeans(c_pro_control_0_0)-colMeans(c_pro_control_0)
diff_vec_trt=colMeans(c_pro_treatment_0_0)-colMeans(c_pro_treatment_0)
norm_cntrl=euc_norm(diff_vec_cntrl)
norm_trt=euc_norm(diff_vec_trt)
print("here maxhappan")
print(gamma_0)
# Keep re-drawing the MCMC starting point until both class distances fall
# within twice the control threshold.
while(norm_cntrl>2*thresh_key_cntrl | norm_trt>2*thresh_key_cntrl){
#print("i am here")
# print(norm_cntrl)
#print(norm_trt)
#print(0.8*thresh_key_cntrl)
#print(0.8*thresh_key_trt)
#print("-----------")
for(i in 1:(Npro_cntrl_c+Npro_cntrl_a)){
if(i<=Npro_cntrl_c){
gamma_0[i]=sample(gamma_dist_c_0,1)
}
else{
gamma_0[i]=sample(gamma_dist_a_0,1)
}
}
mean_vec_cntrl_0_0=gamma_0
mean_vec_trt_0_0=gamma_0*fold_change_vec
sig_matrix_0_0=matrix(numeric(Npro*Npro),nrow=Npro)
sig_sq_vec_0_0=phi_opt*mean_vec_cntrl_0_0*mean_vec_cntrl_0_0
for(i in 1:Npro){
sig_matrix_0_0[i,i]=sig_sq_vec_0_0[i]
}
# NOTE(review): the freshly sampled gamma_0 means are immediately replaced
# by mean_vec_cont_opt here, so the resample above only affects the
# covariance -- confirm this is intentional.
mean_vec_cntrl_0_0=mean_vec_cont_opt
#print("DISHA")
mean_vec_trt_0_0=mean_vec_cont_opt*fold_change_vec
###Step2: Generating protein data
c_pro_control_0_0=mvrnorm(n=number_of_samples_train_cntrl,mu = mean_vec_cntrl_0_0,Sigma = sig_matrix_0_0)
c_pro_treatment_0_0=mvrnorm(n=number_of_samples_train_trt,mu = mean_vec_trt_0_0,Sigma = sig_matrix_0_0)
diff_vec_cntrl=colMeans(c_pro_control_0_0)-colMeans(c_pro_control_0)
diff_vec_trt=colMeans(c_pro_treatment_0_0)-colMeans(c_pro_treatment_0)
# print("hig")
norm_cntrl=euc_norm(diff_vec_cntrl)
#print(norm_cntrl)
norm_trt=euc_norm(diff_vec_trt)
#print(norm_trt)
#print("kig")
}
#####Step 3 is just an if condition which is taken care at the begining
#####Steps 5, 6 and 7 of the markov chain####
c_pro_control_markov=c_pro_control_0_0
c_pro_trt_markov=c_pro_treatment_0_0
gamma_vec=colMeans(c_pro_control_markov)
#####Main MCMC chain#####
count_markov=0
markov_iter=10000
gamma_markov=matrix(0L, nrow = markov_iter,ncol=Npro)
for(j in 1:markov_iter){
gamma_vec=colMeans(c_pro_control_markov)
mean_vec_cntrl_markov=gamma_vec
mean_vec_trt_markov=gamma_vec*fold_change_vec
###print(i)
sig_matrix_markov=matrix(numeric(Npro*Npro),nrow=Npro)
sig_sq_vec_markov=phi_opt*mean_vec_cntrl_markov*mean_vec_cntrl_markov
sig_sq_vec_markov=rep(0.1,length(mean_vec_cont))
for(i in 1:Npro){
sig_matrix_markov[i,i]=sig_sq_vec_markov[i]
}
c_pro_control_markov=mvrnorm(n=number_of_samples_train_cntrl,mu = mean_vec_cntrl_markov,Sigma = sig_matrix_markov)
c_pro_trt_markov=mvrnorm(n=number_of_samples_train_trt,mu = mean_vec_trt_markov,Sigma = sig_matrix_markov)
diff_vec_cntrl_markov=colMeans(c_pro_control_markov)-colMeans(c_pro_control_0)
diff_vec_trt_markov=colMeans(c_pro_trt_markov)-colMeans(c_pro_treatment_0)
norm_cntrl_markov=euc_norm(diff_vec_cntrl_markov)
norm_trt_markov=euc_norm(diff_vec_trt_markov)
###print("-----------")
###print(norm_cntrl_markov)
###print(norm_trt_markov)
###print("-----------")
if(norm_cntrl_markov<0.6*thresh_key_cntrl & norm_trt_markov<0.6*thresh_key_cntrl){
#mean_ratio=
gamma_vec=colMeans(c_pro_control_markov)
###print(j)
count_markov=count_markov+1
}
gamma_markov[j,]=gamma_vec
}
print("MCMC algorithm done")
######Algorithm-3 in upamanyu paper done#####
#####Generation of kernel data and then classification###
#print("running the function")
#print(gamma_vec)
markov_Data=peptide_generation1(mean_vec_cont_opt,fold_change_vec,phi_opt,Npro_cntrl,Npro_trt,prot_pept_list_cntrl,Npro_for_analysis,
prot_pept_list_cntrl_test,prot_pept_list_trt,prot_pept_list_trt_test,train_test_flag=1,number_of_samples_train_cntrl,number_of_samples_train_trt)
print("mrinal")
markov_Data_test=peptide_generation1(mean_vec_cont_opt,fold_change_vec,phi_opt,Npro_cntrl,Npro_trt,prot_pept_list_cntrl,Npro_for_analysis,
prot_pept_list_cntrl_test,prot_pept_list_trt,prot_pept_list_trt_test,train_test_flag = 0,number_of_samples_test_cntrl,number_of_samples_test_trt)
print("dingchaka")
gaussian_normal_kernel=mvrnorm(n=1000,mu=numeric(Npro),Sigma = diag(Npro))
gaussian_normal_kernel_density=density(gaussian_normal_kernel,bw=0.5)
xlj_all1=xlj_all
xlj_all_test1=xlj_all_test
#xlj_all_test1=xlj_all_test
####Designing classifiers####
xlj_all1=xlj_all
xlj_all_test1=xlj_all_test
if(1){
lda_classifier_51<-lda(type ~ .,data=xlj_all1)
predictions_lda_51=predict(lda_classifier_51,xlj_all_test1[,1:Npro])$class
table_data_lda_51=table(predictions_lda_51,xlj_all_test1[,Npro+1])
predictions_lda_51_app=predict(lda_classifier_51,xlj_all1[,1:Npro])$class
table_data_lda_51_app=table(predictions_lda_51_app,xlj_all1[,Npro+1])
print(table_data_lda_51)
training_labels1=xlj_all1$type
knn_trained1<-knn(train = xlj_all1[,1:Npro] , test =xlj_all_test1[,1:Npro] , cl = training_labels1, k=3)
table_data_knn_51=table(knn_trained1,xlj_all_test1[,Npro+1])
knn_trained_app1<-knn(train = xlj_all1[,1:Npro] , test =xlj_all1[,1:Npro] , cl = training_labels1, k=3)
table_data_knn_51_app1=table(knn_trained_app1,xlj_all1[,Npro+1])
print(table_data_knn_51)
}
abc_mcmc_result_vector=rep(2,length(xlj_all_test[,1]))
abc_mcmc_result_vector1=rep(2,length(xlj_all_test[,1]))
abc_mcmc_result_vector2=rep(2,length(xlj_all_test[,1]))
abc_mcmc_result_vector3=rep(2,length(xlj_all_test[,1]))
abc_mcmc_result_vector4=rep(2,length(xlj_all_test[,1]))
abc_mcmc_result_vector5=rep(2,length(xlj_all_test[,1]))
abc_mcmc_result_vector6=rep(2,length(xlj_all_test[,1]))
abc_mcmc_result_vector7=rep(2,length(xlj_all_test[,1]))
abc_mcmc_result_vector8=rep(2,length(xlj_all_test[,1]))
abc_mcmc_result_vector9=rep(2,length(xlj_all_test[,1]))
abc_mcmc_result_vector10=rep(2,length(xlj_all_test[,1]))
abc_mcmc_result_vector11=rep(2,length(xlj_all_test[,1]))
abc_mcmc_result_vector12=rep(2,length(xlj_all_test[,1]))
abc_mcmc_result_vector13=rep(2,length(xlj_all_test[,1]))
abc_mcmc_result_vector14=rep(2,length(xlj_all_test[,1]))
print("1234")
#print(table_data_lda_50)
#print(table_data_knn_50)
total_outside_sum_one_arr=numeric(nrow(xlj_all_test))
total_outside_sum_zero_arr=numeric(nrow(xlj_all_test))
total_outside_sum_one_arr1=numeric(nrow(xlj_all_test))
total_outside_sum_zero_arr1=numeric(nrow(xlj_all_test))
total_outside_sum_one_arr2=numeric(nrow(xlj_all_test))
total_outside_sum_zero_arr2=numeric(nrow(xlj_all_test))
total_outside_sum_one_arr3=numeric(nrow(xlj_all_test))
total_outside_sum_zero_arr3=numeric(nrow(xlj_all_test))
total_outside_sum_one_arr4=numeric(nrow(xlj_all_test))
total_outside_sum_zero_arr4=numeric(nrow(xlj_all_test))
total_outside_sum_one_arr5=numeric(nrow(xlj_all_test))
total_outside_sum_zero_arr5=numeric(nrow(xlj_all_test))
total_outside_sum_one_arr6=numeric(nrow(xlj_all_test))
total_outside_sum_zero_arr6=numeric(nrow(xlj_all_test))
total_outside_sum_one_arr7=numeric(nrow(xlj_all_test))
total_outside_sum_zero_arr7=numeric(nrow(xlj_all_test))
total_outside_sum_one_arr8=numeric(nrow(xlj_all_test))
total_outside_sum_zero_arr8=numeric(nrow(xlj_all_test))
total_outside_sum_one_arr9=numeric(nrow(xlj_all_test))
total_outside_sum_zero_arr9=numeric(nrow(xlj_all_test))
total_outside_sum_one_arr10=numeric(nrow(xlj_all_test))
total_outside_sum_zero_arr10=numeric(nrow(xlj_all_test))
total_outside_sum_one_arr11=numeric(nrow(xlj_all_test))
total_outside_sum_zero_arr11=numeric(nrow(xlj_all_test))
total_outside_sum_one_arr12=numeric(nrow(xlj_all_test))
total_outside_sum_zero_arr12=numeric(nrow(xlj_all_test))
total_outside_sum_one_arr13=numeric(nrow(xlj_all_test))
total_outside_sum_zero_arr13=numeric(nrow(xlj_all_test))
total_outside_sum_one_arr14=numeric(nrow(xlj_all_test))
total_outside_sum_zero_arr14=numeric(nrow(xlj_all_test))
outside_full_sum_one_arr=numeric(nrow(xlj_all_test))
outside_full_sum_zero_arr=numeric(nrow(xlj_all_test))
#######Prediction by the ABC MCMC model #####
if(1){
for(i in 1:nrow(xlj_all_test)){
test_data=xlj_all_test[i,1:Npro]
total_outside_sum_zero=0 ####outside Sum for all the markov iterations
total_outside_sum_one=0
total_outside_sum_zero=0 ####outside Sum for all the markov iterations
total_outside_sum_one=0
outside_full_sum_zero=0
outside_full_sum_one=0
total_outside_sum_zero1=0 ####outside Sum for all the markov iterations
total_outside_sum_one1=0
total_outside_sum_zero2=0 ####outside Sum for all the markov iterations
total_outside_sum_one2=0
total_outside_sum_zero1=0 ####outside Sum for all the markov iterations
total_outside_sum_one1=0
total_outside_sum_zero2=0 ####outside Sum for all the markov iterations
total_outside_sum_one2=0
total_outside_sum_zero3=0 ####outside Sum for all the markov iterations
total_outside_sum_one3=0
total_outside_sum_zero4=0 ####outside Sum for all the markov iterations
total_outside_sum_one4=0
total_outside_sum_zero5=0 ####outside Sum for all the markov iterations
total_outside_sum_one5=0
total_outside_sum_zero6=0 ####outside Sum for all the markov iterations
total_outside_sum_one6=0
total_outside_sum_zero7=0 ####outside Sum for all the markov iterations
total_outside_sum_one7=0
total_outside_sum_zero8=0 ####outside Sum for all the markov iterations
total_outside_sum_one8=0
total_outside_sum_zero9=0 ####outside Sum for all the markov iterations
total_outside_sum_one9=0
total_outside_sum_zero10=0 ####outside Sum for all the markov iterations
total_outside_sum_one10=0
total_outside_sum_zero11=0 ####outside Sum for all the markov iterations
total_outside_sum_one11=0
total_outside_sum_zero12=0 ####outside Sum for all the markov iterations
total_outside_sum_one12=0
total_outside_sum_zero13=0 ####outside Sum for all the markov iterations
total_outside_sum_one13=0
total_outside_sum_zero14=0 ####outside Sum for all the markov iterations
total_outside_sum_one14=0
print(paste("Running the row",i))
##Only last 70% are considered for the calculations as first 30% are considered as the burn-in stage
count_123=0
print("here man")
##Only last 70% are considered for the calculations as first 30% are considered as the burn-in stage
count_123=0
print("here man")
for(j in (ceiling(0.9985*markov_iter)):(markov_iter-1)){
# ##print(paste("Markov-chain_number",j))
#total_outside_sum_zero=0
#total_outside_sum_one=0
#ptm <- proc.time()
markov_Data=peptide_generation1(gamma_markov[j,],fold_change_vec,phi_opt,Npro_cntrl,Npro_trt,prot_pept_list_cntrl,Npro_for_analysis,
prot_pept_list_cntrl_test,prot_pept_list_trt,prot_pept_list_trt_test,train_test_flag = 1,number_of_samples_train_cntrl,number_of_samples_train_trt)
###print(proc.time() - ptm)
#print("time-1")
markov_row_length=length(markov_Data[,1])
markov_length_class=markov_row_length/2
total_inside_sum_zero=0 ###inside sum is for sum of all the n-values of markov_data
total_inside_sum_one=0
inside_full_sum_zero=0
inside_full_sum_one=0
total_inside_sum_zero1=0 ###inside sum is for sum of all the n-values of markov_data
total_inside_sum_one1=0
inside_full_sum_zero1=0
inside_full_sum_one1=0
total_inside_sum_zero2=0 ###inside sum is for sum of all the n-values of markov_data
total_inside_sum_one2=0
inside_full_sum_zero2=0
inside_full_sum_one2=0
total_inside_sum_zero3=0 ###inside sum is for sum of all the n-values of markov_data
total_inside_sum_one3=0
inside_full_sum_zero3=0
inside_full_sum_one3=0
total_inside_sum_zero4=0 ###inside sum is for sum of all the n-values of markov_data
total_inside_sum_one4=0
inside_full_sum_zero4=0
inside_full_sum_one4=0
total_inside_sum_zero5=0 ###inside sum is for sum of all the n-values of markov_data
total_inside_sum_one5=0
inside_full_sum_zero5=0
inside_full_sum_one5=0
total_inside_sum_zero6=0 ###inside sum is for sum of all the n-values of markov_data
total_inside_sum_one6=0
inside_full_sum_zero6=0
inside_full_sum_one6=0
total_inside_sum_zero7=0 ###inside sum is for sum of all the n-values of markov_data
total_inside_sum_one7=0
inside_full_sum_zero7=0
inside_full_sum_one7=0
total_inside_sum_zero8=0 ###inside sum is for sum of all the n-values of markov_data
total_inside_sum_one8=0
inside_full_sum_zero8=0
inside_full_sum_one8=0
total_inside_sum_zero9=0 ###inside sum is for sum of all the n-values of markov_data
total_inside_sum_one9=0
inside_full_sum_zero9=0
inside_full_sum_one9=0
total_inside_sum_zero10=0 ###inside sum is for sum of all the n-values of markov_data
total_inside_sum_one10=0
inside_full_sum_zero10=0
inside_full_sum_one10=0
total_inside_sum_zero11=0 ###inside sum is for sum of all the n-values of markov_data
total_inside_sum_one11=0
inside_full_sum_zero11=0
inside_full_sum_one11=0
total_inside_sum_zero12=0 ###inside sum is for sum of all the n-values of markov_data
total_inside_sum_one12=0
inside_full_sum_zero12=0
inside_full_sum_one12=0
total_inside_sum_zero13=0 ###inside sum is for sum of all the n-values of markov_data
total_inside_sum_one13=0
inside_full_sum_zero13=0
inside_full_sum_one13=0
total_inside_sum_zero14=0 ###inside sum is for sum of all the n-values of markov_data
total_inside_sum_one14=0
inside_full_sum_zero14=0
inside_full_sum_one14=0
for(k in 1:(markov_length_class)){
#inside_sum_zero=0
diff_data_zero=markov_Data[k,1:Npro]-test_data
diff_data_zero=markov_Data[k,1:Npro]-test_data
t0=diff_data_zero
##print(paste("Zero-",sum(diff_data_zero)))
#diff_data_zero=abs(normalize(markov_Data[k,1:Npro])-normalize(test_data))
##print("###########zero#########")
##print(diff_data_zero)
mean_diff_data_zero=sum(diff_data_zero)/length(diff_data_zero)
# #print(paste("Zero-",sum(diff_data_zero)))
##print(paste("zero-",mean_diff_data_zero))
#inside_full_sum_zero=inside_full_sum_zero+mean_diff_data_zero
inside_full_sum_zero=inside_full_sum_zero+sum(diff_data_zero)
##print("#########################")
if(1){
#diff_data_zero=diff_data_zero/abs(max(diff_data_zero))
#print(diff_data_zero)
k0=diff_data_zero
k00=k0
k0=k00/4000
diff_data_zero=k0/12000
diff_data_zero1=k0/14000
diff_data_zero2=k0/16000
diff_data_zero3=k0/18000
diff_data_zero4=k0/20000
diff_data_zero5=k0/22000
diff_data_zero6=k0/24000
diff_data_zero7=k0/26000
diff_data_zero8=k0/28000
diff_data_zero9=k0/30000
diff_data_zero10=k0/32000
diff_data_zero11=k0/34000
diff_data_zero12=k0/36000
diff_data_zero13=k0/38000
diff_data_zero14=k0/40000
kernel_data=approx(gaussian_normal_kernel_density$x,gaussian_normal_kernel_density$y,xout = diff_data_zero)
kernel_data$y[is.na(kernel_data$y)]=0
total_inside_sum_zero=total_inside_sum_zero+mean(kernel_data$y)
kernel_data1=approx(gaussian_normal_kernel_density$x,gaussian_normal_kernel_density$y,xout = diff_data_zero1)
kernel_data1$y[is.na(kernel_data1$y)]=0
total_inside_sum_zero1=total_inside_sum_zero1+mean(kernel_data1$y)
kernel_data2=approx(gaussian_normal_kernel_density$x,gaussian_normal_kernel_density$y,xout = diff_data_zero2)
kernel_data2$y[is.na(kernel_data2$y)]=0
total_inside_sum_zero2=total_inside_sum_zero2+mean(kernel_data2$y)
kernel_data3=approx(gaussian_normal_kernel_density$x,gaussian_normal_kernel_density$y,xout = diff_data_zero3)
kernel_data3$y[is.na(kernel_data3$y)]=0
total_inside_sum_zero3=total_inside_sum_zero3+mean(kernel_data3$y)
kernel_data4=approx(gaussian_normal_kernel_density$x,gaussian_normal_kernel_density$y,xout = diff_data_zero4)
kernel_data4$y[is.na(kernel_data4$y)]=0
total_inside_sum_zero4=total_inside_sum_zero4+mean(kernel_data4$y)
kernel_data5=approx(gaussian_normal_kernel_density$x,gaussian_normal_kernel_density$y,xout = diff_data_zero5)
kernel_data5$y[is.na(kernel_data5$y)]=0
total_inside_sum_zero5=total_inside_sum_zero5+mean(kernel_data5$y)
kernel_data6=approx(gaussian_normal_kernel_density$x,gaussian_normal_kernel_density$y,xout = diff_data_zero6)
kernel_data6$y[is.na(kernel_data6$y)]=0
total_inside_sum_zero6=total_inside_sum_zero6+mean(kernel_data6$y)
kernel_data7=approx(gaussian_normal_kernel_density$x,gaussian_normal_kernel_density$y,xout = diff_data_zero7)
kernel_data7$y[is.na(kernel_data7$y)]=0
total_inside_sum_zero7=total_inside_sum_zero7+mean(kernel_data7$y)
kernel_data8=approx(gaussian_normal_kernel_density$x,gaussian_normal_kernel_density$y,xout = diff_data_zero8)
kernel_data8$y[is.na(kernel_data8$y)]=0
total_inside_sum_zero8=total_inside_sum_zero8+mean(kernel_data8$y)
kernel_data9=approx(gaussian_normal_kernel_density$x,gaussian_normal_kernel_density$y,xout = diff_data_zero9)
kernel_data9$y[is.na(kernel_data9$y)]=0
total_inside_sum_zero9=total_inside_sum_zero9+mean(kernel_data9$y)
kernel_data10=approx(gaussian_normal_kernel_density$x,gaussian_normal_kernel_density$y,xout = diff_data_zero10)
kernel_data10$y[is.na(kernel_data10$y)]=0
total_inside_sum_zero10=total_inside_sum_zero10+mean(kernel_data10$y)
kernel_data11=approx(gaussian_normal_kernel_density$x,gaussian_normal_kernel_density$y,xout = diff_data_zero11)
kernel_data11$y[is.na(kernel_data11$y)]=0
total_inside_sum_zero11=total_inside_sum_zero11+mean(kernel_data11$y)
kernel_data12=approx(gaussian_normal_kernel_density$x,gaussian_normal_kernel_density$y,xout = diff_data_zero12)
kernel_data12$y[is.na(kernel_data12$y)]=0
total_inside_sum_zero12=total_inside_sum_zero12+mean(kernel_data12$y)
kernel_data13=approx(gaussian_normal_kernel_density$x,gaussian_normal_kernel_density$y,xout = diff_data_zero13)
kernel_data13$y[is.na(kernel_data13$y)]=0
total_inside_sum_zero13=total_inside_sum_zero13+mean(kernel_data13$y)
kernel_data14=approx(gaussian_normal_kernel_density$x,gaussian_normal_kernel_density$y,xout = diff_data_zero14)
kernel_data14$y[is.na(kernel_data14$y)]=0
total_inside_sum_zero14=total_inside_sum_zero14+mean(kernel_data14$y)
}
}
for(l in (markov_length_class+1):(markov_row_length)){
#inside_sum_one=0
###print("gunda")
###print(l)
diff_data_one=markov_Data[l,1:Npro]-test_data
# #print(paste("one-",sum(diff_data_one)))
#diff_data_one=abs(normalize(markov_Data[l,1:Npro])-normalize(test_data))
###print(diff_data_one)
t1=diff_data_one
##print("###########one#########")
mean_diff_data_one=sum(diff_data_one)/length(diff_data_one)
##print(paste("one-",mean_diff_data_one))
# #print(paste("one-",sum(diff_data_one)))
#inside_full_sum_one=inside_full_sum_one+mean_diff_data_one
inside_full_sum_one=inside_full_sum_one+sum(diff_data_one)
#plot(diff_data_one)
#k22=cbind(diff_data_one,diff_data_zero)
#plot(colMeans(k22))
##print("#########################")
if(1){
# diff_data_one=diff_data_one/abs(max(diff_data_one))
# print(diff_data_one)
k1=diff_data_one
k11=k1
k1=k11/4000
diff_data_one=k1/12000
diff_data_one1=k1/14000
diff_data_one2=k1/16000
diff_data_one3=k1/18000
diff_data_one4=k1/20000
diff_data_one5=k1/22000
diff_data_one6=k1/24000
diff_data_one7=k1/26000
diff_data_one8=k1/28000
diff_data_one9=k1/30000
diff_data_one10=k1/32000
diff_data_one11=k1/34000
diff_data_one12=k1/36000
diff_data_one13=k1/38000
diff_data_one14=k1/40000
#print(diff_data_one)
kernel_data=approx(gaussian_normal_kernel_density$x,gaussian_normal_kernel_density$y,xout = diff_data_one)
kernel_data$y[is.na(kernel_data$y)]=0
total_inside_sum_one=total_inside_sum_one+mean(kernel_data$y)
kernel_data1=approx(gaussian_normal_kernel_density$x,gaussian_normal_kernel_density$y,xout = diff_data_one1)
kernel_data1$y[is.na(kernel_data1$y)]=0
total_inside_sum_one1=total_inside_sum_one1+mean(kernel_data1$y)
kernel_data2=approx(gaussian_normal_kernel_density$x,gaussian_normal_kernel_density$y,xout = diff_data_one2)
kernel_data2$y[is.na(kernel_data2$y)]=0
total_inside_sum_one2=total_inside_sum_one2+mean(kernel_data2$y)
kernel_data3=approx(gaussian_normal_kernel_density$x,gaussian_normal_kernel_density$y,xout = diff_data_one3)
kernel_data3$y[is.na(kernel_data3$y)]=0
total_inside_sum_one3=total_inside_sum_one3+mean(kernel_data3$y)
kernel_data4=approx(gaussian_normal_kernel_density$x,gaussian_normal_kernel_density$y,xout = diff_data_one4)
kernel_data4$y[is.na(kernel_data4$y)]=0
total_inside_sum_one4=total_inside_sum_one4+mean(kernel_data4$y)
kernel_data5=approx(gaussian_normal_kernel_density$x,gaussian_normal_kernel_density$y,xout = diff_data_one5)
kernel_data5$y[is.na(kernel_data5$y)]=0
total_inside_sum_one5=total_inside_sum_one5+mean(kernel_data5$y)
kernel_data6=approx(gaussian_normal_kernel_density$x,gaussian_normal_kernel_density$y,xout = diff_data_one6)
kernel_data6$y[is.na(kernel_data6$y)]=0
total_inside_sum_one6=total_inside_sum_one6+mean(kernel_data6$y)
kernel_data7=approx(gaussian_normal_kernel_density$x,gaussian_normal_kernel_density$y,xout = diff_data_one7)
kernel_data7$y[is.na(kernel_data7$y)]=0
total_inside_sum_one7=total_inside_sum_one7+mean(kernel_data7$y)
kernel_data8=approx(gaussian_normal_kernel_density$x,gaussian_normal_kernel_density$y,xout = diff_data_one8)
kernel_data8$y[is.na(kernel_data8$y)]=0
total_inside_sum_one8=total_inside_sum_one8+mean(kernel_data8$y)
kernel_data9=approx(gaussian_normal_kernel_density$x,gaussian_normal_kernel_density$y,xout = diff_data_one9)
kernel_data9$y[is.na(kernel_data9$y)]=0
total_inside_sum_one9=total_inside_sum_one9+mean(kernel_data9$y)
kernel_data10=approx(gaussian_normal_kernel_density$x,gaussian_normal_kernel_density$y,xout = diff_data_one10)
kernel_data10$y[is.na(kernel_data10$y)]=0
total_inside_sum_one10=total_inside_sum_one10+mean(kernel_data10$y)
kernel_data11=approx(gaussian_normal_kernel_density$x,gaussian_normal_kernel_density$y,xout = diff_data_one11)
kernel_data11$y[is.na(kernel_data11$y)]=0
total_inside_sum_one11=total_inside_sum_one11+mean(kernel_data11$y)
kernel_data12=approx(gaussian_normal_kernel_density$x,gaussian_normal_kernel_density$y,xout = diff_data_one12)
kernel_data12$y[is.na(kernel_data12$y)]=0
total_inside_sum_one12=total_inside_sum_one12+mean(kernel_data12$y)
kernel_data13=approx(gaussian_normal_kernel_density$x,gaussian_normal_kernel_density$y,xout = diff_data_one13)
kernel_data13$y[is.na(kernel_data13$y)]=0
total_inside_sum_one13=total_inside_sum_one13+mean(kernel_data13$y)
kernel_data14=approx(gaussian_normal_kernel_density$x,gaussian_normal_kernel_density$y,xout = diff_data_one14)
kernel_data14$y[is.na(kernel_data14$y)]=0
total_inside_sum_one14=total_inside_sum_one14+mean(kernel_data14$y)
}
k22=cbind(diff_data_one,diff_data_zero)
# plot(colMeans(k22))
if(is.na(total_inside_sum_one)& count_123==0){
###print(kernel_data$y)
count_123=1
p11=t1
}
}
#total_outside_sum_zero=total_outside_sum_zero+total_inside_sum_zero
#total_outside_sum_one=total_outside_sum_one+total_inside_sum_one
##print(proc.time() - ptm)
#print("time 1-a")
}
total_outside_sum_zero=total_outside_sum_zero+total_inside_sum_zero
total_outside_sum_one=total_outside_sum_one+total_inside_sum_one
total_outside_sum_zero1=total_outside_sum_zero1+total_inside_sum_zero1
total_outside_sum_one1=total_outside_sum_one1+total_inside_sum_one1
total_outside_sum_zero2=total_outside_sum_zero2+total_inside_sum_zero2
total_outside_sum_one2=total_outside_sum_one2+total_inside_sum_one2
total_outside_sum_zero3=total_outside_sum_zero3+total_inside_sum_zero3
total_outside_sum_one3=total_outside_sum_one3+total_inside_sum_one3
total_outside_sum_zero4=total_outside_sum_zero4+total_inside_sum_zero4
total_outside_sum_one4=total_outside_sum_one4+total_inside_sum_one4
total_outside_sum_zero5=total_outside_sum_zero5+total_inside_sum_zero5
total_outside_sum_one5=total_outside_sum_one5+total_inside_sum_one5
total_outside_sum_zero6=total_outside_sum_zero6+total_inside_sum_zero6
total_outside_sum_one6=total_outside_sum_one6+total_inside_sum_one6
total_outside_sum_zero7=total_outside_sum_zero7+total_inside_sum_zero7
total_outside_sum_one7=total_outside_sum_one7+total_inside_sum_one7
total_outside_sum_zero8=total_outside_sum_zero8+total_inside_sum_zero8
total_outside_sum_one8=total_outside_sum_one8+total_inside_sum_one8
total_outside_sum_zero9=total_outside_sum_zero9+total_inside_sum_zero9
total_outside_sum_one9=total_outside_sum_one9+total_inside_sum_one9
total_outside_sum_zero10=total_outside_sum_zero10+total_inside_sum_zero10
total_outside_sum_one10=total_outside_sum_one10+total_inside_sum_one10
total_outside_sum_zero11=total_outside_sum_zero11+total_inside_sum_zero11
total_outside_sum_one11=total_outside_sum_one11+total_inside_sum_one11
total_outside_sum_zero12=total_outside_sum_zero12+total_inside_sum_zero12
total_outside_sum_one12=total_outside_sum_one12+total_inside_sum_one12
total_outside_sum_zero13=total_outside_sum_zero13+total_inside_sum_zero13
total_outside_sum_one13=total_outside_sum_one13+total_inside_sum_one13
total_outside_sum_zero14=total_outside_sum_zero14+total_inside_sum_zero14
total_outside_sum_one14=total_outside_sum_one14+total_inside_sum_one14
outside_full_sum_zero=outside_full_sum_zero+inside_full_sum_zero
outside_full_sum_one=outside_full_sum_one+inside_full_sum_one
###print("glagla")
###print(total_outside_sum_one)
###print(total_outside_sum_zero)
total_outside_sum_one_arr[i]=total_outside_sum_one
total_outside_sum_zero_arr[i]=total_outside_sum_zero
total_outside_sum_one_arr1[i]=total_outside_sum_one1
total_outside_sum_zero_arr1[i]=total_outside_sum_zero1
total_outside_sum_one_arr2[i]=total_outside_sum_one2
total_outside_sum_zero_arr2[i]=total_outside_sum_zero2
total_outside_sum_one_arr3[i]=total_outside_sum_one3
total_outside_sum_zero_arr3[i]=total_outside_sum_zero3
total_outside_sum_one_arr4[i]=total_outside_sum_one4
total_outside_sum_zero_arr4[i]=total_outside_sum_zero4
total_outside_sum_one_arr5[i]=total_outside_sum_one5
total_outside_sum_zero_arr5[i]=total_outside_sum_zero5
total_outside_sum_one_arr6[i]=total_outside_sum_one6
total_outside_sum_zero_arr6[i]=total_outside_sum_zero6
total_outside_sum_one_arr7[i]=total_outside_sum_one7
total_outside_sum_zero_arr7[i]=total_outside_sum_zero7
total_outside_sum_one_arr8[i]=total_outside_sum_one8
total_outside_sum_zero_arr8[i]=total_outside_sum_zero8
total_outside_sum_one_arr9[i]=total_outside_sum_one9
total_outside_sum_zero_arr9[i]=total_outside_sum_zero9
total_outside_sum_one_arr10[i]=total_outside_sum_one10
total_outside_sum_zero_arr10[i]=total_outside_sum_zero10
total_outside_sum_one_arr11[i]=total_outside_sum_one11
total_outside_sum_zero_arr11[i]=total_outside_sum_zero11
total_outside_sum_one_arr12[i]=total_outside_sum_one12
total_outside_sum_zero_arr12[i]=total_outside_sum_zero12
total_outside_sum_one_arr13[i]=total_outside_sum_one13
total_outside_sum_zero_arr13[i]=total_outside_sum_zero13
total_outside_sum_one_arr14[i]=total_outside_sum_one14
total_outside_sum_zero_arr14[i]=total_outside_sum_zero14
outside_full_sum_one_arr[i]=outside_full_sum_one
outside_full_sum_zero_arr[i]=outside_full_sum_zero
if(total_outside_sum_one<total_outside_sum_zero){
#if(abs(outside_full_sum_one)>abs(outside_full_sum_zero)){
abc_mcmc_result_vector[i]=0
}
else{
abc_mcmc_result_vector[i]=1
}
if(total_outside_sum_one1<total_outside_sum_zero1){
#if(abs(outside_full_sum_one)>abs(outside_full_sum_zero)){
abc_mcmc_result_vector1[i]=0
}
else{
abc_mcmc_result_vector1[i]=1
}
if(total_outside_sum_one2<total_outside_sum_zero2){
#if(abs(outside_full_sum_one)>abs(outside_full_sum_zero)){
abc_mcmc_result_vector2[i]=0
}
else{
abc_mcmc_result_vector2[i]=1
}
if(total_outside_sum_one3<total_outside_sum_zero3){
#if(abs(outside_full_sum_one)>abs(outside_full_sum_zero)){
abc_mcmc_result_vector3[i]=0
}
else{
abc_mcmc_result_vector3[i]=1
}
if(total_outside_sum_one4<total_outside_sum_zero4){
#if(abs(outside_full_sum_one)>abs(outside_full_sum_zero)){
abc_mcmc_result_vector4[i]=0
}
else{
abc_mcmc_result_vector4[i]=1
}
if(total_outside_sum_one5<total_outside_sum_zero5){
#if(abs(outside_full_sum_one)>abs(outside_full_sum_zero)){
abc_mcmc_result_vector5[i]=0
}
else{
abc_mcmc_result_vector5[i]=1
}
if(total_outside_sum_one6<total_outside_sum_zero6){
#if(abs(outside_full_sum_one)>abs(outside_full_sum_zero)){
abc_mcmc_result_vector6[i]=0
}
else{
abc_mcmc_result_vector6[i]=1
}
if(total_outside_sum_one7<total_outside_sum_zero7){
#if(abs(outside_full_sum_one)>abs(outside_full_sum_zero)){
abc_mcmc_result_vector7[i]=0
}
else{
abc_mcmc_result_vector7[i]=1
}
if(total_outside_sum_one8<total_outside_sum_zero8){
#if(abs(outside_full_sum_one)>abs(outside_full_sum_zero)){
abc_mcmc_result_vector8[i]=0
}
else{
abc_mcmc_result_vector8[i]=1
}
if(total_outside_sum_one9<total_outside_sum_zero9){
#if(abs(outside_full_sum_one)>abs(outside_full_sum_zero)){
abc_mcmc_result_vector9[i]=0
}
else{
abc_mcmc_result_vector9[i]=1
}
if(total_outside_sum_one10<total_outside_sum_zero10){
#if(abs(outside_full_sum_one)>abs(outside_full_sum_zero)){
abc_mcmc_result_vector10[i]=0
}
else{
abc_mcmc_result_vector10[i]=1
}
if(total_outside_sum_one11<total_outside_sum_zero11){
#if(abs(outside_full_sum_one)>abs(outside_full_sum_zero)){
abc_mcmc_result_vector11[i]=0
}
else{
abc_mcmc_result_vector11[i]=1
}
if(total_outside_sum_one12<total_outside_sum_zero12){
#if(abs(outside_full_sum_one)>abs(outside_full_sum_zero)){
abc_mcmc_result_vector12[i]=0
}
else{
abc_mcmc_result_vector12[i]=1
}
if(total_outside_sum_one13<total_outside_sum_zero13){
#if(abs(outside_full_sum_one)>abs(outside_full_sum_zero)){
abc_mcmc_result_vector13[i]=0
}
else{
abc_mcmc_result_vector13[i]=1
}
if(total_outside_sum_one14<total_outside_sum_zero14){
#if(abs(outside_full_sum_one)>abs(outside_full_sum_zero)){
abc_mcmc_result_vector14[i]=0
}
else{
abc_mcmc_result_vector14[i]=1
}
#print("time-2")
###print(proc.time() - ptm)
print(abc_mcmc_result_vector)
print("-----------1---------")
print(abc_mcmc_result_vector1)
print("-----------2---------")
print(abc_mcmc_result_vector2)
print("-----------3---------")
print(abc_mcmc_result_vector3)
print("-----------4---------")
print(abc_mcmc_result_vector4)
print("-----------5---------")
print(abc_mcmc_result_vector5)
print("-----------6---------")
print(abc_mcmc_result_vector6)
print("-----------7---------")
print(abc_mcmc_result_vector7)
print("-----------8---------")
print(abc_mcmc_result_vector8)
print("-----------9---------")
print(abc_mcmc_result_vector9)
print("-----------10---------")
print(abc_mcmc_result_vector10)
print("-----------11---------")
print(abc_mcmc_result_vector11)
print("-----------12---------")
print(abc_mcmc_result_vector12)
print("-----------13---------")
print(abc_mcmc_result_vector13)
print("-----------14---------")
print(abc_mcmc_result_vector14)
}
#print("------------------------")
#print("Fullll ABC-MCMC model done")
#print("------------------------")
table_data_abc_mcmc_0=table(abc_mcmc_result_vector,xlj_all_test[,Npro+1])
table_data_abc_mcmc_1=table(abc_mcmc_result_vector1,xlj_all_test[,Npro+1])
table_data_abc_mcmc_2=table(abc_mcmc_result_vector2,xlj_all_test[,Npro+1])
table_data_abc_mcmc_3=table(abc_mcmc_result_vector3,xlj_all_test[,Npro+1])
table_data_abc_mcmc_4=table(abc_mcmc_result_vector4,xlj_all_test[,Npro+1])
table_data_abc_mcmc_5=table(abc_mcmc_result_vector5,xlj_all_test[,Npro+1])
table_data_abc_mcmc_6=table(abc_mcmc_result_vector6,xlj_all_test[,Npro+1])
table_data_abc_mcmc_7=table(abc_mcmc_result_vector7,xlj_all_test[,Npro+1])
table_data_abc_mcmc_8=table(abc_mcmc_result_vector8,xlj_all_test[,Npro+1])
table_data_abc_mcmc_9=table(abc_mcmc_result_vector9,xlj_all_test[,Npro+1])
table_data_abc_mcmc_10=table(abc_mcmc_result_vector10,xlj_all_test[,Npro+1])
table_data_abc_mcmc_11=table(abc_mcmc_result_vector11,xlj_all_test[,Npro+1])
table_data_abc_mcmc_12=table(abc_mcmc_result_vector12,xlj_all_test[,Npro+1])
table_data_abc_mcmc_13=table(abc_mcmc_result_vector13,xlj_all_test[,Npro+1])
table_data_abc_mcmc_14=table(abc_mcmc_result_vector14,xlj_all_test[,Npro+1])
}
final_time=proc.time()
#print("K balahander")
#print(table_data_abc_mcmc_50)
abc_mcmc_result_vector_final1=rbind(abc_mcmc_result_vector,abc_mcmc_result_vector1,abc_mcmc_result_vector2,abc_mcmc_result_vector3)
abc_mcmc_result_vector_final2=rbind(abc_mcmc_result_vector4,abc_mcmc_result_vector5,abc_mcmc_result_vector6,abc_mcmc_result_vector7)
abc_mcmc_result_vector_final3=rbind(abc_mcmc_result_vector8,abc_mcmc_result_vector9,abc_mcmc_result_vector10,abc_mcmc_result_vector11)
abc_mcmc_result_vector_final4=rbind(abc_mcmc_result_vector12,abc_mcmc_result_vector13,abc_mcmc_result_vector14)
abc_mcmc_result_vector_final=rbind(abc_mcmc_result_vector_final1,abc_mcmc_result_vector_final2,abc_mcmc_result_vector_final3,abc_mcmc_result_vector_final4)
max_vector_return=function(abc_mcmc_result_vector123){
nrows=nrow(abc_mcmc_result_vector123)
abc_mcmc_result_vector_total=rep(2,ncol(abc_mcmc_result_vector123))
for(i in 1:ncol(abc_mcmc_result_vector123)){
print(sum(abc_mcmc_result_vector123[,i]))
if(sum(abc_mcmc_result_vector123[,i])>nrows/2)
{
abc_mcmc_result_vector_total[i]=1
}
else{
abc_mcmc_result_vector_total[i]=0
}
}
return(abc_mcmc_result_vector_total)
}
abc_mcmc_result_vector_total=max_vector_return(abc_mcmc_result_vector_final)
table_data_abc_mcmc_total=table(abc_mcmc_result_vector_total,xlj_all_test[,Npro+1])
print("------All tables-----")
print(table_data_lda_51)
print(table_data_knn_51)
print(table_data_abc_mcmc_total)
print("%%%%%")
print(proc.time()-time2)
| /ABC_MCMC_Bayesian_model_Esmeail_sun_paper.R | no_license | karshyap/Bayesian_methods_for-protenomics | R | false | false | 65,054 | r | ###Om ganeshaaya namaha##
#####Parameters used for generating the model###
##Control- class_0
##Treatement- class_1
#rm(list=ls())
#rm(.Random.seed, envir=globalenv())
#time_init=proc.time()
print("New-Code")
load("variables.RData")
set.seed( as.integer((as.double(Sys.time())*1000+Sys.getpid()) %% 2^31) )
norm_flag=0
#print(runif(n=100,min=0,max=100))
#####Please check with random seed###
#rm(list=ls())
#set.seed(101)
library(class)
library(boot)
library(MASS)
#library(caret)
#library(ISLR)
#library("Biostrings")
#setwd("/home/kashyap/Desktop/Masters_thesis_related/Codes")
time2=proc.time()
normalize=function(x){return(x/max(x))}
t_test_vals=function(c_pro_all)
{
  # Absolute (Welch) t statistic of every feature column against the
  # class label stored in the LAST column of `c_pro_all`.
  #
  # c_pro_all: data.frame whose first ncol-1 columns are numeric
  #   features and whose last column is a two-level group label.
  # Returns: unnamed numeric vector of length ncol(c_pro_all) - 1.
  #
  # Idiom cleanup: ncol()/seq_len() replace the manual length
  # bookkeeping, and vapply() (type-safe, preallocated) replaces the
  # grow-by-index loop. Behavior is unchanged.
  n_feat <- ncol(c_pro_all) - 1L
  grp <- c_pro_all[, n_feat + 1L]
  vapply(
    seq_len(n_feat),
    function(i) as.numeric(abs(t.test(c_pro_all[, i] ~ grp)$statistic)),
    numeric(1)
  )
}
# "Clear" the console by emitting 100 blank lines to stdout.
clrscr <- function() {
  cat(strrep("\n", 100))
}
diff_elem_removal=function(x,y){
  # Drop from `x` every element that also occurs in `y`, preserving the
  # order (and duplicates) of the surviving elements of `x`.
  #
  # Bug fix: the original collected positive removal indices and applied
  # x[-remov_array]; when NO element of `x` matched `y`, remov_array
  # ended up empty and x[-numeric(0)] silently returned an EMPTY vector
  # instead of `x` unchanged. A logical mask handles every case
  # correctly, and also removes the O(n*m) double loop and the
  # arbitrary rep(0, length(x)+100) scratch buffer.
  x[!(x %in% y)]
}
diff_elem_removal_total=function(p1,q1)
{
  # p1: list of per-peptide protein-index vectors.
  # q1: protein indices retained by the t-test screen.
  # Returns p1 with every index NOT in q1 stripped out of each vector.
  # Vectors already fully inside q1 are kept untouched; vectors that
  # end up empty after stripping are dropped from the result.
  kept <- list()
  for (pept_vec in p1) {
    outside <- setdiff(pept_vec, q1)
    if (length(outside) == 0) {
      # Nothing to strip: keep the vector exactly as it is.
      kept[[length(kept) + 1]] <- pept_vec
    } else {
      trimmed <- diff_elem_removal(pept_vec, outside)
      if (length(trimmed) > 0) {
        kept[[length(kept) + 1]] <- trimmed
      }
    }
  }
  kept
}
#####This function is to reduce the prot_pept_list from random proteins to the ascending stuff ######
prot_pept_list_reduction=function(r1){
  # Re-index the protein ids appearing in `r1` (a list of id vectors)
  # to consecutive ids 1..K, ordered by first appearance during an
  # element-by-element traversal of the list.
  #
  # Improvements over the original:
  #  * no fixed-size rep(0, 100000) scratch buffer that could overflow
  #    for large inputs;
  #  * match() replaces the O(n*k) triple loop while producing the
  #    identical mapping (unique() preserves first-appearance order,
  #    exactly like the original accumulation loop);
  #  * an empty input list now returns list() instead of crashing on
  #    the 1:length(r1) sequence.
  all_ids <- unique(unlist(r1))
  lapply(r1, function(v) {
    # v[] <- keeps v's original storage type and attributes, mirroring
    # the original element-wise in-place replacement.
    v[] <- match(v, all_ids)
    v
  })
}
#####Reduction of prot_pept_list ends######
######Function for the protein to peptide list starts####
euc_norm <- function(x) sqrt(sum(x^2))
if(1){
pro_trt_file="prot_file1.fasta"
pep_trt_file="pept_file1.fasta"
}
pro_cntrl_file="prot_file1.fasta"
pep_cntrl_file="pept_file1.fasta"
if(1){
Npro_factor=0.03
#Npro_trt=fasta_file_to_array_conv(pro_trt_file,pep_trt_file,"pro")
#Npro_cntrl=fasta_file_to_array_conv(pro_cntrl_file,pep_cntrl_file,"pro")
Npro_for_analysis=ceiling(Npro_factor*Npro_trt)
#Npep_trt=fasta_file_to_array_conv(pro_trt_file,pep_trt_file,"pep")
#Npep_cntrl=fasta_file_to_array_conv(pro_cntrl_file,pep_cntrl_file,"pep")
}
noise_factor_gauss=1
noise_factor_exp=0
Npro=Npro_trt
#Npro_trt_test=60
#Npro_cntrl_test=60
#Npep_trt=ceiling(15*Npro_trt)
#Npep_cntrl=ceiling(15*Npro_cntrl)
Npep=Npep_trt
theta_a=100
thetac_a=100
thetaa_a=10000000
number_of_samples_train_cntrl=10 #number of samples in each class
number_of_samples_train_trt=10
number_of_samples_test_cntrl=10 #number of test samples fr control
number_of_samples_test_trt=10 #number of test samples fr treatement
amin=1.5
amax=1.6
al=1.55
# Draw a random fold change; `flag` selects the direction.
#   flag truthy -> up-regulated:   uniform draw in [amin, amax]
#   flag falsy  -> down-regulated: reciprocal, i.e. in [1/amax, 1/amin]
# (amin and amax are expected with two decimal places, per the original
# author's note.)
fold_change <- function(flag, amin, amax) {
  draw <- runif(1, amin, amax)
  if (flag) draw else 1 / draw
}
euc_norm <- function(x) sqrt(sum(x^2))
M_cal=20 #Number of calibarations to be made in abc rejection algo.
#Generating the synthetic sample data S0####
###Only control sample used#####
###initial parameters as used in table 2
ka0=5
kc0=2
thetac0=100
thetaa0=10000000
theta0=100
phi0=0.4
al0=1.55
gamma_dist_a0=rgamma(1000,shape=ka0,scale=thetaa0)
gamma_dist_c0=rgamma(1000,shape=kc0,scale=thetac0)
gamma_l0=numeric(Npro_cntrl)
Npro_cntrl_a=floor(0.85*Npro)
Npro_cntrl_c=Npro-Npro_cntrl_a
for(i in 1:(Npro_cntrl_c+Npro_cntrl_a)){
if(i<Npro_cntrl_c){
gamma_l0[i]=sample(gamma_dist_c0,1)
}
else{
gamma_l0[i]=sample(gamma_dist_a0,1)
}
}
mean_vec_cont=gamma_l0
mean_vec_cont0=gamma_l0
sig_matrix0=matrix(numeric(Npro*Npro),nrow=Npro)
sig_sq_vec0=phi0*mean_vec_cont*mean_vec_cont
for(i in 1:Npro){
sig_matrix0[i,i]=sig_sq_vec0[i]
}
c_pro_control0=mvrnorm(n=number_of_samples_train_cntrl,mu = mean_vec_cont,Sigma = sig_matrix0)
c_pro_treatement0=mvrnorm(n=number_of_samples_train_cntrl,mu = mean_vec_cont,Sigma = sig_matrix0)
if(norm_flag){
c_pro_control0[,Npro_cntrl_c:length(c_pro_control0[1,])]=runif(1,min=0,max=10e-6)*c_pro_control0[,Npro_cntrl_c:length(c_pro_control0[1,])]
c_pro_treatement0[,Npro_cntrl_c:length(c_pro_treatement0[1,])]=runif(1,min=0,max=10e-6)*c_pro_treatement0[,Npro_cntrl_c:length(c_pro_treatement0[1,])]
}
ratio_vec0=colMeans(c_pro_control0)/colMeans(c_pro_treatement0)
######For calculating the threshold for rejection sampling
k_rand=sample(160:240,1)/100
kc_rand=sample(160:240,1)/100
ka_rand=sample(400:600,1)/100
thetac_rand=sample(80:120,1)
thetaa_rand=sample(9e6:11e6,1)
phi_rand=sample(0.3:0.5,1)
gamma_distc_rand=rgamma(1000,shape=kc_rand,scale=thetac_rand)
gamma_dista_rand=rgamma(1000,shape=ka_rand,scale=thetaa_rand)
gamma_rand=numeric(Npro_cntrl)
for(i in 1:(Npro_cntrl_c+Npro_cntrl_a)){
if(i<Npro_cntrl_c){
gamma_rand[i]=sample(gamma_distc_rand,1)
}
else{
gamma_rand[i]=sample(gamma_dista_rand,1)
}
}
mean_vec_cont_rand=gamma_rand
sig_matrix_rand=matrix(numeric(Npro_cntrl*Npro_cntrl),nrow=Npro_cntrl)
sig_sq_vec_rand=phi_rand*mean_vec_cont_rand*mean_vec_cont_rand
for(i in 1:Npro_cntrl){
sig_matrix_rand[i,i]=sig_sq_vec_rand[i]
}
c_pro_control_rand=mvrnorm(n=number_of_samples_train_cntrl,mu = mean_vec_cont_rand,Sigma = sig_matrix_rand)
c_pro_treatement_rand=mvrnorm(n=number_of_samples_train_cntrl,mu = mean_vec_cont_rand,Sigma = sig_matrix_rand)
if(norm_flag){
c_pro_control_rand[,Npro_cntrl_c:length(c_pro_control_rand[1,])]=runif(1,min=0,max=10e-6)*c_pro_control_rand[,Npro_cntrl_c:length(c_pro_control_rand[1,])]
c_pro_treatement_rand[,Npro_cntrl_c:length(c_pro_treatement_rand[1,])]=runif(1,min=0,max=10e-6)*c_pro_treatement_rand[,Npro_cntrl_c:length(c_pro_treatement_rand[1,])]
}
diff_rand_vec_cntrl=colMeans(c_pro_control_rand)-colMeans(c_pro_control0)
thresh_key_cntrl=1*Npro_factor*euc_norm(diff_rand_vec_cntrl)
#print(rgamma(100,shape=k_rand,scale=theta_rand))
#print(thresh_key_cntrl)
#print(Npro_factor)
while(0){
x=1
}
print("ganganna")
thresh_key_cntrl=1*euc_norm(diff_rand_vec_cntrl)
fold_change_vec0=numeric(Npro)
print("goofy")
for(i in 1:Npro){
if(ratio_vec0[i]>1)
{
fold_change_vec0[i]=fold_change(1,al0,al0)
}
else{
fold_change_vec0[i]=fold_change(0,al0,al0)
}
}
mean_vec_treatement0=mean_vec_cont0*fold_change_vec0
mean_vec_treatement_rand=mean_vec_cont_rand*fold_change_vec0
c_pro_treatement0=mvrnorm(n=number_of_samples_train_trt,mu = mean_vec_treatement0,Sigma = sig_matrix0)
c_pro_treatement_rand=mvrnorm(n=number_of_samples_train_trt,mu = mean_vec_treatement_rand,Sigma = sig_matrix_rand)
if(norm_flag){
c_pro_treatement0[,Npro_cntrl_c:length(c_pro_treatement0[1,])]=runif(1,min=0,max=10e-6)*c_pro_treatement0[,Npro_cntrl_c:length(c_pro_treatement0[1,])]
c_pro_treatement_rand[,Npro_cntrl_c:length(c_pro_treatement_rand[1,])]=runif(1,min=0,max=10e-6)*c_pro_treatement_rand[,Npro_cntrl_c:length(c_pro_treatement_rand[1,])]
}
diff_rand_vec_trt=colMeans(c_pro_treatement_rand)-colMeans(c_pro_treatement0)
thresh_key_trt=euc_norm(diff_rand_vec_trt)
thresh_key_cntrl=euc_norm(diff_rand_vec_cntrl)
print("India")
k_list=list()
theta_list=list()
phi_list=list()
thetaa_list=list()
thetac_list=list()
ka_list=list()
kc_list=list()
count_1=0
###Synthetic sample data S0 done#####
###ABC-Rejection Sampling#########
norm_array=numeric(M_cal)
for(j in 1:M_cal){
k=sample(160:240,1)/100
theta=sample(800:1200,1)
kc=sample(160:240,1)/100
ka=sample(450:550,1)/100
thetac=sample(80:120,1)
thetaa=sample(9e6:11e6,1)
#gamma_dist=rgamma(1000,shape=k,scale=theta_a)
gamma_dist_c=rgamma(1000,shape = kc,scale = thetac)
gamma_dist_a=rgamma(1000,shape = ka,scale = thetaa)
gamma_l=numeric(Npro_cntrl)
#for(i in 1:Npro_cntrl){
# gamma_l[i]=sample(gamma_dist,1)
#}
for(i in 1:(Npro_cntrl_c+Npro_cntrl_a)){
if(i<Npro_cntrl_c){
gamma_l[i]=sample(gamma_dist_c,1)
}
else{
gamma_l[i]=sample(gamma_dist_a,1)
}
}
mean_vec_cont=gamma_l
phi=runif(Npro,0.3,0.5) ###Defining the coefficient of variation
sig_matrix=matrix(numeric(Npro_cntrl*Npro_cntrl),nrow=Npro_cntrl)
sig_sq_vec=phi*mean_vec_cont*mean_vec_cont
for(i in 1:Npro_cntrl){
sig_matrix[i,i]=sig_sq_vec[i]
}
c_pro_control=mvrnorm(n=number_of_samples_train_cntrl,mu = mean_vec_cont,Sigma = sig_matrix)
if(norm_flag){
c_pro_control[,Npro_cntrl_c:length(c_pro_control[1,])]=runif(1,min=0,max=10e-6)*c_pro_control[,Npro_cntrl_c:length(c_pro_control[1,])]
}
diff_vec=colMeans(c_pro_control)-colMeans(c_pro_control0)
norm_array[j]=euc_norm(diff_vec)
# print(euc_norm(diff_vec))
#thresh_key=2*(median(norm_array)+min(norm_array))/3
if(norm_array[j]<thresh_key_cntrl){
#print(diff_vec)
count_1=count_1+1
k_list[count_1]=k
theta_list[count_1]=theta
phi_list[count_1]=phi
ka_list[count_1]=ka
kc_list[count_1]=kc
thetaa_list[count_1]=thetaa
thetac_list[count_1]=thetac
}
}
k_vec=theta_vec=phi_vec=numeric(length(k_list))
ka_vec=kc_vec=thetaa_vec=thetac_vec=phi_vec=numeric(length(ka_list))
for(i in 1:length(k_vec)){
k_vec[i]=k_list[[i]]
theta_vec[i]=theta_list[[i]]
phi_vec[i]=phi_list[[i]]
}
print("french")
for(i in 1:length(ka_vec)){
ka_vec[i]=ka_list[[i]]
kc_vec[i]=kc_list[[i]]
thetaa_vec[i]=thetaa_list[[i]]
thetac_vec[i]=thetac_list[[i]]
}
k_opt=mean(k_vec)
theta_opt=mean(theta_vec)
phi_opt=mean(phi_vec)
ka_opt=mean(ka_vec)
kc_opt=mean(kc_vec)
thetaa_opt=mean(thetaa_vec)
thetac_opt=mean(thetac_vec)
#############ABC Rejection sampling done#######
##### Data from optimal parameters being generated#######
print("glucose")
gamma_dist_opt=rgamma(1000,shape=k_opt,scale=theta_opt)
gamma_dist_a_opt=rgamma(1000,shape=ka_opt,scale=thetaa_opt)
gamma_dist_c_opt=rgamma(1000,shape=kc_opt,scale=thetac_opt)
gamma_l_opt=numeric(Npro_cntrl)
for(i in 1:(Npro_cntrl_c+Npro_cntrl_a)){
if(i<=Npro_cntrl_c){
gamma_l_opt[i]=sample(gamma_dist_c_opt,1)
}
else{
gamma_l_opt[i]=sample(gamma_dist_a_opt,1)
}
}
mean_vec_cont_opt=gamma_l_opt
sig_matrix_opt=matrix(numeric(Npro*Npro),nrow=Npro)
sig_sq_vec_opt=phi_opt*mean_vec_cont_opt*mean_vec_cont_opt
for(i in 1:Npro_cntrl){
sig_matrix_opt[i,i]=sig_sq_vec_opt[i]
}
#####Generating dummy data till i understand the protein file###
flag_vec=numeric(Npro)
for(i in 1:Npro)
{
flag_vec[i]=sample(0:1,1)
}
####flag_vec is used to say if the protein is overexpressed or not #####
mean_vec_treatement1=gamma_l_opt
fold_change_vec=numeric(Npro)
for(i in 1:Npro){
fold_change_vec[i]=fold_change(flag_vec[i],amin,amax)
}
ratio_vec=colMeans(c_pro_treatement0)/colMeans(c_pro_control0)
for(i in 1:Npro){
if(ratio_vec[i]>1)
{
fold_change_vec[i]=fold_change(1,amin,amax)
}
else{
fold_change_vec[i]=fold_change(0,amin,amax)
}
}
fold_change_vec=ratio_vec
mean_vec_treatement=mean_vec_treatement1*fold_change_vec
####Generating random peptide control data###
c_pro_control=mvrnorm(n=number_of_samples_train_cntrl,mu = mean_vec_cont_opt,Sigma = sig_matrix_opt)
c_pro_treatement=mvrnorm(n=number_of_samples_train_trt,mu = mean_vec_treatement,Sigma = sig_matrix_opt)
if(norm_flag){
c_pro_control[,Npro_cntrl_c:length(c_pro_control[1,])]=runif(1,min=0,max=10e-6)*c_pro_control[,Npro_cntrl_c:length(c_pro_control[1,])]
c_pro_treatement[,Npro_cntrl_c:length(c_pro_treatement[1,])]=runif(1,min=0,max=10e-6)*c_pro_treatement[,Npro_cntrl_c:length(c_pro_treatement[1,])]
}
l1=c_pro_control
l1=data.frame(cbind(l1,rep(0,number_of_samples_train_cntrl)))
colnames(l1)[Npro+1]="type"
l2=c_pro_treatement
l2=data.frame(cbind(l2,rep(1,number_of_samples_train_trt)))
colnames(l2)[Npro+1]="type"
#colnames(xlj_control)[Npro+1]="type"
c_pro_all=rbind(l1,l2)
c_pro_all$type=as.factor(c_pro_all$type)
t_test_value_pro=numeric((length(c_pro_all[1,])-1))
len_t_test_pro=length(t_test_value_pro)
for(i in 1:length(t_test_value_pro)){
k1=t.test(c_pro_all[,i]~c_pro_all[,len_t_test_pro+1])
t_test_value_pro[i]=abs(k1$statistic)
}
mean_vec_cont_opt=mean_vec_cont_opt[order(t_test_value_pro,decreasing = TRUE)]
mean_vec_cont_opt=mean_vec_cont_opt[1:Npro_for_analysis]
fold_change_vec=fold_change_vec[order(t_test_value_pro,decreasing = TRUE)]
fold_change_vec=fold_change_vec[1:Npro_for_analysis]
pro_pept_listed=1:Npro_cntrl
pro_list=pro_pept_listed[order(t_test_value_pro,decreasing=TRUE)][1:Npro_for_analysis]
Npro=Npro_for_analysis
Npro_trt=Npro
Npro_cntrl=Npro
Npro_cntrl_a=floor(0.85*Npro)
Npro_cntrl_c=Npro-Npro_cntrl_a
Npep_trt=ceiling(1.5*Npro_trt)
Npep_cntrl=ceiling(1.5*Npro_cntrl)
Npep=Npep_trt
sig_matrix_opt=matrix(numeric(Npro*Npro),nrow=Npro)
sig_sq_vec_opt=phi_opt*mean_vec_cont_opt*mean_vec_cont_opt
###########prot_pept_file start#######
#prot_pept_list_cntrl=fasta_file_to_array_conv(pro_cntrl_file,pep_cntrl_file,1)
#prot_pept_list_trt=fasta_file_to_array_conv(pro_trt_file,pep_trt_file,1)
prot_pept_list_cntrl_new1=diff_elem_removal_total(prot_pept_list_cntrl,pro_list)
prot_pept_list_cntrl_new=prot_pept_list_reduction(prot_pept_list_cntrl_new1)
prot_pept_list_cntrl=prot_pept_list_cntrl_new
prot_pept_list_trt=prot_pept_list_cntrl
#print(prot_pept_list_cntrl)
Npep_cntrl=length(prot_pept_list_cntrl)
Npep_trt=Npep_cntrl
Npep=Npep_cntrl
#######Prot_pept_file_end############
for(i in 1:Npro){
sig_matrix_opt[i,i]=sig_sq_vec_opt[i]
}
#xlj_trt=data.frame(cbind(xlj_trt,rep(1,number_of_samples_train_trt)))
#print("gulshan")
mean_vec_treatement=mean_vec_cont_opt*fold_change_vec
#####For threshold generation####
if(1){
k_rand=sample(160:240,1)/100
kc_rand=sample(160:240,1)/100
ka_rand=sample(400:600,1)/100
thetac_rand=sample(80:120,1)
thetaa_rand=sample(9e6:11e6,1)
phi_rand=sample(0.3:0.5,1)
gamma_distc_rand=rgamma(1000,shape=kc_rand,scale=thetac_rand)
gamma_dista_rand=rgamma(1000,shape=ka_rand,scale=thetaa_rand)
gamma_rand=numeric(Npro_cntrl)
for(i in 1:(Npro_cntrl_c+Npro_cntrl_a)){
if(i<Npro_cntrl_c){
gamma_rand[i]=sample(gamma_distc_rand,1)
}
else{
gamma_rand[i]=sample(gamma_dista_rand,1)
}
}
mean_vec_cont_rand=gamma_rand
sig_matrix_rand=matrix(numeric(Npro_cntrl*Npro_cntrl),nrow=Npro_cntrl)
sig_sq_vec_rand=phi_rand*mean_vec_cont_rand*mean_vec_cont_rand
for(i in 1:Npro){
sig_matrix_rand[i,i]=sig_sq_vec_rand[i]
}
mean_vec_treatement_rand=mean_vec_cont_rand*fold_change_vec
#print(mean_vec_treatement_rand)
c_pro_control_rand=mvrnorm(n=number_of_samples_train_cntrl,mu = mean_vec_cont_rand,Sigma = sig_matrix_rand)
c_pro_treatement_rand=mvrnorm(n=number_of_samples_test_trt,mu = mean_vec_treatement_rand,Sigma = sig_matrix_rand)
if(norm_flag){
c_pro_control_rand[,Npro_cntrl_c:length(c_pro_control_rand[1,])]=runif(1,min=0,max=10e-6)*c_pro_control_rand[,Npro_cntrl_c:length(c_pro_control_rand[1,])]
c_pro_treatement_rand[,Npro_cntrl_c:length(c_pro_treatement_rand[1,])]=runif(1,min=0,max=10e-6)*c_pro_treatement_rand[,Npro_cntrl_c:length(c_pro_treatement_rand[1,])]
}
diff_rand_vec_cntrl=colMeans(c_pro_control_rand)-colMeans(c_pro_control0)
print(mean_vec_cont_rand)
thresh_key_cntrl=euc_norm(diff_rand_vec_cntrl)
print("lellina")
#print(thresh_key_cntrl)
}
#####Peptide generating function#####
peptide_generation1=function(mean_vec_cont_opt,fold_change_vec,phi_opt,Npro_cntrl,Npro_trt,prot_pept_list_cntrl,Npro_for_analysis,
prot_pept_list_cntrl_test,prot_pept_list_trt,prot_pept_list_trt_test,train_test_flag,number_of_samples_train_cntrl,number_of_samples_train_trt)
{
time1=proc.time()
#print(fold_change_vec)
#print(number_of_samples_train_cntrl)
#print("lellina23")
#print(thresh_key_cntrl)
mean_vec_treatement=mean_vec_cont_opt*fold_change_vec
#print("missile")
#print(mean_vec_treatement)
c_pro_control=mvrnorm(n=number_of_samples_train_cntrl,mu = mean_vec_cont_opt,Sigma = sig_matrix_opt)
c_pro_treatement=mvrnorm(n=number_of_samples_train_trt,mu = mean_vec_treatement,Sigma = sig_matrix_opt)
#print("gokarna")
#print(length(c_pro_treatement[1,]))
#print(c_pro_treatement[,Npro_cntrl_c:length(c_pro_treatement[1,])])
#print("MOOCS")
if(norm_flag){
c_pro_control[,Npro_cntrl_c:length(c_pro_control[1,])]=runif(1,min=0,max=10e-6)*c_pro_control[,Npro_cntrl_c:length(c_pro_control[1,])]
c_pro_treatement[,Npro_cntrl_c:length(c_pro_treatement[1,])]=runif(1,min=0,max=10e-6)*c_pro_treatement[,Npro_cntrl_c:length(c_pro_treatement[1,])]
}
c_pep_control=matrix(numeric(Npep_cntrl*number_of_samples_train_cntrl),nrow = number_of_samples_train_cntrl)
c_pep_treatement=matrix(numeric(Npep_trt*number_of_samples_train_trt),nrow = number_of_samples_train_trt)
max_length_train=max(length(c_pep_control[1,]),length(c_pep_treatement[1,]))
for(j in 1:max_length_train){
#p3=sample(1:length(c_pro_control[1,]),sample(1:3,1))
#prot_pept_list_cntrl[[j]]=
p3=prot_pept_list_cntrl[[j]]
for(i in 1:length(c_pep_control[,1])){
#cat(i,"-",j,"-",p3,"\n")
for(k in 1:length(p3)){
c_pep_control[i,j]=c_pep_control[i,j]+c_pro_control[i,p3[k]]
c_pep_treatement[i,j]=c_pep_treatement[i,j]+c_pro_treatement[i,p3[k]]
}
}
}
#print(c_pep_control)
###print("grain")
####Generating random peptide treatement data###
checpoint_flag=0
###print("am out of this loop")
if(checpoint_flag){
print("checkpoint1")
print(proc.time()-time1)
}
mu_matrix_cntrl=matrix(numeric(Npep_cntrl*number_of_samples_train_cntrl),nrow = number_of_samples_train_cntrl)
mu_matrix_trt=matrix(numeric(Npep_trt*number_of_samples_train_trt),nrow = number_of_samples_train_trt)
kappa=5
efficiency_vector=runif(Npep,0.1,1)
for(i in 1:length(mu_matrix_cntrl[,1])){
for(j in 1:length(mu_matrix_cntrl[1,])){
mu_matrix_cntrl[i,j]=c_pep_control[i,j]*kappa*efficiency_vector[j]
mu_matrix_trt[i,j]=c_pep_treatement[i,j]*kappa*efficiency_vector[j]
}
}
if(checpoint_flag){
print("checkpoint2")
print(proc.time()-time1)
}
###print("ginger")
alpha=0.03
beta=3.6
var_vector_noisy_gaussian_cntrl=alpha*(mu_matrix_cntrl^2)+beta*mu_matrix_cntrl
var_vector_noisy_gaussian_trt=alpha*(mu_matrix_trt^2)+beta*mu_matrix_trt
total_vector_cntrl=matrix(numeric(Npep_cntrl*number_of_samples_train_cntrl),nrow = number_of_samples_train_cntrl)
total_vector_trt=matrix(numeric(Npep_cntrl*number_of_samples_train_trt),nrow = number_of_samples_train_trt)
if(checpoint_flag){
print("checkpoint2-a")
print(proc.time()-time1)
}
p1=matrix(numeric(Npep_cntrl*number_of_samples_train_cntrl),nrow = number_of_samples_train_cntrl)
p2=p1
if(0){
for(i in 1:length(total_vector_cntrl[,1])){
for(j in 1:length(total_vector_cntrl[1,])){
p1[i,j]=mvrnorm(n=1,mu = 0, Sigma = abs(var_vector_noisy_gaussian_cntrl[i,j]))
p2[i,j]=mvrnorm(n=1,mu = 0, Sigma = abs(var_vector_noisy_gaussian_cntrl[i,j]))
}
}
total_vector_cntrl1=mu_matrix_cntrl+p1
total_vector_trt1=mu_matrix_trt+p2
}
if(checpoint_flag){
print("checkpoint2-a1")
print(proc.time()-time1)
}
for(i in 1:length(total_vector_cntrl[,1])){
#print("hello-1")
#print(length(total_vector_cntrl[1,]))
#print(length(total_vector_cntrl[,1]))
#print("hello-2")
for(j in 1:length(total_vector_cntrl[1,])){
#cat(i,j,"\n")
total_vector_cntrl[i,j]=mu_matrix_cntrl[i,j]+noise_factor_gauss*mvrnorm(n=1,mu = 0, Sigma = abs(var_vector_noisy_gaussian_cntrl[i,j]))+noise_factor_exp*rexp(1,rate = abs(mu_matrix_cntrl[i,j]))
total_vector_trt[i,j]=mu_matrix_trt[i,j]+noise_factor_gauss*mvrnorm(n=1,mu = 0, Sigma = abs(var_vector_noisy_gaussian_trt[i,j]))+noise_factor_exp*rexp(1,rate = abs(mu_matrix_trt[i,j]))#+
#total_vector_cntrl[i,j]=mu_matrix_cntrl[i,j]+noise_factor*mvrnorm(n=1,mu = 0, Sigma = abs(var_vector_noisy_gaussian_cntrl[i,j]))
#total_vector_trt[i,j]=mu_matrix_trt[i,j]+noise_factor*mvrnorm(n=1,mu = 0, Sigma = abs(var_vector_noisy_gaussian_trt[i,j]))
#total_vector_cntrl[,j]=mu_matrix_cntrl[,j]+noise_factor*mvrnorm(n=1,mu = 0, Sigma = abs(var_vector_noisy_gaussian_cntrl[1,j]))
#total_vector_trt[,j]=mu_matrix_trt[,j]+noise_factor*mvrnorm(n=1,mu = 0, Sigma = abs(var_vector_noisy_gaussian_trt[1,j]))
#total_vector_cntrl[i,]=mu_matrix_cntrl[i,]+noise_factor*mvrnorm(n=1,mu = 0, Sigma = abs(var_vector_noisy_gaussian_cntrl[i,1]))
#total_vector_trt[i,]=mu_matrix_trt[i,]+noise_factor*mvrnorm(n=1,mu = 0, Sigma = abs(var_vector_noisy_gaussian_trt[i,1]))
}
}
if(checpoint_flag){
print("checkpoint2-b")
print(proc.time()-time1)
}
#total_vector_cntrl=data.frame(cbind(total_vector_cntrl,numeric(50)))
#total_vector_trt=data.frame(cbind(total_vector_trt,numeric(50)+1))
#total_vector_cntrl$X31=factor(total_vector_cntrl$X31)
#total_vector_trt$X31=factor(total_vector_trt$X31)
###To calculate rolled up abundances###
#lolol
x_pro_control=matrix(numeric(Npro_cntrl*number_of_samples_train_cntrl),nrow = number_of_samples_train_cntrl)
x_pro_trt=matrix(numeric(Npro_trt*number_of_samples_train_trt),nrow = number_of_samples_train_trt)
pept_to_prot_cntrl=list()
pept_to_prot_trt=list()
if(checpoint_flag){
print("checkpoint3")
print(proc.time()-time1)
}
for(i in 1:length(prot_pept_list_cntrl)){
for(j in 1:length(prot_pept_list_cntrl[i][[1]])){
###print(prot_pept_list_cntrl[i][[1]][j])
k1=prot_pept_list_cntrl[i][[1]][j]
###print(k1)
pept_to_prot_cntrl[k1][[1]][length(pept_to_prot_cntrl[k1][[1]])+1]=i
pept_to_prot_trt[k1][[1]][length(pept_to_prot_trt[k1][[1]])+1]=i
}
}
#print("||||||||")
#print(prot_pept_list_trt)
#print("||||||||")
###print("greek2")
###print("greek1")
###print("greek3")
if(checpoint_flag){
print("checkpoint4")
print(proc.time()-time1)
}
xlj_control=matrix(numeric(Npro_cntrl*number_of_samples_train_cntrl),nrow = number_of_samples_train_cntrl)
xlj_trt=matrix(numeric(Npro_trt*number_of_samples_train_trt),nrow = number_of_samples_train_trt)
#print("bozer")
#print(total_vector_cntrl)
###############################
for(i in 1:length(pept_to_prot_cntrl)){
if(pept_to_prot_cntrl[2]=="NULL"){
pept_to_prot_cntrl[2]=pept_to_prot_cntrl[3]
}
if(pept_to_prot_cntrl[1]=="NULL"){
pept_to_prot_cntrl[1]=pept_to_prot_cntrl[2]
}
if(pept_to_prot_cntrl[i]=="NULL"){
pept_to_prot_cntrl[i]=pept_to_prot_cntrl[1]
}
}
pept_to_prot_trt=pept_to_prot_cntrl
############################
#print("fine till here")
for(j in 1:length(xlj_control[1,])){ #for all proteins
for(i in 1:length(xlj_control[,1])) { #for all samples
for(k in 1:length(pept_to_prot_cntrl[j][[1]])){
p4=pept_to_prot_cntrl[j][[1]][k]
#print(p4)
if(is.null(p4)){
p4=pept_to_prot_cntrl[1][[1]][1]
}
xlj_control[i,j]=xlj_control[i,j]+total_vector_cntrl[i,p4]
xlj_control[i,j]=xlj_control[i,j]/length(p4)
xlj_trt[i,j]=xlj_trt[i,j]+total_vector_trt[i,p4]
xlj_trt[i,j]=xlj_trt[i,j]/length(p4)
}
}
}
# print("bozer")
#print(xlj_trt)
###print("greekza")
########
if(checpoint_flag){
print("checkpoint5")
print(proc.time()-time1)
}
xlj_control=data.frame(xlj_control)
xlj_control=data.frame(cbind(xlj_control,numeric(number_of_samples_train_cntrl)))
colnames(xlj_control)[Npro+1]="type"
#xlj_control$type=as.factor(xlj_control$type)
xlj_trt=data.frame(xlj_trt)
xlj_trt=data.frame(cbind(xlj_trt,rep(1,number_of_samples_train_trt)))
colnames(xlj_trt)[Npro+1]="type"
#print("briggy")
#print(xlj_control_test)
#xlj_control$"type"=numeric(50)
#xlj_trt$"type"=(numeric(number_of_samples_train_trt)+1)
if(1){
xlj_control=data.frame(xlj_control)
xlj_trt=data.frame(xlj_trt)
xlj_all=rbind(xlj_control,xlj_trt)
xlj_all$type=as.factor(xlj_all$type)
}
t_test_value=numeric((length(xlj_all[1,])-1))
len_t_test=length(t_test_value)
xlj_all_orig=xlj_all
p11=xlj_all$type
#p22=xlj_all_test$type
Npro=Npro_for_analysis
Npro_trt=Npro
Npro_cntrl=Npro
#print("kikiki")
#print(xlj_all_test)
if(checpoint_flag){
print("checkpoint6")
print(proc.time()-time1)
}
if(train_test_flag){
return(xlj_all)}
}
####xlj_all is the required data as per equation 12###
####xlj_all_test is the required test data####
if(1){
xlj_all=peptide_generation1(mean_vec_cont_opt,fold_change_vec,phi_opt,Npro_cntrl,Npro_trt,prot_pept_list_cntrl,Npro_for_analysis,
prot_pept_list_cntrl_test,prot_pept_list_trt,prot_pept_list_trt_test,train_test_flag=1,number_of_samples_train_cntrl,number_of_samples_train_trt)
xlj_all_test=peptide_generation1(mean_vec_cont_opt,fold_change_vec,phi_opt,Npro_cntrl,Npro_trt,prot_pept_list_cntrl,Npro_for_analysis,
prot_pept_list_cntrl_test,prot_pept_list_trt,prot_pept_list_trt_test,train_test_flag=1,number_of_samples_test_cntrl,number_of_samples_test_trt)
#xlj_all_test= xlj_all_test[sample(nrow(xlj_all_test)),]
}
#1. LDA classifier#
####Add the mean and the gaussian vector####
#SNR=1/(alpha+(beta/alpha))
##print("MCMC algorithm starting")
#######Upamanyu's algorithm-3...The ABC-MCMC-algorithm#####
#####First three steps of ABC-MCMC#####
###gamma_0 is the gamma related to S^(0)_(0) in the paper. This is generated with k_opt and theta_opt
###gamma_dist_0 is the proper gamma distribution pertaining to S_(0) in the paper. Its generated with k_0 and theta_0.
#Step1: Sampling gamma and generating mean vectors##
gamma_dist_0=rgamma(100,shape=k_opt,scale=theta_opt)
gamma_dist_a_0=rgamma(1000,shape=ka_opt,scale=thetaa_opt)
gamma_dist_c_0=rgamma(1000,shape=kc_opt,scale=thetac_opt)
c_pro_control_0=c_pro_control0
c_pro_treatment_0=c_pro_treatement0
gamma_0=numeric(Npro)
for(i in 1:(Npro_cntrl_c+Npro_cntrl_a)){
if(i<=Npro_cntrl_c){
gamma_0[i]=sample(gamma_dist_c_0,1)
}
else{
gamma_0[i]=sample(gamma_dist_a_0,1)
}
}
mean_vec_cntrl_0_0=gamma_0
mean_vec_trt_0_0=gamma_0*fold_change_vec
sig_matrix_0_0=matrix(numeric(Npro*Npro),nrow=Npro)
sig_sq_vec_0_0=phi_opt*mean_vec_cntrl_0_0*mean_vec_cntrl_0_0
for(i in 1:Npro){
sig_matrix_0_0[i,i]=sig_sq_vec_0_0[i]
}
###Step2: Generating protein data
c_pro_control_0_0=mvrnorm(n=number_of_samples_train_cntrl,mu = mean_vec_cntrl_0_0,Sigma = sig_matrix_0_0)
c_pro_treatment_0_0=mvrnorm(n=number_of_samples_train_trt,mu = mean_vec_trt_0_0,Sigma = sig_matrix_0_0)
#print("goi")
diff_vec_cntrl=colMeans(c_pro_control_0_0)-colMeans(c_pro_control_0)
diff_vec_trt=colMeans(c_pro_treatment_0_0)-colMeans(c_pro_treatment_0)
norm_cntrl=euc_norm(diff_vec_cntrl)
norm_trt=euc_norm(diff_vec_trt)
print("here maxhappan")
print(gamma_0)
while(norm_cntrl>2*thresh_key_cntrl | norm_trt>2*thresh_key_cntrl){
#print("i am here")
# print(norm_cntrl)
#print(norm_trt)
#print(0.8*thresh_key_cntrl)
#print(0.8*thresh_key_trt)
#print("-----------")
for(i in 1:(Npro_cntrl_c+Npro_cntrl_a)){
if(i<=Npro_cntrl_c){
gamma_0[i]=sample(gamma_dist_c_0,1)
}
else{
gamma_0[i]=sample(gamma_dist_a_0,1)
}
}
mean_vec_cntrl_0_0=gamma_0
mean_vec_trt_0_0=gamma_0*fold_change_vec
sig_matrix_0_0=matrix(numeric(Npro*Npro),nrow=Npro)
sig_sq_vec_0_0=phi_opt*mean_vec_cntrl_0_0*mean_vec_cntrl_0_0
for(i in 1:Npro){
sig_matrix_0_0[i,i]=sig_sq_vec_0_0[i]
}
mean_vec_cntrl_0_0=mean_vec_cont_opt
#print("DISHA")
mean_vec_trt_0_0=mean_vec_cont_opt*fold_change_vec
###Step2: Generating protein data
c_pro_control_0_0=mvrnorm(n=number_of_samples_train_cntrl,mu = mean_vec_cntrl_0_0,Sigma = sig_matrix_0_0)
c_pro_treatment_0_0=mvrnorm(n=number_of_samples_train_trt,mu = mean_vec_trt_0_0,Sigma = sig_matrix_0_0)
diff_vec_cntrl=colMeans(c_pro_control_0_0)-colMeans(c_pro_control_0)
diff_vec_trt=colMeans(c_pro_treatment_0_0)-colMeans(c_pro_treatment_0)
# print("hig")
norm_cntrl=euc_norm(diff_vec_cntrl)
#print(norm_cntrl)
norm_trt=euc_norm(diff_vec_trt)
#print(norm_trt)
#print("kig")
}
#####Step 3 is just an if condition which is taken care at the begining
#####Steps 5, 6 and 7 of the markov chain####
c_pro_control_markov=c_pro_control_0_0
c_pro_trt_markov=c_pro_treatment_0_0
gamma_vec=colMeans(c_pro_control_markov)
#####Main MCMC chain#####
count_markov=0
markov_iter=10000
gamma_markov=matrix(0L, nrow = markov_iter,ncol=Npro)
for(j in 1:markov_iter){
gamma_vec=colMeans(c_pro_control_markov)
mean_vec_cntrl_markov=gamma_vec
mean_vec_trt_markov=gamma_vec*fold_change_vec
###print(i)
sig_matrix_markov=matrix(numeric(Npro*Npro),nrow=Npro)
sig_sq_vec_markov=phi_opt*mean_vec_cntrl_markov*mean_vec_cntrl_markov
sig_sq_vec_markov=rep(0.1,length(mean_vec_cont))
for(i in 1:Npro){
sig_matrix_markov[i,i]=sig_sq_vec_markov[i]
}
c_pro_control_markov=mvrnorm(n=number_of_samples_train_cntrl,mu = mean_vec_cntrl_markov,Sigma = sig_matrix_markov)
c_pro_trt_markov=mvrnorm(n=number_of_samples_train_trt,mu = mean_vec_trt_markov,Sigma = sig_matrix_markov)
diff_vec_cntrl_markov=colMeans(c_pro_control_markov)-colMeans(c_pro_control_0)
diff_vec_trt_markov=colMeans(c_pro_trt_markov)-colMeans(c_pro_treatment_0)
norm_cntrl_markov=euc_norm(diff_vec_cntrl_markov)
norm_trt_markov=euc_norm(diff_vec_trt_markov)
###print("-----------")
###print(norm_cntrl_markov)
###print(norm_trt_markov)
###print("-----------")
if(norm_cntrl_markov<0.6*thresh_key_cntrl & norm_trt_markov<0.6*thresh_key_cntrl){
#mean_ratio=
gamma_vec=colMeans(c_pro_control_markov)
###print(j)
count_markov=count_markov+1
}
gamma_markov[j,]=gamma_vec
}
print("MCMC algorithm done")
######Algorithm-3 in upamanyu paper done#####
#####Generation of kernel data and then classification###
#print("running the function")
#print(gamma_vec)
markov_Data=peptide_generation1(mean_vec_cont_opt,fold_change_vec,phi_opt,Npro_cntrl,Npro_trt,prot_pept_list_cntrl,Npro_for_analysis,
prot_pept_list_cntrl_test,prot_pept_list_trt,prot_pept_list_trt_test,train_test_flag=1,number_of_samples_train_cntrl,number_of_samples_train_trt)
print("mrinal")
markov_Data_test=peptide_generation1(mean_vec_cont_opt,fold_change_vec,phi_opt,Npro_cntrl,Npro_trt,prot_pept_list_cntrl,Npro_for_analysis,
prot_pept_list_cntrl_test,prot_pept_list_trt,prot_pept_list_trt_test,train_test_flag = 0,number_of_samples_test_cntrl,number_of_samples_test_trt)
print("dingchaka")
gaussian_normal_kernel=mvrnorm(n=1000,mu=numeric(Npro),Sigma = diag(Npro))
gaussian_normal_kernel_density=density(gaussian_normal_kernel,bw=0.5)
xlj_all1=xlj_all
xlj_all_test1=xlj_all_test
#xlj_all_test1=xlj_all_test
####Designing classifiers####
xlj_all1=xlj_all
xlj_all_test1=xlj_all_test
if(1){
lda_classifier_51<-lda(type ~ .,data=xlj_all1)
predictions_lda_51=predict(lda_classifier_51,xlj_all_test1[,1:Npro])$class
table_data_lda_51=table(predictions_lda_51,xlj_all_test1[,Npro+1])
predictions_lda_51_app=predict(lda_classifier_51,xlj_all1[,1:Npro])$class
table_data_lda_51_app=table(predictions_lda_51_app,xlj_all1[,Npro+1])
print(table_data_lda_51)
training_labels1=xlj_all1$type
knn_trained1<-knn(train = xlj_all1[,1:Npro] , test =xlj_all_test1[,1:Npro] , cl = training_labels1, k=3)
table_data_knn_51=table(knn_trained1,xlj_all_test1[,Npro+1])
knn_trained_app1<-knn(train = xlj_all1[,1:Npro] , test =xlj_all1[,1:Npro] , cl = training_labels1, k=3)
table_data_knn_51_app1=table(knn_trained_app1,xlj_all1[,Npro+1])
print(table_data_knn_51)
}
# Per-run ABC-MCMC prediction vectors, one slot per test sample,
# pre-filled with the sentinel 2 (= "no prediction made yet").
# The 15 copies (abc_mcmc_result_vector plus suffixes 1..14) are
# created in a loop via assign() instead of 15 hand-written,
# copy-pasted assignments — same variables, same values.
abc_mcmc_result_vector <- rep(2, nrow(xlj_all_test))
for (run_id in 1:14) {
  assign(paste0("abc_mcmc_result_vector", run_id),
         rep(2, nrow(xlj_all_test)))
}
print("1234")
#print(table_data_lda_50)
#print(table_data_knn_50)
total_outside_sum_one_arr=numeric(nrow(xlj_all_test))
total_outside_sum_zero_arr=numeric(nrow(xlj_all_test))
total_outside_sum_one_arr1=numeric(nrow(xlj_all_test))
total_outside_sum_zero_arr1=numeric(nrow(xlj_all_test))
total_outside_sum_one_arr2=numeric(nrow(xlj_all_test))
total_outside_sum_zero_arr2=numeric(nrow(xlj_all_test))
total_outside_sum_one_arr3=numeric(nrow(xlj_all_test))
total_outside_sum_zero_arr3=numeric(nrow(xlj_all_test))
total_outside_sum_one_arr4=numeric(nrow(xlj_all_test))
total_outside_sum_zero_arr4=numeric(nrow(xlj_all_test))
total_outside_sum_one_arr5=numeric(nrow(xlj_all_test))
total_outside_sum_zero_arr5=numeric(nrow(xlj_all_test))
total_outside_sum_one_arr6=numeric(nrow(xlj_all_test))
total_outside_sum_zero_arr6=numeric(nrow(xlj_all_test))
total_outside_sum_one_arr7=numeric(nrow(xlj_all_test))
total_outside_sum_zero_arr7=numeric(nrow(xlj_all_test))
total_outside_sum_one_arr8=numeric(nrow(xlj_all_test))
total_outside_sum_zero_arr8=numeric(nrow(xlj_all_test))
total_outside_sum_one_arr9=numeric(nrow(xlj_all_test))
total_outside_sum_zero_arr9=numeric(nrow(xlj_all_test))
total_outside_sum_one_arr10=numeric(nrow(xlj_all_test))
total_outside_sum_zero_arr10=numeric(nrow(xlj_all_test))
total_outside_sum_one_arr11=numeric(nrow(xlj_all_test))
total_outside_sum_zero_arr11=numeric(nrow(xlj_all_test))
total_outside_sum_one_arr12=numeric(nrow(xlj_all_test))
total_outside_sum_zero_arr12=numeric(nrow(xlj_all_test))
total_outside_sum_one_arr13=numeric(nrow(xlj_all_test))
total_outside_sum_zero_arr13=numeric(nrow(xlj_all_test))
total_outside_sum_one_arr14=numeric(nrow(xlj_all_test))
total_outside_sum_zero_arr14=numeric(nrow(xlj_all_test))
outside_full_sum_one_arr=numeric(nrow(xlj_all_test))
outside_full_sum_zero_arr=numeric(nrow(xlj_all_test))
#######Prediction by the ABC MCMC model #####
# For each test row: regenerate peptide data from the tail of the MCMC chain,
# compare it to the test row under 15 distance scalings, and classify by
# whichever class accumulates the larger kernel score.
if(1){
# NOTE(review): 1:nrow(...) would misbehave on an empty test set
# (seq_len(nrow(...)) is the safe idiom) — left unchanged here.
for(i in 1:nrow(xlj_all_test)){
test_data=xlj_all_test[i,1:Npro]
# Per-row accumulators; the zero/one pair below is initialised twice in the
# original (harmless duplication, kept byte-identical).
total_outside_sum_zero=0 ####outside Sum for all the markov iterations
total_outside_sum_one=0
total_outside_sum_zero=0 ####outside Sum for all the markov iterations
total_outside_sum_one=0
outside_full_sum_zero=0
outside_full_sum_one=0
total_outside_sum_zero1=0 ####outside Sum for all the markov iterations
total_outside_sum_one1=0
total_outside_sum_zero2=0 ####outside Sum for all the markov iterations
total_outside_sum_one2=0
total_outside_sum_zero1=0 ####outside Sum for all the markov iterations
total_outside_sum_one1=0
total_outside_sum_zero2=0 ####outside Sum for all the markov iterations
total_outside_sum_one2=0
total_outside_sum_zero3=0 ####outside Sum for all the markov iterations
total_outside_sum_one3=0
total_outside_sum_zero4=0 ####outside Sum for all the markov iterations
total_outside_sum_one4=0
total_outside_sum_zero5=0 ####outside Sum for all the markov iterations
total_outside_sum_one5=0
total_outside_sum_zero6=0 ####outside Sum for all the markov iterations
total_outside_sum_one6=0
total_outside_sum_zero7=0 ####outside Sum for all the markov iterations
total_outside_sum_one7=0
total_outside_sum_zero8=0 ####outside Sum for all the markov iterations
total_outside_sum_one8=0
total_outside_sum_zero9=0 ####outside Sum for all the markov iterations
total_outside_sum_one9=0
total_outside_sum_zero10=0 ####outside Sum for all the markov iterations
total_outside_sum_one10=0
total_outside_sum_zero11=0 ####outside Sum for all the markov iterations
total_outside_sum_one11=0
total_outside_sum_zero12=0 ####outside Sum for all the markov iterations
total_outside_sum_one12=0
total_outside_sum_zero13=0 ####outside Sum for all the markov iterations
total_outside_sum_one13=0
total_outside_sum_zero14=0 ####outside Sum for all the markov iterations
total_outside_sum_one14=0
print(paste("Running the row",i))
##Only last 70% are considered for the calculations as first 30% are considered as the burn-in stage
count_123=0
print("here man")
##Only last 70% are considered for the calculations as first 30% are considered as the burn-in stage
count_123=0
print("here man")
for(j in (ceiling(0.9985*markov_iter)):(markov_iter-1)){
# ##print(paste("Markov-chain_number",j))
#total_outside_sum_zero=0
#total_outside_sum_one=0
#ptm <- proc.time()
markov_Data=peptide_generation1(gamma_markov[j,],fold_change_vec,phi_opt,Npro_cntrl,Npro_trt,prot_pept_list_cntrl,Npro_for_analysis,
prot_pept_list_cntrl_test,prot_pept_list_trt,prot_pept_list_trt_test,train_test_flag = 1,number_of_samples_train_cntrl,number_of_samples_train_trt)
###print(proc.time() - ptm)
#print("time-1")
markov_row_length=length(markov_Data[,1])
markov_length_class=markov_row_length/2
total_inside_sum_zero=0 ###inside sum is for sum of all the n-values of markov_data
total_inside_sum_one=0
inside_full_sum_zero=0
inside_full_sum_one=0
total_inside_sum_zero1=0 ###inside sum is for sum of all the n-values of markov_data
total_inside_sum_one1=0
inside_full_sum_zero1=0
inside_full_sum_one1=0
total_inside_sum_zero2=0 ###inside sum is for sum of all the n-values of markov_data
total_inside_sum_one2=0
inside_full_sum_zero2=0
inside_full_sum_one2=0
total_inside_sum_zero3=0 ###inside sum is for sum of all the n-values of markov_data
total_inside_sum_one3=0
inside_full_sum_zero3=0
inside_full_sum_one3=0
total_inside_sum_zero4=0 ###inside sum is for sum of all the n-values of markov_data
total_inside_sum_one4=0
inside_full_sum_zero4=0
inside_full_sum_one4=0
total_inside_sum_zero5=0 ###inside sum is for sum of all the n-values of markov_data
total_inside_sum_one5=0
inside_full_sum_zero5=0
inside_full_sum_one5=0
total_inside_sum_zero6=0 ###inside sum is for sum of all the n-values of markov_data
total_inside_sum_one6=0
inside_full_sum_zero6=0
inside_full_sum_one6=0
total_inside_sum_zero7=0 ###inside sum is for sum of all the n-values of markov_data
total_inside_sum_one7=0
inside_full_sum_zero7=0
inside_full_sum_one7=0
total_inside_sum_zero8=0 ###inside sum is for sum of all the n-values of markov_data
total_inside_sum_one8=0
inside_full_sum_zero8=0
inside_full_sum_one8=0
total_inside_sum_zero9=0 ###inside sum is for sum of all the n-values of markov_data
total_inside_sum_one9=0
inside_full_sum_zero9=0
inside_full_sum_one9=0
total_inside_sum_zero10=0 ###inside sum is for sum of all the n-values of markov_data
total_inside_sum_one10=0
inside_full_sum_zero10=0
inside_full_sum_one10=0
total_inside_sum_zero11=0 ###inside sum is for sum of all the n-values of markov_data
total_inside_sum_one11=0
inside_full_sum_zero11=0
inside_full_sum_one11=0
total_inside_sum_zero12=0 ###inside sum is for sum of all the n-values of markov_data
total_inside_sum_one12=0
inside_full_sum_zero12=0
inside_full_sum_one12=0
total_inside_sum_zero13=0 ###inside sum is for sum of all the n-values of markov_data
total_inside_sum_one13=0
inside_full_sum_zero13=0
inside_full_sum_one13=0
total_inside_sum_zero14=0 ###inside sum is for sum of all the n-values of markov_data
total_inside_sum_one14=0
inside_full_sum_zero14=0
inside_full_sum_one14=0
for(k in 1:(markov_length_class)){
#inside_sum_zero=0
diff_data_zero=markov_Data[k,1:Npro]-test_data
diff_data_zero=markov_Data[k,1:Npro]-test_data
t0=diff_data_zero
##print(paste("Zero-",sum(diff_data_zero)))
#diff_data_zero=abs(normalize(markov_Data[k,1:Npro])-normalize(test_data))
##print("###########zero#########")
##print(diff_data_zero)
mean_diff_data_zero=sum(diff_data_zero)/length(diff_data_zero)
# #print(paste("Zero-",sum(diff_data_zero)))
##print(paste("zero-",mean_diff_data_zero))
#inside_full_sum_zero=inside_full_sum_zero+mean_diff_data_zero
inside_full_sum_zero=inside_full_sum_zero+sum(diff_data_zero)
##print("#########################")
if(1){
#diff_data_zero=diff_data_zero/abs(max(diff_data_zero))
#print(diff_data_zero)
k0=diff_data_zero
k00=k0
k0=k00/4000
diff_data_zero=k0/12000
diff_data_zero1=k0/14000
diff_data_zero2=k0/16000
diff_data_zero3=k0/18000
diff_data_zero4=k0/20000
diff_data_zero5=k0/22000
diff_data_zero6=k0/24000
diff_data_zero7=k0/26000
diff_data_zero8=k0/28000
diff_data_zero9=k0/30000
diff_data_zero10=k0/32000
diff_data_zero11=k0/34000
diff_data_zero12=k0/36000
diff_data_zero13=k0/38000
diff_data_zero14=k0/40000
kernel_data=approx(gaussian_normal_kernel_density$x,gaussian_normal_kernel_density$y,xout = diff_data_zero)
kernel_data$y[is.na(kernel_data$y)]=0
total_inside_sum_zero=total_inside_sum_zero+mean(kernel_data$y)
kernel_data1=approx(gaussian_normal_kernel_density$x,gaussian_normal_kernel_density$y,xout = diff_data_zero1)
kernel_data1$y[is.na(kernel_data1$y)]=0
total_inside_sum_zero1=total_inside_sum_zero1+mean(kernel_data1$y)
kernel_data2=approx(gaussian_normal_kernel_density$x,gaussian_normal_kernel_density$y,xout = diff_data_zero2)
kernel_data2$y[is.na(kernel_data2$y)]=0
total_inside_sum_zero2=total_inside_sum_zero2+mean(kernel_data2$y)
kernel_data3=approx(gaussian_normal_kernel_density$x,gaussian_normal_kernel_density$y,xout = diff_data_zero3)
kernel_data3$y[is.na(kernel_data3$y)]=0
total_inside_sum_zero3=total_inside_sum_zero3+mean(kernel_data3$y)
kernel_data4=approx(gaussian_normal_kernel_density$x,gaussian_normal_kernel_density$y,xout = diff_data_zero4)
kernel_data4$y[is.na(kernel_data4$y)]=0
total_inside_sum_zero4=total_inside_sum_zero4+mean(kernel_data4$y)
kernel_data5=approx(gaussian_normal_kernel_density$x,gaussian_normal_kernel_density$y,xout = diff_data_zero5)
kernel_data5$y[is.na(kernel_data5$y)]=0
total_inside_sum_zero5=total_inside_sum_zero5+mean(kernel_data5$y)
kernel_data6=approx(gaussian_normal_kernel_density$x,gaussian_normal_kernel_density$y,xout = diff_data_zero6)
kernel_data6$y[is.na(kernel_data6$y)]=0
total_inside_sum_zero6=total_inside_sum_zero6+mean(kernel_data6$y)
kernel_data7=approx(gaussian_normal_kernel_density$x,gaussian_normal_kernel_density$y,xout = diff_data_zero7)
kernel_data7$y[is.na(kernel_data7$y)]=0
total_inside_sum_zero7=total_inside_sum_zero7+mean(kernel_data7$y)
kernel_data8=approx(gaussian_normal_kernel_density$x,gaussian_normal_kernel_density$y,xout = diff_data_zero8)
kernel_data8$y[is.na(kernel_data8$y)]=0
total_inside_sum_zero8=total_inside_sum_zero8+mean(kernel_data8$y)
kernel_data9=approx(gaussian_normal_kernel_density$x,gaussian_normal_kernel_density$y,xout = diff_data_zero9)
kernel_data9$y[is.na(kernel_data9$y)]=0
total_inside_sum_zero9=total_inside_sum_zero9+mean(kernel_data9$y)
kernel_data10=approx(gaussian_normal_kernel_density$x,gaussian_normal_kernel_density$y,xout = diff_data_zero10)
kernel_data10$y[is.na(kernel_data10$y)]=0
total_inside_sum_zero10=total_inside_sum_zero10+mean(kernel_data10$y)
kernel_data11=approx(gaussian_normal_kernel_density$x,gaussian_normal_kernel_density$y,xout = diff_data_zero11)
kernel_data11$y[is.na(kernel_data11$y)]=0
total_inside_sum_zero11=total_inside_sum_zero11+mean(kernel_data11$y)
kernel_data12=approx(gaussian_normal_kernel_density$x,gaussian_normal_kernel_density$y,xout = diff_data_zero12)
kernel_data12$y[is.na(kernel_data12$y)]=0
total_inside_sum_zero12=total_inside_sum_zero12+mean(kernel_data12$y)
kernel_data13=approx(gaussian_normal_kernel_density$x,gaussian_normal_kernel_density$y,xout = diff_data_zero13)
kernel_data13$y[is.na(kernel_data13$y)]=0
total_inside_sum_zero13=total_inside_sum_zero13+mean(kernel_data13$y)
kernel_data14=approx(gaussian_normal_kernel_density$x,gaussian_normal_kernel_density$y,xout = diff_data_zero14)
kernel_data14$y[is.na(kernel_data14$y)]=0
total_inside_sum_zero14=total_inside_sum_zero14+mean(kernel_data14$y)
}
}
for(l in (markov_length_class+1):(markov_row_length)){
#inside_sum_one=0
###print("gunda")
###print(l)
diff_data_one=markov_Data[l,1:Npro]-test_data
# #print(paste("one-",sum(diff_data_one)))
#diff_data_one=abs(normalize(markov_Data[l,1:Npro])-normalize(test_data))
###print(diff_data_one)
t1=diff_data_one
##print("###########one#########")
mean_diff_data_one=sum(diff_data_one)/length(diff_data_one)
##print(paste("one-",mean_diff_data_one))
# #print(paste("one-",sum(diff_data_one)))
#inside_full_sum_one=inside_full_sum_one+mean_diff_data_one
inside_full_sum_one=inside_full_sum_one+sum(diff_data_one)
#plot(diff_data_one)
#k22=cbind(diff_data_one,diff_data_zero)
#plot(colMeans(k22))
##print("#########################")
if(1){
# diff_data_one=diff_data_one/abs(max(diff_data_one))
# print(diff_data_one)
k1=diff_data_one
k11=k1
k1=k11/4000
diff_data_one=k1/12000
diff_data_one1=k1/14000
diff_data_one2=k1/16000
diff_data_one3=k1/18000
diff_data_one4=k1/20000
diff_data_one5=k1/22000
diff_data_one6=k1/24000
diff_data_one7=k1/26000
diff_data_one8=k1/28000
diff_data_one9=k1/30000
diff_data_one10=k1/32000
diff_data_one11=k1/34000
diff_data_one12=k1/36000
diff_data_one13=k1/38000
diff_data_one14=k1/40000
#print(diff_data_one)
kernel_data=approx(gaussian_normal_kernel_density$x,gaussian_normal_kernel_density$y,xout = diff_data_one)
kernel_data$y[is.na(kernel_data$y)]=0
total_inside_sum_one=total_inside_sum_one+mean(kernel_data$y)
kernel_data1=approx(gaussian_normal_kernel_density$x,gaussian_normal_kernel_density$y,xout = diff_data_one1)
kernel_data1$y[is.na(kernel_data1$y)]=0
total_inside_sum_one1=total_inside_sum_one1+mean(kernel_data1$y)
kernel_data2=approx(gaussian_normal_kernel_density$x,gaussian_normal_kernel_density$y,xout = diff_data_one2)
kernel_data2$y[is.na(kernel_data2$y)]=0
total_inside_sum_one2=total_inside_sum_one2+mean(kernel_data2$y)
kernel_data3=approx(gaussian_normal_kernel_density$x,gaussian_normal_kernel_density$y,xout = diff_data_one3)
kernel_data3$y[is.na(kernel_data3$y)]=0
total_inside_sum_one3=total_inside_sum_one3+mean(kernel_data3$y)
kernel_data4=approx(gaussian_normal_kernel_density$x,gaussian_normal_kernel_density$y,xout = diff_data_one4)
kernel_data4$y[is.na(kernel_data4$y)]=0
total_inside_sum_one4=total_inside_sum_one4+mean(kernel_data4$y)
kernel_data5=approx(gaussian_normal_kernel_density$x,gaussian_normal_kernel_density$y,xout = diff_data_one5)
kernel_data5$y[is.na(kernel_data5$y)]=0
total_inside_sum_one5=total_inside_sum_one5+mean(kernel_data5$y)
kernel_data6=approx(gaussian_normal_kernel_density$x,gaussian_normal_kernel_density$y,xout = diff_data_one6)
kernel_data6$y[is.na(kernel_data6$y)]=0
total_inside_sum_one6=total_inside_sum_one6+mean(kernel_data6$y)
kernel_data7=approx(gaussian_normal_kernel_density$x,gaussian_normal_kernel_density$y,xout = diff_data_one7)
kernel_data7$y[is.na(kernel_data7$y)]=0
total_inside_sum_one7=total_inside_sum_one7+mean(kernel_data7$y)
kernel_data8=approx(gaussian_normal_kernel_density$x,gaussian_normal_kernel_density$y,xout = diff_data_one8)
kernel_data8$y[is.na(kernel_data8$y)]=0
total_inside_sum_one8=total_inside_sum_one8+mean(kernel_data8$y)
kernel_data9=approx(gaussian_normal_kernel_density$x,gaussian_normal_kernel_density$y,xout = diff_data_one9)
kernel_data9$y[is.na(kernel_data9$y)]=0
total_inside_sum_one9=total_inside_sum_one9+mean(kernel_data9$y)
kernel_data10=approx(gaussian_normal_kernel_density$x,gaussian_normal_kernel_density$y,xout = diff_data_one10)
kernel_data10$y[is.na(kernel_data10$y)]=0
total_inside_sum_one10=total_inside_sum_one10+mean(kernel_data10$y)
kernel_data11=approx(gaussian_normal_kernel_density$x,gaussian_normal_kernel_density$y,xout = diff_data_one11)
kernel_data11$y[is.na(kernel_data11$y)]=0
total_inside_sum_one11=total_inside_sum_one11+mean(kernel_data11$y)
kernel_data12=approx(gaussian_normal_kernel_density$x,gaussian_normal_kernel_density$y,xout = diff_data_one12)
kernel_data12$y[is.na(kernel_data12$y)]=0
total_inside_sum_one12=total_inside_sum_one12+mean(kernel_data12$y)
kernel_data13=approx(gaussian_normal_kernel_density$x,gaussian_normal_kernel_density$y,xout = diff_data_one13)
kernel_data13$y[is.na(kernel_data13$y)]=0
total_inside_sum_one13=total_inside_sum_one13+mean(kernel_data13$y)
kernel_data14=approx(gaussian_normal_kernel_density$x,gaussian_normal_kernel_density$y,xout = diff_data_one14)
kernel_data14$y[is.na(kernel_data14$y)]=0
total_inside_sum_one14=total_inside_sum_one14+mean(kernel_data14$y)
}
k22=cbind(diff_data_one,diff_data_zero)
# plot(colMeans(k22))
if(is.na(total_inside_sum_one)& count_123==0){
###print(kernel_data$y)
count_123=1
p11=t1
}
}
#total_outside_sum_zero=total_outside_sum_zero+total_inside_sum_zero
#total_outside_sum_one=total_outside_sum_one+total_inside_sum_one
##print(proc.time() - ptm)
#print("time 1-a")
}
total_outside_sum_zero=total_outside_sum_zero+total_inside_sum_zero
total_outside_sum_one=total_outside_sum_one+total_inside_sum_one
total_outside_sum_zero1=total_outside_sum_zero1+total_inside_sum_zero1
total_outside_sum_one1=total_outside_sum_one1+total_inside_sum_one1
total_outside_sum_zero2=total_outside_sum_zero2+total_inside_sum_zero2
total_outside_sum_one2=total_outside_sum_one2+total_inside_sum_one2
total_outside_sum_zero3=total_outside_sum_zero3+total_inside_sum_zero3
total_outside_sum_one3=total_outside_sum_one3+total_inside_sum_one3
total_outside_sum_zero4=total_outside_sum_zero4+total_inside_sum_zero4
total_outside_sum_one4=total_outside_sum_one4+total_inside_sum_one4
total_outside_sum_zero5=total_outside_sum_zero5+total_inside_sum_zero5
total_outside_sum_one5=total_outside_sum_one5+total_inside_sum_one5
total_outside_sum_zero6=total_outside_sum_zero6+total_inside_sum_zero6
total_outside_sum_one6=total_outside_sum_one6+total_inside_sum_one6
total_outside_sum_zero7=total_outside_sum_zero7+total_inside_sum_zero7
total_outside_sum_one7=total_outside_sum_one7+total_inside_sum_one7
total_outside_sum_zero8=total_outside_sum_zero8+total_inside_sum_zero8
total_outside_sum_one8=total_outside_sum_one8+total_inside_sum_one8
total_outside_sum_zero9=total_outside_sum_zero9+total_inside_sum_zero9
total_outside_sum_one9=total_outside_sum_one9+total_inside_sum_one9
total_outside_sum_zero10=total_outside_sum_zero10+total_inside_sum_zero10
total_outside_sum_one10=total_outside_sum_one10+total_inside_sum_one10
total_outside_sum_zero11=total_outside_sum_zero11+total_inside_sum_zero11
total_outside_sum_one11=total_outside_sum_one11+total_inside_sum_one11
total_outside_sum_zero12=total_outside_sum_zero12+total_inside_sum_zero12
total_outside_sum_one12=total_outside_sum_one12+total_inside_sum_one12
total_outside_sum_zero13=total_outside_sum_zero13+total_inside_sum_zero13
total_outside_sum_one13=total_outside_sum_one13+total_inside_sum_one13
total_outside_sum_zero14=total_outside_sum_zero14+total_inside_sum_zero14
total_outside_sum_one14=total_outside_sum_one14+total_inside_sum_one14
outside_full_sum_zero=outside_full_sum_zero+inside_full_sum_zero
outside_full_sum_one=outside_full_sum_one+inside_full_sum_one
###print("glagla")
###print(total_outside_sum_one)
###print(total_outside_sum_zero)
total_outside_sum_one_arr[i]=total_outside_sum_one
total_outside_sum_zero_arr[i]=total_outside_sum_zero
total_outside_sum_one_arr1[i]=total_outside_sum_one1
total_outside_sum_zero_arr1[i]=total_outside_sum_zero1
total_outside_sum_one_arr2[i]=total_outside_sum_one2
total_outside_sum_zero_arr2[i]=total_outside_sum_zero2
total_outside_sum_one_arr3[i]=total_outside_sum_one3
total_outside_sum_zero_arr3[i]=total_outside_sum_zero3
total_outside_sum_one_arr4[i]=total_outside_sum_one4
total_outside_sum_zero_arr4[i]=total_outside_sum_zero4
total_outside_sum_one_arr5[i]=total_outside_sum_one5
total_outside_sum_zero_arr5[i]=total_outside_sum_zero5
total_outside_sum_one_arr6[i]=total_outside_sum_one6
total_outside_sum_zero_arr6[i]=total_outside_sum_zero6
total_outside_sum_one_arr7[i]=total_outside_sum_one7
total_outside_sum_zero_arr7[i]=total_outside_sum_zero7
total_outside_sum_one_arr8[i]=total_outside_sum_one8
total_outside_sum_zero_arr8[i]=total_outside_sum_zero8
total_outside_sum_one_arr9[i]=total_outside_sum_one9
total_outside_sum_zero_arr9[i]=total_outside_sum_zero9
total_outside_sum_one_arr10[i]=total_outside_sum_one10
total_outside_sum_zero_arr10[i]=total_outside_sum_zero10
total_outside_sum_one_arr11[i]=total_outside_sum_one11
total_outside_sum_zero_arr11[i]=total_outside_sum_zero11
total_outside_sum_one_arr12[i]=total_outside_sum_one12
total_outside_sum_zero_arr12[i]=total_outside_sum_zero12
total_outside_sum_one_arr13[i]=total_outside_sum_one13
total_outside_sum_zero_arr13[i]=total_outside_sum_zero13
total_outside_sum_one_arr14[i]=total_outside_sum_one14
total_outside_sum_zero_arr14[i]=total_outside_sum_zero14
outside_full_sum_one_arr[i]=outside_full_sum_one
outside_full_sum_zero_arr[i]=outside_full_sum_zero
if(total_outside_sum_one<total_outside_sum_zero){
#if(abs(outside_full_sum_one)>abs(outside_full_sum_zero)){
abc_mcmc_result_vector[i]=0
}
else{
abc_mcmc_result_vector[i]=1
}
if(total_outside_sum_one1<total_outside_sum_zero1){
#if(abs(outside_full_sum_one)>abs(outside_full_sum_zero)){
abc_mcmc_result_vector1[i]=0
}
else{
abc_mcmc_result_vector1[i]=1
}
if(total_outside_sum_one2<total_outside_sum_zero2){
#if(abs(outside_full_sum_one)>abs(outside_full_sum_zero)){
abc_mcmc_result_vector2[i]=0
}
else{
abc_mcmc_result_vector2[i]=1
}
if(total_outside_sum_one3<total_outside_sum_zero3){
#if(abs(outside_full_sum_one)>abs(outside_full_sum_zero)){
abc_mcmc_result_vector3[i]=0
}
else{
abc_mcmc_result_vector3[i]=1
}
if(total_outside_sum_one4<total_outside_sum_zero4){
#if(abs(outside_full_sum_one)>abs(outside_full_sum_zero)){
abc_mcmc_result_vector4[i]=0
}
else{
abc_mcmc_result_vector4[i]=1
}
if(total_outside_sum_one5<total_outside_sum_zero5){
#if(abs(outside_full_sum_one)>abs(outside_full_sum_zero)){
abc_mcmc_result_vector5[i]=0
}
else{
abc_mcmc_result_vector5[i]=1
}
if(total_outside_sum_one6<total_outside_sum_zero6){
#if(abs(outside_full_sum_one)>abs(outside_full_sum_zero)){
abc_mcmc_result_vector6[i]=0
}
else{
abc_mcmc_result_vector6[i]=1
}
if(total_outside_sum_one7<total_outside_sum_zero7){
#if(abs(outside_full_sum_one)>abs(outside_full_sum_zero)){
abc_mcmc_result_vector7[i]=0
}
else{
abc_mcmc_result_vector7[i]=1
}
if(total_outside_sum_one8<total_outside_sum_zero8){
#if(abs(outside_full_sum_one)>abs(outside_full_sum_zero)){
abc_mcmc_result_vector8[i]=0
}
else{
abc_mcmc_result_vector8[i]=1
}
if(total_outside_sum_one9<total_outside_sum_zero9){
#if(abs(outside_full_sum_one)>abs(outside_full_sum_zero)){
abc_mcmc_result_vector9[i]=0
}
else{
abc_mcmc_result_vector9[i]=1
}
if(total_outside_sum_one10<total_outside_sum_zero10){
#if(abs(outside_full_sum_one)>abs(outside_full_sum_zero)){
abc_mcmc_result_vector10[i]=0
}
else{
abc_mcmc_result_vector10[i]=1
}
if(total_outside_sum_one11<total_outside_sum_zero11){
#if(abs(outside_full_sum_one)>abs(outside_full_sum_zero)){
abc_mcmc_result_vector11[i]=0
}
else{
abc_mcmc_result_vector11[i]=1
}
if(total_outside_sum_one12<total_outside_sum_zero12){
#if(abs(outside_full_sum_one)>abs(outside_full_sum_zero)){
abc_mcmc_result_vector12[i]=0
}
else{
abc_mcmc_result_vector12[i]=1
}
if(total_outside_sum_one13<total_outside_sum_zero13){
#if(abs(outside_full_sum_one)>abs(outside_full_sum_zero)){
abc_mcmc_result_vector13[i]=0
}
else{
abc_mcmc_result_vector13[i]=1
}
if(total_outside_sum_one14<total_outside_sum_zero14){
#if(abs(outside_full_sum_one)>abs(outside_full_sum_zero)){
abc_mcmc_result_vector14[i]=0
}
else{
abc_mcmc_result_vector14[i]=1
}
#print("time-2")
###print(proc.time() - ptm)
print(abc_mcmc_result_vector)
print("-----------1---------")
print(abc_mcmc_result_vector1)
print("-----------2---------")
print(abc_mcmc_result_vector2)
print("-----------3---------")
print(abc_mcmc_result_vector3)
print("-----------4---------")
print(abc_mcmc_result_vector4)
print("-----------5---------")
print(abc_mcmc_result_vector5)
print("-----------6---------")
print(abc_mcmc_result_vector6)
print("-----------7---------")
print(abc_mcmc_result_vector7)
print("-----------8---------")
print(abc_mcmc_result_vector8)
print("-----------9---------")
print(abc_mcmc_result_vector9)
print("-----------10---------")
print(abc_mcmc_result_vector10)
print("-----------11---------")
print(abc_mcmc_result_vector11)
print("-----------12---------")
print(abc_mcmc_result_vector12)
print("-----------13---------")
print(abc_mcmc_result_vector13)
print("-----------14---------")
print(abc_mcmc_result_vector14)
}
#print("------------------------")
#print("Fullll ABC-MCMC model done")
#print("------------------------")
# Confusion tables (predicted vs. true label in column Npro+1) for each of the
# 15 bandwidth variants of the ABC-MCMC classifier.
table_data_abc_mcmc_0=table(abc_mcmc_result_vector,xlj_all_test[,Npro+1])
table_data_abc_mcmc_1=table(abc_mcmc_result_vector1,xlj_all_test[,Npro+1])
table_data_abc_mcmc_2=table(abc_mcmc_result_vector2,xlj_all_test[,Npro+1])
table_data_abc_mcmc_3=table(abc_mcmc_result_vector3,xlj_all_test[,Npro+1])
table_data_abc_mcmc_4=table(abc_mcmc_result_vector4,xlj_all_test[,Npro+1])
table_data_abc_mcmc_5=table(abc_mcmc_result_vector5,xlj_all_test[,Npro+1])
table_data_abc_mcmc_6=table(abc_mcmc_result_vector6,xlj_all_test[,Npro+1])
table_data_abc_mcmc_7=table(abc_mcmc_result_vector7,xlj_all_test[,Npro+1])
table_data_abc_mcmc_8=table(abc_mcmc_result_vector8,xlj_all_test[,Npro+1])
table_data_abc_mcmc_9=table(abc_mcmc_result_vector9,xlj_all_test[,Npro+1])
table_data_abc_mcmc_10=table(abc_mcmc_result_vector10,xlj_all_test[,Npro+1])
table_data_abc_mcmc_11=table(abc_mcmc_result_vector11,xlj_all_test[,Npro+1])
table_data_abc_mcmc_12=table(abc_mcmc_result_vector12,xlj_all_test[,Npro+1])
table_data_abc_mcmc_13=table(abc_mcmc_result_vector13,xlj_all_test[,Npro+1])
table_data_abc_mcmc_14=table(abc_mcmc_result_vector14,xlj_all_test[,Npro+1])
}
final_time=proc.time()
# Stack the 15 per-bandwidth prediction vectors into one matrix: one row per
# bandwidth variant, one column per test observation. Used below for the
# majority vote. (Replaces four intermediate partial rbind()s that were only
# ever combined here.)
abc_mcmc_result_vector_final=rbind(abc_mcmc_result_vector,abc_mcmc_result_vector1,
                                   abc_mcmc_result_vector2,abc_mcmc_result_vector3,
                                   abc_mcmc_result_vector4,abc_mcmc_result_vector5,
                                   abc_mcmc_result_vector6,abc_mcmc_result_vector7,
                                   abc_mcmc_result_vector8,abc_mcmc_result_vector9,
                                   abc_mcmc_result_vector10,abc_mcmc_result_vector11,
                                   abc_mcmc_result_vector12,abc_mcmc_result_vector13,
                                   abc_mcmc_result_vector14)
#' Majority vote across classifier variants.
#'
#' @param abc_mcmc_result_vector123 0/1 matrix with one row per classifier
#'   variant and one column per test observation.
#' @return Numeric vector with one entry per column: 1 if strictly more than
#'   half of the rows voted 1, else 0. With an even row count a tie resolves
#'   to 0 (same rule as the original per-column loop).
max_vector_return=function(abc_mcmc_result_vector123){
  n_voters <- nrow(abc_mcmc_result_vector123)
  # Vectorised replacement of the original per-column loop.
  column_totals <- colSums(abc_mcmc_result_vector123)
  # Preserve the original diagnostic output: one vote total per column.
  for (total in column_totals) print(total)
  unname(ifelse(column_totals > n_voters / 2, 1, 0))
}
# Ensemble prediction: majority vote over the 15 bandwidth variants, then
# its confusion table against the true labels.
abc_mcmc_result_vector_total=max_vector_return(abc_mcmc_result_vector_final)
table_data_abc_mcmc_total=table(abc_mcmc_result_vector_total,xlj_all_test[,Npro+1])
# Final report: LDA and kNN baselines vs. the ABC-MCMC ensemble, plus runtime.
print("------All tables-----")
print(table_data_lda_51)
print(table_data_knn_51)
print(table_data_abc_mcmc_total)
print("%%%%%")
# Elapsed time since 'time2' was recorded earlier in the script.
print(proc.time()-time2)
|
###############################################################################
## Course: Machine Learning for Economists and Business Analysts
## Topic: Effect Heterogeneity
###############################################################################
# NOTE(review): rm(list = ls()) wipes the user's workspace as a side effect;
# fine for a standalone teaching script, but avoid in sourced/shared code.
rm(list = ls())
set.seed(100239)
#getwd()
#setwd("")
# Load Packages
library("fBasics")
library("glmnet")
library("AER")
library("grf")
library("hdm")
library("lmtest")
library("sandwich")
library("tidyverse")
# Load data: Job Corps evaluation data, read from the working directory.
df <- read.csv("job_corps.csv",header=TRUE, sep=",")
###########################################
### Exercise 3: Double Machine Learning ###
###########################################
######################
## Data Preparation ##
######################
set.seed(123456789)
# Split observations 50/50 into samples A and B for cross-fitting
size <- floor(0.5 * nrow(df))
set_A <- sample(seq_len(nrow(df)), size = size)
set_B <- seq_len(nrow(df))[-set_A]
## Generate Variables
# Outcome: earnings (first column)
earnings <- as.matrix(df[, 1])
# Treatment column: 2 = offer to participate, 3 = actual participation.
# (Original reused the name 'treat' for both the column index and the data;
# a separate 'treat_col' avoids that shadowing.)
treat_col <- 2
treat <- as.matrix(df[, treat_col])
# Covariates: all columns from the 4th onwards
covariates <- as.matrix(df[, 4:ncol(df)])
#########################
## Nuisance Parameters ##
#########################
###############################################################################
## Conditional Potential Earnings under Non-Treatment
p = 1 # 1 for LASSO, 0 for Ridge
set.seed(100237)
# BUG FIX: the original subset was covariates[c(set_A, treat==0), ], which
# CONCATENATES the fold's row indices with a logical vector coerced to 0/1
# (dropping rows for the 0s and repeatedly selecting row 1 for the 1s).
# The intended sample is rows that are in the fold AND untreated:
idx_A0 <- set_A[treat[set_A] == 0]
idx_B0 <- set_B[treat[set_B] == 0]
## Using Sample A to Predict Sample B
# Potential Earnings under Non-Treatment: cross-validated lasso on the
# untreated rows of fold A, predictions extrapolated to all observations.
lasso_y0_A <- cv.glmnet(covariates[idx_A0, ], earnings[idx_A0],
                        alpha = p, type.measure = 'mse')
plot(lasso_y0_A)
fit_y0_A <- glmnet(covariates[idx_A0, ], earnings[idx_A0],
                   lambda = lasso_y0_A$lambda.min)
y0hat_B <- predict(fit_y0_A, covariates)
## Using Sample B to Predict Sample A
# Potential Earnings under Non-Treatment (same model fitted on fold B).
lasso_y0_B <- cv.glmnet(covariates[idx_B0, ], earnings[idx_B0],
                        alpha = p, type.measure = 'mse')
plot(lasso_y0_B)
fit_y0_B <- glmnet(covariates[idx_B0, ], earnings[idx_B0],
                   lambda = lasso_y0_B$lambda.min)
y0hat_A <- predict(fit_y0_B, covariates)
###############################################################################
## Conditional Potential Earnings under Treatment
p = 1 # 1 for LASSO, 0 for Ridge
set.seed(100237)
# BUG FIX: as with the non-treatment models, c(set_A, treat==1) concatenated
# indices with a coerced 0/1 vector. Select rows in the fold AND treated:
idx_A1 <- set_A[treat[set_A] == 1]
idx_B1 <- set_B[treat[set_B] == 1]
## Using Sample A to Predict Sample B
# Potential Earnings under Treatment: lasso on the treated rows of fold A.
lasso_y1_A <- cv.glmnet(covariates[idx_A1, ], earnings[idx_A1],
                        alpha = p, type.measure = 'mse')
plot(lasso_y1_A)
fit_y1_A <- glmnet(covariates[idx_A1, ], earnings[idx_A1],
                   lambda = lasso_y1_A$lambda.min)
y1hat_B <- predict(fit_y1_A, covariates)
## Using Sample B to Predict Sample A
# Potential Earnings under Treatment (fitted on fold B).
lasso_y1_B <- cv.glmnet(covariates[idx_B1, ], earnings[idx_B1],
                        alpha = p, type.measure = 'mse')
plot(lasso_y1_B)
fit_y1_B <- glmnet(covariates[idx_B1, ], earnings[idx_B1],
                   lambda = lasso_y1_B$lambda.min)
y1hat_A <- predict(fit_y1_B, covariates, type = 'response')
###############################################################################
## Propensity Score
p = 1 # 1 for LASSO, 0 for Ridge
set.seed(100236)
# NOTE(review): the treatment is binary, so family = "binomial" would be the
# natural glmnet choice here; the gaussian/mse fit is kept to preserve the
# original estimates — confirm with the course material.
# Using Sample A to Predict Sample B
lasso_p_A <- cv.glmnet(covariates[set_A, ], treat[set_A], alpha = p, type.measure = 'mse')
plot(lasso_p_A)
fit_p_A <- glmnet(covariates[set_A, ], treat[set_A], lambda = lasso_p_A$lambda.min)
pscore_B <- predict(fit_p_A, covariates)
# Using Sample B to Predict Sample A
# (treat[set_B] harmonised with the set_A branch; for an n-by-1 matrix the
# original treat[set_B, ] yields the same values.)
lasso_p_B <- cv.glmnet(covariates[set_B, ], treat[set_B], alpha = p, type.measure = 'mse')
plot(lasso_p_B)
fit_p_B <- glmnet(covariates[set_B, ], treat[set_B], lambda = lasso_p_B$lambda.min)
pscore_A <- predict(fit_p_B, covariates)
####################################
## Average Treatment Effect (ATE) ##
####################################
## Efficient Score
# Doubly robust (AIPW) score per observation, built from the cross-fitted
# nuisance predictions: mu1 - mu0 plus inverse-propensity-weighted residual
# corrections. (Dropped the original invisible() wrappers, which are no-ops
# around an assigned expression.)
Y_star <- matrix(NA, nrow = nrow(df), ncol = 1)
Y_star[set_A] <- y1hat_A[set_A] - y0hat_A[set_A] +
  treat[set_A] * (earnings[set_A] - y1hat_A[set_A]) / pscore_A[set_A] -
  (1 - treat[set_A]) * (earnings[set_A] - y0hat_A[set_A]) / (1 - pscore_A[set_A])
Y_star[set_B] <- y1hat_B[set_B] - y0hat_B[set_B] +
  treat[set_B] * (earnings[set_B] - y1hat_B[set_B]) / pscore_B[set_B] -
  (1 - treat[set_B]) * (earnings[set_B] - y0hat_B[set_B]) / (1 - pscore_B[set_B])
# Average Treatment Effect (ATE): mean of the efficient score; its standard
# error is the score's standard deviation over sqrt(n).
ATE <- round(mean(Y_star), digits = 2)
se_ATE <- round(sd(Y_star) / sqrt(nrow(df)), digits = 2)
print(paste0("Average Treatment Effect (ATE): ", ATE))
print(paste0("Standard Error for ATE: ", se_ATE))
##########################
## Effect Heterogeneity ##
##########################
set.seed(100237)
## Predict Effect Heterogeneity
# Regress the efficient score Y_star on covariates (lasso) within sample A...
lasso_A <- cv.glmnet(covariates[set_A,], Y_star[set_A],
alpha=p, type.measure = 'mse')
plot(lasso_A)
fit_A <- glmnet(covariates[set_A,], Y_star[set_A] ,lambda = lasso_A$lambda.min)
coef(fit_A)
# Extrapolate to sample B
het_B <- predict(fit_A, covariates)
## Predict Effect Heterogeneity
# ...and the mirror-image fit within sample B.
lasso_B <- cv.glmnet(covariates[set_B,], Y_star[set_B],
alpha=p, type.measure = 'mse')
plot(lasso_B)
fit_B <- glmnet(covariates[set_B,], Y_star[set_B],lambda = lasso_B$lambda.min)
coef(fit_B)
# Extrapolate to sample A
het_A <- predict(fit_B, covariates)
# Combine the cross-fitted CATE predictions: each observation is scored by the
# model fitted on the *other* fold.
het_dml <- matrix(NA, nrow = nrow(df),ncol =1)
het_dml[set_A] <- het_A[set_A]
het_dml[set_B] <- het_B[set_B]
# Kernel Density Plot of the estimated individual effects.
d_dml <- density(het_dml)
plot(d_dml)
##################
## Post DML-OLS ##
##################
# Multivariate OLS of the efficient score on covariates; columns 1,2,3,6,11
# are dropped from df (presumably outcome/treatment and collinear columns —
# TODO confirm against the codebook).
ols <- lm(Y_star ~ ., data = df[,-c(1,2,3,6,11)])
summary(ols)
# Robust standard errors (heteroskedasticity-consistent, HC1)
coeftest(ols, vcov = vcovHC(ols, type = "HC1"))
###################
## Causal Forest ##
###################
set.seed(1234567)
# grf causal forest as an alternative CATE estimator, with variance estimates.
cf <- causal_forest(covariates, earnings, treat)
het_cf <- predict(cf,estimate.variance = TRUE)
# Kernel Density Plot of the forest's effect predictions.
d_cf <- density(het_cf$predictions)
plot(d_cf)
# Agreement between the DML-lasso and causal-forest effect estimates.
cor(het_dml,het_cf$predictions)
## Inference
# t-Statistics: per-observation effect divided by its estimated std. error.
t_stat <- as.matrix(het_cf$predictions)/ as.matrix(sqrt(het_cf$variance.estimates))
# Shares of observations with significantly positive / negative / null effects
# at the 5% level.
sig_pos <- (t_stat>=1.96)== TRUE
sig_neg <- (t_stat<=-1.96)== TRUE
insig <- (abs(t_stat) <1.96)== TRUE
print(paste0("Share with positive effects: ", round(mean(sig_pos), digits=4)))
print(paste0("Share with negative effects: ", round(mean(sig_neg), digits=4)))
print(paste0("Share with insignificant effects: ", round(mean(insig), digits=4)))
| /PC Lab 4/heterogeneity.R | no_license | sthan41/Machine-Learning-Course | R | false | false | 6,924 | r | ###############################################################################
## Course: Machine Learning for Economists and Business Analysts
## Topic: Effect Heterogeneity
###############################################################################
rm(list = ls())
set.seed(100239)
#getwd()
#setwd("")
# Load Packages
library("fBasics")
library("glmnet")
library("AER")
library("grf")
library("hdm")
library("lmtest")
library("sandwich")
library("tidyverse")
# Load data
df <- read.csv("job_corps.csv",header=TRUE, sep=",")
###########################################
### Exercise 3: Double Machine Learning ###
###########################################
######################
## Data Preparation ##
######################
set.seed(123456789)
# Generate variable with the rows in training data
size <- floor(0.5 * nrow(df))
set_A <- sample(seq_len(nrow(df)), size = size)
set_B <- seq_len(nrow(df))[-set_A]
## Generate Variables
# Outcome
earnings <- as.matrix(df[,1])
# Treatment
treat = 2 #Select treatment 2= offer to participate, 3 = actual participation
treat <- as.matrix(df[,treat])
# Covariates
covariates <- as.matrix(df[,c(4:ncol(df))])
#########################
## Nuisance Parameters ##
#########################
###############################################################################
## Conditional Potential Earnings under Non-Treatment
p = 1 # 1 for LASSO, 0 for Ridge
set.seed(100237)
## Using Sample A to Predict Sample B
# Potential Earnings under Non-Treatment
# BUG FIX: the original indexed rows with c(set_A, treat == 0), which
# concatenates the integer indices of sample A with a logical vector coerced
# to 0/1 row numbers -- it does NOT select "rows in sample A that are
# untreated". Use the intersection of the two conditions instead.
set_A0 <- intersect(set_A, which(treat == 0))
lasso_y0_A <- cv.glmnet(covariates[set_A0, ], earnings[set_A0],
                        alpha = p, type.measure = 'mse')
plot(lasso_y0_A)
fit_y0_A <- glmnet(covariates[set_A0, ], earnings[set_A0],
                   lambda = lasso_y0_A$lambda.min)
# Predictions for all rows; downstream code reads the sample-B entries.
y0hat_B <- predict(fit_y0_A, covariates)
## Using Sample B to Predict Sample A
# Potential Earnings under Non-Treatment (same fix as above)
set_B0 <- intersect(set_B, which(treat == 0))
lasso_y0_B <- cv.glmnet(covariates[set_B0, ], earnings[set_B0],
                        alpha = p, type.measure = 'mse')
plot(lasso_y0_B)
fit_y0_B <- glmnet(covariates[set_B0, ], earnings[set_B0],
                   lambda = lasso_y0_B$lambda.min)
y0hat_A <- predict(fit_y0_B, covariates)
###############################################################################
## Conditional Potential Earnings under Treatment
p = 1 # 1 for LASSO, 0 for Ridge
set.seed(100237)
## Using Sample A to Predict Sample B
# Potential Earnings under Treatment
# BUG FIX: as with the non-treatment models, c(set_A, treat == 1) does not
# intersect "in sample A" with "treated"; build the proper row sets.
set_A1 <- intersect(set_A, which(treat == 1))
lasso_y1_A <- cv.glmnet(covariates[set_A1, ], earnings[set_A1],
                        alpha = p, type.measure = 'mse')
plot(lasso_y1_A)
fit_y1_A <- glmnet(covariates[set_A1, ], earnings[set_A1],
                   lambda = lasso_y1_A$lambda.min)
y1hat_B <- predict(fit_y1_A, covariates)
## Using Sample B to Predict Sample A
# Potential Earnings under Treatment (same fix)
set_B1 <- intersect(set_B, which(treat == 1))
lasso_y1_B <- cv.glmnet(covariates[set_B1, ], earnings[set_B1],
                        alpha = p, type.measure = 'mse')
plot(lasso_y1_B)
fit_y1_B <- glmnet(covariates[set_B1, ], earnings[set_B1],
                   lambda = lasso_y1_B$lambda.min)
y1hat_A <- predict(fit_y1_B, covariates, type = 'response')
###############################################################################
## Propensity Score
p = 1 # 1 for LASSO, 0 for Ridge
set.seed(100236)
# Using Sample A to Predict Sample B
# Propensity score fitted on sample A, evaluated on all rows (the sample-B
# entries are the out-of-sample predictions used downstream).
lasso_p_A <- cv.glmnet(covariates[set_A, ], treat[set_A], alpha = p, type.measure = 'mse')
plot(lasso_p_A)
fit_p_A <- glmnet(covariates[set_A, ], treat[set_A], lambda = lasso_p_A$lambda.min)
pscore_B <- predict(fit_p_A, covariates)
# Using Sample B to Predict Sample A
# CONSISTENCY: subset treat as treat[set_B], matching the sample-A fit; the
# original mixed treat[set_A] and treat[set_B,] (equivalent for a one-column
# matrix, but confusing to read).
lasso_p_B <- cv.glmnet(covariates[set_B, ], treat[set_B], alpha = p, type.measure = 'mse')
plot(lasso_p_B)
fit_p_B <- glmnet(covariates[set_B, ], treat[set_B], lambda = lasso_p_B$lambda.min)
pscore_A <- predict(fit_p_B, covariates)
# NOTE(review): treatment is binary but the lasso here is Gaussian, so the
# predicted pscore can fall outside (0, 1); consider family = "binomial" and
# trimming extreme scores -- confirm with the authors.
####################################
## Average Treatment Effect (ATE) ##
####################################
## Efficient Score
# Generate Modified Outcome in each sample
# AIPW / efficient-score pseudo-outcome; its sample mean identifies the ATE.
# The invisible() wrappers in the original were no-ops on plain assignments
# and have been removed.
Y_star <- matrix(NA, nrow = nrow(df), ncol = 1)
Y_star[set_A] <- y1hat_A[set_A] - y0hat_A[set_A] +
  treat[set_A] * (earnings[set_A] - y1hat_A[set_A]) / pscore_A[set_A] -
  (1 - treat[set_A]) * (earnings[set_A] - y0hat_A[set_A]) / (1 - pscore_A[set_A])
Y_star[set_B] <- y1hat_B[set_B] - y0hat_B[set_B] +
  treat[set_B] * (earnings[set_B] - y1hat_B[set_B]) / pscore_B[set_B] -
  (1 - treat[set_B]) * (earnings[set_B] - y0hat_B[set_B]) / (1 - pscore_B[set_B])
# Average Treatment Effect (ATE)
ATE <- round(mean(Y_star), digits=2)
se_ATE <- round(sd(Y_star)/sqrt(nrow(df)), digits=2)
print(paste0("Average Treatment Effect (ATE): ", ATE))
print(paste0("Standard Error for ATE: ", se_ATE))
##########################
## Effect Heterogeneity ##
##########################
set.seed(100237)
## Predict Effect Heterogeneity
lasso_A <- cv.glmnet(covariates[set_A,], Y_star[set_A],
alpha=p, type.measure = 'mse')
plot(lasso_A)
fit_A <- glmnet(covariates[set_A,], Y_star[set_A] ,lambda = lasso_A$lambda.min)
coef(fit_A)
# Extrapolate to sample B
het_B <- predict(fit_A, covariates)
## Predict Effect Heterogeneity
lasso_B <- cv.glmnet(covariates[set_B,], Y_star[set_B],
alpha=p, type.measure = 'mse')
plot(lasso_B)
fit_B <- glmnet(covariates[set_B,], Y_star[set_B],lambda = lasso_B$lambda.min)
coef(fit_B)
# Extrapolate to sample A (cross-fitting: the model trained on sample B
# predicts the held-out sample A; the original comment wrongly said "sample B")
het_A <- predict(fit_B, covariates)
het_dml <- matrix(NA, nrow = nrow(df),ncol =1)
het_dml[set_A] <- het_A[set_A]
het_dml[set_B] <- het_B[set_B]
# Kernel Density Plot
d_dml <- density(het_dml)
plot(d_dml)
##################
## Post DML-OLS ##
##################
# Multivariate OLS
ols <- lm(Y_star ~ ., data = df[,-c(1,2,3,6,11)])
summary(ols)
# Robust standard errors
coeftest(ols, vcov = vcovHC(ols, type = "HC1"))
###################
## Causal Forest ##
###################
set.seed(1234567)
cf <- causal_forest(covariates, earnings, treat)
het_cf <- predict(cf,estimate.variance = TRUE)
# Kernel Density Plot
d_cf <- density(het_cf$predictions)
plot(d_cf)
cor(het_dml,het_cf$predictions)
## Inference
# t-Statistics
t_stat <- as.matrix(het_cf$predictions)/ as.matrix(sqrt(het_cf$variance.estimates))
sig_pos <- (t_stat>=1.96)== TRUE
sig_neg <- (t_stat<=-1.96)== TRUE
insig <- (abs(t_stat) <1.96)== TRUE
print(paste0("Share with positive effects: ", round(mean(sig_pos), digits=4)))
print(paste0("Share with negative effects: ", round(mean(sig_neg), digits=4)))
print(paste0("Share with insignificant effects: ", round(mean(insig), digits=4)))
|
library(ggplot2)
library(tmap) # for static and interactive maps
library(leaflet) # for interactive maps
library(mapview) # for interactive maps
library(ggiraph)
library(sf)
library(geojsonR)
library(sp)
library(geojsonio)
library(ggmap)
library(dplyr)
library(shiny)
library(shinydashboard)
library(rsconnect)
gentrification.df <- read.csv(file.choose(new = FALSE))
#Read in JSON file that includes the polygonal shapes of the neighborhoods
NYC.js <- geojson_read(file.choose(new = FALSE),what="sp")
#Test if it prints the outlines
colnames(NYC.js@data)[colnames(NYC.js@data)=="ntacode"] <- "GeoID"
NYC.js@data$id <- seq(0,194,by=1)
NYC.js@data <- merge(NYC.js@data,gentrification.df,by="GeoID",all=TRUE)
#add an id column for merging purposes
#Fortify and merge the two data sets
NYC.js.f <- fortify(NYC.js)
NYC.js.f <- merge(NYC.js.f,NYC.js@data,by="id",all=TRUE)
#GOOGLE Maps requires API key
# SECURITY: a live Google Maps API key is hard-coded and committed here.
# Revoke/rotate this key and load it from an environment variable instead,
# e.g. register_google(key = Sys.getenv("GOOGLE_MAPS_KEY"), write = TRUE).
register_google(key = "AIzaSyCB3s03JVUmb_cpbhWrt_BF52hWwxnHXT8", write = TRUE)
#Use data to layer outline over real NYC map (ggmap). Using GFactor2010 as fill
#2010
Outline10 <- geom_polygon_interactive(aes(long,lat,group=group,fill=GFactor2010.x,tooltip=sprintf("%s<br>%s",GeogName.x,GFactor2010.x)),data=NYC.js.f,colour = "black")
mapImage <- ggmap(get_googlemap(c(lon=-74.0060,lat=40.7128),scale=1,zoom=10),fullpage=TRUE,extent = "panel")
NYCMAP2010 <- mapImage+Outline10+labs(title = 'Gentrification by Neighborhood',subtitle = 'Based on 2010 Census Data')
Outline16 <- geom_polygon_interactive(aes(long,lat,group=group,fill=GFactor2016.x,tooltip=sprintf("%s<br>%s",GeogName.x,GFactor2016.x)),data=NYC.js.f,colour = "black")
mapImage <- ggmap(get_googlemap(c(lon=-74.0060,lat=40.7128),scale=1,zoom=10),fullpage=TRUE,extent = "panel")
NYCMAP2016 <- mapImage+Outline16+labs(title = 'Gentrification by Neighborhood',subtitle = 'Based on 2016 Census Data')
OutlineChange <- geom_polygon_interactive(aes(long,lat,group=group,fill=GFactorChange.x,tooltip=sprintf("%s<br>%s",GeogName.x,GFactorChange.x)),data=NYC.js.f,colour = "black")
mapImage <- ggmap(get_googlemap(c(lon=-74.0060,lat=40.7128),scale=1,zoom=10),fullpage=TRUE,extent = "panel")
NYCMAPCHANGE <- mapImage+OutlineChange+scale_fill_gradient(low = "#000000", high = "#ffff00")+labs(title = 'Gentrification by Neighborhood',subtitle = 'Based on 2010-16 Census Data')
# Shiny server: renders four interactive ggiraph maps built at script level.
# plot2/plot3/plot4 show the pre-built static maps (2010, 2016, change);
# plot5 rebuilds its polygon layer reactively from the column chosen in the
# UI (input$variable).
server <- function(input, output) {
output$plot2 <- renderggiraph({
ggiraph(code=print(NYCMAP2010))
})
output$plot3 <- renderggiraph({
ggiraph(code=print(NYCMAP2016))
})
output$plot4 <- renderggiraph({
ggiraph(code=print(NYCMAPCHANGE))
})
# NOTE(review): input$variable is used directly as a column name of
# NYC.js.f; presumably the UI limits it to valid numeric columns -- confirm.
output$plot5 <- renderggiraph({
ggiraph(code=print(mapImage+geom_polygon_interactive(aes(long,lat,group=group,fill=NYC.js.f[,input$variable],tooltip=sprintf("%s<br>%s",NYC.js.f$GeogName,NYC.js.f[,input$variable])),data=NYC.js.f,colour = "black")+scale_fill_gradient(low = "#ffffff", high = "#FF0000")))
})
}
| /Gentrification Data/Gentrification App/server.R | no_license | aranansari/Gentrification | R | false | false | 2,945 | r | library(ggplot2)
library(tmap) # for static and interactive maps
library(leaflet) # for interactive maps
library(mapview) # for interactive maps
library(ggiraph)
library(sf)
library(geojsonR)
library(sp)
library(geojsonio)
library(ggmap)
library(dplyr)
library(shiny)
library(shinydashboard)
library(rsconnect)
gentrification.df <- read.csv(file.choose(new = FALSE))
#Read in JSON file that includes the polygonal shapes of the neighborhoods
NYC.js <- geojson_read(file.choose(new = FALSE),what="sp")
#Test if it prints the outlines
colnames(NYC.js@data)[colnames(NYC.js@data)=="ntacode"] <- "GeoID"
NYC.js@data$id <- seq(0,194,by=1)
NYC.js@data <- merge(NYC.js@data,gentrification.df,by="GeoID",all=TRUE)
#add an id column for merging purposes
#Fortify and merge the two data sets
NYC.js.f <- fortify(NYC.js)
NYC.js.f <- merge(NYC.js.f,NYC.js@data,by="id",all=TRUE)
#GOOGLE Maps requires API key
# SECURITY: a live Google Maps API key is hard-coded and committed here.
# Revoke/rotate this key and load it from an environment variable instead,
# e.g. register_google(key = Sys.getenv("GOOGLE_MAPS_KEY"), write = TRUE).
register_google(key = "AIzaSyCB3s03JVUmb_cpbhWrt_BF52hWwxnHXT8", write = TRUE)
#Use data to layer outline over real NYC map (ggmap). Using GFactor2010 as fill
#2010
Outline10 <- geom_polygon_interactive(aes(long,lat,group=group,fill=GFactor2010.x,tooltip=sprintf("%s<br>%s",GeogName.x,GFactor2010.x)),data=NYC.js.f,colour = "black")
mapImage <- ggmap(get_googlemap(c(lon=-74.0060,lat=40.7128),scale=1,zoom=10),fullpage=TRUE,extent = "panel")
NYCMAP2010 <- mapImage+Outline10+labs(title = 'Gentrification by Neighborhood',subtitle = 'Based on 2010 Census Data')
Outline16 <- geom_polygon_interactive(aes(long,lat,group=group,fill=GFactor2016.x,tooltip=sprintf("%s<br>%s",GeogName.x,GFactor2016.x)),data=NYC.js.f,colour = "black")
mapImage <- ggmap(get_googlemap(c(lon=-74.0060,lat=40.7128),scale=1,zoom=10),fullpage=TRUE,extent = "panel")
NYCMAP2016 <- mapImage+Outline16+labs(title = 'Gentrification by Neighborhood',subtitle = 'Based on 2016 Census Data')
OutlineChange <- geom_polygon_interactive(aes(long,lat,group=group,fill=GFactorChange.x,tooltip=sprintf("%s<br>%s",GeogName.x,GFactorChange.x)),data=NYC.js.f,colour = "black")
mapImage <- ggmap(get_googlemap(c(lon=-74.0060,lat=40.7128),scale=1,zoom=10),fullpage=TRUE,extent = "panel")
NYCMAPCHANGE <- mapImage+OutlineChange+scale_fill_gradient(low = "#000000", high = "#ffff00")+labs(title = 'Gentrification by Neighborhood',subtitle = 'Based on 2010-16 Census Data')
server <- function(input, output) {
output$plot2 <- renderggiraph({
ggiraph(code=print(NYCMAP2010))
})
output$plot3 <- renderggiraph({
ggiraph(code=print(NYCMAP2016))
})
output$plot4 <- renderggiraph({
ggiraph(code=print(NYCMAPCHANGE))
})
output$plot5 <- renderggiraph({
ggiraph(code=print(mapImage+geom_polygon_interactive(aes(long,lat,group=group,fill=NYC.js.f[,input$variable],tooltip=sprintf("%s<br>%s",NYC.js.f$GeogName,NYC.js.f[,input$variable])),data=NYC.js.f,colour = "black")+scale_fill_gradient(low = "#ffffff", high = "#FF0000")))
})
}
|
rm(list = ls())
source("lib/helpers.R")
# Cargar los datos --------------------------------------------------------
cargar_paquetes("wooldridge")
data("wage2")
?wage2
head(wage2)
wage_db <- wage2[1:500,] %>%
mutate(pareduc = meduc + feduc)
# Base de datos con datos de salarios mensuales de 935 individuos en 1980
# Las variables que utilizaremos son:
# wage: Salario mensual
# educ: Años de educación
# exper: Años de experiencia trabajando
# tenure: Permanencia en el trabajo actuan (en años)
# meduc: Nivel de educación de la madree
# feduc: Nivel de educación del padre
# pareduc: Suma de meduc y feduc
# married: 1 si casado, 0 si no
# black: 1 si es de raza negra, 0 si no
# south: 1 si vive en el sur
# urban: 2 si vive en zona metropolitana, 0 si no
# age: edad del individuo
# Modelos candidatos ------------------------------------------------------
# Probaremos distintas regresiones propuestas en Wooldridge(2016)
# "Introductory Econometrics: A modern approach" para modelar el salario
# Modelo 1: salario explicado por nivel de eduación, experiencia y permanencia
# en trabajo actual
mod_1 <- lm(log(wage)~educ+exper+tenure, data = wage_db)
summary(mod_1)
# Modelo 2: salario explicado por educación, educación de los padres (interactuando
# con educación del individuo), experiencia y permanencia en el trabajo actual
mod_2 <- lm(log(wage)~educ+educ*pareduc+exper+tenure, data = wage_db)
summary(mod_2)
# Model 3: wage explained by education and experience in quadratic form
# (to capture diminishing returns to experience).
# FIX: inside an lm() formula, `exper^2` is formula-crossing notation and
# collapses to just `exper`; the quadratic term must be protected with I().
mod_3 <- lm(log(wage) ~ educ + exper + I(exper^2), data = wage_db)
summary(mod_3)
# Model 4: education, experience, their interaction, and age (quadratic).
# FIX: `age^2` needed I() for the same reason, and the original fitted this
# model on the full `wage2` data while every other candidate uses `wage_db`
# (first 500 rows); the marginal-likelihood comparison below requires a
# common dataset.
mod_4 <- lm(log(wage) ~ educ + exper + educ*exper + age + I(age^2), data = wage_db)
summary(mod_4)
# Model 5: Model 3 plus marital status.
mod_5 <- lm(log(wage) ~ educ + exper + I(exper^2) + married, data = wage_db)
summary(mod_5)
# Modelo 6: Modelo 1 agregando estado civil, raza, si vive en el sur y
# si vive en zona metropolitana
mod_6 <- lm(log(wage)~educ+exper+tenure+married+black+south+urban, data = wage_db)
summary(mod_6)
# Estimar m gorro para cada modelo candidato ------------------------------
# De la ecuación (37) del artículo de Wasserman:
# Log marginal likelihood approximation (eq. (37) in Wasserman):
#   log m-hat = -n * log(sigma_hat) - (n/2) * (log(2*pi) + 1)
# where sigma_hat is the MLE of the residual standard deviation (divides
# the residual sum of squares by n, not n - k).
fun_log_m_hat <- function(model) {
  n_obs <- nobs(model)
  rss <- sum(residuals(model)^2)
  sigma_mle <- sqrt(rss / n_obs)
  -n_obs * log(sigma_mle) - (n_obs / 2) * (log(2 * pi) + 1)
}
models_list <- list(mod_1, mod_2, mod_3, mod_4, mod_5, mod_6)
names(models_list) <- paste("mod_", 1:6, sep = "")
# Log m-hat for each candidate model (kept on the log scale to avoid
# numerical problems).
log_m_hat <- vapply(models_list, fun_log_m_hat, numeric(1))
# Posterior model probabilities via a max-shifted log-sum-exp.
# FIX: the original exp(log_m_hat - log(sum(exp(log_m_hat)))) overflows or
# underflows to NaN/0 whenever the log marginal likelihoods are large in
# magnitude; subtracting the maximum first is the standard stable form.
shifted <- log_m_hat - max(log_m_hat)
(probs_post <- exp(shifted) / sum(exp(shifted)))
# Compare conclusions with AIC and BIC (pick the model with the lowest value;
# vapply replaces sapply for a type-stable result)
vapply(models_list, AIC, numeric(1))
vapply(models_list, BIC, numeric(1))
| /src/04_ejemplo_regresion_selecc_de_modelos.R | no_license | rm-avila/metodos_mc_para_estadistica | R | false | false | 2,972 | r | rm(list = ls())
source("lib/helpers.R")
# Cargar los datos --------------------------------------------------------
cargar_paquetes("wooldridge")
data("wage2")
?wage2
head(wage2)
wage_db <- wage2[1:500,] %>%
mutate(pareduc = meduc + feduc)
# Base de datos con datos de salarios mensuales de 935 individuos en 1980
# Las variables que utilizaremos son:
# wage: Salario mensual
# educ: Años de educación
# exper: Años de experiencia trabajando
# tenure: Permanencia en el trabajo actuan (en años)
# meduc: Nivel de educación de la madree
# feduc: Nivel de educación del padre
# pareduc: Suma de meduc y feduc
# married: 1 si casado, 0 si no
# black: 1 si es de raza negra, 0 si no
# south: 1 si vive en el sur
# urban: 2 si vive en zona metropolitana, 0 si no
# age: edad del individuo
# Modelos candidatos ------------------------------------------------------
# Probaremos distintas regresiones propuestas en Wooldridge(2016)
# "Introductory Econometrics: A modern approach" para modelar el salario
# Modelo 1: salario explicado por nivel de eduación, experiencia y permanencia
# en trabajo actual
mod_1 <- lm(log(wage)~educ+exper+tenure, data = wage_db)
summary(mod_1)
# Modelo 2: salario explicado por educación, educación de los padres (interactuando
# con educación del individuo), experiencia y permanencia en el trabajo actual
mod_2 <- lm(log(wage)~educ+educ*pareduc+exper+tenure, data = wage_db)
summary(mod_2)
# Model 3: wage explained by education and experience in quadratic form
# (to capture diminishing returns to experience).
# FIX: inside an lm() formula, `exper^2` is formula-crossing notation and
# collapses to just `exper`; the quadratic term must be protected with I().
mod_3 <- lm(log(wage) ~ educ + exper + I(exper^2), data = wage_db)
summary(mod_3)
# Model 4: education, experience, their interaction, and age (quadratic).
# FIX: `age^2` needed I() for the same reason, and the original fitted this
# model on the full `wage2` data while every other candidate uses `wage_db`
# (first 500 rows); the marginal-likelihood comparison below requires a
# common dataset.
mod_4 <- lm(log(wage) ~ educ + exper + educ*exper + age + I(age^2), data = wage_db)
summary(mod_4)
# Model 5: Model 3 plus marital status.
mod_5 <- lm(log(wage) ~ educ + exper + I(exper^2) + married, data = wage_db)
summary(mod_5)
# Modelo 6: Modelo 1 agregando estado civil, raza, si vive en el sur y
# si vive en zona metropolitana
mod_6 <- lm(log(wage)~educ+exper+tenure+married+black+south+urban, data = wage_db)
summary(mod_6)
# Estimar m gorro para cada modelo candidato ------------------------------
# De la ecuación (37) del artículo de Wasserman:
# Approximate log marginal likelihood of a fitted lm (eq. (37) in
# Wasserman): -n*log(sigma_hat) - (n/2)*log(2*pi) - n/2, where sigma_hat
# is the MLE of the residual standard deviation.
fun_log_m_hat <- function(model){
  # MLE of sigma divides the RSS by n (not n - k)
  n <- nobs(model)
  sigma_hat <- sqrt(sum(residuals(model)^2)/n)
  log_m_hat <- -n*log(sigma_hat)-(n/2)*log(2*pi)-(n/2)
  return(log_m_hat)
}
models_list <- list(mod_1, mod_2, mod_3, mod_4, mod_5, mod_6)
names(models_list) <- paste("mod_", 1:6, sep = "")
# Vector de log m gorros para cada modelo (mantener logs para evitar problemas
# numéricos)
log_m_hat <- sapply(models_list, fun_log_m_hat)
# Probabilidades posteriores de los modelos
(probs_post <- exp(log_m_hat-log(sum(exp(log_m_hat)))))
# Comparar conclusiones con AIC y BIC
sapply(models_list, AIC) # Elegir el de menor AIC
sapply(models_list, BIC) # Elegir el de menor BIC
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/hello.R
\docType{package}
\name{wordcountaddin}
\alias{wordcountaddin}
\title{wordcountaddin}
\description{
This package is an addin for RStudio that will count the words and characters in a plain text document. It is designed for use with R markdown documents and will exclude YAML header content, code chunks and inline code from the counts. It also computes readability statistics so you can get an idea of how easy or difficult your text is to read.
}
| /man/wordcountaddin.Rd | permissive | benmarwick/wordcountaddin | R | false | true | 535 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/hello.R
\docType{package}
\name{wordcountaddin}
\alias{wordcountaddin}
\title{wordcountaddin}
\description{
This package is an addin for RStudio that will count the words and characters in a plain text document. It is designed for use with R markdown documents and will exclude YAML header content, code chunks and inline code from the counts. It also computes readability statistics so you can get an idea of how easy or difficult your text is to read.
}
|
library(dplyr)
## Import linelist and get the incidence curve from it.
linelist <- get(load("data/CaseCounts/drc/linelist_29082018.RData"))
linelist$date_onset_new <- as.Date(linelist$date_onset_new)
## Fix name difference
linelist$location_province <-
stringr::str_replace_all(linelist$location_province,
"NORD- KIVU",
"NORD-KIVU")
incid_all <- incidence::incidence(linelist$date_onset_new,
groups = linelist$location_province) %>%
as.data.frame()
readr::write_csv(x = incid_all,
path = here::here("data/CaseCounts/drc",
"incid_drc_04052018.csv"))
## Extract centroids for the places of interest
centroids <- here::here("data/Geography/centroids/processed",
"adm0_centroids_fixed.tsv") %>%
readr::read_tsv()
## Countries that share a border with Liberia, Guinea or Sierra
wafrica <- c("Liberia",
"Guinea",
"Sierra Leone",
"Guinea-Bissau",
"Gambia",
"Senegal",
"Mali",
"Côte d'Ivoire"
)
filter(centroids, ADM0 %in% wafrica) %>%
readr::write_csv(path = here::here("data/Geography/centroids/processed",
"wafrica_adm0_centroids.csv"))
## First create the relative risk profile using gravity model alone
## Then use the epicurve to weight the profiles. This step requires
## SI mean and sd.
## Then create the map
## params <- list(from = "ituri",
## alpha = 2.01,
## rho = 72.96,
## tau = 1.12)
## 05092018 Alternative parameter values
## params <- list(from = "ituri",
## alpha = 1.91,
## rho = 88.23,
## tau = 1.22)
## 05092018 Parameters from model fitted to Zambia
## params <- list(from = "ituri",
## alpha = 1.70,
## rho = 38.47,
## tau = 0.91)
## 05092018 Parameters from model fitted to Tanzania
## Table 1
## params <- list(from = "ituri",
## alpha = 3.62,
## rho = 365.0375,
## tau = 0.86)
## 07092018 Reformatted report to make it more parameterised.
sources <- c("Guinea", "Liberia", "Sierra Leone")
## Can't call this object params else running render in a loop won't
## work.
params2 <- list(model = "gravity_alt",
modelpars = list(alpha = 2.01,
rho = 72.96,
tau = 1.12),
centroids = "data/Geography/centroids/processed/wafrica_adm0_centroids.csv"
)
# Render one relative-risk report per source country.
# IDIOM: walk() (not map()) is the purrr verb for side-effect-only
# iteration; it returns its input invisibly instead of accumulating (and
# auto-printing) a list of render() output paths.
purrr::walk(sources, function(src) {
  params2$from <- src
  rmarkdown::render(here::here("reports/relative_risk.Rmd"),
                    params = params2)
})
outfile_suffix <- paste(sapply(params2$modelpars, paste, collapse=""),
collapse = "_")
outfiles <- paste0("output/flow_from_",
sources,
"_",
outfile_suffix,
".csv")
names(outfiles) <- sources
## Now we are ready to determine importation risk.
wtd_risk_out <- paste0("output/wtd_rel_risk_", outfile_suffix, ".csv")
params_imptn <- list(sources = sources,
cases = "data/CaseCounts/processed/HealthMap_Ebola_wide.csv",
risk = outfiles,
simean = 15.3,
sisd = 9.1,
R = 1.03,
onday = 200,
outfile = wtd_risk_out)
rm(params)
rmarkdown::render(here::here("reports/importation_risk.Rmd"),
params = params_imptn)
## The quotes/spaces are converted to periods.
## Sort out manually for now and then sort out later.
# Quotes/spaces in country names were converted to periods upstream; map the
# mangled names back. A named lookup vector replaces the original repeated
# which()/assign pattern and is easier to extend with new countries.
wtd_risk <- readr::read_csv(here::here("output/wtd_rel_risk_2.01_72.96_1.12.csv"))
name_fixes <- c("Côte.d.Ivoire" = "Côte d'Ivoire",
                "Sierra.Leone"  = "Sierra Leone",
                "Guinea.Bissau" = "Guinea-Bissau")
needs_fix <- wtd_risk$flow_to %in% names(name_fixes)
wtd_risk$flow_to[needs_fix] <- name_fixes[wtd_risk$flow_to[needs_fix]]
readr::write_csv(x = wtd_risk,
                 path = here::here("output/wtd_rel_risk_2.01_72.96_1.12.csv"))
| /reports/map_pipeline.R | no_license | annecori/mRIIDSprocessData | R | false | false | 4,309 | r | library(dplyr)
## Import linelist and get the incidence curve from it.
linelist <- get(load("data/CaseCounts/drc/linelist_29082018.RData"))
linelist$date_onset_new <- as.Date(linelist$date_onset_new)
## Fix name difference
linelist$location_province <-
stringr::str_replace_all(linelist$location_province,
"NORD- KIVU",
"NORD-KIVU")
incid_all <- incidence::incidence(linelist$date_onset_new,
groups = linelist$location_province) %>%
as.data.frame()
readr::write_csv(x = incid_all,
path = here::here("data/CaseCounts/drc",
"incid_drc_04052018.csv"))
## Extract centroids for the places of interest
centroids <- here::here("data/Geography/centroids/processed",
"adm0_centroids_fixed.tsv") %>%
readr::read_tsv()
## Countries that share a border with Liberia, Guinea or Sierra
wafrica <- c("Liberia",
"Guinea",
"Sierra Leone",
"Guinea-Bissau",
"Gambia",
"Senegal",
"Mali",
"Côte d'Ivoire"
)
filter(centroids, ADM0 %in% wafrica) %>%
readr::write_csv(path = here::here("data/Geography/centroids/processed",
"wafrica_adm0_centroids.csv"))
## First create the relative risk profile using gravity model alone
## Then use the epicurve to weight the profiles. This step requires
## SI mean and sd.
## Then create the map
## params <- list(from = "ituri",
## alpha = 2.01,
## rho = 72.96,
## tau = 1.12)
## 05092018 Alternative parameter values
## params <- list(from = "ituri",
## alpha = 1.91,
## rho = 88.23,
## tau = 1.22)
## 05092018 Parameters from model fitted to Zambia
## params <- list(from = "ituri",
## alpha = 1.70,
## rho = 38.47,
## tau = 0.91)
## 05092018 Parameters from model fitted to Tanzania
## Table 1
## params <- list(from = "ituri",
## alpha = 3.62,
## rho = 365.0375,
## tau = 0.86)
## 07092018 Reformatted report to make it more parameterised.
sources <- c("Guinea", "Liberia", "Sierra Leone")
## Can't call this object params else running render in a loop won't
## work.
params2 <- list(model = "gravity_alt",
modelpars = list(alpha = 2.01,
rho = 72.96,
tau = 1.12),
centroids = "data/Geography/centroids/processed/wafrica_adm0_centroids.csv"
)
# Render one relative-risk report per source country.
# IDIOM: walk() (not map()) is the purrr verb for side-effect-only
# iteration; it returns its input invisibly instead of accumulating (and
# auto-printing) a list of render() output paths.
purrr::walk(sources, function(src) {
  params2$from <- src
  rmarkdown::render(here::here("reports/relative_risk.Rmd"),
                    params = params2)
})
outfile_suffix <- paste(sapply(params2$modelpars, paste, collapse=""),
collapse = "_")
outfiles <- paste0("output/flow_from_",
sources,
"_",
outfile_suffix,
".csv")
names(outfiles) <- sources
## Now we are ready to determine importation risk.
wtd_risk_out <- paste0("output/wtd_rel_risk_", outfile_suffix, ".csv")
params_imptn <- list(sources = sources,
cases = "data/CaseCounts/processed/HealthMap_Ebola_wide.csv",
risk = outfiles,
simean = 15.3,
sisd = 9.1,
R = 1.03,
onday = 200,
outfile = wtd_risk_out)
rm(params)
rmarkdown::render(here::here("reports/importation_risk.Rmd"),
params = params_imptn)
## The quotes/spaces are converted to periods.
## Sort out manually for now and then sort out later.
# Quotes/spaces in country names were converted to periods upstream; map the
# mangled names back. A named lookup vector replaces the original repeated
# which()/assign pattern and is easier to extend with new countries.
wtd_risk <- readr::read_csv(here::here("output/wtd_rel_risk_2.01_72.96_1.12.csv"))
name_fixes <- c("Côte.d.Ivoire" = "Côte d'Ivoire",
                "Sierra.Leone"  = "Sierra Leone",
                "Guinea.Bissau" = "Guinea-Bissau")
needs_fix <- wtd_risk$flow_to %in% names(name_fixes)
wtd_risk$flow_to[needs_fix] <- name_fixes[wtd_risk$flow_to[needs_fix]]
readr::write_csv(x = wtd_risk,
                 path = here::here("output/wtd_rel_risk_2.01_72.96_1.12.csv"))
|
# Clear workspace
rm(list = ls())
# Setup
################################################################################
# Packages
library(TMB)
library(devtools)
library(freeR)
library(tidyverse)
# devtools::install_github("kaskr/TMB_contrib_R/TMBhelper")
# Directories
tmbdir <- "code/tmb_code"
datadir <- "data/ramldb/data/processed"
codedir <- "code"
outputdir <- "output"
# Read data
data <- readRDS(file.path(datadir, "RAM_NE_data_w_sst_trimmed_prepped.Rds")) %>%
rename(tb_sd=tb_scaled, sp_sd=sp_scaled)
# Helper functions
source(file.path(codedir, "helper_functions.R"))
# Function to fit model
################################################################################
# Fit surplus production model
# NOTE(review): leftover interactive-debugging line -- `data <- data` is a
# no-op and `p <- 1` creates a global that fit_sp() never reads (p is a
# function parameter). Safe to delete; confirm with the author.
data <- data; p <- 1
# Fit a Pella-Tomlinson surplus production model with TMB.
#
# Args:
#   data: data frame with columns stockid, tb_sd (scaled biomass) and
#         sp_sd (scaled surplus production).
#   p:    Pella-Tomlinson shape parameter (controls B/K at MSY).
#
# Side effects: loads the compiled TMB template, temporarily changes the
# working directory, and saves the fitted objects to `outputdir`.
fit_sp <- function(data, p){

  # 1. Format data
  ######################################

  # Parameters
  stocks <- unique(data$stockid)
  nstocks <- length(stocks)

  # 2. Fit production model
  ######################################

  # Compile TMB code
  # Only run once to compile code
  origdir <- getwd()
  setwd(tmbdir)
  # FIX: restore the working directory even if fitting errors out; the
  # original only restored it on the success path.
  on.exit(setwd(origdir), add = TRUE)
  if(FALSE){
    dyn.unload(paste(tmbdir, dynlib("pella"), sep="/"))
    file.remove(paste(tmbdir, c("pella.o", "pella.dll"), sep="/"))
    compile("pella.cpp")
  }

  # Load TMB code
  dyn.load(dynlib("pella"))

  # Input data and parameter starting values
  params <- list(ln_B0=rep(1.5, nstocks),
                 ln_r=rep(log(0.4), nstocks),
                 ln_sigmaP=rep(-2.5, nstocks)) # -3 before, -1.25 based on model fits
  input.data <- list(Nstocks=nstocks,
                     Nobs=nrow(data),
                     p=p,
                     StockID=as.factor(data$stockid),
                     B_t=data$tb_sd,
                     P_t=data$sp_sd)

  # Initialization
  model <- MakeADFun(data=input.data, parameters=params, DLL="pella")
  # model$control <- list(trace=1, parscale=rep(1,13), REPORT=1, reltol=1e-12, maxit=100)
  # model$hessian <- F
  # newtonOption(model, smartsearch=TRUE)

  # Run model
  output <- TMBhelper::fit_tmb(obj=model, lower=-Inf, upper=Inf, loopnum=3, newtonsteps=3, bias.correct=FALSE, getsd=FALSE)

  # 3. Check fit
  ######################################

  # Use hessian to diagnose fixed effects that might cause a problem
  # (negative eigenvalues indicate a non-positive-definite hessian)
  hess <- optimHess(par=output$par, fn=model$fn, gr=model$gr)
  problem.vals <- which(eigen(hess)$values < 0)
  if(length(problem.vals) > 0){
    display <- eigen(hess)$vectors[, problem.vals]
    names(display) <- output$diagnostics$Param
    cbind(seq_along(output$par), output$par, display)
  }

  # Calculate SD (try() so a singular hessian does not abort the run)
  sd <- try(sdreport(model, hessian.fixed=hess))

  # AIC of model
  TMBhelper::TMBAIC(output)

  # 4. Export model fit
  ######################################

  # Outfile name (redundant "" argument removed from the original paste0)
  outfile <- paste0("pella_", format(p, nsmall=2), "p.Rdata")

  # Export model objects; restore the original working directory first so
  # the relative output path resolves as before
  setwd(origdir)
  save(data, stocks, nstocks,
       input.data, params,
       model, output, sd, hess, #results,
       file=paste(outputdir, outfile, sep="/"))
}
# Fit models
################################################################################
# Fit surplus production models
fit_sp(data, p=1) # 50%
fit_sp(data, p=0.55) # 45%
fit_sp(data, p=0.20) # 40%
fit_sp(data, p=0.01) # 37%
| /code/Step1_fit_pella.R | no_license | cfree14/ne_fishing_portfolio | R | false | false | 3,269 | r |
# Clear workspace
rm(list = ls())
# Setup
################################################################################
# Packages
library(TMB)
library(devtools)
library(freeR)
library(tidyverse)
# devtools::install_github("kaskr/TMB_contrib_R/TMBhelper")
# Directories
tmbdir <- "code/tmb_code"
datadir <- "data/ramldb/data/processed"
codedir <- "code"
outputdir <- "output"
# Read data
data <- readRDS(file.path(datadir, "RAM_NE_data_w_sst_trimmed_prepped.Rds")) %>%
rename(tb_sd=tb_scaled, sp_sd=sp_scaled)
# Helper functions
source(file.path(codedir, "helper_functions.R"))
# Function to fit model
################################################################################
# Fit surplus production model
data <- data; p <- 1
fit_sp <- function(data, p){
# 1. Format data
######################################
# Parameters
stocks <- unique(data$stockid)
nstocks <- length(stocks)
# 2. Fit production model
######################################
# Compile TMB code
# Only run once to compile code
origdir <- getwd()
setwd(tmbdir)
if(FALSE){
dyn.unload(paste(tmbdir, dynlib("pella"), sep="/"))
file.remove(paste(tmbdir, c("pella.o", "pella.dll"), sep="/"))
compile("pella.cpp")
}
# Load TMB code
dyn.load(dynlib("pella"))
# Input data and parameter starting values
params <- list(ln_B0=rep(1.5, nstocks),
ln_r=rep(log(0.4), nstocks),
ln_sigmaP=rep(-2.5, nstocks)) # -3 before, -1.25 based on model fits
input.data <- list(Nstocks=nstocks,
Nobs=nrow(data),
p=p,
StockID=as.factor(data$stockid),
B_t=data$tb_sd,
P_t=data$sp_sd)
# Initialization
model <- MakeADFun(data=input.data, parameters=params, DLL="pella")
# model$control <- list(trace=1, parscale=rep(1,13), REPORT=1, reltol=1e-12, maxit=100)
# model$hessian <- F
# newtonOption(model, smartsearch=TRUE)
# Run model
output <- TMBhelper::fit_tmb(obj=model, lower=-Inf, upper=Inf, loopnum=3, newtonsteps=3, bias.correct=FALSE, getsd=FALSE)
# 3. Check fit
######################################
# Use hessian to diagnose fixed effects that might cause a problem
hess <- optimHess(par=output$par, fn=model$fn, gr=model$gr)
problem.vals <- which(eigen(hess)$values<0)
if(length(problem.vals)>0 ){
display <- eigen(hess)$vectors[,problem.vals]
names(display) = (output$diagnostics$Param)
cbind(1:length(output$par), output$par, display)
}
# Calculate SD
sd <- try(sdreport(model, hessian.fixed=hess))
# AIC of model
TMBhelper::TMBAIC(output)
# 4. Export model fit
######################################
# Outfile name
outfile <- paste0("pella_", "", format(p, nsmall=2), "p.Rdata")
# Export model objects
setwd(origdir)
save(data, stocks, nstocks,
input.data, params,
model, output, sd, hess, #results,
file=paste(outputdir, outfile, sep="/"))
}
# Fit models
################################################################################
# Fit surplus production models
fit_sp(data, p=1) # 50%
fit_sp(data, p=0.55) # 45%
fit_sp(data, p=0.20) # 40%
fit_sp(data, p=0.01) # 37%
|
# Align two networks with a CRF model and write the node mapping to `output`.
#
# Args:
#   netfileA, netfileB: edge-list files for the two networks.
#   simfile:            cross-network node-similarity file.
#   alpha, beta, delta.d: model tuning parameters passed to build_model().
#   output:             path for the alignment result file.
#
# Prints the elapsed time of each stage (read / build / decode).
net_align <- function(netfileA, netfileB, simfile, alpha=1, beta=2,
                      delta.d=1e-10, output="result.txt")
{
  t_start <- Sys.time()
  combined_net <- read_net(netfileA, netfileB, simfile)
  t_read <- Sys.time()
  print(t_read - t_start)
  # FIX: the original used `c` as a timing variable, shadowing base::c()
  # inside this function; descriptive names avoid that trap.
  crf <- build_model(combined_net, alpha, beta, delta.d)
  t_build <- Sys.time()
  print(t_build - t_read)
  gc()
  result <- decode.lbp(crf, max.iter=1000, verbose=1)
  t_decode <- Sys.time()
  # Map decoded state indices back to node labels; seq_len() is safe if
  # n.nodes were ever 0 (1:0 would yield c(1, 0)).
  result <- crf$state.map[cbind(seq_len(crf$n.nodes), result)]
  print(t_decode - t_build)
  write_result(combined_net, result, output)
}
# Read two edge-list networks and a cross-network similarity table, and
# assemble every matrix needed by build_model().
#
# netfileA / netfileB: whitespace-separated edge lists, one edge per line.
#   Lines with a missing second endpoint (padded to "" by read.table's
#   fill = TRUE) are dropped from the edge set.
# simfile: three columns -- a node of A, a node of B, a similarity score.
#
# Node names are prefixed with "A_" / "B_" so the two vertex sets cannot
# collide in the shared index space.
#
# Value: a list with
#   node_sim - size x size sparse matrix of similarity scores (stored in
#              both directions, hence symmetric)
#   sizeA    - number of distinct nodes in network A
#   node     - combined node names: A nodes, B nodes, then nodes occurring
#              only in the similarity file
#   matrix   - size x size adjacency of the combined graph
#   size     - total number of combined nodes
#   matrixA / matrixB - (size+1) x (size+1) adjacencies of A and B; the
#              extra row/column lets the "gap" state (id size+1) be used
#              directly as an index by build_model()/submatrix()
#   sim_edge - similarity edges as (id, id, score), both directions
#   sizeAB   - number of nodes appearing in the two edge lists themselves
read_net <- function(netfileA, netfileB, simfile)
{
  net.textA <- as.matrix(read.table(netfileA, fill=T, as.is=T))
  net.textB <- as.matrix(read.table(netfileB, fill=T, as.is=T))
  sim.text <- as.matrix(read.table(simfile, fill=T, as.is=T))
  # Network A: distinct names, drop the "" padding, count, then prefix.
  net.nodeA <- unique(as.character(net.textA))
  net.nodeA <- net.nodeA[net.nodeA != ""]
  net.sizeA <- length(net.nodeA)
  net.nodeA <- paste("A_", sep="", net.nodeA)
  # Network B: same treatment.
  # BUG FIX: the original filtered "" only *after* pasting the "B_" prefix,
  # so the test against "" never matched and a spurious "B_" node (from a
  # fill-padded row) leaked into net.node, inflating net.size and sizeAB.
  net.nodeB <- unique(as.character(net.textB))
  net.nodeB <- net.nodeB[net.nodeB != ""]
  net.sizeB <- length(net.nodeB)
  net.nodeB <- paste("B_", sep="", net.nodeB)
  # Nodes mentioned only in the similarity file are appended after sizeAB
  # has been recorded.
  sim.node <- unique(c(paste("A_", sep="", as.character(sim.text[,1])),
                       paste("B_", sep="", as.character(sim.text[,2]))))
  sim.node <- sim.node[sim.node != ""]
  net.node <- unique(c(net.nodeA, net.nodeB))
  net.sizeAB <- length(net.node)
  net.node <- unique(c(net.node, sim.node))
  net.size <- length(net.node)
  # Edge lists; rows whose second endpoint was padded to "" are removed.
  # BUG FIX: drop=FALSE keeps these as matrices even when a single edge
  # survives the filter (a plain vector would break the [,1] indexing below).
  net.edgeA <- cbind(paste("A_", sep="", as.character(net.textA[,1])),
                     paste("A_", sep="", as.character(net.textA[,2])))
  net.edgeB <- cbind(paste("B_", sep="", as.character(net.textB[,1])),
                     paste("B_", sep="", as.character(net.textB[,2])))
  net.edgeA <- net.edgeA[net.edgeA[,2] != "A_", , drop=FALSE]
  net.edgeB <- net.edgeB[net.edgeB[,2] != "B_", , drop=FALSE]
  # Similarity edges in both directions so node_sim comes out symmetric.
  sim.edge <- cbind(paste("A_", sep="", as.character(sim.text[,1])),
                    paste("B_", sep="", as.character(sim.text[,2])))
  sim.edgerev <- cbind(sim.edge[,2], sim.edge[,1])
  sim.edge <- rbind(sim.edge, sim.edgerev)
  net.edge <- rbind(net.edgeA, sim.edge, net.edgeB)
  # Name -> integer id lookup used for all sparseMatrix constructions.
  node.id <- seq_along(net.node)
  names(node.id) <- net.node
  # One extra row/column so the gap state id (net.size + 1) is indexable.
  net.matrixA <- sparseMatrix(node.id[net.edgeA[,1]], node.id[net.edgeA[,2]],
                              x=T, dims=c(net.size+1, net.size+1))
  net.matrixB <- sparseMatrix(node.id[net.edgeB[,1]], node.id[net.edgeB[,2]],
                              x=T, dims=c(net.size+1, net.size+1))
  # The score vector is recycled over the doubled (forward + reverse)
  # index vectors, which assigns the same score to both directions.
  sim.matrix <- sparseMatrix(node.id[sim.edge[,1]], node.id[sim.edge[,2]],
                             x=as.numeric(sim.text[,3]),
                             dims=c(net.size, net.size))
  net.matrix <- sparseMatrix(node.id[net.edge[,1]], node.id[net.edge[,2]],
                             x=T, dims=c(net.size, net.size))
  sim.edge <- cbind(node.id[sim.edge[,1]], node.id[sim.edge[,2]],
                    as.numeric(sim.text[,3]))
  list(node_sim = sim.matrix, sizeA = net.sizeA, node = net.node,
       matrix = net.matrix, size = net.size,
       matrixA = net.matrixA, matrixB = net.matrixB,
       sim_edge = sim.edge, sizeAB = net.sizeAB)
}
# Build the pairwise Markov random field that encodes the alignment problem.
#
# Node i of the CRF corresponds to node i of the combined graph
# (combined_net$matrix).  Its states are the candidate partners of i that
# carry a nonzero similarity score, plus one extra "gap" state (encoded as
# combined_net$size + 1) meaning "leave i unaligned".
#
# alpha scales the node (similarity) potentials, beta the edge (topology
# conservation) potentials; delta.d is a tiny positive score whose only job
# is to guarantee every node at least one state (the gap).
#
# Requires make.crf() from the CRF package (assumed loaded by the caller).
build_model <- function(combined_net,alpha,beta,delta.d)
{
# Appending a strictly positive column makes rowSums(S>0) >= 1 for every
# node: states per node = nonzero-similarity partners + 1 gap state.
S <- cbind(combined_net$node_sim,delta.d)
crf <- make.crf(combined_net$matrix, rowSums(S>0))
# state.map[i, s] = combined-graph id represented by state s of node i
# (0 marks an unused slot).
crf$state.map <- matrix(0, nrow=crf$n.nodes, ncol=crf$max.state)
edges <- combined_net$sim_edge
state_cnt_vec <- vector('integer',crf$n.nodes)
len <- dim(edges)[1]
# One state per similarity edge leaving a node, with node potential
# exp(alpha * score / 2).
# NOTE(review): 1:len misbehaves when there are no similarity edges
# (len == 0 yields the sequence 1, 0) -- assumed nonempty input.
for( edge in 1:len){
i <- edges[edge,1]
j <- edges[edge,2]
x <- edges[edge,3]
state_cnt_vec[i] <- state_cnt_vec[i] + 1
crf$state.map[i,state_cnt_vec[i]] <- j
crf$node.pot[i,state_cnt_vec[i]] <- exp(x*alpha/2)
}
# Append the gap state (id = size + 1) with neutral potential 1.
for(i in 1:crf$n.nodes){
state_cnt_vec[i] <- state_cnt_vec[i] + 1
crf$state.map[i,state_cnt_vec[i]] <- combined_net$size+1
crf$node.pot[i,state_cnt_vec[i]] <- 1
}
A.size <- combined_net$sizeA
AB.size <- combined_net$sizeAB
W1 <- combined_net$matrixA # adjacency of network A, padded for the gap id
W2 <- combined_net$matrixB # adjacency of network B, padded for the gap id
# Edge potentials.  For an edge inside network A the states of both
# endpoints are candidate partners in B, so topology conservation is
# scored against W2 (and symmetrically against W1 for an edge inside B):
# potential exp(beta/2) wherever the two mapped partners are adjacent,
# 1 elsewhere.
for (e in 1:crf$n.edges)
{
n1 <- crf$edges[e, 1]
n2 <- crf$edges[e, 2]
m1 <- 1:crf$n.states[n1]
m2 <- 1:crf$n.states[n2]
if(n1<=A.size && n2 <=A.size && n1 <=AB.size && n2 <=AB.size){
W <- submatrix(W2,crf$state.map[n1, m1],crf$state.map[n2, m2])
crf$edge.pot[[e]] <- exp(W*beta/2)
}
else if(n1 > A.size && n2 > A.size && n1 <=AB.size && n2 <=AB.size){
W <- submatrix(W1,crf$state.map[n1, m1],crf$state.map[n2, m2])
crf$edge.pot[[e]] <- exp(W*beta/2)
}
else{
# Cross-network (similarity) edge n1--n2: forbid one endpoint mapping to
# the other unless the mapping is mutual -- potential 0 for one-sided
# matches, restored to 1 when n1 -> n2 and n2 -> n1 simultaneously.
G <- matrix(1,nrow=length(m1),ncol = length(m2))
m11 <- which(crf$state.map[n1,]==n2)
G[m11,m2]<-0
m22 <- which(crf$state.map[n2,]==n1)
G[m1,m22]<-0
G[m11,m22] <-1
crf$edge.pot[[e]]<-G
}
}
crf
}
# Write the alignment of network A to a text file: one line per A node,
# "<A node>   <matched node or 'gap'>", followed by one trailing blank line.
# 'result' holds combined-graph ids; id size+1 maps to the extra "gap" label.
write_result <- function(combined_net, result, filename="result.txt")
{
  con <- file(as.character(filename), "w")
  label.name <- c(combined_net$node, "gap")
  out.lines <- character(combined_net$sizeA)
  for (k in seq_len(combined_net$sizeA)) {
    out.lines[k] <- paste(combined_net$node[k], " ", label.name[result[k]])
  }
  # Single writeLines call; the appended "" reproduces the trailing blank line.
  writeLines(c(out.lines, ""), con, sep="\n")
  close(con)
}
# Extract column i of m as a dense base vector.
# For a CsparseMatrix the column is rebuilt directly from the slots
# (@p column pointers, @i zero-based row indices, @x values); anything
# else falls back to ordinary [, i] indexing.
column <- function(m, i)
{
  if (!inherits(m, "CsparseMatrix")) {
    return(m[, i])
  }
  out <- vector(typeof(m@x), m@Dim[1])
  # Positions of column i's nonzeros within @x; empty for an all-zero column.
  nz <- m@p[i] + seq_len(m@p[i + 1] - m@p[i])
  out[m@i[nz] + 1] <- m@x[nz]
  out
}
# Dense submatrix m[rows, cols], assembled column by column via column().
# NOTE: sapply-style simplification is relied upon by build_model(): the
# result is a length(rows) x length(cols) matrix when length(rows) > 1 but
# collapses to a plain vector when length(rows) == 1.
submatrix <- function(m, rows, cols)
{
  per.col <- lapply(cols, function(j) column(m, j)[rows])
  simplify2array(per.col, higher = FALSE)
}
| /net_align.R | no_license | rtchen/Corbi | R | false | false | 5,212 | r | net_align <- function(netfileA, netfileB,simfile, alpha=1,beta=2,delta.d=1e-10,output="result.txt")
{ a <- Sys.time()
combined_net <- read_net(netfileA,netfileB,simfile)
b<- Sys.time()
print(b-a)
crf <-build_model(combined_net,alpha,beta,delta.d)
c <- Sys.time()
print(c-b)
gc()
result <- decode.lbp(crf,max.iter=1000,verbose=1)
d <- Sys.time()
result <- crf$state.map[cbind(1:crf$n.nodes, result)]
print(d-c)
write_result(combined_net, result, output)
}
read_net <- function(netfileA,netfileB,simfile)
{
net.textA <- as.matrix(read.table(netfileA, fill=T, as.is=T))
net.textB <- as.matrix(read.table(netfileB, fill=T, as.is=T))
sim.text <- as.matrix(read.table(simfile, fill=T, as.is=T))
net.nodeA <- unique(as.character(net.textA))
net.nodeA <- net.nodeA[net.nodeA != ""]
net.sizeA <- length(net.nodeA)
net.nodeA <- paste("A_",sep="",net.nodeA)
net.nodeB <- unique(as.character(net.textB))
net.sizeB <- length(net.nodeB)
net.nodeB <-paste("B_",sep="",net.nodeB)
net.nodeB <- net.nodeB[net.nodeB != ""]
sim.node <- unique(c(paste("A_",sep="",as.character(sim.text[,1])),paste("B_",sep="",as.character(sim.text[,2]))))
sim.node <- sim.node[sim.node !=""]
net.node <- unique(c(net.nodeA,net.nodeB))
net.sizeAB <- length(net.node)
net.node <- unique(c(net.node, sim.node))
net.size <- length(net.node)
net.edgeA <- cbind(paste("A_",sep="",as.character(net.textA[,1])), paste("A_",sep="",as.character(net.textA[,2])))
net.edgeB <- cbind(paste("B_",sep="",as.character(net.textB[,1])), paste("B_",sep="",as.character(net.textB[,2])))
net.edgeA <- net.edgeA[net.edgeA[,2] != "A_", ]
net.edgeB <- net.edgeB[net.edgeB[,2] != "B_", ]
sim.edge <- cbind(paste("A_",sep="",as.character(sim.text[,1])), paste("B_",sep="",as.character(sim.text[,2])))
sim.edgerev <- cbind(paste("B_",sep="",as.character(sim.text[,2])), paste("A_",sep="",as.character(sim.text[,1])))
sim.edge <- rbind(sim.edge,sim.edgerev)
net.edge <- rbind(net.edgeA,sim.edge,net.edgeB)
node.id <- seq_along(net.node)
names(node.id) <- net.node
net.matrixA <- sparseMatrix(node.id[net.edgeA[,1]], node.id[net.edgeA[,2]], x=T, dims=c(net.size+1, net.size+1))
net.matrixB <- sparseMatrix(node.id[net.edgeB[,1]], node.id[net.edgeB[,2]], x=T, dims=c(net.size+1, net.size+1))
sim.matrix <- sparseMatrix(node.id[sim.edge[,1]], node.id[sim.edge[,2]], x=as.numeric(sim.text[,3]), dims=c(net.size, net.size))
net.matrix <- sparseMatrix(node.id[net.edge[,1]], node.id[net.edge[,2]], x=T, dims=c(net.size, net.size))
sim.edge <- cbind(node.id[sim.edge[,1]],node.id[sim.edge[,2]],as.numeric(sim.text[,3]))
list(node_sim = sim.matrix,sizeA=net.sizeA,node=net.node, matrix=net.matrix,size = net.size,
matrixA = net.matrixA,matrixB = net.matrixB,sim_edge = sim.edge,sizeAB = net.sizeAB)
}
build_model <- function(combined_net,alpha,beta,delta.d)
{
S <- cbind(combined_net$node_sim,delta.d)
crf <- make.crf(combined_net$matrix, rowSums(S>0))
crf$state.map <- matrix(0, nrow=crf$n.nodes, ncol=crf$max.state)
edges <- combined_net$sim_edge
state_cnt_vec <- vector('integer',crf$n.nodes)
len <- dim(edges)[1]
for( edge in 1:len){
i <- edges[edge,1]
j <- edges[edge,2]
x <- edges[edge,3]
state_cnt_vec[i] <- state_cnt_vec[i] + 1
crf$state.map[i,state_cnt_vec[i]] <- j
crf$node.pot[i,state_cnt_vec[i]] <- exp(x*alpha/2)
}
for(i in 1:crf$n.nodes){
state_cnt_vec[i] <- state_cnt_vec[i] + 1
crf$state.map[i,state_cnt_vec[i]] <- combined_net$size+1
crf$node.pot[i,state_cnt_vec[i]] <- 1
}
A.size <- combined_net$sizeA
AB.size <- combined_net$sizeAB
W1 <- combined_net$matrixA
W2 <- combined_net$matrixB
for (e in 1:crf$n.edges)
{
n1 <- crf$edges[e, 1]
n2 <- crf$edges[e, 2]
m1 <- 1:crf$n.states[n1]
m2 <- 1:crf$n.states[n2]
if(n1<=A.size && n2 <=A.size && n1 <=AB.size && n2 <=AB.size){
W <- submatrix(W2,crf$state.map[n1, m1],crf$state.map[n2, m2])
crf$edge.pot[[e]] <- exp(W*beta/2)
}
else if(n1 > A.size && n2 > A.size && n1 <=AB.size && n2 <=AB.size){
W <- submatrix(W1,crf$state.map[n1, m1],crf$state.map[n2, m2])
crf$edge.pot[[e]] <- exp(W*beta/2)
}
else{
G <- matrix(1,nrow=length(m1),ncol = length(m2))
m11 <- which(crf$state.map[n1,]==n2)
G[m11,m2]<-0
m22 <- which(crf$state.map[n2,]==n1)
G[m1,m22]<-0
G[m11,m22] <-1
crf$edge.pot[[e]]<-G
}
}
crf
}
write_result <- function(combined_net, result, filename="result.txt")
{
con <- file(as.character(filename), "w")
label.name <- c(combined_net$node, "gap")
# cnt <- 0
for (i in 1:combined_net$sizeA)
{ #if (result[i] <= combined_net$size){
#cnt <- cnt+1
#}
writeLines(paste(combined_net$node[i], " ", label.name[result[i]]), con, sep="\n")
}
writeLines("", con, sep="\n")
#print(cnt)
close(con)
}
column <- function(m, i)
{
if (inherits(m, "CsparseMatrix")) {
v <- vector(typeof(m@x), m@Dim[1])
p <- (m@p[i]+1):m@p[i+1]
if (p[1] <= p[length(p)])
v[m@i[p]+1] <- m@x[p]
}
else
v <- m[,i]
v
}
submatrix <- function(m, rows, cols)
{
sapply(cols, function(i) column(m, i)[rows])
}
|
# MATLAB-style minimum.
#
# For a vector x: the overall minimum (NAs removed).  For a matrix x: the
# row-wise minima when ColsOrRows == 1, otherwise the column-wise minima,
# again with NAs removed.  If 'vergleich' (a comparison value) is supplied,
# the result is additionally capped element-wise at that value.
# author: MT
matlabmin <- function(x, ColsOrRows = 1, vergleich = NULL) {
  if (is.vector(x)) {
    mini <- min(x, na.rm = TRUE)
    if (!is.null(vergleich)) {
      mini <- min(c(mini, vergleich))
    }
  } else {
    # matrixStats is only needed on the matrix branch.  The original called
    # requireNamespace() and ignored its result, so a missing package only
    # surfaced later as an obscure error at the :: call; fail clearly instead.
    if (!requireNamespace("matrixStats", quietly = TRUE)) {
      stop("matlabmin() requires the 'matrixStats' package for matrix input",
           call. = FALSE)
    }
    if (ColsOrRows == 1) {
      mini <- matrixStats::rowMins(x, na.rm = TRUE)
      if (!is.null(vergleich)) {
        mini <- matrixStats::rowMins(cbind(mini, vergleich))
      }
    } else {
      mini <- matrixStats::colMins(x, na.rm = TRUE)
      if (!is.null(vergleich)) {
        mini <- matrixStats::colMins(cbind(mini, vergleich))
      }
    }
  }
  return(mini)
}
| /DbtTools/RetroMAT/R/matlabmin.R | no_license | markus-flicke/KD_Projekt_1 | R | false | false | 672 | r | matlabmin=function(x,ColsOrRows=1,vergleich=NULL){
# Bestimmt das Minimum eines Vector zur Zahl und einer Matrize zeilenweise analog zu matlab
#author: MT
requireNamespace('matrixStats')
if(is.vector(x)){
mini=min(x,na.rm = TRUE)
if(!is.null(vergleich)){mini=min(c(mini,vergleich))}
}else{
if(ColsOrRows==1){
mini=matrixStats::rowMins(x,na.rm = TRUE)
if(!is.null(vergleich)){mini=matrixStats::rowMins(cbind(mini,vergleich))}
}else{
mini=matrixStats::colMins(x,na.rm = TRUE)
if(!is.null(vergleich)){mini=matrixStats::colMins(cbind(mini,vergleich))}
}
}
return(mini)
}
|
# ---------------------------------------------------------------------------
# Simulation experiment: run the SIH metacommunity model over a gradient of
# dispersal rates (DispV) with nreplicates independent replicates, recording
# taxonomic and phylogenetic diversity at the local (per-patch) and regional
# (metacommunity) scale in Data_storage.
# Depends on SIH_function() and on pd()/mpd()/mntd()/cophenetic() from the
# picante/ape packages -- presumably loaded by an earlier script; TODO confirm.
# ---------------------------------------------------------------------------
nspecies<-7 #the number of species
npatches<-10 #the number of patches
nreplicates<-5 #number of replicates
DispV<-c(0.0001,0.0005,0.001,0.005,0.01,0.05,0.1,0.5,1) #the dispersal rates
# One row per dispersal rate x replicate x scale; the metric columns start as
# NA and are filled below (Biomass and Biomass_CV are declared but never
# filled anywhere in this script).
Data_storage<-data.frame(SR=NA,Biomass=NA,Biomass_CV=NA,PD=NA,MPD_abund=NA,MPD_pa=NA,MNTD_abund=NA,Dispersal=rep(DispV,each=nreplicates),ReplicateNum=factor(1:nreplicates),Scale=rep(c("Local","Regional"),each=length(DispV)*nreplicates)) #building the data frame
for(j in 1:nreplicates){
#runs the SIH model at all dispersal rates in DispV and saves the abundances and productivity in a list
# Each replicate draws its own vector of species conversion efficiencies.
eff_values<-rnorm(nspecies,mean=0.2,sd=0.005)
# sapply() returns a matrix-like result indexed as SIH_data[["Abund", i]] /
# SIH_data[["phylo", i]] for dispersal rate DispV[i].
SIH_data<-sapply(DispV,SIH_function,species=nspecies,patches=npatches,eff_vary=T,eff_values=eff_values)
#MPD_abund = mean pairwise distance, abundance weighted (vs pa = presence absence)
#need to put the NAs in initially
#MNTD = mean nearest taxon index, measures tippiness of tree
#need to add 'ses' to some? of the phylogenetic functions to make them compare the observed with a null
#right now none of them are doing that
for(i in 1:length(DispV)){
# NOTE(review): ReplicateNum is a factor; ReplicateNum==j compares through
# the factor levels ("1".."5"), which works here, but a plain integer column
# would be less fragile.
# The Abund array appears to be indexed [time, species, patch] (see the
# [400,,] slices below) -- TODO confirm against SIH_function().
#calculate species richness at the local and regional scale
Data_storage$SR[Data_storage$Dispersal==DispV[i] & Data_storage$ReplicateNum==j & Data_storage$Scale == "Local"] <-mean(rowMeans(apply(SIH_data[["Abund",i]]>0,3,rowSums)))
Data_storage$SR[Data_storage$Dispersal==DispV[i] & Data_storage$ReplicateNum==j & Data_storage$Scale == "Regional"]<-mean(rowSums(apply(SIH_data[["Abund",i]],2,rowMeans)>0)) #regional SR of all species in each time step
#At the local scale...
# Community matrix at time step 400 (rows = patches, columns = species);
# column names are set to 1:nspecies, presumably matching the simulated
# phylogeny's tip labels as required by pd()/mpd()/mntd() -- TODO confirm.
com_data<-t(SIH_data[["Abund",i]][400,,])
colnames(com_data)<-1:nspecies
Data_storage$PD[Data_storage$Dispersal==DispV[i] & Data_storage$ReplicateNum==j & Data_storage$Scale == "Local"]<-mean(pd(com_data,SIH_data[["phylo",i]])$PD)
Data_storage$MPD_abund[Data_storage$Dispersal==DispV[i] & Data_storage$ReplicateNum==j & Data_storage$Scale == "Local"]<-mean(mpd(com_data,cophenetic(SIH_data[["phylo",i]]),abundance.weighted = T))
Data_storage$MPD_pa[Data_storage$Dispersal==DispV[i] & Data_storage$ReplicateNum==j & Data_storage$Scale == "Local"]<-mean(mpd(com_data,cophenetic(SIH_data[["phylo",i]]),abundance.weighted = F))
Data_storage$MNTD_abund[Data_storage$Dispersal==DispV[i] & Data_storage$ReplicateNum==j & Data_storage$Scale == "Local"]<-mean(mntd(com_data,cophenetic(SIH_data[["phylo",i]]),abundance.weighted = T))
#At the regional scale
# Regional community = patch-summed abundances at time step 400 (1 x nspecies).
com_data<-matrix(colSums(t(SIH_data[["Abund",i]][400,,])),1,nspecies)
colnames(com_data)<-1:nspecies
Data_storage$PD[Data_storage$Dispersal==DispV[i] & Data_storage$ReplicateNum==j & Data_storage$Scale == "Regional"]<-pd(com_data,SIH_data[["phylo",i]])$PD
Data_storage$MPD_abund[Data_storage$Dispersal==DispV[i] & Data_storage$ReplicateNum==j & Data_storage$Scale == "Regional"]<-mpd(com_data,cophenetic(SIH_data[["phylo",i]]),abundance.weighted = T)
Data_storage$MPD_pa[Data_storage$Dispersal==DispV[i] & Data_storage$ReplicateNum==j & Data_storage$Scale == "Regional"]<-mpd(com_data,cophenetic(SIH_data[["phylo",i]]),abundance.weighted = F)
Data_storage$MNTD_abund[Data_storage$Dispersal==DispV[i] & Data_storage$ReplicateNum==j & Data_storage$Scale == "Regional"]<-mntd(com_data,cophenetic(SIH_data[["phylo",i]]),abundance.weighted = T)
}
}
#average over all of the replicates...
# Collapse Data_storage over the replicate dimension: one row per dispersal
# rate x scale.  Biomass/Biomass_CV remain NA (never computed above).
Data_storage_avg<-data.frame(SR=NA,Biomass=NA,Biomass_CV=NA,PD=NA,MPD_abund=NA,MPD_pa=NA,MNTD_abund=NA,Dispersal=DispV,Scale=rep(c("Local","Regional"),each=length(DispV))) #building the data frame
for(i in 1:length(DispV)){
#calculate species richness at the local and regional scale
Data_storage_avg$SR[Data_storage_avg$Dispersal==DispV[i] & Data_storage_avg$Scale == "Local"] <-mean(Data_storage$SR[Data_storage$Dispersal==DispV[i] & Data_storage$Scale == "Local"])
Data_storage_avg$SR[Data_storage_avg$Dispersal==DispV[i] & Data_storage_avg$Scale == "Regional"]<-mean(Data_storage$SR[Data_storage$Dispersal==DispV[i] & Data_storage$Scale == "Regional"])
#At the local scale...
Data_storage_avg$PD[Data_storage_avg$Dispersal==DispV[i] & Data_storage_avg$Scale == "Local"]<-mean(Data_storage$PD[Data_storage$Dispersal==DispV[i] & Data_storage$Scale == "Local"])
Data_storage_avg$MPD_abund[Data_storage_avg$Dispersal==DispV[i] & Data_storage_avg$Scale == "Local"]<-mean(Data_storage$MPD_abund[Data_storage$Dispersal==DispV[i] & Data_storage$Scale == "Local"])
Data_storage_avg$MPD_pa[Data_storage_avg$Dispersal==DispV[i] & Data_storage_avg$Scale == "Local"]<-mean(Data_storage$MPD_pa[Data_storage$Dispersal==DispV[i] & Data_storage$Scale == "Local"])
Data_storage_avg$MNTD_abund[Data_storage_avg$Dispersal==DispV[i] & Data_storage_avg$Scale == "Local"]<-mean(Data_storage$MNTD_abund[Data_storage$Dispersal==DispV[i] & Data_storage$Scale == "Local"])
#At the regional scale
Data_storage_avg$PD[Data_storage_avg$Dispersal==DispV[i] & Data_storage_avg$Scale == "Regional"]<-mean(Data_storage$PD[Data_storage$Dispersal==DispV[i] & Data_storage$Scale == "Regional"])
Data_storage_avg$MPD_abund[Data_storage_avg$Dispersal==DispV[i] & Data_storage_avg$Scale == "Regional"]<-mean(Data_storage$MPD_abund[Data_storage$Dispersal==DispV[i] & Data_storage$Scale == "Regional"])
Data_storage_avg$MPD_pa[Data_storage_avg$Dispersal==DispV[i] & Data_storage_avg$Scale == "Regional"]<-mean(Data_storage$MPD_pa[Data_storage$Dispersal==DispV[i] & Data_storage$Scale == "Regional"])
Data_storage_avg$MNTD_abund[Data_storage_avg$Dispersal==DispV[i] & Data_storage_avg$Scale == "Regional"]<-mean(Data_storage$MNTD_abund[Data_storage$Dispersal==DispV[i] & Data_storage$Scale == "Regional"])
}
# NOTE(review): this loop could be collapsed into a single aggregate() call;
# kept as-is to preserve behaviour exactly.
#take the s.d. over all of the replicates...
# Same layout as Data_storage_avg, but holding the standard deviation across
# the nreplicates replicates for each metric / dispersal rate / scale.
Data_storage_sd<-data.frame(SR=NA,Biomass=NA,Biomass_CV=NA,PD=NA,MPD_abund=NA,MPD_pa=NA,MNTD_abund=NA,Dispersal=DispV,Scale=rep(c("Local","Regional"),each=length(DispV))) #building the data frame
for(i in 1:length(DispV)){
#calculate species richness at the local and regional scale
Data_storage_sd$SR[Data_storage_sd$Dispersal==DispV[i] & Data_storage_sd$Scale == "Local"] <-sd(Data_storage$SR[Data_storage$Dispersal==DispV[i] & Data_storage$Scale == "Local"])
Data_storage_sd$SR[Data_storage_sd$Dispersal==DispV[i] & Data_storage_sd$Scale == "Regional"]<-sd(Data_storage$SR[Data_storage$Dispersal==DispV[i] & Data_storage$Scale == "Regional"])
#At the local scale...
Data_storage_sd$PD[Data_storage_sd$Dispersal==DispV[i] & Data_storage_sd$Scale == "Local"]<-sd(Data_storage$PD[Data_storage$Dispersal==DispV[i] & Data_storage$Scale == "Local"])
Data_storage_sd$MPD_abund[Data_storage_sd$Dispersal==DispV[i] & Data_storage_sd$Scale == "Local"]<-sd(Data_storage$MPD_abund[Data_storage$Dispersal==DispV[i] & Data_storage$Scale == "Local"])
Data_storage_sd$MPD_pa[Data_storage_sd$Dispersal==DispV[i] & Data_storage_sd$Scale == "Local"]<-sd(Data_storage$MPD_pa[Data_storage$Dispersal==DispV[i] & Data_storage$Scale == "Local"])
Data_storage_sd$MNTD_abund[Data_storage_sd$Dispersal==DispV[i] & Data_storage_sd$Scale == "Local"]<-sd(Data_storage$MNTD_abund[Data_storage$Dispersal==DispV[i] & Data_storage$Scale == "Local"])
#At the regional scale
Data_storage_sd$PD[Data_storage_sd$Dispersal==DispV[i] & Data_storage_sd$Scale == "Regional"]<-sd(Data_storage$PD[Data_storage$Dispersal==DispV[i] & Data_storage$Scale == "Regional"])
Data_storage_sd$MPD_abund[Data_storage_sd$Dispersal==DispV[i] & Data_storage_sd$Scale == "Regional"]<-sd(Data_storage$MPD_abund[Data_storage$Dispersal==DispV[i] & Data_storage$Scale == "Regional"])
Data_storage_sd$MPD_pa[Data_storage_sd$Dispersal==DispV[i] & Data_storage_sd$Scale == "Regional"]<-sd(Data_storage$MPD_pa[Data_storage$Dispersal==DispV[i] & Data_storage$Scale == "Regional"])
Data_storage_sd$MNTD_abund[Data_storage_sd$Dispersal==DispV[i] & Data_storage_sd$Scale == "Regional"]<-sd(Data_storage$MNTD_abund[Data_storage$Dispersal==DispV[i] & Data_storage$Scale == "Regional"])
}
# NOTE(review): Data_storage_sd is built but not used by the plots below.
#Plot species richness at different dispersal levels
# library() instead of require(): require() only warns when the package is
# missing, postponing the failure to an obscure error at the first ggplot()
# call; library() fails immediately with a clear message.
library(ggplot2) #need to define x and y within aes in ggplot
# NOTE(review): ggplot2 >= 3.4 prefers linewidth= over size= for line width;
# size= is kept below for compatibility with older ggplot2 versions.
ggplot(Data_storage_avg, aes(x=Dispersal, y=SR, color=Scale, group=Scale)) +
  geom_line(size=2) + #plots data as lines
  scale_x_log10(breaks=DispV) + #sets x axis to log10 scale
  theme_bw(base_size = 18) + #gets rid of grey background
  theme(panel.grid.major = element_blank(), panel.grid.minor = element_blank()) #removes grid lines
#Plot phylogenetic diversity at different dispersal levels
ggplot(Data_storage_avg, aes(x=Dispersal, y=PD, color=Scale, group=Scale)) +
  geom_line(size=2) +
  scale_x_log10(breaks=DispV) +
  theme_bw(base_size = 18) +
  theme(panel.grid.major = element_blank(), panel.grid.minor = element_blank())
#MPD_pa may not be robust to reps, need to check
#Plot mean pairwise distance at different dispersal levels
ggplot(Data_storage_avg, aes(x=Dispersal, y=MPD_pa, color=Scale, group=Scale)) +
  geom_line(size=2, na.rm=T) +
  scale_x_log10(breaks=DispV) +
  facet_grid(.~Scale) + #plots local and regional side-by-side
  theme_bw(base_size = 18) +
  theme(panel.grid.major = element_blank(), panel.grid.minor = element_blank())
#Plot abundance-weighted mean pairwise distance at different dispersal levels
ggplot(Data_storage_avg, aes(x=Dispersal, y=MPD_abund, color=Scale, group=Scale)) +
  geom_line(size=2, na.rm=T) +
  scale_x_log10(breaks=DispV) +
  theme_bw(base_size = 18) +
  theme(panel.grid.major = element_blank(), panel.grid.minor = element_blank())
#Plot abundance-weighted mean nearest taxon index at different dispersal levels
ggplot(Data_storage_avg, aes(x=Dispersal, y=MNTD_abund, color=Scale, group=Scale)) +
  geom_line(size=2, na.rm=T) +
  facet_grid(.~Scale) +
  scale_x_log10(breaks=DispV) +
  theme_bw(base_size = 18) +
  theme(panel.grid.major = element_blank(), panel.grid.minor = element_blank())
#Plot presence-absence mean pairwise distance at different species richnesses
ggplot(Data_storage_avg, aes(x=SR, y=MPD_pa, color=Scale, group=Scale)) +
  geom_line(size=2, na.rm=T) +
  #scale_x_log10(breaks=DispV)+ #sets x axis to log10 scale
  facet_grid(.~Scale) +
  theme_bw(base_size = 18) +
  theme(panel.grid.major = element_blank(), panel.grid.minor = element_blank())
##THESE SHOULD PROBABLY BE MOVED WITHIN THE FOR-LOOP
# NOTE(review): at this point SIH_data holds only the *last* replicate of the
# loop above, so these trees describe a single replicate.
#plot phylo####
par(mfrow=c(3,3)) # one tree panel per dispersal rate
# Local trees: tip symbols scaled by species abundance in patch 3 at time
# step 400, range-standardised with vegan's decostand(); '3' looks like an
# arbitrary focal patch -- TODO confirm.  ##check
for(i in 1:length(DispV)){
com_data<-t(SIH_data[["Abund",i]][400,,]) # patches x species at t = 400
plot(SIH_data[["phylo",i]],show.tip.label = F,main=paste("Dispersal = ",DispV[i]))
tiplabels(pch=22,bg=heat.colors(nspecies)[1:nspecies], cex=decostand(com_data[3,],method = 'range')*5)
}
par(mfrow=c(3,3)) # regional phylogenetic trees ##check
for(i in 1:length(DispV)){
com_data<-matrix(colSums(t(SIH_data[["Abund",i]][400,,])),1,nspecies) # patch-summed abundances
plot(SIH_data[["phylo",i]],show.tip.label = F,main=paste("Dispersal = ",DispV[i]))
tiplabels(pch=22,bg=heat.colors(nspecies)[1:nspecies], cex=com_data/max(com_data)*5)
}
# Presence/absence trees for patch 3.
# BUG FIX: the original wrote cex=3*com_data[3,]>0, which parses as
# (3*com_data[3,]) > 0 -- a logical vector, so the 3* was a no-op and
# present species were drawn at cex 1.  The intended "size 3 if present,
# 0 if absent" needs the comparison inside the parentheses.
for(i in 1:length(DispV)){
com_data<-t(SIH_data[["Abund",i]][400,,])
plot(SIH_data[["phylo",i]],show.tip.label = F,main=paste("Dispersal = ",DispV[i]))
tiplabels(pch=22,bg=heat.colors(nspecies)[1:nspecies], cex=3*(com_data[3,]>0))
}
| /vary_eff_run_replicates.R | no_license | ArielGreiner/SIH-BasicModel | R | false | false | 11,606 | r | nspecies<-7 #the number of species
npatches<-10 #the number of patches
nreplicates<-5 #number of replicates
DispV<-c(0.0001,0.0005,0.001,0.005,0.01,0.05,0.1,0.5,1) #the dispersal rates
Data_storage<-data.frame(SR=NA,Biomass=NA,Biomass_CV=NA,PD=NA,MPD_abund=NA,MPD_pa=NA,MNTD_abund=NA,Dispersal=rep(DispV,each=nreplicates),ReplicateNum=factor(1:nreplicates),Scale=rep(c("Local","Regional"),each=length(DispV)*nreplicates)) #building the data frame
for(j in 1:nreplicates){
#runs the SIH model at all dispersal rates in DispV and saves the abundances and productivity in a list
eff_values<-rnorm(nspecies,mean=0.2,sd=0.005)
SIH_data<-sapply(DispV,SIH_function,species=nspecies,patches=npatches,eff_vary=T,eff_values=eff_values)
#MPD_abund = mean pairwise distance, abundance weighted (vs pa = presence absence)
#need to put the NAs in initially
#MNTD = mean nearest taxon index, measures tippiness of tree
#need to add 'ses' to some? of the phylogenetic functions to make them compare the observed with a null
#right now none of them are doing that
for(i in 1:length(DispV)){
#calculate species richness at the local and regional scale
Data_storage$SR[Data_storage$Dispersal==DispV[i] & Data_storage$ReplicateNum==j & Data_storage$Scale == "Local"] <-mean(rowMeans(apply(SIH_data[["Abund",i]]>0,3,rowSums)))
Data_storage$SR[Data_storage$Dispersal==DispV[i] & Data_storage$ReplicateNum==j & Data_storage$Scale == "Regional"]<-mean(rowSums(apply(SIH_data[["Abund",i]],2,rowMeans)>0)) #regional SR of all species in each time step
#At the local scale...
com_data<-t(SIH_data[["Abund",i]][400,,])
colnames(com_data)<-1:nspecies
Data_storage$PD[Data_storage$Dispersal==DispV[i] & Data_storage$ReplicateNum==j & Data_storage$Scale == "Local"]<-mean(pd(com_data,SIH_data[["phylo",i]])$PD)
Data_storage$MPD_abund[Data_storage$Dispersal==DispV[i] & Data_storage$ReplicateNum==j & Data_storage$Scale == "Local"]<-mean(mpd(com_data,cophenetic(SIH_data[["phylo",i]]),abundance.weighted = T))
Data_storage$MPD_pa[Data_storage$Dispersal==DispV[i] & Data_storage$ReplicateNum==j & Data_storage$Scale == "Local"]<-mean(mpd(com_data,cophenetic(SIH_data[["phylo",i]]),abundance.weighted = F))
Data_storage$MNTD_abund[Data_storage$Dispersal==DispV[i] & Data_storage$ReplicateNum==j & Data_storage$Scale == "Local"]<-mean(mntd(com_data,cophenetic(SIH_data[["phylo",i]]),abundance.weighted = T))
#At the regional scale
com_data<-matrix(colSums(t(SIH_data[["Abund",i]][400,,])),1,nspecies)
colnames(com_data)<-1:nspecies
Data_storage$PD[Data_storage$Dispersal==DispV[i] & Data_storage$ReplicateNum==j & Data_storage$Scale == "Regional"]<-pd(com_data,SIH_data[["phylo",i]])$PD
Data_storage$MPD_abund[Data_storage$Dispersal==DispV[i] & Data_storage$ReplicateNum==j & Data_storage$Scale == "Regional"]<-mpd(com_data,cophenetic(SIH_data[["phylo",i]]),abundance.weighted = T)
Data_storage$MPD_pa[Data_storage$Dispersal==DispV[i] & Data_storage$ReplicateNum==j & Data_storage$Scale == "Regional"]<-mpd(com_data,cophenetic(SIH_data[["phylo",i]]),abundance.weighted = F)
Data_storage$MNTD_abund[Data_storage$Dispersal==DispV[i] & Data_storage$ReplicateNum==j & Data_storage$Scale == "Regional"]<-mntd(com_data,cophenetic(SIH_data[["phylo",i]]),abundance.weighted = T)
}
}
#average over all of the replicates...
Data_storage_avg<-data.frame(SR=NA,Biomass=NA,Biomass_CV=NA,PD=NA,MPD_abund=NA,MPD_pa=NA,MNTD_abund=NA,Dispersal=DispV,Scale=rep(c("Local","Regional"),each=length(DispV))) #building the data frame
for(i in 1:length(DispV)){
#calculate species richness at the local and regional scale
Data_storage_avg$SR[Data_storage_avg$Dispersal==DispV[i] & Data_storage_avg$Scale == "Local"] <-mean(Data_storage$SR[Data_storage$Dispersal==DispV[i] & Data_storage$Scale == "Local"])
Data_storage_avg$SR[Data_storage_avg$Dispersal==DispV[i] & Data_storage_avg$Scale == "Regional"]<-mean(Data_storage$SR[Data_storage$Dispersal==DispV[i] & Data_storage$Scale == "Regional"])
#At the local scale...
Data_storage_avg$PD[Data_storage_avg$Dispersal==DispV[i] & Data_storage_avg$Scale == "Local"]<-mean(Data_storage$PD[Data_storage$Dispersal==DispV[i] & Data_storage$Scale == "Local"])
Data_storage_avg$MPD_abund[Data_storage_avg$Dispersal==DispV[i] & Data_storage_avg$Scale == "Local"]<-mean(Data_storage$MPD_abund[Data_storage$Dispersal==DispV[i] & Data_storage$Scale == "Local"])
Data_storage_avg$MPD_pa[Data_storage_avg$Dispersal==DispV[i] & Data_storage_avg$Scale == "Local"]<-mean(Data_storage$MPD_pa[Data_storage$Dispersal==DispV[i] & Data_storage$Scale == "Local"])
Data_storage_avg$MNTD_abund[Data_storage_avg$Dispersal==DispV[i] & Data_storage_avg$Scale == "Local"]<-mean(Data_storage$MNTD_abund[Data_storage$Dispersal==DispV[i] & Data_storage$Scale == "Local"])
#At the regional scale
Data_storage_avg$PD[Data_storage_avg$Dispersal==DispV[i] & Data_storage_avg$Scale == "Regional"]<-mean(Data_storage$PD[Data_storage$Dispersal==DispV[i] & Data_storage$Scale == "Regional"])
Data_storage_avg$MPD_abund[Data_storage_avg$Dispersal==DispV[i] & Data_storage_avg$Scale == "Regional"]<-mean(Data_storage$MPD_abund[Data_storage$Dispersal==DispV[i] & Data_storage$Scale == "Regional"])
Data_storage_avg$MPD_pa[Data_storage_avg$Dispersal==DispV[i] & Data_storage_avg$Scale == "Regional"]<-mean(Data_storage$MPD_pa[Data_storage$Dispersal==DispV[i] & Data_storage$Scale == "Regional"])
Data_storage_avg$MNTD_abund[Data_storage_avg$Dispersal==DispV[i] & Data_storage_avg$Scale == "Regional"]<-mean(Data_storage$MNTD_abund[Data_storage$Dispersal==DispV[i] & Data_storage$Scale == "Regional"])
}
#take the s.d. over all of the replicates...
Data_storage_sd<-data.frame(SR=NA,Biomass=NA,Biomass_CV=NA,PD=NA,MPD_abund=NA,MPD_pa=NA,MNTD_abund=NA,Dispersal=DispV,Scale=rep(c("Local","Regional"),each=length(DispV))) #building the data frame
for(i in 1:length(DispV)){
#calculate species richness at the local and regional scale
Data_storage_sd$SR[Data_storage_sd$Dispersal==DispV[i] & Data_storage_sd$Scale == "Local"] <-sd(Data_storage$SR[Data_storage$Dispersal==DispV[i] & Data_storage$Scale == "Local"])
Data_storage_sd$SR[Data_storage_sd$Dispersal==DispV[i] & Data_storage_sd$Scale == "Regional"]<-sd(Data_storage$SR[Data_storage$Dispersal==DispV[i] & Data_storage$Scale == "Regional"])
#At the local scale...
Data_storage_sd$PD[Data_storage_sd$Dispersal==DispV[i] & Data_storage_sd$Scale == "Local"]<-sd(Data_storage$PD[Data_storage$Dispersal==DispV[i] & Data_storage$Scale == "Local"])
Data_storage_sd$MPD_abund[Data_storage_sd$Dispersal==DispV[i] & Data_storage_sd$Scale == "Local"]<-sd(Data_storage$MPD_abund[Data_storage$Dispersal==DispV[i] & Data_storage$Scale == "Local"])
Data_storage_sd$MPD_pa[Data_storage_sd$Dispersal==DispV[i] & Data_storage_sd$Scale == "Local"]<-sd(Data_storage$MPD_pa[Data_storage$Dispersal==DispV[i] & Data_storage$Scale == "Local"])
Data_storage_sd$MNTD_abund[Data_storage_sd$Dispersal==DispV[i] & Data_storage_sd$Scale == "Local"]<-sd(Data_storage$MNTD_abund[Data_storage$Dispersal==DispV[i] & Data_storage$Scale == "Local"])
#At the regional scale
Data_storage_sd$PD[Data_storage_sd$Dispersal==DispV[i] & Data_storage_sd$Scale == "Regional"]<-sd(Data_storage$PD[Data_storage$Dispersal==DispV[i] & Data_storage$Scale == "Regional"])
Data_storage_sd$MPD_abund[Data_storage_sd$Dispersal==DispV[i] & Data_storage_sd$Scale == "Regional"]<-sd(Data_storage$MPD_abund[Data_storage$Dispersal==DispV[i] & Data_storage$Scale == "Regional"])
Data_storage_sd$MPD_pa[Data_storage_sd$Dispersal==DispV[i] & Data_storage_sd$Scale == "Regional"]<-sd(Data_storage$MPD_pa[Data_storage$Dispersal==DispV[i] & Data_storage$Scale == "Regional"])
Data_storage_sd$MNTD_abund[Data_storage_sd$Dispersal==DispV[i] & Data_storage_sd$Scale == "Regional"]<-sd(Data_storage$MNTD_abund[Data_storage$Dispersal==DispV[i] & Data_storage$Scale == "Regional"])
}
#Plot species richness at different dispersal levels
require(ggplot2) #need to define x and y within aes in ggplot
ggplot(Data_storage_avg,aes(x=Dispersal,y=SR,color=Scale,group=Scale))+
geom_line(size=2)+ #plots data as lines
scale_x_log10(breaks=DispV)+ #sets x axis to log10 scale
theme_bw(base_size = 18)+ #gets rid of grey background
theme(panel.grid.major = element_blank(), panel.grid.minor = element_blank()) #removes grid lines
#Plot phylogenetic diversity at different dispersal levels
ggplot(Data_storage_avg,aes(x=Dispersal,y=PD,color=Scale,group=Scale))+
geom_line(size=2)+ #plots data as lines
scale_x_log10(breaks=DispV)+ #sets x axis to log10 scale
theme_bw(base_size = 18)+ #gets rid of grey background
theme(panel.grid.major = element_blank(), panel.grid.minor = element_blank()) #removes grid lines
#MPD_pa may not be robust to reps, need to check
#Plot mean pairwise distance at different dispersal levels
ggplot(Data_storage_avg,aes(x=Dispersal,y=MPD_pa,color=Scale,group=Scale))+
geom_line(size=2,na.rm=T)+ #plots data as lines
scale_x_log10(breaks=DispV)+ #sets x axis to log10 scale
facet_grid(.~Scale)+ #plots local and regional side-by-side
theme_bw(base_size = 18)+ #gets rid of grey background
theme(panel.grid.major = element_blank(), panel.grid.minor = element_blank())
#Plot abundance-weighted mean pairwise distance at different dispersal levels
ggplot(Data_storage_avg,aes(x=Dispersal,y=MPD_abund,color=Scale,group=Scale))+
geom_line(size=2,na.rm=T)+ #plots data as lines
scale_x_log10(breaks=DispV)+ #sets x axis to log10 scale
theme_bw(base_size = 18)+ #gets rid of grey background
theme(panel.grid.major = element_blank(), panel.grid.minor = element_blank()) #removes grid lines
#Plot abundance-weighted mean nearest taxon index at different dispersal levels
ggplot(Data_storage_avg,aes(x=Dispersal,y=MNTD_abund,color=Scale,group=Scale))+
geom_line(size=2,na.rm=T)+ #plots data as lines
facet_grid(.~Scale)+
scale_x_log10(breaks=DispV)+ #sets x axis to log10 scale
theme_bw(base_size = 18)+ #gets rid of grey background
theme(panel.grid.major = element_blank(), panel.grid.minor = element_blank()) #removes grid lines
#Plot presence-absence mean pairwise distance at different species richnesses
ggplot(Data_storage_avg,aes(x=SR,y=MPD_pa,color=Scale,group=Scale))+
geom_line(size=2,na.rm=T)+ #plots data as lines
#scale_x_log10(breaks=DispV)+ #sets x axis to log10 scale
facet_grid(.~Scale)+
theme_bw(base_size = 18)+ #gets rid of grey background
theme(panel.grid.major = element_blank(), panel.grid.minor = element_blank())
##THESE SHOULD PROBABLY BE MOVED WITHIN THE FOR-LOOP
#plot phylo####
# Phylogenies per dispersal level with tip symbols scaled by abundance
# at time step 400 of SIH_data.
# NOTE(review): the hard-coded row index 3 below selects one community
# (patch) out of the abundance array -- confirm which one is intended.
par(mfrow=c(3,3)) #local phylogenetic tree from whatever '3' is (see below) ##check
for(i in 1:length(DispV)){
com_data<-t(SIH_data[["Abund",i]][400,,])
plot(SIH_data[["phylo",i]],show.tip.label = F,main=paste("Dispersal = ",DispV[i]))
tiplabels(pch=22,bg=heat.colors(nspecies)[1:nspecies], cex=decostand(com_data[3,],method = 'range')*5)
}
par(mfrow=c(3,3)) #regional phylogenetic tree ##check
for(i in 1:length(DispV)){
com_data<-matrix(colSums(t(SIH_data[["Abund",i]][400,,])),1,nspecies)
plot(SIH_data[["phylo",i]],show.tip.label = F,main=paste("Dispersal = ",DispV[i]))
tiplabels(pch=22,bg=heat.colors(nspecies)[1:nspecies], cex=com_data/max(com_data)*5)
}
# Presence/absence version for the same community 3.
# NOTE(review): `cex=3*com_data[3,]>0` parses as (3*com_data[3,]) > 0,
# yielding a logical (cex 1/0); `cex=3*(com_data[3,]>0)` was probably
# intended -- confirm before relying on these plots.
for(i in 1:length(DispV)){
com_data<-t(SIH_data[["Abund",i]][400,,])
plot(SIH_data[["phylo",i]],show.tip.label = F,main=paste("Dispersal = ",DispV[i]))
tiplabels(pch=22,bg=heat.colors(nspecies)[1:nspecies], cex=3*com_data[3,]>0)
}
|
#' # print.rgcca_permutation
#'''
# Snapshot tests for print() on rgcca_permutation objects.  Both tests
# are gated on the TEST_SNAPSHOTS environment variable and only run
# when as.logical(Sys.getenv("TEST_SNAPSHOTS")) is TRUE.
set.seed(0)
data(Russett)
# Three blocks of the Russett data: agriculture (cols 1-3),
# industry (cols 4-5) and politics (cols 6-11).
blocks <- list(
agriculture = Russett[, seq(3)],
industry = Russett[, 4:5],
politic = Russett[, 6:11]
)
test_that("print.rgcca_permutation prints the expected text", {
skip_if_not(as.logical(Sys.getenv("TEST_SNAPSHOTS")))
local_edition(3)
expect_snapshot({
res <- rgcca_permutation(blocks,
par_type = "tau", par_length = 2,
n_perms = 5, n_cores = 1, verbose = FALSE
)
print(res)
})
})
# Same check with nine unnamed blocks, tuning ncomp instead of tau.
test_that("print.rgcca_permutation prints the expected text 2", {
skip_if_not(as.logical(Sys.getenv("TEST_SNAPSHOTS")))
local_edition(3)
expect_snapshot({
blocks2 <- rep(blocks, 3)
names(blocks2) <- NULL
res <- rgcca_permutation(blocks2,
par_type = "ncomp", par_length = 2,
n_perms = 2, n_cores = 1, verbose = FALSE
)
print(res)
})
})
| /tests/testthat/test_print.rgcca_permutation.R | no_license | rgcca-factory/RGCCA | R | false | false | 860 | r | #' # print.rgcca_permutation
#'''
set.seed(0)
data(Russett)
blocks <- list(
agriculture = Russett[, seq(3)],
industry = Russett[, 4:5],
politic = Russett[, 6:11]
)
test_that("print.rgcca_permutation prints the expected text", {
skip_if_not(as.logical(Sys.getenv("TEST_SNAPSHOTS")))
local_edition(3)
expect_snapshot({
res <- rgcca_permutation(blocks,
par_type = "tau", par_length = 2,
n_perms = 5, n_cores = 1, verbose = FALSE
)
print(res)
})
})
test_that("print.rgcca_permutation prints the expected text 2", {
skip_if_not(as.logical(Sys.getenv("TEST_SNAPSHOTS")))
local_edition(3)
expect_snapshot({
blocks2 <- rep(blocks, 3)
names(blocks2) <- NULL
res <- rgcca_permutation(blocks2,
par_type = "ncomp", par_length = 2,
n_perms = 2, n_cores = 1, verbose = FALSE
)
print(res)
})
})
|
#' Set the R working directory.
#'
#' If \code{dir_path} is supplied, the working directory is changed to
#' it; otherwise the user is prompted interactively for a path.
#'
#' @param dir_path Optional character string naming the target directory.
#' @return Invisibly, the previous working directory (the value of setwd()).
set_dir <- function(dir_path) {
  if (missing(dir_path)) {
    # Interactive fallback: ask the user where to go.  readline()
    # already returns a character string; as.character() is kept for
    # robustness, matching the original behaviour.
    dir_path <- as.character(readline(prompt = "Directory: "))
  }
  setwd(dir_path)
}
#' Locate the MITERRA-PORTUGAL project root among the known per-machine
#' candidate paths and make it the working directory.
#'
#' The first candidate directory that exists on this machine is used;
#' an informative error is raised when none exists (the original code
#' would instead fail inside setwd() with a cryptic message, and would
#' also fail if more than one candidate existed).
load_dir <- function() {
  dir_path <- '/home/jserra/grive/MITERRA/MITERRA-PORTUGAL/'
  dir_isa <- 'G:/My Drive/MITERRA/MITERRA-PORTUGAL/'
  dir_home <- 'G:/O meu disco/MITERRA/MITERRA-PORTUGAL/'
  store_db <- c(dir_path, dir_isa, dir_home)
  # dir.exists() already returns a logical vector; "== TRUE" is redundant.
  correct_path <- which(dir.exists(store_db))
  if (length(correct_path) == 0) {
    stop("None of the known MITERRA-PORTUGAL directories exist on this machine")
  }
  # Use the first existing candidate in case several are present.
  set_dir(store_db[correct_path[1]])
}
load_dir()
| /set_directory.R | permissive | shekharsg/MITERRA-PORTUGAL | R | false | false | 566 | r | set_dir <- function(dir_path) {
if (missing(dir_path)==TRUE) {
path <- readline(prompt="Directory: ")
path <- as.character(path)
setwd(path)
} else {
dir_path <- dir_path
setwd(dir_path)
}
}
#' Locate the MITERRA-PORTUGAL project root among the known per-machine
#' candidate paths and make it the working directory.
#'
#' The first candidate directory that exists on this machine is used;
#' an informative error is raised when none exists (the original code
#' would instead fail inside setwd() with a cryptic message, and would
#' also fail if more than one candidate existed).
load_dir <- function() {
  dir_path <- '/home/jserra/grive/MITERRA/MITERRA-PORTUGAL/'
  dir_isa <- 'G:/My Drive/MITERRA/MITERRA-PORTUGAL/'
  dir_home <- 'G:/O meu disco/MITERRA/MITERRA-PORTUGAL/'
  store_db <- c(dir_path, dir_isa, dir_home)
  # dir.exists() already returns a logical vector; "== TRUE" is redundant.
  correct_path <- which(dir.exists(store_db))
  if (length(correct_path) == 0) {
    stop("None of the known MITERRA-PORTUGAL directories exist on this machine")
  }
  # Use the first existing candidate in case several are present.
  set_dir(store_db[correct_path[1]])
}
load_dir()
|
% Generated by roxygen2 (4.1.0): do not edit by hand
% Please edit documentation in R/GHCN_FUNCTIONS.R
\name{getGHCNDailyStation}
\alias{getGHCNDailyStation}
\title{Download and extract the daily data for a GHCN weather station.}
\usage{
getGHCNDailyStation(ID, elements = NULL, raw.dir, standardize = F,
force.redo = F)
}
\arguments{
\item{ID}{A character string giving the station ID.}
\item{elements}{A character vector of elements to extract.
Common elements include "tmin", "tmax", and "prcp".}
\item{raw.dir}{A character string indicating where raw downloaded files should be put.}
\item{standardize}{Select only common year/month/day? Defaults to FALSE.}
\item{force.redo}{If this weather station has been downloaded before, should it be updated? Defaults to FALSE.}
}
\value{
A named list of \code{\link{data.frame}s}, one for each \code{elements}.
}
\description{
\code{getGHCNDailyStation} returns a named list of \code{\link{data.frame}s}, one for
each \code{elements}. If \code{elements} is undefined, it returns all available weather
tables for the station
}
| /man/getGHCNDailyStation.Rd | no_license | arturochian/FedData | R | false | false | 1,078 | rd | % Generated by roxygen2 (4.1.0): do not edit by hand
% Please edit documentation in R/GHCN_FUNCTIONS.R
\name{getGHCNDailyStation}
\alias{getGHCNDailyStation}
\title{Download and extract the daily data for a GHCN weather station.}
\usage{
getGHCNDailyStation(ID, elements = NULL, raw.dir, standardize = F,
force.redo = F)
}
\arguments{
\item{ID}{A character string giving the station ID.}
\item{elements}{A character vector of elements to extract.
Common elements include "tmin", "tmax", and "prcp".}
\item{raw.dir}{A character string indicating where raw downloaded files should be put.}
\item{standardize}{Select only common year/month/day? Defaults to FALSE.}
\item{force.redo}{If this weather station has been downloaded before, should it be updated? Defaults to FALSE.}
}
\value{
A named list of \code{\link{data.frame}s}, one for each \code{elements}.
}
\description{
\code{getGHCNDailyStation} returns a named list of \code{\link{data.frame}s}, one for
each \code{elements}. If \code{elements} is undefined, it returns all available weather
tables for the station
}
|
# EX_2_13: Monte Carlo estimate of the marginal distribution of X in a
# two-stage hierarchical model.
#   Stage 1: Y = 1e2 with probability 5/6, Y = 1e4 with probability 1/6.
#   Stage 2: X | Y = 1e2 takes values 1e-2, 1e-1, 1 with probs 1/2, 1/3, 1/6;
#            X | Y = 1e4 takes values 1, 10, 100 with the same probs.
rate <- 1000000          # number of Monte Carlo replications
y <- c(10^2, 10^4)       # support of Y
py <- c(5/6, 1/6)        # P(Y = y)
# Draw Y for every replication.
y1 <- sample(y, size = rate, replace = TRUE, prob = py)
# Draw X conditionally on Y.  Preallocate the full vector up front
# (the original preallocated only 100 slots and relied on R silently
# growing the vector during the logical-index assignments below).
x1 <- rep(NA_real_, rate)
x1[y1 == 10^2] <- sample(c(10^-2, 10^-1, 1), size = sum(y1 == 10^2),
                         replace = TRUE, prob = c(1/2, 1/3, 1/6))
x1[y1 == 10^4] <- sample(c(1, 10, 100), size = sum(y1 == 10^4),
                         replace = TRUE, prob = c(1/2, 1/3, 1/6))
# Conditional relative frequencies P(X = x | Y = y).
p1 <- mean(x1[y1 == 10^2] == 10^-2)
p2 <- mean(x1[y1 == 10^2] == 10^-1)
p3 <- mean(x1[y1 == 10^2] == 1)
p4 <- mean(x1[y1 == 10^4] == 1)
p5 <- mean(x1[y1 == 10^4] == 10)
p6 <- mean(x1[y1 == 10^4] == 100)
# Marginal probabilities by the law of total probability.
px.01 <- p1 * mean(y1 == 10^2)
px.1 <- p2 * mean(y1 == 10^2)
px1 <- p3 * mean(y1 == 10^2) + p4 * mean(y1 == 10^4)
px10 <- p5 * mean(y1 == 10^4)
px100 <- p6 * mean(y1 == 10^4)
px.1
px.01
px1
px10
px100
px <- c(px.01, px.1, px1, px10, px100)
px
plot(px, type = "b")
| /midhun/sample_code_R/EX_2_13.R | no_license | sahridhaya/BitPlease | R | false | false | 743 | r | #EX_2_13
# EX_2_13: Monte Carlo estimate of the marginal distribution of X in a
# two-stage hierarchical model.
#   Stage 1: Y = 1e2 with probability 5/6, Y = 1e4 with probability 1/6.
#   Stage 2: X | Y = 1e2 takes values 1e-2, 1e-1, 1 with probs 1/2, 1/3, 1/6;
#            X | Y = 1e4 takes values 1, 10, 100 with the same probs.
rate <- 1000000          # number of Monte Carlo replications
y <- c(10^2, 10^4)       # support of Y
py <- c(5/6, 1/6)        # P(Y = y)
# Draw Y for every replication.
y1 <- sample(y, size = rate, replace = TRUE, prob = py)
# Draw X conditionally on Y.  Preallocate the full vector up front
# (the original preallocated only 100 slots and relied on R silently
# growing the vector during the logical-index assignments below).
x1 <- rep(NA_real_, rate)
x1[y1 == 10^2] <- sample(c(10^-2, 10^-1, 1), size = sum(y1 == 10^2),
                         replace = TRUE, prob = c(1/2, 1/3, 1/6))
x1[y1 == 10^4] <- sample(c(1, 10, 100), size = sum(y1 == 10^4),
                         replace = TRUE, prob = c(1/2, 1/3, 1/6))
# Conditional relative frequencies P(X = x | Y = y).
p1 <- mean(x1[y1 == 10^2] == 10^-2)
p2 <- mean(x1[y1 == 10^2] == 10^-1)
p3 <- mean(x1[y1 == 10^2] == 1)
p4 <- mean(x1[y1 == 10^4] == 1)
p5 <- mean(x1[y1 == 10^4] == 10)
p6 <- mean(x1[y1 == 10^4] == 100)
# Marginal probabilities by the law of total probability.
px.01 <- p1 * mean(y1 == 10^2)
px.1 <- p2 * mean(y1 == 10^2)
px1 <- p3 * mean(y1 == 10^2) + p4 * mean(y1 == 10^4)
px10 <- p5 * mean(y1 == 10^4)
px100 <- p6 * mean(y1 == 10^4)
px.1
px.01
px1
px10
px100
px <- c(px.01, px.1, px1, px10, px100)
px
plot(px, type = "b")
|
## Put comments here that give an overall description of what your
## functions do
## Write a short comment describing this function
## Create a special "matrix" object that can cache its inverse.
## Returns a list of four accessor closures over `mtx` and the cached
## inverse:
##   set(x)      replace the stored matrix and invalidate the cache
##   get()       retrieve the stored matrix
##   setinv(inv) store a computed inverse in the cache
##   getinv()    retrieve the cached inverse (NULL when not yet computed)
makeCacheMatrix <- function(mtx = matrix()) {
  cached_inv <- NULL
  list(
    set = function(x) {
      mtx <<- x
      cached_inv <<- NULL
    },
    get = function() mtx,
    setinv = function(inv) cached_inv <<- inv,
    getinv = function() cached_inv
  )
}
## Write a short comment describing this function
## Compute the inverse of the special "matrix" created by makeCacheMatrix.
## If the inverse has already been cached it is returned directly;
## otherwise it is computed with solve(), stored in the cache, and
## returned.
##
## mtx : object returned by makeCacheMatrix()
## ... : further arguments passed on to solve()
## Returns the matrix inverse of mtx$get().
cacheSolve <- function(mtx, ...) {
    inverse <- mtx$getinv()
    if(!is.null(inverse)) {
        message("Getting cached data...")
        return(inverse)
    }
    data <- mtx$get()
    # BUG FIX: the result used to be assigned to a misspelled name
    # ("invserse"), so setinv() cached NULL and NULL was returned on
    # every cache miss.
    inverse <- solve(data, ...)
    mtx$setinv(inverse)
    return(inverse)
}
| /cachematrix.R | no_license | hkghanta/ProgrammingAssignment2 | R | false | false | 879 | r | ## Put comments here that give an overall description of what your
## functions do
## Write a short comment describing this function
## Create a special "matrix" object that can cache its inverse.
## Returns a list of four accessor closures over `mtx` and the cached
## inverse:
##   set(x)      replace the stored matrix and invalidate the cache
##   get()       retrieve the stored matrix
##   setinv(inv) store a computed inverse in the cache
##   getinv()    retrieve the cached inverse (NULL when not yet computed)
makeCacheMatrix <- function(mtx = matrix()) {
  cached_inv <- NULL
  list(
    set = function(x) {
      mtx <<- x
      cached_inv <<- NULL
    },
    get = function() mtx,
    setinv = function(inv) cached_inv <<- inv,
    getinv = function() cached_inv
  )
}
## Write a short comment describing this function
## Compute the inverse of the special "matrix" created by makeCacheMatrix.
## If the inverse has already been cached it is returned directly;
## otherwise it is computed with solve(), stored in the cache, and
## returned.
##
## mtx : object returned by makeCacheMatrix()
## ... : further arguments passed on to solve()
## Returns the matrix inverse of mtx$get().
cacheSolve <- function(mtx, ...) {
    inverse <- mtx$getinv()
    if(!is.null(inverse)) {
        message("Getting cached data...")
        return(inverse)
    }
    data <- mtx$get()
    # BUG FIX: the result used to be assigned to a misspelled name
    # ("invserse"), so setinv() cached NULL and NULL was returned on
    # every cache miss.
    inverse <- solve(data, ...)
    mtx$setinv(inverse)
    return(inverse)
}
|
## Rank BEB site (level 515) tracks by how closely their recorded and
## measured dimensions match a target footprint, to flag likely matches
## for an unknown track.
library(dplyr)
library(magrittr)
library(ggplot2)
# getTracksData() is defined in the sourced TrackUtils.r (not visible here).
source('src/support/TrackUtils.r')
tracks <- getTracksData()
# Target footprint dimensions (units per TrackUtils conventions -- TODO confirm).
WIDTH_TARGET <- 0.37
LENGTH_TARGET <- 0.44
# Keep BEB / level-515 tracks with strictly positive recorded and
# measured dimensions.
bebTracks <- dplyr::filter(tracks, tracks$site == 'BEB' & tracks$level == '515') %>%
dplyr::filter(width > 0 & length > 0) %>%
dplyr::filter(widthMeasured > 0 & lengthMeasured > 0)
# Euclidean distance of the four dimension values from the targets;
# smaller ranking = closer match.
bebTracks$ranking <- sqrt(
(bebTracks$width - WIDTH_TARGET)^2 +
(bebTracks$length - LENGTH_TARGET)^2 +
(bebTracks$widthMeasured - WIDTH_TARGET)^2 +
(bebTracks$lengthMeasured - LENGTH_TARGET)^2)
# Visualise the distribution of ranking values.
ggplot2::ggplot(bebTracks, aes(ranking)) + ggplot2::geom_histogram()
# NOTE(review): `ranking` is numeric, so dplyr::filter() will error here
# (filter conditions must be logical); a threshold such as
# bebTracks$ranking < cutoff was presumably intended -- confirm and fix.
bebTracks <- dplyr::filter(bebTracks, bebTracks$ranking)
# NOTE(review): `result` is never defined in this script; `bebTracks`
# was presumably the object meant to be written out.
write.csv(result, 'output/Unknown-Track-Matches.csv')
| /statistics/src/Data-Valid-Paper/FindUnknownTrack.R | no_license | sernst/Cadence | R | false | false | 727 | r | library(dplyr)
library(magrittr)
library(ggplot2)
source('src/support/TrackUtils.r')
tracks <- getTracksData()
WIDTH_TARGET <- 0.37
LENGTH_TARGET <- 0.44
bebTracks <- dplyr::filter(tracks, tracks$site == 'BEB' & tracks$level == '515') %>%
dplyr::filter(width > 0 & length > 0) %>%
dplyr::filter(widthMeasured > 0 & lengthMeasured > 0)
bebTracks$ranking <- sqrt(
(bebTracks$width - WIDTH_TARGET)^2 +
(bebTracks$length - LENGTH_TARGET)^2 +
(bebTracks$widthMeasured - WIDTH_TARGET)^2 +
(bebTracks$lengthMeasured - LENGTH_TARGET)^2)
ggplot2::ggplot(bebTracks, aes(ranking)) + ggplot2::geom_histogram()
bebTracks <- dplyr::filter(bebTracks, bebTracks$ranking)
write.csv(result, 'output/Unknown-Track-Matches.csv')
|
% Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/textreuse-package.r
\docType{package}
\name{textreuse}
\alias{textreuse}
\alias{textreuse-package}
\title{textreuse: Detect Text Reuse and Document Similarity}
\description{
textreuse: Detect Text Reuse and Document Similarity
}
\references{
The sample data provided in the files \code{ca1851-match.txt},
\code{ca1851-nomatch.txt}, \code{ny1850-match.txt}, all of which are in the
\code{extdata/} directory, are taken from the following nineteenth-century
codes of civil procedure from California and New York.
\emph{Final Report of the Commissioners on Practice and Pleadings}, in 2
\emph{Documents of the Assembly of New York}, 73rd Sess., No. 16, (1850):
243-250, sections 597-613.
\href{http://books.google.com/books?id=9HEbAQAAIAAJ&pg=PA243#v=onepage&q&f=false}{Google
Books}.
\emph{An Act To Regulate Proceedings in Civil Cases}, 1851 \emph{California
Laws} 51, 51-53 sections 4-17; 101, sections 313-316.
\href{http://books.google.com/books?id=4PHEAAAAIAAJ&pg=PA51#v=onepage&q&f=false}{Google
Books}.
}
| /man/textreuse.Rd | permissive | mdlincoln/textreuse | R | false | false | 1,099 | rd | % Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/textreuse-package.r
\docType{package}
\name{textreuse}
\alias{textreuse}
\alias{textreuse-package}
\title{textreuse: Detect Text Reuse and Document Similarity}
\description{
textreuse: Detect Text Reuse and Document Similarity
}
\references{
The sample data provided in the files \code{ca1851-match.txt},
\code{ca1851-nomatch.txt}, \code{ny1850-match.txt}, all of which are in the
\code{extdata/} directory, are taken from the following nineteenth-century
codes of civil procedure from California and New York.
\emph{Final Report of the Commissioners on Practice and Pleadings}, in 2
\emph{Documents of the Assembly of New York}, 73rd Sess., No. 16, (1850):
243-250, sections 597-613.
\href{http://books.google.com/books?id=9HEbAQAAIAAJ&pg=PA243#v=onepage&q&f=false}{Google
Books}.
\emph{An Act To Regulate Proceedings in Civil Cases}, 1851 \emph{California
Laws} 51, 51-53 sections 4-17; 101, sections 313-316.
\href{http://books.google.com/books?id=4PHEAAAAIAAJ&pg=PA51#v=onepage&q&f=false}{Google
Books}.
}
|
## Test filter trace tibble ##
# Fixtures: ex_trace_tbl is the package's example trace tibble, and
# find_peaks_manual() converts hand-picked start/end sample numbers into
# a list of per-peak sample-index vectors (10 peaks here).
tbl <- ex_trace_tbl
x <- find_peaks_manual(
start = c(35390, 45451, 55504, 65557, 75576, 85656, 95718, 105768, 115828, 125888),
end = c(35593, 45650, 55700, 65771, 75825, 85877, 95932, 105978, 116042, 126087))
# Total number of peak samples, and the span from the first peak's start
# to the last (10th) peak's end.
peaks_sum <- sum(purrr::map_int(x, length))
peaks_extent <- length(seq(purrr::map_int(x, min)[[1]],
purrr::map_int(x, max)[[10]]))
# Span covered by the stimuli detected with a 9000 threshold.
stims <- stimuli_samples(tbl, 9000)
stimuli_extent <- length(seq(min(stims), max(stims)))
test_that("test filter_peaks", {
# Correct column names
expect_equal(names(filter_peaks(tbl, x)),
c("sample", "secs", "stimulus", "response", "sub_sample", "peak_nr"))
# Correct values in peak_nr and sub_sample columns
expect_equal(unique(filter_peaks(tbl, x)$peak_nr), 1:10)
expect_equal(unique(filter_peaks(tbl, x)$sub_sample),
1:max(purrr::map_int(x, length)))
# Correct filtering
expect_equal(nrow(filter_peaks(tbl, x)), peaks_sum)
# With length_out: Correct filtering with more than max and less than min
expect_equal(nrow(filter_peaks(tbl, x, length_out = 300)), 3000)
expect_equal(nrow(filter_peaks(tbl, x, length_out = 100)), 1000)
})
# The +2000 / +1000 offsets below suggest the default buffer (and
# buffer = 50) is applied on both sides of each window -- see the
# filter_full_* documentation to confirm the exact semantics.
test_that("test filter by stimuli", {
expect_equal(nrow(filter_full_stimuli(tbl, buffer = 0)), stimuli_extent)
expect_equal(nrow(filter_full_stimuli(tbl)), stimuli_extent + 2000)
expect_equal(nrow(filter_full_stimuli(tbl, buffer = 50)), stimuli_extent + 1000)
})
test_that("test filter by response", {
expect_equal(nrow(filter_full_response(tbl, x, buffer = 0)), peaks_extent)
expect_equal(nrow(filter_full_response(tbl, x)), peaks_extent + 2000)
expect_equal(nrow(filter_full_response(tbl, x, buffer = 50)), peaks_extent + 1000)
})
| /tests/testthat/test-filter-tbl.R | permissive | jessesadler/madisonamg | R | false | false | 1,739 | r | ## Test filter trace tibble ##
tbl <- ex_trace_tbl
x <- find_peaks_manual(
start = c(35390, 45451, 55504, 65557, 75576, 85656, 95718, 105768, 115828, 125888),
end = c(35593, 45650, 55700, 65771, 75825, 85877, 95932, 105978, 116042, 126087))
peaks_sum <- sum(purrr::map_int(x, length))
peaks_extent <- length(seq(purrr::map_int(x, min)[[1]],
purrr::map_int(x, max)[[10]]))
stims <- stimuli_samples(tbl, 9000)
stimuli_extent <- length(seq(min(stims), max(stims)))
test_that("test filter_peaks", {
# Correct column names
expect_equal(names(filter_peaks(tbl, x)),
c("sample", "secs", "stimulus", "response", "sub_sample", "peak_nr"))
# Correct values in peak_nr and sub_sample columns
expect_equal(unique(filter_peaks(tbl, x)$peak_nr), 1:10)
expect_equal(unique(filter_peaks(tbl, x)$sub_sample),
1:max(purrr::map_int(x, length)))
# Correct filtering
expect_equal(nrow(filter_peaks(tbl, x)), peaks_sum)
# With length_out: Correct filtering with more than max and less than min
expect_equal(nrow(filter_peaks(tbl, x, length_out = 300)), 3000)
expect_equal(nrow(filter_peaks(tbl, x, length_out = 100)), 1000)
})
test_that("test filter by stimuli", {
expect_equal(nrow(filter_full_stimuli(tbl, buffer = 0)), stimuli_extent)
expect_equal(nrow(filter_full_stimuli(tbl)), stimuli_extent + 2000)
expect_equal(nrow(filter_full_stimuli(tbl, buffer = 50)), stimuli_extent + 1000)
})
test_that("test filter by response", {
expect_equal(nrow(filter_full_response(tbl, x, buffer = 0)), peaks_extent)
expect_equal(nrow(filter_full_response(tbl, x)), peaks_extent + 2000)
expect_equal(nrow(filter_full_response(tbl, x, buffer = 50)), peaks_extent + 1000)
})
|
# Auto-generated regression input (libFuzzer/valgrind run 1612726799)
# for MGDrivE::calcCos.  The 9x1 latLongs matrix holds denormal and
# near-zero doubles to probe numeric edge cases; r = 0.
testlist <- list(latLongs = structure(c(1.97626258336499e-322, 2.6461938652295e-260, 2.6461938652295e-260, 2.6461938652295e-260, 2.64619386524133e-260, 2.6461938652295e-260, 2.64619511850766e-260, 1.64900014794264e-260, 0), .Dim = c(9L, 1L)), r = 0)
result <- do.call(MGDrivE::calcCos,testlist)
str(result) | /MGDrivE/inst/testfiles/calcCos/libFuzzer_calcCos/calcCos_valgrind_files/1612726799-test.R | no_license | akhikolla/updatedatatype-list2 | R | false | false | 309 | r | testlist <- list(latLongs = structure(c(1.97626258336499e-322, 2.6461938652295e-260, 2.6461938652295e-260, 2.6461938652295e-260, 2.64619386524133e-260, 2.6461938652295e-260, 2.64619511850766e-260, 1.64900014794264e-260, 0), .Dim = c(9L, 1L)), r = 0)
result <- do.call(MGDrivE::calcCos,testlist)
str(result) |
# 1. Exercise: write a function that checks whether the first number is
#    divisible by the second one, using the modulo operator %%.
#    (The printed messages are deliberately kept in Polish:
#    "podzielna" = divisible, "niepodzielna" = not divisible.)
podzial <- function(liczba1, liczba2) {
if (liczba1 %% liczba2 == 0) {
print("Pierwsza liczba jest podzielna przez druga")}
else {
print("Pierwsza liczba jest niepodzielna przez druga")}
}
# Example: 10 is divisible by 2.
podzial(10, 2)
podzial(13, 7) | /Topic1Zadanie1.r | no_license | kowalczykwiktor/r | R | false | false | 317 | r | #1. Napisz funkcję sprawdzająca czy 1 liczba jest podzielna przez druga użyj - %%
podzial <- function(liczba1, liczba2) {
if (liczba1 %% liczba2 == 0) {
print("Pierwsza liczba jest podzielna przez druga")}
else {
print("Pierwsza liczba jest niepodzielna przez druga")}
}
podzial(10, 2)
podzial(13, 7) |
# Conditional density estimate for the Old Faithful data: eruption
# duration conditional on waiting time, drawn as highest density region
# (HDR) bands via the hdrcde package.
require(hdrcde)
data(faithful)
faithful.cde <- cde.est(faithful$waiting,faithful$eruptions)
plot(faithful.cde,xlab="Waiting time",ylab="Duration time",plot.fn="hdr")
# Overlay the raw observations on the HDR plot.
points(faithful$waiting,faithful$eruptions, pch="+", col="yellow")
| /R/rgg_code0105.R | no_license | skinnybob/r-matrix-school | R | false | false | 236 | r | require(hdrcde)
data(faithful)
faithful.cde <- cde.est(faithful$waiting,faithful$eruptions)
plot(faithful.cde,xlab="Waiting time",ylab="Duration time",plot.fn="hdr")
points(faithful$waiting,faithful$eruptions, pch="+", col="yellow")
|
## Initially, only four rows of data are read from the file, so the starting date and time of data collection is found out
## the starting date and time are calculated using the function strptime()
## dif will be the number of minutes from the starting point to 2007-02-01, time 00:00:00
## nlines will be the number of minutes between 2007-02-01, time 00:00:00 to 2007-02-02, time 23:59:00 (inclusive)
colNames <- c("Date", "Time", "Global_active_power", "Global_reactive_power", "Voltage", "Global_intensity", "Sub_metering_1",
"Sub_metering_2", "Sub_metering_3")
initial <- read.table("household_power_consumption.txt", col.names=colNames, na.strings="?", sep=";",
comment.char="", skip=1, nrows=4)
initial$Time <- strptime(paste(initial$Date,initial$Time), "%d/%m/%Y %H:%M:%S")
# NOTE(review): the difference of two POSIX times is a difftime whose
# units are chosen automatically; multiplying by 24*60 assumes the units
# are days -- confirm, or force units with difftime(..., units = "days").
dif <- as.integer((strptime("01/02/2007 00:00:00", "%d/%m/%Y %H:%M:%S") - initial[1,]$Time)*24*60)
# NOTE(review): hour "24" is not a valid %H value and may parse to NA on
# some platforms; "03/02/2007 00:00:00" would be the safe equivalent.
nlines <- as.integer((strptime("02/02/2007 24:00:00", "%d/%m/%Y %H:%M:%S") - strptime("01/02/2007 00:00:00", "%d/%m/%Y %H:%M:%S"))*24*60)
## the data from 2007-02-01, time 00:00:00 to 2007-02-02, time 23:59:00 is read
data <- read.table("household_power_consumption.txt", col.names=colNames, na.strings="?", sep=";",
comment.char="", skip=(dif+1), nrows=nlines)
## the required graph is made and put in plot1.png
png(file="plot1.png")
par(cex=0.8)
hist(data[,3], col="red", xlab="Global active power (kilowatts)", main="Global Active Power")
dev.off()
| /plot1.R | no_license | dlungeanu/ExData_Plotting1 | R | false | false | 1,495 | r | ## Initially, only four rows of data are read from the file, so the starting date and time of data collection is found out
## the starting date and time are calculated using the function strptime()
## dif will be the number of minutes from the starting point to 2007-02-01, time 00:00:00
## nlines will be the number of minutes between 2007-02-01, time 00:00:00 to 2007-02-02, time 23:59:00 (inclusive)
colNames <- c("Date", "Time", "Global_active_power", "Global_reactive_power", "Voltage", "Global_intensity", "Sub_metering_1",
"Sub_metering_2", "Sub_metering_3")
initial <- read.table("household_power_consumption.txt", col.names=colNames, na.strings="?", sep=";",
comment.char="", skip=1, nrows=4)
initial$Time <- strptime(paste(initial$Date,initial$Time), "%d/%m/%Y %H:%M:%S")
dif <- as.integer((strptime("01/02/2007 00:00:00", "%d/%m/%Y %H:%M:%S") - initial[1,]$Time)*24*60)
nlines <- as.integer((strptime("02/02/2007 24:00:00", "%d/%m/%Y %H:%M:%S") - strptime("01/02/2007 00:00:00", "%d/%m/%Y %H:%M:%S"))*24*60)
## the data from 2007-02-01, time 00:00:00 to 2007-02-02, time 23:59:00 is read
data <- read.table("household_power_consumption.txt", col.names=colNames, na.strings="?", sep=";",
comment.char="", skip=(dif+1), nrows=nlines)
## the required graph is made and put in plot1.png
png(file="plot1.png")
par(cex=0.8)
hist(data[,3], col="red", xlab="Global active power (kilowatts)", main="Global Active Power")
dev.off()
|
## Set Working Directory
# NOTE(review): hard-coded user-specific path; breaks on other machines.
setwd("C:\\Users\\Davide\\Documents\\power")
# NOTE(review): the previous par settings are saved in `opar` but are
# never restored with par(opar).
opar=par(ps=12)
## Read Power File
power <- read.table(file="household_power_consumption.txt", sep=";", na.strings="?", header=TRUE, colClasses=c("character","character","numeric","numeric","numeric","numeric","numeric","numeric","numeric"))
## Convert Date and Time Field
power$Date <- as.Date(power$Date,"%d/%m/%Y")
power$Time <- strptime(power$Time, format="%H:%M:%S")
## Subset Data for Feb 1 and 2 2007
power_subset <- subset(power, (power$Date=="2007-02-01" | power$Date=="2007-02-02" ))
## Create plot 2 -- write to png file
png('plot2.png', width=480, height=480)
# Suppress the default x axis and label the start/middle/end of the
# two-day window as Thu/Fri/Sat.
plot(power_subset$Global_active_power, xlab="", ylab="Global Active Power (kilowatts)", type="l", xaxt="n")
axis(1, at=c(1,nrow(power_subset)/2,nrow(power_subset)),labels=c("Thu","Fri","Sat"))
dev.off()
| /plot2.r | no_license | eatondl/ExData_Plotting1 | R | false | false | 850 | r | ## Set Working Directory
setwd("C:\\Users\\Davide\\Documents\\power")
opar=par(ps=12)
## Read Power File
power <- read.table(file="household_power_consumption.txt", sep=";", na.strings="?", header=TRUE, colClasses=c("character","character","numeric","numeric","numeric","numeric","numeric","numeric","numeric"))
## Convert Date and Time Field
power$Date <- as.Date(power$Date,"%d/%m/%Y")
power$Time <- strptime(power$Time, format="%H:%M:%S")
## Subset Data for Feb 1 and 2 2007
power_subset <- subset(power, (power$Date=="2007-02-01" | power$Date=="2007-02-02" ))
## Create plot 2 -- write to png file
png('plot2.png', width=480, height=480)
plot(power_subset$Global_active_power, xlab="", ylab="Global Active Power (kilowatts)", type="l", xaxt="n")
axis(1, at=c(1,nrow(power_subset)/2,nrow(power_subset)),labels=c("Thu","Fri","Sat"))
dev.off()
|
# Aggregate quarter-hourly electricity price series to hourly means and
# wrap the results as xts time series.
library(xts)
library(zoo)
# NOTE(review): Arima() is from the forecast package, which is not
# loaded here; price1, wind1, pv1, pricemw and deas_price must also be
# defined by earlier scripts -- confirm the expected workspace state.
model2 <- Arima(price1, order = c(8, 0, 7), xreg = cbind(wind1, pv1))
# Preallocate one slot per hourly observation.
a <- numeric(4762)
u <- numeric(4762)
o <- numeric(4762)
#fit <- xts(fitted(model2), time)
price2 <- resid(deas_price)
i <- 1
b <- 1
# Average each block of four quarter-hour values into one hourly value.
# NOTE(review): fitted(model2) is re-evaluated four times per iteration;
# hoisting it into a local variable before the loop would be far faster.
while(i < length(pricemw)){
a[b] <- (pricemw[i]+pricemw[i+1]+pricemw[i+2]+pricemw[i+3])/4
u[b] <- (price2[i]+price2[i+1]+price2[i+2]+price2[i+3])/4
o[b] <- (fitted(model2)[i]+fitted(model2)[i+1]+fitted(model2)[i+2]+fitted(model2)[i+3])/4
i <- i+4
b <- b+1
}
# Hourly timestamps starting one hour after 2011-12-14 12:00:00.
time1 <- ISOdatetime(2011,12,14,12,0,0)+1:4762*60*60
# NOTE(review): `c` shadows base::c() for the rest of this session.
c <- xts(a, time1)
d <- xts(u, time1)
fit1 <- xts(o, time1)
#autoplot(c, xlab="Time", ylab= "Electricity Price (in ???/MWh)", main=NULL) | /hourly av.R | no_license | arberzela/ARIMA-model | R | false | false | 716 | r | library(xts)
library(zoo)
model2 <- Arima(price1, order = c(8, 0, 7), xreg = cbind(wind1, pv1))
a <- numeric(4762)
u <- numeric(4762)
o <- numeric(4762)
#fit <- xts(fitted(model2), time)
price2 <- resid(deas_price)
i <- 1
b <- 1
while(i < length(pricemw)){
a[b] <- (pricemw[i]+pricemw[i+1]+pricemw[i+2]+pricemw[i+3])/4
u[b] <- (price2[i]+price2[i+1]+price2[i+2]+price2[i+3])/4
o[b] <- (fitted(model2)[i]+fitted(model2)[i+1]+fitted(model2)[i+2]+fitted(model2)[i+3])/4
i <- i+4
b <- b+1
}
time1 <- ISOdatetime(2011,12,14,12,0,0)+1:4762*60*60
c <- xts(a, time1)
d <- xts(u, time1)
fit1 <- xts(o, time1)
#autoplot(c, xlab="Time", ylab= "Electricity Price (in ???/MWh)", main=NULL) |
#' Gene network reconstruction using global-local shrinkage priors
#'
#' @param tX p by n data matrix of gene expression measurements
#' @param globalShrink integer. Either 1 or 2. See Details.
#' @param nsamp0 integer. Number of randomly selected edges to estimate p0. See Details.
#' @param blfdr numeric. Bayesian analogue of the local false discovery rate used for edge selection. Value should be between 0 and 1. Default is 0.1.
#' @param maxNbEdges numeric. Maximum number of edges to select.
#' @param maxiter integer. Maximum number of iterations for the variational algorithm. Default is 100.
#' @param tol numeric. Represents the maximum relative convergence tolerance over the p variational lower bounds. Default is 0.001.
#' @param verbose logical. Should information on progress be printed?
#' @param standardize logical. Should the data be standardized? Default is TRUE.
#' @details
#'
#' If \code{globalShrink}=1 then empirical Bayes for the global shrinkage prior is carried out using
#' fixed-point iterations as in Valpola and Honkela (2006). Otherwise, if \code{globalShrink}=2,
#' the approximate analytical solution of Leday et al (2015) is used.
#'
#' When \code{nsamp0}=\code{NULL}, the proportion of null hypotheses p0 is estimated using Bayes factors calculated for all P=0.5*p*(p-1) edges (cf Leday et al., 2015).
#' When P is very large, it may me preferable to approximate p0 instead using a random subset of edges.
#' If \code{nsamp0} is an integer, then a random subset of size \code{nsamp0} is selected to estimate p0.
#' The default is \code{nsamp0}=\code{NULL} when p<=100 and \code{nsamp0}=1000 otherwise.
#'
#' @return An object of class \code{\link{ShrinkNet-class}}
#'
#' @author Gwenael G.R. Leday <gwenael.leday (at) mrc-bsu.cam.ac.uk>
#'
#' @references Leday, G.G.R., de Gunst, M.C.M., Kpogbezan, G.B., van der Vaart, A.W., van Wieringen, W.N., and
#' van de Wiel, M.A. (2015). Gene network reconstruction using global-local shrinkage priors. Submitted.
#'
#' @export
ShrinkNet <- function(tX, globalShrink=1, nsamp0=NULL, blfdr=0.1, maxNbEdges=NULL, maxiter=100, tol=0.001, verbose=TRUE, standardize=TRUE){
##### Input checks
# tX must be a complete (no NA) numeric matrix, variables (genes) in rows.
if(is.matrix(tX)){
if(any(is.na(tX))){
stop("Missing values are not allowed")
}
}else{
stop("tX is not a matrix")
}
# globalShrink selects the empirical-Bayes update for the global prior
# (1 = fixed-point iterations, 2 = approximate analytical solution).
if(is.numeric(globalShrink)){
globalShrink <- as.integer(globalShrink)
if(!globalShrink%in%c(1,2)){
stop("globalShrink should be equal to 1 or 2")
}
}else{
stop("globalShrink is not a numeric")
}
# Total number of possible edges P = p*(p-1)/2.
edgeTot <- 0.5*nrow(tX)*(nrow(tX)-1)
# nsamp0: number of randomly sampled edges used to estimate p0.
# NULL means "use all edges", except that for p > 100 a random subset
# of 1000 edges is used instead (with a warning).
if(is.null(nsamp0)){
if(nrow(tX)>100){
nsamp0 <- 1000
warning("p>100 so p0 is estimated by sampling nsamp0=1000 edges")
}
}else{
if(is.numeric(nsamp0)){
nsamp0 <- as.integer(nsamp0)
if(nsamp0>edgeTot){
nsamp0 <- edgeTot
}
if(nsamp0<1000){
warning("nsamp0 (<1000) may be too low to obtain a reasonable estimate of p0")
}
}else{
stop("nsamp0 is not a numeric")
}
}
if(is.numeric(maxiter)){
maxiter <- as.integer(maxiter)
}else{
stop("maxiter is not a numeric")
}
# blfdr: Bayesian local-fdr cutoff, must lie strictly in (0, 1).
if(is.numeric(blfdr)){
if((blfdr<=0)|(blfdr>=1)){
stop("blfdr should be between 0 and 1")
}
}else{
stop("blfdr is not a numeric")
}
# maxNbEdges = 0 is used internally to mean "no cap on selected edges".
if(is.null(maxNbEdges)){
maxNbEdges <- 0
}else{
if(is.numeric(maxNbEdges)){
maxNbEdges <- round(maxNbEdges)
if((maxNbEdges<=0) | (maxNbEdges>edgeTot) ){
stop(paste("maxNbEdges must take values between 1 and", edgeTot) )
}
}else{
stop("maxNbEdges is not a numeric")
}
}
if(!is.logical(verbose)){
stop("verbose is not a logical")
}
if(!is.logical(standardize)){
stop("standardize is not a logical")
}else{
if(standardize){
# Standardize each row (variable) to zero mean and unit variance.
tX <- t(scale(t(tX), center = TRUE, scale = TRUE))
}else{
warning("Input data have not been standardized")
}
}
tps <- proc.time()
##### Initialization
# Hyperparameters of the (vague) prior on the random-effect precision.
aRand <- 0.001
bRand <- 0.001
##### Data preparation
if(verbose){
cat("\n")
cat("STEP 0: SVD computations... ")
}
# One SVD per variable; getSVD() is an internal package helper.
allSVDs <- sapply(1:nrow(tX), getSVD, tX=tX, simplify=FALSE)
if(verbose) cat("DONE\n")
tps1 <- proc.time() - tps
##### Algo
if(verbose) cat("STEP 1: Variational algorithm...\n")
eb <- HiddenVarAlgo(SVDs=allSVDs, tX=tX, aRand=aRand, bRand=bRand, maxiter=maxiter, globalShrink=globalShrink, tol=tol, verbose=verbose)
tps2 <- proc.time() - tps - tps1
##### Calculate summary statistics from posteriors
if(verbose) cat("STEP 2: Calculate summary statistics from posteriors... ")
# Per-variable posterior summaries at the converged prior parameters
# (last row of eb$parTau); column 1 feeds matThres (kappa), column 2
# feeds matBeta (regression coefficients).
postSummaries <- sapply(1:nrow(tX), HiddenVarRidgeiGetKappa, SVDs=allSVDs, tX=tX, aRand=eb$parTau[nrow(eb$parTau),1], bRand=eb$parTau[nrow(eb$parTau),2], bRandStarInit=eb$allbRandStar, dSigmaStarInit=eb$alldSigmaStar, simplify=FALSE)
matThres <- sapply(1:length(postSummaries), function(x){postSummaries[[x]][,1]}, simplify=TRUE)
matBeta <- sapply(1:length(postSummaries), function(x){postSummaries[[x]][,2]}, simplify=TRUE)
# Symmetrize the kappa matrix by averaging it with its transpose.
matThres <- (matThres + t(matThres))/2
if(verbose) cat("DONE\n")
tps3 <- proc.time() - tps - tps1 - tps2
##### Estimate p0
if(verbose) cat("STEP 3: Estimate p0... ")
if(is.null(nsamp0)){
# All edges: estimate p0 from the full set of Bayes factors.
p0 <- HiddenEstimatep0(themat=matThres, tX=tX)
}else{
# Sampled edges: draw nsamp0 lower-triangle entries and approximate
# p0 by the proportion of edges whose log Bayes factor is <= 0.
mat <- matThres
mat[upper.tri(mat)] <- 0
idx <- which(mat!=0, arr.ind=TRUE)
idx <- idx[sample(nrow(idx), nsamp0),]
allLogBFs <- t(apply(idx, 1, .edgeBFprime, themat=matThres, tX=tX))
p0 <- 1-mean(allLogBFs>0)
}
if(verbose) cat("DONE\n")
tps4 <- proc.time() - tps - tps1 - tps2 - tps3
##### Edge selection using Bayesian local false discovery rate
if(verbose) cat("STEP 4: Edge selection... ")
resSel <- HiddenEdgeSelection(themat=matThres, tX=tX, p0=p0, lfdrcut=blfdr, maxNbEdges=maxNbEdges)
selGraph <- resSel$myGraph
logMaxBFs <- Matrix::Matrix(resSel$logMaxBFs, sparse=TRUE)
# The adjacency matrix is symmetric, so each edge is counted twice.
nbedge <- sum(selGraph)/2
if(verbose){
cat("DONE\n\n")
cat("prior null probability p0 =", round(p0,5), "\n")
cat("", nbedge, " selected edges out of ", edgeTot, " (",round(100*nbedge/edgeTot, 2),"%)", " using blfdr = ", blfdr, sep="")
}
tps5 <- proc.time() - tps - tps1 - tps2 - tps3 - tps4
tps6 <- proc.time() - tps
## Time
# Per-step and overall elapsed times, formatted as H:MM:SS.
mytime <- data.frame("elapsed"=c(tps1[3], tps2[3], tps3[3], tps4[3], tps5[3], tps6[3]))
mytime$"H:MM:SS" <- sapply(mytime$elapsed, .convertToTime)
rownames(mytime) <- c("STEP 0 (SVD decomposition)", "STEP 1 (variational algorithm)", "STEP 2 (summary statistics)", "STEP 3 (p0 estimation)", "STEP 4 (edge selection)", "overall")
if(verbose){
cat("\n\n")
cat("Time (H:MM:SS):", .convertToTime(tps6[3]))
cat("\n\n")
}
## Output
# Build the undirected igraph; carry over row names as vertex names.
myigraph <- igraph::graph.adjacency(selGraph, mode = "undirected")
if(!is.null(rownames(tX))){
myigraph <- igraph::set.vertex.attribute(myigraph, "name", value=rownames(tX))
}
out <- new("ShrinkNet",
graph = myigraph,
kappa = matThres,
beta = matBeta,
p0 = p0,
logMaxBFs = logMaxBFs,
globalPrior = eb$parTau,
allmargs = eb$allmargs,
time = mytime)
return(out)
}
| /R/ShrinkNet.R | no_license | gleday/ShrinkNet | R | false | false | 7,223 | r | #' Gene network reconstruction using global-local shrinkage priors
#'
#' @param tX p by n data matrix of gene expression measurements
#' @param globalShrink integer. Either 1 or 2. See Details.
#' @param nsamp0 integer. Number of randomly selected edges to estimate p0. See Details.
#' @param blfdr numeric. Bayesian analogue of the local false discovery rate used for edge selection. Value should be between 0 and 1. Default is 0.1.
#' @param maxNbEdges numeric. Maximum number of edges to select.
#' @param maxiter integer. Maximum number of iterations for the variational algorithm. Default is 100.
#' @param tol numeric. Represents the maximum relative convergence tolerance over the p variational lower bounds. Default is 0.001.
#' @param verbose logical. Should information on progress be printed?
#' @param standardize logical. Should the data be standardized? Default is TRUE.
#' @details
#'
#' If \code{globalShrink}=1 then empirical Bayes for the global shrinkage prior is carried out using
#' fixed-point iterations as in Valpola and Honkela (2006). Otherwise, if \code{globalShrink}=2,
#' the approximate analytical solution of Leday et al (2015) is used.
#'
#' When \code{nsamp0}=\code{NULL}, the proportion of null hypotheses p0 is estimated using Bayes factors calculated for all P=0.5*p*(p-1) edges (cf Leday et al., 2015).
#' When P is very large, it may me preferable to approximate p0 instead using a random subset of edges.
#' If \code{nsamp0} is an integer, then a random subset of size \code{nsamp0} is selected to estimate p0.
#' The default is \code{nsamp0}=\code{NULL} when p<=100 and \code{nsamp0}=1000 otherwise.
#'
#' @return An object of class \code{\link{ShrinkNet-class}}
#'
#' @author Gwenael G.R. Leday <gwenael.leday (at) mrc-bsu.cam.ac.uk>
#'
#' @references Leday, G.G.R., de Gunst, M.C.M., Kpogbezan, G.B., van der Vaart, A.W., van Wieringen, W.N., and
#' van de Wiel, M.A. (2015). Gene network reconstruction using global-local shrinkage priors. Submitted.
#'
#' @export
ShrinkNet <- function(tX, globalShrink=1, nsamp0=NULL, blfdr=0.1, maxNbEdges=NULL, maxiter=100, tol=0.001, verbose=TRUE, standardize=TRUE){
##### Input checks
## tX: complete (no NA) numeric matrix, variables (genes) in rows, samples in columns
if(is.matrix(tX)){
if(any(is.na(tX))){
stop("Missing values are not allowed")
}
}else{
stop("tX is not a matrix")
}
## globalShrink: 1 = fixed-point EB iterations, 2 = approximate analytical solution
if(is.numeric(globalShrink)){
globalShrink <- as.integer(globalShrink)
if(!globalShrink%in%c(1,2)){
stop("globalShrink should be equal to 1 or 2")
}
}else{
stop("globalShrink is not a numeric")
}
## total number of possible edges P = p*(p-1)/2
edgeTot <- 0.5*nrow(tX)*(nrow(tX)-1)
## default: use all edges to estimate p0 when p<=100, else subsample 1000 edges
if(is.null(nsamp0)){
if(nrow(tX)>100){
nsamp0 <- 1000
warning("p>100 so p0 is estimated by sampling nsamp0=1000 edges")
}
}else{
if(is.numeric(nsamp0)){
nsamp0 <- as.integer(nsamp0)
## cap the subsample at the total number of edges
if(nsamp0>edgeTot){
nsamp0 <- edgeTot
}
if(nsamp0<1000){
warning("nsamp0 (<1000) may be too low to obtain a reasonable estimate of p0")
}
}else{
stop("nsamp0 is not a numeric")
}
}
if(is.numeric(maxiter)){
maxiter <- as.integer(maxiter)
}else{
stop("maxiter is not a numeric")
}
## blfdr must lie strictly in (0,1)
if(is.numeric(blfdr)){
if((blfdr<=0)|(blfdr>=1)){
stop("blfdr should be between 0 and 1")
}
}else{
stop("blfdr is not a numeric")
}
## maxNbEdges = 0 is the internal sentinel for "no cap on number of selected edges"
if(is.null(maxNbEdges)){
maxNbEdges <- 0
}else{
if(is.numeric(maxNbEdges)){
maxNbEdges <- round(maxNbEdges)
if((maxNbEdges<=0) | (maxNbEdges>edgeTot) ){
stop(paste("maxNbEdges must take values between 1 and", edgeTot) )
}
}else{
stop("maxNbEdges is not a numeric")
}
}
if(!is.logical(verbose)){
stop("verbose is not a logical")
}
if(!is.logical(standardize)){
stop("standardize is not a logical")
}else{
if(standardize){
## center and scale each ROW (variable) to mean 0 / sd 1
tX <- t(scale(t(tX), center = TRUE, scale = TRUE))
}else{
warning("Input data have not been standardized")
}
}
## overall timer; tps1..tps6 below accumulate per-step elapsed times
tps <- proc.time()
##### Initialization
## vague gamma hyperparameters for the random-effect precision prior
aRand <- 0.001
bRand <- 0.001
##### Data preparation
if(verbose){
cat("\n")
cat("STEP 0: SVD computations... ")
}
## one SVD per variable, reused by all variational updates
allSVDs <- sapply(1:nrow(tX), getSVD, tX=tX, simplify=FALSE)
if(verbose) cat("DONE\n")
tps1 <- proc.time() - tps
##### Algo
if(verbose) cat("STEP 1: Variational algorithm...\n")
eb <- HiddenVarAlgo(SVDs=allSVDs, tX=tX, aRand=aRand, bRand=bRand, maxiter=maxiter, globalShrink=globalShrink, tol=tol, verbose=verbose)
tps2 <- proc.time() - tps - tps1
##### Calculate summary statistics from posteriors
if(verbose) cat("STEP 2: Calculate summary statistics from posteriors... ")
postSummaries <- sapply(1:nrow(tX), HiddenVarRidgeiGetKappa, SVDs=allSVDs, tX=tX, aRand=eb$parTau[nrow(eb$parTau),1], bRand=eb$parTau[nrow(eb$parTau),2], bRandStarInit=eb$allbRandStar, dSigmaStarInit=eb$alldSigmaStar, simplify=FALSE)
## per-variable summaries: column 1 = kappa statistic, column 2 = regression coefficients
matThres <- sapply(1:length(postSummaries), function(x){postSummaries[[x]][,1]}, simplify=TRUE)
matBeta <- sapply(1:length(postSummaries), function(x){postSummaries[[x]][,2]}, simplify=TRUE)
## symmetrize kappa by averaging the two directed statistics of each pair
matThres <- (matThres + t(matThres))/2
if(verbose) cat("DONE\n")
tps3 <- proc.time() - tps - tps1 - tps2
##### Estimate p0
if(verbose) cat("STEP 3: Estimate p0... ")
if(is.null(nsamp0)){
## exact: Bayes factors over all P edges
p0 <- HiddenEstimatep0(themat=matThres, tX=tX)
}else{
## approximate: sample nsamp0 non-zero entries of the lower triangle
## (assumes at least nsamp0 such entries exist -- TODO confirm for sparse kappa)
mat <- matThres
mat[upper.tri(mat)] <- 0
idx <- which(mat!=0, arr.ind=TRUE)
idx <- idx[sample(nrow(idx), nsamp0),]
allLogBFs <- t(apply(idx, 1, .edgeBFprime, themat=matThres, tX=tX))
## p0 = proportion of sampled edges with log-Bayes-factor <= 0
p0 <- 1-mean(allLogBFs>0)
}
if(verbose) cat("DONE\n")
tps4 <- proc.time() - tps - tps1 - tps2 - tps3
##### Edge selection using Bayesian local false discovery rate
if(verbose) cat("STEP 4: Edge selection... ")
resSel <- HiddenEdgeSelection(themat=matThres, tX=tX, p0=p0, lfdrcut=blfdr, maxNbEdges=maxNbEdges)
selGraph <- resSel$myGraph
logMaxBFs <- Matrix::Matrix(resSel$logMaxBFs, sparse=TRUE)
## the adjacency matrix is symmetric, so each edge is counted twice
nbedge <- sum(selGraph)/2
if(verbose){
cat("DONE\n\n")
cat("prior null probability p0 =", round(p0,5), "\n")
cat("", nbedge, " selected edges out of ", edgeTot, " (",round(100*nbedge/edgeTot, 2),"%)", " using blfdr = ", blfdr, sep="")
}
tps5 <- proc.time() - tps - tps1 - tps2 - tps3 - tps4
tps6 <- proc.time() - tps
## Time
## per-step and overall elapsed wall time, in seconds and H:MM:SS
mytime <- data.frame("elapsed"=c(tps1[3], tps2[3], tps3[3], tps4[3], tps5[3], tps6[3]))
mytime$"H:MM:SS" <- sapply(mytime$elapsed, .convertToTime)
rownames(mytime) <- c("STEP 0 (SVD decomposition)", "STEP 1 (variational algorithm)", "STEP 2 (summary statistics)", "STEP 3 (p0 estimation)", "STEP 4 (edge selection)", "overall")
if(verbose){
cat("\n\n")
cat("Time (H:MM:SS):", .convertToTime(tps6[3]))
cat("\n\n")
}
## Output
## build the undirected igraph; carry variable names over as vertex labels
myigraph <- igraph::graph.adjacency(selGraph, mode = "undirected")
if(!is.null(rownames(tX))){
myigraph <- igraph::set.vertex.attribute(myigraph, "name", value=rownames(tX))
}
out <- new("ShrinkNet",
graph = myigraph,
kappa = matThres,
beta = matBeta,
p0 = p0,
logMaxBFs = logMaxBFs,
globalPrior = eb$parTau,
allmargs = eb$allmargs,
time = mytime)
return(out)
}
|
##------------------------------------------------------------------------------------------
## Checks if file that loads data (loadthedata.R) is in the current working directory.
## If not, working directory is set to that which contains the file loadthedata.R
##------------------------------------------------------------------------------------------
## Make sure the data-loading script is available in the working directory;
## fall back to the author's project directory otherwise.
## NOTE: hard-coded absolute path -- only valid on the original author's machine.
if (!file.exists("loadthedata.R")) {
  setwd("~/Desktop/Coursera/Exploratory_Data_Analysis/Project_1/")
}
## Run the loading script; the plotting code below uses the objects it is
## expected to create (date_time and the three SubMetering series).
source("loadthedata.R")
## Draw plot 3 -- the three energy sub-metering series over time -- into a
## 480x480 PNG, then close the device.
png(filename = "plot3.png", width = 480, height = 480)
series_cols <- c("black", "red", "blue")
plot(date_time, SubMetering1,
     type = "l", col = series_cols[1],
     xlab = "", ylab = "Energy sub metering")
lines(date_time, SubMetering2, col = series_cols[2])
lines(date_time, SubMetering3, col = series_cols[3])
legend("topright",
       col = series_cols,
       legend = c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"),
       lwd = 1)
dev.off()
| /plot3.R | no_license | anthonypecchillo/ExData_Plotting1 | R | false | false | 1,435 | r | ##------------------------------------------------------------------------------------------
## Checks if file that loads data (loadthedata.R) is in the current working directory.
## If not, working directory is set to that which contains the file loadthedata.R
## NOTE(review): hard-coded absolute path -- portable only on the author's machine.
##------------------------------------------------------------------------------------------
if (!"loadthedata.R" %in% list.files()) {
setwd("~/Desktop/Coursera/Exploratory_Data_Analysis/Project_1/")
}
##------------------------------------------------------------------------------------------
## Tells R to accept its input from the provided file (loadthedata.R)
## (expected to define date_time, SubMetering1, SubMetering2 and SubMetering3,
## which the plotting calls below rely on -- confirm against loadthedata.R)
##------------------------------------------------------------------------------------------
source("loadthedata.R")
##------------------------------------------------------------------------------------------
## Creates a png graphics device, fills it with the appropriate plot, then closes the device.
##------------------------------------------------------------------------------------------
png(filename = "plot3.png", width = 480, height = 480)
plot(date_time, SubMetering1,
type = "l", col = "black",
xlab = "", ylab = "Energy sub metering")
lines(date_time, SubMetering2, col = "red")
lines(date_time, SubMetering3, col = "blue")
## the label vector is passed positionally to legend()'s `legend` argument
legend("topright",
col = c("black", "red", "blue"),
c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"),
lwd = 1)
dev.off()
|
# Module containing functions for looking at individual rodent data pertaining to movement
id_unknowns = function(dat, tag_col){
# give unique numbers to blank tags
# note: these are 7 digit numbers, so they are longer than any other tag type
# note: in the Portal data, column 12 is tag, so we are looking for blank or "0" tags to rename
unk = 1000000
for (irow in 1:nrow(dat)){
tag = dat[irow,tag_col]
unk = unk + 1
if (tag == "") {
dat[irow,tag_col] = unk
}
else if (tag == "0") {
dat[irow,tag_col] = unk
}}
return(dat)
}
starred_tags = function(dat, tags, spp_col, tag_col){
#Automate checking the flagged data for where the individual breaks should be
#check for *, which indicates a new tag
#tags with multiple rows are sorted by species, then checked for *
#if a * exists, then each time it is given a new unique tag, that ends with "s" for "star" (from note2 column)
numcount = 1
for (t in 1:length(tags)){
#only run on ear and toe tags, pit tags are very unlikely to be duplicated
#NOTE: there are some 6-character toe tags (e.g.1200DM, how to deal with these?)
if (nchar(tags[t]) < 6){
tmp <- which(dat$tag == tags[t])
# if indiv was captured multiple times
if (nrow(dat[tmp,]) > 1) {
# check num species recorded. If more than one, does data look OK if separated on species?
spp_list = unique(dat[tmp,spp_col])
for (sp in 1:length(spp_list)) {
tmp2 = which(dat$tag == tags[t] & dat$species == spp_list[sp])
isnew = as.vector(dat[tmp2,]$note2)
if ("*" %in% isnew) {
#print(dat[tmp2,])
rowbreaks = which(isnew == "*", arr.in=TRUE) #find rows where * indicates a new tag
for (r in 1:length(rowbreaks)){
if (r == 1) {
#GIVE an ID up to the first *
newtag = paste(tags[t], numcount, "s", sep = "") #make a new tag to keep separate
dat[tmp2,][1:rowbreaks[r]-1, tag_col] = newtag
numcount = numcount + 1
#AND an ID to everything after the first * (the loop should take care of the next set and so on)
newtag = paste(tags[t], numcount, "s", sep = "") #make a new tag to keep separate
dat[tmp2,][rowbreaks[r]:nrow(dat[tmp2,]),tag_col] = newtag
numcount = numcount + 1
}
else if (r > 1) {
#GIVE an ID to everything after the next *
newtag = paste(tags[t], numcount, "s", sep = "") #make a new tag to keep separate
dat[tmp2,][rowbreaks[r]:nrow(dat[tmp2,]),tag_col] = newtag
numcount = numcount + 1
}
}
}
}
}
}
}
return(dat)
}
## Split capture histories at records marked dead (note5 == "D"): any capture
## of the same tag AFTER a recorded death must be a different animal, so each
## segment receives a fresh tag ID suffixed "m" (mortality). Returns the
## modified data frame.
## NOTE(review): `newtagvector` is rebuilt in every branch but never used --
## left untouched here; candidate for removal.
is_dead = function(dat, tags, spp_col, tag_col){
#checks note5 for "D", which indicated a dead rat.
#by definition, all captures with the same tagID afterwards, must be a different individual
#assign these captures with a new tag ID that ends with 'm' for 'mortality.
numcount = 1
for (t in 1:length(tags)){
tmp <- which(dat$tag == tags[t])
# if indiv was captured multiple times
if (nrow(dat[tmp,]) > 1) {
# check num species recorded. If more than one, does data look OK if separated on species?
spp_list = unique(dat[tmp,spp_col])
for (sp in 1:length(spp_list)) {
tmp2 = which(dat$tag == tags[t] & dat$species == spp_list[sp])
isdead = as.vector(dat[tmp2,]$note5)
if ("D" %in% isdead) {
rowbreaks = which(isdead == "D", arr.in=TRUE) #find rows where D indicates a dead individuals
endrow = nrow(dat[tmp2,])
#print (endrow)
for (r in 1:length(rowbreaks)){
if (r == 1) {
# death on the LAST capture: relabel the whole history as one individual
if (rowbreaks[r] == endrow) {
#GIVE an ID up to the first *
newtag = paste(tags[t], numcount, "m", sep = "") #make a new tag to keep separate
numrows = nrow(dat[tmp2,][1:rowbreaks[r],])
newtagvector = as.vector(rep(newtag, numrows))
dat[tmp2,][1:rowbreaks[r], tag_col] = newtag
numcount = numcount + 1
#print(dat[tmp2,]
}
else{
# death mid-history: one ID up to (and including) the death ...
#GIVE an ID up to the first *
newtag = paste(tags[t], numcount, "m", sep = "") #make a new tag to keep separate
numrows = nrow(dat[tmp2,][1:rowbreaks[r],])
newtagvector = as.vector(rep(newtag, numrows))
dat[tmp2,][1:rowbreaks[r], tag_col] = newtag
numcount = numcount + 1
#print(dat[tmp2,])
# ... and a second ID for everything after it
#AND an ID to everything after the first "D" (the loop should take care of the next set and so on)
startrow = rowbreaks[r] + 1
newtag = paste(tags[t], numcount, "m", sep = "") #make a new tag to keep separate
numrows = nrow(dat[tmp2,][(startrow:endrow),])
newtagvector = as.vector(rep(newtag, numrows))
dat[tmp2,][(startrow:endrow),tag_col] = newtag
numcount = numcount + 1
}
}
else if (r > 1) {
# a later death at the very end needs no further splitting
if (rowbreaks[r] == endrow) {
break
}
else{
#print (t)
#GIVE an ID to everything after the next "D"
startrow = rowbreaks[r] + 1
newtag = paste(tags[t], numcount, "m", sep = "") #make a new tag to keep separate
numrows = nrow(dat[tmp2,][(startrow:endrow),])
newtagvector = as.vector(rep(newtag, numrows))
dat[tmp2,][(startrow:endrow),tag_col] = newtag
numcount = numcount + 1
}
}
}
}
}}}
return(dat)
}
## Detect tags that likely belong to more than one animal (implausibly long
## tenure or duplicated records). Resolvable cases get a new unique tag ID
## suffixed "d" (duplicate); unresolvable ones are flagged for manual review.
## NOTE(review): column 1 of dat is assumed to hold the capture YEAR (the
## min/max arithmetic below) -- confirm against the data layout.
is_duplicate_tag = function(dat, tags, sex_col, spp_col, tag_col){
# check the min to max year for a given tag.
# If > 4, considered suspicious
# If multiple species, considered suspicious
# If adequately resolved, given a new unique tag number, that ends with d for "duplicate"
# returns a list with 2 elements [1] altered data, [2] flagged data
numcount = 100
flagged_rats = data.frame("tag"=1, "reason"=1, "occurrences"=1)
outcount = 0
for (t in 1:length(tags)){
#only run on ear and toe tags, pit tags are very unlikely to be duplicated
if (nchar(tags[t]) < 6){
tmp <- which(dat$tag == tags[t])
# if indiv was captured multiple times
if (nrow(dat[tmp,]) > 1) {
#more than 3 years between recaptures? Rodents are short-lived.
if (max(dat[tmp,1]) - min(dat[tmp,1]) >= 3){
# check num species recorded. If more than one, does data look OK if separated on species?
spp_list = unique(dat[tmp,spp_col])
for (sp in 1:length(spp_list)) {
tmp2 = which(dat$tag == tags[t] & dat$species == spp_list[sp])
#Check for duplicate tags in the same period and same species. This likely indicates multiple individuals with the same tag.
if(anyDuplicated(dat[tmp2,]) > 0) {
outcount = outcount + 1
flagged_rats[outcount,] <- c(tags[t], "sameprd", nrow(dat[tmp,]))
}
#Dipodomys are long-lived. Raise the threshold for these indivs
if(spp_list[sp] %in% list("DO", "DM", "DS")){
# tenure < 5 years: plausible for kangaroo rats, relabel and keep
if (max(dat[tmp2,1]) - min(dat[tmp2,1]) < 5) {
newtag = paste(tags[t], numcount, "d", sep = "") #make a new tag to keep separate
dat[tmp2,tag_col] = newtag
numcount = numcount + 1
}
else {
outcount = outcount + 1
flagged_rats[outcount,] <- c(tags[t], "year", nrow(dat[tmp,]))
#print(dat[tmp2,])
}
}
#Other genera are very short-lived. Flag data if same individual appears to occur >= 3 years.
else {
if(max(dat[tmp2,1]) - min(dat[tmp2,1]) < 3) {
newtag = paste(tags[t], numcount, "d", sep = "") #make a new tag to keep separate
dat[tmp2,tag_col] = newtag
numcount = numcount + 1
}
else {
#print(dat[tmp2,])
outcount = outcount + 1
flagged_rats[outcount,] <- c(tags[t], "year", nrow(dat[tmp,]))
}
}
}
}}}}
info = list(data = dat, bad = flagged_rats)
return (info)
}
same_period = function(dat, tags){
  # Flag tags recorded more than once within a single trapping period --
  # almost certainly two different animals carrying the same tag.
  # Returns a data frame (tag, reason, occurrences); when nothing is flagged,
  # the numeric initializer row (1, 1, 1) is returned unchanged, matching the
  # behavior of the other flagging helpers in this module.
  flagged_rats = data.frame("tag" = 1, "reason" = 1, "occurrences" = 1)
  outcount = 0
  for (tag in tags) {
    captures = which(dat$tag == tag)
    if (length(captures) > 1) {
      for (prd in unique(dat$period[captures])) {
        hits = which(dat$tag == tag & dat$period == prd)
        if (length(hits) > 1) {
          outcount = outcount + 1
          flagged_rats[outcount, ] <- c(tag, "sameprd", length(hits))
          break  # one flag per tag is enough
        }
      }
    }
  }
  return(flagged_rats)
}
find_bad_data2 = function(dat, tags, sex_col, spp_col){
  # Flag tags whose capture records disagree internally on sex or on species.
  # A tag can be flagged once for "sex" and once for "spp"; occurrences is the
  # total number of captures for that tag. Returns a data frame
  # (tag, reason, occurrences); with no flags, the numeric initializer row
  # (1, 1, 1) is returned unchanged.
  flagged_rats = data.frame("tag" = 1, "reason" = 1, "occurrences" = 1)
  outcount = 0
  for (tag in tags) {
    rows = which(dat$tag == tag)
    if (length(rows) > 1) {  # only recaptured individuals can disagree
      sexes = dat[rows, sex_col]
      for (s in sexes[-1]) {  # compare every later record against the first
        if (s != sexes[1]) {
          outcount = outcount + 1
          flagged_rats[outcount, ] <- c(tag, "sex", length(rows))
          break
        }
      }
      spp = dat[rows, spp_col]
      for (s in spp[-1]) {
        if (s != spp[1]) {
          outcount = outcount + 1
          flagged_rats[outcount, ] <- c(tag, "spp", length(rows))
          break
        }
      }
    }
  }
  return(flagged_rats)
}
subsetDat = function(dataset){
  ## Subset out proper data: resolve flagged sex/species records and drop
  ## incompletely sampled trapping periods.
  ## - Individuals flagged for inconsistent sex (plus other sex-column typos)
  ##   are relabeled "U" (unknown) rather than removed.
  ## - Individuals flagged for inconsistent species are removed entirely.
  ## - Negative and partially trapped periods are excluded.
  ## NOTE(review): columns are addressed positionally -- 10 is assumed to be
  ## sex and 9 species (arguments to find_bad_data2); confirm data layout.
  ## BUG FIX: the original removed bad-species rows with
  ## `dataset[-which(...),]`; when NO tags are flagged, which() is empty and
  ## negative indexing by an empty vector deletes EVERY row. The removal is
  ## now guarded on there being something to remove.
  tags = as.character(unique(dataset$tag)) # get list of unique tags
  flags = find_bad_data2(dataset, tags, 10, 9) # list of flagged data
  #first, mark all uncertain or unmarked sex as "U" for unknown
  badsextags = unique(flags[which(flags$reason == "sex"),1])
  dataset[which(dataset$tag %in% badsextags),10] = "U"
  dataset[which(dataset$sex %in% c("", "P", "Z")),10] = "U" #get rid of other weird typos in sex column
  #get rid of results where we don't know the species for sure
  badspptags = unique(flags[which(flags$reason == "spp"), 1])
  badspprows = which(dataset$tag %in% badspptags)
  if (length(badspprows) > 0) {
    dataset = dataset[-badspprows,] #delete rows where species is unsure
  }
  #don't use negative period numbers and periods with only one day of trapping
  #TODO: add periods from 1980-1999 that were incompletely sampled
  dataset = subset(dataset, period != 111 & period != 237 & period != 241 &
    period != 267 & period != 277 & period != 278 & period != 283 &
    period != 284 & period != 300 & period != 311 & period != 313 &
    period != 314 & period != 318 & period != 321 & period != 323 &
    period != 337 & period != 339 & period != 344 & period != 351)
  return (dataset)
}
distance_moved = function (data, tags) {
  # Distance moved between consecutive captures of each individual.
  #
  # For every tag, captures are ordered chronologically by trapping period and
  # the Euclidean distance between successive capture locations is computed
  # from coordinate columns 7 and 8 (presumably east/north stake positions --
  # TODO confirm against the data layout).
  #
  # Returns one numeric vector of all consecutive-move distances, pooled
  # across individuals; animals caught only once contribute nothing.
  #
  # (Vectorized with diff() instead of the original per-row append loop,
  # which grew the result vector one element at a time.)
  distance = as.numeric()
  for (t in seq_along(tags)) {
    ind_data = data[which(data$tag == tags[t]), ]  # captures of individual t
    ind_data = ind_data[order(ind_data$period), ]  # chronological order
    if (nrow(ind_data) > 1) {
      dx = diff(ind_data[, 7])
      dy = diff(ind_data[, 8])
      distance = append(distance, sqrt(dx^2 + dy^2))
    }
  }
  return(distance)
}
sd_avg_mass = function (ind_dat, spp_mean, spp_sd) {
  # Number of species-level standard deviations separating this individual's
  # average capture weight from the species mean weight (rounded to 4 dp).
  # Individuals with no recorded mass at all yield 0 so their records are not
  # lost -- note this is NOT a true zero; denote appropriately in methods.
  avg_wgt = mean(ind_dat$wgt, na.rm = TRUE)
  if (is.na(avg_wgt)) {
    return(0)
  }
  return(round((avg_wgt - spp_mean) / spp_sd, 4))
}
feeding_guild = function(speciesname) {
  # Map a species code to its feeding guild, based on the literature:
  #   1 = granivore (heteromyids and other seed-eaters)
  #   2 = folivore
  #   3 = insectivore / everything else
  # BUG FIX: the folivore branch previously assigned guild = 3, contradicting
  # both the "folivores == 2" comment that accompanied it and the 1/2/3 guild
  # coding documented at the bottom of this file (1 = granivore, 2 = folivore,
  # 3 = carnivore); folivores now correctly map to 2.
  if (speciesname %in% c("DO", "DM", "DS", "PB", "PP", "PF", "PH", "PI",
                         "PE", "PM", "PL", "RM", "RF", "RO", "BA")) {
    guild = 1
  } else if (speciesname %in% c("SH", "SF", "SO", "NAO")) {
    guild = 2
  } else {
    guild = 3
  }
  return(guild)
}
enumerate_species = function(speciesname) {
  # Translate a species code into the numeric label used in the Program MARK
  # input files. Transient granivores (PL, PH, PI, RF, RO, BA) are pooled
  # under 10, and SO (transient folivore) keeps its own code 14.
  # An unrecognized code raises an error, as the original if-chain did.
  codes = c(DO = 1, DM = 2, DS = 3, PB = 4, PP = 5, PF = 6,
            PE = 7, PM = 8, RM = 9,
            PL = 10, PH = 10, PI = 10, RF = 10, RO = 10, BA = 10,
            NAO = 11, SH = 12, SF = 13, SO = 14, OT = 15, OL = 16)
  return(codes[[speciesname]])
}
temporal_status = function (speciesname){
  # Temporal status of a species over the 30+ year dataset:
  #   1 = core, 2 = intermediate, 3 = transient
  # Group membership mirrors the corespecies/intermediatespecies/
  # transientspecies variables in the main script (flagged there as needing a
  # double-check against the formal definition).
  # An unrecognized species code raises an error, as in the original chain.
  core_spp = c("OT", "DM", "RM", "NAO", "OL", "PE", "DO", "PP", "PF", "PB")
  intermediate_spp = c("PM", "SH", "DS", "SF")
  transient_spp = c("RF", "BA", "PH", "RO", "SO", "PI", "PL")
  if (speciesname %in% core_spp) {
    status = 1
  } else if (speciesname %in% intermediate_spp) {
    status = 2
  } else if (speciesname %in% transient_spp) {
    status = 3
  }
  return(status)
}
## Build Program MARK multistate capture histories: state 1 = "home",
## state 2 = moved beyond `breakpoint` since last capture (state toggles back
## on each subsequent long-distance move).
## Args: dat = capture data for ONE species; prd = ordered vector of all
## trapping periods (defines the matrix columns); exclosures = plot numbers
## that right-censor a history; breakpoint = distance threshold (meters).
## NOTE(review): the censoring flag is implemented by multiplying freq by -1
## every time a censoring condition fires, so an EVEN number of such captures
## flips it back to +1 -- confirm this toggling is intended.
noplacelikehome = function (dat, prd, exclosures, breakpoint){
### Create a set of MARK capture histories by home vs away from home
# Creates a movement history to be used in Mark. Matrix is filled in with zeroes (not captured) and later filled in
# An individual always starts in 1, and is moved to state 2 only if it moves a distance larger than the threshold
# set by the core species movement distribution. If it is again recaptured at a distance larger than the threshold,
# is is moved from state 2 back to state 1. This will be used to calculate "transition" (or long distance movement)
# probability in Rmark.
## Home is determined using the mean + 1 sd of the logged data.
# species - alpha code
tags = unique(dat$tag)
capture_history = matrix(0, nrow = length(tags), ncol = length(prd))
covariates = matrix(0, nrow = length(tags), ncol = 2)
colnames(covariates) = c("freq", "species")
# fill freq in with 1. 1 indicates a normal capture history, -1 indicates a right-censored capture history
covariates[,1] = as.numeric(1)
# since data is imported by species, we only need to check the first row of data to grab the species name and decide what guild it is in
covariates[,2] = as.character(dat[1,]$species)
#loop through each tag to get individual-level data
for (t in 1:length(tags)) {
ind_dat = dat[which(dat$tag == tags[t]),] #get data for indiv with tag t
ind_dat = ind_dat[order(ind_dat$period),] #order chronologically
p1 = min(ind_dat$period) # record first capture period for the individual
index = match(p1, prd) # match the period with the index number for the list of periods (will correspond to col num in matrix)
state = 1
capture_history[t,index] = state #mark first capture with 1 ("home")
for (i in 1:nrow(ind_dat)){ #record capture history data
if (i+1 <= nrow(ind_dat)){
## distance between capture i and i+1 (coordinate columns 7 and 8 --
## presumably stake east/north positions; confirm against data layout)
meters = sqrt((ind_dat[i,8]-ind_dat[i+1,8])**2 + (ind_dat[i,7]-ind_dat[i+1,7])**2)
pnext = ind_dat[i+1,]$period #next capture period, where the distance will be recorded in the matrix (first capture period is always marked as "home")
if (meters <= breakpoint) {dist = state} #captured close to "home"
else if (meters > breakpoint) {
## long-distance move: toggle between states 1 and 2
if (state == 1) {
dist = 2
}
else if (state == 2) {
dist = 1
}
}
#was it captured on an exclosure? If yes, remove from study at this point.
if (ind_dat[i+1,]$plot %in% exclosures) {
covariates[t,1] = as.numeric(covariates[t,1]) * -1 }
#was it found dead or was it removed from the plot? If yes, remove from study at this point.
if (ind_dat[i+1,]$note5 %in% list("D", "R")) {
covariates[t,1] = as.numeric(covariates[t,1]) * -1 }
index = match(pnext, prd)
capture_history[t,index] = dist #mark subsequent captures
state = dist
}
}
}
mark_df = concat_ch(capture_history, covariates)
return(mark_df)
}
concat_ch = function (ch_matrix, cov_matrix){
  # Collapse each row of the capture-history matrix into a single string
  # (e.g. c(0, 1, 2) -> "012") and bind on the covariates plus the trailing
  # semicolon column required by Program MARK input files.
  # Returns a character matrix with columns: encounters, <covariates>, semicol.
  ch_df <- as.data.frame(ch_matrix)
  encounters <- do.call(paste0, ch_df)  # paste the columns element-wise, no separator
  semicol <- rep(";", nrow(ch_df))
  mark_df <- cbind(encounters, cov_matrix, semicol)
  return(mark_df)
}
count_months = function (data, years) {
  # Number of distinct trapping months recorded in each requested year.
  # Returns a numeric vector aligned with `years` (0 for years with no rows).
  months_per_yr = vapply(years, function(yr) {
    length(unique(data$mo[which(data$yr == yr)]))
  }, numeric(1))
  return(months_per_yr)
}
mean_win_yr_occ = function (data, years, uniq_mos){
  # Mean proportion of sampled months per year in which the species appears,
  # rounded to 4 decimals. uniq_mos[y] is the number of months actually
  # trapped in years[y]; years with no captures at all are excluded from the
  # average entirely (they do not count as zeros).
  yearly_props = c()
  for (i in seq_along(years)) {
    captures = data[which(data$yr == years[i]), ]
    if (nrow(captures) > 0) {
      yearly_props = c(yearly_props, length(unique(captures$mo)) / uniq_mos[i])
    }
  }
  return(round(mean(yearly_props), 4))
}
mean_win_yr_alldat = function (data, years, uniq_mos){
  # Mean within-year occupancy for a species: for each year with at least one
  # capture, the fraction of that year's sampled months (uniq_mos[y]) in which
  # the species was caught; averaged over those years, rounded to 4 decimals.
  # Capture-free years are excluded from the average (not counted as zeros).
  # NOTE: flagged in the original as possibly no longer used by
  # rodent_wrapper; it duplicates mean_win_yr_occ.
  # (Removed the unused `mos = c(1:12)` local from the original.)
  proportion_mos = c()
  for (y in 1:length (years)){
    yr_data = subset(data, yr == years[y])
    if(nrow(yr_data) > 0) { #don't use years where it wasn't captured
      m = length(unique(yr_data$mo))/uniq_mos[y]
      proportion_mos = append(proportion_mos, m)
    }
  }
  months = round(mean(proportion_mos),4)
  return (months)
}
mean_mo_repro = function (femaledata, years){
#returns the proportion of females that are reproductive (nipples enlarged - E - red - R- or both - B - or pregnant - P-)
#on average in a given month across all the years. Only looks at data during years and months in which the species is present.
## Output: data frame with columns species, month, proprepro -- note that the
## cbind() of character and numeric vectors below coerces ALL columns to
## character; downstream consumers must convert back.
month = c(1:12)
## NOTE(review): column 9 of femaledata is assumed to hold the species code -- confirm
species = rep(femaledata[1,9],12)
proprepro = c()
for (m in 1:length(month)){
mo_repros = c()
for (y in 1:length(years)){
tmp = subset(femaledata, yr == years[y] & mo == month[m])
if (nrow(tmp) > 0){
num_females = nrow(tmp)
repro = subset(tmp, nipples == "E" | nipples == "B" | nipples == "R" | pregnant == "P")
prop_repro = nrow(repro)/num_females
mo_repros = append(mo_repros, prop_repro)
}
else {
## NA (not 0) when no females were caught that month/year, so absent
## months do not drag the mean down (na.rm below skips them)
mo_repros = append(mo_repros, NA)
}
}
avg_mo = round(mean(mo_repros, na.rm = T),4)
proprepro = append(proprepro, avg_mo)
}
avg_r_mo = as.data.frame(cbind(species, month, proprepro))
return(avg_r_mo)
}
mo_repro = function (femaledata){
#returns the proportion of females that are reproductive (nipples enlarged - E - red - R- or both - B - or pregnant - P-)
#in each year. Only looks at data during years and months in which the species is present.
## Output: one row per (year, month) with proprepro = proportion of captured
## females in reproductive condition (NA when no females were caught that
## month), the number of females, and the species code.
mos = c(1:12)
years = sort(unique(femaledata$yr)) #only look at data during years in which the species is present
## NOTE(review): column 9 of femaledata is assumed to hold the species code -- confirm
species = femaledata[1,9]
r_mo_df =data.frame("year" = 1, "month" = 1, "proprepro" = 1, "numfemales" = 1, "species" = 1)
for (m in 1:length(mos)){
for (y in 1:length(years)){
tmp = subset(femaledata, yr == years[y] & mo == mos[m])
if (nrow(tmp) > 0){
num_females = nrow(tmp)
repro = subset(tmp, nipples == "E" | nipples == "B" | nipples == "R" | pregnant == "P")
prop_repro = round(nrow(repro)/num_females, 4)
mo_repros = c(years[y], mos[m], prop_repro, num_females, species)
}
else {
num_females = 0
mo_repros = c(years[y], mos[m], NA, num_females, species)
}
## c() above coerces the whole row to character when species is a string;
## rbind grows the frame row by row, and the dummy initializer row is
## stripped on return
r_mo_df = rbind(r_mo_df, mo_repros)
}
}
return(r_mo_df[-1,])
}
count_repro = function(reprodata){
  # Number of distinct reproductive events in an individual's reproductive
  # capture record: consecutive trapping periods count as ONE ongoing event;
  # a gap of more than one period in `reprodata$period` starts a new one.
  # A record of zero or one row therefore counts as a single event (rh = 1),
  # matching the original counter's starting value.
  # (Vectorized with diff(); replaces the original element-by-element loop.)
  rh = 1 + sum(diff(reprodata$period) > 1)
  return(rh)
}
indiv_repro = function (femaledata){
  # Per-female, per-year counts of unique reproduction events.
  #
  # For each individual female (by tag), captures are ordered chronologically;
  # within each year her reproductive captures (nipples E/B/R, or pregnant P --
  # a swollen vagina alone does NOT count) are extracted and count_repro()
  # tallies the number of distinct reproductive bouts (separated by gaps in
  # the trapping-period record).
  #
  # Returns a data frame with columns tag, year, num_reprod: one row per
  # female-year, as the header comment always described.
  #
  # BUG FIX: the record-building lines sat OUTSIDE the year loop, so only the
  # LAST year of each female was ever written out (earlier years' counts were
  # silently overwritten). They now execute once per year.
  tags = unique(femaledata$tag)
  reprod_df = data.frame("tag" = 1, "year" = 1, "num_reprod" = 1)
  for (t in 1:length(tags)) {
    indiv = subset(femaledata, tag == tags[t]) #get individual data
    indiv = indiv[order(indiv$period),] #order chronologically
    years = sort(unique(indiv$yr))
    for (y in 1:length(years)){
      tmp = subset(indiv, yr == years[y])
      #count as reproductive if pregnant or nursing, NOT if swollen vagina
      repro = subset(tmp, nipples == "E" | nipples == "B" | nipples == "R" | pregnant == "P")
      if (nrow(repro) > 0){
        numreprod = count_repro(repro)
      }
      else {
        numreprod = 0
      }
      # record this female-year INSIDE the year loop (see BUG FIX note above)
      data = c(tags[t], years[y], numreprod)
      reprod_df = rbind(reprod_df, data)
    }
  }
  return (reprod_df[-1,])
}
allyrs_abun = function(spdata, years){
  # Yearly abundance of a species: number of unique tagged individuals
  # captured in each year of `years`. Returns a numeric vector aligned with
  # `years`; years with no captures yield 0.
  # NOTE(review): the FIRST column of spdata is assumed to be the year, as
  # elsewhere in this module -- confirm against the data layout.
  # BUG FIX: the empty-year test was `length(dat) > 0`, which counts a data
  # frame's COLUMNS and is therefore always true; it only produced the right
  # answer because length(unique(...)) of an empty column happens to be 0.
  # nrow() now tests for captured rows explicitly.
  abun = c()
  for (y in 1:length(years)){
    dat = subset(spdata, spdata[,1] == years[y])
    if (nrow(dat) > 0) {
      indivs = sort(unique(dat$tag)) #grab unique individuals by tag ID
      abun = append(abun, length(indivs)) #count the number of unique tags
    }
    else { abun = append(abun, 0)}
  }
  return (abun)
}
# pairs plot functions
panel.hist <- function(x, ...){
  # Diagonal panel for pairs(): draws a histogram of x, rescaled so the
  # tallest bar has height 1 within the panel's own coordinate system.
  old_usr <- par("usr"); on.exit(par(old_usr))
  par(usr = c(old_usr[1:2], 0, 1.5))
  hst <- hist(x, plot = FALSE)
  brk <- hst$breaks
  n_brk <- length(brk)
  bar_h <- hst$counts / max(hst$counts)
  rect(brk[-n_brk], 0, brk[-1], bar_h, col = "black", ...)
}
panel.cor <- function(x, y, digits = 2, prefix = "", cex.cor, ...){
  # Off-diagonal panel for pairs(): prints |cor(x, y)| in the panel center.
  # cex.cor is kept in the signature for interface compatibility but the
  # drawn size is fixed at 2 (as before).
  usr <- par("usr"); on.exit(par(usr))
  par(usr = c(0, 1, 0, 1))
  r <- abs(cor(x, y))
  txt <- format(c(r, 0.123456789), digits = digits)[1] # pad for stable width
  txt <- paste0(prefix, txt)
  # FIX: removed the dead default `cex.cor <- 0.8/strwidth(txt)` -- it
  # required an open graphics device yet was never used, since the text was
  # always drawn at cex = 2. Behavior of the drawn panel is unchanged.
  text(0.5, 0.5, txt, cex = 2)
}
#-----------------------------------------------OLD CODE
# noplacelikehome = function (dat, prd, exclosures, breakpoint){
# ### Create a set of MARK capture histories by home vs away from home
# # Creates a movement history to be used in Mark. Matrix is filled in with zeroes (not captured) and later filled in
# ## 1 (stayed home), and 2 (away from home).
# ## Home is determined using the mean + 1 sd of the logged data.
# # guild (1 = granivore, 2 = folivore, 3 = carnivore)
# # species (assigned using function enumerate_species)
# # status (1 = core, 2 = intermediate, 3 = transient)
#
# tags = unique(dat$tag)
# capture_history = matrix(0, nrow = length(tags), ncol = length(prd))
# covariates = matrix(0, nrow = length(tags), ncol = 7)
# colnames(covariates) = c("male", "female", "unidsex", "sd_mass", "guild", "species", "status")
# group = c(1,2,3) #represent the "group" (core, transient, intermediate)
#
# # finds the average of all the masses for all non-pregnant adults in the species, as a baseline.
# # remove juveniles and pregnant females for adult mass estimation
# adult_dat = dat[which(dat$reprod != "J" & dat$pregnant != "P"),]
# mean_mass = mean(adult_dat$wgt, na.rm = TRUE)
# sd_mass = sd(adult_dat$wgt, na.rm = TRUE)
#
# # since data is imported by species, we only need to check the first row of data to grab the species name and decide what guild it is in
# # record guild in col 5 of covariates
# covariates[,5] = feeding_guild(dat[1,]$species)
#
# # record species in col 6 of covariates
# covariates[,6] = enumerate_species(dat[1,]$species)
#
# # record hypothesized status in col 7 of covariates
# covariates[,7] = temporal_status(dat[1,]$species)
#
# #loop through each tag to get individual-level data
# for (t in 1:length(tags)) {
# ind_dat = dat[which(dat$tag == tags[t]),] #get data for indiv with tag t
# ind_dat = ind_dat[order(ind_dat$period),] #order chronologically
#
# #mark sex in Male, Female, or Unidentified columns of Covariates
# sex = ind_dat[1,]$sex
# if (sex == "M") { covariates[t,1] = 1 }
# else if (sex == "F") { covariates[t,2] = 1 }
# else { covariates[t,3] = 1 }
#
# # record standard deviations away from species average mass as another Covariate
# covariates[t,4] = sd_avg_mass(ind_dat, mean_mass, sd_mass)
#
# p1 = min(ind_dat$period) # record first capture period for the individual
# index = match(p1, prd) # match the period with the index number for the list of periods (will correspond to col num in matrix)
# capture_history[t,index] = 1 #mark first capture with 1 ("home")
#
# for (i in 1:nrow(ind_dat)){ #record capture history data
#
# if (i+1 <= nrow(ind_dat)){
# meters = sqrt((ind_dat[i,8]-ind_dat[i+1,8])**2 + (ind_dat[i,7]-ind_dat[i+1,7])**2)
# pnext = ind_dat[i+1,]$period #next capture period, where the distance will be recorded in the matrix (first capture period is always marked as "home")
#
# if (meters <= breakpoint) {dist = 1} #captured close to "home"
#
# else if (meters > breakpoint) {dist = 2} #captured far from "home"
#
# #was it captured on an exclosure? If yes, remove from study at this point.
# if (ind_dat[i+1,]$plot %in% exclosures) {
# covariates[t,6] = covariates[t,6] * -1 }
#
# #was it found dead or was it removed from the plot? If yes, remove from study at this point.
# if (ind_dat[i+1,]$note5 %in% list("D", "R")) {
# covariates[t,6] = covariates[t,6] * -1 }
#
# index = match(pnext, prd)
# capture_history[t,index] = dist #mark subsequent captures
# }
# }
# }
# mark_df = concat_ch(capture_history, covariates)
# return(mark_df)
# }
| /movement_fxns.r | permissive | inbaltiano/portal-rodent-dispersal | R | false | false | 29,961 | r | # Module containing functions for looking at individual rodent data pertaining to movement
id_unknowns = function(dat, tag_col){
  # Replace blank or "0" tags with unique 7-digit placeholder IDs so untagged
  # captures never collide with real tag numbers.
  # The ID given to row i is 1000000 + i, matching the original row-by-row
  # counter (which advanced on every row, assigned or not).
  needs_id <- which(dat[[tag_col]] %in% c("", "0"))
  dat[needs_id, tag_col] <- 1000000 + needs_id
  return(dat)
}
starred_tags = function(dat, tags, spp_col, tag_col){
  # Resolve reused tag IDs flagged with "*" in note2.
  # A "*" marks the row where a tag was newly applied, i.e. the start of a
  # DIFFERENT individual reusing the same physical tag. Each segment of
  # captures between "*" breaks gets a fresh ID "<oldtag><counter>s"
  # ("s" = split on star) so downstream code treats the segments separately.
  # Only short (ear/toe) tags are examined; PIT tags are assumed unique.
  # Returns dat with the tag column rewritten.
  #Automate checking the flagged data for where the individual breaks should be
  #check for *, which indicates a new tag
  #tags with multiple rows are sorted by species, then checked for *
  #if a * exists, then each time it is given a new unique tag, that ends with "s" for "star" (from note2 column)
  numcount = 1
  for (t in 1:length(tags)){
    #only run on ear and toe tags, pit tags are very unlikely to be duplicated
    #NOTE: there are some 6-character toe tags (e.g.1200DM, how to deal with these?)
    if (nchar(tags[t]) < 6){
      tmp <- which(dat$tag == tags[t])
      # if indiv was captured multiple times
      if (nrow(dat[tmp,]) > 1) {
        # check num species recorded. If more than one, does data look OK if separated on species?
        spp_list = unique(dat[tmp,spp_col])
        for (sp in 1:length(spp_list)) {
          tmp2 = which(dat$tag == tags[t] & dat$species == spp_list[sp])
          isnew = as.vector(dat[tmp2,]$note2)
          if ("*" %in% isnew) {
            #print(dat[tmp2,])
            rowbreaks = which(isnew == "*", arr.in=TRUE) #find rows where * indicates a new tag
            for (r in 1:length(rowbreaks)){
              if (r == 1) {
                #GIVE an ID up to the first *
                newtag = paste(tags[t], numcount, "s", sep = "") #make a new tag to keep separate
                # NOTE(review): `1:rowbreaks[r]-1` parses as (1:rowbreaks[r]) - 1,
                # i.e. 0:(rowbreaks[r]-1); the 0 index is silently dropped, so
                # this happens to select rows 1..(break-1) as intended.
                dat[tmp2,][1:rowbreaks[r]-1, tag_col] = newtag
                numcount = numcount + 1
                #AND an ID to everything after the first * (the loop should take care of the next set and so on)
                newtag = paste(tags[t], numcount, "s", sep = "") #make a new tag to keep separate
                dat[tmp2,][rowbreaks[r]:nrow(dat[tmp2,]),tag_col] = newtag
                numcount = numcount + 1
              }
              else if (r > 1) {
                #GIVE an ID to everything after the next *
                newtag = paste(tags[t], numcount, "s", sep = "") #make a new tag to keep separate
                dat[tmp2,][rowbreaks[r]:nrow(dat[tmp2,]),tag_col] = newtag
                numcount = numcount + 1
              }
            }
          }
        }
      }
    }
  }
  return(dat)
}
is_dead = function(dat, tags, spp_col, tag_col){
  #checks note5 for "D", which indicated a dead rat.
  #by definition, all captures with the same tagID afterwards, must be a different individual
  #assign these captures with a new tag ID that ends with 'm' for 'mortality'.
  # Capture histories are split at each "D" row: the segment up to and
  # including the death keeps one new "<tag><n>m" ID, and later captures of
  # the same tag get the next "<tag><n>m" ID.
  # NOTE(review): the `newtagvector` locals below are computed but never
  # used -- every assignment writes `newtag` directly. Left as-is (doc-only pass).
  numcount = 1
  for (t in 1:length(tags)){
    tmp <- which(dat$tag == tags[t])
    # if indiv was captured multiple times
    if (nrow(dat[tmp,]) > 1) {
      # check num species recorded. If more than one, does data look OK if separated on species?
      spp_list = unique(dat[tmp,spp_col])
      for (sp in 1:length(spp_list)) {
        tmp2 = which(dat$tag == tags[t] & dat$species == spp_list[sp])
        isdead = as.vector(dat[tmp2,]$note5)
        if ("D" %in% isdead) {
          rowbreaks = which(isdead == "D", arr.in=TRUE) #find rows where D indicates a dead individual
          endrow = nrow(dat[tmp2,])
          #print (endrow)
          for (r in 1:length(rowbreaks)){
            if (r == 1) {
              if (rowbreaks[r] == endrow) {
                # death on the final capture: one new ID for the whole history
                newtag = paste(tags[t], numcount, "m", sep = "") #make a new tag to keep separate
                numrows = nrow(dat[tmp2,][1:rowbreaks[r],])
                newtagvector = as.vector(rep(newtag, numrows))
                dat[tmp2,][1:rowbreaks[r], tag_col] = newtag
                numcount = numcount + 1
                #print(dat[tmp2,]
              }
              else{
                #GIVE an ID up to the first "D"
                newtag = paste(tags[t], numcount, "m", sep = "") #make a new tag to keep separate
                numrows = nrow(dat[tmp2,][1:rowbreaks[r],])
                newtagvector = as.vector(rep(newtag, numrows))
                dat[tmp2,][1:rowbreaks[r], tag_col] = newtag
                numcount = numcount + 1
                #print(dat[tmp2,])
                #AND an ID to everything after the first "D" (the loop should take care of the next set and so on)
                startrow = rowbreaks[r] + 1
                newtag = paste(tags[t], numcount, "m", sep = "") #make a new tag to keep separate
                numrows = nrow(dat[tmp2,][(startrow:endrow),])
                newtagvector = as.vector(rep(newtag, numrows))
                dat[tmp2,][(startrow:endrow),tag_col] = newtag
                numcount = numcount + 1
              }
            }
            else if (r > 1) {
              if (rowbreaks[r] == endrow) {
                break
              }
              else{
                #print (t)
                #GIVE an ID to everything after the next "D"
                startrow = rowbreaks[r] + 1
                newtag = paste(tags[t], numcount, "m", sep = "") #make a new tag to keep separate
                numrows = nrow(dat[tmp2,][(startrow:endrow),])
                newtagvector = as.vector(rep(newtag, numrows))
                dat[tmp2,][(startrow:endrow),tag_col] = newtag
                numcount = numcount + 1
              }
            }
          }
        }
      }}}
  return(dat)
}
is_duplicate_tag = function(dat, tags, sex_col, spp_col, tag_col){
  # check the min to max year for a given tag.
  # If > 4, considered suspicious
  # If multiple species, considered suspicious
  # If adequately resolved, given a new unique tag number, that ends with d for "duplicate"
  # returns a list with 2 elements [1] altered data, [2] flagged data
  # Resolution rules (per species within a suspicious tag):
  #   * exact duplicate rows -> flagged "sameprd" (same tag in same period)
  #   * Dipodomys (DO/DM/DS) are long-lived: a year span < 5 is accepted and
  #     renamed "<tag><n>d"; otherwise flagged "year"
  #   * other genera: span < 3 accepted and renamed; otherwise flagged "year"
  # NOTE(review): column 1 of dat is assumed to be the year, and numcount
  # starts at 100, presumably to keep "d" IDs distinct from the "s"/"m"
  # series -- confirm both against the data layout.
  numcount = 100
  flagged_rats = data.frame("tag"=1, "reason"=1, "occurrences"=1)
  outcount = 0
  for (t in 1:length(tags)){
    #only run on ear and toe tags, pit tags are very unlikely to be duplicated
    if (nchar(tags[t]) < 6){
      tmp <- which(dat$tag == tags[t])
      # if indiv was captured multiple times
      if (nrow(dat[tmp,]) > 1) {
        #more than 3 years between recaptures? Rodents are short-lived.
        if (max(dat[tmp,1]) - min(dat[tmp,1]) >= 3){
          # check num species recorded. If more than one, does data look OK if separated on species?
          spp_list = unique(dat[tmp,spp_col])
          for (sp in 1:length(spp_list)) {
            tmp2 = which(dat$tag == tags[t] & dat$species == spp_list[sp])
            #Check for duplicate tags in the same period and same species. This likely indicates multiple individuals with the same tag.
            if(anyDuplicated(dat[tmp2,]) > 0) {
              outcount = outcount + 1
              flagged_rats[outcount,] <- c(tags[t], "sameprd", nrow(dat[tmp,]))
            }
            #Dipodomys are long-lived. Raise the threshold for these indivs
            if(spp_list[sp] %in% list("DO", "DM", "DS")){
              if (max(dat[tmp2,1]) - min(dat[tmp2,1]) < 5) {
                newtag = paste(tags[t], numcount, "d", sep = "") #make a new tag to keep separate
                dat[tmp2,tag_col] = newtag
                numcount = numcount + 1
              }
              else {
                outcount = outcount + 1
                flagged_rats[outcount,] <- c(tags[t], "year", nrow(dat[tmp,]))
                #print(dat[tmp2,])
              }
            }
            #Other genera are very short-lived. Flag data if same individual appears to occur >= 3 years.
            else {
              if(max(dat[tmp2,1]) - min(dat[tmp2,1]) < 3) {
                newtag = paste(tags[t], numcount, "d", sep = "") #make a new tag to keep separate
                dat[tmp2,tag_col] = newtag
                numcount = numcount + 1
              }
              else {
                #print(dat[tmp2,])
                outcount = outcount + 1
                flagged_rats[outcount,] <- c(tags[t], "year", nrow(dat[tmp,]))
              }
            }
          }
        }}}}
  info = list(data = dat, bad = flagged_rats)
  return (info)
}
same_period = function(dat, tags){
  # Flag tags caught more than once within a single trapping period: the same
  # physical tag on several animals at once means the records are questionable.
  # Returns data.frame(tag, reason, occurrences), one row per offending tag,
  # reporting the FIRST period found with duplicates. If nothing is flagged
  # the numeric dummy initialization row is returned, as in the sibling
  # QC helpers.
  flagged_rats = data.frame("tag" = 1, "reason" = 1, "occurrences" = 1)
  outcount = 0
  for (tg in tags) {
    caps <- dat[which(dat$tag == tg), ]
    if (nrow(caps) > 1) {
      for (prd in unique(caps$period)) {
        hits <- dat[which(dat$tag == tg & dat$period == prd), ]
        if (nrow(hits) > 1) {
          outcount <- outcount + 1
          flagged_rats[outcount, ] <- c(tg, "sameprd", nrow(hits))
          break
        }
      }
    }
  }
  return(flagged_rats)
}
find_bad_data2 = function(dat, tags, sex_col, spp_col){
  # QC check: flag tags whose captures disagree on sex or on species.
  # Returns data.frame(tag, reason, occurrences) with reason "sex" or "spp";
  # a tag inconsistent in both ways is flagged twice (sex first), and
  # occurrences is the individual's total capture count. Unflagged runs
  # return the numeric dummy initialization row, as in the sibling helpers.
  flagged_rats = data.frame("tag" = 1, "reason" = 1, "occurrences" = 1)
  outcount = 0
  for (tg in tags) {
    caps <- dat[which(dat$tag == tg), ]
    n_caps <- nrow(caps)
    if (n_caps > 1) { # only recaptured individuals can be inconsistent
      if (length(unique(caps[, sex_col])) > 1) {
        outcount <- outcount + 1
        flagged_rats[outcount, ] <- c(tg, "sex", n_caps)
      }
      if (length(unique(caps[, spp_col])) > 1) {
        outcount <- outcount + 1
        flagged_rats[outcount, ] <- c(tg, "spp", n_caps)
      }
    }
  }
  return(flagged_rats)
}
subsetDat = function(dataset){
  ## Clean the capture data before analysis:
  ##  * individuals with inconsistent sex are kept, but sex is set to "U"
  ##  * individuals with inconsistent species are dropped entirely
  ##  * negative and incompletely-sampled trapping periods are dropped
  ## Columns are addressed positionally: 10 = sex, 9 = species.
  tags = as.character(unique(dataset$tag)) # get list of unique tags
  flags = find_bad_data2(dataset, tags, 10, 9) # list of flagged data
  #first, mark all uncertain or unmarked sex as "U" for unknown
  badsextags = unique(flags[which(flags$reason == "sex"),1])
  dataset[which(dataset$tag %in% badsextags),10] = "U"
  dataset[which(dataset$sex %in% c("", "P", "Z")),10] = "U" #get rid of other weird typos in sex column
  #get rid of results where we don't know the species for sure
  badspptags = unique(flags[which(flags$reason == "spp"), 1])
  # BUG FIX: the old code used dataset[-which(...),]; when no tags were
  # flagged, which() is empty and negative indexing with an empty vector
  # selects ZERO rows, silently emptying the dataset. Logical indexing
  # handles both the empty and non-empty cases correctly.
  dataset = dataset[!(dataset$tag %in% badspptags),] #delete rows where species is unsure
  #don't use negative period numbers and periods with only one day of trapping
  #TODO: add periods from 1980-1999 that were incompletely sampled
  dataset = subset(dataset, period != 111 & period != 237 & period != 241 &
                     period != 267 & period != 277 & period != 278 & period != 283 &
                     period != 284 & period != 300 & period != 311 & period != 313 &
                     period != 314 & period != 318 & period != 321 & period != 323 &
                     period != 337 & period != 339 & period != 344 & period != 351)
  return (dataset)
}
distance_moved = function (data, tags) {
  # Euclidean distance between successive captures of each individual, pooled
  # over all individuals in tag order.
  # Coordinates are read positionally: column 7 and column 8 hold the two
  # stake/grid coordinates.
  # Returns a numeric vector with one entry per consecutive capture pair.
  distance = as.numeric()
  for (tg in tags) {
    caps <- data[which(data$tag == tg), ]
    caps <- caps[order(caps$period), ] # chronological order
    if (nrow(caps) > 1) {
      steps <- sqrt(diff(caps[, 8])^2 + diff(caps[, 7])^2)
      distance <- c(distance, steps)
    }
  }
  return(distance)
}
sd_avg_mass = function (ind_dat, spp_mean, spp_sd) {
  # Number of species-level standard deviations separating an individual's
  # mean capture weight from the species mean, rounded to 4 dp.
  # When the individual has no recorded weights at all, 0 is returned as a
  # placeholder so the record is kept -- note this is NOT a true zero
  # (denoted appropriately in methods).
  mean_wgt <- mean(ind_dat$wgt, na.rm = TRUE)
  if (is.na(mean_wgt)) {
    return(0)
  }
  round((mean_wgt - spp_mean) / spp_sd, 4)
}
feeding_guild = function(speciesname) {
  # Map a species alpha code to its feeding guild, per the literature:
  #   1 = granivore (heteromyids and other seed eaters)
  #   2 = folivore
  #   3 = insectivore/carnivore (everything else, e.g. OT, OL)
  if (speciesname %in% list("DO", "DM", "DS", "PB", "PP", "PF", "PH", "PI",
                            "PE", "PM", "PL", "RM", "RF", "RO", "BA")) {guild = 1}
  # folivores == 2
  # BUG FIX: this branch previously assigned 3, lumping folivores in with the
  # insectivores and contradicting both the comment above and the guild
  # coding used elsewhere (1 = granivore, 2 = folivore, 3 = carnivore).
  else if (speciesname %in% list("SH", "SF", "SO", "NAO")) {guild = 2}
  # insectivores == 3
  else {guild = 3}
  return(guild)
}
enumerate_species = function(speciesname) {
  # Translate a species alpha code into the numeric code used as input to
  # Program MARK. Transient granivores (PL, PH, PI, RF, RO, BA) share code
  # 10 and the transient folivore SO gets 14, matching the original mapping.
  codes <- c(DO = 1, DM = 2, DS = 3, PB = 4, PP = 5, PF = 6, PE = 7, PM = 8,
             RM = 9,
             PL = 10, PH = 10, PI = 10, RF = 10, RO = 10, BA = 10, # transient G
             NAO = 11, SH = 12, SF = 13,
             SO = 14, # transient F
             OT = 15, OL = 16)
  speciesnum <- codes[[as.character(speciesname)]]
  return(speciesnum)
}
temporal_status = function (speciesname){
  # Hypothesized temporal persistence class for each species, based on
  # across-year and within-year presence over the full 30+ year record
  # (see the corespecies / intermediatespecies / transientspecies variables
  # in the main script): 1 = core, 2 = intermediate, 3 = transient.
  core <- c("OT", "DM", "RM", "NAO", "OL", "PE", "DO", "PP", "PF", "PB")
  intermediate <- c("PM", "SH", "DS", "SF")
  transient <- c("RF", "BA", "PH", "RO", "SO", "PI", "PL")
  if (speciesname %in% core) {
    status <- 1
  } else if (speciesname %in% intermediate) {
    status <- 2
  } else if (speciesname %in% transient) {
    status <- 3
  }
  return(status)
}
noplacelikehome = function (dat, prd, exclosures, breakpoint){
  ### Create a set of MARK capture histories by home vs away from home
  # Creates a movement history to be used in Mark. Matrix is filled in with zeroes (not captured) and later filled in
  # An individual always starts in 1, and is moved to state 2 only if it moves a distance larger than the threshold
  # set by the core species movement distribution. If it is again recaptured at a distance larger than the threshold,
  # it is moved from state 2 back to state 1. This will be used to calculate "transition" (or long distance movement)
  # probability in Rmark.
  ## Home is determined using the mean + 1 sd of the logged data.
  # species - alpha code
  # Returns the output of concat_ch(): one concatenated encounter string per
  # individual plus freq and species covariates and a trailing ";" column.
  tags = unique(dat$tag)
  capture_history = matrix(0, nrow = length(tags), ncol = length(prd))
  covariates = matrix(0, nrow = length(tags), ncol = 2)
  colnames(covariates) = c("freq", "species")
  # fill freq in with 1. 1 indicates a normal capture history, -1 indicates a right-censored capture history
  covariates[,1] = as.numeric(1)
  # since data is imported by species, we only need to check the first row of data to grab the species name and decide what guild it is in
  covariates[,2] = as.character(dat[1,]$species)
  #loop through each tag to get individual-level data
  for (t in 1:length(tags)) {
    ind_dat = dat[which(dat$tag == tags[t]),] #get data for indiv with tag t
    ind_dat = ind_dat[order(ind_dat$period),] #order chronologically
    p1 = min(ind_dat$period) # record first capture period for the individual
    index = match(p1, prd) # match the period with the index number for the list of periods (will correspond to col num in matrix)
    state = 1
    capture_history[t,index] = state #mark first capture with 1 ("home")
    for (i in 1:nrow(ind_dat)){ #record capture history data
      if (i+1 <= nrow(ind_dat)){
        # straight-line distance between consecutive captures; columns 7 and 8
        # hold the two stake/grid coordinates (as in distance_moved)
        meters = sqrt((ind_dat[i,8]-ind_dat[i+1,8])**2 + (ind_dat[i,7]-ind_dat[i+1,7])**2)
        pnext = ind_dat[i+1,]$period #next capture period, where the distance will be recorded in the matrix (first capture period is always marked as "home")
        if (meters <= breakpoint) {dist = state} #captured close to "home": state unchanged
        else if (meters > breakpoint) {
          # long-distance move: toggle between state 1 and state 2
          if (state == 1) {
            dist = 2
          }
          else if (state == 2) {
            dist = 1
          }
        }
        #was it captured on an exclosure? If yes, remove from study at this point.
        if (ind_dat[i+1,]$plot %in% exclosures) {
          covariates[t,1] = as.numeric(covariates[t,1]) * -1 }
        #was it found dead or was it removed from the plot? If yes, remove from study at this point.
        # NOTE(review): each censoring event multiplies freq by -1, so an
        # individual hitting BOTH conditions (or one condition twice) flips
        # back to +1 -- confirm this is the intended censoring behavior.
        if (ind_dat[i+1,]$note5 %in% list("D", "R")) {
          covariates[t,1] = as.numeric(covariates[t,1]) * -1 }
        index = match(pnext, prd)
        capture_history[t,index] = dist #mark subsequent captures
        state = dist
      }
    }
  }
  mark_df = concat_ch(capture_history, covariates)
  return(mark_df)
}
concat_ch = function (ch_matrix, cov_matrix){
  # Collapse a capture-history matrix (one column per trapping period) into
  # the single concatenated encounter string per individual that Program MARK
  # expects, then bind on the covariates and a trailing ";" column.
  # Returns a character matrix with columns: encounters, <covariates>, semicol.
  ch_df <- data.frame(ch_matrix)
  encounters <- do.call(paste, c(ch_df, sep = "")) # e.g. c(1, 0, 2) -> "102"
  semicol <- rep(";", nrow(ch_df))
  mark_df <- cbind(encounters, cov_matrix, semicol)
  return(mark_df)
}
count_months = function (data, years) {
  # Number of distinct months actually trapped in each year (trapping effort
  # varied, so not every year has 12 sampled months).
  # Returns a numeric vector aligned with `years`; 0 for unsampled years.
  vapply(years, function(one_yr) {
    as.numeric(length(unique(data$mo[data$yr == one_yr])))
  }, numeric(1))
}
mean_win_yr_occ = function (data, years, uniq_mos){
  # Mean within-year occupancy for one species: for each year in which the
  # species was captured at all, the fraction of that year's sampled months
  # in which it appeared; those fractions are averaged and rounded to 4 dp.
  # uniq_mos[i] is the number of months actually trapped in years[i].
  fractions <- c()
  for (i in 1:length(years)) {
    caught <- data[which(data$yr == years[i]), ]
    if (nrow(caught) > 0) { # years with no captures are excluded, not zeroed
      fractions <- c(fractions, length(unique(caught$mo)) / uniq_mos[i])
    }
  }
  return(round(mean(fractions), 4))
}
mean_win_yr_alldat = function (data, years, uniq_mos){
  # Mean within-year occupancy (fraction of sampled months present), averaged
  # over the years in which the species was captured; rounded to 4 dp.
  # uniq_mos: number of months sampled in each year (not all months were
  # sampled every year); years: the years of interest.
  # NOTE: functionally identical to mean_win_yr_occ and no longer called from
  # rodent_wrapper -- kept for backward compatibility.
  # FIX: removed the unused local `mos = c(1:12)`.
  proportion_mos = c()
  for (y in 1:length (years)){
    yr_data = subset(data, yr == years[y])
    if(nrow(yr_data) > 0) { #don't use years where it wasn't captured
      m = length(unique(yr_data$mo))/uniq_mos[y]
      proportion_mos = append(proportion_mos, m)
    }
  }
  months = round(mean(proportion_mos),4)
  return (months)
}
mean_mo_repro = function (femaledata, years){
  #returns the proportion of females that are reproductive (nipples enlarged - E - red - R- or both - B - or pregnant - P-)
  #on average in a given month across all the years. Only looks at data during years and months in which the species is present.
  # Months never sampled for this species average to NaN (mean of all-NA).
  # NOTE(review): the species label is read positionally from column 9, and
  # cbind() + as.data.frame() below coerces every output column to character
  # -- confirm both are acceptable to the callers.
  # Returns data.frame(species, month, proprepro) with 12 rows.
  month = c(1:12)
  species = rep(femaledata[1,9],12)
  proprepro = c()
  for (m in 1:length(month)){
    mo_repros = c()
    for (y in 1:length(years)){
      tmp = subset(femaledata, yr == years[y] & mo == month[m])
      if (nrow(tmp) > 0){
        num_females = nrow(tmp)
        repro = subset(tmp, nipples == "E" | nipples == "B" | nipples == "R" | pregnant == "P")
        prop_repro = nrow(repro)/num_females
        mo_repros = append(mo_repros, prop_repro)
      }
      else {
        mo_repros = append(mo_repros, NA)
      }
    }
    # FIX: use TRUE rather than the reassignable shorthand T
    avg_mo = round(mean(mo_repros, na.rm = TRUE),4)
    proprepro = append(proprepro, avg_mo)
  }
  avg_r_mo = as.data.frame(cbind(species, month, proprepro))
  return(avg_r_mo)
}
mo_repro = function (femaledata){
  #returns the proportion of females that are reproductive (nipples enlarged - E - red - R- or both - B - or pregnant - P-)
  #in each year. Only looks at data during years and months in which the species is present.
  # Months with no female captures get proprepro NA and numfemales 0.
  # NOTE(review): the species label is read positionally from column 9 --
  # confirm against the data layout before reuse.
  # Returns data.frame(year, month, proprepro, numfemales, species); the
  # dummy initialization row is dropped on return.
  mos = c(1:12)
  years = sort(unique(femaledata$yr)) #only look at data during years in which the species is present
  species = femaledata[1,9]
  r_mo_df =data.frame("year" = 1, "month" = 1, "proprepro" = 1, "numfemales" = 1, "species" = 1)
  for (m in 1:length(mos)){
    for (y in 1:length(years)){
      tmp = subset(femaledata, yr == years[y] & mo == mos[m])
      if (nrow(tmp) > 0){
        num_females = nrow(tmp)
        repro = subset(tmp, nipples == "E" | nipples == "B" | nipples == "R" | pregnant == "P")
        prop_repro = round(nrow(repro)/num_females, 4)
        mo_repros = c(years[y], mos[m], prop_repro, num_females, species)
      }
      else {
        num_females = 0
        mo_repros = c(years[y], mos[m], NA, num_females, species)
      }
      r_mo_df = rbind(r_mo_df, mo_repros)
    }
  }
  return(r_mo_df[-1,]) # drop the dummy initialization row
}
count_repro = function(reprodata){
  # Count the number of distinct reproductive events for one individual.
  # A new event is assumed to start whenever consecutive reproductive captures
  # are separated by more than one trapping period (a gap in the record).
  # NOTE(review): assumes reprodata$period is already sorted chronologically
  # by the caller -- confirm before reusing elsewhere.
  events <- 1
  if (nrow(reprodata) > 1) {
    period_gaps <- diff(reprodata$period)
    events <- events + sum(period_gaps > 1)
  }
  return(events)
}
indiv_repro = function (femaledata){
  # Build a per-female, per-year table of the number of distinct reproductive
  # events observed in the trapping record.
  # A capture counts as reproductive when nipples are "E"nlarged, "R"ed or
  # "B"oth, or the female is "P"regnant; a swollen vagina alone does NOT count.
  # Returns data.frame(tag, year, num_reprod); the dummy init row is dropped.
  tags = unique(femaledata$tag)
  reprod_df = data.frame("tag" = 1, "year" = 1, "num_reprod" = 1)
  for (t in 1:length(tags)) {
    indiv = subset(femaledata, tag == tags[t]) #get individual data
    indiv = indiv[order(indiv$period),]        #order chronologically
    years = sort(unique(indiv$yr))
    for (y in 1:length(years)){
      tmp = subset(indiv, yr == years[y])
      repro = subset(tmp, nipples == "E" | nipples == "B" | nipples == "R" | pregnant == "P")
      if (nrow(repro) > 0){
        numreprod = count_repro(repro)
      }
      else {
        numreprod = 0
      }
      # BUG FIX: record a row for EVERY year the female was captured. These
      # two lines previously sat outside the year loop, so only the final
      # year per individual was appended to the output.
      data = c(tags[t], years[y], numreprod)
      reprod_df = rbind(reprod_df, data)
    }
  }
  return (reprod_df[-1,]) # drop the dummy initialization row
}
allyrs_abun = function(spdata, years){
  # Yearly abundance for one species across the time series.
  # spdata: capture data whose FIRST column is the year; individuals are
  #         identified by the `tag` column.
  # years:  vector of years to report (0 for years with no captures).
  # Returns a numeric vector of counts of unique individuals, one per year.
  abun = c()
  for (y in 1:length(years)){
    dat = subset(spdata, spdata[,1] == years[y])
    # BUG FIX: length() of a data.frame is its COLUMN count, so the old test
    # `length(dat) > 0` was always true and the else branch was dead code
    # (the result was still right only because length(unique(...)) of an
    # empty column is 0). Test the row count instead.
    if (nrow(dat) > 0) {
      indivs = sort(unique(dat$tag)) #grab unique individuals by tag ID
      abun = append(abun, length(indivs)) #count the number of unique tags
    }
    else { abun = append(abun, 0)}
  }
  return (abun)
}
# pairs plot functions
panel.hist <- function(x, ...){
  # Diagonal panel for pairs(): draws a histogram of x, rescaled so the
  # tallest bar has height 1 within the panel's own coordinate system.
  old_usr <- par("usr"); on.exit(par(old_usr))
  par(usr = c(old_usr[1:2], 0, 1.5))
  hst <- hist(x, plot = FALSE)
  brk <- hst$breaks
  n_brk <- length(brk)
  bar_h <- hst$counts / max(hst$counts)
  rect(brk[-n_brk], 0, brk[-1], bar_h, col = "black", ...)
}
panel.cor <- function(x, y, digits = 2, prefix = "", cex.cor, ...){
  # Off-diagonal panel for pairs(): prints |cor(x, y)| in the panel center.
  # cex.cor is kept in the signature for interface compatibility but the
  # drawn size is fixed at 2 (as before).
  usr <- par("usr"); on.exit(par(usr))
  par(usr = c(0, 1, 0, 1))
  r <- abs(cor(x, y))
  txt <- format(c(r, 0.123456789), digits = digits)[1] # pad for stable width
  txt <- paste0(prefix, txt)
  # FIX: removed the dead default `cex.cor <- 0.8/strwidth(txt)` -- it
  # required an open graphics device yet was never used, since the text was
  # always drawn at cex = 2. Behavior of the drawn panel is unchanged.
  text(0.5, 0.5, txt, cex = 2)
}
#-----------------------------------------------OLD CODE
# noplacelikehome = function (dat, prd, exclosures, breakpoint){
# ### Create a set of MARK capture histories by home vs away from home
# # Creates a movement history to be used in Mark. Matrix is filled in with zeroes (not captured) and later filled in
# ## 1 (stayed home), and 2 (away from home).
# ## Home is determined using the mean + 1 sd of the logged data.
# # guild (1 = granivore, 2 = folivore, 3 = carnivore)
# # species (assigned using function enumerate_species)
# # status (1 = core, 2 = intermediate, 3 = transient)
#
# tags = unique(dat$tag)
# capture_history = matrix(0, nrow = length(tags), ncol = length(prd))
# covariates = matrix(0, nrow = length(tags), ncol = 7)
# colnames(covariates) = c("male", "female", "unidsex", "sd_mass", "guild", "species", "status")
# group = c(1,2,3) #represent the "group" (core, transient, intermediate)
#
# # finds the average of all the masses for all non-pregnant adults in the species, as a baseline.
# # remove juveniles and pregnant females for adult mass estimation
# adult_dat = dat[which(dat$reprod != "J" & dat$pregnant != "P"),]
# mean_mass = mean(adult_dat$wgt, na.rm = TRUE)
# sd_mass = sd(adult_dat$wgt, na.rm = TRUE)
#
# # since data is imported by species, we only need to check the first row of data to grab the species name and decide what guild it is in
# # record guild in col 5 of covariates
# covariates[,5] = feeding_guild(dat[1,]$species)
#
# # record species in col 6 of covariates
# covariates[,6] = enumerate_species(dat[1,]$species)
#
# # record hypothesized status in col 7 of covariates
# covariates[,7] = temporal_status(dat[1,]$species)
#
# #loop through each tag to get individual-level data
# for (t in 1:length(tags)) {
# ind_dat = dat[which(dat$tag == tags[t]),] #get data for indiv with tag t
# ind_dat = ind_dat[order(ind_dat$period),] #order chronologically
#
# #mark sex in Male, Female, or Unidentified columns of Covariates
# sex = ind_dat[1,]$sex
# if (sex == "M") { covariates[t,1] = 1 }
# else if (sex == "F") { covariates[t,2] = 1 }
# else { covariates[t,3] = 1 }
#
# # record standard deviations away from species average mass as another Covariate
# covariates[t,4] = sd_avg_mass(ind_dat, mean_mass, sd_mass)
#
# p1 = min(ind_dat$period) # record first capture period for the individual
# index = match(p1, prd) # match the period with the index number for the list of periods (will correspond to col num in matrix)
# capture_history[t,index] = 1 #mark first capture with 1 ("home")
#
# for (i in 1:nrow(ind_dat)){ #record capture history data
#
# if (i+1 <= nrow(ind_dat)){
# meters = sqrt((ind_dat[i,8]-ind_dat[i+1,8])**2 + (ind_dat[i,7]-ind_dat[i+1,7])**2)
# pnext = ind_dat[i+1,]$period #next capture period, where the distance will be recorded in the matrix (first capture period is always marked as "home")
#
# if (meters <= breakpoint) {dist = 1} #captured close to "home"
#
# else if (meters > breakpoint) {dist = 2} #captured far from "home"
#
# #was it captured on an exclosure? If yes, remove from study at this point.
# if (ind_dat[i+1,]$plot %in% exclosures) {
# covariates[t,6] = covariates[t,6] * -1 }
#
# #was it found dead or was it removed from the plot? If yes, remove from study at this point.
# if (ind_dat[i+1,]$note5 %in% list("D", "R")) {
# covariates[t,6] = covariates[t,6] * -1 }
#
# index = match(pnext, prd)
# capture_history[t,index] = dist #mark subsequent captures
# }
# }
# }
# mark_df = concat_ch(capture_history, covariates)
# return(mark_df)
# }
|
# Build-time script: generate meta-refresh redirect pages for the rendered
# book site, from the `redirects` mapping in _output.yml.
# NOTE(review): install.packages() at the top of a build script reinstalls
# on every run; consider guarding with requireNamespace().
install.packages("fs")
install.packages("yaml")
# Named list mapping old page names -> new page names.
redirects <- yaml::read_yaml("_output.yml")$redirects
# Write _site/<from>.html containing a meta-refresh that forwards to <to>.html.
make_redirect <- function(from, to){
  html <- sprintf(
    '<head><meta http-equiv="refresh" content="0; URL=%s.html" /></head>',
    to
  )
  dest <- fs::path("_site", from, ext = "html")
  fs::file_create(dest)
  write(html, dest)
}
# Generate one redirect page per entry in the mapping.
mapply(make_redirect, from = names(redirects), to = redirects)
# make_redirect_apps <- function(name, url){
# fs::dir_create(
# fs::path(
# "redirects",
# name
# )
# )
# fls <- fs::path(
# "_site",
# name,
# "index.html"
# )
# fs::file_create(
# fls
# )
# write(file = fls,
# sprintf('<head><meta http-equiv="refresh" content="0; URL=%s" /></head>', url)
# )
# }
# make_redirect_apps("tidytuesday201942", "https://connect.thinkr.fr/tidytuesday201942/")
# make_redirect_apps("hexmake", "https://connect.thinkr.fr/hexmake/")
# make_redirect_apps("minifying", "https://connect.thinkr.fr/minifying/")
# make_redirect_apps("golemhtmltemplate", "https://connect.thinkr.fr/golemhtmltemplate/")
# make_redirect_apps("shinipsumdemo", "https://connect.thinkr.fr/shinipsumdemo/")
# make_redirect_apps("databasedemo", "https://connect.thinkr.fr/databasedemo/")
# make_redirect_apps("graysacle", "https://connect.thinkr.fr/graysacle/")
# make_redirect_apps("bs4dashdemo", "https://connect.thinkr.fr/bs4dashdemo/")
# make_redirect_apps("shinyfuture", "https://connect.thinkr.fr/shinyfuture/")
# Creating the redirect
unlink("redirects", TRUE, TRUE)
# Create redirects/<name>/index.html containing a meta-refresh page that
# forwards the browser to `url`.
make_redirect <- function(name, url){
  # Make sure the per-app directory exists before writing into it.
  app_dir <- fs::path(
    "redirects",
    name
  )
  fs::dir_create(
    app_dir
  )
  index_file <- fs::path(
    "redirects",
    name,
    "index.html"
  )
  fs::file_create(
    index_file
  )
  # write(x, file): the sprintf() result is the content, index_file the target.
  redirect_tag <- sprintf('<head><meta http-equiv="refresh" content="0; URL=%s" /></head>', url)
  write(redirect_tag, file = index_file)
}
make_redirect("tidytuesday201942", "https://connect.thinkr.fr/tidytuesday201942/")
make_redirect("hexmake", "https://connect.thinkr.fr/hexmake/")
make_redirect("minifying", "https://connect.thinkr.fr/minifying/")
make_redirect("golemhtmltemplate", "https://connect.thinkr.fr/golemhtmltemplate/")
make_redirect("shinipsumdemo", "https://connect.thinkr.fr/shinipsumdemo/")
make_redirect("databasedemo", "https://connect.thinkr.fr/databasedemo/")
make_redirect("graysacle", "https://connect.thinkr.fr/graysacle/")
make_redirect("bs4dashdemo", "https://connect.thinkr.fr/bs4dashdemo/")
make_redirect("shinyfuture", "https://connect.thinkr.fr/shinyfuture/")
try({
dirs <- list.dirs(
"redirects"
)
dirs <- dirs[!dirs == "redirects"]
for (i in c(
dirs
)){
fs::dir_copy(
i,
fs::path("_site", basename(i))
)
}
}) | /redirect.R | permissive | zettsu-t/engineering-shiny-book | R | false | false | 2,716 | r | install.packages("fs")
install.packages("yaml")
redirects <- yaml::read_yaml("_output.yml")$redirects
# Write a stub HTML page at _site/<from>.html whose meta-refresh tag
# instantly forwards the browser to <to>.html.
make_redirect <- function(from, to){
  # Zero-second meta refresh pointing at the destination page.
  html <- sprintf(
    '<head><meta http-equiv="refresh" content="0; URL=%s.html" /></head>',
    to
  )
  dest <- fs::path("_site", from, ext = "html")
  # fs::file_create() leaves an existing file unchanged; write() then
  # overwrites it with the redirect markup.
  fs::file_create(dest)
  write(html, dest)
}
mapply(make_redirect, from = names(redirects), to = redirects)
# make_redirect_apps <- function(name, url){
# fs::dir_create(
# fs::path(
# "redirects",
# name
# )
# )
# fls <- fs::path(
# "_site",
# name,
# "index.html"
# )
# fs::file_create(
# fls
# )
# write(file = fls,
# sprintf('<head><meta http-equiv="refresh" content="0; URL=%s" /></head>', url)
# )
# }
# make_redirect_apps("tidytuesday201942", "https://connect.thinkr.fr/tidytuesday201942/")
# make_redirect_apps("hexmake", "https://connect.thinkr.fr/hexmake/")
# make_redirect_apps("minifying", "https://connect.thinkr.fr/minifying/")
# make_redirect_apps("golemhtmltemplate", "https://connect.thinkr.fr/golemhtmltemplate/")
# make_redirect_apps("shinipsumdemo", "https://connect.thinkr.fr/shinipsumdemo/")
# make_redirect_apps("databasedemo", "https://connect.thinkr.fr/databasedemo/")
# make_redirect_apps("graysacle", "https://connect.thinkr.fr/graysacle/")
# make_redirect_apps("bs4dashdemo", "https://connect.thinkr.fr/bs4dashdemo/")
# make_redirect_apps("shinyfuture", "https://connect.thinkr.fr/shinyfuture/")
# Creating the redirect
unlink("redirects", TRUE, TRUE)
# Create redirects/<name>/index.html containing a meta-refresh page that
# forwards the browser to `url` (used to publish per-app redirects).
make_redirect <- function(name, url){
  # Make sure the per-app directory exists before writing into it.
  fs::dir_create(
    fs::path(
      "redirects",
      name
    )
  )
  fls <- fs::path(
    "redirects",
    name,
    "index.html"
  )
  fs::file_create(
    fls
  )
  # write(x, file): the sprintf() result matches `x` positionally.
  write(file = fls,
        sprintf('<head><meta http-equiv="refresh" content="0; URL=%s" /></head>', url)
  )
}
make_redirect("tidytuesday201942", "https://connect.thinkr.fr/tidytuesday201942/")
make_redirect("hexmake", "https://connect.thinkr.fr/hexmake/")
make_redirect("minifying", "https://connect.thinkr.fr/minifying/")
make_redirect("golemhtmltemplate", "https://connect.thinkr.fr/golemhtmltemplate/")
make_redirect("shinipsumdemo", "https://connect.thinkr.fr/shinipsumdemo/")
make_redirect("databasedemo", "https://connect.thinkr.fr/databasedemo/")
make_redirect("graysacle", "https://connect.thinkr.fr/graysacle/")
make_redirect("bs4dashdemo", "https://connect.thinkr.fr/bs4dashdemo/")
make_redirect("shinyfuture", "https://connect.thinkr.fr/shinyfuture/")
try({
dirs <- list.dirs(
"redirects"
)
dirs <- dirs[!dirs == "redirects"]
for (i in c(
dirs
)){
fs::dir_copy(
i,
fs::path("_site", basename(i))
)
}
}) |
#########################################################################
# Author : Desiree Wilson
# Date : April 26, 2020
# Purpose: The purpose of this code is
# to see if there is any relationship
# between melatonin-A gene expression and
# sex in metastatic melanoma in the
# GSE65904 dataset. The gene expression
# was measured using the following chip:
#          Illumina Human HT-12 V4.0 BeadChip arrays
# For more info, look at this link:
# https://www.ncbi.nlm.nih.gov/geo/query/acc.cgi?acc=GSE65904
#
########################################################################
#loading in libraries:
library(GEOquery)
library(beadarray)
library(illuminaHumanv4.db)
library(readr)
library(ggplot2)
#another way to obtain GEO dataset; directly from website:
#reference: Page31 of following document:
#https://www.bioconductor.org/packages/release/bioc/vignettes/beadarray/inst/doc/beadsummary.pdf
url <- "https://ftp.ncbi.nlm.nih.gov/geo/series/GSE65nnn/GSE65904/matrix/"
filenm <- "GSE65904_series_matrix.txt.gz"
if(!file.exists("GSE65904_series_matrix.txt.gz")) download.file(paste(url, filenm, sep=""), destfile=filenm)
gse <- getGEO(filename=filenm)
head(exprs(gse))
#corresponding feature data according to the same reference listed above:
summaryData <- as(gse, "ExpressionSetIllumina")
summaryData
# head(fData(summaryData))
# head(pData(summaryData))
# colnames(pData(summaryData))
# modified_colnames <- gsub(":ch1", ".ch1", colnames(pData(summaryData)))
# modified_colnames2 <- gsub("\\(.+\\)", "0_1", modified_colnames)
# modified_colnames3 <- gsub(" ", "_", modified_colnames2)
# colnames(pData(summaryData)) <- modified_colnames3
#I just downloaded the original manifest file from the Illumina website:
#https://support.illumina.com/content/dam/illumina-support/documents/downloads/productfiles/humanht-12/humanht-12_v4_0_r2_15002873_b.txt.zip
#There is exactly 1/ONE probe for melatonin 1A (genesymbol: MTNR1A). So from here
#I am going to do a t-test:
#identifying the location of the illumina probe:
#(per the note above, exactly one HumanHT-12 v4 probe is annotated to MTNR1A)
gs_mtnr1ag_idx <- grep("MTNR1A", fData(summaryData)$ILMN_Gene)
probe_mtnr1a_name <- row.names(fData(summaryData))[gs_mtnr1ag_idx]
#pulling out the illumina probe in the gene expression data:
#NOTE(review): grep() performs a regex match on the probe ID; an exact match
#(`==` or grep(..., fixed = TRUE)) would be safer if one ID can be a
#substring of another -- confirm against the probe naming scheme.
exprs_probe_mtnr1a_idx <- grep(probe_mtnr1a_name, row.names(exprs(summaryData)))
exprs_probe_mtnr1a_dat <- exprs(summaryData)[exprs_probe_mtnr1a_idx,]
#identifying the samples that are missing sex information (recorded as the
#literal string "NA" in this GEO series, not as a real NA):
missing_sex_idx <- which(pData(summaryData)$`gender:ch1` %in% c("NA"))
#need to drop these samples in both the pheno and expression data.
#Guard the negative subscript: x[-integer(0)] selects ZERO elements, so an
#unconditional `[-missing_sex_idx]` would silently drop every sample when
#no sex value is missing.
if (length(missing_sex_idx) > 0) {
  exprs_probe_mtnr1a_dat_filtered <- exprs_probe_mtnr1a_dat[-missing_sex_idx]
  sex_filtered <- pData(summaryData)$`gender:ch1`[-missing_sex_idx]
} else {
  exprs_probe_mtnr1a_dat_filtered <- exprs_probe_mtnr1a_dat
  sex_filtered <- pData(summaryData)$`gender:ch1`
}
#finally performing the t-test: does mean MTNR1A expression differ by sex?
t_test <- t.test(exprs_probe_mtnr1a_dat_filtered ~ sex_filtered)
# print() the htest itself: summary() on an htest object only tabulates the
# list components and hides the statistic, df and p-value.
print(t_test)
#plotting the expression values:
normalized_exprs_ln <- exprs_probe_mtnr1a_dat_filtered
sex <- sex_filtered
mtnr1a_exprs_df <- data.frame(normalized_exprs_ln, sex)
ggplot(mtnr1a_exprs_df, aes(x = sex, y = normalized_exprs_ln)) +
geom_boxplot() +
labs(title = "MTNR1 Normalized Expression"
,subtitle = paste0("p.value = ", t_test$p.value)
)
# test <- lapply(pData(summaryData), function(x){gsub("NA", NA, x)})
# pData(summaryData) <- test
# #performing differential gene expression:
# contrast_mat <-
# limmaResults <- limmaDE(summaryData, "gender.ch1")
# colnames(limmaResults)
| /Scripts/GSE65904dataAnalysis_v2.R | no_license | DesireeWilson/MelatoninA | R | false | false | 3,509 | r | #########################################################################
# Author : Desiree Wilson
# Date : April 26, 2020
# Purpose: The purpose of this code is relationship
# to see if there is any relationship
# between melatonin-A gene expression and
# sex in metastatic melanoma in the
# GSE65904 dataset. The gene expression
# was measured using the following chip:
# Illlumina Human HT-12V4.0 BeadChip arrays
# For more info, look at this link:
# https://www.ncbi.nlm.nih.gov/geo/query/acc.cgi?acc=GSE65904
#
########################################################################
#loading in libraries:
library(GEOquery)
library(beadarray)
library(illuminaHumanv4.db)
library(readr)
library(ggplot2)
#another way to obtain GEO dataset; directly from website:
#reference: Page31 of following document:
#https://www.bioconductor.org/packages/release/bioc/vignettes/beadarray/inst/doc/beadsummary.pdf
url <- "https://ftp.ncbi.nlm.nih.gov/geo/series/GSE65nnn/GSE65904/matrix/"
filenm <- "GSE65904_series_matrix.txt.gz"
if(!file.exists("GSE65904_series_matrix.txt.gz")) download.file(paste(url, filenm, sep=""), destfile=filenm)
gse <- getGEO(filename=filenm)
head(exprs(gse))
#corresponding feature data according to the same reference listed above:
summaryData <- as(gse, "ExpressionSetIllumina")
summaryData
# head(fData(summaryData))
# head(pData(summaryData))
# colnames(pData(summaryData))
# modified_colnames <- gsub(":ch1", ".ch1", colnames(pData(summaryData)))
# modified_colnames2 <- gsub("\\(.+\\)", "0_1", modified_colnames)
# modified_colnames3 <- gsub(" ", "_", modified_colnames2)
# colnames(pData(summaryData)) <- modified_colnames3
#I just downloaded the original manifest file from the Illumina website:
#https://support.illumina.com/content/dam/illumina-support/documents/downloads/productfiles/humanht-12/humanht-12_v4_0_r2_15002873_b.txt.zip
#There is exactly 1/ONE probe for melatonin 1A (genesymbol: MTNR1A). So from here
#I am going to do a t-test:
#identifying the location of the illumina probe:
gs_mtnr1ag_idx <- grep("MTNR1A", fData(summaryData)$ILMN_Gene)
probe_mtnr1a_name <- row.names(fData(summaryData))[gs_mtnr1ag_idx]
#pulling out the illumina probe in the gene expression data:
exprs_probe_mtnr1a_idx <- grep(probe_mtnr1a_name, row.names(exprs(summaryData)))
exprs_probe_mtnr1a_dat <- exprs(summaryData)[exprs_probe_mtnr1a_idx,]
#identifying the samples that are missing sex information:
missing_sex_idx <- which(pData(summaryData)$`gender:ch1` %in% c("NA"))
#need to drop this sample in both the pheno and expression data:
exprs_probe_mtnr1a_dat_filtered <- exprs_probe_mtnr1a_dat[-missing_sex_idx]
sex_filtered <- pData(summaryData)$`gender:ch1`[-missing_sex_idx]
#finally performing the t-test
t_test <- t.test(exprs_probe_mtnr1a_dat_filtered ~ sex_filtered)
summary(t_test)
#plotting the expression values:
normalized_exprs_ln <- exprs_probe_mtnr1a_dat_filtered
sex <- sex_filtered
mtnr1a_exprs_df <- data.frame(normalized_exprs_ln, sex)
ggplot(mtnr1a_exprs_df, aes(x = sex, y = normalized_exprs_ln)) +
geom_boxplot() +
labs(title = "MTNR1 Normalized Expression"
,subtitle = paste0("p.value = ", t_test$p.value)
)
# test <- lapply(pData(summaryData), function(x){gsub("NA", NA, x)})
# pData(summaryData) <- test
# #performing differential gene expression:
# contrast_mat <-
# limmaResults <- limmaDE(summaryData, "gender.ch1")
# colnames(limmaResults)
|
## makeCacheMatrix
## The first function, makeCacheMatrix creates a special "matrix",
## that can cache it's own inverse by:
## - set the value of the matrix
## - get the value of the matrix
## - set the value of the inverse
## - get the value of the inverse
# Build a cache-aware matrix: a list of closures that share the matrix `x`
# and a memoised copy of its inverse (NULL until cacheSolve() stores one).
makeCacheMatrix <- function(x = matrix()) {
  cached_inverse <- NULL
  list(
    # Replace the stored matrix and drop the now-stale cached inverse.
    set = function(y) {
      x <<- y
      cached_inverse <<- NULL
    },
    # Return the stored matrix.
    get = function() x,
    # Record an inverse computed elsewhere (typically by cacheSolve()).
    setinverse = function(inverse) cached_inverse <<- inverse,
    # Return the cached inverse, or NULL when none has been stored yet.
    getinverse = function() cached_inverse
  )
}
## cacheSolve
## This function computes the inverse of the special "matrix"
## returned by makeCacheMatrix above. If the inverse has already been calculated
## (and the matrix has not changed), then the cachesolve should retrieve the
## inverse from the cache.
# Compute (or fetch) the inverse of the cache-aware matrix created by
# makeCacheMatrix(). Extra arguments in `...` are forwarded to solve().
cacheSolve <- function(x, ...) {
  ## Return a matrix that is the inverse of 'x'
  m <- x$getinverse()
  # Cache hit: reuse the previously computed inverse.
  if(!is.null(m)) {
    message("getting cached data")
    return(m)
  }
  # Cache miss: invert the stored matrix and memoise the result.
  data <- x$get()
  m <- solve(data, ...)
  x$setinverse(m)
  m
} | /cachematrix.R | no_license | jflechs1/ProgrammingAssignment2 | R | false | false | 1,265 | r | ## makeCacheMatrix
## The first function, makeCacheMatrix creates a special "matrix",
## that can cache it's own inverse by:
## - set the value of the matrix
## - get the value of the matrix
## - set the value of the inverse
## - get the value of the inverse
# Construct a cache-aware matrix: a list of closures sharing the matrix `x`
# and a memoised inverse `m` (NULL until one is stored via setinverse()).
makeCacheMatrix <- function(x = matrix()) {
  m <- NULL
  # Replace the stored matrix; any cached inverse is now stale, so drop it.
  set <- function(y) {
    x <<- y
    m <<- NULL
  }
  get <- function() x
  # Store an inverse computed elsewhere (the parameter name shadows base::solve).
  setinverse <- function(solve) m <<- solve
  getinverse <- function() m
  list(set = set, get = get,
       setinverse = setinverse,
       getinverse = getinverse)
}
## cacheSolve
## This function computes the inverse of the special "matrix"
## returned by makeCacheMatrix above. If the inverse has already been calculated
## (and the matrix has not changed), then the cachesolve should retrieve the
## inverse from the cache.
# Return the inverse of the special "matrix" produced by makeCacheMatrix(),
# computing it at most once: later calls reuse the cached result.
# Extra arguments in `...` are forwarded to solve().
cacheSolve <- function(x, ...) {
  cached <- x$getinverse()
  if (is.null(cached)) {
    # Cache miss: invert the stored matrix and remember the result.
    inverse <- solve(x$get(), ...)
    x$setinverse(inverse)
    return(inverse)
  }
  message("getting cached data")
  cached
}
# Move `file` into directory `dest` by copy-then-delete (works across
# filesystems, unlike file.rename()).
#
# Returns TRUE only when the file now lives solely in `dest`; FALSE when the
# copy failed, or when the original could not be removed -- in that case the
# copy is rolled back so no duplicate is left behind.
# NOTE(review): file.copy() defaults to overwrite = FALSE, so the move fails
# when a same-named file already exists in `dest` -- confirm that is intended.
file_move <- function(file, dest)
{
  newPath <- file.path(dest, basename(file))
  if (file.copy(file, newPath)) {
    if (file.remove(file)) {
      return(TRUE)
    }
    # Could not delete the source: undo the copy and report failure.
    # (The original code fell through to return(TRUE) here, claiming success
    # even though the file had not actually been moved.)
    file.remove(newPath)
    return(FALSE)
  }
  FALSE
} | /R/files.R | no_license | BroVic/myRStuff | R | false | false | 218 | r | file_move <- function(file, dest)
{
newPath <- file.path(dest, basename(file))
if (file.copy(file, newPath)) {
if (!file.remove(file)) {
file.remove(newPath)
}
return(TRUE)
}
FALSE
} |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data.r
\docType{data}
\name{tx_rates}
\alias{tx_rates}
\title{Transcription rates for ~7000 genes measured across a timecourse}
\format{matrix with 7872 genes as rows and 13 columns containing
transcription rates.}
\source{
\url{https://www.ncbi.nlm.nih.gov/geo/query/acc.cgi?acc=GSE56977}
\url{https://www.ncbi.nlm.nih.gov/pubmed/25497548}
}
\usage{
tx_rates
}
\description{
Mouse dendritic cells were stimulated with LPS, and timepoints were
collected every 15 minutes. Metabolic labeling pulse with 4sU was performed
10 minutes prior to collection. Total RNA and 4sU containing RNA was collected
and used to calculate transcription rates.
}
\examples{
tx_rates
}
\keyword{datasets}
| /man/tx_rates.Rd | permissive | Plondone/practical-data-analysis | R | false | true | 765 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data.r
\docType{data}
\name{tx_rates}
\alias{tx_rates}
\title{Transcription rates for ~7000 genes measured across a timecourse}
\format{matrix with 7872 genes as rows and 13 columns containing
transcription rates.}
\source{
\url{https://www.ncbi.nlm.nih.gov/geo/query/acc.cgi?acc=GSE56977}
\url{https://www.ncbi.nlm.nih.gov/pubmed/25497548}
}
\usage{
tx_rates
}
\description{
Mouse dendritic cells were stimulated with LPS, and timepoints were
collected every 15 minutes. Metabolic labeling pulse with 4sU was performed
10 minutes prior to collection. Total RNA and 4sU containing RNA was collected
and used to calculate transcription rates.
}
\examples{
tx_rates
}
\keyword{datasets}
|
# Function to fit multivariate mixed model
# SAR model
# QDR NASABioXGeo
# New version created 29 April 2019: This is now a SAR model not a CAR (continuous random effects not discrete based on regions) - note SAR does not support multivariate so we have to keep it to a single response variable.
# Modified 15 August 2018: add option to force intercept through zero
# Modified 18 June 2018: add drop = FALSE to scale so that it does not give weird output if only 1 response variable
# Modified 15 June 2018: correct scale() function to just return a numeric vector without attributes (also debug this a bit)
# Fit a single-response spatial SAR regression with brms and return the
# fitted model plus a tidy table of fixed-effect estimates.
#
# Args:
#   pred_df, resp_df : data frames holding predictors / responses plus `id_var`.
#   pred_vars        : character vector of predictor column names; may be empty,
#                      in which case an intercept-only model is fit.
#   resp_vars        : name of the single response column (brms SAR does not
#                      support multivariate responses -- see header comment).
#   id_var           : ID column used to join pred_df and resp_df.
#   dist_matrix      : spatial weights matrix, rows aligned with the joined data.
#   distribution     : brms response family.
#   priors, n_chains, n_iter, n_warmup, delta : passed through to brm().
#   random_effect_type : 'spatial' adds a SAR (lag) autocorrelation term;
#                        anything else fits a plain non-spatial model.
#   force_zero_intercept : retained for backward compatibility with the old
#                          CAR version; it no longer alters the model formula.
#
# Returns: list(model = <brmsfit>, coef = <data.frame of fixed effects, or NULL>)
fit_mv_sar <- function(pred_df, resp_df, pred_vars, resp_vars, id_var, dist_matrix, distribution = 'gaussian', priors = NULL, n_chains = 2, n_iter = 2000, n_warmup = 1000, delta = 0.9, random_effect_type = 'spatial', force_zero_intercept = FALSE) {
  # library() rather than require(): a missing package should fail loudly here
  # instead of producing a warning and a confusing error later.
  library(dplyr)
  library(brms)
  library(reshape2)
  # Build formula and data: keep ID + response, then centre/scale the response.
  resp_df <- resp_df[, c(id_var, resp_vars)]
  resp_df[,-1] <- scale(resp_df[,-1, drop = FALSE])
  resp_df[,-1] <- lapply(resp_df[,-1, drop = FALSE], as.numeric)
  resp_var_names <- resp_vars
  if (length(pred_vars) > 0) {
    # Scale predictors and build "response ~ x1 + x2 + ..." formula.
    pred_df <- pred_df[, c(id_var, pred_vars)]
    pred_df[,-1] <- scale(pred_df[,-1, drop = FALSE])
    pred_df[,-1] <- lapply(pred_df[,-1, drop = FALSE], as.numeric)
    pred_var_names <- names(pred_df)[-1]
    fixed_effects <- paste(pred_var_names, collapse = '+')
    formula_string <- paste(resp_var_names, '~', fixed_effects)
  } else {
    # Intercept-only model.
    formula_string <- paste(resp_var_names, '~', 1)
  }
  # NOTE(review): when pred_vars is empty, pred_df is joined with all of its
  # (unscaled) columns; they are unused by the model but do inflate `dat`.
  dat <- left_join(resp_df, pred_df)
  # Drop incomplete rows and keep dist_matrix aligned with the retained rows.
  use_obs <- complete.cases(dat)
  dat <- dat[use_obs,]
  dist_matrix <- dist_matrix[use_obs, use_obs]
  # Fit model with (or without) a SAR lag autocorrelation structure.
  if (random_effect_type == 'spatial') {
    mm <- brm(formula = formula_string, data = dat, family = distribution, autocor = cor_sar(dist_matrix, 'lag'),
              chains = n_chains, iter = n_iter, warmup = n_warmup, prior = priors, control = list(adapt_delta = delta))
  } else {
    mm <- brm(formula = formula_string, data = dat, family = distribution,
              chains = n_chains, iter = n_iter, warmup = n_warmup, prior = priors, control = list(adapt_delta = delta))
  }
  # Extract fixed effects into long format. The old CAR code additionally
  # joined in `random_effects` -- an *undefined* variable (it was only ever
  # assigned in the commented-out CAR code, so this branch always errored) --
  # and region-level coefficients via coef(mm); the SAR formula has no
  # group-level terms, so coef(mm) would also error. Both have been dropped.
  if (!force_zero_intercept | length(pred_vars) > 0) {
    fixed_effects <- fixef(mm)
    mm_coef <- cbind(effect = 'fixed', region = as.character(NA),
                     melt(fixed_effects, varnames = c('parameter', 'stat')))
  } else {
    mm_coef <- NULL
  }
  return(list(model = mm, coef = mm_coef))
}
| /stats/sar/fit_mv_sar.r | no_license | qdread/nasabio | R | false | false | 3,507 | r | # Function to fit multivariate mixed model
# SAR model
# QDR NASABioXGeo
# New version created 29 April 2019: This is now a SAR model not a CAR (continuous random effects not discrete based on regions) - note SAR does not support multivariate so we have to keep it to a single response variable.
# Modified 15 August 2018: add option to force intercept through zero
# Modified 18 June 2018: add drop = FALSE to scale so that it does not give weird output if only 1 response variable
# Modified 15 June 2018: correct scale() function to just return a numeric vector without attributes (also debug this a bit)
fit_mv_sar <- function(pred_df, resp_df, pred_vars, resp_vars, id_var, dist_matrix, distribution = 'gaussian', priors = NULL, n_chains = 2, n_iter = 2000, n_warmup = 1000, delta = 0.9, random_effect_type = 'spatial', force_zero_intercept = FALSE) {
require(dplyr)
require(brms)
require(reshape2)
# Build formula and data
resp_df <- resp_df[, c(id_var, resp_vars)]
resp_df[,-1] <- scale(resp_df[,-1, drop = FALSE])
resp_df[,-1] <- lapply(resp_df[,-1, drop = FALSE], as.numeric)
resp_var_names <- resp_vars
# below, create full formula string for the model with predictors
# create much simpler one if there aren't predictors (edited 13 June)
if (length(pred_vars) > 0) {
pred_df <- pred_df[, c(id_var, pred_vars)]
pred_df[,-1] <- scale(pred_df[,-1, drop = FALSE])
pred_df[,-1] <- lapply(pred_df[,-1, drop = FALSE], as.numeric)
pred_var_names <- names(pred_df)[-1]
fixed_effects <- paste(pred_var_names, collapse = '+')
#intercepts <- if (force_zero_intercept) '0' else paste('(1|', region_name, ')', sep = '')
#random_effects <- paste(c(intercepts, paste('(', pred_var_names, ' - 1|', region_name, ')', sep = '')), collapse = '+')
#formula_string <- paste(resp_var_names, '~', fixed_effects, '+', random_effects)
formula_string <- paste(resp_var_names, '~', fixed_effects)
} else {
#intercepts <- if (force_zero_intercept) '0 +' else ''
#formula_string <- paste(resp_var_names, '~', intercepts, paste('(1|', region_name, ')', sep = ''))
formula_string <- paste(resp_var_names, '~', 1)
}
dat <- left_join(resp_df, pred_df)
use_obs <- complete.cases(dat)
dat <- dat[use_obs,]
dist_matrix <- dist_matrix[use_obs, use_obs]
# Fit model, extract coefficients, and format them
if (random_effect_type == 'spatial') {
mm <- brm(formula = formula_string, data = dat, family = distribution, autocor = cor_sar(dist_matrix, 'lag'),
chains = n_chains, iter = n_iter, warmup = n_warmup, prior = priors, control = list(adapt_delta = delta))
} else {
mm <- brm(formula = formula_string, data = dat, family = distribution,
chains = n_chains, iter = n_iter, warmup = n_warmup, prior = priors, control = list(adapt_delta = delta))
}
# Edit 16 Aug: do not extract fixed effects (and combined fixed+random effects) if it is a null model without fixed effects.
if (!force_zero_intercept | length(pred_vars) > 0) {
fixed_effects <- fixef(mm)
region_effects <- coef(mm)
fixed_effects <- cbind(effect = 'fixed', region = as.character(NA), melt(fixed_effects, varnames = c('parameter', 'stat')))
region_effects <- cbind(effect = 'coefficient', melt(region_effects$region, varnames = c('region', 'stat', 'parameter'))) %>% mutate(region = as.character(region))
mm_coef <- fixed_effects %>% full_join(random_effects) %>% full_join(region_effects)
} else {
mm_coef <- random_effects
}
return(list(model = mm, coef = mm_coef))
}
|
########## SEX AND ABANY
##### estimating the difference between two proportions
sex_abany_D_SE <- sqrt( ( (male_aby_proportion*(1-male_aby_proportion)) / aby_n1) + ( (female_aby_proportion*(1-female_aby_proportion)) / aby_n2) )
sex_abany_D_SE
#[1] 0.00597667
1.96*sex_abany_D_SE
#[1] 0.01171427
(male_aby_proportion - female_aby_proportion) - (1.96*sex_abany_D_SE)
#[1] 0.009843132
(male_aby_proportion - female_aby_proportion) + (1.96*sex_abany_D_SE)
#[1] 0.03327168
## we can expect a difference of (0.009843132, 0.03327168) between proportion of males who support abortion and proportion of females who support abortion
## since that range does not include the null hypothesis of p_male - p_female = 0, we must reject the null hypothesis.
##### hypothesis tests for comparing two proportions
sex_abany_SE <- sqrt(((pooled_aby_proportion*(1 - pooled_aby_proportion)) / length(which(homo_ab_nonNA$sex=="Male" & (homo_ab_nonNA$abany=="Yes" | homo_ab_nonNA$abany=="No"))) ) + ( (pooled_aby_proportion*(1 - pooled_aby_proportion)) / length(which(homo_ab_nonNA$sex=="Female" & (homo_ab_nonNA$abany=="Yes" | homo_ab_nonNA$abany=="No")))))
#[1] 0.005972433
male_aby_proportion - female_aby_proportion
#[1] 0.02155741
0.02155741/0.005972433
#[1] 3.609485
2*pnorm(-abs(3.609485))
#[1] 0.0003068055
## With a very low p-value, we must reject the null hypothesis.
########## ABANY AND HOMOSEX
##### estimating the difference between two proportions
ab_homo_D_SE
#[1] 0.005309297
(aby_homonotwrong - abn_homonotwrong) + 1.96*ab_homo_D_SE
#[1] 0.2862282
(aby_homonotwrong - abn_homonotwrong) - 1.96*ab_homo_D_SE
#[1] 0.2654158
## (0.2654158, 0.2862282)
## null hypothesis does not fall within range. we must reject null hypothesis
##### hypothesis tests for comparing two proportions
ab_homo_HT_SE
#[1] 0.00522962
(aby_homonotwrong - abn_homonotwrong) / ab_homo_HT_SE
#[1] 52.74226
2*pnorm(-abs(52.74226))
#[1] 0
## with this p-value we must reject the null hypothesis.
| /Statistics-With-R-Specialization/Inferential Statistics/project/inference2.r | no_license | ashutoshtiwari13/Statistical-Learning-Hub | R | false | false | 1,984 | r | ########## SEX AND ABANY
##### estimating the difference between two proportions
sex_abany_D_SE <- sqrt( ( (male_aby_proportion*(1-male_aby_proportion)) / aby_n1) + ( (female_aby_proportion*(1-female_aby_proportion)) / aby_n2) )
sex_abany_D_SE
#[1] 0.00597667
1.96*sex_abany_D_SE
#[1] 0.01171427
(male_aby_proportion - female_aby_proportion) - (1.96*sex_abany_D_SE)
#[1] 0.009843132
(male_aby_proportion - female_aby_proportion) + (1.96*sex_abany_D_SE)
#[1] 0.03327168
## we can expect a difference of (0.009843132, 0.03327168) between proportion of males who support abortion and proportion of females who support abortion
## since that range does not include the null hypothesis of p_male - p_female = 0, we must reject the null hypothesis.
##### hypothesis tests for comparing two proportions
sex_abany_SE <- sqrt(((pooled_aby_proportion*(1 - pooled_aby_proportion)) / length(which(homo_ab_nonNA$sex=="Male" & (homo_ab_nonNA$abany=="Yes" | homo_ab_nonNA$abany=="No"))) ) + ( (pooled_aby_proportion*(1 - pooled_aby_proportion)) / length(which(homo_ab_nonNA$sex=="Female" & (homo_ab_nonNA$abany=="Yes" | homo_ab_nonNA$abany=="No")))))
#[1] 0.005972433
male_aby_proportion - female_aby_proportion
#[1] 0.02155741
0.02155741/0.005972433
#[1] 3.609485
2*pnorm(-abs(3.609485))
#[1] 0.0003068055
## With a very low p-value, we must reject the null hypothesis..
########## ABANY AND HOMOSEX
##### estimating the difference between two proportions
ab_homo_D_SE
#[1] 0.005309297
(aby_homonotwrong - abn_homonotwrong) + 1.96*ab_homo_D_SE
#[1] 0.2862282
(aby_homonotwrong - abn_homonotwrong) - 1.96*ab_homo_D_SE
#[1] 0.2654158
## (0.2654158, 0.2862282)
## null hypothesis does not fall within range. we must reject null hypothesis
##### hypothesis tests for comparing two proportions
ab_homo_HT_SE
#[1] 0.00522962
(aby_homonotwrong - abn_homonotwrong) / ab_homo_HT_SE
#[1] 52.74226
2*pnorm(-abs(52.74226))
#[1] 0
## with this p-value we must reject the null hypothesis.
|
library(caret)
library(pROC)
library(verification)
library(rJava)
library(doMC)
registerDoMC(cores = 2)
# Root folder for per-algorithm / per-dataset result subfolders.
output <- "results/Curve3/"
# Input CSVs (without extension), read from the working directory.
fileNames <- c("sanos-enfermos")
algorithmNames <-c(#"glm",
  "rf"
  #,"J48"
)
# Tuning-grid size per algorithm. ("tuneLenght" [sic] -- the misspelling is
# referenced by this exact name below, so it is kept.)
tuneLenght <-c(rf=30, J48=5)
# Leave-one-out CV, keeping class probabilities and per-fold predictions so
# ROC curves can be reconstructed afterwards; twoClassSummary provides
# ROC/Sens/Spec as the optimisation metrics.
fitControl <- trainControl(method="LOOCV", classProbs = TRUE,
                           savePredictions = TRUE, allowParallel= TRUE,
                           summaryFunction = twoClassSummary, verboseIter = FALSE)
form <- as.formula("Class ~ .")
#columns <- c("RBM45", "U4", "PTB", "U6ATAC", "SRM160", "Class")
#columns <- c("PTB", "RBM22", "SRSF1", "SRM160", "U1", "U6", "U6ATAC", "Class")
# Feature subset used for this run (earlier candidate subsets kept above).
columns <- c("CUGBP", "PTB", "RBM22", "RBM3", "SRM160", "U6", "U6ATAC", "Class")
# Seed for reproducibility
set.seed(123)
for(fileName in fileNames){
dataset <- read.csv(paste(fileName,".csv", sep = ""), sep = ",")
dataset <- dataset[, columns]
for(alg in algorithmNames){
outputT <- paste0(output, alg,"/", fileName, "/")
if(!dir.exists(outputT)){
dir.create(outputT, recursive = T)
}
if(alg=="glm"){
modelFit <- train(form, data = dataset,
method=alg,
metric = "ROC",
maximize = TRUE,
trControl = fitControl, family="binomial")
}
if(alg=="rf" || alg=="J48"){
modelFit <- train(form, data = dataset,
method=alg,
metric = "ROC",
maximize = TRUE,
trControl = fitControl,
tuneLength = tuneLenght[[alg]])
}
# Predict over the same training data. Cosas de biologos...
truth <- as.numeric(dataset$Class=='E')
predictor <- predict(modelFit, newdata = dataset, type = "prob")
rocT <- roc(truth, predictor$E) # Draw ROC curve.
plot(rocT, print.auc = TRUE)
dev.copy(png, paste(outputT,'TrainingSet-ROC-Curve.png', sep = ""))
dev.off()
# Save the residuals to generate ROC Curve. Esto para los biologos.
cvResiduals <- data.frame(class=truth, prediction=predictor$E)
#save the dataframe
write.table(cvResiduals, file= paste0(outputT, "TrainingSet-residuals.csv"), quote = FALSE, sep="," , row.names = FALSE,
col.names = TRUE)
#Compute the metrics
ROCV <- roc.area(truth, predictor$E)
pvalue <- ROCV$p.value
auc <- ROCV$A
#To extract sensitivity and specifitivity
predictorFactor <- predict(modelFit, newdata = dataset)
sensitivity <- sensitivity(predictorFactor, dataset$Class)
specificity <- specificity(predictorFactor, dataset$Class)
dataResults <- data.frame("AUC"= auc, "p-value"= pvalue, "sensitivity"=sensitivity, "specificity"= specificity)
#Write the results in a file
write.table(dataResults, file = paste0(outputT, "TrainingSet-results.csv"), quote = F, sep = "\t", col.names = T, row.names = F)
# This case is for generate the results of CV process.
if(alg=="glm"){
predictions <- modelFit$pred
}
if(alg=="rf"){
# Extracting the best tune
predictions <- modelFit$pred[modelFit$pred$mtry == modelFit$bestTune$mtry,]
}
if(alg=="J48"){
# Extracting the best tune
predictions <- modelFit$pred[modelFit$pred$C == modelFit$bestTune$C & modelFit$pred$M == modelFit$bestTune$M,]
}
truth <- as.numeric(modelFit$trainingData$.outcome=='E')
predictor <- aggregate(E ~ rowIndex, predictions, mean)[,'E']
rocT <- roc(truth, predictor)
plot(rocT, print.auc = TRUE)
dev.copy(png, paste(outputT, '/CV-ROC-Curve.png', sep = ""))
dev.off()
# Save the residuals to generate ROC Curve. Esto para los biologos.
cvResiduals <- data.frame(class=truth, prediction=predictor)
#save the dataframe
write.table(cvResiduals, file= paste0(outputT, "CV-residuals.csv"), quote = F, sep="," , row.names = F,
col.names=T)
#Compute the metrics
ROCV <- roc.area(truth, predictor)
pvalue <- ROCV$p.value
auc <- ROCV$A
#To extract sensitivity and specifitivity
sensitivity <- modelFit$results[rownames(modelFit$bestTune)[1],]$Sens
specificity <- modelFit$results[rownames(modelFit$bestTune)[1],]$Spec
dataResults <- data.frame("AUC"= auc, "p-value"= pvalue, "sensitivity"=sensitivity, "specificity"= specificity)
#Write the results in a file
write.table(dataResults, file = paste0(outputT, "CV-results.csv"), quote = F, sep = "\t", col.names = T, row.names = F)
#Saving the model for posterior analysis
if(alg=="J48"){
#Cache rJava object classifier in order to save it with the object
.jcache(modelFit$finalModel$classifier)
}
saveRDS(modelFit, paste(outputT, "modelfit-", alg, ".rds", sep = ""))
}
} | /Estudio-steatosis/v3/Training-CV.R | no_license | kdis-lab/IMIBIC-projects | R | false | false | 4,983 | r | library(caret)
library(pROC)
library(verification)
library(rJava)
library(doMC)
# Parallel backend for caret (picked up via allowParallel = TRUE below).
registerDoMC(cores = 2)
# Root directory for all per-algorithm / per-dataset result files.
output <- "results/Curve3/"
# Input dataset basenames; "<name>.csv" is read from the working directory.
fileNames <- c("sanos-enfermos")
# Classifiers to run; "glm" and "J48" are currently commented out.
algorithmNames <-c(#"glm",
"rf"
#,"J48"
)
# Tuning-grid size per algorithm.
# NOTE(review): "tuneLenght" is misspelled but used consistently below.
tuneLenght <-c(rf=30, J48=5)
# Leave-one-out CV, keeping class probabilities and per-fold predictions
# so ROC curves can be rebuilt afterwards.
fitControl <- trainControl(method="LOOCV", classProbs = TRUE,
savePredictions = TRUE, allowParallel= TRUE,
summaryFunction = twoClassSummary, verboseIter = FALSE)
form <- as.formula("Class ~ .")
# Feature subsets tried in earlier experiments (kept for reference):
#columns <- c("RBM45", "U4", "PTB", "U6ATAC", "SRM160", "Class")
#columns <- c("PTB", "RBM22", "SRSF1", "SRM160", "U1", "U6", "U6ATAC", "Class")
columns <- c("CUGBP", "PTB", "RBM22", "RBM3", "SRM160", "U6", "U6ATAC", "Class")
# Seed for reproducibility
set.seed(123)
for(fileName in fileNames){
dataset <- read.csv(paste(fileName,".csv", sep = ""), sep = ",")
# Restrict to the selected biomarker columns plus the class label.
dataset <- dataset[, columns]
for(alg in algorithmNames){
# One output folder per algorithm/dataset combination.
outputT <- paste0(output, alg,"/", fileName, "/")
if(!dir.exists(outputT)){
dir.create(outputT, recursive = T)
}
# Logistic regression has no tuning grid; it only needs the family.
if(alg=="glm"){
modelFit <- train(form, data = dataset,
method=alg,
metric = "ROC",
maximize = TRUE,
trControl = fitControl, family="binomial")
}
# rf / J48 are tuned over a grid of size tuneLenght[[alg]].
if(alg=="rf" || alg=="J48"){
modelFit <- train(form, data = dataset,
method=alg,
metric = "ROC",
maximize = TRUE,
trControl = fitControl,
tuneLength = tuneLenght[[alg]])
}
# Predict over the same training data (resubstitution results were
# requested by the biologists; they are optimistic by construction).
truth <- as.numeric(dataset$Class=='E')
predictor <- predict(modelFit, newdata = dataset, type = "prob")
rocT <- roc(truth, predictor$E) # Draw ROC curve.
plot(rocT, print.auc = TRUE)
dev.copy(png, paste(outputT,'TrainingSet-ROC-Curve.png', sep = ""))
dev.off()
# Save the residuals (truth + predicted probability of class "E")
# so the ROC curve can be regenerated externally.
cvResiduals <- data.frame(class=truth, prediction=predictor$E)
# Save the data frame.
write.table(cvResiduals, file= paste0(outputT, "TrainingSet-residuals.csv"), quote = FALSE, sep="," , row.names = FALSE,
col.names = TRUE)
# Compute AUC and its p-value on the training set.
ROCV <- roc.area(truth, predictor$E)
pvalue <- ROCV$p.value
auc <- ROCV$A
# Extract sensitivity and specificity from hard-label predictions.
predictorFactor <- predict(modelFit, newdata = dataset)
sensitivity <- sensitivity(predictorFactor, dataset$Class)
specificity <- specificity(predictorFactor, dataset$Class)
dataResults <- data.frame("AUC"= auc, "p-value"= pvalue, "sensitivity"=sensitivity, "specificity"= specificity)
# Write the training-set metrics to a file.
write.table(dataResults, file = paste0(outputT, "TrainingSet-results.csv"), quote = F, sep = "\t", col.names = T, row.names = F)
# From here on: results of the LOOCV process itself. caret stores one
# prediction row per held-out sample and per tuning candidate, so the
# rows belonging to the best tune must be filtered out per algorithm.
if(alg=="glm"){
predictions <- modelFit$pred
}
if(alg=="rf"){
# Keep only the predictions made with the best mtry.
predictions <- modelFit$pred[modelFit$pred$mtry == modelFit$bestTune$mtry,]
}
if(alg=="J48"){
# Keep only the predictions made with the best (C, M) pair.
predictions <- modelFit$pred[modelFit$pred$C == modelFit$bestTune$C & modelFit$pred$M == modelFit$bestTune$M,]
}
truth <- as.numeric(modelFit$trainingData$.outcome=='E')
# Average the probability of "E" per held-out row index.
predictor <- aggregate(E ~ rowIndex, predictions, mean)[,'E']
rocT <- roc(truth, predictor)
plot(rocT, print.auc = TRUE)
dev.copy(png, paste(outputT, '/CV-ROC-Curve.png', sep = ""))
dev.off()
# Save the CV residuals so the ROC curve can be regenerated externally.
cvResiduals <- data.frame(class=truth, prediction=predictor)
# Save the data frame.
write.table(cvResiduals, file= paste0(outputT, "CV-residuals.csv"), quote = F, sep="," , row.names = F,
col.names=T)
# Compute AUC and its p-value on the CV predictions.
ROCV <- roc.area(truth, predictor)
pvalue <- ROCV$p.value
auc <- ROCV$A
# Sensitivity/specificity of the best tune, as reported by caret.
sensitivity <- modelFit$results[rownames(modelFit$bestTune)[1],]$Sens
specificity <- modelFit$results[rownames(modelFit$bestTune)[1],]$Spec
dataResults <- data.frame("AUC"= auc, "p-value"= pvalue, "sensitivity"=sensitivity, "specificity"= specificity)
# Write the CV metrics to a file.
write.table(dataResults, file = paste0(outputT, "CV-results.csv"), quote = F, sep = "\t", col.names = T, row.names = F)
# Save the fitted model for posterior analysis.
if(alg=="J48"){
# Cache the rJava classifier object so it survives serialization.
.jcache(modelFit$finalModel$classifier)
}
saveRDS(modelFit, paste(outputT, "modelfit-", alg, ".rds", sep = ""))
}
} |
cor.score <- function()
{
# Nonparametric Correlation between community score and user's credibility rating
cor.tot.norep<-cor.test(df.norep$score, df.norep$user_rating, method="kendall")
cor.tot.rep<-cor.test(df.rep$score, df.rep$user_rating, method="kendall")
cor.ug.norep<-cor.test(df.ug.norep$score, df.ug.norep$user_rating, method="kendall")
cor.ug.rep<-cor.test(df.ug.rep$score, df.ug.rep$user_rating, method="kendall")
cor.msc.norep<-cor.test(df.msc.norep$score, df.msc.norep$user_rating, method="kendall")
cor.msc.rep<-cor.test(df.msc.rep$score, df.msc.rep$user_rating, method="kendall")
cor.phil.norep<-cor.test(df.phil.norep$score, df.phil.norep$user_rating, method="kendall")
cor.phil.rep<-cor.test(df.phil.rep$score, df.phil.rep$user_rating, method="kendall")
cor.score.summary<-data.frame(
c(
paste(round(cor.ug.norep$estimate,3)," (", format(round(cor.ug.norep$p.value,3),nsmall=3),")", sep=""),
paste(round(cor.msc.norep$estimate,3)," (", format(round(cor.msc.norep$p.value,3),nsmall=3),")", sep=""),
paste(round(cor.phil.norep$estimate,3)," (", format(round(cor.phil.norep$p.value,3),nsmall=3),")", sep=""),
paste(round(cor.tot.norep$estimate,3)," (", format(round(cor.tot.norep$p.value,3),nsmall=3),")", sep="")
),
c(
paste(round(cor.ug.rep$estimate,3)," (", format(round(cor.ug.rep$p.value,3),nsmall=3),")", sep=""),
paste(round(cor.msc.rep$estimate,3)," (", format(round(cor.msc.rep$p.value,3),nsmall=3),")", sep=""),
paste(round(cor.phil.rep$estimate,3)," (", format(round(cor.phil.rep$p.value,3),nsmall=3),")", sep=""),
paste(round(cor.tot.rep$estimate,3)," (", format(round(cor.tot.rep$p.value,3),nsmall=3),")", sep="")
)
)
colnames(cor.score.summary) <- c("Cues Not Shown","Cues Shown")
rownames(cor.score.summary) <- c("UG Comp/IS","PG IS","UG Phil","Overall")
return(cor.score.summary)
}
cor.rep <- function()
{
  # Kendall (nonparametric) correlation between the answerer's reputation
  # and the user's credibility rating, reported per cohort and overall,
  # separately for the two cue conditions. Reads the df.* data frames
  # from the calling environment.
  run_test <- function(d) cor.test(d$userrep, d$user_rating, method = "kendall")
  # Format one result as "tau (p-value)", both rounded to 3 decimals.
  fmt <- function(ct) {
    paste0(round(ct$estimate, 3), " (",
           format(round(ct$p.value, 3), nsmall = 3), ")")
  }
  cell <- function(d) fmt(run_test(d))
  # Row order: UG Comp/IS, PG IS, UG Phil, Overall.
  no_cues <- vapply(list(df.ug.norep, df.msc.norep, df.phil.norep, df.norep),
                    cell, character(1))
  cues    <- vapply(list(df.ug.rep, df.msc.rep, df.phil.rep, df.rep),
                    cell, character(1))
  cor.rep.summary <- data.frame(no_cues, cues)
  colnames(cor.rep.summary) <- c("Cues Not Shown", "Cues Shown")
  rownames(cor.rep.summary) <- c("UG Comp/IS", "PG IS", "UG Phil", "Overall")
  cor.rep.summary
}
cor.priork.score <- function()
{
  # Kendall (nonparametric) correlation between the community score and the
  # user's credibility rating, split by the user's prior-knowledge level
  # (low/medium/high) and by cue condition. Reads the df.*_k.* data frames
  # from the calling environment.
  run_test <- function(d) cor.test(d$score, d$user_rating, method = "kendall")
  # Format one result as "tau (p-value)", both rounded to 3 decimals.
  fmt <- function(ct) {
    paste0(round(ct$estimate, 3), " (",
           format(round(ct$p.value, 3), nsmall = 3), ")")
  }
  cell <- function(d) fmt(run_test(d))
  # Row order: Low, Medium, High prior knowledge.
  no_cues <- vapply(list(df.low_k.norep, df.mid_k.norep, df.hi_k.norep),
                    cell, character(1))
  cues    <- vapply(list(df.low_k.rep, df.mid_k.rep, df.hi_k.rep),
                    cell, character(1))
  summary_tbl <- data.frame(no_cues, cues)
  colnames(summary_tbl) <- c("Cues Not Shown", "Cues Shown")
  rownames(summary_tbl) <- c("Low", "Medium", "High")
  summary_tbl
}
cor.priork.rep <- function()
{
# Nonparametric Correlation between community score and user's credibility rating
cor.hi.norep<-cor.test(df.hi_k.norep$userrep, df.hi_k.norep$user_rating, method="kendall")
cor.hi.rep<-cor.test(df.hi_k.rep$userrep, df.hi_k.rep$user_rating, method="kendall")
cor.med.norep<-cor.test(df.mid_k.norep$userrep, df.mid_k.norep$user_rating, method="kendall")
cor.med.rep<-cor.test(df.mid_k.rep$userrep, df.mid_k.rep$user_rating, method="kendall")
cor.low.norep<-cor.test(df.low_k.norep$userrep, df.low_k.norep$user_rating, method="kendall")
cor.low.rep<-cor.test(df.low_k.rep$userrep, df.low_k.rep$user_rating, method="kendall")
cor.priork_score.summary<-data.frame(
c(
paste(round(cor.low.norep$estimate,3)," (", format(round(cor.low.norep$p.value,3),nsmall=3),")", sep=""),
paste(round(cor.med.norep$estimate,3)," (", format(round(cor.med.norep$p.value,3),nsmall=3),")", sep=""),
paste(round(cor.hi.norep$estimate,3)," (", format(round(cor.hi.norep$p.value,3),nsmall=3),")", sep="")
),
c(
paste(round(cor.low.rep$estimate,3)," (", format(round(cor.low.rep$p.value,3),nsmall=3),")", sep=""),
paste(round(cor.med.rep$estimate,3)," (", format(round(cor.med.rep$p.value,3),nsmall=3),")", sep=""),
paste(round(cor.hi.rep$estimate,3)," (", format(round(cor.hi.rep$p.value,3),nsmall=3),")", sep="")
)
)
colnames(cor.priork_score.summary) <- c("Cues Not Shown","Cues Shown")
rownames(cor.priork_score.summary) <- c("Low","Medium","High")
return(cor.priork_score.summary)
} | /lib/correlations.R | no_license | paulusm/stacktest | R | false | false | 7,013 | r | cor.score <- function()
{
# Nonparametric Correlation between community score and user's credibility rating
cor.tot.norep<-cor.test(df.norep$score, df.norep$user_rating, method="kendall")
cor.tot.rep<-cor.test(df.rep$score, df.rep$user_rating, method="kendall")
cor.ug.norep<-cor.test(df.ug.norep$score, df.ug.norep$user_rating, method="kendall")
cor.ug.rep<-cor.test(df.ug.rep$score, df.ug.rep$user_rating, method="kendall")
cor.msc.norep<-cor.test(df.msc.norep$score, df.msc.norep$user_rating, method="kendall")
cor.msc.rep<-cor.test(df.msc.rep$score, df.msc.rep$user_rating, method="kendall")
cor.phil.norep<-cor.test(df.phil.norep$score, df.phil.norep$user_rating, method="kendall")
cor.phil.rep<-cor.test(df.phil.rep$score, df.phil.rep$user_rating, method="kendall")
cor.score.summary<-data.frame(
c(
paste(round(cor.ug.norep$estimate,3)," (", format(round(cor.ug.norep$p.value,3),nsmall=3),")", sep=""),
paste(round(cor.msc.norep$estimate,3)," (", format(round(cor.msc.norep$p.value,3),nsmall=3),")", sep=""),
paste(round(cor.phil.norep$estimate,3)," (", format(round(cor.phil.norep$p.value,3),nsmall=3),")", sep=""),
paste(round(cor.tot.norep$estimate,3)," (", format(round(cor.tot.norep$p.value,3),nsmall=3),")", sep="")
),
c(
paste(round(cor.ug.rep$estimate,3)," (", format(round(cor.ug.rep$p.value,3),nsmall=3),")", sep=""),
paste(round(cor.msc.rep$estimate,3)," (", format(round(cor.msc.rep$p.value,3),nsmall=3),")", sep=""),
paste(round(cor.phil.rep$estimate,3)," (", format(round(cor.phil.rep$p.value,3),nsmall=3),")", sep=""),
paste(round(cor.tot.rep$estimate,3)," (", format(round(cor.tot.rep$p.value,3),nsmall=3),")", sep="")
)
)
colnames(cor.score.summary) <- c("Cues Not Shown","Cues Shown")
rownames(cor.score.summary) <- c("UG Comp/IS","PG IS","UG Phil","Overall")
return(cor.score.summary)
}
cor.rep <- function()
{
# Non parametric correlation between answerer's reputation and user's credibility rating
cor.tot.norep<-cor.test(df.norep$userrep, df.norep$user_rating, method="kendall")
cor.tot.rep<-cor.test(df.rep$userrep, df.rep$user_rating, method="kendall")
cor.ug.norep<-cor.test(df.ug.norep$userrep, df.ug.norep$user_rating, method="kendall")
cor.ug.rep<-cor.test(df.ug.rep$userrep, df.ug.rep$user_rating, method="kendall")
cor.msc.norep<-cor.test(df.msc.norep$userrep, df.msc.norep$user_rating, method="kendall")
cor.msc.rep<-cor.test(df.msc.rep$userrep, df.msc.rep$user_rating, method="kendall")
cor.phil.norep<-cor.test(df.phil.norep$userrep, df.phil.norep$user_rating, method="kendall")
cor.phil.rep<-cor.test(df.phil.rep$userrep, df.phil.rep$user_rating, method="kendall")
cor.rep.summary<-data.frame(
c(
paste(round(cor.ug.norep$estimate,3)," (", format(round(cor.ug.norep$p.value,3),nsmall=3),")", sep=""),
paste(round(cor.msc.norep$estimate,3)," (", format(round(cor.msc.norep$p.value,3),nsmall=3),")", sep=""),
paste(round(cor.phil.norep$estimate,3)," (", format(round(cor.phil.norep$p.value,3),nsmall=3),")", sep=""),
paste(round(cor.tot.norep$estimate,3)," (", format(round(cor.tot.norep$p.value,3),nsmall=3),")", sep="")
),
c(
paste(round(cor.ug.rep$estimate,3)," (", format(round(cor.ug.rep$p.value,3),nsmall=3),")", sep=""),
paste(round(cor.msc.rep$estimate,3)," (", format(round(cor.msc.rep$p.value,3),nsmall=3),")", sep=""),
paste(round(cor.phil.rep$estimate,3)," (", format(round(cor.phil.rep$p.value,3),nsmall=3),")", sep=""),
paste(round(cor.tot.rep$estimate,3)," (", format(round(cor.tot.rep$p.value,3),nsmall=3),")", sep="")
)
)
colnames(cor.rep.summary) <- c("Cues Not Shown","Cues Shown")
rownames(cor.rep.summary) <- c("UG Comp/IS","PG IS","UG Phil","Overall")
return(cor.rep.summary)
}
cor.priork.score <- function()
{
# Nonparametric Correlation between community score and user's credibility rating
cor.hi.norep<-cor.test(df.hi_k.norep$score, df.hi_k.norep$user_rating, method="kendall")
cor.hi.rep<-cor.test(df.hi_k.rep$score, df.hi_k.rep$user_rating, method="kendall")
cor.med.norep<-cor.test(df.mid_k.norep$score, df.mid_k.norep$user_rating, method="kendall")
cor.med.rep<-cor.test(df.mid_k.rep$score, df.mid_k.rep$user_rating, method="kendall")
cor.low.norep<-cor.test(df.low_k.norep$score, df.low_k.norep$user_rating, method="kendall")
cor.low.rep<-cor.test(df.low_k.rep$score, df.low_k.rep$user_rating, method="kendall")
cor.priork_score.summary<-data.frame(
c(
paste(round(cor.low.norep$estimate,3)," (", format(round(cor.low.norep$p.value,3),nsmall=3),")", sep=""),
paste(round(cor.med.norep$estimate,3)," (", format(round(cor.med.norep$p.value,3),nsmall=3),")", sep=""),
paste(round(cor.hi.norep$estimate,3)," (", format(round(cor.hi.norep$p.value,3),nsmall=3),")", sep="")
),
c(
paste(round(cor.low.rep$estimate,3)," (", format(round(cor.low.rep$p.value,3),nsmall=3),")", sep=""),
paste(round(cor.med.rep$estimate,3)," (", format(round(cor.med.rep$p.value,3),nsmall=3),")", sep=""),
paste(round(cor.hi.rep$estimate,3)," (", format(round(cor.hi.rep$p.value,3),nsmall=3),")", sep="")
)
)
colnames(cor.priork_score.summary) <- c("Cues Not Shown","Cues Shown")
rownames(cor.priork_score.summary) <- c("Low","Medium","High")
return(cor.priork_score.summary)
}
cor.priork.rep <- function()
{
  # Kendall (nonparametric) correlation between the answerer's reputation
  # and the user's credibility rating, split by the user's prior-knowledge
  # level (low/medium/high) and by cue condition. Reads the df.*_k.* data
  # frames from the calling environment.
  run_test <- function(d) cor.test(d$userrep, d$user_rating, method = "kendall")
  # Format one result as "tau (p-value)", both rounded to 3 decimals.
  fmt <- function(ct) {
    paste0(round(ct$estimate, 3), " (",
           format(round(ct$p.value, 3), nsmall = 3), ")")
  }
  cell <- function(d) fmt(run_test(d))
  # Row order: Low, Medium, High prior knowledge.
  no_cues <- vapply(list(df.low_k.norep, df.mid_k.norep, df.hi_k.norep),
                    cell, character(1))
  cues    <- vapply(list(df.low_k.rep, df.mid_k.rep, df.hi_k.rep),
                    cell, character(1))
  summary_tbl <- data.frame(no_cues, cues)
  colnames(summary_tbl) <- c("Cues Not Shown", "Cues Shown")
  rownames(summary_tbl) <- c("Low", "Medium", "High")
  summary_tbl
}
# Finds the hubs of a network.
FindHubs <- function(runCount, hubThreshold, swpGraph){
  # Kleinberg hub score for every vertex (igraph's hub.score).
  scores <- hub.score(swpGraph)$vector
  # Binarize in place: vertices scoring at or above the threshold become 1,
  # all others 0. Assigning through logical masks keeps the names/attributes
  # of the score vector, matching the original nested replace() calls.
  # NOTE(review): runCount is unused here; kept for call-site compatibility.
  hubMatrix <- scores
  hubMatrix[scores >= hubThreshold] <- 1
  hubMatrix[scores < hubThreshold] <- 0
  return(hubMatrix)
}
| /Model/ryThesis/R/FindHubs.r | no_license | himmAllRight/Thesis | R | false | false | 406 | r | # Finds the hubs of a network.
FindHubs <- function(runCount, hubThreshold, swpGraph){
hubScore = hub.score(swpGraph)
hubValues = hubScore$vector # Takes just values from hub score
# Replaces all hubs with a 1, and other vertices with a 0.
hubMatrix = replace(replace(hubValues,hubValues >= hubThreshold, 1),
hubValues < hubThreshold,0)
return(hubMatrix)
}
|
# Coded by Anderson Borba data: 07/07/2020 version 1.0
# Article submitted
# Fusion of Evidences in Intensities Channels for Edge Detection in PolSAR Images
# GRSL - IEEE Geoscience and Remote Sensing Letters
# Anderson A. de Borba, Maurı́cio Marengoni, and Alejandro C Frery
# Description
# Finds edge evidence in each channel
# Input: Radial information for the region
#
# Output: Estimated edge evidences
# obs: 1) Change the channels in the input and output files.
# 2) Disable the print in file after running the tests of interest in order not to modify files unduly.
#
library(ggplot2)
library(latex2exp)
library(GenSA)
library(maxLik)
#
# Objective function and segment log-likelihoods (define func_obj_l_L_mu,
# loglike and loglikd; they appear to read z, N, matdf1/matdf2 from the
# global environment -- TODO confirm).
source("func_obj_l_L_mu.r")
source("loglike.r")
source("loglikd.r")
#
# Input file comes from the sibling Data directory.
setwd("..")
setwd("Data")
args <- commandArgs(trailingOnly = TRUE)
# channels hh(1), hv(2), and vv(3)
mat <- scan(args[1])
setwd("..")
setwd("Code_r")
########## setup to Flevoland
# r radial samples per line, nr radial lines in the region.
r <- 120
nr <- 25
mat <- matrix(mat, ncol = r, byrow = TRUE)
# Per-line edge position estimates and their objective values.
evidencias <- rep(0, nr)
evidencias_valores <- rep(0, nr)
xev <- seq(1, nr, 1 )
# Estimate one edge position per radial line.
for (k in 1 : nr){
print(k)
N <- r
z <- rep(0, N)
z <- mat[k, 1: N]
# Compact the line: keep only strictly positive intensities, preserving
# their order, so trailing zeros/padding are dropped.
zaux1 <- rep(0, N)
conta = 0
for (i in 1 : N){
if (z[i] > 0){
conta <- conta + 1
zaux1[conta] = z[i]
}
}
# N becomes the index of the last retained sample.
indx <- which(zaux1 != 0)
N <- floor(max(indx))
z <- zaux1[1:N]
# For every candidate split j, fit the left segment (loglike) and the
# right segment (loglikd) by maximum likelihood; column 1/2 hold the two
# estimated parameters.
matdf1 <- matrix(0, nrow = N, ncol = 2)
matdf2 <- matrix(0, nrow = N, ncol = 2)
for (j in 1 : (N - 1) ){
r1 <- 1
r2 <- sum(z[1: j]) / j
res1 <- maxBFGS(loglike, start=c(r1, r2))
matdf1[j, 1] <- res1$estimate[1]
matdf1[j, 2] <- res1$estimate[2]
r1 <- 1
r2 <- sum(z[(j + 1): N]) / (N - j)
res2 <- maxBFGS(loglikd, start=c(r1, r2))
matdf2[j, 1] <- res2$estimate[1]
matdf2[j, 2] <- res2$estimate[2]
}
# Search the edge position inside [lim, N - lim] with simulated annealing;
# the margin keeps both segments large enough for estimation.
lim <- as.numeric(14)
lower <- lim
upper <- N - lim
out <- GenSA(lower = lower, upper = upper, fn = func_obj_l_L_mu, control=list(maxit =100))
evidencias[k] <- out$par
evidencias_valores[k] <- out$value
}
# Plot the objective function of the last processed line (uses whatever
# globals func_obj_l_L_mu reads after the loop).
x <- seq(N - 1)
lobj <- rep(0, (N - 1))
for (j in 1 : (N - 1) ){
lobj[j] <- func_obj_l_L_mu(j)
}
df <- data.frame(x, lobj)
p <- ggplot(df, aes(x = x, y = lobj, color = 'darkred')) + geom_line() + xlab(TeX('Pixel $j$')) + ylab(TeX('$l(j)$')) + guides(color=guide_legend(title=NULL)) + scale_color_discrete(labels= lapply(sprintf('$\\sigma_{hh} = %2.0f$', NULL), TeX))
print(p)
## Print *.txt, change hh, hv or vv
# Write the per-line evidence table (unnamed columns) to the output file
# given as the second command-line argument, in the Data directory.
dfev <- data.frame(xev, evidencias)
names(dfev) <- NULL
setwd("..")
setwd("Data")
sink(args[2])
print(dfev)
sink()
setwd("..")
setwd("Code_r")
| /Code_r/evidencias_im_real_sa_param_mu_L.R | permissive | lcbjrrr/Code_GRSL_2020_1_copy | R | false | false | 2,572 | r | # Coded by Anderson Borba data: 07/07/2020 version 1.0
# Article submitted
# Fusion of Evidences in Intensities Channels for Edge Detection in PolSAR Images
# GRSL - IEEE Geoscience and Remote Sensing Letters
# Anderson A. de Borba, Maurı́cio Marengoni, and Alejandro C Frery
# Despriction
# Finds evince edge in each channel
# Input: Radial information to region
#
# Output: Edges evidences estimated
# obs: 1) Change the channels in the input and output files.
# 2) Disable the print in file after running the tests of interest in order not to modify files unduly.
#
library(ggplot2)
library(latex2exp)
library(GenSA)
library(maxLik)
#
source("func_obj_l_L_mu.r")
source("loglike.r")
source("loglikd.r")
#
setwd("..")
setwd("Data")
args <- commandArgs(trailingOnly = TRUE)
# channels hh(1), hv(2), and vv(3)
mat <- scan(args[1])
setwd("..")
setwd("Code_r")
########## setup to Flevoland
r <- 120
nr <- 25
mat <- matrix(mat, ncol = r, byrow = TRUE)
evidencias <- rep(0, nr)
evidencias_valores <- rep(0, nr)
xev <- seq(1, nr, 1 )
for (k in 1 : nr){
print(k)
N <- r
z <- rep(0, N)
z <- mat[k, 1: N]
zaux1 <- rep(0, N)
conta = 0
for (i in 1 : N){
if (z[i] > 0){
conta <- conta + 1
zaux1[conta] = z[i]
}
}
indx <- which(zaux1 != 0)
N <- floor(max(indx))
z <- zaux1[1:N]
matdf1 <- matrix(0, nrow = N, ncol = 2)
matdf2 <- matrix(0, nrow = N, ncol = 2)
for (j in 1 : (N - 1) ){
r1 <- 1
r2 <- sum(z[1: j]) / j
res1 <- maxBFGS(loglike, start=c(r1, r2))
matdf1[j, 1] <- res1$estimate[1]
matdf1[j, 2] <- res1$estimate[2]
r1 <- 1
r2 <- sum(z[(j + 1): N]) / (N - j)
res2 <- maxBFGS(loglikd, start=c(r1, r2))
matdf2[j, 1] <- res2$estimate[1]
matdf2[j, 2] <- res2$estimate[2]
}
lim <- as.numeric(14)
lower <- lim
upper <- N - lim
out <- GenSA(lower = lower, upper = upper, fn = func_obj_l_L_mu, control=list(maxit =100))
evidencias[k] <- out$par
evidencias_valores[k] <- out$value
}
x <- seq(N - 1)
lobj <- rep(0, (N - 1))
for (j in 1 : (N - 1) ){
lobj[j] <- func_obj_l_L_mu(j)
}
df <- data.frame(x, lobj)
p <- ggplot(df, aes(x = x, y = lobj, color = 'darkred')) + geom_line() + xlab(TeX('Pixel $j$')) + ylab(TeX('$l(j)$')) + guides(color=guide_legend(title=NULL)) + scale_color_discrete(labels= lapply(sprintf('$\\sigma_{hh} = %2.0f$', NULL), TeX))
print(p)
## Print *.txt, change hh, hv or vv
dfev <- data.frame(xev, evidencias)
names(dfev) <- NULL
setwd("..")
setwd("Data")
sink(args[2])
print(dfev)
sink()
setwd("..")
setwd("Code_r")
|
testlist <- list(id = NULL, id = NULL, booklet_id = c(8168473L, 2127314835L, 171177770L, -1942759639L, -1815221204L, 601253144L, -804651186L, 2094281728L, 860713787L, -971707632L, -1475044502L, 870040598L, -1182814578L, -1415711445L, 1901326755L, -1882837573L, 1340545259L, 1156039895L, 823641812L, -1106109928L, -1048157941L), person_id = c(0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L))
result <- do.call(dexterMST:::is_person_booklet_sorted,testlist)
str(result) | /dexterMST/inst/testfiles/is_person_booklet_sorted/AFL_is_person_booklet_sorted/is_person_booklet_sorted_valgrind_files/1615938066-test.R | no_license | akhikolla/updatedatatype-list1 | R | false | false | 826 | r | testlist <- list(id = NULL, id = NULL, booklet_id = c(8168473L, 2127314835L, 171177770L, -1942759639L, -1815221204L, 601253144L, -804651186L, 2094281728L, 860713787L, -971707632L, -1475044502L, 870040598L, -1182814578L, -1415711445L, 1901326755L, -1882837573L, 1340545259L, 1156039895L, 823641812L, -1106109928L, -1048157941L), person_id = c(0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L))
result <- do.call(dexterMST:::is_person_booklet_sorted,testlist)
str(result) |
############################################
## Randy Oswald #
## R Programming - Coursera #
## 7-22-2014 #
############################################
# Build a cache-aware wrapper around a matrix: the returned list exposes
# getters/setters for the matrix itself and for a memoised inverse.
makeCacheMatrix <- function(x = matrix()) {
  cached_inv <- NULL  # memoised inverse; NULL means "not computed yet"
  list(
    # Replace the stored matrix and drop any stale cached inverse.
    set = function(y) {
      x <<- y
      cached_inv <<- NULL
    },
    # Return the stored matrix.
    get = function() x,
    # Store a freshly computed inverse in the cache.
    setInverse = function(inv) cached_inv <<- inv,
    # Return the cached inverse (NULL if none has been stored).
    getInverse = function() cached_inv
  )
}
## Solving routine that takes in a "matrix object" created by makeCacheMatrix.
## Returns the inverse of the wrapped matrix, reusing the cached value when
## one is available. FIX: the original declared an unused second parameter
## `mat`; the canonical contract is `...`, forwarded to solve(), which stays
## backward compatible with existing positional calls.
cacheSolve <- function(x, ...) {
  # Check if we have cached an inverse (it would be non-NULL).
  inverse <- x$getInverse()
  # If we have a cached inverse, print message and return it.
  if (!is.null(inverse)) {
    message("getting cached data")
    return(inverse)
  }
  # If no cached inverse is available, we have to solve.
  data <- x$get()          # Get original matrix from object
  inverse <- solve(data, ...)  # Solve; extra args are forwarded to solve()
  x$setInverse(inverse)    # Cache result for next time
  inverse                  # Return inverse
}
| /cachematrix.R | no_license | randyoswald/ProgrammingAssignment2 | R | false | false | 1,763 | r | ############################################
## Randy Oswald #
## R Programming - Coursera #
## 7-22-2014 #
############################################
# Create a matrix 'object' that is capable of caching the result.
makeCacheMatrix <- function(x = matrix()) {
# Inverse member variable - start NULL until we cache an inverse.
inverse <- NULL
# Set a new matrix member function.
set <- function(y) {
x <<- y
# Clear cached inverse if we set cached matrix to something new.
inverse <<- NULL
}
# Get the matrix member function.
get <- function() {
x
}
# set the inverse of the matrix (after solving).
setInverse <- function(inv) {
inverse <<- inv
}
# Get the matrix member function.
getInverse <- function() {
inverse
}
# Assign and return a list of the functions.
list(set = set
, get = get
, setInverse = setInverse
, getInverse = getInverse
)
}
## Solving routine that takes in a "matrix object" created by makeCacheMatrix.
cacheSolve <- function(x, mat) {
# Check if we have cached an inverse (it would be non-NULL)
inverse <- x$getInverse()
# If we have a cached inverse, print message and return the inverse
if(!is.null(inverse)) {
message("getting cached data")
return(inverse)
}
# If no cached inverse is available, we have to solve.
data <- x$get() # Get original matrix from object
inverse <- solve(data) # Solve
x$setInverse(inverse) # Don't forget to cache result for next time!
inverse # Return inverse.
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/linear_regression-fit.R
\name{linear_regression}
\alias{linear_regression}
\alias{linear_regression.default}
\alias{linear_regression.data.frame}
\alias{linear_regression.matrix}
\alias{linear_regression.formula}
\alias{linear_regression.recipe}
\title{Fit a \code{linear_regression}}
\usage{
linear_regression(x, ...)
\method{linear_regression}{default}(x, ...)
\method{linear_regression}{data.frame}(x, y, ...)
\method{linear_regression}{matrix}(x, y, ...)
\method{linear_regression}{formula}(formula, data, ...)
\method{linear_regression}{recipe}(x, data, ...)
}
\arguments{
\item{x}{Depending on the context:
\itemize{
\item A \strong{data frame} of predictors.
\item A \strong{matrix} of predictors.
\item A \strong{recipe} specifying a set of preprocessing steps
created from \code{\link[recipes:recipe]{recipes::recipe()}}.
}}
\item{...}{Not currently used, but required for extensibility.}
\item{y}{When \code{x} is a \strong{data frame} or \strong{matrix}, \code{y} is the outcome
specified as:
\itemize{
\item A \strong{data frame} with 1 numeric column.
\item A \strong{matrix} with 1 numeric column.
\item A numeric \strong{vector}.
}}
\item{formula}{A formula specifying the outcome terms on the left-hand side,
and the predictor terms on the right-hand side.}
\item{data}{When a \strong{recipe} or \strong{formula} is used, \code{data} is specified as:
\itemize{
\item A \strong{data frame} containing both the predictors and the outcome.
}}
}
\value{
A \code{linear_regression} object.
}
\description{
\code{linear_regression()} fits a linear regression model to a set of
predictors and a single numeric outcome.
}
\examples{
predictors <- mtcars[, -1]
outcome <- mtcars[, 1]
# XY interface
mod <- linear_regression(predictors, outcome)
# Formula interface
mod2 <- linear_regression(mpg ~ ., mtcars)
# Recipes interface
library(recipes)
rec <- recipe(mpg ~ ., mtcars)
rec <- step_log(rec, disp)
mod3 <- linear_regression(rec, mtcars)
}
| /man/linear_regression.Rd | no_license | Spain-AI/glasp | R | false | true | 1,972 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/linear_regression-fit.R
\name{linear_regression}
\alias{linear_regression}
\alias{linear_regression.default}
\alias{linear_regression.data.frame}
\alias{linear_regression.matrix}
\alias{linear_regression.formula}
\alias{linear_regression.recipe}
\title{Fit a \code{linear_regression}}
\usage{
linear_regression(x, ...)
\method{linear_regression}{default}(x, ...)
\method{linear_regression}{data.frame}(x, y, ...)
\method{linear_regression}{matrix}(x, y, ...)
\method{linear_regression}{formula}(formula, data, ...)
\method{linear_regression}{recipe}(x, data, ...)
}
\arguments{
\item{x}{Depending on the context:
\itemize{
\item A \strong{data frame} of predictors.
\item A \strong{matrix} of predictors.
\item A \strong{recipe} specifying a set of preprocessing steps
created from \code{\link[recipes:recipe]{recipes::recipe()}}.
}}
\item{...}{Not currently used, but required for extensibility.}
\item{y}{When \code{x} is a \strong{data frame} or \strong{matrix}, \code{y} is the outcome
specified as:
\itemize{
\item A \strong{data frame} with 1 numeric column.
\item A \strong{matrix} with 1 numeric column.
\item A numeric \strong{vector}.
}}
\item{formula}{A formula specifying the outcome terms on the left-hand side,
and the predictor terms on the right-hand side.}
\item{data}{When a \strong{recipe} or \strong{formula} is used, \code{data} is specified as:
\itemize{
\item A \strong{data frame} containing both the predictors and the outcome.
}}
}
\value{
A \code{linear_regression} object.
}
\description{
\code{linear_regression()} fits a model.
}
\examples{
predictors <- mtcars[, -1]
outcome <- mtcars[, 1]
# XY interface
mod <- linear_regression(predictors, outcome)
# Formula interface
mod2 <- linear_regression(mpg ~ ., mtcars)
# Recipes interface
library(recipes)
rec <- recipe(mpg ~ ., mtcars)
rec <- step_log(rec, disp)
mod3 <- linear_regression(rec, mtcars)
}
|
# Load db data ------------------------------------------------------------
# Read the eye-bank EDC rate export; the literal string "NULL" marks
# missing measures. Rows with no measure are dropped immediately.
edc_rates_raw <-
read_csv(
"data/db_tables/eye_bank_edc_rates-2019-06-04.csv",
na = "NULL"
) %>%
filter(!is.na(.data$measure))
# Calculate EDC rate ------------------------------------------------------
# Pivot metrics wide (one column per metricid), derive metric 42 as the
# ratio 28 / 34 (presumably the EDC rate numerator/denominator -- TODO
# confirm against the metric table), then pivot back to long form.
edc_prod <- edc_rates_raw %>%
select(-metricname) %>%
tidyr::spread(metricid, measure) %>%
mutate(`42` = .data$`28` / .data$`34`) %>%
select(startdate, enddate, eyebankid, .data$`42`) %>%
tidyr::gather(metric_id, measure, -startdate, -enddate, -eyebankid) %>%
# Drop ratios that are missing or infinite (division by zero).
filter(
!is.na(measure),
is.finite(measure)
)
write_csv(edc_prod, "data/output/2018_india_edc_prod_data.csv")
| /compute_edc_rates.R | no_license | sightlife-capstone/data-extraction | R | false | false | 692 | r |
# Load db data ------------------------------------------------------------
edc_rates_raw <-
read_csv(
"data/db_tables/eye_bank_edc_rates-2019-06-04.csv",
na = "NULL"
) %>%
filter(!is.na(.data$measure))
# Calculate EDC rate ------------------------------------------------------
edc_prod <- edc_rates_raw %>%
select(-metricname) %>%
tidyr::spread(metricid, measure) %>%
mutate(`42` = .data$`28` / .data$`34`) %>%
select(startdate, enddate, eyebankid, .data$`42`) %>%
tidyr::gather(metric_id, measure, -startdate, -enddate, -eyebankid) %>%
filter(
!is.na(measure),
is.finite(measure)
)
write_csv(edc_prod, "data/output/2018_india_edc_prod_data.csv")
|
# ______________________________________________________
# Exact activity type identification inside Sedentary
# class.
#
# Classifiers used in this script:
# 1. SVM
# 2. Decision Tree
# 3. Random Forest
#
# ______________
# Matin Kheirkhahan (matinkheirkhahan@ufl.edu)
# ______________________________________________________
setwd("~/Workspaces/R workspace/Comparative Activity Recognition/BoW/Matin Scripts/")
# Interactive: the user picks the training and test RDS files by hand.
training.df <- readRDS(file.choose())
test.df <- readRDS(file.choose())
# Class labels ----------------------
# Helper functions that map a task name to activity-class labels.
source("../../Utilities/FUN_PA_Labels_and_METs.R")
training.df$Sedentary <- sapply(as.character(training.df$Task), FUN = giveClassLabel_sedentary)
test.df$Sedentary <- sapply(as.character(test.df$Task), FUN = giveClassLabel_sedentary)
rm(giveClassLabel_sedentary, giveClassLabel_locomotion, giveMETIntensity, giveMETValue)
# Keep sedentary rows only and drop the helper Sedentary column (appended
# last, hence -ncol). Task names are dotted so they are valid R level names
# (required by classProbs = TRUE below), then re-factored to drop unused levels.
training.df <- training.df[training.df$Sedentary, -ncol(training.df)]
training.df$Task <- gsub(x = training.df$Task, pattern = " ", replacement = ".")
training.df$Task <- factor(training.df$Task, levels = levels(as.factor(as.character(training.df$Task))))
test.df <- test.df[test.df$Sedentary, -ncol(test.df)]
test.df$Task <- gsub(test.df$Task, pattern = " ", replacement = ".")
test.df$Task <- factor(test.df$Task, levels = levels(as.factor(as.character(test.df$Task))))
# Training phase --------------------
# SVM ========
library(e1071)
library(kernlab)
# Features = all columns except the first two (PID and Task); target = Task.
x <- training.df[, -c(1:2)]
y <- training.df$Task
# Finding the best fit ###############
set.seed(5855)
ctrl <- trainControl(method = "LOOCV", classProbs = T) # Leave-one-out-cross-validation is used for training.
svm.sedType.tuned <- train(x = x,
y = y,
trControl = ctrl,
method = "svmLinear",
metric = "Accuracy")
# Saving the trained classifier for future use
save(svm.sedType.tuned, file = "Trained models/sedentary_type_SVM.Rdata")
# Evaluation on test set #############
# One row per test sample: participant id, true task, predicted task.
svm.out <- data.frame(PID = test.df$PID, Actual = test.df$Task, Predicted = NA)
svm.out$Predicted <- predict(svm.sedType.tuned, test.df[, -c(1:2)])
# Save the test-set predictions for future analysis.
write.csv(svm.out, "~/Dropbox/Work-Research/Current Directory/Activity Recognition - Comparative Study/Data/Outputs/Bag of Words/Matin/Sedentary_Type_SVM.csv", row.names = F)
# Confusion table of Actual vs Predicted.
table(svm.out[, -1])
# Decision Tree ========
library(rpart)
x <- training.df[, -c(1:2)]
y <- training.df$Task
# Finding the best fit ###############
set.seed(5855)
ctrl <- trainControl(method = "LOOCV", classProbs = T) # Leave-one-out-cross-validation is used for training.
decisionTree.sedType.tuned <- train(x = x,
y = y,
trControl = ctrl,
method = "rpart",
metric = "Accuracy")
# Saving the trained classifier for future use
save(decisionTree.sedType.tuned, file = "Trained models/sedentary_type_decisionTree.Rdata")
# Evaluation on test set #############
decisionTree.out <- data.frame(PID = test.df$PID, Actual = test.df$Task, Predicted = NA)
decisionTree.out$Predicted <- predict(decisionTree.sedType.tuned, test.df[, -c(1:2)])
# saving the output on test set for future analysis.
write.csv(decisionTree.out, "~/Dropbox/Work-Research/Current Directory/Activity Recognition - Comparative Study/Data/Outputs/Bag of Words/Matin/Sedentary_Type_decisionTree.csv", row.names = F)
table(decisionTree.out[, -1])
# Random Forest ========
library(randomForest)
x <- training.df[, -c(1:2)]
y <- training.df$Task
# Finding the best fit ###############
ctrl <- trainControl(method = "oob", classProbs = T)
nTree <- 5000 # This is altered to find the best combination (ntree, mtry)
mtry <- 2:4
tunegrid <- expand.grid(.mtry=mtry)
set.seed(5855)
randomForest.sedType.tuned <- train(x = x, y = y, method = "rf", metric = "Accuracy", tuneGrid = tunegrid, trControl = ctrl, ntree = nTree)
randomForest.sedType.tuned$results
# Saving the trained classifier for future use (Best: ntree = 1500, mtry = 2)
save(randomForest.sedType.tuned, file = "Trained models/sedentary_type_randomForest.Rdata")
# Evaluation on test set #############
randomForest.out <- data.frame(PID = test.df$PID, Actual = test.df$Task, Predicted = NA)
randomForest.out$Predicted <- predict(randomForest.sedType.tuned, test.df[, -c(1:2)])
# saving the output on test set for future analysis.
write.csv(randomForest.out, "~/Dropbox/Work-Research/Current Directory/Activity Recognition - Comparative Study/Data/Outputs/Bag of Words/Matin/Sedentary_Type_randomForest.csv", row.names = F)
table(randomForest.out[, -1])
| /BoW/Matin Scripts/SCP_Sedentary_withinClass_Identification.R | no_license | matin-ufl/comparative_activity_recognition | R | false | false | 4,715 | r | # ______________________________________________________
# Exact activity type identification inside Sedentary
# class.
#
# Classifiers used in this script:
# 1. SVM
# 2. Decision Tree
# 3. Random Forest
#
# ______________
# Matin Kheirkhahan (matinkheirkhahan@ufl.edu)
# ______________________________________________________
setwd("~/Workspaces/R workspace/Comparative Activity Recognition/BoW/Matin Scripts/")
# Fix: trainControl() and train() come from caret, which was never attached.
library(caret)
# Training and test feature sets (.rds files), chosen interactively.
training.df <- readRDS(file.choose())
test.df <- readRDS(file.choose())
# Class labels ----------------------
# Loads giveClassLabel_sedentary() and related label/MET helper functions.
source("../../Utilities/FUN_PA_Labels_and_METs.R")
training.df$Sedentary <- sapply(as.character(training.df$Task), FUN = giveClassLabel_sedentary)
test.df$Sedentary <- sapply(as.character(test.df$Task), FUN = giveClassLabel_sedentary)
rm(giveClassLabel_sedentary, giveClassLabel_locomotion, giveMETIntensity, giveMETValue)
# Keep only the sedentary rows and drop the temporary Sedentary indicator column.
training.df <- training.df[training.df$Sedentary, -ncol(training.df)]
# Replace spaces in task names so they are syntactically valid factor levels
# (required by caret when classProbs = TRUE).
training.df$Task <- gsub(x = training.df$Task, pattern = " ", replacement = ".")
training.df$Task <- factor(training.df$Task, levels = levels(as.factor(as.character(training.df$Task))))
test.df <- test.df[test.df$Sedentary, -ncol(test.df)]
test.df$Task <- gsub(x = test.df$Task, pattern = " ", replacement = ".")
test.df$Task <- factor(test.df$Task, levels = levels(as.factor(as.character(test.df$Task))))
# Training phase --------------------
# SVM ========
library(e1071)
library(kernlab)
# Columns 1:2 are identifiers (presumably PID and Task -- confirm against the
# .rds files); the remaining columns are the features.
x <- training.df[, -c(1:2)]
y <- training.df$Task
# Finding the best fit ###############
set.seed(5855)
ctrl <- trainControl(method = "LOOCV", classProbs = TRUE) # Leave-one-out cross-validation is used for training.
svm.sedType.tuned <- train(x = x,
                           y = y,
                           trControl = ctrl,
                           method = "svmLinear",
                           metric = "Accuracy")
# Saving the trained classifier for future use
save(svm.sedType.tuned, file = "Trained models/sedentary_type_SVM.Rdata")
# Evaluation on test set #############
svm.out <- data.frame(PID = test.df$PID, Actual = test.df$Task, Predicted = NA)
svm.out$Predicted <- predict(svm.sedType.tuned, test.df[, -c(1:2)])
# saving the output on test set for future analysis.
write.csv(svm.out, "~/Dropbox/Work-Research/Current Directory/Activity Recognition - Comparative Study/Data/Outputs/Bag of Words/Matin/Sedentary_Type_SVM.csv", row.names = FALSE)
# Confusion table: actual vs. predicted labels.
table(svm.out[, -1])
# Decision Tree ========
library(rpart)
x <- training.df[, -c(1:2)]
y <- training.df$Task
# Finding the best fit ###############
set.seed(5855)
ctrl <- trainControl(method = "LOOCV", classProbs = TRUE) # Leave-one-out cross-validation is used for training.
decisionTree.sedType.tuned <- train(x = x,
                                    y = y,
                                    trControl = ctrl,
                                    method = "rpart",
                                    metric = "Accuracy")
# Saving the trained classifier for future use
save(decisionTree.sedType.tuned, file = "Trained models/sedentary_type_decisionTree.Rdata")
# Evaluation on test set #############
decisionTree.out <- data.frame(PID = test.df$PID, Actual = test.df$Task, Predicted = NA)
decisionTree.out$Predicted <- predict(decisionTree.sedType.tuned, test.df[, -c(1:2)])
# saving the output on test set for future analysis.
write.csv(decisionTree.out, "~/Dropbox/Work-Research/Current Directory/Activity Recognition - Comparative Study/Data/Outputs/Bag of Words/Matin/Sedentary_Type_decisionTree.csv", row.names = FALSE)
# Confusion table: actual vs. predicted labels.
table(decisionTree.out[, -1])
# Random Forest ========
library(randomForest)
x <- training.df[, -c(1:2)]
y <- training.df$Task
# Finding the best fit ###############
# Out-of-bag error replaces cross-validation for random forests.
ctrl <- trainControl(method = "oob", classProbs = TRUE)
nTree <- 5000 # This is altered to find the best combination (ntree, mtry)
mtry <- 2:4
tunegrid <- expand.grid(.mtry = mtry)
set.seed(5855)
randomForest.sedType.tuned <- train(x = x, y = y, method = "rf", metric = "Accuracy", tuneGrid = tunegrid, trControl = ctrl, ntree = nTree)
randomForest.sedType.tuned$results
# Saving the trained classifier for future use (Best: ntree = 1500, mtry = 2)
save(randomForest.sedType.tuned, file = "Trained models/sedentary_type_randomForest.Rdata")
# Evaluation on test set #############
randomForest.out <- data.frame(PID = test.df$PID, Actual = test.df$Task, Predicted = NA)
randomForest.out$Predicted <- predict(randomForest.sedType.tuned, test.df[, -c(1:2)])
# saving the output on test set for future analysis.
write.csv(randomForest.out, "~/Dropbox/Work-Research/Current Directory/Activity Recognition - Comparative Study/Data/Outputs/Bag of Words/Matin/Sedentary_Type_randomForest.csv", row.names = FALSE)
# Confusion table: actual vs. predicted labels.
table(randomForest.out[, -1])
#' Simulation of a compartmental infectious disease transmission model with 3 types of hosts and intervention
#'
#' @description This model allows for the simulation of an ID with 3 types of hosts.
#' Groups are assumed to be children, adults and elderly.
#' Intervention can be applied to any of the groups for a certain duration.
#'
#'
#' @param Sc : initial number of susceptible children : numeric
#' @param Ic : initial number of infected children : numeric
#' @param Sa : initial number of susceptible adults : numeric
#' @param Ia : initial number of infected adults : numeric
#' @param Se : initial number of susceptible elderly : numeric
#' @param Ie : initial number of infected elderly : numeric
#' @param bcc : rate of transmission to susceptible child from infected child : numeric
#' @param bca : rate of transmission to susceptible child from infected adult : numeric
#' @param bce : rate of transmission to susceptible child from infected elderly : numeric
#' @param bac : rate of transmission to susceptible adult from infected child : numeric
#' @param baa : rate of transmission to susceptible adult from infected adult : numeric
#' @param bae : rate of transmission to susceptible adult from infected elderly : numeric
#' @param bec : rate of transmission to susceptible elderly from infected child : numeric
#' @param bea : rate of transmission to susceptible elderly from infected adult : numeric
#' @param bee : rate of transmission to susceptible elderly from infected elderly : numeric
#' @param gc : rate at which infected children recover or die : numeric
#' @param ga : rate at which infected adults recover or die : numeric
#' @param ge : rate at which infected elderly recover or die : numeric
#' @param wc : rate at which immunity in children wanes : numeric
#' @param wa : rate at which immunity in adults wanes : numeric
#' @param we : rate at which immunity in elderly wanes : numeric
#' @param mc : fraction of infected children who die : numeric
#' @param ma : fraction of infected adults who die : numeric
#' @param me : fraction of infected elderly who die : numeric
#' @param f1 : strength of intervention applied to children, between 0 and 1 : numeric
#' @param T1_start : start of intervention applied to children : numeric
#' @param T1_end : end of intervention applied to children : numeric
#' @param f2 : strength of intervention applied to adults, between 0 and 1 : numeric
#' @param T2_start : start of intervention applied to adults : numeric
#' @param T2_end : end of intervention applied to adults : numeric
#' @param f3 : strength of intervention applied to elderly, between 0 and 1 : numeric
#' @param T3_start : start of intervention applied to elderly : numeric
#' @param T3_end : end of intervention applied to elderly : numeric
#' @param tmax : maximum simulation time : numeric
#' @return This function returns the simulation result as obtained from a call
#' to the deSolve ode solver.
#' @details A compartmental ID model with several states/compartments
#' is simulated as a set of ordinary differential
#' equations. The function returns the output from the odesolver as a matrix,
#' with one column per compartment/variable. The first column is time.
#' The model implement basic processes of infection, recovery and death.
#' Waning immunity is also implemented.
#' Control is applied, which reduces transmission by the indicated proportion, during times tstart and tend.
#' Control can be applied at different levels to the different groups.
#' @section Warning:
#' This function does not perform any error checking. So if you try to do
#' something nonsensical (e.g. any negative values or fractions > 1),
#' the code will likely abort with an error message.
#' @examples
#' # To run the simulation with default parameters just call the function:
#' result <- simulate_idcontrolmultigroup_ode()
#' @author Andreas Handel
#' @export
simulate_idcontrolmultigroup_ode <- function(Sc = 1000, Ic = 0, Sa = 1000, Ia = 1, Se = 1000, Ie = 0,
bcc = 0.0003, bca = 0.0001, bce = 0.0001, bac = 0.0001, baa = 0.0003, bae = 0.0001, bec = 0.0001, bea = 0.0001, bee = 0.0003,
gc = 0.1, ga = 0.1, ge = 0.1, wc = 0, wa = 0, we = 0, mc = 0.001, ma = 0.01, me = 0.1,
f1 = 0, T1_start = 50, T1_end = 150, f2 = 0, T2_start = 50, T2_end = 150, f3 = 0, T3_start = 50, T3_end = 150, tmax = 600)
{
# This function is used in the solver function and has no independent usages
interventionmodel <- function(t, y, parms)
{
with(
as.list(c(y,parms)), #lets us access variables and parameters stored in y and pars by name
{
#apply intervention, which reduces rates at which a group gets infected
#naming convention (see roxygen above): b_xy is the rate at which a susceptible
#of group x is infected by an infected of group y (e.g. bca = child infected by
#adult), so protecting a group scales all rates INTO that group:
#f1 scales bc* (children), f2 scales ba* (adults), f3 scales be* (elderly)
if (t>=T1_start && t<=T1_end) {bcc = (1 - f1) * bcc; bca = (1 - f1) * bca; bce = (1 - f1) * bce; }
if (t>=T2_start && t<=T2_end) {bac = (1 - f2) * bac; baa = (1 - f2) * baa; bae = (1 - f2) * bae; }
if (t>=T3_start && t<=T3_end) {bec = (1 - f3) * bec; bea = (1 - f3) * bea; bee = (1 - f3) * bee; }
#the ordinary differential equations
#for each group: susceptibles are infected via contact with all three infected
#groups and replenished by waning immunity (rate w); of those leaving the
#infected compartment (rate g), a fraction m die (D) and the rest recover (R)
dSc = - Sc * (bcc * Ic + bca * Ia + bce * Ie) + wc * Rc
dIc = Sc * (bcc * Ic + bca * Ia + bce * Ie) - gc * Ic
dRc = (1-mc)*gc * Ic - wc * Rc
dDc = mc*gc*Ic
dSa = - Sa * (bac * Ic + baa * Ia + bae * Ie) + wa * Ra
dIa = Sa * (bac * Ic + baa * Ia + bae * Ie) - ga * Ia
dRa = (1-ma)*ga * Ia - wa * Ra
dDa = ma*ga*Ia
dSe = - Se * (bec * Ic + bea * Ia + bee * Ie) + we *Re
dIe = Se * (bec * Ic + bea * Ia + bee * Ie) - ge * Ie
dRe = (1-me)*ge * Ie - we * Re
dDe = me*ge*Ie
#derivatives must be returned in the same order as the state vector Y0 below
list(c(dSc, dIc, dRc, dDc, dSa, dIa, dRa, dDa, dSe, dIe, dRe, dDe))
}
) #close with statement
} #end function specifying the ODEs
############################################################
Y0 = c(Sc = Sc, Ic = Ic, Rc = 0, Dc = 0, Sa = Sa, Ia = Ia, Ra = 0, Da = 0, Se = Se, Ie = Ie, Re = 0, De = 0); #combine initial conditions into a vector
dt = min(0.5, tmax / 100); #time step for which to get results back
timevec = seq(0, tmax, dt); #vector of times for which solution is returned (note that the internal timestep of the integrator is different)
############################################################
#vector of parameters which is sent to the ODE function
pars=c(bcc = bcc, bca = bca, bce = bce, bac = bac, baa = baa, bae = bae , bec = bec, bea = bea, bee = bee, gc = gc, ga = ga, ge = ge, wc = wc, wa = wa, we = we, mc = mc, ma = ma, me = me,
f1 = f1, T1_start = T1_start, T1_end = T1_end, f2 = f2, T2_start = T2_start, T2_end = T2_end, f3 = f3, T3_start = T3_start, T3_end = T3_end)
odeoutput = deSolve::ode(y = Y0, times = timevec, func = interventionmodel, parms=pars, method = "lsoda", atol=1e-8, rtol=1e-8);
#return a list with the time series as a data.frame (one column per compartment)
result <- list()
result$ts <- as.data.frame(odeoutput)
return(result)
}
| /inst/simulatorfunctions/simulate_idcontrolmultigroup_ode.R | no_license | cran/DSAIDE | R | false | false | 7,292 | r | #' Simulation of a compartmental infectious disease transmission model with 3 types of hosts and intervention
#'
#' @description This model allows for the simulation of an ID with 3 types of hosts.
#' Groups are assumed to be children, adults and elderly.
#' Intervention can be applied to any of the groups for a certain duration.
#'
#'
#' @param Sc : initial number of susceptible children : numeric
#' @param Ic : initial number of infected children : numeric
#' @param Sa : initial number of susceptible adults : numeric
#' @param Ia : initial number of infected adults : numeric
#' @param Se : initial number of susceptible elderly : numeric
#' @param Ie : initial number of infected elderly : numeric
#' @param bcc : rate of transmission to susceptible child from infected child : numeric
#' @param bca : rate of transmission to susceptible child from infected adult : numeric
#' @param bce : rate of transmission to susceptible child from infected elderly : numeric
#' @param bac : rate of transmission to susceptible adult from infected child : numeric
#' @param baa : rate of transmission to susceptible adult from infected adult : numeric
#' @param bae : rate of transmission to susceptible adult from infected elderly : numeric
#' @param bec : rate of transmission to susceptible elderly from infected child : numeric
#' @param bea : rate of transmission to susceptible elderly from infected adult : numeric
#' @param bee : rate of transmission to susceptible elderly from infected elderly : numeric
#' @param gc : rate at which infected children recover or die : numeric
#' @param ga : rate at which infected adults recover or die : numeric
#' @param ge : rate at which infected elderly recover or die : numeric
#' @param wc : rate at which immunity in children wanes : numeric
#' @param wa : rate at which immunity in adults wanes : numeric
#' @param we : rate at which immunity in elderly wanes : numeric
#' @param mc : fraction of infected children who die : numeric
#' @param ma : fraction of infected adults who die : numeric
#' @param me : fraction of infected elderly who die : numeric
#' @param f1 : strength of intervention applied to children, between 0 and 1 : numeric
#' @param T1_start : start of intervention applied to children : numeric
#' @param T1_end : end of intervention applied to children : numeric
#' @param f2 : strength of intervention applied to adults, between 0 and 1 : numeric
#' @param T2_start : start of intervention applied to adults : numeric
#' @param T2_end : end of intervention applied to adults : numeric
#' @param f3 : strength of intervention applied to elderly, between 0 and 1 : numeric
#' @param T3_start : start of intervention applied to elderly : numeric
#' @param T3_end : end of intervention applied to elderly : numeric
#' @param tmax : maximum simulation time : numeric
#' @return This function returns the simulation result as obtained from a call
#' to the deSolve ode solver.
#' @details A compartmental ID model with several states/compartments
#' is simulated as a set of ordinary differential
#' equations. The function returns the output from the odesolver as a matrix,
#' with one column per compartment/variable. The first column is time.
#' The model implement basic processes of infection, recovery and death.
#' Waning immunity is also implemented.
#' Control is applied, which reduces transmission by the indicated proportion, during times tstart and tend.
#' Control can be applied at different levels to the different groups.
#' @section Warning:
#' This function does not perform any error checking. So if you try to do
#' something nonsensical (e.g. any negative values or fractions > 1),
#' the code will likely abort with an error message.
#' @examples
#' # To run the simulation with default parameters just call the function:
#' result <- simulate_idcontrolmultigroup_ode()
#' @author Andreas Handel
#' @export
simulate_idcontrolmultigroup_ode <- function(Sc = 1000, Ic = 0, Sa = 1000, Ia = 1, Se = 1000, Ie = 0,
bcc = 0.0003, bca = 0.0001, bce = 0.0001, bac = 0.0001, baa = 0.0003, bae = 0.0001, bec = 0.0001, bea = 0.0001, bee = 0.0003,
gc = 0.1, ga = 0.1, ge = 0.1, wc = 0, wa = 0, we = 0, mc = 0.001, ma = 0.01, me = 0.1,
f1 = 0, T1_start = 50, T1_end = 150, f2 = 0, T2_start = 50, T2_end = 150, f3 = 0, T3_start = 50, T3_end = 150, tmax = 600)
{
# This function is used in the solver function and has no independent usages
interventionmodel <- function(t, y, parms)
{
with(
as.list(c(y,parms)), #lets us access variables and parameters stored in y and pars by name
{
#apply intervention, which reduces rates at which a group gets infected
#naming convention (see roxygen above): b_xy is the rate at which a susceptible
#of group x is infected by an infected of group y (e.g. bca = child infected by
#adult), so protecting a group scales all rates INTO that group:
#f1 scales bc* (children), f2 scales ba* (adults), f3 scales be* (elderly)
if (t>=T1_start && t<=T1_end) {bcc = (1 - f1) * bcc; bca = (1 - f1) * bca; bce = (1 - f1) * bce; }
if (t>=T2_start && t<=T2_end) {bac = (1 - f2) * bac; baa = (1 - f2) * baa; bae = (1 - f2) * bae; }
if (t>=T3_start && t<=T3_end) {bec = (1 - f3) * bec; bea = (1 - f3) * bea; bee = (1 - f3) * bee; }
#the ordinary differential equations
#for each group: susceptibles are infected via contact with all three infected
#groups and replenished by waning immunity (rate w); of those leaving the
#infected compartment (rate g), a fraction m die (D) and the rest recover (R)
dSc = - Sc * (bcc * Ic + bca * Ia + bce * Ie) + wc * Rc
dIc = Sc * (bcc * Ic + bca * Ia + bce * Ie) - gc * Ic
dRc = (1-mc)*gc * Ic - wc * Rc
dDc = mc*gc*Ic
dSa = - Sa * (bac * Ic + baa * Ia + bae * Ie) + wa * Ra
dIa = Sa * (bac * Ic + baa * Ia + bae * Ie) - ga * Ia
dRa = (1-ma)*ga * Ia - wa * Ra
dDa = ma*ga*Ia
dSe = - Se * (bec * Ic + bea * Ia + bee * Ie) + we *Re
dIe = Se * (bec * Ic + bea * Ia + bee * Ie) - ge * Ie
dRe = (1-me)*ge * Ie - we * Re
dDe = me*ge*Ie
#derivatives must be returned in the same order as the state vector Y0 below
list(c(dSc, dIc, dRc, dDc, dSa, dIa, dRa, dDa, dSe, dIe, dRe, dDe))
}
) #close with statement
} #end function specifying the ODEs
############################################################
Y0 = c(Sc = Sc, Ic = Ic, Rc = 0, Dc = 0, Sa = Sa, Ia = Ia, Ra = 0, Da = 0, Se = Se, Ie = Ie, Re = 0, De = 0); #combine initial conditions into a vector
dt = min(0.5, tmax / 100); #time step for which to get results back
timevec = seq(0, tmax, dt); #vector of times for which solution is returned (note that the internal timestep of the integrator is different)
############################################################
#vector of parameters which is sent to the ODE function
pars=c(bcc = bcc, bca = bca, bce = bce, bac = bac, baa = baa, bae = bae , bec = bec, bea = bea, bee = bee, gc = gc, ga = ga, ge = ge, wc = wc, wa = wa, we = we, mc = mc, ma = ma, me = me,
f1 = f1, T1_start = T1_start, T1_end = T1_end, f2 = f2, T2_start = T2_start, T2_end = T2_end, f3 = f3, T3_start = T3_start, T3_end = T3_end)
odeoutput = deSolve::ode(y = Y0, times = timevec, func = interventionmodel, parms=pars, method = "lsoda", atol=1e-8, rtol=1e-8);
#return a list with the time series as a data.frame (one column per compartment)
result <- list()
result$ts <- as.data.frame(odeoutput)
return(result)
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/crs-funs.R
\name{reproject}
\alias{reproject}
\title{Reproject lat/long spatial object so that they are in units of 1m}
\usage{
reproject(shp, crs = geo_select_aeq(shp))
}
\arguments{
\item{shp}{A spatial object with a geographic (WGS84) coordinate system}
\item{crs}{An optional coordinate reference system (if not provided it is set
automatically by \code{\link[=geo_select_aeq]{geo_select_aeq()}}).}
}
\description{
Many GIS operations (e.g. finding the area) require projected spatial data with units of metres rather than latitude/longitude degrees. This function reprojects a lat/long (WGS84) object so that its coordinates are in units of 1 m.
}
\examples{
data(routes_fast)
rf_aeq <- reproject(routes_fast[1:3, ])
rf_osgb <- reproject(routes_fast[1:3, ], 27700)
}
\seealso{
Other geo: \code{\link{bbox_scale}},
\code{\link{buff_geo}}, \code{\link{crs_select_aeq}},
\code{\link{gclip}}, \code{\link{geo_bb_matrix}},
\code{\link{geo_bb}}, \code{\link{mapshape_available}},
\code{\link{mapshape}}, \code{\link{quadrant}}
}
\concept{geo}
| /man/reproject.Rd | permissive | weijia2013/stplanr | R | false | true | 940 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/crs-funs.R
\name{reproject}
\alias{reproject}
\title{Reproject lat/long spatial object so that they are in units of 1m}
\usage{
reproject(shp, crs = geo_select_aeq(shp))
}
\arguments{
\item{shp}{A spatial object with a geographic (WGS84) coordinate system}
\item{crs}{An optional coordinate reference system (if not provided it is set
automatically by \code{\link[=geo_select_aeq]{geo_select_aeq()}}).}
}
\description{
Many GIS operations (e.g. finding the area) require projected spatial data with units of metres rather than latitude/longitude degrees. This function reprojects a lat/long (WGS84) object so that its coordinates are in units of 1 m.
}
\examples{
data(routes_fast)
rf_aeq <- reproject(routes_fast[1:3, ])
rf_osgb <- reproject(routes_fast[1:3, ], 27700)
}
\seealso{
Other geo: \code{\link{bbox_scale}},
\code{\link{buff_geo}}, \code{\link{crs_select_aeq}},
\code{\link{gclip}}, \code{\link{geo_bb_matrix}},
\code{\link{geo_bb}}, \code{\link{mapshape_available}},
\code{\link{mapshape}}, \code{\link{quadrant}}
}
\concept{geo}
|
r=359.92
https://sandbox.dams.library.ucdavis.edu/fcrepo/rest/collection/sherry-lehmann/catalogs/d7qc72/media/images/d7qc72-030/svc:tesseract/full/full/359.92/default.jpg Accept:application/hocr+xml
| /ark_87287/d7qc72/d7qc72-030/rotated.r | permissive | ucd-library/wine-price-extraction | R | false | false | 199 | r | r=359.92
https://sandbox.dams.library.ucdavis.edu/fcrepo/rest/collection/sherry-lehmann/catalogs/d7qc72/media/images/d7qc72-030/svc:tesseract/full/full/359.92/default.jpg Accept:application/hocr+xml
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/qad.R
\name{qad}
\alias{qad}
\alias{qad.data.frame}
\alias{qad.numeric}
\title{Measure of (asymmetric and directed) dependence}
\usage{
qad(x, ...)
\method{qad}{data.frame}(
x,
resolution = NULL,
p.value = TRUE,
nperm = 1000,
p.value_asymmetry = FALSE,
nboot = 1000,
print = TRUE,
remove.00 = FALSE,
...
)
\method{qad}{numeric}(
x,
y,
resolution = NULL,
p.value = TRUE,
nperm = 1000,
p.value_asymmetry = FALSE,
nboot = 1000,
print = TRUE,
remove.00 = FALSE,
...
)
}
\arguments{
\item{x}{a data.frame containing two columns with the observations of the bivariate sample or a (non-empty) numeric vector of data values}
\item{...}{Further arguments passed to 'qad' will be ignored}
\item{resolution}{an integer indicating the number of strips for the checkerboard aggregation (see \link{ECBC}). We recommend to use the default value (resolution = NULL)}
\item{p.value}{a logical indicating whether to return a p-value of rejecting independence (based on permutation).}
\item{nperm}{an integer indicating the number of permutation runs (if p.value = TRUE)}
\item{p.value_asymmetry}{a logical indicating whether to return a (heuristic) p-value for the measure of asymmetry (based on bootstrap).}
\item{nboot}{an integer indicating the number of runs for the bootstrap.}
\item{print}{a logical indicating whether the result of qad is printed.}
\item{remove.00}{a logical indicating whether double 0 entries should be excluded (default = FALSE)}
\item{y}{a (non-empty) numeric vector of data values.}
}
\value{
qad returns an object of class qad containing the following components:
\item{data}{ a data.frame containing the input data.}
\item{q(X,Y)}{influence of X on Y}
\item{q(Y,X)}{influence of Y on X}
\item{max.dependence}{maximal dependence}
\item{results}{ a data.frame containing the results of the dependence measures.}
\item{mass_matrix}{ a matrix containing the mass distribution of the empirical checkerboard copula.}
\item{resolution}{an integer containing the used resolution of the checkerboard aggregation.}
\item{n}{Sample size.}
}
\description{
Quantification of (asymmetric and directed) dependence structures between two random variables X and Y.
}
\details{
qad is the implementation of a strongly consistent estimator of the copula based dependence measure zeta_1 introduced in Trutschnig 2011.
We first compute the empirical copula of a two-dimensional sample, aggregate it to the so called empirical checkerboard copula (ECBC), and
calculate zeta_1 of the ECBC and its transpose. In order to test for independence (in both directions), a built-in p-value
is implemented (a permutation test with nperm permutation runs to estimate the p-value).
Furthermore, a (heuristic) bootstrap test with nboot runs can be applied to estimate a p-value for the measure of asymmetry a.
}
\examples{
#Example 1 (independence)
n <- 100
x <- runif(n,0,1)
y <- runif(n,0,1)
sample <- data.frame(x,y)
qad(sample)
###
#Example 2 (mutual complete dependence)
n <- 500
x <- runif(n,0,1)
y <- x^2
sample <- data.frame(x,y)
qad(sample)
#Example 3 (complete dependence)
n <- 1000
x <- runif(n,-10,10)
y <- sin(x)
sample <- data.frame(x,y)
qad(sample)
#Example 4 (Asymmetry)
n <- 100
x <- runif(n,0,1)
y <- (2*x) \%\% 1
qad(x, y)
}
\references{
Trutschnig, W. (2011). On a strong metric on the space of copulas and its induced dependence measure, Journal of Mathematical Analysis and Applications 384, 690-705.
Junker, R., Griessenberger, F. and Trutschnig, W. (2021). Estimating scale-invariant directed dependence of bivariate distributions. Computational Statistics and Data Analysis, 153.
}
\seealso{
A tutorial can be found at \url{http://www.trutschnig.net/software.html}.
}
| /man/qad.Rd | no_license | cran/qad | R | false | true | 3,929 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/qad.R
\name{qad}
\alias{qad}
\alias{qad.data.frame}
\alias{qad.numeric}
\title{Measure of (asymmetric and directed) dependence}
\usage{
qad(x, ...)
\method{qad}{data.frame}(
x,
resolution = NULL,
p.value = TRUE,
nperm = 1000,
p.value_asymmetry = FALSE,
nboot = 1000,
print = TRUE,
remove.00 = FALSE,
...
)
\method{qad}{numeric}(
x,
y,
resolution = NULL,
p.value = TRUE,
nperm = 1000,
p.value_asymmetry = FALSE,
nboot = 1000,
print = TRUE,
remove.00 = FALSE,
...
)
}
\arguments{
\item{x}{a data.frame containing two columns with the observations of the bivariate sample or a (non-empty) numeric vector of data values}
\item{...}{Further arguments passed to 'qad' will be ignored}
\item{resolution}{an integer indicating the number of strips for the checkerboard aggregation (see \link{ECBC}). We recommend to use the default value (resolution = NULL)}
\item{p.value}{a logical indicating whether to return a p-value of rejecting independence (based on permutation).}
\item{nperm}{an integer indicating the number of permutation runs (if p.value = TRUE)}
\item{p.value_asymmetry}{a logical indicating whether to return a (heuristic) p-value for the measure of asymmetry (based on bootstrap).}
\item{nboot}{an integer indicating the number of runs for the bootstrap.}
\item{print}{a logical indicating whether the result of qad is printed.}
\item{remove.00}{a logical indicating whether double 0 entries should be excluded (default = FALSE)}
\item{y}{a (non-empty) numeric vector of data values.}
}
\value{
qad returns an object of class qad containing the following components:
\item{data}{ a data.frame containing the input data.}
\item{q(X,Y)}{influence of X on Y}
\item{q(Y,X)}{influence of Y on X}
\item{max.dependence}{maximal dependence}
\item{results}{ a data.frame containing the results of the dependence measures.}
\item{mass_matrix}{ a matrix containing the mass distribution of the empirical checkerboard copula.}
\item{resolution}{an integer containing the used resolution of the checkerboard aggregation.}
\item{n}{Sample size.}
}
\description{
Quantification of (asymmetric and directed) dependence structures between two random variables X and Y.
}
\details{
qad is the implementation of a strongly consistent estimator of the copula based dependence measure zeta_1 introduced in Trutschnig 2011.
We first compute the empirical copula of a two-dimensional sample, aggregate it to the so called empirical checkerboard copula (ECBC), and
calculate zeta_1 of the ECBC and its transpose. In order to test for independence (in both directions), a built-in p-value
is implemented (a permutation test with nperm permutation runs to estimate the p-value).
Furthermore, a (heuristic) bootstrap test with nboot runs can be applied to estimate a p-value for the measure of asymmetry a.
}
\examples{
#Example 1 (independence)
n <- 100
x <- runif(n,0,1)
y <- runif(n,0,1)
sample <- data.frame(x,y)
qad(sample)
###
#Example 2 (mutual complete dependence)
n <- 500
x <- runif(n,0,1)
y <- x^2
sample <- data.frame(x,y)
qad(sample)
#Example 3 (complete dependence)
n <- 1000
x <- runif(n,-10,10)
y <- sin(x)
sample <- data.frame(x,y)
qad(sample)
#Example 4 (Asymmetry)
n <- 100
x <- runif(n,0,1)
y <- (2*x) \%\% 1
qad(x, y)
}
\references{
Trutschnig, W. (2011). On a strong metric on the space of copulas and its induced dependence measure, Journal of Mathematical Analysis and Applications 384, 690-705.
Junker, R., Griessenberger, F. and Trutschnig, W. (2021). Estimating scale-invariant directed dependence of bivariate distributions. Computational Statistics and Data Analysis, 153.
}
\seealso{
A tutorial can be found at \url{http://www.trutschnig.net/software.html}.
}
|
#Dear Student,
#
#Welcome to the dataset for the homework exercise.
#
#Instructions for this dataset:
# You have only been supplied vectors. You will need
# to create the matrices yourself.
# Matrices:
# - FreeThrows
# - FreeThrowAttempts
#
#Sincerely,
#Kirill Eremenko
#www.superdatascience.com
#Copyright: These datasets were prepared using publicly available data.
# However, these scripts are subject to Copyright Laws.
# If you wish to use these R scripts outside of the R Programming Course
# by Kirill Eremenko, you may do so by referencing www.superdatascience.com in your work.
#Comments:
#Seasons are labeled based on the first year in the season
#E.g. the 2012-2013 season is presented as simply 2012
#Notes and Corrections to the data:
#Kevin Durant: 2006 - College Data Used
#Kevin Durant: 2005 - Proxied With 2006 Data
#Derrick Rose: 2012 - Did Not Play
#Derrick Rose: 2007 - College Data Used
#Derrick Rose: 2006 - Proxied With 2007 Data
#Derrick Rose: 2005 - Proxied With 2007 Data
#Seasons: one label per NBA season, identified by its starting year;
#used below as column names for every statistics matrix.
Seasons <- c("2005","2006","2007","2008","2009","2010","2011","2012","2013","2014")
#Players: row names for every statistics matrix (order matters -- each
#per-player vector below must be rbind-ed in this same order).
Players <- c("KobeBryant","JoeJohnson","LeBronJames","CarmeloAnthony","DwightHoward","ChrisBosh","ChrisPaul","KevinDurant","DerrickRose","DwayneWade")
#Games played per season, one vector per player (10 seasons each).
KobeBryant_G <- c(80,77,82,82,73,82,58,78,6,35)
JoeJohnson_G <- c(82,57,82,79,76,72,60,72,79,80)
LeBronJames_G <- c(79,78,75,81,76,79,62,76,77,69)
CarmeloAnthony_G <- c(80,65,77,66,69,77,55,67,77,40)
DwightHoward_G <- c(82,82,82,79,82,78,54,76,71,41)
ChrisBosh_G <- c(70,69,67,77,70,77,57,74,79,44)
ChrisPaul_G <- c(78,64,80,78,45,80,60,70,62,82)
KevinDurant_G <- c(35,35,80,74,82,78,66,81,81,27)
DerrickRose_G <- c(40,40,40,81,78,81,39,0,10,51)
DwayneWade_G <- c(75,51,51,79,77,76,49,69,54,62)
#Matrix: stack the per-player vectors into a players x seasons matrix,
#then drop the now-redundant vectors to keep the workspace clean.
Games <- rbind(KobeBryant_G, JoeJohnson_G, LeBronJames_G, CarmeloAnthony_G, DwightHoward_G, ChrisBosh_G, ChrisPaul_G, KevinDurant_G, DerrickRose_G, DwayneWade_G)
rm(KobeBryant_G, JoeJohnson_G, CarmeloAnthony_G, DwightHoward_G, ChrisBosh_G, LeBronJames_G, ChrisPaul_G, DerrickRose_G, DwayneWade_G, KevinDurant_G)
colnames(Games) <- Seasons
rownames(Games) <- Players
#Field Goals
KobeBryant_FG <- c(978,813,775,800,716,740,574,738,31,266)
JoeJohnson_FG <- c(632,536,647,620,635,514,423,445,462,446)
LeBronJames_FG <- c(875,772,794,789,768,758,621,765,767,624)
CarmeloAnthony_FG <- c(756,691,728,535,688,684,441,669,743,358)
DwightHoward_FG <- c(468,526,583,560,510,619,416,470,473,251)
ChrisBosh_FG <- c(549,543,507,615,600,524,393,485,492,343)
ChrisPaul_FG <- c(407,381,630,631,314,430,425,412,406,568)
KevinDurant_FG <- c(306,306,587,661,794,711,643,731,849,238)
DerrickRose_FG <- c(208,208,208,574,672,711,302,0,58,338)
DwayneWade_FG <- c(699,472,439,854,719,692,416,569,415,509)
#Matrix
FieldGoals <- rbind(KobeBryant_FG, JoeJohnson_FG, LeBronJames_FG, CarmeloAnthony_FG, DwightHoward_FG, ChrisBosh_FG, ChrisPaul_FG, KevinDurant_FG, DerrickRose_FG, DwayneWade_FG)
rm(KobeBryant_FG, JoeJohnson_FG, LeBronJames_FG, CarmeloAnthony_FG, DwightHoward_FG, ChrisBosh_FG, ChrisPaul_FG, KevinDurant_FG, DerrickRose_FG, DwayneWade_FG)
colnames(FieldGoals) <- Seasons
rownames(FieldGoals) <- Players
#Points
KobeBryant_PTS <- c(2832,2430,2323,2201,1970,2078,1616,2133,83,782)
JoeJohnson_PTS <- c(1653,1426,1779,1688,1619,1312,1129,1170,1245,1154)
LeBronJames_PTS <- c(2478,2132,2250,2304,2258,2111,1683,2036,2089,1743)
CarmeloAnthony_PTS <- c(2122,1881,1978,1504,1943,1970,1245,1920,2112,966)
DwightHoward_PTS <- c(1292,1443,1695,1624,1503,1784,1113,1296,1297,646)
ChrisBosh_PTS <- c(1572,1561,1496,1746,1678,1438,1025,1232,1281,928)
ChrisPaul_PTS <- c(1258,1104,1684,1781,841,1268,1189,1186,1185,1564)
KevinDurant_PTS <- c(903,903,1624,1871,2472,2161,1850,2280,2593,686)
DerrickRose_PTS <- c(597,597,597,1361,1619,2026,852,0,159,904)
DwayneWade_PTS <- c(2040,1397,1254,2386,2045,1941,1082,1463,1028,1331)
#Matrix
Points <- rbind(KobeBryant_PTS, JoeJohnson_PTS, LeBronJames_PTS, CarmeloAnthony_PTS, DwightHoward_PTS, ChrisBosh_PTS, ChrisPaul_PTS, KevinDurant_PTS, DerrickRose_PTS, DwayneWade_PTS)
rm(KobeBryant_PTS, JoeJohnson_PTS, LeBronJames_PTS, CarmeloAnthony_PTS, DwightHoward_PTS, ChrisBosh_PTS, ChrisPaul_PTS, KevinDurant_PTS, DerrickRose_PTS, DwayneWade_PTS)
colnames(Points) <- Seasons
rownames(Points) <- Players
#Free Throws
KobeBryant_FT <- c(696,667,623,483,439,483,381,525,18,196)
JoeJohnson_FT <- c(261,235,316,299,220,195,158,132,159,141)
LeBronJames_FT <- c(601,489,549,594,593,503,387,403,439,375)
CarmeloAnthony_FT <- c(573,459,464,371,508,507,295,425,459,189)
DwightHoward_FT <- c(356,390,529,504,483,546,281,355,349,143)
ChrisBosh_FT <- c(474,463,472,504,470,384,229,241,223,179)
ChrisPaul_FT <- c(394,292,332,455,161,337,260,286,295,289)
KevinDurant_FT <- c(209,209,391,452,756,594,431,679,703,146)
DerrickRose_FT <- c(146,146,146,197,259,476,194,0,27,152)
DwayneWade_FT <- c(629,432,354,590,534,494,235,308,189,284)
#Matrix
FreeThrows <- rbind(KobeBryant_FT, JoeJohnson_FT, LeBronJames_FT, CarmeloAnthony_FT, DwightHoward_FT, ChrisBosh_FT, ChrisPaul_FT, KevinDurant_FT, DerrickRose_FT, DwayneWade_FT)
rm(KobeBryant_FT, JoeJohnson_FT, LeBronJames_FT, CarmeloAnthony_FT, DwightHoward_FT, ChrisBosh_FT, ChrisPaul_FT, KevinDurant_FT, DerrickRose_FT, DwayneWade_FT)
colnames(FreeThrows) <- Seasons
rownames(FreeThrows) <- Players
#Free Throw Attempts
KobeBryant_FTA <- c(819,768,742,564,541,583,451,626,21,241)
JoeJohnson_FTA <- c(330,314,379,362,269,243,186,161,195,176)
LeBronJames_FTA <- c(814,701,771,762,773,663,502,535,585,528)
CarmeloAnthony_FTA <- c(709,568,590,468,612,605,367,512,541,237)
DwightHoward_FTA <- c(598,666,897,849,816,916,572,721,638,271)
ChrisBosh_FTA <- c(581,590,559,617,590,471,279,302,272,232)
ChrisPaul_FTA <- c(465,357,390,524,190,384,302,323,345,321)
KevinDurant_FTA <- c(256,256,448,524,840,675,501,750,805,171)
DerrickRose_FTA <- c(205,205,205,250,338,555,239,0,32,187)
DwayneWade_FTA <- c(803,535,467,771,702,652,297,425,258,370)
#Matrix
#
FreeThrowAttempts <- rbind(KobeBryant_FTA, JoeJohnson_FTA, LeBronJames_FTA, CarmeloAnthony_FTA, DwightHoward_FTA, ChrisBosh_FTA, ChrisPaul_FTA, KevinDurant_FTA, DerrickRose_FTA, DwayneWade_FTA)
rm(KobeBryant_FTA, JoeJohnson_FTA, LeBronJames_FTA, CarmeloAnthony_FTA, DwightHoward_FTA, ChrisBosh_FTA, ChrisPaul_FTA, KevinDurant_FTA, DerrickRose_FTA, DwayneWade_FTA)
colnames(FreeThrowAttempts) <- Seasons
rownames(FreeThrowAttempts) <- Players
FreeThrowAttempts
# Plot the season-by-season trajectory of a per-player statistic.
#
# dataset: numeric matrix with player rownames and season colnames.
# players: character vector of rownames to plot (defaults to the global
#          Players vector defined above).
# ylabel:  y-axis label; defaults to the expression passed as `dataset`.
# Called for its plotting side effect.
myplot <- function(dataset, players = Players, ylabel = deparse(substitute(dataset))) {
# drop = FALSE keeps a one-player selection as a matrix (a bare vector
# would break matplot's column-per-player layout). Spell out FALSE --
# the shorthand F is an ordinary, reassignable variable.
Data <- dataset[players, , drop = FALSE]
matplot(t(Data), type = "b", pch = 1:15, col = 1:15, ylab = ylabel, xlab = "Year")
legend("topright", inset = 0.01, legend = players, pch = 1:15, col = 1:15)
}
# Plot 1: FreeThrowAttempts (per Game); element-wise division of two
# players x seasons matrices gives attempts per game played.
myplot(FreeThrowAttempts/Games, ylabel='Attempted Free Throws per Game')
# Plot 2: Accuracy of FreeThrows (made / attempted, element-wise).
myplot(FreeThrows/FreeThrowAttempts, ylabel=' Free Throw Accuracy')
# Plot 3: Player Playing Style -- average points per field goal made;
# values near 2 suggest a 2-point style, values near 3 a 3-point style.
#myplot(Points/FieldGoals, ylabel='2PT / 3PT Preference') #FreeThrow Points included
myplot((Points-FreeThrows)/FieldGoals, ylabel='2PT / 3PT Preference') # Excluded Free Throws
| /exercises/Basketball Freethrows Homework Exercise.R | no_license | KushalVenkatesh/R-Programming-A-Z | R | false | false | 7,151 | r | #Dear Student,
#
#Welcome to the dataset for the homework exercise.
#
#Instructions for this dataset:
# You have only been supplied vectors. You will need
# to create the matrices yourself.
# Matrices:
# - FreeThrows
# - FreeThrowAttempts
#
#Sincerely,
#Kirill Eremenko
#www.superdatascience.com
#Copyright: These datasets were prepared using publicly available data.
# However, these scripts are subject to Copyright Laws.
# If you wish to use these R scripts outside of the R Programming Course
# by Kirill Eremenko, you may do so by referencing www.superdatascience.com in your work.
#Comments:
#Seasons are labeled based on the first year in the season
#E.g. the 2012-2013 season is presented as simply 2012
#Notes and Corrections to the data:
#Kevin Durant: 2006 - College Data Used
#Kevin Durant: 2005 - Proxied With 2006 Data
#Derrick Rose: 2012 - Did Not Play
#Derrick Rose: 2007 - College Data Used
#Derrick Rose: 2006 - Proxied With 2007 Data
#Derrick Rose: 2005 - Proxied With 2007 Data
#Seasons
Seasons <- c("2005","2006","2007","2008","2009","2010","2011","2012","2013","2014")
#Players
Players <- c("KobeBryant","JoeJohnson","LeBronJames","CarmeloAnthony","DwightHoward","ChrisBosh","ChrisPaul","KevinDurant","DerrickRose","DwayneWade")
#Games
KobeBryant_G <- c(80,77,82,82,73,82,58,78,6,35)
JoeJohnson_G <- c(82,57,82,79,76,72,60,72,79,80)
LeBronJames_G <- c(79,78,75,81,76,79,62,76,77,69)
CarmeloAnthony_G <- c(80,65,77,66,69,77,55,67,77,40)
DwightHoward_G <- c(82,82,82,79,82,78,54,76,71,41)
ChrisBosh_G <- c(70,69,67,77,70,77,57,74,79,44)
ChrisPaul_G <- c(78,64,80,78,45,80,60,70,62,82)
KevinDurant_G <- c(35,35,80,74,82,78,66,81,81,27)
DerrickRose_G <- c(40,40,40,81,78,81,39,0,10,51)
DwayneWade_G <- c(75,51,51,79,77,76,49,69,54,62)
#Matrix
Games <- rbind(KobeBryant_G, JoeJohnson_G, LeBronJames_G, CarmeloAnthony_G, DwightHoward_G, ChrisBosh_G, ChrisPaul_G, KevinDurant_G, DerrickRose_G, DwayneWade_G)
rm(KobeBryant_G, JoeJohnson_G, CarmeloAnthony_G, DwightHoward_G, ChrisBosh_G, LeBronJames_G, ChrisPaul_G, DerrickRose_G, DwayneWade_G, KevinDurant_G)
colnames(Games) <- Seasons
rownames(Games) <- Players
#Field Goals
KobeBryant_FG <- c(978,813,775,800,716,740,574,738,31,266)
JoeJohnson_FG <- c(632,536,647,620,635,514,423,445,462,446)
LeBronJames_FG <- c(875,772,794,789,768,758,621,765,767,624)
CarmeloAnthony_FG <- c(756,691,728,535,688,684,441,669,743,358)
DwightHoward_FG <- c(468,526,583,560,510,619,416,470,473,251)
ChrisBosh_FG <- c(549,543,507,615,600,524,393,485,492,343)
ChrisPaul_FG <- c(407,381,630,631,314,430,425,412,406,568)
KevinDurant_FG <- c(306,306,587,661,794,711,643,731,849,238)
DerrickRose_FG <- c(208,208,208,574,672,711,302,0,58,338)
DwayneWade_FG <- c(699,472,439,854,719,692,416,569,415,509)
#Matrix
FieldGoals <- rbind(KobeBryant_FG, JoeJohnson_FG, LeBronJames_FG, CarmeloAnthony_FG, DwightHoward_FG, ChrisBosh_FG, ChrisPaul_FG, KevinDurant_FG, DerrickRose_FG, DwayneWade_FG)
rm(KobeBryant_FG, JoeJohnson_FG, LeBronJames_FG, CarmeloAnthony_FG, DwightHoward_FG, ChrisBosh_FG, ChrisPaul_FG, KevinDurant_FG, DerrickRose_FG, DwayneWade_FG)
colnames(FieldGoals) <- Seasons
rownames(FieldGoals) <- Players
#Points
KobeBryant_PTS <- c(2832,2430,2323,2201,1970,2078,1616,2133,83,782)
JoeJohnson_PTS <- c(1653,1426,1779,1688,1619,1312,1129,1170,1245,1154)
LeBronJames_PTS <- c(2478,2132,2250,2304,2258,2111,1683,2036,2089,1743)
CarmeloAnthony_PTS <- c(2122,1881,1978,1504,1943,1970,1245,1920,2112,966)
DwightHoward_PTS <- c(1292,1443,1695,1624,1503,1784,1113,1296,1297,646)
ChrisBosh_PTS <- c(1572,1561,1496,1746,1678,1438,1025,1232,1281,928)
ChrisPaul_PTS <- c(1258,1104,1684,1781,841,1268,1189,1186,1185,1564)
KevinDurant_PTS <- c(903,903,1624,1871,2472,2161,1850,2280,2593,686)
DerrickRose_PTS <- c(597,597,597,1361,1619,2026,852,0,159,904)
DwayneWade_PTS <- c(2040,1397,1254,2386,2045,1941,1082,1463,1028,1331)
#Matrix
Points <- rbind(KobeBryant_PTS, JoeJohnson_PTS, LeBronJames_PTS, CarmeloAnthony_PTS, DwightHoward_PTS, ChrisBosh_PTS, ChrisPaul_PTS, KevinDurant_PTS, DerrickRose_PTS, DwayneWade_PTS)
rm(KobeBryant_PTS, JoeJohnson_PTS, LeBronJames_PTS, CarmeloAnthony_PTS, DwightHoward_PTS, ChrisBosh_PTS, ChrisPaul_PTS, KevinDurant_PTS, DerrickRose_PTS, DwayneWade_PTS)
colnames(Points) <- Seasons
rownames(Points) <- Players
#Free Throws
KobeBryant_FT <- c(696,667,623,483,439,483,381,525,18,196)
JoeJohnson_FT <- c(261,235,316,299,220,195,158,132,159,141)
LeBronJames_FT <- c(601,489,549,594,593,503,387,403,439,375)
CarmeloAnthony_FT <- c(573,459,464,371,508,507,295,425,459,189)
DwightHoward_FT <- c(356,390,529,504,483,546,281,355,349,143)
ChrisBosh_FT <- c(474,463,472,504,470,384,229,241,223,179)
ChrisPaul_FT <- c(394,292,332,455,161,337,260,286,295,289)
KevinDurant_FT <- c(209,209,391,452,756,594,431,679,703,146)
DerrickRose_FT <- c(146,146,146,197,259,476,194,0,27,152)
DwayneWade_FT <- c(629,432,354,590,534,494,235,308,189,284)
#Matrix
FreeThrows <- rbind(KobeBryant_FT, JoeJohnson_FT, LeBronJames_FT, CarmeloAnthony_FT, DwightHoward_FT, ChrisBosh_FT, ChrisPaul_FT, KevinDurant_FT, DerrickRose_FT, DwayneWade_FT)
rm(KobeBryant_FT, JoeJohnson_FT, LeBronJames_FT, CarmeloAnthony_FT, DwightHoward_FT, ChrisBosh_FT, ChrisPaul_FT, KevinDurant_FT, DerrickRose_FT, DwayneWade_FT)
colnames(FreeThrows) <- Seasons
rownames(FreeThrows) <- Players
#Free Throw Attempts
KobeBryant_FTA <- c(819,768,742,564,541,583,451,626,21,241)
JoeJohnson_FTA <- c(330,314,379,362,269,243,186,161,195,176)
LeBronJames_FTA <- c(814,701,771,762,773,663,502,535,585,528)
CarmeloAnthony_FTA <- c(709,568,590,468,612,605,367,512,541,237)
DwightHoward_FTA <- c(598,666,897,849,816,916,572,721,638,271)
ChrisBosh_FTA <- c(581,590,559,617,590,471,279,302,272,232)
ChrisPaul_FTA <- c(465,357,390,524,190,384,302,323,345,321)
KevinDurant_FTA <- c(256,256,448,524,840,675,501,750,805,171)
DerrickRose_FTA <- c(205,205,205,250,338,555,239,0,32,187)
DwayneWade_FTA <- c(803,535,467,771,702,652,297,425,258,370)
#Matrix
#
FreeThrowAttempts <- rbind(KobeBryant_FTA, JoeJohnson_FTA, LeBronJames_FTA, CarmeloAnthony_FTA, DwightHoward_FTA, ChrisBosh_FTA, ChrisPaul_FTA, KevinDurant_FTA, DerrickRose_FTA, DwayneWade_FTA)
rm(KobeBryant_FTA, JoeJohnson_FTA, LeBronJames_FTA, CarmeloAnthony_FTA, DwightHoward_FTA, ChrisBosh_FTA, ChrisPaul_FTA, KevinDurant_FTA, DerrickRose_FTA, DwayneWade_FTA)
colnames(FreeThrowAttempts) <- Seasons
rownames(FreeThrowAttempts) <- Players
FreeThrowAttempts
# Plot the season-by-season trajectory of a per-player statistic.
#
# dataset: numeric matrix with player rownames and season colnames.
# players: character vector of rownames to plot (defaults to the global
#          Players vector defined above).
# ylabel:  y-axis label; defaults to the expression passed as `dataset`.
# Called for its plotting side effect.
myplot <- function(dataset, players = Players, ylabel = deparse(substitute(dataset))) {
# drop = FALSE keeps a one-player selection as a matrix (a bare vector
# would break matplot's column-per-player layout). Spell out FALSE --
# the shorthand F is an ordinary, reassignable variable.
Data <- dataset[players, , drop = FALSE]
matplot(t(Data), type = "b", pch = 1:15, col = 1:15, ylab = ylabel, xlab = "Year")
legend("topright", inset = 0.01, legend = players, pch = 1:15, col = 1:15)
}
# Plot 1: FreeThrowAttempts (per Game)
myplot(FreeThrowAttempts/Games, ylabel='Attempted Free Throws per Game')
# Plot 2: Accuracy of FreeThrows
myplot(FreeThrows/FreeThrowAttempts, ylabel=' Free Throw Accuracy')
# Plot 3: Player Playing Style
#myplot(Points/FieldGoals, ylabel='2PT / 3PT Preference') #FreeThrow Points included
myplot((Points-FreeThrows)/FieldGoals, ylabel='2PT / 3PT Preference') # Excluded Free Throws
|
# Training of a random forest classifier to distinguish between sound clips containing
# nocturnal flight calls (NFCs) and sound clips not containing NFCs;
# Input: features extracted from sound clips;
# Training sets are artificially balanced to contain 50% sound clips with NFCs;
# Evaluation is based on a "leave-one-test group-out" cross-validation procedure
# PACKAGES: ==========================================================================
library(ranger)
# MAIN PART ==========================================================================
# load data:
# extracted features = input for the random forest classifier:
load(file = "~/Master/Masterarbeit/Code/Code_final/df_max_freq_spec_spectro_feat_final.RData")
# Random Forest:
# "leave-one-group-out" cross-validation procedure;
# each group = test group in one cross-validation run
groups <- sort(unique(df_max_freq_spec_spectro_feat_tot$group))
# store Random Forest predictions:
rfs_preds_final1 <- vector(mode = "list", length = length(groups))
# store importance of input variables:
importance_preds_final1 <- as.data.frame(matrix(nrow = length(groups), ncol = 281))
colnames(importance_preds_final1) <- c(paste0("bw1_f_", 1:256),
paste0(c("f_25_ampl_bw1", "f_50_ampl_bw1",
"f_75_ampl_bw1", "f_90_ampl_bw1")),
paste0(c("ampl_sum_0_1kHz", "ampl_sum_1_2kHz",
"ampl_sum_2_3kHz", "ampl_sum_3_6kHz",
"ampl_sum_6_8kHz", "ampl_sum_8_11kHz")),
paste0("pks_", 0:10, "_", 1:11, "kHz_bw1"),
paste0(c("event_duration", "event_freq_range",
"event_min_freq", "event_max_freq")))
# train Random Forest:
## formula:
fmla <- as.formula(paste("as.factor(bird_event) ~ ",
paste0("bw1_f_", 1:256, collapse= "+"), "+",
paste0(c("f_25_ampl_bw1", "f_50_ampl_bw1", "f_75_ampl_bw1",
"f_90_ampl_bw1"), collapse = "+"), "+",
paste0(c("ampl_sum_0_1kHz", "ampl_sum_1_2kHz", "ampl_sum_2_3kHz",
"ampl_sum_3_6kHz", "ampl_sum_6_8kHz", "ampl_sum_8_11kHz"),
collapse = "+"), "+",
paste0("pks_", 0:10, "_", 1:11, "kHz_bw1", collapse= "+"), "+",
paste0(c("event_duration", "event_freq_range", "event_min_freq",
"event_max_freq"), collapse= "+")))
# Fix the RNG so the upsampling is reproducible across all CV runs.
set.seed(123)
# Leave-one-group-out cross-validation: in run n, group n is the held-out
# test set and the forest is trained on all remaining groups.
# seq_along() is safe even if 'groups' were empty (1:length() is not).
for (n in seq_along(groups)) {
print(n) # current test group
# Artificially balance the training set:
## rows containing only background noise, excluding the test group:
df_max_freq_spec_spectro_feat_tot_bal <- df_max_freq_spec_spectro_feat_tot[which(df_max_freq_spec_spectro_feat_tot$bird_event == 0 & df_max_freq_spec_spectro_feat_tot$group != groups[n]), ]
## number of background-noise clips in the training set:
n_noise <- nrow(df_max_freq_spec_spectro_feat_tot_bal)
## rows containing NFCs, excluding the test group:
row_be <- which(df_max_freq_spec_spectro_feat_tot$bird_event == 1 &
df_max_freq_spec_spectro_feat_tot$group != groups[n])
## upsample NFC clips (with replacement) until 50% of the training set
## contains NFCs. TRUE/FALSE spelled out -- the shorthands T/F are
## ordinary, reassignable variables.
row_be_upsmpl <- sample(row_be, size = n_noise, replace = TRUE)
df_max_freq_spec_spectro_feat_tot_bal <- rbind(df_max_freq_spec_spectro_feat_tot_bal,
df_max_freq_spec_spectro_feat_tot[row_be_upsmpl, ])
# Train the probability forest on the balanced set:
rf <- ranger(fmla,
data = df_max_freq_spec_spectro_feat_tot_bal,
num.threads = 7,
importance = "impurity",
probability = TRUE,
num.trees = 500,
oob.error = FALSE)
# Predict class-1 (NFC) probabilities for the held-out test group:
rfs_preds_final1[[n]] <- predict(rf,
data = df_max_freq_spec_spectro_feat_tot[which(df_max_freq_spec_spectro_feat_tot$group == groups[n]), ])$predictions[, 2]
# Record impurity-based variable importance for this run:
importance_preds_final1[n, ] <- as.numeric(importance(rf))
# Free the model and the balanced copy before the next run:
rm(rf)
rm(df_max_freq_spec_spectro_feat_tot_bal)
}
# store Random Forest predictions and input variable importance:
save(rfs_preds_final1,
importance_preds_final1,
file = "~/Master/Masterarbeit/Code/Code_final/rfs_preds.RData") | /4_Random_Forest.R | no_license | KatrinSch/msc-thesis-call-detection | R | false | false | 4,746 | r | # Training of a random forest classifier to distinguish between sound clips containing
# nocturnal flight calls (NFCs) and sound clips not containing NFCs;
# Input: features extracted from sound clips;
# Training sets are artificially balanced to contain 50% sound clips with NFCs;
# Evaluation is based on a "leave-one-test group-out" cross-validation procedure
# PACKAGES: ==========================================================================
library(ranger)
# MAIN PART ==========================================================================
# load data:
# extracted features = input for the random forest classifier:
load(file = "~/Master/Masterarbeit/Code/Code_final/df_max_freq_spec_spectro_feat_final.RData")
# Random Forest:
# "leave-one-group-out" cross-validation procedure;
# each group = test group in one cross-validation run
groups <- sort(unique(df_max_freq_spec_spectro_feat_tot$group))
# store Random Forest predictions:
rfs_preds_final1 <- vector(mode = "list", length = length(groups))
# store importance of input variables:
importance_preds_final1 <- as.data.frame(matrix(nrow = length(groups), ncol = 281))
colnames(importance_preds_final1) <- c(paste0("bw1_f_", 1:256),
paste0(c("f_25_ampl_bw1", "f_50_ampl_bw1",
"f_75_ampl_bw1", "f_90_ampl_bw1")),
paste0(c("ampl_sum_0_1kHz", "ampl_sum_1_2kHz",
"ampl_sum_2_3kHz", "ampl_sum_3_6kHz",
"ampl_sum_6_8kHz", "ampl_sum_8_11kHz")),
paste0("pks_", 0:10, "_", 1:11, "kHz_bw1"),
paste0(c("event_duration", "event_freq_range",
"event_min_freq", "event_max_freq")))
# train Random Forest:
## formula:
fmla <- as.formula(paste("as.factor(bird_event) ~ ",
paste0("bw1_f_", 1:256, collapse= "+"), "+",
paste0(c("f_25_ampl_bw1", "f_50_ampl_bw1", "f_75_ampl_bw1",
"f_90_ampl_bw1"), collapse = "+"), "+",
paste0(c("ampl_sum_0_1kHz", "ampl_sum_1_2kHz", "ampl_sum_2_3kHz",
"ampl_sum_3_6kHz", "ampl_sum_6_8kHz", "ampl_sum_8_11kHz"),
collapse = "+"), "+",
paste0("pks_", 0:10, "_", 1:11, "kHz_bw1", collapse= "+"), "+",
paste0(c("event_duration", "event_freq_range", "event_min_freq",
"event_max_freq"), collapse= "+")))
# Fix the RNG so the upsampling is reproducible across all CV runs.
set.seed(123)
# Leave-one-group-out cross-validation: in run n, group n is the held-out
# test set and the forest is trained on all remaining groups.
# seq_along() is safe even if 'groups' were empty (1:length() is not).
for (n in seq_along(groups)) {
print(n) # current test group
# Artificially balance the training set:
## rows containing only background noise, excluding the test group:
df_max_freq_spec_spectro_feat_tot_bal <- df_max_freq_spec_spectro_feat_tot[which(df_max_freq_spec_spectro_feat_tot$bird_event == 0 & df_max_freq_spec_spectro_feat_tot$group != groups[n]), ]
## number of background-noise clips in the training set:
n_noise <- nrow(df_max_freq_spec_spectro_feat_tot_bal)
## rows containing NFCs, excluding the test group:
row_be <- which(df_max_freq_spec_spectro_feat_tot$bird_event == 1 &
df_max_freq_spec_spectro_feat_tot$group != groups[n])
## upsample NFC clips (with replacement) until 50% of the training set
## contains NFCs. TRUE/FALSE spelled out -- the shorthands T/F are
## ordinary, reassignable variables.
row_be_upsmpl <- sample(row_be, size = n_noise, replace = TRUE)
df_max_freq_spec_spectro_feat_tot_bal <- rbind(df_max_freq_spec_spectro_feat_tot_bal,
df_max_freq_spec_spectro_feat_tot[row_be_upsmpl, ])
# Train the probability forest on the balanced set:
rf <- ranger(fmla,
data = df_max_freq_spec_spectro_feat_tot_bal,
num.threads = 7,
importance = "impurity",
probability = TRUE,
num.trees = 500,
oob.error = FALSE)
# Predict class-1 (NFC) probabilities for the held-out test group:
rfs_preds_final1[[n]] <- predict(rf,
data = df_max_freq_spec_spectro_feat_tot[which(df_max_freq_spec_spectro_feat_tot$group == groups[n]), ])$predictions[, 2]
# Record impurity-based variable importance for this run:
importance_preds_final1[n, ] <- as.numeric(importance(rf))
# Free the model and the balanced copy before the next run:
rm(rf)
rm(df_max_freq_spec_spectro_feat_tot_bal)
}
# store Random Forest predictions and input variable importance:
save(rfs_preds_final1,
importance_preds_final1,
file = "~/Master/Masterarbeit/Code/Code_final/rfs_preds.RData") |
# modification on git from copied files
# Concatenate lociData objects row-wise (loci are appended; the count
# data is combined by the parent-class method via callNextMethod()).
# All objects must share the same replicate structure, otherwise their
# locLikelihoods matrices are not comparable.
#
# Bug fix: previously, when replicate structures differed, nLL was never
# assigned and new() failed with a cryptic "object 'nLL' not found";
# moreover all.equal() returns a character description on mismatch, on
# which all() errors. Both paths now raise an informative error instead.
setMethod("c", "lociData", function(x, ..., recursive = FALSE) {
cdl <- list(...)
catLD <- callNextMethod()
# all.equal() returns TRUE on equality and a character string otherwise,
# so wrap it in isTRUE(); vapply() pins the result to a logical vector.
sameReps <- vapply(cdl,
function(z) isTRUE(all.equal(z@replicates, current = x@replicates)),
logical(1))
if (!all(sameReps))
stop("all 'lociData' objects must have identical replicate structures")
# Stack the locus likelihood matrices and coordinates in the same order.
nLL <- do.call("rbind", c(list(x@locLikelihoods), lapply(cdl, function(z) z@locLikelihoods)))
nCoord <- do.call("c", c(list(x@coordinates), lapply(cdl, function(z) z@coordinates)))
new("lociData", catLD, locLikelihoods = nLL, coordinates = nCoord)
})
# Display method for "lociData": prints the genomic coordinates, then the
# inherited summary of the underlying data, then the locus likelihoods.
setMethod("show", "lociData", function(object) {
# Coordinates first.
show(object@coordinates)
# Delegate printing of the inherited slots to the parent class's method.
callNextMethod()
# .printLocLikes is an internal package helper (defined elsewhere) that
# formats the locus likelihood matrix for display.
.printLocLikes(object@locLikelihoods)
if(nrow(object@locLikelihoods) > 0) {
cat("\nExpected number of loci in each replicate group\n")
# exp() before summing -- locLikelihoods are evidently stored on the
# log scale; NOTE(review): confirm against the package's conventions.
print(colSums(exp(object@locLikelihoods)))
}
})
# Subsetting method for "lociData": selects rows (loci) i and columns
# (samples) j, keeping the locLikelihoods and coordinates slots in sync
# with the row selection.
setMethod("[", "lociData", function(x, i, j, ..., drop = FALSE) {
# Default missing indices to "select everything".
if(missing(j))
j <- 1:ncol(x@data)
if(missing(i))
i <- 1:nrow(x@data)
# NOTE(review): a zero-length i returns the object unchanged rather
# than an empty object -- confirm this is the intended contract.
if(length(i) == 0) return(x)
# Flatten matrix/array indices to plain vectors before dispatching.
i <- as.vector(i)
j <- as.vector(j)
# Let the parent class subset the underlying data itself.
x <- callNextMethod()
# Keep the per-locus slots aligned with the row selection; drop = FALSE
# preserves matrix structure even for single-row selections.
if(nrow(x@locLikelihoods) > 0)
x@locLikelihoods <- x@locLikelihoods[i,, drop = FALSE]
if(length(x@coordinates) > 0)
x@coordinates <- x@coordinates[i,, drop = FALSE]
x
})
# Dimensions of a "lociData" object: mirrors dim() on the data slot,
# except that when the data slot has zero rows the row count is taken
# from the number of coordinate ranges instead.
setMethod("dim", "lociData", function(x) {
d <- dim(x@data)
if (d[1] == 0) {
d[1] <- length(x@coordinates)
}
d
})
| /R/lociData-accessors.R | no_license | tjh48/segmentSeq | R | false | false | 1,392 | r | # modification on git from copied files
# Concatenate lociData objects row-wise (loci are appended; the count
# data is combined by the parent-class method via callNextMethod()).
# All objects must share the same replicate structure, otherwise their
# locLikelihoods matrices are not comparable.
#
# Bug fix: previously, when replicate structures differed, nLL was never
# assigned and new() failed with a cryptic "object 'nLL' not found";
# moreover all.equal() returns a character description on mismatch, on
# which all() errors. Both paths now raise an informative error instead.
setMethod("c", "lociData", function(x, ..., recursive = FALSE) {
cdl <- list(...)
catLD <- callNextMethod()
# all.equal() returns TRUE on equality and a character string otherwise,
# so wrap it in isTRUE(); vapply() pins the result to a logical vector.
sameReps <- vapply(cdl,
function(z) isTRUE(all.equal(z@replicates, current = x@replicates)),
logical(1))
if (!all(sameReps))
stop("all 'lociData' objects must have identical replicate structures")
# Stack the locus likelihood matrices and coordinates in the same order.
nLL <- do.call("rbind", c(list(x@locLikelihoods), lapply(cdl, function(z) z@locLikelihoods)))
nCoord <- do.call("c", c(list(x@coordinates), lapply(cdl, function(z) z@coordinates)))
new("lociData", catLD, locLikelihoods = nLL, coordinates = nCoord)
})
setMethod("show", "lociData", function(object) {
show(object@coordinates)
callNextMethod()
.printLocLikes(object@locLikelihoods)
if(nrow(object@locLikelihoods) > 0) {
cat("\nExpected number of loci in each replicate group\n")
print(colSums(exp(object@locLikelihoods)))
}
})
setMethod("[", "lociData", function(x, i, j, ..., drop = FALSE) {
if(missing(j))
j <- 1:ncol(x@data)
if(missing(i))
i <- 1:nrow(x@data)
if(length(i) == 0) return(x)
i <- as.vector(i)
j <- as.vector(j)
x <- callNextMethod()
if(nrow(x@locLikelihoods) > 0)
x@locLikelihoods <- x@locLikelihoods[i,, drop = FALSE]
if(length(x@coordinates) > 0)
x@coordinates <- x@coordinates[i,, drop = FALSE]
x
})
setMethod("dim", "lociData", function(x) {
dim <- dim(x@data)
if(dim[1] == 0) dim[1] <- length(x@coordinates)
dim
})
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/SPNC.R
\name{SPNCRefClass_get_dims}
\alias{SPNCRefClass_get_dims}
\title{Retrieve a named vector of dimensions}
\value{
a named numeric vector of dimensions or NULL
}
\description{
Retrieve a named vector of dimensions
}
| /man/SPNCRefClass_get_dims.Rd | permissive | BigelowLab/spnc | R | false | true | 299 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/SPNC.R
\name{SPNCRefClass_get_dims}
\alias{SPNCRefClass_get_dims}
\title{Retrieve a named vector of dimensions}
\value{
a named numeric vector of dimensions or NULL
}
\description{
Retrieve a named vector of dimensions
}
|
# 1. Define two matrices. Let A be 3 x 4, filled row-wise with the numbers 1-12.
# Let B be 4 x 2, filled column-wise with the numbers 1-8.
A <- matrix(1:12, nrow = 3, ncol = 4, byrow = TRUE)
B <- matrix(1:8, nrow = 4, ncol = 2, byrow = FALSE)
A
B
# 2. Explain why A and B cannot be added or subtracted.
# NOTE: the two lines below intentionally error ("non-conformable
# arrays") -- run them interactively to see the message; sourcing this
# whole file non-interactively will stop here.
A + B
A - B
##### Answer: The dimensions of the matrices are not the same.
# 3. Create a matrix C that can be added to A. Perform the addition.
##### Requirement is that it must have 3 rows and 4 columns
C <- matrix(seq(2,24,by = 2), nrow = 3, ncol = 4, byrow = TRUE)
C
A + C
# 4. Find AB. Before multiplying in R, anticipate the dimensions of the product,
# and multiply by hand.
##### 3x4 times 4x2, so product will be 3x2
# %*% is matrix multiplication (as opposed to element-wise *).
A %*% B
| /1-Algebra/3 - LAB3.R | permissive | sergioalegre/Machine-learning-based-R | R | false | false | 751 | r | # 1. Define two matrices. Let A be 3 x 4, filled row-wise with the numbers 1-12.
# Let B be 4 x 2, filled column-wise with the numbers 1-8.
A <- matrix(1:12, nrow = 3, ncol = 4, byrow = TRUE)
B <- matrix(1:8, nrow = 4, ncol = 2, byrow = FALSE)
A
B
# 2. Explain why A and B cannot be added or subtracted.
A + B
A - B
##### Answer: The dimensions of the matrices are not the same.
# 3. Create a matrix C that can be added to A. Perform the addition.
##### Requirement is that it must have 3 rows and 4 columns
C <- matrix(seq(2,24,by = 2), nrow = 3, ncol = 4, byrow = TRUE)
C
A + C
# 4. Find AB. Before multiplying in R, anticipate the dimensions of the product,
# and multiply by hand.
##### 3x4 times 4x2, so product will be 3x2
A %*% B
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/helpers.r
\name{tr}
\alias{tr}
\title{Compute trace of a square matrix, that is, the sum
of its diagonal elements.}
\usage{
tr(m)
}
\arguments{
\item{m}{a square matrix}
}
\value{
trace of input matrix
}
\description{
Compute trace of a square matrix, that is, the sum
of its diagonal elements.
}
| /man/tr.Rd | permissive | Express50/omgee | R | false | true | 375 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/helpers.r
\name{tr}
\alias{tr}
\title{Compute trace of a square matrix, that is, the sum
of its diagonal elements.}
\usage{
tr(m)
}
\arguments{
\item{m}{a square matrix}
}
\value{
trace of input matrix
}
\description{
Compute trace of a square matrix, that is, the sum
of its diagonal elements.
}
|
#
# FY2020_MacroPru_PortfolioRisk.R
#
# Short program to create the classic mean-sd graph
# for portfolios of stocks.
#
# DGB May 2020.
#
rm(list = ls()) # This line clears R's memory of data so that we start from a clean slate
# for this program with nothing hanging around from any programs we ran earlier.
# Load the data: We use the dataframe that we saved in the
# Risk Measurement session. We can call it whatever we like
# on the left of the arrow. Here we use the same name
# that it had in the earlier program.
stockReturns <- readRDS("stockReturnsDataframe.Rda")
# Create a function to calculate the mean and s.d. of returns
# for a 2-asset portfolio. The proportion invested in the first
# asset is w1. This would usually be done using matrices but the
# code below is easier to follow. (Matrices would make it easier
# to expand to more than 2 assets.)
# Mean and standard deviation of returns for a two-asset portfolio.
#
# r1, r2: numeric vectors of equal length holding aligned asset returns.
# w1:     proportion of the portfolio invested in asset 1; the remainder
#         (1 - w1) goes into asset 2. Values outside [0, 1] represent
#         short positions and are deliberately allowed.
# Returns an unnamed numeric vector c(portfolio mean, portfolio sd).
#
# This would usually be done with matrix algebra, but the scalar form
# below is easier to follow for two assets.
portfolioMoments <- function(r1, r2, w1) {
# Fail fast on malformed input instead of erroring deep inside cov().
stopifnot(is.numeric(r1), is.numeric(r2), length(r1) == length(r2),
is.numeric(w1), length(w1) == 1)
mean_r1 <- mean(r1)
mean_r2 <- mean(r2)
Var_r1 <- var(r1)
Var_r2 <- var(r2)
Cov_r1r2 <- cov(r1, r2)
w2 <- 1 - w1
Mean_portfolio <- w1 * mean_r1 + w2 * mean_r2
# Standard two-asset portfolio variance: w1^2*V1 + 2*w1*w2*Cov + w2^2*V2.
Var_portfolio <- w1 * w1 * Var_r1 + 2 * w1 * w2 * Cov_r1r2 + w2 * w2 * Var_r2
sd_portfolio <- sqrt(Var_portfolio)
c(Mean_portfolio, sd_portfolio)
}
portfolioMoments(stockReturns$apple, stockReturns$google, 0.5)
# ------------------------ Portfolios -------------------------
# Non-matrix-based 2 asset portfolio analysis.
# Choose the 2 firms.
r1 <- stockReturns$nasdaq
r2 <- stockReturns$jpm
# Create a 2-asset dataframe without dates.
TwoStockReturns <- data.frame("r1" = r1,
"r2" = r2)
# Weights for asset 1, from 0 to 1 in steps of 0.01 (101 portfolios).
w_i <- seq(0, 1, 0.01)
head(w_i)
# Preallocate the results matrix (one row per weight; columns = mean, sd).
# Sizing it from length(w_i) keeps it correct if the weight grid changes,
# instead of duplicating the magic number 101.
meanSDPortfolios <- matrix(nrow = length(w_i), ncol = 2)
meanSDPortfolios
# Fill the matrix row by row; seq_along() is safe even for an empty w_i.
for (i in seq_along(w_i)) {
meanSDPortfolios[i, ] <- portfolioMoments(r1, r2, w_i[i])
}
head(meanSDPortfolios)
# Put meanSDPortfolio into a dataframe so that we can use ggplot.
meanSDPortfolios_df <- data.frame("weight1" = w_i,
"mu"=meanSDPortfolios[,1],
"sigma" = meanSDPortfolios[,2])
head(meanSDPortfolios_df)
# Plot the results.
ggplot(meanSDPortfolios_df,aes(sigma)) +
geom_point(aes(y=mu),color="purple")
# ---------------- End of the session - some additional notes follow. ----
# Plot all firms' mean and sd
meanAll <- colMeans(stockReturns[,2:ncol(stockReturns)])
VarAll <- var(stockReturns[,2:ncol(stockReturns)])
sdAll <- sqrt(diag(VarAll))
momentsAll <- data.frame("muAll"= meanAll,"sigmaAll"= sdAll)
data.frame(meanAll,sdAll)
ggplot(momentsAll,aes(sigmaAll)) + geom_point(aes(y=muAll),color="red")
# ------------------ Illustrating the functions used in ---------------
# ------------- the user-defined-function 'portfolioMoments'. -----------
# var() will give us a covariance matrix if we give it more than 1 series.
# It provides just a varaince if we give it just 1 series.
varcov2 <- var(TwoStockReturns)
varcov2
# We can extract the moments from the resulting matrix as follows:
Var_r1 <- varcov2[1,1]
Cov_r1r2 <- varcov2[1,2]
Var_r2 <- varcov2[2,2]
# Print them as a vector.
c(Var_r1,Cov_r1r2,Var_r2)
# Calculate and print a portfolio variance.
w1 <- 0.5
w2 <- 1 - w1
Var_Portfolio <- w1*w1*Var_r1 + 2*w1*w2*Cov_r1r2 + w2*w2*Var_r2
Var_Portfolio
# Alternatively we can calculate the moments one by one.
# We label them with a z to distinguish them from the ones above.
Var_r1z <- var(r1)
Var_r2z <- var(r2)
Cov_r1r2z <- cov(r1,r2)
c(Var_r1z,Cov_r1r2z,Var_r2z)
# Calculate the means of the returns.
mean_r1 <- mean(r1)
mean_r2 <- mean(r2)
c(mean_r1,mean_r2)
Mean_Portfolio <- w1 * mean_r1 + w2 * mean_r2
Mean_Portfolio
# ------------------ A note on using matrices to find mean and Var. ------------------
n_assets <- 2 # This programme is set up for only 2 assets.
w <- c(0.5, 0.5) # Create a vector of 2 portfolio weights = 0.5
w
means <- colMeans(TwoStockReturns)
Mean_Portfolio_matrix <- w %*% means
Mean_Portfolio_matrix
Var_Portfolio_matrix <- w %*% varcov2 %*% w
Var_Portfolio_matrix
c(Mean_Portfolio_matrix, Var_Portfolio_matrix)
| /60_RiskMeasurementInR/AllMaterialForMoodleJune2020/FY_MacroPru_PortfolioRisk.R | no_license | dgbarr2/intro_coding | R | false | false | 4,518 | r | #
# FY2020_MacroPru_PortfolioRisk.R
#
# Short program to create the classic mean-sd graph
# for portfolios of stocks.
#
# DGB May 2020.
#
rm(list = ls()) # This line clears R's memory of data so that we start from a clean slate
# for this program with nothing hanging around from any programs we ran earlier.
# Load the data: We use the dataframe that we saved in the
# Risk Measurement session. We can call it whatever we like
# on the left of the arrow. Here we use the same name
# that it had in the earlier program.
stockReturns <- readRDS("stockReturnsDataframe.Rda")
# Create a function to calculate the mean and s.d. of returns
# for a 2-asset portfolio. The proportion invested in the first
# asset is w1. This would usually be done using matrices but the
# code below is easier to follow. (Matrices would make it easier
# to expand to more than 2 assets.)
portfolioMoments <- function(r1, r2, w1) {
  # Mean and standard deviation of a two-asset portfolio.
  #
  # r1, r2: numeric return series for the two assets.
  # w1:     weight on the first asset; the second gets (1 - w1).
  # Returns c(portfolio mean, portfolio standard deviation).
  w2 <- 1 - w1
  mu_p <- w1 * mean(r1) + w2 * mean(r2)
  # Two-asset variance: w1^2*Var1 + 2*w1*w2*Cov12 + w2^2*Var2.
  var_p <- w1^2 * var(r1) + 2 * w1 * w2 * cov(r1, r2) + w2^2 * var(r2)
  c(mu_p, sqrt(var_p))
}
portfolioMoments(stockReturns$apple, stockReturns$google, 0.5)
# ------------------------ Portfolios -------------------------
# Non-matrix-based 2 asset portfolio analysis.
# Choose the 2 firms.
r1 <- stockReturns$nasdaq
r2 <- stockReturns$jpm
# Create a 2-asset dataframe without dates.
TwoStockReturns <- data.frame("r1" = r1,
"r2" = r2)
# Create an empty matrix which into which we will put the
# mean and s.d. of 101 portfolios with different weights
# running from w1 = 0, to w1 = 1.
meanSDPortfolios <- matrix(nrow=101,ncol=2)
meanSDPortfolios
# Create the vector of weights.
w_i <- seq(0,1,0.01)
head(w_i)
# Use a for loop to fill the portfolio matrix
for(i in 1:length(w_i)){
meanSDPortfolios[i,] <- portfolioMoments(r1,
r2,w_i[i])
}
head(meanSDPortfolios)
# Put meanSDPortfolio into a dataframe so that we can use ggplot.
meanSDPortfolios_df <- data.frame("weight1" = w_i,
"mu"=meanSDPortfolios[,1],
"sigma" = meanSDPortfolios[,2])
head(meanSDPortfolios_df)
# Plot the results.
ggplot(meanSDPortfolios_df,aes(sigma)) +
geom_point(aes(y=mu),color="purple")
# ---------------- End of the session - some additional notes follow. ----
# Plot all firms' mean and sd
meanAll <- colMeans(stockReturns[,2:ncol(stockReturns)])
VarAll <- var(stockReturns[,2:ncol(stockReturns)])
sdAll <- sqrt(diag(VarAll))
momentsAll <- data.frame("muAll"= meanAll,"sigmaAll"= sdAll)
data.frame(meanAll,sdAll)
ggplot(momentsAll,aes(sigmaAll)) + geom_point(aes(y=muAll),color="red")
# ------------------ Illustrating the functions used in ---------------
# ------------- the user-defined-function 'portfolioMoments'. -----------
# var() will give us a covariance matrix if we give it more than 1 series.
# It provides just a varaince if we give it just 1 series.
varcov2 <- var(TwoStockReturns)
varcov2
# We can extract the moments from the resulting matrix as follows:
Var_r1 <- varcov2[1,1]
Cov_r1r2 <- varcov2[1,2]
Var_r2 <- varcov2[2,2]
# Print them as a vector.
c(Var_r1,Cov_r1r2,Var_r2)
# Calculate and print a portfolio variance.
w1 <- 0.5
w2 <- 1 - w1
Var_Portfolio <- w1*w1*Var_r1 + 2*w1*w2*Cov_r1r2 + w2*w2*Var_r2
Var_Portfolio
# Alternatively we can calculate the moments one by one.
# We label them with a z to distinguish them from the ones above.
Var_r1z <- var(r1)
Var_r2z <- var(r2)
Cov_r1r2z <- cov(r1,r2)
c(Var_r1z,Cov_r1r2z,Var_r2z)
# Calculate the means of the returns.
mean_r1 <- mean(r1)
mean_r2 <- mean(r2)
c(mean_r1,mean_r2)
Mean_Portfolio <- w1 * mean_r1 + w2 * mean_r2
Mean_Portfolio
# ------------------ A note on using matrices to find mean and Var. ------------------
n_assets <- 2 # This programme is set up for only 2 assets.
w <- c(0.5, 0.5) # Create a vector of 2 portfolio weights = 0.5
w
means <- colMeans(TwoStockReturns)
Mean_Portfolio_matrix <- w %*% means
Mean_Portfolio_matrix
Var_Portfolio_matrix <- w %*% varcov2 %*% w
Var_Portfolio_matrix
c(Mean_Portfolio_matrix, Var_Portfolio_matrix)
|
## Put comments here that give an overall description of what your functions do
## The code in this script is particularly useful for heavy invertible matrices that are non-changing.
##The script caches the inverse of a matrix so that when the inverse is needed again,
##it can be looked up in the cache rather than recomputed.
## Write a short comment describing this function
## The function below is essentially a list of children functions. It sets the matrix, gets the matrix,
## and sets/caches the inverse of the matrix.
makeCacheMatrix <- function(x = matrix()) {
  # Wrap a matrix together with a cache slot for its inverse.
  # Returns a list of four accessor closures sharing this environment.
  cached_inv <- NULL
  # Replace the stored matrix and invalidate any cached inverse.
  set <- function(y) {
    x <<- y
    cached_inv <<- NULL
  }
  # Return the stored matrix.
  get <- function() {
    x
  }
  # Cache a freshly computed inverse.
  setInverse <- function(inverse) {
    cached_inv <<- inverse
  }
  # Return the cached inverse (NULL if none computed yet).
  getInverse <- function() {
    cached_inv
  }
  # Expose the accessors as a named list.
  list(set = set,
       get = get,
       setInverse = setInverse,
       getInverse = getInverse)
}
## Write a short comment describing this function
## This function computes the inverse of the matrix returned by makeCacheMatrix above.
## In the event that the inverse has already been computed, this retrieves the inverse
## from the getInverse() function above.
cacheSolve <- function(x, ...) {
  ## Return a matrix that is the inverse of 'x', where 'x' is a
  ## cache-matrix object exposing get()/getInverse()/setInverse().
  ## The first call computes the inverse with solve() and caches it;
  ## later calls return the cached value without recomputing.
  ##
  ## ...: additional arguments forwarded to solve() (e.g. tol).
  m <- x$getInverse()
  if (!is.null(m)) {          # cache hit: skip the computation entirely
    message("getting cached data")
    return(m)
  }
  data <- x$get()             # cache miss: fetch the underlying matrix
  ## Bug fix: forward '...' to solve() — previously the extra arguments
  ## were accepted by cacheSolve() but silently ignored.
  m <- solve(data, ...)
  x$setInverse(m)             # store for subsequent calls
  m
}
| /cachematrix.R | no_license | nakpunonu/ProgrammingAssignment2 | R | false | false | 2,472 | r | ## Put comments here that give an overall description of what your functions do
## The code in this script is particularly useful for heavy invertible matrices that are non-changing.
##The script caches the inverse of a matrix so that when the inverse is needed again,
##it can be looked up in the cache rather than recomputed.
## Write a short comment describing this function
## The funtion below is essentially a list of children functions. It sets the matrix, gets the maxtrix,
## sets/caches the inverse of the matrix.
makeCacheMatrix <- function(x = matrix()) {
m <- NULL
set <- function(y) { ##This function sets the value of the matrix
x <<- y ##This uses the special <<- operator to assign the value of the matrix in y to object x
m <<- NULL
}
get <- function()x ##This returns the value of x set in the set() function above
setInverse <- function(inverse) m <<- inverse ##This caches the inverse of the matrix after it is computer in the CacheSolve function
getInverse <- function() m ## This returns the cached value of the inverse in the event that the inverse of the same matrix is requested again
list(set = set, get = get, setInverse = setInverse, getInverse = getInverse) ## List of the children functions
}
## Write a short comment describing this function
## This function computes the inverse of teh matrix returned by the makeCacheMatrix above.
## In the event that the inverse has already been computed, the this retrieves the inverse
## from the getinverse() function above.
cacheSolve <- function(x, ...) {
## Return a matrix that is the inverse of 'x'
m <- x$getInverse() ##This gets the value of the inverse of the matrix stored in object m
if(!is.null(m)){ ##this checks whether the m is NULL (which means that the inverse is yet to be computed), or if it not NULL, which means the inverse is already computed.
message("getting cached data")
return(m)
}
data <- x$get() ## This retrieves the matrix in the event that the inverse has not been computed
m <- solve(data) ##This computes the inverse of the matrix
x$setInverse(m) ## This sends it to the setInverse() function for caching
m ## This returns the inverse of the matrix to be printed on the console
}
|
# Basic operation
# DOM interaction
# RSelenium walkthrough: locating elements on google.com through the
# remote driver 'remDr' (created elsewhere) with each locator strategy.
# NOTE(review): the lines beginning with '>' and '[1]' below are pasted R
# console output (prompt + result), so this file does not parse as a plain
# script — confirm it is meant as a transcript, not runnable code.
# by name
remDr$navigate("http://www.google.com/ncr")
webElem0 <- remDr$findElement("name", "q")
webElem0$getElementAttribute("outerHTML")[[1]]
# by id
> webElem0$getElementAttribute("id")[[1]]
[1] "gbqfq"
webElem <- remDr$findElement("id", "gbqfq")
# by class
> webElem0$getElementAttribute("class")[[1]]
[1] "gbqfif"
webElem <- remDr$findElement("class", "gbqfif")
# by css selector
webElem <- remDr$findElement("css selector", ".gbqfif")
webElem <- remDr$findElement("css selector", "#gbqfq")
webElem <- remDr$findElement("css selector", "input.gbqfif")
webElem <- remDr$findElement("css selector", "input#gbqfq")
# by tag name
length(remDr$findElements("tag name", "input"))
# by link text
# get all the anchors
anchorElements <- remDr$findElements("tag name", "a")
sapply(anchorElements, function(x){x$getElementText()[[1]]})
advertAnchor <- remDr$findElement("link text", "Advertising")
advertAnchor$highlightElement()
# partial link text
googleAnchor <- remDr$findElement("partial link text", "oogle")
googleAnchor$highlightElement()
# xpath
| /R/Basic-DOM.R | no_license | strategist922/RSOCRUG | R | false | false | 1,100 | r | # Basic operation
# DOM interaction
# by name
remDr$navigate("http://www.google.com/ncr")
webElem0 <- remDr$findElement("name", "q")
webElem0$getElementAttribute("outerHTML")[[1]]
# by id
> webElem0$getElementAttribute("id")[[1]]
[1] "gbqfq"
webElem <- remDr$findElement("id", "gbqfq")
# by class
> webElem0$getElementAttribute("class")[[1]]
[1] "gbqfif"
webElem <- remDr$findElement("class", "gbqfif")
# by css selector
webElem <- remDr$findElement("css selector", ".gbqfif")
webElem <- remDr$findElement("css selector", "#gbqfq")
webElem <- remDr$findElement("css selector", "input.gbqfif")
webElem <- remDr$findElement("css selector", "input#gbqfq")
# by tag name
length(remDr$findElements("tag name", "input"))
# by link text
# get all the anchors
anchorElements <- remDr$findElements("tag name", "a")
sapply(anchorElements, function(x){x$getElementText()[[1]]})
advertAnchor <- remDr$findElement("link text", "Advertising")
advertAnchor$highlightElement()
# partial link text
googleAnchor <- remDr$findElement("partial link text", "oogle")
googleAnchor$highlightElement()
# xpath
|
\name{tp_classification}
\alias{tp_classification}
\title{Return the taxonomic hierarchy for a taxon name with a given id.}
\usage{
tp_classification(id = NULL, key = NULL,
callopts = list())
}
\arguments{
\item{id}{A Tropicos name ID}
\item{key}{Your Tropicos API key; loads from .Rprofile.}
\item{callopts}{Further args passed on to httr::GET}
}
\value{
A data.frame giving the hierarchy.
}
\description{
Return the taxonomic hierarchy (higher taxa) for a taxon name with a given id.
}
\examples{
\dontrun{
tp_classification(id = 25509881)
tp_classification(id = c(25509881,2700851))
tp_classification(id = c(25509881,2700851), callopts=verbose())
}
}
\references{
\url{http://services.tropicos.org/help?method=GetNameHigherTaxaXml}
}
| /man/tp_classification.Rd | permissive | imclab/taxize | R | false | false | 722 | rd | \name{tp_classification}
\alias{tp_classification}
\title{Return all synonyms for a taxon name with a given id.}
\usage{
tp_classification(id = NULL, key = NULL,
callopts = list())
}
\arguments{
\item{id}{A Tropicos name ID}
\item{key}{Your Tropicos API key; loads from .Rprofile.}
\item{callopts}{Further args passed on to httr::GET}
}
\value{
A data.frame giving the hierarchy.
}
\description{
Return all synonyms for a taxon name with a given id.
}
\examples{
\dontrun{
tp_classification(id = 25509881)
tp_classification(id = c(25509881,2700851))
tp_classification(id = c(25509881,2700851), callopts=verbose())
}
}
\references{
\url{http://services.tropicos.org/help?method=GetNameHigherTaxaXml}
}
|
# Plot a ROC curve with pROC for one predictor/response pair.
# Usage: Rscript ROC_plot.R <input_tsv> <group_label>
# The input must be a tab-separated table with a header containing at least
# a 'group' column (class labels) and an 'IA_Tissue' column (scores).
# Output: ROC_Comparison__<group_label>.pdf in the working directory.
# NOTE(review): prefer TRUE/FALSE over T/F for the library() flags.
library(pROC , quietly=T, warn.conflicts=F)
args <- commandArgs(TRUE)
input = args[1]
group = args[2]
# Example input path kept from the original author for reference:
#data = "/Scratch/analysis/Ironman/LC_pentad_code_analyse_20171023/01.FE_pentad_code/pentad_code_IA/cpileup-parsed/probability-estimation--ignore-all-at-tissue-chisq.test-deletesamepentadcode/IA-Benign.class_probability_estimation.tsv"
data = read.table(input,header=TRUE,sep="\t")
pdf(file=sprintf("ROC_Comparison__%s.pdf",group))
# Custom dark grey used for all text elements of the plot.
mygrey <- rgb(89/255,87/255,87/255)
#par(family="sans",col.main=mygrey,col=mygrey,col.lab=mygrey,col.axis=mygrey,cex.main=1.5,cex.lab=1.2,cex.axis=1.2,mgp=c(2.5,1,0))
par(family="sans",col.main=mygrey,col=mygrey,col.lab=mygrey,col.axis=mygrey,cex.main=1.5,cex.lab=1.2,cex.axis=1.2)
# Draw the ROC curve: axes in percent, AUC (with CI) printed on the plot,
# and the best threshold annotated as "threshold (spec%, sens%)".
plot.roc(data$group, data$IA_Tissue,
	grid=c(5,5),
#	main=sprintf("ROC Comparison(%s,%s)","75IA,121Benign","Tissue"),
	main=sprintf("ROC Comparison(%s)",group),
	percent=TRUE,
	print.thres=TRUE,
	ci=TRUE,
	auc=TRUE,
	print.auc=TRUE,
	print.thres.cex=1.2,
	print.thres.pattern="%.3f (%.1f%%, %.1f%%)",
	print.thres.col=mygrey,
	print.auc.cex=1.2,
	auc.polygon=TRUE,
	auc.polygon.col="Light Sky Blue",
	)
#legend("bottomright", legend = "75IA,121Benign", lwd = 1.5,cex=1.5)
dev.off()
| /ROC_plot.R | no_license | mengyang2000/ROC_plot | R | false | false | 1,314 | r | library(pROC , quietly=T, warn.conflicts=F)
args <- commandArgs(TRUE)
input = args[1]
group = args[2]
#data = "/Scratch/analysis/Ironman/LC_pentad_code_analyse_20171023/01.FE_pentad_code/pentad_code_IA/cpileup-parsed/probability-estimation--ignore-all-at-tissue-chisq.test-deletesamepentadcode/IA-Benign.class_probability_estimation.tsv"
data = read.table(input,header=TRUE,sep="\t")
pdf(file=sprintf("ROC_Comparison__%s.pdf",group))
mygrey <- rgb(89/255,87/255,87/255)
#par(family="sans",col.main=mygrey,col=mygrey,col.lab=mygrey,col.axis=mygrey,cex.main=1.5,cex.lab=1.2,cex.axis=1.2,mgp=c(2.5,1,0))
par(family="sans",col.main=mygrey,col=mygrey,col.lab=mygrey,col.axis=mygrey,cex.main=1.5,cex.lab=1.2,cex.axis=1.2)
plot.roc(data$group, data$IA_Tissue,
grid=c(5,5),
# main=sprintf("ROC Comparison(%s,%s)","75IA,121Benign","Tissue"),
main=sprintf("ROC Comparison(%s)",group),
percent=TRUE,
print.thres=TRUE,
ci=TRUE,
auc=TRUE,
print.auc=TRUE,
print.thres.cex=1.2,
print.thres.pattern="%.3f (%.1f%%, %.1f%%)",
print.thres.col=mygrey,
print.auc.cex=1.2,
auc.polygon=TRUE,
auc.polygon.col="Light Sky Blue",
)
#legend("bottomright", legend = "75IA,121Benign", lwd = 1.5,cex=1.5)
dev.off()
|
# Extract specified rows from GCT file.
# Masashi Fujita, Aug. 22, 2018
#
# Usage: Rscript filter_gct.R <expr.gct> <names.txt> <out.gct>
# Keeps only the rows of the GCT expression table whose 'Name' appears
# in the supplied one-column name file, then writes the result as GCT.
source("scripts/common.R")

# --- parse arguments ---
cli_args <- commandArgs(trailingOnly = TRUE)
if (length(cli_args) != 3) {
  stop("[ERROR] invalid number of arguments")
}
expr_file <- cli_args[1]
name_file <- cli_args[2]
outfile   <- cli_args[3]

# --- load inputs ---
expr <- read_gct(expr_file)
name <- fread(name_file, header = FALSE, col.names = "Name")

# --- keep only the requested rows and write the result ---
expr %>%
  inner_join(name) %>%
  write_gct(outfile)
| /scripts/filter_gct.R | permissive | toddajohnson/immunoduct | R | false | false | 475 | r | # Extract specified rows from GCT file.
# Masashi Fujita, Aug. 22, 2018
source("scripts/common.R")
#
# parse arguments
#
args <- commandArgs(trailingOnly=T)
if(length(args) != 3) {
stop("[ERROR] invalid number of arguments")
}
expr_file <- args[1]
name_file <- args[2]
outfile <- args[3]
#
# load files
#
expr <- read_gct(expr_file)
name <- fread(name_file, header=F, col.names="Name")
#
# filter
#
df <- expr %>% inner_join(name)
#
# save
#
df %>% write_gct(outfile)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/performance.R
\name{distributeGuessAsExpectedValue}
\alias{distributeGuessAsExpectedValue}
\title{Distributes guesses of 3x3 confusion matrix to expected value of 1 and -1.}
\usage{
distributeGuessAsExpectedValue(confusion_matrix_3x3)
}
\arguments{
\item{confusion_matrix_3x3}{A 3x3 matrix where the middle column is counts of
guesses.}
}
\value{
A 3x3 confusion matrix with 0's in the middle column.
}
\description{
Given a 3x3 confusion matrix, distributes guesses in column 2 using the
expected value. That is, moves half of guess counts (in column 2) to -1
(column 1) and the other half to 1 (column 3).
}
\details{
-1 0 1
-1 2 2 2
0 4 4 4
1 6 6 6
becomes
-1 0 1
-1 3 0 3
0 6 0 6
1 9 0 9
}
| /man/distributeGuessAsExpectedValue.Rd | no_license | jeanimal/heuristica | R | false | true | 786 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/performance.R
\name{distributeGuessAsExpectedValue}
\alias{distributeGuessAsExpectedValue}
\title{Distributes guesses of 3x3 confusion matrix to expected value of 1 and -1.}
\usage{
distributeGuessAsExpectedValue(confusion_matrix_3x3)
}
\arguments{
\item{confusion_matrix_3x3}{A 3x3 matrix where the middle column is counts of
guesses.}
}
\value{
A 3x3 confusion matrix with 0's in the middle column.
}
\description{
Given a 3x3 confusion matrix, distributes guesses in column 2 using the
expected value. That is, moves half of guess counts (in column 2) to -1
(column 1) and the other half to 1 (column 3).
}
\details{
-1 0 1
-1 2 2 2
0 4 4 4
1 6 6 6
becomes
-1 0 1
-1 3 0 3
0 6 0 6
1 9 0 9
}
|
#!/usr/bin/Rscript --restore
# Score documents in SAcorpus against a dictionary of negative words.
# Requires: 'SAcorpus' (a tm corpus) to already exist in the session, and
# a file 'negative.csv' of negative words in the working directory.
library("tm")
SAcorpus
# Read negative words from csv (flattened to a lower-case character vector).
negative_list <- read.table("negative.csv")
negative_list <- tolower(as.vector(t(negative_list)))
# Document-term matrix restricted to the negative-word dictionary,
# then tf-idf weighted. (Consistency fix: locals were previously named
# 'undervalued_*', a leftover from an earlier single-word dictionary.)
dtm_negative <- DocumentTermMatrix(SAcorpus, list(dictionary = negative_list))
#dtm_negative <- removeSparseTerms(dtm_negative, 0.98)
dtm_negative <- weightTfIdf(dtm_negative)
tdm_negative <- as.TermDocumentMatrix(dtm_negative)
#inspect(tdm_negative)
# Per-document negativity score; show the 200 highest-scoring documents.
term_score <- tm_term_score(tdm_negative, negative_list)
head(sort(term_score, decreasing = TRUE), 200L)
| /find_negative.r | no_license | puyuan/stockdragon | R | false | false | 611 | r | #!/usr/bin/Rscript --restore
library("tm")
SAcorpus
# read negative words from csv
undervalued_list<-read.table("negative.csv")
undervalued_list<-tolower(as.vector(t(undervalued_list)))
#undervalued_list=c("undervalued")
# Document Term Matrix
dtm_undervalued<-DocumentTermMatrix(SAcorpus,list(dictionary=undervalued_list))
#dtm_undervalued<-removeSparseTerms(dtm_undervalued, 0.98)
dtm_undervalued<-weightTfIdf(dtm_undervalued)
tdm_undervalued <- as.TermDocumentMatrix(dtm_undervalued)
#inspect(tdm)
term_score<-tm_term_score(tdm_undervalued, undervalued_list)
head(sort(term_score, decreasing=TRUE), 200L)
|
#PLOT 1: Make a Histogram
# NOTE(review): 'plotData' is assumed to be a data frame already loaded in
# the session (with a numeric Global_active_power column); this fragment
# does not load it itself — confirm against the data-loading script.
hist(plotData$Global_active_power, main="Global Active Power", xlab = "Global Active Power (kilowatts)", col="red")
# Create and save a png file with size 480 x 480
# (dev.copy duplicates the screen plot into a PNG device; the dev.off()
# call that follows closes the device and flushes the file to disk.)
dev.copy(png,"plot1.png", width=480, height=480)
dev.off() | /plot1.R | no_license | lapispencil/ExData_Plotting1 | R | false | false | 251 | r | #PLOT 1: Make a Histogram
hist(plotData$Global_active_power, main="Global Active Power", xlab = "Global Active Power (kilowatts)", col="red")
# Create and save a png file with size 480 x 480
dev.copy(png,"plot1.png", width=480, height=480)
dev.off() |
0f7192eea493e23c8b65bdf142e31878 incrementer-enc05-nonuniform-depth-26.qdimacs 24220 62529 | /code/dcnf-ankit-optimized/Results/QBFLIB-2018/E1/Database/Miller-Marin/incrementer-encoder/incrementer-enc05-nonuniform-depth-26/incrementer-enc05-nonuniform-depth-26.R | no_license | arey0pushpa/dcnf-autarky | R | false | false | 90 | r | 0f7192eea493e23c8b65bdf142e31878 incrementer-enc05-nonuniform-depth-26.qdimacs 24220 62529 |
# AFL-fuzzer regression input for mcga:::ByteVectorToDoubles: feeds a
# single large-negative integer to the compiled routine (crash probe,
# not a value check); the result's structure is printed afterwards.
testlist <- list(b = -2147483393L)
result <- do.call(mcga:::ByteVectorToDoubles,testlist)
str(result) | /mcga/inst/testfiles/ByteVectorToDoubles/AFL_ByteVectorToDoubles/ByteVectorToDoubles_valgrind_files/1613105839-test.R | no_license | akhikolla/updatedatatype-list3 | R | false | false | 101 | r | testlist <- list(b = -2147483393L)
result <- do.call(mcga:::ByteVectorToDoubles,testlist)
str(result) |
# Meteorological normalisation of air-quality time series with rmweather.
# "Short" period: 2018-12-01 to 2019-05-31. For every pollutant the random
# forest is refit 'ncal' times with different seeds; the fit with the best
# held-out Pearson r is kept (RF_modelo) and all fits' statistics are
# written to a CSV, with the workspace saved per pollutant.
library(openair)
library(plyr)
library(dplyr)
library(rmweather)
library(ranger)
library(magrittr)
library(globals)
library(future)
library(foreach)
library(iterators)
library(parallel)
library(doFuture)
library(readxl)
library(doParallel)
# NOTE(review): several parallel backends are configured in sequence
# (plan(multicore), registerDoParallel, registerDoFuture, plan(multiprocess));
# only the last future plan takes effect, and 'multiprocess' is deprecated
# in the future package — confirm which backend is actually intended.
plan(multicore)
registerDoParallel(cores = detectCores()- 1)
registerDoFuture()
plan(multiprocess)
filenamelist<-list("Rome_Traffic")   # one Excel workbook per site
polllist<-list("no2","pm2_5","pm10","co","o3")   # pollutant columns to model
#polllist<-list("no2","co","o3")
ncal=100   # number of seeded refits per pollutant
for (filename in filenamelist){
  # Load the site workbook; column types are declared explicitly so that
  # 'cluster' and 'weekday' arrive as text (made factors below).
  Dataraw1 <- read_excel(paste(filename,".xlsx",sep=''),
                         sheet = "Sheet1", col_types = c("date",
                                                         "numeric", "numeric", "text",
                                                         "numeric", "numeric", "numeric",
                                                         "numeric", "numeric", "numeric",
                                                         "numeric", "numeric", "text",
                                                         "numeric", "numeric", "numeric", "numeric","numeric",
                                                         "numeric", "numeric", "numeric",
                                                         "numeric", "numeric", "numeric"))
  Dataraw1$cluster<-as.factor(Dataraw1$cluster)
  Dataraw1$weekday<-as.factor(Dataraw1$weekday)
  # Drop rows without a cluster label, then restrict to the short period.
  Dataraw1 <- Dataraw1 %>% filter(!is.na(cluster))
  Dataraw <- Dataraw1 %>% filter(date>="2018-12-01"& date <= "2019-05-31")
  for (poll in polllist){
    r.min <- 0.1   # minimum test-set r for a fit to be kept as RF_modelo
    # Accumulator for per-fit performance statistics.
    # NOTE(review): this initial all-NA row is never removed, so it ends up
    # as the first row of the CSV written below — confirm that is intended.
    perform<-matrix(data=NA,ncol=11,nrow=1)
    colnames(perform)<-c("default","n", "FAC2","MB", "MGE", "NMB", "NMGE", "RMSE", "r","COE", "IOA")
    for (i in as.numeric(1:ncal)){
      # Seed both the train/test split and the forest fit with the same i
      # so each refit is reproducible.
      set.seed(i)
      data_prepared <- Dataraw %>%
        filter(!is.na(ws)) %>%
        dplyr::rename(value = poll) %>%
        rmw_prepare_data(na.rm = TRUE,fraction = 0.7)
      set.seed(i)
      RF_model <- rmw_do_all(
        data_prepared,
        variables = c(
          "date_unix","day_julian", "weekday","hour", "temp", "RH", "wd", "ws","sp","cluster","tp","blh","tcc","ssr"),
        variables_sample=c("temp", "RH", "wd", "ws","sp","cluster","tp","blh","tcc","ssr"),
        n_trees = 300,
        n_samples = 300,
        verbose = TRUE
      )
      # Evaluate on the held-out 30% and collect openair model statistics.
      testing_model <- rmw_predict_the_test_set(model = RF_model$model,df = RF_model$observations)
      model_performance<-modStats(testing_model, mod = "value", obs = "value_predict", statistic = c("n", "FAC2","MB", "MGE", "NMB", "NMGE", "RMSE", "r","COE", "IOA"),
                                  type = "default", rank.name = NULL)
      perform<-rbind(perform,model_performance)
      # Keep the best-scoring model seen so far.
      # NOTE(review): if no fit ever exceeds r = 0.1, RF_modelo is never
      # assigned for this pollutant (or retains the previous pollutant's
      # value) before save.image() — confirm this edge case is acceptable.
      if (model_performance$r > r.min){
        r.min <- model_performance$r
        RF_modelo <- RF_model}
    }
    # Persist the whole workspace (including RF_modelo) and the stats table.
    save.image(file = paste(filename,"_",poll,"_RW_Short",".RData",sep=""))
    write.table(perform, file=paste(filename,"_",poll,"_RWPerformance_Short",".csv",sep=""), sep=",", row.names=FALSE)
  }
} | /Data and code/2019/Rome_Traffic/RWshort.R | no_license | songnku/COVID-19-AQ | R | false | false | 2,943 | r | library(openair)
library(plyr)
library(dplyr)
library(rmweather)
library(ranger)
library(magrittr)
library(globals)
library(future)
library(foreach)
library(iterators)
library(parallel)
library(doFuture)
library(readxl)
library(doParallel)
plan(multicore)
registerDoParallel(cores = detectCores()- 1)
registerDoFuture()
plan(multiprocess)
filenamelist<-list("Rome_Traffic")
polllist<-list("no2","pm2_5","pm10","co","o3")
#polllist<-list("no2","co","o3")
ncal=100
for (filename in filenamelist){
Dataraw1 <- read_excel(paste(filename,".xlsx",sep=''),
sheet = "Sheet1", col_types = c("date",
"numeric", "numeric", "text",
"numeric", "numeric", "numeric",
"numeric", "numeric", "numeric",
"numeric", "numeric", "text",
"numeric", "numeric", "numeric", "numeric","numeric",
"numeric", "numeric", "numeric",
"numeric", "numeric", "numeric"))
Dataraw1$cluster<-as.factor(Dataraw1$cluster)
Dataraw1$weekday<-as.factor(Dataraw1$weekday)
Dataraw1 <- Dataraw1 %>% filter(!is.na(cluster))
Dataraw <- Dataraw1 %>% filter(date>="2018-12-01"& date <= "2019-05-31")
for (poll in polllist){
r.min <- 0.1
perform<-matrix(data=NA,ncol=11,nrow=1)
colnames(perform)<-c("default","n", "FAC2","MB", "MGE", "NMB", "NMGE", "RMSE", "r","COE", "IOA")
for (i in as.numeric(1:ncal)){
set.seed(i)
data_prepared <- Dataraw %>%
filter(!is.na(ws)) %>%
dplyr::rename(value = poll) %>%
rmw_prepare_data(na.rm = TRUE,fraction = 0.7)
set.seed(i)
RF_model <- rmw_do_all(
data_prepared,
variables = c(
"date_unix","day_julian", "weekday","hour", "temp", "RH", "wd", "ws","sp","cluster","tp","blh","tcc","ssr"),
variables_sample=c("temp", "RH", "wd", "ws","sp","cluster","tp","blh","tcc","ssr"),
n_trees = 300,
n_samples = 300,
verbose = TRUE
)
testing_model <- rmw_predict_the_test_set(model = RF_model$model,df = RF_model$observations)
model_performance<-modStats(testing_model, mod = "value", obs = "value_predict", statistic = c("n", "FAC2","MB", "MGE", "NMB", "NMGE", "RMSE", "r","COE", "IOA"),
type = "default", rank.name = NULL)
perform<-rbind(perform,model_performance)
if (model_performance$r > r.min){
r.min <- model_performance$r
RF_modelo <- RF_model}
}
save.image(file = paste(filename,"_",poll,"_RW_Short",".RData",sep=""))
write.table(perform, file=paste(filename,"_",poll,"_RWPerformance_Short",".csv",sep=""), sep=",", row.names=FALSE)
}
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.