content
large_stringlengths 0
6.46M
| path
large_stringlengths 3
331
| license_type
large_stringclasses 2
values | repo_name
large_stringlengths 5
125
| language
large_stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 4
6.46M
| extension
large_stringclasses 75
values | text
stringlengths 0
6.46M
|
|---|---|---|---|---|---|---|---|---|---|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/get_available_datasets.R
\name{get_available_datasets}
\alias{get_available_datasets}
\title{Get the countries data is available for}
\usage{
get_available_datasets()
}
\value{
A list of available countries and the region levels
for which data is available
}
\description{
Show what countries have what level data available.
The function searches the environment for R6 class objects and
extracts the country name and what level it has from the object.
}
\examples{
get_available_datasets()
}
|
/man/get_available_datasets.Rd
|
permissive
|
elenanikolova190/covidregionaldata
|
R
| false
| true
| 564
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/get_available_datasets.R
\name{get_available_datasets}
\alias{get_available_datasets}
\title{Get the countries data is available for}
\usage{
get_available_datasets()
}
\value{
A list of available countries and the region levels
for which data is available
}
\description{
Show what countries have what level data available.
The function searches the environment for R6 class objects and
extracts the country name and what level it has from the object.
}
\examples{
get_available_datasets()
}
|
# plotDist: This function will plot a distribution
# plotDist: plot the distribution implied by a data-distribution object.
# Dispatches on the dimension slot `p`: 1 -> univariate density plot,
# 2 -> bivariate contour/perspective plot; anything else is an error.
plotDist <- function(object, xlim = NULL, ylim = NULL, r = 0, var = NULL, contour = TRUE) {
  # Optionally restrict the object to the requested variable(s) first.
  if (!is.null(var)) {
    if (!is.vector(var))
      stop("Please specify a vector (no more than two elements) of variables")
    if (length(var) > 2)
      stop("The length of the variables you wish to plot is larger than two")
    object <- extractSimDataDist(object, var)
  }
  ndim <- object@p
  if (ndim == 1) {
    if (any(is.na(object@skewness))) {
      # No skewness/kurtosis target: plot the marginal density directly.
      plotDist1D(object@margins[1], object@paramMargins[[1]],
                 reverse = object@reverse[1], xlim = xlim)
    } else {
      # Otherwise approximate the density from a large simulated sample.
      sim <- dataGen(object, n = 100000, m = matrix(0), cm = matrix(1))
      plot(density(sim),
           main = paste0("Density: skewness = ", object@skewness,
                         ", kurtosis = ", object@kurtosis))
    }
  } else if (ndim == 2) {
    plotDist2D(object, xlim = xlim, ylim = ylim, r = r, contour = contour)
  } else {
    stop("The dimension cannot be greater than 2.")
  }
}
# plotDist1D: plot a univariate density for a named distribution family.
#
# Args:
#   distName: base name of the distribution (e.g. "norm"); the quantile
#     function "q<name>" and density function "d<name>" are looked up.
#   param: list of parameters passed to those functions.
#   xlim: optional length-2 plotting range; defaults to the 0.5% / 99.5%
#     quantiles of the distribution.
#   reverse: if TRUE, mirror the density around its weighted mean.
#
# Returns NULL invisibly (called for the plot side effect).
plotDist1D <- function(distName, param, xlim = NULL, reverse = FALSE) {
  if (is.null(xlim)) {
    # do.call() replaces the old eval(as.call(...)) idiom; numeric(2)
    # preallocates instead of growing a zero-length vector.
    qfun <- get(paste0("q", distName))
    xlim <- numeric(2)
    xlim[1] <- do.call(qfun, c(list(0.005), param))
    xlim[2] <- do.call(qfun, c(list(0.995), param))
  }
  xrange <- seq(xlim[1], xlim[2], length.out = 200)
  dfun <- get(paste0("d", distName))
  yrange <- do.call(dfun, c(list(xrange), param))
  if (reverse) {
    # Mirror the density, then shift the x grid so the reflected curve
    # keeps the original distances from its weighted mean to the ends.
    wMeanOld <- sum(xrange * yrange) / sum(yrange)
    disLeftOld <- wMeanOld - min(xrange)
    disRightOld <- max(xrange) - wMeanOld
    yrange <- rev(yrange)
    wMeanNew <- sum(xrange * yrange) / sum(yrange)
    xrange <- seq(wMeanNew - disRightOld, wMeanNew + disLeftOld,
                  length.out = length(xrange))
  }
  plot(xrange, yrange, type = "n", xlab = "Value", ylab = "Density")
  lines(xrange, yrange)
}
# plotDist2D: plot a bivariate density as a contour or perspective plot.
#
# Two evaluation paths:
#  - If no skewness target is set and the copula slot is the "NullCopula"
#    placeholder, build a normal copula with correlation `r` and evaluate
#    the exact mvdc density on a 51 x 51 grid.
#  - Otherwise simulate a large sample (copula-based via CopSEM, or via
#    dataGen) and estimate the density with a 2-D histogram, trimming
#    cells whose density never exceeds 0.5% of the maximum.
#
# Returns invisibly a list(x, y, z) with the grids and density matrix.
plotDist2D <- function(object, xlim = NULL, ylim = NULL, r = 0, contour = TRUE) {
  if (any(is.na(object@skewness)) && !is.null(object@copula) && is(object@copula, "NullCopula")) {
    CopNorm <- copula::ellipCopula(family = "normal", dim = 2, dispstr = "un", param = r)
    Mvdc <- copula::mvdc(CopNorm, object@margins, object@paramMargins)
    # Default axis limits: the 0.5% / 99.5% quantiles of each margin.
    if (is.null(xlim)) {
      qfun1 <- get(paste0("q", object@margins[1]))
      xlim <- numeric(2)
      xlim[1] <- do.call(qfun1, c(list(0.005), object@paramMargins[[1]]))
      xlim[2] <- do.call(qfun1, c(list(0.995), object@paramMargins[[1]]))
    }
    if (is.null(ylim)) {
      qfun2 <- get(paste0("q", object@margins[2]))
      ylim <- numeric(2)
      ylim[1] <- do.call(qfun2, c(list(0.005), object@paramMargins[[2]]))
      ylim[2] <- do.call(qfun2, c(list(0.995), object@paramMargins[[2]]))
    }
    xis <- seq(xlim[1], xlim[2], length.out = 51)
    yis <- seq(ylim[1], ylim[2], length.out = 51)
    grids <- as.matrix(expand.grid(xis, yis))
    zmat <- matrix(copula::dMvdc(grids, Mvdc), 51, 51)
  } else {
    if (any(is.na(object@skewness))) {
      Mvdc <- copula::mvdc(object@copula, object@margins, object@paramMargins)
      Data <- CopSEM(Mvdc, matrix(c(1, r, r, 1), 2, 2), nw = 100000, np = 100000)
    } else {
      Data <- dataGen(object, n = 100000, m = rep(0, 2), cm = matrix(c(1, r, r, 1), 2, 2))
    }
    obj <- find2Dhist(Data[, 1], Data[, 2], gridsize = c(500L, 500L))
    xis <- obj$x1
    yis <- obj$x2
    zmat <- obj$fhat
    # Keep only rows/columns with non-negligible density mass.
    used <- (zmat / max(zmat)) > .005
    usedx <- apply(used, 1, any)
    usedy <- apply(used, 2, any)
    xis <- xis[usedx]
    yis <- yis[usedy]
    zmat <- zmat[usedx, usedy]
  }
  # Mirror each requested axis, preserving the weighted mean's distances
  # to the grid endpoints (same trick as in plotDist1D).
  if (object@reverse[1]) {
    zmat <- zmat[nrow(zmat):1, ]
    den <- apply(zmat, 1, sum)
    wMeanOld <- sum(xis * den) / sum(den)
    disLeftOld <- wMeanOld - min(xis)
    disRightOld <- max(xis) - wMeanOld
    den <- rev(den)
    wMeanNew <- sum(xis * den) / sum(den)
    xis <- seq(wMeanNew - disRightOld, wMeanNew + disLeftOld, length.out = length(xis))
  }
  if (object@reverse[2]) {
    zmat <- zmat[, ncol(zmat):1]
    den <- apply(zmat, 2, sum)
    wMeanOld <- sum(yis * den) / sum(den)
    disLeftOld <- wMeanOld - min(yis)
    disRightOld <- max(yis) - wMeanOld
    den <- rev(den)
    wMeanNew <- sum(yis * den) / sum(den)
    yis <- seq(wMeanNew - disRightOld, wMeanNew + disLeftOld, length.out = length(yis))
  }
  # Bug fix: axis labels previously read "Varible 1".
  if (contour) {
    contour(xis, yis, zmat, xlab = "Variable 1", ylab = "Variable 2")
  } else {
    persp(xis, yis, zmat, xlab = "Variable 1", ylab = "Variable 2", zlab = "Density")
  }
  val <- list(x = xis, y = yis, z = zmat)
  invisible(val)
}
|
/simsem/R/plotDist.R
|
no_license
|
simsem/simsem
|
R
| false
| false
| 4,848
|
r
|
# plotDist: This function will plot a distribution
# plotDist: plot the distribution implied by a data-distribution object.
# Dispatches on the dimension slot `p`: 1 -> univariate density plot,
# 2 -> bivariate contour/perspective plot; anything else is an error.
plotDist <- function(object, xlim = NULL, ylim = NULL, r = 0, var = NULL, contour = TRUE) {
  # Optionally restrict the object to the requested variable(s) first.
  if (!is.null(var)) {
    if (!is.vector(var))
      stop("Please specify a vector (no more than two elements) of variables")
    if (length(var) > 2)
      stop("The length of the variables you wish to plot is larger than two")
    object <- extractSimDataDist(object, var)
  }
  ndim <- object@p
  if (ndim == 1) {
    if (any(is.na(object@skewness))) {
      # No skewness/kurtosis target: plot the marginal density directly.
      plotDist1D(object@margins[1], object@paramMargins[[1]],
                 reverse = object@reverse[1], xlim = xlim)
    } else {
      # Otherwise approximate the density from a large simulated sample.
      sim <- dataGen(object, n = 100000, m = matrix(0), cm = matrix(1))
      plot(density(sim),
           main = paste0("Density: skewness = ", object@skewness,
                         ", kurtosis = ", object@kurtosis))
    }
  } else if (ndim == 2) {
    plotDist2D(object, xlim = xlim, ylim = ylim, r = r, contour = contour)
  } else {
    stop("The dimension cannot be greater than 2.")
  }
}
# plotDist1D: plot a univariate density for a named distribution family.
#
# Args:
#   distName: base name of the distribution (e.g. "norm"); the quantile
#     function "q<name>" and density function "d<name>" are looked up.
#   param: list of parameters passed to those functions.
#   xlim: optional length-2 plotting range; defaults to the 0.5% / 99.5%
#     quantiles of the distribution.
#   reverse: if TRUE, mirror the density around its weighted mean.
#
# Returns NULL invisibly (called for the plot side effect).
plotDist1D <- function(distName, param, xlim = NULL, reverse = FALSE) {
  if (is.null(xlim)) {
    # do.call() replaces the old eval(as.call(...)) idiom; numeric(2)
    # preallocates instead of growing a zero-length vector.
    qfun <- get(paste0("q", distName))
    xlim <- numeric(2)
    xlim[1] <- do.call(qfun, c(list(0.005), param))
    xlim[2] <- do.call(qfun, c(list(0.995), param))
  }
  xrange <- seq(xlim[1], xlim[2], length.out = 200)
  dfun <- get(paste0("d", distName))
  yrange <- do.call(dfun, c(list(xrange), param))
  if (reverse) {
    # Mirror the density, then shift the x grid so the reflected curve
    # keeps the original distances from its weighted mean to the ends.
    wMeanOld <- sum(xrange * yrange) / sum(yrange)
    disLeftOld <- wMeanOld - min(xrange)
    disRightOld <- max(xrange) - wMeanOld
    yrange <- rev(yrange)
    wMeanNew <- sum(xrange * yrange) / sum(yrange)
    xrange <- seq(wMeanNew - disRightOld, wMeanNew + disLeftOld,
                  length.out = length(xrange))
  }
  plot(xrange, yrange, type = "n", xlab = "Value", ylab = "Density")
  lines(xrange, yrange)
}
# plotDist2D: plot a bivariate density as a contour or perspective plot.
#
# Two evaluation paths:
#  - If no skewness target is set and the copula slot is the "NullCopula"
#    placeholder, build a normal copula with correlation `r` and evaluate
#    the exact mvdc density on a 51 x 51 grid.
#  - Otherwise simulate a large sample (copula-based via CopSEM, or via
#    dataGen) and estimate the density with a 2-D histogram, trimming
#    cells whose density never exceeds 0.5% of the maximum.
#
# Returns invisibly a list(x, y, z) with the grids and density matrix.
plotDist2D <- function(object, xlim = NULL, ylim = NULL, r = 0, contour = TRUE) {
  if (any(is.na(object@skewness)) && !is.null(object@copula) && is(object@copula, "NullCopula")) {
    CopNorm <- copula::ellipCopula(family = "normal", dim = 2, dispstr = "un", param = r)
    Mvdc <- copula::mvdc(CopNorm, object@margins, object@paramMargins)
    # Default axis limits: the 0.5% / 99.5% quantiles of each margin.
    if (is.null(xlim)) {
      qfun1 <- get(paste0("q", object@margins[1]))
      xlim <- numeric(2)
      xlim[1] <- do.call(qfun1, c(list(0.005), object@paramMargins[[1]]))
      xlim[2] <- do.call(qfun1, c(list(0.995), object@paramMargins[[1]]))
    }
    if (is.null(ylim)) {
      qfun2 <- get(paste0("q", object@margins[2]))
      ylim <- numeric(2)
      ylim[1] <- do.call(qfun2, c(list(0.005), object@paramMargins[[2]]))
      ylim[2] <- do.call(qfun2, c(list(0.995), object@paramMargins[[2]]))
    }
    xis <- seq(xlim[1], xlim[2], length.out = 51)
    yis <- seq(ylim[1], ylim[2], length.out = 51)
    grids <- as.matrix(expand.grid(xis, yis))
    zmat <- matrix(copula::dMvdc(grids, Mvdc), 51, 51)
  } else {
    if (any(is.na(object@skewness))) {
      Mvdc <- copula::mvdc(object@copula, object@margins, object@paramMargins)
      Data <- CopSEM(Mvdc, matrix(c(1, r, r, 1), 2, 2), nw = 100000, np = 100000)
    } else {
      Data <- dataGen(object, n = 100000, m = rep(0, 2), cm = matrix(c(1, r, r, 1), 2, 2))
    }
    obj <- find2Dhist(Data[, 1], Data[, 2], gridsize = c(500L, 500L))
    xis <- obj$x1
    yis <- obj$x2
    zmat <- obj$fhat
    # Keep only rows/columns with non-negligible density mass.
    used <- (zmat / max(zmat)) > .005
    usedx <- apply(used, 1, any)
    usedy <- apply(used, 2, any)
    xis <- xis[usedx]
    yis <- yis[usedy]
    zmat <- zmat[usedx, usedy]
  }
  # Mirror each requested axis, preserving the weighted mean's distances
  # to the grid endpoints (same trick as in plotDist1D).
  if (object@reverse[1]) {
    zmat <- zmat[nrow(zmat):1, ]
    den <- apply(zmat, 1, sum)
    wMeanOld <- sum(xis * den) / sum(den)
    disLeftOld <- wMeanOld - min(xis)
    disRightOld <- max(xis) - wMeanOld
    den <- rev(den)
    wMeanNew <- sum(xis * den) / sum(den)
    xis <- seq(wMeanNew - disRightOld, wMeanNew + disLeftOld, length.out = length(xis))
  }
  if (object@reverse[2]) {
    zmat <- zmat[, ncol(zmat):1]
    den <- apply(zmat, 2, sum)
    wMeanOld <- sum(yis * den) / sum(den)
    disLeftOld <- wMeanOld - min(yis)
    disRightOld <- max(yis) - wMeanOld
    den <- rev(den)
    wMeanNew <- sum(yis * den) / sum(den)
    yis <- seq(wMeanNew - disRightOld, wMeanNew + disLeftOld, length.out = length(yis))
  }
  # Bug fix: axis labels previously read "Varible 1".
  if (contour) {
    contour(xis, yis, zmat, xlab = "Variable 1", ylab = "Variable 2")
  } else {
    persp(xis, yis, zmat, xlab = "Variable 1", ylab = "Variable 2", zlab = "Density")
  }
  val <- list(x = xis, y = yis, z = zmat)
  invisible(val)
}
|
\name{print.gamsel}
\alias{print.gamsel}
\title{
print a gamsel object
}
\description{
Print a summary of the gamsel path at each step along the path
}
\usage{
\method{print}{gamsel}(x, digits = max(3, getOption("digits") - 3), ...)
}
\arguments{
\item{x}{fitted gamsel object}
\item{digits}{significant digits in printout}
\item{\dots}{additional print arguments}
}
\details{
The call that produced the object \code{x} is printed, followed by a five-column
matrix
with columns \code{NonZero}, \code{Lin}, \code{NonLin}, \code{\%Dev}
and \code{Lambda}.
The first three columns say how many nonzero, linear and nonlinear
terms there are. \code{\%Dev} is the percent deviance
explained (relative to the null deviance).
}
\value{
The matrix above is silently returned}
\references{
Chouldechova, A. and Hastie, T. (2015) \emph{Generalized Additive Model
Selection}
}
\author{Alexandra Chouldechova and Trevor Hastie\cr
Maintainer: Trevor Hastie \email{hastie@stanford.edu}}
\seealso{
\code{\link{predict.gamsel}}, \code{\link{cv.gamsel}},
\code{\link{plot.gamsel}}, \code{\link{summary.gamsel}},
\code{\link{basis.gen}}, \code{\link{gendata}}
}
\keyword{regression}
\keyword{smooth}
\keyword{nonparametric}
|
/man/print.gamsel.Rd
|
no_license
|
egenn/gamsel2
|
R
| false
| false
| 1,237
|
rd
|
\name{print.gamsel}
\alias{print.gamsel}
\title{
print a gamsel object
}
\description{
Print a summary of the gamsel path at each step along the path
}
\usage{
\method{print}{gamsel}(x, digits = max(3, getOption("digits") - 3), ...)
}
\arguments{
\item{x}{fitted gamsel object}
\item{digits}{significant digits in printout}
\item{\dots}{additional print arguments}
}
\details{
The call that produced the object \code{x} is printed, followed by a five-column
matrix
with columns \code{NonZero}, \code{Lin}, \code{NonLin}, \code{\%Dev}
and \code{Lambda}.
The first three columns say how many nonzero, linear and nonlinear
terms there are. \code{\%Dev} is the percent deviance
explained (relative to the null deviance).
}
\value{
The matrix above is silently returned}
\references{
Chouldechova, A. and Hastie, T. (2015) \emph{Generalized Additive Model
Selection}
}
\author{Alexandra Chouldechova and Trevor Hastie\cr
Maintainer: Trevor Hastie \email{hastie@stanford.edu}}
\seealso{
\code{\link{predict.gamsel}}, \code{\link{cv.gamsel}},
\code{\link{plot.gamsel}}, \code{\link{summary.gamsel}},
\code{\link{basis.gen}}, \code{\link{gendata}}
}
\keyword{regression}
\keyword{smooth}
\keyword{nonparametric}
|
#' Convert Factors to Strings
#'
#' `step_factor2string` will convert one or more factor
#' vectors to strings.
#'
#' @inheritParams step_center
#' @param columns A character string of variables that will be
#' converted. This is `NULL` until computed by
#' [prep.recipe()].
#' @template step-return
#' @keywords datagen
#' @concept preprocessing
#' @concept variable_encodings
#' @concept factors
#' @export
#' @details `prep` has an option `strings_as_factors` that
#' defaults to `TRUE`. If this step is used with the default
#' option, the string(s) produced by this step will be converted
#' to factors after all of the steps have been prepped.
#'
#' When you [`tidy()`] this step, a tibble with columns `terms` (the
#' columns that will be affected) is returned.
#'
#' @seealso [step_string2factor()] [step_dummy()]
#' @examples
#' library(modeldata)
#' data(okc)
#'
#' rec <- recipe(~ diet + location, data = okc)
#'
#' rec <- rec %>%
#'   step_string2factor(diet)
#'
#' factor_test <- rec %>%
#'   prep(training = okc,
#'        strings_as_factors = FALSE) %>%
#'   juice
#' # diet is a factor (converted by step_string2factor)
#' class(factor_test$diet)
#'
#' rec <- rec %>%
#'   step_factor2string(diet)
#'
#' string_test <- rec %>%
#'   prep(training = okc,
#'        strings_as_factors = FALSE) %>%
#'   juice
#' # diet is a character vector (converted back by step_factor2string)
#' class(string_test$diet)
#'
#' tidy(rec, number = 1)
# NOTE(review): the default `columns = FALSE` below disagrees with the
# roxygen for `columns`, which says it is `NULL` until computed by prep —
# confirm which sentinel is intended.
step_factor2string <-
  function(recipe,
           ...,
           role = NA,
           trained = FALSE,
           columns = FALSE,
           skip = FALSE,
           id = rand_id("factor2string")) {
    add_step(
      recipe,
      step_factor2string_new(
        terms = ellipse_check(...),
        role = role,
        trained = trained,
        columns = columns,
        skip = skip,
        id = id
      )
    )
  }
# Internal constructor for the "factor2string" step object.
# Simply forwards its arguments to the generic `step()` builder with the
# appropriate subclass; no validation happens here.
step_factor2string_new <-
  function(terms, role, trained, columns, skip, id) {
    step(
      subclass = "factor2string",
      terms = terms, role = role, trained = trained,
      columns = columns, skip = skip, id = id
    )
  }
#' @export
# Prep method: resolve the selected columns, verify that every one of
# them is a factor, and return a trained copy of the step.
prep.step_factor2string <- function(x, training, info = NULL, ...) {
  col_names <- eval_select_recipes(x$terms, training, info)
  # Bug fix: list-style subsetting (`training[col_names]`) always yields
  # a data frame, so the check also works when a single column is
  # selected on a base data.frame (where `training[, col_names]` would
  # drop to a plain vector and vapply() would iterate its elements).
  fac_check <- vapply(training[col_names], is.factor, logical(1))
  if (any(!fac_check)) {
    rlang::abort(
      paste0(
        "The following variables are not factor vectors: ",
        paste0("`", names(fac_check)[!fac_check], "`", collapse = ", ")
      )
    )
  }
  step_factor2string_new(
    terms = x$terms,
    role = x$role,
    trained = TRUE,
    columns = col_names,
    skip = x$skip,
    id = x$id
  )
}
#' @export
# Bake method: convert each selected column to character in place and
# return the data as a tibble.
bake.step_factor2string <- function(object, new_data, ...) {
  # List-style subsetting keeps a data frame even for a single column,
  # and base lapply() replaces the superseded purrr::map_df() here —
  # the per-column result of as.character() is identical.
  new_data[object$columns] <- lapply(new_data[object$columns], as.character)
  if (!is_tibble(new_data)) {
    new_data <- as_tibble(new_data)
  }
  new_data
}
# Print method for step_factor2string objects.
# Writes a one-line summary ("Character variables from <selection>") via
# the shared printer() helper and returns the step invisibly, following
# the print-method convention used by the other recipe steps.
print.step_factor2string <-
  function(x, width = max(20, options()$width - 30), ...) {
    cat("Character variables from ")
    printer(x$columns, x$terms, x$trained, width = width)
    invisible(x)
  }
#' @rdname tidy.recipe
#' @param x A `step_factor2string` object.
#' @export
# Tidy method: return the step's term selection as a tibble, with the
# step id attached in an `id` column.
tidy.step_factor2string <- function(x, ...) {
  out <- simple_terms(x, ...)
  out$id <- x$id
  out
}
|
/R/factor2string.R
|
permissive
|
JaeDukSeo/recipes
|
R
| false
| false
| 3,238
|
r
|
#' Convert Factors to Strings
#'
#' `step_factor2string` will convert one or more factor
#' vectors to strings.
#'
#' @inheritParams step_center
#' @param columns A character string of variables that will be
#' converted. This is `NULL` until computed by
#' [prep.recipe()].
#' @template step-return
#' @keywords datagen
#' @concept preprocessing
#' @concept variable_encodings
#' @concept factors
#' @export
#' @details `prep` has an option `strings_as_factors` that
#' defaults to `TRUE`. If this step is used with the default
#' option, the string(s) produced by this step will be converted
#' to factors after all of the steps have been prepped.
#'
#' When you [`tidy()`] this step, a tibble with columns `terms` (the
#' columns that will be affected) is returned.
#'
#' @seealso [step_string2factor()] [step_dummy()]
#' @examples
#' library(modeldata)
#' data(okc)
#'
#' rec <- recipe(~ diet + location, data = okc)
#'
#' rec <- rec %>%
#'   step_string2factor(diet)
#'
#' factor_test <- rec %>%
#'   prep(training = okc,
#'        strings_as_factors = FALSE) %>%
#'   juice
#' # diet is a factor (converted by step_string2factor)
#' class(factor_test$diet)
#'
#' rec <- rec %>%
#'   step_factor2string(diet)
#'
#' string_test <- rec %>%
#'   prep(training = okc,
#'        strings_as_factors = FALSE) %>%
#'   juice
#' # diet is a character vector (converted back by step_factor2string)
#' class(string_test$diet)
#'
#' tidy(rec, number = 1)
# NOTE(review): the default `columns = FALSE` below disagrees with the
# roxygen for `columns`, which says it is `NULL` until computed by prep —
# confirm which sentinel is intended.
step_factor2string <-
  function(recipe,
           ...,
           role = NA,
           trained = FALSE,
           columns = FALSE,
           skip = FALSE,
           id = rand_id("factor2string")) {
    add_step(
      recipe,
      step_factor2string_new(
        terms = ellipse_check(...),
        role = role,
        trained = trained,
        columns = columns,
        skip = skip,
        id = id
      )
    )
  }
# Internal constructor for the "factor2string" step object.
# Simply forwards its arguments to the generic `step()` builder with the
# appropriate subclass; no validation happens here.
step_factor2string_new <-
  function(terms, role, trained, columns, skip, id) {
    step(
      subclass = "factor2string",
      terms = terms, role = role, trained = trained,
      columns = columns, skip = skip, id = id
    )
  }
#' @export
# Prep method: resolve the selected columns, verify that every one of
# them is a factor, and return a trained copy of the step.
prep.step_factor2string <- function(x, training, info = NULL, ...) {
  col_names <- eval_select_recipes(x$terms, training, info)
  # Bug fix: list-style subsetting (`training[col_names]`) always yields
  # a data frame, so the check also works when a single column is
  # selected on a base data.frame (where `training[, col_names]` would
  # drop to a plain vector and vapply() would iterate its elements).
  fac_check <- vapply(training[col_names], is.factor, logical(1))
  if (any(!fac_check)) {
    rlang::abort(
      paste0(
        "The following variables are not factor vectors: ",
        paste0("`", names(fac_check)[!fac_check], "`", collapse = ", ")
      )
    )
  }
  step_factor2string_new(
    terms = x$terms,
    role = x$role,
    trained = TRUE,
    columns = col_names,
    skip = x$skip,
    id = x$id
  )
}
#' @export
# Bake method: convert each selected column to character in place and
# return the data as a tibble.
bake.step_factor2string <- function(object, new_data, ...) {
  # List-style subsetting keeps a data frame even for a single column,
  # and base lapply() replaces the superseded purrr::map_df() here —
  # the per-column result of as.character() is identical.
  new_data[object$columns] <- lapply(new_data[object$columns], as.character)
  if (!is_tibble(new_data)) {
    new_data <- as_tibble(new_data)
  }
  new_data
}
# Print method for step_factor2string objects.
# Writes a one-line summary ("Character variables from <selection>") via
# the shared printer() helper and returns the step invisibly, following
# the print-method convention used by the other recipe steps.
print.step_factor2string <-
  function(x, width = max(20, options()$width - 30), ...) {
    cat("Character variables from ")
    printer(x$columns, x$terms, x$trained, width = width)
    invisible(x)
  }
#' @rdname tidy.recipe
#' @param x A `step_factor2string` object.
#' @export
# Tidy method: return the step's term selection as a tibble, with the
# step id attached in an `id` column.
tidy.step_factor2string <- function(x, ...) {
  out <- simple_terms(x, ...)
  out$id <- x$id
  out
}
|
## Name: Elizabeth Lee
## Date: 11/2/14
## Function: Draw retrospective zOR choropleth of states
### Extract mean zOR data by state from create_fluseverity_figs_v5/export_zRR_classifState_v5.py
### Filename: /home/elee/Dropbox/Elizabeth_Bansal_Lab/SDI_Data/explore/Py_export/SDI_state_classif_covCareAdj_v5_7st.csv
## Data Source:
## Notes: ggplot2 references: http://blog.revolutionanalytics.com/2009/11/choropleth-challenge-result.html
# 7/21/15: update notation
# 7/22/15: reduce margin sizes, similar to F_state_accuracy_choropleth
# 7/30/15: update state notation
# 10/15/15: change legend
##
## useful commands:
## install.packages("pkg", dependencies=TRUE, lib="/usr/local/lib/R/site-library") # in sudo R
## update.packages(lib.loc = "/usr/local/lib/R/site-library")
######## header #################################
rm(list = ls())
require(maps)
require(ggplot2)
require(grid)
require(dplyr) # bug fix: tbl_df(), filter() and %>% used below come from dplyr
setwd(dirname(sys.frame(1)$ofile)) # only works if you source the program
# plot formatting
mar = c(0,0,0,0) # plot margins in mm, used in unit(mar, "mm") below
#########################################
## plot data by state (statelevel classif) ##
setwd('../../Py_export')
# Five columns are read; spell out all five classes explicitly (previously
# only four were supplied and the fifth was filled in by recycling).
orig2 <- read.csv('SDI_state_classif_covCareAdj_v5_7st.csv', header=TRUE, colClasses = c('numeric', 'character', 'numeric', 'numeric', 'numeric'))
names(orig2) <- c('season', 'state', 'retro_zOR', 'early_zOR', 'valid_normweeks')
# Bin the retrospective zOR values into ordered severity categories.
orig2$mean_retro_zOR <- cut(orig2$retro_zOR, breaks = seq(-10, 14, by=3), ordered_result=TRUE)
# 11/2/14: reverse order of levels so that severe values are red and at the top of the legend
orig2$mean_retro_zOR <- factor(orig2$mean_retro_zOR, levels=rev(levels(orig2$mean_retro_zOR)))
# crosswalk state names with call letter abbreviations
setwd('../../../Census')
abbr <- read.csv('state_abbreviations.csv', header=TRUE, colClasses='character')
names(abbr) <- c('region', 'state')
abbr$region <- tolower(abbr$region) # convert state names to lower case because orig2 state names are lower case
orig3 <- merge(orig2, abbr, by = 'state', all=T)
us_state_map <- map_data('state')
setwd('../Manuscripts/Age_Severity/Submission_Materials/BMCMedicine/Submission3_ID/AddlFigures')
# Draw and export one choropleth per season (seasons 2 through 9).
for (seas in 2:9){
  plotdata <- tbl_df(orig3) %>% filter(season==seas)
  seasonmap2 <- ggplot(plotdata, aes(map_id = region)) +
    geom_map(aes(fill = mean_retro_zOR), map = us_state_map, color = 'black') +
    scale_fill_brewer(expression(paste('severity, ', bar(rho["s,r"](tau)))), palette = 'RdYlBu', guide = 'legend', drop = F) +
    expand_limits(x = us_state_map$long, y = us_state_map$lat) +
    theme_minimal(base_size = 16, base_family = "") +
    theme(panel.background = element_blank(), panel.grid.major = element_blank(), panel.grid.minor = element_blank(), axis.ticks = element_blank(), axis.text.y = element_blank(), axis.text.x = element_blank(), plot.margin = unit(mar, "mm")) +
    labs(x=NULL, y=NULL)
  ggsave(seasonmap2, width=5, height=3, file=sprintf('RetrozRR_State_Season%s_stlvl.png', seas))
}
# 10/15/15
# 10/15/15
|
/scripts/create_fluseverity_figs_v5/F_zRRstate_choropleth_v5.R
|
permissive
|
eclee25/flu-SDI-exploratory-age
|
R
| false
| false
| 2,946
|
r
|
## Name: Elizabeth Lee
## Date: 11/2/14
## Function: Draw retrospective zOR choropleth of states
### Extract mean zOR data by state from create_fluseverity_figs_v5/export_zRR_classifState_v5.py
### Filename: /home/elee/Dropbox/Elizabeth_Bansal_Lab/SDI_Data/explore/Py_export/SDI_state_classif_covCareAdj_v5_7st.csv
## Data Source:
## Notes: ggplot2 references: http://blog.revolutionanalytics.com/2009/11/choropleth-challenge-result.html
# 7/21/15: update notation
# 7/22/15: reduce margin sizes, similar to F_state_accuracy_choropleth
# 7/30/15: update state notation
# 10/15/15: change legend
##
## useful commands:
## install.packages("pkg", dependencies=TRUE, lib="/usr/local/lib/R/site-library") # in sudo R
## update.packages(lib.loc = "/usr/local/lib/R/site-library")
######## header #################################
rm(list = ls())
require(maps)
require(ggplot2)
require(grid)
require(dplyr) # bug fix: tbl_df(), filter() and %>% used below come from dplyr
setwd(dirname(sys.frame(1)$ofile)) # only works if you source the program
# plot formatting
mar = c(0,0,0,0) # plot margins in mm, used in unit(mar, "mm") below
#########################################
## plot data by state (statelevel classif) ##
setwd('../../Py_export')
# Five columns are read; spell out all five classes explicitly (previously
# only four were supplied and the fifth was filled in by recycling).
orig2 <- read.csv('SDI_state_classif_covCareAdj_v5_7st.csv', header=TRUE, colClasses = c('numeric', 'character', 'numeric', 'numeric', 'numeric'))
names(orig2) <- c('season', 'state', 'retro_zOR', 'early_zOR', 'valid_normweeks')
# Bin the retrospective zOR values into ordered severity categories.
orig2$mean_retro_zOR <- cut(orig2$retro_zOR, breaks = seq(-10, 14, by=3), ordered_result=TRUE)
# 11/2/14: reverse order of levels so that severe values are red and at the top of the legend
orig2$mean_retro_zOR <- factor(orig2$mean_retro_zOR, levels=rev(levels(orig2$mean_retro_zOR)))
# crosswalk state names with call letter abbreviations
setwd('../../../Census')
abbr <- read.csv('state_abbreviations.csv', header=TRUE, colClasses='character')
names(abbr) <- c('region', 'state')
abbr$region <- tolower(abbr$region) # convert state names to lower case because orig2 state names are lower case
orig3 <- merge(orig2, abbr, by = 'state', all=T)
us_state_map <- map_data('state')
setwd('../Manuscripts/Age_Severity/Submission_Materials/BMCMedicine/Submission3_ID/AddlFigures')
# Draw and export one choropleth per season (seasons 2 through 9).
for (seas in 2:9){
  plotdata <- tbl_df(orig3) %>% filter(season==seas)
  seasonmap2 <- ggplot(plotdata, aes(map_id = region)) +
    geom_map(aes(fill = mean_retro_zOR), map = us_state_map, color = 'black') +
    scale_fill_brewer(expression(paste('severity, ', bar(rho["s,r"](tau)))), palette = 'RdYlBu', guide = 'legend', drop = F) +
    expand_limits(x = us_state_map$long, y = us_state_map$lat) +
    theme_minimal(base_size = 16, base_family = "") +
    theme(panel.background = element_blank(), panel.grid.major = element_blank(), panel.grid.minor = element_blank(), axis.ticks = element_blank(), axis.text.y = element_blank(), axis.text.x = element_blank(), plot.margin = unit(mar, "mm")) +
    labs(x=NULL, y=NULL)
  ggsave(seasonmap2, width=5, height=3, file=sprintf('RetrozRR_State_Season%s_stlvl.png', seas))
}
# 10/15/15
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/projMap2Cube.R
\name{projMap2Cube}
\alias{projMap2Cube}
\title{reshape a data matrix from projective mapping
into a brick of data for a \code{distatis} analysis.}
\usage{
projMap2Cube(Data, shape = "flat", nVars = 2, nBlocks = NULL)
}
\arguments{
\item{Data}{a data matrix that can be
\eqn{I} rows by \eqn{J*K} columns (when \code{"flat"}) or
\eqn{I*K} rows by \eqn{J} columns when \code{"long"}.}
\item{shape}{(Default: \code{"flat"}) when \code{"flat"} the data
matrix has dimensions \eqn{I} rows by \eqn{J*K} columns;
when \code{"long"} the data matrix has dimensions
\eqn{I*K} rows by \eqn{J} columns.}
\item{nVars}{Number of variables (default = 2),
relevant only when \code{shape = "flat"}.}
\item{nBlocks}{(Default = \code{NULL}) number
of Blocks (i.e., \eqn{K}) of \eqn{I} products.
Relevant only when \code{shape = "long"}.}
}
\value{
An \eqn{I} by \eqn{J} by \eqn{K} array (i.e., a brick)
to be used to create a cube of distance or covariance.
}
\description{
\code{projMap2Cube}
reshapes a data matrix from projective mapping
into a brick of data for a \code{distatis} analysis.
With \eqn{I} products, \eqn{J} variables, and
\eqn{K} blocks (assessors),
the original data can be 1) "flat"
(e.g., \eqn{I} rows as products,
columns as \eqn{K} blocks of \eqn{J} Variables)
or 2) "long"
(e.g., \eqn{K} blocks of
\eqn{I} rows as products by assessors,
columns as \eqn{J} Variables).
}
\details{
the output \code{projMap2Cube} (i.e., the brick of data)
is used as input to the function \code{cubeOfCov} that will
create the cubeOfDistance (or covariance) that will be used
as input of \code{distatis}.
\code{projMap2Cube} guesses the
names of the products and variables from the
rownames and columns of the data, but this guess
needs to be verified.
}
\examples{
# Use the data from the BeersProjectiveMapping dataset
data("BeersProjectiveMapping")
# Create the I*J_k*K brick of data
dataBrick <- projMap2Cube(BeersProjectiveMapping$ProjectiveMapping,
shape = 'flat', nVars = 2)
}
\author{
Herve Abdi
}
|
/man/projMap2Cube.Rd
|
no_license
|
cran/DistatisR
|
R
| false
| true
| 2,197
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/projMap2Cube.R
\name{projMap2Cube}
\alias{projMap2Cube}
\title{reshape a data matrix from projective mapping
into a brick of data for a \code{distatis} analysis.}
\usage{
projMap2Cube(Data, shape = "flat", nVars = 2, nBlocks = NULL)
}
\arguments{
\item{Data}{a data matrix that can be
\eqn{I} rows by \eqn{J*K} columns (when \code{"flat"}) or
\eqn{I*K} rows by \eqn{J} columns when \code{"long"}.}
\item{shape}{(Default: \code{"flat"}) when \code{"flat"} the data
matrix has dimensions \eqn{I} rows by \eqn{J*K} columns;
when \code{"long"} the data matrix has dimensions
\eqn{I*K} rows by \eqn{J} columns.}
\item{nVars}{Number of variables (default = 2),
relevant only when \code{shape = "flat"}.}
\item{nBlocks}{(Default = \code{NULL}) number
of Blocks (i.e., \eqn{K}) of \eqn{I} products.
Relevant only when \code{shape = "long"}.}
}
\value{
An \eqn{I} by \eqn{J} by \eqn{K} array (i.e., a brick)
to be used to create a cube of distance or covariance.
}
\description{
\code{projMap2Cube}
reshapes a data matrix from projective mapping
into a brick of data for a \code{distatis} analysis.
With \eqn{I} products, \eqn{J} variables, and
\eqn{K} blocks (assessors),
the original data can be 1) "flat"
(e.g., \eqn{I} rows as products,
columns as \eqn{K} blocks of \eqn{J} Variables)
or 2) "long"
(e.g., \eqn{K} blocks of
\eqn{I} rows as products by assessors,
columns as \eqn{J} Variables).
}
\details{
the output \code{projMap2Cube} (i.e., the brick of data)
is used as input to the function \code{cubeOfCov} that will
create the cubeOfDistance (or covariance) that will be used
as input of \code{distatis}.
\code{projMap2Cube} guesses the
names of the products and variables from the
rownames and columns of the data, but this guess
needs to be verified.
}
\examples{
# Use the data from the BeersProjectiveMapping dataset
data("BeersProjectiveMapping")
# Create the I*J_k*K brick of data
dataBrick <- projMap2Cube(BeersProjectiveMapping$ProjectiveMapping,
shape = 'flat', nVars = 2)
}
\author{
Herve Abdi
}
|
######################################################
# Setup
######################################################
# Guard: this stop() prevents the whole file from running if it is
# sourced by accident; the analysis below is meant to be run
# interactively, block by block.
stop()
rm(list = ls(all = TRUE))
gc(reset = TRUE)
library(pbapply)
library(data.table)
library(bit64)
library(ggplot2)
library(Hmisc)
library(jsonlite)
library(reshape2)
library(stringi)
# Bug fix: library(ggplot2) was loaded a second time here; the duplicate
# call has been removed (it is already attached above).
library(ggthemes)
######################################################
# Download data
######################################################
# Newer set of master Mbtests
# old_mbtest_ids <- c(
# '5ce5a4b27347c9002707db2a', # FM Yaml
# '5ce5a44a7347c90029cc30a8', # Current with preds Yaml
# '5ce5a48c7347c9002707db20', # Cosine sim
# '5ce5a49d7347c900251e88fe' # Single column text
# )
# Mbtest id of the current baseline run used for comparison below
# (single id; the commented-out vector above is the superseded set).
old_mbtest_ids <- '5d570a207347c900279268eb' # New current with preds with other yamls added
# Keras tests also listed here: https://github.com/datarobot/DataRobot/pull/37647
# Keras models - current - broken passthrough - needs work
# new_mbtest_ids <- c(
# '5ce5a3f87347c9002707db0c', # FM Yaml
# '5ce5a3807347c900245e4dfe', # Current with preds Yaml
# '5ce5a3b57347c900251e88ed', # Cosine sim
# '5ce5a3e17347c900245e4fe1' # Single column text
# )
# Keras models - current - working passthrough - Looks ok
# new_mbtest_ids <- c(
# '5ce5a3f87347c9002707db0c', # FM Yaml
# '5ce5a3807347c900245e4dfe', # Current with preds Yaml
# '5ce5a3b57347c900251e88ed', # Cosine sim
# '5ce5a3e17347c900245e4fe1' # Single column text
# )
# # Keras models - trying to calibrate - CLOSE THE PR THESE ARE TOO SLOW
# new_mbtest_ids <- c(
# '5ce5b6997347c90029cc32a9', # FM Yaml
# '5ce5b64b7347c9002707db42', # Current with preds Yaml
# '5ce5b6757347c90029cc329f', # Cosine sim
# '5ce5b6877347c900245e5001' # Single column text
# )
# Keras models - current - working passthrough - fixed weight init for multiclass
# Best so far
# new_mbtest_ids <- c(
# '5d02bf2f7347c90026e2d02c', # FM Yaml
# '5d02bef07347c9002931fa58', # Current with preds Yaml
# '5d02bf047347c900248d472a', # Cosine sim
# '5d02bf177347c90026e2d01a' # Single column text
# )
# Keras models - current - working passthrough - fixed weight init for multiclass - learning rate = 1 - CURRENT TEST!
# learning rate = 1 is no good
# new_mbtest_ids <- c(
# '5d0a357e7347c90027fba955', # FM Yaml
# '5d0900997347c90027fba662', # Current with preds Yaml
# '5d0900af7347c900284e1a1c', # Cosine sim
# '5d0900bf7347c900291de9eb' # Single column text
# )
# Keras models - current - 0.1 for class, 0.01 for reg. Find learning rate, cyclic lr, early stopping, smaller default batch size
# OOMS due to stacked predictions. Jesse/Viktor working on RAM FIX
# new_mbtest_ids <- c(
# '5d2659877347c9002610cd64', # FM Yaml
# '5d2659507347c9002c6234a0', # Current with preds Yaml
# '5d26595d7347c9002c62368f', # Cosine sim
# '5d2659757347c9002610cd51' # Single column text
# )
# Keras models - current - 0.1 for class, 0.01 for reg. Find learning rate, cyclic lr, early stopping, smaller default batch size
# RUN ALL AS SLIM RUN TO AVOID THE MULTI MODEL RAM ISSUE
# new_mbtest_ids <- c(
# '5d31dac57347c90027198d85', # FM Yaml
# '5d31daa17347c90027198b97', # Current with preds Yaml
# '5d31dad57347c90029160106', # Cosine sim
# '5d31dae27347c90023a8bb3e' # Single column text
# )
# New test, with just find learning rate turned on
# Some OOMs during pickling =/
# NOT SLIM
# new_mbtest_ids <- c(
# '5d41c5057347c90029fd538b', # FM Yaml
# '5d41c4c37347c90025f7bd51', # Current with preds Yaml
# '5d41c4da7347c90025f7bf40', # Cosine sim
# '5d41c4eb7347c90025f7bf49' # Single column text
# )
#
# # New test, with just find learning rate turned on - ACTUALLY SLIM NOW
# new_mbtest_ids <- c(
# '5d4351067347c90026eeeb73', # FM Yaml
# '5d4350b57347c9002bf429a1', # Current with preds Yaml
# '5d4350dc7347c9002bf42b92', # Cosine sim
# '5d4350f27347c90026eeeb60' # Single column text
# )
# New test, with just find learning rate turned on - ACTUALLY SLIM NOW, MIN BATCH OF 1
# new_mbtest_ids <- c(
# '5d44a0a37347c9002bc9ed12', # FM Yaml
# '5d44a04a7347c900248b1639', # Current with preds Yaml
# '5d44a07b7347c900248b182a', # Cosine sim
# '5d44a0957347c9002bc9ecff' # Single column text
# )
# # New test, just min batch of 1, no find lr
# new_mbtest_ids <- c(
# '5d456e467347c90029d7283c', # FM Yaml
# '5d456dd57347c90025ac0cde', # Current with preds Yaml
# '5d456e2d7347c900248b1843', # Cosine sim
# '5d456e1c7347c90025ac0ecd' # Single column text
# )
# Find LR + jason's fix + batch size 1
# new_mbtest_ids <- c(
# '5d478b517347c9002b435680', # FM Yaml
# '5d478ab47347c90024ea9c6d', # Current with preds Yaml - FAILED DEPLOY
# '5d478b137347c9002b435664', # Cosine sim - FAILED DEPLOY
# '5d478b347347c9002b43566d' # Single column text - FAILED DEPLOY
# )
# Find LR + min batch size of 1 + bug fix for small datasets with only 1 or 2 LR find epochs
# Basically a retest of the above
# new_mbtest_ids <- c(
# '5d48bb307347c9002bb03933', # FM Yaml
# '5d48ba6d7347c90029a74b4b', # Current with preds Yaml
# '5d48bada7347c9002410c54e', # Cosine sim
# '5d48bafb7347c90029a74d3c' # Single column text
# )
# Rerun of the above, because I thought they failed to deploy, but they didnt!
# new_mbtest_ids <- c(
# '5d497fb57347c9002504c393', # FM Yaml
# '5d497f7d7347c9002b3e8fe4', # Current with preds Yaml
# '5d497f8f7347c9002a81cfaf', # Cosine sim
# '5d497f9d7347c9002a81cfb9' # Single column text
# )
# MBtest id(s) for the keras branch run being compared against master.
new_mbtest_ids <- '5d570a027347c900279266ef' # Test with current with preds file, min lr / 10 heuristic + smaller batch size
# Name 'em: one human-readable label per test, used in plots/legends
testnames <- c('Current With Preds')
names(old_mbtest_ids) <- testnames
names(new_mbtest_ids) <- testnames
all_tests <- c(old_mbtest_ids, new_mbtest_ids)
# Leaderboard export endpoint pieces (use `<-` for assignment, consistent with the rest of the file)
prefix <- 'http://shrink.prod.hq.datarobot.com/api/leaderboard_export/advanced_export.csv?mbtests='
suffix <- '&max_sample_size_only=false'
# Fetch one MBtest's leaderboard export and tag each row with the MBtest id
# and its human-readable test name (looked up in the global `all_tests`).
read_and_name <- function(id){
  export_url <- paste0(prefix, id, suffix)
  lb <- fread(export_url)
  lb[, mbtest_id := id]
  lb[, mbtest_name := names(all_tests[id == all_tests])]
  lb
}
# Download the leaderboard exports for both runs (one data.table per mbtest id)
dat_old_raw <- pblapply(old_mbtest_ids, read_and_name)
dat_new_raw <- pblapply(new_mbtest_ids, read_and_name)
######################################################
# Convert possible int64s to numeric
######################################################
# Work on copies so the raw downloads stay untouched for re-runs
dat_old <- copy(dat_old_raw)
dat_new <- copy(dat_new_raw)
# Rescale resource metrics on one leaderboard table: bytes -> GB, seconds -> hours.
# Also coerces bit64 (integer64) columns to double via as.numeric().
# Mutates `x` by reference (data.table `:=`) and returns it for lapply() chaining.
clean_data <- function(x){
  x[,Max_RAM_GB := as.numeric(Max_RAM / 1e9)]                   # bytes -> GB
  x[,Total_Time_P1_Hours := as.numeric(Total_Time_P1 / 3600)]   # seconds -> hours
  x[,size_GB := as.numeric(size / 1e9)]
  x[,dataset_size_GB := as.numeric(dataset_size / 1e9)]
  # Drop an unused column; guard so a missing column is a no-op instead of a warning
  if ('x_prod_2_max_cardinal' %in% names(x)) {
    x[,x_prod_2_max_cardinal := NULL]
  }
  return(x)
}
# Apply the GB/hour conversions to every leaderboard table
dat_old <- lapply(dat_old, clean_data)
dat_new <- lapply(dat_new, clean_data)
# Sanity check: the converted column must now exist in every table
stopifnot(all(sapply(dat_old, function(x) 'Max_RAM_GB' %in% names(x))))
stopifnot(all(sapply(dat_new, function(x) 'Max_RAM_GB' %in% names(x))))
######################################################
# Combine data within each test
######################################################
# Names of the columns of `x` that are NOT bit64::integer64.
# Uses inherits() + vapply(): the original sapply(x, class) returns a list
# when any column has a multi-element class (e.g. c("POSIXct","POSIXt")),
# which breaks the != comparison. vapply() is also type-stable.
get_names <- function(x){
  is_int64 <- vapply(x, inherits, logical(1), what = 'integer64')
  names(x)[!is_int64]
}
# Keep only non-int64 columns common to every table within each run
names_old <- Reduce(intersect, lapply(dat_old, get_names))
names_new <- Reduce(intersect, lapply(dat_new, get_names))
names_all <- intersect(names_new, names_old)
stopifnot('Metablueprint' %in% names_all)
dat_old <- lapply(dat_old, function(x) x[,names_all,with=F])
dat_new <- lapply(dat_new, function(x) x[,names_all,with=F])
dat_old <- rbindlist(dat_old, use.names=T)
dat_new <- rbindlist(dat_new, use.names=T)
# Label the two runs for later melt/dcast by `run`
dat_old[,run := 'master']
dat_new[,run := 'keras']
# Guard against accidentally exporting the wrong metablueprint versions
stopifnot(dat_old[,all(Metablueprint=='Metablueprint v12.0.03-so')])
stopifnot(dat_new[,all(Metablueprint=='Test_Keras v2')])
######################################################
# Combine data BETWEEN the 2 tests
######################################################
tf_bps <- c('TFNNC', 'TFNNR')
keras_bps <- c('KERASR', 'KERASC', 'KERASMULTIC')
nn_bps <- c(tf_bps, keras_bps)
# Subset to RF only
# dat_old <- dat_old[main_task %in% c('RFC', 'RFR'),]
# Exclude baseline BPs from the keras MBtest
dat_new <- dat_new[main_task %in% keras_bps,]
# Combine into 1
dat <- rbindlist(list(dat_old, dat_new), use.names=T)
# Map names to test (first occurrence wins for duplicated filenames)
filename_to_test_map <- unique(dat[,list(Filename, mbtest_name)])
filename_to_test_map <- filename_to_test_map[!duplicated(Filename),]
######################################################
# Add some vars
######################################################
dat[,dataset_bin := cut(dataset_size_GB, unique(c(0, 1.5, 2.5, 5, ceiling(max(dataset_size_GB)))), ordered_result=T, include.lowest=T)]
dat[,sample_round := Sample_Pct]
dat[sample_round=='--', sample_round := '0']
dat[,sample_round := round(as.numeric(sample_round))]
######################################################
# Add some BP info to keras tasks
######################################################
# Parse "key1=val1;key2=val2" strings into a list of named character vectors
# (one list element per input string; names are keys, values are strings).
# Uses base strsplit(fixed=TRUE) instead of stringi — identical fixed-pattern
# splitting, one less dependency — and avoids shadowing the outer `out`.
split_to_named_list <- function(x){
  pair_strings <- strsplit(x, ';', fixed = TRUE)
  lapply(pair_strings, function(a){
    kv <- strsplit(a, '=', fixed = TRUE)
    vals <- vapply(kv, function(p) p[2], character(1))  # NA when no '=' present
    names(vals) <- vapply(kv, function(p) p[1], character(1))
    vals
  })
}
# Parse the blueprint hyperparameters out of main_args ("k=v;k=v" strings)
dat[,main_args_list := split_to_named_list(main_args)]
dat[,loss := sapply(main_args_list, '[', 'loss')]
dat[,epochs := as.integer(sapply(main_args_list, '[', 'epochs'))]
dat[,hidden_units := sapply(main_args_list, '[', 'hidden_units')]
dat[,hidden_activation := sapply(main_args_list, '[', 'hidden_activation')]
dat[,learning_rate := as.numeric(sapply(main_args_list, '[', 'learning_rate'))]
dat[,batch_size := sapply(main_args_list, '[', 'batch_size')]
dat[,double_batch_size := sapply(main_args_list, '[', 'double_batch_size')]
dat[,scale_target := sapply(main_args_list, '[', 'scale_target')]
dat[,log_target := sapply(main_args_list, '[', 'log_target')]
dat[,table(hidden_units)] # Get rid of list(512,64,64,64)
# ATM the prelu BPs look better; keep prelu rows plus non-keras rows (NA activation)
dat <- dat[hidden_activation == 'prelu' | is.na(hidden_activation),]
dat[,table(hidden_activation, useNA = 'always')]
######################################################
# Exclude some rows
######################################################
dat <- dat[which(!is_blender),] # Exclude blenders to see if Keras will help blends
dat <- dat[which(!is_prime),] # Exclude primes to see if Keras will help primes
# Exclude runs above 64%, as we only trained TF up to validation, and did not use the holdout
# TODO: exclude by autopilot round number
dat <- dat[sample_round <= 64,]
# Subset to one keras BP ('' / NA keeps the non-keras rows)
# This is the "autopilot model"
dat <- dat[hidden_units %in% c('list(512)', '', NA),]
######################################################
# Summarize stats - non multiclass
######################################################
# Find a var
# a=sort(names(dat)); a[grepl('Y_Type', tolower(a))]
res <- copy(dat)
res <- res[!is.na(Max_RAM_GB),]
res <- res[!is.na(Total_Time_P1_Hours),]
res <- res[!is.na(`Gini Norm_H`),]
# Best metric per run/dataset/target-type (max for gini, min for error metrics)
res <- res[,list(
Max_RAM_GB = max(Max_RAM_GB),
Total_Time_P1_Hours = max(Total_Time_P1_Hours),
Gini_V = max(`Gini Norm_P1`),
Gini_H = max(`Gini Norm_H`),
Gini_P = max(`Prediction Gini Norm`),
MASE_H = min(`MASE_H`),
MASE_V = min(`MASE_P1`),
LogLoss_H = min(`LogLoss_H`),
LogLoss_V = min(`LogLoss_P1`)
), by=c('run', 'Filename', 'Y_Type')]
measures = c(
'Max_RAM_GB', 'Total_Time_P1_Hours', 'Gini_V', 'Gini_H', 'Gini_P', 'MASE_H', 'MASE_V', 'LogLoss_H', 'LogLoss_V')
# Coerce each measure to numeric, printing any values that will not convert
for(v in measures){
tmp = sort(unique(res[[v]]))
wont_convert = !is.finite(as.numeric(tmp))
if(any(wont_convert)){
print(tmp[wont_convert])
}
set(res, j=v, value=as.numeric(res[[v]]))
}
# Long format, then one column per run (master/keras) side by side
res = melt.data.table(res, measure.vars=intersect(names(res), measures))
res = dcast.data.table(res, Filename + Y_Type + variable ~ run, value.var='value')
res[,diff := as.numeric(keras) - as.numeric(master)]
# Add test name
N <- nrow(res)
res <- merge(res, filename_to_test_map, all.x=T, by=c('Filename'))
stopifnot(N == nrow(res))
######################################################
# Plot of results - non multiclass
######################################################
plot_vars = c('Max_RAM_GB', 'Total_Time_P1_Hours', 'Gini_V', 'Gini_H')
plotdat <- res[
variable %in% plot_vars & !is.na(keras) & !is.na(master),]
ggplot(plotdat, aes(x=`master`, y=`keras`, color=mbtest_name)) +
geom_point() + geom_abline(slope=1, intercept=0) +
facet_wrap(~variable, scales='free') +
theme_bw() + theme_tufte() + ggtitle('keras vs master results')
# Outliers: large RAM / runtime regressions
res[keras > 5+master & variable=='Max_RAM_GB',]
res[keras > 1+master & variable=='Total_Time_P1_Hours',]
# Look for good demos: datasets where keras improves BOTH gini metrics and
# still trains fast. Descriptive names replace a/b/c/x so we do not shadow
# base::c() (a data.table named `c` is a classic lookup footgun).
gini_v_wins <- res[order(diff),][variable == 'Gini_V' & !is.na(diff) & diff>=0,]
gini_h_wins <- res[order(diff),][variable == 'Gini_H' & !is.na(diff) & diff>=0,]
fast_runs <- res[order(diff),][variable == 'Total_Time_P1_Hours' & !is.na(diff) & keras<0.09,]
demos <- merge(gini_v_wins, gini_h_wins, by=c('Filename', 'Y_Type'), all=FALSE)
demos <- merge(demos, fast_runs, by=c('Filename', 'Y_Type'), all=FALSE)
demos[,diff := (diff.x + diff.y)/2]  # mean of the V and H gini improvements
demos[order(diff),][!is.na(diff),]
res[Filename=='reuters_text_train_80.csv',]
######################################################
# Table of results - holdout - non multiclass
######################################################
# Holdout is 20%, so is a larger sample to compare on
# Valid should be good too, as we're comparing up to 64% only.
res_normal = res[variable == 'Gini_H' & diff >= 0,
list(Filename, Y_Type, variable, `master`, `keras`, diff)]
values = c('master', 'keras', 'diff')
res_normal = dcast.data.table(res_normal, Filename + Y_Type ~ variable, value.var = values)
# Which model family wins each dataset, per run
res_cat <- copy(dat)
res_cat <- res_cat[!is.na(Max_RAM_GB),]
res_cat <- res_cat[!is.na(Total_Time_P1_Hours),]
res_cat <- res_cat[!is.na(`Gini Norm_H`),]
res_cat <- res_cat[,list(
best_gini_model = main_task[which.max(`Gini Norm_H`)],
best_mase_model = main_task[which.min(MASE_H)]
), by=c('run', 'Filename')]
measures = c('best_gini_model', 'best_mase_model')
res_cat = melt.data.table(res_cat, measure.vars=intersect(names(res_cat), measures))
res_cat = dcast.data.table(res_cat, Filename + variable ~ run, value.var='value')
cat_norm = res_cat[variable == 'best_gini_model',]
values = c('master', 'keras')
cat_norm = dcast.data.table(cat_norm, Filename ~ variable, value.var = values)
res_normal = merge(res_normal, cat_norm, by='Filename')[order(diff_Gini_H),]
# HUGE improvement on single column text datasets
# HUGE improvements on cosine similarity
# MASSIVELY HUGE improvement on xor text dataset
res_normal[order(diff_Gini_H),]
# On about 8.9% of datasets, better than the best blender on master!
res[!is.na(diff) & variable == 'Gini_V', sum(diff > 0) / .N]
res[!is.na(diff) & variable == 'Gini_H', sum(diff > 0) / .N]
######################################################
# Compare to old TF Bps
######################################################
# Restrict to neural-network blueprints only (TF on master vs keras)
dat_nn <- dat[main_task %in% nn_bps,]
dat_nn[,table(main_task)]
res_nn <- copy(dat_nn)
res_nn <- res_nn[!is.na(Max_RAM_GB),]
res_nn <- res_nn[!is.na(Total_Time_P1_Hours),]
res_nn <- res_nn[!is.na(`Gini Norm_H`),]
# Repo models
#res_nn <- res_nn[(main_task %in% tf_bps) | (hidden_units == 'list(512 ,64, 64)'),]
# Autopilot models
res_nn <- res_nn[(main_task %in% tf_bps) | (hidden_units == 'list(512)'),]
# Best metric per run/dataset/target-type (max for gini, min for error metrics)
res_nn <- res_nn[,list(
Max_RAM_GB = max(Max_RAM_GB),
Total_Time_P1_Hours = max(Total_Time_P1_Hours),
Gini_V = max(`Gini Norm_P1`),
Gini_H = max(`Gini Norm_H`),
Gini_P = max(`Prediction Gini Norm`),
MASE_H = min(`MASE_H`),
MASE_V = min(`MASE_P1`),
LogLoss_H = min(`LogLoss_H`),
LogLoss_V = min(`LogLoss_P1`)
), by=c('run', 'Filename', 'Y_Type')]
measures = c(
'Max_RAM_GB', 'Total_Time_P1_Hours', 'Gini_V', 'Gini_H', 'Gini_P', 'MASE_H', 'MASE_V', 'LogLoss_H', 'LogLoss_V')
res_nn = melt.data.table(res_nn, measure.vars=intersect(names(res_nn), measures))
res_nn = dcast.data.table(res_nn, Filename + Y_Type + variable ~ run, value.var='value')
res_nn[,keras := as.numeric(`keras`)]
res_nn[,master := as.numeric(`master`)]
res_nn[,diff := keras - master]
# Table by gini - V
# 80% better
# trainingDataWithoutNegativeWeights_80.csv
# DR_Demo_Pred_Main_Reg.csv
# terror_mix_train_80.csv
# New_York_Mets_Ian_11.csv
# ofnp_80.csv
summary(res_nn[variable == 'Gini_V',])
res_nn[variable == 'Gini_V'][order(diff),][1:5,]
res_nn[variable == 'Gini_V' & !is.na(diff), sum(diff >= 0) / .N]
# Table by gini - H
# 76% better
# trainingDataWithoutNegativeWeights_80.csv
# DR_Demo_Pred_Main_Reg.csv
# New_York_Mets_Ian_11.csv
summary(res_nn[variable == 'Gini_H',])
res_nn[variable == 'Gini_H'][order(diff),][1:5,]
res_nn[variable == 'Gini_H' & !is.na(diff), sum(diff >= 0) / .N]
# Table by logloss - V
# Worst diff very large
# Best diff large
# Too many epochs? Early stopping? Weight decay?
# Gamblers_80.csv > 3.5 logloss diff!
# trainingDataWithoutNegativeWeights_80.csv > 3.5 logloss diff!
summary(res_nn[variable == 'LogLoss_V',])
res_nn[variable == 'LogLoss_V'][order(-diff),][1:5,]
res_nn[variable == 'LogLoss_V' & !is.na(diff), sum(diff <= 0) / .N]
# Table by logloss - H
# Too many epochs? Early stopping? Weight decay?
# Gamblers_80.csv > 3.5 logloss diff!
# trainingDataWithoutNegativeWeights_80.csv > 3.5 logloss diff!
summary(res_nn[variable == 'LogLoss_H',])
res_nn[variable == 'LogLoss_H'][order(-diff),][1:5,]
res_nn[variable == 'LogLoss_H' & !is.na(diff), sum(diff <= 0) / .N]
# Runtime and RAM worse, but gini better
plot_vars = c('Max_RAM_GB', 'Total_Time_P1_Hours', 'Gini_V', 'Gini_H')
ggplot(res_nn[variable %in% plot_vars,], aes(x=master, y=keras, color=Y_Type)) +
geom_point() + geom_abline(slope=1, intercept=0) +
facet_wrap(~variable, scales='free') +
theme_bw() + theme_tufte() + ggtitle('keras vs tensorflow results')
# Logloss worse
plot_vars = c('Max_RAM_GB', 'Total_Time_P1_Hours', 'LogLoss_V', 'LogLoss_H')
ggplot(res_nn[variable %in% plot_vars,], aes(x=master, y=keras, color=Y_Type)) +
geom_point() + geom_abline(slope=1, intercept=0) +
facet_wrap(~variable, scales='free') +
theme_bw() + theme_tufte() + ggtitle('keras vs tensorflow results')
# Density overlay: master (red) vs keras (blue) per metric
plot_vars = c('Max_RAM_GB', 'Total_Time_P1_Hours', 'Gini_V', 'Gini_H')
ggplot(res_nn[variable %in% plot_vars,]) +
geom_density(aes(x=master), col='red', adjust=1.5) +
geom_density(aes(x=keras), col='blue', adjust=1.5) +
facet_wrap(~variable, scales='free') +
theme_bw() + theme_tufte() + ggtitle('keras vs tensorflow results')
# Performs better in cases where NN Bps do better
# Performs better in cases where NN Bps do better
######################################################
# Plot of results - multiclass - good results!
######################################################
plot_vars = c('Max_RAM_GB', 'Total_Time_P1_Hours', 'LogLoss_V', 'LogLoss_H')
ggplot(res[variable %in% plot_vars & Y_Type == 'Multiclass',], aes(x=`master`, y=`keras`)) +
geom_point() + geom_abline(slope=1, intercept=0) +
facet_wrap(~variable, scales='free') +
theme_bw() + theme_tufte() + ggtitle('keras vs master results')
######################################################
# Worst logloss
######################################################
# Seems like the LR finder helps for text datasets
# LR finder sucks for 250p_PA_HS_3_years_since_debut_predict_70p_80.csv
# 0.89824 with find LR, 0.13497 without
# https://s3.amazonaws.com/datarobot_public_datasets/250p_PA_HS_3_years_since_debut_predict_70p_80.csv
res_nn[variable=='LogLoss_H' & Y_Type == 'Binary',][order(diff, decreasing=T),][1:10,]
# Filename Y_Type variable keras master diff
# 1: 250p_PA_HS_3_years_since_debut_predict_70p_80.csv Binary LogLoss_H 0.89824 0.12774 0.77050
# 2: DR_Demo_Telecomms_Churn.csv Binary LogLoss_H 0.87429 0.26408 0.61021
# 3: subreddit_text_cosine_sim.csv Binary LogLoss_H 1.09619 0.58165 0.51454
# 4: DR_Demo_AML_Alert.csv Binary LogLoss_H 0.74326 0.25443 0.48883
# 5: bio_grid_small_80.csv Binary LogLoss_H 0.67097 0.22656 0.44441
# 6: 28_Features_split_train_converted_train80_CVTVH3.csv Binary LogLoss_H 0.57519 0.13606 0.43913
# 7: mlcomp1438_derivation-stats-balanced2_train_80.csv Binary LogLoss_H 1.01814 0.60560 0.41254
# 8: Benefits_80.csv Binary LogLoss_H 0.92692 0.58602 0.34090
# 9: wells_80.csv Binary LogLoss_H 1.00125 0.66479 0.33646
# 10: bio_exp_wide_train_80.csv Binary LogLoss_H 0.90703 0.59035 0.31668
res_nn[variable=='LogLoss_H' & Y_Type == 'Multiclass',][order(diff, decreasing=T),][1:10,]
# Filename Y_Type variable keras master diff
# 1: mfeat-zernike_v1_80.csv Multiclass LogLoss_H 1.27910 0.39709 0.88201
# 2: long Multiclass LogLoss_H 0.90126 0.36900 0.53226
# 3: weighted_rental_train_TVH.csv Multiclass LogLoss_H 0.50198 0.20268 0.29930
# 4: GesturePhaseSegmentationRAW_v1_80.csv Multiclass LogLoss_H 1.20242 0.90726 0.29516
# 5: weighted_and_dated_rental_train_TVH_80.csv Multiclass LogLoss_H 0.51190 0.21750 0.29440
# 6: internet_usage_v1_train.csv Multiclass LogLoss_H 2.24563 1.97423 0.27140
# 7: 10MB_downsampled_BNG(autos)_v1_80.csv Multiclass LogLoss_H 0.99556 0.73422 0.26134
# 8: JapaneseVowels_v1_80.csv Multiclass LogLoss_H 0.32340 0.06428 0.25912
# 9: 10MB_downsampled_BNG(autos,5000,5)_v1_80.csv Multiclass LogLoss_H 1.21086 0.95326 0.25760
# 10: 10MB_downsampled_BNG(autos,10000,1)_v1_80.csv Multiclass LogLoss_H 0.91936 0.68164 0.23772
# "long" is 0MB_downsampled_Physical_Activity_Recognition_Dataset_Using_Smartphone_Sensors_v1_80.csv
######################################################
# Worst runtime
######################################################
res_nn[variable=='Total_Time_P1_Hours' & Y_Type == 'Binary',][order(diff, decreasing=T),][1:10,]
res_nn[variable=='Total_Time_P1_Hours' & Y_Type == 'Multiclass',][order(diff, decreasing=T),][1:10,]
######################################################
# Worst runtime - overall
######################################################
res[variable=='Total_Time_P1_Hours',][order(diff, decreasing=T),][1:10,]
######################################################
# datasets to test
######################################################
dat[Filename=='quora_80.csv' & main_task == 'KERASC',Blueprint]
# [1] "{u'1': [[u'TXT'], [u'PTM3 a=word;b=1;d1=2;d2=0.5;dtype=float32;id=0;lc=1;maxnr=2;minnr=1;mxf=200000;n=l2;sw=None'], u'T'], u'2': [[u'1'], [u'KERASC batch_size=4096;double_batch_size=1;epochs=4;hidden_activation=prelu;hidden_units=list(512);learning_rate=0.01;loss=binary_crossentropy;max_batch_size=131072;pass_through_inputs=1;t_m=LogLoss'], u'P']}"
# https://s3.amazonaws.com/datarobot_public_datasets/quora_80.csv
# https://s3.amazonaws.com/datarobot_public_datasets/amazon_small_80.csv
# - dataset_name: https://s3.amazonaws.com/datarobot_public_datasets/ClickPrediction80.csv
# metric: Tweedie Deviance
# target: clicks
#
# - dataset_name: https://s3.amazonaws.com/datarobot_public_datasets/OnCampusArrests_80.csv
# metric: Tweedie Deviance
# target: LIQUOR12
#
# - dataset_name: https://s3.amazonaws.com/datarobot_public_datasets/cemst-decision-prediction2-asr3_train_80.csv
# metric: LogLoss
# target: y
#
# - dataset_name: https://s3.amazonaws.com/datarobot_public_datasets/trainingDataWithoutNegativeWeights_80.csv
# metric: LogLoss
# target: classification
#
# - dataset_name: https://s3.amazonaws.com/datarobot_public_datasets/bio_response_combined_80.csv
# metric: LogLoss
# target: Activity
#
# - dataset_name: https://s3.amazonaws.com/datarobot_public_datasets/bio_exp_wide_train_80.csv
# target: regulated
# metric: LogLoss
#
# - dataset_name: https://s3.amazonaws.com/datarobot_public_datasets/Gamblers_80.csv
# metric: LogLoss
# target: YES_ALCOHOL
|
/data-science-scripts/zach/analyze_new_keras.R
|
no_license
|
mcohenmcohen/DataRobot
|
R
| false
| false
| 24,003
|
r
|
######################################################
# Setup
######################################################
# Interactive-only guard: stop() prevents accidentally source()-ing the
# whole script top to bottom; run it section by section instead.
stop()
# NOTE(review): rm(list=ls()) wipes the global environment — acceptable for
# an interactive scratch script, never for sourced/production code.
rm(list=ls(all=T))
gc(reset=T)
library(pbapply)
library(data.table)
library(bit64)
library(ggplot2)
library(Hmisc)
library(jsonlite)
library(reshape2)
library(stringi)
library(ggplot2)
library(ggthemes)
######################################################
# Download data
######################################################
# Newer set of master Mbtests (kept for reference)
# old_mbtest_ids <- c(
# '5ce5a4b27347c9002707db2a', # FM Yaml
# '5ce5a44a7347c90029cc30a8', # Current with preds Yaml
# '5ce5a48c7347c9002707db20', # Cosine sim
# '5ce5a49d7347c900251e88fe' # Single column text
# )
# MBtest id for the master (baseline) run being compared against
old_mbtest_ids <- '5d570a207347c900279268eb' # New current with preds with other yamls added
# Keras tests also listed here: https://github.com/datarobot/DataRobot/pull/37647
# Keras models - current - broken passthrough - needs work
# new_mbtest_ids <- c(
# '5ce5a3f87347c9002707db0c', # FM Yaml
# '5ce5a3807347c900245e4dfe', # Current with preds Yaml
# '5ce5a3b57347c900251e88ed', # Cosine sim
# '5ce5a3e17347c900245e4fe1' # Single column text
# )
# Keras models - current - working passthrough - Looks ok
# new_mbtest_ids <- c(
# '5ce5a3f87347c9002707db0c', # FM Yaml
# '5ce5a3807347c900245e4dfe', # Current with preds Yaml
# '5ce5a3b57347c900251e88ed', # Cosine sim
# '5ce5a3e17347c900245e4fe1' # Single column text
# )
# # Keras models - trying to calibrate - CLOSE THE PR THESE ARE TOO SLOW
# new_mbtest_ids <- c(
# '5ce5b6997347c90029cc32a9', # FM Yaml
# '5ce5b64b7347c9002707db42', # Current with preds Yaml
# '5ce5b6757347c90029cc329f', # Cosine sim
# '5ce5b6877347c900245e5001' # Single column text
# )
# Keras models - current - working passthrough - fixed weight init for multiclass
# Best so far
# new_mbtest_ids <- c(
# '5d02bf2f7347c90026e2d02c', # FM Yaml
# '5d02bef07347c9002931fa58', # Current with preds Yaml
# '5d02bf047347c900248d472a', # Cosine sim
# '5d02bf177347c90026e2d01a' # Single column text
# )
# Keras models - current - working passthrough - fixed weight init for multiclass - learning rate = 1 - CURRENT TEST!
# learning rate = 1 is no good
# new_mbtest_ids <- c(
# '5d0a357e7347c90027fba955', # FM Yaml
# '5d0900997347c90027fba662', # Current with preds Yaml
# '5d0900af7347c900284e1a1c', # Cosine sim
# '5d0900bf7347c900291de9eb' # Single column text
# )
# Keras models - current - 0.1 for class, 0.01 for reg. Find learning rate, cyclic lr, early stopping, smaller default batch size
# OOMS due to stacked predictions. Jesse/Viktor working on RAM FIX
# new_mbtest_ids <- c(
# '5d2659877347c9002610cd64', # FM Yaml
# '5d2659507347c9002c6234a0', # Current with preds Yaml
# '5d26595d7347c9002c62368f', # Cosine sim
# '5d2659757347c9002610cd51' # Single column text
# )
# Keras models - current - 0.1 for class, 0.01 for reg. Find learning rate, cyclic lr, early stopping, smaller default batch size
# RUN ALL AS SLIM RUN TO AVOID THE MULTI MODEL RAM ISSUE
# new_mbtest_ids <- c(
# '5d31dac57347c90027198d85', # FM Yaml
# '5d31daa17347c90027198b97', # Current with preds Yaml
# '5d31dad57347c90029160106', # Cosine sim
# '5d31dae27347c90023a8bb3e' # Single column text
# )
# New test, with just find learning rate turned on
# Some OOMs during pickling =/
# NOT SLIM
# new_mbtest_ids <- c(
# '5d41c5057347c90029fd538b', # FM Yaml
# '5d41c4c37347c90025f7bd51', # Current with preds Yaml
# '5d41c4da7347c90025f7bf40', # Cosine sim
# '5d41c4eb7347c90025f7bf49' # Single column text
# )
#
# # New test, with just find learning rate turned on - ACTUALLY SLIM NOW
# new_mbtest_ids <- c(
# '5d4351067347c90026eeeb73', # FM Yaml
# '5d4350b57347c9002bf429a1', # Current with preds Yaml
# '5d4350dc7347c9002bf42b92', # Cosine sim
# '5d4350f27347c90026eeeb60' # Single column text
# )
# New test, with just find learning rate turned on - ACTUALLY SLIM NOW, MIN BATCH OF 1
# new_mbtest_ids <- c(
# '5d44a0a37347c9002bc9ed12', # FM Yaml
# '5d44a04a7347c900248b1639', # Current with preds Yaml
# '5d44a07b7347c900248b182a', # Cosine sim
# '5d44a0957347c9002bc9ecff' # Single column text
# )
# # New test, just min batch of 1, no find lr
# new_mbtest_ids <- c(
# '5d456e467347c90029d7283c', # FM Yaml
# '5d456dd57347c90025ac0cde', # Current with preds Yaml
# '5d456e2d7347c900248b1843', # Cosine sim
# '5d456e1c7347c90025ac0ecd' # Single column text
# )
# Find LR + jason's fix + batch size 1
# new_mbtest_ids <- c(
# '5d478b517347c9002b435680', # FM Yaml
# '5d478ab47347c90024ea9c6d', # Current with preds Yaml - FAILED DEPLOY
# '5d478b137347c9002b435664', # Cosine sim - FAILED DEPLOY
# '5d478b347347c9002b43566d' # Single column text - FAILED DEPLOY
# )
# Find LR + min batch size of 1 + bug fix for small datasets with only 1 or 2 LR find epochs
# Basically a retest of the above
# new_mbtest_ids <- c(
# '5d48bb307347c9002bb03933', # FM Yaml
# '5d48ba6d7347c90029a74b4b', # Current with preds Yaml
# '5d48bada7347c9002410c54e', # Cosine sim
# '5d48bafb7347c90029a74d3c' # Single column text
# )
# Rerun of the above, because I thought they failed to deploy, but they didnt!
# new_mbtest_ids <- c(
# '5d497fb57347c9002504c393', # FM Yaml
# '5d497f7d7347c9002b3e8fe4', # Current with preds Yaml
# '5d497f8f7347c9002a81cfaf', # Cosine sim
# '5d497f9d7347c9002a81cfb9' # Single column text
# )
# MBtest id(s) for the keras branch run being compared against master.
new_mbtest_ids <- '5d570a027347c900279266ef' # Test with current with preds file, min lr / 10 heuristic + smaller batch size
# Name 'em: one human-readable label per test, used in plots/legends
testnames <- c('Current With Preds')
names(old_mbtest_ids) <- testnames
names(new_mbtest_ids) <- testnames
all_tests <- c(old_mbtest_ids, new_mbtest_ids)
# Leaderboard export endpoint pieces (use `<-` for assignment, consistent with the rest of the file)
prefix <- 'http://shrink.prod.hq.datarobot.com/api/leaderboard_export/advanced_export.csv?mbtests='
suffix <- '&max_sample_size_only=false'
# Fetch one MBtest's leaderboard export and tag each row with the MBtest id
# and its human-readable test name (looked up in the global `all_tests`).
read_and_name <- function(id){
  export_url <- paste0(prefix, id, suffix)
  lb <- fread(export_url)
  lb[, mbtest_id := id]
  lb[, mbtest_name := names(all_tests[id == all_tests])]
  lb
}
# Download the leaderboard exports for both runs (one data.table per mbtest id)
dat_old_raw <- pblapply(old_mbtest_ids, read_and_name)
dat_new_raw <- pblapply(new_mbtest_ids, read_and_name)
######################################################
# Convert possible int64s to numeric
######################################################
# Work on copies so the raw downloads stay untouched for re-runs
dat_old <- copy(dat_old_raw)
dat_new <- copy(dat_new_raw)
# Rescale resource metrics on one leaderboard table: bytes -> GB, seconds -> hours.
# Also coerces bit64 (integer64) columns to double via as.numeric().
# Mutates `x` by reference (data.table `:=`) and returns it for lapply() chaining.
clean_data <- function(x){
  x[,Max_RAM_GB := as.numeric(Max_RAM / 1e9)]                   # bytes -> GB
  x[,Total_Time_P1_Hours := as.numeric(Total_Time_P1 / 3600)]   # seconds -> hours
  x[,size_GB := as.numeric(size / 1e9)]
  x[,dataset_size_GB := as.numeric(dataset_size / 1e9)]
  # Drop an unused column; guard so a missing column is a no-op instead of a warning
  if ('x_prod_2_max_cardinal' %in% names(x)) {
    x[,x_prod_2_max_cardinal := NULL]
  }
  return(x)
}
# Apply the GB/hour conversions to every leaderboard table
dat_old <- lapply(dat_old, clean_data)
dat_new <- lapply(dat_new, clean_data)
# Sanity check: the converted column must now exist in every table
stopifnot(all(sapply(dat_old, function(x) 'Max_RAM_GB' %in% names(x))))
stopifnot(all(sapply(dat_new, function(x) 'Max_RAM_GB' %in% names(x))))
######################################################
# Combine data within each test
######################################################
# Names of the columns of `x` that are NOT bit64::integer64.
# Uses inherits() + vapply(): the original sapply(x, class) returns a list
# when any column has a multi-element class (e.g. c("POSIXct","POSIXt")),
# which breaks the != comparison. vapply() is also type-stable.
get_names <- function(x){
  is_int64 <- vapply(x, inherits, logical(1), what = 'integer64')
  names(x)[!is_int64]
}
# Keep only non-int64 columns common to every table within each run
names_old <- Reduce(intersect, lapply(dat_old, get_names))
names_new <- Reduce(intersect, lapply(dat_new, get_names))
names_all <- intersect(names_new, names_old)
stopifnot('Metablueprint' %in% names_all)
dat_old <- lapply(dat_old, function(x) x[,names_all,with=F])
dat_new <- lapply(dat_new, function(x) x[,names_all,with=F])
dat_old <- rbindlist(dat_old, use.names=T)
dat_new <- rbindlist(dat_new, use.names=T)
# Label the two runs for later melt/dcast by `run`
dat_old[,run := 'master']
dat_new[,run := 'keras']
# Guard against accidentally exporting the wrong metablueprint versions
stopifnot(dat_old[,all(Metablueprint=='Metablueprint v12.0.03-so')])
stopifnot(dat_new[,all(Metablueprint=='Test_Keras v2')])
######################################################
# Combine data BETWEEN the 2 tests
######################################################
tf_bps <- c('TFNNC', 'TFNNR')
keras_bps <- c('KERASR', 'KERASC', 'KERASMULTIC')
nn_bps <- c(tf_bps, keras_bps)
# Subset to RF only
# dat_old <- dat_old[main_task %in% c('RFC', 'RFR'),]
# Exclude baseline BPs from the keras MBtest
dat_new <- dat_new[main_task %in% keras_bps,]
# Combine into 1
dat <- rbindlist(list(dat_old, dat_new), use.names=T)
# Map names to test (first occurrence wins for duplicated filenames)
filename_to_test_map <- unique(dat[,list(Filename, mbtest_name)])
filename_to_test_map <- filename_to_test_map[!duplicated(Filename),]
######################################################
# Add some vars
######################################################
dat[,dataset_bin := cut(dataset_size_GB, unique(c(0, 1.5, 2.5, 5, ceiling(max(dataset_size_GB)))), ordered_result=T, include.lowest=T)]
dat[,sample_round := Sample_Pct]
dat[sample_round=='--', sample_round := '0']
dat[,sample_round := round(as.numeric(sample_round))]
######################################################
# Add some BP info to keras tasks
######################################################
# Parse "key1=val1;key2=val2" strings into a list of named character vectors
# (one list element per input string; names are keys, values are strings).
# Uses base strsplit(fixed=TRUE) instead of stringi — identical fixed-pattern
# splitting, one less dependency — and avoids shadowing the outer `out`.
split_to_named_list <- function(x){
  pair_strings <- strsplit(x, ';', fixed = TRUE)
  lapply(pair_strings, function(a){
    kv <- strsplit(a, '=', fixed = TRUE)
    vals <- vapply(kv, function(p) p[2], character(1))  # NA when no '=' present
    names(vals) <- vapply(kv, function(p) p[1], character(1))
    vals
  })
}
# Parse the blueprint hyperparameters out of main_args ("k=v;k=v" strings)
dat[,main_args_list := split_to_named_list(main_args)]
dat[,loss := sapply(main_args_list, '[', 'loss')]
dat[,epochs := as.integer(sapply(main_args_list, '[', 'epochs'))]
dat[,hidden_units := sapply(main_args_list, '[', 'hidden_units')]
dat[,hidden_activation := sapply(main_args_list, '[', 'hidden_activation')]
dat[,learning_rate := as.numeric(sapply(main_args_list, '[', 'learning_rate'))]
dat[,batch_size := sapply(main_args_list, '[', 'batch_size')]
dat[,double_batch_size := sapply(main_args_list, '[', 'double_batch_size')]
dat[,scale_target := sapply(main_args_list, '[', 'scale_target')]
dat[,log_target := sapply(main_args_list, '[', 'log_target')]
dat[,table(hidden_units)] # Get rid of list(512,64,64,64)
# ATM the prelu BPs look better; keep prelu rows plus non-keras rows (NA activation)
dat <- dat[hidden_activation == 'prelu' | is.na(hidden_activation),]
dat[,table(hidden_activation, useNA = 'always')]
######################################################
# Exclude some rows
######################################################
dat <- dat[which(!is_blender),] # Exclude blenders to see if Keras will help blends
dat <- dat[which(!is_prime),] # Exclude primes to see if Keras will help primes
# Exclude runs above 64%, as we only trained TF up to validation, and did not use the holdout
# TODO: exclude by autopilot round number
dat <- dat[sample_round <= 64,]
# Subset to one keras BP ('' / NA keeps the non-keras rows)
# This is the "autopilot model"
dat <- dat[hidden_units %in% c('list(512)', '', NA),]
######################################################
# Summarize stats - non multiclass
######################################################
# Find a var
# a=sort(names(dat)); a[grepl('Y_Type', tolower(a))]
res <- copy(dat)
res <- res[!is.na(Max_RAM_GB),]
res <- res[!is.na(Total_Time_P1_Hours),]
res <- res[!is.na(`Gini Norm_H`),]
# Best metric per run/dataset/target-type (max for gini, min for error metrics)
res <- res[,list(
Max_RAM_GB = max(Max_RAM_GB),
Total_Time_P1_Hours = max(Total_Time_P1_Hours),
Gini_V = max(`Gini Norm_P1`),
Gini_H = max(`Gini Norm_H`),
Gini_P = max(`Prediction Gini Norm`),
MASE_H = min(`MASE_H`),
MASE_V = min(`MASE_P1`),
LogLoss_H = min(`LogLoss_H`),
LogLoss_V = min(`LogLoss_P1`)
), by=c('run', 'Filename', 'Y_Type')]
measures = c(
'Max_RAM_GB', 'Total_Time_P1_Hours', 'Gini_V', 'Gini_H', 'Gini_P', 'MASE_H', 'MASE_V', 'LogLoss_H', 'LogLoss_V')
# Coerce each measure to numeric, printing any values that will not convert
for(v in measures){
tmp = sort(unique(res[[v]]))
wont_convert = !is.finite(as.numeric(tmp))
if(any(wont_convert)){
print(tmp[wont_convert])
}
set(res, j=v, value=as.numeric(res[[v]]))
}
# Long format, then one column per run (master/keras) side by side
res = melt.data.table(res, measure.vars=intersect(names(res), measures))
res = dcast.data.table(res, Filename + Y_Type + variable ~ run, value.var='value')
res[,diff := as.numeric(keras) - as.numeric(master)]
# Add test name
N <- nrow(res)
res <- merge(res, filename_to_test_map, all.x=T, by=c('Filename'))
stopifnot(N == nrow(res))
######################################################
# Plot of results - non multiclass
######################################################
plot_vars = c('Max_RAM_GB', 'Total_Time_P1_Hours', 'Gini_V', 'Gini_H')
plotdat <- res[
variable %in% plot_vars & !is.na(keras) & !is.na(master),]
ggplot(plotdat, aes(x=`master`, y=`keras`, color=mbtest_name)) +
geom_point() + geom_abline(slope=1, intercept=0) +
facet_wrap(~variable, scales='free') +
theme_bw() + theme_tufte() + ggtitle('keras vs master results')
# Outliers: large RAM / runtime regressions
res[keras > 5+master & variable=='Max_RAM_GB',]
res[keras > 1+master & variable=='Total_Time_P1_Hours',]
# Look for good demos: datasets where keras improves BOTH gini metrics and
# still trains fast. Descriptive names replace a/b/c/x so we do not shadow
# base::c() (a data.table named `c` is a classic lookup footgun).
gini_v_wins <- res[order(diff),][variable == 'Gini_V' & !is.na(diff) & diff>=0,]
gini_h_wins <- res[order(diff),][variable == 'Gini_H' & !is.na(diff) & diff>=0,]
fast_runs <- res[order(diff),][variable == 'Total_Time_P1_Hours' & !is.na(diff) & keras<0.09,]
demos <- merge(gini_v_wins, gini_h_wins, by=c('Filename', 'Y_Type'), all=FALSE)
demos <- merge(demos, fast_runs, by=c('Filename', 'Y_Type'), all=FALSE)
demos[,diff := (diff.x + diff.y)/2]  # mean of the V and H gini improvements
demos[order(diff),][!is.na(diff),]
res[Filename=='reuters_text_train_80.csv',]
######################################################
# Table of results - holdout - non multiclass
######################################################
# Holdout is 20%, so is a larger sample to compare on
# Valid should be good too, as we're comparing up to 64% only.
res_normal = res[variable == 'Gini_H' & diff >= 0,
list(Filename, Y_Type, variable, `master`, `keras`, diff)]
values = c('master', 'keras', 'diff')
res_normal = dcast.data.table(res_normal, Filename + Y_Type ~ variable, value.var = values)
# Which model family wins each dataset, per run
res_cat <- copy(dat)
res_cat <- res_cat[!is.na(Max_RAM_GB),]
res_cat <- res_cat[!is.na(Total_Time_P1_Hours),]
res_cat <- res_cat[!is.na(`Gini Norm_H`),]
res_cat <- res_cat[,list(
best_gini_model = main_task[which.max(`Gini Norm_H`)],
best_mase_model = main_task[which.min(MASE_H)]
), by=c('run', 'Filename')]
measures = c('best_gini_model', 'best_mase_model')
res_cat = melt.data.table(res_cat, measure.vars=intersect(names(res_cat), measures))
res_cat = dcast.data.table(res_cat, Filename + variable ~ run, value.var='value')
cat_norm = res_cat[variable == 'best_gini_model',]
values = c('master', 'keras')
cat_norm = dcast.data.table(cat_norm, Filename ~ variable, value.var = values)
res_normal = merge(res_normal, cat_norm, by='Filename')[order(diff_Gini_H),]
# HUGE improvement on single column text datasets
# HUGE improvements on cosine similarity
# MASSIVELY HUGE improvement on xor text dataset
res_normal[order(diff_Gini_H),]
# On about 8.9% of datasets, better than the best blender on master!
res[!is.na(diff) & variable == 'Gini_V', sum(diff > 0) / .N]
res[!is.na(diff) & variable == 'Gini_H', sum(diff > 0) / .N]
######################################################
# Compare to old TF Bps
######################################################
# Restrict to neural-network blueprints only (TF on master vs keras)
dat_nn <- dat[main_task %in% nn_bps,]
dat_nn[,table(main_task)]
res_nn <- copy(dat_nn)
res_nn <- res_nn[!is.na(Max_RAM_GB),]
res_nn <- res_nn[!is.na(Total_Time_P1_Hours),]
res_nn <- res_nn[!is.na(`Gini Norm_H`),]
# Repo models
#res_nn <- res_nn[(main_task %in% tf_bps) | (hidden_units == 'list(512 ,64, 64)'),]
# Autopilot models
res_nn <- res_nn[(main_task %in% tf_bps) | (hidden_units == 'list(512)'),]
# Best metric per run/dataset/target-type (max for gini, min for error metrics)
res_nn <- res_nn[,list(
Max_RAM_GB = max(Max_RAM_GB),
Total_Time_P1_Hours = max(Total_Time_P1_Hours),
Gini_V = max(`Gini Norm_P1`),
Gini_H = max(`Gini Norm_H`),
Gini_P = max(`Prediction Gini Norm`),
MASE_H = min(`MASE_H`),
MASE_V = min(`MASE_P1`),
LogLoss_H = min(`LogLoss_H`),
LogLoss_V = min(`LogLoss_P1`)
), by=c('run', 'Filename', 'Y_Type')]
measures = c(
'Max_RAM_GB', 'Total_Time_P1_Hours', 'Gini_V', 'Gini_H', 'Gini_P', 'MASE_H', 'MASE_V', 'LogLoss_H', 'LogLoss_V')
res_nn = melt.data.table(res_nn, measure.vars=intersect(names(res_nn), measures))
res_nn = dcast.data.table(res_nn, Filename + Y_Type + variable ~ run, value.var='value')
res_nn[,keras := as.numeric(`keras`)]
res_nn[,master := as.numeric(`master`)]
res_nn[,diff := keras - master]
# Table by gini - V
# 80% better
# trainingDataWithoutNegativeWeights_80.csv
# DR_Demo_Pred_Main_Reg.csv
# terror_mix_train_80.csv
# New_York_Mets_Ian_11.csv
# ofnp_80.csv
summary(res_nn[variable == 'Gini_V',])
res_nn[variable == 'Gini_V'][order(diff),][1:5,]
res_nn[variable == 'Gini_V' & !is.na(diff), sum(diff >= 0) / .N]
# Table by gini - H
# 76% better
# trainingDataWithoutNegativeWeights_80.csv
# DR_Demo_Pred_Main_Reg.csv
# New_York_Mets_Ian_11.csv
summary(res_nn[variable == 'Gini_H',])
res_nn[variable == 'Gini_H'][order(diff),][1:5,]
res_nn[variable == 'Gini_H' & !is.na(diff), sum(diff >= 0) / .N]
# Table by logloss - V
# Worst diff very large
# Best diff large
# Too many epochs? Early stopping? Weight decay?
# Gamblers_80.csv > 3.5 logloss diff!
# trainingDataWithoutNegativeWeights_80.csv > 3.5 logloss diff!
summary(res_nn[variable == 'LogLoss_V',])
res_nn[variable == 'LogLoss_V'][order(-diff),][1:5,]
res_nn[variable == 'LogLoss_V' & !is.na(diff), sum(diff <= 0) / .N]
# Table by logloss - H
# Too many epochs? Early stopping? Weight decay?
# Gamblers_80.csv > 3.5 logloss diff!
# trainingDataWithoutNegativeWeights_80.csv > 3.5 logloss diff!
summary(res_nn[variable == 'LogLoss_H',])
res_nn[variable == 'LogLoss_H'][order(-diff),][1:5,]
res_nn[variable == 'LogLoss_H' & !is.na(diff), sum(diff <= 0) / .N]
# Runtime and RAM worse, but gini better
plot_vars = c('Max_RAM_GB', 'Total_Time_P1_Hours', 'Gini_V', 'Gini_H')
ggplot(res_nn[variable %in% plot_vars,], aes(x=master, y=keras, color=Y_Type)) +
geom_point() + geom_abline(slope=1, intercept=0) +
facet_wrap(~variable, scales='free') +
theme_bw() + theme_tufte() + ggtitle('keras vs tensorflow results')
# Logloss worse
plot_vars = c('Max_RAM_GB', 'Total_Time_P1_Hours', 'LogLoss_V', 'LogLoss_H')
ggplot(res_nn[variable %in% plot_vars,], aes(x=master, y=keras, color=Y_Type)) +
geom_point() + geom_abline(slope=1, intercept=0) +
facet_wrap(~variable, scales='free') +
theme_bw() + theme_tufte() + ggtitle('keras vs tensorflow results')
plot_vars = c('Max_RAM_GB', 'Total_Time_P1_Hours', 'Gini_V', 'Gini_H')
ggplot(res_nn[variable %in% plot_vars,]) +
geom_density(aes(x=master), col='red', adjust=1.5) +
geom_density(aes(x=keras), col='blue', adjust=1.5) +
facet_wrap(~variable, scales='free') +
theme_bw() + theme_tufte() + ggtitle('keras vs tensorflow results')
# Performs better in cases where NN Bps do better
######################################################
# Plot of results - multiclass - good results!
######################################################
plot_vars = c('Max_RAM_GB', 'Total_Time_P1_Hours', 'LogLoss_V', 'LogLoss_H')
ggplot(res[variable %in% plot_vars & Y_Type == 'Multiclass',], aes(x=`master`, y=`keras`)) +
geom_point() + geom_abline(slope=1, intercept=0) +
facet_wrap(~variable, scales='free') +
theme_bw() + theme_tufte() + ggtitle('keras vs master results')
######################################################
# Worst logloss
######################################################
# Seems like the LR finder helps for text datasets
# LR finder sucks for 250p_PA_HS_3_years_since_debut_predict_70p_80.csv
# 0.89824 with find LR, 0.13497 without
# https://s3.amazonaws.com/datarobot_public_datasets/250p_PA_HS_3_years_since_debut_predict_70p_80.csv
res_nn[variable=='LogLoss_H' & Y_Type == 'Binary',][order(diff, decreasing=T),][1:10,]
# Filename Y_Type variable keras master diff
# 1: 250p_PA_HS_3_years_since_debut_predict_70p_80.csv Binary LogLoss_H 0.89824 0.12774 0.77050
# 2: DR_Demo_Telecomms_Churn.csv Binary LogLoss_H 0.87429 0.26408 0.61021
# 3: subreddit_text_cosine_sim.csv Binary LogLoss_H 1.09619 0.58165 0.51454
# 4: DR_Demo_AML_Alert.csv Binary LogLoss_H 0.74326 0.25443 0.48883
# 5: bio_grid_small_80.csv Binary LogLoss_H 0.67097 0.22656 0.44441
# 6: 28_Features_split_train_converted_train80_CVTVH3.csv Binary LogLoss_H 0.57519 0.13606 0.43913
# 7: mlcomp1438_derivation-stats-balanced2_train_80.csv Binary LogLoss_H 1.01814 0.60560 0.41254
# 8: Benefits_80.csv Binary LogLoss_H 0.92692 0.58602 0.34090
# 9: wells_80.csv Binary LogLoss_H 1.00125 0.66479 0.33646
# 10: bio_exp_wide_train_80.csv Binary LogLoss_H 0.90703 0.59035 0.31668
res_nn[variable=='LogLoss_H' & Y_Type == 'Multiclass',][order(diff, decreasing=T),][1:10,]
# Filename Y_Type variable keras master diff
# 1: mfeat-zernike_v1_80.csv Multiclass LogLoss_H 1.27910 0.39709 0.88201
# 2: long Multiclass LogLoss_H 0.90126 0.36900 0.53226
# 3: weighted_rental_train_TVH.csv Multiclass LogLoss_H 0.50198 0.20268 0.29930
# 4: GesturePhaseSegmentationRAW_v1_80.csv Multiclass LogLoss_H 1.20242 0.90726 0.29516
# 5: weighted_and_dated_rental_train_TVH_80.csv Multiclass LogLoss_H 0.51190 0.21750 0.29440
# 6: internet_usage_v1_train.csv Multiclass LogLoss_H 2.24563 1.97423 0.27140
# 7: 10MB_downsampled_BNG(autos)_v1_80.csv Multiclass LogLoss_H 0.99556 0.73422 0.26134
# 8: JapaneseVowels_v1_80.csv Multiclass LogLoss_H 0.32340 0.06428 0.25912
# 9: 10MB_downsampled_BNG(autos,5000,5)_v1_80.csv Multiclass LogLoss_H 1.21086 0.95326 0.25760
# 10: 10MB_downsampled_BNG(autos,10000,1)_v1_80.csv Multiclass LogLoss_H 0.91936 0.68164 0.23772
# "long" is 0MB_downsampled_Physical_Activity_Recognition_Dataset_Using_Smartphone_Sensors_v1_80.csv
######################################################
# Worst runtime
######################################################
res_nn[variable=='Total_Time_P1_Hours' & Y_Type == 'Binary',][order(diff, decreasing=T),][1:10,]
res_nn[variable=='Total_Time_P1_Hours' & Y_Type == 'Multiclass',][order(diff, decreasing=T),][1:10,]
######################################################
# Worst runtime - overall
######################################################
res[variable=='Total_Time_P1_Hours',][order(diff, decreasing=T),][1:10,]
######################################################
# datasets to test
######################################################
dat[Filename=='quora_80.csv' & main_task == 'KERASC',Blueprint]
# [1] "{u'1': [[u'TXT'], [u'PTM3 a=word;b=1;d1=2;d2=0.5;dtype=float32;id=0;lc=1;maxnr=2;minnr=1;mxf=200000;n=l2;sw=None'], u'T'], u'2': [[u'1'], [u'KERASC batch_size=4096;double_batch_size=1;epochs=4;hidden_activation=prelu;hidden_units=list(512);learning_rate=0.01;loss=binary_crossentropy;max_batch_size=131072;pass_through_inputs=1;t_m=LogLoss'], u'P']}"
# https://s3.amazonaws.com/datarobot_public_datasets/quora_80.csv
# https://s3.amazonaws.com/datarobot_public_datasets/amazon_small_80.csv
# - dataset_name: https://s3.amazonaws.com/datarobot_public_datasets/ClickPrediction80.csv
# metric: Tweedie Deviance
# target: clicks
#
# - dataset_name: https://s3.amazonaws.com/datarobot_public_datasets/OnCampusArrests_80.csv
# metric: Tweedie Deviance
# target: LIQUOR12
#
# - dataset_name: https://s3.amazonaws.com/datarobot_public_datasets/cemst-decision-prediction2-asr3_train_80.csv
# metric: LogLoss
# target: y
#
# - dataset_name: https://s3.amazonaws.com/datarobot_public_datasets/trainingDataWithoutNegativeWeights_80.csv
# metric: LogLoss
# target: classification
#
# - dataset_name: https://s3.amazonaws.com/datarobot_public_datasets/bio_response_combined_80.csv
# metric: LogLoss
# target: Activity
#
# - dataset_name: https://s3.amazonaws.com/datarobot_public_datasets/bio_exp_wide_train_80.csv
# target: regulated
# metric: LogLoss
#
# - dataset_name: https://s3.amazonaws.com/datarobot_public_datasets/Gamblers_80.csv
# metric: LogLoss
# target: YES_ALCOHOL
|
testlist <- list(Rs = numeric(0), atmp = numeric(0), relh = c(7.64681398433536e-304, -4.29227809743625e-307, 1.81037701089217e+87, -2.93112217825115e-158, 9.03412394302482e-46, 7.31195213563656e+256, -1.93925524631599e-68, 2.08343441298214e-168, 1.39098956557385e-309, 4.66631809251609e-301, -4.35371624255136e-143, -6.73292524882432e+44, 1.25561609525069e+163, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), temp = 1.11231963688461e-307)
result <- do.call(meteor:::ET0_Makkink,testlist)
str(result)
|
/meteor/inst/testfiles/ET0_Makkink/AFL_ET0_Makkink/ET0_Makkink_valgrind_files/1615863059-test.R
|
no_license
|
akhikolla/updatedatatype-list3
|
R
| false
| false
| 643
|
r
|
testlist <- list(Rs = numeric(0), atmp = numeric(0), relh = c(7.64681398433536e-304, -4.29227809743625e-307, 1.81037701089217e+87, -2.93112217825115e-158, 9.03412394302482e-46, 7.31195213563656e+256, -1.93925524631599e-68, 2.08343441298214e-168, 1.39098956557385e-309, 4.66631809251609e-301, -4.35371624255136e-143, -6.73292524882432e+44, 1.25561609525069e+163, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), temp = 1.11231963688461e-307)
result <- do.call(meteor:::ET0_Makkink,testlist)
str(result)
|
# ui.R -- shinydashboard layout for the SocraticSwirl instructor dashboard.
library(shiny)
library(shinydashboard)

# Header bar: app title plus a dynamically generated progress dropdown.
header <- dashboardHeader(title = "SocraticSwirl",
                          dropdownMenuOutput("progressMenu"))

# Sidebar: user-session selector, tab navigation, and a "Controls" box for
# choosing the course/lesson and the automatic refresh interval.
sidebar <- dashboardSidebar(
  uiOutput("usersessions"),
  hr(),
  sidebarMenu(
    menuItem("Exercise Dashboard", tabName = "exercise_tab", icon = icon("dashboard")),
    menuItem("Lesson Overview", tabName = "overview_tab", icon = icon("list")),
    menuItem("Submitted Questions", tabName = "questions_tab", icon = icon("question-circle"))
  ),
  p(), #Fix for better separation
  hr(),
  box(style = "color: black;",
      width = NULL, title = "Controls", collapsible = TRUE,
      uiOutput("selectCourse"),
      uiOutput("selectLesson"),
      # Refresh-interval values are in seconds.
      # BUG FIX: "1 minute" was 50 and "5 minutes" was 600; corrected to
      # 60 and 300 so the values match their labels.
      selectInput("interval", label = "Refresh interval",
                  choices = c(
                    "5 seconds" = 5,
                    "15 seconds" = 15,
                    "30 seconds" = 30,
                    "1 minute" = 60,
                    "5 minutes" = 300,
                    "Off" = FALSE),
                  selected = "30"),
      uiOutput("timeSinceLastUpdate"),
      actionButton("refresh", "Refresh now")
  )
)

# Main body: three tabs -- per-exercise dashboard, lesson overview graph,
# and the table of student-submitted questions.
body <- dashboardBody(
  tabItems(
    tabItem(tabName = "exercise_tab",
            fluidRow(
              # Left Column
              column(width = 6,
                     # Exercise selector & progress bars
                     box(collapsible = FALSE, width = NULL, title = "Select Exercise",
                         uiOutput("selectExercise"),
                         uiOutput("attemptedBar", style = "list-style-type: none;"),
                         uiOutput("completedBar", style = "list-style-type: none;")),
                     # Plots
                     tabBox(width = NULL,
                            tabPanel(title = "Attempt Frequency",
                                     plotOutput("plotFreqAttempts")),
                            tabPanel(title = "Progress Tracking",
                                     plotOutput("plotProgress"))
                     )
              ),
              # Right Column
              column(width = 6,
                     # Exercise info
                     tabBox(width = NULL,
                            tabPanel(title = "Exercise Prompt",
                                     uiOutput("exerciseQuestion")),
                            tabPanel(title = "Correct Answer",
                                     verbatimTextOutput("exerciseAnswer"), collapsible = TRUE)
                     ),
                     # Answer tables
                     tabBox(width = NULL,
                            tabPanel(title = "Incorrect Answers",
                                     dataTableOutput("incorrectAnswers")),
                            tabPanel(title = "Common Errors",
                                     dataTableOutput("commonErrors")
                            )
                     )
              )
            )
    ),
    tabItem(tabName = "overview_tab",
            box(collapsible = TRUE, width = NULL,
                plotOutput("overviewGraph"))
    ),
    tabItem(tabName = "questions_tab",
            box(width = NULL,
                dataTableOutput("questionsasked")
            )
    )
  )
)

dashboardPage(header, sidebar, body, skin = "blue")
|
/inst/dashboard/ui.R
|
no_license
|
chaugustin/socraticswirlInstructor
|
R
| false
| false
| 3,607
|
r
|
# ui.R -- shinydashboard layout for the SocraticSwirl instructor dashboard.
library(shiny)
library(shinydashboard)

# Header bar: app title plus a dynamically generated progress dropdown.
header <- dashboardHeader(title = "SocraticSwirl",
                          dropdownMenuOutput("progressMenu"))

# Sidebar: user-session selector, tab navigation, and a "Controls" box for
# choosing the course/lesson and the automatic refresh interval.
sidebar <- dashboardSidebar(
  uiOutput("usersessions"),
  hr(),
  sidebarMenu(
    menuItem("Exercise Dashboard", tabName = "exercise_tab", icon = icon("dashboard")),
    menuItem("Lesson Overview", tabName = "overview_tab", icon = icon("list")),
    menuItem("Submitted Questions", tabName = "questions_tab", icon = icon("question-circle"))
  ),
  p(), #Fix for better separation
  hr(),
  box(style = "color: black;",
      width = NULL, title = "Controls", collapsible = TRUE,
      uiOutput("selectCourse"),
      uiOutput("selectLesson"),
      # Refresh-interval values are in seconds.
      # BUG FIX: "1 minute" was 50 and "5 minutes" was 600; corrected to
      # 60 and 300 so the values match their labels.
      selectInput("interval", label = "Refresh interval",
                  choices = c(
                    "5 seconds" = 5,
                    "15 seconds" = 15,
                    "30 seconds" = 30,
                    "1 minute" = 60,
                    "5 minutes" = 300,
                    "Off" = FALSE),
                  selected = "30"),
      uiOutput("timeSinceLastUpdate"),
      actionButton("refresh", "Refresh now")
  )
)

# Main body: three tabs -- per-exercise dashboard, lesson overview graph,
# and the table of student-submitted questions.
body <- dashboardBody(
  tabItems(
    tabItem(tabName = "exercise_tab",
            fluidRow(
              # Left Column
              column(width = 6,
                     # Exercise selector & progress bars
                     box(collapsible = FALSE, width = NULL, title = "Select Exercise",
                         uiOutput("selectExercise"),
                         uiOutput("attemptedBar", style = "list-style-type: none;"),
                         uiOutput("completedBar", style = "list-style-type: none;")),
                     # Plots
                     tabBox(width = NULL,
                            tabPanel(title = "Attempt Frequency",
                                     plotOutput("plotFreqAttempts")),
                            tabPanel(title = "Progress Tracking",
                                     plotOutput("plotProgress"))
                     )
              ),
              # Right Column
              column(width = 6,
                     # Exercise info
                     tabBox(width = NULL,
                            tabPanel(title = "Exercise Prompt",
                                     uiOutput("exerciseQuestion")),
                            tabPanel(title = "Correct Answer",
                                     verbatimTextOutput("exerciseAnswer"), collapsible = TRUE)
                     ),
                     # Answer tables
                     tabBox(width = NULL,
                            tabPanel(title = "Incorrect Answers",
                                     dataTableOutput("incorrectAnswers")),
                            tabPanel(title = "Common Errors",
                                     dataTableOutput("commonErrors")
                            )
                     )
              )
            )
    ),
    tabItem(tabName = "overview_tab",
            box(collapsible = TRUE, width = NULL,
                plotOutput("overviewGraph"))
    ),
    tabItem(tabName = "questions_tab",
            box(width = NULL,
                dataTableOutput("questionsasked")
            )
    )
  )
)

dashboardPage(header, sidebar, body, skin = "blue")
|
##Pengantar Statistika Keuangan 13 Maret 2018##
setwd("D:\\Kuliah\\Semester 6\\Pengantar Statistika Keuangan\\Syntax R") #membuat direktori file
#membuat fungsi
# Compute the area of a triangle from its base `a` and height `t`.
luassegitiga <- function(a, t) {
  (a * t) / 2
}
# Function names must be written without spaces.
luassegitiga(4, 8)
#perkalian fungsi
# Multiply `a` by `b`, with an optional extra multiplier `c` and divisor `d`.
# FIX: the defaults were the logicals TRUE (which arithmetic silently
# coerces to 1); use the explicit neutral value 1 instead -- numerically
# identical for every existing call, but the intent is now clear.
perkalian <- function(a, b, c = 1, d = 1) {
  kali <- a * b * c / d
  return(kali)
}
perkalian(4, 3, d = 2)
# c and d are optional; they default to the multiplicative identity 1.
# Looping in R
# The for-loop control structure
for (i in 1:4){
  print("Alay boleh, asal taat aturan")
}
# The if control structure
a <- 22.2
if (is.numeric(a)){
  cat("Variabel a adalah suatu angka:", a)
}
# cat: similar to print, but cat can combine several pieces of text
# if is.numeric() is not satisfied, no output is produced
# The if...else control structure
a <- "Nom...nom"
if (is.numeric(a)){
  cat("Variabel a adalah suatu angka:", a)
} else {
  cat("Variabel a bukan angka:", a)
}
# provides one branch for the TRUE case and one for the FALSE case
# Nested / chained if...else control
a <- 7
if (a>10){
  print("Statistics ENTHUSIASTICS")
} else if (a>0 & a<= 10) {
  print("Data analis yang antusias dan berintegritas")
} else {
  print("Lima konsentrasi")
}
# The switch control structure (selection)
pilih <- switch(3, "Bahasa R", "Bahasa Python", "Bahasa C")
print(pilih)
# or, wrapped in a function:
pilih <- function(num, a, b)
  switch(num,
         satu = {
           kali = a*b
           print(kali)
         },
         dua = {
           bagi = a/b
           print(bagi)
         }
  )
pilih("satu", 2, 5)
|
/Membuat Fungsi (Latihan).R
|
no_license
|
dededianpratiwi/Sintaks-R-Pengantar-Statistika-Keuangan
|
R
| false
| false
| 1,692
|
r
|
##Pengantar Statistika Keuangan 13 Maret 2018##
setwd("D:\\Kuliah\\Semester 6\\Pengantar Statistika Keuangan\\Syntax R") #membuat direktori file
#membuat fungsi
# Compute the area of a triangle from its base `a` and height `t`.
luassegitiga <- function(a, t) {
  (a * t) / 2
}
# Function names must be written without spaces.
luassegitiga(4, 8)
#perkalian fungsi
# Multiply `a` by `b`, with an optional extra multiplier `c` and divisor `d`.
# FIX: the defaults were the logicals TRUE (which arithmetic silently
# coerces to 1); use the explicit neutral value 1 instead -- numerically
# identical for every existing call, but the intent is now clear.
perkalian <- function(a, b, c = 1, d = 1) {
  kali <- a * b * c / d
  return(kali)
}
perkalian(4, 3, d = 2)
# c and d are optional; they default to the multiplicative identity 1.
# Looping in R
# The for-loop control structure
for (i in 1:4){
  print("Alay boleh, asal taat aturan")
}
# The if control structure
a <- 22.2
if (is.numeric(a)){
  cat("Variabel a adalah suatu angka:", a)
}
# cat: similar to print, but cat can combine several pieces of text
# if is.numeric() is not satisfied, no output is produced
# The if...else control structure
a <- "Nom...nom"
if (is.numeric(a)){
  cat("Variabel a adalah suatu angka:", a)
} else {
  cat("Variabel a bukan angka:", a)
}
# provides one branch for the TRUE case and one for the FALSE case
# Nested / chained if...else control
a <- 7
if (a>10){
  print("Statistics ENTHUSIASTICS")
} else if (a>0 & a<= 10) {
  print("Data analis yang antusias dan berintegritas")
} else {
  print("Lima konsentrasi")
}
# The switch control structure (selection)
pilih <- switch(3, "Bahasa R", "Bahasa Python", "Bahasa C")
print(pilih)
# or, wrapped in a function:
pilih <- function(num, a, b)
  switch(num,
         satu = {
           kali = a*b
           print(kali)
         },
         dua = {
           bagi = a/b
           print(bagi)
         }
  )
pilih("satu", 2, 5)
|
\name{grpregOverlap-internal}
\title{Internal functions}
\alias{gamma2beta}
\description{Internal functions in the package.}
\usage{
gamma2beta(gamma, incidence.mat, grp.vec, family)
}
\author{
Yaohui Zeng <yaohui-zeng@uiowa.edu>
}
\details{
This function is not intended for use by users. \code{gamma2beta} transforms the latent coefficient matrix (or vector) into non-latent form according to the grouping information.
}
\keyword{internal}
|
/man/grpregOverlap-internal.Rd
|
no_license
|
YaohuiZeng/grpregOverlap
|
R
| false
| false
| 445
|
rd
|
\name{grpregOverlap-internal}
\title{Internal functions}
\alias{gamma2beta}
\description{Internal functions in the package.}
\usage{
gamma2beta(gamma, incidence.mat, grp.vec, family)
}
\author{
Yaohui Zeng <yaohui-zeng@uiowa.edu>
}
\details{
This function is not intended for use by users. \code{gamma2beta} transforms the latent coefficient matrix (or vector) into non-latent form according to the grouping information.
}
\keyword{internal}
|
## This should detect and install missing packages before loading them
## yang yao and kamil bojanczyk
## motivation: R Shiny gallery and look at urls in ui.R
list.of.packages <- c("shiny", "ggplot2", "dplyr")
new.packages <- setdiff(list.of.packages, installed.packages()[, "Package"])
if (length(new.packages) > 0) {
  install.packages(new.packages)
}
invisible(lapply(list.of.packages, function(pkg) library(pkg, character.only = TRUE)))
# TODO
#' 1) successfully read in csv from
#' 1a) lens.org data
#' 1b) Google patents data
#' 2) successfully read in excel file from sumobrain data
#' 3) successfully visualize patent data frame data by
#' 3a) columns (choose which ones to display)
#' 3b) values within rows: example, choose assignees to display
#' 4) successfully display simple patent summaries
#' 4a) total number of documents by docType
#' 4b) number of documents by assignee
#' 5) be able to export data with the following types
#' 5a) csv export
#' 5b) excel export (xlsx)
#'
|
/inst/shiny/app/global.R
|
no_license
|
lupok2001/patentr
|
R
| false
| false
| 980
|
r
|
## This should detect and install missing packages before loading them
## yang yao and kamil bojanczyk
## motivation: R Shiny gallery and look at urls in ui.R
list.of.packages <- c("shiny", "ggplot2", "dplyr")
new.packages <- setdiff(list.of.packages, installed.packages()[, "Package"])
if (length(new.packages) > 0) {
  install.packages(new.packages)
}
invisible(lapply(list.of.packages, function(pkg) library(pkg, character.only = TRUE)))
# TODO
#' 1) successfully read in csv from
#' 1a) lens.org data
#' 1b) Google patents data
#' 2) successfully read in excel file from sumobrain data
#' 3) successfully visualize patent data frame data by
#' 3a) columns (choose which ones to display)
#' 3b) values within rows: example, choose assignees to display
#' 4) successfully display simple patent summaries
#' 4a) total number of documents by docType
#' 4b) number of documents by assignee
#' 5) be able to export data with the following types
#' 5a) csv export
#' 5b) excel export (xlsx)
#'
|
#' esSil
#'
#' Identify and draw sil_width for a clustering pattern. It requires package
#' 'cluster'. It adds the NMF cluster and silhouette width in the es dataframe
#' (using covar_name + cluster or sil_width)
#'
#' @export
#' @param es expression set
#' @param clusters Number of clusters
#' @param covar_name covariate name
#' @note Requires package 'cluster'
#' @author Shahab Asgharzadeh
#' @references "An Introduction to Bioconductor's ExpressionSet Class" \cr Seth
#' Falcon, Martin Morgan, and Robert Gentleman \cr 6 October, 2006; revised 9
#' February, 2007 \cr
#' @keywords ~kwd1 ~kwd2
#' @examples
#'
#' #esSil(eset, clusters, covar_name = "covariate_name_of_interest")
#'
esSil <-
  function(es, clusters, covar_name = ''){
    ###########################
    ## Identify and draw silhouette widths for a clustering pattern.
    ## Requires package 'cluster'.
    ## Returns a data frame (one row per sample) with the cluster label and
    ## the squared-dissimilarity silhouette width, with column names
    ## prefixed by covar_name (or by the name of the `clusters` expression
    ## when covar_name is empty).
    ###########################
    # Pairwise dissimilarities between samples (columns of the expression
    # matrix), then their squares -- the silhouette is computed on squares.
    dissE = daisy(t(exprs(es)))
    dissEsqr = dissE^2
    # FIX: the unsquared silhouette `sk = silhouette(clusters, dissE)` was
    # computed here but never used (its plot was commented out); the dead
    # O(n^2) computation has been removed.
    sk2 = silhouette(as.integer(clusters), dissEsqr)
    plot(sk2, col = c("blue"))
    # Prefix the silhouette column names with the covariate name, or with
    # the variable name passed as `clusters` if no covariate name is given.
    name <- as.character(all.vars(substitute(clusters)))
    if (covar_name == '')
    {
      colnames(sk2) = lapply(colnames(sk2), function(x) paste0(name, '_', x)) }
    else
    {
      colnames(sk2) = lapply(colnames(sk2), function(x) paste0(covar_name, '_', x))
    }
    # Keep the cluster assignment (column 1) and silhouette width (column 3).
    out = data.frame(sk2[,c(1)], sk2[,c(3)])
    rownames(out) = sampleNames(es)
    colnames(out) = c(colnames(sk2)[1], colnames(sk2)[3])
    out
  }
|
/r/esSil.R
|
no_license
|
genomelab/esFunctions
|
R
| false
| false
| 1,633
|
r
|
#' esSil
#'
#' Identify and draw sil_width for a clustering pattern. It requires package
#' 'cluster'. It adds the NMF cluster and silhouette width in the es dataframe
#' (using covar_name + cluster or sil_width)
#'
#' @export
#' @param es expression set
#' @param clusters Number of clusters
#' @param covar_name covariate name
#' @note Requires package 'cluster'
#' @author Shahab Asgharzadeh
#' @references "An Introduction to Bioconductor's ExpressionSet Class" \cr Seth
#' Falcon, Martin Morgan, and Robert Gentleman \cr 6 October, 2006; revised 9
#' February, 2007 \cr
#' @keywords ~kwd1 ~kwd2
#' @examples
#'
#' #esSil(eset, clusters, covar_name = "covariate_name_of_interest")
#'
esSil <-
  function(es, clusters, covar_name = ''){
    ###########################
    ## Identify and draw silhouette widths for a clustering pattern.
    ## Requires package 'cluster'.
    ## Returns a data frame (one row per sample) with the cluster label and
    ## the squared-dissimilarity silhouette width, with column names
    ## prefixed by covar_name (or by the name of the `clusters` expression
    ## when covar_name is empty).
    ###########################
    # Pairwise dissimilarities between samples (columns of the expression
    # matrix), then their squares -- the silhouette is computed on squares.
    dissE = daisy(t(exprs(es)))
    dissEsqr = dissE^2
    # FIX: the unsquared silhouette `sk = silhouette(clusters, dissE)` was
    # computed here but never used (its plot was commented out); the dead
    # O(n^2) computation has been removed.
    sk2 = silhouette(as.integer(clusters), dissEsqr)
    plot(sk2, col = c("blue"))
    # Prefix the silhouette column names with the covariate name, or with
    # the variable name passed as `clusters` if no covariate name is given.
    name <- as.character(all.vars(substitute(clusters)))
    if (covar_name == '')
    {
      colnames(sk2) = lapply(colnames(sk2), function(x) paste0(name, '_', x)) }
    else
    {
      colnames(sk2) = lapply(colnames(sk2), function(x) paste0(covar_name, '_', x))
    }
    # Keep the cluster assignment (column 1) and silhouette width (column 3).
    out = data.frame(sk2[,c(1)], sk2[,c(3)])
    rownames(out) = sampleNames(es)
    colnames(out) = c(colnames(sk2)[1], colnames(sk2)[3])
    out
  }
|
# WaveLightGLS
#
# Figure 1 & 2
#---------------------------------
# Collect the per-bird .RData result files produced upstream, keeping only
# files whose name contains "SUDA".
list.data = list.files("./results", pattern=".RData", all.files=FALSE,full.names=TRUE)
list.data <- list.data[grepl("SUDA", list.data)]
twilight.dev <- list()
## Calibration function
## Calibration: search a +/-10 degree window around zenith.start (step 0.2)
## for the zenith angle whose threshold path has the smallest median
## absolute latitude error relative to the known latitude `lat`.
calib <- function(twl_c, lat, zenith.start = 96) {
  candidates <- seq(zenith.start - 10, zenith.start + 10, by = 0.2)
  lat_error <- vapply(candidates, function(z) {
    path <- thresholdPath(twl_c$Twilight, twl_c$Rise, zenith = z)$x
    median(abs(path[, 2] - lat))
  }, numeric(1))
  candidates[which.min(lat_error)]
}
###################################
## Calculation of Twilight Error ##
###################################
# Coordinates of the breeding site (presumably Fernando de Noronha, given
# the "fdn" variable names used below -- TODO confirm).
lon.breed <- -32.4255
lat.breed <- -3.8496
# Expected sunrise/sunset times at the breeding site, one pair per day of
# the deployment period, using a solar zenith angle of 93 degrees.
tm <- seq(as.POSIXct("2017-05-04", tz = "GMT"), as.POSIXct("2018-04-23", tz = "GMT"), by = "day")
rise <- rep(c(TRUE, FALSE), length(tm))
c.dat <- data.frame(Twilight = twilight(rep(tm, each = 2), lon = lon.breed, lat = lat.breed,
                                        rise = rise, zenith = 93), Rise = rise)
# Calibration window: twilights between these dates are used below to fit
# the twilight-error distribution.
calib.tm <- c(as.POSIXct("2017-05-10", tz = "GMT"), as.POSIXct("2017-06-15", tz = "GMT"))
# Accumulators filled by the per-bird loop that follows.
CALIBRATION <- NULL
DATA <- NULL
i = 1
for (data in list.data){
load(data)
### CALIBRATION
twl <- geolight.convert(birdDD$days$tFirst, birdDD$days$tSecond, birdDD$days$type)
twl_calib <- subset(twl, Twilight>=calib.tm[1] & Twilight<=calib.tm[2])
sun <- solar(twl_calib[,1])
z <- refracted(zenith(sun, lon.breed, lat.breed))
twl_t <- twilight(twl_calib[,1], lon.breed, lat.breed, rise = twl_calib[,2], zenith = max(z)+0.1)
twl_dev <- ifelse(twl_calib$Rise, as.numeric(difftime(twl_calib[,1], twl_t, units = "mins")),
as.numeric(difftime(twl_t, twl_calib[,1], units = "mins")))
png(paste0('./calibration/', birdGLS$ID, '.png'))
hist(twl_dev, main = birdGLS$ID, freq = F, breaks = 26)
seq <- seq(0, 80, length = 100)
fitml_ng <- fitdistr(twl_dev, "gamma")
lines(seq, dgamma(seq, fitml_ng$estimate[1], fitml_ng$estimate[2]), col = "firebrick", lwd = 3, lty = 2)
dev.off()
out <- data.frame(bird = birdGLS$ID, zenith.median = median(z), zenith.max = max(z),
shape = fitml_ng$estimate[1], scale = fitml_ng$estimate[2],
model = birdGLS$Model, twl_dev = twl_dev)
CALIBRATION <- rbind(CALIBRATION, out)
### ALL DEPLOYMENT
twl_dev_all0 <- twilight(twl[,1], lon.breed, lat.breed, rise = twl[,2], zenith = max(z)+0.1)
twl_dev_all <- ifelse(twl$Rise, as.numeric(difftime(twl[,1], twl_dev_all0, units = "mins")),
as.numeric(difftime(twl_dev_all0, twl[,1], units = "mins")))
zenith <- calib(twl_calib, lat.breed, 96)
crds <- thresholdPath(twl$Twilight, twl$Rise, zenith = zenith)
if(nrow(birdDD$activity)+1 == nrow(crds$x)){
act <- c(NA,birdDD$activity$mean)
} else{
act <- rep(NA, nrow(crds$x))
}
if(nrow(birdDD$temperature)+1 == nrow(crds$x)){
temp <- c(NA,birdDD$temperature$mean)
} else{
temp <- rep(NA, nrow(crds$x))
}
out <- data.frame(bird = birdGLS$ID, time = crds$time, zenithT = median(z), zenith = max(z),
tw_error = twl_dev_all, lon = crds$x[,1], lat = crds$x[,2],
act = act, temp = temp)
DATA <- rbind(DATA, out)
cat(i, ' out of ', length(list.data), '\n')
i = i+1
}
### TEMPERATURE ERROR AND DEVIATIONS
DATA$temp_fdn_sat <- getSSTPoint(path = "./data/METOFFICE-GLO-SST-L4-REP-OBS-SST_1590497114049.nc",
coord = matrix(rep(c(lon.breed, lat.breed), nrow(DATA)), ncol = 2, byrow = TRUE),
time = DATA$time) -273.15
DATA$temp_sat <- getSSTPoint(path = "./data/METOFFICE-GLO-SST-L4-REP-OBS-SST_1590497114049.nc",
coord = DATA[,c("lon", "lat")],
time = DATA$time) -273.15
diff <- DATA$temp - DATA$temp_fdn_sat
sel <- DATA$time >= calib.tm[1] & DATA$time <= calib.tm[2]
nights <- hour(DATA$time) < 12
# temp_dt <- getSSTPoint(path = "./data/METOFFICE-GLO-SST-L4-REP-OBS-SST_1590497114049.nc",
# coord = matrix(rep(c(lon.breed, lat.breed), 354), ncol = 2, byrow = TRUE),
# time = seq(min(DATA$time), max(DATA$time), by = 'days')) -273.15
#
# plot(temp_dt)
### FIGURE 1
png('./figure/Figure_1.png', width = 760, height = 750)
par(mfrow = c(3,2), mar = c(5,4,1,2))
dev <- CALIBRATION$twl_dev
hist(dev, xlim = c(-15, 40), ylim = c(0, 0.1), breaks = seq(-500, 1500, by = 2.5), freq = F,
main = "", col = "grey",
xlab = "", ylab ="density")
mtext(side=1, line=2, at=5, adj=0, cex=0.8, "(minutes)")
mtext(side=3, line=-2, at=40, adj=1, cex=1, "(a) Twilight Deviation")
mtext(side=3, line=-3.5, at=40, adj=1, cex=0.9, "Calibration period")
gamma <- unique(CALIBRATION[,c("shape", "scale", "model")])
# for ( i in 1:nrow(gamma)){
# xx <- seq(0, 40, by = 0.1)
# yy <- dgamma(xx, gamma$shape[i], gamma$scale[i])
# lines(xx, yy, col = "grey", lty = 2)
# i = i+1
# }
seq <- seq(0, 40, length = 100)
fit_g <- fitdistr(dev, "gamma")
lines(seq, dgamma(seq, fit_g$estimate[1], fit_g$estimate[2]), col = "firebrick", lwd = 2.5, lty = 2)
seq_ = seq(-2,2,by = 0.01)
hist(diff[sel], freq = F, xlim = c(-3, 3), ylim = c(0,2), breaks = seq(-10, 10, by = 0.25),
main = "", col = "grey",
xlab = "", ylab ="density")
mtext(side=1, line=2, at=0, adj=0, cex=0.8, "(celsius)")
mtext(side=3, line=-2, at=3, adj=1, cex=1, "(b) Temperature Deviation")
mtext(side=3, line=-3.5, at=3, adj=1, cex=0.9, "Calibration period")
lines(seq_, 1/1.75*(seq_>-0.25&seq_<1.5), col = "firebrick", lwd = 2.5, lty = 2)
hist(DATA$tw_error, xlim = c(-15, 40), ylim = c(0, 0.1), breaks = seq(-500, 1500, by = 2.5), freq = F,
main = "", col = "#9ECAE1",
xlab = "", ylab="density")
mtext(side=1, line=2, at=5, adj=0, cex=0.8, "(minutes)")
mtext(side=3, line=-2, at=40, adj=1, cex=1, "(c) Twilight Deviation")
mtext(side=3, line=-3.5, at=40, adj=1, cex=0.9, "Year-round data")
lines(seq, dgamma(seq, fit_g$estimate[1], fit_g$estimate[2]), col = "firebrick", lwd = 2.5, lty = 2)
hist(diff, freq = F, xlim = c(-3, 3), ylim = c(0,2), breaks = seq(-10, 10, by = 0.25),
main = "", col = "#9ECAE1",
xlab = "", ylab="density")
mtext(side=3, line=-2, at=3, adj=1, cex=1, "(d) Temperature Deviation")
mtext(side=3, line=-3.5, at=3, adj=1, cex=0.9, "Year-round data")
mtext(side=1, line=2, at=0, adj=0, cex=0.8, "(celsius)")
lines(seq_, 1/1.75*(seq_>-0.25&seq_<1.5), col = "firebrick", lwd = 2.5, lty = 2)
### HISTOGRAMS WITH HIGH ACTIVITY
hist(DATA$tw_error[DATA$act>150], xlim = c(-15, 40), ylim = c(0, 0.1), breaks = seq(-500, 1500, by = 2.5), freq = F,
main = "", col = "#FDAE6B",
xlab = "", ylab="density")
mtext(side=1, line=2, at=5, adj=0, cex=0.8, "(minutes)")
mtext(side=3, line=-2, at=40, adj=1, cex=1, "(e) Twilight Deviation")
mtext(side=3, line=-3.5, at=40, adj=1, cex=0.9, "Time spent in water >75%")
lines(seq, dgamma(seq, fit_g$estimate[1], fit_g$estimate[2]), col = "firebrick", lwd = 2.5, lty = 2)
hist(diff[DATA$act>150], freq = F, xlim = c(-3, 3), ylim = c(0,2), breaks = seq(-10, 10, by = 0.25),
main = "", col = "#FDAE6B",
xlab = "", ylab="density")
mtext(side=3, line=-2, at=3, adj=1, cex=1, "(f) Temperature Deviation")
mtext(side=3, line=-3.5, at=3, adj=1, cex=0.9, "Time spent in water >75%")
mtext(side=1, line=2, at=0, adj=0, cex=0.8, "(celsius)")
lines(seq_, 1/1.75*(seq_>-0.25&seq_<1.5), col = "firebrick", lwd = 2.5, lty = 2)
dev.off()
### FIGURE 2
# load graphic data
data(wrld_simpl)
wrld_simpl@data$id <- wrld_simpl@data$NAME
world <- fortify(wrld_simpl)
eez<-readOGR("./data/World_EEZ.shp", "World_EEZ")
EEZ <- fortify(eez)
EEZ_br <- EEZ[which(EEZ$id==163 & !EEZ$hole),]
### YEAR-ROUND ERROR RANGE AT FDN
days <- seq(min(as.Date(DATA$time)), max(as.Date(DATA$time)), by = "days")
days_rise <- twilight(days, lon.breed, lat.breed,
rise = TRUE, zenith = 96, iters = 3)
days_fall <- twilight(days, lon.breed, lat.breed,
rise = FALSE, zenith = 96, iters = 3)
twilights <- data.frame(Twilight = c(days_rise, days_fall),
Rise = c(rep(TRUE, length(days_rise)), rep(FALSE, length(days_fall))))
twilights <- twilights[order(twilights$Twilight),]
# Monte-Carlo estimate of the year-round positional error footprint at the
# colony: perturb the theoretical twilights with the fitted gamma error
# model, re-calibrate the zenith, and re-estimate positions 100 times.
# Results are collected in a preallocated list and bound once at the end
# (growing COORD with rbind() inside the loop is O(n^2) in copies).
n_sim <- 100
sim_out <- vector("list", n_sim)
for (k in seq_len(n_sim)) {
  tw <- twilights
  # Twilight error delays observed sunrises and advances observed sunsets.
  tw$Twilight[tw$Rise] <- tw$Twilight[tw$Rise] +
    seconds(round(60 * rgamma(sum(tw$Rise), fit_g$estimate[1], fit_g$estimate[2])))
  tw$Twilight[!tw$Rise] <- tw$Twilight[!tw$Rise] -
    seconds(round(60 * rgamma(sum(!tw$Rise), fit_g$estimate[1], fit_g$estimate[2])))
  zenith <- calib(tw, lat.breed, 96)
  crds <- thresholdPath(tw$Twilight, tw$Rise, zenith = zenith)
  out <- data.frame(lon = crds$x[, 1], lat = crds$x[, 2], time = crds$time)
  # Satellite SST at the simulated positions (Kelvin -> Celsius).
  out$temp_sat <- getSSTPoint(path = "./data/METOFFICE-GLO-SST-L4-REP-OBS-SST_1590497114049.nc",
                              coord = crds$x,
                              time = crds$time) - 273.15
  ### TEMPERATURE ERROR AND DEVIATIONS
  # Satellite SST at the colony itself, used later for deviation filtering.
  out$temp_fdn_sat <- getSSTPoint(path = "./data/METOFFICE-GLO-SST-L4-REP-OBS-SST_1590497114049.nc",
                                  coord = matrix(rep(c(lon.breed, lat.breed), nrow(out)), byrow = TRUE, ncol = 2),
                                  time = crds$time) - 273.15
  sim_out[[k]] <- out
}
COORD <- do.call(rbind, sim_out)
# NOTE(review): `diff` is redefined here as logger-vs-satellite SST at the
# *estimated* position (earlier it was vs. colony SST); it also masks
# base::diff() for the rest of the script.
diff <- DATA$temp - DATA$temp_sat
# Kernel-density maps: top row unfiltered, bottom row filtered by SST
# deviation within (-0.25, 1.5) degC; columns = simulated error range,
# all positions, and wet (act > 150) positions.
map1_th <- plot.kde.coord(COORD[,c("lon", "lat")], H=2, N=100, alpha = 0, eez = EEZ_br,
title = "(a) Error Range Estimation", col = "firebrick")
map2_th <- plot.kde.coord(COORD[abs(COORD$temp_sat-COORD$temp_fdn_sat)<=0.5,c("lon", "lat")],
H=2, N=100, alpha = 0, eez = EEZ_br,
title = "(b) Error Range Estimation", col = "firebrick")
map1 <- plot.kde.coord(DATA[,c("lon", "lat")], H=2, N=100, alpha = 0.1, eez = EEZ_br,
title = "(c) Positions Distribution", col = "#9ECAE1")
map2 <- plot.kde.coord(DATA[ diff<=1.5 & diff >= -0.25,c("lon", "lat")], H=2, N=100, alpha = 0.1, eez = EEZ_br,
title = "(d) Positions Distribution", col = "#9ECAE1")
map1_act <- plot.kde.coord(DATA[which(DATA$act > 150),c("lon", "lat")], H=2, N=100, alpha = 0.1,
eez = EEZ_br, title = "(e) Wet Positions Distribution", col = "#FDAE6B")
map2_act <- plot.kde.coord(DATA[which(DATA$act > 150 & diff<=1.5 & diff >= -0.25),c("lon", "lat")], H=2, N=100,
alpha = 0.1, eez = EEZ_br,
title = "(f) Wet Positions Distribution", col = "#FDAE6B")
# Extract a shared legend and assemble the 2x4 panel figure.
legend <- g_legend(map2_act)
png("./figure/Figure_2.png", width = 1270, height = 800)
grid.arrange(map1_th + theme(legend.position = 'none'),
map1+ theme(legend.position = 'none'),
map1_act+ theme(legend.position = 'none'),
legend,
map2_th+ theme(legend.position = 'none'),
map2+ theme(legend.position = 'none'),
map2_act+ theme(legend.position = 'none'), ncol=4, nrow = 2,
widths = c(2/7, 2/7, 2/7, 1/7))
dev.off()
## Bhattacharyya coefficient
# Overlap between the simulated error-range density (COORD) and the
# observed position density (DATA), both evaluated on the same KDE grid.
N = 100
H = 2
CRDS <- COORD[,c("lon", "lat")]
# CRDS <- COORD[abs(COORD$temp_sat-COORD$temp_fdn_sat)<=0.5,c("lon", "lat")]
CRDS <- CRDS[!is.na(rowSums(CRDS)),]
f1 <- with(CRDS, kde2d(CRDS[,1], CRDS[,2], n = N, h = H, lims = c(-60, 0, -30, 20)))
CRDS <- DATA[,c("lon", "lat")]
# CRDS <- DATA[abs(diff)<=0.5,c("lon", "lat")]
CRDS <- CRDS[!is.na(rowSums(CRDS)),]
f2 <- with(CRDS, kde2d(CRDS[,1], CRDS[,2], n = N, h = H, lims = c(-60, 0, -30, 20)))
# BC = sum(sqrt(p * q)) over grid cells of the two normalized densities.
sum(sqrt(f1$z*f2$z/sum(f1$z)/sum(f2$z)))
###
# Mean/SD displacement of simulated positions from the colony, converted
# from degrees to km with a spherical-Earth arc length (R in km).
R = 6378
mean(abs(pi * R * (COORD$lon - lon.breed) / 180))
sd(abs(pi * R * (COORD$lon - lon.breed) / 180))
mean(abs(pi * R * (COORD$lat - lat.breed) / 180))
sd(abs(pi * R * (COORD$lat - lat.breed) / 180))
# Same statistics after filtering by SST deviation <= 0.5 degC.
mean(abs(pi * R * (COORD$lon[abs(COORD$temp_sat-COORD$temp_fdn_sat)<=0.5] - lon.breed) / 180), na.rm = TRUE)
sd(abs(pi * R * (COORD$lon[abs(COORD$temp_sat-COORD$temp_fdn_sat)<=0.5] - lon.breed) / 180), na.rm = TRUE)
mean(abs(pi * R * (COORD$lat[abs(COORD$temp_sat-COORD$temp_fdn_sat)<=0.5] - lat.breed) / 180), na.rm = TRUE)
sd(abs(pi * R * (COORD$lat[abs(COORD$temp_sat-COORD$temp_fdn_sat)<=0.5] - lat.breed) / 180), na.rm = TRUE)
|
/Figure_1_and_2.R
|
no_license
|
AmedeeRoy/WaveLightGLS
|
R
| false
| false
| 12,264
|
r
|
# WaveLightGLS
#
# Figure 1 & 2
#---------------------------------
# Collect per-bird .RData result files, keeping only SUDA loggers.
list.data = list.files("./results", pattern=".RData", all.files=FALSE,full.names=TRUE)
list.data <- list.data[grepl("SUDA", list.data)]
twilight.dev <- list()
## Calibration function
# Calibrate the solar zenith angle for threshold geolocation.
# Scans candidate zeniths in [zenith.start - 10, zenith.start + 10] and
# returns the one whose estimated path latitudes deviate least (median
# absolute deviation) from the known latitude `lat`.
calib <- function(twl_c, lat, zenith.start = 96) {
  candidates <- seq(zenith.start - 10, zenith.start + 10, by = 0.2)
  med_dev <- vapply(candidates, function(z) {
    path <- thresholdPath(twl_c$Twilight, twl_c$Rise, zenith = z)$x
    median(abs(path[, 2] - lat))
  }, numeric(1))
  candidates[which.min(med_dev)]
}
###################################
## Calculation of Twilight Error ##
###################################
# Breeding colony coordinates (referred to as "fdn" elsewhere in this
# script -- presumably Fernando de Noronha; confirm with study metadata).
lon.breed <- -32.4255
lat.breed <- -3.8496
# Theoretical twilight table over the deployment period (one rise + one
# fall per day at zenith 93).
tm <- seq(as.POSIXct("2017-05-04", tz = "GMT"), as.POSIXct("2018-04-23", tz = "GMT"), by = "day")
rise <- rep(c(TRUE, FALSE), length(tm))
c.dat <- data.frame(Twilight = twilight(rep(tm, each = 2), lon = lon.breed, lat = lat.breed,
rise = rise, zenith = 93), Rise = rise)
# Window during which birds are assumed at the colony (calibration period).
calib.tm <- c(as.POSIXct("2017-05-10", tz = "GMT"), as.POSIXct("2017-06-15", tz = "GMT"))
CALIBRATION <- NULL
DATA <- NULL
i = 1
# Per-bird processing: load the logger's RData (provides birdDD, birdGLS),
# estimate twilight errors over the calibration window, fit a gamma error
# model, then estimate year-round positions with a calibrated zenith.
for (data in list.data){
load(data)
### CALIBRATION
twl <- geolight.convert(birdDD$days$tFirst, birdDD$days$tSecond, birdDD$days$type)
twl_calib <- subset(twl, Twilight>=calib.tm[1] & Twilight<=calib.tm[2])
sun <- solar(twl_calib[,1])
z <- refracted(zenith(sun, lon.breed, lat.breed))
# Deviation (minutes) between observed and predicted twilights at the
# colony; sign convention: positive = late sunrise / early sunset.
twl_t <- twilight(twl_calib[,1], lon.breed, lat.breed, rise = twl_calib[,2], zenith = max(z)+0.1)
twl_dev <- ifelse(twl_calib$Rise, as.numeric(difftime(twl_calib[,1], twl_t, units = "mins")),
as.numeric(difftime(twl_t, twl_calib[,1], units = "mins")))
# Diagnostic histogram with the fitted gamma density per bird.
png(paste0('./calibration/', birdGLS$ID, '.png'))
hist(twl_dev, main = birdGLS$ID, freq = F, breaks = 26)
seq <- seq(0, 80, length = 100)
fitml_ng <- fitdistr(twl_dev, "gamma")
lines(seq, dgamma(seq, fitml_ng$estimate[1], fitml_ng$estimate[2]), col = "firebrick", lwd = 3, lty = 2)
dev.off()
out <- data.frame(bird = birdGLS$ID, zenith.median = median(z), zenith.max = max(z),
shape = fitml_ng$estimate[1], scale = fitml_ng$estimate[2],
model = birdGLS$Model, twl_dev = twl_dev)
CALIBRATION <- rbind(CALIBRATION, out)
### ALL DEPLOYMENT
twl_dev_all0 <- twilight(twl[,1], lon.breed, lat.breed, rise = twl[,2], zenith = max(z)+0.1)
twl_dev_all <- ifelse(twl$Rise, as.numeric(difftime(twl[,1], twl_dev_all0, units = "mins")),
as.numeric(difftime(twl_dev_all0, twl[,1], units = "mins")))
zenith <- calib(twl_calib, lat.breed, 96)
crds <- thresholdPath(twl$Twilight, twl$Rise, zenith = zenith)
# Activity/temperature series are aligned to positions only when lengths
# match (positions = intervals + 1); otherwise filled with NA.
if(nrow(birdDD$activity)+1 == nrow(crds$x)){
act <- c(NA,birdDD$activity$mean)
} else{
act <- rep(NA, nrow(crds$x))
}
if(nrow(birdDD$temperature)+1 == nrow(crds$x)){
temp <- c(NA,birdDD$temperature$mean)
} else{
temp <- rep(NA, nrow(crds$x))
}
out <- data.frame(bird = birdGLS$ID, time = crds$time, zenithT = median(z), zenith = max(z),
tw_error = twl_dev_all, lon = crds$x[,1], lat = crds$x[,2],
act = act, temp = temp)
DATA <- rbind(DATA, out)
cat(i, ' out of ', length(list.data), '\n')
i = i+1
}
### TEMPERATURE ERROR AND DEVIATIONS
# Satellite SST (Kelvin -> Celsius) at the colony and at each estimated
# position, matched to the position timestamps.
DATA$temp_fdn_sat <- getSSTPoint(path = "./data/METOFFICE-GLO-SST-L4-REP-OBS-SST_1590497114049.nc",
coord = matrix(rep(c(lon.breed, lat.breed), nrow(DATA)), ncol = 2, byrow = TRUE),
time = DATA$time) -273.15
DATA$temp_sat <- getSSTPoint(path = "./data/METOFFICE-GLO-SST-L4-REP-OBS-SST_1590497114049.nc",
coord = DATA[,c("lon", "lat")],
time = DATA$time) -273.15
# Logger-minus-colony SST deviation (NOTE: masks base::diff()).
diff <- DATA$temp - DATA$temp_fdn_sat
# Rows falling within the calibration window, and a morning/night flag.
sel <- DATA$time >= calib.tm[1] & DATA$time <= calib.tm[2]
nights <- hour(DATA$time) < 12
# temp_dt <- getSSTPoint(path = "./data/METOFFICE-GLO-SST-L4-REP-OBS-SST_1590497114049.nc",
#                         coord = matrix(rep(c(lon.breed, lat.breed), 354), ncol = 2, byrow = TRUE),
#                         time = seq(min(DATA$time), max(DATA$time), by = 'days')) -273.15
#
# plot(temp_dt)
### FIGURE 1
# Six-panel figure: twilight (left column) and temperature (right column)
# deviations for the calibration period (a, b), the full year (c, d), and
# wet fixes only (e, f). Each panel overlays the fitted error model.
png('./figure/Figure_1.png', width = 760, height = 750)
par(mfrow = c(3,2), mar = c(5,4,1,2))
dev <- CALIBRATION$twl_dev
hist(dev, xlim = c(-15, 40), ylim = c(0, 0.1), breaks = seq(-500, 1500, by = 2.5), freq = F,
main = "", col = "grey",
xlab = "", ylab ="density")
mtext(side=1, line=2, at=5, adj=0, cex=0.8, "(minutes)")
mtext(side=3, line=-2, at=40, adj=1, cex=1, "(a) Twilight Deviation")
mtext(side=3, line=-3.5, at=40, adj=1, cex=0.9, "Calibration period")
gamma <- unique(CALIBRATION[,c("shape", "scale", "model")])
# for ( i in 1:nrow(gamma)){
# xx <- seq(0, 40, by = 0.1)
# yy <- dgamma(xx, gamma$shape[i], gamma$scale[i])
# lines(xx, yy, col = "grey", lty = 2)
# i = i+1
# }
# Pooled gamma fit over all birds' calibration-period deviations; fit_g is
# reused downstream for the error simulation.
seq <- seq(0, 40, length = 100)
fit_g <- fitdistr(dev, "gamma")
lines(seq, dgamma(seq, fit_g$estimate[1], fit_g$estimate[2]), col = "firebrick", lwd = 2.5, lty = 2)
seq_ = seq(-2,2,by = 0.01)
hist(diff[sel], freq = F, xlim = c(-3, 3), ylim = c(0,2), breaks = seq(-10, 10, by = 0.25),
main = "", col = "grey",
xlab = "", ylab ="density")
mtext(side=1, line=2, at=0, adj=0, cex=0.8, "(celsius)")
mtext(side=3, line=-2, at=3, adj=1, cex=1, "(b) Temperature Deviation")
mtext(side=3, line=-3.5, at=3, adj=1, cex=0.9, "Calibration period")
# Uniform acceptance window (-0.25, 1.5) degC, density 1/1.75.
lines(seq_, 1/1.75*(seq_>-0.25&seq_<1.5), col = "firebrick", lwd = 2.5, lty = 2)
hist(DATA$tw_error, xlim = c(-15, 40), ylim = c(0, 0.1), breaks = seq(-500, 1500, by = 2.5), freq = F,
main = "", col = "#9ECAE1",
xlab = "", ylab="density")
mtext(side=1, line=2, at=5, adj=0, cex=0.8, "(minutes)")
mtext(side=3, line=-2, at=40, adj=1, cex=1, "(c) Twilight Deviation")
mtext(side=3, line=-3.5, at=40, adj=1, cex=0.9, "Year-round data")
lines(seq, dgamma(seq, fit_g$estimate[1], fit_g$estimate[2]), col = "firebrick", lwd = 2.5, lty = 2)
hist(diff, freq = F, xlim = c(-3, 3), ylim = c(0,2), breaks = seq(-10, 10, by = 0.25),
main = "", col = "#9ECAE1",
xlab = "", ylab="density")
mtext(side=3, line=-2, at=3, adj=1, cex=1, "(d) Temperature Deviation")
mtext(side=3, line=-3.5, at=3, adj=1, cex=0.9, "Year-round data")
mtext(side=1, line=2, at=0, adj=0, cex=0.8, "(celsius)")
lines(seq_, 1/1.75*(seq_>-0.25&seq_<1.5), col = "firebrick", lwd = 2.5, lty = 2)
### HISTOGRAMS WITH HIGH ACTIVITY
hist(DATA$tw_error[DATA$act>150], xlim = c(-15, 40), ylim = c(0, 0.1), breaks = seq(-500, 1500, by = 2.5), freq = F,
main = "", col = "#FDAE6B",
xlab = "", ylab="density")
mtext(side=1, line=2, at=5, adj=0, cex=0.8, "(minutes)")
mtext(side=3, line=-2, at=40, adj=1, cex=1, "(e) Twilight Deviation")
mtext(side=3, line=-3.5, at=40, adj=1, cex=0.9, "Time spent in water >75%")
lines(seq, dgamma(seq, fit_g$estimate[1], fit_g$estimate[2]), col = "firebrick", lwd = 2.5, lty = 2)
hist(diff[DATA$act>150], freq = F, xlim = c(-3, 3), ylim = c(0,2), breaks = seq(-10, 10, by = 0.25),
main = "", col = "#FDAE6B",
xlab = "", ylab="density")
mtext(side=3, line=-2, at=3, adj=1, cex=1, "(f) Temperature Deviation")
mtext(side=3, line=-3.5, at=3, adj=1, cex=0.9, "Time spent in water >75%")
mtext(side=1, line=2, at=0, adj=0, cex=0.8, "(celsius)")
lines(seq_, 1/1.75*(seq_>-0.25&seq_<1.5), col = "firebrick", lwd = 2.5, lty = 2)
dev.off()
### FIGURE 2
# load graphic data
data(wrld_simpl)
wrld_simpl@data$id <- wrld_simpl@data$NAME
world <- fortify(wrld_simpl)
# Exclusive Economic Zone polygons; id 163 with holes removed is kept as
# EEZ_br (presumably the Brazilian EEZ -- confirm against the shapefile).
eez<-readOGR("./data/World_EEZ.shp", "World_EEZ")
EEZ <- fortify(eez)
EEZ_br <- EEZ[which(EEZ$id==163 & !EEZ$hole),]
### YEAR-ROUND ERROR RANGE AT FDN
# Theoretical sunrise/sunset times at the breeding colony for each tracked
# day; these are later perturbed to simulate positional error.
days <- seq(min(as.Date(DATA$time)), max(as.Date(DATA$time)), by = "days")
days_rise <- twilight(days, lon.breed, lat.breed,
rise = TRUE, zenith = 96, iters = 3)
days_fall <- twilight(days, lon.breed, lat.breed,
rise = FALSE, zenith = 96, iters = 3)
twilights <- data.frame(Twilight = c(days_rise, days_fall),
Rise = c(rep(TRUE, length(days_rise)), rep(FALSE, length(days_fall))))
twilights <- twilights[order(twilights$Twilight),]
# Monte-Carlo estimate of the year-round positional error footprint at the
# colony: perturb the theoretical twilights with the fitted gamma error
# model, re-calibrate the zenith, and re-estimate positions 100 times.
# Results are collected in a preallocated list and bound once at the end
# (growing COORD with rbind() inside the loop is O(n^2) in copies).
n_sim <- 100
sim_out <- vector("list", n_sim)
for (k in seq_len(n_sim)) {
  tw <- twilights
  # Twilight error delays observed sunrises and advances observed sunsets.
  tw$Twilight[tw$Rise] <- tw$Twilight[tw$Rise] +
    seconds(round(60 * rgamma(sum(tw$Rise), fit_g$estimate[1], fit_g$estimate[2])))
  tw$Twilight[!tw$Rise] <- tw$Twilight[!tw$Rise] -
    seconds(round(60 * rgamma(sum(!tw$Rise), fit_g$estimate[1], fit_g$estimate[2])))
  zenith <- calib(tw, lat.breed, 96)
  crds <- thresholdPath(tw$Twilight, tw$Rise, zenith = zenith)
  out <- data.frame(lon = crds$x[, 1], lat = crds$x[, 2], time = crds$time)
  # Satellite SST at the simulated positions (Kelvin -> Celsius).
  out$temp_sat <- getSSTPoint(path = "./data/METOFFICE-GLO-SST-L4-REP-OBS-SST_1590497114049.nc",
                              coord = crds$x,
                              time = crds$time) - 273.15
  ### TEMPERATURE ERROR AND DEVIATIONS
  # Satellite SST at the colony itself, used later for deviation filtering.
  out$temp_fdn_sat <- getSSTPoint(path = "./data/METOFFICE-GLO-SST-L4-REP-OBS-SST_1590497114049.nc",
                                  coord = matrix(rep(c(lon.breed, lat.breed), nrow(out)), byrow = TRUE, ncol = 2),
                                  time = crds$time) - 273.15
  sim_out[[k]] <- out
}
COORD <- do.call(rbind, sim_out)
# NOTE(review): `diff` is redefined here as logger-vs-satellite SST at the
# *estimated* position (earlier it was vs. colony SST); it also masks
# base::diff() for the rest of the script.
diff <- DATA$temp - DATA$temp_sat
# Kernel-density maps: top row unfiltered, bottom row filtered by SST
# deviation within (-0.25, 1.5) degC; columns = simulated error range,
# all positions, and wet (act > 150) positions.
map1_th <- plot.kde.coord(COORD[,c("lon", "lat")], H=2, N=100, alpha = 0, eez = EEZ_br,
title = "(a) Error Range Estimation", col = "firebrick")
map2_th <- plot.kde.coord(COORD[abs(COORD$temp_sat-COORD$temp_fdn_sat)<=0.5,c("lon", "lat")],
H=2, N=100, alpha = 0, eez = EEZ_br,
title = "(b) Error Range Estimation", col = "firebrick")
map1 <- plot.kde.coord(DATA[,c("lon", "lat")], H=2, N=100, alpha = 0.1, eez = EEZ_br,
title = "(c) Positions Distribution", col = "#9ECAE1")
map2 <- plot.kde.coord(DATA[ diff<=1.5 & diff >= -0.25,c("lon", "lat")], H=2, N=100, alpha = 0.1, eez = EEZ_br,
title = "(d) Positions Distribution", col = "#9ECAE1")
map1_act <- plot.kde.coord(DATA[which(DATA$act > 150),c("lon", "lat")], H=2, N=100, alpha = 0.1,
eez = EEZ_br, title = "(e) Wet Positions Distribution", col = "#FDAE6B")
map2_act <- plot.kde.coord(DATA[which(DATA$act > 150 & diff<=1.5 & diff >= -0.25),c("lon", "lat")], H=2, N=100,
alpha = 0.1, eez = EEZ_br,
title = "(f) Wet Positions Distribution", col = "#FDAE6B")
# Extract a shared legend and assemble the 2x4 panel figure.
legend <- g_legend(map2_act)
png("./figure/Figure_2.png", width = 1270, height = 800)
grid.arrange(map1_th + theme(legend.position = 'none'),
map1+ theme(legend.position = 'none'),
map1_act+ theme(legend.position = 'none'),
legend,
map2_th+ theme(legend.position = 'none'),
map2+ theme(legend.position = 'none'),
map2_act+ theme(legend.position = 'none'), ncol=4, nrow = 2,
widths = c(2/7, 2/7, 2/7, 1/7))
dev.off()
## Bhattacharyya coefficient
# Overlap between the simulated error-range density (COORD) and the
# observed position density (DATA), both evaluated on the same KDE grid.
N = 100
H = 2
CRDS <- COORD[,c("lon", "lat")]
# CRDS <- COORD[abs(COORD$temp_sat-COORD$temp_fdn_sat)<=0.5,c("lon", "lat")]
CRDS <- CRDS[!is.na(rowSums(CRDS)),]
f1 <- with(CRDS, kde2d(CRDS[,1], CRDS[,2], n = N, h = H, lims = c(-60, 0, -30, 20)))
CRDS <- DATA[,c("lon", "lat")]
# CRDS <- DATA[abs(diff)<=0.5,c("lon", "lat")]
CRDS <- CRDS[!is.na(rowSums(CRDS)),]
f2 <- with(CRDS, kde2d(CRDS[,1], CRDS[,2], n = N, h = H, lims = c(-60, 0, -30, 20)))
# BC = sum(sqrt(p * q)) over grid cells of the two normalized densities.
sum(sqrt(f1$z*f2$z/sum(f1$z)/sum(f2$z)))
###
# Mean/SD displacement of simulated positions from the colony, converted
# from degrees to km with a spherical-Earth arc length (R in km).
R = 6378
mean(abs(pi * R * (COORD$lon - lon.breed) / 180))
sd(abs(pi * R * (COORD$lon - lon.breed) / 180))
mean(abs(pi * R * (COORD$lat - lat.breed) / 180))
sd(abs(pi * R * (COORD$lat - lat.breed) / 180))
# Same statistics after filtering by SST deviation <= 0.5 degC.
mean(abs(pi * R * (COORD$lon[abs(COORD$temp_sat-COORD$temp_fdn_sat)<=0.5] - lon.breed) / 180), na.rm = TRUE)
sd(abs(pi * R * (COORD$lon[abs(COORD$temp_sat-COORD$temp_fdn_sat)<=0.5] - lon.breed) / 180), na.rm = TRUE)
mean(abs(pi * R * (COORD$lat[abs(COORD$temp_sat-COORD$temp_fdn_sat)<=0.5] - lat.breed) / 180), na.rm = TRUE)
sd(abs(pi * R * (COORD$lat[abs(COORD$temp_sat-COORD$temp_fdn_sat)<=0.5] - lat.breed) / 180), na.rm = TRUE)
|
\name{COMeantmp}
\alias{COMeantmp}
%- Also NEED an '\alias' for EACH other topic documented here.
\docType{data}
\title{
Mean Monthly Surface Temperature (Celsius) for Colorado, USA
}
\description{
Mean Monthly Surface Temperature at 10' latitude/longitude spatial resolution cropped to the spatial extent of Colorado, USA. Interpolated from a data set of station means for the period centered on 1961 to 1990.
}
\usage{data("COMeantmp")}
\format{
Formal class 'RasterBrick' [package "raster"] with 12 slots
}
\source{
http://www.cru.uea.ac.uk/data
}
\references{
New, M., Lister, D., Hulme, M., & Maken, I. (2002) A high-resolution data set of surface climate over global land areas. Climate Research, 21, 1-25.
}
\keyword{datasets}
\keyword{climate}
|
/man/COMeantmp.Rd
|
no_license
|
griffithdan/grassmap
|
R
| false
| false
| 775
|
rd
|
\name{COMeantmp}
\alias{COMeantmp}
%- Also NEED an '\alias' for EACH other topic documented here.
\docType{data}
\title{
Mean Monthly Surface Temperature (Celsius) for Colorado, USA
}
\description{
Mean Monthly Surface Temperature at 10' latitude/longitude spatial resolution cropped to the spatial extent of Colorado, USA. Interpolated from a data set of station means for the period centered on 1961 to 1990.
}
\usage{data("COMeantmp")}
\format{
Formal class 'RasterBrick' [package "raster"] with 12 slots
}
\source{
http://www.cru.uea.ac.uk/data
}
\references{
New, M., Lister, D., Hulme, M., & Maken, I. (2002) A high-resolution data set of surface climate over global land areas. Climate Research, 21, 1-25.
}
\keyword{datasets}
\keyword{climate}
|
# Auto-generated fuzz/valgrind harness: call an internal CNull sampler with
# a machine-generated argument list (extreme doubles in a 10x3 matrix) and
# print the structure of whatever comes back.
testlist <- list(m = NULL, repetitions = 0L, in_m = structure(c(2.31584307392677e+77, 9.53818252170339e+295, 1.22821294503235e+146, 4.12396251261199e-221, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), .Dim = c(10L, 3L)))
result <- do.call(CNull:::communities_individual_based_sampling_alpha,testlist)
str(result)
|
/CNull/inst/testfiles/communities_individual_based_sampling_alpha/AFL_communities_individual_based_sampling_alpha/communities_individual_based_sampling_alpha_valgrind_files/1615781178-test.R
|
no_license
|
akhikolla/updatedatatype-list2
|
R
| false
| false
| 348
|
r
|
# Auto-generated fuzz/valgrind harness: call an internal CNull sampler with
# a machine-generated argument list (extreme doubles in a 10x3 matrix) and
# print the structure of whatever comes back.
testlist <- list(m = NULL, repetitions = 0L, in_m = structure(c(2.31584307392677e+77, 9.53818252170339e+295, 1.22821294503235e+146, 4.12396251261199e-221, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), .Dim = c(10L, 3L)))
result <- do.call(CNull:::communities_individual_based_sampling_alpha,testlist)
str(result)
|
# h2o runit test: GLRM with "OneSparse" (orthogonal non-negative)
# regularization should recover an exact non-negative factorization
# train = X %*% Y with orthogonal factor columns/rows.
setwd(normalizePath(dirname(R.utils::commandArgs(asValues=TRUE)$"f")))
source('../../h2o-runit.R')
test.glrm.orthonnmf <- function(conn) {
# Build a rank-k non-negative training matrix with a known factorization.
m <- 1000; n <- 100; k <- 10
Log.info(paste("Uploading random uniform matrix with rows =", m, "and cols =", n))
Y <- matrix(runif(k*n), nrow = k, ncol = n)
X <- matrix(runif(m*k), nrow = m, ncol = k)
train <- X %*% Y
train.h2o <- as.h2o(conn, train)
Log.info("Run GLRM with orthogonal non-negative regularization on X, non-negative regularization on Y")
initY <- matrix(runif(k*n), nrow = k, ncol = n)
fitH2O <- h2o.glrm(train.h2o, init = initY, loss = "L2", regularization_x = "OneSparse", regularization_y = "NonNegative", gamma_x = 1, gamma_y = 1)
Log.info(paste("Iterations:", fitH2O@model$iterations, "\tFinal Objective:", fitH2O@model$objective))
# Recover the fitted factors: Y from archetypes, X from the loading frame.
fitY <- t(fitH2O@model$archetypes)
fitX <- h2o.getFrame(fitH2O@model$loading_key$name)
Log.info("Check that X and Y matrices are non-negative")
fitX.mat <- as.matrix(fitX)
expect_true(all(fitY >= 0))
expect_true(all(fitX.mat >= 0))
Log.info("Check that columns of X are orthogonal")
# !diag(nrow(XtX)) is a logical mask selecting all off-diagonal entries;
# OneSparse makes them exactly zero (exact == comparison is intentional).
XtX <- t(fitX.mat) %*% fitX.mat
expect_true(all(XtX[!diag(nrow(XtX))] == 0))
expect_equal(sum((train - fitX.mat %*% fitY)^2), fitH2O@model$objective)
Log.info("Run GLRM with orthogonal non-negative regularization on both X and Y")
fitH2O <- h2o.glrm(train.h2o, init = initY, loss = "L2", regularization_x = "OneSparse", regularization_y = "OneSparse", gamma_x = 1, gamma_y = 1)
Log.info(paste("Iterations:", fitH2O@model$iterations, "\tFinal Objective:", fitH2O@model$objective))
fitY <- t(fitH2O@model$archetypes)
fitX <- h2o.getFrame(fitH2O@model$loading_key$name)
Log.info("Check that X and Y matrices are non-negative")
fitX.mat <- as.matrix(fitX)
expect_true(all(fitY >= 0))
expect_true(all(fitX.mat >= 0))
Log.info("Check that columns of X are orthogonal")
XtX <- t(fitX.mat) %*% fitX.mat
expect_true(all(XtX[!diag(nrow(XtX))] == 0))
Log.info("Check that rows of Y are orthogonal")
YYt <- fitY %*% t(fitY)
expect_true(all(YYt[!diag(nrow(YYt))] == 0))
expect_equal(sum((train - fitX.mat %*% fitY)^2), fitH2O@model$objective)
testEnd()
}
doTest("GLRM Test: Orthogonal Non-negative Matrix Factorization", test.glrm.orthonnmf)
|
/h2o-r/tests/testdir_algos/glrm/runit_glrm_orthonnmf.R
|
permissive
|
mrgloom/h2o-3
|
R
| false
| false
| 2,301
|
r
|
# h2o runit test: GLRM with "OneSparse" (orthogonal non-negative)
# regularization should recover an exact non-negative factorization
# train = X %*% Y with orthogonal factor columns/rows.
setwd(normalizePath(dirname(R.utils::commandArgs(asValues=TRUE)$"f")))
source('../../h2o-runit.R')
test.glrm.orthonnmf <- function(conn) {
# Build a rank-k non-negative training matrix with a known factorization.
m <- 1000; n <- 100; k <- 10
Log.info(paste("Uploading random uniform matrix with rows =", m, "and cols =", n))
Y <- matrix(runif(k*n), nrow = k, ncol = n)
X <- matrix(runif(m*k), nrow = m, ncol = k)
train <- X %*% Y
train.h2o <- as.h2o(conn, train)
Log.info("Run GLRM with orthogonal non-negative regularization on X, non-negative regularization on Y")
initY <- matrix(runif(k*n), nrow = k, ncol = n)
fitH2O <- h2o.glrm(train.h2o, init = initY, loss = "L2", regularization_x = "OneSparse", regularization_y = "NonNegative", gamma_x = 1, gamma_y = 1)
Log.info(paste("Iterations:", fitH2O@model$iterations, "\tFinal Objective:", fitH2O@model$objective))
# Recover the fitted factors: Y from archetypes, X from the loading frame.
fitY <- t(fitH2O@model$archetypes)
fitX <- h2o.getFrame(fitH2O@model$loading_key$name)
Log.info("Check that X and Y matrices are non-negative")
fitX.mat <- as.matrix(fitX)
expect_true(all(fitY >= 0))
expect_true(all(fitX.mat >= 0))
Log.info("Check that columns of X are orthogonal")
# !diag(nrow(XtX)) is a logical mask selecting all off-diagonal entries;
# OneSparse makes them exactly zero (exact == comparison is intentional).
XtX <- t(fitX.mat) %*% fitX.mat
expect_true(all(XtX[!diag(nrow(XtX))] == 0))
expect_equal(sum((train - fitX.mat %*% fitY)^2), fitH2O@model$objective)
Log.info("Run GLRM with orthogonal non-negative regularization on both X and Y")
fitH2O <- h2o.glrm(train.h2o, init = initY, loss = "L2", regularization_x = "OneSparse", regularization_y = "OneSparse", gamma_x = 1, gamma_y = 1)
Log.info(paste("Iterations:", fitH2O@model$iterations, "\tFinal Objective:", fitH2O@model$objective))
fitY <- t(fitH2O@model$archetypes)
fitX <- h2o.getFrame(fitH2O@model$loading_key$name)
Log.info("Check that X and Y matrices are non-negative")
fitX.mat <- as.matrix(fitX)
expect_true(all(fitY >= 0))
expect_true(all(fitX.mat >= 0))
Log.info("Check that columns of X are orthogonal")
XtX <- t(fitX.mat) %*% fitX.mat
expect_true(all(XtX[!diag(nrow(XtX))] == 0))
Log.info("Check that rows of Y are orthogonal")
YYt <- fitY %*% t(fitY)
expect_true(all(YYt[!diag(nrow(YYt))] == 0))
expect_equal(sum((train - fitX.mat %*% fitY)^2), fitH2O@model$objective)
testEnd()
}
doTest("GLRM Test: Orthogonal Non-negative Matrix Factorization", test.glrm.orthonnmf)
|
# rpart "init" hook for LAD (least absolute deviation) regression trees.
# Validates the response, applies an optional offset, and returns the
# structure rpart expects: response vector, number of responses, and a
# node-summary function.
iLAD <- function(y, offset, parms, wt) {
  if (is.matrix(y) && ncol(y) > 1) {
    stop("Matrix response not allowed")
  }
  if (!missing(parms) && length(parms) > 0) {
    warning("parameter argument ignored")
  }
  if (length(offset)) {
    y <- y - offset
  }
  # Node summary shown by summary.rpart(): median label and mean absolute
  # deviation (total deviance divided by node weight).
  summarise_node <- function(yval, dev, wt, ylevel, digits) {
    paste0(" median=", format(signif(yval, digits)),
           ", LAD=", format(signif(dev / wt, digits)))
  }
  # Detach from this frame so the closure does not pin the whole fit.
  environment(summarise_node) <- .GlobalEnv
  list(y = c(y), parms = NULL, numresp = 1, numy = 1, summary = summarise_node)
}
# rpart "eval" hook: a node's label is the weighted median of its
# responses; its deviance is the weighted sum of absolute deviations
# from that median.
eLAD <- function(y, wt, parms) {
  center <- wmedian(y, wt)
  list(label = center, deviance = sum(wt * abs(y - center)))
}
#split
# rpart "split" hook: for each candidate split, goodness is derived from
# the summed weighted absolute deviations of the two children around their
# own weighted medians (lower total deviation => better split).
sLAD <- function(y, wt, x, parms, continuous) {
n <- length(y)
if (continuous) {
# sort_index() is 0-based (compiled helper); shift to R's 1-based indexing.
sy <- sort_index(y) + 1
y.sorted <- y[sy]
wt.sorted <- wt[sy]
sy.i <- sort_index(sy) + 1
medians <- getMedians(y.sorted, wt.sorted, sy.i)
# OpenMP variant only pays off for larger nodes.
if (n < 100) {
goodness <- getGoodness(y, wt, medians)
} else {
goodness <- getGoodnessOMP(y, wt, medians)
}
# NOTE(review): `2:n + n - 2` parses as (2:n) + n - 2, i.e. indices
# n..(2n-2) -- this assumes getMedians() returns left- and right-child
# medians concatenated (length 2(n-1)); confirm against the C++ source.
direction <- sign(medians[1:{n - 1}] - medians[2:n + n - 2])
} else {
# Categorical X variable
ux <- sort(unique(x))
medians <- sapply(ux, function(idx) {
filter <- (x == idx)
wmedian(y[filter], wt[filter])
})
# For anova splits, we can order the categories by their means
# then use the same code as for a non-categorical
ord <- order(medians)
n <- length(ord)
ux.ord <- ux[ord]
filters <- lapply(1:n, function(i) {x == ux.ord[i]})
# Deviations of the extreme single-category children.
lmedian <- sum(wt[filters[[1]]]*abs(y[filters[[1]]] - medians[ord[1]]))
rmedian <- sum(wt[filters[[n]]]*abs(y[filters[[n]]] - medians[ord[n]]))
if (n > 2) {
# For each cut point, pool categories 1..pos (left) / pos..n (right)
# and recompute the child's weighted median deviation.
lmedian <- c(lmedian, sapply(2:(n - 1), function(pos) {
filter <- (rowSums(do.call("cbind", filters[1:pos])) > 0)
sum(wt[filter]*abs(y[filter] - wmedian(y[filter], wt[filter])))
}))
rmedian <- c(sapply(2:(n - 1), function(pos) {
filter <- (rowSums(do.call("cbind", filters[pos:n])) > 0) #filter <- x %in% ux.ord[pos:n]
sum(wt[filter]*abs(y[filter] - wmedian(y[filter], wt[filter])))
}), rmedian)
}
goodness <- (lmedian + rmedian) / sum(wt)
direction <- ux.ord
}
# rpart treats larger goodness as better, so invert the deviation scale.
goodness <- max(goodness) - goodness
list(goodness = goodness, direction = direction)
}
# rpart "text" hook: label drawn at each node by text.rpart(); when
# use.n is TRUE the node size is appended on a second line.
tLAD <- function (yval, dev, wt, ylevel, digits, n, use.n) {
  label <- formatg(yval, digits)
  if (use.n) {
    label <- paste0(label, "\nn=", n)
  }
  label
}
#' 'rpart'-method: List of required functions for inducing 'rpart'-like LAD regression trees
#'
#' Pass this list as the \code{method} argument of \code{rpart()} to grow a
#' least-absolute-deviation regression tree (node label = weighted median,
#' deviance = weighted sum of absolute deviations).
#' @export
#' @examples
#' mystate <- data.frame(state.x77, region = state.region)
#' names(mystate) <- casefold(names(mystate)) #remove mixed case
#'
#' fit <- rpart(murder ~ ., data = mystate, minsplit = 10, method = LAD)
#' plot(fit); text(fit)
#'
LAD <- list(eval = eLAD, split = sLAD, init = iLAD, text = tLAD)
|
/R/method.R
|
no_license
|
cran/rpart.LAD
|
R
| false
| false
| 2,996
|
r
|
# rpart "init" hook for LAD (least absolute deviation) regression trees.
# Validates the response, applies an optional offset, and returns the
# structure rpart expects: response vector, number of responses, and a
# node-summary function.
iLAD <- function(y, offset, parms, wt) {
  if (is.matrix(y) && ncol(y) > 1) {
    stop("Matrix response not allowed")
  }
  if (!missing(parms) && length(parms) > 0) {
    warning("parameter argument ignored")
  }
  if (length(offset)) {
    y <- y - offset
  }
  # Node summary shown by summary.rpart(): median label and mean absolute
  # deviation (total deviance divided by node weight).
  summarise_node <- function(yval, dev, wt, ylevel, digits) {
    paste0(" median=", format(signif(yval, digits)),
           ", LAD=", format(signif(dev / wt, digits)))
  }
  # Detach from this frame so the closure does not pin the whole fit.
  environment(summarise_node) <- .GlobalEnv
  list(y = c(y), parms = NULL, numresp = 1, numy = 1, summary = summarise_node)
}
# rpart "eval" hook: a node's label is the weighted median of its
# responses; its deviance is the weighted sum of absolute deviations
# from that median.
eLAD <- function(y, wt, parms) {
  center <- wmedian(y, wt)
  list(label = center, deviance = sum(wt * abs(y - center)))
}
#split
# rpart "split" hook: for each candidate split, goodness is derived from
# the summed weighted absolute deviations of the two children around their
# own weighted medians (lower total deviation => better split).
sLAD <- function(y, wt, x, parms, continuous) {
n <- length(y)
if (continuous) {
# sort_index() is 0-based (compiled helper); shift to R's 1-based indexing.
sy <- sort_index(y) + 1
y.sorted <- y[sy]
wt.sorted <- wt[sy]
sy.i <- sort_index(sy) + 1
medians <- getMedians(y.sorted, wt.sorted, sy.i)
# OpenMP variant only pays off for larger nodes.
if (n < 100) {
goodness <- getGoodness(y, wt, medians)
} else {
goodness <- getGoodnessOMP(y, wt, medians)
}
# NOTE(review): `2:n + n - 2` parses as (2:n) + n - 2, i.e. indices
# n..(2n-2) -- this assumes getMedians() returns left- and right-child
# medians concatenated (length 2(n-1)); confirm against the C++ source.
direction <- sign(medians[1:{n - 1}] - medians[2:n + n - 2])
} else {
# Categorical X variable
ux <- sort(unique(x))
medians <- sapply(ux, function(idx) {
filter <- (x == idx)
wmedian(y[filter], wt[filter])
})
# For anova splits, we can order the categories by their means
# then use the same code as for a non-categorical
ord <- order(medians)
n <- length(ord)
ux.ord <- ux[ord]
filters <- lapply(1:n, function(i) {x == ux.ord[i]})
# Deviations of the extreme single-category children.
lmedian <- sum(wt[filters[[1]]]*abs(y[filters[[1]]] - medians[ord[1]]))
rmedian <- sum(wt[filters[[n]]]*abs(y[filters[[n]]] - medians[ord[n]]))
if (n > 2) {
# For each cut point, pool categories 1..pos (left) / pos..n (right)
# and recompute the child's weighted median deviation.
lmedian <- c(lmedian, sapply(2:(n - 1), function(pos) {
filter <- (rowSums(do.call("cbind", filters[1:pos])) > 0)
sum(wt[filter]*abs(y[filter] - wmedian(y[filter], wt[filter])))
}))
rmedian <- c(sapply(2:(n - 1), function(pos) {
filter <- (rowSums(do.call("cbind", filters[pos:n])) > 0) #filter <- x %in% ux.ord[pos:n]
sum(wt[filter]*abs(y[filter] - wmedian(y[filter], wt[filter])))
}), rmedian)
}
goodness <- (lmedian + rmedian) / sum(wt)
direction <- ux.ord
}
# rpart treats larger goodness as better, so invert the deviation scale.
goodness <- max(goodness) - goodness
list(goodness = goodness, direction = direction)
}
# rpart "text" hook: label drawn at each node by text.rpart(); when
# use.n is TRUE the node size is appended on a second line.
tLAD <- function (yval, dev, wt, ylevel, digits, n, use.n) {
  label <- formatg(yval, digits)
  if (use.n) {
    label <- paste0(label, "\nn=", n)
  }
  label
}
#' 'rpart'-method: List of required functions for inducing 'rpart'-like LAD regression trees
#'
#' Pass this list as the \code{method} argument of \code{rpart()} to grow a
#' least-absolute-deviation regression tree (node label = weighted median,
#' deviance = weighted sum of absolute deviations).
#' @export
#' @examples
#' mystate <- data.frame(state.x77, region = state.region)
#' names(mystate) <- casefold(names(mystate)) #remove mixed case
#'
#' fit <- rpart(murder ~ ., data = mystate, minsplit = 10, method = LAD)
#' plot(fit); text(fit)
#'
LAD <- list(eval = eLAD, split = sLAD, init = iLAD, text = tLAD)
|
# Check whether `python` can import every module in `modules`.
# Writes a one-import-per-line throwaway script and runs it; the
# interpreter's exit status tells us whether all imports succeeded.
python_has_modules <- function(python, modules) {
  script <- tempfile("reticulate-python-", fileext = ".py")
  writeLines(paste("import", modules), con = script)
  on.exit(unlink(script), add = TRUE)
  # Suppress interpreter output; only the exit status matters.
  status <- system2(python, shQuote(script), stdout = FALSE, stderr = FALSE)
  status == 0L
}
# Check whether `python` can import a single `module`.
# -E makes the interpreter ignore PYTHON* environment variables so the
# probe is not affected by the caller's environment.
python_has_module <- function(python, module) {
  exit_status <- system2(
    python,
    c("-E", "-c", shQuote(paste("import", module))),
    stdout = FALSE,
    stderr = FALSE
  )
  exit_status == 0L
}
# Query the version of the Python interpreter at `python`.
python_version <- function(python) {
  probe <- "import platform; print(platform.python_version())"
  raw <- system2(python, c("-E", "-c", shQuote(probe)), stdout = TRUE, stderr = FALSE)
  # Keep only digits, dots and dashes so pre-release suffixes such as
  # "3.12.0rc1" do not break numeric_version().
  numeric_version(gsub("[^0-9.-]", "", raw))
}
# Query `module.__version__` as reported by the interpreter at `python`.
python_module_version <- function(python, module) {
  probe <- sprintf("import %1$s; print(%1$s.__version__)", module)
  reported <- system2(python, c("-E", "-c", shQuote(probe)), stdout = TRUE, stderr = FALSE)
  numeric_version(reported)
}
# given the path to a python binary, or an environment path,
# try to find the path to the associated python binary, and
# figure out if it's a virtualenv, conda environment, or none
# Walks upward from `path` toward the filesystem root; the first directory
# that looks like a virtualenv or conda env wins. A plain python binary
# found along the way is remembered as a "system" fallback.
python_info <- function(path) {
path <- path.expand(path)
parent <- dirname(path)
# NOTE: we check for both 'python' and 'python3' because certain python
# installations might install one version of the binary but not the other.
#
# Some installations might not place Python within a 'Scripts' or 'bin'
# sub-directory, so look in the root directory too.
prefixes <- list(NULL, if (is_windows()) "Scripts" else "bin")
suffixes <- if (is_windows()) "python.exe" else c("python", "python3")
# placeholder for a discovered system python
systemPython <- NULL
while (path != parent) {
# check for virtual environment files
files <- c(
"pyvenv.cfg", # created by venv
file.path(prefixes[[2L]], "activate_this.py") # created by virtualenv
)
paths <- file.path(path, files)
virtualenv <- any(file.exists(paths))
# extra check that we aren't in a conda environment
condapath <- file.path(path, "condabin/conda")
if (file.exists(condapath))
virtualenv <- FALSE
if (virtualenv)
return(python_info_virtualenv(path))
# check for conda environment files
condaenv <- file.exists(file.path(path, "conda-meta"))
if (condaenv)
return(python_info_condaenv(path))
# check for python binary (implies a system install)
# we don't return immediately here because we might find
# as we traverse upwards that some of the expected virtualenv
# or condaenv files exist, so we just save the path and use
# it later if appropriate
if (is.null(systemPython)) {
for (prefix in prefixes) {
for (suffix in suffixes) {
bin <- paste(c(path, prefix, suffix), collapse = "/")
if (file.exists(bin)) {
systemPython <- bin
# NOTE(review): this `break` only exits the inner (suffix) loop, so a
# later prefix match can overwrite systemPython -- confirm intended.
break
}
}
}
}
# recurse
parent <- path
path <- dirname(path)
}
# if we found a system python, use that as the fallback
if (!is.null(systemPython))
return(python_info_system(dirname(systemPython), systemPython))
stopf("could not find a Python environment for %s", path)
}
# Build the info record for a virtualenv rooted at `path`: the python
# binary lives under Scripts/ on Windows and bin/ elsewhere.
python_info_virtualenv <- function(path) {
  bin <- if (is_windows()) "Scripts/python.exe" else "bin/python"
  list(
    python = file.path(path, bin),
    type = "virtualenv",
    root = path
  )
}
# Build the info record for a conda environment rooted at `path`.
python_info_condaenv <- function(path) {
  # Conda places python.exe at the env root on Windows, bin/python elsewhere.
  bin <- if (is_windows()) "python.exe" else "bin/python"
  list(
    python = file.path(path, bin),
    type = "conda",
    root = path,
    # conda binary that manages this environment (NULL if undiscoverable)
    conda = python_info_condaenv_find(path)
  )
}
# Locate the conda binary that manages the environment at `path`.
# Tries well-known locations first (condabin/, Scripts/, ArcGIS layout),
# then falls back to parsing the env's conda-meta/history file for the
# "cmd:" line recorded when the environment was created. Returns the
# candidate path (not guaranteed to exist) or NULL.
python_info_condaenv_find <- function(path) {
# first, check if we have a condabin
exe <- if (is_windows()) "conda.exe" else "conda"
conda <- file.path(path, "condabin", exe)
if (file.exists(conda))
return(conda)
if (is_windows()) {
# in Anaconda base env, conda.exe lives under Scripts
conda <- file.path(path, "Scripts", exe)
if (file.exists(conda))
return(conda)
# in ArcGIS env, conda.exe lives in a parent directory
conda <- file.path(path, "../..", "Scripts", exe)
conda <- normalizePath(conda, winslash = "/", mustWork = FALSE)
if (file.exists(conda))
return(conda)
}
# read history file
histpath <- file.path(path, "conda-meta/history")
if (!file.exists(histpath))
return(NULL)
history <- readLines(histpath, warn = FALSE)
# look for cmd line
pattern <- "^[[:space:]]*#[[:space:]]*cmd:[[:space:]]*"
lines <- grep(pattern, history, value = TRUE)
if (length(lines) == 0)
return(NULL)
# get path to conda script used
script <- sub("^#\\s+cmd: (.+)\\s+create\\s+.*", "\\1", lines[[1]])
# on Windows, a wrapper script is recorded in the history,
# so instead attempt to find the real conda binary
conda <- file.path(dirname(script), exe)
normalizePath(conda, winslash = "/", mustWork = FALSE)
}
# Build the info record for a plain (system) Python installation:
# `path` is the directory containing the binary, `python` the binary itself.
python_info_system <- function(path, python) {
  list(python = python, type = "system", root = path)
}
|
/R/python-tools.R
|
permissive
|
chainsawriot/reticulate
|
R
| false
| false
| 5,487
|
r
|
# Check whether the Python interpreter at `python` can import every module
# in `modules`. Writes one "import <module>" line per module to a temporary
# script, runs it, and returns TRUE iff the interpreter exits with status 0.
python_has_modules <- function(python, modules) {
  script <- tempfile("reticulate-python-", fileext = ".py")
  on.exit(unlink(script), add = TRUE)
  writeLines(paste("import", modules), con = script)
  exit_code <- system2(python, shQuote(script), stdout = FALSE, stderr = FALSE)
  exit_code == 0L
}
# Test whether `python` can import the single module `module`.
# Runs `python -E -c "import <module>"` (-E ignores PYTHON* environment
# variables) and returns TRUE iff the process exits with status 0.
python_has_module <- function(python, module) {
code <- paste("import", module)
args <- c("-E", "-c", shQuote(code))
status <- system2(python, args, stdout = FALSE, stderr = FALSE)
status == 0L
}
# Query the version of the Python interpreter at `python`.
# Evaluates platform.python_version() in a subprocess, strips any
# characters other than digits, '.' and '-' from the output, and returns
# the result as a numeric_version object.
python_version <- function(python) {
code <- "import platform; print(platform.python_version())"
args <- c("-E", "-c", shQuote(code))
output <- system2(python, args, stdout = TRUE, stderr = FALSE)
# keep only digits, dots and dashes (drops e.g. a trailing build tag)
sanitized <- gsub("[^0-9.-]", "", output)
numeric_version(sanitized)
}
# Query `<module>.__version__` from the Python interpreter at `python`
# and return it as a numeric_version object.
# NOTE(review): assumes the module defines __version__ and that the value
# parses as a numeric version; otherwise this errors -- confirm callers
# guard for that.
python_module_version <- function(python, module) {
fmt <- "import %1$s; print(%1$s.__version__)"
code <- sprintf(fmt, module)
args <- c("-E", "-c", shQuote(code))
output <- system2(python, args, stdout = TRUE, stderr = FALSE)
numeric_version(output)
}
# given the path to a python binary, or an environment path,
# try to find the path to the associated python binary, and
# figure out if it's a virtualenv, conda environment, or none
# Given the path to a python binary or to an environment directory,
# classify the installation by walking from `path` up toward the
# filesystem root. At each level: virtualenv marker files win first,
# then a conda-meta/ directory (conda env), and a bare python binary is
# remembered as a "system" fallback used only if no env is found.
# Errors (via the package helper stopf) when nothing is found.
python_info <- function(path) {
path <- path.expand(path)
parent <- dirname(path)
# NOTE: we check for both 'python' and 'python3' because certain python
# installations might install one version of the binary but not the other.
#
# Some installations might not place Python within a 'Scripts' or 'bin'
# sub-directory, so look in the root directory too.
prefixes <- list(NULL, if (is_windows()) "Scripts" else "bin")
suffixes <- if (is_windows()) "python.exe" else c("python", "python3")
# placeholder for a discovered system python
systemPython <- NULL
while (path != parent) {
# check for virtual environment files
files <- c(
"pyvenv.cfg", # created by venv
file.path(prefixes[[2L]], "activate_this.py") # created by virtualenv
)
paths <- file.path(path, files)
virtualenv <- any(file.exists(paths))
# extra check that we aren't in a conda environment
condapath <- file.path(path, "condabin/conda")
if (file.exists(condapath))
virtualenv <- FALSE
if (virtualenv)
return(python_info_virtualenv(path))
# check for conda environment files
condaenv <- file.exists(file.path(path, "conda-meta"))
if (condaenv)
return(python_info_condaenv(path))
# check for python binary (implies a system install)
# we don't return immediately here because we might find
# as we traverse upwards that some of the expected virtualenv
# or condaenv files exist, so we just save the path and use
# it later if appropriate
if (is.null(systemPython)) {
for (prefix in prefixes) {
for (suffix in suffixes) {
# NOTE(review): this break only exits the inner suffix loop; a match
# found under a later prefix overwrites an earlier one -- confirm the
# "last prefix wins" behavior is intended.
bin <- paste(c(path, prefix, suffix), collapse = "/")
if (file.exists(bin)) {
systemPython <- bin
break
}
}
}
}
# recurse
parent <- path
path <- dirname(path)
}
# if we found a system python, use that as the fallback
if (!is.null(systemPython))
return(python_info_system(dirname(systemPython), systemPython))
stopf("could not find a Python environment for %s", path)
}
# Describe the virtualenv rooted at `path`.
# Returns a list with `python` (interpreter path inside the env),
# `type` ("virtualenv") and `root` (the environment directory).
python_info_virtualenv <- function(path) {
# form path to python binary
suffix <- if (is_windows()) "Scripts/python.exe" else "bin/python"
python <- file.path(path, suffix)
# return details
list(
python = python,
type = "virtualenv",
root = path
)
}
# Describe a conda environment at `path`: interpreter location, type tag,
# environment root, and (when discoverable) the conda binary managing it.
python_info_condaenv <- function(path) {
  interpreter <- if (is_windows())
    file.path(path, "python.exe")
  else
    file.path(path, "bin/python")
  list(
    python = interpreter,
    type = "conda",
    root = path,
    conda = python_info_condaenv_find(path)
  )
}
# Find the conda executable associated with the conda env at `path`.
# Search order: condabin/ inside the env; on Windows also Scripts/ (the
# Anaconda base env) and ../../Scripts (ArcGIS layout); finally the
# creation command recorded in conda-meta/history. Returns NULL when no
# candidate can be derived. The final path is not checked for existence
# (mustWork = FALSE).
python_info_condaenv_find <- function(path) {
# first, check if we have a condabin
exe <- if (is_windows()) "conda.exe" else "conda"
conda <- file.path(path, "condabin", exe)
if (file.exists(conda))
return(conda)
if (is_windows()) {
# in Anaconda base env, conda.exe lives under Scripts
conda <- file.path(path, "Scripts", exe)
if (file.exists(conda))
return(conda)
# in ArcGIS env, conda.exe lives in a parent directory
conda <- file.path(path, "../..", "Scripts", exe)
conda <- normalizePath(conda, winslash = "/", mustWork = FALSE)
if (file.exists(conda))
return(conda)
}
# read history file
histpath <- file.path(path, "conda-meta/history")
if (!file.exists(histpath))
return(NULL)
history <- readLines(histpath, warn = FALSE)
# look for cmd line
pattern <- "^[[:space:]]*#[[:space:]]*cmd:[[:space:]]*"
lines <- grep(pattern, history, value = TRUE)
if (length(lines) == 0)
return(NULL)
# get path to conda script used
script <- sub("^#\\s+cmd: (.+)\\s+create\\s+.*", "\\1", lines[[1]])
# on Windows, a wrapper script is recorded in the history,
# so instead attempt to find the real conda binary
conda <- file.path(dirname(script), exe)
normalizePath(conda, winslash = "/", mustWork = FALSE)
}
# Record describing a system-wide Python installation: the interpreter
# binary and the directory it was found in.
python_info_system <- function(path, python) {
  info <- list()
  info$python <- python
  info$type <- "system"
  info$root <- path
  info
}
|
#install.packages(c("ff","kernlab","ffbase","pracma","AUC"),dep=T)
rm(list=ls())
gc()
library(ff)
library(kernlab)
library(ffbase)
library(plyr)
library(pracma)
library(AUC)
# Concordance statistics for a binary outcome and fitted probabilities.
#
# Every (event, non-event) pair is compared: concordant when the event has
# the higher fitted value, discordant when lower, tied when equal.
# Percentages are relative to the total number of pairs.
#
# Args:
#   indvar:       binary response vector (1 = event, 0 = non-event).
#   fittedvalues: fitted probabilities/scores, same length as indvar.
#
# Returns a list: "Percent Concordance", "Percent Discordance",
# "Percent Tied" and "Pairs" (the number of pairs compared).
OptimisedConc=function(indvar,fittedvalues)
{
# drop = FALSE keeps a one-row subset as a matrix (the unguarded subscript
# in the original collapsed it to a vector, which broke dim()). Also fixes
# the stray "50" that sat after the loop brace in the original.
Data = cbind(indvar, fittedvalues)
ones = Data[Data[,1] == 1, , drop = FALSE]
zeros = Data[Data[,1] == 0, , drop = FALSE]
Pairs = nrow(zeros) * nrow(ones)
# pairwise differences fitted(event) - fitted(non-event), computed in one
# vectorized outer() call instead of the original O(n*m) double loop
diffs = outer(zeros[,2], ones[,2], FUN = function(z, o) o - z)
PercentConcordance = (sum(diffs > 0) / Pairs) * 100
PercentDiscordance = (sum(diffs < 0) / Pairs) * 100
PercentTied = (sum(diffs == 0) / Pairs) * 100
return(list("Percent Concordance"=PercentConcordance,"Percent Discordance"=PercentDiscordance,"Percent Tied"=PercentTied,"Pairs"=Pairs))
}
# Load the full modelling table (pipe-delimited, 50 numeric columns) as an
# out-of-memory ff data frame.
gp_data <-read.table.ffdf(file="//10.8.8.51/lv0/Move to Box/Mithun/projects/7. DS_New Training/1. data/GP_full_data_base.txt",
header = TRUE,VERBOSE = TRUE,
sep='|',colClasses = c(rep("numeric",50)))
# Keep only the customer id, the response flag and the 34 model features.
gp_data <- subset(gp_data,select=c('customer_key','Response','percent_disc_last_12_mth','percent_disc_last_6_mth','per_elec_comm',
'gp_hit_ind_tot','num_units_12mth','num_em_campaign','disc_ats','avg_order_amt_last_6_mth','Time_Since_last_disc_purchase',
'non_disc_ats','gp_on_net_sales_ratio','on_sales_rev_ratio_12mth','mobile_ind_tot','ratio_order_6_12_mth',
'pct_off_hit_ind_tot','ratio_rev_wo_rewd_12mth','ratio_disc_non_disc_ats','card_status','br_hit_ind_tot',
'gp_br_sales_ratio','ratio_order_units_6_12_mth','num_disc_comm_responded','purchased','ratio_rev_rewd_12mth',
'num_dist_catg_purchased','num_order_num_last_6_mth','at_hit_ind_tot','gp_go_net_sales_ratio','clearance_hit_ind_tot',
'searchdex_ind_tot','total_plcc_cards','factory_hit_ind_tot','gp_bf_net_sales_ratio','markdown_hit_ind_tot' ))
# Training-set sizes to evaluate: 5000..15000 in steps of 1000.
min.sample.size <- 5000; max.sample.size <- 15000;
sample.sizes <- seq(min.sample.size, max.sample.size, by=1000)
# Grid of SVM cost (C) values to sweep for each sample size.
c.list <-c(seq(0.1,1, by=0.3),1.5,3,5,10,20)
# 1 = draw class-balanced training samples, 0 = simple random sampling.
balance <- 1
# Initialise the train/test metric CSVs with a header row (overwrite mode).
headers1<-cbind("samplesize", "run", "nu", "AUC","Concordance")
write.table(headers1, paste0('//10.8.8.51/lv0/Move to Box/Mithun/projects/7. DS_New Training/3.Documents/GP_DS_SVM_train_', min.sample.size,'_', max.sample.size, '.csv'),
append=FALSE, sep=",",row.names=FALSE,col.names=FALSE)
headers2<-cbind("samplesize","run", "nu" ,"SampleNumber", "AUC","Concordance")
write.table(headers2, paste0('//10.8.8.51/lv0/Move to Box/Mithun/projects/7. DS_New Training/3.Documents/GP_DS_SVM_test_', min.sample.size,'_', max.sample.size, '.csv'),
append=FALSE, sep=",",row.names=FALSE,col.names=FALSE)
# Grid over training-set sizes; for each size draw a (balanced) train/test
# split, estimate the RBF width once via sigest, then sweep the cost grid,
# logging training and hold-out AUC/concordance to the CSVs opened above.
for(i in 1:length(sample.sizes))
{
for (s in 1:1){
# balance==1: draw equal numbers of responders and non-responders
if (balance==1)
{
gp_data.ones <- gp_data[gp_data$Response ==1,]
index.ones.tra <- bigsample(1:nrow(gp_data.ones), size=0.5*sample.sizes[i], replace=F)
tra_gp.ones <- gp_data.ones[ index.ones.tra,]
tst_gp.ones <- gp_data.ones[-index.ones.tra,]
rm(gp_data.ones); gc();
gp_data.zeroes <- gp_data[gp_data$Response !=1,]
index.zeroes.tra <- bigsample(1:nrow(gp_data.zeroes), size=0.5*sample.sizes[i], replace=F)
tra_gp.zeroes <- gp_data.zeroes[ index.zeroes.tra,]
tst_gp.zeroes <- gp_data.zeroes[-index.zeroes.tra,]
rm(gp_data.zeroes); gc();
tra_gp <- rbind(tra_gp.ones, tra_gp.zeroes)
rm(tra_gp.ones, tra_gp.zeroes); gc();
}
# balance==0: simple random sample, keeping the natural class proportions
if (balance==0)
{
index.tra <- bigsample(1:nrow(gp_data), size=sample.sizes[i], replace=F)
tra_gp <- gp_data[ index.tra,]
tst_gp.all <- gp_data[-index.tra,]
}
# drop customer_key (column 1); Response becomes column 1
tra_gp <- tra_gp[c(-1)]
prop <- sum(tra_gp[,1])/nrow(tra_gp)
# estimate the RBF width once per training sample; take the middle of the
# three sigma quantile estimates returned by sigest
srange<-sigest(Response ~., data=tra_gp)
sigma<-srange[2]
# sweep the SVM cost grid
for(j in 1:length(c.list))
{
ksvm.object <- paste0('svm_active_',sample.sizes[i],'_',c.list[j],"_",round(sigma,2),"_",s)
# C-classification SVM, RBF kernel, 10-fold CV, probability model enabled
ksvm.model <- ksvm(
as.factor(Response) ~., data=tra_gp, type="C-svc",
kernel="rbfdot", kpar=list(sigma = sigma), C=c.list[j],
cross=10,prob.model=TRUE )
print('------------------------------------------')
print(ksvm.model)
print('------------------------------------------')
save(ksvm.model,file = paste0("//10.8.8.51/lv0/Move to Box/Mithun/projects/7. DS_New Training/Model_Objects/svm/" , ksvm.object , ".RData"))
# training-set performance: AUC and concordance of predicted probabilities
prob_tra<- predict(ksvm.model,tra_gp[c(-1)],type="probabilities")
roc.area <-
auc(roc(prob_tra[,2], factor(tra_gp$Response)))
concordance <- OptimisedConc(tra_gp$Response,prob_tra[,2])[1]
print(paste("Run","Sample Size","nu",'Prior', 'AUC', 'Concordance'))
print(paste(s,sample.sizes[i],c.list[j], prop, roc.area, concordance))
write.table(cbind(sample.sizes[i], s, c.list[j], roc.area, concordance),
paste0('//10.8.8.51/lv0/Move to Box/Mithun/projects/7. DS_New Training/3.Documents/GP_DS_SVM_train_', min.sample.size,'_', max.sample.size, '.csv'),
append=TRUE, sep=",",row.names=FALSE,col.names=FALSE)
print('------------------------------------------')
print('---------- Running Validations -----------')
print('------------------------------------------')
# draw indices for 10 disjoint hold-out batches of 10000 rows each
if (balance==1)
{
index.tst_gp <- sample(1:nrow(rbind(tst_gp.ones, tst_gp.zeroes)),
size=10000*10, replace=F)
}
if (balance==0)
{
index.tst_gp <- sample(1:nrow(tst_gp.all),
size=10000*10, replace=F)
}
print(paste("Run","Sample Size","nu",'Prior', 'AUC', 'Concordance'))
# score each hold-out batch with the current model
for (l in 1:10)
{
if (balance==1)
{
tst_gp <- rbind(tst_gp.ones, tst_gp.zeroes)[index.tst_gp[((l-1)*10000 + 1):(l*10000)],]
tst_gp <- tst_gp[c(-1)]
}
if (balance==0)
{
tst_gp <- tst_gp.all[index.tst_gp[((l-1)*10000 + 1):(l*10000)],]
tst_gp <- tst_gp[c(-1)]
}
prop.tst <- sum(tst_gp[,1])/nrow(tst_gp)
gc()
prob_tst <- predict(ksvm.model,tst_gp[c(-1)],type="probabilities")
roc.area.tst <-
auc(roc(prob_tst[,2], factor(tst_gp$Response)))
concordance.tst <- OptimisedConc(tst_gp$Response,prob_tst[,2])[1]
print(paste(s,sample.sizes[i],c.list[j],prop.tst, roc.area.tst, concordance.tst))
write.table(cbind(sample.sizes[i], s, c.list[j], l, roc.area.tst, concordance.tst),
paste0('//10.8.8.51/lv0/Move to Box/Mithun/projects/7. DS_New Training/3.Documents/GP_DS_SVM_test_', min.sample.size,'_', max.sample.size, '.csv'),
append=TRUE, sep=",",row.names=FALSE,col.names=FALSE)
}
}
# NOTE(review): these objects only exist when balance==1; this rm() would
# error under balance==0 -- confirm before flipping the balance switch.
rm(tst_gp.ones, tst_gp.zeroes)
gc()
}
}
|
/GP US DS Model mghosh/2.Code/DS Model Training SVM v1.R
|
no_license
|
ghoshmithun/ImpProjectDoc
|
R
| false
| false
| 7,534
|
r
|
#install.packages(c("ff","kernlab","ffbase","pracma","AUC"),dep=T)
rm(list=ls())
gc()
library(ff)
library(kernlab)
library(ffbase)
library(plyr)
library(pracma)
library(AUC)
# Concordance statistics for a binary outcome and fitted probabilities.
#
# Every (event, non-event) pair is compared: concordant when the event has
# the higher fitted value, discordant when lower, tied when equal.
# Percentages are relative to the total number of pairs.
#
# Args:
#   indvar:       binary response vector (1 = event, 0 = non-event).
#   fittedvalues: fitted probabilities/scores, same length as indvar.
#
# Returns a list: "Percent Concordance", "Percent Discordance",
# "Percent Tied" and "Pairs" (the number of pairs compared).
OptimisedConc=function(indvar,fittedvalues)
{
# drop = FALSE keeps a one-row subset as a matrix (the unguarded subscript
# in the original collapsed it to a vector, which broke dim()). Also fixes
# the stray "50" that sat after the loop brace in the original.
Data = cbind(indvar, fittedvalues)
ones = Data[Data[,1] == 1, , drop = FALSE]
zeros = Data[Data[,1] == 0, , drop = FALSE]
Pairs = nrow(zeros) * nrow(ones)
# pairwise differences fitted(event) - fitted(non-event), computed in one
# vectorized outer() call instead of the original O(n*m) double loop
diffs = outer(zeros[,2], ones[,2], FUN = function(z, o) o - z)
PercentConcordance = (sum(diffs > 0) / Pairs) * 100
PercentDiscordance = (sum(diffs < 0) / Pairs) * 100
PercentTied = (sum(diffs == 0) / Pairs) * 100
return(list("Percent Concordance"=PercentConcordance,"Percent Discordance"=PercentDiscordance,"Percent Tied"=PercentTied,"Pairs"=Pairs))
}
gp_data <-read.table.ffdf(file="//10.8.8.51/lv0/Move to Box/Mithun/projects/7. DS_New Training/1. data/GP_full_data_base.txt",
header = TRUE,VERBOSE = TRUE,
sep='|',colClasses = c(rep("numeric",50)))
gp_data <- subset(gp_data,select=c('customer_key','Response','percent_disc_last_12_mth','percent_disc_last_6_mth','per_elec_comm',
'gp_hit_ind_tot','num_units_12mth','num_em_campaign','disc_ats','avg_order_amt_last_6_mth','Time_Since_last_disc_purchase',
'non_disc_ats','gp_on_net_sales_ratio','on_sales_rev_ratio_12mth','mobile_ind_tot','ratio_order_6_12_mth',
'pct_off_hit_ind_tot','ratio_rev_wo_rewd_12mth','ratio_disc_non_disc_ats','card_status','br_hit_ind_tot',
'gp_br_sales_ratio','ratio_order_units_6_12_mth','num_disc_comm_responded','purchased','ratio_rev_rewd_12mth',
'num_dist_catg_purchased','num_order_num_last_6_mth','at_hit_ind_tot','gp_go_net_sales_ratio','clearance_hit_ind_tot',
'searchdex_ind_tot','total_plcc_cards','factory_hit_ind_tot','gp_bf_net_sales_ratio','markdown_hit_ind_tot' ))
min.sample.size <- 5000; max.sample.size <- 15000;
sample.sizes <- seq(min.sample.size, max.sample.size, by=1000)
c.list <-c(seq(0.1,1, by=0.3),1.5,3,5,10,20)
balance <- 1
headers1<-cbind("samplesize", "run", "nu", "AUC","Concordance")
write.table(headers1, paste0('//10.8.8.51/lv0/Move to Box/Mithun/projects/7. DS_New Training/3.Documents/GP_DS_SVM_train_', min.sample.size,'_', max.sample.size, '.csv'),
append=FALSE, sep=",",row.names=FALSE,col.names=FALSE)
headers2<-cbind("samplesize","run", "nu" ,"SampleNumber", "AUC","Concordance")
write.table(headers2, paste0('//10.8.8.51/lv0/Move to Box/Mithun/projects/7. DS_New Training/3.Documents/GP_DS_SVM_test_', min.sample.size,'_', max.sample.size, '.csv'),
append=FALSE, sep=",",row.names=FALSE,col.names=FALSE)
for(i in 1:length(sample.sizes))
{
for (s in 1:1){
if (balance==1)
{
gp_data.ones <- gp_data[gp_data$Response ==1,]
index.ones.tra <- bigsample(1:nrow(gp_data.ones), size=0.5*sample.sizes[i], replace=F)
tra_gp.ones <- gp_data.ones[ index.ones.tra,]
tst_gp.ones <- gp_data.ones[-index.ones.tra,]
rm(gp_data.ones); gc();
gp_data.zeroes <- gp_data[gp_data$Response !=1,]
index.zeroes.tra <- bigsample(1:nrow(gp_data.zeroes), size=0.5*sample.sizes[i], replace=F)
tra_gp.zeroes <- gp_data.zeroes[ index.zeroes.tra,]
tst_gp.zeroes <- gp_data.zeroes[-index.zeroes.tra,]
rm(gp_data.zeroes); gc();
tra_gp <- rbind(tra_gp.ones, tra_gp.zeroes)
rm(tra_gp.ones, tra_gp.zeroes); gc();
}
if (balance==0)
{
index.tra <- bigsample(1:nrow(gp_data), size=sample.sizes[i], replace=F)
tra_gp <- gp_data[ index.tra,]
tst_gp.all <- gp_data[-index.tra,]
}
tra_gp <- tra_gp[c(-1)]
prop <- sum(tra_gp[,1])/nrow(tra_gp)
srange<-sigest(Response ~., data=tra_gp)
sigma<-srange[2]
for(j in 1:length(c.list))
{
ksvm.object <- paste0('svm_active_',sample.sizes[i],'_',c.list[j],"_",round(sigma,2),"_",s)
ksvm.model <- ksvm(
as.factor(Response) ~., data=tra_gp, type="C-svc",
kernel="rbfdot", kpar=list(sigma = sigma), C=c.list[j],
cross=10,prob.model=TRUE )
print('------------------------------------------')
print(ksvm.model)
print('------------------------------------------')
save(ksvm.model,file = paste0("//10.8.8.51/lv0/Move to Box/Mithun/projects/7. DS_New Training/Model_Objects/svm/" , ksvm.object , ".RData"))
prob_tra<- predict(ksvm.model,tra_gp[c(-1)],type="probabilities")
roc.area <-
auc(roc(prob_tra[,2], factor(tra_gp$Response)))
concordance <- OptimisedConc(tra_gp$Response,prob_tra[,2])[1]
print(paste("Run","Sample Size","nu",'Prior', 'AUC', 'Concordance'))
print(paste(s,sample.sizes[i],c.list[j], prop, roc.area, concordance))
write.table(cbind(sample.sizes[i], s, c.list[j], roc.area, concordance),
paste0('//10.8.8.51/lv0/Move to Box/Mithun/projects/7. DS_New Training/3.Documents/GP_DS_SVM_train_', min.sample.size,'_', max.sample.size, '.csv'),
append=TRUE, sep=",",row.names=FALSE,col.names=FALSE)
print('------------------------------------------')
print('---------- Running Validations -----------')
print('------------------------------------------')
if (balance==1)
{
index.tst_gp <- sample(1:nrow(rbind(tst_gp.ones, tst_gp.zeroes)),
size=10000*10, replace=F)
}
if (balance==0)
{
index.tst_gp <- sample(1:nrow(tst_gp.all),
size=10000*10, replace=F)
}
print(paste("Run","Sample Size","nu",'Prior', 'AUC', 'Concordance'))
for (l in 1:10)
{
if (balance==1)
{
tst_gp <- rbind(tst_gp.ones, tst_gp.zeroes)[index.tst_gp[((l-1)*10000 + 1):(l*10000)],]
tst_gp <- tst_gp[c(-1)]
}
if (balance==0)
{
tst_gp <- tst_gp.all[index.tst_gp[((l-1)*10000 + 1):(l*10000)],]
tst_gp <- tst_gp[c(-1)]
}
prop.tst <- sum(tst_gp[,1])/nrow(tst_gp)
gc()
prob_tst <- predict(ksvm.model,tst_gp[c(-1)],type="probabilities")
roc.area.tst <-
auc(roc(prob_tst[,2], factor(tst_gp$Response)))
concordance.tst <- OptimisedConc(tst_gp$Response,prob_tst[,2])[1]
print(paste(s,sample.sizes[i],c.list[j],prop.tst, roc.area.tst, concordance.tst))
write.table(cbind(sample.sizes[i], s, c.list[j], l, roc.area.tst, concordance.tst),
paste0('//10.8.8.51/lv0/Move to Box/Mithun/projects/7. DS_New Training/3.Documents/GP_DS_SVM_test_', min.sample.size,'_', max.sample.size, '.csv'),
append=TRUE, sep=",",row.names=FALSE,col.names=FALSE)
}
}
rm(tst_gp.ones, tst_gp.zeroes)
gc()
}
}
|
#! /usr/bin/Rscript
library(argparse)
library(phyloseq)
library(ggplot2)
library(gridExtra)
library(vegan)
library(rbiom)
options(stringsAsFactors=F)
# Command-line arguments
parser=ArgumentParser()
parser$add_argument("-i", "--infile", help="RDS file containing the phyloseq object to analyze (from step 2a)")
# Fixed help text: this flag is the integer rarefaction depth, not a
# UniFrac distance file (the old text was copy-pasted from another script;
# the value is passed to rbiom::rarefy(depth=...) below).
parser$add_argument("-r", "--rarefaction", type="integer", help="Rarefaction depth (reads per sample) to subsample to before computing distances")
parser$add_argument("--force-rarefaction-level", type="logical", default=FALSE, help="Use the specified rarefaction level even if it is lower than the lowest sample depth. (Default is to use the lowest sample depth if it's higher than the value given by --rarefaction.)")
parser$add_argument("-o", "--outprefix", help="Prefix for all output files")
parser$add_argument("-t", "--type", choices=c("extraction", "amplification"), default="extraction", help="Which experiment set this analysis belongs to")
args=parser$parse_args()
# setwd('/home/jgwall/Projects/Microbiomes/MicrobiomeMethodsDevelopment/CompareSampleExtractionAndAmplification_Mohsen_Cecelia/2020 03 Consolidated Pipeline/')
# args=parser$parse_args(c("-i", "TestPrimers/2_Analysis/2f_otu_table.no_organelles.RDS", "-o", "99_tmp", "-r", "2000", '-t', 'amplification' ))
cat("Assessing community distortion with beta diversity metrics\n")
set.seed(1)
# Load phyloseq data
source("StandardizeLabels.r")
mydata = standardize_labels(readRDS(args$infile), type=args$type)
mydata = prune_samples(mydata, samples=!sample_data(mydata)$sample.type %in% c("blank", "water"))# Filter out blanks and water controls
# Extract individual data components
metadata = sample_data(mydata)
mytable=otu_table(mydata)
mytree=phy_tree(mydata)
# If the requested rarefaction depth is below the shallowest sample, use
# the minimum sample depth instead -- unless the user explicitly passed
# --force-rarefaction-level. (Also fixes the doubled word in the message.)
min_depth = min(sample_sums(mydata))
if(!args$force_rarefaction_level && args$rarefaction < min_depth){
cat("\tNote: Specified rarefaction level is less than the minimum sample depth, so minimum sample depth will be used instead.\n")
args$rarefaction = min_depth
}
# Rarefy matrix with rbiom
cat("Calculating distance metrics\n")
rarefied = rbiom::rarefy(mytable, depth=args$rarefaction) # Specify rbiom:: to be absolutely certain we don't use vegan's function of the same name
cat("\tRemoved", ncol(mytable) - ncol(rarefied), "samples for having fewer than", args$rarefaction,"total reads for rarefaction\n")
# Adjust metadata to reflect fewer samples (potentially)
metadata$sample = rownames(metadata)
metadata = subset(metadata, metadata$sample %in% colnames(rarefied))
# Calculate UniFrac distances with rbiom
weighted = unifrac(rarefied, tree=mytree, weighted=TRUE)
unweighted = unifrac(rarefied, tree=mytree, weighted=FALSE)
# Calculate Bray-Curtis distance matrix with vegan
bray = as.matrix(vegdist(t(rarefied), method='bray'))
# Combined distances into a single list item
distances=list("Weighted UniFrac"=as.matrix(weighted), "Unweighted UniFrac"=as.matrix(unweighted), "Bray-Curtis"=bray)
# Helper function to subset and do MDS each time, making a list of output results
# For each distance matrix in `mydistances`, restrict to `mysamples`, run
# classical MDS, and return the matching metadata rows with PC1/PC2
# coordinates and per-axis variance fractions attached. The treatment
# factor is re-leveled against the full metadata so no levels are dropped.
subMDS = function(mydistances, metadata, mysamples){
  project_one = function(dist_mat){
    mds_fit = cmdscale(dist_mat[mysamples, mysamples], eig = TRUE)
    axis_var = mds_fit$eig / sum(mds_fit$eig)
    out = metadata[mysamples, ]
    out$PC1 = mds_fit$points[, 1]
    out$PC2 = mds_fit$points[, 2]
    out$PC1_var = axis_var[1]
    out$PC2_var = axis_var[2]
    out$treatment = factor(as.character(out$treatment),
                           levels = levels(metadata$treatment))
    out
  }
  lapply(mydistances, project_one)
}
# #############
# Main text figure - Construct with grid.arrange()
# #############
# Helper function for plotting MDS plots; returns a single ggplot item
# Build one MDS scatter plot (PC1 vs PC2) from a data frame produced by
# subMDS(). `type` becomes the plot title, `legend.title` (if given)
# relabels the colour legend, and extra arguments in `...` are forwarded
# to both aes() and ggplot() (callers use this to pass e.g. color=treatment).
# `metric` is only used by the commented-out title variant below.
plot.mds = function(mydata, type="", metric="", legend.title=NULL, ...){
myplot = ggplot(data=mydata, mapping=aes(x=PC1, y=PC2, ...), ...) +
xlab(paste("PC1 (", round(mydata$PC1_var[1]*100, 1), "%)", sep="")) +
# NOTE(review): indexes PC2_var[2]; PC2_var is a constant column in
# subMDS() output so [1] vs [2] is equivalent there -- confirm if reused.
ylab(paste("PC2 (", round(mydata$PC2_var[2]*100, 1), "%)", sep="")) +
geom_point(size=6, alpha=0.65) +
#ggtitle(paste(type, metric, sep=" - ")) +
ggtitle(type) +
theme_bw() +
theme(aspect.ratio=1, plot.title = element_text(size=10, face="bold"),
axis.title = element_text(size=10, face="bold"),
axis.text = element_blank(),
axis.ticks = element_blank(),
legend.title=element_text(size=10, face="bold"))
if(! is.null(legend.title)){
myplot = myplot + labs(color=legend.title)
}
return(myplot)
}
# Get MDS plots of everything
alldata = subMDS(distances, metadata, metadata$sample)
all_weighted = plot.mds(alldata[['Weighted UniFrac']], color=sample.type, type="All", metric="Weighted UniFrac", legend.title="Sample Type")
all_bray = plot.mds(alldata[['Bray-Curtis']], color=sample.type, type="All", metric="Bray-Curtis", legend.title="Sample Type")
all_plots = list(all_weighted, all_bray)
# Sample-specific plots
metric="Weighted UniFrac"
sample_types = c("Soil 1","Soil 2","Defined Community")
sample_plots = lapply(sample_types, function(mytype){
samples = rownames(metadata)[metadata$sample.type == mytype ]
targets = subMDS(distances, metadata, samples)
plot.mds(targets[[metric]], color=treatment, type=mytype, metric=metric, legend.title="Primer Set") +
scale_color_brewer(palette = "Dark2", drop=FALSE) # Change color scale
})
# Helper function to output PNG and SVG of each figure
# Arrange `myplots` in a single row and write the figure to both PNG
# (300 dpi) and SVG files named "<args$outprefix>.<group>.png/.svg".
# Relies on the global `args` for the output prefix; width and height
# are in inches.
write_plots = function(myplots, group="", mywidth=5, myheight=5){
png(paste(args$outprefix, group, "png", sep="."), width=mywidth, height=myheight, units='in', res=300)
grid.arrange(grobs=myplots, nrow=1)
dev.off()
svg(paste(args$outprefix, group, "svg", sep="."), width=mywidth, height=myheight)
grid.arrange(grobs=myplots, nrow=1)
dev.off()
}
# Output graphics
write_plots(all_plots, group="all", mywidth=8, myheight=2)
write_plots(sample_plots, group="by_sample", mywidth=12, myheight=2)
|
/Primers_PCoA.r
|
no_license
|
wallacelab/paper-giangacomo-16s-methods
|
R
| false
| false
| 6,372
|
r
|
#! /usr/bin/Rscript
library(argparse)
library(phyloseq)
library(ggplot2)
library(gridExtra)
library(vegan)
library(rbiom)
options(stringsAsFactors=F)
# Command-line arguments
parser=ArgumentParser()
parser$add_argument("-i", "--infile", help="RDS file containing the phyloseq object to analyze (from step 2a)")
parser$add_argument("-r", "--rarefaction", type="integer", help="Text file of Weighted UniFrac distances")
parser$add_argument("--force-rarefaction-level", type="logical", default=FALSE, help="Use the specified rarefaction level even if it is lower than the lowest sample depth. (Default is to use the lowest sample depth if it's higher than the value given by --rarefaction.)")
parser$add_argument("-o", "--outprefix", help="Prefix for all output files")
parser$add_argument("-t", "--type", choices=c("extraction", "amplification"), default="extraction", help="Which experiment set this analysis belongs to")
args=parser$parse_args()
# setwd('/home/jgwall/Projects/Microbiomes/MicrobiomeMethodsDevelopment/CompareSampleExtractionAndAmplification_Mohsen_Cecelia/2020 03 Consolidated Pipeline/')
# args=parser$parse_args(c("-i", "TestPrimers/2_Analysis/2f_otu_table.no_organelles.RDS", "-o", "99_tmp", "-r", "2000", '-t', 'amplification' ))
cat("Assessing community distortion with beta diversity metrics\n")
set.seed(1)
# Load phyloseq data
source("StandardizeLabels.r")
mydata = standardize_labels(readRDS(args$infile), type=args$type)
mydata = prune_samples(mydata, samples=!sample_data(mydata)$sample.type %in% c("blank", "water"))# Filter out blanks and water controls
# Extract individual data components
metadata = sample_data(mydata)
mytable=otu_table(mydata)
mytree=phy_tree(mydata)
# Check if the specified rarefaction is lower than the smallest sample depth and change if it is (and user didn't overrule this behavior)
if(!args$force_rarefaction_level && args$rarefaction < min(sample_sums(mydata))){
cat("\tNote: Specified rarefaction is level is less than the minimum sample depth, so minimum sample depth will be used instead.\n")
args$rarefaction = min(sample_sums(mydata))
}
# Rarefy matrix with rbiom
cat("Calculating distance metrics\n")
rarefied = rbiom::rarefy(mytable, depth=args$rarefaction) # Specify rbiom:: to be absolutely certain we don't use vegan's function of the same name
cat("\tRemoved", ncol(mytable) - ncol(rarefied), "samples for having fewer than", args$rarefaction,"total reads for rarefaction\n")
# Adjust metadata to reflect fewer samples (potentially)
metadata$sample = rownames(metadata)
metadata = subset(metadata, metadata$sample %in% colnames(rarefied))
# Calculate UniFrac distances with rbiom
weighted = unifrac(rarefied, tree=mytree, weighted=TRUE)
unweighted = unifrac(rarefied, tree=mytree, weighted=FALSE)
# Calculate Bray-Curtis distance matrix with vegan
bray = as.matrix(vegdist(t(rarefied), method='bray'))
# Combined distances into a single list item
distances=list("Weighted UniFrac"=as.matrix(weighted), "Unweighted UniFrac"=as.matrix(unweighted), "Bray-Curtis"=bray)
# Helper function to subset and do MDS each time, making a list of output results
subMDS = function(mydistances, metadata, mysamples){
myresults = lapply(mydistances, function(mydist){
mydist = mydist[mysamples, mysamples]
myMDS = cmdscale(mydist, eig=T)
# Combine into a single data frame
mymeta = metadata[mysamples,]
mymeta$PC1 = myMDS$points[,1]
mymeta$PC2 = myMDS$points[,2]
# Variance per PC; saving as a data frame column for convenience
pc_variance = myMDS$eig / sum(myMDS$eig)
mymeta$PC1_var = pc_variance[1]
mymeta$PC2_var = pc_variance[2]
# Standardize treatment factor (otherwise might drop some)
mymeta$treatment = factor(as.character(mymeta$treatment), levels=levels(metadata$treatment))
return(mymeta)
})
return(myresults)
}
# #############
# Main text figure - Construct with grid.arrange()
# #############
# Helper function for plotting MDS plots; returns a single ggplot item
plot.mds = function(mydata, type="", metric="", legend.title=NULL, ...){
myplot = ggplot(data=mydata, mapping=aes(x=PC1, y=PC2, ...), ...) +
xlab(paste("PC1 (", round(mydata$PC1_var[1]*100, 1), "%)", sep="")) +
ylab(paste("PC2 (", round(mydata$PC2_var[2]*100, 1), "%)", sep="")) +
geom_point(size=6, alpha=0.65) +
#ggtitle(paste(type, metric, sep=" - ")) +
ggtitle(type) +
theme_bw() +
theme(aspect.ratio=1, plot.title = element_text(size=10, face="bold"),
axis.title = element_text(size=10, face="bold"),
axis.text = element_blank(),
axis.ticks = element_blank(),
legend.title=element_text(size=10, face="bold"))
if(! is.null(legend.title)){
myplot = myplot + labs(color=legend.title)
}
return(myplot)
}
# Get MDS plots of everything
alldata = subMDS(distances, metadata, metadata$sample)
all_weighted = plot.mds(alldata[['Weighted UniFrac']], color=sample.type, type="All", metric="Weighted UniFrac", legend.title="Sample Type")
all_bray = plot.mds(alldata[['Bray-Curtis']], color=sample.type, type="All", metric="Bray-Curtis", legend.title="Sample Type")
all_plots = list(all_weighted, all_bray)
# Sample-specific plots
metric="Weighted UniFrac"
sample_types = c("Soil 1","Soil 2","Defined Community")
sample_plots = lapply(sample_types, function(mytype){
samples = rownames(metadata)[metadata$sample.type == mytype ]
targets = subMDS(distances, metadata, samples)
plot.mds(targets[[metric]], color=treatment, type=mytype, metric=metric, legend.title="Primer Set") +
scale_color_brewer(palette = "Dark2", drop=FALSE) # Change color scale
})
# Helper function to output PNG and SVG of each figure
write_plots = function(myplots, group="", mywidth=5, myheight=5){
png(paste(args$outprefix, group, "png", sep="."), width=mywidth, height=myheight, units='in', res=300)
grid.arrange(grobs=myplots, nrow=1)
dev.off()
svg(paste(args$outprefix, group, "svg", sep="."), width=mywidth, height=myheight)
grid.arrange(grobs=myplots, nrow=1)
dev.off()
}
# Output graphics
write_plots(all_plots, group="all", mywidth=8, myheight=2)
write_plots(sample_plots, group="by_sample", mywidth=12, myheight=2)
|
\name{arrayUpdate}
\alias{arrayUpdate}
\title{ Update array allocation }
\description{
Update the allocation of samples on the arrays.
This is a subfunction needed for \code{updateDesign}, but is not directly used.
}
\usage{
arrayUpdate(array.allocation, condition.allocation, nRILs, nSlides)
}
\arguments{
\item{array.allocation}{
matrix with nArray rows and nRIL columns.
Elements of 1/0 indicate this RIL (or strain) is/not selected
for this array.
}
\item{condition.allocation}{
matrix with nCondition rows and nRIL columns.
Elements of 1/0 indicate this RIL (or strain) is/not selected
for this condition.
}
\item{nRILs}{
number of RILs or strains available for the experiment.
}
\item{nSlides}{
total number of slides available for the experiment.
}
}
\details{
This function is used only for designing a dual-channel experiment where samples
need to be paired.
}
\value{
A list with the following two elements: \cr
\code{new.array.allocation}: an updated array allocation table \cr
\code{new.condition.allocation}: an updated condition allocation table
}
\references{
Y. Li, R. Breitling and R.C. Jansen. Generalizing genetical
genomics: the added value from environmental perturbation, Trends Genet
(2008) 24:518-524. \cr
Y. Li, M. Swertz, G. Vera, J. Fu, R. Breitling, and R.C. Jansen. designGG:
An R-package and Web tool for the optimal design of genetical genomics
experiments. BMC Bioinformatics 10:188(2009) \cr
http://gbic.biol.rug.nl/designGG
}
\author{
Yang Li <yang.li@rug.nl>, Gonzalo Vera <gonzalo.vera.rodriguez@gmail.com> \cr
Rainer Breitling <r.breitling@rug.nl>, Ritsert Jansen <r.c.jansen@rug.nl>
}
\seealso{
\code{\link{updateDesign}}
}
\keyword{method}
|
/man/arrayUpdate.Rd
|
no_license
|
cran/designGG
|
R
| false
| false
| 1,867
|
rd
|
\name{arrayUpdate}
\alias{arrayUpdate}
\title{ Update array allocation }
\description{
Update the allocation of samples on the arrays.
This is a subfunction needed for \code{updateDesign}, but is not directly used.
}
\usage{
arrayUpdate(array.allocation, condition.allocation, nRILs, nSlides)
}
\arguments{
\item{array.allocation}{
matrix with nArray rows and nRIL columns.
Elements of 1/0 indicate this RIL (or strain) is/not selected
for this array.
}
\item{condition.allocation}{
matrix with nCondition rows and nRIL columns.
Elements of 1/0 indicate this RIL (or strain) is/not selected
for this condition.
}
\item{nRILs}{
number of RILs or strains available for the experiment.
}
\item{nSlides}{
total number of slides available for experiment.
}
}
\details{
This function is used only for designing a dual-channel experiment where samples
need to be paired.
}
\value{
A list with the following two elements: \cr
\code{new.array.allocation}: an updated array allocation table \cr
\code{new.condition.allocation}: an updated condition allocation table
}
\references{
Y. Li, R. Breitling and R.C. Jansen. Generalizing genetical
genomics: the added value from environmental perturbation, Trends Genet
(2008) 24:518-524. \cr
Y. Li, M. Swertz, G. Vera, J. Fu, R. Breitling, and R.C. Jansen. designGG:
An R-package and Web tool for the optimal design of genetical genomics
experiments. BMC Bioinformatics 10:188(2009) \cr
http://gbic.biol.rug.nl/designGG
}
\author{
Yang Li <yang.li@rug.nl>, Gonzalo Vera <gonzalo.vera.rodriguez@gmail.com> \cr
Rainer Breitling <r.breitling@rug.nl>, Ritsert Jansen <r.c.jansen@rug.nl>
}
\seealso{
\code{\link{updateDesign}}
}
\keyword{method}
|
## R code template for analyses reported in Li, Koester, and Lachance et al iScience 2021
## DOI:https://doi.org/10.1016/j.isci.2021.102508
# calculate geometric means -----------------------------------------------
# Weighted mean bin position of a (normalized) FITC intensity profile:
# the sum of each bin's proportion times its 1-based bin index.
# Generalized from the hard-coded 12 bins to profiles of any length;
# for the length-12 profiles used in the paper the result is unchanged
# (the old `c(1:12)` silently recycled against other lengths).
geo_mean <- function(v.FITC.indiv) {
return (sum(v.FITC.indiv * seq_along(v.FITC.indiv)))
}
# examples using the data structure dat.FITC.sub_transpose created using the Loess curve generation script; pay attention to the orientation of your table #
# score a single individual (one column = one profile)
v.FITC.indiv = dat.FITC.sub_transpose[,1]
geo_mean(v.FITC.indiv = v.FITC.indiv)
# score every individual: preallocate, then fill column by column
v_geoMeans = rep(NA, ncol(dat.FITC.sub_transpose))
for (i_col in 1:ncol(dat.FITC.sub_transpose)) {
v_geoMeans[i_col] = geo_mean(v.FITC.indiv = dat.FITC.sub_transpose[,i_col])
}
|
/geometric_means_calculation.R
|
no_license
|
DeyLab/Li_Koester_Lachance_et_al_iScience_2021
|
R
| false
| false
| 719
|
r
|
## R code template for analyses reported in Li, Koester, and Lachance et al iScience 2021
## DOI:https://doi.org/10.1016/j.isci.2021.102508
# calculate geometric means -----------------------------------------------
# Weighted mean bin position of a (normalized) FITC intensity profile:
# the sum of each bin's proportion times its 1-based bin index.
# Generalized from the hard-coded 12 bins to profiles of any length;
# for the length-12 profiles used in the paper the result is unchanged.
geo_mean <- function(v.FITC.indiv) {
return (sum(v.FITC.indiv * seq_along(v.FITC.indiv)))
}
# examples using the data structure dat.FITC.sub_transpose created using the Loess curve generation script; pay attention to the orientation of your table #
v.FITC.indiv = dat.FITC.sub_transpose[,1]
geo_mean(v.FITC.indiv = v.FITC.indiv)
v_geoMeans = rep(NA, ncol(dat.FITC.sub_transpose))
for (i_col in 1:ncol(dat.FITC.sub_transpose)) {
v_geoMeans[i_col] = geo_mean(v.FITC.indiv = dat.FITC.sub_transpose[,i_col])
}
|
#logistic.growth.mle.norm
#' Fit a logistic growth curve to absorbance readings by maximum likelihood,
#' assuming normally distributed residuals.
#'
#' @param readings data.frame with columns `Time` and `ABS` (and optionally
#'   `culture`, printed when `printer = TRUE` for progress tracing).
#' @param printer print the culture id(s) being fitted.
#' @param upper sanity bound: parameter sets giving predictions, K, or the
#'   residual sd outside [0, upper] get an NA likelihood so optim() steers
#'   away from them.
#' @return `readings` with columns logistic.mle.N0, logistic.mle.K,
#'   logistic.mle.r and logistic.mle.predicted appended (all NA when the
#'   fit fails).
logistic.growth.mle.norm <- function(readings, printer = FALSE, upper = 2) {
  if (printer) {
    print(unique(readings$culture))
  }
  fitted.readings <- readings
  # Start values: K = max observed, r = 1, N0 = min observed,
  # sd = observed range.
  start.parameters <- c(max(readings$ABS, na.rm = TRUE), 1,
                        min(readings$ABS, na.rm = TRUE),
                        diff(range(readings$ABS, na.rm = TRUE)))
  # Negative log-likelihood to be minimized.
  like.growth <- function(parameters = start.parameters, readings) {
    # Parameter extraction
    K <- parameters[1]
    r <- parameters[2]
    N0 <- parameters[3]
    st.dev <- parameters[4]
    # Data extraction
    ABS <- readings$ABS
    Time <- readings$Time
    # Logistic growth model: N(t) = K*N0*exp(r*t) / (K + N0*(exp(r*t)-1))
    Nt <- (K * N0 * exp(r * Time)) / (K + N0 * (exp(r * Time) - 1))
    # Negative log-likelihood under a normal error model.
    likelihood <- -sum(dnorm(ABS, Nt, sd = st.dev, log = TRUE))
    # Sanity bounds (remove if using "L-BFGS-B" or constrOptim):
    # NA steers the Nelder-Mead search away from invalid regions.
    if (any(c(Nt < 0,
              Nt > upper,
              K > upper,
              N0 < 0,
              r < 0,
              st.dev < 0,
              st.dev > upper))) {
      likelihood <- NA
    }
    return(likelihood)
  }
  # try(): a failed optimisation should yield NA columns, not an error.
  try.test <- try({
    fit <- optim(par = start.parameters,
                 fn = like.growth,
                 readings = readings)
    # Extract fitted parameters and the model prediction at each time point.
    K <- fit$par[1]
    r <- fit$par[2]
    N0 <- fit$par[3]
    Time <- readings$Time
    predicted <- (K * N0 * exp(r * Time)) / (K + N0 * (exp(r * Time) - 1))
    fitted.readings$logistic.mle.N0 <- N0
    fitted.readings$logistic.mle.K <- K
    fitted.readings$logistic.mle.r <- r
    fitted.readings$logistic.mle.predicted <- predicted
  })
  # Pad with NAs for failed fits.
  if (inherits(try.test, "try-error")) {
    fitted.readings$logistic.mle.N0 <- NA
    fitted.readings$logistic.mle.K <- NA
    fitted.readings$logistic.mle.r <- NA
    fitted.readings$logistic.mle.predicted <- NA
  }
  return(fitted.readings)
}
|
/Growth curves/logistic_growth_mle_norm.R
|
no_license
|
low-decarie/Useful-R-functions
|
R
| false
| false
| 2,868
|
r
|
#logistic.growth.mle.norm
## NOTE(review): byte-identical duplicate of the definition above.
## Fits N(t) = K*N0*exp(r*t) / (K + N0*(exp(r*t)-1)) to readings$ABS over
## readings$Time by minimising the negative normal log-likelihood with
## optim(); failed fits pad the result columns with NA.
logistic.growth.mle.norm<-function(readings, printer=F, upper=2){
  if(printer){print(unique(readings$culture))}
  fitted.readings<-readings
  start.parameters=c(max(readings$ABS, na.rm=T), 1, min(readings$ABS, na.rm=T),diff(range(readings$ABS, na.rm=T)))
  #Log likelyhood function to be minimized
  like.growth<-function(parameters=start.parameters, readings){
    #Parameter extraction
    K<-parameters[1]
    r<-parameters[2]
    N0<-parameters[3]
    #alpha<-parameters[4]
    st.dev<-parameters[4]
    #Data extraction
    ABS<-readings$ABS
    Time<-readings$Time
    #Logistic growth model
    Nt<-(K*N0*exp(r*Time) ) / (K + N0 * (exp(r*Time)-1))
    #Nt<-(N0*K) / (N0 + (K-N0)*exp(-r*t)) #Synonymous model
    #log likelihood estimate
    #Nomral distribution
    likelihood<- -sum(dnorm(ABS, Nt, sd=st.dev, log=T))
    # Sanity bounds (remove if using "L-BFGS-B" or constrOptim
    # NA steers the default Nelder-Mead search away from invalid regions.
    if(any(c(Nt<0,
             Nt>upper,
             K>upper,
             N0<0,
             r<0,
             st.dev<0,
             st.dev>upper))){likelihood<-NA}
    return(likelihood)
  }
  try.test<-try({
    fit<-optim(par=start.parameters,
               fn=like.growth,
               readings=readings)
    # fit<-optim(par=c(1, 1, 0.01,0.1),
    #            fn=like.growth,
    #            readings=readings,
    #            method="L-BFGS-B",
    #            upper=c(50, 50, 50, 50),
    #            lower=c(0,0,0,0))
    # fit<-constrOptim(theta=c(1, 1, 0.01,0.1),
    #                  f=like.growth,
    #                  readings=readings,
    #                  ui=??,
    #                  ci=??)
    #
    #
    # library(stat4)
    # fit<-mle(start=c(1, 1, 0.01,0.1),
    #          minuslogl=like.growth,
    #          readings=readings)
    #extract fit values
    K<-fit$par[1]
    r<-fit$par[2]
    N0<-fit$par[3]
    Time<-readings$Time
    predicted<-(K*N0*exp(r*Time) ) / (K + N0 * (exp(r*Time)-1))
    fitted.readings$logistic.mle.N0<-N0
    fitted.readings$logistic.mle.K<-K
    fitted.readings$logistic.mle.r<-r
    fitted.readings$logistic.mle.predicted<-predicted
  })
  #Pad with NAs for failed fits
  if(class(try.test)=="try-error"){
    fitted.readings$logistic.mle.N0<-NA
    fitted.readings$logistic.mle.K<-NA
    fitted.readings$logistic.mle.r<-NA
    fitted.readings$logistic.mle.predicted<-NA
  }
  return(fitted.readings)
}
|
# Example searches against the biOmics index (requires the biOmics package,
# not loaded here).
query <- biOmics::biOmicsSearch("brain")
# Experiments
# "Microarray"-"ExpressionArray"-"ExonArray"-"RNASeq"
# "MiRNAMicroArray"-"Firehose"-"DNAMethylation "-"miRNASeq"-"RRBS"
# "ChipSeq"-"MRESeq"-"Rampage"-"DNAsequencing"
# "fiveC"-RepliSeq"-"Others"
# Restrict the search to a single experiment type from the list above.
query <- biOmics::biOmicsSearch("brain", experiment = "ExpressionArray")
|
/inst/examples/biomicsSearch.R
|
no_license
|
tiagochst/TCGAbiolinksGUI
|
R
| false
| false
| 327
|
r
|
# NOTE(review): byte-identical duplicate of the example block above.
query <- biOmics::biOmicsSearch("brain")
# Experiments
# "Microarray"-"ExpressionArray"-"ExonArray"-"RNASeq"
# "MiRNAMicroArray"-"Firehose"-"DNAMethylation "-"miRNASeq"-"RRBS"
# "ChipSeq"-"MRESeq"-"Rampage"-"DNAsequencing"
# "fiveC"-RepliSeq"-"Others"
# Restrict the search to a single experiment type from the list above.
query <- biOmics::biOmicsSearch("brain", experiment = "ExpressionArray")
|
# Load the household power consumption dataset and restrict it to the two
# days of interest (2007-02-01 and 2007-02-02).
load_data <- function()
{
  filename <- "HPC.txt"
  # na.strings spelled out: the original `na = "?"` relied on R's partial
  # argument matching of read.table arguments.
  EXP <- read.table(filename, header = TRUE, sep = ";", na.strings = "?")
  # Convert date and time variables to Date/Time classes. Time must be
  # built while Date is still the raw "%d/%m/%Y" character column.
  EXP$Time <- strptime(paste(EXP$Date, EXP$Time), "%d/%m/%Y %H:%M:%S")
  EXP$Date <- as.Date(EXP$Date, format = "%d/%m/%Y")
  # Only use data from the dates 2007-02-01 and 2007-02-02.
  select_date <- as.Date(c("2007-02-01", "2007-02-02"), "%Y-%m-%d")
  EXP <- subset(EXP, Date %in% select_date)
  # Explicit return (the original relied on the invisible value of the
  # final assignment).
  EXP
}

# Plot Global Active Power over time and save the figure to plot2.png.
plot2 <- function()
{
  EXP <- load_data()
  plot(EXP$Time, EXP$Global_active_power, type = "l",
       xlab = "", ylab = "Global Active Power (kilowatts)")
  dev.copy(png, file = "plot2.png", height = 480, width = 480)
  dev.off()
}
|
/Plot2A.R
|
no_license
|
rajthilakm/Course_Project_1
|
R
| false
| false
| 732
|
r
|
# Load dataset into R
# NOTE(review): byte-identical duplicate of the definition above.
# NOTE(review): `na="?"` relies on R's partial argument matching for
# read.table's `na.strings`; spell it out in future edits. The function
# returns the filtered data frame invisibly via the final assignment.
load_data <- function()
{
  filename <- "HPC.txt"
  EXP <- read.table(filename,header=TRUE,sep=";",na="?")
  # convert date and time variables to Date/Time class
  EXP$Time <- strptime(paste(EXP$Date, EXP$Time), "%d/%m/%Y %H:%M:%S")
  EXP$Date <- as.Date(EXP$Date, format="%d/%m/%Y")
  # only use data from the dates 2007-02-01 and 2007-02-02
  select_date <- as.Date(c("2007-02-01", "2007-02-02"), "%Y-%m-%d")
  EXP <- subset(EXP, Date %in% select_date)
}
# Plot Global Active Power over time and copy the screen device to plot2.png.
plot2<-function()
{
  #source(LoadData.R)
  EXP<-load_data()
  plot(EXP$Time, EXP$Global_active_power, type="l",xlab="",ylab="Global Active Power (kilowatts)")
  dev.copy(png, file="plot2.png", height=480, width=480)
  dev.off()
}
|
# Expect an error when neither equity nor price is supplied in the arguments
expect_error(totalEquity(ratio=0.1)())
# The supplied capital is floating capital; expected position is 100
# (presumably 10000 * 0.1 / 10 -- verify against totalEquity's definition)
pos = totalEquity(ratio=0.1)(initeq=10000,price=10)
expect_equal(pos,100)
|
/trade/SNPACKAGE/test/totalEquityTest.R
|
no_license
|
zhurui1351/RSTOCK_TRAIL
|
R
| false
| false
| 186
|
r
|
# NOTE(review): duplicate of the test block above.
# Expect an error when neither equity nor price is supplied in the arguments
expect_error(totalEquity(ratio=0.1)())
# The supplied capital is floating capital; expected position is 100
# (presumably 10000 * 0.1 / 10 -- verify against totalEquity's definition)
pos = totalEquity(ratio=0.1)(initeq=10000,price=10)
expect_equal(pos,100)
|
% Generated by roxygen2 (4.1.0): do not edit by hand
% Please edit documentation in R/entrez_info.r
\name{entrez_info}
\alias{entrez_info}
\title{Get information about EUtils databases}
\usage{
entrez_info(db = NULL, config = NULL)
}
\arguments{
\item{db}{character database about which to retrieve information (optional)}
\item{config}{config vector passed on to \code{httr::GET}}
}
\value{
XMLInternalDocument with information describing either all the
databases available in Eutils (if db is not set) or one particular database
(set by 'db')
}
\description{
Constructs a query to NCBI's einfo and returns a parsed XML object
Note: The most common uses-cases for the einfo util are finding the list of
search fields available for a given database or the other NCBI databases to
which records in a given database might be linked. Both these use cases
are implemented in higher-level functions that return just this information
(\code{entrez_db_searchable} and \code{entrez_db_links} respectively).
Consequently most users will not have a reason to use this function (though
it is exported by \code{rentrez} for the sake of completeness).
}
\examples{
\dontrun{
all_the_data <- entrez_info()
XML::xpathSApply(all_the_data, "//DbName", XML::xmlValue)
entrez_dbs()
}
}
\seealso{
\code{\link[httr]{config}} for available httr configurations
Other einfo: \code{\link{entrez_db_links}};
\code{\link{entrez_db_searchable}};
\code{\link{entrez_db_summary}}; \code{\link{entrez_dbs}}
}
|
/man/entrez_info.Rd
|
no_license
|
parthasen/rentrez
|
R
| false
| false
| 1,484
|
rd
|
% Generated by roxygen2 (4.1.0): do not edit by hand
% Please edit documentation in R/entrez_info.r
\name{entrez_info}
\alias{entrez_info}
\title{Get information about EUtils databases}
\usage{
entrez_info(db = NULL, config = NULL)
}
\arguments{
\item{db}{character database about which to retrieve information (optional)}
\item{config}{config vector passed on to \code{httr::GET}}
}
\value{
XMLInternalDocument with information describing either all the
databases available in Eutils (if db is not set) or one particular database
(set by 'db')
}
\description{
Constructs a query to NCBI's einfo and returns a parsed XML object
Note: The most common uses-cases for the einfo util are finding the list of
search fields available for a given database or the other NCBI databases to
which records in a given database might be linked. Both these use cases
are implemented in higher-level functions that return just this information
(\code{entrez_db_searchable} and \code{entrez_db_links} respectively).
Consequently most users will not have a reason to use this function (though
it is exported by \code{rentrez} for the sake of completeness).
}
\examples{
\dontrun{
all_the_data <- entrez_info()
XML::xpathSApply(all_the_data, "//DbName", XML::xmlValue)
entrez_dbs()
}
}
\seealso{
\code{\link[httr]{config}} for available httr configurations
Other einfo: \code{\link{entrez_db_links}};
\code{\link{entrez_db_searchable}};
\code{\link{entrez_db_summary}}; \code{\link{entrez_dbs}}
}
|
## makeCacheMatrix builds a "cache-aware" matrix: a list of closures that
## share the matrix `x` and its (lazily computed) inverse.
## Returned accessors:
##   set(value)        replace the matrix and invalidate the cached inverse
##   get()             return the current matrix
##   setinverse(value) store a computed inverse in the cache
##   getinverse()      return the cached inverse, or NULL if not yet set
makeCacheMatrix <- function(x = matrix()) {
  cached_inverse <- NULL
  list(
    set = function(value) {
      # Replacing the matrix invalidates any previously cached inverse.
      x <<- value
      cached_inverse <<- NULL
    },
    get = function() x,
    setinverse = function(value) cached_inverse <<- value,
    getinverse = function() cached_inverse
  )
}
## cacheSolve computes the inverse of the special matrix returned by
## makeCacheMatrix. If the inverse has already been calculated (and the
## matrix has not changed), the inverse is retrieved from the cache
## instead of being recomputed.
##
## Fix: `...` is now forwarded to solve(), as the signature advertises;
## the original accepted extra arguments but silently dropped them.
cacheSolve <- function(x, ...) {
  inv <- x$getinverse()
  if (!is.null(inv)) {
    message("getting cached data.")
    return(inv)
  }
  data <- x$get()
  inv <- solve(data, ...)
  x$setinverse(inv)
  inv
}
|
/cachematrix.R
|
no_license
|
mlticzon/ProgrammingAssignment2
|
R
| false
| false
| 812
|
r
|
## makecacheMatrix creates a special matrix object that can cache its inverse
## NOTE(review): byte-identical duplicate of the pair of definitions above.
makeCacheMatrix <- function(x = matrix()) {
  inv <- NULL
  set <- function(y) {
    # Replacing the matrix invalidates any cached inverse.
    x <<- y
    inv <<- NULL
  }
  get <- function() x
  setinverse <- function(inverse) inv <<- inverse
  getinverse <- function() inv
  list(set=set, get=get, setinverse=setinverse, getinverse=getinverse)
}
## cachesSolve computes the inverse of special matrix returned by makeCacheMatrix.
## If the inverse has already been calculated, then cacheSolve should retrieve the
## inverse from the cache
## NOTE(review): `...` is accepted but not forwarded to solve() here.
cacheSolve <- function(x, ...) {
  inv <- x$getinverse()
  if(!is.null(inv)) {
    message("getting cached data.")
    return(inv)
  }
  data <- x$get()
  inv <- solve(data)
  x$setinverse(inv)
  inv
}
|
library(crunch)
### Name: crunch-cut
### Title: Cut a numeric Crunch variable
### Aliases: crunch-cut cut,NumericVariable-method
### ** Examples
## Not run:
##D ds <- loadDataset("mtcars")
##D ds$cat_var <- cut(ds$mpg, breaks = c(10, 15, 20),
##D      labels = c("small", "medium"), name = "Fuel efficiency")
##D ds$age <- sample(1:100, 32)
## NOTE(review): the original example read `cut(df$age, ...)`; `df` is
## never defined in this example -- `ds$age` matches the preceding line.
##D ds$age4 <- cut(ds$age, c(0, 30, 45, 65, 200),
##D      c("Youth", "Adult", "Middle-aged", "Elderly"),
##D      name = "Age (4 category)")
## End(Not run)
|
/data/genthat_extracted_code/crunch/examples/crunch-cut.Rd.R
|
no_license
|
surayaaramli/typeRrh
|
R
| false
| false
| 518
|
r
|
## NOTE(review): duplicate of the example block above; `cut(df$age, ...)`
## presumably means `ds$age`, matching the preceding line.
library(crunch)
### Name: crunch-cut
### Title: Cut a numeric Crunch variable
### Aliases: crunch-cut cut,NumericVariable-method
### ** Examples
## Not run:
##D ds <- loadDataset("mtcars")
##D ds$cat_var <- cut(ds$mpg, breaks = c(10, 15, 20),
##D      labels = c("small", "medium"), name = "Fuel efficiency")
##D ds$age <- sample(1:100, 32)
##D ds$age4 <- cut(ds$age, c(0, 30, 45, 65, 200),
##D      c("Youth", "Adult", "Middle-aged", "Elderly"),
##D      name = "Age (4 category)")
## End(Not run)
|
## Converter for Progenesis output
## Thanks Ulrich Omasits : use R scripts by Ulrich Omasits, 2015, version 2.1
## output from Progenesis : wide format
## Converts a Progenesis peptide-level export (wide format, with a two-row
## grouped header: row 1 = group labels, row 2 = real column names) into the
## long 10-column format expected by MSstats.
## NOTE(review): melt()/dcast() are reshape2/data.table functions loaded
## elsewhere in the package, and get_nhc_link-style helpers are not needed
## here; only base R plus those reshapers are used.
#' @export
ProgenesistoMSstatsFormat <- function(input,
                                      annotation,
                                      useUniquePeptide=TRUE,
                                      summaryforMultipleRows=max,
                                      fewMeasurements="remove",
                                      removeOxidationMpeptides=FALSE,
                                      removeProtein_with1Peptide=FALSE){
    ##############################
    ## 0. check input
    ##############################
    ## there are space in column name
    ## Row 2 of `input` holds the real column names; normalize the variants.
    if (!is.element('Modifications', input[2, ])) {
        input[2, ][grep('modifications', input[2 ,])] <- 'Modifications'
    }
    if (!is.element('Protein', input[2,]) & is.element('Accession', input[2,])) {
        ## use 'Accession' for Protein ID
        input[2, ][input[2, ] == 'Accession'] <- 'Protein'
    }
    required.column <- c('Protein', 'Sequence', 'Charge', 'Modifications')
    if (length(grep('quantitation', input[2, ])) > 0){
        input[2, ][grep('quantitation', input[2, ])] <- 'Use.in.quantitation'
        required.column <- c(required.column, 'Use.in.quantitation')
    }
    if (!all(required.column %in% input[2, ])) {
        missedInput <- which(!(required.column %in% input[2, ]))
        stop(paste("**", toString(required.column[missedInput]),
                   "is not provided in input. Please check the input."))
    }
    ## check annotation
    required.annotation <- c('Run', 'BioReplicate', 'Condition')
    if (!all(required.annotation %in% colnames(annotation))) {
        missedAnnotation <- which(!(required.annotation %in% colnames(annotation)))
        stop(paste("**", toString(required.annotation[missedAnnotation]),
                   "is not provided in Annotation. Please check the annotation."))
    }
    ## check annotation information
    ## get annotation
    annotinfo <- unique(annotation[, c("Run", "Condition", 'BioReplicate')])
    ## Each Run should has unique information about condition and bioreplicate
    check.annot <- xtabs(~Run, annotinfo)
    if ( any(check.annot > 1) ) {
        stop('** Please check annotation. Each MS run can\'t have multiple conditions or BioReplicates.' )
    }
    ## get abundance information
    ## Keep the required metadata columns plus the block of per-run raw
    ## abundances (one column per annotated run, starting at Raw.abundance).
    if (is.element('Raw.abundance', colnames(input)) &
        is.element('Normalized.abundance', colnames(input))) {
        start.column <- which(colnames(input) == 'Raw.abundance')
        check.numRun <- which(colnames(input) == 'Normalized.abundance')
        if (start.column-check.numRun != nrow(annotation)) {
            stop(message('** Please check annotation file. The numbers of MS runs in annotation and output are not matched.'))
        }
        raw.abundance.column <- c(start.column:(start.column + nrow(annotation)-1))
        input <- input[, c(which(input[2, ] %in% required.column),
                           raw.abundance.column)]
    } else if (is.element('Raw.abundance', colnames(input))) {
        start.column <- which(colnames(input) == 'Raw.abundance')
        raw.abundance.column <- c(start.column:(start.column + nrow(annotation)-1))
        input <- input[, c(which(input[2, ] %in% required.column),
                           raw.abundance.column)]
    }
    ## Drop the group-label row, promote row 2 to column names, drop it too.
    input <- input[-1, ]
    colnames(input) <- input[1, ]
    input <- input[-1, ]
    ##############################
    ## 1. use only 'use in quantitation = true'
    ##############################
    if (is.element('Use.in.quantitation', colnames(input))) {
        ## value for use in quantitation is True vs False
        if (length( grep('True', unique(input$Use.in.quantitation))) > 0) {
            input <- input[input$Use.in.quantitation == 'True', ]
        } else if (length(grep('TRUE', unique(input$Use.in.quantitation))) > 0) {
            input <- input[input$Use.in.quantitation == TRUE, ]
        }
        input <- input[, -which(colnames(input) %in% c('Use.in.quantitation'))]
    }
    ##############################
    ## 2. modify column names and remove some columnts
    ##############################
    input <- input[!is.na(input$Protein) & input$Protein != '', ]
    input <- input[!is.na(input$Sequence) & input$Sequence != '', ]
    ## get modified sequence
    input$ModifiedSequence <- paste(input$Sequence, input$Modifications, sep="")
    ## remove completely duplicated rows
    input <- input[!duplicated(input), ]
    ################################################
    ## 3. remove the peptides including oxidation (M) sequence
    if (removeOxidationMpeptides) {
        remove_m_sequence <- unique(input[grep("Oxidation", input$ModifiedSequence), "ModifiedSequence"])
        if (length(remove_m_sequence) > 0) {
            input <- input[-which(input$ModifiedSequence %in% remove_m_sequence), ]
        }
        message('Peptides including oxidation(M) in the sequence are removed.')
    }
    ################################################
    ## 4. remove peptides which are used in more than one protein
    ## we assume to use unique peptide
    ################################################
    if (useUniquePeptide) {
        pepcount <- unique(input[, c("Protein", "Sequence")])
        pepcount$Sequence <- factor(pepcount$Sequence)
        ## count how many proteins are assigned for each peptide
        structure <- aggregate(Protein ~ ., data=pepcount, length)
        remove_peptide <- structure[structure$Protein!=1, ]
        ## remove the peptides which are used in more than one protein
        if (length(remove_peptide$Protein != 1) != 0) {
            input <- input[-which(input$Sequence %in% remove_peptide$Sequence), ]
        }
        message('** Peptides, that are used in more than one proteins, are removed.')
    }
    ##############################
    ## 5. remove multiple measurements per feature and run
    ##############################
    input <- input[, -which(colnames(input) %in% c('Sequence', 'Modifications'))]
    ## Wide -> long, collapse duplicates per feature/run via dcast with
    ## summaryforMultipleRows, then back to long format.
    input_remove <- melt(input, id=c('Protein', 'ModifiedSequence', 'Charge'))
    colnames(input_remove) <- c("ProteinName", "PeptideModifiedSequence", "PrecursorCharge", "Run", "Intensity")
    input_remove$Intensity <- as.double(input_remove$Intensity)
    ## maximum or sum up abundances among intensities for identical features within one run
    input <- dcast(ProteinName + PeptideModifiedSequence + PrecursorCharge ~ Run, data=input_remove,
                   value.var='Intensity',
                   fun.aggregate=summaryforMultipleRows, na.rm=T,
                   fill=NA_real_)
    ## reformat for long format
    input <- melt(input, id=c('ProteinName', 'PeptideModifiedSequence', 'PrecursorCharge'))
    colnames(input)[which(colnames(input) %in% c('variable','value'))] <- c("Run","Intensity")
    message('** Multiple measurements in a feature and a run are summarized by summaryforMultipleRows.')
    ##############################
    ## 6. add annotation
    ##############################
    input <- merge(input, annotation, by="Run", all=TRUE)
    ## add other required information
    ## FragmentIon/ProductCharge are NA for peptide-level (label-free) data.
    input$FragmentIon <- NA
    input$ProductCharge <- NA
    input$IsotopeLabelType <- "L"
    input.final <- data.frame("ProteinName" = input$ProteinName,
                              "PeptideModifiedSequence" = input$PeptideModifiedSequence,
                              "PrecursorCharge" = input$PrecursorCharge,
                              "FragmentIon" = input$FragmentIon,
                              "ProductCharge" = input$ProductCharge,
                              "IsotopeLabelType" = input$IsotopeLabelType,
                              "Condition" = input$Condition,
                              "BioReplicate" = input$BioReplicate,
                              "Run" = input$Run,
                              "Intensity" = input$Intensity)
    if (any(is.element(colnames(input), 'Fraction'))) {
        input.final <- data.frame(input.final,
                                  "Fraction" = input$Fraction)
    }
    input <- input.final
    rm(input.final)
    ##############################
    ## 7. remove features which has 1 or 2 measurements across runs
    ##############################
    if (fewMeasurements == "remove") {
        ## it is the same across experiments. # measurement per feature.
        input <- .remove_feature_with_few_progenesis(input)
    }
    ##############################
    ## 8. remove proteins with only one peptide and charge per protein
    ##############################
    if (removeProtein_with1Peptide) {
        ##remove protein which has only one peptide
        input$feature <- paste(input$PeptideModifiedSequence,
                               input$PrecursorCharge,
                               input$FragmentIon,
                               input$ProductCharge,
                               sep="_")
        tmp <- unique(input[, c("ProteinName", 'feature')])
        tmp$ProteinName <- factor(tmp$ProteinName)
        count <- xtabs( ~ ProteinName, data=tmp)
        lengthtotalprotein <- length(count)
        removepro <- names(count[count <= 1])
        if (length(removepro) > 0) {
            input <- input[-which(input$ProteinName %in% removepro), ]
            message(paste0("** ", length(removepro),
                           ' proteins, which have only one feature in a protein, are removed among ',
                           lengthtotalprotein, ' proteins.'))
        }
        input <- input[, -which(colnames(input) %in% c('feature'))]
    }
    input$ProteinName <- input$ProteinName
    return(input)
}
## Drop features (peptide+charge) observed with a positive, non-missing
## intensity in fewer than 3 runs.
.remove_feature_with_few_progenesis <- function(x){
    xtmp <- x[!is.na(x$Intensity) & x$Intensity > 0, ]
    xtmp$feature <- paste(xtmp$PeptideModifiedSequence, xtmp$PrecursorCharge, sep="_")
    count_measure <- xtabs( ~feature, xtmp)
    remove_feature_name <- count_measure[count_measure < 3]
    x$feature <- paste(x$PeptideModifiedSequence, x$PrecursorCharge, sep="_")
    if (length(remove_feature_name) > 0) {
        x <- x[-which(x$feature %in% names(remove_feature_name)), ]
    }
    x <- x[, -which(colnames(x) %in% c('feature'))]
    return(x)
}
|
/R/ProgenesistoMSstatsFormat.R
|
no_license
|
bpolacco/MSstats
|
R
| false
| false
| 10,557
|
r
|
## Converter for Progenesis output
## Thanks Ulrich Omasits : use R scripts by Ulrich Omasits, 2015, version 2.1
## output from Progenesis : wide format
## NOTE(review): byte-identical duplicate of the definition above (wide
## Progenesis export -> long MSstats format); see that copy for detailed
## step-by-step commentary. melt()/dcast() come from reshape2/data.table,
## loaded elsewhere.
#' @export
ProgenesistoMSstatsFormat <- function(input,
                                      annotation,
                                      useUniquePeptide=TRUE,
                                      summaryforMultipleRows=max,
                                      fewMeasurements="remove",
                                      removeOxidationMpeptides=FALSE,
                                      removeProtein_with1Peptide=FALSE){
    ##############################
    ## 0. check input
    ##############################
    ## there are space in column name
    if (!is.element('Modifications', input[2, ])) {
        input[2, ][grep('modifications', input[2 ,])] <- 'Modifications'
    }
    if (!is.element('Protein', input[2,]) & is.element('Accession', input[2,])) {
        ## use 'Accession' for Protein ID
        input[2, ][input[2, ] == 'Accession'] <- 'Protein'
    }
    required.column <- c('Protein', 'Sequence', 'Charge', 'Modifications')
    if (length(grep('quantitation', input[2, ])) > 0){
        input[2, ][grep('quantitation', input[2, ])] <- 'Use.in.quantitation'
        required.column <- c(required.column, 'Use.in.quantitation')
    }
    if (!all(required.column %in% input[2, ])) {
        missedInput <- which(!(required.column %in% input[2, ]))
        stop(paste("**", toString(required.column[missedInput]),
                   "is not provided in input. Please check the input."))
    }
    ## check annotation
    required.annotation <- c('Run', 'BioReplicate', 'Condition')
    if (!all(required.annotation %in% colnames(annotation))) {
        missedAnnotation <- which(!(required.annotation %in% colnames(annotation)))
        stop(paste("**", toString(required.annotation[missedAnnotation]),
                   "is not provided in Annotation. Please check the annotation."))
    }
    ## check annotation information
    ## get annotation
    annotinfo <- unique(annotation[, c("Run", "Condition", 'BioReplicate')])
    ## Each Run should has unique information about condition and bioreplicate
    check.annot <- xtabs(~Run, annotinfo)
    if ( any(check.annot > 1) ) {
        stop('** Please check annotation. Each MS run can\'t have multiple conditions or BioReplicates.' )
    }
    ## get abundance information
    if (is.element('Raw.abundance', colnames(input)) &
        is.element('Normalized.abundance', colnames(input))) {
        start.column <- which(colnames(input) == 'Raw.abundance')
        check.numRun <- which(colnames(input) == 'Normalized.abundance')
        if (start.column-check.numRun != nrow(annotation)) {
            stop(message('** Please check annotation file. The numbers of MS runs in annotation and output are not matched.'))
        }
        raw.abundance.column <- c(start.column:(start.column + nrow(annotation)-1))
        input <- input[, c(which(input[2, ] %in% required.column),
                           raw.abundance.column)]
    } else if (is.element('Raw.abundance', colnames(input))) {
        start.column <- which(colnames(input) == 'Raw.abundance')
        raw.abundance.column <- c(start.column:(start.column + nrow(annotation)-1))
        input <- input[, c(which(input[2, ] %in% required.column),
                           raw.abundance.column)]
    }
    input <- input[-1, ]
    colnames(input) <- input[1, ]
    input <- input[-1, ]
    ##############################
    ## 1. use only 'use in quantitation = true'
    ##############################
    if (is.element('Use.in.quantitation', colnames(input))) {
        ## value for use in quantitation is True vs False
        if (length( grep('True', unique(input$Use.in.quantitation))) > 0) {
            input <- input[input$Use.in.quantitation == 'True', ]
        } else if (length(grep('TRUE', unique(input$Use.in.quantitation))) > 0) {
            input <- input[input$Use.in.quantitation == TRUE, ]
        }
        input <- input[, -which(colnames(input) %in% c('Use.in.quantitation'))]
    }
    ##############################
    ## 2. modify column names and remove some columnts
    ##############################
    input <- input[!is.na(input$Protein) & input$Protein != '', ]
    input <- input[!is.na(input$Sequence) & input$Sequence != '', ]
    ## get modified sequence
    input$ModifiedSequence <- paste(input$Sequence, input$Modifications, sep="")
    ## remove completely duplicated rows
    input <- input[!duplicated(input), ]
    ################################################
    ## 3. remove the peptides including oxidation (M) sequence
    if (removeOxidationMpeptides) {
        remove_m_sequence <- unique(input[grep("Oxidation", input$ModifiedSequence), "ModifiedSequence"])
        if (length(remove_m_sequence) > 0) {
            input <- input[-which(input$ModifiedSequence %in% remove_m_sequence), ]
        }
        message('Peptides including oxidation(M) in the sequence are removed.')
    }
    ################################################
    ## 4. remove peptides which are used in more than one protein
    ## we assume to use unique peptide
    ################################################
    if (useUniquePeptide) {
        pepcount <- unique(input[, c("Protein", "Sequence")])
        pepcount$Sequence <- factor(pepcount$Sequence)
        ## count how many proteins are assigned for each peptide
        structure <- aggregate(Protein ~ ., data=pepcount, length)
        remove_peptide <- structure[structure$Protein!=1, ]
        ## remove the peptides which are used in more than one protein
        if (length(remove_peptide$Protein != 1) != 0) {
            input <- input[-which(input$Sequence %in% remove_peptide$Sequence), ]
        }
        message('** Peptides, that are used in more than one proteins, are removed.')
    }
    ##############################
    ## 5. remove multiple measurements per feature and run
    ##############################
    input <- input[, -which(colnames(input) %in% c('Sequence', 'Modifications'))]
    input_remove <- melt(input, id=c('Protein', 'ModifiedSequence', 'Charge'))
    colnames(input_remove) <- c("ProteinName", "PeptideModifiedSequence", "PrecursorCharge", "Run", "Intensity")
    input_remove$Intensity <- as.double(input_remove$Intensity)
    ## maximum or sum up abundances among intensities for identical features within one run
    input <- dcast(ProteinName + PeptideModifiedSequence + PrecursorCharge ~ Run, data=input_remove,
                   value.var='Intensity',
                   fun.aggregate=summaryforMultipleRows, na.rm=T,
                   fill=NA_real_)
    ## reformat for long format
    input <- melt(input, id=c('ProteinName', 'PeptideModifiedSequence', 'PrecursorCharge'))
    colnames(input)[which(colnames(input) %in% c('variable','value'))] <- c("Run","Intensity")
    message('** Multiple measurements in a feature and a run are summarized by summaryforMultipleRows.')
    ##############################
    ## 6. add annotation
    ##############################
    input <- merge(input, annotation, by="Run", all=TRUE)
    ## add other required information
    input$FragmentIon <- NA
    input$ProductCharge <- NA
    input$IsotopeLabelType <- "L"
    input.final <- data.frame("ProteinName" = input$ProteinName,
                              "PeptideModifiedSequence" = input$PeptideModifiedSequence,
                              "PrecursorCharge" = input$PrecursorCharge,
                              "FragmentIon" = input$FragmentIon,
                              "ProductCharge" = input$ProductCharge,
                              "IsotopeLabelType" = input$IsotopeLabelType,
                              "Condition" = input$Condition,
                              "BioReplicate" = input$BioReplicate,
                              "Run" = input$Run,
                              "Intensity" = input$Intensity)
    if (any(is.element(colnames(input), 'Fraction'))) {
        input.final <- data.frame(input.final,
                                  "Fraction" = input$Fraction)
    }
    input <- input.final
    rm(input.final)
    ##############################
    ## 7. remove features which has 1 or 2 measurements across runs
    ##############################
    if (fewMeasurements == "remove") {
        ## it is the same across experiments. # measurement per feature.
        input <- .remove_feature_with_few_progenesis(input)
    }
    ##############################
    ## 8. remove proteins with only one peptide and charge per protein
    ##############################
    if (removeProtein_with1Peptide) {
        ##remove protein which has only one peptide
        input$feature <- paste(input$PeptideModifiedSequence,
                               input$PrecursorCharge,
                               input$FragmentIon,
                               input$ProductCharge,
                               sep="_")
        tmp <- unique(input[, c("ProteinName", 'feature')])
        tmp$ProteinName <- factor(tmp$ProteinName)
        count <- xtabs( ~ ProteinName, data=tmp)
        lengthtotalprotein <- length(count)
        removepro <- names(count[count <= 1])
        if (length(removepro) > 0) {
            input <- input[-which(input$ProteinName %in% removepro), ]
            message(paste0("** ", length(removepro),
                           ' proteins, which have only one feature in a protein, are removed among ',
                           lengthtotalprotein, ' proteins.'))
        }
        input <- input[, -which(colnames(input) %in% c('feature'))]
    }
    input$ProteinName <- input$ProteinName
    return(input)
}
## Drop features (peptide+charge) observed with a positive, non-missing
## intensity in fewer than 3 runs.
.remove_feature_with_few_progenesis <- function(x){
    xtmp <- x[!is.na(x$Intensity) & x$Intensity > 0, ]
    xtmp$feature <- paste(xtmp$PeptideModifiedSequence, xtmp$PrecursorCharge, sep="_")
    count_measure <- xtabs( ~feature, xtmp)
    remove_feature_name <- count_measure[count_measure < 3]
    x$feature <- paste(x$PeptideModifiedSequence, x$PrecursorCharge, sep="_")
    if (length(remove_feature_name) > 0) {
        x <- x[-which(x$feature %in% names(remove_feature_name)), ]
    }
    x <- x[, -which(colnames(x) %in% c('feature'))]
    return(x)
}
|
#' @title gis_advisory
#' @description Advisory Forecast Track, Cone of Uncertainty, and
#' Watches/Warnings
#' @param key Key of storm (i.e., AL012008, EP092015)
#' @param advisory Advisory number. If NULL, all advisories are returned.
#' Intermediate advisories are acceptable.
#' @return Character vector of URLs to zipped GIS forecast datasets.
#' @seealso \code{\link{gis_download}}
#' @export
gis_advisory <- function(key, advisory = as.character()) {
  if (is.null(key))
    stop("Please provide storm key")
  key <- stringr::str_to_lower(key)
  # Keys look like "al092017": two-letter basin, 2-digit storm number,
  # 4-digit year.
  if (!grepl("^[[:lower:]]{2}[[:digit:]]{6}$", key))
    stop("Invalid key")
  key <- stringr::str_match(key,
                            pattern = paste0("([:lower:]{2})([:digit:]{2})",
                                             "([:digit:]{4})"))
  names(key) <- c("original", "basin", "year_num", "year")
  # Get list of GIS forecast zips for storm and download
  url <- sprintf("%sgis/archive_forecast_results.php?id=%s%s&year=%s",
                 get_nhc_link(), key[["basin"]], key[["year_num"]],
                 key[["year"]])
  contents <- readr::read_lines(url)
  # Match zip files. If advisory is empty then need to pull all zip files
  # for the storm. Otherwise, pull only the selected advisory (advisory
  # numbers are zero-padded to 3 digits in the archive filenames; a trailing
  # letter marks an intermediate advisory).
  if (purrr::is_empty(advisory)) {
    ptn <- sprintf(".+(forecast/archive/%s.*?\\.zip).+",
                   stringr::str_to_lower(key[["original"]]))
  } else {
    advisory <- stringr::str_match(advisory, "([:digit:]{1,3})([:alpha:]*)")
    names(advisory) <- c("original", "advisory", "int_adv")
    # Consistency fix: key[["original"]] (double bracket) like every other
    # access of `key` in this function.
    ptn <- sprintf(".+(forecast/archive/%s.*?%s%s\\.zip).+",
                   stringr::str_to_lower(key[["original"]]),
                   stringr::str_pad(string = advisory[["advisory"]],
                                    width = 3, side = "left", pad = "0"),
                   advisory[["int_adv"]])
  }
  matches <- contents[stringr::str_detect(contents, pattern = ptn)]
  # Extract link to zip files. Error gracefully if no matches.
  tryCatch(links <- stringr::str_match(matches, pattern = ptn)[, 2],
           error = function(c) {
             # Fixed typo in the user-facing message ("avaialable").
             c$message <- "No data available for requested storm/advisory"
             stop(c$message, call. = FALSE)
           })
  # Append website domain to links
  links <- paste0("http://www.nhc.noaa.gov/gis/", links)
  return(links)
}
#' @title gis_breakpoints
#' @description Return link to breakpoints shapefile by year
#' @param year Default is current year. Breakpoints only available >= 2008.
#' @details Coastal areas placed under tropical storm and hurricane watches and
#' warnings are identified through the use of "breakpoints." A tropical cyclone
#' breakpoint is defined as an agreed upon coastal location that can be chosen
#' as one of two specific end points or designated places between which a
#' tropical storm/hurricane watch/warning is in effect. The U.S. National
#' Weather Service designates the locations along the U.S. East, Gulf, and
#' California coasts, Puerto Rico, and Hawaii. These points are listed in NWS
#' Directive 10-605 (PDF). Individual countries across the Caribbean, Central
#' America, and South America provide coastal locations for their areas of
#' responsibility to the U.S. National Weather Service for the National
#' Hurricane Center's use in tropical cyclone advisories when watches/warnings
#' are issued by international partners. The National Hurricane Center maintains
#' a list of pre-arranged breakpoints for the U.S. Atlantic and Gulf coasts,
#' Mexico, Cuba and the Bahamas. Other sites are unofficial and sites not on the
#' list can be selected if conditions warrant.
#' @export
gis_breakpoints <- function(year = as.numeric(strftime(Sys.Date(), "%Y"))) {
    # xpath pattern
    xp <- "//a"
    # POST the year to the archive form, scrape every <a href> from the
    # response, keep only hrefs ending in "Breakpoints_<year>.zip"
    # (str_extract yields NA for the rest; complete.cases drops them).
    # Uses the magrittr pipe and `.` placeholder, which must be attached
    # elsewhere in the package.
    links <- httr::POST("http://www.nhc.noaa.gov/gis/archive_breakpoints.php",
                        body = list(year = year), encode = "form") %>%
        httr::content(as = "parsed", encoding = "UTF-8") %>%
        rvest::html_nodes(xpath = xp) %>%
        rvest::html_attr("href") %>%
        stringr::str_extract(sprintf("Breakpoints_%s\\.zip$", year)) %>%
        .[stats::complete.cases(.)]
    # NULL signals "no breakpoints published for that year".
    if (purrr::is_empty(links))
        return(NULL)
    links <- paste0("http://www.nhc.noaa.gov/gis/breakpoints/archive/", links)
    return(links)
}
#' @title gis_download
#' @description Get GIS data for storm.
#' @param url link to GIS zip dataset to download.
#' @param ... additional parameters for rgdal::readOGR
#' @return A named list of Spatial* objects, one per shapefile found in the
#'   downloaded zip archive. Names are the shapefile base names with
#'   punctuation and spaces replaced by underscores.
#' @export
gis_download <- function(url, ...) {
  destdir <- tempdir()
  zip_file <- tempfile(fileext = ".zip")
  utils::download.file(url, zip_file)
  zip_contents <- utils::unzip(zip_file, list = TRUE)$Name
  # Remove the extracted files and the downloaded archive even if reading a
  # layer fails part-way through.
  on.exit(unlink(c(file.path(destdir, zip_contents), zip_file)), add = TRUE)
  utils::unzip(zip_file, exdir = destdir)
  # Only the .shp members are read explicitly; readOGR picks up the sidecar
  # files (.dbf, .shx, .prj, ...) from the same directory automatically.
  shp_files <- stringr::str_match(zip_contents, pattern = ".+shp$") %>%
    .[!is.na(.)]
  ds <- purrr::map(shp_files, function(f) {
    layer <- stringr::str_match(f, "^(.+)\\.shp$")[, 2]
    rgdal::readOGR(dsn = destdir, layer = layer,
                   encoding = "UTF-8",
                   stringsAsFactors = FALSE,
                   use_iconv = TRUE,
                   ...)
  })
  # Name each dataset after its shapefile, normalising punctuation/spaces so
  # the names are valid list accessors.
  names(ds) <- stringr::str_match(shp_files, "^(.+)\\.shp$")[, 2] %>%
    stringr::str_replace_all("[[:punct:][:space:]]", "_")
  return(ds)
}
#' @title gis_latest
#' @description Latest GIS datasets for active cyclones
#' @param basins AL and/or EP.
#' @param ... additional parameters for rgdal::readOGR
#' @return A list of spatial datasets (one entry per advertised zip) when any
#'   are available; FALSE when the feeds list no GIS zip files.
#' @export
gis_latest <- function(basins = c("AL", "EP"), ...) {
  if (!(all(basins %in% c("AL", "EP"))))
    stop("Invalid basin")
  # Each basin publishes an XML feed listing its latest GIS products.
  urls <- list("AL" = "http://www.nhc.noaa.gov/gis-at.xml",
               "EP" = "http://www.nhc.noaa.gov/gis-ep.xml")
  # Collect every <link> element from the requested feeds, then keep only the
  # links that point at zip archives (non-matches become NA and are dropped).
  gis_zips <- purrr::map(basins, ~ xml2::read_xml(urls[[.x]])) %>%
    purrr::map(~ xml2::xml_find_all(.x, xpath = ".//link") %>%
                 xml2::xml_text()) %>%
    purrr::map(stringr::str_match, ".+\\.zip$") %>%
    purrr::flatten_chr() %>%
    .[!is.na(.)]
  if (!purrr::is_empty(gis_zips)) {
    # Download and read each advertised dataset.
    ds <- purrr::map(gis_zips, gis_download, ...)
    return(ds)
  }
  return(FALSE)
}
#' @title gis_outlook
#' @description Tropical Weather Outlook
#' @return URL (character scalar) of the current Tropical Weather Outlook
#'   shapefile zip. The location is static, so no parameters are needed.
#' @seealso \code{\link{gis_download}}
#' @export
gis_outlook <- function() {
  # This dataset is not archived; there is a single, fixed download location.
  "http://www.nhc.noaa.gov/xgtwo/gtwo_shapefiles.zip"
}
#' @title gis_prob_storm_surge
#' @description Probabilistic Storm Surge
#' @param key Key of storm (i.e., AL012008, EP092015)
#' @param products list of products and associated n values; psurge (0:20) or
#'   esurge (10, 20, 30, 40, 50).
#' @param datetime Datetime in \%Y\%m\%d\%H format.
#' @return Character vector of links to matching storm-surge zip datasets.
#' @details Probabilistic Storm Surge Forecasts
#' @section Products:
#' \describe{
#'   \item{esurge}{The Tropical Cyclone Storm Surge Exceedances (P-Surge 2.5)
#'     data shows the probability, in percent, of a specified storm surge,
#'     including tides, exceeding the specified height, in feet, during
#'     the forecast period indicated. The 10 percent exceedance height,
#'     for example, is the storm surge height, including tides, above
#'     ground level (AGL) such that there is a 10 percent chance of
#'     exceeding it. The product is based upon an ensemble of Sea, Lake,
#'     and Overland Surge from Hurricanes (SLOSH) model runs using the
#'     National Hurricane Center (NHC) official advisory and accounts for
#'     track, size, and intensity errors based on historical errors and
#'     astronomical tide. Valid values are 10, 20, 30, 40 or 50.}
#'   \item{psurge}{The Tropical Cyclone Storm Surge Probabilities (P-Surge
#'     2.5) data shows the probability, in percent, of a specified storm
#'     surge occurring during the forecast period indicated. The product
#'     is based upon an ensemble of Sea, Lake, and Overland Surge from
#'     Hurricanes (SLOSH) model runs using the National Hurricane Center
#'     (NHC) official advisory and accounts for track, size, and intensity
#'     errors based on historical errors and astronomical tide. Valid
#'     values are 0:20.}
#' }
#' @seealso \href{http://www.nhc.noaa.gov/surge/psurge.php}{Tropical Cyclone Storm Surge Probabilities}
#' @seealso \code{\link{gis_download}}
#' @examples
#' \dontrun{
#' # Return the last psurge0 product for storm AL092016
#' gis_prob_storm_surge("AL092016", products = list("psurge" = 0))
#'
#' # Return the psurge0 and esurge10 products for storm AL092016
#' gis_prob_storm_surge("AL092016", products = list("psurge" = 0, "esurge" = 10))
#'
#' # Return all psurge0 products for Sep 2, 2016, storm AL092016
#' gis_prob_storm_surge("AL092016", products = list("psurge" = 0),
#'                      datetime = "20160902")
#' }
#' @export
gis_prob_storm_surge <- function(key, products, datetime = NULL) {
  if (is.null(key))
    stop("Please provide storm key")
  # Validate products
  if (!(all(names(products) %in% c("psurge", "esurge"))))
    stop("Invalid product. Must be psurge and/or esurge")
  if (!is.null(products[["psurge"]]))
    if (!(all(dplyr::between(products[["psurge"]], 0, 20))))
      stop("psurge values must be between 0 and 20")
  if (!is.null(products[["esurge"]]))
    if (!(all(products[["esurge"]] %in% seq(10, 50, by = 10))))
      stop("esurge values must be 10, 20, 30, 40 or 50")
  key <- stringr::str_to_lower(key)
  if (!grepl("^[[:lower:]]{2}[[:digit:]]{6}$", key))
    stop("Invalid key")
  # Split key into basin (2 letters), storm number (2 digits), year (4 digits).
  key <- stringr::str_match(key, pattern = paste0("([:lower:]{2})([:digit:]",
                                                  "{2})([:digit:]{4})"))
  names(key) <- c("original", "basin", "year_num", "year")
  # Get list of GIS forecast zips for storm and download
  url <- sprintf("%sgis/archive_psurge_results.php?id=%s%s&year=%s",
                 get_nhc_link(), key[["basin"]], key[["year_num"]],
                 key[["year"]])
  contents <- readr::read_lines(url)
  # Build product pattern, e.g. list(psurge = 0, esurge = 10) ->
  # c("psurge0", "esurge10")
  ptn_product <- names(products) %>%
    purrr::map(.f = function(x) paste0(x, products[[x]])) %>%
    purrr::flatten_chr()
  # Build datetime pattern
  if (is.null(datetime)) {
    ptn_datetime <- "[:digit:]+"
  } else {
    # If x$datetime is 10 digits, then user is looking for specific datetime
    # value. Pattern must be that value.
    if (grepl("[[:digit:]]{10}", datetime)) {
      ptn_datetime <- datetime
    } else {
      # Otherwise, x$datetime is beginning of pattern with wildcard at end
      ptn_datetime <- paste0(datetime, "[:digit:]+")
    }
  }
  # Match zip files.
  ptn <- sprintf(".+(storm_surge/%s_(%s)_(%s)\\.zip).+",
                 stringr::str_to_lower(key[["original"]]),
                 paste(ptn_product, collapse = "|"),
                 ptn_datetime)
  ds <- contents[stringr::str_detect(contents, pattern = ptn)]
  # Extract link to zip files. Error gracefully if no matches.
  tryCatch(links <- stringr::str_match(ds, pattern = ptn)[,2],
           error = function(c) {
             c$message <- "No data available for requested storm/advisory"
             stop(c$message, call. = FALSE)
           })
  # Prepend domains to links
  links <- paste0("http://www.nhc.noaa.gov/gis/", links)
  return(links)
}
#' @title gis_storm_surge_flood
#' @description Potential Storm Surge Flooding (Inundation)
#' @param key Key of storm (i.e., AL012008, EP092015)
#' @param advisory Advisory number. If NULL, all available advisories are
#' returned.
#' @param products indundation or tidalmask
#' @return Character vector of links to zip datasets. Note these zips contain
#'   raster data, not shapefiles.
#' @seealso \code{\link{gis_download}}
#' @keywords internal
gis_storm_surge_flood <- function(key, advisory = as.numeric(),
                                  products = c("inundation", "tidalmask")) {
  warning("These are raster files, not shapefiles.")
  if (is.null(key))
    stop("Please provide storm key")
  key <- stringr::str_to_upper(key)
  if (!grepl("^[[:alpha:]]{2}[[:digit:]]{6}$", key))
    stop("Invalid key")
  if (!(any(products %in% c("inundation", "tidalmask"))))
    stop("Invalid products")
  # Split key into basin (2 letters), storm number (2 digits), year (4 digits).
  key <- stringr::str_match(key, pattern = paste0("([:alpha:]{2})([:digit:]",
                                                  "{2})([:digit:]{4})"))
  names(key) <- c("original", "basin", "year_num", "year")
  # Get list of GIS zips for storm and download
  url <- sprintf("%sgis/archive_inundation_results.php?id=%s%s&year=%s",
                 get_nhc_link(), key[["basin"]], key[["year_num"]],
                 key[["year"]])
  contents <- readr::read_lines(url)
  # File names embed a two-digit year, hence the str_sub on the 4-digit year.
  # Without an advisory, match any one- or two-digit advisory number;
  # otherwise, zero-pad the requested advisory to two digits.
  if (purrr::is_empty(advisory)) {
    ptn <- sprintf(".+(%s/%s%s%s_[:digit:]{1,2}_(%s)\\.zip).+",
                   "inundation/forecasts",
                   key[["basin"]],
                   key[["year_num"]],
                   stringr::str_sub(key[["year"]], start = 3L, end = 4L),
                   paste(products, collapse = "|"))
  } else {
    ptn <- sprintf(".+(inundation/forecasts/%s%s%s_%s_(%s)\\.zip).+",
                   key[["basin"]],
                   key[["year_num"]],
                   stringr::str_sub(key[["year"]], start = 3L, end = 4L),
                   stringr::str_pad(advisory, width = 2, side = "left",
                                    pad = "0"),
                   paste(products, collapse = "|"))
  }
  matches <- contents[stringr::str_detect(contents, pattern = ptn)]
  # Extract link to zip files. Error gracefully if no matches.
  tryCatch(links <- stringr::str_match(matches, pattern = ptn)[,2],
           error = function(c) {
             c$message <- "No data available for requested storm/advisory"
             stop(c$message, call. = FALSE)
           })
  # Prepend website domain to links
  links <- paste0("http://www.nhc.noaa.gov/gis/", links)
  return(links)
}
#' @title gis_windfield
#' @description Advisory Wind Field and Forecast Wind Radii
#' @param key Key of storm (i.e., AL012008, EP092015)
#' @param advisory Advisory number. If NULL, all advisories are returned.
#' Intermediate advisories are acceptable.
#' @return Character vector of links to zip datasets for the storm/advisory.
#' @details Tropical Cyclone Advisory Wind Field
#' http://www.nhc.noaa.gov/gis/archive_forecast_info_results.php?id=al14&year=2016
#' http://www.nhc.noaa.gov/gis/forecast/archive/
#' Example file name: al012017_fcst_001.zip
#' [basin]{2}[year_num]{2}[year]{4}_fcst_[advisory]{3}.zip
#' Many storms do not appear to have this data; especially earlier.
#'
#' Not all advisories will be available for storms. For example,
#' \href{http://www.nhc.noaa.gov/gis/archive_forecast_info_results.php?id=al14&year=2016}{Hurricane Matthew (AL142016)}
#' is missing several advisories.
#' @seealso \code{\link{gis_download}}
#' @export
gis_windfield <- function(key, advisory = as.character()) {
  if (is.null(key))
    stop("Please provide storm key")
  key <- stringr::str_to_lower(key)
  if (!grepl("^[[:lower:]]{2}[[:digit:]]{6}$", key))
    stop("Invalid key")
  # Split key into basin (2 letters), storm number (2 digits), year (4 digits).
  key <- stringr::str_match(key, pattern = paste0("([:lower:]{2})([:digit:]",
                                                  "{2})([:digit:]{4})"))
  names(key) <- c("original", "basin", "year_num", "year")
  # Get list of GIS forecast zips for storm and download
  url <- sprintf("%sgis/archive_forecast_info_results.php?id=%s%s&year=%s",
                 get_nhc_link(), key[["basin"]], key[["year_num"]],
                 key[["year"]])
  contents <- readr::read_lines(url)
  # Match zip files. If advisory is empty then need to pull all zip files for
  # the storm. Otherwise, pull only selected advisory.
  if (purrr::is_empty(advisory)) {
    ptn <- sprintf(".+(forecast/archive/%s.*?\\.zip).+",
                   stringr::str_to_lower(key[["original"]]))
  } else {
    # Split the advisory into the numeric part and an optional intermediate
    # letter (e.g. "3a" -> "3", "a"); the number is zero-padded to 3 digits.
    advisory <- stringr::str_match(advisory, "([:digit:]{1,3})([:alpha:]*)")
    names(advisory) <- c("original", "advisory", "int_adv")
    ptn <- sprintf(".+(forecast/archive/%s.*?%s%s\\.zip).+",
                   stringr::str_to_lower(key[["original"]]),
                   stringr::str_pad(string = advisory[["advisory"]],
                                    width = 3, side = "left", pad = "0"),
                   advisory[["int_adv"]])
  }
  matches <- contents[stringr::str_detect(contents, pattern = ptn)]
  # Extract link to zip files. Error gracefully if no matches.
  tryCatch(links <- stringr::str_match(matches, pattern = ptn)[,2],
           error = function(c) {
             c$message <- "No data available for requested storm/advisory"
             stop(c$message, call. = FALSE)
           })
  links <- paste0("http://www.nhc.noaa.gov/gis/", links)
  return(links)
}
#' @title gis_wsp
#' @description Wind Speed Probabilities
#' @param datetime Datetime in \%Y\%m\%d\%H format. \%m, \%d and \%H are
#' optional but will return more datasets.
#' @param res Resolution as a numeric vector; 5, 0.5, 0.1.
#' @return Character vector of links to matching zip datasets.
#' @details Probability winds affecting an area within a forecast period.
#' Datasets contain windfields for 34kt, 50kt and 64kt. Resolution is at 5km,
#' 0.5 degrees and 0.1 degrees. Not all resolutions may be available for all
#' storms. Not all windfields will be available for all advisories.
#' @seealso \code{\link{gis_download}}
#' @examples
#' \dontrun{
#' # Return datasets for January 1, 2016 with resolution of 0.5 degrees
#' gis_wsp("20160101", res = 0.5)
#'
#' # Return wsp of 0.1 and 0.5 degree resolution, July, 2015
#' gis_wsp("201507", res = c(0.5, 0.1))
#' }
#' @export
gis_wsp <- function(datetime, res = c(5, 0.5, 0.1)) {
  if (!grepl("[[:digit:]]{4,10}", datetime))
    stop("Invalid datetime")
  if (!(all(res %in% c(5.0, 0.5, 0.1))))
    stop("Invalid resolution")
  # Translate numeric resolutions into the tokens used in NHC file names.
  # The dots are escaped so "0.5"/"0.1" are matched literally.
  res <- as.character(res)
  res <- stringr::str_replace(res, "^5$", "5km")
  res <- stringr::str_replace(res, "^0\\.5$", "halfDeg")
  res <- stringr::str_replace(res, "^0\\.1$", "tenthDeg")
  # First four characters of datetime are the year (str_sub is 1-based).
  year <- stringr::str_sub(datetime, 1L, 4L)
  request <- httr::GET("http://www.nhc.noaa.gov/gis/archive_wsp.php",
                       body = list(year = year), encode = "form")
  contents <- httr::content(request, as = "parsed", encoding = "UTF-8")
  # Scrape all anchors, keeping only links to zip archives.
  ds <- rvest::html_nodes(contents, xpath = "//a") %>%
    rvest::html_attr("href") %>%
    stringr::str_extract(".+\\.zip$") %>%
    .[stats::complete.cases(.)]
  # A partial datetime acts as a prefix; a full 10-digit value must match
  # exactly.
  if (nchar(datetime) < 10) {
    ptn_datetime <- paste0(datetime, "[:digit:]+")
  } else {
    ptn_datetime <- datetime
  }
  ptn_res <- paste(res, collapse = "|")
  ptn <- sprintf("%s_wsp_[:digit:]{1,3}hr(%s)", ptn_datetime, ptn_res)
  links <- ds[stringr::str_detect(ds, ptn)]
  links <- paste0("http://www.nhc.noaa.gov/gis/", links)
  return(links)
}
#' @title shp_to_df
#' @description Convert shapefile object to dataframe
#' @param obj Spatial object to convert. See details.
#' @return A tibble when `obj` is a SpatialLinesDataFrame or
#'   SpatialPolygonsDataFrame; any other object is returned unchanged.
#' @details Takes a SpatialLinesDataFrame object or SpatialPolygonsDataFrame
#' object and converts into a dataframe that can be plotted in ggplot2.
#' @export
shp_to_df <- function(obj) {
  # inherits() is robust when an object carries more than one class;
  # `class(obj) %in% ...` would produce a multi-element condition there.
  if (inherits(obj, c("SpatialLinesDataFrame", "SpatialPolygonsDataFrame"))) {
    # Attach row ids so the fortified coordinates can be joined back to the
    # attribute data.
    obj@data$id <- rownames(obj@data)
    obj <- dplyr::left_join(broom::tidy(obj, region = "id"),
                            obj@data, by = "id") %>%
      tibble::as_tibble()
  }
  return(obj)
}
|
/R/gis.R
|
permissive
|
mraza007/rrricanes
|
R
| false
| false
| 18,564
|
r
|
#' @title gis_advisory
#' @description Advisory Forecast Track, Cone of Uncertainty, and
#' Watches/Warnings
#' @param key Key of storm (i.e., AL012008, EP092015)
#' @param advisory Advisory number. If NULL, all advisories are returned.
#' Intermediate advisories are acceptable.
#' @return Character vector of links to zip datasets for the storm/advisory.
#' @seealso \code{\link{gis_download}}
#' @export
gis_advisory <- function(key, advisory = as.character()) {
  if (is.null(key))
    stop("Please provide storm key")
  key <- stringr::str_to_lower(key)
  if (!grepl("^[[:lower:]]{2}[[:digit:]]{6}$", key))
    stop("Invalid key")
  # Split key into basin (2 letters), storm number (2 digits), year (4 digits).
  key <- stringr::str_match(key,
                            pattern = paste0("([:lower:]{2})([:digit:]{2})",
                                             "([:digit:]{4})"))
  names(key) <- c("original", "basin", "year_num", "year")
  # Get list of GIS forecast zips for storm and download
  url <- sprintf("%sgis/archive_forecast_results.php?id=%s%s&year=%s",
                 get_nhc_link(), key[["basin"]], key[["year_num"]],
                 key[["year"]])
  contents <- readr::read_lines(url)
  # Match zip files. If advisory is empty then need to pull all zip files for
  # the storm. Otherwise, pull only selected advisory.
  if (purrr::is_empty(advisory)) {
    ptn <- sprintf(".+(forecast/archive/%s.*?\\.zip).+",
                   stringr::str_to_lower(key[["original"]]))
  } else {
    # Split the advisory into the numeric part and an optional intermediate
    # letter (e.g. "3a" -> "3", "a"); the number is zero-padded to 3 digits.
    advisory <- stringr::str_match(advisory, "([:digit:]{1,3})([:alpha:]*)")
    names(advisory) <- c("original", "advisory", "int_adv")
    ptn <- sprintf(".+(forecast/archive/%s.*?%s%s\\.zip).+",
                   stringr::str_to_lower(key[["original"]]),
                   stringr::str_pad(string = advisory[["advisory"]],
                                    width = 3, side = "left", pad = "0"),
                   advisory[["int_adv"]])
  }
  matches <- contents[stringr::str_detect(contents, pattern = ptn)]
  # Extract link to zip files. Error gracefully if no matches.
  tryCatch(links <- stringr::str_match(matches, pattern = ptn)[,2],
           error = function(c) {
             c$message <- "No data available for requested storm/advisory"
             stop(c$message, call. = FALSE)
           })
  # Append website domain to links
  links <- paste0("http://www.nhc.noaa.gov/gis/", links)
  return(links)
}
#' @title gis_breakpoints
#' @description Return link to breakpoints shapefile by year
#' @param year Default is current year. Breakpoints only available >= 2008.
#' @return Character vector of zip URLs for `year`, or NULL if the archive
#'   page lists no breakpoints file for that year.
#' @details Coastal areas placed under tropical storm and hurricane watches and
#' warnings are identified through the use of "breakpoints." A tropical cyclone
#' breakpoint is defined as an agreed upon coastal location that can be chosen
#' as one of two specific end points or designated places between which a
#' tropical storm/hurricane watch/warning is in effect. The U.S. National
#' Weather Service designates the locations along the U.S. East, Gulf, and
#' California coasts, Puerto Rico, and Hawaii. These points are listed in NWS
#' Directive 10-605 (PDF). Individual countries across the Caribbean, Central
#' America, and South America provide coastal locations for their areas of
#' responsibility to the U.S. National Weather Service for the National
#' Hurricane Center's use in tropical cyclone advisories when watches/warnings
#' are issued by international partners. The National Hurricane Center maintains
#' a list of pre-arranged breakpoints for the U.S. Atlantic and Gulf coasts,
#' Mexico, Cuba and the Bahamas. Other sites are unofficial and sites not on the
#' list can be selected if conditions warrant.
#' @export
gis_breakpoints <- function(year = as.numeric(strftime(Sys.Date(), "%Y"))) {
  # xpath pattern
  xp <- "//a"
  # POST the requested year to the archive page, scrape every anchor's href,
  # and keep only entries ending in "Breakpoints_<year>.zip" (non-matching
  # hrefs become NA and are dropped by complete.cases).
  links <- httr::POST("http://www.nhc.noaa.gov/gis/archive_breakpoints.php",
                      body = list(year = year), encode = "form") %>%
    httr::content(as = "parsed", encoding = "UTF-8") %>%
    rvest::html_nodes(xpath = xp) %>%
    rvest::html_attr("href") %>%
    stringr::str_extract(sprintf("Breakpoints_%s\\.zip$", year)) %>%
    .[stats::complete.cases(.)]
  # NULL signals "nothing published for this year" (breakpoints exist >= 2008).
  if (purrr::is_empty(links))
    return(NULL)
  links <- paste0("http://www.nhc.noaa.gov/gis/breakpoints/archive/", links)
  return(links)
}
#' @title gis_download
#' @description Get GIS data for storm.
#' @param url link to GIS zip dataset to download.
#' @param ... additional parameters for rgdal::readOGR
#' @return A named list of Spatial* objects, one per shapefile found in the
#'   downloaded zip archive. Names are the shapefile base names with
#'   punctuation and spaces replaced by underscores.
#' @export
gis_download <- function(url, ...) {
  destdir <- tempdir()
  zip_file <- tempfile(fileext = ".zip")
  utils::download.file(url, zip_file)
  zip_contents <- utils::unzip(zip_file, list = TRUE)$Name
  # Remove the extracted files and the downloaded archive even if reading a
  # layer fails part-way through.
  on.exit(unlink(c(file.path(destdir, zip_contents), zip_file)), add = TRUE)
  utils::unzip(zip_file, exdir = destdir)
  # Only the .shp members are read explicitly; readOGR picks up the sidecar
  # files (.dbf, .shx, .prj, ...) from the same directory automatically.
  shp_files <- stringr::str_match(zip_contents, pattern = ".+shp$") %>%
    .[!is.na(.)]
  ds <- purrr::map(shp_files, function(f) {
    layer <- stringr::str_match(f, "^(.+)\\.shp$")[, 2]
    rgdal::readOGR(dsn = destdir, layer = layer,
                   encoding = "UTF-8",
                   stringsAsFactors = FALSE,
                   use_iconv = TRUE,
                   ...)
  })
  # Name each dataset after its shapefile, normalising punctuation/spaces so
  # the names are valid list accessors.
  names(ds) <- stringr::str_match(shp_files, "^(.+)\\.shp$")[, 2] %>%
    stringr::str_replace_all("[[:punct:][:space:]]", "_")
  return(ds)
}
#' @title gis_latest
#' @description Latest GIS datasets for active cyclones
#' @param basins AL and/or EP.
#' @param ... additional parameters for rgdal::readOGR
#' @return A list of spatial datasets (one entry per advertised zip) when any
#'   are available; FALSE when the feeds list no GIS zip files.
#' @export
gis_latest <- function(basins = c("AL", "EP"), ...) {
  if (!(all(basins %in% c("AL", "EP"))))
    stop("Invalid basin")
  # Each basin publishes an XML feed listing its latest GIS products.
  urls <- list("AL" = "http://www.nhc.noaa.gov/gis-at.xml",
               "EP" = "http://www.nhc.noaa.gov/gis-ep.xml")
  # Collect every <link> element from the requested feeds, then keep only the
  # links that point at zip archives (non-matches become NA and are dropped).
  gis_zips <- purrr::map(basins, ~ xml2::read_xml(urls[[.x]])) %>%
    purrr::map(~ xml2::xml_find_all(.x, xpath = ".//link") %>%
                 xml2::xml_text()) %>%
    purrr::map(stringr::str_match, ".+\\.zip$") %>%
    purrr::flatten_chr() %>%
    .[!is.na(.)]
  if (!purrr::is_empty(gis_zips)) {
    # Download and read each advertised dataset.
    ds <- purrr::map(gis_zips, gis_download, ...)
    return(ds)
  }
  return(FALSE)
}
#' @title gis_outlook
#' @description Tropical Weather Outlook
#' @return URL (character scalar) of the current Tropical Weather Outlook
#'   shapefile zip.
#' @seealso \code{\link{gis_download}}
#' @export
gis_outlook <- function() {
  # This dataset is not archived; there is a single, fixed download location.
  url <- "http://www.nhc.noaa.gov/xgtwo/gtwo_shapefiles.zip"
  return(url)
}
#' @title gis_prob_storm_surge
#' @description Probabilistic Storm Surge
#' @param key Key of storm (i.e., AL012008, EP092015)
#' @param products list of products and associated n values; psurge (0:20) or
#'   esurge (10, 20, 30, 40, 50).
#' @param datetime Datetime in \%Y\%m\%d\%H format.
#' @return Character vector of links to matching storm-surge zip datasets.
#' @details Probabilistic Storm Surge Forecasts
#' @section Products:
#' \describe{
#'   \item{esurge}{The Tropical Cyclone Storm Surge Exceedances (P-Surge 2.5)
#'     data shows the probability, in percent, of a specified storm surge,
#'     including tides, exceeding the specified height, in feet, during
#'     the forecast period indicated. The 10 percent exceedance height,
#'     for example, is the storm surge height, including tides, above
#'     ground level (AGL) such that there is a 10 percent chance of
#'     exceeding it. The product is based upon an ensemble of Sea, Lake,
#'     and Overland Surge from Hurricanes (SLOSH) model runs using the
#'     National Hurricane Center (NHC) official advisory and accounts for
#'     track, size, and intensity errors based on historical errors and
#'     astronomical tide. Valid values are 10, 20, 30, 40 or 50.}
#'   \item{psurge}{The Tropical Cyclone Storm Surge Probabilities (P-Surge
#'     2.5) data shows the probability, in percent, of a specified storm
#'     surge occurring during the forecast period indicated. The product
#'     is based upon an ensemble of Sea, Lake, and Overland Surge from
#'     Hurricanes (SLOSH) model runs using the National Hurricane Center
#'     (NHC) official advisory and accounts for track, size, and intensity
#'     errors based on historical errors and astronomical tide. Valid
#'     values are 0:20.}
#' }
#' @seealso \href{http://www.nhc.noaa.gov/surge/psurge.php}{Tropical Cyclone Storm Surge Probabilities}
#' @seealso \code{\link{gis_download}}
#' @examples
#' \dontrun{
#' # Return the last psurge0 product for storm AL092016
#' gis_prob_storm_surge("AL092016", products = list("psurge" = 0))
#'
#' # Return the psurge0 and esurge10 products for storm AL092016
#' gis_prob_storm_surge("AL092016", products = list("psurge" = 0, "esurge" = 10))
#'
#' # Return all psurge0 products for Sep 2, 2016, storm AL092016
#' gis_prob_storm_surge("AL092016", products = list("psurge" = 0),
#'                      datetime = "20160902")
#' }
#' @export
gis_prob_storm_surge <- function(key, products, datetime = NULL) {
  if (is.null(key))
    stop("Please provide storm key")
  # Validate products
  if (!(all(names(products) %in% c("psurge", "esurge"))))
    stop("Invalid product. Must be psurge and/or esurge")
  if (!is.null(products[["psurge"]]))
    if (!(all(dplyr::between(products[["psurge"]], 0, 20))))
      stop("psurge values must be between 0 and 20")
  if (!is.null(products[["esurge"]]))
    if (!(all(products[["esurge"]] %in% seq(10, 50, by = 10))))
      stop("esurge values must be 10, 20, 30, 40 or 50")
  key <- stringr::str_to_lower(key)
  if (!grepl("^[[:lower:]]{2}[[:digit:]]{6}$", key))
    stop("Invalid key")
  # Split key into basin (2 letters), storm number (2 digits), year (4 digits).
  key <- stringr::str_match(key, pattern = paste0("([:lower:]{2})([:digit:]",
                                                  "{2})([:digit:]{4})"))
  names(key) <- c("original", "basin", "year_num", "year")
  # Get list of GIS forecast zips for storm and download
  url <- sprintf("%sgis/archive_psurge_results.php?id=%s%s&year=%s",
                 get_nhc_link(), key[["basin"]], key[["year_num"]],
                 key[["year"]])
  contents <- readr::read_lines(url)
  # Build product pattern, e.g. list(psurge = 0, esurge = 10) ->
  # c("psurge0", "esurge10")
  ptn_product <- names(products) %>%
    purrr::map(.f = function(x) paste0(x, products[[x]])) %>%
    purrr::flatten_chr()
  # Build datetime pattern
  if (is.null(datetime)) {
    ptn_datetime <- "[:digit:]+"
  } else {
    # If x$datetime is 10 digits, then user is looking for specific datetime
    # value. Pattern must be that value.
    if (grepl("[[:digit:]]{10}", datetime)) {
      ptn_datetime <- datetime
    } else {
      # Otherwise, x$datetime is beginning of pattern with wildcard at end
      ptn_datetime <- paste0(datetime, "[:digit:]+")
    }
  }
  # Match zip files.
  ptn <- sprintf(".+(storm_surge/%s_(%s)_(%s)\\.zip).+",
                 stringr::str_to_lower(key[["original"]]),
                 paste(ptn_product, collapse = "|"),
                 ptn_datetime)
  ds <- contents[stringr::str_detect(contents, pattern = ptn)]
  # Extract link to zip files. Error gracefully if no matches.
  tryCatch(links <- stringr::str_match(ds, pattern = ptn)[,2],
           error = function(c) {
             c$message <- "No data available for requested storm/advisory"
             stop(c$message, call. = FALSE)
           })
  # Prepend domains to links
  links <- paste0("http://www.nhc.noaa.gov/gis/", links)
  return(links)
}
#' @title gis_storm_surge_flood
#' @description Potential Storm Surge Flooding (Inundation)
#' @param key Key of storm (i.e., AL012008, EP092015)
#' @param advisory Advisory number. If NULL, all available advisories are
#' returned.
#' @param products indundation or tidalmask
#' @return Character vector of links to zip datasets. Note these zips contain
#'   raster data, not shapefiles.
#' @seealso \code{\link{gis_download}}
#' @keywords internal
gis_storm_surge_flood <- function(key, advisory = as.numeric(),
                                  products = c("inundation", "tidalmask")) {
  warning("These are raster files, not shapefiles.")
  if (is.null(key))
    stop("Please provide storm key")
  key <- stringr::str_to_upper(key)
  if (!grepl("^[[:alpha:]]{2}[[:digit:]]{6}$", key))
    stop("Invalid key")
  if (!(any(products %in% c("inundation", "tidalmask"))))
    stop("Invalid products")
  # Split key into basin (2 letters), storm number (2 digits), year (4 digits).
  key <- stringr::str_match(key, pattern = paste0("([:alpha:]{2})([:digit:]",
                                                  "{2})([:digit:]{4})"))
  names(key) <- c("original", "basin", "year_num", "year")
  # Get list of GIS zips for storm and download
  url <- sprintf("%sgis/archive_inundation_results.php?id=%s%s&year=%s",
                 get_nhc_link(), key[["basin"]], key[["year_num"]],
                 key[["year"]])
  contents <- readr::read_lines(url)
  # File names embed a two-digit year, hence the str_sub on the 4-digit year.
  # Without an advisory, match any one- or two-digit advisory number;
  # otherwise, zero-pad the requested advisory to two digits.
  if (purrr::is_empty(advisory)) {
    ptn <- sprintf(".+(%s/%s%s%s_[:digit:]{1,2}_(%s)\\.zip).+",
                   "inundation/forecasts",
                   key[["basin"]],
                   key[["year_num"]],
                   stringr::str_sub(key[["year"]], start = 3L, end = 4L),
                   paste(products, collapse = "|"))
  } else {
    ptn <- sprintf(".+(inundation/forecasts/%s%s%s_%s_(%s)\\.zip).+",
                   key[["basin"]],
                   key[["year_num"]],
                   stringr::str_sub(key[["year"]], start = 3L, end = 4L),
                   stringr::str_pad(advisory, width = 2, side = "left",
                                    pad = "0"),
                   paste(products, collapse = "|"))
  }
  matches <- contents[stringr::str_detect(contents, pattern = ptn)]
  # Extract link to zip files. Error gracefully if no matches.
  tryCatch(links <- stringr::str_match(matches, pattern = ptn)[,2],
           error = function(c) {
             c$message <- "No data available for requested storm/advisory"
             stop(c$message, call. = FALSE)
           })
  # Prepend website domain to links
  links <- paste0("http://www.nhc.noaa.gov/gis/", links)
  return(links)
}
#' @title gis_windfield
#' @description Advisory Wind Field and Forecast Wind Radii
#' @param key Key of storm (i.e., AL012008, EP092015)
#' @param advisory Advisory number. If NULL, all advisories are returned.
#' Intermediate advisories are acceptable.
#' @return Character vector of links to zip datasets for the storm/advisory.
#' @details Tropical Cyclone Advisory Wind Field
#' http://www.nhc.noaa.gov/gis/archive_forecast_info_results.php?id=al14&year=2016
#' http://www.nhc.noaa.gov/gis/forecast/archive/
#' Example file name: al012017_fcst_001.zip
#' [basin]{2}[year_num]{2}[year]{4}_fcst_[advisory]{3}.zip
#' Many storms do not appear to have this data; especially earlier.
#'
#' Not all advisories will be available for storms. For example,
#' \href{http://www.nhc.noaa.gov/gis/archive_forecast_info_results.php?id=al14&year=2016}{Hurricane Matthew (AL142016)}
#' is missing several advisories.
#' @seealso \code{\link{gis_download}}
#' @export
gis_windfield <- function(key, advisory = as.character()) {
  if (is.null(key))
    stop("Please provide storm key")
  key <- stringr::str_to_lower(key)
  if (!grepl("^[[:lower:]]{2}[[:digit:]]{6}$", key))
    stop("Invalid key")
  # Split key into basin (2 letters), storm number (2 digits), year (4 digits).
  key <- stringr::str_match(key, pattern = paste0("([:lower:]{2})([:digit:]",
                                                  "{2})([:digit:]{4})"))
  names(key) <- c("original", "basin", "year_num", "year")
  # Get list of GIS forecast zips for storm and download
  url <- sprintf("%sgis/archive_forecast_info_results.php?id=%s%s&year=%s",
                 get_nhc_link(), key[["basin"]], key[["year_num"]],
                 key[["year"]])
  contents <- readr::read_lines(url)
  # Match zip files. If advisory is empty then need to pull all zip files for
  # the storm. Otherwise, pull only selected advisory.
  if (purrr::is_empty(advisory)) {
    ptn <- sprintf(".+(forecast/archive/%s.*?\\.zip).+",
                   stringr::str_to_lower(key[["original"]]))
  } else {
    # Split the advisory into the numeric part and an optional intermediate
    # letter (e.g. "3a" -> "3", "a"); the number is zero-padded to 3 digits.
    advisory <- stringr::str_match(advisory, "([:digit:]{1,3})([:alpha:]*)")
    names(advisory) <- c("original", "advisory", "int_adv")
    ptn <- sprintf(".+(forecast/archive/%s.*?%s%s\\.zip).+",
                   stringr::str_to_lower(key[["original"]]),
                   stringr::str_pad(string = advisory[["advisory"]],
                                    width = 3, side = "left", pad = "0"),
                   advisory[["int_adv"]])
  }
  matches <- contents[stringr::str_detect(contents, pattern = ptn)]
  # Extract link to zip files. Error gracefully if no matches.
  tryCatch(links <- stringr::str_match(matches, pattern = ptn)[,2],
           error = function(c) {
             c$message <- "No data available for requested storm/advisory"
             stop(c$message, call. = FALSE)
           })
  links <- paste0("http://www.nhc.noaa.gov/gis/", links)
  return(links)
}
#' @title gis_wsp
#' @description Wind Speed Probabilities
#' @param datetime Datetime in \%Y\%m\%d\%H format. \%m, \%d and \%H are
#' optional but will return more datasets.
#' @param res Resolution as a numeric vector; 5, 0.5, 0.1.
#' @return Character vector of links to matching zip datasets.
#' @details Probability winds affecting an area within a forecast period.
#' Datasets contain windfields for 34kt, 50kt and 64kt. Resolution is at 5km,
#' 0.5 degrees and 0.1 degrees. Not all resolutions may be available for all
#' storms. Not all windfields will be available for all advisories.
#' @seealso \code{\link{gis_download}}
#' @examples
#' \dontrun{
#' # Return datasets for January 1, 2016 with resolution of 0.5 degrees
#' gis_wsp("20160101", res = 0.5)
#'
#' # Return wsp of 0.1 and 0.5 degree resolution, July, 2015
#' gis_wsp("201507", res = c(0.5, 0.1))
#' }
#' @export
gis_wsp <- function(datetime, res = c(5, 0.5, 0.1)) {
  if (!grepl("[[:digit:]]{4,10}", datetime))
    stop("Invalid datetime")
  if (!(all(res %in% c(5.0, 0.5, 0.1))))
    stop("Invalid resolution")
  # Translate numeric resolutions into the tokens used in NHC file names.
  # The dots are escaped so "0.5"/"0.1" are matched literally.
  res <- as.character(res)
  res <- stringr::str_replace(res, "^5$", "5km")
  res <- stringr::str_replace(res, "^0\\.5$", "halfDeg")
  res <- stringr::str_replace(res, "^0\\.1$", "tenthDeg")
  # First four characters of datetime are the year (str_sub is 1-based).
  year <- stringr::str_sub(datetime, 1L, 4L)
  request <- httr::GET("http://www.nhc.noaa.gov/gis/archive_wsp.php",
                       body = list(year = year), encode = "form")
  contents <- httr::content(request, as = "parsed", encoding = "UTF-8")
  # Scrape all anchors, keeping only links to zip archives.
  ds <- rvest::html_nodes(contents, xpath = "//a") %>%
    rvest::html_attr("href") %>%
    stringr::str_extract(".+\\.zip$") %>%
    .[stats::complete.cases(.)]
  # A partial datetime acts as a prefix; a full 10-digit value must match
  # exactly.
  if (nchar(datetime) < 10) {
    ptn_datetime <- paste0(datetime, "[:digit:]+")
  } else {
    ptn_datetime <- datetime
  }
  ptn_res <- paste(res, collapse = "|")
  ptn <- sprintf("%s_wsp_[:digit:]{1,3}hr(%s)", ptn_datetime, ptn_res)
  links <- ds[stringr::str_detect(ds, ptn)]
  links <- paste0("http://www.nhc.noaa.gov/gis/", links)
  return(links)
}
#' @title shp_to_df
#' @description Convert shapefile object to dataframe
#' @param obj Spatial object to convert. See details.
#' @return A tibble when `obj` is a SpatialLinesDataFrame or
#'   SpatialPolygonsDataFrame; any other object is returned unchanged.
#' @details Takes a SpatialLinesDataFrame object or SpatialPolygonsDataFrame
#' object and converts into a dataframe that can be plotted in ggplot2.
#' @export
shp_to_df <- function(obj) {
  # inherits() is robust when an object carries more than one class;
  # `class(obj) %in% ...` would produce a multi-element condition there.
  if (inherits(obj, c("SpatialLinesDataFrame", "SpatialPolygonsDataFrame"))) {
    # Attach row ids so the fortified coordinates can be joined back to the
    # attribute data.
    obj@data$id <- rownames(obj@data)
    obj <- dplyr::left_join(broom::tidy(obj, region = "id"),
                            obj@data, by = "id") %>%
      tibble::as_tibble()
  }
  return(obj)
}
|
# Association-rule mining on a binary movie-genre matrix.
library(arules)
# Install corrplot only when missing; unconditionally re-installing on every
# run is slow and may fail without network access.
if (!requireNamespace("corrplot", quietly = TRUE)) install.packages("corrplot")
library(corrplot)
# Load data interactively; columns 6:15 are expected to be 0/1 genre flags.
movies <- read.csv(file.choose())
movies
head(movies)
summary(movies)
str(movies)
# Correlation structure of the binary genre columns.
corrplot(cor(movies[, 6:15]))
# Build the apriori rule set: low support/confidence, rules of length >= 3.
movies_rules <- apriori(as.matrix(movies[, 6:15]),
                        parameter = list(support = 0.005, confidence = 0.05,
                                         minlen = 3))
movies_rules
# Inspect the strongest rules under several orderings.
inspect(head(sort(movies_rules, by = "lift")))
inspect(head(sort(movies_rules, by = "confidence")))
inspect(head(sort(movies_rules, by = "support")))
inspect(head(sort(movies_rules, by = c("count", "lift"))))
|
/movies.R
|
no_license
|
karthi-25/Tutorials-on-R-codes
|
R
| false
| false
| 518
|
r
|
# Association-rule mining on the movies data set using the apriori algorithm.
library(arules)
# NOTE(review): installs corrplot on every run; consider guarding with
# requireNamespace() so installation only happens when the package is missing.
install.packages("corrplot")
library(corrplot)
#load data (interactive file picker)
movies=read.csv(file.choose())
movies
head(movies)
summary(movies)
str(movies)
# Correlation plot of columns 6:15 - assumed numeric indicator columns,
# TODO confirm against the actual CSV schema.
corrplot(cor(movies[,6:15]))
#build algorithm: apriori rules with low support/confidence, >= 3 items per rule
movies_rules <- apriori(as.matrix(movies[,6:15]),parameter = list(support = 0.005,confidence= 0.05,minlen=3))
movies_rules
# Top rules ranked by lift, confidence, support, and count + lift.
inspect(head(sort(movies_rules,by="lift")))
inspect(head(sort(movies_rules,by="confidence")))
inspect(head(sort(movies_rules,by="support")))
inspect(head(sort(movies_rules,by=c("count","lift"))))
|
# Script: scrape newly registered job vacancies from the Swedish Public
# Employment Service (AF) QlikView statistics site with RSelenium and write
# the result to an Excel workbook.
# Select the county for the extract by setting its county code (lanskod)
Lanskod = 20
# Capture the start time so the total run time can be measured
StartTid <- Sys.time()
# Load required packages ----------------------------------------------
library(tidyverse)
library(RSelenium)
library(rvest)
library(stringr)
# Load helper functions that navigate AF's statistics page and download data
source("funktioner_veckostat.R", encoding = "UTF-8")
retry(RegList <- RegionLista())
ValdRegion_df <- RegionKommunMatris(Lanskod)
# Menu number for the county the extract refers to; the value comes from
# the county chosen at the top of the script
lannr_meny = ValdRegion_df[1,5]
# Number of municipalities in the chosen region - derived from the region
# choice above; all municipalities are included
ant_kommuner = nrow(ValdRegion_df)
# Set the end week -------------------------------------------------------------------------------------
# Data is always downloaded from week 1 of each year up to the week given in
# the variable "veckonr". One row of the returned results contains text (row
# 20), so to get results for 52 weeks the number of weeks must be set to 53.
# The recommendation is to always download all data up to the last week of
# the year, i.e. to let veckonr equal 53
veckonr <- 54 # Leave as is!
# Connect to AF's QlikView server -----------------------------------------------------------------
fanslut_till_server <- function() {
  # Assigns into the global environment on purpose: the drivers are used by
  # the rest of the script and by fstoppa_session_anslut().
  remDr <<- remoteDriver$new(
    remoteServerAddr = "localhost",
    port = 4444,
    browserName = "firefox"
  )
  # Check that the Chrome version is correct under Chrome -> Help
  rd <<- rsDriver(port = 4567L,
                  chromever = "87.0.4280.20")
}
# Stop the session, free the ports and make a new connection attempt
fstoppa_session_anslut <- function() {
  # Killing java frees the Selenium port held by a previous session (Windows)
  system("taskkill /im java.exe /f", intern=FALSE, ignore.stdout=FALSE)
  gc()
  Sys.sleep(1)
  fanslut_till_server()
}
# Connection errors are almost always caused by a restart leaving the port
# occupied from a previous session. Restarting java frees the port so a new
# session can be started
tryCatch(fanslut_till_server(),
         error=function(e) fstoppa_session_anslut())
remDr <- rd[["client"]]
url <- "http://qvs12ext.ams.se/QvAJAXZfc/opendoc.htm?document=extern%5Cvstatplus_extern.qvw&host=QVS%40w001765&anonymous=true%20&select=StartTrigger,1"
# Error handling - works? unknown (NOTE(review): verify the timeout actually applies)
remDr$setTimeout(type = "page load", milliseconds = 10000)
remDr$navigate(url)
# Long pause so the page has time to load
Sys.sleep(4)
################# At this point the page has loaded and data collection begins
# Newly registered vacancies
retry(fplatser_valj_rapport(1))
# If a chart is shown instead of a table - click the "Show as: table" box
retry(fplatser_valj_tabell())
# Save newly registered vacancies for the whole country
dfriket_nyanm_platser <- fplatser_skapa_tab()
# Build a table for all counties
dfallalan_nyanm_platser <- fplatser_extr_data_lan()
# Select county (the one preselected above)
retry(fplatser_valj_lan(lannr_meny))
# Download newly registered vacancies per municipality
dfkom_nyanm_platser <- fplatser_extr_data_kommuner(ant_kommuner)
# Deselect the county again
retry(fplatser_avvalj_lan())
# Combine country/county/municipality tables, split "code name" region labels,
# and reshape the year columns into long format, dropping empty cells
dfnyanm_platser <- bind_rows(dfallalan_nyanm_platser,
                             dfriket_nyanm_platser,
                             dfkom_nyanm_platser) %>%
  separate(region,
           into = c("region_kod", "region"), sep = "\\s", extra = "merge") %>%
  pivot_longer(cols = 2:4, names_to = "ar", values_to = "antal") %>%
  filter(!is.na(antal))
################ Write the data frame to the Excel file
# Path on the university computer
sokvag1 <- "C:\\Users\\pmo\\OneDrive - Högskolan Dalarna\\Auto AF\\Uttag\\AF LedigaPlatser uttag.xlsx"
# Path on the home computer
sokvag2 <- "C:\\Users\\Administratör\\OneDrive - Region Dalarna\\Auto AF\\Uttag\\AF LedigaPlatser uttag.xlsx"
# Test whether the university-computer path exists (the file must exist);
# if not, the home-computer path is used
if (file.exists(sokvag1)) sokvag <- sokvag1 else sokvag <- sokvag2
writexl::write_xlsx(list(LedigaPlatser = dfnyanm_platser),
                    path = sokvag)
|
/AF_Rselenium/AF_LedigaPlatser.R
|
no_license
|
Analytikernatverket/R
|
R
| false
| false
| 4,384
|
r
|
# Script: scrape newly registered job vacancies from the Swedish Public
# Employment Service (AF) QlikView statistics site with RSelenium and write
# the result to an Excel workbook.
# Select the county for the extract by setting its county code (lanskod)
Lanskod = 20
# Capture the start time so the total run time can be measured
StartTid <- Sys.time()
# Load required packages ----------------------------------------------
library(tidyverse)
library(RSelenium)
library(rvest)
library(stringr)
# Load helper functions that navigate AF's statistics page and download data
source("funktioner_veckostat.R", encoding = "UTF-8")
retry(RegList <- RegionLista())
ValdRegion_df <- RegionKommunMatris(Lanskod)
# Menu number for the county the extract refers to; the value comes from
# the county chosen at the top of the script
lannr_meny = ValdRegion_df[1,5]
# Number of municipalities in the chosen region - derived from the region
# choice above; all municipalities are included
ant_kommuner = nrow(ValdRegion_df)
# Set the end week -------------------------------------------------------------------------------------
# Data is always downloaded from week 1 of each year up to the week given in
# the variable "veckonr". One row of the returned results contains text (row
# 20), so to get results for 52 weeks the number of weeks must be set to 53.
# The recommendation is to always download all data up to the last week of
# the year, i.e. to let veckonr equal 53
veckonr <- 54 # Leave as is!
# Connect to AF's QlikView server -----------------------------------------------------------------
fanslut_till_server <- function() {
  # Assigns into the global environment on purpose: the drivers are used by
  # the rest of the script and by fstoppa_session_anslut().
  remDr <<- remoteDriver$new(
    remoteServerAddr = "localhost",
    port = 4444,
    browserName = "firefox"
  )
  # Check that the Chrome version is correct under Chrome -> Help
  rd <<- rsDriver(port = 4567L,
                  chromever = "87.0.4280.20")
}
# Stop the session, free the ports and make a new connection attempt
fstoppa_session_anslut <- function() {
  # Killing java frees the Selenium port held by a previous session (Windows)
  system("taskkill /im java.exe /f", intern=FALSE, ignore.stdout=FALSE)
  gc()
  Sys.sleep(1)
  fanslut_till_server()
}
# Connection errors are almost always caused by a restart leaving the port
# occupied from a previous session. Restarting java frees the port so a new
# session can be started
tryCatch(fanslut_till_server(),
         error=function(e) fstoppa_session_anslut())
remDr <- rd[["client"]]
url <- "http://qvs12ext.ams.se/QvAJAXZfc/opendoc.htm?document=extern%5Cvstatplus_extern.qvw&host=QVS%40w001765&anonymous=true%20&select=StartTrigger,1"
# Error handling - works? unknown (NOTE(review): verify the timeout actually applies)
remDr$setTimeout(type = "page load", milliseconds = 10000)
remDr$navigate(url)
# Long pause so the page has time to load
Sys.sleep(4)
################# At this point the page has loaded and data collection begins
# Newly registered vacancies
retry(fplatser_valj_rapport(1))
# If a chart is shown instead of a table - click the "Show as: table" box
retry(fplatser_valj_tabell())
# Save newly registered vacancies for the whole country
dfriket_nyanm_platser <- fplatser_skapa_tab()
# Build a table for all counties
dfallalan_nyanm_platser <- fplatser_extr_data_lan()
# Select county (the one preselected above)
retry(fplatser_valj_lan(lannr_meny))
# Download newly registered vacancies per municipality
dfkom_nyanm_platser <- fplatser_extr_data_kommuner(ant_kommuner)
# Deselect the county again
retry(fplatser_avvalj_lan())
# Combine country/county/municipality tables, split "code name" region labels,
# and reshape the year columns into long format, dropping empty cells
dfnyanm_platser <- bind_rows(dfallalan_nyanm_platser,
                             dfriket_nyanm_platser,
                             dfkom_nyanm_platser) %>%
  separate(region,
           into = c("region_kod", "region"), sep = "\\s", extra = "merge") %>%
  pivot_longer(cols = 2:4, names_to = "ar", values_to = "antal") %>%
  filter(!is.na(antal))
################ Write the data frame to the Excel file
# Path on the university computer
sokvag1 <- "C:\\Users\\pmo\\OneDrive - Högskolan Dalarna\\Auto AF\\Uttag\\AF LedigaPlatser uttag.xlsx"
# Path on the home computer
sokvag2 <- "C:\\Users\\Administratör\\OneDrive - Region Dalarna\\Auto AF\\Uttag\\AF LedigaPlatser uttag.xlsx"
# Test whether the university-computer path exists (the file must exist);
# if not, the home-computer path is used
if (file.exists(sokvag1)) sokvag <- sokvag1 else sokvag <- sokvag2
writexl::write_xlsx(list(LedigaPlatser = dfnyanm_platser),
                    path = sokvag)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/public.R
\name{DataSetUpdate}
\alias{DataSetUpdate}
\title{Update local data sets and update R/sysdata.rda file}
\usage{
DataSetUpdate(ds = "all", samples = FALSE, use.remote = TRUE,
force.update = FALSE, wizard = FALSE)
}
\arguments{
\item{ds}{Selects the data set for this operation. Default set to "all".
Check available options with DataSetList()}
\item{samples}{if TRUE it will create sample data.frames and store them in /data}
\item{use.remote}{if TRUE it will download sysdata.rda from net.security github}
\item{force.update}{if TRUE it will rebuild the package in the last step.}
\item{wizard}{if TRUE launch an interactive menu with some help.}
}
\value{
Date Official source files download date time.
}
\description{
\code{DataSetUpdate} Starts the process for updating local data sets available with \code{\link{GetDataFrame}} function.
}
\details{
The process includes the following phases:
\enumerate{
\item Download files from MITRE, NIST and INCIBE sources.
\item Process MITRE raw data.
\item Process NIST raw data. One file per year.
\item Indexing data. Includes CSV and XML parsing. Build data frame.
\item Tidy data frame.
\item Compress and save data.frame to internal data.
}
}
\examples{
\dontrun{
net.security::DataSetUpdate(ds = "all")
}
\dontrun{
net.security::DataSetUpdate(ds = "cves")
}
}
|
/man/DataSetUpdate.Rd
|
no_license
|
carlesUdG/net.security
|
R
| false
| true
| 1,416
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/public.R
\name{DataSetUpdate}
\alias{DataSetUpdate}
\title{Update local data sets and update R/sysdata.rda file}
\usage{
DataSetUpdate(ds = "all", samples = FALSE, use.remote = TRUE,
force.update = FALSE, wizard = FALSE)
}
\arguments{
\item{ds}{Selects the data set for this operation. Default set to "all".
Check available options with DataSetList()}
\item{samples}{if TRUE it will create sample data.frames and store them in /data}
\item{use.remote}{if TRUE it will download sysdata.rda from net.security github}
\item{force.update}{if TRUE it will rebuild the package in the last step.}
\item{wizard}{if TRUE launch an interactive menu with some help.}
}
\value{
Date Official source files download date time.
}
\description{
\code{DataSetUpdate} Starts the process for updating local data sets available with \code{\link{GetDataFrame}} function.
}
\details{
The process includes the following phases:
\enumerate{
\item Download files from MITRE, NIST and INCIBE sources.
\item Process MITRE raw data.
\item Process NIST raw data. One file per year.
\item Indexing data. Includes CSV and XML parsing. Build data frame.
\item Tidy data frame.
\item Compress and save data.frame to internal data.
}
}
\examples{
\dontrun{
net.security::DataSetUpdate(ds = "all")
}
\dontrun{
net.security::DataSetUpdate(ds = "cves")
}
}
|
.onLoad <- function(libname, pkgname) {
  # Path to the compiled shared library. NOTE(review): this is a hard-coded
  # developer path; it should eventually be resolved relative to the
  # installed package (e.g. via system.file()) - TODO confirm with author.
  rcudanlp.path <- '/Users/dy/nlp-cuda/bin/librcudanlp.so'
  sysname <- Sys.info()['sysname']
  if (sysname == 'Windows') {
    # Fix: the Windows location was previously assigned to an unused
    # variable ('path'), so the macOS .so path was loaded on every
    # platform. Use it as the load path instead.
    # TODO confirm the actual Windows library file (this looks like a
    # directory, not a DLL).
    rcudanlp.path <- 'C:/lib'
  }
  # dyn.load() errors if the file is missing; print() echoes the DLLInfo
  # so the load is visible when the package is attached.
  print(dyn.load(rcudanlp.path))
}
|
/R/utils.R
|
no_license
|
hack1nt0/rcudanlp
|
R
| false
| false
| 238
|
r
|
.onLoad <- function(libname, pkgname) {
  # Load the compiled nlp-cuda shared library when the package loads.
  # NOTE(review): hard-coded developer path; should be resolved via
  # system.file() once the library ships with the package.
  rcudanlp.path <- '/Users/dy/nlp-cuda/bin/librcudanlp.so'
  sysname <- Sys.info()['sysname']
  if (sysname == 'Windows') {
    # NOTE(review): 'path' is assigned but never used, so the macOS .so
    # path is loaded on every platform - likely a bug; verify whether this
    # was meant to override rcudanlp.path on Windows.
    path <- 'C:/lib'
  }
  # dyn.load() errors if the file is missing; print() echoes the DLLInfo.
  print(dyn.load(rcudanlp.path))
}
|
# MIT License
#
# Copyright (c) 2017-2021 TileDB Inc.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
## sparse matrix helper 'roughly similar' to fromDataFrame()
##' Create (or return) a TileDB sparse array
##'
##' The functions \code{fromSparseMatrix} and \code{toSparseMatrix} help in storing
##' (and retrieving) sparse matrices using a TileDB backend.
##' @param obj A sparse matrix object.
##' @param uri A character variable with an Array URI.
##' @param cell_order A character variable with one of the TileDB cell order values,
##' default is \dQuote{ROW_MAJOR}.
##' @param tile_order A character variable with one of the TileDB tile order values,
##' default is \dQuote{ROW_MAJOR}.
##' @param filter A character variable vector, defaults to \sQuote{ZSTD}, for
##' one or more filters to be applied to each attribute;
##' @param capacity An integer value with the schema capacity, default is 10000.
##' @return Null, invisibly.
##' @examples
##' \dontshow{ctx <- tiledb_ctx(limitTileDBCores())}
##' \dontrun{
##' if (requireNamespace("Matrix", quietly=TRUE)) {
##'   library(Matrix)
##'   set.seed(123) # just to fix it
##'   mat <- matrix(0, nrow=20, ncol=10)
##'   mat[sample(seq_len(200), 20)] <- seq(1, 20)
##'   spmat <- as(mat, "dgTMatrix") # sparse matrix in dgTMatrix format
##'   uri <- "sparse_matrix"
##'   fromSparseMatrix(spmat, uri)  # now written
##'   chk <- toSparseMatrix(uri)    # and re-read
##'   print(chk)
##'   all.equal(spmat, chk)
##' }
##' }
##' @importFrom methods as
##' @export
fromSparseMatrix <- function(obj,
                             uri,
                             cell_order = "ROW_MAJOR",
                             tile_order = "ROW_MAJOR",
                             filter = "ZSTD",
                             capacity = 10000L) {
    stopifnot(`obj must be Matrix object` = inherits(obj, "Matrix"),
              `obj must be sparse` = is(obj, "sparseMatrix"),
              `uri must character` = is.character(uri))
    ## Coerce to triplet (COO) form for direct @i/@j/@x slot access below.
    ## inherits() is more robust than comparing class(obj)[1], which breaks
    ## when subclasses are involved.
    if (!inherits(obj, "dgTMatrix")) obj <- as(obj, "dgTMatrix")
    ## FLOAT64 dimensions give a wider coordinate range than INT32 would.
    dimi <- tiledb_dim(name = "i", type = "FLOAT64",
                       tile = as.numeric(obj@Dim[1]),
                       domain = c(0, obj@Dim[1] - 1L))
    dimj <- tiledb_dim(name = "j", type = "FLOAT64",
                       tile = as.numeric(obj@Dim[2]),
                       domain = c(0, obj@Dim[2] - 1L))
    dom <- tiledb_domain(dims = c(dimi, dimj))
    ## Map the R storage type of the non-zero values to a TileDB type.
    cl <- class(obj@x)[1]
    if (cl == "integer")
        tp <- "INT32"
    else if (cl == "numeric")
        tp <- "FLOAT64"
    else
        stop("Currently unsupported type: ", cl)
    filterlist <- tiledb_filter_list(sapply(filter, tiledb_filter))
    attx <- tiledb_attr(name = "x", type = tp, ncells = 1, filter_list = filterlist)
    schema <- tiledb_array_schema(dom, attrs = attx,
                                  cell_order = cell_order, tile_order = tile_order,
                                  sparse = TRUE, capacity = capacity)
    tiledb_array_create(uri, schema)
    ## Write the triplets; TileDB coordinates are zero-based like the slots.
    arr <- tiledb_array(uri)
    arr[] <- data.frame(i = obj@i, j = obj@j, x = obj@x)
    invisible(NULL)
}
##' @rdname fromSparseMatrix
##' @export
toSparseMatrix <- function(uri) {
  ## Read the whole array back as an unordered data.frame of (i, j, x)
  ## triplets.
  tdb <- tiledb_array(uri, as.data.frame=TRUE, query_layout="UNORDERED")
  triplets <- tdb[]
  ## Recover the matrix extent from the schema's dimension domains.
  dim_objs <- dimensions(domain(schema(uri)))
  dom_i <- domain(dim_objs[[1]])
  dom_j <- domain(dim_objs[[2]])
  stopifnot(`No column i in data`=!is.na(match("i", colnames(triplets))),
            `No column j in data`=!is.na(match("j", colnames(triplets))),
            `No column x in data`=!is.na(match("x", colnames(triplets))),
            `Matrix package needed`=requireNamespace("Matrix", quietly=TRUE))
  ## TileDB coordinates are zero-based while sparseMatrix() is one-based,
  ## hence the +1 shifts on indices and extents. repr="T" keeps the
  ## triplet (dgTMatrix) representation.
  Matrix::sparseMatrix(i = triplets$i + 1,
                       j = triplets$j + 1,
                       x = triplets$x,
                       dims = c(dom_i[2] + 1, dom_j[2] + 1),
                       repr = "T")
}
|
/R/SparseMatrix.R
|
permissive
|
dcooley/TileDB-R
|
R
| false
| false
| 5,102
|
r
|
# MIT License
#
# Copyright (c) 2017-2021 TileDB Inc.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
## sparse matrix helper 'roughly similar' to fromDataFrame()
##' Create (or return) a TileDB sparse array
##'
##' The functions \code{fromSparseMatrix} and \code{toSparseMatrix} help in storing
##' (and retrieving) sparse matrices using a TileDB backend.
##' @param obj A sparse matrix object.
##' @param uri A character variable with an Array URI.
##' @param cell_order A character variable with one of the TileDB cell order values,
##' default is \dQuote{ROW_MAJOR}.
##' @param tile_order A character variable with one of the TileDB tile order values,
##' default is \dQuote{ROW_MAJOR}.
##' @param filter A character variable vector, defaults to \sQuote{ZSTD}, for
##' one or more filters to be applied to each attribute;
##' @param capacity An integer value with the schema capacity, default is 10000.
##' @return Null, invisibly.
##' @examples
##' \dontshow{ctx <- tiledb_ctx(limitTileDBCores())}
##' \dontrun{
##' if (requireNamespace("Matrix", quietly=TRUE)) {
##'   library(Matrix)
##'   set.seed(123) # just to fix it
##'   mat <- matrix(0, nrow=20, ncol=10)
##'   mat[sample(seq_len(200), 20)] <- seq(1, 20)
##'   spmat <- as(mat, "dgTMatrix") # sparse matrix in dgTMatrix format
##'   uri <- "sparse_matrix"
##'   fromSparseMatrix(spmat, uri)  # now written
##'   chk <- toSparseMatrix(uri)    # and re-read
##'   print(chk)
##'   all.equal(spmat, chk)
##' }
##' }
##' @importFrom methods as
##' @export
fromSparseMatrix <- function(obj,
                             uri,
                             cell_order = "ROW_MAJOR",
                             tile_order = "ROW_MAJOR",
                             filter="ZSTD",
                             capacity = 10000L) {
    stopifnot(`obj must be Matrix object` = inherits(obj, "Matrix"),
              `obj must be sparse` = is(obj, "sparseMatrix"),
              `uri must character` = is.character(uri))
    ## Coerce to triplet (COO) form for direct @i/@j/@x slot access below.
    if (class(obj)[1] != "dgTMatrix") obj <- as(obj, "dgTMatrix")
    ## FLOAT64 dimensions give a wider coordinate range than INT32 would.
    dimi <- tiledb_dim(name="i", type = "FLOAT64", # wider range
                       tile = as.numeric(obj@Dim[1]),
                       domain = c(0, obj@Dim[1]-1L))
    dimj <- tiledb_dim(name="j", type = "FLOAT64", # wider range
                       tile = as.numeric(obj@Dim[2]),
                       domain = c(0, obj@Dim[2]-1L))
    dom <- tiledb_domain(dims = c(dimi, dimj))
    ## Map the R storage type of the non-zero values to a TileDB type.
    cl <- class(obj@x)[1]
    if (cl == "integer")
        tp <- "INT32"
    else if (cl == "numeric")
        tp <- "FLOAT64"
    else
        stop("Currently unsupported type: ", cl)
    filterlist <- tiledb_filter_list(sapply(filter, tiledb_filter))
    attx <- tiledb_attr(name="x", type = tp, ncells = 1, filter_list = filterlist)
    schema <- tiledb_array_schema(dom, attrs=attx,
                                  cell_order = cell_order, tile_order = tile_order,
                                  sparse = TRUE, capacity=capacity)
    tiledb_array_create(uri, schema)
    ## Write the triplets; TileDB coordinates are zero-based like the slots.
    arr <- tiledb_array(uri)
    arr[] <- data.frame(i = obj@i, j = obj@j, x = obj@x)
    invisible(NULL)
}
##' @rdname fromSparseMatrix
##' @export
toSparseMatrix <- function(uri) {
  ## Read the array back as an unordered data.frame of (i, j, x) triplets.
  arr <- tiledb_array(uri, as.data.frame=TRUE, query_layout="UNORDERED")
  obj <- arr[]
  ## Recover the matrix extent from the schema's dimension domains.
  dims <- dimensions(domain(schema(uri)))
  d1 <- domain(dims[[1]]) # zero-based domain of dimension "i"
  d2 <- domain(dims[[2]]) # zero-based domain of dimension "j"
  stopifnot(`No column i in data`=!is.na(match("i", colnames(obj))),
            `No column j in data`=!is.na(match("j", colnames(obj))),
            `No column x in data`=!is.na(match("x", colnames(obj))),
            `Matrix package needed`=requireNamespace("Matrix", quietly=TRUE))
  ## TileDB coordinates are zero-based; sparseMatrix() expects one-based
  ## indices, hence the +1 shifts. repr="T" keeps the triplet form.
  sp <- Matrix::sparseMatrix(i = obj$i + 1,
                             j = obj$j + 1,
                             x = obj$x,
                             dims = c(d1[2] + 1, d2[2] + 1),
                             repr = "T")
  sp
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/slim_lang.R
\name{calcPairHeterozygosity}
\alias{calcPairHeterozygosity}
\alias{SLiMBuiltin$calcPairHeterozygosity}
\alias{.SB$calcPairHeterozygosity}
\title{SLiM method calcPairHeterozygosity}
\usage{
calcPairHeterozygosity(genome1, genome2, start, end, infiniteSites)
}
\arguments{
\item{genome1}{An object of type Genome object. Must be of length 1 (a
singleton). See details for description.}
\item{genome2}{An object of type Genome object. Must be of length 1 (a
singleton). See details for description.}
\item{start}{An object of type null or integer. Must be of length 1 (a
singleton). The default value is \code{NULL}. See details for description.}
\item{end}{An object of type null or integer. Must be of length 1 (a singleton).
The default value is \code{NULL}. See details for description.}
\item{infiniteSites}{An object of type logical. Must be of length 1 (a
singleton). The default value is \code{T}. See details for description.}
}
\value{
An object of type float. Return will be of length 1 (a singleton)
}
\description{
Documentation for SLiM function \code{calcPairHeterozygosity}, which is a method
of the SLiM class \code{\link{SLiMBuiltin}}.
Note that the R function is a stub, it does not do anything in R (except bring
up this documentation). It will only do
anything useful when used inside a \code{\link{slim_block}} function further
nested in a \code{\link{slim_script}}
function call, where it will be translated into valid SLiM code as part of a
full SLiM script.
}
\details{
Documentation for this function can be found in the official
\href{http://benhaller.com/slim/SLiM_Manual.pdf#page=707}{SLiM manual: page
707}.
Calculates the heterozygosity for a pair of genomes; these will
typically be the two genomes of a diploid individual (individual.genome1 and
individual.genome2), but any two genomes may be supplied. The calculation can
be narrowed to apply to only a window - a subrange of the full chromosome - by
passing the interval bounds [start, end] for the desired window. In this case,
the vector of mutations used for the calculation will be subset to include
only mutations within the specified window. The default behavior, with start
and end of NULL, provides the genome-wide heterozygosity. The implementation
calcPairHeterozygosity(), viewable with functionSource(), treats every mutation
as independent in the heterozygosity calculations by default (i.e., with
infiniteSites=T). If mutations are stacked, the heterozygosity calculated
therefore depends upon the number of unshared mutations, not the number of
differing sites. Similarly, if multiple Mutation objects exist in different
genomes at the same site (whether representing different genetic states, or
multiple mutational lineages for the same genetic state), each Mutation object
is treated separately for purposes of the heterozygosity calculation, just as
if they were at different sites. One could regard these choices as embodying an
infinite-sites interpretation of the segregating mutations. In most biologically
realistic models, such genetic states will be quite rare, and so the impact of
this choice will be negligible; however, in some models this distinction may be
important. The behavior of calcPairHeterozygosity() can be switched to calculate
based upon the number of differing sites, rather than the number of unshared
mutations, by passing infiniteSites=F.
}
\section{Copyright}{
This is documentation for a function in the SLiM software, and has been
reproduced from the official manual,
which can be found here: \url{http://benhaller.com/slim/SLiM_Manual.pdf}. This
documentation is
Copyright © 2016-2020 Philipp Messer. All rights reserved. More information
about SLiM can be found
on the official website: \url{https://messerlab.org/slim/}
}
\seealso{
Other SLiMBuiltin:
\code{\link{SB}},
\code{\link{calcFST}()},
\code{\link{calcHeterozygosity}()},
\code{\link{calcInbreedingLoad}()},
\code{\link{calcVA}()},
\code{\link{calcWattersonsTheta}()},
\code{\link{codonsToAminoAcids}()},
\code{\link{mm16To256}()},
\code{\link{mmJukesCantor}()},
\code{\link{mmKimura}()},
\code{\link{nucleotideCounts}()},
\code{\link{nucleotideFrequencies}()},
\code{\link{nucleotidesToCodons}()},
\code{\link{summarizeIndividuals}()},
\code{\link{treeSeqMetadata}()}
}
\author{
Benjamin C Haller (\email{bhaller@benhaller.com}) and Philipp W Messer
(\email{messer@cornell.edu})
}
\concept{SLiMBuiltin}
|
/man/calcPairHeterozygosity.Rd
|
permissive
|
rdinnager/slimr
|
R
| false
| true
| 4,498
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/slim_lang.R
\name{calcPairHeterozygosity}
\alias{calcPairHeterozygosity}
\alias{SLiMBuiltin$calcPairHeterozygosity}
\alias{.SB$calcPairHeterozygosity}
\title{SLiM method calcPairHeterozygosity}
\usage{
calcPairHeterozygosity(genome1, genome2, start, end, infiniteSites)
}
\arguments{
\item{genome1}{An object of type Genome object. Must be of length 1 (a
singleton). See details for description.}
\item{genome2}{An object of type Genome object. Must be of length 1 (a
singleton). See details for description.}
\item{start}{An object of type null or integer. Must be of length 1 (a
singleton). The default value is \code{NULL}. See details for description.}
\item{end}{An object of type null or integer. Must be of length 1 (a singleton).
The default value is \code{NULL}. See details for description.}
\item{infiniteSites}{An object of type logical. Must be of length 1 (a
singleton). The default value is \code{T}. See details for description.}
}
\value{
An object of type float. Return will be of length 1 (a singleton)
}
\description{
Documentation for SLiM function \code{calcPairHeterozygosity}, which is a method
of the SLiM class \code{\link{SLiMBuiltin}}.
Note that the R function is a stub, it does not do anything in R (except bring
up this documentation). It will only do
anything useful when used inside a \code{\link{slim_block}} function further
nested in a \code{\link{slim_script}}
function call, where it will be translated into valid SLiM code as part of a
full SLiM script.
}
\details{
Documentation for this function can be found in the official
\href{http://benhaller.com/slim/SLiM_Manual.pdf#page=707}{SLiM manual: page
707}.
Calculates the heterozygosity for a pair of genomes; these will
typically be the two genomes of a diploid individual (individual.genome1 and
individual.genome2), but any two genomes may be supplied. The calculation can
be narrowed to apply to only a window - a subrange of the full chromosome - by
passing the interval bounds [start, end] for the desired window. In this case,
the vector of mutations used for the calculation will be subset to include
only mutations within the specified window. The default behavior, with start
and end of NULL, provides the genome-wide heterozygosity. The implementation
calcPairHeterozygosity(), viewable with functionSource(), treats every mutation
as independent in the heterozygosity calculations by default (i.e., with
infiniteSites=T). If mutations are stacked, the heterozygosity calculated
therefore depends upon the number of unshared mutations, not the number of
differing sites. Similarly, if multiple Mutation objects exist in different
genomes at the same site (whether representing different genetic states, or
multiple mutational lineages for the same genetic state), each Mutation object
is treated separately for purposes of the heterozygosity calculation, just as
if they were at different sites. One could regard these choices as embodying an
infinite-sites interpretation of the segregating mutations. In most biologically
realistic models, such genetic states will be quite rare, and so the impact of
this choice will be negligible; however, in some models this distinction may be
important. The behavior of calcPairHeterozygosity() can be switched to calculate
based upon the number of differing sites, rather than the number of unshared
mutations, by passing infiniteSites=F.
}
\section{Copyright}{
This is documentation for a function in the SLiM software, and has been
reproduced from the official manual,
which can be found here: \url{http://benhaller.com/slim/SLiM_Manual.pdf}. This
documentation is
Copyright © 2016-2020 Philipp Messer. All rights reserved. More information
about SLiM can be found
on the official website: \url{https://messerlab.org/slim/}
}
\seealso{
Other SLiMBuiltin:
\code{\link{SB}},
\code{\link{calcFST}()},
\code{\link{calcHeterozygosity}()},
\code{\link{calcInbreedingLoad}()},
\code{\link{calcVA}()},
\code{\link{calcWattersonsTheta}()},
\code{\link{codonsToAminoAcids}()},
\code{\link{mm16To256}()},
\code{\link{mmJukesCantor}()},
\code{\link{mmKimura}()},
\code{\link{nucleotideCounts}()},
\code{\link{nucleotideFrequencies}()},
\code{\link{nucleotidesToCodons}()},
\code{\link{summarizeIndividuals}()},
\code{\link{treeSeqMetadata}()}
}
\author{
Benjamin C Haller (\email{bhaller@benhaller.com}) and Philipp W Messer
(\email{messer@cornell.edu})
}
\concept{SLiMBuiltin}
|
library(tidyverse)
library(rjson);
## Formatted output to the standard out
## Print a sprintf-formatted message on standard output.
## Arguments are passed straight through to sprintf().
outf <- function(...) {
  msg <- sprintf(...)
  write(msg, file = stdout())
}
## Load a JSON file and let us know about it
## Load a JSON file with rjson::fromJSON, logging the input path first.
read_json_from_file <- function(filename) {
  outf("input: %s", filename)
  fromJSON(file = filename)
}
## Write a table to a file and let us know about it.
## Write `data` to `filename` via write.table, logging the output path.
## Additional write.table arguments may be supplied through `options`.
write_table_to_file <- function(data, filename, options = list()) {
  outf("output: %s", filename)
  call_args <- c(list(data, file = filename), options)
  do.call(write.table, call_args)
}
## Read a CSV file with readr::read_csv, logging the input path first.
read_csv_from_file <- function(filename) {
  outf("input: %s", filename)
  read_csv(filename)
}
## Save a ggplot with ggsave, logging the output path.
## Additional ggsave arguments may be supplied through `options`.
ggsave_logged <- function(filename, plot, options = list()) {
  outf("output: %s", filename)
  call_args <- c(list(filename, plot), options)
  do.call(ggsave, call_args)
}
## Write sprintf-formatted text to `filename`, logging the output path.
## The `...` arguments are passed straight through to sprintf().
write_text_to_file <- function(filename, ...) {
  outf("output: %s", filename)
  text <- sprintf(...)
  write(text, file = filename)
}
|
/preamble.R
|
no_license
|
VincentToups/ds-pres-repo
|
R
| false
| false
| 916
|
r
|
library(tidyverse)
library(rjson)

## Logged I/O helpers: each wrapper announces the file it touches on
## stdout ("input: ..." / "output: ...") before delegating to the
## underlying reader/writer.

## Print a sprintf-formatted message on standard output.
outf <- function(...) {
  msg <- sprintf(...)
  write(msg, file = stdout())
}

## Load a JSON file with rjson::fromJSON, logging the input path.
read_json_from_file <- function(filename) {
  outf("input: %s", filename)
  fromJSON(file = filename)
}

## Write a table via write.table, logging the output path; extra
## write.table arguments may be supplied through `options`.
write_table_to_file <- function(data, filename, options = list()) {
  outf("output: %s", filename)
  do.call(write.table, c(list(data, file = filename), options))
}

## Read a CSV file with readr::read_csv, logging the input path.
read_csv_from_file <- function(filename) {
  outf("input: %s", filename)
  read_csv(filename)
}

## Save a ggplot via ggsave, logging the output path; extra ggsave
## arguments may be supplied through `options`.
ggsave_logged <- function(filename, plot, options = list()) {
  outf("output: %s", filename)
  do.call(ggsave, c(list(filename, plot), options))
}

## Write sprintf-formatted text to a file, logging the output path.
write_text_to_file <- function(filename, ...) {
  outf("output: %s", filename)
  write(sprintf(...), file = filename)
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/analysis_request.R
\name{remove_dataset}
\alias{remove_dataset}
\title{remove_dataset}
\usage{
remove_dataset(x, dataset_name)
}
\arguments{
\item{x}{The \code{\link{ReactomeAnalysisRequest}} to remove the dataset from}
\item{dataset_name}{character The dataset's name}
}
\value{
The updated \code{\link{ReactomeAnalysisRequest}}
}
\description{
Remove the dataset from the \code{\link{ReactomeAnalysisRequest}} object.
}
|
/man/remove_dataset.Rd
|
no_license
|
reactome/ReactomeGSA
|
R
| false
| true
| 501
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/analysis_request.R
\name{remove_dataset}
\alias{remove_dataset}
\title{remove_dataset}
\usage{
remove_dataset(x, dataset_name)
}
\arguments{
\item{x}{The \code{\link{ReactomeAnalysisRequest}} to remove the dataset from}
\item{dataset_name}{character The dataset's name}
}
\value{
The updated \code{\link{ReactomeAnalysisRequest}}
}
\description{
Remove the dataset from the \code{\link{ReactomeAnalysisRequest}} object.
}
|
/Atelier1.R
|
no_license
|
Camcor/atelier1
|
R
| false
| false
| 633
|
r
| ||
# K-Nearest Neighbors (KNN) Model
#
# Classifies the Social_Network_Ads data (Purchased ~ Age + EstimatedSalary):
# load -> select columns -> scale -> 75/25 split -> k = 5 KNN ->
# confusion matrix -> decision-boundary plots for training and test sets.

# importing the dataset
ads <- read.csv('Social_Network_Ads.csv')
summary(ads)

# removing id and gender columns from independent variables
ads <- ads[, 3:5]

# Scaling the data (KNN is distance-based, so features must share a scale)
ads[, 1:2] <- scale(ads[, 1:2])

# splitting the data into train and test set (stratified on Purchased)
library(caTools)
set.seed(2407)
train_index <- sample.split(ads$Purchased, SplitRatio = 0.75)
ads_train <- ads[train_index, ]
ads_test <- ads[!train_index, ]

# Fitting the KNN model and predicting the test set results
library(class)
ads_pred <- knn(train = ads_train[, 1:2], test = ads_test[, 1:2],
                cl = ads_train$Purchased, k = 5)

# Creating the confusion matrix (rows = actual, columns = predicted)
ads_confMat <- table(ads_test$Purchased, ads_pred)

# visualizing results: Training set
# Predict over a fine Age x Salary grid and draw the KNN decision boundary
# with the observed points plotted on top.
library(ElemStatLearn)
data <- ads_train
x1 <- seq(min(data$Age) - 1, max(data$Age) + 1, by = 0.01)
x2 <- seq(min(data$EstimatedSalary) - 1, max(data$EstimatedSalary) + 1, by = 0.01)
grid_data <- expand.grid(x1, x2)
colnames(grid_data) <- c('Age', 'EstimatedSalary')
data_pred <- knn(train = ads_train[, 1:2], test = grid_data,
                 cl = ads_train$Purchased, k = 5)
plot(data[, -3], main = 'KNN Training Set',
     xlab = 'Age', ylab = 'Estimated Salary',
     xlim = range(x1), ylim = range(x2))
# NOTE(review): data_pred is a factor; if contour() rejects non-numeric z,
# use matrix(as.numeric(as.character(data_pred)), ...) instead -- confirm.
contour(x1, x2, matrix(data_pred, length(x1), length(x2)), add = TRUE)
points(grid_data, pch = '.', col = ifelse(data_pred == 1, 'springgreen3', 'tomato'))
points(data, pch = 21, bg = ifelse(data$Purchased == 1, 'green4', 'red3'))

# visualizing results: Test set (same grid construction, test points overlaid)
data <- ads_test
x1 <- seq(min(data$Age) - 1, max(data$Age) + 1, by = 0.01)
x2 <- seq(min(data$EstimatedSalary) - 1, max(data$EstimatedSalary) + 1, by = 0.01)
grid_data <- expand.grid(x1, x2)
colnames(grid_data) <- c('Age', 'EstimatedSalary')
data_pred <- knn(train = ads_train[, 1:2], test = grid_data,
                 cl = ads_train$Purchased, k = 5)
plot(data[, -3], main = 'KNN Test Set',  # fixed: was mislabelled 'KNN Training Set'
     xlab = 'Age', ylab = 'Estimated Salary',
     xlim = range(x1), ylim = range(x2))
contour(x1, x2, matrix(data_pred, length(x1), length(x2)), add = TRUE)
points(grid_data, pch = '.', col = ifelse(data_pred == 1, 'springgreen3', 'tomato'))
points(data, pch = 21, bg = ifelse(data$Purchased == 1, 'green4', 'red3'))
|
/KNN Model.R
|
no_license
|
jainaniket24/R
|
R
| false
| false
| 2,252
|
r
|
# K-Nearest Neighbors (KNN) Model
#
# Classifies the Social_Network_Ads data (Purchased ~ Age + EstimatedSalary):
# load -> select columns -> scale -> 75/25 split -> k = 5 KNN ->
# confusion matrix -> decision-boundary plots for training and test sets.

# importing the dataset
ads <- read.csv('Social_Network_Ads.csv')
summary(ads)

# removing id and gender columns from independent variables
ads <- ads[, 3:5]

# Scaling the data (KNN is distance-based, so features must share a scale)
ads[, 1:2] <- scale(ads[, 1:2])

# splitting the data into train and test set (stratified on Purchased)
library(caTools)
set.seed(2407)
train_index <- sample.split(ads$Purchased, SplitRatio = 0.75)
ads_train <- ads[train_index, ]
ads_test <- ads[!train_index, ]

# Fitting the KNN model and predicting the test set results
library(class)
ads_pred <- knn(train = ads_train[, 1:2], test = ads_test[, 1:2],
                cl = ads_train$Purchased, k = 5)

# Creating the confusion matrix (rows = actual, columns = predicted)
ads_confMat <- table(ads_test$Purchased, ads_pred)

# visualizing results: Training set
# Predict over a fine Age x Salary grid and draw the KNN decision boundary
# with the observed points plotted on top.
library(ElemStatLearn)
data <- ads_train
x1 <- seq(min(data$Age) - 1, max(data$Age) + 1, by = 0.01)
x2 <- seq(min(data$EstimatedSalary) - 1, max(data$EstimatedSalary) + 1, by = 0.01)
grid_data <- expand.grid(x1, x2)
colnames(grid_data) <- c('Age', 'EstimatedSalary')
data_pred <- knn(train = ads_train[, 1:2], test = grid_data,
                 cl = ads_train$Purchased, k = 5)
plot(data[, -3], main = 'KNN Training Set',
     xlab = 'Age', ylab = 'Estimated Salary',
     xlim = range(x1), ylim = range(x2))
# NOTE(review): data_pred is a factor; if contour() rejects non-numeric z,
# use matrix(as.numeric(as.character(data_pred)), ...) instead -- confirm.
contour(x1, x2, matrix(data_pred, length(x1), length(x2)), add = TRUE)
points(grid_data, pch = '.', col = ifelse(data_pred == 1, 'springgreen3', 'tomato'))
points(data, pch = 21, bg = ifelse(data$Purchased == 1, 'green4', 'red3'))

# visualizing results: Test set (same grid construction, test points overlaid)
data <- ads_test
x1 <- seq(min(data$Age) - 1, max(data$Age) + 1, by = 0.01)
x2 <- seq(min(data$EstimatedSalary) - 1, max(data$EstimatedSalary) + 1, by = 0.01)
grid_data <- expand.grid(x1, x2)
colnames(grid_data) <- c('Age', 'EstimatedSalary')
data_pred <- knn(train = ads_train[, 1:2], test = grid_data,
                 cl = ads_train$Purchased, k = 5)
plot(data[, -3], main = 'KNN Test Set',  # fixed: was mislabelled 'KNN Training Set'
     xlab = 'Age', ylab = 'Estimated Salary',
     xlim = range(x1), ylim = range(x2))
contour(x1, x2, matrix(data_pred, length(x1), length(x2)), add = TRUE)
points(grid_data, pch = '.', col = ifelse(data_pred == 1, 'springgreen3', 'tomato'))
points(data, pch = 21, bg = ifelse(data$Purchased == 1, 'green4', 'red3'))
|
## House price prediction: merge Kaggle-style train/test sets, impute
## missing values column by column, drop unused columns, fit a linear
## model and write the test-set predictions to CSV.
## NOTE(review): the imputation constants below (68, 368, 467, 990, 473,
## "2005", and modes such as "RL"/"VinylSd"/"TA") appear to be read off
## the summary()/hist() output -- confirm against the data before reuse.
setwd("D:/R Examples/Hackathon")  # machine-specific path; fragile outside this machine
a <- read.csv("train.csv")        # training data; column 81 is SalePrice
View(a)
b <- read.csv("test.csv")         # test data; no SalePrice column
View(b)
## Stack train (minus SalePrice) and test so imputation and factor-level
## handling are applied once to the combined data.
## NOTE(review): `c` masks base::c as a variable name; c(...) calls still
## resolve to the function, but a different name would be safer.
c <- rbind(a[,-81],b)
View(c)
summary(c)
str(c)
## List the columns that still contain missing values.
colnames(c)[colSums(is.na(c))>0]
## --- Imputation, column by column ---
c$MSZoning[is.na(c$MSZoning)] <- "RL"
hist(c$LotFrontage)
c$LotFrontage[is.na(c$LotFrontage)] <- 68
c$Exterior1st[is.na(c$Exterior1st)] <- "VinylSd"
c$Exterior2nd[is.na(c$Exterior2nd)] <- "VinylSd"
c$MasVnrType[is.na(c$MasVnrType)] <- "None"
c$MasVnrArea[is.na(c$MasVnrArea)] <- 0
## For the factor columns: assigning one more value than there are levels
## renames the existing levels and appends a new one ("No Basement" etc.),
## which is then used to fill the NAs.
## NOTE(review): this assumes the existing levels are in the listed
## (alphabetical) order -- a mismatch would silently relabel categories.
levels(c$BsmtQual) <- c("Ex", "Fa", "Gd", "TA", "No Basement")
c$BsmtQual[is.na(c$BsmtQual)] <- "No Basement"
levels(c$BsmtExposure) <- c("Av", "Gd", "Mn", "No", "No Basement")
c$BsmtExposure[is.na(c$BsmtExposure)] <- "No Basement"
levels(c$BsmtFinType1) <- c("ALQ", "BLQ", "GLQ", "LwQ", "Rec", "Unf", "No Basement")
c$BsmtFinType1[is.na(c$BsmtFinType1)] <- "No Basement"
hist(c$BsmtFinSF1)
c$BsmtFinSF1[is.na(c$BsmtFinSF1)] <- 368
hist(c$BsmtUnfSF)
c$BsmtUnfSF[is.na(c$BsmtUnfSF)] <- 467
c$TotalBsmtSF[is.na(c$TotalBsmtSF)] <- 990
hist(c$TotalBsmtSF)
c$BsmtFullBath <- as.factor(c$BsmtFullBath)
## NOTE(review): assigning 0 to a factor works only if "0" is already a
## level -- confirm.
c$BsmtFullBath[is.na(c$BsmtFullBath)] <- 0
c$KitchenQual[is.na(c$KitchenQual)] <- "TA"
levels(c$FireplaceQu) <- c("Ex", "Fa", "Gd", "Po", "TA", "No Fireplace")
c$FireplaceQu[is.na(c$FireplaceQu)] <- "No Fireplace"
levels(c$GarageType) <- c("2Types", "Attchd", "Basment", "BuiltIn", "CarPort", "Detchd", "No Garage")
c$GarageType[is.na(c$GarageType)] <- "No Garage"
levels(c$GarageYrBlt)
c$GarageYrBlt <- as.factor(c$GarageYrBlt)
c$GarageYrBlt[is.na(c$GarageYrBlt)] <- "2005"
levels(c$GarageFinish) <- c("Fin", "RFn", "Unf", "No Garage")
c$GarageFinish[is.na(c$GarageFinish)] <- "No Garage"
c$GarageCars <- as.factor(c$GarageCars)
c$GarageCars[is.na(c$GarageCars)] <- 2
hist(c$GarageArea)
c$GarageArea[is.na(c$GarageArea)] <- 473
levels(c$Fence) <- c("GdPrv", "GdWo", "MnPrv", "MnWw", "No Fence")
c$Fence[is.na(c$Fence)] <- "No Fence"
## NOTE(review): as.numeric() on a factor returns the internal level codes,
## NOT the original year/count values; as.numeric(as.character(.)) would
## recover the actual numbers -- confirm this coding is intended.
c$GarageYrBlt <- as.numeric(c$GarageYrBlt)
summary(c)
c$GarageCars <- as.numeric(c$GarageCars)  # same level-code caveat as above
## Split the combined data back: Id <= 1460 is train, Id > 1460 is test.
aa <- subset(c, c$Id<1461)
## Drop columns by negative index.
## NOTE(review): -49 appears twice in this vector -- harmless to R, but
## likely a typo for a different column index.
aa <- aa[,c(-6,-7,-73,-72,-71,-70,-42,-40,-43,-56,-64,-65,-66,-75,-10,-13,-14,-32,-23,-9,-12,-29,-36,-37,-79,-49,-49)]
aa <- cbind(aa,a$SalePrice)  # reattach the target to the training rows
View(aa)
bb <- subset(c, c$Id>1460)
bb <- bb[,c(-6,-7,-73,-72,-71,-70,-42,-40,-43,-56,-64,-65,-66,-75,-10,-13,-14,-32,-23,-9,-12,-29,-36,-37,-79,-49,-49)]
View(bb)
colnames(aa)[55] <- "SalePrice"  # rename the cbind-generated column
## Linear model on all remaining predictors except four excluded terms.
model1 <- lm(SalePrice~.-Exterior2nd-BsmtFinType1-GarageFinish-GrLivArea, data = aa)
summary(model1)
px <- predict(model1, bb)  # predictions for the test set
View(px)
write.csv(px, file = "sample prediction.csv")
?write.csv  # leftover interactive help lookup
|
/House_Prices.r
|
no_license
|
fegadeharish/House-Price-Prediction
|
R
| false
| false
| 2,654
|
r
|
## House price prediction: merge Kaggle-style train/test sets, impute
## missing values column by column, drop unused columns, fit a linear
## model and write the test-set predictions to CSV.
## NOTE(review): the imputation constants below (68, 368, 467, 990, 473,
## "2005", and modes such as "RL"/"VinylSd"/"TA") appear to be read off
## the summary()/hist() output -- confirm against the data before reuse.
setwd("D:/R Examples/Hackathon")  # machine-specific path; fragile outside this machine
a <- read.csv("train.csv")        # training data; column 81 is SalePrice
View(a)
b <- read.csv("test.csv")         # test data; no SalePrice column
View(b)
## Stack train (minus SalePrice) and test so imputation and factor-level
## handling are applied once to the combined data.
## NOTE(review): `c` masks base::c as a variable name; c(...) calls still
## resolve to the function, but a different name would be safer.
c <- rbind(a[,-81],b)
View(c)
summary(c)
str(c)
## List the columns that still contain missing values.
colnames(c)[colSums(is.na(c))>0]
## --- Imputation, column by column ---
c$MSZoning[is.na(c$MSZoning)] <- "RL"
hist(c$LotFrontage)
c$LotFrontage[is.na(c$LotFrontage)] <- 68
c$Exterior1st[is.na(c$Exterior1st)] <- "VinylSd"
c$Exterior2nd[is.na(c$Exterior2nd)] <- "VinylSd"
c$MasVnrType[is.na(c$MasVnrType)] <- "None"
c$MasVnrArea[is.na(c$MasVnrArea)] <- 0
## For the factor columns: assigning one more value than there are levels
## renames the existing levels and appends a new one ("No Basement" etc.),
## which is then used to fill the NAs.
## NOTE(review): this assumes the existing levels are in the listed
## (alphabetical) order -- a mismatch would silently relabel categories.
levels(c$BsmtQual) <- c("Ex", "Fa", "Gd", "TA", "No Basement")
c$BsmtQual[is.na(c$BsmtQual)] <- "No Basement"
levels(c$BsmtExposure) <- c("Av", "Gd", "Mn", "No", "No Basement")
c$BsmtExposure[is.na(c$BsmtExposure)] <- "No Basement"
levels(c$BsmtFinType1) <- c("ALQ", "BLQ", "GLQ", "LwQ", "Rec", "Unf", "No Basement")
c$BsmtFinType1[is.na(c$BsmtFinType1)] <- "No Basement"
hist(c$BsmtFinSF1)
c$BsmtFinSF1[is.na(c$BsmtFinSF1)] <- 368
hist(c$BsmtUnfSF)
c$BsmtUnfSF[is.na(c$BsmtUnfSF)] <- 467
c$TotalBsmtSF[is.na(c$TotalBsmtSF)] <- 990
hist(c$TotalBsmtSF)
c$BsmtFullBath <- as.factor(c$BsmtFullBath)
## NOTE(review): assigning 0 to a factor works only if "0" is already a
## level -- confirm.
c$BsmtFullBath[is.na(c$BsmtFullBath)] <- 0
c$KitchenQual[is.na(c$KitchenQual)] <- "TA"
levels(c$FireplaceQu) <- c("Ex", "Fa", "Gd", "Po", "TA", "No Fireplace")
c$FireplaceQu[is.na(c$FireplaceQu)] <- "No Fireplace"
levels(c$GarageType) <- c("2Types", "Attchd", "Basment", "BuiltIn", "CarPort", "Detchd", "No Garage")
c$GarageType[is.na(c$GarageType)] <- "No Garage"
levels(c$GarageYrBlt)
c$GarageYrBlt <- as.factor(c$GarageYrBlt)
c$GarageYrBlt[is.na(c$GarageYrBlt)] <- "2005"
levels(c$GarageFinish) <- c("Fin", "RFn", "Unf", "No Garage")
c$GarageFinish[is.na(c$GarageFinish)] <- "No Garage"
c$GarageCars <- as.factor(c$GarageCars)
c$GarageCars[is.na(c$GarageCars)] <- 2
hist(c$GarageArea)
c$GarageArea[is.na(c$GarageArea)] <- 473
levels(c$Fence) <- c("GdPrv", "GdWo", "MnPrv", "MnWw", "No Fence")
c$Fence[is.na(c$Fence)] <- "No Fence"
## NOTE(review): as.numeric() on a factor returns the internal level codes,
## NOT the original year/count values; as.numeric(as.character(.)) would
## recover the actual numbers -- confirm this coding is intended.
c$GarageYrBlt <- as.numeric(c$GarageYrBlt)
summary(c)
c$GarageCars <- as.numeric(c$GarageCars)  # same level-code caveat as above
## Split the combined data back: Id <= 1460 is train, Id > 1460 is test.
aa <- subset(c, c$Id<1461)
## Drop columns by negative index.
## NOTE(review): -49 appears twice in this vector -- harmless to R, but
## likely a typo for a different column index.
aa <- aa[,c(-6,-7,-73,-72,-71,-70,-42,-40,-43,-56,-64,-65,-66,-75,-10,-13,-14,-32,-23,-9,-12,-29,-36,-37,-79,-49,-49)]
aa <- cbind(aa,a$SalePrice)  # reattach the target to the training rows
View(aa)
bb <- subset(c, c$Id>1460)
bb <- bb[,c(-6,-7,-73,-72,-71,-70,-42,-40,-43,-56,-64,-65,-66,-75,-10,-13,-14,-32,-23,-9,-12,-29,-36,-37,-79,-49,-49)]
View(bb)
colnames(aa)[55] <- "SalePrice"  # rename the cbind-generated column
## Linear model on all remaining predictors except four excluded terms.
model1 <- lm(SalePrice~.-Exterior2nd-BsmtFinType1-GarageFinish-GrLivArea, data = aa)
summary(model1)
px <- predict(model1, bb)  # predictions for the test set
View(px)
write.csv(px, file = "sample prediction.csv")
?write.csv  # leftover interactive help lookup
|
## testthat suite for get_R() (earlyR). Relies on the incidence, distcrete
## and epitrix packages.
context("Test get_R")

test_that("Test against reference results", {
  skip_on_cran()  # regression test against a stored object; skip on CRAN

  ## simulate basic epicurve
  dat <- c(0, 2, 2, 3, 3, 5, 5, 5, 6, 6, 6, 6)
  i <- incidence(dat)

  ## example with a function for SI (daily-discretized gamma serial interval)
  si <- distcrete("gamma", interval = 1L,
                  shape = 1.5,
                  scale = 2, w = 0)

  R_1 <- get_R(i, si = si)
  ## regression check against the stored reference in rds/R_1.rds
  expect_equal_to_reference(R_1, file = "rds/R_1.rds")
  ## the result must embed the incidence input unchanged
  expect_identical(i, R_1$incidence)
})

test_that("Test that SI is used consistently", {
  skip_on_cran()

  ## simulate basic epicurve
  dat <- c(0, 2, 2, 3, 3, 5, 5, 5, 6, 6, 6, 6)
  i <- incidence(dat)

  ## example with a function for SI: a user-supplied distcrete object
  ## must be stored as-is in the result
  si <- distcrete("gamma", interval = 1L,
                  shape = 1.5,
                  scale = 2, w = 0)
  R_1 <- get_R(i, si = si)
  expect_identical(si, R_1$si)

  ## with internally generated SI: parameters derived from si_mean/si_sd
  ## must match epitrix's mean/CV-to-shape/scale conversion
  mu <- 10
  sd <- 3.2213
  params <- epitrix::gamma_mucv2shapescale(mu, sd/mu)
  R_2 <- get_R(i, si_mean = mu, si_sd = sd)
  expect_identical(params, R_2$si$parameters)
})

test_that("Errors are thrown when they should", {
  ## unsupported input class
  expect_error(get_R("mklmlbdfb"...)
  expect_error(get_R("mklmbldfb"),
               "No method for objects of class character")
  ## non-daily incidence is rejected
  i <- incidence(1:10, 3)
  expect_error(get_R(i, "ebola"),
               "daily incidence needed, but interval is 3 days")
  ## grouped incidence is rejected
  i <- incidence(1:10, 1, group = letters[1:10])
  expect_error(get_R(i, "ebola"),
               "cannot use multiple groups in incidence object")
  ## SI must be discretized on a daily interval
  i <- incidence(1)
  si <- distcrete("gamma", interval = 5L,
                  shape = 1.5,
                  scale = 2, w = 0)
  expect_error(get_R(i, si = si),
               "interval used in si is not 1 day, but 5")
})
|
/tests/testthat/test_get_R.R
|
no_license
|
Gulfa/earlyR
|
R
| false
| false
| 1,779
|
r
|
## testthat suite for get_R() (earlyR). Relies on the incidence, distcrete
## and epitrix packages.
context("Test get_R")

test_that("Test against reference results", {
  skip_on_cran()  # regression test against a stored object; skip on CRAN

  ## simulate basic epicurve
  dat <- c(0, 2, 2, 3, 3, 5, 5, 5, 6, 6, 6, 6)
  i <- incidence(dat)

  ## example with a function for SI (daily-discretized gamma serial interval)
  si <- distcrete("gamma", interval = 1L,
                  shape = 1.5,
                  scale = 2, w = 0)

  R_1 <- get_R(i, si = si)
  ## regression check against the stored reference in rds/R_1.rds
  expect_equal_to_reference(R_1, file = "rds/R_1.rds")
  ## the result must embed the incidence input unchanged
  expect_identical(i, R_1$incidence)
})

test_that("Test that SI is used consistently", {
  skip_on_cran()

  ## simulate basic epicurve
  dat <- c(0, 2, 2, 3, 3, 5, 5, 5, 6, 6, 6, 6)
  i <- incidence(dat)

  ## example with a function for SI: a user-supplied distcrete object
  ## must be stored as-is in the result
  si <- distcrete("gamma", interval = 1L,
                  shape = 1.5,
                  scale = 2, w = 0)
  R_1 <- get_R(i, si = si)
  expect_identical(si, R_1$si)

  ## with internally generated SI: parameters derived from si_mean/si_sd
  ## must match epitrix's mean/CV-to-shape/scale conversion
  mu <- 10
  sd <- 3.2213
  params <- epitrix::gamma_mucv2shapescale(mu, sd/mu)
  R_2 <- get_R(i, si_mean = mu, si_sd = sd)
  expect_identical(params, R_2$si$parameters)
})

test_that("Errors are thrown when they should", {
  ## unsupported input class
  expect_error(get_R("mklmbldfb"),
               "No method for objects of class character")
  ## non-daily incidence is rejected
  i <- incidence(1:10, 3)
  expect_error(get_R(i, "ebola"),
               "daily incidence needed, but interval is 3 days")
  ## grouped incidence is rejected
  i <- incidence(1:10, 1, group = letters[1:10])
  expect_error(get_R(i, "ebola"),
               "cannot use multiple groups in incidence object")
  ## SI must be discretized on a daily interval
  i <- incidence(1)
  si <- distcrete("gamma", interval = 5L,
                  shape = 1.5,
                  scale = 2, w = 0)
  expect_error(get_R(i, si = si),
               "interval used in si is not 1 day, but 5")
})
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/common-get_options.R
\name{get_rkeops_options}
\alias{get_rkeops_options}
\title{Get the current \code{rkeops} options in \code{R} global options scope}
\usage{
get_rkeops_options(tag = NULL)
}
\arguments{
\item{tag}{text string being \code{"compile"} or \code{"runtime"} to get corresponding
options. If missing (default), both are returned.}
}
\value{
a list with \code{rkeops} current options values (see Details).
}
\description{
\code{rkeops} uses two sets of options: compile options (see
\code{\link[rkeops:compile_options]{rkeops::compile_options()}}) and runtime options (see
\code{\link[rkeops:runtime_options]{rkeops::runtime_options()}}). These options define the behavior of \code{rkeops}
when compiling or when calling user-defined operators.
You can read the current states of \code{rkeops} options by calling
\code{get_rkeops_options()}.
}
\details{
\code{rkeops} global options include two lists defining options used at
compilation of user-defined operators or at runtime. These two lists
contain specific information (see \code{\link[rkeops:compile_options]{rkeops::compile_options()}} and
\code{\link[rkeops:runtime_options]{rkeops::runtime_options()}} respectively, in particular for default values).
If the \code{tag} input parameter is specified (e.g. \code{"compile"} or \code{"runtime"}),
only the corresponding option list is returned.
These options are set with the functions \code{\link[rkeops:set_rkeops_options]{rkeops::set_rkeops_options()}} and
\code{\link[rkeops:set_rkeops_option]{rkeops::set_rkeops_option()}}. To know which values are allowed for which
options, you can check \code{\link[rkeops:compile_options]{rkeops::compile_options()}} and
\code{\link[rkeops:runtime_options]{rkeops::runtime_options()}}.
}
\examples{
library(rkeops)
get_rkeops_options()
}
\seealso{
\code{\link[rkeops:get_rkeops_option]{rkeops::get_rkeops_option()}}, \code{\link[rkeops:compile_options]{rkeops::compile_options()}},
\code{\link[rkeops:runtime_options]{rkeops::runtime_options()}}, \code{\link[rkeops:set_rkeops_options]{rkeops::set_rkeops_options()}},
\code{\link[rkeops:set_rkeops_option]{rkeops::set_rkeops_option()}}
}
\author{
Ghislain Durif
}
|
/rkeops/man/get_rkeops_options.Rd
|
permissive
|
dvolgyes/keops
|
R
| false
| true
| 2,256
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/common-get_options.R
\name{get_rkeops_options}
\alias{get_rkeops_options}
\title{Get the current \code{rkeops} options in \code{R} global options scope}
\usage{
get_rkeops_options(tag = NULL)
}
\arguments{
\item{tag}{text string being \code{"compile"} or \code{"runtime"} to get corresponding
options. If missing (default), both are returned.}
}
\value{
a list with \code{rkeops} current options values (see Details).
}
\description{
\code{rkeops} uses two sets of options: compile options (see
\code{\link[rkeops:compile_options]{rkeops::compile_options()}}) and runtime options (see
\code{\link[rkeops:runtime_options]{rkeops::runtime_options()}}). These options define the behavior of \code{rkeops}
when compiling or when calling user-defined operators.
You can read the current states of \code{rkeops} options by calling
\code{get_rkeops_options()}.
}
\details{
\code{rkeops} global options include two lists defining options used at
compilation of user-defined operators or at runtime. These two lists
contain specific information (see \code{\link[rkeops:compile_options]{rkeops::compile_options()}} and
\code{\link[rkeops:runtime_options]{rkeops::runtime_options()}} respectively, in particular for default values).
If the \code{tag} input parameter is specified (e.g. \code{"compile"} or \code{"runtime"}),
only the corresponding option list is returned.
These options are set with the functions \code{\link[rkeops:set_rkeops_options]{rkeops::set_rkeops_options()}} and
\code{\link[rkeops:set_rkeops_option]{rkeops::set_rkeops_option()}}. To know which values are allowed for which
options, you can check \code{\link[rkeops:compile_options]{rkeops::compile_options()}} and
\code{\link[rkeops:runtime_options]{rkeops::runtime_options()}}.
}
\examples{
library(rkeops)
get_rkeops_options()
}
\seealso{
\code{\link[rkeops:get_rkeops_option]{rkeops::get_rkeops_option()}}, \code{\link[rkeops:compile_options]{rkeops::compile_options()}},
\code{\link[rkeops:runtime_options]{rkeops::runtime_options()}}, \code{\link[rkeops:set_rkeops_options]{rkeops::set_rkeops_options()}},
\code{\link[rkeops:set_rkeops_option]{rkeops::set_rkeops_option()}}
}
\author{
Ghislain Durif
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/pmse_lm.R
\name{pmse_lm}
\alias{pmse_lm}
\title{Prediction mean squared error for Bayesian regularized regression models}
\usage{
pmse_lm(object, ytest = NULL, y = NULL, N_train = NULL)
}
\arguments{
\item{object}{An object of class `stanfit` returned by `stan_reg_lm`.}
\item{ytest}{Numeric vector of output values for the test set. Provide either `ytest` or `y` and `N_train`.}
\item{y}{Numeric vector[N] of output values. Provide either `ytest` or `y` and `N_train`.}
\item{N_train}{Size of the training set. First part of the data will be used for training. Provide either `ytest` or `y` and `N_train`.}
}
\value{
Numeric value for the prediction mean squared error.
}
\description{
Function to compute the prediction mean squared error (PMSE) on models fit using `stan_reg_lm`. The PMSE is computed as:
\eqn{ \frac{1}{N} \Sigma^N_{i=1} (y^{gen}_i - y_i)^2 }, with \eqn{ y^{gen}_i } being the posterior mean of the MCMC draws
for the predicted value of that observation and \eqn{y_i} being the actual value in the test set.
}
|
/man/pmse_lm.Rd
|
permissive
|
sara-vanerp/bayesreg
|
R
| false
| true
| 1,111
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/pmse_lm.R
\name{pmse_lm}
\alias{pmse_lm}
\title{Prediction mean squared error for Bayesian regularized regression models}
\usage{
pmse_lm(object, ytest = NULL, y = NULL, N_train = NULL)
}
\arguments{
\item{object}{An object of class `stanfit` returned by `stan_reg_lm`.}
\item{ytest}{Numeric vector of output values for the test set. Provide either `ytest` or `y` and `N_train`.}
\item{y}{Numeric vector[N] of output values. Provide either `ytest` or `y` and `N_train`.}
\item{N_train}{Size of the training set. First part of the data will be used for training. Provide either `ytest` or `y` and `N_train`.}
}
\value{
Numeric value for the prediction mean squared error.
}
\description{
Function to compute the prediction mean squared error (PMSE) on models fit using `stan_reg_lm`. The PMSE is computed as:
\eqn{ \frac{1}{N} \Sigma^N_{i=1} (y^{gen}_i - y_i)^2 }, with \eqn{ y^{gen}_i } being the posterior mean of the MCMC draws
for the predicted value of that observation and \eqn{y_i} being the actual value in the test set.
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/canon.R
\name{Rd_canonize}
\alias{Rd_canonize}
\alias{Rd_canonize_text}
\alias{Rd_canonize_code}
\title{Rd Canonical Form}
\usage{
Rd_canonize(rd, ..., .check = TRUE)
Rd_canonize_text(rd, .check = TRUE, ...)
Rd_canonize_code(rd, .check = TRUE, ...)
}
\arguments{
\item{rd}{the Rd container object to put in canonical form.}
\item{...}{Arguments passed on to \code{is_valid_Rd_object}
\describe{
\item{x}{object to test}
\item{strict}{if the class must be set. A value of NA indicates that the
first level need not be classed but all subsequent must be.}
\item{tags}{the type of tag(s) allowed in the \code{Rd_tag} attribute.}
\item{deep}{should contained elements also be checked for validity?}
}}
\item{.check}{Perform input checks?}
}
\description{
Canonical form is simply described as that which would come out from reading
an Rd file via, \code{\link[tools:parse_Rd]{tools::parse_Rd()}}.
}
\details{
\strong{Canonical Rd Text has:}
\itemize{
\item One line per element, with \code{attr(., 'Rd_tag')=='TEXT'}
\item The indents are merged with content if the first content is text.
\item Newlines are contained with the content provided the content is 'TEXT',
but the newline must be the last character in the string and cannot appear anywhere else.
\item Comments are a separate class and do not include the newline.
}
\strong{Canonical R code follows the following rules:}
\itemize{
\item One element per line of code.
\item newline is included at the end of the line string,
not as a separate element.
\item if there are multiple lines they are bound together in an Rd or Rd_tag list.
}
}
\section{Functions}{
\itemize{
\item \code{Rd_canonize_text}: Put text in canonical form.
\item \code{Rd_canonize_code}: Put R code in canonical form.
}}
\examples{
## Rd_c does not guarantee canonical code.
x <- Rd_c(Rd('Testing'), Rd('\\n'))
str(x)
str(Rd_canonize(x))
}
|
/man/Rd_canonize.Rd
|
no_license
|
cran/Rd
|
R
| false
| true
| 2,024
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/canon.R
\name{Rd_canonize}
\alias{Rd_canonize}
\alias{Rd_canonize_text}
\alias{Rd_canonize_code}
\title{Rd Canonical Form}
\usage{
Rd_canonize(rd, ..., .check = TRUE)
Rd_canonize_text(rd, .check = TRUE, ...)
Rd_canonize_code(rd, .check = TRUE, ...)
}
\arguments{
\item{rd}{the Rd container object to put in canonical form.}
\item{...}{Arguments passed on to \code{is_valid_Rd_object}
\describe{
\item{x}{object to test}
\item{strict}{if the class must be set. A value of NA indicates that the
first level need not be classed but all subsequent must be.}
\item{tags}{the type of tag(s) allowed in the \code{Rd_tag} attribute.}
\item{deep}{should contained elements also be checked for validity?}
}}
\item{.check}{Perform input checks?}
}
\description{
Canonical form is simply described as that which would come out from reading
an Rd file via, \code{\link[tools:parse_Rd]{tools::parse_Rd()}}.
}
\details{
\strong{Canonical Rd Text has:}
\itemize{
\item One line per element, with \code{attr(., 'Rd_tag')=='TEXT'}
\item The indents are merged with content if the first content is text.
\item Newlines are contained with the content provided the content is 'TEXT',
but the newline must be the last character in the string and cannot appear anywhere else.
\item Comments are a separate class and do not include the newline.
}
\strong{Canonical R code follows the following rules:}
\itemize{
\item One element per line of code.
\item newline is included at the end of the line string,
not as a separate element.
\item if there are multiple lines they are bound together in an Rd or Rd_tag list.
}
}
\section{Functions}{
\itemize{
\item \code{Rd_canonize_text}: Put text in canonical form.
\item \code{Rd_canonize_code}: Put R code in canonical form.
}}
\examples{
## Rd_c does not guarantee canonical code.
x <- Rd_c(Rd('Testing'), Rd('\\n'))
str(x)
str(Rd_canonize(x))
}
|
\name{mycamweather-package}
\alias{mycamweather-package}
\alias{mycamweather}
\docType{package}
\title{
What the package does (short line)
~~ package title ~~
}
\description{
More about what it does (maybe more than one line)
~~ A concise (1-5 lines) description of the package ~~
}
\details{
\tabular{ll}{
Package: \tab mycamweather\cr
Type: \tab Package\cr
Version: \tab 1.0\cr
Date: \tab 2014-01-08\cr
License: \tab What license is it under?\cr
}
~~ An overview of how to use the package, including the most important ~~
~~ functions ~~
}
\author{
Who wrote it
Maintainer: Who to complain to <yourfault@somewhere.net>
~~ The author and/or maintainer of the package ~~
}
\references{
~~ Literature or other references for background information ~~
}
~~ Optionally other standard keywords, one per line, from file KEYWORDS in ~~
~~ the R documentation directory ~~
\keyword{ package }
\seealso{
~~ Optional links to other man pages, e.g. ~~
~~ \code{\link[<pkg>:<pkg>-package]{<pkg>}} ~~
}
\examples{
~~ simple examples of the most important functions ~~
}
|
/man/mycamweather-package.Rd
|
no_license
|
cbudjan/mycamweather
|
R
| false
| false
| 1,059
|
rd
|
\name{mycamweather-package}
\alias{mycamweather-package}
\alias{mycamweather}
\docType{package}
\title{
What the package does (short line)
~~ package title ~~
}
\description{
More about what it does (maybe more than one line)
~~ A concise (1-5 lines) description of the package ~~
}
\details{
\tabular{ll}{
Package: \tab mycamweather\cr
Type: \tab Package\cr
Version: \tab 1.0\cr
Date: \tab 2014-01-08\cr
License: \tab What license is it under?\cr
}
~~ An overview of how to use the package, including the most important ~~
~~ functions ~~
}
\author{
Who wrote it
Maintainer: Who to complain to <yourfault@somewhere.net>
~~ The author and/or maintainer of the package ~~
}
\references{
~~ Literature or other references for background information ~~
}
~~ Optionally other standard keywords, one per line, from file KEYWORDS in ~~
~~ the R documentation directory ~~
\keyword{ package }
\seealso{
~~ Optional links to other man pages, e.g. ~~
~~ \code{\link[<pkg>:<pkg>-package]{<pkg>}} ~~
}
\examples{
~~ simple examples of the most important functions ~~
}
|
rm(list = ls())
library(Daniel)
library(dplyr)
library(nnet)
## Wald confidence intervals for the second coefficient (the exposure term)
## of a fitted nnet::multinom model with two non-reference outcome levels.
##
## Args:
##   fit: a fitted model from nnet::multinom; summary(fit) must expose
##        `coefficients` and `standard.errors` matrices with >= 2 rows.
##   z:   critical value of the standard normal. Defaults to 1.96 for
##        backward compatibility (an approximate 95% interval); pass
##        qnorm(0.975) for the exact value, or another quantile for a
##        different confidence level.
##
## Returns: a 2 x 2 matrix; row i holds c(lower, upper) for outcome i.
CalcCImultinom <- function(fit, z = 1.96)
{
  s <- summary(fit)
  coef <- s$coefficients
  ses <- s$standard.errors
  ## estimate +/- z * SE for each of the two outcome equations
  ci.1 <- coef[1, 2] + c(-1, 1) * z * ses[1, 2]
  ci.2 <- coef[2, 2] + c(-1, 1) * z * ses[2, 2]
  rbind(ci.1, ci.2)
}
#key
# A, B,C,D,E,F - betaE[2] = 1.25, 1.5, 1.75, 2, 2.25, 2.5
# A,B,C, D, E,F - betaU = 2,3,4,5,6,7
patt <- "DB"
beta0 <- c(-6, -5)
betaE <- c(log(1), log(2))
betaU <- c(log(3), log(2.5))
sigmaU <- 1
n.sample <- 50000
n.sim <- 1000
AllY <- matrix(nr = n.sim, nc = 3)
sace.diff1 <- sace.diff2 <- ace.diff1 <- ace.diff2 <-
sace.or1 <- sace.or2 <- ace.or1 <- ace.or2 <-
or.approx1 <- or.approx2 <- or.approx.true1 <- or.approx.true2 <-
pop.never.s1 <- pop.never.s2 <- vector(length = n.sim)
ci1 <- ci2 <- matrix(nr = n.sim, nc = 2)
for (j in 1:n.sim)
{
CatIndex(j)
# Simulate genetic score
U <- rnorm(n.sample, 0, sd = sigmaU)
#### Calcualte probabilites for each subtype with and without the exposure ####
e1E0 <- exp(beta0[1] + betaU[1]*U)
e1E1 <- exp(beta0[1] + betaE[1] + betaU[1]*U)
e2E0 <- exp(beta0[2] + betaU[2]*U)
e2E1 <- exp(beta0[2] + betaE[2] + betaU[2]*U)
prE0Y1 <- e1E0/(1 + e1E0 + e2E0)
prE0Y2 <- e2E0/(1 + e1E0 + e2E0)
prE1Y1 <- e1E1/(1 + e1E1 + e2E1)
prE1Y2 <- e2E1/(1 + e1E1 + e2E1)
probsE0 <- cbind(prE0Y1, prE0Y2, 1 - prE0Y1 - prE0Y2)
probsE1 <- cbind(prE1Y1, prE1Y2, 1 - prE1Y1 - prE1Y2)
# Simulate subtypes #
Yctrl <- Ytrt <- vector(length = n.sample)
X <- rbinom(n = n.sample, 1, 0.5)
for (i in 1:n.sample)
{
Yctrl[i] <- sample(c(1,2,0), 1, replace = T, prob = probsE0[i, ])
Ytrt[i] <- sample(c(1,2,0), 1, replace = T, prob = probsE1[i, ])
}
Y <- (1-X)*Yctrl + X*Ytrt
AllY[j, ] <- table(Y)
Y1ctrl <- Yctrl==1
Y1trt <- Ytrt==1
Y2ctrl <- Yctrl==2
Y2trt <- Ytrt==2
pop.never.s1[j] <- mean(Y1ctrl==0 & Y1trt==0)
pop.never.s2[j] <- mean(Y2ctrl==0 & Y2trt==0)
# estimate causal parameters
sace.diff1[j] <- mean((Y1trt - Y1ctrl)[Y2ctrl==0 & Y2trt==0])
sace.diff2[j]<- mean((Y2trt - Y2ctrl)[Y1ctrl==0 & Y1trt==0])
ace.diff1[j] <- mean((Y1trt[Y2trt==0 & X==1]) - mean(Y1ctrl[Y2ctrl==0 & X==0]))
ace.diff2[j] <- mean((Y2trt[Y1trt==0 & X==1]) - mean(Y2ctrl[Y1ctrl==0 & X==0]))
# Ypo <- c(Yctrl, Ytrt)
# Upo <- rep(U,2)
# Xpo <- rep(x = c(0,1), each = n.sample)
# fit.full.po <- multinom(Ypo~ Xpo + Upo)
# fit.po <- multinom(Ypo~ Xpo)
fit <- multinom(Y~ X)
cis <- CalcCImultinom(fit)
ci1[j, ] <- cis[1, ]
ci2[j, ] <- cis[2, ]
Y1only <- Y[Y<2]
X1only <- X[Y<2]
U1only <-U[Y<2]
Y2only <- Y[Y!=1]
X2only <- X[Y!=1]
U2only <-U[Y!=1]
Y2only[Y2only>0] <- 1
vec.for.or.1only <- c(sum((1 - Y1only) * (1 - X1only)) , sum(Y1only * (1 - X1only)),
sum((1 - Y1only) * X1only), sum(Y1only*X1only))
vec.for.or.2only <- c(sum((1 - Y2only) * (1 - X2only)) , sum(Y2only * (1 - X2only)),
sum((1 - Y2only) * X2only), sum(Y2only*X2only))
ace.or1[j] <- CalcOR(vec.for.or.1only)
ace.or2[j] <- CalcOR(vec.for.or.2only)
Y1only.sace <- Y[Ytrt <2 & Yctrl < 2]
X1only.sace <- X[Ytrt <2 & Yctrl < 2]
U1only.sace <-U[Ytrt <2 & Yctrl < 2]
Y2only.sace <- Y[Ytrt!=1 & Y1ctrl!=1]
X2only.sace <- X[Ytrt!=1 & Y1ctrl!=1]
U2only.sace <-U[Ytrt!=1 & Y1ctrl!=1]
Y2only.sace[Y2only.sace>0] <- 1
vec.for.or.sace1 <- c(sum((1 - Y1only.sace) * (1 - X1only.sace)) , sum(Y1only.sace * (1 - X1only.sace)),
sum((1 - Y1only.sace) * X1only.sace), sum(Y1only.sace*X1only.sace))
vec.for.or.sace2 <- c(sum((1 - Y2only.sace) * (1 - X2only.sace)) , sum(Y2only.sace * (1 - X2only.sace)),
sum((1 - Y2only.sace) * X2only.sace), sum(Y2only.sace*X2only.sace))
sace.or1[j] <- CalcOR(vec.for.or.sace1)
sace.or2[j] <- CalcOR(vec.for.or.sace2)
Y1 <- Y==1
Y2 <- Y==2
fit.logistic.Y1 <- glm(Y1 ~ X, family = "binomial")
fit.logistic.true.Y1 <- glm(Y1 ~ X + U, family = "binomial")
fit.logistic.Y2 <- glm(Y2 ~ X, family = "binomial")
fit.logistic.true.Y2 <- glm(Y2 ~ X + U, family = "binomial")
or.approx1[j] <- exp(coef(fit.logistic.Y1)[2])
or.approx.true1[j] <- exp(coef(fit.logistic.true.Y1)[2])
or.approx2[j] <- exp(coef(fit.logistic.Y2)[2])
or.approx.true2[j] <- exp(coef(fit.logistic.true.Y2)[2])
}
save.image(paste0("CMPEn50krareScen4",patt,".RData"))
|
/Simulations/Scripts/R/Rare/Scenario 4/CMPEn50KrareScen4DB.R
|
no_license
|
yadevi/CausalMPE
|
R
| false
| false
| 4,215
|
r
|
# Simulation study comparing causal estimands (SACE and ACE, on difference and
# odds-ratio scales) for two competing disease subtypes, generated from a
# multinomial logistic model with a normal unmeasured confounder U.
# The scenario is encoded in `patt` (see the "#key" comments below); all
# per-replicate results are saved to an .RData workspace at the end.
# NOTE(review): relies on the non-CRAN helper package `Daniel` for CatIndex()
# (progress printing) and CalcOR() (odds ratio from 2x2 cell counts) -- confirm
# availability before running.
rm(list = ls())
library(Daniel)
library(dplyr)
library(nnet)

# 95% Wald confidence intervals for the exposure coefficient (column 2) of
# each of the two non-reference outcome categories of a fitted nnet::multinom
# model. Returns a 2x2 matrix: row i = c(lower, upper) for category i.
CalcCImultinom <- function(fit)
{
  s <- summary(fit)
  coef <- s$coefficients
  ses <- s$standard.errors
  ci.1 <- coef[1,2] + c(-1, 1)*1.96*ses[1, 2]
  ci.2 <- coef[2,2] + c(-1, 1)*1.96*ses[2, 2]
  return(rbind(ci.1,ci.2))
}

#key
# A, B,C,D,E,F - betaE[2] = 1.25, 1.5, 1.75, 2, 2.25, 2.5
# A,B,C, D, E,F - betaU = 2,3,4,5,6,7
patt <- "DB"                  # scenario tag, used in the output file name
beta0 <- c(-6, -5)            # multinomial intercepts for subtypes 1 and 2
betaE <- c(log(1), log(2))    # exposure effects (log odds ratios) per subtype
betaU <- c(log(3), log(2.5))  # confounder effects per subtype
sigmaU <- 1                   # SD of the normal confounder U
n.sample <- 50000             # subjects per simulated data set
n.sim <- 1000                 # number of simulation replicates

# Per-replicate result holders.
# NOTE(review): matrix() is called with partially matched argument names
# (nr/nc for nrow/ncol) here and below; this works but is fragile style.
AllY <- matrix(nr = n.sim, nc = 3)
sace.diff1 <- sace.diff2 <- ace.diff1 <- ace.diff2 <-
  sace.or1 <- sace.or2 <- ace.or1 <- ace.or2 <-
  or.approx1 <- or.approx2 <- or.approx.true1 <- or.approx.true2 <-
  pop.never.s1 <- pop.never.s2 <- vector(length = n.sim)
ci1 <- ci2 <- matrix(nr = n.sim, nc = 2)

for (j in 1:n.sim)
{
  CatIndex(j)
  # Simulate genetic score
  U <- rnorm(n.sample, 0, sd = sigmaU)
  #### Calculate probabilities for each subtype with and without the exposure ####
  e1E0 <- exp(beta0[1] + betaU[1]*U)
  e1E1 <- exp(beta0[1] + betaE[1] + betaU[1]*U)
  e2E0 <- exp(beta0[2] + betaU[2]*U)
  e2E1 <- exp(beta0[2] + betaE[2] + betaU[2]*U)
  prE0Y1 <- e1E0/(1 + e1E0 + e2E0)
  prE0Y2 <- e2E0/(1 + e1E0 + e2E0)
  prE1Y1 <- e1E1/(1 + e1E1 + e2E1)
  prE1Y2 <- e2E1/(1 + e1E1 + e2E1)
  probsE0 <- cbind(prE0Y1, prE0Y2, 1 - prE0Y1 - prE0Y2)
  probsE1 <- cbind(prE1Y1, prE1Y2, 1 - prE1Y1 - prE1Y2)
  # Simulate subtypes #
  # Draw both potential outcomes (under control, E=0, and under treatment,
  # E=1) for every subject; codes: 1 = subtype 1, 2 = subtype 2, 0 = no disease.
  Yctrl <- Ytrt <- vector(length = n.sample)
  X <- rbinom(n = n.sample, 1, 0.5)
  for (i in 1:n.sample)
  {
    Yctrl[i] <- sample(c(1,2,0), 1, replace = T, prob = probsE0[i, ])
    Ytrt[i] <- sample(c(1,2,0), 1, replace = T, prob = probsE1[i, ])
  }
  # Observed outcome: the potential outcome matching the assigned exposure.
  Y <- (1-X)*Yctrl + X*Ytrt
  AllY[j, ] <- table(Y)
  # Subtype indicators for each potential outcome.
  Y1ctrl <- Yctrl==1
  Y1trt <- Ytrt==1
  Y2ctrl <- Yctrl==2
  Y2trt <- Ytrt==2
  # Proportion of subjects who would never develop each subtype under either
  # exposure level.
  pop.never.s1[j] <- mean(Y1ctrl==0 & Y1trt==0)
  pop.never.s2[j] <- mean(Y2ctrl==0 & Y2trt==0)
  # estimate causal parameters
  # SACE (difference scale): effect on one subtype among subjects free of the
  # competing subtype under both exposure levels.
  sace.diff1[j] <- mean((Y1trt - Y1ctrl)[Y2ctrl==0 & Y2trt==0])
  sace.diff2[j]<- mean((Y2trt - Y2ctrl)[Y1ctrl==0 & Y1trt==0])
  # ACE (difference scale), conditioning on the observed exposure arms.
  # NOTE(review): the outer parentheses make this mean(vector - scalar), which
  # equals mean(vector) - scalar; the value is the intended difference of means.
  ace.diff1[j] <- mean((Y1trt[Y2trt==0 & X==1]) - mean(Y1ctrl[Y2ctrl==0 & X==0]))
  ace.diff2[j] <- mean((Y2trt[Y1trt==0 & X==1]) - mean(Y2ctrl[Y1ctrl==0 & X==0]))
  # Ypo <- c(Yctrl, Ytrt)
  # Upo <- rep(U,2)
  # Xpo <- rep(x = c(0,1), each = n.sample)
  # fit.full.po <- multinom(Ypo~ Xpo + Upo)
  # fit.po <- multinom(Ypo~ Xpo)
  # Multinomial fit to the observed data; store the per-category exposure CIs.
  fit <- multinom(Y~ X)
  cis <- CalcCImultinom(fit)
  ci1[j, ] <- cis[1, ]
  ci2[j, ] <- cis[2, ]
  # Case/control subsets for subtype-specific odds ratios (cases of the
  # competing subtype are excluded from each analysis).
  Y1only <- Y[Y<2]
  X1only <- X[Y<2]
  U1only <- U[Y<2]
  Y2only <- Y[Y!=1]
  X2only <- X[Y!=1]
  U2only <- U[Y!=1]
  Y2only[Y2only>0] <- 1   # recode subtype-2 cases (coded 2) to 1 for the 2x2 table
  # Cell counts in the order CalcOR expects:
  # (Y=0,X=0), (Y=1,X=0), (Y=0,X=1), (Y=1,X=1) -- TODO confirm against Daniel::CalcOR.
  vec.for.or.1only <- c(sum((1 - Y1only) * (1 - X1only)) , sum(Y1only * (1 - X1only)),
                        sum((1 - Y1only) * X1only), sum(Y1only*X1only))
  vec.for.or.2only <- c(sum((1 - Y2only) * (1 - X2only)) , sum(Y2only * (1 - X2only)),
                        sum((1 - Y2only) * X2only), sum(Y2only*X2only))
  ace.or1[j] <- CalcOR(vec.for.or.1only)
  ace.or2[j] <- CalcOR(vec.for.or.2only)
  # Principal-stratum subsets for the SACE odds ratios.
  Y1only.sace <- Y[Ytrt <2 & Yctrl < 2]
  X1only.sace <- X[Ytrt <2 & Yctrl < 2]
  U1only.sace <- U[Ytrt <2 & Yctrl < 2]
  # NOTE(review): the filter mixes Ytrt!=1 with Y1ctrl!=1; since Y1ctrl is the
  # logical (Yctrl==1), Y1ctrl!=1 is equivalent to Yctrl!=1, so the behavior
  # is correct but the style is inconsistent with the line above.
  Y2only.sace <- Y[Ytrt!=1 & Y1ctrl!=1]
  X2only.sace <- X[Ytrt!=1 & Y1ctrl!=1]
  U2only.sace <- U[Ytrt!=1 & Y1ctrl!=1]
  Y2only.sace[Y2only.sace>0] <- 1
  vec.for.or.sace1 <- c(sum((1 - Y1only.sace) * (1 - X1only.sace)) , sum(Y1only.sace * (1 - X1only.sace)),
                        sum((1 - Y1only.sace) * X1only.sace), sum(Y1only.sace*X1only.sace))
  vec.for.or.sace2 <- c(sum((1 - Y2only.sace) * (1 - X2only.sace)) , sum(Y2only.sace * (1 - X2only.sace)),
                        sum((1 - Y2only.sace) * X2only.sace), sum(Y2only.sace*X2only.sace))
  sace.or1[j] <- CalcOR(vec.for.or.sace1)
  sace.or2[j] <- CalcOR(vec.for.or.sace2)
  # Logistic-regression approximations of the subtype-specific odds ratios:
  # crude (X only) and "true" (adjusting for the confounder U).
  Y1 <- Y==1
  Y2 <- Y==2
  fit.logistic.Y1 <- glm(Y1 ~ X, family = "binomial")
  fit.logistic.true.Y1 <- glm(Y1 ~ X + U, family = "binomial")
  fit.logistic.Y2 <- glm(Y2 ~ X, family = "binomial")
  fit.logistic.true.Y2 <- glm(Y2 ~ X + U, family = "binomial")
  or.approx1[j] <- exp(coef(fit.logistic.Y1)[2])
  or.approx.true1[j] <- exp(coef(fit.logistic.true.Y1)[2])
  or.approx2[j] <- exp(coef(fit.logistic.Y2)[2])
  or.approx.true2[j] <- exp(coef(fit.logistic.true.Y2)[2])
}
# Persist the entire workspace for downstream summarisation.
save.image(paste0("CMPEn50krareScen4",patt,".RData"))
|
# Compute and print the base area and an edge-length sum for fixed dimensions.
# NOTE(review): `perimeter` is Length + bredth + height, which is not a
# standard perimeter formula (rectangle perimeter would be 2 * (L + B));
# presumably intentional for this exercise -- confirm with the author.
Length <- 20
bredth <- 30   # (sic) "breadth"
height <- 40

# Base area of the rectangle.
area <- Length * bredth
print(area)

# Sum of the three edge lengths, printed as "perimeter".
perimeter <- Length + bredth + height
print(perimeter)
|
/A2p4.r
|
no_license
|
AyushSinghdeo/DA-LAB
|
R
| false
| false
| 123
|
r
|
# Compute and print the base area and an edge-length sum for fixed dimensions.
Length<-20
bredth<-30   # (sic) "breadth"
height<-40
area<-(Length*bredth)
print(area)
# NOTE(review): not a standard perimeter formula -- this sums three edge
# lengths (rectangle perimeter would be 2 * (Length + bredth)).
perimeter<-(Length+bredth+height)
print(perimeter)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/RHDFql.R
\docType{package}
\name{RHDFql-package}
\alias{RHDFql}
\alias{RHDFql-package}
\title{RHDFql: Interface to 'HDFql'}
\description{
A DBI-like interface to HDF files using HDFql.
}
\details{
TBD.
}
\author{
\strong{Maintainer}: Michael Koohafkan \email{michael.koohafkan@gmail.com}
}
|
/man/RHDFql-package.Rd
|
no_license
|
mkoohafkan/RHDFql
|
R
| false
| true
| 369
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/RHDFql.R
\docType{package}
\name{RHDFql-package}
\alias{RHDFql}
\alias{RHDFql-package}
\title{RHDFql: Interface to 'HDFql'}
\description{
A DBI-like interface to HDF files using HDFql.
}
\details{
TBD.
}
\author{
\strong{Maintainer}: Michael Koohafkan \email{michael.koohafkan@gmail.com}
}
|
# Minimal Shiny app demonstrating r2d3: a slider ("bar_max") sets the upper
# bound for five uniform random bar heights, which are rendered by the
# animated-bars example script shipped with the r2d3 package.
library(shiny)
library(r2d3)

# UI: one slider input plus the D3 output region.
ui <- fluidPage(
  inputPanel(
    sliderInput("bar_max", label = "Max:",
                min = 0.1, max = 1.0, value = 0.2, step = 0.1)
  ),
  d3Output("d3")
)

# Server: re-renders the D3 chart whenever the slider changes; the data are
# five draws from Uniform(0, input$bar_max).
server <- function(input, output) {
  output$d3 <- renderD3({
    r2d3(
      runif(5, 0, input$bar_max),
      script = system.file("examples/baranims.js", package = "r2d3")
    )
  })
}

shinyApp(ui = ui, server = server)
|
/src/12. shiny.R
|
no_license
|
laurentpellet/meetup2019
|
R
| false
| false
| 437
|
r
|
# Minimal Shiny app demonstrating r2d3: a slider ("bar_max") sets the upper
# bound for five uniform random bar heights, which are rendered by the
# animated-bars example script shipped with the r2d3 package.
library(shiny)
library(r2d3)

# UI: one slider input plus the D3 output region.
ui <- fluidPage(
  inputPanel(
    sliderInput("bar_max", label = "Max:",
                min = 0.1, max = 1.0, value = 0.2, step = 0.1)
  ),
  d3Output("d3")
)

# Server: re-renders the D3 chart whenever the slider changes; the data are
# five draws from Uniform(0, input$bar_max).
server <- function(input, output) {
  output$d3 <- renderD3({
    r2d3(
      runif(5, 0, input$bar_max),
      script = system.file("examples/baranims.js", package = "r2d3")
    )
  })
}

shinyApp(ui = ui, server = server)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/unimodularity.R
\name{reduceMatrix}
\alias{reduceMatrix}
\title{Apply reduction method from Scholtus (2008)}
\usage{
reduceMatrix(A)
}
\arguments{
\item{A}{An object of class matrix in \eqn{\{-1,0,1\}^{m\times n}}.}
}
\value{
The reduction of A.
}
\description{
Apply the reduction method in the appendix of Scholtus (2008) to a matrix.
Let \eqn{A} with coefficients in \eqn{\{-1,0,1\}}. If, after a possible
permutation of columns it can be written
in the form \eqn{A=[B,C]} where each column in \eqn{B} has at most 1 nonzero
element, then \eqn{A} is totally unimodular if and only if \eqn{C} is totally
unimodular. By transposition, a similar theorem holds for the rows of A. This
function iteratively removes rows and columns with only 1 nonzero element
from \eqn{A} and returns the reduced result.
}
\references{
Scholtus S (2008). Algorithms for correcting some obvious
inconsistencies and rounding errors in business survey data. Technical
Report 08015, Netherlands.
}
\seealso{
\code{\link{is_totally_unimodular}}
}
\keyword{internal}
|
/man/reduceMatrix.Rd
|
no_license
|
cran/lintools
|
R
| false
| true
| 1,122
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/unimodularity.R
\name{reduceMatrix}
\alias{reduceMatrix}
\title{Apply reduction method from Scholtus (2008)}
\usage{
reduceMatrix(A)
}
\arguments{
\item{A}{An object of class matrix in \eqn{\{-1,0,1\}^{m\times n}}.}
}
\value{
The reduction of A.
}
\description{
Apply the reduction method in the appendix of Scholtus (2008) to a matrix.
Let \eqn{A} with coefficients in \eqn{\{-1,0,1\}}. If, after a possible
permutation of columns it can be written
in the form \eqn{A=[B,C]} where each column in \eqn{B} has at most 1 nonzero
element, then \eqn{A} is totally unimodular if and only if \eqn{C} is totally
unimodular. By transposition, a similar theorem holds for the rows of A. This
function iteratively removes rows and columns with only 1 nonzero element
from \eqn{A} and returns the reduced result.
}
\references{
Scholtus S (2008). Algorithms for correcting some obvious
inconsistencies and rounding errors in business survey data. Technical
Report 08015, Netherlands.
}
\seealso{
\code{\link{is_totally_unimodular}}
}
\keyword{internal}
|
# Preprocess the protein data set (experiment 4): link protein measurements to
# patient infection-status metadata, derive HIV/TB status and a combined group
# code, relabel patients, drop Malawian samples, and write body/meta CSVs.
#
# Inputs : ../../data/protein_data.csv, ../../data/id_info.csv
# Outputs: ../../data/ex_4/prot_data_body.csv, ../../data/ex_4/prot_data_meta.csv
#
# NOTE(review): setwd() at the top makes the script non-portable; kept because
# all paths below are relative to this directory.
setwd("/project/home17/whb17/Documents/project3/project_files/preprocessing/ex_4/")

inp.prot_data <- read.csv("../../data/protein_data.csv")
inp.inf_id <- read.csv("../../data/id_info.csv")

# Remove weird superfluous columns
inp.prot_data <- inp.prot_data[,-c(25,26)]

# Separate the patient-identifier column from the measurement columns.
Pt <- inp.prot_data$Pt
inp.prot_data.body <- inp.prot_data[,-c(1,2)]

library("stringr")

# Remove uncategorised patients and those with missing labels: drop any row
# with an NA, an "Unassigned"/"Excluded" infection status, or no array id.
# (Vectorised; equivalent to the original element-wise deletion loop.)
keep <- complete.cases(inp.inf_id) &
  !(inp.inf_id$inf.status %in% c("Unassigned", "Excluded")) &
  inp.inf_id$array.id != "no"
inp.inf_id <- inp.inf_id[keep, ]

# Get relevant indices: match metadata rows to protein-data rows on patient id.
# NOTE(review): str_detect() does substring matching, so a prot.id that is a
# substring of another id could match more than one Pt entry -- TODO confirm
# the ids are unambiguous.
ind.inf_id <- c()
ind.prot_data <- c()
for (i in 1:nrow(inp.inf_id)){
  for (j in 1:length(Pt)){
    if (str_detect(toString(Pt[j]),toString(inp.inf_id$prot.id[i]))){
      ind.inf_id <- c(ind.inf_id, i)
      ind.prot_data <- c(ind.prot_data, j)
    }
  }
}

# Keep only the matched rows, aligned in order.
df.sel.inf_id <- inp.inf_id[ind.inf_id,]
df.sel.prot_data <- inp.prot_data.body[ind.prot_data,]

# Normalise the many raw infection-status labels to a small controlled set
# ("TB+/HIV±", "LTBI/HIV±", "OD/HIV±"; OD = other disease).
# (Two duplicated mappings for "HIV-/Inf Not TB" and "Excl_long_term_HIV-"
# were removed here; they were no-ops.)
inf.status <- as.character(df.sel.inf_id$inf.status)
inf.status[inf.status == "TB_HIV-"] <- "TB+/HIV-"
inf.status[inf.status == "S_TB_HIV-"] <- "TB+/HIV-"
inf.status[inf.status == "S_TB_HIV+"] <- "TB+/HIV+"
inf.status[inf.status == "TB_HIV+"] <- "TB+/HIV+"
inf.status[inf.status == "LTBI_HIV-"] <- "LTBI/HIV-"
inf.status[inf.status == "LTBI_long_term_HIV-"] <- "LTBI/HIV-"
inf.status[inf.status == "LTBI_HIV+"] <- "LTBI/HIV+"
inf.status[inf.status == "HIV+/Inf Not TB"] <- "OD/HIV+"
inf.status[inf.status == "Sick_control_HIV+"] <- "OD/HIV+"
inf.status[inf.status == "Excl_well_LTBI-_HIV+"] <- "OD/HIV+"
inf.status[inf.status == "HIV-/Inf Not TB"] <- "OD/HIV-"
inf.status[inf.status == "Excl_long_term_HIV-"] <- "OD/HIV-"
inf.status[inf.status == "Sick_control_HIV-"] <- "OD/HIV-"
inf.status[inf.status == "Excl_well_LTBI-_HIV-"] <- "OD/HIV-"

# Derive HIV status, TB status and the numeric group code.
# (Vectorised; replaces the original element-wise loops with identical logic.
# The original LTBI test read the raw labels rather than the normalised ones;
# the normalised labels are used consistently here -- equivalent, since both
# forms contain "LTBI" exactly for LTBI rows once OD rows are handled first.)
hiv.status <- ifelse(str_detect(inf.status, "HIV-"), "HIV-", "HIV+")
tb.status <- ifelse(str_detect(inf.status, "OD"), "OD",
             ifelse(str_detect(inf.status, "LTBI"), "LTBI", "TB"))
# Group coding: 1 = TB/HIV-, 2 = TB/HIV+, 3 = LTBI/HIV-, 4 = LTBI/HIV+,
#               5 = OD/HIV+, 6 = OD/HIV-.
group <- ifelse(tb.status == "TB",   ifelse(hiv.status == "HIV-", 1, 2),
         ifelse(tb.status == "LTBI", ifelse(hiv.status == "HIV-", 3, 4),
                                     ifelse(hiv.status == "HIV-", 6, 5)))

# Create new patient labels: "<group>_<site>_<prot.id>".
nulabel <- paste(group, df.sel.inf_id$site, df.sel.inf_id$prot.id, sep = "_")

# Assemble metadata + measurements, with the new labels as row names.
df.prot_data.ex <- cbind(data.frame(row.names=nulabel, df.sel.inf_id$prot.id, df.sel.inf_id$array.id, hiv.status, tb.status, group, df.sel.inf_id$site, df.sel.inf_id$sex), df.sel.prot_data)
colnames(df.prot_data.ex)[1] <- "prot.id"
colnames(df.prot_data.ex)[2] <- "array.id"
colnames(df.prot_data.ex)[6] <- "site"
colnames(df.prot_data.ex)[7] <- "sex"

# Remove Malawian patients (for some reason, none in this set anyway).
df.prot_data.ex <- df.prot_data.ex[df.prot_data.ex$site != "ML", ]

#sum(
#length(df.prot_data.ex$group[df.prot_data.ex$group==1]) * 0.3
#,
#length(df.prot_data.ex$group[df.prot_data.ex$group==2]) * 0.3
#,
#length(df.prot_data.ex$group[df.prot_data.ex$group==3]) * 0.3
#,
#length(df.prot_data.ex$group[df.prot_data.ex$group==4]) * 0.3
#,
#length(df.prot_data.ex$group[df.prot_data.ex$group==5]) * 0.3
#,
#length(df.prot_data.ex$group[df.prot_data.ex$group==6]) * 0.3
#)

# Write measurement columns and metadata columns to separate .csv files.
write.csv(df.prot_data.ex[,-c(1:7)],"../../data/ex_4/prot_data_body.csv",row.names=TRUE)
write.csv(df.prot_data.ex[,c(1:7)],"../../data/ex_4/prot_data_meta.csv",row.names=TRUE)
|
/preprocessing/ex_4/prot_choice4.R
|
no_license
|
whtbowers/multiomics
|
R
| false
| false
| 4,784
|
r
|
# Preprocess the protein data set (experiment 4): link protein measurements to
# patient infection-status metadata, derive HIV/TB status and a combined group
# code, relabel patients, drop Malawian samples, and write body/meta CSVs.
# Inputs : ../../data/protein_data.csv, ../../data/id_info.csv
# Outputs: ../../data/ex_4/prot_data_body.csv, ../../data/ex_4/prot_data_meta.csv
# NOTE(review): setwd() at the top makes the script non-portable.
setwd("/project/home17/whb17/Documents/project3/project_files/preprocessing/ex_4/")
inp.prot_data <- read.csv("../../data/protein_data.csv")
inp.inf_id <- read.csv("../../data/id_info.csv")
#Remove weird superfluous columns
inp.prot_data <- inp.prot_data[,-c(25,26)]
#Separate off info columns
Pt <- inp.prot_data$Pt
inp.prot_data.body <- inp.prot_data[,-c(1,2)]
library("stringr")
# Remove uncategorised patients and those with missing labels
# (rows with any NA, status "Unassigned"/"Excluded", or array.id == "no").
for (i in rev(1:nrow(inp.inf_id))){
  if (sum(is.na(inp.inf_id[i,])) > 0){
    inp.inf_id <- inp.inf_id[-i,]
  } else if (inp.inf_id$inf.status[i] == "Unassigned"){
    inp.inf_id <- inp.inf_id[-i,]
  } else if (inp.inf_id$inf.status[i] == "Excluded"){
    inp.inf_id <- inp.inf_id[-i,]
  } else if (inp.inf_id$array.id[i] == "no"){
    inp.inf_id <- inp.inf_id[-i,]
  }
}
# Get relevent indices
# Match metadata rows to protein-data rows on patient id.
# NOTE(review): str_detect() does substring matching, so a prot.id that is a
# substring of another id could match more than one Pt entry -- TODO confirm.
ind.inf_id <- c()
ind.prot_data <- c()
for (i in 1:nrow(inp.inf_id)){
  for (j in 1:length(Pt)){
    if (str_detect(toString(Pt[j]),toString(inp.inf_id$prot.id[i]))){
      ind.inf_id <- c(ind.inf_id, i)
      ind.prot_data <- c(ind.prot_data, j)
    }
  }
}
#Get rows selected by indices
df.sel.inf_id <- inp.inf_id[ind.inf_id,]
df.sel.prot_data <-inp.prot_data.body[ind.prot_data,]
#Set up individual columns for disease classification
hiv.status <- c()
tb.status <- c()
group <- c()
nulabel <- c()
# Modify infection status to make easier to categorise
# (maps the many raw labels to "TB+/HIV±", "LTBI/HIV±", "OD/HIV±").
# NOTE(review): the "HIV-/Inf Not TB" and "Excl_long_term_HIV-" mappings are
# duplicated below; the second occurrences are no-ops.
inf.status <- df.sel.inf_id$inf.status
inf.status <- as.character(inf.status)
inf.status[inf.status == "TB_HIV-"] <- "TB+/HIV-"
inf.status[inf.status == "S_TB_HIV-"] <- "TB+/HIV-"
inf.status[inf.status == "S_TB_HIV+"] <- "TB+/HIV+"
inf.status[inf.status == "TB_HIV+"] <- "TB+/HIV+"
inf.status[inf.status == "LTBI_HIV-"] <- "LTBI/HIV-"
inf.status[inf.status == "LTBI_long_term_HIV-"] <- "LTBI/HIV-"
inf.status[inf.status == "LTBI_HIV+"] <- "LTBI/HIV+"
inf.status[inf.status == "HIV+/Inf Not TB"] <- "OD/HIV+"
inf.status[inf.status == "Sick_control_HIV+"] <- "OD/HIV+"
inf.status[inf.status == "Excl_well_LTBI-_HIV+"] <- "OD/HIV+"
inf.status[inf.status == "HIV-/Inf Not TB"] <- "OD/HIV-"
inf.status[inf.status == "Excl_long_term_HIV-"] <- "OD/HIV-"
inf.status[inf.status == "Sick_control_HIV-"] <- "OD/HIV-"
inf.status[inf.status == "HIV-/Inf Not TB"] <- "OD/HIV-"
inf.status[inf.status == "Excl_long_term_HIV-"] <- "OD/HIV-"
inf.status[inf.status == "Excl_well_LTBI-_HIV-"] <- "OD/HIV-"
# Populate HIV column
for (i in 1:nrow(df.sel.inf_id)){
  if (str_detect(toString(inf.status[i]), "HIV-")){
    hiv.status <- c(hiv.status, "HIV-")
  } else {
    hiv.status <- c(hiv.status, "HIV+")
  }
}
# Populate TB column
# NOTE(review): the LTBI branch reads the raw df.sel.inf_id$inf.status rather
# than the normalised inf.status used in the OD branch; the result is the
# same (both forms contain "LTBI" exactly for LTBI rows) but it is inconsistent.
for (i in 1:nrow(df.sel.inf_id)){
  if (str_detect(toString(inf.status[i]), "OD")){
    tb.status <- c(tb.status, "OD")
  } else if (str_detect(toString(df.sel.inf_id$inf.status[i]), "LTBI")){
    tb.status <- c(tb.status, "LTBI")
  } else {
    tb.status <- c(tb.status, "TB")
  }
}
# Populate group column
# Coding: 1 = TB/HIV-, 2 = TB/HIV+, 3 = LTBI/HIV-, 4 = LTBI/HIV+,
#         5 = OD/HIV+, 6 = OD/HIV-.
for (i in 1:length(hiv.status)){
  if (hiv.status[i] == "HIV-"){
    if (tb.status[i] == "TB"){
      group <- c(group, 1)
    } else if (tb.status[i] == "LTBI"){
      group <- c(group, 3)
    } else if (tb.status[i] == "OD"){
      group <- c(group, 6)
    }
  } else if (hiv.status[i] == "HIV+"){
    if (tb.status[i] == "TB"){
      group <- c(group, 2)
    } else if (tb.status[i] == "LTBI"){
      group <- c(group, 4)
    } else if (tb.status[i] == "OD"){
      group <- c(group, 5)
    }
  }
}
# Create new patient labels of the form "<group>_<site>_<prot.id>".
for (i in 1:nrow(df.sel.inf_id)){
  label <- paste(group[i], "_", df.sel.inf_id$site[i], "_", df.sel.inf_id$prot.id[i] ,sep="")
  nulabel <- c(nulabel, label)
}
# Assemble metadata + measurements, with the new labels as row names.
df.prot_data.ex <- cbind(data.frame(row.names=nulabel, df.sel.inf_id$prot.id, df.sel.inf_id$array.id, hiv.status, tb.status, group, df.sel.inf_id$site, df.sel.inf_id$sex), df.sel.prot_data)
colnames(df.prot_data.ex)[1] <- "prot.id"
colnames(df.prot_data.ex)[2] <- "array.id"
colnames(df.prot_data.ex)[6] <- "site"
colnames(df.prot_data.ex)[7] <- "sex"
#Remove Malawian patients (For some reason, none in this set anyway)
for (i in rev(1:nrow(df.prot_data.ex))){
  if (df.prot_data.ex$site[i] == "ML"){
    df.prot_data.ex <- df.prot_data.ex[-c(i),]
  }
}
#sum(
#length(df.prot_data.ex$group[df.prot_data.ex$group==1]) * 0.3
#,
#length(df.prot_data.ex$group[df.prot_data.ex$group==2]) * 0.3
#,
#length(df.prot_data.ex$group[df.prot_data.ex$group==3]) * 0.3
#,
#length(df.prot_data.ex$group[df.prot_data.ex$group==4]) * 0.3
#,
#length(df.prot_data.ex$group[df.prot_data.ex$group==5]) * 0.3
#,
#length(df.prot_data.ex$group[df.prot_data.ex$group==6]) * 0.3
#)
# Write to .csv file (measurements and metadata separately).
write.csv(df.prot_data.ex[,-c(1:7)],"../../data/ex_4/prot_data_body.csv",row.names=TRUE)
write.csv(df.prot_data.ex[,c(1:7)],"../../data/ex_4/prot_data_meta.csv",row.names=TRUE)
|
## Put comments here that give an overall description of what your
## functions do
# Both these functions used in conjunction with each other allow for storage of the inverse
# of a matrix so as to prevent recaclulating over and over again.
## Write a short comment describing this function
## makeCacheMatrix is analogous to a class in an object oriented programming language like java.
# It takes in a matrix and stores that matrix in a setMatrix attribute which is also a function.
# It also sets the attribute inverse to null and stores the inverse when setInverse is called.
# It returns a list of functions with stored variables.
# Build a matrix wrapper that can cache its inverse alongside the matrix.
#
# Returns a list of four accessor closures sharing state in this call's
# environment:
#   setMatrix(m)   -- replace the stored matrix and drop any cached inverse
#   getMatrix()    -- return the stored matrix
#   setInverse(v)  -- store a computed inverse in the cache
#   getInverse()   -- return the cached inverse, or NULL if not yet set
makeCacheMatrix <- function(x = matrix()) {
  cached_inv <- NULL
  setMatrix <- function(new_mat) {
    x <<- new_mat
    cached_inv <<- NULL  # a new matrix invalidates the cached inverse
  }
  getMatrix <- function() {
    x
  }
  setInverse <- function(value) {
    cached_inv <<- value
  }
  getInverse <- function() {
    cached_inv
  }
  list(setMatrix = setMatrix, getMatrix = getMatrix,
       getInverse = getInverse, setInverse = setInverse)
}
## Write a short comment describing this function
# cacheSolve takes in a special cacheMatrix "object" (list) that was made using the previous function
# It gets the inverse. if the inverse is already stored, then it uses that inverse, otherwise it
# calculates the inverse and sets the inverse using the function setInverse.
# Return the inverse of the matrix stored in a makeCacheMatrix() object `x`,
# computing it at most once.
#
# On the first call the inverse is computed with solve() and stored via
# x$setInverse(); subsequent calls return the cached copy (with a message).
#
# x   : list produced by makeCacheMatrix().
# ... : extra arguments forwarded to solve().
# Returns the inverse of x$getMatrix().
cacheSolve <- function(x, ...) {
  inv <- x$getInverse()
  if (!is.null(inv)) {
    message("getting cached data")
    return(inv)
  }
  # BUG FIX: previously `...` was passed to x$setInverse() (a one-argument
  # function) instead of to solve(), so any extra argument raised an error;
  # the code also relied on setInverse()'s return value. Compute first, then
  # cache explicitly.
  inv <- solve(x$getMatrix(), ...)
  x$setInverse(inv)
  inv
}
|
/cachematrix.R
|
no_license
|
sbhave77/ProgrammingAssignment2
|
R
| false
| false
| 1,699
|
r
|
## Put comments here that give an overall description of what your
## functions do
# Both these functions used in conjunction with each other allow for storage of the inverse
# of a matrix so as to prevent recaclulating over and over again.
## Write a short comment describing this function
## makeCacheMatrix is analogous to a class in an object oriented programming language like java.
# It takes in a matrix and stores that matrix in a setMatrix attribute which is also a function.
# It also sets the attribute inverse to null and stores the inverse when setInverse is called.
# It returns a list of functions with stored variables.
# Wrap a matrix with getter/setter closures plus a cache slot for its inverse.
# Returned list elements: setMatrix, getMatrix, getInverse, setInverse.
makeCacheMatrix <- function(x = matrix()) {
  inverse <- NULL  # cached inverse; NULL until setInverse() is called
  setMatrix <- function(z) {
    x <<- z
    inverse <<- NULL  # replacing the matrix invalidates the cache
  }
  getMatrix <- function() x
  setInverse <- function(inverseMat) {
    inverse <<- inverseMat
  }
  getInverse <- function() inverse
  list(setMatrix = setMatrix, getMatrix = getMatrix, getInverse = getInverse, setInverse = setInverse)
}
## Write a short comment describing this function
# cacheSolve takes in a special cacheMatrix "object" (list) that was made using the previous function
# It gets the inverse. if the inverse is already stored, then it uses that inverse, otherwise it
# calculates the inverse and sets the inverse using the function setInverse.
# Return the inverse of the matrix stored in a makeCacheMatrix() object `x`,
# using the cached copy when one exists.
cacheSolve <- function(x, ...) {
  ## Return a matrix that is the inverse of 'x'
  inv <- x$getInverse()
  if (!is.null(inv)) {
    message("getting cached data");
    return(inv);
  }
  # NOTE(review): `...` is passed here to setInverse() (a one-argument
  # function) rather than to solve(); supplying any extra argument will
  # raise an error. The result also relies on setInverse() returning the
  # value it assigns.
  inv <- x$setInverse(solve(x$getMatrix()), ...);
  inv
}
|
# Groups modules by Zsummary and extracts the gene names.
#
# Reads the module-preservation metrics table, keeps modules whose Zsummary
# (column 10 -- assumed, TODO confirm) exceeds 10 ("preserved"), and for each
# such module loads its adjacency-matrix file and writes the gene IDs (the
# version suffix after the first dot stripped) to
# ./Enrichment/Preserved/<module>.csv.
Data = read.csv('./Data/Metrics LUSC_Tumor.csv')

#Filter Preserved
Filter = Data[Data[, 10] > 10, ]
modules = as.character(Filter$module)

# recursive = TRUE so a missing ./Enrichment parent directory is created too;
# showWarnings = FALSE silences the warning when the directory already exists.
dir.create('./Enrichment/Preserved', recursive = TRUE, showWarnings = FALSE)

# The module directory listing is loop-invariant, so read it once.
module_files = list.files('./Data/Modules/LUSC_Tumor')
for (i in modules) {
  # fixed = TRUE: match the module name literally, not as a regular
  # expression (names containing regex metacharacters would otherwise mis-match).
  pos = grepl(sprintf(' %s ', i), module_files, fixed = TRUE)
  file = paste('./Data/Modules/LUSC_Tumor', module_files[pos], sep = '/')
  # load() defines `adjacencyModule` in this environment.
  load(file)
  geneNames = as.character(rownames(adjacencyModule))
  # Keep the part of each ID before the first dot (drop the version suffix).
  # vapply (not sapply) guarantees a character vector even for odd inputs.
  geneNames = vapply(geneNames,
                     function(x) strsplit(x, split = '\\.')[[1]][1],
                     character(1), USE.NAMES = FALSE)
  write.csv(geneNames, sprintf('./Enrichment/Preserved/%s.csv', i))
}
|
/Scripts/Enrichment/ExtractGenesForEnrichment.R
|
no_license
|
StefanKanan/Analysing-Gene-Networks
|
R
| false
| false
| 755
|
r
|
"Groups modules by ZSummary and extracts the gene names"
# For every module whose preservation Zsummary exceeds 10 ("preserved"), load
# the module's adjacency-matrix file and write its gene IDs (the version
# suffix after the first dot stripped) to ./Enrichment/Preserved/<module>.csv.
Data = read.csv('./Data/Metrics LUSC_Tumor.csv')
#Filter Preserved
# Column 10 is assumed to hold the Zsummary statistic -- TODO confirm.
Filter = Data[Data[, 10] > 10, ]
modules = Filter$module
modules = as.character(modules)
# NOTE(review): fails if ./Enrichment does not already exist (recursive is FALSE).
dir.create('./Enrichment/Preserved')
for (i in modules) {
  file = list.files('./Data/Modules/LUSC_Tumor')
  # NOTE(review): the module name is used as a regular expression here; names
  # containing regex metacharacters would mis-match (fixed = TRUE would be safer).
  pos = grepl(sprintf(' %s ', i), file)
  file = file[pos]
  file = paste('./Data/Modules/LUSC_Tumor', file, sep = '/')
  # load() defines `adjacencyModule` in this environment.
  load(file)
  geneNames = rownames(adjacencyModule)
  geneNames = as.character(geneNames)
  # Keep the part of each ID before the first dot (drop the version suffix).
  geneNames = sapply(geneNames, function (x) strsplit(x, split = '\\.')[[1]][1])
  geneNames = unname(geneNames)
  write.csv(geneNames, sprintf('./Enrichment/Preserved/%s.csv', i))
}
|
# Tests for votes_get_clubs_links(): scraping the club-level vote links for a
# single voting from the Polish Sejm website.
# NOTE(review): these tests hit the live site and will fail without network access.
home_page <- "http://www.sejm.gov.pl/Sejm7.nsf/"
page <- "http://www.sejm.gov.pl/Sejm7.nsf/agent.xsp?symbol=glosowania&NrKadencji=7&NrPosiedzenia=1&NrGlosowania=1"

test_that("result of function", {
  expect_equal(class(votes_get_clubs_links(home_page, page)), "data.frame")
})

test_that("columns of table", {
  expect_equal(ncol(votes_get_clubs_links(home_page, page)), 2)
})

test_that("rows of table", {
  # expect_gt() replaces the deprecated expect_more_than().
  expect_gt(nrow(votes_get_clubs_links(home_page, page)), 0)
})
|
/sejmRP/tests/testthat/test_vottes_get_clubs_links.R
|
no_license
|
PaulinaKostrzewa/sejmRP
|
R
| false
| false
| 714
|
r
|
# Tests for votes_get_clubs_links(): scraping the club-level vote links for a
# single voting from the Polish Sejm website. These tests hit the live site
# and will fail without network access.
test_that("result of function", {
  expect_equal(class(votes_get_clubs_links("http://www.sejm.gov.pl/Sejm7.nsf/",
    "http://www.sejm.gov.pl/Sejm7.nsf/agent.xsp?symbol=glosowania&NrKadencji=7&NrPosiedzenia=1&NrGlosowania=1")),
    "data.frame")
})
test_that("columns of table", {
  expect_equal(ncol(votes_get_clubs_links("http://www.sejm.gov.pl/Sejm7.nsf/",
    "http://www.sejm.gov.pl/Sejm7.nsf/agent.xsp?symbol=glosowania&NrKadencji=7&NrPosiedzenia=1&NrGlosowania=1")), 2)
})
# NOTE(review): expect_more_than() is deprecated in current testthat;
# expect_gt() is the modern equivalent.
test_that("rows of table", {
  expect_more_than(nrow(votes_get_clubs_links("http://www.sejm.gov.pl/Sejm7.nsf/",
    "http://www.sejm.gov.pl/Sejm7.nsf/agent.xsp?symbol=glosowania&NrKadencji=7&NrPosiedzenia=1&NrGlosowania=1")), 0)
})
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/elementRetrieval.R, R/elementRetrievalDoc.R
\name{findElementFromElement}
\alias{findElementFromElement}
\title{Search for an element on the page, starting from another element}
\usage{
findElementFromElement(webElem, using = c("xpath", "css selector", "id",
"name", "tag name", "class name", "link text", "partial link text"), value,
...)
}
\arguments{
\item{webElem}{An object of class "wElement". A web Element object see
\code{\link{wbElement}}.}
\item{using}{Locator scheme to use to search the element, available
schemes: {"class name", "css selector", "id", "name", "link text",
"partial link text", "tag name", "xpath" }. Defaults to 'xpath'.
Partial string matching is accepted.}
\item{value}{The search target. See examples.}
\item{...}{Additional function arguments - currently passes the
\code{\link{retry}} argument.}
}
\value{
invisible(wbElement(res$value, webElem$remDr)): An object of
class "wElement" is invisibly returned. A webElement object see
\code{\link{wbElement}}. This allows for chaining from this function
to other functions that take such an object as an argument. See
examples for further details.
}
\description{
\code{findElementFromElement} Search for an element on the page,
starting from the node defined by the parent webElement. The located
element will be returned as an object of wElement class.
}
\details{
Details of possible locator schemes
\describe{
\item{"class name" :}{Returns an element whose class name contains
the search value; compound class names are not permitted.}
\item{"css selector" :}{Returns an element matching a CSS selector.}
\item{"id" :}{Returns an element whose ID attribute matches the
search value.}
\item{"name" :}{Returns an element whose NAME attribute matches
the search value.}
\item{"link text" :}{Returns an anchor element whose visible text
matches the search value.}
\item{"partial link text" :}{Returns an anchor element whose
visible text partially matches the search value.}
\item{"tag name" :}{Returns an element whose tag name matches the
search value.}
\item{"xpath" :}{Returns an element matching an XPath expression.}
}
}
\examples{
\dontrun{
remDr <- remoteDr()
remDr \%>\% go("http://www.google.com/ncr")
# find the search form query box and search for "R project"
webElem <- remDr \%>\% findElement("name", "q") \%>\%
elementSendKeys("R project", key = "enter")
# click the first link hopefully should be www.r-project.org
remDr \%>\% findElement("css", "h3.r a") \%>\% elementClick
# get the navigation div
navElem <- remDr \%>\% findElement("css", "div[role='navigation']")
# find all the links in this div
navLinks <- navElem \%>\% findElementsFromElement("css", "a")
# check the links
nLinks <- sapply(navLinks, function(x) x \%>\% getElementText)
# compare with all links
allLinks <- remDr \%>\% findElements("css", "a")
aLinks <- sapply(allLinks, function(x) x \%>\% getElementText)
# show the effect of searching for elements from element
aLinks \%in\% nLinks
remDr \%>\% deleteSession
}
}
\seealso{
Other elementRetrieval functions: \code{\link{findElementsFromElement}},
\code{\link{findElements}}, \code{\link{findElement}},
\code{\link{getActiveElement}}
}
|
/man/findElementFromElement.Rd
|
no_license
|
johndharrison/seleniumPipes
|
R
| false
| true
| 3,400
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/elementRetrieval.R, R/elementRetrievalDoc.R
\name{findElementFromElement}
\alias{findElementFromElement}
\title{Search for an element on the page, starting from another element}
\usage{
findElementFromElement(webElem, using = c("xpath", "css selector", "id",
"name", "tag name", "class name", "link text", "partial link text"), value,
...)
}
\arguments{
\item{webElem}{An object of class "wElement". A web Element object see
\code{\link{wbElement}}.}
\item{using}{Locator scheme to use to search the element, available
schemes: {"class name", "css selector", "id", "name", "link text",
"partial link text", "tag name", "xpath" }. Defaults to 'xpath'.
Partial string matching is accepted.}
\item{value}{The search target. See examples.}
\item{...}{Additional function arguments - currently passes the
\code{\link{retry}} argument.}
}
\value{
invisible(wbElement(res$value, webElem$remDr)): An object of
class "wElement" is invisibly returned. A webElement object see
\code{\link{wbElement}}. This allows for chaining from this function
to other functions that take such an object as an argument. See
examples for further details.
}
\description{
\code{findElementFromElement} Search for an element on the page,
starting from the node defined by the parent webElement. The located
element will be returned as an object of wElement class.
}
\details{
Details of possible locator schemes
\describe{
\item{"class name" :}{Returns an element whose class name contains
the search value; compound class names are not permitted.}
\item{"css selector" :}{Returns an element matching a CSS selector.}
\item{"id" :}{Returns an element whose ID attribute matches the
search value.}
\item{"name" :}{Returns an element whose NAME attribute matches
the search value.}
\item{"link text" :}{Returns an anchor element whose visible text
matches the search value.}
\item{"partial link text" :}{Returns an anchor element whose
visible text partially matches the search value.}
\item{"tag name" :}{Returns an element whose tag name matches the
search value.}
\item{"xpath" :}{Returns an element matching an XPath expression.}
}
}
\examples{
\dontrun{
remDr <- remoteDr()
remDr \%>\% go("http://www.google.com/ncr")
# find the search form query box and search for "R project"
webElem <- remDr \%>\% findElement("name", "q") \%>\%
elementSendKeys("R project", key = "enter")
# click the first link hopefully should be www.r-project.org
remDr \%>\% findElement("css", "h3.r a") \%>\% elementClick
# get the navigation div
navElem <- remDr \%>\% findElement("css", "div[role='navigation']")
# find all the links in this div
navLinks <- navElem \%>\% findElementsFromElement("css", "a")
# check the links
nLinks <- sapply(navLinks, function(x) x \%>\% getElementText)
# compare with all links
allLinks <- remDr \%>\% findElements("css", "a")
aLinks <- sapply(allLinks, function(x) x \%>\% getElementText)
# show the effect of searching for elements from element
aLinks \%in\% nLinks
remDr \%>\% deleteSession
}
}
\seealso{
Other elementRetrieval functions: \code{\link{findElementsFromElement}},
\code{\link{findElements}}, \code{\link{findElement}},
\code{\link{getActiveElement}}
}
|
# integrate.R
# 7-26-2019
# Integrates single cell genomics datasets using Seurat methods.
# Currently designed to use Seurat's reference-based CCA method
# for integration. The script expects all datasets to be merged
# into 1 Seurat object.
# usage: Rscript integrate.r -s "/data/seurat_object.rds" -g "individual" -r "Ind4"
#        Rscript integrate.r --seuratObject "../data/BreastAtlas.rds" \
#                            --groupLabel "sample_origin" \
#                            --reference "Ind4" \
#                            --out "../data/BreastCancerAtlas.integrated.rds"
library(Seurat)
library(ggplot2)

# Allow large globals (up to ~4 GB) to be exported to future workers.
options(future.globals.maxSize = 4000 * 1024^2)

# parse arguments
suppressPackageStartupMessages(require(optparse))
option_list <- list(
  make_option(c("-s", "--seuratObject"), action = "store", default = NA, type = "character"),
  make_option(c("-g", "--groupLabel"), action = "store", default = NA, type = "character"),
  make_option(c("-r", "--reference"), action = "store", default = NA, type = "character"),
  make_option(c("-o", "--out"), action = "store", default = NA, type = "character")
)
opt <- parse_args(OptionParser(option_list = option_list))

# Integrate the datasets contained in a merged Seurat object.
#
# seuratObject: path to an .rds file holding the merged Seurat object
# groupLabel:   metadata column used by SplitObject() to split the
#               object into per-dataset Seurat objects
# reference:    character vector of dataset names to use as the
#               integration reference(s); already split by the caller
# out:          path where the integrated .rds object is written
main <- function(seuratObject, groupLabel, reference, out) {
  cat("\nSeurat object: "); cat(seuratObject); cat("\n")
  cat("Group label: "); cat(groupLabel); cat("\n")
  cat("Reference(s): "); for (i in reference){ cat(i); cat(" ") }; cat("\n")
  cat("Output path: "); cat(out); cat("\n\n")

  cat("Reading seurat object\n")
  sobj <- readRDS(seuratObject)
  cat("Splitting seurat object by group label\n")
  sobj.list <- SplitObject(sobj, split.by = groupLabel)
  cat("Datasets:"); cat(names(sobj.list)); cat("\n")

  cat("Normalizing each dataset\n")
  for (i in names(sobj.list)) {
    cat("\t"); cat(i); cat("\n")
    sobj.list[[i]] <- SCTransform(sobj.list[[i]], verbose = FALSE)
  }

  cat("Preparing for integration\n")
  sobj.features <- SelectIntegrationFeatures(object.list = sobj.list, nfeatures = 3000)
  sobj.list <- PrepSCTIntegration(object.list = sobj.list, anchor.features = sobj.features)

  cat("Setting reference dataset\n")
  # BUG FIX: `reference` is already a character vector (the caller
  # splits opt$reference on commas before calling main()). The old
  # code re-ran strsplit(...)[[1]] here, which kept only the first
  # reference and silently dropped the rest. Use %in% so every named
  # reference dataset is matched.
  reference_dataset <- which(names(sobj.list) %in% reference)

  cat("Finding integration anchors\n")
  sobj.anchors <- FindIntegrationAnchors(object.list = sobj.list, normalization.method = "SCT",
                                         anchor.features = sobj.features, reference = reference_dataset)
  cat("Integrating dataset\n")
  sobj.integrated <- IntegrateData(anchorset = sobj.anchors, normalization.method = "SCT")

  cat("Running Dimension reduction\n")
  sobj.integrated <- RunPCA(object = sobj.integrated, verbose = TRUE)
  sobj.integrated <- RunUMAP(object = sobj.integrated, dims = 1:30)

  cat("Saving integrated object")
  saveRDS(sobj.integrated, file = out)
}

# argument error handling
if (is.na(opt$seuratObject)) {
  cat("Input error: please provide path for a seurat object \n")
} else if (is.na(opt$groupLabel)) {
  cat("Input error: please provide a group label \n")
} else if (is.na(opt$reference)) {
  cat("Input error: please provide the name of your reference dataset \n")
} else if (is.na(opt$out)) {
  cat("Input error: please provide the output path \n")
} else {
  # split up references (comma-separated list -> character vector)
  reference <- strsplit(opt$reference, split = ",")[[1]]
  main(opt$seuratObject, opt$groupLabel, reference, opt$out)
}
|
/integrate.R
|
no_license
|
jasenjackson/singlecell
|
R
| false
| false
| 3,376
|
r
|
# integrate.R
# 7-26-2019
# Integrates single cell genomics datasets using Seurat methods.
# Currently designed to use Seurat's reference-based CCA method
# for integration. The script expects all datasets to be merged
# into 1 Seurat object.
# usage: Rscript integrate.r -s "/data/seurat_object.rds" -g "individual" -r "Ind4"
#        Rscript integrate.r --seuratObject "../data/BreastAtlas.rds" \
#                            --groupLabel "sample_origin" \
#                            --reference "Ind4" \
#                            --out "../data/BreastCancerAtlas.integrated.rds"
library(Seurat)
library(ggplot2)

# Allow large globals (up to ~4 GB) to be exported to future workers.
options(future.globals.maxSize = 4000 * 1024^2)

# parse arguments
suppressPackageStartupMessages(require(optparse))
option_list <- list(
  make_option(c("-s", "--seuratObject"), action = "store", default = NA, type = "character"),
  make_option(c("-g", "--groupLabel"), action = "store", default = NA, type = "character"),
  make_option(c("-r", "--reference"), action = "store", default = NA, type = "character"),
  make_option(c("-o", "--out"), action = "store", default = NA, type = "character")
)
opt <- parse_args(OptionParser(option_list = option_list))

# Integrate the datasets contained in a merged Seurat object.
#
# seuratObject: path to an .rds file holding the merged Seurat object
# groupLabel:   metadata column used by SplitObject() to split the
#               object into per-dataset Seurat objects
# reference:    character vector of dataset names to use as the
#               integration reference(s); already split by the caller
# out:          path where the integrated .rds object is written
main <- function(seuratObject, groupLabel, reference, out) {
  cat("\nSeurat object: "); cat(seuratObject); cat("\n")
  cat("Group label: "); cat(groupLabel); cat("\n")
  cat("Reference(s): "); for (i in reference){ cat(i); cat(" ") }; cat("\n")
  cat("Output path: "); cat(out); cat("\n\n")

  cat("Reading seurat object\n")
  sobj <- readRDS(seuratObject)
  cat("Splitting seurat object by group label\n")
  sobj.list <- SplitObject(sobj, split.by = groupLabel)
  cat("Datasets:"); cat(names(sobj.list)); cat("\n")

  cat("Normalizing each dataset\n")
  for (i in names(sobj.list)) {
    cat("\t"); cat(i); cat("\n")
    sobj.list[[i]] <- SCTransform(sobj.list[[i]], verbose = FALSE)
  }

  cat("Preparing for integration\n")
  sobj.features <- SelectIntegrationFeatures(object.list = sobj.list, nfeatures = 3000)
  sobj.list <- PrepSCTIntegration(object.list = sobj.list, anchor.features = sobj.features)

  cat("Setting reference dataset\n")
  # BUG FIX: `reference` is already a character vector (the caller
  # splits opt$reference on commas before calling main()). The old
  # code re-ran strsplit(...)[[1]] here, which kept only the first
  # reference and silently dropped the rest. Use %in% so every named
  # reference dataset is matched.
  reference_dataset <- which(names(sobj.list) %in% reference)

  cat("Finding integration anchors\n")
  sobj.anchors <- FindIntegrationAnchors(object.list = sobj.list, normalization.method = "SCT",
                                         anchor.features = sobj.features, reference = reference_dataset)
  cat("Integrating dataset\n")
  sobj.integrated <- IntegrateData(anchorset = sobj.anchors, normalization.method = "SCT")

  cat("Running Dimension reduction\n")
  sobj.integrated <- RunPCA(object = sobj.integrated, verbose = TRUE)
  sobj.integrated <- RunUMAP(object = sobj.integrated, dims = 1:30)

  cat("Saving integrated object")
  saveRDS(sobj.integrated, file = out)
}

# argument error handling
if (is.na(opt$seuratObject)) {
  cat("Input error: please provide path for a seurat object \n")
} else if (is.na(opt$groupLabel)) {
  cat("Input error: please provide a group label \n")
} else if (is.na(opt$reference)) {
  cat("Input error: please provide the name of your reference dataset \n")
} else if (is.na(opt$out)) {
  cat("Input error: please provide the output path \n")
} else {
  # split up references (comma-separated list -> character vector)
  reference <- strsplit(opt$reference, split = ",")[[1]]
  main(opt$seuratObject, opt$groupLabel, reference, opt$out)
}
|
#########################
## bring fishbase data ##
#########################
# Scrape fish metabolic-rate tables from fishbase.de, attach
# per-species diet/habitat traits scraped from each species' summary
# page, append a reef-fish rates CSV, and save the combined
# `metabolicRates` data frame for downstream analyses.
library(XML)
library(Hmisc)

# Index page: one table row per species with oxygen-consumption data.
theurl <- "http://www.fishbase.de/Topic/List.php?group=29"
pagetree <- htmlTreeParse(theurl, error=function(...){})
urltable <- pagetree$children$html$children$body$children$table

# Pull the relative URL of each species page plus its family and
# species names out of the parsed index table.
# NOTE(review): the positional indices [7], [5] and [9] encode the
# page's HTML layout at scraping time -- confirm before re-running.
urls <- vector(mode='character')
families <- vector(mode='character')
species <- vector(mode='character')
for(i in 1:length(urltable$children[[2]])) {
  urls[i] <- unlist(urltable$children[[2]][[i]])[7]
  families[i] <- unlist(urltable$children[[2]][[i]][[5]])[5]
  species[i] <- unlist(urltable$children[[2]][[i]])[9]
}
# Turn the relative links (starting with "..") into absolute URLs.
urls <- sub("..",'http://www.fishbase.de',urls)

# Collect the reference links cited on every species page.
URLREF <- vector(mode='character')
for(i in 1:length(urls)) {
  newurl <- urls[i]
  sptree <- htmlTreeParse(newurl, error=function(...){})
  sptable <- sptree$children$html$children$body$children$table
  URL <- vector(mode='character')
  for(a in 1:length(sptable$children[[2]])) {
    if(length(unlist(sptable$children[[2]][[a]])[6]) > 0) {
      URL[a] <- unlist(sptable$children[[2]][[a]])[6]
    }
  }
  URLREF <- c(URLREF, URL)
}
URLREF <- unlist(lapply(URLREF, function(x){paste("http://www.fishbase.de", x, sep="")}))

# Scrape the per-species metabolic-rate tables into one data frame.
# Columns: family, species, rate, rate20, weight, temp, salinity,
# activity, stress. The prototype row is dropped with [-1,] so the
# frame starts empty with the right column types.
fishbase.rate <- data.frame(family='',species='',rate=NA,rate20=NA,weight=NA,temp=NA,salinity=NA,activity='',stress='',stringsAsFactors = FALSE)[-1,]
for(i in 1:length(urls)) {
  theurl <- urls[i]
  pagetree <- htmlTreeParse(theurl, error=function(...){})
  urltable <- pagetree$children$html$children$body$children$table
  # Pre-size one block of rows for this species' table.
  dog <- data.frame(family='',species='',rate=NA,rate20=NA,weight=NA,temp=NA,salinity=NA,activity='',stress='',
  stringsAsFactors = FALSE)[rep(1,length(urltable$children[[2]])),]
  for(j in 1:length(urltable$children[[2]])) {
    dog$family[j] <- families[i]
    dog$species[j] <- species[i]
    # Numeric cells (rate, rate20, weight, temp, salinity): strip
    # thousands separators (",") before coercing to numeric.
    if(length(urltable$children[[2]][[j]][[1]])>0) {
      dog[j,3] <- as.numeric(sub(',','',unlist(urltable$children[[2]][[j]][[1]][[1]])[4]))}
    for(k in 2:5) {
      if(length(urltable$children[[2]][[j]][[k]])>0) {
        dog[j,k+2] <- as.numeric(sub(',','',unlist(urltable$children[[2]][[j]][[k]][[1]])[2]))}}
    # Text cells: activity (column 8) and stress (column 9).
    if(length(urltable$children[[2]][[j]][[6]])>0) {
      dog[j,8] <- unlist(urltable$children[[2]][[j]][[6]][[1]])[2]}
    if(length(urltable$children[[2]][[j]][[7]])>0) {
      dog[j,9] <- unlist(urltable$children[[2]][[j]][[7]][[1]])[2]}
  }
  fishbase.rate <- rbind(fishbase.rate, dog)
}

#################################
## get traits for each species ##
#################################
# Build the summary-page URL for every species. fishbase uses
# "Genus-species.html"; three-part names become "Genus-species+third".
spp.met <- sort(unique(fishbase.rate$species))
fishbase.spp <- vector()
for(i in 1:length(spp.met)){
  vec <- unlist(strsplit(spp.met[i], " "))
  if(length(vec) == 2){
    fishbase.spp[i] <- paste(vec[1], "-", vec[2], sep="")
  } else {
    fishbase.spp[i] <- paste(vec[1], "-", vec[2], "+", vec[3], sep="")
  }
}
fishbase.spp <- paste("http://fishbase.de/summary/", fishbase.spp, ".html", sep="")
rm(i, vec)

# For each species summary page, grab the trophic-level sentence
# (contains "Based"), the habitat keywords and reef association.
diet_num <- vector(mode='character', length=length(fishbase.spp))
habitat <- vector(mode='character', length=length(fishbase.spp))
reef <- vector(mode='character', length=length(fishbase.spp))
for(j in seq_along(fishbase.spp)) {
  theurl <- htmlTreeParse(fishbase.spp[j], error=function(...){})
  step1 <- unlist(theurl$children$html$children$body)
  step2 <- step1[grep("Based", step1)]
  step3 <- step1[grep("Freshwater;", step1)]
  step4 <- step1[grep("Marine;", step1)]
  step5 <- step1[grep("reef-associated", step1, ignore.case=TRUE)]
  if(length(step2) != 0)
    diet_num[j] <- step2
  else
    diet_num[j] <- "missing_diet"
  if(length(step5) != 0)
    reef[j] <- "yes"
  else
    reef[j] <- "no"
  # Habitat is "Both" / "Freshwater" / "Marine" / "None" depending on
  # which of the two keywords appeared on the page.
  if(length(step3) != 0 & length(step4) != 0)
    habitat[j] <- "Both"
  if(length(step3) != 0 & length(step4) == 0)
    habitat[j] <- "Freshwater"
  if(length(step3) == 0 & length(step4) != 0)
    habitat[j] <- "Marine"
  if(length(step3) == 0 & length(step4) == 0)
    habitat[j] <- "None"
}
# Extract the numeric trophic level from the scraped sentence.
re <- ".+[[:space:]]+([0-9.]+)[[:space:]].*"
diet_num <- as.numeric(unname(sub(re, "\\1", diet_num)))
diet.table <- data.frame(species=spp.met, diet_num=diet_num, habitat=habitat, reef=reef, stringsAsFactors=FALSE)
# Classify diet from trophic level into the four categories H/O/C/P
# (thresholds at 2.2, 2.8 and 3.7).
diet.table$diet[diet.table$diet_num >= 2 & diet.table$diet_num < 2.20] <- "H"
diet.table$diet[diet.table$diet_num >= 2.2 & diet.table$diet_num < 2.80] <- "O"
diet.table$diet[diet.table$diet_num >= 2.8 & diet.table$diet_num < 3.70] <- "C"
diet.table$diet[diet.table$diet_num >= 3.7] <- "P"
unique(diet.table$diet_num) # sanity check: all trophic levels parsed to numbers
# NOTE(review): by=c("species","species") repeats the same key;
# by="species" would suffice.
fishbase.rate <- merge(fishbase.rate, diet.table, by=c("species","species"))
# Drop helper columns not needed downstream.
fishbase.rate <- fishbase.rate[,!names(fishbase.rate) %in% c("rate20","diet_num","diet")]

#################################################
## bind new metabolic rates data for reef fish ##
#################################################
# First, download the reef-fish metabolic rates .csv file provided in
# the online supporting information. Then follow the script as below.
reef.rates <- read.csv("data/ELEbarnecheST1.csv", header=TRUE, stringsAsFactors=FALSE, na.strings=c("",NA))
# Convert mL or uL of O2/h to mgO2/kg/h. 1 mL O2 = 1.429 mg O2;
# weight_mg/1000000 converts mg to kg; 10e-4 (= 1e-3) converts uL to mL.
reef.rates$rate[reef.rates$rate_unit=="mlO2_per_hour"] <- reef.rates$rate[reef.rates$rate_unit=="mlO2_per_hour"]*1.429 / (reef.rates$weight_mg[reef.rates$rate_unit=="mlO2_per_hour"]/1000000)
reef.rates$rate[reef.rates$rate_unit=="uLO2_per_h"] <- reef.rates$rate[reef.rates$rate_unit=="uLO2_per_h"]*1.429*10e-4 / (reef.rates$weight_mg[reef.rates$rate_unit=="uLO2_per_h"]/1000000)
reef.rates$rate_unit <- "mgO2_per_kg_per_h"
# Convert mass from mg to grams.
reef.rates$weight_mg <- reef.rates$weight_mg/1000
# Standardize taxonomic names.
reef.rates$family <- capitalize(reef.rates$family)
reef.rates$species <- capitalize(gsub("_"," ",reef.rates$species))
# Assuming psu, ppt and ppm are equivalent under tropical sea-level
# conditions.
reef.rates$salinity[reef.rates$salinity=="field_seawater"] <- 35
reef.rates$salinity <- as.numeric(reef.rates$salinity)
# Specify types of stress.
reef.rates$stress <- "none specified"
reef.rates$stress[reef.rates$dissolved_oxygen %in% 3:5] <- "hypoxia"
reef.rates$stress[!reef.rates$salinity %in% 30:35] <- "salinity"
# Align columns with fishbase.rate before row-binding.
reef.rates <- data.frame(species=reef.rates$species, family=reef.rates$family, rate=reef.rates$rate,
weight=reef.rates$weight_mg, temp=reef.rates$temperature, salinity=reef.rates$salinity,
activity=reef.rates$rate_type, stress=reef.rates$stress, habitat="Marine",
reef="yes", stringsAsFactors=FALSE)
metabolicRates <- rbind(fishbase.rate, reef.rates)
# Keep only the final object and save the workspace image.
rm(list=ls()[!(ls() %in% c("metabolicRates"))])
save.image("re-run/database-10-metabolicRates.RData")
|
/re-run/database-10-metabolicRates.R
|
no_license
|
dbarneche/ELEBarneche
|
R
| false
| false
| 6,984
|
r
|
#########################
## bring fishbase data ##
#########################
# Scrape fish metabolic-rate tables from fishbase.de, attach
# per-species diet/habitat traits scraped from each species' summary
# page, append a reef-fish rates CSV, and save the combined
# `metabolicRates` data frame for downstream analyses.
library(XML)
library(Hmisc)

# Index page: one table row per species with oxygen-consumption data.
theurl <- "http://www.fishbase.de/Topic/List.php?group=29"
pagetree <- htmlTreeParse(theurl, error=function(...){})
urltable <- pagetree$children$html$children$body$children$table

# Pull the relative URL of each species page plus its family and
# species names out of the parsed index table.
# NOTE(review): the positional indices [7], [5] and [9] encode the
# page's HTML layout at scraping time -- confirm before re-running.
urls <- vector(mode='character')
families <- vector(mode='character')
species <- vector(mode='character')
for(i in 1:length(urltable$children[[2]])) {
  urls[i] <- unlist(urltable$children[[2]][[i]])[7]
  families[i] <- unlist(urltable$children[[2]][[i]][[5]])[5]
  species[i] <- unlist(urltable$children[[2]][[i]])[9]
}
# Turn the relative links (starting with "..") into absolute URLs.
urls <- sub("..",'http://www.fishbase.de',urls)

# Collect the reference links cited on every species page.
URLREF <- vector(mode='character')
for(i in 1:length(urls)) {
  newurl <- urls[i]
  sptree <- htmlTreeParse(newurl, error=function(...){})
  sptable <- sptree$children$html$children$body$children$table
  URL <- vector(mode='character')
  for(a in 1:length(sptable$children[[2]])) {
    if(length(unlist(sptable$children[[2]][[a]])[6]) > 0) {
      URL[a] <- unlist(sptable$children[[2]][[a]])[6]
    }
  }
  URLREF <- c(URLREF, URL)
}
URLREF <- unlist(lapply(URLREF, function(x){paste("http://www.fishbase.de", x, sep="")}))

# Scrape the per-species metabolic-rate tables into one data frame.
# Columns: family, species, rate, rate20, weight, temp, salinity,
# activity, stress. The prototype row is dropped with [-1,] so the
# frame starts empty with the right column types.
fishbase.rate <- data.frame(family='',species='',rate=NA,rate20=NA,weight=NA,temp=NA,salinity=NA,activity='',stress='',stringsAsFactors = FALSE)[-1,]
for(i in 1:length(urls)) {
  theurl <- urls[i]
  pagetree <- htmlTreeParse(theurl, error=function(...){})
  urltable <- pagetree$children$html$children$body$children$table
  # Pre-size one block of rows for this species' table.
  dog <- data.frame(family='',species='',rate=NA,rate20=NA,weight=NA,temp=NA,salinity=NA,activity='',stress='',
  stringsAsFactors = FALSE)[rep(1,length(urltable$children[[2]])),]
  for(j in 1:length(urltable$children[[2]])) {
    dog$family[j] <- families[i]
    dog$species[j] <- species[i]
    # Numeric cells (rate, rate20, weight, temp, salinity): strip
    # thousands separators (",") before coercing to numeric.
    if(length(urltable$children[[2]][[j]][[1]])>0) {
      dog[j,3] <- as.numeric(sub(',','',unlist(urltable$children[[2]][[j]][[1]][[1]])[4]))}
    for(k in 2:5) {
      if(length(urltable$children[[2]][[j]][[k]])>0) {
        dog[j,k+2] <- as.numeric(sub(',','',unlist(urltable$children[[2]][[j]][[k]][[1]])[2]))}}
    # Text cells: activity (column 8) and stress (column 9).
    if(length(urltable$children[[2]][[j]][[6]])>0) {
      dog[j,8] <- unlist(urltable$children[[2]][[j]][[6]][[1]])[2]}
    if(length(urltable$children[[2]][[j]][[7]])>0) {
      dog[j,9] <- unlist(urltable$children[[2]][[j]][[7]][[1]])[2]}
  }
  fishbase.rate <- rbind(fishbase.rate, dog)
}

#################################
## get traits for each species ##
#################################
# Build the summary-page URL for every species. fishbase uses
# "Genus-species.html"; three-part names become "Genus-species+third".
spp.met <- sort(unique(fishbase.rate$species))
fishbase.spp <- vector()
for(i in 1:length(spp.met)){
  vec <- unlist(strsplit(spp.met[i], " "))
  if(length(vec) == 2){
    fishbase.spp[i] <- paste(vec[1], "-", vec[2], sep="")
  } else {
    fishbase.spp[i] <- paste(vec[1], "-", vec[2], "+", vec[3], sep="")
  }
}
fishbase.spp <- paste("http://fishbase.de/summary/", fishbase.spp, ".html", sep="")
rm(i, vec)

# For each species summary page, grab the trophic-level sentence
# (contains "Based"), the habitat keywords and reef association.
diet_num <- vector(mode='character', length=length(fishbase.spp))
habitat <- vector(mode='character', length=length(fishbase.spp))
reef <- vector(mode='character', length=length(fishbase.spp))
for(j in seq_along(fishbase.spp)) {
  theurl <- htmlTreeParse(fishbase.spp[j], error=function(...){})
  step1 <- unlist(theurl$children$html$children$body)
  step2 <- step1[grep("Based", step1)]
  step3 <- step1[grep("Freshwater;", step1)]
  step4 <- step1[grep("Marine;", step1)]
  step5 <- step1[grep("reef-associated", step1, ignore.case=TRUE)]
  if(length(step2) != 0)
    diet_num[j] <- step2
  else
    diet_num[j] <- "missing_diet"
  if(length(step5) != 0)
    reef[j] <- "yes"
  else
    reef[j] <- "no"
  # Habitat is "Both" / "Freshwater" / "Marine" / "None" depending on
  # which of the two keywords appeared on the page.
  if(length(step3) != 0 & length(step4) != 0)
    habitat[j] <- "Both"
  if(length(step3) != 0 & length(step4) == 0)
    habitat[j] <- "Freshwater"
  if(length(step3) == 0 & length(step4) != 0)
    habitat[j] <- "Marine"
  if(length(step3) == 0 & length(step4) == 0)
    habitat[j] <- "None"
}
# Extract the numeric trophic level from the scraped sentence.
re <- ".+[[:space:]]+([0-9.]+)[[:space:]].*"
diet_num <- as.numeric(unname(sub(re, "\\1", diet_num)))
diet.table <- data.frame(species=spp.met, diet_num=diet_num, habitat=habitat, reef=reef, stringsAsFactors=FALSE)
# Classify diet from trophic level into the four categories H/O/C/P
# (thresholds at 2.2, 2.8 and 3.7).
diet.table$diet[diet.table$diet_num >= 2 & diet.table$diet_num < 2.20] <- "H"
diet.table$diet[diet.table$diet_num >= 2.2 & diet.table$diet_num < 2.80] <- "O"
diet.table$diet[diet.table$diet_num >= 2.8 & diet.table$diet_num < 3.70] <- "C"
diet.table$diet[diet.table$diet_num >= 3.7] <- "P"
unique(diet.table$diet_num) # sanity check: all trophic levels parsed to numbers
# NOTE(review): by=c("species","species") repeats the same key;
# by="species" would suffice.
fishbase.rate <- merge(fishbase.rate, diet.table, by=c("species","species"))
# Drop helper columns not needed downstream.
fishbase.rate <- fishbase.rate[,!names(fishbase.rate) %in% c("rate20","diet_num","diet")]

#################################################
## bind new metabolic rates data for reef fish ##
#################################################
# First, download the reef-fish metabolic rates .csv file provided in
# the online supporting information. Then follow the script as below.
reef.rates <- read.csv("data/ELEbarnecheST1.csv", header=TRUE, stringsAsFactors=FALSE, na.strings=c("",NA))
# Convert mL or uL of O2/h to mgO2/kg/h. 1 mL O2 = 1.429 mg O2;
# weight_mg/1000000 converts mg to kg; 10e-4 (= 1e-3) converts uL to mL.
reef.rates$rate[reef.rates$rate_unit=="mlO2_per_hour"] <- reef.rates$rate[reef.rates$rate_unit=="mlO2_per_hour"]*1.429 / (reef.rates$weight_mg[reef.rates$rate_unit=="mlO2_per_hour"]/1000000)
reef.rates$rate[reef.rates$rate_unit=="uLO2_per_h"] <- reef.rates$rate[reef.rates$rate_unit=="uLO2_per_h"]*1.429*10e-4 / (reef.rates$weight_mg[reef.rates$rate_unit=="uLO2_per_h"]/1000000)
reef.rates$rate_unit <- "mgO2_per_kg_per_h"
# Convert mass from mg to grams.
reef.rates$weight_mg <- reef.rates$weight_mg/1000
# Standardize taxonomic names.
reef.rates$family <- capitalize(reef.rates$family)
reef.rates$species <- capitalize(gsub("_"," ",reef.rates$species))
# Assuming psu, ppt and ppm are equivalent under tropical sea-level
# conditions.
reef.rates$salinity[reef.rates$salinity=="field_seawater"] <- 35
reef.rates$salinity <- as.numeric(reef.rates$salinity)
# Specify types of stress.
reef.rates$stress <- "none specified"
reef.rates$stress[reef.rates$dissolved_oxygen %in% 3:5] <- "hypoxia"
reef.rates$stress[!reef.rates$salinity %in% 30:35] <- "salinity"
# Align columns with fishbase.rate before row-binding.
reef.rates <- data.frame(species=reef.rates$species, family=reef.rates$family, rate=reef.rates$rate,
weight=reef.rates$weight_mg, temp=reef.rates$temperature, salinity=reef.rates$salinity,
activity=reef.rates$rate_type, stress=reef.rates$stress, habitat="Marine",
reef="yes", stringsAsFactors=FALSE)
metabolicRates <- rbind(fishbase.rate, reef.rates)
# Keep only the final object and save the workspace image.
rm(list=ls()[!(ls() %in% c("metabolicRates"))])
save.image("re-run/database-10-metabolicRates.RData")
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/unnest.R
\name{dt_unnest}
\alias{dt_unnest}
\title{Unnest: Fast Unnesting of Data Tables}
\usage{
dt_unnest(dt_, col)
}
\arguments{
\item{dt_}{the data table to unnest}
\item{col}{the column to unnest}
}
\description{
Quickly unnest data tables, particularly those nested by \code{dt_nest()}.
}
\examples{
library(data.table)
dt <- data.table(
x = rnorm(1e5),
y = runif(1e5),
grp = sample(1L:3L, 1e5, replace = TRUE)
)
nested <- dt_nest(dt, grp)
dt_unnest(nested, col = data)
}
|
/man/dt_unnest.Rd
|
no_license
|
TysonStanley/tidyfast
|
R
| false
| true
| 565
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/unnest.R
\name{dt_unnest}
\alias{dt_unnest}
\title{Unnest: Fast Unnesting of Data Tables}
\usage{
dt_unnest(dt_, col)
}
\arguments{
\item{dt_}{the data table to unnest}
\item{col}{the column to unnest}
}
\description{
Quickly unnest data tables, particularly those nested by \code{dt_nest()}.
}
\examples{
library(data.table)
dt <- data.table(
x = rnorm(1e5),
y = runif(1e5),
grp = sample(1L:3L, 1e5, replace = TRUE)
)
nested <- dt_nest(dt, grp)
dt_unnest(nested, col = data)
}
|
# Meta --------------------------------------------------------------------
## HW 03, Problem 1 checks
#
# Description:
#   Verify that the "my_*" reimplementations behave like R's built-in
#   sum(), mean() and var() across typed, missing-valued and empty inputs,
#   and that they reject non-numeric input.

# Load libraries / functions ----------------------------------------------
library(testthat)
source("problem_01_functions.R")

# Set up test environment -------------------------------------------------
# Basic atomic vectors
v_int <- 1:5
v_dbl <- 1:5 + .1
v_chr <- letters[1:5]
v_lgl <- c(TRUE, FALSE, TRUE, FALSE)

# Non-atomic objects
l_int <- list(1, 2, 3, 4, 5)

# Atomic numerics with missingness
v_int_na <- c(1:4, NA_integer_, 6:8, NA_integer_, NA_integer_)
v_dbl_na <- c(1:4 + .1, NA, 6:8 + .1, NA, NA)

# Empty atomics
empty_int <- integer(0)
empty_dbl <- double(0)

# Check custom functions --------------------------------------------------
test_that("Assertions are met", {
  # Character, logical and list inputs must all be rejected.
  for (bad_input in list(v_chr, v_lgl, l_int)) {
    expect_error(my_sum(bad_input))
  }
  for (bad_input in list(v_chr, v_lgl, l_int)) {
    expect_error(my_mean(bad_input))
  }
  for (bad_input in list(v_chr, v_lgl, l_int)) {
    expect_error(my_var(bad_input))
  }
})

test_that("Functions mimic basic inputs", {
  # Integer and double vectors without missingness.
  for (vec in list(v_int, v_dbl)) {
    expect_equal(my_sum(vec), sum(vec))
    expect_equal(my_mean(vec), mean(vec))
    expect_equal(my_var(vec), var(vec))
  }
})

test_that("Functions mimic NA handling", {
  # Both na.rm settings, on both integer and double NA vectors.
  for (drop_na in c(TRUE, FALSE)) {
    for (na_vec in list(v_int_na, v_dbl_na)) {
      expect_equal(my_sum(na_vec, na.rm = drop_na), sum(na_vec, na.rm = drop_na))
      expect_equal(my_mean(na_vec, na.rm = drop_na), mean(na_vec, na.rm = drop_na))
      expect_equal(my_var(na_vec, na.rm = drop_na), var(na_vec, na.rm = drop_na))
    }
  }
})

test_that("Functions mimic empty inputs", {
  # Zero-length numeric vectors.
  for (empty_vec in list(empty_int, empty_dbl)) {
    expect_equal(my_sum(empty_vec), sum(empty_vec))
    expect_equal(my_mean(empty_vec), mean(empty_vec))
    expect_equal(my_var(empty_vec), var(empty_vec))
  }
})
|
/assignment3/solution_spencer/problem_01_checks.R
|
no_license
|
hmsuw-learn-r/HomeworkSolutions
|
R
| false
| false
| 2,445
|
r
|
# Meta --------------------------------------------------------------------
## HW 03, Problem 1 checks
#
# Description:
#   Check R built-in functions against "my_*" function variants: the
#   my_sum(), my_mean() and my_var() implementations sourced below must
#   error on invalid input and otherwise agree with sum(), mean() and
#   var(), including NA handling and zero-length vectors.

# Load libraries / functions ----------------------------------------------
library(testthat)
source("problem_01_functions.R")

# Set up test environment -------------------------------------------------
# Basic atomic vectors
v_int <- 1:5
v_dbl <- 1:5 + .1
v_chr <- letters[1:5]
v_lgl <- c(TRUE, FALSE, TRUE, FALSE)

# Non-atomic objects (the my_* functions should reject lists)
l_int <- list(1, 2, 3, 4, 5)

# Atomic numerics with missingness
v_int_na <- c(1:4, NA_integer_, 6:8, NA_integer_, NA_integer_)
v_dbl_na <- c(1:4 + .1, NA, 6:8 + .1, NA, NA)

# Empty atomics
empty_int <- integer(0)
empty_dbl <- double(0)

# Check custom functions --------------------------------------------------
# Character, logical and list inputs must all raise an error.
test_that("Assertions are met", {
  expect_error(my_sum(v_chr))
  expect_error(my_sum(v_lgl))
  expect_error(my_sum(l_int))
  expect_error(my_mean(v_chr))
  expect_error(my_mean(v_lgl))
  expect_error(my_mean(l_int))
  expect_error(my_var(v_chr))
  expect_error(my_var(v_lgl))
  expect_error(my_var(l_int))
})

# Integer and double vectors without missingness.
test_that("Functions mimic basic inputs", {
  # Basic inputs
  expect_equal(my_sum(v_int), sum(v_int))
  expect_equal(my_sum(v_dbl), sum(v_dbl))
  expect_equal(my_mean(v_int), mean(v_int))
  expect_equal(my_mean(v_dbl), mean(v_dbl))
  expect_equal(my_var(v_int), var(v_int))
  expect_equal(my_var(v_dbl), var(v_dbl))
})

# Both na.rm settings, on both integer and double NA vectors.
test_that("Functions mimic NA handling", {
  for (rm_type in c(TRUE, FALSE)) {
    expect_equal(my_sum(v_int_na, na.rm = rm_type), sum(v_int_na, na.rm = rm_type))
    expect_equal(my_sum(v_dbl_na, na.rm = rm_type), sum(v_dbl_na, na.rm = rm_type))
    expect_equal(my_mean(v_int_na, na.rm = rm_type), mean(v_int_na, na.rm = rm_type))
    expect_equal(my_mean(v_dbl_na, na.rm = rm_type), mean(v_dbl_na, na.rm = rm_type))
    expect_equal(my_var(v_int_na, na.rm = rm_type), var(v_int_na, na.rm = rm_type))
    expect_equal(my_var(v_dbl_na, na.rm = rm_type), var(v_dbl_na, na.rm = rm_type))
  }
})

# Zero-length numeric vectors.
test_that("Functions mimic empty inputs", {
  expect_equal(my_sum(empty_int), sum(empty_int))
  expect_equal(my_sum(empty_dbl), sum(empty_dbl))
  expect_equal(my_mean(empty_int), mean(empty_int))
  expect_equal(my_mean(empty_dbl), mean(empty_dbl))
  expect_equal(my_var(empty_int), var(empty_int))
  expect_equal(my_var(empty_dbl), var(empty_dbl))
})
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/plot-recr-mcmc.R
\name{plot_recr_mcmc}
\alias{plot_recr_mcmc}
\title{Plot MCMC recruitments for iSCAM models}
\usage{
plot_recr_mcmc(
models,
show_ro = TRUE,
ro_color = base_color,
legend_title = "Models",
xlim = NULL,
ylim = NULL,
line_width = 1,
point_size = 2,
ro_ribbon = TRUE,
ro_alpha = 0.3,
palette = iscam_palette,
base_color = "black",
r_dodge = 0.1,
x_space = 0.5,
append_base_txt = NULL,
ind_letter = NULL,
leg_loc = c(1, 1),
probs = c(0.025, 0.5, 0.975),
text_title_size = 12,
angle_x_labels = FALSE,
...
)
}
\arguments{
\item{models}{A list of iscam model objects (class \link{mdl_lst_cls})}
\item{show_ro}{Show the initial recruitment, R0 median line and credible interval}
\item{legend_title}{Title for legend}
\item{xlim}{The x limits for the plot. If \code{NULL}, the limits of the data
will be used}
\item{ylim}{The y limits for the plot. If \code{NULL}, the limits of the data
will be used}
\item{line_width}{Width of all median lines on the plot}
\item{point_size}{Point size for all median points on the plot}
\item{ro_ribbon}{See \code{refpts_ribbon} in \code{\link[=plot_biomass_mcmc]{plot_biomass_mcmc()}}}
\item{ro_alpha}{See \code{refpts_alpha} in \code{\link[=plot_biomass_mcmc]{plot_biomass_mcmc()}}}
\item{palette}{A palette value that is in \link[RColorBrewer:ColorBrewer]{RColorBrewer::brewer.pal.info}}
\item{base_color}{A color to prepend to the brewer colors which are set by
\code{palette}. This is called \code{base_color} because it is likely to be a base
model}
\item{r_dodge}{See \code{bo_dodge} in \code{\link[=plot_biomass_mcmc]{plot_biomass_mcmc()}}}
\item{x_space}{The amount of x-interval space to pad the left and right of
the plot with. To remove all padding, make this 0}
\item{append_base_txt}{A vector of strings to append to the model names for
display on the plot legend or title}
\item{leg_loc}{A two-element vector describing the X-Y values between 0 and
1 to anchor the legend to. eg. c(1, 1) is the top right corner and c(0, 0)
is the bottom left corner}
\item{probs}{A 3-element vector of probabilities that appear in the output
data frames. This is provided in case the data frames have more than three
different quantile levels}
\item{angle_x_labels}{If \code{TRUE} put 45 degree angle on x-axis tick labels}
}
\description{
Plot the MCMC recruitment time series trajectories with credible intervals
for iscam models.
}
\seealso{
Other Time series plotting functions:
\code{\link{plot_biomass_grid_mcmc}()},
\code{\link{plot_biomass_mcmc}()},
\code{\link{plot_biomass_mpd}()},
\code{\link{plot_biomass_proj_mcmc}()},
\code{\link{plot_catch_fit_mcmc}()},
\code{\link{plot_f_mcmc}()},
\code{\link{plot_index_mcmc}()},
\code{\link{plot_index_mpd}()},
\code{\link{plot_q_mcmc}()},
\code{\link{plot_rdevs_mcmc}()},
\code{\link{plot_recr_grid_mcmc}()},
\code{\link{plot_recr_mpd}()},
\code{\link{plot_ts_mcmc}()},
\code{\link{plot_vuln_mcmc}()}
}
\concept{Time series plotting functions}
|
/man/plot_recr_mcmc.Rd
|
no_license
|
pbs-assess/gfiscamutils
|
R
| false
| true
| 3,077
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/plot-recr-mcmc.R
\name{plot_recr_mcmc}
\alias{plot_recr_mcmc}
\title{Plot MCMC recruitments for iSCAM models}
\usage{
plot_recr_mcmc(
models,
show_ro = TRUE,
ro_color = base_color,
legend_title = "Models",
xlim = NULL,
ylim = NULL,
line_width = 1,
point_size = 2,
ro_ribbon = TRUE,
ro_alpha = 0.3,
palette = iscam_palette,
base_color = "black",
r_dodge = 0.1,
x_space = 0.5,
append_base_txt = NULL,
ind_letter = NULL,
leg_loc = c(1, 1),
probs = c(0.025, 0.5, 0.975),
text_title_size = 12,
angle_x_labels = FALSE,
...
)
}
\arguments{
\item{models}{A list of iscam model objects (class \link{mdl_lst_cls})}
\item{show_ro}{Show the initial recruitment, R0 median line and credible interval}
\item{legend_title}{Title for legend}
\item{xlim}{The x limits for the plot. If \code{NULL}, the limits of the data
will be used}
\item{ylim}{The y limits for the plot. If \code{NULL}, the limits of the data
will be used}
\item{line_width}{Width of all median lines on the plot}
\item{point_size}{Point size for all median points on the plot}
\item{ro_ribbon}{See \code{refpts_ribbon} in \code{\link[=plot_biomass_mcmc]{plot_biomass_mcmc()}}}
\item{ro_alpha}{See \code{refpts_alpha} in \code{\link[=plot_biomass_mcmc]{plot_biomass_mcmc()}}}
\item{palette}{A palette value that is in \link[RColorBrewer:ColorBrewer]{RColorBrewer::brewer.pal.info}}
\item{base_color}{A color to prepend to the brewer colors which are set by
\code{palette}. This is called \code{base_color} because it is likely to be a base
model}
\item{r_dodge}{See \code{bo_dodge} in \code{\link[=plot_biomass_mcmc]{plot_biomass_mcmc()}}}
\item{x_space}{The amount of x-interval space to pad the left and right of
the plot with. To remove all padding, make this 0}
\item{append_base_txt}{A vector of strings to append to the model names for
display on the plot legend or title}
\item{leg_loc}{A two-element vector describing the X-Y values between 0 and
1 to anchor the legend to. eg. c(1, 1) is the top right corner and c(0, 0)
is the bottom left corner}
\item{probs}{A 3-element vector of probabilities that appear in the output
data frames. This is provided in case the data frames have more than three
different quantile levels}
\item{angle_x_labels}{If \code{TRUE} put 45 degree angle on x-axis tick labels}
}
\description{
Plot the MCMC recruitment time series trajectories with credible intervals
for iscam models.
}
\seealso{
Other Time series plotting functions:
\code{\link{plot_biomass_grid_mcmc}()},
\code{\link{plot_biomass_mcmc}()},
\code{\link{plot_biomass_mpd}()},
\code{\link{plot_biomass_proj_mcmc}()},
\code{\link{plot_catch_fit_mcmc}()},
\code{\link{plot_f_mcmc}()},
\code{\link{plot_index_mcmc}()},
\code{\link{plot_index_mpd}()},
\code{\link{plot_q_mcmc}()},
\code{\link{plot_rdevs_mcmc}()},
\code{\link{plot_recr_grid_mcmc}()},
\code{\link{plot_recr_mpd}()},
\code{\link{plot_ts_mcmc}()},
\code{\link{plot_vuln_mcmc}()}
}
\concept{Time series plotting functions}
|
#Testing Spatial Effects
n=20
graph = gridConstructor(100) #4 connections
generations.4graph = c(1)
for (i in 1:n)
{
ga = new.GA.env(GA.base.args=new.GA.base.args(max.gen=500,numPop=2), fitness.args=new.fitness.args(fitness.fn=twoPop.one.max.withCoupling(.5), goal=30, externalConnectionsMatrix=matrix(c(1:100, 1:100), nrow=100, ncol=2)), xover.args = new.xover.args(keepSecondaryParent=FALSE), selection.args=new.selection.args(elitism=TRUE,adjMatrix=graph), verbose=FALSE)
generational.ga(ga)
generations.4graph[i] = ga$gen
print(paste(i,"Complete"))
rm(ga)
}
graph = gridConstructor.withDiag(100) #8 connections
generations.8graph = c(1)
for (i in 1:n)
{
ga = new.GA.env(GA.base.args=new.GA.base.args(max.gen=500,numPop=2), fitness.args=new.fitness.args(fitness.fn=twoPop.one.max.withCoupling(.5), goal=30, externalConnectionsMatrix=matrix(c(1:100, 1:100), nrow=100, ncol=2)), xover.args = new.xover.args(keepSecondaryParent=FALSE), selection.args=new.selection.args(elitism=TRUE,adjMatrix=graph), verbose=FALSE)
generational.ga(ga)
generations.8graph[i] = ga$gen
print(paste(i,"Complete"))
rm(ga)
}
graph = complete.graph(100) #complete connections
generations.complete = c(1)
for (i in 1:n)
{
ga = new.GA.env(GA.base.args=new.GA.base.args(max.gen=500,numPop=2), fitness.args=new.fitness.args(twoPop.one.max.withCoupling(.5), goal=30, externalConnectionsMatrix=matrix(c(1:100, 1:100), nrow=100, ncol=2)), xover.args = new.xover.args(keepSecondaryParent=FALSE), selection.args=new.selection.args(elitism=TRUE,adjMatrix=graph), verbose=FALSE)
generational.ga(ga)
generations.complete[i] = ga$gen
print(paste(i,"Complete"))
rm(ga)
}
graph = ring.graph(100) #ring graph
generations.ring4 = c(1)
for (i in 1:n)
{
ga = new.GA.env(GA.base.args=new.GA.base.args(max.gen=5000,numPop=2), fitness.args=new.fitness.args(twoPop.one.max.withCoupling(), goal=30, externalConnectionsMatrix=matrix(c(1:100, 1:100), nrow=100, ncol=2)), xover.args = new.xover.args(keepSecondaryParent=FALSE), selection.args=new.selection.args(elitism=TRUE,adjMatrix=graph), verbose=FALSE)
generational.ga(ga)
generations.ring4[i] = ga$gen
print(paste(i,"Complete"))
rm(ga)
}
save(generations.ring4,file="coevo.ring4.2elite")
graph = ring.graph.extra(100) #ring graph more connection
generations.ring8 = c(1)
for (i in 1:n)
{
ga = new.GA.env(GA.base.args=new.GA.base.args(max.gen=5000,numPop=2), fitness.args=new.fitness.args(twoPop.one.max.withCoupling(), goal=30, externalConnectionsMatrix=matrix(c(1:100, 1:100), nrow=100, ncol=2)), xover.args = new.xover.args(keepSecondaryParent=FALSE), selection.args=new.selection.args(elitism=TRUE,adjMatrix=graph), verbose=FALSE)
generational.ga(ga)
generations.ring8[i] = ga$gen
print(paste(i,"Complete"))
rm(ga)
}
save(generations.ring8,file="coevo.ring8.2elite")
#random graph - 4 connections
generations.random4 = c(1)
for (i in 1:n)
{
graph =randomConstructor.NoDuplicate(4,100)
ga = new.GA.env(GA.base.args=new.GA.base.args(max.gen=5000,numPop=2), fitness.args=new.fitness.args(twoPop.one.max.withCoupling(.5), goal=30, externalConnectionsMatrix=matrix(c(1:100, 1:100), nrow=100, ncol=2)), xover.args = new.xover.args(keepSecondaryParent=FALSE), selection.args=new.selection.args(elitism=TRUE,adjMatrix=graph), verbose=FALSE)
generational.ga(ga)
generations.random4[i] = ga$gen
print(paste(i,"Complete"))
rm(ga)
}
save(generations.random4,file="coevo.rand4.2elite")
#random graph - 8 connections
generations.random8 = c(1)
for (i in 1:n)
{
graph =randomConstructor.NoDuplicate(8,100)
ga = new.GA.env(GA.base.args=new.GA.base.args(max.gen=5000,numPop=2), fitness.args=new.fitness.args(twoPop.one.max.withCoupling(.5), goal=30, externalConnectionsMatrix=matrix(c(1:100, 1:100), nrow=100, ncol=2)), xover.args = new.xover.args(keepSecondaryParent=FALSE), selection.args=new.selection.args(elitism=TRUE,adjMatrix=graph), verbose=FALSE)
generational.ga(ga)
generations.random8[i] = ga$gen
print(paste(i,"Complete"))
rm(ga)
}
save(generations.random8,file="coevo.rand8.2elite")
#random graph with line - 4 connections
generations.randomWithLine4 = c(1)
for (i in 1:n)
{
graph =randomConstructor.withLine(4,100)
ga = new.GA.env(GA.base.args=new.GA.base.args(max.gen=500,numPop=2), fitness.args=new.fitness.args(twoPop.one.max.withCoupling(.5), goal=30, externalConnectionsMatrix=matrix(c(1:100, 1:100), nrow=100, ncol=2)), xover.args = new.xover.args(keepSecondaryParent=FALSE), selection.args=new.selection.args(elitism=TRUE,adjMatrix=graph), verbose=FALSE)
generational.ga(ga)
generations.randomWithLine4[i] = ga$gen
print(paste(i,"Complete"))
rm(ga)
}
#random graph with line - 8 connections
generations.randomWithLine8 = c(1)
for (i in 1:n)
{
graph =randomConstructor.withLine(8,100)
ga = new.GA.env(GA.base.args=new.GA.base.args(max.gen=500,numPop=2), fitness.args=new.fitness.args(twoPop.one.max.withCoupling(.5), goal=30, externalConnectionsMatrix=matrix(c(1:100, 1:100), nrow=100, ncol=2)), xover.args = new.xover.args(keepSecondaryParent=FALSE), selection.args=new.selection.args(elitism=TRUE,adjMatrix=graph), verbose=FALSE)
generational.ga(ga)
generations.randomWithLine8[i] = ga$gen
print(paste(i,"Complete"))
rm(ga)
}
#random graph 2 seperate pop - 4 connections
generations.rand.2pop.4conn = c(1)
for (i in 1:n)
{
graph =randomConstructor.withSeperatePop.noDuplicate(4,100,2)
ga = new.GA.env(GA.base.args=new.GA.base.args(max.gen=5000,numPop=2), fitness.args=new.fitness.args(twoPop.one.max.withCoupling(.5), goal=30, externalConnectionsMatrix=matrix(c(1:100, 1:100), nrow=100, ncol=2)), xover.args = new.xover.args(keepSecondaryParent=FALSE), selection.args=new.selection.args(elitism=TRUE,adjMatrix=graph), verbose=FALSE)
generational.ga(ga)
generations.rand.2pop.4conn[i] = ga$gen
print(paste(i,"Complete"))
rm(ga)
}
save(generations.rand.2pop.4conn,file="rand.2pop.4conn")
#random graph 2 seperate pop - 8 connections
generations.rand.2pop.8conn = c(1)
for (i in 1:n)
{
graph =randomConstructor.withSeperatePop.noDuplicate(8,100,2)
ga = new.GA.env(GA.base.args=new.GA.base.args(max.gen=5000,numPop=2), fitness.args=new.fitness.args(twoPop.one.max.withCoupling(.5), goal=30, externalConnectionsMatrix=matrix(c(1:100, 1:100), nrow=100, ncol=2)), xover.args = new.xover.args(keepSecondaryParent=FALSE), selection.args=new.selection.args(elitism=TRUE,adjMatrix=graph), verbose=FALSE)
generational.ga(ga)
generations.rand.2pop.8conn[i] = ga$gen
print(paste(i,"Complete"))
rm(ga)
}
save(generations.rand.2pop.8conn,file="rand.2pop.8conn")
#random graph 4 seperate pop - 4 connections
generations.rand.4pop.4conn = c(1)
for (i in 1:n)
{
graph =randomConstructor.withSeperatePop.noDuplicate(4,100,4)
ga = new.GA.env(GA.base.args=new.GA.base.args(max.gen=5000,numPop=2), fitness.args=new.fitness.args(twoPop.one.max.withCoupling(.5), goal=30, externalConnectionsMatrix=matrix(c(1:100, 1:100), nrow=100, ncol=2)), xover.args = new.xover.args(keepSecondaryParent=FALSE), selection.args=new.selection.args(elitism=TRUE,adjMatrix=graph), verbose=FALSE)
generational.ga(ga)
generations.rand.4pop.4conn[i] = ga$gen
print(paste(i,"Complete"))
rm(ga)
}
save(generations.rand.4pop.4conn,file="rand.4pop.4conn")
#random graph 4 seperate pop - 8 connections
generations.rand.4pop.8conn = c(1)
for (i in 1:n)
{
graph =randomConstructor.withSeperatePop.noDuplicate(8,100,4)
ga = new.GA.env(GA.base.args=new.GA.base.args(max.gen=5000,numPop=2), fitness.args=new.fitness.args(twoPop.one.max.withCoupling(.5), goal=30, externalConnectionsMatrix=matrix(c(1:100, 1:100), nrow=100, ncol=2)), xover.args = new.xover.args(keepSecondaryParent=FALSE), selection.args=new.selection.args(elitism=TRUE,adjMatrix=graph), verbose=FALSE)
generational.ga(ga)
generations.rand.4pop.8conn[i] = ga$gen
print(paste(i,"Complete"))
rm(ga)
}
save(generations.rand.4pop.8conn,file="rand.4pop.8conn")
#random graph 10 seperate pop - 4 connections
generations.rand.10pop.4conn = c(1)
for (i in 1:n)
{
graph =randomConstructor.withSeperatePop.noDuplicate(4,100,10)
ga = new.GA.env(GA.base.args=new.GA.base.args(max.gen=5000,numPop=2), fitness.args=new.fitness.args(twoPop.one.max.withCoupling(.5), goal=30, externalConnectionsMatrix=matrix(c(1:100, 1:100), nrow=100, ncol=2)), xover.args = new.xover.args(keepSecondaryParent=FALSE), selection.args=new.selection.args(elitism=TRUE,adjMatrix=graph), verbose=FALSE)
generational.ga(ga)
generations.rand.10pop.4conn[i] = ga$gen
print(paste(i,"Complete"))
rm(ga)
}
save(generations.rand.10pop.4conn,file="rand.10pop.4conn")
#random graph 10 seperate pop - 8 connections
generations.rand.10pop.8conn = c(1)
for (i in 1:n)
{
graph =randomConstructor.withSeperatePop.noDuplicate(8,100,10)
ga = new.GA.env(GA.base.args=new.GA.base.args(max.gen=5000,numPop=2), fitness.args=new.fitness.args(twoPop.one.max.withCoupling(.5), goal=30, externalConnectionsMatrix=matrix(c(1:100, 1:100), nrow=100, ncol=2)), xover.args = new.xover.args(keepSecondaryParent=FALSE), selection.args=new.selection.args(elitism=TRUE,adjMatrix=graph), verbose=FALSE)
generational.ga(ga)
generations.rand.10pop.8conn[i] = ga$gen
print(paste(i,"Complete"))
rm(ga)
}
save(generations.rand.10pop.8conn,file="rand.10pop.8conn")
#Experiment Data
generations.4graph = c(27,129,20,30,25,52,26,73,73,25,39,63,21,88,53,65,28,21,73,39,22,98,25,57,20,42,124,42,111,38,74,19,43,21,19,64,23,124,26,24,36,27,87,26,27,34,64,28,23,51,30,23,18,24,20,61,21,130,18,32,38,33,105,44,33,131,37,30,20,123,45,32,44,29,31,51,47,60,34,88,60,31,53,22,64,30,45,107,100,22,23,31,30,29,24,18,42,23,52,43)
generations.8graph=c(131,95,33,93,28,41,134,31,27,101,32,37,46,42,82,37,83,47,66,22,41,44,142,60,27,41,90,36,34,46,79,45,29,62,34,33,104,37,26,19,28,82,36,35,77,31,48,27,35,50,33,57,24,110,53,69,25,109,42,33,48,112,157,250,43,125,100,76,23,45,22,37,109,167,148,30,166,21,55,60,36,21,22,44,93,24,135,74,30,40,43,40,163,47,39,250,33,24,25,87)
generations.complete = c(61,173,52,76,102,67,485,59,81,42,140,63,149,189,34,49,57,85,136,66,105,56,500,77,76,69,77,59,79,55,68,64,73,91,110,75,93,315,107,69,107,80,174,47,86,175,71,106,34,82,75,75,58,75,326,40,78,66,292,40,123,73,91,282,62,79,33,82,312,61,63,83,87,108,91,95,67,42,236,83,136,168,77,147,52,66,432,114,74,57,138,79,66,44,85,79,102,500,75,52)
generations.ring4=c(45,23,41,48,81,48,20,49,40,31,21,48,63,85,61,109,23,174,74,19,30,21,56,33,62,18,37,99,22,36,65,36,29,20,138,42,21,80,30,25,21,43,18,40,55,27,82,42,56,22,20,24,27,50,21,39,99,45,44,18,33,35,41,26,20,22,20,53,74,38,71,22,32,87,50,259,21,20,60,17,65,26,19,56,21,23,29,23,20,20,23,32,68,20,45,106,46,32,29,110)
generations.ring8 = c(80,28,88,37,44,53,34,34,26,44,73,82,46,33,66,48,70,46,22,21,29,54,29,93,62,32,31,81,30,71,93,61,41,46,29,30,63,55,36,26,48,25,47,75,33,127,54,96,109,56,42,33,44,32,234,194,34,44,36,38,35,45,49,159,40,43,34,51,51,165,35,23,41,28,32,36,26,112,310,54,24,56,189,136,66,28,36,145,70,139,96,30,110,30,45,58,63,40,25,34)
generations.random4 = c(56,43,33,27,23,34,66,30,102,53,33,62,26,33,35,35,68,27,97,32,100,31,22,20,21,54,48,19,33,31,84,149,20,22,43,173,41,79,100,88,18,46,38,96,17,42,42,33,208,500,19,32,51,68,19,20,76,19,18,169,119,49,15,33,34,59,57,72,200,24,12,61,39,22,62,38,57,25,42,30,66,40,31,25,23,22,23,46,31,34,28,24,33,22,71,30,132,40,38,56)
generations.random8 = c(95,42,37,33,45,44,51,29,43,24,402,137,103,54,31,42,500,164,63,95,38,72,44,44,37,226,49,214,114,38,123,49,144,33,38,38,42,27,23,31,99,42,114,28,148,34,151,89,29,51,29,42,48,29,30,108,31,79,64,34,28,26,29,500,39,43,106,31,51,45,35,41,104,82,107,52,249,26,32,102,115,76,20,55,163,500,44,39,53,68,53,29,28,32,33,30,59,39,50,84)
# NOTE(review): the raw dumps for the two "random with line" topologies
# contained runs of space-separated values (invalid R syntax), which made
# both assignments unparseable. Commas restored; each vector now parses as
# the 100 recorded generation counts (values unchanged).
generations.randomWithLine4 = c(34,31,28,101,61,35,50,76,320,19,500,85,97,30,17,34,19,23,52,160,54,33,96,34,32,26,28,25,88,24,22,79,33,62,91,29,24,19,28,23,103,33,26,43,27,63,64,44,35,19,45,26,500,245,37,58,31,251,41,90,23,25,49,75,37,68,29,30,114,72,117,48,57,45,23,56,67,20,17,40,18,55,28,19,25,40,23,65,25,70,20,151,29,39,19,28,27,77,51,21)
generations.randomWithLine8 = c(36,37,78,279,500,23,500,22,26,150,30,26,31,50,44,149,112,40,180,133,159,26,105,103,60,77,46,31,500,46,30,50,33,76,115,111,187,25,73,54,68,98,99,41,378,28,33,142,51,22,37,126,87,68,52,44,37,24,23,116,21,29,29,23,95,500,110,43,36,29,28,24,21,48,42,43,134,41,38,37,53,39,155,36,35,75,30,27,79,34,36,205,100,98,32,500,37,35,53,500)
median(generations.4graph);median(generations.8graph);median(generations.complete);median(generations.ring4);median(generations.ring8);median(generations.random4);median(generations.random8);median(generations.randomWithLine4);median(generations.randomWithLine8)
var(generations.4graph);var(generations.random4);var(generations.randomWithLine4);var(generations.ring4)
var(generations.8graph);var(generations.random8);var(generations.randomWithLine8);var(generations.ring8)
var(generations.complete)
boxplot(generations.4graph,generations.8graph,generations.complete,ylab="Generations", names=c("4 Grid", "8 Grid", "Complete"), main="Spatial Effects on Spatial One-Max")
# NOTE(review): two defects fixed in this call. (1) The 3rd/4th labels were
# swapped relative to the data arguments: position 3 is generations.8graph
# (a grid) and position 4 is generations.ring8 (a ring), but the labels read
# "8 Ring" then "8 Grid". (2) A trailing empty argument (`, )`) makes R raise
# an "argument is empty" error at evaluation time; the stray comma is removed
# and a title consistent with the plot above is supplied.
boxplot(generations.4graph, generations.ring4, generations.8graph, generations.ring8, generations.complete, ylab = "Generations", names = c("4 Grid", "4 Ring", "8 Grid", "8 Ring", "Complete"), main = "Spatial Effects on Spatial One-Max")
|
/Testing Functions/spatial_oneMax_Experiment.R
|
no_license
|
Fozefy/GeneticAlgorithm
|
R
| false
| false
| 13,355
|
r
|
#Testing Spatial Effects
n=20
graph = gridConstructor(100) #4 connections
generations.4graph = c(1)
for (i in 1:n)
{
ga = new.GA.env(GA.base.args=new.GA.base.args(max.gen=500,numPop=2), fitness.args=new.fitness.args(fitness.fn=twoPop.one.max.withCoupling(.5), goal=30, externalConnectionsMatrix=matrix(c(1:100, 1:100), nrow=100, ncol=2)), xover.args = new.xover.args(keepSecondaryParent=FALSE), selection.args=new.selection.args(elitism=TRUE,adjMatrix=graph), verbose=FALSE)
generational.ga(ga)
generations.4graph[i] = ga$gen
print(paste(i,"Complete"))
rm(ga)
}
graph = gridConstructor.withDiag(100) #8 connections
generations.8graph = c(1)
for (i in 1:n)
{
ga = new.GA.env(GA.base.args=new.GA.base.args(max.gen=500,numPop=2), fitness.args=new.fitness.args(fitness.fn=twoPop.one.max.withCoupling(.5), goal=30, externalConnectionsMatrix=matrix(c(1:100, 1:100), nrow=100, ncol=2)), xover.args = new.xover.args(keepSecondaryParent=FALSE), selection.args=new.selection.args(elitism=TRUE,adjMatrix=graph), verbose=FALSE)
generational.ga(ga)
generations.8graph[i] = ga$gen
print(paste(i,"Complete"))
rm(ga)
}
graph = complete.graph(100) #complete connections
generations.complete = c(1)
for (i in 1:n)
{
ga = new.GA.env(GA.base.args=new.GA.base.args(max.gen=500,numPop=2), fitness.args=new.fitness.args(twoPop.one.max.withCoupling(.5), goal=30, externalConnectionsMatrix=matrix(c(1:100, 1:100), nrow=100, ncol=2)), xover.args = new.xover.args(keepSecondaryParent=FALSE), selection.args=new.selection.args(elitism=TRUE,adjMatrix=graph), verbose=FALSE)
generational.ga(ga)
generations.complete[i] = ga$gen
print(paste(i,"Complete"))
rm(ga)
}
graph = ring.graph(100) #ring graph
generations.ring4 = c(1)
for (i in 1:n)
{
ga = new.GA.env(GA.base.args=new.GA.base.args(max.gen=5000,numPop=2), fitness.args=new.fitness.args(twoPop.one.max.withCoupling(), goal=30, externalConnectionsMatrix=matrix(c(1:100, 1:100), nrow=100, ncol=2)), xover.args = new.xover.args(keepSecondaryParent=FALSE), selection.args=new.selection.args(elitism=TRUE,adjMatrix=graph), verbose=FALSE)
generational.ga(ga)
generations.ring4[i] = ga$gen
print(paste(i,"Complete"))
rm(ga)
}
save(generations.ring4,file="coevo.ring4.2elite")
graph = ring.graph.extra(100) #ring graph more connection
generations.ring8 = c(1)
for (i in 1:n)
{
ga = new.GA.env(GA.base.args=new.GA.base.args(max.gen=5000,numPop=2), fitness.args=new.fitness.args(twoPop.one.max.withCoupling(), goal=30, externalConnectionsMatrix=matrix(c(1:100, 1:100), nrow=100, ncol=2)), xover.args = new.xover.args(keepSecondaryParent=FALSE), selection.args=new.selection.args(elitism=TRUE,adjMatrix=graph), verbose=FALSE)
generational.ga(ga)
generations.ring8[i] = ga$gen
print(paste(i,"Complete"))
rm(ga)
}
save(generations.ring8,file="coevo.ring8.2elite")
#random graph - 4 connections
generations.random4 = c(1)
for (i in 1:n)
{
graph =randomConstructor.NoDuplicate(4,100)
ga = new.GA.env(GA.base.args=new.GA.base.args(max.gen=5000,numPop=2), fitness.args=new.fitness.args(twoPop.one.max.withCoupling(.5), goal=30, externalConnectionsMatrix=matrix(c(1:100, 1:100), nrow=100, ncol=2)), xover.args = new.xover.args(keepSecondaryParent=FALSE), selection.args=new.selection.args(elitism=TRUE,adjMatrix=graph), verbose=FALSE)
generational.ga(ga)
generations.random4[i] = ga$gen
print(paste(i,"Complete"))
rm(ga)
}
save(generations.random4,file="coevo.rand4.2elite")
#random graph - 8 connections
generations.random8 = c(1)
for (i in 1:n)
{
graph =randomConstructor.NoDuplicate(8,100)
ga = new.GA.env(GA.base.args=new.GA.base.args(max.gen=5000,numPop=2), fitness.args=new.fitness.args(twoPop.one.max.withCoupling(.5), goal=30, externalConnectionsMatrix=matrix(c(1:100, 1:100), nrow=100, ncol=2)), xover.args = new.xover.args(keepSecondaryParent=FALSE), selection.args=new.selection.args(elitism=TRUE,adjMatrix=graph), verbose=FALSE)
generational.ga(ga)
generations.random8[i] = ga$gen
print(paste(i,"Complete"))
rm(ga)
}
save(generations.random8,file="coevo.rand8.2elite")
#random graph with line - 4 connections
generations.randomWithLine4 = c(1)
for (i in 1:n)
{
graph =randomConstructor.withLine(4,100)
ga = new.GA.env(GA.base.args=new.GA.base.args(max.gen=500,numPop=2), fitness.args=new.fitness.args(twoPop.one.max.withCoupling(.5), goal=30, externalConnectionsMatrix=matrix(c(1:100, 1:100), nrow=100, ncol=2)), xover.args = new.xover.args(keepSecondaryParent=FALSE), selection.args=new.selection.args(elitism=TRUE,adjMatrix=graph), verbose=FALSE)
generational.ga(ga)
generations.randomWithLine4[i] = ga$gen
print(paste(i,"Complete"))
rm(ga)
}
#random graph with line - 8 connections
generations.randomWithLine8 = c(1)
for (i in 1:n)
{
graph =randomConstructor.withLine(8,100)
ga = new.GA.env(GA.base.args=new.GA.base.args(max.gen=500,numPop=2), fitness.args=new.fitness.args(twoPop.one.max.withCoupling(.5), goal=30, externalConnectionsMatrix=matrix(c(1:100, 1:100), nrow=100, ncol=2)), xover.args = new.xover.args(keepSecondaryParent=FALSE), selection.args=new.selection.args(elitism=TRUE,adjMatrix=graph), verbose=FALSE)
generational.ga(ga)
generations.randomWithLine8[i] = ga$gen
print(paste(i,"Complete"))
rm(ga)
}
#random graph 2 seperate pop - 4 connections
generations.rand.2pop.4conn = c(1)
for (i in 1:n)
{
graph =randomConstructor.withSeperatePop.noDuplicate(4,100,2)
ga = new.GA.env(GA.base.args=new.GA.base.args(max.gen=5000,numPop=2), fitness.args=new.fitness.args(twoPop.one.max.withCoupling(.5), goal=30, externalConnectionsMatrix=matrix(c(1:100, 1:100), nrow=100, ncol=2)), xover.args = new.xover.args(keepSecondaryParent=FALSE), selection.args=new.selection.args(elitism=TRUE,adjMatrix=graph), verbose=FALSE)
generational.ga(ga)
generations.rand.2pop.4conn[i] = ga$gen
print(paste(i,"Complete"))
rm(ga)
}
save(generations.rand.2pop.4conn,file="rand.2pop.4conn")
#random graph 2 seperate pop - 8 connections
generations.rand.2pop.8conn = c(1)
for (i in 1:n)
{
graph =randomConstructor.withSeperatePop.noDuplicate(8,100,2)
ga = new.GA.env(GA.base.args=new.GA.base.args(max.gen=5000,numPop=2), fitness.args=new.fitness.args(twoPop.one.max.withCoupling(.5), goal=30, externalConnectionsMatrix=matrix(c(1:100, 1:100), nrow=100, ncol=2)), xover.args = new.xover.args(keepSecondaryParent=FALSE), selection.args=new.selection.args(elitism=TRUE,adjMatrix=graph), verbose=FALSE)
generational.ga(ga)
generations.rand.2pop.8conn[i] = ga$gen
print(paste(i,"Complete"))
rm(ga)
}
save(generations.rand.2pop.8conn,file="rand.2pop.8conn")
#random graph 4 seperate pop - 4 connections
generations.rand.4pop.4conn = c(1)
for (i in 1:n)
{
graph =randomConstructor.withSeperatePop.noDuplicate(4,100,4)
ga = new.GA.env(GA.base.args=new.GA.base.args(max.gen=5000,numPop=2), fitness.args=new.fitness.args(twoPop.one.max.withCoupling(.5), goal=30, externalConnectionsMatrix=matrix(c(1:100, 1:100), nrow=100, ncol=2)), xover.args = new.xover.args(keepSecondaryParent=FALSE), selection.args=new.selection.args(elitism=TRUE,adjMatrix=graph), verbose=FALSE)
generational.ga(ga)
generations.rand.4pop.4conn[i] = ga$gen
print(paste(i,"Complete"))
rm(ga)
}
save(generations.rand.4pop.4conn,file="rand.4pop.4conn")
#random graph 4 seperate pop - 8 connections
generations.rand.4pop.8conn = c(1)
for (i in 1:n)
{
graph =randomConstructor.withSeperatePop.noDuplicate(8,100,4)
ga = new.GA.env(GA.base.args=new.GA.base.args(max.gen=5000,numPop=2), fitness.args=new.fitness.args(twoPop.one.max.withCoupling(.5), goal=30, externalConnectionsMatrix=matrix(c(1:100, 1:100), nrow=100, ncol=2)), xover.args = new.xover.args(keepSecondaryParent=FALSE), selection.args=new.selection.args(elitism=TRUE,adjMatrix=graph), verbose=FALSE)
generational.ga(ga)
generations.rand.4pop.8conn[i] = ga$gen
print(paste(i,"Complete"))
rm(ga)
}
save(generations.rand.4pop.8conn,file="rand.4pop.8conn")
#random graph 10 seperate pop - 4 connections
generations.rand.10pop.4conn = c(1)
for (i in 1:n)
{
graph =randomConstructor.withSeperatePop.noDuplicate(4,100,10)
ga = new.GA.env(GA.base.args=new.GA.base.args(max.gen=5000,numPop=2), fitness.args=new.fitness.args(twoPop.one.max.withCoupling(.5), goal=30, externalConnectionsMatrix=matrix(c(1:100, 1:100), nrow=100, ncol=2)), xover.args = new.xover.args(keepSecondaryParent=FALSE), selection.args=new.selection.args(elitism=TRUE,adjMatrix=graph), verbose=FALSE)
generational.ga(ga)
generations.rand.10pop.4conn[i] = ga$gen
print(paste(i,"Complete"))
rm(ga)
}
save(generations.rand.10pop.4conn,file="rand.10pop.4conn")
#random graph 10 seperate pop - 8 connections
generations.rand.10pop.8conn = c(1)
for (i in 1:n)
{
graph =randomConstructor.withSeperatePop.noDuplicate(8,100,10)
ga = new.GA.env(GA.base.args=new.GA.base.args(max.gen=5000,numPop=2), fitness.args=new.fitness.args(twoPop.one.max.withCoupling(.5), goal=30, externalConnectionsMatrix=matrix(c(1:100, 1:100), nrow=100, ncol=2)), xover.args = new.xover.args(keepSecondaryParent=FALSE), selection.args=new.selection.args(elitism=TRUE,adjMatrix=graph), verbose=FALSE)
generational.ga(ga)
generations.rand.10pop.8conn[i] = ga$gen
print(paste(i,"Complete"))
rm(ga)
}
save(generations.rand.10pop.8conn,file="rand.10pop.8conn")
#Experiment Data
generations.4graph = c(27,129,20,30,25,52,26,73,73,25,39,63,21,88,53,65,28,21,73,39,22,98,25,57,20,42,124,42,111,38,74,19,43,21,19,64,23,124,26,24,36,27,87,26,27,34,64,28,23,51,30,23,18,24,20,61,21,130,18,32,38,33,105,44,33,131,37,30,20,123,45,32,44,29,31,51,47,60,34,88,60,31,53,22,64,30,45,107,100,22,23,31,30,29,24,18,42,23,52,43)
generations.8graph=c(131,95,33,93,28,41,134,31,27,101,32,37,46,42,82,37,83,47,66,22,41,44,142,60,27,41,90,36,34,46,79,45,29,62,34,33,104,37,26,19,28,82,36,35,77,31,48,27,35,50,33,57,24,110,53,69,25,109,42,33,48,112,157,250,43,125,100,76,23,45,22,37,109,167,148,30,166,21,55,60,36,21,22,44,93,24,135,74,30,40,43,40,163,47,39,250,33,24,25,87)
generations.complete = c(61,173,52,76,102,67,485,59,81,42,140,63,149,189,34,49,57,85,136,66,105,56,500,77,76,69,77,59,79,55,68,64,73,91,110,75,93,315,107,69,107,80,174,47,86,175,71,106,34,82,75,75,58,75,326,40,78,66,292,40,123,73,91,282,62,79,33,82,312,61,63,83,87,108,91,95,67,42,236,83,136,168,77,147,52,66,432,114,74,57,138,79,66,44,85,79,102,500,75,52)
generations.ring4=c(45,23,41,48,81,48,20,49,40,31,21,48,63,85,61,109,23,174,74,19,30,21,56,33,62,18,37,99,22,36,65,36,29,20,138,42,21,80,30,25,21,43,18,40,55,27,82,42,56,22,20,24,27,50,21,39,99,45,44,18,33,35,41,26,20,22,20,53,74,38,71,22,32,87,50,259,21,20,60,17,65,26,19,56,21,23,29,23,20,20,23,32,68,20,45,106,46,32,29,110)
generations.ring8 = c(80,28,88,37,44,53,34,34,26,44,73,82,46,33,66,48,70,46,22,21,29,54,29,93,62,32,31,81,30,71,93,61,41,46,29,30,63,55,36,26,48,25,47,75,33,127,54,96,109,56,42,33,44,32,234,194,34,44,36,38,35,45,49,159,40,43,34,51,51,165,35,23,41,28,32,36,26,112,310,54,24,56,189,136,66,28,36,145,70,139,96,30,110,30,45,58,63,40,25,34)
generations.random4 = c(56,43,33,27,23,34,66,30,102,53,33,62,26,33,35,35,68,27,97,32,100,31,22,20,21,54,48,19,33,31,84,149,20,22,43,173,41,79,100,88,18,46,38,96,17,42,42,33,208,500,19,32,51,68,19,20,76,19,18,169,119,49,15,33,34,59,57,72,200,24,12,61,39,22,62,38,57,25,42,30,66,40,31,25,23,22,23,46,31,34,28,24,33,22,71,30,132,40,38,56)
generations.random8 = c(95,42,37,33,45,44,51,29,43,24,402,137,103,54,31,42,500,164,63,95,38,72,44,44,37,226,49,214,114,38,123,49,144,33,38,38,42,27,23,31,99,42,114,28,148,34,151,89,29,51,29,42,48,29,30,108,31,79,64,34,28,26,29,500,39,43,106,31,51,45,35,41,104,82,107,52,249,26,32,102,115,76,20,55,163,500,44,39,53,68,53,29,28,32,33,30,59,39,50,84)
# NOTE(review): the raw dumps for the two "random with line" topologies
# contained runs of space-separated values (invalid R syntax), which made
# both assignments unparseable. Commas restored; each vector now parses as
# the 100 recorded generation counts (values unchanged).
generations.randomWithLine4 = c(34,31,28,101,61,35,50,76,320,19,500,85,97,30,17,34,19,23,52,160,54,33,96,34,32,26,28,25,88,24,22,79,33,62,91,29,24,19,28,23,103,33,26,43,27,63,64,44,35,19,45,26,500,245,37,58,31,251,41,90,23,25,49,75,37,68,29,30,114,72,117,48,57,45,23,56,67,20,17,40,18,55,28,19,25,40,23,65,25,70,20,151,29,39,19,28,27,77,51,21)
generations.randomWithLine8 = c(36,37,78,279,500,23,500,22,26,150,30,26,31,50,44,149,112,40,180,133,159,26,105,103,60,77,46,31,500,46,30,50,33,76,115,111,187,25,73,54,68,98,99,41,378,28,33,142,51,22,37,126,87,68,52,44,37,24,23,116,21,29,29,23,95,500,110,43,36,29,28,24,21,48,42,43,134,41,38,37,53,39,155,36,35,75,30,27,79,34,36,205,100,98,32,500,37,35,53,500)
median(generations.4graph);median(generations.8graph);median(generations.complete);median(generations.ring4);median(generations.ring8);median(generations.random4);median(generations.random8);median(generations.randomWithLine4);median(generations.randomWithLine8)
var(generations.4graph);var(generations.random4);var(generations.randomWithLine4);var(generations.ring4)
var(generations.8graph);var(generations.random8);var(generations.randomWithLine8);var(generations.ring8)
var(generations.complete)
boxplot(generations.4graph,generations.8graph,generations.complete,ylab="Generations", names=c("4 Grid", "8 Grid", "Complete"), main="Spatial Effects on Spatial One-Max")
# NOTE(review): two defects fixed in this call. (1) The 3rd/4th labels were
# swapped relative to the data arguments: position 3 is generations.8graph
# (a grid) and position 4 is generations.ring8 (a ring), but the labels read
# "8 Ring" then "8 Grid". (2) A trailing empty argument (`, )`) makes R raise
# an "argument is empty" error at evaluation time; the stray comma is removed
# and a title consistent with the plot above is supplied.
boxplot(generations.4graph, generations.ring4, generations.8graph, generations.ring8, generations.complete, ylab = "Generations", names = c("4 Grid", "4 Ring", "8 Grid", "8 Ring", "Complete"), main = "Spatial Effects on Spatial One-Max")
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/od-funs.R
\name{line2df}
\alias{line2df}
\title{Convert straight SpatialLinesDataFrame to a data.frame with from and to coords}
\usage{
line2df(l)
}
\arguments{
\item{l}{A SpatialLinesDataFrame}
}
\description{
Convert straight SpatialLinesDataFrame to a data.frame with from and to coords
}
\examples{
\dontrun{
data(flowlines) # load demo flowlines dataset
ldf <- line2df(flowlines)
}
}
|
/man/line2df.Rd
|
permissive
|
bestwpw/stplanr
|
R
| false
| true
| 468
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/od-funs.R
\name{line2df}
\alias{line2df}
\title{Convert straight SpatialLinesDataFrame to a data.frame with from and to coords}
\usage{
line2df(l)
}
\arguments{
\item{l}{A SpatialLinesDataFrame}
}
\description{
Convert straight SpatialLinesDataFrame to a data.frame with from and to coords
}
\examples{
\dontrun{
data(flowlines) # load demo flowlines dataset
ldf <- line2df(flowlines)
}
}
|
#' Tests the white noise assumption for a VAR model using a portmanteau test on the residuals
#'
#' This function tests the white noise assumption for the residuals of the
#' endogenous variables in the specified VAR model. It implements the
#' Ljung-Box portmanteau test; results are comparable with STATA's
#' \code{wntestq}. Of the p-levels obtained per residual series, the minimum
#' is returned.
#' @param varest A \code{varest} model.
#' @return This function returns a p-level.
#' @examples
#' data_matrix <- matrix(nrow = 40, ncol = 3)
#' data_matrix[, ] <- runif(ncol(data_matrix) * nrow(data_matrix), 1, nrow(data_matrix))
#' colnames(data_matrix) <- c('rumination', 'happiness', 'activity')
#' varest <- autovarCore:::run_var(data_matrix, NULL, 1)
#' autovarCore:::assess_portmanteau(varest)
assess_portmanteau <- function(varest) {
  residual_matrix <- unname(resid(varest))
  portmanteau_test_data(residual_matrix)
}
portmanteau_test_data <- function(data) {
  # Shared helper (also called by assess_portmanteau_squared): runs the
  # Ljung-Box statistic on every column of `data` and returns the smallest
  # of the resulting p-levels.
  n_vars <- ncol(data)
  n_obs <- nrow(data)
  if (is.null(n_vars) || n_vars < 1 || is.null(n_obs) || n_obs < 1)
    stop("No residuals found")
  lag_count <- determine_portmanteau_lags(data)
  if (lag_count < 1)
    stop("Not enough observations in the data")
  # One p-level per residual series; the weakest (smallest) one is reported.
  p_levels <- vapply(
    seq_len(n_vars),
    function(var_index) {
      statistic <- portmanteau_test_statistic(data[, var_index], n_obs, lag_count)
      chi_squared_prob(statistic, lag_count)
    },
    numeric(1)
  )
  min(p_levels)
}
determine_portmanteau_lags <- function(data) {
  # Number of lags for the portmanteau test, mirroring STATA's wntestq
  # default: floor(n/2) - 2 observations' worth of lags, capped at 40.
  stata_cap <- 40
  candidate <- floor(nrow(data) / 2) - 2
  min(candidate, stata_cap)
}
portmanteau_test_statistic <- function(data, n, h) {
  # Ljung-Box Q statistic for a single series over lags 1..h:
  # Q = n (n + 2) * sum_k acf(k)^2 / (n - k), computed on the mean-centered
  # series. Under H0 (white noise) Q is approximately chi-squared with h df.
  centered <- data - mean(data)
  per_lag <- vapply(
    seq_len(h),
    function(k) sample_autocorrelation(centered, k, n)^2 / (n - k),
    numeric(1)
  )
  n * (n + 2) * sum(per_lag)
}
sample_autocorrelation <- function(data, k, n) {
  # Lag-k sample autocorrelation as defined in the Ljung-Box paper:
  # sum_{t=k+1}^{n} x_t * x_{t-k} divided by sum_{t=1}^{n} x_t^2.
  # Assumes 1 <= k < n (guaranteed by the callers in this file).
  lagged_products <- data[(k + 1):n] * data[seq_len(n - k)]
  sum(lagged_products) / sum(data^2)
}
chi_squared_prob <- function(q, h) {
  # Upper-tail chi-squared probability P(X > q) with h degrees of freedom.
  # lower.tail = FALSE is kept (rather than 1 - pchisq(...)) to preserve
  # numerical precision for large test statistics.
  pchisq(q = q, df = h, lower.tail = FALSE)
}
|
/autovarCore/R/assess_portmanteau.r
|
no_license
|
ingted/R-Examples
|
R
| false
| false
| 2,443
|
r
|
#' Tests the white noise assumption for a VAR model using a portmanteau test on the residuals
#'
#' This function tests the white noise assumption for the residuals of the endogenous variables in the specified VAR model. This function implements the portmanteau test known as the Ljung-Box test, and results are comparable with STATA's \code{wntestq}. Of the p-levels resulting from assessing the white noise assumption for the residuals of that variable, the minimum is returned.
#' @param varest A \code{varest} model.
#' @return This function returns a p-level.
#' @examples
#' data_matrix <- matrix(nrow = 40, ncol = 3)
#' data_matrix[, ] <- runif(ncol(data_matrix) * nrow(data_matrix), 1, nrow(data_matrix))
#' colnames(data_matrix) <- c('rumination', 'happiness', 'activity')
#' varest <- autovarCore:::run_var(data_matrix, NULL, 1)
#' autovarCore:::assess_portmanteau(varest)
assess_portmanteau <- function(varest) {
data <- unname(resid(varest))
portmanteau_test_data(data)
}
portmanteau_test_data <- function(data) {
# This function is also used by assess_portmanteau_squared.
nr_cols <- ncol(data)
nr_rows <- nrow(data)
if (is.null(nr_cols) || nr_cols < 1 || is.null(nr_rows) || nr_rows < 1)
stop("No residuals found")
port_lags <- determine_portmanteau_lags(data)
if (port_lags < 1)
stop("Not enough observations in the data")
minimum_p_level_port <- Inf
for (column_index in 1:nr_cols) {
column_data <- data[, column_index]
port_test_statistic <- portmanteau_test_statistic(column_data, nr_rows, port_lags)
p_level_port <- chi_squared_prob(port_test_statistic, port_lags)
if (p_level_port < minimum_p_level_port)
minimum_p_level_port <- p_level_port
}
minimum_p_level_port
}
determine_portmanteau_lags <- function(data) {
# This is the default value used in STATA.
min(floor(nrow(data)/2) - 2, 40)
}
portmanteau_test_statistic <- function(data, n, h) {
data <- data - mean(data)
suma <- 0
for (k in 1:h)
suma <- suma + (sample_autocorrelation(data, k, n)^2)/(n - k)
q <- n * (n + 2) * suma
q
}
sample_autocorrelation <- function(data, k, n) {
res <- 0
for (t in (k + 1):n)
res <- res + data[t] * data[t - k]
# See the paper of Ljung-Box test for this definition of autocorrelation.
denom <- 0
for (t in 1:n)
denom <- denom + data[t]^2
res <- res/denom
res
}
chi_squared_prob <- function(q, h) {
pchisq(q, h, lower.tail = FALSE)
}
|
# Function to test for overdispersion in any model
#
# source: https://stat.ethz.ch/pipermail/r-sig-mixed-models/2011q1/015392.html
dispersion_glmer <- function(modelglmer){
  # Estimated scale for a (binomial) glmer fit, following D. Bates:
  # the square root of the penalized residual sum of squares — residuals
  # plus the random-effect spherical components in the @u slot — divided
  # by n, the number of observations.
  model_residuals <- resid(modelglmer)
  n <- length(model_residuals)
  penalized_rss <- sum(c(model_residuals, modelglmer@u)^2)
  sqrt(penalized_rss / n)
}
# A value between roughly 0.75 and 1.4 suggests neither under- nor overdispersion.
|
/R/dispersion_glmer.R
|
no_license
|
cran/blmeco
|
R
| false
| false
| 577
|
r
|
# Function to test for overdispersion in any model
#
# source: https://stat.ethz.ch/pipermail/r-sig-mixed-models/2011q1/015392.html
#
# Returns the estimated scale: sqrt of the penalized residual sum of
# squares (residuals plus spherical random effects) divided by the number
# of observations.
# NOTE(review): modelglmer@u reads an S4 slot (the spherical random-effects
# vector of an lme4 "merMod" fit), so this only works on (g)lmer models --
# confirm against the lme4 version in use.
dispersion_glmer <- function(modelglmer){
# computing estimated scale ( binomial model) following D. Bates :
# That quantity is the square root of the penalized residual sum of
# squares divided by n, the number of observations, evaluated as:
n <- length(resid(modelglmer))
return( sqrt( sum(c(resid(modelglmer),modelglmer@u) ^2) / n ) )
}
#should be between, 0.75 and 1.4 if not under- or overdispersed, respectively
|
# ------------- AMR gene analysis - Intrinsic genes ---------------
## This track of the ARIBA analysis script analyses AMR gene reports
## from ARIBA and generates result files based on user input of
## selected genes of interest.
# ------------------------- Parameters ----------------------------
args <- commandArgs(trailingOnly = TRUE)
in_report_loc <- args[1]          # directory holding the ARIBA reports
output_loc <- args[2]             # directory for the result files
genes <- args[3]                  # comma separated gene list, or "all"
ending <- as.character(args[4])   # report file name suffix
gyr_par_fix <- args[5]            # optional 5th arg: apply gyrA/parC fix
# adjust parameters for filtering
if (grepl("all", genes, ignore.case = TRUE)) {
  genes <- "ALL"
} else {
  genes <- unlist(strsplit(genes, ",", fixed = TRUE))
}
# ------------------------ Load libraries -------------------------
packages <-
  c(
    "dplyr",
    "tidyr",
    "purrr",
    "stringr",
    "impoRt",
    "vampfunc",
    "funtools"
  )
suppressPackageStartupMessages(
  invisible(lapply(packages, function(x)
    library(
      x,
      character.only = TRUE,
      quietly = TRUE,
      warn.conflicts = FALSE
    ))))
# -------------------------- Analysis ----------------------------
# Create output directory
dir.create(paste0(output_loc, "/amr_in/"), showWarnings = FALSE)
amr_output <- paste0(output_loc, "/amr_in/")
## Intrinsic genes
in_data <- get_data(in_report_loc,
                    ending,
                    convert = TRUE) %>%
  fix_gene_names(ending, db = "res")
in_flags <- check_flags(in_data)
write.table(in_flags,
            paste0(amr_output, "intrinsic_flag_report.tsv"),
            sep = "\t",
            row.names = FALSE,
            quote = FALSE)
# Abort early when every flag was rejected; downstream tables would be
# empty. stop() now carries the message itself (the original print()ed
# the message and then raised an empty error).
if (all(in_flags$flag_result == 0)) {
  stop("No flags accepted, please check the flag report")
}
in_table <- create_table(in_data, acquired = FALSE)
# BUG FIX: the original tested exists("gyr_par_fix"), which is always
# TRUE because the variable is assigned from args[5] above. The intended
# check is whether a fifth command line argument was actually supplied
# (args[5] is NA when it was not). The no-op else branch was removed.
if (!is.na(gyr_par_fix)) {
  in_table <- fix_gyr_par_results(in_table)
}
if ("ALL" %in% genes) {
  in_table_filtered <- in_table
} else {
  in_table_filtered <- filter_table(in_table, genes)
  in_flags <- filter_table(in_flags, genes)
}
in_report <- create_report(in_table_filtered, mut = FALSE)
in_mut_report <- create_report(in_table_filtered, mut = TRUE)
in_stats <- calc_stats(in_table_filtered)
## Write results to file
write.table(in_report,
            paste0(amr_output, "intrinsic_gene_report.tsv"),
            sep = "\t",
            row.names = FALSE,
            quote = FALSE)
write.table(in_mut_report,
            paste0(amr_output, "intrinsic_mut_report.tsv"),
            sep = "\t",
            row.names = FALSE,
            quote = FALSE)
write.table(in_stats,
            paste0(amr_output, "intrinsic_gene_stats.tsv"),
            sep = "\t",
            row.names = FALSE,
            quote = FALSE)
|
/src/intrinsic_script.R
|
permissive
|
hkaspersen/VAMPIR
|
R
| false
| false
| 2,709
|
r
|
# ------------- AMR gene analysis - Intrinsic genes ---------------
## This track of the ARIBA analysis script analyses AMR gene reports
## from ARIBA and generates result files based on user input of
## selected genes of interest.
# ------------------------- Parameters ----------------------------
args <- commandArgs(trailingOnly = TRUE)
in_report_loc <- args[1]          # directory holding the ARIBA reports
output_loc <- args[2]             # directory for the result files
genes <- args[3]                  # comma separated gene list, or "all"
ending <- as.character(args[4])   # report file name suffix
gyr_par_fix <- args[5]            # optional 5th arg: apply gyrA/parC fix
# adjust parameters for filtering
if (grepl("all", genes, ignore.case = TRUE)) {
  genes <- "ALL"
} else {
  genes <- unlist(strsplit(genes, ",", fixed = TRUE))
}
# ------------------------ Load libraries -------------------------
packages <-
  c(
    "dplyr",
    "tidyr",
    "purrr",
    "stringr",
    "impoRt",
    "vampfunc",
    "funtools"
  )
suppressPackageStartupMessages(
  invisible(lapply(packages, function(x)
    library(
      x,
      character.only = TRUE,
      quietly = TRUE,
      warn.conflicts = FALSE
    ))))
# -------------------------- Analysis ----------------------------
# Create output directory
dir.create(paste0(output_loc, "/amr_in/"), showWarnings = FALSE)
amr_output <- paste0(output_loc, "/amr_in/")
## Intrinsic genes
in_data <- get_data(in_report_loc,
                    ending,
                    convert = TRUE) %>%
  fix_gene_names(ending, db = "res")
in_flags <- check_flags(in_data)
write.table(in_flags,
            paste0(amr_output, "intrinsic_flag_report.tsv"),
            sep = "\t",
            row.names = FALSE,
            quote = FALSE)
# Abort early when every flag was rejected; downstream tables would be
# empty. stop() now carries the message itself (the original print()ed
# the message and then raised an empty error).
if (all(in_flags$flag_result == 0)) {
  stop("No flags accepted, please check the flag report")
}
in_table <- create_table(in_data, acquired = FALSE)
# BUG FIX: the original tested exists("gyr_par_fix"), which is always
# TRUE because the variable is assigned from args[5] above. The intended
# check is whether a fifth command line argument was actually supplied
# (args[5] is NA when it was not). The no-op else branch was removed.
if (!is.na(gyr_par_fix)) {
  in_table <- fix_gyr_par_results(in_table)
}
if ("ALL" %in% genes) {
  in_table_filtered <- in_table
} else {
  in_table_filtered <- filter_table(in_table, genes)
  in_flags <- filter_table(in_flags, genes)
}
in_report <- create_report(in_table_filtered, mut = FALSE)
in_mut_report <- create_report(in_table_filtered, mut = TRUE)
in_stats <- calc_stats(in_table_filtered)
## Write results to file
write.table(in_report,
            paste0(amr_output, "intrinsic_gene_report.tsv"),
            sep = "\t",
            row.names = FALSE,
            quote = FALSE)
write.table(in_mut_report,
            paste0(amr_output, "intrinsic_mut_report.tsv"),
            sep = "\t",
            row.names = FALSE,
            quote = FALSE)
write.table(in_stats,
            paste0(amr_output, "intrinsic_gene_stats.tsv"),
            sep = "\t",
            row.names = FALSE,
            quote = FALSE)
|
# S3 coef() method for "fittedloop" objects: returns the fitted
# coefficient values stored in the object's $values component.
coef.fittedloop <- function (object,...) object$values
|
/hysteresis/R/coef.fittedloop.R
|
no_license
|
ingted/R-Examples
|
R
| false
| false
| 56
|
r
|
# S3 coef() method for "fittedloop" objects: returns the fitted
# coefficient values stored in the object's $values component.
coef.fittedloop <- function (object,...) object$values
|
gbm_train <- function(dat_train){
  # Fit a multinomial gradient-boosted tree model predicting emotion_idx
  # from all remaining columns of dat_train, timing the fit.
  # Returns an unnamed list: [[1]] the fitted gbm model, [[2]] the
  # system.time() result of the training call.
  library(gbm)
  fit_timing <- system.time(
    gbm_model <- gbm(
      emotion_idx ~ .,
      distribution = "multinomial",
      data = dat_train,
      n.trees = 200,
      bag.fraction = 0.65,
      shrinkage = 0.1,
      cv.folds = 3
    )
  )
  list(gbm_model, fit_timing)
}
|
/lib/gbm_train.R
|
no_license
|
TZstatsADS/fall2019-proj3-sec2--group5
|
R
| false
| false
| 311
|
r
|
gbm_train <- function(dat_train){
  # Fit a multinomial gradient-boosted tree model predicting emotion_idx
  # from all remaining columns of dat_train, timing the fit.
  # Returns an unnamed list: [[1]] the fitted gbm model, [[2]] the
  # system.time() result of the training call.
  library(gbm)
  fit_timing <- system.time(
    gbm_model <- gbm(
      emotion_idx ~ .,
      distribution = "multinomial",
      data = dat_train,
      n.trees = 200,
      bag.fraction = 0.65,
      shrinkage = 0.1,
      cv.folds = 3
    )
  )
  list(gbm_model, fit_timing)
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/check-names.R
\name{check_names}
\alias{check_names}
\title{Check Names}
\usage{
check_names(
x,
names = character(0),
exclusive = FALSE,
order = FALSE,
x_name = NULL
)
}
\arguments{
\item{x}{The object to check.}
\item{names}{A character vector of the required names.}
\item{exclusive}{A flag specifying whether x must only contain the required names.}
\item{order}{A flag specifying whether the order of the required names in x must match the order in names.}
\item{x_name}{A string of the name of object x or NULL.}
}
\value{
An informative error if the test fails or an invisible copy of x.
}
\description{
Checks the names of an object.
}
\examples{
x <- c(x = 1, y = 2)
check_names(x, c("y", "x"))
try(check_names(x, c("y", "x"), order = TRUE))
try(check_names(x, "x", exclusive = TRUE))
}
\seealso{
Other check:
\code{\link{check_data}()},
\code{\link{check_dim}()},
\code{\link{check_dirs}()},
\code{\link{check_files}()},
\code{\link{check_key}()},
\code{\link{check_values}()}
}
\concept{check}
|
/man/check_names.Rd
|
permissive
|
poissonconsulting/chk
|
R
| false
| true
| 1,098
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/check-names.R
\name{check_names}
\alias{check_names}
\title{Check Names}
\usage{
check_names(
x,
names = character(0),
exclusive = FALSE,
order = FALSE,
x_name = NULL
)
}
\arguments{
\item{x}{The object to check.}
\item{names}{A character vector of the required names.}
\item{exclusive}{A flag specifying whether x must only contain the required names.}
\item{order}{A flag specifying whether the order of the required names in x must match the order in names.}
\item{x_name}{A string of the name of object x or NULL.}
}
\value{
An informative error if the test fails or an invisible copy of x.
}
\description{
Checks the names of an object.
}
\examples{
x <- c(x = 1, y = 2)
check_names(x, c("y", "x"))
try(check_names(x, c("y", "x"), order = TRUE))
try(check_names(x, "x", exclusive = TRUE))
}
\seealso{
Other check:
\code{\link{check_data}()},
\code{\link{check_dim}()},
\code{\link{check_dirs}()},
\code{\link{check_files}()},
\code{\link{check_key}()},
\code{\link{check_values}()}
}
\concept{check}
|
#Bern Romey, 04Feb15 ~ ESM567 Term Project
#PCA
# Exploratory PCA of Apochthonius morphometric measurements: load and
# clean the data, inspect scaling assumptions, then fit and interpret
# the principal components.
#Data
dta<-read.csv("ApochthoniusMorphLatLon04Feb15.csv", header=T)
dt <-na.omit(dta)
am <- dt[c(6:24)]
#Assumptions
boxplot(am, main = "Not scaled")
boxplot(scale(am), main="Scaled (centered) with Z-score")
boxplot(scale(log(am+1)),main="log transformed")
# NOTE(review): cor.matrix() and broken.stick() (below) are not base R;
# they must be sourced from an external script before running -- confirm.
cor.matrix(scale(am))#source cor.matrix function
cov(scale(am)) #calculate correlation matrix with the standardized data:
#Z-score from -1 to 1 (PCC)
cor(am) #same as covariance with scale
#PCA Analysis
require(MASS) #loads the PCA package
pca <- princomp(scale(am)) #creates a PC matrix using the correlation matrix
biplot(pca, expand = 1.05,main = "Biplot", xlab = "Comp.1 (30.1%)", ylab = "Comp.2 (14.8%)")
#Scale for sites(PC matrix-pca$scores) on top, scale for variables (vectors-loadings) along bottom
summary(pca) #proportion of variance is eigenvalues for each PC
broken.stick(18) #After comparing, keep components with eigenvalues > broken stick from summary
plot(pca, main="Scree Plot") #Scree plot
round(loadings(pca),2) #Check eigenvectors: length of vector is relative variance and how much it contributes to the PC
#Principal component loading (pg 50). The further from zero, the greater the contribution.
round(loadings(pca)[,c(1:2)],2) #Loading for PC1 & 2 only
round((pca$scores),2) #PC matrix showing site scores for all PCs. How far each is (SD) from the grand centroid
#This is the distribution of PC1 and PC2 site scores (top scale). Each variable for each component.
#In this case due to broken stick, PC1 and PC2
|
/PCA.R
|
no_license
|
sydney2/Ev567Proj
|
R
| false
| false
| 1,557
|
r
|
#Bern Romey, 04Feb15 ~ ESM567 Term Project
#PCA
# Exploratory PCA of Apochthonius morphometric measurements: load and
# clean the data, inspect scaling assumptions, then fit and interpret
# the principal components.
#Data
dta<-read.csv("ApochthoniusMorphLatLon04Feb15.csv", header=T)
dt <-na.omit(dta)
am <- dt[c(6:24)]
#Assumptions
boxplot(am, main = "Not scaled")
boxplot(scale(am), main="Scaled (centered) with Z-score")
boxplot(scale(log(am+1)),main="log transformed")
# NOTE(review): cor.matrix() and broken.stick() (below) are not base R;
# they must be sourced from an external script before running -- confirm.
cor.matrix(scale(am))#source cor.matrix function
cov(scale(am)) #calculate correlation matrix with the standardized data:
#Z-score from -1 to 1 (PCC)
cor(am) #same as covariance with scale
#PCA Analysis
require(MASS) #loads the PCA package
pca <- princomp(scale(am)) #creates a PC matrix using the correlation matrix
biplot(pca, expand = 1.05,main = "Biplot", xlab = "Comp.1 (30.1%)", ylab = "Comp.2 (14.8%)")
#Scale for sites(PC matrix-pca$scores) on top, scale for variables (vectors-loadings) along bottom
summary(pca) #proportion of variance is eigenvalues for each PC
broken.stick(18) #After comparing, keep components with eigenvalues > broken stick from summary
plot(pca, main="Scree Plot") #Scree plot
round(loadings(pca),2) #Check eigenvectors: length of vector is relative variance and how much it contributes to the PC
#Principal component loading (pg 50). The further from zero, the greater the contribution.
round(loadings(pca)[,c(1:2)],2) #Loading for PC1 & 2 only
round((pca$scores),2) #PC matrix showing site scores for all PCs. How far each is (SD) from the grand centroid
#This is the distribution of PC1 and PC2 site scores (top scale). Each variable for each component.
#In this case due to broken stick, PC1 and PC2
|
#' Construct an overall coverage cohort plot
#'
#' Given a matrix construct a plot to display sequencing depth acheived
#' as percentage bars for a cohort of samples.
#' @name covBars
#' @param x Object of class matrix with rows representing coverage achieved
#' at bases and columns corresponding to each sample in the cohort.
#' @param colour Character vector specifying colours to represent sequencing
#' depth.
#' @param plot_title Character string specifying the title to display on the
#' plot.
#' @param x_title_size Integer specifying the size of the x-axis title.
#' @param y_title_size Integer specifying the size of the y-axis title.
#' @param facet_lab_size Integer specifying the size of the faceted labels
#' plotted.
#' @param plotLayer Valid ggplot2 layer to be added to the plot.
#' @param out Character vector specifying the the object to output, one of
#' "data", "grob", or "plot", defaults to "plot" (see returns).
#' @return One of the following, a list of dataframes containing data to be
#' plotted, a grob object, or a plot.
#' @importFrom reshape2 melt
#' @examples
#' # Create data
#' x <- matrix(sample(100000,500), nrow=50, ncol=10, dimnames=list(0:49,paste0("Sample",1:10)))
#'
#' # Call plot function
#' covBars(x)
#' @export
covBars <- function(x, colour=NULL, plot_title=NULL, x_title_size=12,
                    y_title_size=12, facet_lab_size=10, plotLayer=NULL,
                    out="plot")
{
    # Build a cohort-wide coverage plot: each sample becomes a stacked
    # bar of the cumulative proportion of bases reaching each depth.
    # Perform quality check on input data
    dat <- covBars_qual(x, colour)
    x <- dat[[1]]
    colour <- dat[[2]]
    # resort the rows (increasing rowname as integer)
    x <- x[order(as.numeric(rownames(x))),]
    # normalize each sample (each sample should sum to 1)
    xnorm <- apply(x, 2, function(y){y/sum(as.numeric(y))})
    # get the cumulative sum of each sample
    xcs <- apply(xnorm, 2, cumsum)
    # melt the data for ggplot2 call
    xmelt <- reshape2::melt(xcs)
    colnames(xmelt) <- c('depth', 'sample', 'bp')
    # define the xmin to be used in the plot (xmax is bp): within each
    # sample the bar for a depth starts where the previous depth ended.
    xmelt <- cbind(xmelt, xmin=rep(NA,nrow(xmelt)))
    for(i in unique(xmelt$sample))
    {
        tmpcs <- xmelt$bp[xmelt$sample==i]
        # IDIOM FIX: the original indexed with 0:(length-1), silently
        # relying on R dropping the 0 index; head(tmpcs, -1) states the
        # intent (everything but the last element) directly and yields
        # the same result, including the single-element case.
        xmelt$xmin[xmelt$sample==i] <- c(0, head(tmpcs, -1))
    }
    xmelt <- as.data.frame(xmelt)
    # Maintain the order of samples
    xmelt$sample <- factor(xmelt$sample, levels=colnames(x))
    # Construct the plot
    p1 <- covBars_buildMain(xmelt, col=colour, plot_title=plot_title,
                            x_lab_size=x_title_size, y_lab_size=y_title_size,
                            facet_lab_size=facet_lab_size, layers=plotLayer)
    # Decide what to output
    output <- multi_selectOut(data=xmelt, plot=p1, out=out)
    return(output)
}
|
/R/covBars.R
|
permissive
|
cbrueffer/GenVisR
|
R
| false
| false
| 2,741
|
r
|
#' Construct an overall coverage cohort plot
#'
#' Given a matrix construct a plot to display sequencing depth acheived
#' as percentage bars for a cohort of samples.
#' @name covBars
#' @param x Object of class matrix with rows representing coverage achieved
#' at bases and columns corresponding to each sample in the cohort.
#' @param colour Character vector specifying colours to represent sequencing
#' depth.
#' @param plot_title Character string specifying the title to display on the
#' plot.
#' @param x_title_size Integer specifying the size of the x-axis title.
#' @param y_title_size Integer specifying the size of the y-axis title.
#' @param facet_lab_size Integer specifying the size of the faceted labels
#' plotted.
#' @param plotLayer Valid ggplot2 layer to be added to the plot.
#' @param out Character vector specifying the the object to output, one of
#' "data", "grob", or "plot", defaults to "plot" (see returns).
#' @return One of the following, a list of dataframes containing data to be
#' plotted, a grob object, or a plot.
#' @importFrom reshape2 melt
#' @examples
#' # Create data
#' x <- matrix(sample(100000,500), nrow=50, ncol=10, dimnames=list(0:49,paste0("Sample",1:10)))
#'
#' # Call plot function
#' covBars(x)
#' @export
covBars <- function(x, colour=NULL, plot_title=NULL, x_title_size=12,
                    y_title_size=12, facet_lab_size=10, plotLayer=NULL,
                    out="plot")
{
    # Build a cohort-wide coverage plot: each sample becomes a stacked
    # bar of the cumulative proportion of bases reaching each depth.
    # Perform quality check on input data
    dat <- covBars_qual(x, colour)
    x <- dat[[1]]
    colour <- dat[[2]]
    # resort the rows (increasing rowname as integer)
    x <- x[order(as.numeric(rownames(x))),]
    # normalize each sample (each sample should sum to 1)
    xnorm <- apply(x, 2, function(y){y/sum(as.numeric(y))})
    # get the cumulative sum of each sample
    xcs <- apply(xnorm, 2, cumsum)
    # melt the data for ggplot2 call
    xmelt <- reshape2::melt(xcs)
    colnames(xmelt) <- c('depth', 'sample', 'bp')
    # define the xmin to be used in the plot (xmax is bp): within each
    # sample the bar for a depth starts where the previous depth ended.
    xmelt <- cbind(xmelt, xmin=rep(NA,nrow(xmelt)))
    for(i in unique(xmelt$sample))
    {
        tmpcs <- xmelt$bp[xmelt$sample==i]
        # IDIOM FIX: the original indexed with 0:(length-1), silently
        # relying on R dropping the 0 index; head(tmpcs, -1) states the
        # intent (everything but the last element) directly and yields
        # the same result, including the single-element case.
        xmelt$xmin[xmelt$sample==i] <- c(0, head(tmpcs, -1))
    }
    xmelt <- as.data.frame(xmelt)
    # Maintain the order of samples
    xmelt$sample <- factor(xmelt$sample, levels=colnames(x))
    # Construct the plot
    p1 <- covBars_buildMain(xmelt, col=colour, plot_title=plot_title,
                            x_lab_size=x_title_size, y_lab_size=y_title_size,
                            facet_lab_size=facet_lab_size, layers=plotLayer)
    # Decide what to output
    output <- multi_selectOut(data=xmelt, plot=p1, out=out)
    return(output)
}
|
\name{fpca}
\alias{fpca}
\title{Focused Principal Components Analysis}
\description{
Graphical representation similar to a principal components analysis but adapted to data structured with dependent/independent variables
}
\usage{
fpca(formula=NULL,y=NULL, x=NULL, data, cx=0.75, pvalues="No",
partial="Yes", input="data", contraction="No", sample.size=1)
}
\arguments{
\item{formula}{"model" formula, of the form y ~ x }
\item{y}{column number of the dependent variable}
\item{x}{column numbers of the independent (explanatory) variables}
\item{data}{name of datafile}
\item{cx}{size of the lettering (0.75 by default, 1 for bigger letters, 0.5 for smaller)}
\item{pvalues}{vector of prespecified pvalues (pvalues="No" by default) (see below)}
\item{partial}{partial="Yes" by default, corresponds to the original method (see below)}
\item{input}{input="Cor" for a correlation matrix (input="data" by default)}
\item{contraction}{change the aspect of the diagram, contraction="Yes" is convenient for large data set (contraction="No" by default)}
\item{sample.size}{to be specified if input="Cor"}
}
\details{
This representation is close to a Principal Components Analysis (PCA).
Contrary to PCA, correlations between the dependent variable and the other variables are represented faithfully. The relationships between non dependent variables are interpreted like in a PCA: correlated variables are close or diametrically opposite (for negative correlations), independent variables make a right angle with the origin.
The focus on the dependent variable leads formally to a partialisation of the correlations between the non dependent variables by the dependent variable (see reference). To avoid this partialisation, the option partial="No" can be used.
It may be interesting to represent graphically the strength of association between the dependent variable and the other variables using p values coming from a model. A vector of pvalue may be specified in this case.
}
\value{
A plot (q plots in fact).
}
\references{Falissard B, Focused Principal Components Analysis: looking at a correlation matrix with a particular interest in a given variable. Journal of Computational and Graphical Statistics (1999), 8(4): 906-912.}
\author{Bruno Falissard, Bill Morphey, Adeline Abbe}
\examples{
data(sleep)
fpca(Paradoxical.sleep~Body.weight+Brain.weight+Slow.wave.sleep+Maximum.life.span+
Gestation.time+Predation+Sleep.exposure+Danger,data=sleep)
fpca(y="Paradoxical.sleep",x=c("Body.weight","Brain.weight","Slow.wave.sleep",
"Maximum.life.span","Gestation.time","Predation","Sleep.exposure","Danger"),data=sleep)
## focused PCA of the duration of paradoxical sleep (dreams, 5th column)
## against constitutional variables in mammals (columns 2, 3, 4, 7, 8, 9, 10, 11).
## Variables inside the red circle are significantly correlated
## to the dependent variable with p<0.05.
## Green variables are positively correlated to the dependent variable,
## yellow variables are negatively correlated.
## There are three clear clusters of independent variables.
corsleep <- as.data.frame(cor(sleep[,2:11],use="pairwise.complete.obs"))
fpca(Paradoxical.sleep~Body.weight+Brain.weight+Slow.wave.sleep+Maximum.life.span+
Gestation.time+Predation+Sleep.exposure+Danger,
data=corsleep,input="Cor",sample.size=60)
## when missing data are numerous, the representation of a pairwise correlation
## matrix may be preferred (even if mathematical properties are not so good...)
numer <- c(2:4,7:11)
l <- length(numer)
resu <- vector(length=l)
for(i in 1:l)
{
int <- sleep[,numer[i]]
mod <- lm(sleep$Paradoxical.sleep~int)
resu[i] <- summary(mod)[[4]][2,4]*sign(summary(mod)[[4]][2,1])
}
fpca(Paradoxical.sleep~Body.weight+Brain.weight+Slow.wave.sleep+Maximum.life.span+
Gestation.time+Predation+Sleep.exposure+Danger,
data=sleep,pvalues=resu)
## A representation with p values
## When input="Cor" or pvalues="Yes" partial is turned to "No"
mod <- lm(sleep$Paradoxical.sleep~sleep$Body.weight+sleep$Brain.weight+
sleep$Slow.wave.sleep+sleep$Maximum.life.span+sleep$Gestation.time+
sleep$Predation+sleep$Sleep.exposure+sleep$Danger)
resu <- summary(mod)[[4]][2:9,4]*sign(summary(mod)[[4]][2:9,1])
fpca(Paradoxical.sleep~Body.weight+Brain.weight+Slow.wave.sleep+Maximum.life.span+
Gestation.time+Predation+Sleep.exposure+Danger,
data=sleep,pvalues=resu)
## A representation with p values which come from a multiple linear model
## (here results are difficult to interpret)
}
\keyword{multivariate}
|
/man/fpca.Rd
|
no_license
|
cran/psy
|
R
| false
| false
| 4,534
|
rd
|
\name{fpca}
\alias{fpca}
\title{Focused Principal Components Analysis}
\description{
Graphical representation similar to a principal components analysis but adapted to data structured with dependent/independent variables
}
\usage{
fpca(formula=NULL,y=NULL, x=NULL, data, cx=0.75, pvalues="No",
partial="Yes", input="data", contraction="No", sample.size=1)
}
\arguments{
\item{formula}{"model" formula, of the form y ~ x }
\item{y}{column number of the dependent variable}
\item{x}{column numbers of the independent (explanatory) variables}
\item{data}{name of datafile}
\item{cx}{size of the lettering (0.75 by default, 1 for bigger letters, 0.5 for smaller)}
\item{pvalues}{vector of prespecified pvalues (pvalues="No" by default) (see below)}
\item{partial}{partial="Yes" by default, corresponds to the original method (see below)}
\item{input}{input="Cor" for a correlation matrix (input="data" by default)}
\item{contraction}{change the aspect of the diagram, contraction="Yes" is convenient for large data set (contraction="No" by default)}
\item{sample.size}{to be specified if input="Cor"}
}
\details{
This representation is close to a Principal Components Analysis (PCA).
Contrary to PCA, correlations between the dependent variable and the other variables are represented faithfully. The relationships between non dependent variables are interpreted like in a PCA: correlated variables are close or diametrically opposite (for negative correlations), independent variables make a right angle with the origin.
The focus on the dependent variable leads formally to a partialisation of the correlations between the non dependent variables by the dependent variable (see reference). To avoid this partialisation, the option partial="No" can be used.
It may be interesting to represent graphically the strength of association between the dependent variable and the other variables using p values coming from a model. A vector of pvalue may be specified in this case.
}
\value{
A plot (q plots in fact).
}
\references{Falissard B, Focused Principal Components Analysis: looking at a correlation matrix with a particular interest in a given variable. Journal of Computational and Graphical Statistics (1999), 8(4): 906-912.}
\author{Bruno Falissard, Bill Morphey, Adeline Abbe}
\examples{
data(sleep)
fpca(Paradoxical.sleep~Body.weight+Brain.weight+Slow.wave.sleep+Maximum.life.span+
Gestation.time+Predation+Sleep.exposure+Danger,data=sleep)
fpca(y="Paradoxical.sleep",x=c("Body.weight","Brain.weight","Slow.wave.sleep",
"Maximum.life.span","Gestation.time","Predation","Sleep.exposure","Danger"),data=sleep)
## focused PCA of the duration of paradoxical sleep (dreams, 5th column)
## against constitutional variables in mammals (columns 2, 3, 4, 7, 8, 9, 10, 11).
## Variables inside the red circle are significantly correlated
## to the dependent variable with p<0.05.
## Green variables are positively correlated to the dependent variable,
## yellow variables are negatively correlated.
## There are three clear clusters of independent variables.
corsleep <- as.data.frame(cor(sleep[,2:11],use="pairwise.complete.obs"))
fpca(Paradoxical.sleep~Body.weight+Brain.weight+Slow.wave.sleep+Maximum.life.span+
Gestation.time+Predation+Sleep.exposure+Danger,
data=corsleep,input="Cor",sample.size=60)
## when missing data are numerous, the representation of a pairwise correlation
## matrix may be preferred (even if mathematical properties are not so good...)
numer <- c(2:4,7:11)
l <- length(numer)
resu <- vector(length=l)
for(i in 1:l)
{
int <- sleep[,numer[i]]
mod <- lm(sleep$Paradoxical.sleep~int)
resu[i] <- summary(mod)[[4]][2,4]*sign(summary(mod)[[4]][2,1])
}
fpca(Paradoxical.sleep~Body.weight+Brain.weight+Slow.wave.sleep+Maximum.life.span+
Gestation.time+Predation+Sleep.exposure+Danger,
data=sleep,pvalues=resu)
## A representation with p values
## When input="Cor" or pvalues="Yes" partial is turned to "No"
mod <- lm(sleep$Paradoxical.sleep~sleep$Body.weight+sleep$Brain.weight+
sleep$Slow.wave.sleep+sleep$Maximum.life.span+sleep$Gestation.time+
sleep$Predation+sleep$Sleep.exposure+sleep$Danger)
resu <- summary(mod)[[4]][2:9,4]*sign(summary(mod)[[4]][2:9,1])
fpca(Paradoxical.sleep~Body.weight+Brain.weight+Slow.wave.sleep+Maximum.life.span+
Gestation.time+Predation+Sleep.exposure+Danger,
data=sleep,pvalues=resu)
## A representation with p values which come from a multiple linear model
## (here results are difficult to interpret)
}
\keyword{multivariate}
|
# Extracted example code from the naniar package documentation:
# list the variables of a data frame that contain at least one NA.
library(naniar)
### Name: miss_var_which
### Title: Which variables contain missing values?
### Aliases: miss_var_which
### ** Examples
miss_var_which(airquality)
miss_var_which(iris)
|
/data/genthat_extracted_code/naniar/examples/miss_var_which.Rd.R
|
no_license
|
surayaaramli/typeRrh
|
R
| false
| false
| 193
|
r
|
# Extracted example code from the naniar package documentation:
# list the variables of a data frame that contain at least one NA.
library(naniar)
### Name: miss_var_which
### Title: Which variables contain missing values?
### Aliases: miss_var_which
### ** Examples
miss_var_which(airquality)
miss_var_which(iris)
|
# Extracted example code from the shinyShortcut package documentation.
# NOTE(review): presumably writes shortcut files into the current project
# as a side effect -- see the package docs before running.
library(shinyShortcut)
### Name: shinyShortcut
### Title: Create Shiny App Shortcut
### Aliases: shinyShortcut
### ** Examples
shinyShortcut()
|
/data/genthat_extracted_code/shinyShortcut/examples/shinyShortcut.Rd.R
|
no_license
|
surayaaramli/typeRrh
|
R
| false
| false
| 150
|
r
|
# Extracted example code from the shinyShortcut package documentation.
# NOTE(review): presumably writes shortcut files into the current project
# as a side effect -- see the package docs before running.
library(shinyShortcut)
### Name: shinyShortcut
### Title: Create Shiny App Shortcut
### Aliases: shinyShortcut
### ** Examples
shinyShortcut()
|
## File Name: mlnormal_update_V_R.R
## File Version: 0.28
##############################################
# update matrix V and its inverse
# For every group gg = 1..G builds the model-implied covariance matrix
#   V_gg = sum_pp prod( theta ^ Z_index[gg, pp, ] ) * Z_list[[gg]][[pp]]
# together with its inverse, and returns both as lists of length G
# (plus the rcpp_args pass-through).
# NOTE(review): when do_compute[gg] is FALSE, the V_gg / V_gg1 computed
# for the previous group are stored again for group gg (the variance
# shortcut for groups sharing a design); this implicitly assumes
# do_compute[1] is TRUE -- TODO confirm with the caller.
mlnormal_update_V_R <- function( Z_index, G, theta,
Z_list, use_ginverse, variance_shortcut, freq_id,
do_compute, rcpp_args){
dimZ <- dim( Z_index )
Z2 <- dimZ[2]
V_list <- as.list(1:G)
V1_list <- V_list
# NOTE(review): dimZ/Z2 are recomputed here and do_computation is never
# read again below -- apparent leftover code.
dimZ <- dim( Z_index )
Z2 <- dimZ[2]
do_computation <- TRUE
for (gg in 1:G){
# gg <- 1
# compute V for group gg
if ( do_compute[gg] ){
Z_index_gg <- Z_index[gg,,,drop=FALSE]
Z_list_gg <- Z_list[[gg]]
V_gg <- 0*Z_list_gg[[1]]
for (pp in 1:Z2){
# pp <- 1
# theta^q: product of theta raised to the exponents for term pp
a1 <- prod( theta^( Z_index_gg[1,pp,] ) )
V_gg <- V_gg + a1 * Z_list_gg[[pp]]
}
## use generalized inverse instead of inverse if
## solve does not work in case of singularity
if ( ! use_ginverse ){
V_gg1 <- solve( V_gg )
} else {
V_gg1 <- sirt::ginverse_sym( V_gg )
}
} # end do computation
V_list[[gg]] <- V_gg
V1_list[[gg]] <- V_gg1
}
#--- output
res <- list("V_list"=V_list, "V1_list"=V1_list,
"rcpp_args"=rcpp_args )
return(res)
}
######################################################################
|
/LAM/R/mlnormal_update_V_R.R
|
no_license
|
akhikolla/TestedPackages-NoIssues
|
R
| false
| false
| 1,517
|
r
|
## File Name: mlnormal_update_V_R.R
## File Version: 0.28
##############################################
# update matrix V and its inverse
# For every group gg = 1..G builds the model-implied covariance matrix
#   V_gg = sum_pp prod( theta ^ Z_index[gg, pp, ] ) * Z_list[[gg]][[pp]]
# together with its inverse, and returns both as lists of length G
# (plus the rcpp_args pass-through).
# NOTE(review): when do_compute[gg] is FALSE, the V_gg / V_gg1 computed
# for the previous group are stored again for group gg (the variance
# shortcut for groups sharing a design); this implicitly assumes
# do_compute[1] is TRUE -- TODO confirm with the caller.
mlnormal_update_V_R <- function( Z_index, G, theta,
Z_list, use_ginverse, variance_shortcut, freq_id,
do_compute, rcpp_args){
dimZ <- dim( Z_index )
Z2 <- dimZ[2]
V_list <- as.list(1:G)
V1_list <- V_list
# NOTE(review): dimZ/Z2 are recomputed here and do_computation is never
# read again below -- apparent leftover code.
dimZ <- dim( Z_index )
Z2 <- dimZ[2]
do_computation <- TRUE
for (gg in 1:G){
# gg <- 1
# compute V for group gg
if ( do_compute[gg] ){
Z_index_gg <- Z_index[gg,,,drop=FALSE]
Z_list_gg <- Z_list[[gg]]
V_gg <- 0*Z_list_gg[[1]]
for (pp in 1:Z2){
# pp <- 1
# theta^q: product of theta raised to the exponents for term pp
a1 <- prod( theta^( Z_index_gg[1,pp,] ) )
V_gg <- V_gg + a1 * Z_list_gg[[pp]]
}
## use generalized inverse instead of inverse if
## solve does not work in case of singularity
if ( ! use_ginverse ){
V_gg1 <- solve( V_gg )
} else {
V_gg1 <- sirt::ginverse_sym( V_gg )
}
} # end do computation
V_list[[gg]] <- V_gg
V1_list[[gg]] <- V_gg1
}
#--- output
res <- list("V_list"=V_list, "V1_list"=V1_list,
"rcpp_args"=rcpp_args )
return(res)
}
######################################################################
|
# 程序名称:土壤肥力综合评价初步研究 算法
# 版本:V3.0,2017.9.5修订
# 作者:Guoqiang Li
# E-Mail: agri521#gmail.com
# 说明:算法摘自“土壤肥力综合评价初步研究”,浙江大学学报,1999,25(4):378-382
## Membership function definition
# Maps raw soil-fertility indicator values onto a fuzzy membership score
# in [0.1, 1]:
#   x <  xmin         -> 0.1 (lower plateau)
#   xmin <= x < xmax  -> linear ramp 0.9*(x - xmin)/(xmax - xmin) + 0.1
#   x >= xmax         -> 1   (upper plateau)
# Vectorized over x (the original element-wise loop produced identical
# values); as a side benefit NA inputs now propagate instead of erroring.
fun_Membership <- function(x,xmin,xmax){
  ramp <- 0.9 * (x - xmin) / (xmax - xmin) + 0.1
  pmin(pmax(ramp, 0.1), 1)
}
# Weight determination for the individual fertility indices
# Derives a percentage weight for every column of `dataForCor` from its
# correlation matrix: each index is weighted by its mean correlation with
# the remaining indices, rescaled so the weights sum to 100.
# Returns a one-row data frame named after the columns of `dataForCor`.
fun_Weight <- function(dataForCor){
  corr_matrix <- cor(dataForCor)
  n_index <- nrow(corr_matrix)
  # Mean correlation with the other indices (drop the diagonal 1 first).
  mean_corr <- (rowSums(corr_matrix) - 1) / (n_index - 1)
  # Transpose so the result is a single row of percentages.
  as.data.frame(t(mean_corr / sum(mean_corr) * 100))
}
|
/UserDefinedFunction.R
|
no_license
|
agri521/SoilFertilityEvaluation
|
R
| false
| false
| 1,011
|
r
|
# 程序名称:土壤肥力综合评价初步研究 算法
# 版本:V3.0,2017.9.5修订
# 作者:Guoqiang Li
# E-Mail: agri521#gmail.com
# 说明:算法摘自“土壤肥力综合评价初步研究”,浙江大学学报,1999,25(4):378-382
## Membership function definition
# Maps raw soil-fertility indicator values onto a fuzzy membership score
# in [0.1, 1]:
#   x <  xmin         -> 0.1 (lower plateau)
#   xmin <= x < xmax  -> linear ramp 0.9*(x - xmin)/(xmax - xmin) + 0.1
#   x >= xmax         -> 1   (upper plateau)
# Vectorized over x (the original element-wise loop produced identical
# values); as a side benefit NA inputs now propagate instead of erroring.
fun_Membership <- function(x,xmin,xmax){
  ramp <- 0.9 * (x - xmin) / (xmax - xmin) + 0.1
  pmin(pmax(ramp, 0.1), 1)
}
# Weight determination for the individual fertility indices
# Derives a percentage weight for every column of `dataForCor` from its
# correlation matrix: each index is weighted by its mean correlation with
# the remaining indices, rescaled so the weights sum to 100.
# Returns a one-row data frame named after the columns of `dataForCor`.
fun_Weight <- function(dataForCor){
  corr_matrix <- cor(dataForCor)
  n_index <- nrow(corr_matrix)
  # Mean correlation with the other indices (drop the diagonal 1 first).
  mean_corr <- (rowSums(corr_matrix) - 1) / (n_index - 1)
  # Transpose so the result is a single row of percentages.
  as.data.frame(t(mean_corr / sum(mean_corr) * 100))
}
|
# Conditional logistic regression log-likelihood via the Gail et al.
# recursive algorithm.
#   n    - vector of group sizes
#   m    - vector of case counts per group (same length as n)
#   x    - covariate matrix, one row per group
#   beta - coefficient vector
# Returns the conditional log-likelihood. Degenerate strata with no cases
# (M == 0) or all cases (M == N) carry no information and contribute 0.
`clogistLoglike` <-
function(n,m,x,beta){
M<-sum(m)
N<-sum(n)
if (M==0) return(0)
else if (M==N) return(0)
x<-as.matrix(x)
eta<- x %*% beta
U<-exp(eta)
# With a single case the denominator reduces to a plain weighted sum.
if (M==1) return(sum(eta*m) - log(sum(U*n)) )
if (M>N/2){
## for efficiency, keep loop part of calculation to minimum
## by switching m and n-m, beta and -beta
m<-n-m
M<-N-M
U<-1/U
eta<- -eta
}
# The swap above may have reduced M to 1, so re-check the shortcut.
if (M==1) return(sum(eta*m) - log(sum(U*n)) )
B<-rep(1,N-M+1)
u<-rep(NA,N)
count<-1
# Expand the per-group weights U into one entry per observation.
for (a in 1:length(n)){
u[count:(count+n[a]-1)]<-U[a]
count<-count+n[a]
}
## The last 2 lines of this function, may be written more
## clearly (i.e., more like in Gail, et al) BUT LESS EFFICIENTLY as:
#B<-matrix(0,M+1,N+1)
#B[1,]<-1
#for (i in 1:M){
#for (j in i:(N-M+i)){
#B[i+1,j+1]<- B[i+1,j]+u[j]*B[i,j]
#}
#}
#sum(eta*m) - log(B[M+1,N+1])
# Rolling one-row version of the full recursion shown above.
for (i in 1:(M-1)) B<- cumsum(B*u[i:(N-M+i)])
sum(eta*m) - log(sum(B*u[M:N]))
}
|
/R/clogistLoglike.R
|
no_license
|
cran/saws
|
R
| false
| false
| 1,050
|
r
|
# Conditional logistic regression log-likelihood via the Gail et al.
# recursive algorithm.
#   n    - vector of group sizes
#   m    - vector of case counts per group (same length as n)
#   x    - covariate matrix, one row per group
#   beta - coefficient vector
# Returns the conditional log-likelihood. Degenerate strata with no cases
# (M == 0) or all cases (M == N) carry no information and contribute 0.
`clogistLoglike` <-
function(n,m,x,beta){
M<-sum(m)
N<-sum(n)
if (M==0) return(0)
else if (M==N) return(0)
x<-as.matrix(x)
eta<- x %*% beta
U<-exp(eta)
# With a single case the denominator reduces to a plain weighted sum.
if (M==1) return(sum(eta*m) - log(sum(U*n)) )
if (M>N/2){
## for efficiency, keep loop part of calculation to minimum
## by switching m and n-m, beta and -beta
m<-n-m
M<-N-M
U<-1/U
eta<- -eta
}
# The swap above may have reduced M to 1, so re-check the shortcut.
if (M==1) return(sum(eta*m) - log(sum(U*n)) )
B<-rep(1,N-M+1)
u<-rep(NA,N)
count<-1
# Expand the per-group weights U into one entry per observation.
for (a in 1:length(n)){
u[count:(count+n[a]-1)]<-U[a]
count<-count+n[a]
}
## The last 2 lines of this function, may be written more
## clearly (i.e., more like in Gail, et al) BUT LESS EFFICIENTLY as:
#B<-matrix(0,M+1,N+1)
#B[1,]<-1
#for (i in 1:M){
#for (j in i:(N-M+i)){
#B[i+1,j+1]<- B[i+1,j]+u[j]*B[i,j]
#}
#}
#sum(eta*m) - log(B[M+1,N+1])
# Rolling one-row version of the full recursion shown above.
for (i in 1:(M-1)) B<- cumsum(B*u[i:(N-M+i)])
sum(eta*m) - log(sum(B*u[M:N]))
}
|
# Convert a square matrix of Bradley-Terry probabilities into a long-format
# data frame with one row per unordered item pair (upper triangle only).
as_df_btprob <- function(m) {
  # Ensure we are working on a matrix, then blank the diagonal and lower
  # triangle so each pair contributes exactly one row.
  mat <- if (is.matrix(m)) m else as.matrix(m)
  mat[lower.tri(mat, diag = TRUE)] <- NA
  # Melt to long format and keep only the surviving upper-triangle cells.
  long <- as.data.frame.table(mat, useNA = "no", stringsAsFactors = FALSE)
  long <- dplyr::as_data_frame(long)
  long <- dplyr::filter(long, !is.na(Freq))
  # The stored value is p(item1 beats item2); its complement is the
  # probability that item2 beats item1.
  long <- dplyr::rename(long, prob1wins = Freq)
  dplyr::mutate(long, prob2wins = 1 - as.numeric(prob1wins))
}
#' Calculates Bradley-Terry probabilities
#'
#' Calculates the Bradley-Terry probabilities of each item in a fully-connected component of the comparison graph, \eqn{G_W}, winning against every other item in that component (see Details).
#'
#' Consider a set of \eqn{K} items. Let the items be nodes in a graph and let there be a directed edge \eqn{(i, j)} when \eqn{i} has won against \eqn{j} at least once. We call this the comparison graph of the data, and denote it by \eqn{G_W}. Assuming that \eqn{G_W} is fully connected, the Bradley-Terry model states that the probability that item \eqn{i} beats item \eqn{j} is
#' \deqn{p_{ij} = \frac{\pi_i}{\pi_i + \pi_j},}
#' where \eqn{\pi_i} and \eqn{\pi_j} are positive-valued parameters representing the skills of items \eqn{i} and \eqn{j}, for \eqn{1 \le i, j, \le K}. The function \code{\link{btfit}} can be used to find the strength parameter \eqn{\pi}. It produces a \code{"btfit"} object that can then be passed to \code{btprob} to obtain the Bradley-Terry probabilities \eqn{p_{ij}}.
#'
#' If \eqn{G_W} is not fully connected, then a penalised strength parameter can be obtained using the method of Caron and Doucet (2012) (see \code{\link{btfit}}, with \code{a > 1}), which allows for a Bradley-Terry probability of any of the K items beating any of the others. Alternatively, the MLE can be found for each fully connected component of \eqn{G_W} (see \code{\link{btfit}}, with \code{a = 1}), and the probability of each item in each component beating any other item in that component can be found.
#'
#' @param object An object of class "btfit", typically the result \code{ob} of \code{ob <- btfit(..)}. See \code{\link{btfit}}.
#' @param as_df Logical scalar, determining class of output. If \code{TRUE}, the function returns a data frame. If \code{FALSE} (the default), the function returns a matrix (or list of matrices). Note that setting \code{as_df = TRUE} can have a significant computational cost when any of the components have a large number of items.
#'@param subset A condition for selecting one or more subsets of the components. This can either be a character vector of names of the components (i.e. a subset of \code{names(object$pi)}), a single predicate function (that takes a vector of \code{object$pi} as its argument), or a logical vector of the same length as the number of components, (i.e. \code{length(object$pi)}).
#' @return If \code{as_df = FALSE}, returns a matrix where the \eqn{i,j}-th element is the Bradley-Terry probability \eqn{p_{ij}}, or, if the comparison graph, \eqn{G_W}, is not fully connected and \code{\link{btfit}} has been run with \code{a = 1}, a list of such matrices for each fully-connected component of \eqn{G_W}. If \code{as_df = TRUE}, returns a five-column data frame, where the first column is the component that the two items are in, the second column is \code{item1}, the third column is \code{item2}, the fourth column is the Bradley-Terry probability that item 1 beats item 2 and the fifth column is the Bradley-Terry probability that item 2 beats item 1. If the original \code{btdata$wins} matrix has named dimnames, these will be the \code{colnames} for columns one and two. See Details.
#' @references Bradley, R. A. and Terry, M. E. (1952). Rank analysis of incomplete block designs: 1. The method of paired comparisons. \emph{Biometrika}, \strong{39}(3/4), 324-345.
#' @references Caron, F. and Doucet, A. (2012). Efficient Bayesian Inference for Generalized Bradley-Terry Models. \emph{Journal of Computational and Graphical Statistics}, \strong{21}(1), 174-196.
#' @seealso \code{\link{btfit}}, \code{\link{btdata}}
#' @examples
#' citations_btdata <- btdata(BradleyTerryScalable::citations)
#' fit1 <- btfit(citations_btdata, 1)
#' btprob(fit1)
#' btprob(fit1, as_df = TRUE)
#' toy_df_4col <- codes_to_counts(BradleyTerryScalable::toy_data, c("W1", "W2", "D"))
#' toy_btdata <- btdata(toy_df_4col)
#' fit2a <- btfit(toy_btdata, 1)
#' btprob(fit2a)
#' btprob(fit2a, as_df = TRUE)
#' btprob(fit2a, subset = function(x) "Amy" %in% names(x))
#' fit2b <- btfit(toy_btdata, 1.1)
#' btprob(fit2b, as_df = TRUE)
#' @author Ella Kaye
#' @export
btprob <- function(object, subset = NULL, as_df = FALSE) {
  # Only the output of btfit() carries the pieces used below.
  if (!inherits(object, "btfit")) stop("Object should be a 'btfit' object")
  pi <- object$pi
  # Optionally restrict to a subset of the fully-connected components.
  if (!is.null(subset)) {
    pi <- subset_by_pi(pi, subset)
  }
  # Item names per component, used to label each probability matrix.
  components <- purrr::map(pi, names)
  # set up names of dimnames (taken from the original wins matrix)
  names_dimnames <- object$names_dimnames
  names_dimnames_list <- list(names_dimnames)
  # Bradley-Terry probabilities p_ij = pi_i / (pi_i + pi_j), one matrix
  # per component.
  p <- purrr::map(pi, btprob_vec)
  p <- purrr::map2(p, components, name_matrix_function)
  p <- purrr::map2(p, names_dimnames_list, name_dimnames_function)
  # convert to data frame, if requested
  if (as_df) {
    comp_names <- names(pi)
    p <- purrr::map(p, as_df_btprob)
    # Rows (item pairs) per component, so the component label can be
    # repeated to match.
    reps <- purrr::map_int(p, nrow)
    p <- purrr::map(p, df_col_rename_func, names_dimnames)
    p <- dplyr::bind_rows(p)
    comps_for_df <- purrr::map2(comp_names, reps, ~rep(.x, each = .y))
    comps_for_df <- unlist(comps_for_df)
    p <- dplyr::mutate(p, component = comps_for_df)
    # hack to avoid CRAN note
    component <- NULL
    p <- dplyr::select(p, component, 1:4)
  }
  # When the whole dataset forms a single component, unwrap the lone matrix
  # from its list. Fixed: use the scalar, short-circuiting && rather than
  # the vectorised & for this length-one condition.
  if (length(pi) == 1 && !as_df) {
    if (names(pi) == "full_dataset") {
      p <- p[[1]]
    }
  }
  p
}
|
/R/btprob.R
|
no_license
|
cran/BradleyTerryScalable
|
R
| false
| false
| 5,861
|
r
|
# Convert a square matrix of Bradley-Terry probabilities into a long-format
# data frame with one row per unordered item pair (upper triangle only).
as_df_btprob <- function(m) {
  # Ensure we are working on a matrix, then blank the diagonal and lower
  # triangle so each pair contributes exactly one row.
  mat <- if (is.matrix(m)) m else as.matrix(m)
  mat[lower.tri(mat, diag = TRUE)] <- NA
  # Melt to long format and keep only the surviving upper-triangle cells.
  long <- as.data.frame.table(mat, useNA = "no", stringsAsFactors = FALSE)
  long <- dplyr::as_data_frame(long)
  long <- dplyr::filter(long, !is.na(Freq))
  # The stored value is p(item1 beats item2); its complement is the
  # probability that item2 beats item1.
  long <- dplyr::rename(long, prob1wins = Freq)
  dplyr::mutate(long, prob2wins = 1 - as.numeric(prob1wins))
}
#' Calculates Bradley-Terry probabilities
#'
#' Calculates the Bradley-Terry probabilities of each item in a fully-connected component of the comparison graph, \eqn{G_W}, winning against every other item in that component (see Details).
#'
#' Consider a set of \eqn{K} items. Let the items be nodes in a graph and let there be a directed edge \eqn{(i, j)} when \eqn{i} has won against \eqn{j} at least once. We call this the comparison graph of the data, and denote it by \eqn{G_W}. Assuming that \eqn{G_W} is fully connected, the Bradley-Terry model states that the probability that item \eqn{i} beats item \eqn{j} is
#' \deqn{p_{ij} = \frac{\pi_i}{\pi_i + \pi_j},}
#' where \eqn{\pi_i} and \eqn{\pi_j} are positive-valued parameters representing the skills of items \eqn{i} and \eqn{j}, for \eqn{1 \le i, j, \le K}. The function \code{\link{btfit}} can be used to find the strength parameter \eqn{\pi}. It produces a \code{"btfit"} object that can then be passed to \code{btprob} to obtain the Bradley-Terry probabilities \eqn{p_{ij}}.
#'
#' If \eqn{G_W} is not fully connected, then a penalised strength parameter can be obtained using the method of Caron and Doucet (2012) (see \code{\link{btfit}}, with \code{a > 1}), which allows for a Bradley-Terry probability of any of the K items beating any of the others. Alternatively, the MLE can be found for each fully connected component of \eqn{G_W} (see \code{\link{btfit}}, with \code{a = 1}), and the probability of each item in each component beating any other item in that component can be found.
#'
#' @param object An object of class "btfit", typically the result \code{ob} of \code{ob <- btfit(..)}. See \code{\link{btfit}}.
#' @param as_df Logical scalar, determining class of output. If \code{TRUE}, the function returns a data frame. If \code{FALSE} (the default), the function returns a matrix (or list of matrices). Note that setting \code{as_df = TRUE} can have a significant computational cost when any of the components have a large number of items.
#'@param subset A condition for selecting one or more subsets of the components. This can either be a character vector of names of the components (i.e. a subset of \code{names(object$pi)}), a single predicate function (that takes a vector of \code{object$pi} as its argument), or a logical vector of the same length as the number of components, (i.e. \code{length(object$pi)}).
#' @return If \code{as_df = FALSE}, returns a matrix where the \eqn{i,j}-th element is the Bradley-Terry probability \eqn{p_{ij}}, or, if the comparison graph, \eqn{G_W}, is not fully connected and \code{\link{btfit}} has been run with \code{a = 1}, a list of such matrices for each fully-connected component of \eqn{G_W}. If \code{as_df = TRUE}, returns a five-column data frame, where the first column is the component that the two items are in, the second column is \code{item1}, the third column is \code{item2}, the fourth column is the Bradley-Terry probability that item 1 beats item 2 and the fifth column is the Bradley-Terry probability that item 2 beats item 1. If the original \code{btdata$wins} matrix has named dimnames, these will be the \code{colnames} for columns one and two. See Details.
#' @references Bradley, R. A. and Terry, M. E. (1952). Rank analysis of incomplete block designs: 1. The method of paired comparisons. \emph{Biometrika}, \strong{39}(3/4), 324-345.
#' @references Caron, F. and Doucet, A. (2012). Efficient Bayesian Inference for Generalized Bradley-Terry Models. \emph{Journal of Computational and Graphical Statistics}, \strong{21}(1), 174-196.
#' @seealso \code{\link{btfit}}, \code{\link{btdata}}
#' @examples
#' citations_btdata <- btdata(BradleyTerryScalable::citations)
#' fit1 <- btfit(citations_btdata, 1)
#' btprob(fit1)
#' btprob(fit1, as_df = TRUE)
#' toy_df_4col <- codes_to_counts(BradleyTerryScalable::toy_data, c("W1", "W2", "D"))
#' toy_btdata <- btdata(toy_df_4col)
#' fit2a <- btfit(toy_btdata, 1)
#' btprob(fit2a)
#' btprob(fit2a, as_df = TRUE)
#' btprob(fit2a, subset = function(x) "Amy" %in% names(x))
#' fit2b <- btfit(toy_btdata, 1.1)
#' btprob(fit2b, as_df = TRUE)
#' @author Ella Kaye
#' @export
btprob <- function(object, subset = NULL, as_df = FALSE) {
  # Only the output of btfit() carries the pieces used below.
  if (!inherits(object, "btfit")) stop("Object should be a 'btfit' object")
  pi <- object$pi
  # Optionally restrict to a subset of the fully-connected components.
  if (!is.null(subset)) {
    pi <- subset_by_pi(pi, subset)
  }
  # Item names per component, used to label each probability matrix.
  components <- purrr::map(pi, names)
  # set up names of dimnames (taken from the original wins matrix)
  names_dimnames <- object$names_dimnames
  names_dimnames_list <- list(names_dimnames)
  # Bradley-Terry probabilities p_ij = pi_i / (pi_i + pi_j), one matrix
  # per component.
  p <- purrr::map(pi, btprob_vec)
  p <- purrr::map2(p, components, name_matrix_function)
  p <- purrr::map2(p, names_dimnames_list, name_dimnames_function)
  # convert to data frame, if requested
  if (as_df) {
    comp_names <- names(pi)
    p <- purrr::map(p, as_df_btprob)
    # Rows (item pairs) per component, so the component label can be
    # repeated to match.
    reps <- purrr::map_int(p, nrow)
    p <- purrr::map(p, df_col_rename_func, names_dimnames)
    p <- dplyr::bind_rows(p)
    comps_for_df <- purrr::map2(comp_names, reps, ~rep(.x, each = .y))
    comps_for_df <- unlist(comps_for_df)
    p <- dplyr::mutate(p, component = comps_for_df)
    # hack to avoid CRAN note
    component <- NULL
    p <- dplyr::select(p, component, 1:4)
  }
  # When the whole dataset forms a single component, unwrap the lone matrix
  # from its list. Fixed: use the scalar, short-circuiting && rather than
  # the vectorised & for this length-one condition.
  if (length(pi) == 1 && !as_df) {
    if (names(pi) == "full_dataset") {
      p <- p[[1]]
    }
  }
  p
}
|
#' Calculates the fence and the loop of a gemplot (i.e. the outer gemstone).
#'
#' The fence inflates the the bag relative to the depth median by the
#' factor inflation. Data points outside the bag and inside the fence
#' the loop or outer gemstone are flagged as outliers. Data points
#' outside the fence are marked as outliers. In the case of a
#' 3-dimensional data set, the loop can be visualized by an outer
#' gemstone around the inner gemstone or bag.
#' @title Calculates the fence and the loop
#' @param D Data set with rows representing the individuals and
#' columns representing the features. In the case of three
#' dimensions, the colnames of D must be c("x", "y", "z").
#' @param B List containing the information about the coordinates of
#' the bag and the convex hull that forms the bag (determined by
#' \code{\link{bag}}).
#' @param inflation A numeric value > 0 that specifies the inflation
#' factor of the bag relative to the median (default = 3).
#' @param dm The coordinates of the depth median as produced by
#' \code{\link{depmed}}.
#' @return A list containing the following elements:
#' \describe{
#' \item{\emph{coords.loop}}{Coordinates of the data points that are inside the convex hull around the loop.}
#' \item{\emph{hull.loop}}{A data matrix that contains the indices of the margin data points of the loop that cover the convex hull by triangles. Each row represents one triangle. The indices correspond to the rows of coords.loop.}
#' \item{\emph{coords.fence}}{Coordinates of the grid points that are inside the fence but outside the bag.}
#' \item{\emph{hull.fence}}{A data matrix that contains the indices of the margin grid points of the fence that cover the convex hull around the fence by triangles. Each row represents one triangle. The indices correspond to the rows of coords.fence.}
#' \item{\emph{outliers}}{A vector of length equal to the sample size. Data points that are inside the fence are labelled by 0 and values outside the fence (i.e. outliers) are labelled by 1.}
#' }
#'
#' @references
#' Rousseeuw, P. J., Ruts, I., & Tukey, J. W. (1999). The bagplot: a bivariate boxplot. \emph{The American Statistician}, \strong{53(4)}, 382-387. \doi{10.1080/00031305.1999.10474494}
#'
#' Kruppa, J., & Jung, K. (2017). Automated multigroup outlier identification in molecular high-throughput data using bagplots and gemplots. \emph{BMC bioinformatics}, \strong{18(1)}, 1-10. \url{https://link.springer.com/article/10.1186/s12859-017-1645-5}
#' @author Jochen Kruppa, Klaus Jung
#' @importFrom rgl material3d bg3d points3d text3d spheres3d axes3d
#' @export
#' @examples
#' ## Attention: calculation is currently time-consuming.
#' ## Remove #-Symbols to run examples
#'
#' ## Two 3-dimensional example data sets D1 and D2
#'# n <- 200
#'# x1 <- rnorm(n, 0, 1)
#'# y1 <- rnorm(n, 0, 1)
#'# z1 <- rnorm(n, 0, 1)
#'# D1 <- data.frame(cbind(x1, y1, z1))
#'# x2 <- rnorm(n, 1, 1)
#'# y2 <- rnorm(n, 1, 1)
#'# z2 <- rnorm(n, 1, 1)
#'# D2 <- data.frame(cbind(x2, y2, z2))
#'# colnames(D1) <- c("x", "y", "z")
#'# colnames(D2) <- c("x", "y", "z")
#'
#' ## Placing outliers in D1 and D2
#'# D1[17,] = c(4, 5, 6)
#'# D2[99,] = -c(3, 4, 5)
#'
#' ## Grid size and graphic parameters
#'# grid.size <- 20
#'# red <- rgb(200, 100, 100, alpha = 100, maxColorValue = 255)
#'# blue <- rgb(100, 100, 200, alpha = 100, maxColorValue = 255)
#'# yel <- rgb(255, 255, 102, alpha = 100, maxColorValue = 255)
#'# white <- rgb(255, 255, 255, alpha = 100, maxColorValue = 255)
#'# require(rgl)
#'# material3d(color=c(red, blue, yel, white),
#'# alpha=c(0.5, 0.5, 0.5, 0.5), smooth=FALSE, specular="black")
#'
#' ## Calculation and visualization of gemplot for D1
#'# G <- gridfun(D1, grid.size=20)
#'# G$H <- hldepth(D1, G, verbose=TRUE)
#'# dm <- depmed(G)
#'# B <- bag(D1, G)
#'# L <- loop(D1, B, dm=dm)
#'# bg3d(color = "gray39" )
#'# points3d(D1[L$outliers==0,1], D1[L$outliers==0,2], D1[L$outliers==0,3], col="green")
#'# text3d(D1[L$outliers==1,1], D1[L$outliers==1,2], D1[L$outliers==1,3],
#'# as.character(which(L$outliers==1)), col=yel)
#'# spheres3d(dm[1], dm[2], dm[3], col=yel, radius=0.1)
#'# material3d(1,alpha=0.4)
#'# gem(B$coords, B$hull, red)
#'# gem(L$coords.loop, L$hull.loop, red)
#'# axes3d(col="white")
#'
#' ## Calculation and visualization of gemplot for D2
#'# G <- gridfun(D2, grid.size=20)
#'# G$H <- hldepth(D2, G, verbose=TRUE)
#'# dm <- depmed(G)
#'# B <- bag(D2, G)
#'# L <- loop(D2, B, dm=dm)
#'# points3d(D2[L$outliers==0,1], D2[L$outliers==0,2], D2[L$outliers==0,3], col="green")
#'# text3d(D2[L$outliers==1,1], D2[L$outliers==1,2], D2[L$outliers==1,3],
#'# as.character(which(L$outliers==1)), col=yel)
#'# spheres3d(dm[1], dm[2], dm[3], col=yel, radius=0.1)
#'# gem(B$coords, B$hull, blue)
#'# gem(L$coords.loop, L$hull.loop, blue)
loop <- function(D, B, inflation = 3, dm) {
  # n = number of observations, d = dimensionality of the data.
  n <- dim(D)[1]
  d <- dim(D)[2]
  # In 3-d the depth median arrives as a plain vector; coerce to a 1 x 3
  # matrix so the arithmetic below is conformable.
  if (d == 3) dm <- matrix(dm, 1, 3)
  # Indices of the bag's hull vertices. (The original
  # sort(intersect(x, x)) is equivalent to sort(unique(x)).)
  index.F <- sort(unique(as.vector(B$hull)))
  FENCE <- B$coords[index.F, ]
  # Inflate the bag's hull points away from the depth median by `inflation`.
  MED.MAT <- t(matrix(dm, d, dim(FENCE)[1]))
  FENCE <- MED.MAT + inflation * (FENCE - MED.MAT)
  colnames(FENCE) <- colnames(D)
  convH <- convhulln(FENCE)
  # A data point is an outlier iff appending it to the fence points changes
  # the convex hull, i.e. its (new) index appears among the hull vertices.
  outliers <- rep(0, n)
  for (i in seq_len(n)) {
    Z <- rbind(FENCE, D[i, ])
    convH.Z <- convhulln(Z)
    if (!is.na(match(dim(FENCE)[1] + 1, convH.Z))) {
      outliers[i] <- 1
    }
  }
  # The loop (outer gemstone) is the convex hull of the non-outlying points.
  LOOP <- D[which(outliers == 0), ]
  convH2 <- convhulln(LOOP)
  return(list(coords.loop = LOOP, hull.loop = convH2, coords.fence = FENCE,
              hull.fence = convH, outliers = outliers))
}
|
/R/loop.R
|
no_license
|
cran/RepeatedHighDim
|
R
| false
| false
| 5,683
|
r
|
#' Calculates the fence and the loop of a gemplot (i.e. the outer gemstone).
#'
#' The fence inflates the bag relative to the depth median by the
#' factor inflation. Data points outside the bag and inside the fence
#' form the loop or outer gemstone. Data points
#' outside the fence are marked as outliers. In the case of a
#' 3-dimensional data set, the loop can be visualized by an outer
#' gemstone around the inner gemstone or bag.
#' @title Calculates the fence and the loop
#' @param D Data set with rows representing the individuals and
#' columns representing the features. In the case of three
#' dimensions, the colnames of D must be c("x", "y", "z").
#' @param B List containing the information about the coordinates of
#' the bag and the convex hull that forms the bag (determined by
#' \code{\link{bag}}).
#' @param inflation A numeric value > 0 that specifies the inflation
#' factor of the bag relative to the median (default = 3).
#' @param dm The coordinates of the depth median as produced by
#' \code{\link{depmed}}.
#' @return A list containing the following elements:
#' \describe{
#' \item{\emph{coords.loop}}{Coordinates of the data points that are inside the convex hull around the loop.}
#' \item{\emph{hull.loop}}{A data matrix that contains the indices of the margin data points of the loop that cover the convex hull by triangles. Each row represents one triangle. The indices correspond to the rows of coords.loop.}
#' \item{\emph{coords.fence}}{Coordinates of the grid points that are inside the fence but outside the bag.}
#' \item{\emph{hull.fence}}{A data matrix that contains the indices of the margin grid points of the fence that cover the convex hull around the fence by triangles. Each row represents one triangle. The indices correspond to the rows of coords.fence.}
#' \item{\emph{outliers}}{A vector of length equal to the sample size. Data points that are inside the fence are labelled by 0 and values outside the fence (i.e. outliers) are labelled by 1.}
#' }
#'
#' @references
#' Rousseeuw, P. J., Ruts, I., & Tukey, J. W. (1999). The bagplot: a bivariate boxplot. \emph{The American Statistician}, \strong{53(4)}, 382-387. \doi{10.1080/00031305.1999.10474494}
#'
#' Kruppa, J., & Jung, K. (2017). Automated multigroup outlier identification in molecular high-throughput data using bagplots and gemplots. \emph{BMC bioinformatics}, \strong{18(1)}, 1-10. \url{https://link.springer.com/article/10.1186/s12859-017-1645-5}
#' @author Jochen Kruppa, Klaus Jung
#' @importFrom rgl material3d bg3d points3d text3d spheres3d axes3d
#' @export
#' @examples
#' ## Attention: calculation is currently time-consuming.
#' ## Remove #-Symbols to run examples
#'
#' ## Two 3-dimensional example data sets D1 and D2
#'# n <- 200
#'# x1 <- rnorm(n, 0, 1)
#'# y1 <- rnorm(n, 0, 1)
#'# z1 <- rnorm(n, 0, 1)
#'# D1 <- data.frame(cbind(x1, y1, z1))
#'# x2 <- rnorm(n, 1, 1)
#'# y2 <- rnorm(n, 1, 1)
#'# z2 <- rnorm(n, 1, 1)
#'# D2 <- data.frame(cbind(x2, y2, z2))
#'# colnames(D1) <- c("x", "y", "z")
#'# colnames(D2) <- c("x", "y", "z")
#'
#' ## Placing outliers in D1 and D2
#'# D1[17,] = c(4, 5, 6)
#'# D2[99,] = -c(3, 4, 5)
#'
#' ## Grid size and graphic parameters
#'# grid.size <- 20
#'# red <- rgb(200, 100, 100, alpha = 100, maxColorValue = 255)
#'# blue <- rgb(100, 100, 200, alpha = 100, maxColorValue = 255)
#'# yel <- rgb(255, 255, 102, alpha = 100, maxColorValue = 255)
#'# white <- rgb(255, 255, 255, alpha = 100, maxColorValue = 255)
#'# require(rgl)
#'# material3d(color=c(red, blue, yel, white),
#'# alpha=c(0.5, 0.5, 0.5, 0.5), smooth=FALSE, specular="black")
#'
#' ## Calculation and visualization of gemplot for D1
#'# G <- gridfun(D1, grid.size=20)
#'# G$H <- hldepth(D1, G, verbose=TRUE)
#'# dm <- depmed(G)
#'# B <- bag(D1, G)
#'# L <- loop(D1, B, dm=dm)
#'# bg3d(color = "gray39" )
#'# points3d(D1[L$outliers==0,1], D1[L$outliers==0,2], D1[L$outliers==0,3], col="green")
#'# text3d(D1[L$outliers==1,1], D1[L$outliers==1,2], D1[L$outliers==1,3],
#'# as.character(which(L$outliers==1)), col=yel)
#'# spheres3d(dm[1], dm[2], dm[3], col=yel, radius=0.1)
#'# material3d(1,alpha=0.4)
#'# gem(B$coords, B$hull, red)
#'# gem(L$coords.loop, L$hull.loop, red)
#'# axes3d(col="white")
#'
#' ## Calculation and visualization of gemplot for D2
#'# G <- gridfun(D2, grid.size=20)
#'# G$H <- hldepth(D2, G, verbose=TRUE)
#'# dm <- depmed(G)
#'# B <- bag(D2, G)
#'# L <- loop(D2, B, dm=dm)
#'# points3d(D2[L$outliers==0,1], D2[L$outliers==0,2], D2[L$outliers==0,3], col="green")
#'# text3d(D2[L$outliers==1,1], D2[L$outliers==1,2], D2[L$outliers==1,3],
#'# as.character(which(L$outliers==1)), col=yel)
#'# spheres3d(dm[1], dm[2], dm[3], col=yel, radius=0.1)
#'# gem(B$coords, B$hull, blue)
#'# gem(L$coords.loop, L$hull.loop, blue)
loop <- function(D, B, inflation = 3, dm) {
  # n = number of observations, d = dimensionality of the data.
  n <- dim(D)[1]
  d <- dim(D)[2]
  # In 3-d the depth median arrives as a plain vector; coerce to a 1 x 3
  # matrix so the arithmetic below is conformable.
  if (d == 3) dm <- matrix(dm, 1, 3)
  # Indices of the bag's hull vertices. (The original
  # sort(intersect(x, x)) is equivalent to sort(unique(x)).)
  index.F <- sort(unique(as.vector(B$hull)))
  FENCE <- B$coords[index.F, ]
  # Inflate the bag's hull points away from the depth median by `inflation`.
  MED.MAT <- t(matrix(dm, d, dim(FENCE)[1]))
  FENCE <- MED.MAT + inflation * (FENCE - MED.MAT)
  colnames(FENCE) <- colnames(D)
  convH <- convhulln(FENCE)
  # A data point is an outlier iff appending it to the fence points changes
  # the convex hull, i.e. its (new) index appears among the hull vertices.
  outliers <- rep(0, n)
  for (i in seq_len(n)) {
    Z <- rbind(FENCE, D[i, ])
    convH.Z <- convhulln(Z)
    if (!is.na(match(dim(FENCE)[1] + 1, convH.Z))) {
      outliers[i] <- 1
    }
  }
  # The loop (outer gemstone) is the convex hull of the non-outlying points.
  LOOP <- D[which(outliers == 0), ]
  convH2 <- convhulln(LOOP)
  return(list(coords.loop = LOOP, hull.loop = convH2, coords.fence = FENCE,
              hull.fence = convH, outliers = outliers))
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/get_object.R
\name{get_object}
\alias{get_object}
\title{Get a DataONE object}
\usage{
get_object(data_pid, as = "parsed", ...)
}
\arguments{
\item{data_pid}{(character) The data or metadata object PID}
\item{as}{desired type of output: raw, text or parsed. content attempts to automatically figure out which one is most appropriate, based on the content-type. (based on \code{httr::content()})}
\item{...}{pass arguments to read.csv}
}
\description{
This function downloads a DataONE data or metadata object into the R environment
}
\examples{
\dontrun{
data <- get_object("urn:uuid:a81f49db-5841-4095-aee2-b0cad7a35cc0")
meta <- get_object("doi:10.18739/A2PC2T79B")
}
}
|
/man/get_object.Rd
|
no_license
|
isteves/dataimport
|
R
| false
| true
| 758
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/get_object.R
\name{get_object}
\alias{get_object}
\title{Get a DataONE object}
\usage{
get_object(data_pid, as = "parsed", ...)
}
\arguments{
\item{data_pid}{(character) The data or metadata object PID}
\item{as}{desired type of output: raw, text or parsed. content attempts to automatically figure out which one is most appropriate, based on the content-type. (based on \code{httr::content()})}
\item{...}{pass arguments to read.csv}
}
\description{
This function downloads a DataONE data or metadata object into the R environment
}
\examples{
\dontrun{
data <- get_object("urn:uuid:a81f49db-5841-4095-aee2-b0cad7a35cc0")
meta <- get_object("doi:10.18739/A2PC2T79B")
}
}
|
\name{duo_clustering_all_parameter_settings_v2}
\alias{duo_clustering_all_parameter_settings_v2}
\title{
Hyperparameter values
}
\arguments{
\item{metadata}{Logical, whether only metadata should be returned}
}
\description{
Hyperparameter values for all clustering algorithms and data sets in
v2 of Duò et al (F1000Research 2018)
}
\details{
List of hyperparameter values used for all clustering algorithms and
data sets in v2 of Duò et al (F1000Research 2018).
}
\usage{
duo_clustering_all_parameter_settings_v2(metadata = FALSE)
}
\examples{
duo_clustering_all_parameter_settings_v2()
}
\value{Returns a \code{list} with hyperparameter values for all data sets and
methods.}
\references{
Duò, A., Robinson, M.D., and Soneson, C. (2018).
\emph{A systematic performance evaluation of clustering methods for
single-cell RNA-seq data.}
F1000Research, 7:1141.
}
\keyword{datasets}
|
/man/duo_clustering_all_parameter_settings_v2.Rd
|
no_license
|
chanwkimlab/DuoClustering2018
|
R
| false
| false
| 882
|
rd
|
\name{duo_clustering_all_parameter_settings_v2}
\alias{duo_clustering_all_parameter_settings_v2}
\title{
Hyperparameter values
}
\arguments{
\item{metadata}{Logical, whether only metadata should be returned}
}
\description{
Hyperparameter values for all clustering algorithms and data sets in
v2 of Duò et al (F1000Research 2018)
}
\details{
List of hyperparameter values used for all clustering algorithms and
data sets in v2 of Duò et al (F1000Research 2018).
}
\usage{
duo_clustering_all_parameter_settings_v2(metadata = FALSE)
}
\examples{
duo_clustering_all_parameter_settings_v2()
}
\value{Returns a \code{list} with hyperparameter values for all data sets and
methods.}
\references{
Duò, A., Robinson, M.D., and Soneson, C. (2018).
\emph{A systematic performance evaluation of clustering methods for
single-cell RNA-seq data.}
F1000Research, 7:1141.
}
\keyword{datasets}
|
# ============================
# = Simulate Trophic Cascade =
# ============================
# Script: runs four harvest-rate (qE) experiments through the food-web
# simulator FWsim.wrap() (defined in the sourced simFuns directory) and
# saves all inputs/outputs to a single .RData file.
# Fixed RNG seed so the stochastic simulations are reproducible.
set.seed(2)
# =================
# = Steve's Notes =
# =================
# Treat-and-Halt using the foodweb model, rolling window statistics, and quickest detection
# Foodweb model for simulating transients, adapted from FS6_trans0.r
# This version has continuous reproduction and mortality for piscivores, not pulsed
# Simulation of the full food web for investigating the squeal
# employed in the PLoS paper
# Noise is added to F, H and P
# SRC 12 Nov 2012
# =================================
# = Load Parameters and Functions =
# =================================
# NOTE(review): user-specific absolute paths below — consider a relative
# path or a project-root helper so the script runs on other machines.
simFuns.location <- "~/Documents/School&Work/pinskyPost/edmShift/R/functions/simFuns"
invisible(sapply(paste(simFuns.location, list.files(simFuns.location), sep="/"), source, .GlobalEnv))
# ===========================
# = Load Plotting Functions =
# ===========================
figFuns.location <- "~/Documents/School&Work/pinskyPost/edmShift/R/functions/figFuns"
invisible(sapply(paste(figFuns.location, list.files(figFuns.location), sep="/"), source, .GlobalEnv))
# ==============================
# = Experiment #1: Constant qE =
# ==============================
# Set up options and result array
exp1.steps <- 200 # number of steps for each qE in experiment 1
# exp1.qE <- c(1, 1.2, 1.4, 1.6, 1.7, 1.8, 1.9) # qE values for Exp #1; from Fig S2.1 Carpenter et al. 2008 Eco Lett
exp1.qE <- c(0, 0.7, 1.19, 1.3, 1.5, 1.7, 2.0) # qE values for Exp #1
# Result array: steps x 6 state columns x one slice per qE value.
fw.exp1 <- array(data=NA, dim=c(exp1.steps, 6, length(exp1.qE)), dimnames=list(NULL, c("qE","At","Ft","Jt","Ht","Pt"),NULL))
# Run Experiment 1
# NOTE(review): seq_along(exp1.qE) is the safer idiom than 1:length(...)
# (identical here since exp1.qE is non-empty).
for(i in 1:length(exp1.qE)){
fw.exp1[,,i] <- FWsim.wrap(qE=exp1.qE[i], step=exp1.steps, mthd="constant")
}
# ===============================================
# = Experiment #2: Waver far from tipping point =
# ===============================================
# Set up Exp 2
exp2.steps <- 200 # number of steps for each qE
exp2.qE <- c(0.7, 1.19, 0.7, 1.19)
# NOTE(review): this preallocated array is immediately replaced by the
# FWsim.wrap() result below (same for fw.exp3 and fw.exp4) — dead code?
fw.exp2 <- array(data=NA, dim=c(exp2.steps, 6, length(exp2.qE)), dimnames=list(NULL, c("qE","At","Ft","Jt","Ht","Pt"),NULL))
# Run Experiment 2
fw.exp2 <- FWsim.wrap(qE=exp2.qE, step=exp2.steps, mthd="linear")
# ===================================
# = Experiment #3: Gradual Increase =
# ===================================
# Set up Exp 3
exp3.steps <- 300 # number of steps for each qE
exp3.qE <- c(1.18, 1.72)
fw.exp3 <- array(data=NA, dim=c(exp3.steps, 6, 1), dimnames=list(NULL, c("qE","At","Ft","Jt","Ht","Pt"),NULL))
# Run Experiment 3
fw.exp3 <- FWsim.wrap(qE=exp3.qE, step=exp3.steps, mthd="linear")
# ===================================
# = Experiment 4: Waver all over qE =
# ===================================
# Set up Exp 4
exp4.steps <- 400
# qE alternates +0.1 / -0.1 around each of the base values 0.9, 1.2, 1.5, 1.8.
exp4.qE <- c(rep(c(0.9, 1.2, 1.5, 1.8), each=2)+rep(c(0.1, -0.1),4))
fw.exp4 <- array(data=NA, dim=c(exp4.steps, 6, 1), dimnames=list(NULL, c("qE","At","Ft","Jt","Ht","Pt"),NULL))
# Run Experiment 4
fw.exp4 <- FWsim.wrap(qE=exp4.qE, step=exp4.steps, mthd="linear")
# Persist all experiment parameters and results in one .RData file.
# NOTE(review): hard-coded absolute output path — confirm/parameterise.
save(
exp1.steps, exp1.qE, fw.exp1,
exp2.steps, exp2.qE, fw.exp2,
exp3.steps, exp3.qE, fw.exp3,
exp4.steps, exp4.qE, fw.exp4,
file="/Users/Battrd/Documents/School&Work/pinskyPost/edmShift/results/FWsim/FWsim.RData"
)
|
/R/simulate/FWsim.R
|
no_license
|
rBatt/edmShift
|
R
| false
| false
| 3,272
|
r
|
# ============================
# = Simulate Trophic Cascade =
# ============================
# Script: runs four harvest-rate (qE) experiments through the food-web
# simulator FWsim.wrap() (defined in the sourced simFuns directory) and
# saves all inputs/outputs to a single .RData file.
# Fixed RNG seed so the stochastic simulations are reproducible.
set.seed(2)
# =================
# = Steve's Notes =
# =================
# Treat-and-Halt using the foodweb model, rolling window statistics, and quickest detection
# Foodweb model for simulating transients, adapted from FS6_trans0.r
# This version has continuous reproduction and mortality for piscivores, not pulsed
# Simulation of the full food web for investigating the squeal
# employed in the PLoS paper
# Noise is added to F, H and P
# SRC 12 Nov 2012
# =================================
# = Load Parameters and Functions =
# =================================
# NOTE(review): user-specific absolute paths below — consider a relative
# path or a project-root helper so the script runs on other machines.
simFuns.location <- "~/Documents/School&Work/pinskyPost/edmShift/R/functions/simFuns"
invisible(sapply(paste(simFuns.location, list.files(simFuns.location), sep="/"), source, .GlobalEnv))
# ===========================
# = Load Plotting Functions =
# ===========================
figFuns.location <- "~/Documents/School&Work/pinskyPost/edmShift/R/functions/figFuns"
invisible(sapply(paste(figFuns.location, list.files(figFuns.location), sep="/"), source, .GlobalEnv))
# ==============================
# = Experiment #1: Constant qE =
# ==============================
# Set up options and result array
exp1.steps <- 200 # number of steps for each qE in experiment 1
# exp1.qE <- c(1, 1.2, 1.4, 1.6, 1.7, 1.8, 1.9) # qE values for Exp #1; from Fig S2.1 Carpenter et al. 2008 Eco Lett
exp1.qE <- c(0, 0.7, 1.19, 1.3, 1.5, 1.7, 2.0) # qE values for Exp #1
# Result array: steps x 6 state columns x one slice per qE value.
fw.exp1 <- array(data=NA, dim=c(exp1.steps, 6, length(exp1.qE)), dimnames=list(NULL, c("qE","At","Ft","Jt","Ht","Pt"),NULL))
# Run Experiment 1
# NOTE(review): seq_along(exp1.qE) is the safer idiom than 1:length(...)
# (identical here since exp1.qE is non-empty).
for(i in 1:length(exp1.qE)){
fw.exp1[,,i] <- FWsim.wrap(qE=exp1.qE[i], step=exp1.steps, mthd="constant")
}
# ===============================================
# = Experiment #2: Waver far from tipping point =
# ===============================================
# Set up Exp 2
exp2.steps <- 200 # number of steps for each qE
exp2.qE <- c(0.7, 1.19, 0.7, 1.19)
# NOTE(review): this preallocated array is immediately replaced by the
# FWsim.wrap() result below (same for fw.exp3 and fw.exp4) — dead code?
fw.exp2 <- array(data=NA, dim=c(exp2.steps, 6, length(exp2.qE)), dimnames=list(NULL, c("qE","At","Ft","Jt","Ht","Pt"),NULL))
# Run Experiment 2
fw.exp2 <- FWsim.wrap(qE=exp2.qE, step=exp2.steps, mthd="linear")
# ===================================
# = Experiment #3: Gradual Increase =
# ===================================
# Set up Exp 3
exp3.steps <- 300 # number of steps for each qE
exp3.qE <- c(1.18, 1.72)
fw.exp3 <- array(data=NA, dim=c(exp3.steps, 6, 1), dimnames=list(NULL, c("qE","At","Ft","Jt","Ht","Pt"),NULL))
# Run Experiment 3
fw.exp3 <- FWsim.wrap(qE=exp3.qE, step=exp3.steps, mthd="linear")
# ===================================
# = Experiment 4: Waver all over qE =
# ===================================
# Set up Exp 4
exp4.steps <- 400
# qE alternates +0.1 / -0.1 around each of the base values 0.9, 1.2, 1.5, 1.8.
exp4.qE <- c(rep(c(0.9, 1.2, 1.5, 1.8), each=2)+rep(c(0.1, -0.1),4))
fw.exp4 <- array(data=NA, dim=c(exp4.steps, 6, 1), dimnames=list(NULL, c("qE","At","Ft","Jt","Ht","Pt"),NULL))
# Run Experiment 4
fw.exp4 <- FWsim.wrap(qE=exp4.qE, step=exp4.steps, mthd="linear")
# Persist all experiment parameters and results in one .RData file.
# NOTE(review): hard-coded absolute output path — confirm/parameterise.
save(
exp1.steps, exp1.qE, fw.exp1,
exp2.steps, exp2.qE, fw.exp2,
exp3.steps, exp3.qE, fw.exp3,
exp4.steps, exp4.qE, fw.exp4,
file="/Users/Battrd/Documents/School&Work/pinskyPost/edmShift/results/FWsim/FWsim.RData"
)
|
?qplot
library(ggplot2)
# NOTE(review): assumes a data frame `stats` (with Internet.users, Income.Group,
# Birth.rate columns) is already loaded in the session -- confirm.
qplot(data=stats, x=Internet.users)
# creating a very simple bar chart
qplot(data=stats, x = Income.Group, y = Birth.rate)
# here I added point size and colour; wrapping the values in I() tells R to use
# them literally (blue, size 3) -- with plain colour = "blue" ggplot would map
# the string as data and the points come out pink
qplot(data=stats, x = Income.Group, y = Birth.rate , size = I(3) ,
colour = I("blue"))
# to draw a different chart type, add geom = and the chart type you want
qplot(data=stats, x = Income.Group, y = Birth.rate , geom = "boxplot")
|
/R Programming Basic and Intermediate/Introduction to qplot.R
|
no_license
|
guilhermeaugusto9/R-Userful-Scripts
|
R
| false
| false
| 673
|
r
|
?qplot
library(ggplot2)
# NOTE(review): assumes a data frame `stats` (with Internet.users, Income.Group,
# Birth.rate columns) is already loaded in the session -- confirm.
qplot(data=stats, x=Internet.users)
# creating a very simple bar chart
qplot(data=stats, x = Income.Group, y = Birth.rate)
# here I added point size and colour; wrapping the values in I() tells R to use
# them literally (blue, size 3) -- with plain colour = "blue" ggplot would map
# the string as data and the points come out pink
qplot(data=stats, x = Income.Group, y = Birth.rate , size = I(3) ,
colour = I("blue"))
# to draw a different chart type, add geom = and the chart type you want
qplot(data=stats, x = Income.Group, y = Birth.rate , geom = "boxplot")
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/GOF.control.R
\name{get.label}
\alias{get.label}
\title{Labels for known NONMEM variables}
\usage{
get.label(x, trans = NULL)
}
\arguments{
\item{x}{column to get label for}
\item{trans}{transformation}
}
\description{
get.label matches known NONMEM variables to the GOF-dictionary and returns the matched label.
Unless trans is NULL, the label is modified to 'f(matched label)'
}
\seealso{
[get.GOF.dictionary()], [set.GOF.dictionary()], and [default.GOF.dictionary()].
}
|
/man/get.label.Rd
|
no_license
|
cran/nonmem2R
|
R
| false
| true
| 571
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/GOF.control.R
\name{get.label}
\alias{get.label}
\title{Labels for known NONMEM variables}
\usage{
get.label(x, trans = NULL)
}
\arguments{
\item{x}{column to get label for}
\item{trans}{transformation}
}
\description{
get.label matches known NONMEM variables to the GOF-dictionary and returns the matched label.
Unless trans is NULL, the label is modified to 'f(matched label)'
}
\seealso{
[get.GOF.dictionary()], [set.GOF.dictionary()], and [default.GOF.dictionary()].
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data.R
\name{example_biotracer_data}
\alias{example_biotracer_data}
\title{Example biotracer data}
\format{A table with 15 rows and 3 columns. Each row is an isotopic sample from one individual.
The columns are:
\describe{
\item{group}{the trophic group the individual belonged to}
\item{d13C}{the d13C measurement made on that individual}
\item{d15N}{the d15N measurement made on that individual}
}}
\description{
This is an artificial and simple biotracer dataset, more specifically stable isotope analyses,
made to illustrate the package on a simple case. All tables whose name start by "example" are describing
different data from the same trophic groups.
}
|
/man/example_biotracer_data.Rd
|
no_license
|
jimjunker1/EcoDiet
|
R
| false
| true
| 748
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data.R
\name{example_biotracer_data}
\alias{example_biotracer_data}
\title{Example biotracer data}
\format{A table with 15 rows and 3 columns. Each row is an isotopic sample from one individual.
The columns are:
\describe{
\item{group}{the trophic group the individual belonged to}
\item{d13C}{the d13C measurement made on that individual}
\item{d15N}{the d15N measurement made on that individual}
}}
\description{
This is an artificial and simple biotracer dataset, more specifically stable isotope analyses,
made to illustrate the package on a simple case. All tables whose name start by "example" are describing
different data from the same trophic groups.
}
|
# The code used to clean the data
# Cleans the `movies100lesson10` data frame loaded from Lesson10SU.RData:
# expands the multi-genre "Genre" string into logical indicator columns,
# converts running time to minutes, strips $/%/comma characters from money and
# percent columns, parses dates, and computes how many days each film ran.
options(width = 120)
load("Lesson10SU.RData")
library(DT)
datatable(movies100lesson10)
## Your R code used to clean movies100lesson10
library(tibble)
# One logical indicator column per genre; "Genre" holds entries separated by "\n \n ".
for (i in c(1:dim(movies100lesson10)[1])){
name = unlist(strsplit(as.character(movies100lesson10[i,"Genre"]),"\n \n "))
# Use TRUE/FALSE, never T/F (T and F are ordinary variables and can be rebound).
movies100lesson10[i,name] <- TRUE
}
movies100lesson10[is.na(movies100lesson10)] <- FALSE
# Collapse Action/Adventure/Thriller into one indicator, inserted before "Action".
movies100lesson10 = movies100lesson10 %>% add_column(ActionAdventureThriller = ifelse((movies100lesson10$Adventure == TRUE | movies100lesson10$Action == TRUE | movies100lesson10$Thriller == TRUE),TRUE,FALSE), .before = "Action")
movies100lesson10 = subset(movies100lesson10,select= -c(Genre,Adventure,Action,Thriller))
# Convert running time to total minutes.
# NOTE(review): assumes Running.Time splits as c(hours, "hr", minutes, "min") -- confirm format.
movies100lesson10$Running.Time <- as.character(movies100lesson10$Running.Time)
for (i in c(1:dim(movies100lesson10)[1])){
t = unlist(strsplit(movies100lesson10[i,"Running.Time"], " "))
movies100lesson10[i,"Running.Time"] = as.numeric(t[1])*60 + as.numeric(t[3])
}
movies100lesson10$Running.Time <- as.numeric(movies100lesson10$Running.Time)
names(movies100lesson10)[8] = "PctOfTotal"
names(movies100lesson10)[17] = "SciFi"
# Strip currency/percent formatting and coerce to numeric.
movies100lesson10$Opening <- as.numeric(gsub('[$,]', '', movies100lesson10$Opening))
movies100lesson10$Gross <- as.numeric(gsub('[$,]', '', movies100lesson10$Gross))
movies100lesson10$intGross <- as.numeric(gsub('[$,]', '', movies100lesson10$intGross))
movies100lesson10$Budget <- as.numeric(gsub('[$,]', '', movies100lesson10$Budget))
movies100lesson10$Max.Th <- as.numeric(gsub('[$,]', '', movies100lesson10$Max.Th))
movies100lesson10$Open.Th <- as.numeric(gsub('[$,]', '', movies100lesson10$Open.Th))
movies100lesson10$PctOfTotal <- as.numeric(gsub('[%$,]', '', movies100lesson10$PctOfTotal))
# Parse "Mon DD, YYYY" dates; daysrun is a difftime in days.
movies100lesson10$OpenDate = as.Date(movies100lesson10$OpenDate, '%b %d, %Y')
movies100lesson10$CloseDate = as.Date(movies100lesson10$CloseDate, '%b %d, %Y')
movies100lesson10$daysrun = movies100lesson10$CloseDate-movies100lesson10$OpenDate
movies100lesson10 = subset(movies100lesson10,select = c(Rank,Release,Running.Time,mpaa,Gross,Opening,PctOfTotal,Max.Th,Open.Th,Distributor,intGross,Budget,OpenDate,CloseDate,ActionAdventureThriller,Drama,Comedy,SciFi,Family,Horror,Biography,daysrun))
# datatable result
library(DT)
datatable(movies100lesson10) ## modify this please
# summary result
summary(movies100lesson10) ## modify this please
# str result
str(movies100lesson10) ## modify this please
|
/DataCleaning/DataCleaning.R
|
no_license
|
ruiwenhe-10/R_practice
|
R
| false
| false
| 2,481
|
r
|
# The code used to clean the data
# Cleans the `movies100lesson10` data frame loaded from Lesson10SU.RData:
# expands the multi-genre "Genre" string into logical indicator columns,
# converts running time to minutes, strips $/%/comma characters from money and
# percent columns, parses dates, and computes how many days each film ran.
options(width = 120)
load("Lesson10SU.RData")
library(DT)
datatable(movies100lesson10)
## Your R code used to clean movies100lesson10
library(tibble)
# One logical indicator column per genre; "Genre" holds entries separated by "\n \n ".
for (i in c(1:dim(movies100lesson10)[1])){
name = unlist(strsplit(as.character(movies100lesson10[i,"Genre"]),"\n \n "))
# Use TRUE/FALSE, never T/F (T and F are ordinary variables and can be rebound).
movies100lesson10[i,name] <- TRUE
}
movies100lesson10[is.na(movies100lesson10)] <- FALSE
# Collapse Action/Adventure/Thriller into one indicator, inserted before "Action".
movies100lesson10 = movies100lesson10 %>% add_column(ActionAdventureThriller = ifelse((movies100lesson10$Adventure == TRUE | movies100lesson10$Action == TRUE | movies100lesson10$Thriller == TRUE),TRUE,FALSE), .before = "Action")
movies100lesson10 = subset(movies100lesson10,select= -c(Genre,Adventure,Action,Thriller))
# Convert running time to total minutes.
# NOTE(review): assumes Running.Time splits as c(hours, "hr", minutes, "min") -- confirm format.
movies100lesson10$Running.Time <- as.character(movies100lesson10$Running.Time)
for (i in c(1:dim(movies100lesson10)[1])){
t = unlist(strsplit(movies100lesson10[i,"Running.Time"], " "))
movies100lesson10[i,"Running.Time"] = as.numeric(t[1])*60 + as.numeric(t[3])
}
movies100lesson10$Running.Time <- as.numeric(movies100lesson10$Running.Time)
names(movies100lesson10)[8] = "PctOfTotal"
names(movies100lesson10)[17] = "SciFi"
# Strip currency/percent formatting and coerce to numeric.
movies100lesson10$Opening <- as.numeric(gsub('[$,]', '', movies100lesson10$Opening))
movies100lesson10$Gross <- as.numeric(gsub('[$,]', '', movies100lesson10$Gross))
movies100lesson10$intGross <- as.numeric(gsub('[$,]', '', movies100lesson10$intGross))
movies100lesson10$Budget <- as.numeric(gsub('[$,]', '', movies100lesson10$Budget))
movies100lesson10$Max.Th <- as.numeric(gsub('[$,]', '', movies100lesson10$Max.Th))
movies100lesson10$Open.Th <- as.numeric(gsub('[$,]', '', movies100lesson10$Open.Th))
movies100lesson10$PctOfTotal <- as.numeric(gsub('[%$,]', '', movies100lesson10$PctOfTotal))
# Parse "Mon DD, YYYY" dates; daysrun is a difftime in days.
movies100lesson10$OpenDate = as.Date(movies100lesson10$OpenDate, '%b %d, %Y')
movies100lesson10$CloseDate = as.Date(movies100lesson10$CloseDate, '%b %d, %Y')
movies100lesson10$daysrun = movies100lesson10$CloseDate-movies100lesson10$OpenDate
movies100lesson10 = subset(movies100lesson10,select = c(Rank,Release,Running.Time,mpaa,Gross,Opening,PctOfTotal,Max.Th,Open.Th,Distributor,intGross,Budget,OpenDate,CloseDate,ActionAdventureThriller,Drama,Comedy,SciFi,Family,Horror,Biography,daysrun))
# datatable result
library(DT)
datatable(movies100lesson10) ## modify this please
# summary result
summary(movies100lesson10) ## modify this please
# str result
str(movies100lesson10) ## modify this please
|
# Rank hospitals in every state by 30-day death rate for a given outcome.
#
# Args:
#   outcome: one of "heart attack", "heart failure", "pneumonia".
#   num: rank to return per state -- "best", "worst", or a numeric rank
#        (NA is returned for states with fewer ranked hospitals than `num`).
#
# Returns:
#   A data frame with columns `hospital` and `state`, one row per state.
rankall <- function(outcome, num = "best") {
  ## Read outcome data
  data <- read.csv("outcome-of-care-measures.csv", colClasses = "character")
  # Keep columns only from Hospital.Name, State, Heart.Attack, Heart.Failure and Pneumonia
  data <- data[, c(2, 7, 11, 17, 23)]
  ## Check that outcome and rank are valid
  valid_outcomes <- c("heart attack", "heart failure", "pneumonia")
  if (!outcome %in% valid_outcomes) {
    stop("invalid outcome")
  }
  # is.character() is the robust test; class(x) == "character" breaks for
  # objects carrying more than one class.
  if (is.character(num)) {
    if (!(num == "best" || num == "worst")) {
      stop("invalid rank")
    }
  }
  ## For each state, find the hospital of the given rank
  ## Return a data frame with the hospital names and the (abbreviated) state name
  # Keep Hospital.Name, State, and the death-rate column for the chosen outcome.
  if (outcome == "heart attack") {
    data = data[, c(1, 2, 3)]
  } else if (outcome == "heart failure") {
    data = data[, c(1, 2, 4)]
  } else if (outcome == "pneumonia") {
    data = data[, c(1, 2, 5)]
  }
  names(data)[3] = "DeathRate"
  # Non-numeric entries ("Not Available") become NA; warnings suppressed on purpose.
  data[, 3] = suppressWarnings(as.numeric(data[, 3]))
  # Remove NA rows
  data = data[!is.na(data$DeathRate), ]
  split_data = split(data, data$State)
  new = lapply(split_data, function(x, num) {
    # Order by DeathRate, breaking ties alphabetically by hospital name.
    x = x[order(x$DeathRate, x$Hospital.Name), ]
    # Return the hospital at the requested rank.
    if (is.character(num)) {
      if (num == "best") {
        return(x$Hospital.Name[1])
      }
      else if (num == "worst") {
        return(x$Hospital.Name[nrow(x)])
      }
    }
    else {
      # Numeric rank; out-of-range indices yield NA for that state.
      return(x$Hospital.Name[num])
    }
  }, num)
  # Return data frame
  return(data.frame(hospital = unlist(new), state = names(new)))
}
|
/r-programming/ProgrammingAssignment3/rankall.R
|
no_license
|
ngoharry19/datasciencecoursera
|
R
| false
| false
| 1,780
|
r
|
# Rank hospitals in every state by 30-day death rate for a given outcome.
#
# Args:
#   outcome: one of "heart attack", "heart failure", "pneumonia".
#   num: rank to return per state -- "best", "worst", or a numeric rank
#        (NA is returned for states with fewer ranked hospitals than `num`).
#
# Returns:
#   A data frame with columns `hospital` and `state`, one row per state.
rankall <- function(outcome, num = "best") {
  ## Read outcome data
  data <- read.csv("outcome-of-care-measures.csv", colClasses = "character")
  # Keep columns only from Hospital.Name, State, Heart.Attack, Heart.Failure and Pneumonia
  data <- data[, c(2, 7, 11, 17, 23)]
  ## Check that outcome and rank are valid
  valid_outcomes <- c("heart attack", "heart failure", "pneumonia")
  if (!outcome %in% valid_outcomes) {
    stop("invalid outcome")
  }
  # is.character() is the robust test; class(x) == "character" breaks for
  # objects carrying more than one class.
  if (is.character(num)) {
    if (!(num == "best" || num == "worst")) {
      stop("invalid rank")
    }
  }
  ## For each state, find the hospital of the given rank
  ## Return a data frame with the hospital names and the (abbreviated) state name
  # Keep Hospital.Name, State, and the death-rate column for the chosen outcome.
  if (outcome == "heart attack") {
    data = data[, c(1, 2, 3)]
  } else if (outcome == "heart failure") {
    data = data[, c(1, 2, 4)]
  } else if (outcome == "pneumonia") {
    data = data[, c(1, 2, 5)]
  }
  names(data)[3] = "DeathRate"
  # Non-numeric entries ("Not Available") become NA; warnings suppressed on purpose.
  data[, 3] = suppressWarnings(as.numeric(data[, 3]))
  # Remove NA rows
  data = data[!is.na(data$DeathRate), ]
  split_data = split(data, data$State)
  new = lapply(split_data, function(x, num) {
    # Order by DeathRate, breaking ties alphabetically by hospital name.
    x = x[order(x$DeathRate, x$Hospital.Name), ]
    # Return the hospital at the requested rank.
    if (is.character(num)) {
      if (num == "best") {
        return(x$Hospital.Name[1])
      }
      else if (num == "worst") {
        return(x$Hospital.Name[nrow(x)])
      }
    }
    else {
      # Numeric rank; out-of-range indices yield NA for that state.
      return(x$Hospital.Name[num])
    }
  }, num)
  # Return data frame
  return(data.frame(hospital = unlist(new), state = names(new)))
}
|
# Regression tests for tfplot: time-series file round-trip (tsWrite/tsScan),
# the lag operator tfL(), and annualizedGrowth(). Each check stop()s on failure.
# NOTE(review): require() is used for loading; library() would fail loudly if a
# package is missing instead of returning FALSE -- confirm this is intentional.
require("stats")
require("tfplot")
Sys.info()
# Round-trip a 2-column quarterly series through a temp file; tolerance 1e-10.
tmp <- tempfile()
z <- ts(matrix(100 + rnorm(200),100,2), start=c(1991,1), frequency=4)
tsWrite(z, file=tmp)
zz <- tsScan(tmp, nseries=2)
file.remove(tmp)
cat("max difference ", max(abs(z - zz)) )
if (max(abs(z - zz)) > 1e-10) stop("file write and read comparison failed.")
#### tfL ####
# tfL(x) lags by one period, so x - tfL(x) over 1:5 should be all ones.
if ( !all(1 == (ts(1:5) - tfL(ts(1:5)))))
stop("default test of tfL for ts failed.")
if ( !all(1 == (as.ts(1:5) - tfL((1:5)))))
stop("default test of tfL for non-ts vector failed.")
# p = 2 lags by two periods, giving a constant difference of 2 over 1:5.
if ( !all(2 == (ts(1:5) - tfL(ts(1:5), p= 2))))
stop("2 period lag test of tfL failed.")
z <- ts(1:10, start=c(1992,1), frequency=4)
if ( !all(1 == (z - tfL(z)))) stop("frequency=4 test of tfL failed.")
# Multivariate case: lagging should act column-wise on a 2-column ts matrix.
z <- ts(matrix(1:10,5,2), start=c(1992,1), frequency=4)
seriesNames(z) <- c("One", "Two")
if ( !all(1 == (z - tfL(z)))) stop("matrix test of tfL failed.")
#### annualizedGrowth ####
# Floating-point comparisons below use an absolute tolerance of 1e-14.
fuzz <- 1e-14
if ( !all(fuzz > (100/(1:4) - annualizedGrowth(ts(1:5)))))
stop("default test of annualizedGrowth for ts failed.")
#if ( !all(fuzz > (100/as.ts(1:4) - annualizedGrowth((1:5)))))
#   stop("default test of annualizedGrowth for non-ts vector failed.")
# Quarterly data: growth is annualized by compounding the per-period ratio ^4.
z <- ts(1:5, start=c(1992,1), frequency=4)
if ( !all(fuzz > (100*((2:5 / 1:4)^4 -1) - annualizedGrowth(z)))) stop("frequency=4 test of annualizedGrowth failed.")
zz <- matrix(1:10,5,2)
z <- ts(zz, start=c(1992,1), frequency=4)
seriesNames(z) <- c("One", "Two")
if ( !all(fuzz > (100*((zz[2:5,] / zz[1:4,])^4 -1) - annualizedGrowth(z)))) stop("matrix test of annualizedGrowth failed.")
|
/tests/utils.R
|
no_license
|
cran/tfplot
|
R
| false
| false
| 1,614
|
r
|
# Regression tests for tfplot: time-series file round-trip (tsWrite/tsScan),
# the lag operator tfL(), and annualizedGrowth(). Each check stop()s on failure.
# NOTE(review): require() is used for loading; library() would fail loudly if a
# package is missing instead of returning FALSE -- confirm this is intentional.
require("stats")
require("tfplot")
Sys.info()
# Round-trip a 2-column quarterly series through a temp file; tolerance 1e-10.
tmp <- tempfile()
z <- ts(matrix(100 + rnorm(200),100,2), start=c(1991,1), frequency=4)
tsWrite(z, file=tmp)
zz <- tsScan(tmp, nseries=2)
file.remove(tmp)
cat("max difference ", max(abs(z - zz)) )
if (max(abs(z - zz)) > 1e-10) stop("file write and read comparison failed.")
#### tfL ####
# tfL(x) lags by one period, so x - tfL(x) over 1:5 should be all ones.
if ( !all(1 == (ts(1:5) - tfL(ts(1:5)))))
stop("default test of tfL for ts failed.")
if ( !all(1 == (as.ts(1:5) - tfL((1:5)))))
stop("default test of tfL for non-ts vector failed.")
# p = 2 lags by two periods, giving a constant difference of 2 over 1:5.
if ( !all(2 == (ts(1:5) - tfL(ts(1:5), p= 2))))
stop("2 period lag test of tfL failed.")
z <- ts(1:10, start=c(1992,1), frequency=4)
if ( !all(1 == (z - tfL(z)))) stop("frequency=4 test of tfL failed.")
# Multivariate case: lagging should act column-wise on a 2-column ts matrix.
z <- ts(matrix(1:10,5,2), start=c(1992,1), frequency=4)
seriesNames(z) <- c("One", "Two")
if ( !all(1 == (z - tfL(z)))) stop("matrix test of tfL failed.")
#### annualizedGrowth ####
# Floating-point comparisons below use an absolute tolerance of 1e-14.
fuzz <- 1e-14
if ( !all(fuzz > (100/(1:4) - annualizedGrowth(ts(1:5)))))
stop("default test of annualizedGrowth for ts failed.")
#if ( !all(fuzz > (100/as.ts(1:4) - annualizedGrowth((1:5)))))
#   stop("default test of annualizedGrowth for non-ts vector failed.")
# Quarterly data: growth is annualized by compounding the per-period ratio ^4.
z <- ts(1:5, start=c(1992,1), frequency=4)
if ( !all(fuzz > (100*((2:5 / 1:4)^4 -1) - annualizedGrowth(z)))) stop("frequency=4 test of annualizedGrowth failed.")
zz <- matrix(1:10,5,2)
z <- ts(zz, start=c(1992,1), frequency=4)
seriesNames(z) <- c("One", "Two")
if ( !all(fuzz > (100*((zz[2:5,] / zz[1:4,])^4 -1) - annualizedGrowth(z)))) stop("matrix test of annualizedGrowth failed.")
|
#' Genotype Data
#'
#' The data is a list with a SnpMatrix `genotypes` (2000 rows, 50 columns) and a data frame `map`.
#' It should be used in the \code{magpa} function to test multivariate correlation.
#'
#' \itemize{
#' \item genotypes
#' \item map
#'}
#'
#' @docType data
#' @keywords datasets
#' @name geno
#' @usage data(geno)
#' @format A list containing a formal class \code{'SnpMatrix'} object and a data.frame
#' @examples
#' data(geno)
#' snps <- geno$genotypes
NULL
|
/R/geno.R
|
no_license
|
changebio/MAGPA
|
R
| false
| false
| 479
|
r
|
#' Genotype Data
#'
#' The data is a list with a SnpMatrix `genotypes` (2000 rows, 50 columns) and a data frame `map`.
#' It should be used in the \code{magpa} function to test multivariate correlation.
#'
#' \itemize{
#' \item genotypes
#' \item map
#'}
#'
#' @docType data
#' @keywords datasets
#' @name geno
#' @usage data(geno)
#' @format A list containing a formal class \code{'SnpMatrix'} object and a data.frame
#' @examples
#' data(geno)
#' snps <- geno$genotypes
NULL
|
#' @title Compile a data.frame from screening results
#'
#' @description Summarizes compound/adduct screening hits across all
#' measurement files: per adduct and per compound, counts sample and blank
#' files matching at or above \code{cut_score}, tracks maximum scores and peak
#' counts, and estimates a sample/blank intensity ratio per pattern centroid.
#'
#' @param screened_listed List with one entry per compound/adduct pattern; each
#'   entry holds per-file lists of screening hits (scores, matched peaks, ...).
#' @param pattern Named list of theoretical isotope patterns; names have the
#'   form "compoundID_adduct".
#' @param at_RT Numeric vector of expected retention times, parallel to
#'   \code{pattern}.
#' @param profileList enviMass profile list; peak intensities are read from
#'   \code{profileList[[2]]}, file IDs from \code{profileList[[4]]}.
#' @param measurements_table Data frame of measurement files (column 1 = file
#'   ID, column 3 = file type, plus "Date" and "Time" columns).
#' @param compound_table Data frame of target compounds ("ID" column; compound
#'   name in column 2).
#' @param cut_score Minimum combined score for a hit to count as a match.
#'
#' @return List of three elements: [[1]] per-adduct results table, [[2]]
#'   per-compound summary table, [[3]] matrix of matched peak rows (or
#'   \code{numeric(0)} if no peaks were matched).
#'
#' @details enviMass workflow function
#'
get_screening_results <- function(
	screened_listed,
	pattern,
	at_RT,
	profileList,
	measurements_table,
	compound_table,
	cut_score
){
	n_pattern <- length(screened_listed)
	IDs <- as.numeric(measurements_table[, 1])
	num_samples_all <- rep(0, n_pattern)
	num_blanks_all <- rep(0, n_pattern)
	max_score_sample_all <- rep(0, n_pattern)
	max_score_blank_all <- rep(0, n_pattern)
	num_peaks_sample_all <- rep(0, n_pattern)
	num_peaks_blank_all <- rep(0, n_pattern)
	mean_int_ratio <- rep(0, n_pattern)
	# Preallocate to full length (these originally grew from length-1 vectors).
	IDed <- rep("", n_pattern)
	named <- rep("", n_pattern)
	adducted <- rep("", n_pattern)
	at_len <- 1
	# FIX: track the true allocated size; the original set max_len to 1000
	# although 10000 rows are allocated, triggering premature rbind growth.
	max_len <- 10000
	at_matrix <- matrix(nrow = 10000, ncol = 9, 0)
	min_ID <- (min(as.numeric(profileList[[4]])) - 1) # adjust to lowest file ID; otherwise too many empty list entries will be caused
	colnames(at_matrix) <- c("m/z", "log Intensity", "Measured RT", "m/z deviation [ppm]", "RT deviation within", "above_cutscore",
		"Time sequence", "Expected RT", "File ID")
	set_ID <- seq_len(length(measurements_table[, 1]))
	for(i in seq_len(n_pattern)){
		# Decompose the pattern name "compoundID_adduct".
		IDed[i] <- strsplit(names(pattern)[i], "_")[[1]][1]
		named[i] <- compound_table[compound_table[, "ID"] == strsplit(names(pattern)[i], "_")[[1]][1], 2]
		adducted[i] <- strsplit(names(pattern)[i], "_")[[1]][2]
		num_samples <- 0
		num_blanks <- 0
		max_score_sample <- 0
		max_score_blank <- 0
		num_peaks_sample <- 0
		num_peaks_blank <- 0
		# Per-centroid intensity collectors, split by sample vs. blank files.
		centro_sample <- list()
		centro_blank <- list()
		for(j in seq_len(length(pattern[[i]][, 1]))){
			centro_sample[[j]] <- numeric(0)
			centro_blank[[j]] <- numeric(0)
		}
		if(length(screened_listed[[i]]) > 0){
			for(m in seq_len(length(screened_listed[[i]]))){
				if(length(screened_listed[[i]][[m]]) > 0){
					at_ID <- set_ID[measurements_table[, 1] == screened_listed[[i]][[m]][[1]]$file_ID]
					is_sample <- (measurements_table[at_ID, 3] != "blank") # sample, calibration, doted; but not blank/blind
					if(!is_sample){ # could still be doted or blind or ...
						is_blank <- (measurements_table[at_ID, 3] == "blank")
					}else{
						is_blank <- FALSE
					}
					if(!is_sample & !is_blank){next}
					max_score <- 0
					max_num_peaks <- 0
					for(k in seq_len(length(screened_listed[[i]][[m]]))){
						if(length(screened_listed[[i]][[m]][[k]]) > 0){
							# Combine score_1 and, conditionally, score_2.
							local_score <- 0
							if(!is.na(screened_listed[[i]][[m]][[k]]$score_1)){
								local_score <- (local_score + screened_listed[[i]][[m]][[k]]$score_1)
							}
							if((local_score >= 1) || (is.na(screened_listed[[i]][[m]][[k]]$score_1))){
								if(!is.na(screened_listed[[i]][[m]][[k]]$score_2)){
									local_score <- (local_score + screened_listed[[i]][[m]][[k]]$score_2)
								}
							}
							if(local_score > max_score){
								max_score <- local_score
							}
							if(length(screened_listed[[i]][[m]][[k]]$Peaks[, 1]) > max_num_peaks){
								max_num_peaks <- length(screened_listed[[i]][[m]][[k]]$Peaks[, 1])
							}
							# Collect per-centroid intensities for hits above cutoff.
							if(is_sample & (local_score >= cut_score)){
								for(d in seq_len(length(screened_listed[[i]][[m]][[k]][[1]][, 1]))){
									centro_sample[[ screened_listed[[i]][[m]][[k]][[1]][d, 1] ]] <- c(
										centro_sample[[ screened_listed[[i]][[m]][[k]][[1]][d, 1] ]],
										profileList[[2]][screened_listed[[i]][[m]][[k]][[1]][d, 2], 2]
									)
								}
							}
							if(is_blank & (local_score >= cut_score)){
								for(d in seq_len(length(screened_listed[[i]][[m]][[k]][[1]][, 1]))){
									centro_blank[[ screened_listed[[i]][[m]][[k]][[1]][d, 1] ]] <- c(
										centro_blank[[ screened_listed[[i]][[m]][[k]][[1]][d, 1] ]],
										profileList[[2]][screened_listed[[i]][[m]][[k]][[1]][d, 2], 2]
									)
								}
							}
							# Append matched-peak rows; grow at_matrix in chunks of 10000.
							local_len <- length(screened_listed[[i]][[m]][[k]][[7]])
							if((at_len + local_len) > max_len){
								at_matrix <- rbind(
									at_matrix,
									matrix(nrow = 10000, ncol = 9, 0)
								)
								max_len <- (max_len + 10000)
							}
							at_matrix[at_len:(at_len + local_len - 1), 1] <- screened_listed[[i]][[m]][[k]][[7]]
							at_matrix[at_len:(at_len + local_len - 1), 2] <- screened_listed[[i]][[m]][[k]][[8]]
							at_matrix[at_len:(at_len + local_len - 1), 3] <- screened_listed[[i]][[m]][[k]][[9]]
							at_matrix[at_len:(at_len + local_len - 1), 4] <- screened_listed[[i]][[m]][[k]][[4]]
							at_matrix[at_len:(at_len + local_len - 1), 5] <- screened_listed[[i]][[m]][[k]][[5]]
							if(local_score >= cut_score){
								at_matrix[at_len:(at_len + local_len - 1), 6] <- 1
							}
							# Date plus fractional day derived from time of day.
							at_matrix[at_len:(at_len + local_len - 1), 7] <- (
								as.numeric(as.Date(measurements_table[at_ID, "Date"])) +
								as.numeric(as.difftime(measurements_table[at_ID, "Time"])/(24*60*60))
							)
							at_matrix[at_len:(at_len + local_len - 1), 8] <- at_RT[i]
							at_matrix[at_len:(at_len + local_len - 1), 9] <- as.numeric(measurements_table[at_ID, 1])
							at_len <- (at_len + local_len)
						}
					}
					# Fold this file's best hit into the per-adduct tallies.
					if(is_sample){
						if(max_score >= cut_score){
							num_samples <- (num_samples + 1)
						}
						if(max_score > max_score_sample){
							max_score_sample <- max_score
						}
						if(max_num_peaks > num_peaks_sample){
							num_peaks_sample <- max_num_peaks
						}
					}
					if(is_blank){
						if(max_score >= cut_score){
							num_blanks <- (num_blanks + 1)
						}
						if(max_score > max_score_blank){
							max_score_blank <- max_score
						}
						if(max_num_peaks > num_peaks_blank){
							num_peaks_blank <- max_num_peaks
						}
					}
				}
			}
			# Weighted mean of per-centroid sample/blank intensity ratios.
			ratios <- c()
			wei <- c()
			for(j in seq_len(length(centro_sample))){
				if((length(centro_sample[[j]]) > 0) & (length(centro_blank[[j]]) > 0)){
					ratios <- c(ratios, (mean(centro_sample[[j]]) / mean(centro_blank[[j]])))
					# NOTE: inside this branch both lengths are > 0, so each
					# weight is always 2 as written -- confirm intended scheme.
					wei <- c(wei, ((length(centro_sample[[j]]) > 0) + (length(centro_blank[[j]]) > 0)))
				}
			}
			if(length(ratios) > 0){
				# BUGFIX: mean() has no weights argument -- `w` was silently
				# swallowed by `...` and ignored. weighted.mean() was intended.
				mean_int_ratio[[i]] <- weighted.mean(ratios, w = wei)
			}
			num_samples_all[i] <- num_samples
			num_blanks_all[i] <- num_blanks
			max_score_sample_all[i] <- max_score_sample
			max_score_blank_all[i] <- max_score_blank
			num_peaks_sample_all[i] <- num_peaks_sample
			num_peaks_blank_all[i] <- num_peaks_blank
		}
	}
	##########################################################################################
	# Table with adducts per compound itemized ###############################################
	results_table_1 <- data.frame(
		IDed, named, adducted,
		num_samples_all,
		round(max_score_sample_all, digits = 2),
		num_peaks_sample_all,
		num_blanks_all,
		round(max_score_blank_all, digits = 2),
		num_peaks_blank_all,
		round(mean_int_ratio, digits = 1),
		rep(NA, length(mean_int_ratio)),
		rep(NA, length(mean_int_ratio)),
		stringsAsFactors = FALSE
	)
	names(results_table_1) <- c(
		"ID", "compound", "adduct",
		"Sample matches",
		"Max. sample score",
		"Max. sample peaks",
		"Blank matches",
		"Max. blank score",
		"Max. blank peaks",
		"Int. ratio sample/blank",
		"Max. conc.",
		"Latest conc."
	)
	##########################################################################################
	# Table with adducts per compound summarized #############################################
	ID_comp <- unique(IDed)
	adduct_sum <- rep("", length(ID_comp))
	named_sum <- rep("", length(ID_comp))
	max_score_sample_all_sum <- rep(0, length(ID_comp))
	max_score_blank_all_sum <- rep(0, length(ID_comp))
	num_peaks_sample_all_sum <- rep(0, length(ID_comp))
	num_peaks_blank_all_sum <- rep(0, length(ID_comp))
	for(i in seq_len(length(ID_comp))){
		those <- which(IDed == ID_comp[i])
		# Keep only adducts with any nonzero score in sample or blank.
		those <- those[((max_score_sample_all[those] > 0) | (max_score_blank_all[those] > 0))]
		if(length(those) > 0){
			named_sum[i] <- unique(named[those])
			adduct_sum[i] <- paste(adducted[those], collapse = ", ")
			max_score_sample_all_sum[i] <- max(round(max_score_sample_all[those], digits = 2))
			max_score_blank_all_sum[i] <- max(round(max_score_blank_all[those], digits = 2))
			num_peaks_sample_all_sum[i] <- max(num_peaks_sample_all[those])
			num_peaks_blank_all_sum[i] <- max(num_peaks_blank_all[those])
		}else{
			# No scoring adduct at all: still carry the compound name over.
			those <- (IDed == ID_comp[i])
			named_sum[i] <- unique(named[those])
		}
	}
	results_table_2 <- data.frame(
		ID_comp, named_sum, adduct_sum,
		max_score_sample_all_sum,
		num_peaks_sample_all_sum,
		max_score_blank_all_sum,
		num_peaks_blank_all_sum,
		rep(NA, length(num_peaks_blank_all_sum)),
		rep(NA, length(num_peaks_blank_all_sum)),
		stringsAsFactors = FALSE
	)
	names(results_table_2) <- c(
		"ID", "compound", "adducts",
		"Max. sample score",
		"Max. sample peaks",
		"Max. blank score",
		"Max. blank peaks",
		"Max. conc.",
		"Latest conc."
	)
	##########################################################################################
	results <- list()
	results[[1]] <- results_table_1
	results[[2]] <- results_table_2
	# BUGFIX: at_len starts at 1, so the original `at_len > 0` was always TRUE
	# and the empty case evaluated at_matrix[1:0, ] (index 0 is dropped),
	# returning one spurious all-zero row. Trim only when rows were written.
	if(at_len > 1){
		at_matrix <- at_matrix[seq_len(at_len - 1), , drop = FALSE]
		results[[3]] <- at_matrix
	}else{
		results[[3]] <- numeric(0)
	}
	return(results)
}
|
/R/get_screening_results.r
|
no_license
|
uweschmitt/enviMass
|
R
| false
| false
| 9,206
|
r
|
#' @title Compile a data.frame from screening results
#'
#' @description Check measured pattern plausibility
#'
#' @param screened_listed
#' @param pattern
#' @param at_RT
#' @param measurements_table
#' @param compound_table
#' @param cut_score
#' @param do_for
#'
#' @details enviMass workflow function
#'
get_screening_results<-function(
screened_listed,
pattern,
at_RT,
profileList,
measurements_table,
compound_table,
cut_score
){
IDs<-as.numeric(measurements_table[,1])
num_samples_all<-rep(0,length(screened_listed))
num_blanks_all<-rep(0,length(screened_listed))
max_score_sample_all<-rep(0,length(screened_listed))
max_score_blank_all<-rep(0,length(screened_listed))
num_peaks_sample_all<-rep(0,length(screened_listed))
num_peaks_blank_all<-rep(0,length(screened_listed))
mean_int_ratio<-rep(0,length(screened_listed))
IDed<-rep("")
named<-rep("")
adducted<-rep("")
at_len<-1
max_len<-1000
at_matrix<-matrix(nrow=10000,ncol=9,0)
min_ID<-(min(as.numeric(profileList[[4]]))-1) # adjust to lowest file ID; otherwise too many empty list entries will be caused
colnames(at_matrix)<-c("m/z","log Intensity","Measured RT","m/z deviation [ppm]","RT deviation within","above_cutscore",
"Time sequence","Expected RT","File ID")
set_ID<-seq(1:length(measurements_table[,1]))
for(i in 1:length(screened_listed)){
IDed[i]<-strsplit(names(pattern)[i],"_")[[1]][1]
named[i]<-compound_table[compound_table[,"ID"]==strsplit(names(pattern)[i],"_")[[1]][1],2]
adducted[i]<-strsplit(names(pattern)[i],"_")[[1]][2]
num_samples<-(0)
num_blanks<-(0)
max_score_sample<-(0)
max_score_blank<-(0)
num_peaks_sample<-(0)
num_peaks_blank<-(0)
centro_sample<-list()
centro_blank<-list()
for(j in 1:length(pattern[[i]][,1])){
centro_sample[[j]]<-numeric(0);
centro_blank[[j]]<-numeric(0);
}
if(length(screened_listed[[i]])>0){
for(m in 1:length(screened_listed[[i]])){
if(length(screened_listed[[i]][[m]])>0){
at_ID<-set_ID[measurements_table[,1]==screened_listed[[i]][[m]][[1]]$file_ID]
is_sample<-(measurements_table[at_ID,3]!="blank") # sample, calibration, doted; but not blank/blind
if(!is_sample){ # could still be doted or blind or ...
is_blank<-(measurements_table[at_ID,3]=="blank")
}else{
is_blank<-FALSE
}
if(!is_sample & !is_blank){next}
max_score<-0
max_num_peaks<-0
for(k in 1:length(screened_listed[[i]][[m]])){
if(length(screened_listed[[i]][[m]][[k]])>0){
local_score<-0
if(!is.na(screened_listed[[i]][[m]][[k]]$score_1)){
local_score<-(local_score+screened_listed[[i]][[m]][[k]]$score_1)
}
if( (local_score>=1) || (is.na(screened_listed[[i]][[m]][[k]]$score_1)) ){
if(!is.na(screened_listed[[i]][[m]][[k]]$score_2)){
local_score<-(local_score+screened_listed[[i]][[m]][[k]]$score_2)
}
}
if(local_score>max_score){
max_score<-local_score
}
if(length(screened_listed[[i]][[m]][[k]]$Peaks[,1])>max_num_peaks){
max_num_peaks<-length(screened_listed[[i]][[m]][[k]]$Peaks[,1])
}
if(is_sample & (local_score>=cut_score)){
for(d in 1:length(screened_listed[[i]][[m]][[k]][[1]][,1])){
centro_sample[[ screened_listed[[i]][[m]][[k]][[1]][d,1] ]]<-c(
centro_sample[[ screened_listed[[i]][[m]][[k]][[1]][d,1] ]],
profileList[[2]][screened_listed[[i]][[m]][[k]][[1]][d,2],2]
)
}
}
if(is_blank & (local_score>=cut_score)){
for(d in 1:length(screened_listed[[i]][[m]][[k]][[1]][,1])){
centro_blank[[ screened_listed[[i]][[m]][[k]][[1]][d,1] ]]<-c(
centro_blank[[ screened_listed[[i]][[m]][[k]][[1]][d,1] ]],
profileList[[2]][screened_listed[[i]][[m]][[k]][[1]][d,2],2]
)
}
}
local_len<-length(screened_listed[[i]][[m]][[k]][[7]])
if((at_len+local_len)>max_len){
at_matrix<-rbind(
at_matrix,
matrix(nrow=10000,ncol=9,0)
)
max_len<-(max_len+10000)
}
at_matrix[at_len:(at_len+local_len-1),1]<-screened_listed[[i]][[m]][[k]][[7]]
at_matrix[at_len:(at_len+local_len-1),2]<-screened_listed[[i]][[m]][[k]][[8]]
at_matrix[at_len:(at_len+local_len-1),3]<-screened_listed[[i]][[m]][[k]][[9]]
at_matrix[at_len:(at_len+local_len-1),4]<-screened_listed[[i]][[m]][[k]][[4]]
at_matrix[at_len:(at_len+local_len-1),5]<-screened_listed[[i]][[m]][[k]][[5]]
if(local_score>=cut_score){
at_matrix[at_len:(at_len+local_len-1),6]<-1
}
at_matrix[at_len:(at_len+local_len-1),7]<-(
as.numeric(as.Date(measurements_table[at_ID,"Date"]))+
as.numeric(as.difftime(measurements_table[at_ID,"Time"])/(24*60*60))
)
at_matrix[at_len:(at_len+local_len-1),8]<-at_RT[i]
at_matrix[at_len:(at_len+local_len-1),9]<-as.numeric(measurements_table[at_ID,1]);
at_len<-(at_len+local_len)
}
}
if(is_sample){
if(max_score>=cut_score){
num_samples<-(num_samples+1)
}
if(max_score>max_score_sample){
max_score_sample<-max_score
}
if(max_num_peaks>num_peaks_sample){
num_peaks_sample<-max_num_peaks
}
}
if(is_blank){
if(max_score>=cut_score){
num_blanks<-(num_blanks+1)
}
if(max_score>max_score_blank){
max_score_blank<-max_score
}
if(max_num_peaks>num_peaks_blank){
num_peaks_blank<-max_num_peaks
}
}
}
}
ratios<-c()
wei<-c()
for(j in 1:length(centro_sample)){
if( (length(centro_sample[[j]])>0) & (length(centro_blank[[j]])>0) ){
ratios<-c(ratios,( mean(centro_sample[[j]])/mean(centro_blank[[j]]) ) )
wei<-c(wei,((length(centro_sample[[j]])>0)+(length(centro_blank[[j]])>0)))
}
}
if(length(ratios)>0){
mean_int_ratio[[i]]<-mean(x=ratios,w=wei)
}
num_samples_all[i]<-num_samples
num_blanks_all[i]<-num_blanks
max_score_sample_all[i]<-max_score_sample
max_score_blank_all[i]<-max_score_blank
num_peaks_sample_all[i]<-num_peaks_sample
num_peaks_blank_all[i]<-num_peaks_blank
}
}
##########################################################################################
# Table with adducts per compound itemized ###############################################
results_table_1<-data.frame(
IDed,named,adducted,
num_samples_all,
round(max_score_sample_all,digits=2),
num_peaks_sample_all,
num_blanks_all,
round(max_score_blank_all,digits=2),
num_peaks_blank_all,
round(mean_int_ratio,digits=1),
rep(NA,length(mean_int_ratio)),
rep(NA,length(mean_int_ratio)),
stringsAsFactors=FALSE
)
names(results_table_1)<-c(
"ID","compound","adduct",
"Sample matches",
"Max. sample score",
"Max. sample peaks",
"Blank matches",
"Max. blank score",
"Max. blank peaks",
"Int. ratio sample/blank",
"Max. conc.",
"Latest conc."
)
##########################################################################################
# Table with adducts per compound summarized #############################################
ID_comp<-unique(IDed)
adduct_sum<-rep("",length(ID_comp))
named_sum<-rep("",length(ID_comp))
max_score_sample_all_sum<-rep(0,length(ID_comp))
max_score_blank_all_sum<-rep(0,length(ID_comp))
num_peaks_sample_all_sum<-rep(0,length(ID_comp))
num_peaks_blank_all_sum<-rep(0,length(ID_comp))
for(i in 1:length(ID_comp)){
those<-which(IDed==ID_comp[i])
those<-those[((max_score_sample_all[those]>0) | (max_score_blank_all[those]>0))]
if(length(those)>0){
named_sum[i]<-unique(named[those])
adduct_sum[i]<-paste(adducted[those],collapse=", ")
max_score_sample_all_sum[i]<-max(round(max_score_sample_all[those],digits=2))
max_score_blank_all_sum[i]<-max(round(max_score_blank_all[those],digits=2))
num_peaks_sample_all_sum[i]<-max(num_peaks_sample_all[those])
num_peaks_blank_all_sum[i]<-max(num_peaks_blank_all[those])
}else{
those<-(IDed==ID_comp[i])
named_sum[i]<-unique(named[those])
}
}
results_table_2<-data.frame(
ID_comp,named_sum,adduct_sum,
max_score_sample_all_sum,
num_peaks_sample_all_sum,
max_score_blank_all_sum,
num_peaks_blank_all_sum,
rep(NA,length(num_peaks_blank_all_sum)),
rep(NA,length(num_peaks_blank_all_sum)),
stringsAsFactors=FALSE
)
names(results_table_2)<-c(
"ID","compound","adducts",
"Max. sample score",
"Max. sample peaks",
"Max. blank score",
"Max. blank peaks",
"Max. conc.",
"Latest conc."
)
##########################################################################################
results<-list()
results[[1]]<-results_table_1
results[[2]]<-results_table_2
if(at_len>0){
at_matrix<-at_matrix[1:(at_len-1),,drop=FALSE]
results[[3]]<-at_matrix
}else{
results[[3]]<-numeric(0)
}
return(results)
}
|
## script to clean up original PA WIC source (by fitting to schema); data pulled from PA WIC website
## load libs / set up
library(dplyr)
write_loc <- "food-data/Cleaned_data_files/"
## ----------------------- read in data_model
# FIX: str_detect() belongs to stringr, which is never attached in this script;
# call it through its namespace so the filter cannot fail on attach order.
data_mod <- readxl::read_excel("schema.xlsx", sheet = "master_table") %>%
  filter(!stringr::str_detect(STATUS, "remove|REMOVE|eliminate"))
## create empty dataframe according to data model; (elegant approach suggested by Connor that preserves data types)
# Seed one dummy value per schema type, pivot wide so each field becomes a
# column of the right class, then drop the dummy row, leaving a 0-row template.
dat0 <- data_mod %>%
  select(field, type) %>%
  mutate(value = case_when(type %in% c("string", "date") ~ list("a"),
                           type %in% c("int", "float") ~ list(1),
                           type %in% "bool" ~ list(NA))) %>%
  select(-type) %>%
  tidyr::pivot_wider(names_from = "field", values_from = "value") %>%
  purrr::map_dfr(unlist) %>%
  slice(-1)
# ---------------------------------WIC
# read in wicresults.json dataset
library(jsonlite)
library(janitor)
WIC <- fromJSON("food-data/new-datasets/wicresults.json")$Result %>%
  clean_names()
# Fit the WIC records to the schema template and flag them as WIC-only sites.
WIC <- dat0 %>%
  bind_rows(WIC %>%
              mutate(address = ifelse(is.na(street_addr_line2), street_addr_line1, paste(street_addr_line1, street_addr_line2)),
                     original_id = NA) %>%
              select(name = store_name,
                     address,
                     city,
                     state,
                     zip_code,
                     original_id)) %>%
  rowwise() %>%
  mutate(source_org = "PA WIC",
         source_file = "wicresults.json",
         latlng_source = "na",
         food_bucks = NA,
         SNAP = NA,
         WIC = 1,
         FMNP = NA,
         fresh_produce = NA,
         free_distribution = 0,
         open_to_spec_group = 0,
         data_issues = "no type;no phone;no date/time info") %>%
  ungroup()
# FIX: write_csv() belongs to readr, which is never attached -- use its namespace.
readr::write_csv(WIC, paste0(write_loc, "cleaned_PA_WIC.csv"))
###--- clean up
rm(dat0, data_mod, write_loc, WIC)
|
/data_prep_scripts/prep_source_scripts/prep_wic_sites.R
|
permissive
|
cgmoreno/food-access-map-data
|
R
| false
| false
| 1,957
|
r
|
## script to clean up original PA WIC source (by fitting to schema); data pulled from PA WIC website
## load libs / set up
library(dplyr)
write_loc <- "food-data/Cleaned_data_files/"
## ----------------------- read in data_model
data_mod <- readxl::read_excel("schema.xlsx", sheet = "master_table") %>%
filter(!str_detect(STATUS, "remove|REMOVE|eliminate"))
## create empty dataframe according to data model; (elegant approach suggested by Connor that perserves data types)
dat0 <- data_mod %>%
select(field, type) %>%
mutate(value = case_when(type %in% c("string", "date") ~ list("a"),
type %in% c("int", "float") ~ list(1),
type %in% "bool" ~ list(NA))) %>%
select(-type) %>%
tidyr::pivot_wider(names_from = "field", values_from = "value") %>%
purrr::map_dfr(unlist) %>%
slice(-1)
# ---------------------------------WIC
# read in wicresults.json dataset
library(jsonlite)
library(janitor)
WIC <- fromJSON("food-data/new-datasets/wicresults.json")$Result %>%
clean_names()
WIC <- dat0 %>%
bind_rows(WIC %>%
mutate(address = ifelse(is.na(street_addr_line2), street_addr_line1, paste(street_addr_line1, street_addr_line2)),
original_id = NA) %>%
select(name = store_name,
address,
city,
state,
zip_code,
original_id)) %>%
rowwise() %>%
mutate(source_org = "PA WIC",
source_file = "wicresults.json",
latlng_source = "na",
food_bucks = NA,
SNAP = NA,
WIC = 1,
FMNP = NA,
fresh_produce = NA,
free_distribution = 0,
open_to_spec_group = 0,
data_issues = "no type;no phone;no date/time info") %>%
ungroup()
write_csv(WIC, paste0(write_loc, "cleaned_PA_WIC.csv"))
###--- clean up
rm(dat0, data_mod, write_loc, WIC)
|
# Dear ProteoSign user,
# Please find below the code that ProteoSign uses to generate the data plots.
# The two main functions are: do_results_plots, which produces the Reproducibility plot, the Volcano plot, the MA plot and the Scatterplot (matrix),
# and do_limma_plots, which produces the replicates' intensities boxplots before and after normalization, as well as the average intensity histogram.
# Print warnings as they occur instead of collecting them at the end of the run.
options(warn=1)
# NOTE(review): the original script sourced http://www.bioconductor.org/biocLite.R
# here, but nothing in this file used biocLite (both packages below install
# from CRAN) and the biocLite installer has been retired in favour of
# BiocManager, so the dead call was removed.
# Load a CRAN package, installing it first if it is not available.
ensure_package <- function(pkg) {
  if (!require(pkg, character.only = TRUE)) {
    install.packages(pkg, repos = "https://cran.fhcrc.org")
    library(pkg, character.only = TRUE)
  }
}
ensure_package("ggplot2")
ensure_package("gtools")
# do_results_plots produces the Reproducibility plot, the Volcano plot, the MA plot and the Scatterplot (matrix)
do_results_plots<-function(){
# Produces, for every pairwise combination of conditions, four PDFs: a volcano
# plot, a value-ordered fold-change plot, an MA plot and a reproducibility
# scatterplot matrix. Each stage is wrapped in tryCatch so a failure in one
# plot does not abort the remaining ones.
# Free variables expected in the global environment (loaded from
# Plot_Generator.RData by the main section below): results, nConditions,
# conditions.labels, pThreshold, outputFigsPrefix, time.point,
# quantitated_items_lbl, IsobaricLabel, PDdata.
# Note: 'results' is modified through R's copy-on-modify semantics, so the
# helper columns added here exist only inside this function call.
#ratio_combs contains the combinations of the conditions
ratio_combs<-combinations(nConditions,2,1:nConditions)
#Set the theme in ggplot2:
theme_set(theme_bw())
# cbPalette will be used in creating the plots
# the default one is a customized colorblind-friendly palette from http://wiki.stdout.org/rcookbook/Graphs/Colors%20(ggplot2)/
cbPalette <- c("#999999", "#D55E00", "#E69F00", "#56B4E9", "#009E73", "#F0E442", "#0072B2", "#CC79A7")
#Plot generation:
for(i in 1:nrow(ratio_combs)){
#Prepare the combination:
print(paste("Generating plots for combination #",i," ..."),change=1,after=T)
result <- tryCatch({
# Column-name fragments for this condition pair, e.g. "B.A" -> "log2.B.A" /
# "log2.sd.B.A"; gsub escapes the dots so grep matches them literally
ratio_i_str<-paste(conditions.labels[ratio_combs[i,2]],".",conditions.labels[ratio_combs[i,1]],sep="")
ratio_i_<-paste("log2.",ratio_i_str,sep="")
ratio_i_sd_col<-paste("log2.sd.",ratio_i_str,sep="")
# tmp2/tmp1 = mean+sd and mean-sd across all matching ratio columns; they
# only exist to derive a symmetric axis limit below
tmp2<-results[,colnames(results)[grep(gsub("\\.","\\\\.",paste0(ratio_i_, " ")),colnames(results))]]+results[,colnames(results)[grep(gsub("\\.","\\\\.",paste0(ratio_i_sd_col, "$")),colnames(results))]]
tmp1<-results[,colnames(results)[grep(gsub("\\.","\\\\.",paste0(ratio_i_, " ")),colnames(results))]]-results[,colnames(results)[grep(gsub("\\.","\\\\.",paste0(ratio_i_sd_col, "$")),colnames(results))]]
# ratiolim: smallest integer axis bound covering mean +/- sd in both directions
ratiolim<-ceiling(max(max(range(tmp1,na.rm=T),range(tmp2,na.rm=T)),abs(min(range(tmp1,na.rm=T),range(tmp2,na.rm=T)))))
#If two conditions contain exactly the same data ratiolim will be equal to 0. In this case add all the intensities to the same block
if(ratiolim == 0)
{
ratiolim <- 5
}
# Global (<<-) on purpose: panel.hist, called later via pairs.panels, reads
# these histogram bin edges
panel.hist.breaks<<-(-ratiolim:ratiolim)
}, error = function(err){
print(paste0("Warning! ", ratio_i_str, " combination preparation failed!"))
})
# 1 - volcano - -log10 P-value vs log ratio
result <- tryCatch({
print("Making volcano plot ...")
#Customize the filename and the plot size by editing the following two lines:
figsuffix<-paste("_",ratio_i_str,"-volcano","_",sep="")
pdf(file=paste(outputFigsPrefix,figsuffix,time.point,".pdf",sep=""),width=10, height=7, family = "Helvetica", pointsize=8)
#Data preparation:
ratio_i_p.value.adj<-paste("p.value.adj.",paste(conditions.labels[ratio_combs[i,2]],".",conditions.labels[ratio_combs[i,1]],sep=""),sep="")
ratio_i_avg_col<-paste("log2.avg.",ratio_i_str,sep="")
mlog10_ratio_i_p.value.adj<-paste("mlog10_",ratio_i_p.value.adj,sep="")
diffexp_ratio_i<-paste("diffexp_",ratio_i_str,sep="")
results[,mlog10_ratio_i_p.value.adj]<-(-log10(results[,ratio_i_p.value.adj]))
# NA p-values are temporarily set to 1 so the threshold comparison yields
# FALSE (not differentially expressed) rather than NA, then restored
na_indexes<-which(is.na(results[,ratio_i_p.value.adj]))
if(length(na_indexes)>0){
results[na_indexes,ratio_i_p.value.adj]<-1
results[,diffexp_ratio_i]<-results[,ratio_i_p.value.adj]<pThreshold
results[na_indexes,ratio_i_p.value.adj]<-NA
}else{
results[,diffexp_ratio_i]<-results[,ratio_i_p.value.adj]<pThreshold
}
#The following lines optimize the plot's x-label in specific dataset types
if(!IsobaricLabel)
{
myxlab <- paste("average log2 ",sub("\\.","/",ratio_i_str),sep="")
}else{
if(!PDdata)
{
myxlab <- paste("average log2 ", ratio_i_str, sep="")
myxlab <- gsub("Reporter\\.intensity\\.", "Reporter ", myxlab)
}else{
myxlab <- paste("average log2 ",ratio_i_str ,sep="")
myxlab <- gsub("X([[:digit:]])", "\\1", myxlab)
}
}
myxlab <- gsub("\\.", "/", myxlab)
# p is a plot created by the ggplot library
# Change the next command to suit your needs:
p<-ggplot(data=results, aes_string(x=ratio_i_avg_col, y=mlog10_ratio_i_p.value.adj, colour=diffexp_ratio_i)) +
geom_point(alpha=0.7, size=1.75) +
theme(legend.position = "none", axis.title.y=element_text(vjust=0.2), axis.title.x=element_text(vjust=0), plot.title = element_text(vjust=1.5, lineheight=.8, face="bold")) +
xlim(c(-ratiolim, ratiolim)) + ylim(c(0, 6)) + scale_colour_manual(values=cbPalette) +
xlab(myxlab) + ylab("-log10 P-value") + ggtitle("P-value vs Fold change") +
geom_hline(aes(yintercept=-log10(pThreshold)), colour="#990000", linetype="dashed") +
geom_text(size=2.5, hjust=1, vjust=-0.5,aes(x=-4.2, y=-log10(pThreshold)), label=paste0("P-value=", pThreshold),colour="#990000")
print(p)
dev.off()
}, error = function(err){
print(paste0("Warning! ", ratio_i_str, " volcano plot failed"))
})
# 2 - value-ordered - log ratio
result <- tryCatch({
print("Making value-ordered plot ...")
#Customize the filename and the plot size by editing the following two lines:
figsuffix<-paste("_",ratio_i_str,"-value-ordered-log-ratio","_",sep="")
pdf(file=paste(outputFigsPrefix,figsuffix,time.point,".pdf",sep=""),width=10, height=7, family = "Helvetica", pointsize=8)
#Data preparation:
# Rows are sorted by the average ratio so nID (1..n) gives the value order
results<-results[with(results, order(results[,c(ratio_i_avg_col)])),]
results$nID<-1:nrow(results)
ratio_i_avg_col_ymax<-paste(ratio_i_avg_col,".ymax",sep="")
ratio_i_avg_col_ymin<-paste(ratio_i_avg_col,".ymin",sep="")
results[,ratio_i_avg_col_ymax]<-results[,ratio_i_avg_col]+results[,ratio_i_sd_col]
results[,ratio_i_avg_col_ymin]<-results[,ratio_i_avg_col]-results[,ratio_i_sd_col]
#The following lines optimize the plot's y-label in specific dataset types
if(!IsobaricLabel)
{
myylab <- paste("average log2 ",sub("\\.","/",ratio_i_str),sep="")
}else{
if(!PDdata)
{
myylab <- paste("average log2 ", ratio_i_str, sep="")
myylab <- gsub("Reporter\\.intensity\\.", "Reporter ", myylab)
}else{
myylab <- paste("average log2 ", ratio_i_str, sep="")
myylab <- gsub("X([[:digit:]])", "\\1", myylab)
}
}
myylab <- gsub("\\.", "/", myylab)
# p is a plot created by the ggplot library
# Change the next command to suit your needs:
p<-ggplot(data=results, aes_string(x="nID", y=ratio_i_avg_col, colour=diffexp_ratio_i)) +
geom_point(alpha=0.7, size=1.5) +
geom_errorbar(aes_string(ymin=ratio_i_avg_col_ymin, ymax=ratio_i_avg_col_ymax), width=1.5) +
theme(legend.position = "none", axis.title.y=element_text(vjust=0.2), axis.title.x=element_text(vjust=0), plot.title = element_text(vjust=1.5, lineheight=.8, face="bold")) +
ylim(c(-ratiolim, ratiolim)) + scale_colour_manual(values=cbPalette) +
xlab(paste(quantitated_items_lbl,"ID")) + ylab(myylab) + ggtitle("Value-ordered fold change")
print(p)
dev.off()
}, error = function(err){
print(paste0("Warning! ", ratio_i_str, " value-ordered plot failed"))
})
# 3 - MA plot
result <- tryCatch({
print("Making MA plot ...")
#Customize the filename and the plot size by editing the following two lines:
figsuffix<-paste("_",ratio_i_str,"-MA","_",sep="")
ratio_i_avgI_col<-paste("log2.avg.I.",ratio_i_str,sep="")
pdf(file=paste(outputFigsPrefix,figsuffix,time.point,".pdf",sep=""),width=10, height=7, family = "Helvetica", pointsize=8)
#The following lines optimize the plot's y-label in specific dataset types
if(!IsobaricLabel)
{
myylab <- paste("A (average log2 ",sub("\\.","/",ratio_i_str),")",sep="")
}else{
if(!PDdata)
{
myylab <- paste("A (average log2 ", ratio_i_str, ")", sep="")
myylab <- gsub("Reporter\\.intensity\\.", "Reporter ", myylab)
}else{
myylab <- paste("A (average log2 ",ratio_i_str,")",sep="")
myylab <- gsub("X([[:digit:]])", "\\1", myylab)
}
}
myylab <- gsub("\\.", "/", myylab)
# p is a plot created by the ggplot library
# Change the next command to suit your needs:
p<-ggplot(data=results, aes_string(x=ratio_i_avgI_col, y=ratio_i_avg_col, colour=diffexp_ratio_i)) +
geom_point(alpha=0.7, size=1.75) +
theme(legend.position = "none", axis.title.y=element_text(vjust=0.2), axis.title.x=element_text(vjust=0), plot.title = element_text(vjust=1.5, lineheight=.8, face="bold")) +
ylim(c(-ratiolim, ratiolim)) + scale_colour_manual(values=cbPalette) +
xlab("M (average log2 Intensity)") + ylab(myylab) + ggtitle("MA plot")
print(p)
dev.off()
}, error = function(err){
print(paste0("Warning! ", ratio_i_str, " MA plot failed"))
})
# 4 - Reproducibility plots & histograms
result <- tryCatch({
print("Making reproducibility plot ...")
#Customize the filename suffix by editing the following line:
figsuffix<-paste("_",ratio_i_str,"-reproducibility","_",sep="")
# NOTE(review): unlike the preparation step above, the dots in this grep
# pattern are NOT escaped, so '.' matches any character -- presumably
# harmless for these column names, but confirm
allratios<-results[,colnames(results)[grep(paste0(ratio_i_, " "),colnames(results))]]
#The following lines optimize the plot's y-label in specific dataset types
if(!IsobaricLabel)
{
colnames(allratios)<-sub(ratio_i_,paste("log2(",sub("\\.","/",ratio_i_str),") ",sep=""),colnames(allratios))
}else{
if(!PDdata){
colnames(allratios)<-sub(ratio_i_,paste("log2(",ratio_i_str,") ",sep=""),colnames(allratios))
colnames(allratios) <- gsub("Reporter\\.intensity\\.", "Reporter ", colnames(allratios))
}else{
colnames(allratios)<-sub(ratio_i_,paste("log2(",ratio_i_str,") ",sep=""),colnames(allratios))
colnames(allratios) <- gsub("X([[:digit:]])", "\\1", colnames(allratios))
}
}
colnames(allratios) <- gsub("\\.", "/", colnames(allratios))
#Customize the filename and the size of the plot by editing the following line:
pdf(file=paste(outputFigsPrefix,figsuffix,time.point,".pdf",sep=""),width=10, height=7, family = "Helvetica", pointsize=8)
pairs.panels(allratios,scale=T,lm=T)
dev.off()
}, error = function(err){
print(paste0("Warning! ", ratio_i_str, " reproducibility plot failed"))
})
}
}
# do_limma_plots draws the limma boxplots and the limma histograms in one pdf file:
do_limma_plots <- function() {
  # Draws the limma QC graphics into a single PDF:
  #   1) boxplot of per-replicate log2 intensities before normalisation,
  #   2) the same boxplot after normalisation,
  #   3) one log2 fold-change histogram per pairwise condition combination.
  # Free variables expected in the global environment (loaded from
  # Plot_Generator.RData): nConditions, conditions.labels, outputFigsPrefix,
  # time.point, log.intensities, norm.intensities, fit2.coefficients.
  ratio_combs <- combinations(nConditions, 2, seq_len(nConditions))
  pdf(file = paste0(outputFigsPrefix, "_limma-graphs_", time.point, ".pdf"),
      width = 10, height = 7, family = "Helvetica", pointsize = 8)
  # Close the device even if a plotting step errors (the original leaked the
  # open PDF device on failure).
  on.exit(dev.off(), add = TRUE)
  # Create the intensities before normalisation boxplot
  print("Making Intensities before normalisation limma boxplot")
  boxplot(log.intensities)
  title(main = "Intensities Before Normalisation")
  # Create the intensities after normalisation boxplot
  print("Making Intensities after normalisation limma boxplot")
  boxplot(norm.intensities)
  title(main = "Intensities After Normalisation")
  # Create the limma histograms for each combination:
  print("Making limma histograms")
  for (i in seq_len(nrow(ratio_combs))) {
    ratio_i_str <- paste0(conditions.labels[ratio_combs[i, 2]], "/",
                          conditions.labels[ratio_combs[i, 1]])
    hist(fit2.coefficients[, i],
         main = paste0("Log2 Fold Change ", ratio_i_str),
         xlab = "Log2 Fold Change", breaks = 50)
  }
}
# For more customization options someone can modify the following functions:
# FROM: http://musicroamer.com/blog/2011/01/16/r-tips-and-tricks-modified-pairs-plot/
# The following functions are used to draw the different parts of the Reproducibility plot
# panel.cor.scale displays the correlation coefficients in the upper right half of the Reproducibility plot
# the size of the text is proportional to the value of the coefficient
panel.cor.scale <- function(x, y, digits = 2, prefix = "", cex.cor) {
  # Upper panel for pairs(): prints the pairwise correlation coefficient with
  # the label size proportional to |r|; an NA correlation is shown as a tiny
  # "NA". The panel's coordinate system is temporarily set to the unit square
  # and restored on exit.
  usr <- par("usr")
  on.exit(par(usr))
  par(usr = c(0, 1, 0, 1))
  r <- cor(x, y, use = "pairwise")
  label <- paste0(prefix, format(c(r, 0.123456789), digits = digits)[1])
  if (missing(cex.cor)) {
    cex <- 0.8 / strwidth(label)
  }
  if (is.na(r)) {
    text(0.5, 0.5, "NA", cex = cex * 0.25)
  } else {
    text(0.5, 0.5, label, cex = cex * abs(r))
  }
}
#panel.cor is not called by default but can replace panel.cor.scale if scaling the text according to the R value is not desirable
panel.cor <- function(x, y, digits = 2, prefix = "", cex.cor) {
  # Upper panel for pairs(): prints the pairwise correlation coefficient at a
  # fixed text size (unscaled alternative to panel.cor.scale).
  usr <- par("usr")
  on.exit(par(usr))
  par(usr = c(0, 1, 0, 1))
  r <- cor(x, y, use = "pairwise")
  label <- paste0(prefix, format(c(r, 0.123456789), digits = digits)[1])
  if (missing(cex.cor)) {
    cex <- 0.8 / strwidth(label)
  }
  text(0.5, 0.5, label, cex = cex)
}
# panel.hist draws the histograms in the diagonal of the Reproducibility plot
panel.hist <- function(x, ...) {
  # Diagonal panel for pairs(): a histogram of x rescaled to the panel height.
  # Bin edges come from the global panel.hist.breaks set by do_results_plots().
  # ratios.hist.colour is the colour of the histogram columns
  ratios.hist.colour <- "cyan"
  usr <- par("usr"); on.exit(par(usr))
  par(usr = c(usr[1:2], 0, 1.5))
  h <- hist(x, breaks = panel.hist.breaks, plot = FALSE)
  breaks <- h$breaks; nB <- length(breaks)
  y <- h$counts; y <- y / max(y)
  # If all values are 0 draw a single rectangle in the middle instead.
  # na.rm = TRUE fixes an edge case in the original: when x contained NAs and
  # no non-zero values, any(x != 0) returned NA and if() aborted the panel.
  if (any(x != 0, na.rm = TRUE)) {
    rect(breaks[-nB], 0, breaks[-1], y, col = ratios.hist.colour, ...)
  } else {
    rect(-0.25, 0, 0.25, max(y), col = ratios.hist.colour, ...)
  }
}
# panel.lmline creates the scatterplots displayed in the bottom left half of the Reproducibility plot
# FROM: http://www-personal.umich.edu/~ladamic/presentations/Rtutorial/Rtutorial.R
panel.lmline <- function(x, y, col = par("col"), bg = NA, pch = par("pch"),
                         cex = 1, col.smooth = "red", ...) {
  # Lower panel for pairs(): scatterplot of (x, y) plus a linear-regression
  # line. col.smooth is the colour of the regression line (red by default).
  points(x, y, pch = pch, col = col, bg = bg, cex = cex)
  ok <- is.finite(x) & is.finite(y)
  # na.rm = TRUE fixes an edge case in the original: if x != y produced NAs
  # and no TRUEs, any() returned NA and the if() condition aborted the panel.
  if (any(ok) && any(x != y, na.rm = TRUE)) {
    fit <- lm(y[ok] ~ x[ok])  # fit once; reused for the slope check and abline
    if (!is.na(coef(fit)[2])) {
      abline(fit, col = col.smooth, ...)
    } else {
      print("Warning!: panel.lmline: found abline with NA slope, the regression line will not be drawn")
    }
  }
}
#Called by do_results_plot (by default smooth=TRUE,scale=TRUE,lm=TRUE)
pairs.panels <- function(x, y, smooth = TRUE, scale = FALSE, lm = FALSE) {
  # Dispatches to pairs() with the custom panels defined above.
  # Called by do_results_plots (which passes scale = TRUE, lm = TRUE).
  # scale selects the |r|-scaled correlation panel; with smooth, lm selects a
  # regression-line scatter panel instead of the default smoothed one.
  # The y argument is unused but kept for interface compatibility.
  upper <- if (scale) panel.cor.scale else panel.cor
  if (smooth) {
    lower <- if (lm) panel.lmline else panel.smooth
    pairs(x, diag.panel = panel.hist, upper.panel = upper, lower.panel = lower)
  } else {
    pairs(x, diag.panel = panel.hist, upper.panel = upper)
  }
}
#MAIN process:
#Load the necessary variables: the file Plot_Generator.RData must be contained in the same folder with this script
#(it supplies the globals referenced above, e.g. results, nConditions, conditions.labels, pThreshold, log.intensities, norm.intensities, fit2.coefficients)
load("Plot_Generator.RData", .GlobalEnv)
#Draw the basic plots (volcano, value-ordered, MA and reproducibility PDFs, one set per condition pair):
do_results_plots()
# Draw the limma plots (intensity boxplots and fold-change histograms):
do_limma_plots()
print("Procedure finished")
|
/cgi-bin/Plot_Generator.R
|
no_license
|
yorgodillo/ProteoSign
|
R
| false
| false
| 15,820
|
r
|
# Dear ProteoSign user,
# Please find below the code that ProteoSign uses to generate the data plots.
# The two main functions are: do_results_plots, which produces the Reproducibility plot, the Volcano plot, the MA plot and the Scatterplot (matrix),
# and do_limma_plots, which produces the replicates' intensities boxplots before and after normalization, as well as the average intensity histogram.
options(warn=1)
source("http://www.bioconductor.org/biocLite.R")
if(!require("ggplot2"))
{
install.packages("ggplot2", repos="http://cran.fhcrc.org")
library(ggplot2)
}
if(!require("gtools"))
{
install.packages("gtools", repos="http://cran.fhcrc.org")
library(gtools)
}
# do_results_plots produces the Reproducibility plot, the Volcano plot, the MA plot and the Scatterplot (matrix)
do_results_plots<-function(){
#ratio_combs contains the combinations of the conditions
ratio_combs<-combinations(nConditions,2,1:nConditions)
#Set the theme in ggplot2:
theme_set(theme_bw())
# cbPalette will be used in creating the plots
# the default one is a customized colorblind-friendly palette from http://wiki.stdout.org/rcookbook/Graphs/Colors%20(ggplot2)/
cbPalette <- c("#999999", "#D55E00", "#E69F00", "#56B4E9", "#009E73", "#F0E442", "#0072B2", "#CC79A7")
#Plot generation:
for(i in 1:nrow(ratio_combs)){
#Prepare the combination:
print(paste("Generating plots for combination #",i," ..."),change=1,after=T)
result <- tryCatch({
ratio_i_str<-paste(conditions.labels[ratio_combs[i,2]],".",conditions.labels[ratio_combs[i,1]],sep="")
ratio_i_<-paste("log2.",ratio_i_str,sep="")
ratio_i_sd_col<-paste("log2.sd.",ratio_i_str,sep="")
tmp2<-results[,colnames(results)[grep(gsub("\\.","\\\\.",paste0(ratio_i_, " ")),colnames(results))]]+results[,colnames(results)[grep(gsub("\\.","\\\\.",paste0(ratio_i_sd_col, "$")),colnames(results))]]
tmp1<-results[,colnames(results)[grep(gsub("\\.","\\\\.",paste0(ratio_i_, " ")),colnames(results))]]-results[,colnames(results)[grep(gsub("\\.","\\\\.",paste0(ratio_i_sd_col, "$")),colnames(results))]]
ratiolim<-ceiling(max(max(range(tmp1,na.rm=T),range(tmp2,na.rm=T)),abs(min(range(tmp1,na.rm=T),range(tmp2,na.rm=T)))))
#If two conditions contain exactly the same data ratiolim will be equal to 0. In this case add all the intensities to the same block
if(ratiolim == 0)
{
ratiolim <- 5
}
panel.hist.breaks<<-(-ratiolim:ratiolim)
}, error = function(err){
print(paste0("Warning! ", ratio_i_str, " combination preparation failed!"))
})
# 1 - volcano - -log10 P-value vs log ratio
result <- tryCatch({
print("Making volcano plot ...")
#Customize the filename and the plot size by editing the following two lines:
figsuffix<-paste("_",ratio_i_str,"-volcano","_",sep="")
pdf(file=paste(outputFigsPrefix,figsuffix,time.point,".pdf",sep=""),width=10, height=7, family = "Helvetica", pointsize=8)
#Data preparation:
ratio_i_p.value.adj<-paste("p.value.adj.",paste(conditions.labels[ratio_combs[i,2]],".",conditions.labels[ratio_combs[i,1]],sep=""),sep="")
ratio_i_avg_col<-paste("log2.avg.",ratio_i_str,sep="")
mlog10_ratio_i_p.value.adj<-paste("mlog10_",ratio_i_p.value.adj,sep="")
diffexp_ratio_i<-paste("diffexp_",ratio_i_str,sep="")
results[,mlog10_ratio_i_p.value.adj]<-(-log10(results[,ratio_i_p.value.adj]))
na_indexes<-which(is.na(results[,ratio_i_p.value.adj]))
if(length(na_indexes)>0){
results[na_indexes,ratio_i_p.value.adj]<-1
results[,diffexp_ratio_i]<-results[,ratio_i_p.value.adj]<pThreshold
results[na_indexes,ratio_i_p.value.adj]<-NA
}else{
results[,diffexp_ratio_i]<-results[,ratio_i_p.value.adj]<pThreshold
}
#The following lines optimize the plot's x-label in specific dataset types
if(!IsobaricLabel)
{
myxlab <- paste("average log2 ",sub("\\.","/",ratio_i_str),sep="")
}else{
if(!PDdata)
{
myxlab <- paste("average log2 ", ratio_i_str, sep="")
myxlab <- gsub("Reporter\\.intensity\\.", "Reporter ", myxlab)
}else{
myxlab <- paste("average log2 ",ratio_i_str ,sep="")
myxlab <- gsub("X([[:digit:]])", "\\1", myxlab)
}
}
myxlab <- gsub("\\.", "/", myxlab)
# p is a plot created by the ggplot library
# Change the next command to suit your needs:
p<-ggplot(data=results, aes_string(x=ratio_i_avg_col, y=mlog10_ratio_i_p.value.adj, colour=diffexp_ratio_i)) +
geom_point(alpha=0.7, size=1.75) +
theme(legend.position = "none", axis.title.y=element_text(vjust=0.2), axis.title.x=element_text(vjust=0), plot.title = element_text(vjust=1.5, lineheight=.8, face="bold")) +
xlim(c(-ratiolim, ratiolim)) + ylim(c(0, 6)) + scale_colour_manual(values=cbPalette) +
xlab(myxlab) + ylab("-log10 P-value") + ggtitle("P-value vs Fold change") +
geom_hline(aes(yintercept=-log10(pThreshold)), colour="#990000", linetype="dashed") +
geom_text(size=2.5, hjust=1, vjust=-0.5,aes(x=-4.2, y=-log10(pThreshold)), label=paste0("P-value=", pThreshold),colour="#990000")
print(p)
dev.off()
}, error = function(err){
print(paste0("Warning! ", ratio_i_str, " volcano plot failed"))
})
# 2 - value-ordered - log ratio
result <- tryCatch({
print("Making value-ordered plot ...")
#Customize the filename and the plot size by editing the following two lines:
figsuffix<-paste("_",ratio_i_str,"-value-ordered-log-ratio","_",sep="")
pdf(file=paste(outputFigsPrefix,figsuffix,time.point,".pdf",sep=""),width=10, height=7, family = "Helvetica", pointsize=8)
#Data preparation:
results<-results[with(results, order(results[,c(ratio_i_avg_col)])),]
results$nID<-1:nrow(results)
ratio_i_avg_col_ymax<-paste(ratio_i_avg_col,".ymax",sep="")
ratio_i_avg_col_ymin<-paste(ratio_i_avg_col,".ymin",sep="")
results[,ratio_i_avg_col_ymax]<-results[,ratio_i_avg_col]+results[,ratio_i_sd_col]
results[,ratio_i_avg_col_ymin]<-results[,ratio_i_avg_col]-results[,ratio_i_sd_col]
#The following lines optimize the plot's y-label in specific dataset types
if(!IsobaricLabel)
{
myylab <- paste("average log2 ",sub("\\.","/",ratio_i_str),sep="")
}else{
if(!PDdata)
{
myylab <- paste("average log2 ", ratio_i_str, sep="")
myylab <- gsub("Reporter\\.intensity\\.", "Reporter ", myylab)
}else{
myylab <- paste("average log2 ", ratio_i_str, sep="")
myylab <- gsub("X([[:digit:]])", "\\1", myylab)
}
}
myylab <- gsub("\\.", "/", myylab)
# p is a plot created by the ggplot library
# Change the next command to suit your needs:
p<-ggplot(data=results, aes_string(x="nID", y=ratio_i_avg_col, colour=diffexp_ratio_i)) +
geom_point(alpha=0.7, size=1.5) +
geom_errorbar(aes_string(ymin=ratio_i_avg_col_ymin, ymax=ratio_i_avg_col_ymax), width=1.5) +
theme(legend.position = "none", axis.title.y=element_text(vjust=0.2), axis.title.x=element_text(vjust=0), plot.title = element_text(vjust=1.5, lineheight=.8, face="bold")) +
ylim(c(-ratiolim, ratiolim)) + scale_colour_manual(values=cbPalette) +
xlab(paste(quantitated_items_lbl,"ID")) + ylab(myylab) + ggtitle("Value-ordered fold change")
print(p)
dev.off()
}, error = function(err){
print(paste0("Warning! ", ratio_i_str, " value-ordered plot failed"))
})
# 3 - MA plot
result <- tryCatch({
print("Making MA plot ...")
#Customize the filename and the plot size by editing the following two lines:
figsuffix<-paste("_",ratio_i_str,"-MA","_",sep="")
ratio_i_avgI_col<-paste("log2.avg.I.",ratio_i_str,sep="")
pdf(file=paste(outputFigsPrefix,figsuffix,time.point,".pdf",sep=""),width=10, height=7, family = "Helvetica", pointsize=8)
#The following lines optimize the plot's y-label in specific dataset types
if(!IsobaricLabel)
{
myylab <- paste("A (average log2 ",sub("\\.","/",ratio_i_str),")",sep="")
}else{
if(!PDdata)
{
myylab <- paste("A (average log2 ", ratio_i_str, ")", sep="")
myylab <- gsub("Reporter\\.intensity\\.", "Reporter ", myylab)
}else{
myylab <- paste("A (average log2 ",ratio_i_str,")",sep="")
myylab <- gsub("X([[:digit:]])", "\\1", myylab)
}
}
myylab <- gsub("\\.", "/", myylab)
# p is a plot created by the ggplot library
# Change the next command to suit your needs:
p<-ggplot(data=results, aes_string(x=ratio_i_avgI_col, y=ratio_i_avg_col, colour=diffexp_ratio_i)) +
geom_point(alpha=0.7, size=1.75) +
theme(legend.position = "none", axis.title.y=element_text(vjust=0.2), axis.title.x=element_text(vjust=0), plot.title = element_text(vjust=1.5, lineheight=.8, face="bold")) +
ylim(c(-ratiolim, ratiolim)) + scale_colour_manual(values=cbPalette) +
xlab("M (average log2 Intensity)") + ylab(myylab) + ggtitle("MA plot")
print(p)
dev.off()
}, error = function(err){
print(paste0("Warning! ", ratio_i_str, " MA plot failed"))
})
# 4 - Reproducibility plots & histograms
result <- tryCatch({
print("Making reproducibility plot ...")
#Customize the filename suffix by editing the following line:
figsuffix<-paste("_",ratio_i_str,"-reproducibility","_",sep="")
allratios<-results[,colnames(results)[grep(paste0(ratio_i_, " "),colnames(results))]]
#The following lines optimize the plot's y-label in specific dataset types
if(!IsobaricLabel)
{
colnames(allratios)<-sub(ratio_i_,paste("log2(",sub("\\.","/",ratio_i_str),") ",sep=""),colnames(allratios))
}else{
if(!PDdata){
colnames(allratios)<-sub(ratio_i_,paste("log2(",ratio_i_str,") ",sep=""),colnames(allratios))
colnames(allratios) <- gsub("Reporter\\.intensity\\.", "Reporter ", colnames(allratios))
}else{
colnames(allratios)<-sub(ratio_i_,paste("log2(",ratio_i_str,") ",sep=""),colnames(allratios))
colnames(allratios) <- gsub("X([[:digit:]])", "\\1", colnames(allratios))
}
}
colnames(allratios) <- gsub("\\.", "/", colnames(allratios))
#Customize the filename and the size of the plot by editing the following line:
pdf(file=paste(outputFigsPrefix,figsuffix,time.point,".pdf",sep=""),width=10, height=7, family = "Helvetica", pointsize=8)
pairs.panels(allratios,scale=T,lm=T)
dev.off()
}, error = function(err){
print(paste0("Warning! ", ratio_i_str, " reproducibility plot failed"))
})
}
}
# do_limma_plots draws the limma boxplots and the limma histograms in one pdf file:
do_limma_plots<-function()
{
ratio_combs<-combinations(nConditions,2,1:nConditions)
pdf(file=paste(outputFigsPrefix,"_limma-graphs_",time.point,".pdf",sep=""),width=10, height=7, family = "Helvetica", pointsize=8)
# Create the intensities before normalisation boxplot
print("Making Intensities before normalisation limma boxplot")
boxplot(log.intensities)
title(main="Intensities Before Normalisation")
# Create the intensities after normalisation boxplot
print("Making Intensities after normalisation limma boxplot")
boxplot(norm.intensities)
title(main="Intensities After Normalisation")
#Create the limma histograms for each combination:
print("Making limma histograms")
for(i in 1:nrow(ratio_combs)){
ratio_i_str<-paste(conditions.labels[ratio_combs[i,2]],"/",conditions.labels[ratio_combs[i,1]],sep="")
hist(fit2.coefficients[,i],main=paste("Log2 Fold Change ",ratio_i_str,sep=""), xlab="Log2 Fold Change", breaks=50 )
}
dev.off()
}
# For more customization options someone can modify the following functions:
# FROM: http://musicroamer.com/blog/2011/01/16/r-tips-and-tricks-modified-pairs-plot/
# The following functions are used to draw the different parts of the Reproducibility plot
# panel.cor.scale displays the correllation coeeficients in the upper right half of the Reproducibility plot
# the size of the text is proportional to the value of the coefficient
panel.cor.scale <- function(x, y, digits=2, prefix="", cex.cor){
usr <- par("usr"); on.exit(par(usr))
par(usr = c(0, 1, 0, 1))
r = (cor(x, y,use="pairwise"))
txt <- format(c(r, 0.123456789), digits=digits)[1]
txt <- paste(prefix, txt, sep="")
if(missing(cex.cor)) cex <- 0.8/strwidth(txt)
if(is.na(r))
{
txt="NA"
text(0.5, 0.5, txt, cex = cex * 0.25)
}
else
{
text(0.5, 0.5, txt, cex = cex * abs(r))
}
}
#panel.cor is not called by default but can replace panel.cor.scale if scaling the text acording to the R value is not desirable
panel.cor <- function(x, y, digits=2, prefix="", cex.cor){
usr <- par("usr"); on.exit(par(usr))
par(usr = c(0, 1, 0, 1))
r = (cor(x, y,use="pairwise"))
txt <- format(c(r, 0.123456789), digits=digits)[1]
txt <- paste(prefix, txt, sep="")
if(missing(cex.cor)) cex <- 0.8/strwidth(txt)
text(0.5, 0.5, txt, cex = cex )
}
# panel.hist draws the histograms in the diagonal of the Reproducibility plot
panel.hist <- function(x, ...){
#ratios.hist.colour is the colour of the histogram columns
ratios.hist.colour<-"cyan"
usr <- par("usr"); on.exit(par(usr))
par(usr = c(usr[1:2], 0, 1.5) )
h <- hist(x, breaks=panel.hist.breaks,plot = FALSE)
breaks <- h$breaks; nB <- length(breaks)
y <- h$counts; y <- y/max(y)
#If all values are 0 create a simple rectangle in the middle:
non_zero_values <- x != 0
if(any(non_zero_values))
{
rect(breaks[-nB], 0, breaks[-1], y, col=ratios.hist.colour, ...)
}
else
{
rect(-0.25, 0, 0.25, max(y), col=ratios.hist.colour, ...)
}
}
# panel.lmline creates the scatterplots displayed in the bottom left half of the Reproducibility plot
# FROM: http://www-personal.umich.edu/~ladamic/presentations/Rtutorial/Rtutorial.R
panel.lmline = function (x, y, col = par("col"), bg = NA, pch = par("pch"), cex = 1, col.smooth = "red", ...){
#Note: col.smooth is the colour of the linear regression line (by default red)
points(x, y, pch = pch, col = col, bg = bg, cex = cex)
ok <- is.finite(x) & is.finite(y)
unequal_values <- x != y
if (any(ok) && any(unequal_values))
{
lm_slope = coef(lm(y[ok] ~ x[ok]))[2]
if (!is.na(lm_slope))
{
abline(lm(y[ok] ~ x[ok]), col = col.smooth, ...)
}
else
{
print("Warning!: panel.lmline: found abline with NA slope, the regression line will not be drawn")
}
}
}
#Called by do_results_plot (by default smooth=TRUE,scale=TRUE,lm=TRUE)
pairs.panels <- function (x,y,smooth=TRUE,scale=FALSE,lm=FALSE){
if (smooth){
if (scale) {
if(lm){
pairs(x,diag.panel=panel.hist,upper.panel=panel.cor.scale,lower.panel=panel.lmline)
}else{
pairs(x,diag.panel=panel.hist,upper.panel=panel.cor.scale,lower.panel=panel.smooth)
}
}else{
if(lm){
pairs(x,diag.panel=panel.hist,upper.panel=panel.cor,lower.panel=panel.lmline)
}else{
pairs(x,diag.panel=panel.hist,upper.panel=panel.cor,lower.panel=panel.smooth)
}
}
}else{
if(scale){
pairs(x,diag.panel=panel.hist,upper.panel=panel.cor.scale)
}else{
pairs(x,diag.panel=panel.hist,upper.panel=panel.cor)
}
}
}
#MAIN process:
#Load the necessary variables: the file Plot_Generator.RData must be contained
#in the same folder with this script. It is loaded into the global environment
#so do_results_plots()/do_limma_plots() can see the objects it provides.
load("Plot_Generator.RData", .GlobalEnv)
#Draw the basic plots:
do_results_plots()
# Draw the limma plots:
do_limma_plots()
# message() rather than print(): status notes belong on stderr and can be suppressed.
message("Procedure finished")
|
#' esoph_ca: Esophageal Cancer dataset
#'
#' @description
#' Data from a case-control study of esophageal cancer in Ille-et-Vilaine, France, evaluating the effects of smoking and alcohol on the incidence of esophageal cancer. Smoking and alcohol are associated risk factors for squamous cell cancer of the esophagus, rather than adenocarcinoma of the esophagus, which is associated with obesity and esophageal reflux (more details available below the variable definitions).
#'
#' @details
#' An original base R dataset, though of somewhat unclear origin. The statistical textbook source is clear, though it is not clear which of the original epidemiological papers on esophageal cancer in Ille-et-Vilaine is referred to by this dataset. The original authors of the medical study were **not** credited in the base R dataset. There are several possible papers in PubMed, none of which quite match up with this dataset. This could be from Tuyns, AJ, et al., Bull Cancer, 1977;64(1):45-60, but this paper reports 778 controls, rather than the 975 found here. A 1975 paper from the same group reported 718 cases (Int J Epidemiol, 1975 Mar;4(1):55-9. doi: 10.1093/ije/4.1.55.). There is also another possible source - a 1975 paper from the same group, *Usefulness of population controls in retrospective studies of alcohol consumption. Experience from a case--control study of esophageal cancer in Ille-et-Vilaine, France*, Journal of Studies on Alcohol, 39(1): 175-182 (1978), which is behind a publisher paywall.
#'
#' @format A data frame with 88 rows and 5 variables, with 200 cases and 975 controls.
#'
#' \describe{
#' \item{agegp}{6 levels of age: "25-34", "35-44", "45-54", "55-64", "65-74", "75+"; type: ordinal factor}
#' \item{alcgp}{4 levels of alcohol consumption: "0-39g/day", "40-79", "80-119", "120+"; type: ordinal factor}
#' \item{tobgp}{4 levels of tobacco consumption: "0-9g/day", "10-19", "20-29", "30+"; type: ordinal factor}
#' \item{ncases}{Number of cases; type: integer}
#' \item{ncontrols}{Number of controls; type: integer}
#' }
#'
#'@section Figure 1 Benign FNA of Breast: Benign fine needle aspirate (FNA) of a breast lesion. Notice the regular size of cells and nuclei, which are organized in orderly spacing. The nuclei are homogeneously dark with few visible nucleoli.
#'\if{html}{\figure{benign_breast.png}{options: width=100\%}}
#'
#'@section Figure 2 Cancerous FNA of Breast: Malignant (cancerous) fine needle aspirate (FNA) of a breast lesion. Notice the very irregular size of cells and nuclei, which are are disorganized and seem to be growing over each other. The nuclei are also less homogeneously dark and more granular, suggesting active transcription from the dark nucleoli within each nucleus.
#'\if{html}{\figure{malignant_breast.png}{options: width=100\%}}
#'
#' @source Breslow, N. E. and Day, N. E. (1980) Statistical Methods in Cancer Research. Volume 1: The Analysis of Case-Control Studies. IARC Lyon / Oxford University Press.
#' Originally in base R datasets.
"esoph_ca"
|
/R/esoph_ca.R
|
permissive
|
higgi13425/medicaldata
|
R
| false
| false
| 3,031
|
r
|
#' esoph_ca: Esophageal Cancer dataset
#'
#' @description
#' Data from a case-control study of esophageal cancer in Ille-et-Vilaine, France, evaluating the effects of smoking and alcohol on the incidence of esophageal cancer. Smoking and alcohol are associated risk factors for squamous cell cancer of the esophagus, rather than adenocarcinoma of the esophagus, which is associated with obesity and esophageal reflux (more details available below the variable definitions).
#'
#' @details
#' An original base R dataset, though of somewhat unclear origin. The statistical textbook source is clear, though it is not clear which of the original epidemiological papers on esophageal cancer in Ille-et-Vilaine is referred to by this dataset. The original authors of the medical study were **not** credited in the base R dataset. There are several possible papers in PubMed, none of which quite match up with this dataset. This could be from Tuyns, AJ, et al., Bull Cancer, 1977;64(1):45-60, but this paper reports 778 controls, rather than the 975 found here. A 1975 paper from the same group reported 718 cases (Int J Epidemiol, 1975 Mar;4(1):55-9. doi: 10.1093/ije/4.1.55.). There is also another possible source - a 1975 paper from the same group, *Usefulness of population controls in retrospective studies of alcohol consumption. Experience from a case--control study of esophageal cancer in Ille-et-Vilaine, France*, Journal of Studies on Alcohol, 39(1): 175-182 (1978), which is behind a publisher paywall.
#'
#' @format A data frame with 88 rows and 5 variables, with 200 cases and 975 controls.
#'
#' \describe{
#' \item{agegp}{6 levels of age: "25-34", "35-44", "45-54", "55-64", "65-74", "75+"; type: ordinal factor}
#' \item{alcgp}{4 levels of alcohol consumption: "0-39g/day", "40-79", "80-119", "120+"; type: ordinal factor}
#' \item{tobgp}{4 levels of tobacco consumption: "0-9g/day", "10-19", "20-29", "30+"; type: ordinal factor}
#' \item{ncases}{Number of cases; type: integer}
#' \item{ncontrols}{Number of controls; type: integer}
#' }
#'
#'@section Figure 1 Benign FNA of Breast: Benign fine needle aspirate (FNA) of a breast lesion. Notice the regular size of cells and nuclei, which are organized in orderly spacing. The nuclei are homogeneously dark with few visible nucleoli.
#'\if{html}{\figure{benign_breast.png}{options: width=100\%}}
#'
#'@section Figure 2 Cancerous FNA of Breast: Malignant (cancerous) fine needle aspirate (FNA) of a breast lesion. Notice the very irregular size of cells and nuclei, which are are disorganized and seem to be growing over each other. The nuclei are also less homogeneously dark and more granular, suggesting active transcription from the dark nucleoli within each nucleus.
#'\if{html}{\figure{malignant_breast.png}{options: width=100\%}}
#'
#' @source Breslow, N. E. and Day, N. E. (1980) Statistical Methods in Cancer Research. Volume 1: The Analysis of Case-Control Studies. IARC Lyon / Oxford University Press.
#' Originally in base R datasets.
"esoph_ca"
|
\name{summary.cv.plsRglmmodel}
\alias{summary.cv.plsRglmmodel}
\title{Summary method for plsRglm models}
\description{
This function provides a summary method for the class \code{"cv.plsRglmmodel"}
}
\usage{
\method{summary}{cv.plsRglmmodel}(object, \dots)
}
\arguments{
\item{object}{an object of the class \code{"cv.plsRglmmodel"}}
\item{\dots}{further arguments to be passed to or from methods.}
}
%\details{}
\value{An object of class \code{"summary.cv.plsRmodel"} if \code{model} is missing or \code{model="pls"}. Otherwise an object of class \code{"summary.cv.plsRglmmodel"}.}
\references{
Nicolas Meyer, Myriam Maumy-Bertrand et \enc{Frederic}{Fr\'ed\'eric} Bertrand (2010). Comparing the linear and the logistic PLS regression with qualitative predictors: application to allelotyping data. \emph{Journal de la Societe Francaise de Statistique}, 151(2), pages 1-18.
\url{http://publications-sfds.math.cnrs.fr/index.php/J-SFdS/article/view/47}
}
\author{\enc{Frederic}{Fr\'ed\'eric} Bertrand\cr
\email{frederic.bertrand@math.unistra.fr}\cr
\url{http://www-irma.u-strasbg.fr/~fbertran/}
}
\seealso{\code{\link{summary}}}
\examples{
data(Cornell)
XCornell<-Cornell[,1:7]
yCornell<-Cornell[,8]
bbb <- cv.plsRglm(dataY=yCornell,dataX=XCornell,nt=10,NK=1,
modele="pls-glm-family",family=gaussian())
summary(bbb)
rm(list=c("XCornell","yCornell","bbb"))
}
\keyword{methods}
\keyword{print}
|
/man/summary.cv.plsRglmmodel.Rd
|
no_license
|
weecology/plsRglm
|
R
| false
| false
| 1,395
|
rd
|
\name{summary.cv.plsRglmmodel}
\alias{summary.cv.plsRglmmodel}
\title{Summary method for plsRglm models}
\description{
This function provides a summary method for the class \code{"cv.plsRglmmodel"}
}
\usage{
\method{summary}{cv.plsRglmmodel}(object, \dots)
}
\arguments{
\item{object}{an object of the class \code{"cv.plsRglmmodel"}}
\item{\dots}{further arguments to be passed to or from methods.}
}
%\details{}
\value{An object of class \code{"summary.cv.plsRmodel"} if \code{model} is missing or \code{model="pls"}. Otherwise an object of class \code{"summary.cv.plsRglmmodel"}.}
\references{
Nicolas Meyer, Myriam Maumy-Bertrand et \enc{Frederic}{Fr\'ed\'eric} Bertrand (2010). Comparing the linear and the logistic PLS regression with qualitative predictors: application to allelotyping data. \emph{Journal de la Societe Francaise de Statistique}, 151(2), pages 1-18.
\url{http://publications-sfds.math.cnrs.fr/index.php/J-SFdS/article/view/47}
}
\author{\enc{Frederic}{Fr\'ed\'eric} Bertrand\cr
\email{frederic.bertrand@math.unistra.fr}\cr
\url{http://www-irma.u-strasbg.fr/~fbertran/}
}
\seealso{\code{\link{summary}}}
\examples{
data(Cornell)
XCornell<-Cornell[,1:7]
yCornell<-Cornell[,8]
bbb <- cv.plsRglm(dataY=yCornell,dataX=XCornell,nt=10,NK=1,
modele="pls-glm-family",family=gaussian())
summary(bbb)
rm(list=c("XCornell","yCornell","bbb"))
}
\keyword{methods}
\keyword{print}
|
# SYS 6018 Kaggle Competition 4
# Group C4-4
# Navin Kasa
# Niharika Reddy
# Mengyao Zhang
library(tidyverse)
library(MASS)
library(dplyr)
library(jsonlite)
library(readr)
library(magrittr)
library(lubridate)
library(purrr)
library(ggplot2)
library(gridExtra)
#install.packages("countrycode")
library(countrycode)
#install.packages("highcharacter")
library(highcharter)
#install.packages("ggExtra")
library(ggExtra)
library(data.table)
#install.packages("funModeling")
library(funModeling)
library(gridExtra)
#library(dplyr)
#install.packages("zoo")
library(zoo)
library(stringr)
#install.packages("chron")
library(chron)
#install.packages("splusTimeDate")
library(splusTimeDate)
#install.packages("bsts")
library(bsts)
library(chron)
#Reading in data and combining the training and testing datasets
train <- read_csv(file = "train.csv", col_names = T) %>% mutate(Data= "Training")
test <- read_csv(file = "test.csv", col_names = T) %>% mutate(Data= "Testing")
df<-rbind(train,test)
# read in the updated test data
test_new <- read_csv(file="test_v2.csv",col_names = T)
head(test_new)
test_new$totals[1:10]
test_new$hits[1:10]
# drop the hits column
test_new <- test_new[,-c(2,7)] # dop the hits and customDimensions columns
colnames(test_new)
"
================================================================================
DATA CLEANING
================================================================================"
#Reading in data and combining the training and testing datasets
train <- read_csv(file = "train.csv", col_names = T) %>% mutate(Data= "Training")
test <- read_csv(file = "test.csv", col_names = T) %>% mutate(Data= "Testing")
df<-rbind(train,test)
#Viewing the data
head(df)
#There seem to be some JSON columns
str(df)
#JSON columns are : device, geoNetwork, totals, trafficSource
#Writing function to parse JSON
# ParseJSONColumn: parse a character vector of per-row JSON fragments into a
# single flattened tibble (one row per input element). The fragments are
# wrapped in "[ ... ]" so jsonlite sees one JSON array.
ParseJSONColumn <- function(x) {
  paste("[ ", paste(x, collapse = ",", sep = " "), " ]") %>%
    fromJSON(flatten = TRUE) %>%   # flatten nested objects into dotted columns
    as_tibble()                    # as.tibble() is deprecated; as_tibble() is the supported name
}
JSONcolumn_data <- df %>%
dplyr::select(trafficSource, totals, geoNetwork, device)
JSON_cols<-apply(JSONcolumn_data,2, FUN = ParseJSONColumn)
save(JSON_cols, file = "JSON_parsed.Rdata")
head(JSON_cols)
df <- cbind(df, JSON_cols)
# dropping the old json columns
df<-df %>% dplyr::select(-device, -geoNetwork, -totals, -trafficSource)
head(df)
#Several of the columns seem to have "not available in demo dataset","(not provided) "
#setting the same to NA
# values to convert to NA
na_vals <- c("unknown.unknown", "(not set)", "not available in demo dataset",
"(not provided)", "(none)", "<NA>")
for(col in 1:ncol(df)){
df[which(df[,col] %in% na_vals), col]= NA
}
glimpse(df)
#write.table(df, "cleaned_total_data.csv", row.names=F, sep=",")
#All of the columns that were converted from json are of class character.
#For some, we will need to change this.
# character columns to convert to numeric
num_cols <- c('totals.hits', 'totals.pageviews', 'totals.bounces', 'totals.newVisits',
'totals.transactionRevenue')
df[, num_cols] = lapply(df[, num_cols], function(x){as.numeric(x)})
glimpse(df)
#Coverting date from int to date format
df$date <- as.Date(as.character(df$date), format='%Y%m%d')
# convert visitStartTime to POSIXct
df$visitStartTime <- as_datetime(df$visitStartTime)
glimpse(df)
#imputing transaction revenue to 0 before removing na columns
df$totals.transactionRevenue[is.na(df$totals.transactionRevenue)] <- 0
# Imputing missing countries where city is captured
df$geoNetwork.city[(df$geoNetwork.country %>% is.na()) & (!df$geoNetwork.city %>% is.na())]
# [1] "Ningbo" "New York" "San Francisco" "Tunis" "Nairobi"
# [6] "New York" "Manila" "Osaka" "New York" "Kyiv"
# [11] "Kyiv" "Kyiv" "Hong Kong" "Santa Clara" "Kyiv"
# [16] "Moscow" "Kyiv" "Kyiv" "Kyiv" "Kyiv"
# [21] "London" "Dublin" "London" "Minneapolis" "New York"
# [26] "New York" "Melbourne" "Buenos Aires" "London" "Dublin"
# [31] "Kyiv" "London" "Kyiv" "Kyiv" "Kyiv"
# [36] "Kyiv" "Kyiv" "Bengaluru"
#
df$geoNetwork.country[df$geoNetwork.city %in% c("San Francisco", "New York","Santa Clara","Minneapolis")] <- "United States"
df$geoNetwork.country[df$geoNetwork.city %in% c("Tunis")] <- "Tunisia"
df$geoNetwork.country[df$geoNetwork.city %in% c("Nairobi")] <- "Kenya"
df$geoNetwork.country[df$geoNetwork.city %in% c("Manila")] <- "Philippines"
df$geoNetwork.country[df$geoNetwork.city %in% c("Osaka")] <- "Japan"
df$geoNetwork.country[df$geoNetwork.city %in% c("Kyiv")] <- "Ukraine"
df$geoNetwork.country[df$geoNetwork.city %in% c("Hong Kong")] <- "Hong Kong"
df$geoNetwork.country[df$geoNetwork.city %in% c("Moscow")] <- "Moscow"
df$geoNetwork.country[df$geoNetwork.city %in% c("London")] <- "United Kingdom"
df$geoNetwork.country[df$geoNetwork.city %in% c("Dublin")] <- "Ireland"
df$geoNetwork.country[df$geoNetwork.city %in% c("Melbourne")] <- "Australia"
df$geoNetwork.country[df$geoNetwork.city %in% c("Buenos Aires")] <- "Argentina"
df$geoNetwork.country[df$geoNetwork.city %in% c("Bengaluru")] <- "India"
"
============================================================================
EDA and Dimensionality Reduction
============================================================================
"
# Finding time ranges for train and test data
time_range_train <- range(train$date)
print(time_range_train)
#[1] "2016-08-01" "2017-08-01"
time_range_test <- range(test$date)
print(time_range_test)
#"2017-08-02" "2018-04-30"
#Checking the distribution of transaction revenues across time in the training data
g1 <- train[, .(n = .N), by=date] %>%
ggplot(aes(x=date, y=n)) +
geom_line(color='steelblue') +
geom_smooth(color='orange') +
labs(
x='',
y='Visits (000s)',
title='Daily visits'
)
g2 <- train[, .(revenue = sum(transactionRevenue, na.rm=TRUE)), by=date] %>%
ggplot(aes(x=date, y=revenue)) +
geom_line(color='steelblue') +
geom_smooth(color='orange') +
labs(
x='',
y='Revenue (unit dollars)',
title='Daily transaction revenue'
)
grid.arrange(g1, g2, nrow=2)
g1 <- train[, .(n = .N), by=channelGrouping] %>%
ggplot(aes(x=reorder(channelGrouping, -n), y=n/1000)) +
geom_bar(stat='identity', fill='steelblue') +
labs(x='Channel Grouping',
y='Visits (000s)',
title='Visits by channel grouping')
#Checking for columns with missing values
options(repr.plot.height=4)
NAcol <- which(colSums(is.na(df)) > 0)
NAcount <- sort(colSums(sapply(df[NAcol], is.na)), decreasing = TRUE)
colSums(df["device.operatingSystemVersion"] %>% is.na())
NAcount
NADF <- data.frame(variable=names(NAcount), missing=NAcount)
NADF$PctMissing <- round(((NADF$missing/nrow(df))*100),1)
NADF %>%
ggplot(aes(x=reorder(variable, PctMissing), y=PctMissing)) +
geom_bar(stat='identity', fill='blue') + coord_flip(y=c(0,110)) +
labs(x="", y="Percent missing") +
geom_text(aes(label=paste0(NADF$PctMissing, "%"), hjust=-0.1))
#Dropping all columns with more than 90% missing values
df1<-df[,colSums(!is.na(df)) > 0.9*nrow(df) ]
glimpse(df1)
# Converting some of the character variables to factors
categorical_columns <- c("device.browser", "device.deviceCategory", "device.operatingSystem", "geoNetwork.continent", "geoNetwork.country", "geoNetwork.subContinent", "trafficSource.source")
df1 <- mutate_at(df1, categorical_columns, as.factor)
#Exploring no. of unique values in columns to decide which additional columns can be dropped
#trafficSource.source analysis
unique(df1$trafficSource.source)
# More than 1 unique columns hence retaining the column
unique(df1$totals.visits)
# Unique values are 1, hence dropping the column
unique(df1$channelGrouping)
unique(df1$totals.hits)
unique(df1$totals.pageviews)
unique(df1$visitNumber)
unique(df1$socialEngagementType)
#Need to drop socialEngagementType as there is only 1 unique value
df1 <- subset(df1, select = -c(totals.visits,socialEngagementType))
# As continent and subcontinent are dervived from country, dropping those columns as well
df1 <- subset(df1, select = -c(geoNetwork.continent,geoNetwork.subContinent))
####### geoNetwork.country analysis
###### Unique country list
# [1] "Turkey" "Australia" "Spain" "Indonesia"
# [5] "United Kingdom" "Italy" "Pakistan" "Austria"
# [9] "Netherlands" "India" "France" "Brazil"
# [13] "China" "Singapore" "Argentina" "Poland"
# [17] "Germany" "Canada" "Thailand" "Hungary"
# [21] "Malaysia" "Denmark" "Taiwan" "Russia"
# [25] "Nigeria" "Belgium" "South Korea" "Chile"
# [29] "Ireland" "Philippines" "Greece" "Mexico"
# [33] "Montenegro" "United States" "Bangladesh" "Japan"
# [37] "Slovenia" "Czechia" "Sweden" "United Arab Emirates"
# [41] "Switzerland" "Portugal" "Peru" "Hong Kong"
# [45] "Vietnam" "Sri Lanka" "Serbia" "Norway"
# [49] "Romania" "Kenya" "Ukraine" "Israel"
# [53] "Slovakia" NA "Lithuania" "Puerto Rico"
# [57] "Bosnia & Herzegovina" "Croatia" "South Africa" "Paraguay"
# [61] "Botswana" "Colombia" "Uruguay" "Algeria"
# [65] "Finland" "Guatemala" "Egypt" "Malta"
# [69] "Bulgaria" "New Zealand" "Kuwait" "Uzbekistan"
# [73] "Saudi Arabia" "Cyprus" "Estonia" "Côte d’Ivoire"
# [77] "Morocco" "Tunisia" "Venezuela" "Dominican Republic"
# [81] "Senegal" "Cape Verde" "Costa Rica" "Kazakhstan"
# [85] "Macedonia (FYROM)" "Oman" "Laos" "Ethiopia"
# [89] "Panama" "Belarus" "Myanmar (Burma)" "Moldova"
# [93] "Zimbabwe" "Bahrain" "Mongolia" "Ghana"
# [97] "Albania" "Kosovo" "Georgia" "Tanzania"
# [101] "Bolivia" "Cambodia" "Turks & Caicos Islands" "Iraq"
# [105] "Jordan" "Lebanon" "Ecuador" "Madagascar"
# [109] "Togo" "Gambia" "Jamaica" "Trinidad & Tobago"
# [113] "Mauritius" "Libya" "Mauritania" "El Salvador"
# [117] "Azerbaijan" "Nicaragua" "Palestine" "Réunion"
# [121] "Iceland" "Greenland" "Armenia" "Haiti"
# [125] "Uganda" "Qatar" "St. Kitts & Nevis" "Somalia"
# [129] "Cameroon" "Namibia" "Latvia" "Congo - Kinshasa"
# [133] "New Caledonia" "Rwanda" "Kyrgyzstan" "Honduras"
# [137] "Nepal" "Benin" "Luxembourg" "Guinea"
# [141] "Belize" "Guinea-Bissau" "Sudan" "Yemen"
# [145] "Gabon" "Maldives" "Mozambique" "French Guiana"
# [149] "Zambia" "Macau" "Tajikistan" "Angola"
# [153] "Guadeloupe" "Martinique" "Brunei" "Guyana"
# [157] "St. Lucia" "Iran" "Monaco" "Swaziland"
# [161] "Curaçao" "Bermuda" "Guernsey" "Afghanistan"
# [165] "Northern Mariana Islands" "Guam" "Antigua & Barbuda" "Sint Maarten"
# [169] "Andorra" "St. Vincent & Grenadines" "Fiji" "Mali"
# [173] "Papua New Guinea" "Jersey" "Faroe Islands" "Cayman Islands"
# [177] "Chad" "French Polynesia" "Malawi" "Suriname"
# [181] "Barbados" "U.S. Virgin Islands" "Djibouti" "Mayotte"
# [185] "Aruba" "Lesotho" "Equatorial Guinea" "Burkina Faso"
# [189] "Grenada" "Norfolk Island" "Isle of Man" "Liechtenstein"
# [193] "Vanuatu" "Sierra Leone" "Bahamas" "Åland Islands"
# [197] "St. Pierre & Miquelon" "Gibraltar" "British Virgin Islands" "Burundi"
# [201] "Turkmenistan" "Niger" "Samoa" "Timor-Leste"
# [205] "Syria" "Comoros" "Liberia" "Bhutan"
# [209] "Cook Islands" "American Samoa" "Dominica" "Anguilla"
# [213] "Caribbean Netherlands" "Marshall Islands" "Congo - Brazzaville" "Seychelles"
# [217] "San Marino" "Central African Republic" "St. Martin" "São Tomé & Príncipe"
# [221] "Eritrea" "St. Barthélemy" "South Sudan" "Solomon Islands"
# [225] "Montserrat" "St. Helena" "Tonga" "Micronesia"
# Feature engineering using Date for holidays
us.bank.holidays <- read_csv("US Bank holidays.csv")
us.bank.holidays <- us.bank.holidays[, ! names(us.bank.holidays) %in% c("index"), drop = F]
holidays <- us.bank.holidays$date %>% as.list()
for(i in 1:11){
buffer.dates <- holidays %>% lapply(function(d){
data.frame(date=as.Date(d)-i, holiday = us.bank.holidays$holiday[us.bank.holidays$date==as.Date(d)])
})
buffer.dates <- do.call(rbind,buffer.dates)
us.bank.holidays <- us.bank.holidays %>% rbind(buffer.dates)
}
us.bank.holidays = us.bank.holidays[!duplicated(us.bank.holidays$date),]
df2 <- left_join(df1,unique(us.bank.holidays), by=c("date"))
df2 <- df2[,!names(df2) %in% c("holiday.x"), drop=F]
names(df2)[names(df2) == 'holiday.y'] <- 'holiday'
# removing some holidays for non-US countries
us.holidays <- c("New Year Day", "Independence Day", "Labor Day", "Thanksgiving Day", "Christmas Day")
row.holidays <- c("New Year Day", "Christmas Day")
df2$holiday[(df2$geoNetwork.country =="United States") & ! (df2$holiday %in% us.holidays) ] <- NA
df2$holiday[(df2$geoNetwork.country!="United States") & ! (df2$holiday %in% row.holidays) ] <- NA
df2["is.holiday"] <- !(df2$holiday %>% is.na())
## Engineering features to check if date is during a weekend, monthend or start of month
df2["weekend"] <- df2$date %>% is.weekend()
df2["monthend"] <- df2$date %>% format("%d") %in% c('27','28','29','30','31')
df2["monthstart"] <- df2$date %>% format("%d") %in% c('1','2','3', '4', '5')
df2$holiday <-ifelse(is.na(df2$holiday),"No",df2$holiday)
df2$monthend <- ifelse(df2$monthend==FALSE,"No","Yes")
df2$monthstart <- ifelse(df2$monthstart==FALSE,"No","Yes")
#Converting character vectors to factors
categorical_columns <- c("channelGrouping", "device.isMobile", "is.holiday", "monthend", "monthstart", "weekend")
df2 <- mutate_at(df2, categorical_columns, as.factor)
glimpse(df2)
levels(df2$monthstart)
# No dates in the start of the month, hence dropping the column
df2 <- subset(df2, select = -c(monthstart, holiday))
options(repr.plot.height=4)
NAcol <- which(colSums(is.na(df2)) > 0)
NAcount <- sort(colSums(sapply(df2[NAcol], is.na)), decreasing = TRUE)
NADF <- data.frame(variable=names(NAcount), missing=NAcount)
NADF$PctMissing <- round(((NADF$missing/nrow(df2))*100),1)
NADF %>%
ggplot(aes(x=reorder(variable, PctMissing), y=PctMissing)) +
geom_bar(stat='identity', fill='blue') + coord_flip(y=c(0,110)) +
labs(x="", y="Percent missing") +
geom_text(aes(label=paste0(NADF$PctMissing, "%"), hjust=-0.1))
# Imputing missing values in device.operatingSystem and geoNetwork.country with "Unknown".
# Both columns were converted to factors earlier, and ifelse() on a factor returns the
# underlying integer codes, silently destroying the labels (the cause of the
# "new levels 12, 13, 18" predict() errors later) -- so impute on the character
# representation instead. The original also indexed df$geoNetwork.country (the
# pre-cleaning frame) on the false branch; use df2's own column.
os <- as.character(df2$device.operatingSystem)
os[is.na(os)] <- "Unknown"
df2$device.operatingSystem <- os
country <- as.character(df2$geoNetwork.country)
country[is.na(country)] <- "Unknown"
df2$geoNetwork.country <- country
# Split on df2's own Data column rather than the pre-cleaning df's.
train <- df2 %>% filter(Data == "Training")
test  <- df2 %>% filter(Data == "Testing")
write.csv(df2, file="df2.csv", row.names=FALSE)
write.csv(train, file="train_final.csv", row.names=FALSE)
write.csv(test, file="test_final.csv", row.names=FALSE)
"
==========================================
OLS
==========================================
"
# Mode: return the most frequent value in x (first-encountered value wins ties).
# Works for any atomic vector or factor; preserves the input's type.
Mode <- function(x) {
  vals <- unique(x)
  counts <- tabulate(match(x, vals))
  vals[which.max(counts)]
}
#load("train.Rdata")
#load("test. Rdata. Rdata. Rdata")
#train[1:5,1:10]
str(train)
# convert categorical variables to factors
train$geoNetwork.country <- as.factor(train$geoNetwork.country)
train$device.operatingSystem <- as.factor(train$device.operatingSystem)
train$is.holiday <- as.factor(train$is.holiday)
# split train into estimation set and validation set
set.seed(123)
est_index <- sample(1:nrow(train), size =nrow(train)/2 )
train.est <- train[est_index,]
train.val <- train[-est_index,]
# check NAs in estimation set
nas.cols <- as.vector(rep(0, ncol(train.est)))
for(i in 1:ncol(train.est)){
nas.cols[i] <- sum(is.na(train.est[i]))
}
nas.cols
# Naming the vector colums
names(nas.cols) <- names(train.est)[1:ncol(train.est)]
# Finding columns with NAs for train.est data
with.nas <- nas.cols[nas.cols!=0]
with.nas
# trafficSource.source totals.pageviews device.browser
# 32 52 5
# impute NAs for trafficSource.source
Mode(train.est$trafficSource.source)
# [1] google
# 499 Levels: (direct) ... yt-go-12345.googleplex.com
train.est$trafficSource.source[which(is.na(train.est$trafficSource.source))] <- "google"
# impute NAs for totals.pageviews
train.est$totals.pageviews[which(is.na(train.est$totals.pageviews))] <- median(train.est$totals.pageviews,na.rm=TRUE)
# impute NAs for device.browser
Mode(train.est$device.browser)
# [1] Chrome
# 128 Levels: ;__CT_JOB_ID__:0a075729-93a5-43d0-9638-4cbd41d5f5a5; ...
train.est$device.browser[which(is.na(train.est$device.browser))] <- "Chrome"
# Model 1
# Excluding the following variables:
# date,fullVisitorId,sessionId,visitId,visitStartTime, Data
# trafficSource.source (get memory error if included)
# geoNetwork.country (too many levels)
# device.browser (too many levels)
lm.1 <- lm(totals.transactionRevenue ~channelGrouping+visitNumber+totals.hits+totals.pageviews
+device.operatingSystem+device.isMobile+device.deviceCategory+is.holiday+weekend+monthend, data=train.est)
summary(lm.1)
# Model 2
# Take out channelGrouping, device.operatingSystem
lm.2 <- lm(totals.transactionRevenue ~visitNumber+totals.hits+totals.pageviews
+device.isMobile+device.deviceCategory+is.holiday+weekend+monthend, data=train.est)
summary(lm.2)
# Model 3
# Take out totals.hits, device.isMobileTRUE,device.deviceCategory
lm.3 <- lm(totals.transactionRevenue ~visitNumber+totals.pageviews
+is.holiday+weekend+monthend, data=train.est)
summary(lm.3)
"
===========================
cross validate on valid set
===========================
"
# check NAs in valid set
nas.cols <- as.vector(rep(0, ncol(train.val)))
for(i in 1:ncol(train.val)){
nas.cols[i] <- sum(is.na(train.val[i]))
}
nas.cols
# Naming the vector colums
names(nas.cols) <- names(train.val)[1:ncol(train.val)]
# Finding columns with NAs for train.val data
with.nas <- nas.cols[nas.cols!=0]
with.nas
# trafficSource.source totals.pageviews device.browser
# 37 48 3
# impute NAs for trafficSource.source
Mode(train.val$trafficSource.source)
# [1] google
# 499 Levels: (direct) ... yt-go-12345.googleplex.com
train.val$trafficSource.source[which(is.na(train.val$trafficSource.source))] <- "google"
# impute NAs for totals.pageviews
train.val$totals.pageviews[which(is.na(train.val$totals.pageviews))] <- median(train.val$totals.pageviews,na.rm=TRUE)
# impute NAs for device.browser
Mode(train.val$device.browser)
# [1] Chrome
# 128 Levels: ;__CT_JOB_ID__:0a075729-93a5-43d0-9638-4cbd41d5f5a5; ...
train.val$device.browser[which(is.na(train.val$device.browser))] <- "Chrome"
# predict using lm.1
pred.1 <- predict(lm.1, newdata=train.val)
# factor device.operatingSystem has new levels 12, 13, 18
# find indices in train.val with these values
index.12 <- which(train.val$device.operatingSystem == 12)
index.12
# [1] 49618 303150
index.13 <- which(train.val$device.operatingSystem == 13)
index.13
# [1] 207100
index.18 <- which(train.val$device.operatingSystem == 18)
index.18
# [1] 314667
# replace those with mode in train.est for device.operatingSystem, so that
# predict() does not fail on factor levels unseen during estimation
OS.mode <- Mode(train.est$device.operatingSystem)
OS.mode
# [1] 21
# c(), not cbind(): cbind() recycles the shorter index vectors into a matrix,
# silently duplicating (or fabricating) row indices. Assign OS.mode rather
# than a hard-coded code so the fix tracks the data.
train.val$device.operatingSystem[c(index.12, index.13, index.18)] <- OS.mode
# predict using lm.1 again
pred.1 <- predict(lm.1, newdata=train.val)
MSE <- mean((train.val$totals.transactionRevenue-pred.1)^2)
MSE
# [1] 3.114098e+15
# Predict using lm.2
pred.2 <- predict(lm.2, newdata=train.val)
MSE <- mean((train.val$totals.transactionRevenue-pred.2)^2)
MSE
# [1] 3.115807e+15
# Predict using lm.3
pred.3 <- predict(lm.3, newdata=train.val)
MSE <- mean((train.val$totals.transactionRevenue-pred.3)^2)
MSE
# [1] 3.116231e+15
# MODEL 1,2 have lower MSE, build models on entire train
lm.1 <- lm(totals.transactionRevenue ~channelGrouping+visitNumber+totals.hits+totals.pageviews
+device.operatingSystem+device.isMobile+device.deviceCategory+is.holiday+weekend+monthend, data=train)
summary(lm.1)
lm.2 <- lm(totals.transactionRevenue ~visitNumber+totals.hits+totals.pageviews
+device.isMobile+device.deviceCategory+is.holiday+weekend+monthend, data=train)
summary(lm.2)
"
========================
PREDICT ON OLD TEST DATA
========================
"
# convert categorical variables to factors
test$geoNetwork.country <- as.factor(test$geoNetwork.country)
test$device.operatingSystem <- as.factor(test$device.operatingSystem)
test$is.holiday <- as.factor(test$is.holiday)
# predict using lm.1
test.pred.1 <- predict(lm.1, newdata=test)
# Error in model.frame.default(Terms, newdata, na.action = na.action, xlev = object$xlevels) :
# factor device.operatingSystem has new levels 15, 16, 19, 20
# replace those with mode in test for device.operatingSystem
OS.mode.test <- Mode(test$device.operatingSystem)
OS.mode.test
# [1] "21"
# find indices in test with these values (factor levels unseen during training)
index.15 <- which(test$device.operatingSystem == 15)
index.15
index.16 <- which(test$device.operatingSystem == 16)
index.16
index.19 <- which(test$device.operatingSystem == 19)
index.19
index.20 <- which(test$device.operatingSystem == 20)
index.20
# c(), not cbind(): cbind() recycles unequal-length index vectors into a
# matrix, silently duplicating row indices.
test$device.operatingSystem[c(index.15, index.16, index.19, index.20)] <- 21
# predict using lm.1 again after cleaning of test
test.pred.1 <- predict(lm.1, newdata=test)
# bind fullVisitorId with predicted value
prediction.1 <- data.frame(cbind(test$fullVisitorId,test.pred.1))
names(prediction.1) <- c("fullVisitorId","predRevenue")
prediction.1$predRevenue <- as.numeric(prediction.1$predRevenue)
# group by fullVistorId
prediction.1.new <-group_by(prediction.1,fullVisitorId)
prediction.1.summary <-summarise(prediction.1.new, total = sum(predRevenue))
prediction.1.summary$PredictedLogRevenue <-log(prediction.1.summary$total+1)
prediction.1.summary <- prediction.1.summary[,c(1,3)]
head(prediction.1.summary)
# fullVisitorId PredictedLogRevenue
# <fct> <dbl>
# 1 0000000259678714014 11.3
# 2 0000049363351866189 11.1
# 3 0000053049821714864 8.24
# 4 0000059488412965267 9.27
# 5 0000085840370633780 6.18
# 6 0000091131414287111 8.18
nrow(prediction.1.summary)
# replace NAs in the summary with 0
prediction.1.summary[which(is.na(prediction.1.summary$PredictedLogRevenue)),2] <- 0
# write to txt file so fullVisitorId has leading zeros
# for submission, import txt file to Excel and then save as csv
write.table(prediction.1.summary, file = "C4-4_OLS_1.txt", sep = "\t",
row.names = F, col.names = c("fullVisitorId", "PredictedLogRevenue"))
# predict using lm.2
test.pred.2 <- predict(lm.2, newdata=test)
prediction.2 <- data.frame(cbind(test$fullVisitorId,test.pred.2))
names(prediction.2) <- c("fullVisitorId","predRevenue")
prediction.2$predRevenue <- as.numeric(prediction.2$predRevenue)
# group by fullVistorId
prediction.2.new <-group_by(prediction.2,fullVisitorId)
prediction.2.summary <-summarise(prediction.2.new, total = sum(predRevenue))
prediction.2.summary$PredictedLogRevenue <-log(prediction.2.summary$total+1)
prediction.2.summary <- prediction.2.summary[,c(1,3)]
head(prediction.2.summary)
# fullVisitorId PredictedLogRevenue
# <fct> <dbl>
# 1 0000000259678714014 7.84
# 2 0000049363351866189 7.05
# 3 0000053049821714864 6.38
# 4 0000059488412965267 7.04
# 5 0000085840370633780 6.39
# 6 0000091131414287111 6.10
nrow(prediction.2.summary)
# replace NAs in the summary with 0
prediction.2.summary[which(is.na(prediction.2.summary$PredictedLogRevenue)),2] <- 0
# write to txt file so fullVisitorId has leading zeros
# for submission, import txt file to Excel and then save as csv
write.table(prediction.2.summary, file = "C4-4_OLS_2.txt", sep = "\t",
row.names = F, col.names = c("fullVisitorId", "PredictedLogRevenue"))
"
The following code is trying to use the model built on old train data to predict on new test data
"
"
=====================
CLEAN NEW TEST DATA
=====================
"
str(test_new)
#JSON columns are : device, geoNetwork, totals, trafficSource
# parse JSON
JSONcolumn_data <- test_new %>%
dplyr::select(trafficSource, totals, geoNetwork, device)
JSON_cols<-apply(JSONcolumn_data,2, FUN = ParseJSONColumn)
save(JSON_cols, file = "test_JSON_parsed.Rdata")
head(JSON_cols)
test_new <- cbind(test_new, JSON_cols)
# dropping the old json columns
test_new<-test_new %>% dplyr::select(-device, -geoNetwork, -totals, -trafficSource)
head(test_new)
#Several of the columns seem to have "not available in demo dataset","(not provided) "
#setting the same to NA
# values to convert to NA
na_vals <- c("unknown.unknown", "(not set)", "not available in demo dataset",
"(not provided)", "(none)", "<NA>")
for(col in 1:ncol(test_new)){
test_new[which(test_new[,col] %in% na_vals), col]= NA
}
glimpse(test_new)
#write.table(df, "cleaned_total_data.csv", row.names=F, sep=",")
#All of the columns that were converted from json are of class character.
#For some, we will need to change this.
# character columns to convert to numeric
num_cols <- c('totals.hits', 'totals.pageviews', 'totals.bounces', 'totals.newVisits',
'totals.transactionRevenue')
test_new[, num_cols] = lapply(test_new[, num_cols], function(x){as.numeric(x)})
glimpse(test_new)
# Converting date from integer YYYYMMDD to Date format
test_new$date <- as.Date(as.character(test_new$date), format='%Y%m%d')
# convert visitStartTime to POSIXct
test_new$visitStartTime <- as_datetime(test_new$visitStartTime)
glimpse(test_new)
#imputing transaction revenue to 0 before removing na columns
test_new$totals.transactionRevenue[is.na(test_new$totals.transactionRevenue)] <- 0
# Imputing missing countries where city is captured
test_new$geoNetwork.city[(test_new$geoNetwork.country %>% is.na()) & (!test_new$geoNetwork.city %>% is.na())]
# [1] "Mexico City" "Bengaluru" "Bengaluru" "Santa Clara" "Austin"
test_new$geoNetwork.country[test_new$geoNetwork.city %in% c("Santa Clara", "Austin")] <- "United States"
test_new$geoNetwork.country[test_new$geoNetwork.city %in% c("Mexico City")] <- "Mexico"
test_new$geoNetwork.country[test_new$geoNetwork.city %in% c("Bengaluru")] <- "India"
col_name_train <- colnames(train)
# Feature engineering using Date for holidays
us.bank.holidays <- read_csv("US Bank holidays.csv")
us.bank.holidays <- us.bank.holidays[, ! names(us.bank.holidays) %in% c("index"), drop = F]
holidays <- us.bank.holidays$date %>% as.list()
for(i in 1:11){
buffer.dates <- holidays %>% lapply(function(d){
data.frame(date=as.Date(d)-i, holiday = us.bank.holidays$holiday[us.bank.holidays$date==as.Date(d)])
})
buffer.dates <- do.call(rbind,buffer.dates)
us.bank.holidays <- us.bank.holidays %>% rbind(buffer.dates)
}
us.bank.holidays = us.bank.holidays[!duplicated(us.bank.holidays$date),]
test_new_2 <- left_join(test_new,unique(us.bank.holidays), by=c("date"))
test_new_2 <- test_new_2[,!names(test_new_2) %in% c("holiday.x"), drop=F]
names(test_new_2)[names(test_new_2) == 'holiday.y'] <- 'holiday'
# removing some holidays for non-US countries
us.holidays <- c("New Year Day", "Independence Day", "Labor Day", "Thanksgiving Day", "Christmas Day")
row.holidays <- c("New Year Day", "Christmas Day")
test_new_2$holiday[(test_new_2$geoNetwork.country =="United States") & ! (test_new_2$holiday %in% us.holidays) ] <- NA
test_new_2$holiday[(test_new_2$geoNetwork.country!="United States") & ! (test_new_2$holiday %in% row.holidays) ] <- NA
test_new_2["is.holiday"] <- !(test_new_2$holiday %>% is.na())
## Engineering features to check if date is during a weekend, monthend or start of month
test_new_2["weekend"] <- test_new_2$date %>% is.weekend()
test_new_2["monthend"] <- test_new_2$date %>% format("%d") %in% c('27','28','29','30','31')
test_new_2["monthstart"] <- test_new_2$date %>% format("%d") %in% c('1','2','3', '4', '5')
test_new_2$holiday <-ifelse(is.na(test_new_2$holiday),"No",test_new_2$holiday)
test_new_2$monthend <- ifelse(test_new_2$monthend==FALSE,"No","Yes")
test_new_2$monthstart <- ifelse(test_new_2$monthstart==FALSE,"No","Yes")
# keep the same columns as train
col_name_train <- col_name_train[-c(4,8)]
test_new_2 <- test_new_2[,col_name_train]
glimpse(test_new_2)
# convert categorical variables to factors
test_new_2$geoNetwork.country <- as.factor(test_new_2$geoNetwork.country)
test_new_2$device.operatingSystem <- as.factor(test_new_2$device.operatingSystem)
test_new_2$is.holiday <- as.factor(test_new_2$is.holiday)
categorical_col <- c("channelGrouping","trafficSource.source","device.browser",
"device.isMobile","device.deviceCategory","is.holiday","weekend","monthend")
test_new_2 <- mutate_at(test_new_2, categorical_col, as.factor)
# write.csv(test_new_2, file="test_new_clean.csv", row.names=FALSE)
# save(test_new_2,file="test_new_2.Rdata")
"
========================
PREDICT ON NEW TEST
========================
"
# Predict on the cleaned new test data (test_new_2) with model lm.2 and build
# the submission file (one row per fullVisitorId, log of summed revenue).
test.pred.2 <- predict(lm.2, newdata=test_new_2)
# cbind() coerces everything to character, which keeps fullVisitorId's leading
# zeros intact; predRevenue is converted back to numeric two lines below.
prediction <- data.frame(cbind(test_new_2$fullVisitorId,test.pred.2))
names(prediction) <- c("fullVisitorId","predRevenue")
prediction$predRevenue <- as.numeric(prediction$predRevenue)
# Aggregate session-level predictions to one total per visitor.
prediction.new <-group_by(prediction,fullVisitorId)
prediction.summary <-summarise(prediction.new, total = sum(predRevenue))
# Kaggle target is log(revenue + 1).
# NOTE(review): OLS can predict negative revenue; log(total + 1) yields NaN
# when total < -1 -- presumably the NA-to-0 step below is meant to absorb
# that, but confirm NaN rows are actually caught by is.na().
prediction.summary$PredictedLogRevenue <-log(prediction.summary$total+1)
# Keep only fullVisitorId (col 1) and PredictedLogRevenue (col 3).
prediction.summary <- prediction.summary[,c(1,3)]
head(prediction.summary)
nrow(prediction.summary)
# replace NAs in summary with 0
prediction.summary[which(is.na(prediction.summary$PredictedLogRevenue)),2] <- 0
# write to txt file so fullVisitorId keeps its leading zeros
# for submission, import txt file to Excel and then save as csv
write.table(prediction.summary, file = "C4-4_OLS.txt", sep = "\t",
            row.names = F, col.names = c("fullVisitorId", "PredictedLogRevenue"))
# Score the new test data with model lm.3 as well.
test.pred.3 <- predict(lm.3, newdata=test_new_2)
|
/C4-4 OLS.r
|
no_license
|
zhang90s/sys6018-competition-revenue-prediction
|
R
| false
| false
| 34,258
|
r
|
# SYS 6018 Kaggle Competition 4
# Group C4-4
# Navin Kasa
# Niharika Reddy
# Mengyao Zhang
library(tidyverse)
library(MASS)
library(dplyr)
library(jsonlite)
library(readr)
library(magrittr)
library(lubridate)
library(purrr)
library(ggplot2)
library(gridExtra)
#install.packages("countrycode")
library(countrycode)
#install.packages("highcharacter")
library(highcharter)
#install.packages("ggExtra")
library(ggExtra)
library(data.table)
#install.packages("funModeling")
library(funModeling)
library(gridExtra)
#library(dplyr)
#install.packages("zoo")
library(zoo)
library(stringr)
#install.packages("chron")
library(chron)
#install.packages("splusTimeDate")
library(splusTimeDate)
#install.packages("bsts")
library(bsts)
library(chron)
# Read the original train/test CSVs, tag each with its split, and stack them
# so cleaning can be applied once to the combined frame.
train <- read_csv(file = "train.csv", col_names = T) %>% mutate(Data= "Training")
test <- read_csv(file = "test.csv", col_names = T) %>% mutate(Data= "Testing")
df<-rbind(train,test)
# read in the updated test data (test set v2, released later in the competition)
test_new <- read_csv(file="test_v2.csv",col_names = T)
# Quick inspection of the raw JSON-bearing columns.
head(test_new)
test_new$totals[1:10]
test_new$hits[1:10]
# drop columns 2 and 7 by position
test_new <- test_new[,-c(2,7)] # drop the hits and customDimensions columns
colnames(test_new)
"
================================================================================
DATA CLEANING
================================================================================"
#Reading in data and combining the training and testing datasets
train <- read_csv(file = "train.csv", col_names = T) %>% mutate(Data= "Training")
test <- read_csv(file = "test.csv", col_names = T) %>% mutate(Data= "Testing")
df<-rbind(train,test)
#Viewing the data
head(df)
#There seem to be some JSON columns
str(df)
#JSON columns are : device, geoNetwork, totals, trafficSource
#Writing function to parse JSON
# Parse a character column of per-row JSON objects into one flat tibble.
#
# Each element of `x` holds a single JSON object; joining the elements with
# commas and wrapping the result in "[ ... ]" produces one JSON array, which
# jsonlite::fromJSON() can parse and flatten in a single pass (nested fields
# become dotted column names, e.g. totals.hits).
#
# @param x Character vector of JSON object strings (one per row).
# @return A tibble with one row per element of `x`.
ParseJSONColumn <- function(x) {
  paste("[ ", paste(x, collapse = ",", sep = " "), " ]") %>%
    fromJSON(flatten = TRUE) %>%   # TRUE spelled out (T is reassignable)
    as_tibble()                    # as.tibble() is deprecated in tibble >= 2.0
}
JSONcolumn_data <- df %>%
dplyr::select(trafficSource, totals, geoNetwork, device)
JSON_cols<-apply(JSONcolumn_data,2, FUN = ParseJSONColumn)
save(JSON_cols, file = "JSON_parsed.Rdata")
head(JSON_cols)
df <- cbind(df, JSON_cols)
# dropping the old json columns
df<-df %>% dplyr::select(-device, -geoNetwork, -totals, -trafficSource)
head(df)
# Several columns carry placeholder strings ("not available in demo dataset",
# "(not provided)", ...) instead of real missing values; recode them to NA
# column by column.
na_vals <- c("unknown.unknown", "(not set)", "not available in demo dataset",
             "(not provided)", "(none)", "<NA>")
# seq_len() is safe for zero-column frames, unlike 1:ncol(df).
for (col in seq_len(ncol(df))) {
  df[which(df[, col] %in% na_vals), col] <- NA
}
glimpse(df)
#write.table(df, "cleaned_total_data.csv", row.names=F, sep=",")
#All of the columns that were converted from json are of class character.
#For some, we will need to change this.
# character columns to convert to numeric
num_cols <- c('totals.hits', 'totals.pageviews', 'totals.bounces', 'totals.newVisits',
'totals.transactionRevenue')
df[, num_cols] = lapply(df[, num_cols], function(x){as.numeric(x)})
glimpse(df)
# Converting date from integer YYYYMMDD to Date format
df$date <- as.Date(as.character(df$date), format='%Y%m%d')
# convert visitStartTime to POSIXct
df$visitStartTime <- as_datetime(df$visitStartTime)
glimpse(df)
#imputing transaction revenue to 0 before removing na columns
df$totals.transactionRevenue[is.na(df$totals.transactionRevenue)] <- 0
# Imputing missing countries where city is captured.
# Inspect which cities appear with a missing country:
df$geoNetwork.city[(df$geoNetwork.country %>% is.na()) & (!df$geoNetwork.city %>% is.na())]
# Observed cities with NA country: Ningbo, New York, San Francisco, Tunis,
# Nairobi, Manila, Osaka, Kyiv, Hong Kong, Santa Clara, Moscow, London,
# Minneapolis, Dublin, Melbourne, Buenos Aires, Bengaluru.
# Map each of those cities to its country via a named lookup vector.
city_to_country <- c(
  "San Francisco" = "United States", "New York"    = "United States",
  "Santa Clara"   = "United States", "Minneapolis" = "United States",
  "Tunis"         = "Tunisia",       "Nairobi"     = "Kenya",
  "Manila"        = "Philippines",   "Osaka"       = "Japan",
  "Kyiv"          = "Ukraine",       "Hong Kong"   = "Hong Kong",
  "Moscow"        = "Russia",        # bug fix: was incorrectly set to "Moscow"
  "Ningbo"        = "China",         # bug fix: listed above but never imputed
  "London"        = "United Kingdom", "Dublin"     = "Ireland",
  "Melbourne"     = "Australia",     "Buenos Aires" = "Argentina",
  "Bengaluru"     = "India"
)
# Assign the looked-up country for every row whose city is in the table
# (same behaviour as the original per-city assignments: applies to all rows
# with that city, not only those with a missing country).
fixable <- df$geoNetwork.city %in% names(city_to_country)
df$geoNetwork.country[fixable] <- city_to_country[df$geoNetwork.city[fixable]]
"
============================================================================
EDA and Dimensionality Reduction
============================================================================
"
# Finding time ranges for train and test data
# NOTE(review): `train`/`test` here are the raw read_csv frames, where `date`
# is still an integer like 20160801 -- range() works but prints integers, not
# the quoted dates shown below; confirm which frame was intended.
time_range_train <- range(train$date)
print(time_range_train)
#[1] "2016-08-01" "2017-08-01"
time_range_test <- range(test$date)
print(time_range_test)
#"2017-08-02" "2018-04-30"
#Checking the distribution of transaction revenues across time in the training data
# NOTE(review): `train[, .(n = .N), by=date]` is data.table syntax; `train`
# was read with read_csv (a tibble), so this errors unless setDT(train) /
# as.data.table(train) is applied first -- confirm intent.
g1 <- train[, .(n = .N), by=date] %>%
  ggplot(aes(x=date, y=n)) +
  geom_line(color='steelblue') +
  geom_smooth(color='orange') +
  labs(
    x='',
    y='Visits (000s)',
    title='Daily visits'
  )
# NOTE(review): after JSON parsing the revenue column is named
# `totals.transactionRevenue`; plain `transactionRevenue` looks like a stale
# column name -- verify against the frame this was meant to run on.
g2 <- train[, .(revenue = sum(transactionRevenue, na.rm=TRUE)), by=date] %>%
  ggplot(aes(x=date, y=revenue)) +
  geom_line(color='steelblue') +
  geom_smooth(color='orange') +
  labs(
    x='',
    y='Revenue (unit dollars)',
    title='Daily transaction revenue'
  )
# Stack the daily-visits and daily-revenue plots vertically.
grid.arrange(g1, g2, nrow=2)
# Visits per acquisition channel (bar chart, descending). Note: reuses the
# name g1, overwriting the daily-visits plot above.
g1 <- train[, .(n = .N), by=channelGrouping] %>%
  ggplot(aes(x=reorder(channelGrouping, -n), y=n/1000)) +
  geom_bar(stat='identity', fill='steelblue') +
  labs(x='Channel Grouping',
       y='Visits (000s)',
       title='Visits by channel grouping')
#Checking for columns with missing values
options(repr.plot.height=4)
NAcol <- which(colSums(is.na(df)) > 0)
NAcount <- sort(colSums(sapply(df[NAcol], is.na)), decreasing = TRUE)
colSums(df["device.operatingSystemVersion"] %>% is.na())
NAcount
NADF <- data.frame(variable=names(NAcount), missing=NAcount)
NADF$PctMissing <- round(((NADF$missing/nrow(df))*100),1)
NADF %>%
ggplot(aes(x=reorder(variable, PctMissing), y=PctMissing)) +
geom_bar(stat='identity', fill='blue') + coord_flip(y=c(0,110)) +
labs(x="", y="Percent missing") +
geom_text(aes(label=paste0(NADF$PctMissing, "%"), hjust=-0.1))
#Dropping all columns with more than 90% missing values
df1<-df[,colSums(!is.na(df)) > 0.9*nrow(df) ]
glimpse(df1)
# Converting some of the character variables to factors
categorical_columns <- c("device.browser", "device.deviceCategory", "device.operatingSystem", "geoNetwork.continent", "geoNetwork.country", "geoNetwork.subContinent", "trafficSource.source")
df1 <- mutate_at(df1, categorical_columns, as.factor)
#Exploring no. of unique values in columns to decide which additional columns can be dropped
#trafficSource.source analysis
unique(df1$trafficSource.source)
# More than 1 unique columns hence retaining the column
unique(df1$totals.visits)
# Unique values are 1, hence dropping the column
unique(df1$channelGrouping)
unique(df1$totals.hits)
unique(df1$totals.pageviews)
unique(df1$visitNumber)
unique(df1$socialEngagementType)
#Need to drop socialEngagementType as there is only 1 unique value
df1 <- subset(df1, select = -c(totals.visits,socialEngagementType))
# As continent and subcontinent are derived from country, dropping those columns as well
df1 <- subset(df1, select = -c(geoNetwork.continent,geoNetwork.subContinent))
####### geoNetwork.country analysis
###### Unique country list
# [1] "Turkey" "Australia" "Spain" "Indonesia"
# [5] "United Kingdom" "Italy" "Pakistan" "Austria"
# [9] "Netherlands" "India" "France" "Brazil"
# [13] "China" "Singapore" "Argentina" "Poland"
# [17] "Germany" "Canada" "Thailand" "Hungary"
# [21] "Malaysia" "Denmark" "Taiwan" "Russia"
# [25] "Nigeria" "Belgium" "South Korea" "Chile"
# [29] "Ireland" "Philippines" "Greece" "Mexico"
# [33] "Montenegro" "United States" "Bangladesh" "Japan"
# [37] "Slovenia" "Czechia" "Sweden" "United Arab Emirates"
# [41] "Switzerland" "Portugal" "Peru" "Hong Kong"
# [45] "Vietnam" "Sri Lanka" "Serbia" "Norway"
# [49] "Romania" "Kenya" "Ukraine" "Israel"
# [53] "Slovakia" NA "Lithuania" "Puerto Rico"
# [57] "Bosnia & Herzegovina" "Croatia" "South Africa" "Paraguay"
# [61] "Botswana" "Colombia" "Uruguay" "Algeria"
# [65] "Finland" "Guatemala" "Egypt" "Malta"
# [69] "Bulgaria" "New Zealand" "Kuwait" "Uzbekistan"
# [73] "Saudi Arabia" "Cyprus" "Estonia" "Côte d’Ivoire"
# [77] "Morocco" "Tunisia" "Venezuela" "Dominican Republic"
# [81] "Senegal" "Cape Verde" "Costa Rica" "Kazakhstan"
# [85] "Macedonia (FYROM)" "Oman" "Laos" "Ethiopia"
# [89] "Panama" "Belarus" "Myanmar (Burma)" "Moldova"
# [93] "Zimbabwe" "Bahrain" "Mongolia" "Ghana"
# [97] "Albania" "Kosovo" "Georgia" "Tanzania"
# [101] "Bolivia" "Cambodia" "Turks & Caicos Islands" "Iraq"
# [105] "Jordan" "Lebanon" "Ecuador" "Madagascar"
# [109] "Togo" "Gambia" "Jamaica" "Trinidad & Tobago"
# [113] "Mauritius" "Libya" "Mauritania" "El Salvador"
# [117] "Azerbaijan" "Nicaragua" "Palestine" "Réunion"
# [121] "Iceland" "Greenland" "Armenia" "Haiti"
# [125] "Uganda" "Qatar" "St. Kitts & Nevis" "Somalia"
# [129] "Cameroon" "Namibia" "Latvia" "Congo - Kinshasa"
# [133] "New Caledonia" "Rwanda" "Kyrgyzstan" "Honduras"
# [137] "Nepal" "Benin" "Luxembourg" "Guinea"
# [141] "Belize" "Guinea-Bissau" "Sudan" "Yemen"
# [145] "Gabon" "Maldives" "Mozambique" "French Guiana"
# [149] "Zambia" "Macau" "Tajikistan" "Angola"
# [153] "Guadeloupe" "Martinique" "Brunei" "Guyana"
# [157] "St. Lucia" "Iran" "Monaco" "Swaziland"
# [161] "Curaçao" "Bermuda" "Guernsey" "Afghanistan"
# [165] "Northern Mariana Islands" "Guam" "Antigua & Barbuda" "Sint Maarten"
# [169] "Andorra" "St. Vincent & Grenadines" "Fiji" "Mali"
# [173] "Papua New Guinea" "Jersey" "Faroe Islands" "Cayman Islands"
# [177] "Chad" "French Polynesia" "Malawi" "Suriname"
# [181] "Barbados" "U.S. Virgin Islands" "Djibouti" "Mayotte"
# [185] "Aruba" "Lesotho" "Equatorial Guinea" "Burkina Faso"
# [189] "Grenada" "Norfolk Island" "Isle of Man" "Liechtenstein"
# [193] "Vanuatu" "Sierra Leone" "Bahamas" "Åland Islands"
# [197] "St. Pierre & Miquelon" "Gibraltar" "British Virgin Islands" "Burundi"
# [201] "Turkmenistan" "Niger" "Samoa" "Timor-Leste"
# [205] "Syria" "Comoros" "Liberia" "Bhutan"
# [209] "Cook Islands" "American Samoa" "Dominica" "Anguilla"
# [213] "Caribbean Netherlands" "Marshall Islands" "Congo - Brazzaville" "Seychelles"
# [217] "San Marino" "Central African Republic" "St. Martin" "São Tomé & Príncipe"
# [221] "Eritrea" "St. Barthélemy" "South Sudan" "Solomon Islands"
# [225] "Montserrat" "St. Helena" "Tonga" "Micronesia"
# Feature engineering using Date for holidays
us.bank.holidays <- read_csv("US Bank holidays.csv")
us.bank.holidays <- us.bank.holidays[, ! names(us.bank.holidays) %in% c("index"), drop = F]
holidays <- us.bank.holidays$date %>% as.list()
for(i in 1:11){
buffer.dates <- holidays %>% lapply(function(d){
data.frame(date=as.Date(d)-i, holiday = us.bank.holidays$holiday[us.bank.holidays$date==as.Date(d)])
})
buffer.dates <- do.call(rbind,buffer.dates)
us.bank.holidays <- us.bank.holidays %>% rbind(buffer.dates)
}
us.bank.holidays = us.bank.holidays[!duplicated(us.bank.holidays$date),]
df2 <- left_join(df1,unique(us.bank.holidays), by=c("date"))
df2 <- df2[,!names(df2) %in% c("holiday.x"), drop=F]
names(df2)[names(df2) == 'holiday.y'] <- 'holiday'
# removing some holidays for non-US countries
us.holidays <- c("New Year Day", "Independence Day", "Labor Day", "Thanksgiving Day", "Christmas Day")
row.holidays <- c("New Year Day", "Christmas Day")
df2$holiday[(df2$geoNetwork.country =="United States") & ! (df2$holiday %in% us.holidays) ] <- NA
df2$holiday[(df2$geoNetwork.country!="United States") & ! (df2$holiday %in% row.holidays) ] <- NA
df2["is.holiday"] <- !(df2$holiday %>% is.na())
## Engineering features to check if date is during a weekend, monthend or start of month
df2["weekend"] <- df2$date %>% is.weekend()
df2["monthend"] <- df2$date %>% format("%d") %in% c('27','28','29','30','31')
df2["monthstart"] <- df2$date %>% format("%d") %in% c('1','2','3', '4', '5')
df2$holiday <-ifelse(is.na(df2$holiday),"No",df2$holiday)
df2$monthend <- ifelse(df2$monthend==FALSE,"No","Yes")
df2$monthstart <- ifelse(df2$monthstart==FALSE,"No","Yes")
#Converting character vectors to factors
categorical_columns <- c("channelGrouping", "device.isMobile", "is.holiday", "monthend", "monthstart", "weekend")
df2 <- mutate_at(df2, categorical_columns, as.factor)
glimpse(df2)
levels(df2$monthstart)
# No dates in the start of the month, hence dropping the column
df2 <- subset(df2, select = -c(monthstart, holiday))
options(repr.plot.height=4)
NAcol <- which(colSums(is.na(df2)) > 0)
NAcount <- sort(colSums(sapply(df2[NAcol], is.na)), decreasing = TRUE)
NADF <- data.frame(variable=names(NAcount), missing=NAcount)
NADF$PctMissing <- round(((NADF$missing/nrow(df2))*100),1)
NADF %>%
ggplot(aes(x=reorder(variable, PctMissing), y=PctMissing)) +
geom_bar(stat='identity', fill='blue') + coord_flip(y=c(0,110)) +
labs(x="", y="Percent missing") +
geom_text(aes(label=paste0(NADF$PctMissing, "%"), hjust=-0.1))
# Impute missing device.operatingSystem and geoNetwork.country with "Unknown".
# Both columns were converted to factors at the df1 stage, and ifelse() on a
# factor returns the underlying integer codes rather than the labels (this is
# why OS values like "12"/"21" leaked into the modelling steps) -- wrap the
# fallback in as.character() so the real labels are kept.
df2$device.operatingSystem <- ifelse(is.na(df2$device.operatingSystem),
                                     "Unknown", as.character(df2$device.operatingSystem))
# bug fix: the original read the fallback from `df` (the pre-cleaning frame)
# instead of `df2`.
df2$geoNetwork.country <- ifelse(is.na(df2$geoNetwork.country),
                                 "Unknown", as.character(df2$geoNetwork.country))
# Split back into train/test using df2's own Data column (the original
# filtered df2 on df$Data, silently relying on identical row order).
train <- df2 %>% filter(Data == "Training")
test <- df2 %>% filter(Data == "Testing")
write.csv(df2, file="df2.csv", row.names=FALSE)
write.csv(train, file="train_final.csv", row.names=FALSE)
write.csv(test, file="test_final.csv", row.names=FALSE)
"
==========================================
OLS
==========================================
"
# Return the most frequent value in `x`.
# Ties are broken in favour of the value that appears first in `x`
# (i.e. first in unique(x) order), matching which.max()'s first-maximum rule.
Mode <- function(x) {
  distinct_vals <- unique(x)
  freq <- tabulate(match(x, distinct_vals))
  distinct_vals[which.max(freq)]
}
#load("train.Rdata")
#load("test.Rdata")
#train[1:5,1:10]
str(train)
# convert categorical variables to factors
train$geoNetwork.country <- as.factor(train$geoNetwork.country)
train$device.operatingSystem <- as.factor(train$device.operatingSystem)
train$is.holiday <- as.factor(train$is.holiday)
# split train into estimation set and validation set
set.seed(123)
est_index <- sample(1:nrow(train), size =nrow(train)/2 )
train.est <- train[est_index,]
train.val <- train[-est_index,]
# check NAs in estimation set
nas.cols <- as.vector(rep(0, ncol(train.est)))
for(i in 1:ncol(train.est)){
nas.cols[i] <- sum(is.na(train.est[i]))
}
nas.cols
# Naming the vector colums
names(nas.cols) <- names(train.est)[1:ncol(train.est)]
# Finding columns with NAs for train.est data
with.nas <- nas.cols[nas.cols!=0]
with.nas
# trafficSource.source totals.pageviews device.browser
# 32 52 5
# impute NAs for trafficSource.source
Mode(train.est$trafficSource.source)
# [1] google
# 499 Levels: (direct) ... yt-go-12345.googleplex.com
train.est$trafficSource.source[which(is.na(train.est$trafficSource.source))] <- "google"
# impute NAs for totals.pageviews
train.est$totals.pageviews[which(is.na(train.est$totals.pageviews))] <- median(train.est$totals.pageviews,na.rm=TRUE)
# impute NAs for device.browser
Mode(train.est$device.browser)
# [1] Chrome
# 128 Levels: ;__CT_JOB_ID__:0a075729-93a5-43d0-9638-4cbd41d5f5a5; ...
train.est$device.browser[which(is.na(train.est$device.browser))] <- "Chrome"
# Model 1
# Excluding the following variables:
# date,fullVisitorId,sessionId,visitId,visitStartTime, Data
# trafficSource.source (get memory error if included)
# geoNetwork.country (too many levels)
# device.browser (too many levels)
lm.1 <- lm(totals.transactionRevenue ~channelGrouping+visitNumber+totals.hits+totals.pageviews
+device.operatingSystem+device.isMobile+device.deviceCategory+is.holiday+weekend+monthend, data=train.est)
summary(lm.1)
# Model 2
# Take out channelGrouping, device.operatingSystem
lm.2 <- lm(totals.transactionRevenue ~visitNumber+totals.hits+totals.pageviews
+device.isMobile+device.deviceCategory+is.holiday+weekend+monthend, data=train.est)
summary(lm.2)
# Model 3
# Take out totals.hits, device.isMobileTRUE,device.deviceCategory
lm.3 <- lm(totals.transactionRevenue ~visitNumber+totals.pageviews
+is.holiday+weekend+monthend, data=train.est)
summary(lm.3)
"
===========================
cross validate on valid set
===========================
"
# check NAs in valid set
nas.cols <- as.vector(rep(0, ncol(train.val)))
for(i in 1:ncol(train.val)){
nas.cols[i] <- sum(is.na(train.val[i]))
}
nas.cols
# Naming the vector colums
names(nas.cols) <- names(train.val)[1:ncol(train.val)]
# Finding columns with NAs for train.val data
with.nas <- nas.cols[nas.cols!=0]
with.nas
# trafficSource.source totals.pageviews device.browser
# 37 48 3
# impute NAs for trafficSource.source
Mode(train.val$trafficSource.source)
# [1] google
# 499 Levels: (direct) ... yt-go-12345.googleplex.com
train.val$trafficSource.source[which(is.na(train.val$trafficSource.source))] <- "google"
# impute NAs for totals.pageviews
train.val$totals.pageviews[which(is.na(train.val$totals.pageviews))] <- median(train.val$totals.pageviews,na.rm=TRUE)
# impute NAs for device.browser
Mode(train.val$device.browser)
# [1] Chrome
# 128 Levels: ;__CT_JOB_ID__:0a075729-93a5-43d0-9638-4cbd41d5f5a5; ...
train.val$device.browser[which(is.na(train.val$device.browser))] <- "Chrome"
# predict using lm.1
pred.1 <- predict(lm.1, newdata=train.val)
# factor device.operatingSystem has new levels 12, 13, 18
# find indices in train.val with these values
index.12 <- which(train.val$device.operatingSystem == 12)
index.12
# [1] 49618 303150
index.13 <- which(train.val$device.operatingSystem == 13)
index.13
# [1] 207100
index.18 <- which(train.val$device.operatingSystem == 18)
index.18
# [1] 314667
# Replace validation-set OS values unseen during fitting with the
# estimation-set mode, so predict() does not fail on new factor levels.
OS.mode <- Mode(train.est$device.operatingSystem)
OS.mode
# [1] 21
# bug fix: cbind() built a matrix and recycled the shorter index vectors;
# c() is the correct way to pool index vectors, and it is also safe when a
# level did not occur (empty indices are simply ignored). Assign the computed
# mode rather than the hard-coded literal 21.
train.val$device.operatingSystem[c(index.12, index.13, index.18)] <- OS.mode
# predict using lm.1 again
pred.1 <- predict(lm.1, newdata=train.val)
MSE <- mean((train.val$totals.transactionRevenue-pred.1)^2)
MSE
# [1] 3.114098e+15
# Predict using lm.2
pred.2 <- predict(lm.2, newdata=train.val)
MSE <- mean((train.val$totals.transactionRevenue-pred.2)^2)
MSE
# [1] 3.115807e+15
# Predict using lm.3
pred.3 <- predict(lm.3, newdata=train.val)
MSE <- mean((train.val$totals.transactionRevenue-pred.3)^2)
MSE
# [1] 3.116231e+15
# MODEL 1,2 have lower MSE, build models on entire train
lm.1 <- lm(totals.transactionRevenue ~channelGrouping+visitNumber+totals.hits+totals.pageviews
+device.operatingSystem+device.isMobile+device.deviceCategory+is.holiday+weekend+monthend, data=train)
summary(lm.1)
lm.2 <- lm(totals.transactionRevenue ~visitNumber+totals.hits+totals.pageviews
+device.isMobile+device.deviceCategory+is.holiday+weekend+monthend, data=train)
summary(lm.2)
"
========================
PREDICT ON OLD TEST DATA
========================
"
# convert categorical variables to factors
test$geoNetwork.country <- as.factor(test$geoNetwork.country)
test$device.operatingSystem <- as.factor(test$device.operatingSystem)
test$is.holiday <- as.factor(test$is.holiday)
# predict using lm.1
test.pred.1 <- predict(lm.1, newdata=test)
# Error in model.frame.default(Terms, newdata, na.action = na.action, xlev = object$xlevels) :
# factor device.operatingSystem has new levels 15, 16, 19, 20
# Replace test-set OS values unseen during fitting with the test-set mode,
# so predict() does not fail on new factor levels.
OS.mode.test <- Mode(test$device.operatingSystem)
OS.mode.test
# [1] "21"
# find indices in test with the unseen levels
index.15 <- which(test$device.operatingSystem == 15)
index.15
index.16 <- which(test$device.operatingSystem == 16)
index.16
index.19 <- which(test$device.operatingSystem == 19)
index.19
index.20 <- which(test$device.operatingSystem == 20)
index.20
# bug fix: cbind() recycled the shorter index vectors into a matrix; c()
# pools index vectors correctly and tolerates empty ones. Use the computed
# mode instead of the hard-coded literal 21.
test$device.operatingSystem[c(index.15, index.16, index.19, index.20)] <- OS.mode.test
# predict using lm.1 again after cleaning of test
test.pred.1 <- predict(lm.1, newdata=test)
# bind fullVisitorId with predicted value
prediction.1 <- data.frame(cbind(test$fullVisitorId,test.pred.1))
names(prediction.1) <- c("fullVisitorId","predRevenue")
prediction.1$predRevenue <- as.numeric(prediction.1$predRevenue)
# group by fullVistorId
prediction.1.new <-group_by(prediction.1,fullVisitorId)
prediction.1.summary <-summarise(prediction.1.new, total = sum(predRevenue))
prediction.1.summary$PredictedLogRevenue <-log(prediction.1.summary$total+1)
prediction.1.summary <- prediction.1.summary[,c(1,3)]
head(prediction.1.summary)
# fullVisitorId PredictedLogRevenue
# <fct> <dbl>
# 1 0000000259678714014 11.3
# 2 0000049363351866189 11.1
# 3 0000053049821714864 8.24
# 4 0000059488412965267 9.27
# 5 0000085840370633780 6.18
# 6 0000091131414287111 8.18
nrow(prediction.1.summary)
# replace NAs in the summary with 0
prediction.1.summary[which(is.na(prediction.1.summary$PredictedLogRevenue)),2] <- 0
# write to txt file so fullVisitorId has leading zeros
# for submission, import txt file to Excel and then save as csv
write.table(prediction.1.summary, file = "C4-4_OLS_1.txt", sep = "\t",
row.names = F, col.names = c("fullVisitorId", "PredictedLogRevenue"))
# predict using lm.2
test.pred.2 <- predict(lm.2, newdata=test)
prediction.2 <- data.frame(cbind(test$fullVisitorId,test.pred.2))
names(prediction.2) <- c("fullVisitorId","predRevenue")
prediction.2$predRevenue <- as.numeric(prediction.2$predRevenue)
# group by fullVistorId
prediction.2.new <-group_by(prediction.2,fullVisitorId)
prediction.2.summary <-summarise(prediction.2.new, total = sum(predRevenue))
prediction.2.summary$PredictedLogRevenue <-log(prediction.2.summary$total+1)
prediction.2.summary <- prediction.2.summary[,c(1,3)]
head(prediction.2.summary)
# fullVisitorId PredictedLogRevenue
# <fct> <dbl>
# 1 0000000259678714014 7.84
# 2 0000049363351866189 7.05
# 3 0000053049821714864 6.38
# 4 0000059488412965267 7.04
# 5 0000085840370633780 6.39
# 6 0000091131414287111 6.10
nrow(prediction.2.summary)
# replace NAs in the summary with 0
prediction.2.summary[which(is.na(prediction.2.summary$PredictedLogRevenue)),2] <- 0
# write to txt file so fullVisitorId has leading zeros
# for submission, import txt file to Excel and then save as csv
write.table(prediction.2.summary, file = "C4-4_OLS_2.txt", sep = "\t",
row.names = F, col.names = c("fullVisitorId", "PredictedLogRevenue"))
"
The following code is trying to use the model built on old train data to predict on new test data
"
"
=====================
CLEAN NEW TEST DATA
=====================
"
str(test_new)
#JSON columns are : device, geoNetwork, totals, trafficSource
# parse JSON
JSONcolumn_data <- test_new %>%
dplyr::select(trafficSource, totals, geoNetwork, device)
JSON_cols<-apply(JSONcolumn_data,2, FUN = ParseJSONColumn)
save(JSON_cols, file = "test_JSON_parsed.Rdata")
head(JSON_cols)
test_new <- cbind(test_new, JSON_cols)
# dropping the old json columns
test_new<-test_new %>% dplyr::select(-device, -geoNetwork, -totals, -trafficSource)
head(test_new)
# Several columns carry placeholder strings ("not available in demo dataset",
# "(not provided)", ...) instead of real missing values; recode them to NA
# column by column (same cleaning as applied to the combined df earlier).
na_vals <- c("unknown.unknown", "(not set)", "not available in demo dataset",
             "(not provided)", "(none)", "<NA>")
# seq_len() is safe for zero-column frames, unlike 1:ncol(test_new).
for (col in seq_len(ncol(test_new))) {
  test_new[which(test_new[, col] %in% na_vals), col] <- NA
}
glimpse(test_new)
#write.table(df, "cleaned_total_data.csv", row.names=F, sep=",")
#All of the columns that were converted from json are of class character.
#For some, we will need to change this.
# character columns to convert to numeric
num_cols <- c('totals.hits', 'totals.pageviews', 'totals.bounces', 'totals.newVisits',
'totals.transactionRevenue')
test_new[, num_cols] = lapply(test_new[, num_cols], function(x){as.numeric(x)})
glimpse(test_new)
# Converting date from integer YYYYMMDD to Date format
test_new$date <- as.Date(as.character(test_new$date), format='%Y%m%d')
# convert visitStartTime to POSIXct
test_new$visitStartTime <- as_datetime(test_new$visitStartTime)
glimpse(test_new)
#imputing transaction revenue to 0 before removing na columns
test_new$totals.transactionRevenue[is.na(test_new$totals.transactionRevenue)] <- 0
# Imputing missing countries where city is captured
test_new$geoNetwork.city[(test_new$geoNetwork.country %>% is.na()) & (!test_new$geoNetwork.city %>% is.na())]
# [1] "Mexico City" "Bengaluru" "Bengaluru" "Santa Clara" "Austin"
test_new$geoNetwork.country[test_new$geoNetwork.city %in% c("Santa Clara", "Austin")] <- "United States"
test_new$geoNetwork.country[test_new$geoNetwork.city %in% c("Mexico City")] <- "Mexico"
test_new$geoNetwork.country[test_new$geoNetwork.city %in% c("Bengaluru")] <- "India"
# remember the training-set column names so the test set can be aligned to them later
col_name_train <- colnames(train)
# Feature engineering using Date for holidays
us.bank.holidays <- read_csv("US Bank holidays.csv")
# drop the redundant "index" column from the holiday table
us.bank.holidays <- us.bank.holidays[, ! names(us.bank.holidays) %in% c("index"), drop = F]
holidays <- us.bank.holidays$date %>% as.list()
# mark the 11 days leading up to each holiday with the same holiday label
for(i in 1:11){
buffer.dates <- holidays %>% lapply(function(d){
data.frame(date=as.Date(d)-i, holiday = us.bank.holidays$holiday[us.bank.holidays$date==as.Date(d)])
})
buffer.dates <- do.call(rbind,buffer.dates)
us.bank.holidays <- us.bank.holidays %>% rbind(buffer.dates)
}
# keep one row per date (buffer days may collide with other holidays)
us.bank.holidays = us.bank.holidays[!duplicated(us.bank.holidays$date),]
test_new_2 <- left_join(test_new,unique(us.bank.holidays), by=c("date"))
# the join can yield holiday.x/holiday.y; keep only the freshly joined column
test_new_2 <- test_new_2[,!names(test_new_2) %in% c("holiday.x"), drop=F]
names(test_new_2)[names(test_new_2) == 'holiday.y'] <- 'holiday'
# removing some holidays for non-US countries
us.holidays <- c("New Year Day", "Independence Day", "Labor Day", "Thanksgiving Day", "Christmas Day")
row.holidays <- c("New Year Day", "Christmas Day")
# US visitors keep only US holidays; rest-of-world keeps only the global ones
test_new_2$holiday[(test_new_2$geoNetwork.country =="United States") & ! (test_new_2$holiday %in% us.holidays) ] <- NA
test_new_2$holiday[(test_new_2$geoNetwork.country!="United States") & ! (test_new_2$holiday %in% row.holidays) ] <- NA
test_new_2["is.holiday"] <- !(test_new_2$holiday %>% is.na())
## Engineering features to check if date is during a weekend, monthend or start of month
test_new_2["weekend"] <- test_new_2$date %>% is.weekend()
test_new_2["monthend"] <- test_new_2$date %>% format("%d") %in% c('27','28','29','30','31')
test_new_2["monthstart"] <- test_new_2$date %>% format("%d") %in% c('1','2','3', '4', '5')
# recode NA/logical flags to "No"/"Yes" strings for modelling
test_new_2$holiday <-ifelse(is.na(test_new_2$holiday),"No",test_new_2$holiday)
test_new_2$monthend <- ifelse(test_new_2$monthend==FALSE,"No","Yes")
test_new_2$monthstart <- ifelse(test_new_2$monthstart==FALSE,"No","Yes")
# keep the same columns as train
# NOTE(review): -c(4,8) drops two train-only columns by position -- presumably
# the target and another train-specific field; verify against colnames(train)
col_name_train <- col_name_train[-c(4,8)]
test_new_2 <- test_new_2[,col_name_train]
glimpse(test_new_2)
# convert categorical variables to factors
test_new_2$geoNetwork.country <- as.factor(test_new_2$geoNetwork.country)
test_new_2$device.operatingSystem <- as.factor(test_new_2$device.operatingSystem)
test_new_2$is.holiday <- as.factor(test_new_2$is.holiday)
categorical_col <- c("channelGrouping","trafficSource.source","device.browser",
"device.isMobile","device.deviceCategory","is.holiday","weekend","monthend")
test_new_2 <- mutate_at(test_new_2, categorical_col, as.factor)
# write.csv(test_new_2, file="test_new_clean.csv", row.names=FALSE)
# save(test_new_2,file="test_new_2.Rdata")
"
========================
PREDICT ON NEW TEST
========================
"
# predict using lm.2
test.pred.2 <- predict(lm.2, newdata=test_new_2)
# NOTE: cbind() of a character id with a numeric vector coerces everything to
# character; predRevenue is converted back to numeric two lines below
prediction <- data.frame(cbind(test_new_2$fullVisitorId,test.pred.2))
names(prediction) <- c("fullVisitorId","predRevenue")
prediction$predRevenue <- as.numeric(prediction$predRevenue)
# sum session-level predictions per visitor, then log(x+1) for the submission metric
prediction.new <-group_by(prediction,fullVisitorId)
prediction.summary <-summarise(prediction.new, total = sum(predRevenue))
prediction.summary$PredictedLogRevenue <-log(prediction.summary$total+1)
# keep only id (col 1) and PredictedLogRevenue (col 3)
prediction.summary <- prediction.summary[,c(1,3)]
head(prediction.summary)
nrow(prediction.summary)
# replace NAs in summary with 0
prediction.summary[which(is.na(prediction.summary$PredictedLogRevenue)),2] <- 0
# write to txt file so fullVisitorId is in right format
# for submission, import txt file to Excel and then save as csv
write.table(prediction.summary, file = "C4-4_OLS.txt", sep = "\t",
row.names = F, col.names = c("fullVisitorId", "PredictedLogRevenue"))
# predictions from a second model (lm.3); processing continues past this chunk
test.pred.3 <- predict(lm.3, newdata=test_new_2)
|
#' Check Template
#'
#' Checks if the examples of given template can be run without any error.
#'
#' If everything went fine, a list with \code{success = TRUE} is returned; otherwise \code{success} is \code{FALSE} and an additional \code{message} describes the errors found
#' @param fp a character vector containing template name (".tpl" extension is optional), file path or a text to be split by line breaks
#' @export
#' @examples \dontrun{
#' tpl.check('example')
#' }
tpl.check <- function(fp) {

    examples <- tryCatch(tpl.example(fp, 'all'), error = function(e) e$message)

    ## tpl.example() failed outright; the condition message was captured as a string
    if (is.character(examples))
        return(list(success = FALSE, message = sprintf('Errors found while running all examples: `%s`', examples)))

    errors <- NULL

    ## a template with a single example yields a bare 'rapport' object;
    ## wrap it so the loop below handles both cases uniformly.
    ## BUG FIX: use inherits() instead of class(x) == 'rapport', which
    ## misbehaves when the object carries more than one class.
    if (inherits(examples, 'rapport'))
        examples <- list(examples)

    ## harvest the error messages recorded in every part of every example
    for (example in examples)
        for (part in example$report) {
            if (part$type == 'block')
                errors <- c(errors, part$robject$msg$errors)
            else
                errors <- c(errors, part$msg$errors)
        }

    if (!is.null(errors))
        return(list(success = FALSE, message = sprintf('%s errors found while running examples: %s', length(errors), p(errors, wrap = '`'))))

    list(success = TRUE)
}
|
/R/tpl-check.R
|
no_license
|
tothg/rapport
|
R
| false
| false
| 1,272
|
r
|
#' Check Template
#'
#' Checks if the examples of given template can be run without any error.
#'
#' If everything went fine, a list with \code{success = TRUE} is returned; otherwise \code{success} is \code{FALSE} and an additional \code{message} describes the errors found
#' @param fp a character vector containing template name (".tpl" extension is optional), file path or a text to be split by line breaks
#' @export
#' @examples \dontrun{
#' tpl.check('example')
#' }
tpl.check <- function(fp) {

    examples <- tryCatch(tpl.example(fp, 'all'), error = function(e) e$message)

    ## tpl.example() failed outright; the condition message was captured as a string
    if (is.character(examples))
        return(list(success = FALSE, message = sprintf('Errors found while running all examples: `%s`', examples)))

    errors <- NULL

    ## a template with a single example yields a bare 'rapport' object;
    ## wrap it so the loop below handles both cases uniformly.
    ## BUG FIX: use inherits() instead of class(x) == 'rapport', which
    ## misbehaves when the object carries more than one class.
    if (inherits(examples, 'rapport'))
        examples <- list(examples)

    ## harvest the error messages recorded in every part of every example
    for (example in examples)
        for (part in example$report) {
            if (part$type == 'block')
                errors <- c(errors, part$robject$msg$errors)
            else
                errors <- c(errors, part$msg$errors)
        }

    if (!is.null(errors))
        return(list(success = FALSE, message = sprintf('%s errors found while running examples: %s', length(errors), p(errors, wrap = '`'))))

    list(success = TRUE)
}
|
# OM <- OM_xl(file.path("C:/Users/arhor/Dropbox/CAProject/CACaseStudies/OMs/OMTables.xlsx"), "RSU")
# Data <- new("Data", file.path("C:/Users/arhor/Dropbox/CAProject/CACaseStudies/DataObjects/RSU/RSU_data.csv"))
#' Visual "Turing test" for an operating model.
#'
#' For each data type available in `Data` (index, catch, recruitment, ...),
#' the observed series is plotted alongside five randomly chosen simulated
#' replicates, with panels in shuffled order, so a reader can try to pick
#' out the real data. The panel position of the real series is recorded in
#' the local data.frame `DF$Real`.
#'
#' @param OM   An object of class 'OM' (operating model).
#' @param Data An object of class 'Data' holding the observed data.
Turing <- function(OM, Data) {
  # inherits() is robust to objects that carry more than one class
  if (!inherits(OM, "OM")) stop("OM must be class 'OM'")
  if (!inherits(Data, "Data")) stop("Data must be class 'Data'")
  # if (length(Data@Year) != OM@nyears) {
  #   message("Note: length Data@Year (", length(Data@Year), ") is not of length OM@nyears (", OM@nyears, ") \nUsing last ",
  #           length(Data@Year), " years of simulations")
  # } # fix this for when Data is longer than OM
  #
  nyr <- length(Data@Year)
  nyears <- OM@nyears <- length(Data@Year)
  # five random simulation replicates to show alongside the real series
  sims <- sample(1:OM@nsim, 5)

  # What Data are available?
  DF <- data.frame(Data = c(
    "Index of Abundance",
    "Total Catch",
    "Recruitment Index",
    "Catch-at-Age",
    "Catch-at-Length"),
    Slot = c("Ind",
             "Cat",
             "Rec",
             "CAA",
             "CAL"),
    Available = FALSE,
    Real = 0,
    stringsAsFactors = FALSE)
  # a slot counts as available if it holds any non-NA values
  for (r in seq_len(nrow(DF))) {
    if (!all(is.na(slot(Data, DF$Slot[r])))) DF$Available[r] <- TRUE
  }
  if (sum(DF$Available) == 0) {
    stop("No data found in slots: ", paste(DF$Slot, ""), call. = FALSE)
  } else {
    message("Data found in slots: ", paste(DF$Slot[DF$Available], ""))
  }

  # if length data exists, make sure length bins are the same
  if (DF$Available[DF$Slot == "CAL"]) {
    CAL_bins <- Data@CAL_bins
    OM@cpars$CAL_bins <- CAL_bins
  }

  # Run historical simulations
  Hist <- runMSE(OM, Hist = TRUE)
  message("Plotting:")

  # Index of Abundance
  if (DF$Available[DF$Slot == "Ind"]) {
    message(DF$Data[DF$Slot == "Ind"])
    Ind <- Data@Ind[1, ]
    ind <- which(!is.na(Ind))
    simInd <- t(Hist$Data@Ind[sims, ind])
    # scale each simulated index to mean 1 so the series are comparable
    # NOTE(review): the real index Ind[ind] is NOT rescaled here, unlike the
    # catch section below -- confirm Data@Ind is already mean-standardised
    simInd <- simInd / matrix(apply(simInd, 2, mean), nrow = length(ind), ncol = length(sims), byrow = TRUE)
    allInd <- cbind(Ind[ind], simInd)
    # shuffle the columns and record where the real series (column 1) ended up
    ranIndex <- sample(1:ncol(allInd), ncol(allInd))
    allInd <- allInd[, ranIndex]
    DF$Real[DF$Slot == "Ind"] <- which(ranIndex == 1)
    par(mfrow = c(3, 2), bty = "l", mar = c(3, 3, 1, 1), oma = c(2, 2, 1, 0))
    for (X in seq_len(ncol(allInd))) {
      plot(Data@Year[ind], allInd[, X], type = "l", ylim = c(0, max(allInd)), xlab = "", ylab = "",
           axes = FALSE, xaxs = "i", yaxs = "i", lwd = 2)
      # x-axis labels only on the bottom row; y-axis labels only on the left column
      if (X %in% c(5, 6)) {
        axis(side = 1)
      } else {
        axis(side = 1, labels = FALSE)  # 'labels' spelled out (was partial-matched 'label')
      }
      if (X %in% c(1, 3, 5)) {
        axis(side = 2, las = 1)
      } else {
        axis(side = 2, labels = FALSE)
      }
    }
    title(paste("Index of Abundance for last", length(ind), "years"), outer = TRUE)
  }

  # Total Catch
  if (DF$Available[DF$Slot == "Cat"]) {
    message(DF$Data[DF$Slot == "Cat"])
    Cat <- Data@Cat[1, ]
    ind <- which(!is.na(Cat))
    # normalise real and simulated catches to mean 1 before comparison
    Cat[ind] <- Cat[ind] / mean(Cat[ind])
    simCat <- t(Hist$Data@Cat[sims, ind])
    simCat <- simCat / matrix(apply(simCat, 2, mean), nrow = length(ind), ncol = length(sims), byrow = TRUE)
    allCat <- cbind(Cat[ind], simCat)
    ranIndex <- sample(1:ncol(allCat), ncol(allCat))
    allCat <- allCat[, ranIndex]
    DF$Real[DF$Slot == "Cat"] <- which(ranIndex == 1)
    par(mfrow = c(3, 2), bty = "l", mar = c(3, 3, 1, 1), oma = c(2, 2, 1, 0))
    for (X in seq_len(ncol(allCat))) {
      plot(Data@Year[ind], allCat[, X], type = "l", ylim = c(0, max(allCat)), xlab = "", ylab = "",
           axes = FALSE, xaxs = "i", yaxs = "i", lwd = 2)
      if (X %in% c(5, 6)) {
        axis(side = 1)
      } else {
        axis(side = 1, labels = FALSE)
      }
      if (X %in% c(1, 3, 5)) {
        axis(side = 2, las = 1)
      } else {
        axis(side = 2, labels = FALSE)
      }
    }
    title(paste("Catch Trends for last", length(ind), "years"), outer = TRUE)
  }

  # Recruitment
  if (DF$Available[DF$Slot == "Rec"]) {
    message(DF$Data[DF$Slot == "Rec"])
    Rec <- Data@Rec[1, ]
    ind <- which(!is.na(Rec))
    # BUG FIX: original computed Cat[ind]/mean(Rec[ind]); Rec must be
    # normalised by its own mean (Cat may not even exist in this scope)
    Rec[ind] <- Rec[ind] / mean(Rec[ind])
    simRec <- t(Hist$Data@Rec[sims, ind])
    # BUG FIX: 'simRect' was an undefined name (typo for simRec)
    simRec <- simRec / matrix(apply(simRec, 2, mean), nrow = length(ind), ncol = length(sims), byrow = TRUE)
    allRec <- cbind(Rec[ind], simRec)
    ranIndex <- sample(1:ncol(allRec), ncol(allRec))
    allRec <- allRec[, ranIndex]
    DF$Real[DF$Slot == "Rec"] <- which(ranIndex == 1)
    par(mfrow = c(3, 2), bty = "l", mar = c(3, 3, 1, 1), oma = c(2, 2, 1, 0))
    # BUG FIX: loop previously iterated over ncol(allCat)
    for (X in seq_len(ncol(allRec))) {
      plot(Data@Year[ind], allRec[, X], type = "l", ylim = c(0, max(allRec)), xlab = "", ylab = "",
           axes = FALSE, xaxs = "i", yaxs = "i", lwd = 2)
      if (X %in% c(5, 6)) {
        axis(side = 1)
      } else {
        axis(side = 1, labels = FALSE)
      }
      if (X %in% c(1, 3, 5)) {
        axis(side = 2, las = 1)
      } else {
        axis(side = 2, labels = FALSE)
      }
    }
    title(paste("Recruitment Trends for last", length(ind), "years"), outer = TRUE)
  }

  # Catch-at-age -- not implemented yet; bare slot access is a no-op placeholder
  Data@CAA

  # Catch-at-Length
  # NOTE(review): this section looks like an unfinished copy of the
  # Recruitment section -- after preparing the length bins it re-plots a
  # recruitment series rather than length compositions, and it relies on
  # 'ind' and 'Rec' from the sections above. The undefined 'simRect'/'allCat'
  # typos are fixed below, but the intended CAL plot still needs writing.
  if (DF$Available[DF$Slot == "CAL"]) {
    message(DF$Data[DF$Slot == "CAL"])
    CAL <- Data@CAL[1, , ]
    LBins <- Data@CAL_bins
    BW <- LBins[2] - LBins[1]
    LMids <- seq(LBins[1] + BW * 0.5, by = BW, length.out = length(LBins) - 1)
    if (!all(Hist$Data@CAL_bins == LBins)) stop("Length bins of simulated and real data are not the same", call. = FALSE)
    # 'ind' here is carried over from a previous section -- TODO confirm intent
    simCAL <- Hist$Data@CAL[sims, ind]
    # need to match years
    ind <- which(!is.na(Rec))
    Rec[ind] <- Rec[ind] / mean(Rec[ind])
    simRec <- Hist$TSdata$Rec[ind, sims]
    simRec <- simRec / matrix(apply(simRec, 2, mean), nrow = length(ind), ncol = length(sims), byrow = TRUE)
    allRec <- cbind(Rec[ind], simRec)
    ranIndex <- sample(1:ncol(allRec), ncol(allRec))
    allRec <- allRec[, ranIndex]
    DF$Real[DF$Slot == "Rec"] <- which(ranIndex == 1)
    par(mfrow = c(3, 2), bty = "l", mar = c(3, 3, 1, 1), oma = c(2, 2, 1, 0))
    for (X in seq_len(ncol(allRec))) {
      plot(Data@Year[ind], allRec[, X], type = "l", ylim = c(0, max(allRec)), xlab = "", ylab = "",
           axes = FALSE, xaxs = "i", yaxs = "i", lwd = 2)
      if (X %in% c(5, 6)) {
        axis(side = 1)
      } else {
        axis(side = 1, labels = FALSE)
      }
      if (X %in% c(1, 3, 5)) {
        axis(side = 2, las = 1)
      } else {
        axis(side = 2, labels = FALSE)
      }
    }
    title(paste("Recruitment Trends for last", length(ind), "years"), outer = TRUE)
  }

  # Exploratory leftovers (no-op slot accesses and a duplicate catch plot);
  # retained as in the original but flagged for cleanup
  Data@CAL
  Data@CAL_bins
  slotNames(Data)
  Cat <- Hist$TSdata$Catch[(nyears - nyr + 1):nyears, sims]
  meancat <- matrix(apply(Cat, 2, mean), nrow = nyr, ncol = length(sims), byrow = TRUE)
  Cat <- Cat / meancat
  Cat_d <- as.numeric(Data@Cat / mean(Data@Cat, na.rm = TRUE))
  Cat <- cbind(Cat, Cat_d)
  ind <- sample(1:ncol(Cat), ncol(Cat))
  Cat <- Cat[, ind]
  par(mfrow = c(3, 2))
  for (X in seq_len(ncol(Cat))) plot(Cat[, X], type = "l", ylim = c(0, max(Cat)))
  dim(Data@CAA)
  dim(Data@CAL)
}
|
/Turing.R
|
no_license
|
DLMtool/DLMDev
|
R
| false
| false
| 6,823
|
r
|
# OM <- OM_xl(file.path("C:/Users/arhor/Dropbox/CAProject/CACaseStudies/OMs/OMTables.xlsx"), "RSU")
# Data <- new("Data", file.path("C:/Users/arhor/Dropbox/CAProject/CACaseStudies/DataObjects/RSU/RSU_data.csv"))
#' Visual "Turing test" for an operating model.
#'
#' For each data type available in `Data` (index, catch, recruitment, ...),
#' the observed series is plotted alongside five randomly chosen simulated
#' replicates, with panels in shuffled order, so a reader can try to pick
#' out the real data. The panel position of the real series is recorded in
#' the local data.frame `DF$Real`.
#'
#' @param OM   An object of class 'OM' (operating model).
#' @param Data An object of class 'Data' holding the observed data.
Turing <- function(OM, Data) {
  # inherits() is robust to objects that carry more than one class
  if (!inherits(OM, "OM")) stop("OM must be class 'OM'")
  if (!inherits(Data, "Data")) stop("Data must be class 'Data'")
  # if (length(Data@Year) != OM@nyears) {
  #   message("Note: length Data@Year (", length(Data@Year), ") is not of length OM@nyears (", OM@nyears, ") \nUsing last ",
  #           length(Data@Year), " years of simulations")
  # } # fix this for when Data is longer than OM
  #
  nyr <- length(Data@Year)
  nyears <- OM@nyears <- length(Data@Year)
  # five random simulation replicates to show alongside the real series
  sims <- sample(1:OM@nsim, 5)

  # What Data are available?
  DF <- data.frame(Data = c(
    "Index of Abundance",
    "Total Catch",
    "Recruitment Index",
    "Catch-at-Age",
    "Catch-at-Length"),
    Slot = c("Ind",
             "Cat",
             "Rec",
             "CAA",
             "CAL"),
    Available = FALSE,
    Real = 0,
    stringsAsFactors = FALSE)
  # a slot counts as available if it holds any non-NA values
  for (r in seq_len(nrow(DF))) {
    if (!all(is.na(slot(Data, DF$Slot[r])))) DF$Available[r] <- TRUE
  }
  if (sum(DF$Available) == 0) {
    stop("No data found in slots: ", paste(DF$Slot, ""), call. = FALSE)
  } else {
    message("Data found in slots: ", paste(DF$Slot[DF$Available], ""))
  }

  # if length data exists, make sure length bins are the same
  if (DF$Available[DF$Slot == "CAL"]) {
    CAL_bins <- Data@CAL_bins
    OM@cpars$CAL_bins <- CAL_bins
  }

  # Run historical simulations
  Hist <- runMSE(OM, Hist = TRUE)
  message("Plotting:")

  # Index of Abundance
  if (DF$Available[DF$Slot == "Ind"]) {
    message(DF$Data[DF$Slot == "Ind"])
    Ind <- Data@Ind[1, ]
    ind <- which(!is.na(Ind))
    simInd <- t(Hist$Data@Ind[sims, ind])
    # scale each simulated index to mean 1 so the series are comparable
    # NOTE(review): the real index Ind[ind] is NOT rescaled here, unlike the
    # catch section below -- confirm Data@Ind is already mean-standardised
    simInd <- simInd / matrix(apply(simInd, 2, mean), nrow = length(ind), ncol = length(sims), byrow = TRUE)
    allInd <- cbind(Ind[ind], simInd)
    # shuffle the columns and record where the real series (column 1) ended up
    ranIndex <- sample(1:ncol(allInd), ncol(allInd))
    allInd <- allInd[, ranIndex]
    DF$Real[DF$Slot == "Ind"] <- which(ranIndex == 1)
    par(mfrow = c(3, 2), bty = "l", mar = c(3, 3, 1, 1), oma = c(2, 2, 1, 0))
    for (X in seq_len(ncol(allInd))) {
      plot(Data@Year[ind], allInd[, X], type = "l", ylim = c(0, max(allInd)), xlab = "", ylab = "",
           axes = FALSE, xaxs = "i", yaxs = "i", lwd = 2)
      # x-axis labels only on the bottom row; y-axis labels only on the left column
      if (X %in% c(5, 6)) {
        axis(side = 1)
      } else {
        axis(side = 1, labels = FALSE)  # 'labels' spelled out (was partial-matched 'label')
      }
      if (X %in% c(1, 3, 5)) {
        axis(side = 2, las = 1)
      } else {
        axis(side = 2, labels = FALSE)
      }
    }
    title(paste("Index of Abundance for last", length(ind), "years"), outer = TRUE)
  }

  # Total Catch
  if (DF$Available[DF$Slot == "Cat"]) {
    message(DF$Data[DF$Slot == "Cat"])
    Cat <- Data@Cat[1, ]
    ind <- which(!is.na(Cat))
    # normalise real and simulated catches to mean 1 before comparison
    Cat[ind] <- Cat[ind] / mean(Cat[ind])
    simCat <- t(Hist$Data@Cat[sims, ind])
    simCat <- simCat / matrix(apply(simCat, 2, mean), nrow = length(ind), ncol = length(sims), byrow = TRUE)
    allCat <- cbind(Cat[ind], simCat)
    ranIndex <- sample(1:ncol(allCat), ncol(allCat))
    allCat <- allCat[, ranIndex]
    DF$Real[DF$Slot == "Cat"] <- which(ranIndex == 1)
    par(mfrow = c(3, 2), bty = "l", mar = c(3, 3, 1, 1), oma = c(2, 2, 1, 0))
    for (X in seq_len(ncol(allCat))) {
      plot(Data@Year[ind], allCat[, X], type = "l", ylim = c(0, max(allCat)), xlab = "", ylab = "",
           axes = FALSE, xaxs = "i", yaxs = "i", lwd = 2)
      if (X %in% c(5, 6)) {
        axis(side = 1)
      } else {
        axis(side = 1, labels = FALSE)
      }
      if (X %in% c(1, 3, 5)) {
        axis(side = 2, las = 1)
      } else {
        axis(side = 2, labels = FALSE)
      }
    }
    title(paste("Catch Trends for last", length(ind), "years"), outer = TRUE)
  }

  # Recruitment
  if (DF$Available[DF$Slot == "Rec"]) {
    message(DF$Data[DF$Slot == "Rec"])
    Rec <- Data@Rec[1, ]
    ind <- which(!is.na(Rec))
    # BUG FIX: original computed Cat[ind]/mean(Rec[ind]); Rec must be
    # normalised by its own mean (Cat may not even exist in this scope)
    Rec[ind] <- Rec[ind] / mean(Rec[ind])
    simRec <- t(Hist$Data@Rec[sims, ind])
    # BUG FIX: 'simRect' was an undefined name (typo for simRec)
    simRec <- simRec / matrix(apply(simRec, 2, mean), nrow = length(ind), ncol = length(sims), byrow = TRUE)
    allRec <- cbind(Rec[ind], simRec)
    ranIndex <- sample(1:ncol(allRec), ncol(allRec))
    allRec <- allRec[, ranIndex]
    DF$Real[DF$Slot == "Rec"] <- which(ranIndex == 1)
    par(mfrow = c(3, 2), bty = "l", mar = c(3, 3, 1, 1), oma = c(2, 2, 1, 0))
    # BUG FIX: loop previously iterated over ncol(allCat)
    for (X in seq_len(ncol(allRec))) {
      plot(Data@Year[ind], allRec[, X], type = "l", ylim = c(0, max(allRec)), xlab = "", ylab = "",
           axes = FALSE, xaxs = "i", yaxs = "i", lwd = 2)
      if (X %in% c(5, 6)) {
        axis(side = 1)
      } else {
        axis(side = 1, labels = FALSE)
      }
      if (X %in% c(1, 3, 5)) {
        axis(side = 2, las = 1)
      } else {
        axis(side = 2, labels = FALSE)
      }
    }
    title(paste("Recruitment Trends for last", length(ind), "years"), outer = TRUE)
  }

  # Catch-at-age -- not implemented yet; bare slot access is a no-op placeholder
  Data@CAA

  # Catch-at-Length
  # NOTE(review): this section looks like an unfinished copy of the
  # Recruitment section -- after preparing the length bins it re-plots a
  # recruitment series rather than length compositions, and it relies on
  # 'ind' and 'Rec' from the sections above. The undefined 'simRect'/'allCat'
  # typos are fixed below, but the intended CAL plot still needs writing.
  if (DF$Available[DF$Slot == "CAL"]) {
    message(DF$Data[DF$Slot == "CAL"])
    CAL <- Data@CAL[1, , ]
    LBins <- Data@CAL_bins
    BW <- LBins[2] - LBins[1]
    LMids <- seq(LBins[1] + BW * 0.5, by = BW, length.out = length(LBins) - 1)
    if (!all(Hist$Data@CAL_bins == LBins)) stop("Length bins of simulated and real data are not the same", call. = FALSE)
    # 'ind' here is carried over from a previous section -- TODO confirm intent
    simCAL <- Hist$Data@CAL[sims, ind]
    # need to match years
    ind <- which(!is.na(Rec))
    Rec[ind] <- Rec[ind] / mean(Rec[ind])
    simRec <- Hist$TSdata$Rec[ind, sims]
    simRec <- simRec / matrix(apply(simRec, 2, mean), nrow = length(ind), ncol = length(sims), byrow = TRUE)
    allRec <- cbind(Rec[ind], simRec)
    ranIndex <- sample(1:ncol(allRec), ncol(allRec))
    allRec <- allRec[, ranIndex]
    DF$Real[DF$Slot == "Rec"] <- which(ranIndex == 1)
    par(mfrow = c(3, 2), bty = "l", mar = c(3, 3, 1, 1), oma = c(2, 2, 1, 0))
    for (X in seq_len(ncol(allRec))) {
      plot(Data@Year[ind], allRec[, X], type = "l", ylim = c(0, max(allRec)), xlab = "", ylab = "",
           axes = FALSE, xaxs = "i", yaxs = "i", lwd = 2)
      if (X %in% c(5, 6)) {
        axis(side = 1)
      } else {
        axis(side = 1, labels = FALSE)
      }
      if (X %in% c(1, 3, 5)) {
        axis(side = 2, las = 1)
      } else {
        axis(side = 2, labels = FALSE)
      }
    }
    title(paste("Recruitment Trends for last", length(ind), "years"), outer = TRUE)
  }

  # Exploratory leftovers (no-op slot accesses and a duplicate catch plot);
  # retained as in the original but flagged for cleanup
  Data@CAL
  Data@CAL_bins
  slotNames(Data)
  Cat <- Hist$TSdata$Catch[(nyears - nyr + 1):nyears, sims]
  meancat <- matrix(apply(Cat, 2, mean), nrow = nyr, ncol = length(sims), byrow = TRUE)
  Cat <- Cat / meancat
  Cat_d <- as.numeric(Data@Cat / mean(Data@Cat, na.rm = TRUE))
  Cat <- cbind(Cat, Cat_d)
  ind <- sample(1:ncol(Cat), ncol(Cat))
  Cat <- Cat[, ind]
  par(mfrow = c(3, 2))
  for (X in seq_len(ncol(Cat))) plot(Cat[, X], type = "l", ylim = c(0, max(Cat)))
  dim(Data@CAA)
  dim(Data@CAL)
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/visEarthPole.R
\name{visEarthPole}
\alias{visEarthPole}
\title{usecase for local leaflet projections}
\usage{
visEarthPole(dateString="2011-10-04", layerList=c(12,10,11),groupList=NULL,scale=scale500,zoom=5)
}
\arguments{
\item{dateString}{a date in the convenient format "2011-10-04". Requests for non-existing time slots are automatically corrected to the next existing one.}
\item{layerList}{default is (12,10,11). You will find 32 layers to choose. See Details for more info}
\item{groupList}{default = "500"; two further group lists, "250" and "1000", are predefined according to the resolution of the data. If you choose "burst" you will get all layers.}
\item{scale}{set scale groups according to the resolution will be removed options are "scale250","scale500" "scale1000".}
\item{zoom}{set zoom level maximum is 5}
}
\description{
visEarthPole is a use-case interface to the Global Imagery
Browse Services - GIBS. The projection at the South Pole is EPSG
3031, which makes it a good test implementation of proj4leaflet.
It is currently VERY basic and just demonstrates the possibilities of using it along with mapview.
}
\details{
Layer listing; for details please look at \url{https://wiki.earthdata.nasa.gov/display/GIBS/GIBS+Available+Imagery+Products}\cr
[1] "AMSR2_Sea_Ice_Concentration_12km" \cr
[2] "AMSR2_Sea_Ice_Concentration_25km" \cr
[3] "AMSR2_Sea_Ice_Brightness_Temp_6km_89H" \cr
[4] "AMSR2_Sea_Ice_Brightness_Temp_6km_89V" \cr
[5] "AMSRE_Sea_Ice_Concentration_12km" \cr
[6] "AMSRE_Snow_Depth_Over_Ice" \cr
[7] "AMSRE_Sea_Ice_Concentration_25km" \cr
[8] "AMSRE_Sea_Ice_Brightness_Temp_89H" \cr
[9] "AMSRE_Sea_Ice_Brightness_Temp_89V" \cr
[10] "BlueMarble_NextGeneration" \cr
[11] "BlueMarble_ShadedRelief" \cr
[12] "BlueMarble_ShadedRelief_Bathymetry" \cr
[13] "Coastlines" \cr
[14] "Graticule" \cr
[15] "MODIS_Terra_Snow_Cover" \cr
[16] "MODIS_Terra_Sea_Ice" \cr
[17] "MODIS_Terra_Brightness_Temp_Band31_Day" \cr
[18] "MODIS_Terra_Brightness_Temp_Band31_Night" \cr
[19] "MODIS_Terra_CorrectedReflectance_TrueColor" \cr
[20] "MODIS_Terra_CorrectedReflectance_Bands367" \cr
[21] "MODIS_Terra_CorrectedReflectance_Bands721" \cr
[22] "MODIS_Aqua_Snow_Cover" \cr
[23] "MODIS_Aqua_Sea_Ice" \cr
[24] "MODIS_Aqua_Brightness_Temp_Band31_Day" \cr
[25] "MODIS_Aqua_Brightness_Temp_Band31_Night" \cr
[26] "MODIS_Aqua_CorrectedReflectance_TrueColor" \cr
[27] "MODIS_Aqua_CorrectedReflectance_Bands721" \cr
[28] "SCAR_Land_Mask" \cr
[29] "SCAR_Land_Water_Map" \cr
[30] "VIIRS_SNPP_CorrectedReflectance_TrueColor" \cr
[31] "VIIRS_SNPP_CorrectedReflectance_BandsM11-I2-I1" \cr
[32] "VIIRS_SNPP_CorrectedReflectance_BandsM3-I3-M11" \cr
}
\examples{
\dontrun{
visEarthPole(groupList="1000",dateString="2014-02-04")
}
}
\author{
Chris Reudenbach
}
\references{
\url{https://wiki.earthdata.nasa.gov/display/GIBS/Global+Imagery+Browse+Services+-+GIBS}\cr
\url{https://wiki.earthdata.nasa.gov/display/GIBS/GIBS+Available+Imagery+Products}\cr
\url{http://map1.vis.earthdata.nasa.gov/twms-antarctic/twms.cgi?request=GetTileService}\cr
\url{https://github.com/kartena/Proj4Leaflet}\cr
}
|
/man/visEarthPole.Rd
|
no_license
|
gisma/robubu
|
R
| false
| true
| 3,210
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/visEarthPole.R
\name{visEarthPole}
\alias{visEarthPole}
\title{usecase for local leaflet projections}
\usage{
visEarthPole(dateString="2011-10-04", layerList=c(12,10,11),groupList=NULL,scale=scale500,zoom=5)
}
\arguments{
\item{dateString}{a date in the convenient format "2011-10-04". Requests for non-existing time slots are automatically corrected to the next existing one.}
\item{layerList}{default is (12,10,11). You will find 32 layers to choose. See Details for more info}
\item{groupList}{default = "500"; two further group lists, "250" and "1000", are predefined according to the resolution of the data. If you choose "burst" you will get all layers.}
\item{scale}{set scale groups according to the resolution will be removed options are "scale250","scale500" "scale1000".}
\item{zoom}{set zoom level maximum is 5}
}
\description{
visEarthPole is a use-case interface to the Global Imagery
Browse Services - GIBS. The projection at the South Pole is EPSG
3031, which makes it a good test implementation of proj4leaflet.
It is currently VERY basic and just demonstrates the possibilities of using it along with mapview.
}
\details{
Layer listing; for details please look at \url{https://wiki.earthdata.nasa.gov/display/GIBS/GIBS+Available+Imagery+Products}\cr
[1] "AMSR2_Sea_Ice_Concentration_12km" \cr
[2] "AMSR2_Sea_Ice_Concentration_25km" \cr
[3] "AMSR2_Sea_Ice_Brightness_Temp_6km_89H" \cr
[4] "AMSR2_Sea_Ice_Brightness_Temp_6km_89V" \cr
[5] "AMSRE_Sea_Ice_Concentration_12km" \cr
[6] "AMSRE_Snow_Depth_Over_Ice" \cr
[7] "AMSRE_Sea_Ice_Concentration_25km" \cr
[8] "AMSRE_Sea_Ice_Brightness_Temp_89H" \cr
[9] "AMSRE_Sea_Ice_Brightness_Temp_89V" \cr
[10] "BlueMarble_NextGeneration" \cr
[11] "BlueMarble_ShadedRelief" \cr
[12] "BlueMarble_ShadedRelief_Bathymetry" \cr
[13] "Coastlines" \cr
[14] "Graticule" \cr
[15] "MODIS_Terra_Snow_Cover" \cr
[16] "MODIS_Terra_Sea_Ice" \cr
[17] "MODIS_Terra_Brightness_Temp_Band31_Day" \cr
[18] "MODIS_Terra_Brightness_Temp_Band31_Night" \cr
[19] "MODIS_Terra_CorrectedReflectance_TrueColor" \cr
[20] "MODIS_Terra_CorrectedReflectance_Bands367" \cr
[21] "MODIS_Terra_CorrectedReflectance_Bands721" \cr
[22] "MODIS_Aqua_Snow_Cover" \cr
[23] "MODIS_Aqua_Sea_Ice" \cr
[24] "MODIS_Aqua_Brightness_Temp_Band31_Day" \cr
[25] "MODIS_Aqua_Brightness_Temp_Band31_Night" \cr
[26] "MODIS_Aqua_CorrectedReflectance_TrueColor" \cr
[27] "MODIS_Aqua_CorrectedReflectance_Bands721" \cr
[28] "SCAR_Land_Mask" \cr
[29] "SCAR_Land_Water_Map" \cr
[30] "VIIRS_SNPP_CorrectedReflectance_TrueColor" \cr
[31] "VIIRS_SNPP_CorrectedReflectance_BandsM11-I2-I1" \cr
[32] "VIIRS_SNPP_CorrectedReflectance_BandsM3-I3-M11" \cr
}
\examples{
\dontrun{
visEarthPole(groupList="1000",dateString="2014-02-04")
}
}
\author{
Chris Reudenbach
}
\references{
\url{https://wiki.earthdata.nasa.gov/display/GIBS/Global+Imagery+Browse+Services+-+GIBS}\cr
\url{https://wiki.earthdata.nasa.gov/display/GIBS/GIBS+Available+Imagery+Products}\cr
\url{http://map1.vis.earthdata.nasa.gov/twms-antarctic/twms.cgi?request=GetTileService}\cr
\url{https://github.com/kartena/Proj4Leaflet}\cr
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.