content
large_stringlengths 0
6.46M
| path
large_stringlengths 3
331
| license_type
large_stringclasses 2
values | repo_name
large_stringlengths 5
125
| language
large_stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 4
6.46M
| extension
large_stringclasses 75
values | text
stringlengths 0
6.46M
|
|---|---|---|---|---|---|---|---|---|---|
#### Rud.Is Challenge #2
# https://github.com/52vis/2016-14
# https://rud.is/b/2016/04/06/52vis-week-2-2016-week-14-honing-in-on-the-homeless/
## `````````````````````````````````````````````
## Load Libraries ####
# Install any packages that are not already present, then attach them.
# http://stackoverflow.com/questions/4090169/elegant-way-to-check-for-missing-packages-and-install-them
# FIX: "virdis" was a typo for "viridis" -- install.packages("virdis") fails.
# NOTE(review): "hrbrmisc" and "albersusa" are GitHub-only packages, so the
# CRAN install.packages() call below cannot install them (albersusa is
# handled via devtools below) -- confirm hrbrmisc is installed separately.
list.of.packages <- c("ggplot2", "showtext", "grid","ggalt","ggthemes","readxl","hrbrmisc","stringr","viridis","purrr","dplyr","tidyr","scales","albersusa")
new.packages <- list.of.packages[!(list.of.packages %in% installed.packages()[,"Package"])]
if(length(new.packages)) install.packages(new.packages)
library(ggplot2)
library(grid)
library(ggalt)
library(readxl)
library(hrbrmisc)
library(stringr)
library(purrr)
library(dplyr)
library(tidyr)
library(scales)
devtools::install_github("hrbrmstr/albersusa")
## `````````````````````````````````````````````
## `````````````````````````````````````````````
## Scraping Data ####
## `````````````````````````````````````````````
# grab the HUD homeless data (one Excel tab per survey year, 2015 back to 2007)
URL <- "https://www.hudexchange.info/resources/documents/2007-2015-PIT-Counts-by-CoC.xlsx"
fil <- basename(URL)
# download once; a cached local copy is reused on subsequent runs
if (!file.exists(fil)) download.file(URL, fil, mode="wb")
# turn the excel tabs into a long data.frame
yrs <- 2015:2007
# sheet index "1".."9" maps to year 2015..2007; map_df iterates over the names
names(yrs) <- 1:9
homeless <- map_df(names(yrs), function(i) {
# i is the sheet number as a character string
df <- suppressWarnings(read_excel(fil, as.numeric(i)))
# columns 3+ are counts; coerce to numeric (coercion warnings suppressed)
df[,3:ncol(df)] <- suppressWarnings(lapply(df[,3:ncol(df)], as.numeric))
# normalize column names: lower-case, runs of dots -> "_", strip trailing digits
new_names <- tolower(make.names(colnames(df)))
new_names <- str_replace_all(new_names, "\\.+", "_")
df <- setNames(df, str_replace_all(new_names, "_[[:digit:]]+$", ""))
# tag every row of this sheet with its survey year
bind_cols(df, data_frame(year=rep(yrs[i], nrow(df))))
})
# clean it up a bit: state = first two letters of the CoC number; drop " CoC"
homeless <- mutate(homeless,
state=str_match(coc_number, "^([[:alpha:]]{2})")[,2],
coc_name=str_replace(coc_name, " CoC$", ""))
homeless <- select(homeless, year, state, everything())
# rows without a parseable state code are totals/notes rows -- drop them
homeless <- filter(homeless, !is.na(state))
## `````````````````````````````````````````````
## `````````````````````````````````````````````
## Read Data ####
## `````````````````````````````````````````````
# read in the us population data (wide format: one "X<year>" column per year)
uspop <- read.csv("/mnt/r.rudis.challenge2/data/uspop - 2.csv", stringsAsFactors=FALSE)
# wide -> long: one row per (state, year), keeping name + iso_3166_2 as ids
uspop_long <- gather(uspop, year, population, -name, -iso_3166_2)
# strip the "X" prefix read.csv adds to numeric column names
uspop_long$year <- sub("X", "", uspop_long$year)
## `````````````````````````````````````````````
## `````````````````````````````````````````````
## Data Manipulations ####
# normalize the values
# total homeless per state-year (count() sums total_homeless via wt=)
states <- count(homeless, year, state, wt=total_homeless)
# attach full state names from the albersusa composite map's @data slot
# NOTE(review): assumes columns 3:4 are iso_3166_2/name -- confirm with package
states <- left_join(states, albersusa::usa_composite()@data[,3:4], by=c("state"="iso_3166_2"))
states <- ungroup(filter(states, !is.na(name)))
# year to character so it joins against uspop_long$year below
states$year <- as.character(states$year)
# per-100k rate; left_join() matches on the shared name/year columns
states <- mutate(left_join(states, uspop_long), homeless_per_100k=(n/population)*100000)
# we want to order from worst to best (highest mean rate first)
group_by(states, name) %>%
summarise(mean=mean(homeless_per_100k, na.rm=TRUE)) %>%
arrange(desc(mean)) -> ordr
# NOTE(review): levels 2006:2016 pad one empty year each side of the 2007-2015
# data -- presumably intentional spacing on the discrete x axis; confirm
states$year <- factor(states$year, levels=as.character(2006:2016))
states$name <- factor(states$name, levels=ordr$name)
## `````````````````````````````````````````````
## `````````````````````````````````````````````
## Data Visualization ####
## `````````````````````````````````````````````
# plot: one lollipop-style panel (segment + point) per state, panels ordered
# worst-to-best by mean homeless rate (factor levels set above)
#+ fig.retina=2, fig.width=10, fig.height=15
gg <- ggplot(states, aes(x=year, y=homeless_per_100k))
gg <- gg + geom_segment(aes(xend=year, yend=0), size=0.33)
gg <- gg + geom_point(size=0.5)
gg <- gg + scale_x_discrete(expand=c(0,0),
breaks=seq(2007, 2015, length.out=5),
labels=c("2007", "", "2011", "", "2015"),
drop=FALSE)
gg <- gg + scale_y_continuous(expand=c(0,0), labels=comma, limits=c(0,1400))
gg <- gg + labs(x=NULL, y=NULL,
title="US Department of Housing & Urban Development (HUD) Total (Estimated) Homeless Population",
subtitle="Counts aggregated from HUD Communities of Care Regional Surveys (normalized per 100K population)",
caption="Data from: https://www.hudexchange.info/resource/4832/2015-ahar-part-1-pit-estimates-of-homelessness/")
gg <- gg + facet_wrap(~name, scales="free", ncol=6)
#gg <- gg + theme_hrbrmstr_an(grid="Y", axis="", strip_text_size=9)
gg <- gg + theme(axis.text.x=element_text(size=8))
gg <- gg + theme(axis.text.y=element_text(size=7))
# FIX: panel.margin was deprecated in ggplot2 2.2.0; panel.spacing is the
# supported name (labs(subtitle=, caption=) above already requires >= 2.2.0)
gg <- gg + theme(panel.spacing=unit(c(10, 10), "pt"))
gg <- gg + theme(panel.background=element_rect(color="#97cbdc44", fill="#97cbdc44"))
gg <- gg + theme(plot.margin=margin(10, 20, 10, 15))
gg
## `````````````````````````````````````````````
|
/pattern/scripts/v1.R
|
no_license
|
52vis/2016-14
|
R
| false
| false
| 4,708
|
r
|
#### Rud.Is Challenge #2
# https://github.com/52vis/2016-14
# https://rud.is/b/2016/04/06/52vis-week-2-2016-week-14-honing-in-on-the-homeless/
## `````````````````````````````````````````````
## Load Libraries ####
# Install any packages that are not already present, then attach them.
# http://stackoverflow.com/questions/4090169/elegant-way-to-check-for-missing-packages-and-install-them
# FIX: "virdis" was a typo for "viridis" -- install.packages("virdis") fails.
# NOTE(review): "hrbrmisc" and "albersusa" are GitHub-only packages, so the
# CRAN install.packages() call below cannot install them (albersusa is
# handled via devtools below) -- confirm hrbrmisc is installed separately.
list.of.packages <- c("ggplot2", "showtext", "grid","ggalt","ggthemes","readxl","hrbrmisc","stringr","viridis","purrr","dplyr","tidyr","scales","albersusa")
new.packages <- list.of.packages[!(list.of.packages %in% installed.packages()[,"Package"])]
if(length(new.packages)) install.packages(new.packages)
library(ggplot2)
library(grid)
library(ggalt)
library(readxl)
library(hrbrmisc)
library(stringr)
library(purrr)
library(dplyr)
library(tidyr)
library(scales)
devtools::install_github("hrbrmstr/albersusa")
## `````````````````````````````````````````````
## `````````````````````````````````````````````
## Scraping Data ####
## `````````````````````````````````````````````
# grab the HUD homeless data (one Excel tab per survey year, 2015 back to 2007)
URL <- "https://www.hudexchange.info/resources/documents/2007-2015-PIT-Counts-by-CoC.xlsx"
fil <- basename(URL)
# download once; a cached local copy is reused on subsequent runs
if (!file.exists(fil)) download.file(URL, fil, mode="wb")
# turn the excel tabs into a long data.frame
yrs <- 2015:2007
# sheet index "1".."9" maps to year 2015..2007; map_df iterates over the names
names(yrs) <- 1:9
homeless <- map_df(names(yrs), function(i) {
# i is the sheet number as a character string
df <- suppressWarnings(read_excel(fil, as.numeric(i)))
# columns 3+ are counts; coerce to numeric (coercion warnings suppressed)
df[,3:ncol(df)] <- suppressWarnings(lapply(df[,3:ncol(df)], as.numeric))
# normalize column names: lower-case, runs of dots -> "_", strip trailing digits
new_names <- tolower(make.names(colnames(df)))
new_names <- str_replace_all(new_names, "\\.+", "_")
df <- setNames(df, str_replace_all(new_names, "_[[:digit:]]+$", ""))
# tag every row of this sheet with its survey year
bind_cols(df, data_frame(year=rep(yrs[i], nrow(df))))
})
# clean it up a bit: state = first two letters of the CoC number; drop " CoC"
homeless <- mutate(homeless,
state=str_match(coc_number, "^([[:alpha:]]{2})")[,2],
coc_name=str_replace(coc_name, " CoC$", ""))
homeless <- select(homeless, year, state, everything())
# rows without a parseable state code are totals/notes rows -- drop them
homeless <- filter(homeless, !is.na(state))
## `````````````````````````````````````````````
## `````````````````````````````````````````````
## Read Data ####
## `````````````````````````````````````````````
# read in the us population data (wide format: one "X<year>" column per year)
uspop <- read.csv("/mnt/r.rudis.challenge2/data/uspop - 2.csv", stringsAsFactors=FALSE)
# wide -> long: one row per (state, year), keeping name + iso_3166_2 as ids
uspop_long <- gather(uspop, year, population, -name, -iso_3166_2)
# strip the "X" prefix read.csv adds to numeric column names
uspop_long$year <- sub("X", "", uspop_long$year)
## `````````````````````````````````````````````
## `````````````````````````````````````````````
## Data Manipulations ####
# normalize the values
# total homeless per state-year (count() sums total_homeless via wt=)
states <- count(homeless, year, state, wt=total_homeless)
# attach full state names from the albersusa composite map's @data slot
# NOTE(review): assumes columns 3:4 are iso_3166_2/name -- confirm with package
states <- left_join(states, albersusa::usa_composite()@data[,3:4], by=c("state"="iso_3166_2"))
states <- ungroup(filter(states, !is.na(name)))
# year to character so it joins against uspop_long$year below
states$year <- as.character(states$year)
# per-100k rate; left_join() matches on the shared name/year columns
states <- mutate(left_join(states, uspop_long), homeless_per_100k=(n/population)*100000)
# we want to order from worst to best (highest mean rate first)
group_by(states, name) %>%
summarise(mean=mean(homeless_per_100k, na.rm=TRUE)) %>%
arrange(desc(mean)) -> ordr
# NOTE(review): levels 2006:2016 pad one empty year each side of the 2007-2015
# data -- presumably intentional spacing on the discrete x axis; confirm
states$year <- factor(states$year, levels=as.character(2006:2016))
states$name <- factor(states$name, levels=ordr$name)
## `````````````````````````````````````````````
## `````````````````````````````````````````````
## Data Visualization ####
## `````````````````````````````````````````````
# plot: one lollipop-style panel (segment + point) per state, panels ordered
# worst-to-best by mean homeless rate (factor levels set above)
#+ fig.retina=2, fig.width=10, fig.height=15
gg <- ggplot(states, aes(x=year, y=homeless_per_100k))
gg <- gg + geom_segment(aes(xend=year, yend=0), size=0.33)
gg <- gg + geom_point(size=0.5)
gg <- gg + scale_x_discrete(expand=c(0,0),
breaks=seq(2007, 2015, length.out=5),
labels=c("2007", "", "2011", "", "2015"),
drop=FALSE)
gg <- gg + scale_y_continuous(expand=c(0,0), labels=comma, limits=c(0,1400))
gg <- gg + labs(x=NULL, y=NULL,
title="US Department of Housing & Urban Development (HUD) Total (Estimated) Homeless Population",
subtitle="Counts aggregated from HUD Communities of Care Regional Surveys (normalized per 100K population)",
caption="Data from: https://www.hudexchange.info/resource/4832/2015-ahar-part-1-pit-estimates-of-homelessness/")
gg <- gg + facet_wrap(~name, scales="free", ncol=6)
#gg <- gg + theme_hrbrmstr_an(grid="Y", axis="", strip_text_size=9)
gg <- gg + theme(axis.text.x=element_text(size=8))
gg <- gg + theme(axis.text.y=element_text(size=7))
# FIX: panel.margin was deprecated in ggplot2 2.2.0; panel.spacing is the
# supported name (labs(subtitle=, caption=) above already requires >= 2.2.0)
gg <- gg + theme(panel.spacing=unit(c(10, 10), "pt"))
gg <- gg + theme(panel.background=element_rect(color="#97cbdc44", fill="#97cbdc44"))
gg <- gg + theme(plot.margin=margin(10, 20, 10, 15))
gg
## `````````````````````````````````````````````
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/sumCountsAcrossCells.R
\name{sumCountsAcrossCells}
\alias{sumCountsAcrossCells}
\title{Sum counts across a set of cells}
\usage{
sumCountsAcrossCells(object, ids, exprs_values = "counts",
BPPARAM = SerialParam())
}
\arguments{
\item{object}{A \linkS4class{SingleCellExperiment} object or a count matrix.}
\item{ids}{A factor specifying the set to which each cell in \code{object} belongs.}
\item{exprs_values}{A string or integer scalar specifying the assay of \code{object} containing counts, if \code{object} is a SingleCellExperiment.}
\item{BPPARAM}{A \linkS4class{BiocParallelParam} object specifying how summation should be parallelized.}
}
\value{
A count matrix where counts for all cells in the same set are summed together for each feature.
Columns are ordered by \code{levels(ids)}.
}
\description{
Create a count matrix where counts for all cells in a set are summed together.
}
\details{
This function provides a convenient method for aggregating counts across multiple columns for each feature.
A typical application would be to sum counts across all cells in each cluster to obtain \dQuote{pseudo-bulk} samples for further analysis.
The behaviour of this function is equivalent to that of \code{\link{colsum}}.
However, this function can operate on any matrix representation in \code{object},
and can do so in a parallelized manner for large matrices without resorting to block processing.
Any \code{NA} values in \code{ids} are implicitly ignored and will not be considered or reported.
This may be useful, e.g., to remove undesirable cells by setting their entries in \code{ids} to \code{NA}.
}
\examples{
data("sc_example_counts")
data("sc_example_cell_info")
example_sce <- SingleCellExperiment(
assays = list(counts = sc_example_counts),
colData = sc_example_cell_info)
ids <- sample(LETTERS[1:5], ncol(example_sce), replace=TRUE)
out <- sumCountsAcrossCells(example_sce, ids)
dimnames(out)
}
\author{
Aaron Lun
}
|
/man/sumCountsAcrossCells.Rd
|
no_license
|
mimi3421/scater
|
R
| false
| true
| 2,029
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/sumCountsAcrossCells.R
\name{sumCountsAcrossCells}
\alias{sumCountsAcrossCells}
\title{Sum counts across a set of cells}
\usage{
sumCountsAcrossCells(object, ids, exprs_values = "counts",
BPPARAM = SerialParam())
}
\arguments{
\item{object}{A \linkS4class{SingleCellExperiment} object or a count matrix.}
\item{ids}{A factor specifying the set to which each cell in \code{object} belongs.}
\item{exprs_values}{A string or integer scalar specifying the assay of \code{object} containing counts, if \code{object} is a SingleCellExperiment.}
\item{BPPARAM}{A \linkS4class{BiocParallelParam} object specifying how summation should be parallelized.}
}
\value{
A count matrix where counts for all cells in the same set are summed together for each feature.
Columns are ordered by \code{levels(ids)}.
}
\description{
Create a count matrix where counts for all cells in a set are summed together.
}
\details{
This function provides a convenient method for aggregating counts across multiple columns for each feature.
A typical application would be to sum counts across all cells in each cluster to obtain \dQuote{pseudo-bulk} samples for further analysis.
The behaviour of this function is equivalent to that of \code{\link{colsum}}.
However, this function can operate on any matrix representation in \code{object},
and can do so in a parallelized manner for large matrices without resorting to block processing.
Any \code{NA} values in \code{ids} are implicitly ignored and will not be considered or reported.
This may be useful, e.g., to remove undesirable cells by setting their entries in \code{ids} to \code{NA}.
}
\examples{
data("sc_example_counts")
data("sc_example_cell_info")
example_sce <- SingleCellExperiment(
assays = list(counts = sc_example_counts),
colData = sc_example_cell_info)
ids <- sample(LETTERS[1:5], ncol(example_sce), replace=TRUE)
out <- sumCountsAcrossCells(example_sce, ids)
dimnames(out)
}
\author{
Aaron Lun
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/BoxCox.R
\name{step_BoxCox}
\alias{step_BoxCox}
\title{Box-Cox Transformation for Non-Negative Data}
\usage{
step_BoxCox(recipe, ..., role = NA, trained = FALSE, lambdas = NULL,
limits = c(-5, 5), nunique = 5)
}
\arguments{
\item{recipe}{A recipe object. The step will be added to the sequence of
operations for this recipe.}
\item{...}{One or more selector functions to choose which variables are
affected by the step. See \code{\link{selections}} for more details.}
\item{role}{Not used by this step since no new variables are created.}
\item{trained}{A logical to indicate if the quantities for preprocessing
have been estimated.}
\item{lambdas}{A numeric vector of transformation values. This is
\code{NULL} until computed by \code{\link{prepare.recipe}}.}
\item{limits}{A length 2 numeric vector defining the range to compute the
transformation parameter lambda.}
\item{nunique}{An integer; variables with fewer than this number of unique
values will not be evaluated for a transformation}
}
\value{
\code{step_BoxCox} returns an object of class \code{step_BoxCox}.
}
\description{
\code{step_BoxCox} creates a \emph{specification} of a recipe step that will
transform data using a simple Box-Cox transformation.
}
\details{
The Box-Cox transformation, which requires a strictly positive
variable, can be used to rescale a variable to be more similar to a
normal distribution. In this package, the partial log-likelihood function
is directly optimized within a reasonable set of transformation values
(which can be changed by the user).
This transformation is typically done on the outcome variable using the
residuals for a statistical model (such as ordinary least squares).
Here, a simple null model (intercept only) is used to apply the
transformation to the \emph{predictor} variables individually. This can
have the effect of making the variable distributions more symmetric.
If the transformation parameters are estimated to be very close to the
bounds, or if the optimization fails, a value of \code{NA} is used and
no transformation is applied.
}
\examples{
rec <- recipe(~ ., data = as.data.frame(state.x77))
bc_trans <- step_BoxCox(rec, all_numeric())
bc_estimates <- prepare(bc_trans, training = as.data.frame(state.x77))
bc_data <- bake(bc_estimates, as.data.frame(state.x77))
plot(density(state.x77[, "Illiteracy"]), main = "before")
plot(density(bc_data$Illiteracy), main = "after")
}
\references{
Sakia, R. M. (1992). The Box-Cox transformation technique:
A review. \emph{The Statistician}, 169-178.
}
\seealso{
\code{\link{step_YeoJohnson}} \code{\link{recipe}}
\code{\link{prepare.recipe}} \code{\link{bake.recipe}}
}
\concept{
preprocessing transformation_methods
}
\keyword{datagen}
|
/man/step_BoxCox.Rd
|
no_license
|
lionel-/recipes
|
R
| false
| true
| 2,815
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/BoxCox.R
\name{step_BoxCox}
\alias{step_BoxCox}
\title{Box-Cox Transformation for Non-Negative Data}
\usage{
step_BoxCox(recipe, ..., role = NA, trained = FALSE, lambdas = NULL,
limits = c(-5, 5), nunique = 5)
}
\arguments{
\item{recipe}{A recipe object. The step will be added to the sequence of
operations for this recipe.}
\item{...}{One or more selector functions to choose which variables are
affected by the step. See \code{\link{selections}} for more details.}
\item{role}{Not used by this step since no new variables are created.}
\item{trained}{A logical to indicate if the quantities for preprocessing
have been estimated.}
\item{lambdas}{A numeric vector of transformation values. This is
\code{NULL} until computed by \code{\link{prepare.recipe}}.}
\item{limits}{A length 2 numeric vector defining the range to compute the
transformation parameter lambda.}
\item{nunique}{An integer; variables with fewer than this number of unique
values will not be evaluated for a transformation}
}
\value{
\code{step_BoxCox} returns an object of class \code{step_BoxCox}.
}
\description{
\code{step_BoxCox} creates a \emph{specification} of a recipe step that will
transform data using a simple Box-Cox transformation.
}
\details{
The Box-Cox transformation, which requires a strictly positive
variable, can be used to rescale a variable to be more similar to a
normal distribution. In this package, the partial log-likelihood function
is directly optimized within a reasonable set of transformation values
(which can be changed by the user).
This transformation is typically done on the outcome variable using the
residuals for a statistical model (such as ordinary least squares).
Here, a simple null model (intercept only) is used to apply the
transformation to the \emph{predictor} variables individually. This can
have the effect of making the variable distributions more symmetric.
If the transformation parameters are estimated to be very close to the
bounds, or if the optimization fails, a value of \code{NA} is used and
no transformation is applied.
}
\examples{
rec <- recipe(~ ., data = as.data.frame(state.x77))
bc_trans <- step_BoxCox(rec, all_numeric())
bc_estimates <- prepare(bc_trans, training = as.data.frame(state.x77))
bc_data <- bake(bc_estimates, as.data.frame(state.x77))
plot(density(state.x77[, "Illiteracy"]), main = "before")
plot(density(bc_data$Illiteracy), main = "after")
}
\references{
Sakia, R. M. (1992). The Box-Cox transformation technique:
A review. \emph{The Statistician}, 169-178.
}
\seealso{
\code{\link{step_YeoJohnson}} \code{\link{recipe}}
\code{\link{prepare.recipe}} \code{\link{bake.recipe}}
}
\concept{
preprocessing transformation_methods
}
\keyword{datagen}
|
#' Computes the Call Price
#'
#' Black-Scholes pricing function to apply to each scenario in order to
#' generate the P&L distribution, as described in
#' A. Meucci, "Personalized Risk Management: Historical Scenarios with Fully Flexible Probabilities"
#' GARP Risk Professional, Dec 2010, p 47-51
#'
#' @param P matrix (or vector) of underlying prices
#' @param K strike price
#' @param r risk-free rate (annualized)
#' @param t time to expiry, in years
#' @param s volatility (annualized)
#'
#' @return C call option prices, same shape as P
#'
#' @references
#' \url{http://www.symmys.com/node/150}
#' See Meucci script for "CallPrice.m"
#'
#' @author Xavier Valls \email{flamejat@@gmail.com}
CallPrice = function( P, K, r, t, s )
{
    # BUG FIX: d_1 must be divided by s * sqrt(t) (standard Black-Scholes);
    # the original port dropped that division, mispricing every scenario.
    # (d_2 = d_1 - s*sqrt(t) below confirms the standard form was intended.)
    d_1 = ( log( P/K ) + ( r + s * s/2 ) * t ) / ( s * sqrt( t ) );
    d_2 = d_1 - s * sqrt( t );
    C = P * pnorm( d_1 ) - K * exp( -r * t ) * pnorm( d_2 );
    return( C );
}
#'This script uses Entropy Pooling to compute Fully Flexible Probabilities for historical scenarios
#'based on time periods, market conditions, constraints on moments, etc., as described in
#'A. Meucci, "Personalized Risk Management: Historical Scenarios with Fully Flexible Probabilities"
#'GARP Risk Professional, Dec 2010, p 47-51
#'
#' Most recent version of article and MATLAB code available at
#' http://www.symmys.com/node/150
#' @references
#' \url{http://www.symmys.com/node/150}
#' See Meucci script for "DoubleDecay.m"
#'
#' @author Xavier Valls \email{flamejat@@gmail.com}
##########################################################################
# risk drivers scenarios
###########################################################################
# load the packaged dataset; columns of dbFFP$Data are indexed relative to
# length(dbFFP$Names) -- NOTE(review): assumes Names parallels Data's columns
# with inflation last and VIX second-to-last; confirm against dbFFP docs
data("dbFFP" )
Infl = dbFFP$Data[ , length( dbFFP$Names ) ];
Vix = dbFFP$Data[ , length( dbFFP$Names ) - 1 ];
Crude = dbFFP$Data[ , length( dbFFP$Names )-3 ];
Swp10 = dbFFP$Data[ , 2 ];
SnP = dbFFP$Data[ , 4 ];
# X: per-period log-changes of the three risk drivers used below
X = diff( log( cbind( SnP, Vix, Swp10 ) ) );
# Y: inflation with the final observation dropped so it aligns with diff(X)
Y = matrix(Infl[ -nrow( dbFFP$Data ) ]);
##########################################################################
#assign probabilities to historical scenarios
###########################################################################
# DefineProbs = "1" : rolling window
# DefineProbs = "2" : exponential smoothing
# DefineProbs = "3" : market conditions
# DefineProbs = "4" : kernel damping
# DefineProbs = "5" : partial information prox. kernel damping
# DefineProbs = "6" : partial information: match covariance
DefineProbs = 1;
# T scenarios, one probability each (filled in by the chosen scheme below)
T = dim(X)[1];
p = matrix( 0, T, 1 );
if( DefineProbs == 1)
{
# rolling window: equal weight on the first tau (two years of) observations
tau = 2 * 252;
p[ 1:tau ] = 1;
p = p / sum( p );
} else if( DefineProbs == 2 )
{
# exponential smoothing: weight decays going back in time at rate lmd
lmd = 0.0166;
p = exp( -lmd * ( T - ( 1 : T ) ) );
p = p / sum( p );
} else if( DefineProbs == 3 )
{
# market conditions: equal weight only on high-inflation scenarios
Cond = Y >= 2.8;
p[ Cond ] = 1;
p = p / sum( p );
} else if( DefineProbs == 4 )
{
# kernel damping
# NOTE(review): dmvnorm is not in base R (mvtnorm/mixtools provide it);
# this branch errors unless such a package is attached -- confirm dependency
y = 3;
h2 = cov( matrix( diff( Y ) ) );
p = dmvnorm( Y, y, h2 );
p = p / sum( p );
} else if( DefineProbs == 5 )
{
# partial information prox. kernel damping
y = 3;
h2 = NaN; # set h2=NaN for no conditioning on second moments
# NOTE(review): the next line immediately overwrites h2, so the NaN
# (no-conditioning) option above is dead code -- confirm which was intended
h2 = cov( 1 * diff( Y ) );
p = LeastInfoKernel( Y, y, h2 );
} else if( DefineProbs == 6 ){
#partial information: match covariance
l_c = 0.0055;
l_s = 0.0166;
N = 20;
Dd = DoubleDecay( X, l_c, l_s );
p = Fit2Moms( X, Dd$m, Dd$S );
}
###########################################################################
# P&L scenarios
###########################################################################
# N call options with strikes from 80% to 110% of spot, staggered expiries
N = 20;
# call parameters: latest observed spot, vol and rate
S_0 = SnP[ length(SnP) ];
vol_0 = Vix[ length(Vix)];
rf_0 = Swp10[ length(Swp10) ];
K = S_0 * ( seq( 0.8, 1.1, length = N) );
Expiry = ( 2: (N+1) ) / 252;
# scenario values one period ahead, driven by the historical log-changes in X
S_T = S_0 * exp( X[ , 1 ] );
vol_T = vol_0 * exp( X[ , 2 ] );
rf_T = rf_0 * exp( X[ , 3 ] );
PnL = matrix( NaN, T, N );
# securities scenarios: one-day P&L per call (expiry shrinks by one day)
for( n in 1:N )
{
Call_1 = CallPrice( S_T, K[ n ], rf_T, Expiry[ n ] - 1 / 252, vol_T );
Call_0 = CallPrice( S_0, K[ n ], rf_0, Expiry[ n ], vol_0 );
PnL[ , n ] = Call_1 - Call_0;
}
# portfolio scenarios
# long one unit of each of the first N/2 calls, short the last N/2
u = -rbind( -matrix( 1, N/2, 1 ), matrix( 1, N/2, 1 ) ); # number of units (contracts/shares/etc)
PnL_u = PnL %*% u;
|
/demo/FullFlexProbs.R
|
no_license
|
runiaruni/Meucci
|
R
| false
| false
| 4,273
|
r
|
#' Computes the Call Price
#'
#' Black-Scholes pricing function to apply to each scenario in order to
#' generate the P&L distribution, as described in
#' A. Meucci, "Personalized Risk Management: Historical Scenarios with Fully Flexible Probabilities"
#' GARP Risk Professional, Dec 2010, p 47-51
#'
#' @param P matrix (or vector) of underlying prices
#' @param K strike price
#' @param r risk-free rate (annualized)
#' @param t time to expiry, in years
#' @param s volatility (annualized)
#'
#' @return C call option prices, same shape as P
#'
#' @references
#' \url{http://www.symmys.com/node/150}
#' See Meucci script for "CallPrice.m"
#'
#' @author Xavier Valls \email{flamejat@@gmail.com}
CallPrice = function( P, K, r, t, s )
{
    # BUG FIX: d_1 must be divided by s * sqrt(t) (standard Black-Scholes);
    # the original port dropped that division, mispricing every scenario.
    # (d_2 = d_1 - s*sqrt(t) below confirms the standard form was intended.)
    d_1 = ( log( P/K ) + ( r + s * s/2 ) * t ) / ( s * sqrt( t ) );
    d_2 = d_1 - s * sqrt( t );
    C = P * pnorm( d_1 ) - K * exp( -r * t ) * pnorm( d_2 );
    return( C );
}
#'This script uses Entropy Pooling to compute Fully Flexible Probabilities for historical scenarios
#'based on time periods, market conditions, constraints on moments, etc., as described in
#'A. Meucci, "Personalized Risk Management: Historical Scenarios with Fully Flexible Probabilities"
#'GARP Risk Professional, Dec 2010, p 47-51
#'
#' Most recent version of article and MATLAB code available at
#' http://www.symmys.com/node/150
#' @references
#' \url{http://www.symmys.com/node/150}
#' See Meucci script for "DoubleDecay.m"
#'
#' @author Xavier Valls \email{flamejat@@gmail.com}
##########################################################################
# risk drivers scenarios
###########################################################################
# load the packaged dataset; columns of dbFFP$Data are indexed relative to
# length(dbFFP$Names) -- NOTE(review): assumes Names parallels Data's columns
# with inflation last and VIX second-to-last; confirm against dbFFP docs
data("dbFFP" )
Infl = dbFFP$Data[ , length( dbFFP$Names ) ];
Vix = dbFFP$Data[ , length( dbFFP$Names ) - 1 ];
Crude = dbFFP$Data[ , length( dbFFP$Names )-3 ];
Swp10 = dbFFP$Data[ , 2 ];
SnP = dbFFP$Data[ , 4 ];
# X: per-period log-changes of the three risk drivers used below
X = diff( log( cbind( SnP, Vix, Swp10 ) ) );
# Y: inflation with the final observation dropped so it aligns with diff(X)
Y = matrix(Infl[ -nrow( dbFFP$Data ) ]);
##########################################################################
#assign probabilities to historical scenarios
###########################################################################
# DefineProbs = "1" : rolling window
# DefineProbs = "2" : exponential smoothing
# DefineProbs = "3" : market conditions
# DefineProbs = "4" : kernel damping
# DefineProbs = "5" : partial information prox. kernel damping
# DefineProbs = "6" : partial information: match covariance
DefineProbs = 1;
# T scenarios, one probability each (filled in by the chosen scheme below)
T = dim(X)[1];
p = matrix( 0, T, 1 );
if( DefineProbs == 1)
{
# rolling window: equal weight on the first tau (two years of) observations
tau = 2 * 252;
p[ 1:tau ] = 1;
p = p / sum( p );
} else if( DefineProbs == 2 )
{
# exponential smoothing: weight decays going back in time at rate lmd
lmd = 0.0166;
p = exp( -lmd * ( T - ( 1 : T ) ) );
p = p / sum( p );
} else if( DefineProbs == 3 )
{
# market conditions: equal weight only on high-inflation scenarios
Cond = Y >= 2.8;
p[ Cond ] = 1;
p = p / sum( p );
} else if( DefineProbs == 4 )
{
# kernel damping
# NOTE(review): dmvnorm is not in base R (mvtnorm/mixtools provide it);
# this branch errors unless such a package is attached -- confirm dependency
y = 3;
h2 = cov( matrix( diff( Y ) ) );
p = dmvnorm( Y, y, h2 );
p = p / sum( p );
} else if( DefineProbs == 5 )
{
# partial information prox. kernel damping
y = 3;
h2 = NaN; # set h2=NaN for no conditioning on second moments
# NOTE(review): the next line immediately overwrites h2, so the NaN
# (no-conditioning) option above is dead code -- confirm which was intended
h2 = cov( 1 * diff( Y ) );
p = LeastInfoKernel( Y, y, h2 );
} else if( DefineProbs == 6 ){
#partial information: match covariance
l_c = 0.0055;
l_s = 0.0166;
N = 20;
Dd = DoubleDecay( X, l_c, l_s );
p = Fit2Moms( X, Dd$m, Dd$S );
}
###########################################################################
# P&L scenarios
###########################################################################
# N call options with strikes from 80% to 110% of spot, staggered expiries
N = 20;
# call parameters: latest observed spot, vol and rate
S_0 = SnP[ length(SnP) ];
vol_0 = Vix[ length(Vix)];
rf_0 = Swp10[ length(Swp10) ];
K = S_0 * ( seq( 0.8, 1.1, length = N) );
Expiry = ( 2: (N+1) ) / 252;
# scenario values one period ahead, driven by the historical log-changes in X
S_T = S_0 * exp( X[ , 1 ] );
vol_T = vol_0 * exp( X[ , 2 ] );
rf_T = rf_0 * exp( X[ , 3 ] );
PnL = matrix( NaN, T, N );
# securities scenarios: one-day P&L per call (expiry shrinks by one day)
for( n in 1:N )
{
Call_1 = CallPrice( S_T, K[ n ], rf_T, Expiry[ n ] - 1 / 252, vol_T );
Call_0 = CallPrice( S_0, K[ n ], rf_0, Expiry[ n ], vol_0 );
PnL[ , n ] = Call_1 - Call_0;
}
# portfolio scenarios
# long one unit of each of the first N/2 calls, short the last N/2
u = -rbind( -matrix( 1, N/2, 1 ), matrix( 1, N/2, 1 ) ); # number of units (contracts/shares/etc)
PnL_u = PnL %*% u;
|
library(testthat)
context("simple neural network execution with nnet")
library(parsnip)
library(nnet)
###################################################################
# predictor column names for the iris classification tests
num_pred <- names(iris)[1:4]
# single-hidden-layer classifier spec; engine is chosen at fit() time
iris_nnet <- mlp(mode = "classification", hidden_units = 2)
# fit controls: loud/fail-fast, loud/catching, quiet/catching
ctrl <- fit_control(verbosity = 1, catch = FALSE)
caught_ctrl <- fit_control(verbosity = 1, catch = TRUE)
quiet_ctrl <- fit_control(verbosity = 0, catch = TRUE)
test_that('nnet execution, classification', {
skip_if_not_installed("nnet")
# formula interface should fit cleanly (regexp = NA means "expect no error")
expect_error(
res <- parsnip::fit(
iris_nnet,
Species ~ Sepal.Width + Sepal.Length,
data = iris,
engine = "nnet",
control = ctrl
),
regexp = NA
)
# x/y interface should also fit cleanly
expect_error(
res <- parsnip::fit(
iris_nnet,
x = iris[, num_pred],
y = iris$Species,
engine = "nnet",
control = ctrl
),
regexp = NA
)
# a formula referencing a nonexistent column must raise an error
expect_error(
res <- parsnip::fit(
iris_nnet,
Species ~ novar,
data = iris,
engine = "nnet",
control = ctrl
)
)
})
test_that('nnet classification prediction', {
# fit via the x/y interface, then verify parsnip's predict_class() agrees
# with the underlying nnet model's own class predictions
xy_fit <- fit(
iris_nnet,
x = iris[, num_pred],
y = iris$Species,
engine = "nnet",
control = ctrl
)
xy_pred <- predict(xy_fit$fit, newdata = iris[1:8, num_pred], type = "class")
xy_pred <- factor(xy_pred, levels = levels(iris$Species))
expect_equal(xy_pred, predict_class(xy_fit, newdata = iris[1:8, num_pred]))
# same agreement check for a model fit via the formula interface
form_fit <- fit(
iris_nnet,
Species ~ .,
data = iris,
engine = "nnet",
control = ctrl
)
form_pred <- predict(form_fit$fit, newdata = iris[1:8, num_pred], type = "class")
form_pred <- factor(form_pred, levels = levels(iris$Species))
expect_equal(form_pred, predict_class(form_fit, newdata = iris[1:8, num_pred]))
})
###################################################################
# predictor column names for the mtcars regression tests
num_pred <- names(mtcars)[3:6]
car_basic <- mlp(mode = "regression")
# specs with invalid engine args; NOTE(review): min.node.size/sampsize are
# random-forest arguments (copied from the rf tests?) -- kept as-is, and
# these two objects are not used by the tests below
bad_nnet_reg <- mlp(mode = "regression",
others = list(min.node.size = -10))
bad_rf_reg <- mlp(mode = "regression",
others = list(sampsize = -10))
# CONSISTENCY FIX: build the control objects with fit_control(), exactly as
# the classification setup above does, instead of hand-built bare lists
ctrl <- fit_control(verbosity = 1, catch = FALSE)
caught_ctrl <- fit_control(verbosity = 1, catch = TRUE)
quiet_ctrl <- fit_control(verbosity = 0, catch = TRUE)
test_that('nnet execution, regression', {
skip_if_not_installed("nnet")
# formula interface should fit cleanly (regexp = NA means "expect no error")
expect_error(
res <- parsnip::fit(
car_basic,
mpg ~ .,
data = mtcars,
engine = "nnet",
control = ctrl
),
regexp = NA
)
# x/y interface should also fit cleanly
expect_error(
res <- parsnip::fit(
car_basic,
x = mtcars[, num_pred],
y = mtcars$mpg,
engine = "nnet",
control = ctrl
),
regexp = NA
)
})
test_that('nnet regression prediction', {
# fit via the x/y interface, then verify parsnip's predict() agrees with
# the underlying nnet model's numeric predictions (first matrix column)
xy_fit <- fit(
car_basic,
x = mtcars[, -1],
y = mtcars$mpg,
engine = "nnet",
control = ctrl
)
xy_pred <- predict(xy_fit$fit, newdata = mtcars[1:8, -1])[,1]
xy_pred <- unname(xy_pred)
expect_equal(xy_pred, predict(xy_fit, newdata = mtcars[1:8, -1]))
# same agreement check for a model fit via the formula interface
form_fit <- fit(
car_basic,
mpg ~ .,
data = mtcars,
engine = "nnet",
control = ctrl
)
form_pred <- predict(form_fit$fit, newdata = mtcars[1:8, -1])[,1]
form_pred <- unname(form_pred)
expect_equal(form_pred, predict(form_fit, newdata = mtcars[1:8, -1]))
})
|
/tests/testthat/test_mlp_nnet.R
|
no_license
|
ledell/parsnip
|
R
| false
| false
| 3,285
|
r
|
library(testthat)
context("simple neural network execution with nnet")
library(parsnip)
library(nnet)
###################################################################
# predictor column names for the iris classification tests
num_pred <- names(iris)[1:4]
# single-hidden-layer classifier spec; engine is chosen at fit() time
iris_nnet <- mlp(mode = "classification", hidden_units = 2)
# fit controls: loud/fail-fast, loud/catching, quiet/catching
ctrl <- fit_control(verbosity = 1, catch = FALSE)
caught_ctrl <- fit_control(verbosity = 1, catch = TRUE)
quiet_ctrl <- fit_control(verbosity = 0, catch = TRUE)
test_that('nnet execution, classification', {
skip_if_not_installed("nnet")
# formula interface should fit cleanly (regexp = NA means "expect no error")
expect_error(
res <- parsnip::fit(
iris_nnet,
Species ~ Sepal.Width + Sepal.Length,
data = iris,
engine = "nnet",
control = ctrl
),
regexp = NA
)
# x/y interface should also fit cleanly
expect_error(
res <- parsnip::fit(
iris_nnet,
x = iris[, num_pred],
y = iris$Species,
engine = "nnet",
control = ctrl
),
regexp = NA
)
# a formula referencing a nonexistent column must raise an error
expect_error(
res <- parsnip::fit(
iris_nnet,
Species ~ novar,
data = iris,
engine = "nnet",
control = ctrl
)
)
})
test_that('nnet classification prediction', {
# fit via the x/y interface, then verify parsnip's predict_class() agrees
# with the underlying nnet model's own class predictions
xy_fit <- fit(
iris_nnet,
x = iris[, num_pred],
y = iris$Species,
engine = "nnet",
control = ctrl
)
xy_pred <- predict(xy_fit$fit, newdata = iris[1:8, num_pred], type = "class")
xy_pred <- factor(xy_pred, levels = levels(iris$Species))
expect_equal(xy_pred, predict_class(xy_fit, newdata = iris[1:8, num_pred]))
# same agreement check for a model fit via the formula interface
form_fit <- fit(
iris_nnet,
Species ~ .,
data = iris,
engine = "nnet",
control = ctrl
)
form_pred <- predict(form_fit$fit, newdata = iris[1:8, num_pred], type = "class")
form_pred <- factor(form_pred, levels = levels(iris$Species))
expect_equal(form_pred, predict_class(form_fit, newdata = iris[1:8, num_pred]))
})
###################################################################
# predictor column names for the mtcars regression tests
num_pred <- names(mtcars)[3:6]
car_basic <- mlp(mode = "regression")
# specs with invalid engine args; NOTE(review): min.node.size/sampsize are
# random-forest arguments (copied from the rf tests?) -- kept as-is, and
# these two objects are not used by the tests below
bad_nnet_reg <- mlp(mode = "regression",
others = list(min.node.size = -10))
bad_rf_reg <- mlp(mode = "regression",
others = list(sampsize = -10))
# CONSISTENCY FIX: build the control objects with fit_control(), exactly as
# the classification setup above does, instead of hand-built bare lists
ctrl <- fit_control(verbosity = 1, catch = FALSE)
caught_ctrl <- fit_control(verbosity = 1, catch = TRUE)
quiet_ctrl <- fit_control(verbosity = 0, catch = TRUE)
test_that('nnet execution, regression', {
skip_if_not_installed("nnet")
# Formula interface: fitting mpg on all predictors must not error
# (regexp = NA asserts the absence of an error).
expect_error(
res <- parsnip::fit(
car_basic,
mpg ~ .,
data = mtcars,
engine = "nnet",
control = ctrl
),
regexp = NA
)
# x/y interface should likewise fit without error.
expect_error(
res <- parsnip::fit(
car_basic,
x = mtcars[, num_pred],
y = mtcars$mpg,
engine = "nnet",
control = ctrl
),
regexp = NA
)
})
test_that('nnet regression prediction', {
# Fit via the x/y interface, then compare parsnip's predict() with the
# raw nnet prediction (first column of the returned matrix, names
# stripped so expect_equal compares plain numeric vectors).
# NOTE(review): assumes predict() on a parsnip regression fit returns an
# unnamed numeric vector -- confirm against the parsnip version in use.
xy_fit <- fit(
car_basic,
x = mtcars[, -1],
y = mtcars$mpg,
engine = "nnet",
control = ctrl
)
xy_pred <- predict(xy_fit$fit, newdata = mtcars[1:8, -1])[,1]
xy_pred <- unname(xy_pred)
expect_equal(xy_pred, predict(xy_fit, newdata = mtcars[1:8, -1]))
# Same comparison for a model fit via the formula interface.
form_fit <- fit(
car_basic,
mpg ~ .,
data = mtcars,
engine = "nnet",
control = ctrl
)
form_pred <- predict(form_fit$fit, newdata = mtcars[1:8, -1])[,1]
form_pred <- unname(form_pred)
expect_equal(form_pred, predict(form_fit, newdata = mtcars[1:8, -1]))
})
|
# Decompose a normalized histogram into three Gaussian components and
# save the figure. Fix: the script uses ggplot()/geom_*()/ggsave() but
# never attached ggplot2, so it failed when run standalone.
library(ggplot2)

# Histogram counts (single unnamed column), normalized to proportions.
y <- read.csv('./Aex_ac_hist_1103.txt', header=FALSE)$V1
y <- y/sum(y)
x <- seq(-.19, 1.19, .02)        # bin centers

# Per-peak parameters from a previous fit: centers, widths, areas.
# NOTE: `c`, `w`, `A` shadow base names; kept for compatibility with the
# original fit output.
c <- c(.14189, .31037, .52897)
w <- c(.0948, .20806, .38386)
A <- c(.00293, .00815, .00868)
o <- 1.7167e-4                   # constant baseline offset

# Area-parameterized Gaussian: A / (w * sqrt(pi/2)) * exp(-2((x-c)/w)^2)
f <- function(x, c, A, w) return(A/w/sqrt(pi/2)*exp(-2*((x-c)/w)^2))

# Evaluate each component and their sum on a fine grid.
xx <- seq(-.2, 1.18, .001)
peak.1 <- f(xx, c[1], A[1], w[1])
peak.2 <- f(xx, c[2], A[2], w[2])
peak.3 <- f(xx, c[3], A[3], w[3])
peak <- peak.1 + peak.2 + peak.3

# Histogram bars plus shaded component peaks and the total envelope.
p <- ggplot(NULL)+
  geom_col(aes(x, y), fill='#DC0000FF')+
  geom_area(aes(xx, peak.2+o), size=1, fill='#4DBBD5FF', alpha=.8)+
  geom_area(aes(xx, peak.1+o), size=1, fill='#B09C85FF', alpha=.8)+
  geom_area(aes(xx, peak.3+o), size=1, fill='#FFFFFFFF', alpha=.8)+
  geom_line(aes(xx, peak+o), size=1, col='#000000FF')+
  scale_x_continuous(breaks=seq(-.2,1.2,.2), limits=c(-.2, 1.2))+
  scale_y_continuous(limits=c(0,.061))+
  theme_bw()
ggsave('./A_y_hist.pdf', p, dpi=300, width=12.9, height=3)
|
/TIRF_EA_hist/3/A_y_hist.r
|
no_license
|
psichen/affinity-of-Skp-to-OmpC
|
R
| false
| false
| 950
|
r
|
# Decompose a normalized histogram into three Gaussian components and
# save the figure. Fix: the script uses ggplot()/geom_*()/ggsave() but
# never attached ggplot2, so it failed when run standalone.
library(ggplot2)

# Histogram counts (single unnamed column), normalized to proportions.
y <- read.csv('./Aex_ac_hist_1103.txt', header=FALSE)$V1
y <- y/sum(y)
x <- seq(-.19, 1.19, .02)        # bin centers

# Per-peak parameters from a previous fit: centers, widths, areas.
# NOTE: `c`, `w`, `A` shadow base names; kept for compatibility with the
# original fit output.
c <- c(.14189, .31037, .52897)
w <- c(.0948, .20806, .38386)
A <- c(.00293, .00815, .00868)
o <- 1.7167e-4                   # constant baseline offset

# Area-parameterized Gaussian: A / (w * sqrt(pi/2)) * exp(-2((x-c)/w)^2)
f <- function(x, c, A, w) return(A/w/sqrt(pi/2)*exp(-2*((x-c)/w)^2))

# Evaluate each component and their sum on a fine grid.
xx <- seq(-.2, 1.18, .001)
peak.1 <- f(xx, c[1], A[1], w[1])
peak.2 <- f(xx, c[2], A[2], w[2])
peak.3 <- f(xx, c[3], A[3], w[3])
peak <- peak.1 + peak.2 + peak.3

# Histogram bars plus shaded component peaks and the total envelope.
p <- ggplot(NULL)+
  geom_col(aes(x, y), fill='#DC0000FF')+
  geom_area(aes(xx, peak.2+o), size=1, fill='#4DBBD5FF', alpha=.8)+
  geom_area(aes(xx, peak.1+o), size=1, fill='#B09C85FF', alpha=.8)+
  geom_area(aes(xx, peak.3+o), size=1, fill='#FFFFFFFF', alpha=.8)+
  geom_line(aes(xx, peak+o), size=1, col='#000000FF')+
  scale_x_continuous(breaks=seq(-.2,1.2,.2), limits=c(-.2, 1.2))+
  scale_y_continuous(limits=c(0,.061))+
  theme_bw()
ggsave('./A_y_hist.pdf', p, dpi=300, width=12.9, height=3)
|
#=====================================================================#
#====== CONFIDENCE INTERVALS & DIFFERENCES IN MEANS/PROPORTIONS ======#
#=====================================================================#

# Author: Jonah Gabry (jsg2201@columbia.edu)
# Written using R version 3.1.1 on Mac OS X 10.9.3

# Setup -------------------------------------------------------------------
# _________________________________________________________________________

# set working directory (replace text inside quotes with path to the desired directory)
setwd("INSERT PATH TO DIRECTORY")

# load packages
library(QMSS)
library(plyr)

# load data
load("GSS.RData")
load("GSS_2010.RData")

# Confidence Intervals ----------------------------------------------------
# _________________________________________________________________________

### Looking at internet hours ###

# Create a new variable "totalhr" adding "wwwhr" and "emailhr" variables
GSS_2010$totalhr <- with(GSS_2010, wwwhr + emailhr)
summary(GSS_2010$totalhr)

# Manually calculate 95% confidence interval (use distinct names so we
# don't mask base mean() and sd()):
mean_hr <- mean(GSS_2010$totalhr, na.rm = TRUE)
sd_hr <- sd(GSS_2010$totalhr, na.rm = TRUE)
n_obs <- sum(!is.na(GSS_2010$totalhr))
ci <- mean_hr + sd_hr/sqrt(n_obs) * qt(p = c(0.025, 0.975), df = n_obs - 1) # qt() is the quantile function for the t distribution
ci

# Have R compute the 95% confidence interval
t.test(GSS_2010$totalhr)$conf.int

# Or the 99% confidence interval
t.test(GSS_2010$totalhr, conf.level = 0.99)$conf.int

# Compare confidence intervals for various confidence levels

# store the levels to use in a vector called levs
levs <- c(0.1, 0.5, 0.9, 0.95, 0.99, 0.9999)

# make an empty matrix (all 0s) with length(levs) rows and 2 columns that
# we'll fill in with the confidence intervals for the different levels
ci <- mat.or.vec(nr = length(levs), nc = 2)

# Many ways to proceed. For practice with loops we can use a loop over the
# indices of the levs vector (seq_along() is safer than 1:length())
for (j in seq_along(levs)) {
  # now we set jth row of the empty ci matrix to be the confidence interval
  # obtained using the jth level in levs
  ci[j,] <- t.test(GSS_2010$totalhr, conf.level = levs[j])$conf.int
}

# make the row names the conf levels as percents (multiply by 100 & add % symbol)
rownames(ci) <- paste0(100*levs,"%")

# make the column names "lower" and "upper" for the bounds of the interval
colnames(ci) <- c("Lower","Upper")

# display the confidence intervals rounded to 3 decimal places
round(ci, 3)

# We could do a similar loop using the manual calculation of the CIs instead
# of the t.test function
ci <- mat.or.vec(length(levs), 2)
for (j in seq_along(levs)) {
  q <- (1 - levs[j])/2
  q <- c(q, 1 - q)
  ci[j,] <- mean_hr + sd_hr/sqrt(n_obs)*qt(q, df = n_obs - 1)
}
rownames(ci) <- paste0(100*levs,"%")
colnames(ci) <- c("Lower","Upper")
round(ci, 3)

### Politicians example ###

# Use the simple Tab function we wrote to get a table of counts and percentages
# The function is also included in the QMSS package
?Tab
Tab(GSS_2010$polgreed)
Tab(GSS_2010$polgreed, useNA = "ifany")

# Create a new variable new.polgreed by recoding polgreed with mapvalues
# function in plyr package
GSS_2010$new.polgreed <- mapvalues(GSS_2010$polgreed,
                                   from = 1:5, # old values
                                   to = c(rep(1,2), rep(0,3))) # new values
Tab(GSS_2010$new.polgreed)

# we can also assign the output from Tab() to an object
tab.polgreed <- Tab(GSS_2010$new.polgreed)
tab.polgreed

# Get 95% confidence interval for proportion using binom.test()
# note: we pass to binom.test() a vector of "success" and "failure" counts

# first, we can get just the counts from tab.polgreed in several ways
tab.polgreed[, 1]
tab.polgreed[, "Count"]
tab.polgreed[1:2]

# this gives us the count of 0s and then the count of 1s, but for binom.test()
# we want to give it the "successes" (the 1s) first so we use
# tab.polgreed[2:1] instead of tab.polgreed[1:2]
tab.polgreed[2:1]

# now use binom.test() with successes first (the original code passed
# [1:2], contradicting the note above)
binom.test(tab.polgreed[2:1])

# or just view the confidence interval
binom.test(tab.polgreed[2:1])$conf.int

# Test for difference in means and proportions ----------------------------
# _________________________________________________________________________

### Working hours and race example ###

# Make indicator variable for race = white
GSS_2010$white <- ifelse(GSS_2010$race == 1, 1, 0)
Tab(GSS_2010$white)

# Two-sample t-test (for difference of two means) with equal variances assumed
t.test(hrs1 ~ white, data = GSS_2010, var.equal = TRUE) # set var.equal = FALSE to not assume equal variances

### Gender and kindness example ###

# extract 2004 subset of cumulative GSS
GSS_2004 <- subset(GSS, year==2004)

# look at levels of kindpers variable
Tab(GSS_2004$kindpers)

# create indicator variable "kind" for kindpers==1
GSS_2004$kind <- ifelse(GSS_2004$kindpers == 1, 1, 0)

# Test for differences in proportions between genders with prop.test()
table.kind <- with(GSS_2004, table(sex, kind))
table.kind
table.kind[, 2:1] # flip column so column for kind=1 is first
prop.test(table.kind[, 2:1])
|
/Code/Quantitative Analytic Techniques/03_Inference.R
|
permissive
|
anhnguyendepocen/QMSS_in_R
|
R
| false
| false
| 5,139
|
r
|
#=====================================================================#
#====== CONFIDENCE INTERVALS & DIFFERENCES IN MEANS/PROPORTIONS ======#
#=====================================================================#

# Author: Jonah Gabry (jsg2201@columbia.edu)
# Written using R version 3.1.1 on Mac OS X 10.9.3

# Setup -------------------------------------------------------------------
# _________________________________________________________________________

# set working directory (replace text inside quotes with path to the desired directory)
setwd("INSERT PATH TO DIRECTORY")

# load packages
library(QMSS)
library(plyr)

# load data
load("GSS.RData")
load("GSS_2010.RData")

# Confidence Intervals ----------------------------------------------------
# _________________________________________________________________________

### Looking at internet hours ###

# Create a new variable "totalhr" adding "wwwhr" and "emailhr" variables
GSS_2010$totalhr <- with(GSS_2010, wwwhr + emailhr)
summary(GSS_2010$totalhr)

# Manually calculate 95% confidence interval (use distinct names so we
# don't mask base mean() and sd()):
mean_hr <- mean(GSS_2010$totalhr, na.rm = TRUE)
sd_hr <- sd(GSS_2010$totalhr, na.rm = TRUE)
n_obs <- sum(!is.na(GSS_2010$totalhr))
ci <- mean_hr + sd_hr/sqrt(n_obs) * qt(p = c(0.025, 0.975), df = n_obs - 1) # qt() is the quantile function for the t distribution
ci

# Have R compute the 95% confidence interval
t.test(GSS_2010$totalhr)$conf.int

# Or the 99% confidence interval
t.test(GSS_2010$totalhr, conf.level = 0.99)$conf.int

# Compare confidence intervals for various confidence levels

# store the levels to use in a vector called levs
levs <- c(0.1, 0.5, 0.9, 0.95, 0.99, 0.9999)

# make an empty matrix (all 0s) with length(levs) rows and 2 columns that
# we'll fill in with the confidence intervals for the different levels
ci <- mat.or.vec(nr = length(levs), nc = 2)

# Many ways to proceed. For practice with loops we can use a loop over the
# indices of the levs vector (seq_along() is safer than 1:length())
for (j in seq_along(levs)) {
  # now we set jth row of the empty ci matrix to be the confidence interval
  # obtained using the jth level in levs
  ci[j,] <- t.test(GSS_2010$totalhr, conf.level = levs[j])$conf.int
}

# make the row names the conf levels as percents (multiply by 100 & add % symbol)
rownames(ci) <- paste0(100*levs,"%")

# make the column names "lower" and "upper" for the bounds of the interval
colnames(ci) <- c("Lower","Upper")

# display the confidence intervals rounded to 3 decimal places
round(ci, 3)

# We could do a similar loop using the manual calculation of the CIs instead
# of the t.test function
ci <- mat.or.vec(length(levs), 2)
for (j in seq_along(levs)) {
  q <- (1 - levs[j])/2
  q <- c(q, 1 - q)
  ci[j,] <- mean_hr + sd_hr/sqrt(n_obs)*qt(q, df = n_obs - 1)
}
rownames(ci) <- paste0(100*levs,"%")
colnames(ci) <- c("Lower","Upper")
round(ci, 3)

### Politicians example ###

# Use the simple Tab function we wrote to get a table of counts and percentages
# The function is also included in the QMSS package
?Tab
Tab(GSS_2010$polgreed)
Tab(GSS_2010$polgreed, useNA = "ifany")

# Create a new variable new.polgreed by recoding polgreed with mapvalues
# function in plyr package
GSS_2010$new.polgreed <- mapvalues(GSS_2010$polgreed,
                                   from = 1:5, # old values
                                   to = c(rep(1,2), rep(0,3))) # new values
Tab(GSS_2010$new.polgreed)

# we can also assign the output from Tab() to an object
tab.polgreed <- Tab(GSS_2010$new.polgreed)
tab.polgreed

# Get 95% confidence interval for proportion using binom.test()
# note: we pass to binom.test() a vector of "success" and "failure" counts

# first, we can get just the counts from tab.polgreed in several ways
tab.polgreed[, 1]
tab.polgreed[, "Count"]
tab.polgreed[1:2]

# this gives us the count of 0s and then the count of 1s, but for binom.test()
# we want to give it the "successes" (the 1s) first so we use
# tab.polgreed[2:1] instead of tab.polgreed[1:2]
tab.polgreed[2:1]

# now use binom.test() with successes first (the original code passed
# [1:2], contradicting the note above)
binom.test(tab.polgreed[2:1])

# or just view the confidence interval
binom.test(tab.polgreed[2:1])$conf.int

# Test for difference in means and proportions ----------------------------
# _________________________________________________________________________

### Working hours and race example ###

# Make indicator variable for race = white
GSS_2010$white <- ifelse(GSS_2010$race == 1, 1, 0)
Tab(GSS_2010$white)

# Two-sample t-test (for difference of two means) with equal variances assumed
t.test(hrs1 ~ white, data = GSS_2010, var.equal = TRUE) # set var.equal = FALSE to not assume equal variances

### Gender and kindness example ###

# extract 2004 subset of cumulative GSS
GSS_2004 <- subset(GSS, year==2004)

# look at levels of kindpers variable
Tab(GSS_2004$kindpers)

# create indicator variable "kind" for kindpers==1
GSS_2004$kind <- ifelse(GSS_2004$kindpers == 1, 1, 0)

# Test for differences in proportions between genders with prop.test()
table.kind <- with(GSS_2004, table(sex, kind))
table.kind
table.kind[, 2:1] # flip column so column for kind=1 is first
prop.test(table.kind[, 2:1])
|
c DCNF-Autarky [version 0.0.1].
c Copyright (c) 2018-2019 Swansea University.
c
c Input Clause Count: 514
c Performing E1-Autarky iteration.
c Remaining clauses count after E-Reduction: 514
c
c Input Parameter (command line, file):
c input filename QBFLIB/Jordan-Kaiser/reduction-finding-full-set-params-k1c3n4/query09_query15_1344n.qdimacs
c output filename /tmp/dcnfAutarky.dimacs
c autarky level 1
c conformity level 0
c encoding type 2
c no.of var 285
c no.of clauses 514
c no.of taut cls 0
c
c Output Parameters:
c remaining no.of clauses 514
c
c QBFLIB/Jordan-Kaiser/reduction-finding-full-set-params-k1c3n4/query09_query15_1344n.qdimacs 285 514 E1 [] 0 12 273 514 NONE
|
/code/dcnf-ankit-optimized/Results/QBFLIB-2018/E1/Experiments/Jordan-Kaiser/reduction-finding-full-set-params-k1c3n4/query09_query15_1344n/query09_query15_1344n.R
|
no_license
|
arey0pushpa/dcnf-autarky
|
R
| false
| false
| 704
|
r
|
c DCNF-Autarky [version 0.0.1].
c Copyright (c) 2018-2019 Swansea University.
c
c Input Clause Count: 514
c Performing E1-Autarky iteration.
c Remaining clauses count after E-Reduction: 514
c
c Input Parameter (command line, file):
c input filename QBFLIB/Jordan-Kaiser/reduction-finding-full-set-params-k1c3n4/query09_query15_1344n.qdimacs
c output filename /tmp/dcnfAutarky.dimacs
c autarky level 1
c conformity level 0
c encoding type 2
c no.of var 285
c no.of clauses 514
c no.of taut cls 0
c
c Output Parameters:
c remaining no.of clauses 514
c
c QBFLIB/Jordan-Kaiser/reduction-finding-full-set-params-k1c3n4/query09_query15_1344n.qdimacs 285 514 E1 [] 0 12 273 514 NONE
|
# Example of using STM to perform topic modelling

# Load Libraries
library(stm)
library(stringr)

# Read in data (caret-separated, quoting disabled)
data <- read.csv("data.csv", sep="^", quote="")

# Read in stopwords (one per line, no header)
stopwords <- read.csv("all_stopwords.txt", header=FALSE)
stopwords[] <- lapply(stopwords, as.character)
stopwords <- stopwords$V1

# Process meta data
processed <- textProcessor(data$topic_content, metadata=data, customstopwords=stopwords)
meta <- processed$meta
meta$date_no <- as.numeric(as.Date(meta$sitting_date))
meta$year <- as.POSIXlt(as.Date(meta$sitting_date))$year + 1900
out <- prepDocuments(processed$documents, processed$vocab, meta)

# The main STM modelling function - See STM help file for more options
mod.out <- stm(out$documents, out$vocab, K=20, prevalence = ~s(date_no)+year, data=out$meta, ngroups=4, seed=7244757, init.type="Spectral")

# Diagnostic plot for convergence
plot(mod.out$convergence$bound, type="l", ylab="Approximate Objective", main="Convergence")

# Plot of topic proportions
plot.STM(mod.out, xlim=c(0,.5), labeltype="prob", n=7)

# Identify topics
findThoughts(mod.out, meta$topic_content, topics=11)

# Output examples of each topic and top words to a text file for human analysis and labelling
topics_out <- capture.output(findThoughts(mod.out, meta$topic_content, topics=c(1:20), n=5))
# Both outputs go to the same relative file (the original wrote the first
# to "/topics_text.txt" at the filesystem root, which was unintended).
cat(topics_out, file="topics_text.txt", sep="\n", append=TRUE)
words_out <- capture.output(labelTopics(mod.out, c(1:20)))
cat(words_out, file="topics_text.txt", sep="\n", append=TRUE)

TopicNames <- c() # Set topic names once you're done!

# STM's estimate effect function is essentially a regression fit
prep <- estimateEffect(1:20~s(date_no)+year, mod.out, metadata=out$meta)

# You can plot selected topics.
plot.estimateEffect(prep, "date_no", topics=c(9,4,19), method="continuous", labeltype="custom", custom.labels=c("Municipal", "Education","Citizenship"), axis(1, at=seq(-1825,-730, by=365), labels=seq(1965,1968)))
plot.estimateEffect(prep, "date_no", topics=c(9), method="continuous", ci.level=0, xaxt="n", xlab="Time (Month)", printlegend=FALSE)
yearseq <- as.numeric(seq(from=as.Date("1966-01-01"), to=as.Date("1968-01-01"), by="year"))
axis(1, at=yearseq, labels=c(1966,1967,1968))
title(main="Prevalence of Topic 9 over Time")

# Point Estimate option can be useful in some situations
plot.estimateEffect(prep, "year", topics=9, method="pointestimate")

# Topic correlations - STM also has a plotting function to show the network
mod.out.corr <- topicCorr(mod.out)
plot.topicCorr(mod.out.corr, vlabels=TopicNames)

# Word clouds
cloud(mod.out, topic=11)

# Save wordclouds to file. Fix: the loop previously iterated over an
# undefined "topicNames" (R is case-sensitive); use TopicNames from above.
for (i in seq_along(TopicNames)) {
  writeLines(paste0("Topic ", i, ": ", TopicNames[i]))
  png(paste0('./topic', i, '.png'), width=600, height=600)
  cloud(mod.out, topic=i, scale=c(3,.5), random.order=FALSE)
  dev.off()
}

# STM also has a helper package to help with visualization of topic correlations in a web format
library(stmCorrViz)
stmCorrViz(mod.out, "stmviz.html", documents_raw=as.character(data$topic_content), documents_matrix=out, title="Analysis of _____", labels_number=7, display=TRUE, verbose=TRUE)
|
/stm_example.R
|
no_license
|
angshenting/stm-example
|
R
| false
| false
| 3,088
|
r
|
# Example of using STM to perform topic modelling

# Load Libraries
library(stm)
library(stringr)

# Read in data (caret-separated, quoting disabled)
data <- read.csv("data.csv", sep="^", quote="")

# Read in stopwords (one per line, no header)
stopwords <- read.csv("all_stopwords.txt", header=FALSE)
stopwords[] <- lapply(stopwords, as.character)
stopwords <- stopwords$V1

# Process meta data
processed <- textProcessor(data$topic_content, metadata=data, customstopwords=stopwords)
meta <- processed$meta
meta$date_no <- as.numeric(as.Date(meta$sitting_date))
meta$year <- as.POSIXlt(as.Date(meta$sitting_date))$year + 1900
out <- prepDocuments(processed$documents, processed$vocab, meta)

# The main STM modelling function - See STM help file for more options
mod.out <- stm(out$documents, out$vocab, K=20, prevalence = ~s(date_no)+year, data=out$meta, ngroups=4, seed=7244757, init.type="Spectral")

# Diagnostic plot for convergence
plot(mod.out$convergence$bound, type="l", ylab="Approximate Objective", main="Convergence")

# Plot of topic proportions
plot.STM(mod.out, xlim=c(0,.5), labeltype="prob", n=7)

# Identify topics
findThoughts(mod.out, meta$topic_content, topics=11)

# Output examples of each topic and top words to a text file for human analysis and labelling
topics_out <- capture.output(findThoughts(mod.out, meta$topic_content, topics=c(1:20), n=5))
# Both outputs go to the same relative file (the original wrote the first
# to "/topics_text.txt" at the filesystem root, which was unintended).
cat(topics_out, file="topics_text.txt", sep="\n", append=TRUE)
words_out <- capture.output(labelTopics(mod.out, c(1:20)))
cat(words_out, file="topics_text.txt", sep="\n", append=TRUE)

TopicNames <- c() # Set topic names once you're done!

# STM's estimate effect function is essentially a regression fit
prep <- estimateEffect(1:20~s(date_no)+year, mod.out, metadata=out$meta)

# You can plot selected topics.
plot.estimateEffect(prep, "date_no", topics=c(9,4,19), method="continuous", labeltype="custom", custom.labels=c("Municipal", "Education","Citizenship"), axis(1, at=seq(-1825,-730, by=365), labels=seq(1965,1968)))
plot.estimateEffect(prep, "date_no", topics=c(9), method="continuous", ci.level=0, xaxt="n", xlab="Time (Month)", printlegend=FALSE)
yearseq <- as.numeric(seq(from=as.Date("1966-01-01"), to=as.Date("1968-01-01"), by="year"))
axis(1, at=yearseq, labels=c(1966,1967,1968))
title(main="Prevalence of Topic 9 over Time")

# Point Estimate option can be useful in some situations
plot.estimateEffect(prep, "year", topics=9, method="pointestimate")

# Topic correlations - STM also has a plotting function to show the network
mod.out.corr <- topicCorr(mod.out)
plot.topicCorr(mod.out.corr, vlabels=TopicNames)

# Word clouds
cloud(mod.out, topic=11)

# Save wordclouds to file. Fix: the loop previously iterated over an
# undefined "topicNames" (R is case-sensitive); use TopicNames from above.
for (i in seq_along(TopicNames)) {
  writeLines(paste0("Topic ", i, ": ", TopicNames[i]))
  png(paste0('./topic', i, '.png'), width=600, height=600)
  cloud(mod.out, topic=i, scale=c(3,.5), random.order=FALSE)
  dev.off()
}

# STM also has a helper package to help with visualization of topic correlations in a web format
library(stmCorrViz)
stmCorrViz(mod.out, "stmviz.html", documents_raw=as.character(data$topic_content), documents_matrix=out, title="Analysis of _____", labels_number=7, display=TRUE, verbose=TRUE)
|
# BEMTOOL - Bio-Economic Model TOOLs - version 2.5
# Authors: G. Lembo, I. Bitetto, M.T. Facchini, M.T. Spedicato 2018
# COISPA Tecnologia & Ricerca, Via dei Trulli 18/20 - (Bari), Italy
# In case of use of the model, the Authors should be cited.
# If you have any comments or suggestions please contact the following e-mail address: facchini@coispa.it
# BEMTOOL is believed to be reliable. However, we disclaim any implied warranty or representation about its accuracy,
# completeness or appropriateness for any particular purpose.
#
#
#
#
#
#
#
#
#
# ------------------------------------------------------------------------------
# Add the columns to to be rendered in the tree
# ------------------------------------------------------------------------------
#
# Build the columns of the price-elasticity (MW) GTK tree view:
# a read-only " Species " column followed by one editable text column per
# fleet segment in the global BMT_FLEETSEGMENTS vector.
bmt_price_elast_MW.add_columns <- function(treeview) {
# print("Adding column to the model...")
bmt_price_elast_MW.model <- treeview$getModel()
# number column
renderer <- gtkCellRendererTextNew()
# gSignalConnect(renderer, "edited", cell.edited, model)
# Tag the renderer with its column index (0) via a one-cell data.frame,
# then insert the read-only species column bound to model column 0.
year_frame <- data.frame(c(0))
colnames(year_frame) <- c(" Species ")
renderer$setData("column", year_frame)
treeview$insertColumnWithAttributes(-1, " Species " , renderer, text = 0, editable = FALSE)
# One editable column per fleet segment; edits are routed to
# bmt_price_elast_MW.cell_edited with the model as user data.
for (e in 1:length(BMT_FLEETSEGMENTS)) {
# number column
renderer <- gtkCellRendererTextNew()
gSignalConnect(renderer, "edited", bmt_price_elast_MW.cell_edited, bmt_price_elast_MW.model)
month_frame <- data.frame(c(e))
colnames(month_frame) <- paste(" ", BMT_FLEETSEGMENTS[e], " ", sep="")
renderer$setData("column", month_frame)
# NOTE(review): `editable` is given an integer (length+1) where a logical
# or a model-column index is usually expected -- confirm whether TRUE or a
# column number was intended before changing behavior.
treeview$insertColumnWithAttributes(-1, paste(" ", BMT_FLEETSEGMENTS[e], " ", sep="") , renderer, text = e, editable = (length(BMT_FLEETSEGMENTS)+1))
}
}
|
/BEMTOOL-ver2.5-2018_0901/bmtgui/economic_params/price/price_elast_MW/price_elast_MW.add_columns.r
|
no_license
|
gresci/BEMTOOL2.5
|
R
| false
| false
| 1,768
|
r
|
# BEMTOOL - Bio-Economic Model TOOLs - version 2.5
# Authors: G. Lembo, I. Bitetto, M.T. Facchini, M.T. Spedicato 2018
# COISPA Tecnologia & Ricerca, Via dei Trulli 18/20 - (Bari), Italy
# In case of use of the model, the Authors should be cited.
# If you have any comments or suggestions please contact the following e-mail address: facchini@coispa.it
# BEMTOOL is believed to be reliable. However, we disclaim any implied warranty or representation about its accuracy,
# completeness or appropriateness for any particular purpose.
#
#
#
#
#
#
#
#
#
# ------------------------------------------------------------------------------
# Add the columns to to be rendered in the tree
# ------------------------------------------------------------------------------
#
# Build the columns of the price-elasticity (MW) GTK tree view:
# a read-only " Species " column followed by one editable text column per
# fleet segment in the global BMT_FLEETSEGMENTS vector.
bmt_price_elast_MW.add_columns <- function(treeview) {
# print("Adding column to the model...")
bmt_price_elast_MW.model <- treeview$getModel()
# number column
renderer <- gtkCellRendererTextNew()
# gSignalConnect(renderer, "edited", cell.edited, model)
# Tag the renderer with its column index (0) via a one-cell data.frame,
# then insert the read-only species column bound to model column 0.
year_frame <- data.frame(c(0))
colnames(year_frame) <- c(" Species ")
renderer$setData("column", year_frame)
treeview$insertColumnWithAttributes(-1, " Species " , renderer, text = 0, editable = FALSE)
# One editable column per fleet segment; edits are routed to
# bmt_price_elast_MW.cell_edited with the model as user data.
for (e in 1:length(BMT_FLEETSEGMENTS)) {
# number column
renderer <- gtkCellRendererTextNew()
gSignalConnect(renderer, "edited", bmt_price_elast_MW.cell_edited, bmt_price_elast_MW.model)
month_frame <- data.frame(c(e))
colnames(month_frame) <- paste(" ", BMT_FLEETSEGMENTS[e], " ", sep="")
renderer$setData("column", month_frame)
# NOTE(review): `editable` is given an integer (length+1) where a logical
# or a model-column index is usually expected -- confirm whether TRUE or a
# column number was intended before changing behavior.
treeview$insertColumnWithAttributes(-1, paste(" ", BMT_FLEETSEGMENTS[e], " ", sep="") , renderer, text = e, editable = (length(BMT_FLEETSEGMENTS)+1))
}
}
|
plot.marssMLE <-
function(x,
plot.type = c("fitted.ytT", "xtT", "model.resids", "state.resids", "qqplot.model.resids", "qqplot.state.resids"),
form = c("marxss", "marss", "dfa"),
conf.int = TRUE, conf.level = 0.95, decorate = TRUE,
plot.par = list(), ...) {
# Argument checks
plot.type <- match.arg(plot.type, several.ok = TRUE)
old.plot.type = c("observations", "states", "model.residuals", "state.residuals", "model.residuals.qqplot", "state.residuals.qqplot")
new.plot.type = c("fitted.ytT", "xtT", "model.resids", "state.resids", "qqplot.model.resids", "qqplot.state.resids")
for(i in 1:NROW(old.plot.type)) if(old.plot.type[i] %in% plot.type) plot.type[plot.type==old.plot.type[i]] <- new.plot.type[i]
if (!is.numeric(conf.level) || length(conf.level) != 1 || conf.level > 1 || conf.level < 0) stop("plot.marssMLE: conf.level must be between 0 and 1.", call. = FALSE)
if (!(conf.int %in% c(TRUE, FALSE))) stop("plot.marssMLE: conf.int must be TRUE/FALSE", call. = FALSE)
if (missing(form)) {
model_form <- attr(x[["model"]], "form")[1]
} else {
model_form <- match.arg(form)
}
plotpar <- list(point.pch = 19, point.col = "blue", point.fill = "blue", point.size = 1,
line.col = "black", line.size = 1, line.linetype = "solid",
ci.fill = "grey70", ci.col = "grey70", ci.border = FALSE,
ci.linesize = 0, ci.alpha = 0.6)
if (!is.list(plot.par)) stop("plot.marssMLE: plot.par must be a list.", call. = FALSE)
if (!missing(plot.par)){
if (!all(names(plot.par) %in% names(plotpar))){
stop(paste0("plot.marssMLE: Allowed plot.par names are ", paste(names(plotpar), collapse=", "), ".\n"), call. = FALSE) } else {
for( i in names(plot.par))
plotpar[[i]] <- plot.par[[i]]
}
}
extras <- list()
if (!missing(...)) {
extras <- list(...)
allowednames <- c("rotate", "method", "hessian.fun", "nboot")
bad.names <- names(extras)[!(names(extras) %in% allowednames)]
if (!all(names(extras) %in% allowednames)) stop(paste("plot.marssMLE:", paste(bad.names, collapse = " "), "is/are unknown argument(s). See ?tidy.marssMLE for allowed arguments.\n"), call. = FALSE)
if (model_form != "dfa" & "rotate" %in% names(extras)) {
cat("plot.marssMLE: 'rotate' argument is ignored if form!='dfa'\n Pass in form='dfa' if your model is a DFA model, but the form \n attribute is not set (because you set up your DFA model manually).\n\n")
rotate <- FALSE
}
}
# End Argument checks
alpha <- 1 - conf.level
if ("xtT" %in% plot.type) {
# make plot of states and CIs
if ("rotate" %in% names(extras)) {
rotate <- extras[["rotate"]]
if (!(rotate %in% c(TRUE, FALSE))) stop("plot.marssMLE: rotate must be TRUE/FALSE. \n")
} else {
rotate <- FALSE
}
states <- tidy.marssMLE(x, type = "xtT", conf.int = conf.int, conf.level = conf.level, ...)
if (model_form == "dfa") {
if (rotate) {
rottext <- "rotated"
} else {
rottext <- ""
}
states$.rownames <- paste("DFA", rottext, "trend", states$.rownames)
} else {
states$.rownames <- paste0("State ", states$.rownames)
}
nX <- min(9, attr(x$model, "model.dims")$x[1])
plot.nrow <- round(sqrt(nX))
plot.ncol <- ceiling(nX / plot.nrow)
par(mfrow = c(plot.nrow, plot.ncol), mar = c(2, 4, 2, 1) + 0.1)
for (plt in unique(states$.rownames)) {
with(subset(states, states$.rownames == plt), {
ylims <- c(min(estimate, conf.low, na.rm = TRUE), max(estimate, conf.high, na.rm = TRUE))
plot(t, estimate, type = "l", xlab = "", ylab = "Estimate", ylim = ylims)
title(plt)
if (conf.int) polygon(c(t, rev(t)), c(conf.low, rev(conf.high)), col = plotpar$ci.col, border = plotpar$ci.border)
lines(t, estimate)
box()
})
}
plot.type <- plot.type[plot.type != "xtT"]
cat(paste("plot type = \"xtT\" Estimated States\n"))
if (length(plot.type) != 0) {
ans <- readline(prompt = "Hit <Return> to see next plot (q to exit): ")
if (tolower(ans) == "q") {
return()
}
}
}
if ("fitted.ytT" %in% plot.type) {
# make plot of observations
df <- fitted.marssMLE(x, type = "ytT", interval="confidence", conf.level=conf.level)
df$ymin <- df$.conf.low
df$ymax <- df$.conf.up
df2 <- fitted.marssMLE(x, type = "ytT", interval="prediction", conf.level=conf.level)
df$ymin.pi <- df2$.lwr
df$ymax.pi <- df2$.upr
nY <- min(9, attr(x$model, "model.dims")$y[1])
plot.ncol <- round(sqrt(nY))
plot.nrow <- ceiling(nY / plot.ncol)
par(mfrow = c(plot.nrow, plot.ncol), mar = c(2, 4, 2, 1) + 0.1)
for (plt in levels(df$.rownames)) {
with(subset(df, df$.rownames == plt), {
ylims <- c(min(.fitted, y, ymin, ymax, na.rm = TRUE), max(.fitted, y, ymin, ymax, na.rm = TRUE))
plot(t, .fitted, type = "l", xlab = "", ylab = "Estimate", ylim = ylims)
title(plt)
if (conf.int) polygon(c(t, rev(t)), c(ymin, rev(ymax)), col = plotpar$ci.col, border = plotpar$ci.border)
points(t, y, col = plotpar$point.col, pch = plotpar$point.pch)
lines(t, .fitted, col = plotpar$line.col, lwd = plotpar$line.lwd)
if (decorate){
lines(t, ymin.pi, col = "black", lwd = 1, lty=2)
lines(t, ymax.pi, col = "black", lwd = 1, lty=2)
}
box()
})
}
plot.type <- plot.type[plot.type != "fitted.ytT"]
cat(paste("plot type = \"fitted.ytT\" Observations with Fitted Values\n"))
if (length(plot.type) != 0) {
ans <- readline(prompt = "Hit <Return> to see next plot (q to exit): ")
if (tolower(ans) == "q") {
return()
}
}
}
if ("model.resids" %in% plot.type) {
# make plot of observation residuals
df <- augment.marssMLE(x, type = "ytT", form = "marxss")
df$.resids[is.na(df$y)] <- NA
nY <- min(9, attr(x$model, "model.dims")$y[1])
plot.ncol <- round(sqrt(nY))
plot.nrow <- ceiling(nY / plot.ncol)
par(mfrow = c(plot.nrow, plot.ncol), mar = c(2, 4, 2, 1) + 0.1)
for (plt in levels(df$.rownames)) {
with(subset(df, df$.rownames == plt), {
ylims <- c(min(.resids, na.rm = TRUE), max(.resids, na.rm = TRUE))
if (decorate) {
lo <- predict(loess(.resids ~ t), newdata = data.frame(t = t), se = TRUE)
lo.t <- names(lo$fit)
sigma <- .sigma
sigma[is.na(y)] <- 0
ymin <- qnorm(alpha / 2) * sigma
ymax <- - qnorm(alpha / 2) * sigma
ylims <- c(min(.resids, ymin, na.rm = TRUE), max(.resids, ymax, na.rm = TRUE))
}
plot(t, .resids,
type = "p", xlab = "",
ylab = "", ylim = ylims,
col = plotpar$point.col, pch = plotpar$point.pch
)
title(plt)
if (decorate) {
polygon(c(t, rev(t)),
c(ymin, rev(ymax)),
col = plotpar$ci.col, border = plotpar$ci.border
)
lines(t, lo$fit, col = plotpar$line.col, lwd = plotpar$line.lwd)
}
points(t, .resids, col = plotpar$point.col, pch = plotpar$point.pch)
box()
abline(h = 0, lty = 3)
})
mtext("Observation residuals, y - E[y]", side = 2, outer = TRUE, line = -1)
}
plot.type <- plot.type[plot.type != "model.resids"]
cat(paste("plot type = \"model.resids\" Model Residuals\n"))
if (length(plot.type) != 0) {
ans <- readline(prompt = "Hit <Return> to see next plot (q to exit): ")
if (tolower(ans) == "q") {
return()
}
}
}
if ("state.resids" %in% plot.type) {
# make plot of process residuals; set form='marxss' to get process resids
df <- augment.marssMLE(x, type = "xtT", form = "marxss")
df$.rownames <- paste0("State ", df$.rownames)
nX <- min(9, attr(x$model, "model.dims")$x[1])
plot.nrow <- round(sqrt(nX))
plot.ncol <- ceiling(nX / plot.nrow)
par(mfrow = c(plot.nrow, plot.ncol), mar = c(2, 4, 2, 1) + 0.1)
for (plt in unique(df$.rownames)) {
with(subset(df, df$.rownames == plt), {
ylims <- c(min(.resids, na.rm = TRUE), max(.resids, na.rm = TRUE))
if (decorate) {
lo <- predict(loess(.resids ~ t), newdata = data.frame(t = t), se = TRUE)
lo.t <- names(lo$fit)
ymin <- qnorm(alpha / 2) * .sigma
ymax <- - qnorm(alpha / 2) * .sigma
ylims <- c(min(.resids, ymin, na.rm = TRUE), max(.resids, ymax, na.rm = TRUE))
}
plot(t, .resids,
type = "p", xlab = "",
ylab = "", ylim = ylims,
col = plotpar$point.col, pch = plotpar$point.pch
)
title(plt)
if (decorate) {
polygon(c(t, rev(t)),
c(ymin, rev(ymax)),
col = plotpar$ci.col, border = plotpar$ci.border
)
lines(t, lo$fit, col = plotpar$line.col, lwd = plotpar$line.lwd)
}
points(t, .resids, col = plotpar$point.col, pch = plotpar$point.pch)
box()
abline(h = 0, lty = 3)
})
mtext("State residuals, xtT - E[x]", side = 2, outer = TRUE, line = -1)
}
plot.type <- plot.type[plot.type != "state.resids"]
cat(paste("plot type = \"state.resids\" State Residuals\n"))
if (length(plot.type) != 0) {
ans <- readline(prompt = "Hit <Return> to see next plot (q to exit): ")
if (tolower(ans) == "q") {
return()
}
}
}
slp <- function(yy) {
y <- quantile(yy[!is.na(yy)], c(0.25, 0.75))
x <- qnorm(c(0.25, 0.75))
slope <- diff(y) / diff(x)
return(slope)
}
int <- function(yy) {
y <- quantile(yy[!is.na(yy)], c(0.25, 0.75))
x <- qnorm(c(0.25, 0.75))
slope <- diff(y) / diff(x)
int <- y[1L] - slope * x[1L]
return(int)
}
if ("qqplot.model.resids" %in% plot.type) {
# make plot of observation residuals
df <- augment.marssMLE(x, type = "ytT", form = "marxss")
slope <- tapply(df$.std.resid, df$.rownames, slp)
intercept <- tapply(df$.std.resid, df$.rownames, int)
nY <- min(9, attr(x$model, "model.dims")$y[1])
plot.ncol <- round(sqrt(nY))
plot.nrow <- ceiling(nY / plot.ncol)
par(mfrow = c(plot.nrow, plot.ncol), mar = c(2, 4, 2, 1) + 0.1)
for (plt in levels(df$.rownames)) {
with(subset(df, df$.rownames == plt), {
qqnorm(.std.resid, main = plt)
abline(a = intercept[plt], b = slope[plt], col = plotpar$line.col, lwd = plotpar$line.lwd)
})
}
plot.type <- plot.type[plot.type != "qqplot.model.resids"]
cat(paste("plot type = \"qqplot.model.resids\" QQplot of Model Standardized Residuals\n"))
if (length(plot.type) != 0) {
ans <- readline(prompt = "Hit <Return> to see next plot (q to exit): ")
if (tolower(ans) == "q") {
return()
}
}
}
if ("qqplot.state.resids" %in% plot.type) {
# make qqplot of state residuals
df <- augment.marssMLE(x, type = "xtT", form = "marxss")
df$.rownames <- paste0("State ", df$.rownames)
slope <- tapply(df$.std.resid, df$.rownames, slp)
intercept <- tapply(df$.std.resid, df$.rownames, int)
nX <- min(9, attr(x$model, "model.dims")$x[1])
plot.nrow <- round(sqrt(nX))
plot.ncol <- ceiling(nX / plot.nrow)
par(mfrow = c(plot.nrow, plot.ncol), mar = c(2, 4, 2, 1) + 0.1)
for (plt in unique(df$.rownames)) {
with(subset(df, df$.rownames == plt), {
qqnorm(.std.resid, main = plt)
abline(a = intercept[plt], b = slope[plt], col = plotpar$line.col, lwd = plotpar$line.lwd)
})
}
plot.type <- plot.type[plot.type != "qqplot.state.resids"]
cat(paste("plot type = \"qqplot.state.resids\" QQplot of State Standardized Residuals\n"))
if (length(plot.type) != 0) {
ans <- readline(prompt = "Hit <Return> to see next plot (q to exit): ")
if (tolower(ans) == "q") {
return()
}
}
}
}
|
/R/plot.marssMLE.R
|
permissive
|
abaudelle/MARSS
|
R
| false
| false
| 12,621
|
r
|
# Plot method for fitted marssMLE objects. Cycles through up to six
# diagnostic plots: fitted observations ("fitted.ytT"), estimated states
# ("xtT"), model/state residuals, and QQ plots of the standardized
# residuals. Between plots the user is prompted via readline(); answering
# "q" exits early.
#
# Arguments:
#   x          a fitted marssMLE object
#   plot.type  which plot(s) to draw; legacy names (e.g. "observations")
#              are remapped to the current names below
#   form       model form; if missing, read from attr(x$model, "form")
#   conf.int   logical; draw confidence intervals on states
#   conf.level confidence level, must be in (0, 1)
#   decorate   logical; add loess smooths and CI bands to residual plots
#   plot.par   named list overriding entries of the default `plotpar` list
#   ...        extra arguments; only rotate, method, hessian.fun, nboot
#              are accepted (forwarded to tidy.marssMLE)
#
# Side effects: draws base-graphics plots and modifies par(mfrow, mar)
# without restoring the previous settings.
plot.marssMLE <-
  function(x,
           plot.type = c("fitted.ytT", "xtT", "model.resids", "state.resids", "qqplot.model.resids", "qqplot.state.resids"),
           form = c("marxss", "marss", "dfa"),
           conf.int = TRUE, conf.level = 0.95, decorate = TRUE,
           plot.par = list(), ...) {
    # Argument checks
    plot.type <- match.arg(plot.type, several.ok = TRUE)
    # Remap deprecated plot.type names onto the current ones.
    old.plot.type = c("observations", "states", "model.residuals", "state.residuals", "model.residuals.qqplot", "state.residuals.qqplot")
    new.plot.type = c("fitted.ytT", "xtT", "model.resids", "state.resids", "qqplot.model.resids", "qqplot.state.resids")
    for(i in 1:NROW(old.plot.type)) if(old.plot.type[i] %in% plot.type) plot.type[plot.type==old.plot.type[i]] <- new.plot.type[i]
    if (!is.numeric(conf.level) || length(conf.level) != 1 || conf.level > 1 || conf.level < 0) stop("plot.marssMLE: conf.level must be between 0 and 1.", call. = FALSE)
    if (!(conf.int %in% c(TRUE, FALSE))) stop("plot.marssMLE: conf.int must be TRUE/FALSE", call. = FALSE)
    if (missing(form)) {
      # Default to the form recorded on the fitted model object.
      model_form <- attr(x[["model"]], "form")[1]
    } else {
      model_form <- match.arg(form)
    }
    # Default plotting parameters; entries can be overridden via plot.par.
    # NOTE(review): the plotting calls below reference plotpar$line.lwd,
    # but this list defines line.size (not line.lwd), so plotpar$line.lwd
    # is NULL and the device default line width is used -- confirm whether
    # line.lwd was intended here.
    plotpar <- list(point.pch = 19, point.col = "blue", point.fill = "blue", point.size = 1,
                    line.col = "black", line.size = 1, line.linetype = "solid",
                    ci.fill = "grey70", ci.col = "grey70", ci.border = FALSE,
                    ci.linesize = 0, ci.alpha = 0.6)
    if (!is.list(plot.par)) stop("plot.marssMLE: plot.par must be a list.", call. = FALSE)
    if (!missing(plot.par)){
      if (!all(names(plot.par) %in% names(plotpar))){
        stop(paste0("plot.marssMLE: Allowed plot.par names are ", paste(names(plotpar), collapse=", "), ".\n"), call. = FALSE) } else {
        for( i in names(plot.par))
          plotpar[[i]] <- plot.par[[i]]
      }
    }
    # Collect and validate the ... arguments.
    extras <- list()
    if (!missing(...)) {
      extras <- list(...)
      allowednames <- c("rotate", "method", "hessian.fun", "nboot")
      bad.names <- names(extras)[!(names(extras) %in% allowednames)]
      if (!all(names(extras) %in% allowednames)) stop(paste("plot.marssMLE:", paste(bad.names, collapse = " "), "is/are unknown argument(s). See ?tidy.marssMLE for allowed arguments.\n"), call. = FALSE)
      if (model_form != "dfa" & "rotate" %in% names(extras)) {
        cat("plot.marssMLE: 'rotate' argument is ignored if form!='dfa'\n Pass in form='dfa' if your model is a DFA model, but the form \n attribute is not set (because you set up your DFA model manually).\n\n")
        rotate <- FALSE
      }
    }
    # End Argument checks
    # Two-sided tail probability used for the residual CI bands.
    alpha <- 1 - conf.level
    if ("xtT" %in% plot.type) {
      # make plot of states and CIs
      if ("rotate" %in% names(extras)) {
        rotate <- extras[["rotate"]]
        if (!(rotate %in% c(TRUE, FALSE))) stop("plot.marssMLE: rotate must be TRUE/FALSE. \n")
      } else {
        rotate <- FALSE
      }
      states <- tidy.marssMLE(x, type = "xtT", conf.int = conf.int, conf.level = conf.level, ...)
      if (model_form == "dfa") {
        if (rotate) {
          rottext <- "rotated"
        } else {
          rottext <- ""
        }
        states$.rownames <- paste("DFA", rottext, "trend", states$.rownames)
      } else {
        states$.rownames <- paste0("State ", states$.rownames)
      }
      # At most a 3x3 grid of panels (first 9 states).
      nX <- min(9, attr(x$model, "model.dims")$x[1])
      plot.nrow <- round(sqrt(nX))
      plot.ncol <- ceiling(nX / plot.nrow)
      par(mfrow = c(plot.nrow, plot.ncol), mar = c(2, 4, 2, 1) + 0.1)
      for (plt in unique(states$.rownames)) {
        with(subset(states, states$.rownames == plt), {
          ylims <- c(min(estimate, conf.low, na.rm = TRUE), max(estimate, conf.high, na.rm = TRUE))
          plot(t, estimate, type = "l", xlab = "", ylab = "Estimate", ylim = ylims)
          title(plt)
          if (conf.int) polygon(c(t, rev(t)), c(conf.low, rev(conf.high)), col = plotpar$ci.col, border = plotpar$ci.border)
          lines(t, estimate)
          box()
        })
      }
      # Remove this type from the queue and pause before the next plot.
      plot.type <- plot.type[plot.type != "xtT"]
      cat(paste("plot type = \"xtT\" Estimated States\n"))
      if (length(plot.type) != 0) {
        ans <- readline(prompt = "Hit <Return> to see next plot (q to exit): ")
        if (tolower(ans) == "q") {
          return()
        }
      }
    }
    if ("fitted.ytT" %in% plot.type) {
      # make plot of observations
      # Confidence interval on the fitted values...
      df <- fitted.marssMLE(x, type = "ytT", interval="confidence", conf.level=conf.level)
      df$ymin <- df$.conf.low
      df$ymax <- df$.conf.up
      # ...plus a prediction interval drawn as dashed lines when decorate=TRUE.
      df2 <- fitted.marssMLE(x, type = "ytT", interval="prediction", conf.level=conf.level)
      df$ymin.pi <- df2$.lwr
      df$ymax.pi <- df2$.upr
      nY <- min(9, attr(x$model, "model.dims")$y[1])
      plot.ncol <- round(sqrt(nY))
      plot.nrow <- ceiling(nY / plot.ncol)
      par(mfrow = c(plot.nrow, plot.ncol), mar = c(2, 4, 2, 1) + 0.1)
      # NOTE(review): iterates over levels(df$.rownames) -- assumes the
      # fitted output's .rownames column is a factor; verify.
      for (plt in levels(df$.rownames)) {
        with(subset(df, df$.rownames == plt), {
          ylims <- c(min(.fitted, y, ymin, ymax, na.rm = TRUE), max(.fitted, y, ymin, ymax, na.rm = TRUE))
          plot(t, .fitted, type = "l", xlab = "", ylab = "Estimate", ylim = ylims)
          title(plt)
          if (conf.int) polygon(c(t, rev(t)), c(ymin, rev(ymax)), col = plotpar$ci.col, border = plotpar$ci.border)
          points(t, y, col = plotpar$point.col, pch = plotpar$point.pch)
          lines(t, .fitted, col = plotpar$line.col, lwd = plotpar$line.lwd)
          if (decorate){
            lines(t, ymin.pi, col = "black", lwd = 1, lty=2)
            lines(t, ymax.pi, col = "black", lwd = 1, lty=2)
          }
          box()
        })
      }
      plot.type <- plot.type[plot.type != "fitted.ytT"]
      cat(paste("plot type = \"fitted.ytT\" Observations with Fitted Values\n"))
      if (length(plot.type) != 0) {
        ans <- readline(prompt = "Hit <Return> to see next plot (q to exit): ")
        if (tolower(ans) == "q") {
          return()
        }
      }
    }
    if ("model.resids" %in% plot.type) {
      # make plot of observation residuals
      df <- augment.marssMLE(x, type = "ytT", form = "marxss")
      # Blank out residuals where the data themselves are missing.
      df$.resids[is.na(df$y)] <- NA
      nY <- min(9, attr(x$model, "model.dims")$y[1])
      plot.ncol <- round(sqrt(nY))
      plot.nrow <- ceiling(nY / plot.ncol)
      par(mfrow = c(plot.nrow, plot.ncol), mar = c(2, 4, 2, 1) + 0.1)
      for (plt in levels(df$.rownames)) {
        with(subset(df, df$.rownames == plt), {
          ylims <- c(min(.resids, na.rm = TRUE), max(.resids, na.rm = TRUE))
          if (decorate) {
            # Loess smooth through the residuals plus a +/- z * sigma band;
            # sigma is zeroed at missing observations so the band collapses there.
            lo <- predict(loess(.resids ~ t), newdata = data.frame(t = t), se = TRUE)
            lo.t <- names(lo$fit)
            sigma <- .sigma
            sigma[is.na(y)] <- 0
            ymin <- qnorm(alpha / 2) * sigma
            ymax <- - qnorm(alpha / 2) * sigma
            ylims <- c(min(.resids, ymin, na.rm = TRUE), max(.resids, ymax, na.rm = TRUE))
          }
          plot(t, .resids,
            type = "p", xlab = "",
            ylab = "", ylim = ylims,
            col = plotpar$point.col, pch = plotpar$point.pch
          )
          title(plt)
          if (decorate) {
            polygon(c(t, rev(t)),
              c(ymin, rev(ymax)),
              col = plotpar$ci.col, border = plotpar$ci.border
            )
            lines(t, lo$fit, col = plotpar$line.col, lwd = plotpar$line.lwd)
          }
          # Redraw the points on top of the CI polygon.
          points(t, .resids, col = plotpar$point.col, pch = plotpar$point.pch)
          box()
          abline(h = 0, lty = 3)
        })
        mtext("Observation residuals, y - E[y]", side = 2, outer = TRUE, line = -1)
      }
      plot.type <- plot.type[plot.type != "model.resids"]
      cat(paste("plot type = \"model.resids\" Model Residuals\n"))
      if (length(plot.type) != 0) {
        ans <- readline(prompt = "Hit <Return> to see next plot (q to exit): ")
        if (tolower(ans) == "q") {
          return()
        }
      }
    }
    if ("state.resids" %in% plot.type) {
      # make plot of process residuals; set form='marxss' to get process resids
      df <- augment.marssMLE(x, type = "xtT", form = "marxss")
      df$.rownames <- paste0("State ", df$.rownames)
      nX <- min(9, attr(x$model, "model.dims")$x[1])
      plot.nrow <- round(sqrt(nX))
      plot.ncol <- ceiling(nX / plot.nrow)
      par(mfrow = c(plot.nrow, plot.ncol), mar = c(2, 4, 2, 1) + 0.1)
      for (plt in unique(df$.rownames)) {
        with(subset(df, df$.rownames == plt), {
          ylims <- c(min(.resids, na.rm = TRUE), max(.resids, na.rm = TRUE))
          if (decorate) {
            # Same decoration as model.resids but without zeroing sigma
            # (state residuals have no missing-data special case).
            lo <- predict(loess(.resids ~ t), newdata = data.frame(t = t), se = TRUE)
            lo.t <- names(lo$fit)
            ymin <- qnorm(alpha / 2) * .sigma
            ymax <- - qnorm(alpha / 2) * .sigma
            ylims <- c(min(.resids, ymin, na.rm = TRUE), max(.resids, ymax, na.rm = TRUE))
          }
          plot(t, .resids,
            type = "p", xlab = "",
            ylab = "", ylim = ylims,
            col = plotpar$point.col, pch = plotpar$point.pch
          )
          title(plt)
          if (decorate) {
            polygon(c(t, rev(t)),
              c(ymin, rev(ymax)),
              col = plotpar$ci.col, border = plotpar$ci.border
            )
            lines(t, lo$fit, col = plotpar$line.col, lwd = plotpar$line.lwd)
          }
          points(t, .resids, col = plotpar$point.col, pch = plotpar$point.pch)
          box()
          abline(h = 0, lty = 3)
        })
        mtext("State residuals, xtT - E[x]", side = 2, outer = TRUE, line = -1)
      }
      plot.type <- plot.type[plot.type != "state.resids"]
      cat(paste("plot type = \"state.resids\" State Residuals\n"))
      if (length(plot.type) != 0) {
        ans <- readline(prompt = "Hit <Return> to see next plot (q to exit): ")
        if (tolower(ans) == "q") {
          return()
        }
      }
    }
    # Robust slope of the QQ reference line: line through the first and
    # third quartiles of the data vs the standard normal quantiles.
    slp <- function(yy) {
      y <- quantile(yy[!is.na(yy)], c(0.25, 0.75))
      x <- qnorm(c(0.25, 0.75))
      slope <- diff(y) / diff(x)
      return(slope)
    }
    # Matching intercept for the QQ reference line.
    int <- function(yy) {
      y <- quantile(yy[!is.na(yy)], c(0.25, 0.75))
      x <- qnorm(c(0.25, 0.75))
      slope <- diff(y) / diff(x)
      int <- y[1L] - slope * x[1L]
      return(int)
    }
    if ("qqplot.model.resids" %in% plot.type) {
      # make plot of observation residuals
      df <- augment.marssMLE(x, type = "ytT", form = "marxss")
      # Per-series QQ reference lines from the quartiles of the
      # standardized residuals.
      slope <- tapply(df$.std.resid, df$.rownames, slp)
      intercept <- tapply(df$.std.resid, df$.rownames, int)
      nY <- min(9, attr(x$model, "model.dims")$y[1])
      plot.ncol <- round(sqrt(nY))
      plot.nrow <- ceiling(nY / plot.ncol)
      par(mfrow = c(plot.nrow, plot.ncol), mar = c(2, 4, 2, 1) + 0.1)
      for (plt in levels(df$.rownames)) {
        with(subset(df, df$.rownames == plt), {
          qqnorm(.std.resid, main = plt)
          abline(a = intercept[plt], b = slope[plt], col = plotpar$line.col, lwd = plotpar$line.lwd)
        })
      }
      plot.type <- plot.type[plot.type != "qqplot.model.resids"]
      cat(paste("plot type = \"qqplot.model.resids\" QQplot of Model Standardized Residuals\n"))
      if (length(plot.type) != 0) {
        ans <- readline(prompt = "Hit <Return> to see next plot (q to exit): ")
        if (tolower(ans) == "q") {
          return()
        }
      }
    }
    if ("qqplot.state.resids" %in% plot.type) {
      # make qqplot of state residuals
      df <- augment.marssMLE(x, type = "xtT", form = "marxss")
      df$.rownames <- paste0("State ", df$.rownames)
      slope <- tapply(df$.std.resid, df$.rownames, slp)
      intercept <- tapply(df$.std.resid, df$.rownames, int)
      nX <- min(9, attr(x$model, "model.dims")$x[1])
      plot.nrow <- round(sqrt(nX))
      plot.ncol <- ceiling(nX / plot.nrow)
      par(mfrow = c(plot.nrow, plot.ncol), mar = c(2, 4, 2, 1) + 0.1)
      for (plt in unique(df$.rownames)) {
        with(subset(df, df$.rownames == plt), {
          qqnorm(.std.resid, main = plt)
          abline(a = intercept[plt], b = slope[plt], col = plotpar$line.col, lwd = plotpar$line.lwd)
        })
      }
      plot.type <- plot.type[plot.type != "qqplot.state.resids"]
      cat(paste("plot type = \"qqplot.state.resids\" QQplot of State Standardized Residuals\n"))
      if (length(plot.type) != 0) {
        ans <- readline(prompt = "Hit <Return> to see next plot (q to exit): ")
        if (tolower(ans) == "q") {
          return()
        }
      }
    }
  }
|
#' Title Cross-validation for evaluating the performance of subgrouping using win-ratio based value function
#'
#' @param params the list of parameters for xgboost
#' @param data the data of predictors
#' @param label treatment label of subjects
#' @param comp.ind The vector of comparison results
#' @param nrounds the maximum number of boosting rounds.
#' @param nfold number of folds in cross validation
#' @param maximize a logical value, if \code{TRUE}, maximize the loss function, here we set it to \code{FALSE}
#' @param verbose a logical value, whether print out the messages of fitting the model
#' @param early_stopping_rounds the number of rounds before stopping if no improvement, default is 5
#'
#' @return a vector of errors
#' @import xgboost
#' @import caret
#' @export
#'
#' @examples NULL
# k-fold cross-validation of the win-ratio based subgroup boosting model.
# Returns the per-round test error averaged across folds (traces are
# truncated to the shortest fold so every column has all nfold values).
#
# Side effects: sets the RNG seed (123) for reproducible folds, and
# publishes id.train / id.test / comp.ind.train / comp.ind.test to the
# global environment via `<<-` because the custom xgboost objective
# (Myloss.train) and metric (evalerror.test) read them from there.
my.xgb.cv <- function(params,
                      data,
                      label,
                      comp.ind,
                      nrounds,
                      nfold,
                      maximize,
                      verbose,
                      early_stopping_rounds) {
  n <- dim(data)[1]
  # Fixed seed so the fold assignment is reproducible across calls.
  set.seed(123)
  folds <- caret::createFolds(1:n, k = nfold, list = TRUE, returnTrain = TRUE)
  cv.error <- list()
  for (fold.idx in 1:nfold) {
    train.rows <- folds[[fold.idx]]
    data.train <- data[train.rows, ]
    data.test <- data[-train.rows, ]
    label.train <- label[train.rows]
    label.test <- label[-train.rows]
    # Pair index tables (control subject i x treated subject j) for the
    # training and held-out portions of this fold.
    id.train <<- expand.grid(i = 1:length(intersect(which(label == 0), train.rows)), j = 1:length(intersect(which(label == 1), train.rows)))
    id.test <<- expand.grid(i = 1:length(setdiff(which(label == 0), train.rows)), j = 1:length(setdiff(which(label == 1), train.rows)))
    # Pairwise comparison results reshaped control-by-treated, then
    # subset to the train/test pairs of this fold.
    comp.mat <- matrix(comp.ind, nrow = sum(label == 0), ncol = sum(label == 1))
    ctrl.in.train <- which(label == 0) %in% train.rows
    trt.in.train <- which(label == 1) %in% train.rows
    comp.ind.train <<- as.numeric(comp.mat[ctrl.in.train, trt.in.train])
    comp.ind.test <<- as.numeric(comp.mat[!ctrl.in.train, !trt.in.train])
    dtrain <- xgb.DMatrix(as.matrix(data.train), label = label.train)
    dtest <- xgb.DMatrix(as.matrix(data.test), label = label.test)
    watchlist <- list(test = dtest)
    model <- xgb.train(params, dtrain, nrounds = nrounds, watchlist,
                       early_stopping_rounds = early_stopping_rounds,
                       verbose = verbose,
                       maximize = maximize)
    # Keep this fold's per-round test error trace.
    cv.error[[fold.idx]] <- model$evaluation_log$test_OTR_error
  }
  # Truncate every trace to the shortest one, stack fold-by-round, and
  # average across folds per boosting round.
  cv.rounds <- min(sapply(cv.error, function(x) length(x)))
  cv.error.matrix <- do.call(rbind, lapply(cv.error, function(e) e[seq_len(cv.rounds)]))
  test_OTR_error_mean <- colMeans(cv.error.matrix, na.rm = TRUE)
  return(test_OTR_error_mean)
}
# Custom xgboost objective: the negative of the win-ratio based value
# function. Returns the per-subject gradient and a small constant hessian.
#
# preds  raw (logit-scale) scores from the current boosting iteration
# dtrain xgb.DMatrix carrying the treatment label (1 = treated, 0 = control)
#
# NOTE(review): reads `id.train` (pair index table with columns i, j) and
# `comp.ind.train` (pairwise comparison results, +1 win / -1 loss) from
# the global environment; both are set by my.xgb.cv() via `<<-`.
Myloss.train <- function(preds, dtrain) {
  trt01p <- getinfo(dtrain, "label")
  id=id.train
  comp.ind=comp.ind.train
  arm.val <- c(1,0)
  ## (1) Get Time to event Data Ready ##
  dat <- data.frame(trt01p)
  n<-dim(dat)[1]
  dat$id<-c(1:n)
  dat$f <- preds
  # Sigmoid of the raw score: soft membership probability for subgroup 1.
  dat$pred <- 1/(1+exp(-preds))
  # Derivative of the sigmoid w.r.t. the raw score (chain-rule factor).
  dat$predg <- exp(preds)/(1+exp(preds))^2
  gH1.r1.dat <- sum((dat$trt01p==arm.val[1])*dat$pred) #sum of prob for treatment arm in subgroup 1
  gH0.r1.dat <- sum((dat$trt01p==arm.val[2])*dat$pred) # sum of prob for control arm in subgroup 1
  gH1.r2.dat <- sum((dat$trt01p==arm.val[1])*(1-dat$pred)) # sum of prob for treatment arm in subgroup 2
  gH0.r2.dat <- sum((dat$trt01p==arm.val[2])*(1-dat$pred)) # sum of prob for control arm in subgroup 2
  ## subgroup 1 --- r1 ##
  if(gH1.r1.dat > 0 & gH0.r1.dat > 0){ # both arms have subjects in subgroup 1
    # Num win in subgroup 1
    Nw.r1 <- sum(dat$pred[id[,"i"]] * dat$pred[id[,"j"]] * (dat$trt01p[id[,"i"]]==arm.val[2]) * (dat$trt01p[id[,"j"]]==arm.val[1]) * (comp.ind==1))
    # Num lose in subgroup 1
    Nl.r1 <- sum(dat$pred[id[,"i"]] * dat$pred[id[,"j"]] * (dat$trt01p[id[,"i"]]==arm.val[2]) * (dat$trt01p[id[,"j"]]==arm.val[1]) * (comp.ind==-1))
    if(Nw.r1==0) Nw.r1 <- 0.5 # give it a small value if treatment arm always loss
    if(Nl.r1==0) Nl.r1 <- 0.5 # give it a small value if treatment arm always win
    # win ratio of subgroup 1
    Rw.r1 <- Nw.r1/ Nl.r1
    #Rw.r1.g <-(gNw.r1*Nl.r1-gNl.r1*Nw.r1)/Nl.r1^2
    # gradient of log num win in subgroup 1
    # For each subject i, sum the contributions of all pairs in which i
    # appears as control (first sum) or as treated (second sum).
    log.Nw.r1.g <- sapply(1:n, function(i) sum(dat$pred[which(dat$trt01p==1)] * (dat$trt01p[i]==arm.val[2]) * (dat$trt01p[which(dat$trt01p==1)]==arm.val[1]) * (comp.ind[id[,"i"]==i]==1)) +
                            sum(dat$pred[which(dat$trt01p==0)] * (dat$trt01p[which(dat$trt01p==0)]==arm.val[2]) * (dat$trt01p[i]==arm.val[1]) * (comp.ind[id[,"j"]==i]==1)) ) /Nw.r1
    log.Nl.r1.g <- sapply(1:n, function(i) sum(dat$pred[which(dat$trt01p==1)] * (dat$trt01p[i]==arm.val[2]) * (dat$trt01p[which(dat$trt01p==1)]==arm.val[1]) * (comp.ind[id[,"i"]==i]==-1)) +
                            sum(dat$pred[which(dat$trt01p==0)] * (dat$trt01p[which(dat$trt01p==0)]==arm.val[2]) * (dat$trt01p[i]==arm.val[1]) * (comp.ind[id[,"j"]==i]==-1)) ) /Nl.r1
    log.Rw.r1.g <- log.Nw.r1.g - log.Nl.r1.g
  } else{
    Rw.r1 = 1 # do not contribute to the value function
    log.Rw.r1.g = 0 # do not contribute to the gradient
  }
  ## subgroup 2 --- r2 ##
  if(gH1.r2.dat > 0 & gH0.r2.dat > 0){ # both arms have subjects in subgroup 2
    # Num win in subgroup 2
    Nw.r2 <- sum((1-dat$pred)[id[,"i"]] * (1-dat$pred)[id[,"j"]] * (dat$trt01p[id[,"i"]]==arm.val[2]) * (dat$trt01p[id[,"j"]]==arm.val[1]) * (comp.ind== 1))
    # Num loss in subgroup 2
    Nl.r2 <- sum((1-dat$pred)[id[,"i"]] * (1-dat$pred)[id[,"j"]] * (dat$trt01p[id[,"i"]]==arm.val[2]) * (dat$trt01p[id[,"j"]]==arm.val[1]) * (comp.ind== -1))
    if(Nw.r2==0) Nw.r2 <- 0.5 # give it a small value if equals 0
    if(Nl.r2==0) Nl.r2 <- 0.5 # give it a small value if equals 0
    # win ratio of subgroup 2
    Rw.r2 <- Nw.r2/ Nl.r2
    # gradient of log num win in subgroup2
    #Rw.r2.g <-(gNw.r2*Nl.r2-gNl.r2*Nw.r2)/Nl.r2^2
    # Negative sign: subgroup-2 membership weight is (1 - pred).
    log.Nw.r2.g <- - sapply(1:n, function(i) sum((1-dat$pred[which(dat$trt01p==1)]) * (dat$trt01p[i]==arm.val[2]) * (dat$trt01p[which(dat$trt01p==1)]==arm.val[1]) * (comp.ind[id[,"i"]==i]==1)) +
                              sum((1-dat$pred[which(dat$trt01p==0)]) * (dat$trt01p[which(dat$trt01p==0)]==arm.val[2]) * (dat$trt01p[i]==arm.val[1]) * (comp.ind[id[,"j"]==i]==1)) ) /Nw.r2
    log.Nl.r2.g <- - sapply(1:n, function(i) sum((1-dat$pred[which(dat$trt01p==1)]) * (dat$trt01p[i]==arm.val[2]) * (dat$trt01p[which(dat$trt01p==1)]==arm.val[1]) * (comp.ind[id[,"i"]==i]==-1)) +
                              sum((1-dat$pred[which(dat$trt01p==0)]) * (dat$trt01p[which(dat$trt01p==0)]==arm.val[2]) * (dat$trt01p[i]==arm.val[1]) * (comp.ind[id[,"j"]==i]==-1)) ) /Nl.r2
    log.Rw.r2.g <- log.Nw.r2.g - log.Nl.r2.g
  } else {
    Rw.r2 = 1 # do not contribute to the value function
    log.Rw.r2.g = 0 # do not contribute to the gradient
  }
  # Gradient of the value function w.r.t. pred, then chain rule through
  # the sigmoid; negated because xgboost minimizes.
  g.p <- (sum(dat$pred)*log.Rw.r1.g + log(Rw.r1) - sum(1-dat$pred)*log.Rw.r2.g + log(Rw.r2))
  #h.p <- (2*rmst.diff.r1.g + sum(dat$pred)*rmst.diff.r1.h + 2*rmst.diff.r2.g - sum(1-dat$pred)*rmst.diff.r2.h)
  g <- dat$predg*(-1)*g.p
  #h <- (-1)*( (dat$predg)^2 * h.p + g.p*dat$predh)
  g <- g[order(dat$id)]
  #h <- h[order(dat$id)]
  # Near-zero constant hessian: updates are driven by the gradient alone.
  h<-rep(0.00001,n)
  return(list(grad = g, hess = h))
}
# Custom xgboost evaluation metric: negative of the win-ratio based value
# function evaluated on the held-out data.
#
# preds  raw (logit-scale) scores for the test subjects
# dtrain xgb.DMatrix carrying the treatment label (1 = treated, 0 = control)
#
# NOTE(review): reads the pair index table `id.test` and pairwise
# comparison results `comp.ind.test` (+1 win / -1 loss) from the global
# environment; both are set by my.xgb.cv() via `<<-`.
evalerror.test <- function(preds, dtrain) {
  trt01p <- getinfo(dtrain, "label")
  id <- id.test
  comp.ind <- comp.ind.test
  arm.val <- c(1, 0)
  dat <- data.frame(trt01p)
  # Sigmoid of the raw score: soft membership probability for subgroup 1.
  dat$pred <- 1 / (1 + exp(-preds))
  n <- dim(dat)[1]
  # Pair-level indicators: subject i must be control, subject j treated.
  is.ctrl.i <- (dat$trt01p[id[, "i"]] == arm.val[2])
  is.trt.j <- (dat$trt01p[id[, "j"]] == arm.val[1])
  # Soft win ratio within the subgroup whose membership weights are w;
  # zero counts are floored at 0.5 to keep the ratio finite and nonzero.
  win.ratio <- function(w) {
    n.win <- sum(w[id[, "i"]] * w[id[, "j"]] * is.ctrl.i * is.trt.j * (comp.ind == 1))
    n.loss <- sum(w[id[, "i"]] * w[id[, "j"]] * is.ctrl.i * is.trt.j * (comp.ind == -1))
    if (n.win == 0) n.win <- 0.5
    if (n.loss == 0) n.loss <- 0.5
    n.win / n.loss
  }
  # Expected arm sizes within each soft subgroup.
  gH1.r1.dat <- sum((dat$trt01p == arm.val[1]) * dat$pred)
  gH0.r1.dat <- sum((dat$trt01p == arm.val[2]) * dat$pred)
  gH1.r2.dat <- sum((dat$trt01p == arm.val[1]) * (1 - dat$pred))
  gH0.r2.dat <- sum((dat$trt01p == arm.val[2]) * (1 - dat$pred))
  # A subgroup contributes only when both arms are represented in it
  # (win ratio 1 means zero contribution to the value function).
  Rw.r1 <- if (gH1.r1.dat > 0 && gH0.r1.dat > 0) win.ratio(dat$pred) else 1
  Rw.r2 <- if (gH1.r2.dat > 0 && gH0.r2.dat > 0) win.ratio(1 - dat$pred) else 1
  # err is negative of value function
  err <- (-1) * (sum(dat$pred) * log(Rw.r1) - sum(1 - dat$pred) * log(Rw.r2))
  return(list(metric = "OTR_error", value = err))
}
|
/R/my.xgb.cv.R
|
permissive
|
rosyluo/SubgroupBoost
|
R
| false
| false
| 10,294
|
r
|
#' Title Cross-validation for evaluating the performance of subgrouping using win-ratio based value function
#'
#' @param params the list of parameters for xgboost
#' @param data the data of predictors
#' @param label treatment label of subjects
#' @param comp.ind The vector of comparison results
#' @param nrounds the maximum number of boosting rounds.
#' @param nfold number of folds in cross validation
#' @param maximize a logical value, if \code{TRUE}, maximize the loss function, here we set it to \code{FALSE}
#' @param verbose a logical value, whether print out the messages of fitting the model
#' @param early_stopping_rounds the number of rounds before stopping if no improvement, default is 5
#'
#' @return a vector of errors
#' @import xgboost
#' @import caret
#' @export
#'
#' @examples NULL
# k-fold cross-validation of the win-ratio based subgroup boosting model.
# Returns the per-round test error averaged across folds (traces are
# truncated to the shortest fold so every column has all nfold values).
#
# Side effects: sets the RNG seed (123) for reproducible folds, and
# publishes id.train / id.test / comp.ind.train / comp.ind.test to the
# global environment via `<<-` because the custom xgboost objective
# (Myloss.train) and metric (evalerror.test) read them from there.
my.xgb.cv <- function(params,
                      data,
                      label,
                      comp.ind,
                      nrounds,
                      nfold,
                      maximize,
                      verbose,
                      early_stopping_rounds) {
  n <- dim(data)[1]
  # Fixed seed so the fold assignment is reproducible across calls.
  set.seed(123)
  folds <- caret::createFolds(1:n, k = nfold, list = TRUE, returnTrain = TRUE)
  cv.error <- list()
  for (fold.idx in 1:nfold) {
    train.rows <- folds[[fold.idx]]
    data.train <- data[train.rows, ]
    data.test <- data[-train.rows, ]
    label.train <- label[train.rows]
    label.test <- label[-train.rows]
    # Pair index tables (control subject i x treated subject j) for the
    # training and held-out portions of this fold.
    id.train <<- expand.grid(i = 1:length(intersect(which(label == 0), train.rows)), j = 1:length(intersect(which(label == 1), train.rows)))
    id.test <<- expand.grid(i = 1:length(setdiff(which(label == 0), train.rows)), j = 1:length(setdiff(which(label == 1), train.rows)))
    # Pairwise comparison results reshaped control-by-treated, then
    # subset to the train/test pairs of this fold.
    comp.mat <- matrix(comp.ind, nrow = sum(label == 0), ncol = sum(label == 1))
    ctrl.in.train <- which(label == 0) %in% train.rows
    trt.in.train <- which(label == 1) %in% train.rows
    comp.ind.train <<- as.numeric(comp.mat[ctrl.in.train, trt.in.train])
    comp.ind.test <<- as.numeric(comp.mat[!ctrl.in.train, !trt.in.train])
    dtrain <- xgb.DMatrix(as.matrix(data.train), label = label.train)
    dtest <- xgb.DMatrix(as.matrix(data.test), label = label.test)
    watchlist <- list(test = dtest)
    model <- xgb.train(params, dtrain, nrounds = nrounds, watchlist,
                       early_stopping_rounds = early_stopping_rounds,
                       verbose = verbose,
                       maximize = maximize)
    # Keep this fold's per-round test error trace.
    cv.error[[fold.idx]] <- model$evaluation_log$test_OTR_error
  }
  # Truncate every trace to the shortest one, stack fold-by-round, and
  # average across folds per boosting round.
  cv.rounds <- min(sapply(cv.error, function(x) length(x)))
  cv.error.matrix <- do.call(rbind, lapply(cv.error, function(e) e[seq_len(cv.rounds)]))
  test_OTR_error_mean <- colMeans(cv.error.matrix, na.rm = TRUE)
  return(test_OTR_error_mean)
}
# Custom xgboost objective: the negative of the win-ratio based value
# function. Returns the per-subject gradient and a small constant hessian.
#
# preds  raw (logit-scale) scores from the current boosting iteration
# dtrain xgb.DMatrix carrying the treatment label (1 = treated, 0 = control)
#
# NOTE(review): reads `id.train` (pair index table with columns i, j) and
# `comp.ind.train` (pairwise comparison results, +1 win / -1 loss) from
# the global environment; both are set by my.xgb.cv() via `<<-`.
Myloss.train <- function(preds, dtrain) {
  trt01p <- getinfo(dtrain, "label")
  id=id.train
  comp.ind=comp.ind.train
  arm.val <- c(1,0)
  ## (1) Get Time to event Data Ready ##
  dat <- data.frame(trt01p)
  n<-dim(dat)[1]
  dat$id<-c(1:n)
  dat$f <- preds
  # Sigmoid of the raw score: soft membership probability for subgroup 1.
  dat$pred <- 1/(1+exp(-preds))
  # Derivative of the sigmoid w.r.t. the raw score (chain-rule factor).
  dat$predg <- exp(preds)/(1+exp(preds))^2
  gH1.r1.dat <- sum((dat$trt01p==arm.val[1])*dat$pred) #sum of prob for treatment arm in subgroup 1
  gH0.r1.dat <- sum((dat$trt01p==arm.val[2])*dat$pred) # sum of prob for control arm in subgroup 1
  gH1.r2.dat <- sum((dat$trt01p==arm.val[1])*(1-dat$pred)) # sum of prob for treatment arm in subgroup 2
  gH0.r2.dat <- sum((dat$trt01p==arm.val[2])*(1-dat$pred)) # sum of prob for control arm in subgroup 2
  ## subgroup 1 --- r1 ##
  if(gH1.r1.dat > 0 & gH0.r1.dat > 0){ # both arms have subjects in subgroup 1
    # Num win in subgroup 1
    Nw.r1 <- sum(dat$pred[id[,"i"]] * dat$pred[id[,"j"]] * (dat$trt01p[id[,"i"]]==arm.val[2]) * (dat$trt01p[id[,"j"]]==arm.val[1]) * (comp.ind==1))
    # Num lose in subgroup 1
    Nl.r1 <- sum(dat$pred[id[,"i"]] * dat$pred[id[,"j"]] * (dat$trt01p[id[,"i"]]==arm.val[2]) * (dat$trt01p[id[,"j"]]==arm.val[1]) * (comp.ind==-1))
    if(Nw.r1==0) Nw.r1 <- 0.5 # give it a small value if treatment arm always loss
    if(Nl.r1==0) Nl.r1 <- 0.5 # give it a small value if treatment arm always win
    # win ratio of subgroup 1
    Rw.r1 <- Nw.r1/ Nl.r1
    #Rw.r1.g <-(gNw.r1*Nl.r1-gNl.r1*Nw.r1)/Nl.r1^2
    # gradient of log num win in subgroup 1
    # For each subject i, sum the contributions of all pairs in which i
    # appears as control (first sum) or as treated (second sum).
    log.Nw.r1.g <- sapply(1:n, function(i) sum(dat$pred[which(dat$trt01p==1)] * (dat$trt01p[i]==arm.val[2]) * (dat$trt01p[which(dat$trt01p==1)]==arm.val[1]) * (comp.ind[id[,"i"]==i]==1)) +
                            sum(dat$pred[which(dat$trt01p==0)] * (dat$trt01p[which(dat$trt01p==0)]==arm.val[2]) * (dat$trt01p[i]==arm.val[1]) * (comp.ind[id[,"j"]==i]==1)) ) /Nw.r1
    log.Nl.r1.g <- sapply(1:n, function(i) sum(dat$pred[which(dat$trt01p==1)] * (dat$trt01p[i]==arm.val[2]) * (dat$trt01p[which(dat$trt01p==1)]==arm.val[1]) * (comp.ind[id[,"i"]==i]==-1)) +
                            sum(dat$pred[which(dat$trt01p==0)] * (dat$trt01p[which(dat$trt01p==0)]==arm.val[2]) * (dat$trt01p[i]==arm.val[1]) * (comp.ind[id[,"j"]==i]==-1)) ) /Nl.r1
    log.Rw.r1.g <- log.Nw.r1.g - log.Nl.r1.g
  } else{
    Rw.r1 = 1 # do not contribute to the value function
    log.Rw.r1.g = 0 # do not contribute to the gradient
  }
  ## subgroup 2 --- r2 ##
  if(gH1.r2.dat > 0 & gH0.r2.dat > 0){ # both arms have subjects in subgroup 2
    # Num win in subgroup 2
    Nw.r2 <- sum((1-dat$pred)[id[,"i"]] * (1-dat$pred)[id[,"j"]] * (dat$trt01p[id[,"i"]]==arm.val[2]) * (dat$trt01p[id[,"j"]]==arm.val[1]) * (comp.ind== 1))
    # Num loss in subgroup 2
    Nl.r2 <- sum((1-dat$pred)[id[,"i"]] * (1-dat$pred)[id[,"j"]] * (dat$trt01p[id[,"i"]]==arm.val[2]) * (dat$trt01p[id[,"j"]]==arm.val[1]) * (comp.ind== -1))
    if(Nw.r2==0) Nw.r2 <- 0.5 # give it a small value if equals 0
    if(Nl.r2==0) Nl.r2 <- 0.5 # give it a small value if equals 0
    # win ratio of subgroup 2
    Rw.r2 <- Nw.r2/ Nl.r2
    # gradient of log num win in subgroup2
    #Rw.r2.g <-(gNw.r2*Nl.r2-gNl.r2*Nw.r2)/Nl.r2^2
    # Negative sign: subgroup-2 membership weight is (1 - pred).
    log.Nw.r2.g <- - sapply(1:n, function(i) sum((1-dat$pred[which(dat$trt01p==1)]) * (dat$trt01p[i]==arm.val[2]) * (dat$trt01p[which(dat$trt01p==1)]==arm.val[1]) * (comp.ind[id[,"i"]==i]==1)) +
                              sum((1-dat$pred[which(dat$trt01p==0)]) * (dat$trt01p[which(dat$trt01p==0)]==arm.val[2]) * (dat$trt01p[i]==arm.val[1]) * (comp.ind[id[,"j"]==i]==1)) ) /Nw.r2
    log.Nl.r2.g <- - sapply(1:n, function(i) sum((1-dat$pred[which(dat$trt01p==1)]) * (dat$trt01p[i]==arm.val[2]) * (dat$trt01p[which(dat$trt01p==1)]==arm.val[1]) * (comp.ind[id[,"i"]==i]==-1)) +
                              sum((1-dat$pred[which(dat$trt01p==0)]) * (dat$trt01p[which(dat$trt01p==0)]==arm.val[2]) * (dat$trt01p[i]==arm.val[1]) * (comp.ind[id[,"j"]==i]==-1)) ) /Nl.r2
    log.Rw.r2.g <- log.Nw.r2.g - log.Nl.r2.g
  } else {
    Rw.r2 = 1 # do not contribute to the value function
    log.Rw.r2.g = 0 # do not contribute to the gradient
  }
  # Gradient of the value function w.r.t. pred, then chain rule through
  # the sigmoid; negated because xgboost minimizes.
  g.p <- (sum(dat$pred)*log.Rw.r1.g + log(Rw.r1) - sum(1-dat$pred)*log.Rw.r2.g + log(Rw.r2))
  #h.p <- (2*rmst.diff.r1.g + sum(dat$pred)*rmst.diff.r1.h + 2*rmst.diff.r2.g - sum(1-dat$pred)*rmst.diff.r2.h)
  g <- dat$predg*(-1)*g.p
  #h <- (-1)*( (dat$predg)^2 * h.p + g.p*dat$predh)
  g <- g[order(dat$id)]
  #h <- h[order(dat$id)]
  # Near-zero constant hessian: updates are driven by the gradient alone.
  h<-rep(0.00001,n)
  return(list(grad = g, hess = h))
}
# Custom xgboost evaluation metric: negative of the win-ratio based value
# function evaluated on the held-out data.
#
# preds  raw (logit-scale) scores for the test subjects
# dtrain xgb.DMatrix carrying the treatment label (1 = treated, 0 = control)
#
# NOTE(review): reads the pair index table `id.test` and pairwise
# comparison results `comp.ind.test` (+1 win / -1 loss) from the global
# environment; both are set by my.xgb.cv() via `<<-`.
evalerror.test <- function(preds, dtrain) {
  trt01p <- getinfo(dtrain, "label")
  id <- id.test
  comp.ind <- comp.ind.test
  arm.val <- c(1, 0)
  dat <- data.frame(trt01p)
  # Sigmoid of the raw score: soft membership probability for subgroup 1.
  dat$pred <- 1 / (1 + exp(-preds))
  n <- dim(dat)[1]
  # Pair-level indicators: subject i must be control, subject j treated.
  is.ctrl.i <- (dat$trt01p[id[, "i"]] == arm.val[2])
  is.trt.j <- (dat$trt01p[id[, "j"]] == arm.val[1])
  # Soft win ratio within the subgroup whose membership weights are w;
  # zero counts are floored at 0.5 to keep the ratio finite and nonzero.
  win.ratio <- function(w) {
    n.win <- sum(w[id[, "i"]] * w[id[, "j"]] * is.ctrl.i * is.trt.j * (comp.ind == 1))
    n.loss <- sum(w[id[, "i"]] * w[id[, "j"]] * is.ctrl.i * is.trt.j * (comp.ind == -1))
    if (n.win == 0) n.win <- 0.5
    if (n.loss == 0) n.loss <- 0.5
    n.win / n.loss
  }
  # Expected arm sizes within each soft subgroup.
  gH1.r1.dat <- sum((dat$trt01p == arm.val[1]) * dat$pred)
  gH0.r1.dat <- sum((dat$trt01p == arm.val[2]) * dat$pred)
  gH1.r2.dat <- sum((dat$trt01p == arm.val[1]) * (1 - dat$pred))
  gH0.r2.dat <- sum((dat$trt01p == arm.val[2]) * (1 - dat$pred))
  # A subgroup contributes only when both arms are represented in it
  # (win ratio 1 means zero contribution to the value function).
  Rw.r1 <- if (gH1.r1.dat > 0 && gH0.r1.dat > 0) win.ratio(dat$pred) else 1
  Rw.r2 <- if (gH1.r2.dat > 0 && gH0.r2.dat > 0) win.ratio(1 - dat$pred) else 1
  # err is negative of value function
  err <- (-1) * (sum(dat$pred) * log(Rw.r1) - sum(1 - dat$pred) * log(Rw.r2))
  return(list(metric = "OTR_error", value = err))
}
|
voroniRain <- function(start.date,
                       end.date,
                       daily = TRUE,
                       plotVoroni = FALSE,
                       fid = c("D:/Data/AHPC/climate_Daily/dlydatabase.rds",
                               "D:/Data/AHPC/climate_Hourly/hrldatabase.rds")) {
  # Build a Voronoi (Thiessen) tessellation of rain gauges active over a
  # given date range.
  #
  # Arguments:
  #   start.date  character(1), 'yyyy-mm-dd'; stations must start on/before it
  #   end.date    character(1), 'yyyy-mm-dd'; stations must end on/after it
  #   daily       logical; TRUE = daily stations, FALSE = hourly stations
  #   plotVoroni  logical; if TRUE, plot the tessellation and station points
  #   fid         character vector of paths to the station database rds files
  #               (element 1 = daily, last element = hourly); a single path
  #               also works and is used for either mode
  #
  # Value:
  #   A SpatialPolygonsDataFrame: one Thiessen polygon per gauge, carrying
  #   the station name, height, lat/long coordinates, record start/end dates
  #   (may include NAs) and the folder where the rain data are held.
  #
  # Requires: the rds database created by the download scripts, plus the
  # rgdal, sp, dismo and raster packages.
  require(rgdal)
  require(sp)
  require(dismo)
  require(raster)
  #require(rgeos)
  # BUGFIX: the original read `fid` (a length-2 vector) in BOTH branches,
  # so readRDS errored on the default and the hourly DB was never selected.
  db_path <- if (daily) fid[1] else fid[length(fid)]
  dailyDB <- readRDS(db_path)
  # Drop stations with a zero longitude (placeholder for missing coordinates).
  pos2strip <- which(dailyDB$stations$long == 0)
  if (length(pos2strip) > 0) {
    dailyDB$stations <- dailyDB$stations[-pos2strip, ]
  }
  stationDaily <- SpatialPointsDataFrame(
    coords = dailyDB$stations[, c("long", "lat")],
    data = dailyDB$stations,
    proj4string = CRS("+proj=longlat +datum=WGS84 +ellps=WGS84 +towgs84=0,0,0"))
  # Keep only stations whose record spans [start.date, end.date].
  pos.start <- which(as.Date(stationDaily$start) <= as.Date(start.date))
  pos.end <- which(as.Date(stationDaily$end) >= as.Date(end.date))
  if (length(pos.start) == 0) stop("Start Date not in Range")
  if (length(pos.end) == 0) stop("End Date not in Range")
  # BUGFIX: the original intersected pos.start with itself, so the
  # end-date filter was silently ignored.
  pos.indates <- intersect(pos.start, pos.end)
  if (length(pos.indates) == 0) stop("No data in date range")
  stationDailyTrimmed <- stationDaily[pos.indates, ]
  #irelnd <- spTransform(irelnd, CRSobj = CRS(proj4string(stationDaily)))
  # Thiessen polygons from the station coordinates
  # (ext = c() in the original is identical to the NULL default, dropped).
  xys <- coordinates(stationDailyTrimmed)
  vor <- voronoi(xys)
  proj4string(vor) <- "+proj=longlat +datum=WGS84 +ellps=WGS84 +towgs84=0,0,0"
  # Attach the station attributes to each polygon.
  vordat <- over(vor, stationDailyTrimmed)
  for (att in c("station", "height", "lat", "long", "start", "end", "folder")) {
    vor[[att]] <- vordat[[att]]
  }
  # TODO: clip the tessellation with a map of Ireland,
  # e.g. vor2 <- intersect(vor, ireland)
  if (plotVoroni) {
    plot(vor)
    points(stationDailyTrimmed)
  }
  return(vor)
}
|
/MetData/voroniDailyRain.R
|
no_license
|
gavanmcgrath/Teagasc
|
R
| false
| false
| 3,223
|
r
|
voroniRain <- function(start.date,
                       end.date,
                       daily = TRUE,
                       plotVoroni = FALSE,
                       fid = c("D:/Data/AHPC/climate_Daily/dlydatabase.rds",
                               "D:/Data/AHPC/climate_Hourly/hrldatabase.rds")) {
  # Build a Voronoi (Thiessen) tessellation of rain gauges active over a
  # given date range.
  #
  # Arguments:
  #   start.date  character(1), 'yyyy-mm-dd'; stations must start on/before it
  #   end.date    character(1), 'yyyy-mm-dd'; stations must end on/after it
  #   daily       logical; TRUE = daily stations, FALSE = hourly stations
  #   plotVoroni  logical; if TRUE, plot the tessellation and station points
  #   fid         character vector of paths to the station database rds files
  #               (element 1 = daily, last element = hourly); a single path
  #               also works and is used for either mode
  #
  # Value:
  #   A SpatialPolygonsDataFrame: one Thiessen polygon per gauge, carrying
  #   the station name, height, lat/long coordinates, record start/end dates
  #   (may include NAs) and the folder where the rain data are held.
  #
  # Requires: the rds database created by the download scripts, plus the
  # rgdal, sp, dismo and raster packages.
  require(rgdal)
  require(sp)
  require(dismo)
  require(raster)
  #require(rgeos)
  # BUGFIX: the original read `fid` (a length-2 vector) in BOTH branches,
  # so readRDS errored on the default and the hourly DB was never selected.
  db_path <- if (daily) fid[1] else fid[length(fid)]
  dailyDB <- readRDS(db_path)
  # Drop stations with a zero longitude (placeholder for missing coordinates).
  pos2strip <- which(dailyDB$stations$long == 0)
  if (length(pos2strip) > 0) {
    dailyDB$stations <- dailyDB$stations[-pos2strip, ]
  }
  stationDaily <- SpatialPointsDataFrame(
    coords = dailyDB$stations[, c("long", "lat")],
    data = dailyDB$stations,
    proj4string = CRS("+proj=longlat +datum=WGS84 +ellps=WGS84 +towgs84=0,0,0"))
  # Keep only stations whose record spans [start.date, end.date].
  pos.start <- which(as.Date(stationDaily$start) <= as.Date(start.date))
  pos.end <- which(as.Date(stationDaily$end) >= as.Date(end.date))
  if (length(pos.start) == 0) stop("Start Date not in Range")
  if (length(pos.end) == 0) stop("End Date not in Range")
  # BUGFIX: the original intersected pos.start with itself, so the
  # end-date filter was silently ignored.
  pos.indates <- intersect(pos.start, pos.end)
  if (length(pos.indates) == 0) stop("No data in date range")
  stationDailyTrimmed <- stationDaily[pos.indates, ]
  #irelnd <- spTransform(irelnd, CRSobj = CRS(proj4string(stationDaily)))
  # Thiessen polygons from the station coordinates
  # (ext = c() in the original is identical to the NULL default, dropped).
  xys <- coordinates(stationDailyTrimmed)
  vor <- voronoi(xys)
  proj4string(vor) <- "+proj=longlat +datum=WGS84 +ellps=WGS84 +towgs84=0,0,0"
  # Attach the station attributes to each polygon.
  vordat <- over(vor, stationDailyTrimmed)
  for (att in c("station", "height", "lat", "long", "start", "end", "folder")) {
    vor[[att]] <- vordat[[att]]
  }
  # TODO: clip the tessellation with a map of Ireland,
  # e.g. vor2 <- intersect(vor, ireland)
  if (plotVoroni) {
    plot(vor)
    points(stationDailyTrimmed)
  }
  return(vor)
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/ecs_operations.R
\name{ecs_stop_task}
\alias{ecs_stop_task}
\title{Stops a running task}
\usage{
ecs_stop_task(cluster, task, reason)
}
\arguments{
\item{cluster}{The short name or full Amazon Resource Name (ARN) of the cluster that
hosts the task to stop. If you do not specify a cluster, the default
cluster is assumed.}
\item{task}{[required] The task ID or full Amazon Resource Name (ARN) of the task to stop.}
\item{reason}{An optional message specified when a task is stopped. For example, if
you are using a custom scheduler, you can use this parameter to specify
the reason for stopping the task here, and the message appears in
subsequent DescribeTasks API operations on this task. Up to 255
characters are allowed in this message.}
}
\description{
Stops a running task. Any tags associated with the task will be deleted.
}
\details{
When StopTask is called on a task, the equivalent of \code{docker stop} is
issued to the containers running in the task. This results in a
\code{SIGTERM} value and a default 30-second timeout, after which the
\code{SIGKILL} value is sent and the containers are forcibly stopped. If the
container handles the \code{SIGTERM} value gracefully and exits within 30
seconds from receiving it, no \code{SIGKILL} value is sent.
The default 30-second timeout can be configured on the Amazon ECS
container agent with the \code{ECS_CONTAINER_STOP_TIMEOUT} variable. For more
information, see \href{https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ecs-agent-config.html}{Amazon ECS Container Agent Configuration}
in the \emph{Amazon Elastic Container Service Developer Guide}.
}
\section{Request syntax}{
\preformatted{svc$stop_task(
cluster = "string",
task = "string",
reason = "string"
)
}
}
\keyword{internal}
|
/cran/paws.compute/man/ecs_stop_task.Rd
|
permissive
|
peoplecure/paws
|
R
| false
| true
| 1,844
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/ecs_operations.R
\name{ecs_stop_task}
\alias{ecs_stop_task}
\title{Stops a running task}
\usage{
ecs_stop_task(cluster, task, reason)
}
\arguments{
\item{cluster}{The short name or full Amazon Resource Name (ARN) of the cluster that
hosts the task to stop. If you do not specify a cluster, the default
cluster is assumed.}
\item{task}{[required] The task ID or full Amazon Resource Name (ARN) of the task to stop.}
\item{reason}{An optional message specified when a task is stopped. For example, if
you are using a custom scheduler, you can use this parameter to specify
the reason for stopping the task here, and the message appears in
subsequent DescribeTasks API operations on this task. Up to 255
characters are allowed in this message.}
}
\description{
Stops a running task. Any tags associated with the task will be deleted.
}
\details{
When StopTask is called on a task, the equivalent of \code{docker stop} is
issued to the containers running in the task. This results in a
\code{SIGTERM} value and a default 30-second timeout, after which the
\code{SIGKILL} value is sent and the containers are forcibly stopped. If the
container handles the \code{SIGTERM} value gracefully and exits within 30
seconds from receiving it, no \code{SIGKILL} value is sent.
The default 30-second timeout can be configured on the Amazon ECS
container agent with the \code{ECS_CONTAINER_STOP_TIMEOUT} variable. For more
information, see \href{https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ecs-agent-config.html}{Amazon ECS Container Agent Configuration}
in the \emph{Amazon Elastic Container Service Developer Guide}.
}
\section{Request syntax}{
\preformatted{svc$stop_task(
cluster = "string",
task = "string",
reason = "string"
)
}
}
\keyword{internal}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/regression_ols.R
\name{regr_ols}
\alias{regr_ols}
\title{OLS Regression}
\usage{
regr_ols()
}
\value{
A linear regression model for continuous outcomes,
with class 'learner'.
}
\description{
OLS Regression
}
|
/man/regr_ols.Rd
|
no_license
|
adviksh/learners
|
R
| false
| true
| 286
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/regression_ols.R
\name{regr_ols}
\alias{regr_ols}
\title{OLS Regression}
\usage{
regr_ols()
}
\value{
A linear regression model for continuous outcomes,
with class 'learner'.
}
\description{
OLS Regression
}
|
#!/bin/R
### Map QTLs 1 of 3
### Orders markers on a single chromosome of the ELR cross; the chromosome
### number (1-24) is taken from the command line.
library('qtl')
################################################################################
## read in the QTL cross
################################################################################
# Keep only the command-line argument that is a chromosome number (1-24).
# Hoisted the repeated commandArgs(TRUE) call into a single variable.
args <- commandArgs(TRUE)
i <- args[args %in% 1:24]
print(i)
mpath <- '/home/jmiller1/QTL_Map_Raw/ELR_final_map'
fl <- file.path(mpath, 'ELR_subsetted.csv')
cross <- read.cross(
  file = fl,
  format = "csv", genotypes = c("AA", "AB", "BB"), alleles = c("A", "B"),
  estimate.map = FALSE
)
################################################################################
# Drop individuals that are missing genotypes at 50% or more of the markers.
nmars <- nmar(cross)
cross <- subset(cross, ind = nmissing(cross) < (nmars * .5))
################################################################################
# Remove markers whose genotypes exactly duplicate another marker's,
# searching genome-wide (not only adjacent markers); then keep only the
# chromosome selected on the command line.
dups <- findDupMarkers(cross, exact.only = TRUE, adjacent.only = FALSE)
cross <- drop.markers(cross, unlist(dups))
cross <- subset(cross, chr = i)
##cross <- est.map(cross, error.prob=0.1, map.function="kosambi",sex.sp=F,chr=i)
################################################################################
# Order the markers on the selected chromosome (ripple search, window of 7,
# Kosambi map function, a single EM iteration).
cross <- orderMarkers(cross, window = 7, verbose = FALSE, chr = i,
                      use.ripple = TRUE, error.prob = 0.01, sex.sp = FALSE,
                      map.function = "kosambi", maxit = 1, tol = 1e-4)
filename <- paste0('/home/jmiller1/QTL_Map_Raw/ELR_final_map/ELR_mapped_chr_', i)
write.cross(cross, chr = i, filestem = filename, format = "csv")
################################################################################
|
/MAP/backup/elr_map.R
|
no_license
|
jthmiller/QTL_agri
|
R
| false
| false
| 1,462
|
r
|
#!/bin/R
### Map QTLs 1 of 3
### Orders markers on a single chromosome of the ELR cross; the chromosome
### number (1-24) is taken from the command line.
library('qtl')
################################################################################
## read in the QTL cross
################################################################################
# Keep only the command-line argument that is a chromosome number (1-24).
# Hoisted the repeated commandArgs(TRUE) call into a single variable.
args <- commandArgs(TRUE)
i <- args[args %in% 1:24]
print(i)
mpath <- '/home/jmiller1/QTL_Map_Raw/ELR_final_map'
fl <- file.path(mpath, 'ELR_subsetted.csv')
cross <- read.cross(
  file = fl,
  format = "csv", genotypes = c("AA", "AB", "BB"), alleles = c("A", "B"),
  estimate.map = FALSE
)
################################################################################
# Drop individuals that are missing genotypes at 50% or more of the markers.
nmars <- nmar(cross)
cross <- subset(cross, ind = nmissing(cross) < (nmars * .5))
################################################################################
# Remove markers whose genotypes exactly duplicate another marker's,
# searching genome-wide (not only adjacent markers); then keep only the
# chromosome selected on the command line.
dups <- findDupMarkers(cross, exact.only = TRUE, adjacent.only = FALSE)
cross <- drop.markers(cross, unlist(dups))
cross <- subset(cross, chr = i)
##cross <- est.map(cross, error.prob=0.1, map.function="kosambi",sex.sp=F,chr=i)
################################################################################
# Order the markers on the selected chromosome (ripple search, window of 7,
# Kosambi map function, a single EM iteration).
cross <- orderMarkers(cross, window = 7, verbose = FALSE, chr = i,
                      use.ripple = TRUE, error.prob = 0.01, sex.sp = FALSE,
                      map.function = "kosambi", maxit = 1, tol = 1e-4)
filename <- paste0('/home/jmiller1/QTL_Map_Raw/ELR_final_map/ELR_mapped_chr_', i)
write.cross(cross, chr = i, filestem = filename, format = "csv")
################################################################################
|
# Auto-extracted example from the 'abd' package help page for the
# 'Sparrows' dataset; evaluating the name prints the data.
library(abd)
### Name: Sparrows
### Title: Lifetime Reproductive Success in House Sparrows
### Aliases: Sparrows
### Keywords: datasets
### ** Examples
Sparrows
|
/data/genthat_extracted_code/abd/examples/Sparrows.Rd.R
|
no_license
|
surayaaramli/typeRrh
|
R
| false
| false
| 168
|
r
|
# Auto-extracted example from the 'abd' package help page for the
# 'Sparrows' dataset; evaluating the name prints the data.
library(abd)
### Name: Sparrows
### Title: Lifetime Reproductive Success in House Sparrows
### Aliases: Sparrows
### Keywords: datasets
### ** Examples
Sparrows
|
# Split the weekly NFL stats into stratified training/testing/validation
# sets, drawing a fixed number of games from every season so that all
# seasons are represented in each split. Writes three CSV files.
library(splitstackshape)
all.data<-read.csv("weeklynflstats.csv")[,-1] #dim: 5258 x 13 (first column is a row index, dropped)
all.data$id<-as.numeric(rownames(all.data)) # row id, used to remove already-sampled rows later
all.data$season<-as.factor(all.data$season)
all.data$week<-as.factor(all.data$week)
table(all.data$season,all.data$week) #games per season-week cell; min: 22, max: 30
margin.table(table(all.data$season,all.data$week),1) #by year; min: 466, max: 494
margin.table(table(all.data$season,all.data$week),2) #by week; min: 288, max: 324
#Target proportions: train/test/validate
#50/30/20
#which for 5258 rows would be roughly 2629/1577/1052
#from each season take-
#239 train / 143 test (~.36 of the remainder) / ~95 validation (~.63 of what is left)
set.seed(4141993)
train<-stratified(all.data,"season",239) #dim: 2629 x 14
train.rows<-sort(train$id)
margin.table(table(train$season,train$week),1) # sanity check: season balance of training set
margin.table(table(train$season,train$week),2) # sanity check: week balance of training set
leftover<-all.data[-train.rows,] # rows not yet assigned to any split
margin.table(table(leftover$season,leftover$week),1)
margin.table(table(leftover$season,leftover$week),2)
set.seed(4141993)
test<-stratified(leftover,"season",143) #dim: 1573 x 14 (slightly under the 1577 target)
test.rows<-sort(test$id)
margin.table(table(test$season,test$week),1)
margin.table(table(test$season,test$week),2)
valid<-all.data[-c(test.rows,train.rows),] #dim: 1056 x 14 (everything not in train or test)
margin.table(table(valid$season,valid$week),1)
margin.table(table(valid$season,valid$week),2)
write.csv(train,"trainingdata.csv")
write.csv(test,"testingdata.csv")
write.csv(valid,"validationdata.csv")
|
/splitting.R
|
no_license
|
justingomez/Writing-Project
|
R
| false
| false
| 1,323
|
r
|
# Split the weekly NFL stats into stratified training/testing/validation
# sets, drawing a fixed number of games from every season so that all
# seasons are represented in each split. Writes three CSV files.
library(splitstackshape)
all.data<-read.csv("weeklynflstats.csv")[,-1] #dim: 5258 x 13 (first column is a row index, dropped)
all.data$id<-as.numeric(rownames(all.data)) # row id, used to remove already-sampled rows later
all.data$season<-as.factor(all.data$season)
all.data$week<-as.factor(all.data$week)
table(all.data$season,all.data$week) #games per season-week cell; min: 22, max: 30
margin.table(table(all.data$season,all.data$week),1) #by year; min: 466, max: 494
margin.table(table(all.data$season,all.data$week),2) #by week; min: 288, max: 324
#Target proportions: train/test/validate
#50/30/20
#which for 5258 rows would be roughly 2629/1577/1052
#from each season take-
#239 train / 143 test (~.36 of the remainder) / ~95 validation (~.63 of what is left)
set.seed(4141993)
train<-stratified(all.data,"season",239) #dim: 2629 x 14
train.rows<-sort(train$id)
margin.table(table(train$season,train$week),1) # sanity check: season balance of training set
margin.table(table(train$season,train$week),2) # sanity check: week balance of training set
leftover<-all.data[-train.rows,] # rows not yet assigned to any split
margin.table(table(leftover$season,leftover$week),1)
margin.table(table(leftover$season,leftover$week),2)
set.seed(4141993)
test<-stratified(leftover,"season",143) #dim: 1573 x 14 (slightly under the 1577 target)
test.rows<-sort(test$id)
margin.table(table(test$season,test$week),1)
margin.table(table(test$season,test$week),2)
valid<-all.data[-c(test.rows,train.rows),] #dim: 1056 x 14 (everything not in train or test)
margin.table(table(valid$season,valid$week),1)
margin.table(table(valid$season,valid$week),2)
write.csv(train,"trainingdata.csv")
write.csv(test,"testingdata.csv")
write.csv(valid,"validationdata.csv")
|
#' Test of Restrictions on FCVAR Model
#'
#' \code{FCVARhypoTest} performs a likelihood ratio test of the null
#' hypothesis: "model is \code{modelR}" against the alternative hypothesis:
#' "model is \code{modelUNR}".
#'
#' @param modelUNR A list of estimation results created for the unrestricted model.
#' @param modelR A list of estimation results created for the restricted model.
#' @return A list \code{LRtest} containing the test results,
#' including the following parameters:
#' \describe{
#'   \item{\code{loglikUNR}}{The log-likelihood for the unrestricted model.}
#'   \item{\code{loglikR}}{The log-likelihood for the restricted model.}
#'   \item{\code{df}}{The degrees of freedom for the test.}
#'   \item{\code{LRstat}}{The likelihood ratio test statistic.}
#'   \item{\code{pv}}{The p-value for the likelihood ratio test.}
#' }
#' @examples
#' \donttest{
#' opt <- FCVARoptions()
#' opt$gridSearch   <- 0 # Disable grid search in optimization.
#' opt$dbMin        <- c(0.01, 0.01) # Set lower bound for d,b.
#' opt$dbMax        <- c(2.00, 2.00) # Set upper bound for d,b.
#' opt$constrained  <- 0 # Impose restriction dbMax >= d >= b >= dbMin ? 1 <- yes, 0 <- no.
#' x <- votingJNP2014[, c("lib", "ir_can", "un_can")]
#' m1 <- FCVARestn(x, k = 2, r = 1, opt)
#' opt1 <- opt
#' opt1$R_psi <- matrix(c(1, 0), nrow = 1, ncol = 2)
#' opt1$r_psi <- 1
#' m1r1 <- FCVARestn(x, k = 2, r = 1, opt1)
#' Hdb <- FCVARhypoTest(modelUNR = m1, modelR = m1r1)
#' }
#'
#' \donttest{
#' opt1 <- opt
#' opt1$R_Beta <- matrix(c(1, 0, 0), nrow = 1, ncol = 3)
#' m1r2 <- FCVARestn(x, k = 2, r = 1, opt1)
#' Hbeta1 <- FCVARhypoTest(m1, m1r2)
#' }
#'
#' \donttest{
#' opt1 <- opt
#' opt1$R_Alpha <- matrix(c(0, 1, 0), nrow = 1, ncol = 3)
#' m1r4 <- FCVARestn(x, k = 2, r = 1, opt1)
#' Halpha2 <- FCVARhypoTest(m1, m1r4)
#' }
#' @family FCVAR postestimation functions
#' @seealso The test is calculated using the results of two calls to
#' \code{FCVARestn}, under the restricted and unrestricted models.
#' Use \code{FCVARoptions} to set default estimation options for each model,
#' then set restrictions as needed before \code{FCVARestn}.
#' @export
#'
FCVARhypoTest <- function(modelUNR, modelR) {
  # Error handling for reverse-ordered likelihood values.
  if (modelUNR$like < modelR$like) {
    stop(c('Likelihood value from restricted model is larger than that from unrestricted model.\n',
           '  This could occur for a few reasons. Verify that the following conditions hold:\n',
           '1. You have not confused the arguments, i.e., misplaced the unrestricted and restricted model.\n',
           '2. The restricted model is nested in the unrestricted model. That is, the estimates from\n',
           '   the restricted model should also satisfy any identifying restrictions of the unrestricted model.\n',
           '3. The unrestricted model still has enough restrictions to identify alpha and beta.\n',
           '   For example, the default restriction is to set the upper rxr block of beta to the identity matrix.\n',
           '4. The likelihood functions are truly optimized. Plot the likelihood function with FCVARlikeGrid,\n',
           '   and use the optimal grid point as a starting value.\n',
           '   Use a finer grid or stronger convergence criteria, if necessary.'))
  }
  # Warning for identical likelihood values.
  if (modelUNR$like == modelR$like) {
    warning(c('Likelihood value from restricted model is equal to that from unrestricted model.\n',
              '  Although this is not impossible, it is a negligible event. Verify that the following conditions hold:\n',
              '1. You have not confused the arguments, i.e., passed the same model to both the unrestricted and\n',
              '   restricted model.\n',
              '2. The restricted model is nested in the unrestricted model. That is, the estimates from\n',
              '   the restricted model should also satisfy any identifying restrictions of the unrestricted model.\n',
              '3. The unrestricted model still has enough restrictions to identify alpha and beta.\n',
              '   For example, the default restriction is to set the upper rxr block of beta to the identity matrix.\n',
              '4. The likelihood functions are truly optimized. Plot the likelihood function with FCVARlikeGrid,\n',
              '   and use the optimal grid point as a starting value.\n',
              '   Use a finer grid or stronger convergence criteria, if necessary.'))
  }
  # Error handling when no reduction of the number of free parameters.
  if (modelUNR$fp <= modelR$fp) {
    stop(c('Unrestricted model does not have more free parameters than restricted model.\n',
           '  This could occur for a few reasons. Verify that the following conditions hold:\n',
           '1. You have not confused the arguments, i.e., misplaced the unrestricted and restricted model.\n',
           '2. The restricted model is nested in the unrestricted model. That is, the estimates from\n',
           '   the restricted model should also satisfy any identifying restrictions of the unrestricted model.'))
  }
  # Calculate the test statistic.
  LR_test <- 2*(modelUNR$like - modelR$like)
  # Calculate the degrees of freedom by taking the difference in free
  # parameters between the unrestricted and restricted model.
  df <- modelUNR$fp - modelR$fp
  # Calculate the P-value for the test.
  # Use lower.tail = FALSE rather than 1 - pchisq(...) to avoid a loss of
  # precision for very small p-values (catastrophic cancellation near 1).
  p_LRtest <- stats::pchisq(LR_test, df, lower.tail = FALSE)
  # Print output.
  cat(sprintf('Likelihood ratio test results:'))
  cat(sprintf('\nUnrestricted log-likelihood: %3.3f\nRestricted log-likelihood: %3.3f\n',
              modelUNR$like, modelR$like))
  cat(sprintf('Test results (df = %1.0f):\nLR statistic: \t %3.3f\nP-value: \t %1.3f\n',
              df,LR_test,p_LRtest))
  # Return the test results in a list.
  LRtest <- list(
    loglikUNR = modelUNR$like,
    loglikR = modelR$like,
    df = df,
    LRstat = LR_test,
    pv = p_LRtest
  )
  return(LRtest)
}
#' Bootstrap Likelihood Ratio Test
#'
#' \code{FCVARboot} generates a distribution of a likelihood ratio
#' test statistic using a wild bootstrap, following the method of
#' Boswijk, Cavaliere, Rahbek, and Taylor (2016). It takes two sets
#' of options as inputs to estimate the model under the null and the
#' unrestricted model.
#'
#' @param x A matrix of variables to be included in the system.
#' @param k The number of lags in the system.
#' @param r The cointegrating rank.
#' @param optRES An S3 object of class \code{FCVAR_opt} that stores the chosen estimation options
#' for the restricted model, as generated from \code{FCVARoptions()},
#' with adjustments as necessary.
#' @param optUNR An S3 object of class \code{FCVAR_opt} that stores the chosen estimation options
#' for the unrestricted model.
#' @param B The number of bootstrap samples.
#' @return A list \code{FCVARboot_stats} containing the estimation results,
#' including the following parameters:
#' \describe{
#'   \item{\code{LRbs}}{A \eqn{B x 1} vector of simulated likelihood ratio statistics}
#'   \item{\code{pv}}{An approximate p-value for the likelihood ratio statistic
#'   based on the bootstrap distribution.}
#'   \item{\code{H}}{A list containing the likelihood ratio test results.
#'   It is identical to the output from \code{FCVARhypoTest}, with one addition,
#'   namely \code{H$pvBS} which is the bootstrap p-value}
#'   \item{\code{mBS}}{The model estimates under the null hypothesis.}
#'   \item{\code{mUNR}}{The model estimates under the alternative hypothesis.}
#' }
#' @examples
#' \donttest{
#' opt <- FCVARoptions()
#' opt$gridSearch   <- 0 # Disable grid search in optimization.
#' opt$dbMin        <- c(0.01, 0.01) # Set lower bound for d,b.
#' opt$dbMax        <- c(2.00, 2.00) # Set upper bound for d,b.
#' opt$constrained  <- 0 # Impose restriction dbMax >= d >= b >= dbMin ? 1 <- yes, 0 <- no.
#' x <- votingJNP2014[, c("lib", "ir_can", "un_can")]
#' opt$plotRoots <- 0
#' optUNR <- opt
#' optRES <- opt
#' optRES$R_Beta <- matrix(c(1, 0, 0), nrow = 1, ncol = 3)
#' set.seed(42)
#' FCVARboot_stats <- FCVARboot(x, k = 2, r = 1, optRES, optUNR, B = 2)
#' # In practice, set the number of bootstraps so that (B+1)*alpha is an integer,
#' # where alpha is the chosen level of significance.
#' # For example, set B = 999 (but it takes a long time to compute).
#' }
#' @family FCVAR postestimation functions
#' @seealso \code{FCVARoptions} to set default estimation options.
#' \code{FCVARestn} is called to estimate the models under the null and alternative hypotheses.
#' @references Boswijk, Cavaliere, Rahbek, and Taylor (2016)
#' "Inference on co-integration parameters in heteroskedastic
#' vector autoregressions," Journal of Econometrics 192, 64-85.
#' @export
#'
FCVARboot <- function(x, k, r, optRES, optUNR, B) {
  # Number of observations to simulate per replication, net of the
  # initial values that are held fixed across replications.
  cap_T <- nrow(x) - optRES$N
  # The first k+1 rows of the observed sample seed every bootstrap series.
  init_vals <- x[1:(k+1), ]
  LR <- matrix(0, nrow = B, ncol = 1)
  # Silence printed output and skip standard-error computation: only the
  # likelihood values are needed inside the bootstrap loop.
  optUNR$print2screen <- 0
  optRES$print2screen <- 0
  optUNR$CalcSE <- 0
  optRES$CalcSE <- 0
  # Estimate the restricted (null) and unrestricted models on the data.
  mBS <- FCVARestn(x, k, r, optRES)
  mUNR <- FCVARestn(x, k, r, optUNR)
  cat(sprintf('\nHypothesis test to bootstrap:\n'))
  H <- FCVARhypoTest(mUNR, mBS)
  # Emit a progress message every show_iters replications.
  show_iters <- 10
  for (j in 1:B) {
    if ((j + 1) %% show_iters == 0) {
      message(sprintf('Completed bootstrap replication %d of %d.', j, B))
    }
    # (1) Draw a wild-bootstrap sample under the null ...
    xBS <- FCVARsimBS(init_vals, mBS, cap_T)
    # ... and prepend the fixed initial values.
    BSs <- rbind(init_vals, xBS)
    # (2) Estimate the unrestricted model on the bootstrap sample.
    mUNRbs <- FCVARestn(BSs, k, r, optUNR)
    # (3) Estimate the restricted model (under the null).
    mRES <- FCVARestn(BSs, k, r, optRES)
    # (4) Likelihood ratio statistic for this replication.
    LR[j] <- -2*(mRES$like - mUNRbs$like)
  }
  # Sorted bootstrap distribution of the LR statistic.
  LRbs <- sort(LR)
  # Bootstrap p-value: the share of simulated statistics that exceed the
  # observed one (see ETM p.157 eq 4.62).
  H$pvBS <- sum(LRbs > H$LRstat)/B
  # Report the test, including the bootstrap p-value.
  cat(sprintf('Bootstrap likelihood ratio test results:'))
  cat(sprintf('\nUnrestricted log-likelihood: %3.3f\nRestricted log-likelihood: %3.3f\n',
              H$loglikUNR, H$loglikR))
  cat(sprintf('Test results (df = %1.0f):\nLR statistic: \t %3.3f\nP-value: \t %1.3f\n',
              H$df, H$LRstat, H$pv))
  cat(sprintf('P-value (BS): \t %1.3f\n', H$pvBS))
  # Collect the bootstrap results in a list.
  FCVARboot_stats <- list(
    LRbs = LRbs,
    H = H,
    mBS = mBS,
    mUNR = mUNR
  )
  return(FCVARboot_stats)
}
#' Forecasts with the FCVAR Model
#'
#' \code{FCVARforecast} calculates recursive forecasts with the FCVAR model.
#'
#' @param x A matrix of variables to be included in the system.
#' The forecast will be calculated using these values as starting values.
#' @param model A list of estimation results, just as if estimated from \code{FCVARest}.
#' The parameters in \code{model} can also be set or adjusted by assigning new values.
#' @param NumPeriods The number of time periods in the simulation.
#' @return A \code{NumPeriods} \eqn{\times p} matrix \code{xf} of forecasted values.
#' @examples
#' \donttest{
#' opt <- FCVARoptions()
#' opt$gridSearch   <- 0 # Disable grid search in optimization.
#' opt$dbMin        <- c(0.01, 0.01) # Set lower bound for d,b.
#' opt$dbMax        <- c(2.00, 2.00) # Set upper bound for d,b.
#' opt$constrained  <- 0 # Impose restriction dbMax >= d >= b >= dbMin ? 1 <- yes, 0 <- no.
#' x <- votingJNP2014[, c("lib", "ir_can", "un_can")]
#' opt1 <- opt
#' opt1$R_Alpha <- matrix(c(0, 1, 0), nrow = 1, ncol = 3)
#' m1r4 <- FCVARestn(x, k = 2, r = 1, opt1)
#' xf <- FCVARforecast(x, m1r4, NumPeriods = 12)
#' }
#' @family FCVAR auxiliary functions
#' @seealso \code{FCVARoptions} to set default estimation options.
#' \code{FCVARestn} for the specification of the \code{model}.
#' \code{FCVARforecast} calls \code{FracDiff} and \code{Lbk} to calculate the forecast.
#' @export
#'
FCVARforecast <- function(x, model, NumPeriods) {
  # Unpack the system dimension, estimation options and coefficients.
  p <- ncol(x)
  opt <- model$options
  cf <- model$coeffs
  d <- cf$db[1]
  b <- cf$db[2]
  # Forecasts are generated one period at a time: each new forecast is
  # appended to the sample before the next one is computed.
  xf <- x
  for (h in 1:NumPeriods) {
    # Placeholder row of zeros for the period being forecast.
    xf <- rbind(xf, rep(0, p))
    cap_T <- nrow(xf)
    # Demean with the level parameter when the model includes one.
    if(opt$levelParam) {
      y <- xf - matrix(1, nrow = cap_T, ncol = 1) %*% cf$muHat
    } else {
      y <- xf
    }
    # Main term: the fractional lag of the (demeaned) series.
    fc <- Lbk(y, d, 1)
    # Error-correction term, present when alpha is estimated.
    if(!is.null(cf$alphaHat)) {
      fc <- fc + FracDiff( Lbk(y, b, 1), d - b ) %*% t(cf$PiHat)
      # Restricted constant inside the cointegrating relation.
      if(opt$rConstant) {
        fc <- fc + FracDiff( Lbk(matrix(1, nrow = cap_T, ncol = 1), b, 1), d - b ) %*%
          cf$rhoHat %*% t(cf$alphaHat)
      }
    }
    # Unrestricted constant, if present.
    if(opt$unrConstant) {
      fc <- fc + matrix(1, nrow = cap_T, ncol = 1) %*% t(cf$xiHat)
    }
    # Lagged-difference terms, if present (k lags inferred from GammaHat).
    if(!is.null(cf$GammaHat)) {
      k <- ncol(cf$GammaHat)/p
      fc <- fc + FracDiff( Lbk( y , b, k) , d) %*% t(cf$GammaHat)
    }
    # Add the level parameter back in, if present.
    if(opt$levelParam) {
      fc <- fc + matrix(1, nrow = cap_T, ncol = 1) %*% cf$muHat
    }
    # Swap the placeholder row for the forecast of period cap_T.
    xf <- rbind(xf[1:(cap_T-1),], fc[cap_T, ])
  }
  # Trim off the original sample so only the forecasts are returned.
  xf <- xf[(nrow(x)+1):nrow(xf), ]
  return(xf)
}
#' Roots of the Characteristic Polynomial
#'
#' \code{GetCharPolyRoots} calculates the roots of the
#' characteristic polynomial and plots them with the unit circle
#' transformed for the fractional model, see Johansen (2008).
#' \code{summary.FCVAR_roots} prints the output of
#' \code{GetCharPolyRoots} to screen.
#'
#' @param coeffs A list of coefficients for the FCVAR model.
#' An element of the list of estimation \code{results} output from \code{FCVARestn}.
#' @param opt An S3 object of class \code{FCVAR_opt} that stores the chosen estimation options,
#' generated from \code{FCVARoptions()}.
#' @param k The number of lags in the system.
#' @param r The cointegrating rank.
#' @param p The number of variables in the system.
#' @return An S3 object of type \code{FCVAR_roots} with the following elements:
#' \describe{
#'   \item{\code{cPolyRoots}}{A vector of the roots of the characteristic polynomial.
#'   It is an element of the list of estimation \code{results} output from \code{FCVARestn}.}
#'   \item{\code{b}}{A numeric value of the fractional cointegration parameter.}
#' }
#' @examples
#' \donttest{
#' opt <- FCVARoptions()
#' opt$gridSearch   <- 0 # Disable grid search in optimization.
#' opt$dbMin        <- c(0.01, 0.01) # Set lower bound for d,b.
#' opt$dbMax        <- c(2.00, 2.00) # Set upper bound for d,b.
#' opt$constrained  <- 0 # Impose restriction dbMax >= d >= b >= dbMin ? 1 <- yes, 0 <- no.
#' x <- votingJNP2014[, c("lib", "ir_can", "un_can")]
#' results <- FCVARestn(x, k = 2, r = 1, opt)
#' FCVAR_CharPoly <- GetCharPolyRoots(results$coeffs, opt, k = 2, r = 1, p = 3)
#' }
#' @family FCVAR postestimation functions
#' @seealso \code{FCVARoptions} to set default estimation options.
#' \code{FCVARestn} to estimate the model for which to calculate the roots
#' of the characteristic polynomial.
#' \code{summary.FCVAR_roots} prints the output of
#' \code{GetCharPolyRoots} to screen.
#' @note The roots are calculated from the companion form of the VAR,
#' where the roots are given as the inverse eigenvalues of the
#' coefficient matrix.
#' @references Johansen, S. (2008). "A representation theory for a class of
#' vector autoregressive models for fractional processes,"
#' Econometric Theory 24, 651-676.
#' @export
#'
GetCharPolyRoots <- function(coeffs, opt, k, r, p) {
  b <- coeffs$db[2]
  # Assemble the coefficient matrix of the companion-form VAR, starting
  # from the identity and adding the long-run and short-run dynamics.
  companion <- diag(p)
  if (r > 0) {
    companion <- companion + coeffs$alphaHat %*% t(coeffs$betaHat)
  }
  if (k > 0) {
    # First lag enters the top-left block directly.
    companion <- companion + coeffs$GammaHat[ , 1:p]
    # Subsequent lags enter as differences of consecutive Gamma matrices.
    if (k > 1) {
      for (lag in 2:k) {
        G_cur  <- coeffs$GammaHat[ , seq(((lag-1)*p + 1), lag*p)]
        G_prev <- coeffs$GammaHat[ , seq(((lag-2)*p + 1), (lag-1)*p)]
        companion <- cbind(companion, (G_cur - G_prev))
      }
    }
    # Final block carries minus the last Gamma matrix.
    G_last <- coeffs$GammaHat[ , seq(((k-1)*p + 1), k*p)]
    companion <- cbind(companion, ( - G_last ))
    # Pad with an identity for the transition of the lagged variables.
    companion <- rbind(companion,
                       cbind(diag(p*k),
                             matrix(0, nrow = p*k, ncol = p )))
  }
  # The characteristic roots are the inverse eigenvalues of the companion
  # matrix, sorted by decreasing modulus.
  roots <- 1 / eigen(companion)$values
  roots <- roots[order(-Mod(roots))]
  # Bundle the roots with the fractional parameter and set the S3 class.
  FCVAR_CharPoly <- list(cPolyRoots = roots,
                         b = b)
  class(FCVAR_CharPoly) <- 'FCVAR_roots'
  # Plot only when requested via the estimation options.
  if (opt$plotRoots) {
    graphics::plot(x = FCVAR_CharPoly)
  }
  return(FCVAR_CharPoly)
}
#' Print Summary of Roots of the Characteristic Polynomial
#'
#' \code{summary.FCVAR_roots} prints the output of
#' \code{GetCharPolyRoots} to screen.
#' \code{GetCharPolyRoots} calculates the roots of the
#' characteristic polynomial to plot them with the unit circle
#' transformed for the fractional model, see Johansen (2008).
#'
#' @param object An S3 object of type \code{FCVAR_roots} with the following elements:
#' \describe{
#' \item{\code{cPolyRoots}}{A vector of the roots of the characteristic polynomial.
#' It is an element of the list of estimation \code{results} output from \code{FCVARestn}.}
#' \item{\code{b}}{A numeric value of the fractional cointegration parameter.}
#' }
#' @param ... additional arguments affecting the summary produced.
#' @return NULL
#' @examples
#' \donttest{
#' opt <- FCVARoptions()
#' opt$gridSearch <- 0 # Disable grid search in optimization.
#' opt$dbMin <- c(0.01, 0.01) # Set lower bound for d,b.
#' opt$dbMax <- c(2.00, 2.00) # Set upper bound for d,b.
#' opt$constrained <- 0 # Impose restriction dbMax >= d >= b >= dbMin ? 1 <- yes, 0 <- no.
#' x <- votingJNP2014[, c("lib", "ir_can", "un_can")]
#' results <- FCVARestn(x, k = 2, r = 1, opt)
#' FCVAR_CharPoly <- GetCharPolyRoots(results$coeffs, opt, k = 2, r = 1, p = 3)
#' summary(object = FCVAR_CharPoly)
#' graphics::plot(x = FCVAR_CharPoly)
#' }
#' @family FCVAR postestimation functions
#' @seealso \code{FCVARoptions} to set default estimation options.
#' \code{FCVARestn} to estimate the model for which to calculate the roots
#' of the characteristic polynomial.
#' \code{summary.FCVAR_roots} prints the output of
#' \code{GetCharPolyRoots} to screen.
#' @note The roots are calculated from the companion form of the VAR,
#' where the roots are given as the inverse eigenvalues of the
#' coefficient matrix.
#' @references Johansen, S. (2008). "A representation theory for a class of
#' vector autoregressive models for fractional processes,"
#' Econometric Theory 24, 651-676.
#' @export
#'
summary.FCVAR_roots <- function(object, ...) {
  # Print a table of the roots of the characteristic polynomial:
  # one row per root with index, real part, imaginary part, and modulus.
  cat(sprintf('--------------------------------------------------------------------------------\n'))
  cat(sprintf( '                       Roots of the characteristic polynomial                   \n'))
  cat(sprintf('--------------------------------------------------------------------------------\n'))
  cat(sprintf( '    Number     Real part    Imaginary part       Modulus       \n'))
  cat(sprintf('--------------------------------------------------------------------------------\n'))
  num_roots <- length(object$cPolyRoots)
  for (idx in seq_len(num_roots)) {
    root <- object$cPolyRoots[idx]
    re_part <- Re(root)
    im_part <- Im(root)
    # Treat numerically tiny imaginary parts as exactly zero, so that stray
    # minus signs do not creep in across platforms (especially 32-bit i386).
    if (abs(im_part) < 10^(-6)) {
      im_part <- 0
    }
    cat(sprintf( '      %2.0f       %8.3f        %8.3f        %8.3f     \n',
                 idx, re_part, im_part, Mod(root) ))
  }
  cat(sprintf('--------------------------------------------------------------------------------\n'))
}
#' Plot Roots of the Characteristic Polynomial
#'
#' \code{plot.FCVAR_roots} plots the output of
#' \code{GetCharPolyRoots} to screen or to a file.
#' \code{GetCharPolyRoots} calculates the roots of the
#' characteristic polynomial and plots them with the unit circle
#' transformed for the fractional model, see Johansen (2008).
#'
#' @param x An S3 object of type \code{FCVAR_roots} with the following elements:
#' \describe{
#' \item{\code{cPolyRoots}}{A vector of the roots of the characteristic polynomial.
#' It is an element of the list of estimation \code{results} output from \code{FCVARestn}.}
#' \item{\code{b}}{A numeric value of the fractional cointegration parameter.}
#' }
#' @param y An argument for generic method \code{plot} that is not used in \code{plot.FCVAR_roots}.
#' @param ... Arguments to be passed to methods, such as graphical parameters
#' for the generic plot function.
#' @return NULL
#' @examples
#' \donttest{
#' opt <- FCVARoptions()
#' opt$gridSearch <- 0 # Disable grid search in optimization.
#' opt$dbMin <- c(0.01, 0.01) # Set lower bound for d,b.
#' opt$dbMax <- c(2.00, 2.00) # Set upper bound for d,b.
#' opt$constrained <- 0 # Impose restriction dbMax >= d >= b >= dbMin ? 1 <- yes, 0 <- no.
#' x <- votingJNP2014[, c("lib", "ir_can", "un_can")]
#' results <- FCVARestn(x, k = 2, r = 1, opt)
#' FCVAR_CharPoly <- GetCharPolyRoots(results$coeffs, opt, k = 2, r = 1, p = 3)
#' summary(object = FCVAR_CharPoly)
#' graphics::plot(x = FCVAR_CharPoly)}
#' @family FCVAR postestimation functions
#' @seealso \code{FCVARoptions} to set default estimation options.
#' \code{FCVARestn} to estimate the model for which to calculate the roots
#' of the characteristic polynomial.
#' \code{summary.FCVAR_roots} prints the output of
#' \code{GetCharPolyRoots} to screen.
#' @note The roots are calculated from the companion form of the VAR,
#' where the roots are given as the inverse eigenvalues of the
#' coefficient matrix.
#' @references Johansen, S. (2008). "A representation theory for a class of
#' vector autoregressive models for fractional processes,"
#' Econometric Theory 24, 651-676.
#' @export
#'
plot.FCVAR_roots <- function(x, y = NULL, ...) {
  # Plot the roots of the characteristic polynomial together with the image
  # of the unit circle transformed for the fractional model (Johansen, 2008).
  #
  # x:   S3 object of class 'FCVAR_roots' with elements cPolyRoots
  #      (complex roots) and b (fractional cointegration parameter).
  # y:   unused; present for compatibility with the plot() generic.
  # ...: optional overrides; 'xlim', 'ylim' and 'main' are honored,
  #      other graphical parameters are currently ignored.

  # Extract parameters from the FCVAR_roots object.
  cPolyRoots <- x$cPolyRoots
  b <- x$b
  # Optional plot settings passed by the caller.
  dots <- list(...)
  # Now calculate the line for the transformed unit circle.
  # First do the negative half.
  unitCircle <- seq( pi, 0, by = - 0.001)
  psi <- - (pi - unitCircle)/2
  unitCircleX <- cos( - unitCircle)
  unitCircleY <- sin( - unitCircle)
  transformedUnitCircleX <- (1 - (2*cos(psi))^b*cos(b*psi))
  transformedUnitCircleY <- ( (2*cos(psi))^b*sin(b*psi))
  # Then do the positive half.
  unitCircle <- seq(0, pi, by = 0.001)
  psi <- (pi - unitCircle)/2
  unitCircleX <- c(unitCircleX, cos(unitCircle))
  unitCircleY <- c(unitCircleY, sin(unitCircle))
  transformedUnitCircleX <- c(transformedUnitCircleX, 1,
                              (1 - (2*cos(psi))^b*cos(b*psi)))
  transformedUnitCircleY <- c(transformedUnitCircleY, 0,
                              ( (2*cos(psi))^b*sin(b*psi)))
  # Determine axis limits. Each of xlim and ylim is taken from '...' when
  # supplied and otherwise defaults to a symmetric range wide enough to
  # contain both circles.
  # (Bug fix: previously, supplying only one of xlim/ylim sent control to
  # the default branch, which never assigned the supplied limit, causing
  # an "object 'xlim' not found" error in the plot call below.)
  maxXYaxis <- max(abs(c(transformedUnitCircleX, unitCircleX,
                         transformedUnitCircleY, unitCircleY)))
  if ('xlim' %in% names(dots)) {
    xlim <- dots$xlim
  } else {
    xlim <- 2*c(-maxXYaxis, maxXYaxis)
  }
  if ('ylim' %in% names(dots)) {
    ylim <- dots$ylim
  } else {
    ylim <- 2*c(-maxXYaxis, maxXYaxis)
  }
  # Title defaults to a two-line description unless overridden.
  if ('main' %in% names(dots)) {
    main <- dots$main
  } else {
    main <- c('Roots of the characteristic polynomial',
              'with the image of the unit circle')
  }
  # Plot the transformed unit circle (red), the unit circle (black), and
  # the roots of the characteristic polynomial (blue points).
  graphics::plot(transformedUnitCircleX,
                 transformedUnitCircleY,
                 main = main,
                 xlab = 'Real Part of Root',
                 ylab = 'Imaginary Part of Root',
                 xlim = xlim,
                 ylim = ylim,
                 type = 'l',
                 lwd = 3,
                 col = 'red')
  graphics::lines(unitCircleX, unitCircleY, lwd = 3, col = 'black')
  graphics::points(Re(cPolyRoots), Im(cPolyRoots),
                   pch = 16, col = 'blue')
}
#' Multivariate White Noise Tests
#'
#' \code{MVWNtest} performs multivariate tests for white noise.
#' It performs both the Ljung-Box Q-test and the LM-test on individual series
#' for a sequence of lag lengths.
#' \code{summary.MVWN_stats} prints a summary of these statistics to screen.
#'
#' @param x A matrix of variables to be included in the system,
#' typically model residuals.
#' @param maxlag The number of lags for serial correlation tests.
#' @param printResults An indicator to print results to screen.
#' @return An S3 object of type \code{MVWN_stats} containing the test results,
#' including the following parameters:
#' \describe{
#' \item{\code{Q}}{A 1xp vector of Q statistics for individual series.}
#' \item{\code{pvQ}}{A 1xp vector of P-values for Q-test on individual series.}
#' \item{\code{LM}}{A 1xp vector of LM statistics for individual series.}
#' \item{\code{pvLM}}{A 1xp vector of P-values for LM-test on individual series.}
#' \item{\code{mvQ}}{A multivariate Q statistic.}
#' \item{\code{pvMVQ}}{A p-value for multivariate Q-statistic using \code{p^2*maxlag}
#' degrees of freedom.}
#' \item{\code{maxlag}}{The number of lags for serial correlation tests.}
#' \item{\code{p}}{The number of variables in the system.}
#' }
#' @examples
#' \donttest{
#' opt <- FCVARoptions()
#' opt$gridSearch <- 0 # Disable grid search in optimization.
#' opt$dbMin <- c(0.01, 0.01) # Set lower bound for d,b.
#' opt$dbMax <- c(2.00, 2.00) # Set upper bound for d,b.
#' opt$constrained <- 0 # Impose restriction dbMax >= d >= b >= dbMin ? 1 <- yes, 0 <- no.
#' x <- votingJNP2014[, c("lib", "ir_can", "un_can")]
#' results <- FCVARestn(x, k = 2, r = 1, opt)
#' MVWNtest_stats <- MVWNtest(x = results$Residuals, maxlag = 12, printResults = 1)
#' }
#' set.seed(27)
#' WN <- stats::rnorm(100)
#' RW <- cumsum(stats::rnorm(100))
#' MVWN_x <- as.matrix(data.frame(WN = WN, RW = RW))
#' MVWNtest_stats <- MVWNtest(x = MVWN_x, maxlag = 10, printResults = 1)
#' @family FCVAR postestimation functions
#' @seealso \code{FCVARoptions} to set default estimation options.
#' \code{FCVARestn} produces the residuals intended for this test.
#' \code{LagSelect} uses this test as part of the lag order selection process.
#' \code{summary.MVWN_stats} prints a summary of the \code{MVWN_stats} statistics to screen.
#' @note
#' The LM test is consistent for heteroskedastic series; the Q-test is not.
#' @export
#'
MVWNtest <- function(x, maxlag, printResults) {
  # Multivariate white noise tests: univariate Ljung-Box Q-tests and
  # Breusch-Godfrey LM-tests per series, plus a multivariate Q-test on
  # the full system, each using maxlag lags.
  num_vars <- ncol(x)
  # Storage for the univariate statistics (one column per series).
  Q <- matrix(0, nrow = 1, ncol = num_vars)
  pvQ <- matrix(1, nrow = 1, ncol = num_vars)
  LM <- matrix(0, nrow = 1, ncol = num_vars)
  pvLM <- matrix(1, nrow = 1, ncol = num_vars)
  # Univariate Q- and LM-tests on each series in turn.
  for (series in seq_len(num_vars)) {
    q_res <- Qtest(x[, series, drop = FALSE], maxlag)
    Q[series] <- q_res$Qstat
    pvQ[series] <- q_res$pv
    lm_res <- LMtest(x[, series, drop = FALSE], maxlag)
    LM[series] <- lm_res$LMstat
    pvLM[series] <- lm_res$pv
  }
  # Multivariate Q-test on the full system.
  mv_res <- Qtest(x, maxlag)
  # Assemble the MVWN_stats object.
  MVWNtest_stats <- list(
    Q = Q,
    pvQ = pvQ,
    LM = LM,
    pvLM = pvLM,
    mvQ = mv_res$Qstat,
    pvMVQ = mv_res$pv,
    maxlag = maxlag,
    p = num_vars
  )
  class(MVWNtest_stats) <- 'MVWN_stats'
  # Optionally print a summary table to screen.
  if (printResults) {
    summary(MVWNtest_stats)
  }
  return(MVWNtest_stats)
}
#' Summarize Statistics for Multivariate White Noise Tests
#'
#' \code{summary.MVWN_stats} is an S3 method for objects of class \code{MVWN_stats}
#' that prints a summary of the statistics from \code{MVWNtest} to screen.
#' \code{MVWNtest} performs multivariate tests for white noise.
#' It performs both the Ljung-Box Q-test and the LM-test on individual series
#' for a sequence of lag lengths.
#'
#' @param object An S3 object of type \code{MVWN_stats} containing the results
#' from multivariate tests for white noise.
#' It is the output of \code{MVWNtest}.
#' @param ... additional arguments affecting the summary produced.
#' @return NULL
#' @examples
#' \donttest{
#' opt <- FCVARoptions()
#' opt$gridSearch <- 0 # Disable grid search in optimization.
#' opt$dbMin <- c(0.01, 0.01) # Set lower bound for d,b.
#' opt$dbMax <- c(2.00, 2.00) # Set upper bound for d,b.
#' opt$constrained <- 0 # Impose restriction dbMax >= d >= b >= dbMin ? 1 <- yes, 0 <- no.
#' x <- votingJNP2014[, c("lib", "ir_can", "un_can")]
#' results <- FCVARestn(x, k = 2, r = 1, opt)
#' MVWNtest_stats <- MVWNtest(x = results$Residuals, maxlag = 12, printResults = 1)
#' summary(object = MVWNtest_stats)
#' }
#'
#' \donttest{
#' set.seed(27)
#' WN <- stats::rnorm(100)
#' RW <- cumsum(stats::rnorm(100))
#' MVWN_x <- as.matrix(data.frame(WN = WN, RW = RW))
#' MVWNtest_stats <- MVWNtest(x = MVWN_x, maxlag = 10, printResults = 1)
#' summary(object = MVWNtest_stats)
#' }
#' @family FCVAR postestimation functions
#' @seealso \code{FCVARoptions} to set default estimation options.
#' \code{FCVARestn} produces the residuals intended for this test.
#' \code{LagSelect} uses this test as part of the lag order selection process.
#' \code{summary.MVWN_stats} is an S3 method for class \code{MVWN_stats} that
#' prints a summary of the output of \code{MVWNtest} to screen.
#' @note
#' The LM test is consistent for heteroskedastic series, the Q-test is not.
#' @export
#'
summary.MVWN_stats <- function(object, ...) {
  # Print a table of white noise test results: the multivariate Q-test
  # first, then the univariate Q- and LM-tests for each series.
  hline <- sprintf('---------------------------------------------\n')
  cat(sprintf('\n White Noise Test Results (lag = %g)\n', object$maxlag))
  cat(hline)
  cat(sprintf('Variable |       Q  P-val |      LM  P-val |\n'))
  cat(hline)
  # The LM columns do not apply to the multivariate test.
  cat(sprintf('Multivar | %7.3f  %4.3f |    ----   ---- |\n', object$mvQ, object$pvMVQ))
  for (series in seq_len(object$p)) {
    cat(sprintf('Var%g     | %7.3f  %4.3f | %7.3f  %4.3f |\n',
                series, object$Q[series], object$pvQ[series],
                object$LM[series], object$pvLM[series] ))
  }
  cat(hline)
}
# Breusch-Godfrey Lagrange Multiplier Test for Serial Correlation
#
# \code{LMtest} performs a Breusch-Godfrey Lagrange Multiplier test
# for serial correlation.
# Note that Roxygen comments are excluded to keep this function internal.
# However, the contents of Roxygen comments are shown below for those who read the scripts.
#
# @param x A vector or Tx1 matrix of variables to be tested,
# typically model residuals.
# @param q The number of lags for the serial correlation tests.
# @return A list object \code{LMtest_out} containing the test results,
# including the following parameters:
# \describe{
# \item{\code{LM}}{The LM statistic for individual series.}
# \item{\code{pv}}{The p-value for LM-test on individual series.}
# }
# @examples
# opt <- FCVARoptions()
# opt$gridSearch <- 0 # Disable grid search in optimization.
# opt$dbMin <- c(0.01, 0.01) # Set lower bound for d,b.
# opt$dbMax <- c(2.00, 2.00) # Set upper bound for d,b.
# opt$constrained <- 0 # Impose restriction dbMax >= d >= b >= dbMin ? 1 <- yes, 0 <- no.
# x <- votingJNP2014[, c("lib", "ir_can", "un_can")]
# results <- FCVARestn(x, k = 2, r = 1, opt)
# MVWNtest_stats <- MVWNtest(x = results$Residuals, maxlag = 12, printResults = 1)
# LMtest(x = matrix(results$Residuals[, 1]), q = 12)
# LMtest(x = results$Residuals[,2, drop = FALSE], q = 12)
#
# set.seed(27)
# WN <- stats::rnorm(100)
# RW <- cumsum(stats::rnorm(100))
# LMtest(x = matrix(WN), q = 10)
# LMtest(x = matrix(RW), q = 10)
# MVWN_x <- as.matrix(data.frame(WN = WN, RW = RW))
# MVWNtest_stats <- MVWNtest(x = MVWN_x, maxlag = 10, printResults = 1)
# @family FCVAR postestimation functions
# @seealso \code{MVWNtest} calls this function to test residuals
# from the estimation results of \code{FCVARestn}.
# An alternative test is the Ljung-Box Q-test in \code{Qtest}.
# @note
# The LM test is consistent for heteroskedastic series.
# @export
#
LMtest <- function(x, q) {
  # Breusch-Godfrey Lagrange Multiplier test for serial correlation up to
  # lag q. Consistent under heteroskedasticity (unlike the Ljung-Box Q-test).
  #
  # x: Tx1 matrix of residuals (one series).
  # q: number of lags for the test.
  # Returns a list with LMstat (the LM statistic, asymptotically
  # chi-square(q) under the null) and pv (its p-value).
  cap_T <- nrow(x)
  # Demean the series.
  x <- x - mean(x)
  # y holds observations q+1, ..., T; z collects the q lagged regressors,
  # ordered lag 1, lag 2, ..., lag q from left to right.
  y <- x[seq(q + 1, cap_T), , drop = FALSE]
  z <- x[seq(1, cap_T - q), , drop = FALSE]
  # Bug fix: use seq_len(q - 1) so no columns are prepended when q == 1.
  # The previous '1:(q - 1)' iterated over c(1, 0) for q == 1, building an
  # incorrect regressor matrix (the score then used the contemporaneous
  # value instead of the first lag).
  for (i in seq_len(q - 1)) {
    z <- cbind(x[seq(i + 1, cap_T - q + i), , drop = FALSE], z)
  }
  e <- y
  # Score contributions: each lagged regressor times the residual.
  s <- z[, 1:q, drop = FALSE] * kronecker(matrix(1, 1, q), e)
  sbar <- t(colMeans(s))
  # Center the scores before forming their covariance.
  kron_sbar <- kronecker(matrix(1, nrow(s)), sbar)
  s <- s - kron_sbar
  S <- t(s) %*% s/cap_T
  # LM statistic: T * sbar' S^{-1} sbar.
  LMstat <- cap_T*sbar %*% solve(S) %*% t(sbar)
  pv <- 1 - stats::pchisq(LMstat, q)
  # Output a list of results.
  LMtest_out <- list(
    LMstat = LMstat,
    pv = pv
  )
  return(LMtest_out)
}
# Ljung-Box Q-test for Serial Correlation
#
# \code{Qtest} performs a (multivariate) Ljung-Box Q-test for serial correlation; see
# Luetkepohl (2005, New Introduction to Multiple Time Series Analysis, p. 169).
#
# @param x A vector or Tx1 matrix of variables to be tested,
# typically model residuals.
# @param maxlag The number of lags for the serial correlation tests.
# @return A list object \code{Qtest_out} containing the test results,
# including the following parameters:
# \describe{
# \item{\code{Qstat}}{A 1xp vector of Q statistics for individual series.}
# \item{\code{pv}}{A 1xp vector of P-values for Q-test on individual series.}
# }
# @examples
# opt <- FCVARoptions()
# opt$gridSearch <- 0 # Disable grid search in optimization.
# opt$dbMin <- c(0.01, 0.01) # Set lower bound for d,b.
# opt$dbMax <- c(2.00, 2.00) # Set upper bound for d,b.
# opt$constrained <- 0 # Impose restriction dbMax >= d >= b >= dbMin ? 1 <- yes, 0 <- no.
# x <- votingJNP2014[, c("lib", "ir_can", "un_can")]
# results <- FCVARestn(x, k = 2, r = 1, opt)
# MVWNtest_stats <- MVWNtest(x = results$Residuals, maxlag = 12, printResults = 1)
# Qtest(x = results$Residuals, maxlag = 12)
# Qtest(x = matrix(results$Residuals[, 1]), maxlag = 12)
# Qtest(x = results$Residuals[,2, drop = FALSE], maxlag = 12)
#
# set.seed(27)
# WN <- stats::rnorm(100)
# RW <- cumsum(stats::rnorm(100))
# MVWN_x <- as.matrix(data.frame(WN = WN, RW = RW))
# Qtest(x = MVWN_x, maxlag = 10)
# Qtest(x = matrix(WN), maxlag = 10)
# Qtest(x = matrix(RW), maxlag = 10)
# @family FCVAR postestimation functions
# @seealso \code{MVWNtest} calls this function to test residuals
# from the estimation results of \code{FCVARestn}.
# An alternative test is the Breusch-Godfrey Lagrange Multiplier Test in \code{LMtest}.
# @note
# The LM test in \code{LMtest} is consistent for heteroskedastic series,
# while the Q-test is not.
# @references H. Luetkepohl (2005) "New Introduction to Multiple Time Series Analysis," Springer, Berlin.
# @export
#
Qtest <- function(x, maxlag) {
  # (Multivariate) Ljung-Box Q-test for serial correlation up to lag maxlag;
  # see Luetkepohl (2005, "New Introduction to Multiple Time Series
  # Analysis", p. 169).
  #
  # x:      Txp matrix of residuals.
  # maxlag: number of lags for the test.
  # Returns a list with Qstat (the Q statistic) and pv (its p-value on
  # p^2 * maxlag degrees of freedom).
  cap_T <- nrow(x)
  p <- ncol(x)
  # Contemporaneous covariance: C0 = (1/T) * sum_t x_t x_t' = X'X / T.
  C0 <- crossprod(x)/cap_T
  # Hoisted out of the lag loop: the original recomputed solve(C0) twice
  # per lag.
  C0inv <- solve(C0)
  # Lag-i autocovariances, C_i = sum_{t>i} x_t x_{t-i}' / (T - i).
  # Note division by (T-i) instead of T.
  C <- array(rep(0, p*p*maxlag), dim = c(p,p,maxlag))
  for (i in seq_len(maxlag)) {
    C[ , ,i] <- crossprod(x[(i + 1):cap_T, , drop = FALSE],
                          x[1:(cap_T - i), , drop = FALSE]) / (cap_T - i)
  }
  # (Multivariate) Q statistic.
  Qstat <- 0
  for (j in seq_len(maxlag)) {
    Cj <- C[ , ,j]
    Qstat <- Qstat + sum(diag( (t(Cj) %*% C0inv) %*% (Cj %*% C0inv) )) / (cap_T - j)
  }
  Qstat <- Qstat*cap_T*(cap_T + 2)
  pv <- 1 - stats::pchisq(Qstat, p*p*maxlag) # P-value is calculated with p^2*maxlag df.
  # Output a list of results.
  Qtest_out <- list(
    Qstat = Qstat,
    pv = pv
  )
  return(Qtest_out)
}
|
/R/FCVAR_post.R
|
no_license
|
LeeMorinUCF/FCVAR
|
R
| false
| false
| 37,959
|
r
|
#' Test of Restrictions on FCVAR Model
#'
#' \code{FCVARhypoTest} performs a likelihood ratio test of the null
#' hypothesis: "model is \code{modelR}" against the alternative hypothesis:
#' "model is \code{modelUNR}".
#'
#' @param modelUNR A list of estimation results created for the unrestricted model.
#' @param modelR A list of estimation results created for the restricted model.
#' @return A list \code{LRtest} containing the test results,
#' including the following parameters:
#' \describe{
#' \item{\code{loglikUNR}}{The log-likelihood for the unrestricted model.}
#' \item{\code{loglikR}}{The log-likelihood for the restricted model.}
#' \item{\code{df}}{The degrees of freedom for the test.}
#' \item{\code{LRstat}}{The likelihood ratio test statistic.}
#'   \item{\code{pv}}{The p-value for the likelihood ratio test.}
#' }
#' @examples
#' \donttest{
#' opt <- FCVARoptions()
#' opt$gridSearch <- 0 # Disable grid search in optimization.
#' opt$dbMin <- c(0.01, 0.01) # Set lower bound for d,b.
#' opt$dbMax <- c(2.00, 2.00) # Set upper bound for d,b.
#' opt$constrained <- 0 # Impose restriction dbMax >= d >= b >= dbMin ? 1 <- yes, 0 <- no.
#' x <- votingJNP2014[, c("lib", "ir_can", "un_can")]
#' m1 <- FCVARestn(x, k = 2, r = 1, opt)
#' opt1 <- opt
#' opt1$R_psi <- matrix(c(1, 0), nrow = 1, ncol = 2)
#' opt1$r_psi <- 1
#' m1r1 <- FCVARestn(x, k = 2, r = 1, opt1)
#' Hdb <- FCVARhypoTest(modelUNR = m1, modelR = m1r1)
#' }
#'
#' \donttest{
#' opt1 <- opt
#' opt1$R_Beta <- matrix(c(1, 0, 0), nrow = 1, ncol = 3)
#' m1r2 <- FCVARestn(x, k = 2, r = 1, opt1)
#' Hbeta1 <- FCVARhypoTest(m1, m1r2)
#' }
#'
#' \donttest{
#' opt1 <- opt
#' opt1$R_Alpha <- matrix(c(0, 1, 0), nrow = 1, ncol = 3)
#' m1r4 <- FCVARestn(x, k = 2, r = 1, opt1)
#' Halpha2 <- FCVARhypoTest(m1, m1r4)
#' }
#' @family FCVAR postestimation functions
#' @seealso The test is calculated using the results of two calls to
#' \code{FCVARestn}, under the restricted and unrestricted models.
#' Use \code{FCVARoptions} to set default estimation options for each model,
#' then set restrictions as needed before \code{FCVARestn}.
#' @export
#'
FCVARhypoTest <- function(modelUNR, modelR) {
  # Likelihood ratio test of the null "model is modelR" against the
  # alternative "model is modelUNR". Both arguments are estimation results
  # with elements $like (log-likelihood) and $fp (number of free parameters).

  # Guard: a restricted likelihood above the unrestricted one indicates
  # misuse (reversed arguments, non-nested models, or failed optimization).
  if (modelUNR$like < modelR$like) {
    stop(c('Likelihood value from restricted model is larger than that from unrestricted model.\n',
           '   This could occur for a few reasons. Verify that the following conditions hold:\n',
           '1. You have not confused the arguments, i.e., misplaced the unrestricted and restricted model.\n',
           '2. The restricted model is nested in the unrestricted model. That is, the estimates from\n',
           '   the restricted model should also satisfy any identifying restrictions of the unrestricted model.\n',
           '3. The unrestricted model still has enough restrictions to identify alpha and beta.\n',
           '   For example, the default restriction is to set the upper rxr block of beta to the identity matrix.\n',
           '4. The likelihood functions are truly optimized. Plot the likelihood function with FCVARlikeGrid,\n',
           '   and use the optimal grid point as a starting value.\n',
           '   Use a finer grid or stronger convergence criteria, if necessary.'))
  }
  # Exactly equal likelihoods are possible but negligible; warn the user.
  if (modelUNR$like == modelR$like) {
    warning(c('Likelihood value from restricted model is equal to that from unrestricted model.\n',
              '   Although this is not impossible, it is a negligible event. Verify that the following conditions hold:\n',
              '1. You have not confused the arguments, i.e., passed the same model to both the unrestricted and\n',
              '   restricted model.\n',
              '2. The restricted model is nested in the unrestricted model. That is, the estimates from\n',
              '   the restricted model should also satisfy any identifying restrictions of the unrestricted model.\n',
              '3. The unrestricted model still has enough restrictions to identify alpha and beta.\n',
              '   For example, the default restriction is to set the upper rxr block of beta to the identity matrix.\n',
              '4. The likelihood functions are truly optimized. Plot the likelihood function with FCVARlikeGrid,\n',
              '   and use the optimal grid point as a starting value.\n',
              '   Use a finer grid or stronger convergence criteria, if necessary.'))
  }
  # Guard: the restriction must remove at least one free parameter.
  if (modelUNR$fp <= modelR$fp) {
    stop(c('Unrestricted model does not have more free parameters than restricted model.\n',
           '   This could occur for a few reasons. Verify that the following conditions hold:\n',
           '1. You have not confused the arguments, i.e., misplaced the unrestricted and restricted model.\n',
           '2. The restricted model is nested in the unrestricted model. That is, the estimates from\n',
           '   the restricted model should also satisfy any identifying restrictions of the unrestricted model.'))
  }
  # LR statistic: twice the log-likelihood difference.
  lr_stat <- 2*(modelUNR$like - modelR$like)
  # Degrees of freedom: reduction in the number of free parameters.
  deg_free <- modelUNR$fp - modelR$fp
  # Asymptotic chi-square p-value.
  p_value <- 1 - stats::pchisq(lr_stat, deg_free)
  # Report the test to screen.
  cat(sprintf('Likelihood ratio test results:'))
  cat(sprintf('\nUnrestricted log-likelihood: %3.3f\nRestricted log-likelihood: %3.3f\n',
              modelUNR$like, modelR$like))
  cat(sprintf('Test results (df = %1.0f):\nLR statistic: \t %3.3f\nP-value: \t %1.3f\n',
              deg_free, lr_stat, p_value))
  # Return the test results in a list.
  LRtest <- list(
    loglikUNR = modelUNR$like,
    loglikR = modelR$like,
    df = deg_free,
    LRstat = lr_stat,
    pv = p_value
  )
  return(LRtest)
}
#' Bootstrap Likelihood Ratio Test
#'
#' \code{FCVARboot} generates a distribution of a likelihood ratio
#' test statistic using a wild bootstrap, following the method of
#' Boswijk, Cavaliere, Rahbek, and Taylor (2016). It takes two sets
#' of options as inputs to estimate the model under the null and the
#' unrestricted model.
#'
#' @param x A matrix of variables to be included in the system.
#' @param k The number of lags in the system.
#' @param r The cointegrating rank.
#' @param optRES An S3 object of class \code{FCVAR_opt} that stores the chosen estimation options
#' for the restricted model, as generated from \code{FCVARoptions()},
#' with adjustments as necessary.
#' @param optUNR An S3 object of class \code{FCVAR_opt} that stores the chosen estimation options
#' for the unrestricted model.
#' @param B The number of bootstrap samples.
#' @return A list \code{FCVARboot_stats} containing the estimation results,
#' including the following parameters:
#' \describe{
#' \item{\code{LRbs}}{A \eqn{B x 1} vector of simulated likelihood ratio statistics}
#' \item{\code{pv}}{An approximate p-value for the likelihood ratio statistic
#' based on the bootstrap distribution.}
#' \item{\code{H}}{A list containing the likelihood ratio test results.
#' It is identical to the output from \code{FCVARhypoTest}, with one addition,
#' namely \code{H$pvBS} which is the bootstrap p-value}
#' \item{\code{mBS}}{The model estimates under the null hypothesis.}
#' \item{\code{mUNR}}{The model estimates under the alternative hypothesis.}
#' }
#' @examples
#' \donttest{
#' opt <- FCVARoptions()
#' opt$gridSearch <- 0 # Disable grid search in optimization.
#' opt$dbMin <- c(0.01, 0.01) # Set lower bound for d,b.
#' opt$dbMax <- c(2.00, 2.00) # Set upper bound for d,b.
#' opt$constrained <- 0 # Impose restriction dbMax >= d >= b >= dbMin ? 1 <- yes, 0 <- no.
#' x <- votingJNP2014[, c("lib", "ir_can", "un_can")]
#' opt$plotRoots <- 0
#' optUNR <- opt
#' optRES <- opt
#' optRES$R_Beta <- matrix(c(1, 0, 0), nrow = 1, ncol = 3)
#' set.seed(42)
#' FCVARboot_stats <- FCVARboot(x, k = 2, r = 1, optRES, optUNR, B = 2)
#' # In practice, set the number of bootstraps so that (B+1)*alpha is an integer,
#' # where alpha is the chosen level of significance.
#' # For example, set B = 999 (but it takes a long time to compute).
#' }
#' @family FCVAR postestimation functions
#' @seealso \code{FCVARoptions} to set default estimation options.
#' \code{FCVARestn} is called to estimate the models under the null and alternative hypotheses.
#' @references Boswijk, Cavaliere, Rahbek, and Taylor (2016)
#' "Inference on co-integration parameters in heteroskedastic
#' vector autoregressions," Journal of Econometrics 192, 64-85.
#' @export
#'
FCVARboot <- function(x, k, r, optRES, optUNR, B) {
  # Wild bootstrap of the likelihood ratio test of the restricted model
  # (optRES, the null) against the unrestricted model (optUNR), following
  # Boswijk, Cavaliere, Rahbek, and Taylor (2016). Relies on the project
  # functions FCVARestn, FCVARhypoTest, and FCVARsimBS.

  # Calculate length of sample to generate, adjusting for initial values
  # (optRES$N is presumably the number of observations reserved as initial
  # values in the estimation options -- verify against FCVARoptions).
  cap_T <- nrow(x) - optRES$N
  # Use first k+1 observations for initial values
  data <- x[1:(k+1), ]
  # Preallocate the vector of bootstrap LR statistics.
  LR <- matrix(0, nrow = B, ncol = 1)
  # Turn off output and calculation of standard errors for faster computation
  optUNR$print2screen <- 0
  optRES$print2screen <- 0
  optUNR$CalcSE <- 0
  optRES$CalcSE <- 0
  # Estimate both models on the observed sample: mBS (null) is also used
  # below as the bootstrap data-generating process.
  mBS <- FCVARestn(x, k, r, optRES)
  mUNR <- FCVARestn(x, k, r, optUNR)
  cat(sprintf('\nHypothesis test to bootstrap:\n'))
  # cat(H)
  H <- FCVARhypoTest(mUNR, mBS)
  # How often should the number of iterations be displayed
  show_iters <- 10
  for (j in 1:B) {
    # Display replication count every show_iters Bootstraps
    if(round((j+1)/show_iters) == (j+1)/show_iters) {
      # cat(sprintf('iteration: %1.0f\n', j))
      message(sprintf('Completed bootstrap replication %d of %d.', j, B))
    }
    # (1) generate bootstrap DGP under the null
    xBS <- FCVARsimBS(data, mBS, cap_T)
    # append initial values to bootstrap sample
    BSs <- rbind(data, xBS)
    # (2) estimate unrestricted model
    mUNRbs <- FCVARestn(BSs, k, r, optUNR)
    # (3) estimate restricted model (under the null)
    mRES <- FCVARestn(BSs, k, r, optRES)
    # (4) calculate test statistic
    LR[j] <- -2*(mRES$like - mUNRbs$like)
  }
  # Return sorted LR stats
  LRbs <- LR[order(LR)]
  # No need to sort if you count the extreme realizations ( but it looks pretty).
  # Calculate Bootstrap P-value (see ETM p.157 eq 4.62)
  # The bootstrap p-value is the share of simulated LR statistics that
  # exceed the observed one.
  H$pvBS <- sum(LRbs > H$LRstat)/B
  # Print output
  cat(sprintf('Bootstrap likelihood ratio test results:'))
  cat(sprintf('\nUnrestricted log-likelihood: %3.3f\nRestricted log-likelihood: %3.3f\n',
              H$loglikUNR, H$loglikR))
  cat(sprintf('Test results (df = %1.0f):\nLR statistic: \t %3.3f\nP-value: \t %1.3f\n',
              H$df, H$LRstat, H$pv))
  cat(sprintf('P-value (BS): \t %1.3f\n', H$pvBS))
  # Return a list of bootstrap test results.
  FCVARboot_stats <- list(
    LRbs = LRbs,
    H = H,
    mBS = mBS,
    mUNR = mUNR
  )
  return(FCVARboot_stats)
}
#' Forecasts with the FCVAR Model
#'
#' \code{FCVARforecast} calculates recursive forecasts with the FCVAR model.
#'
#' @param x A matrix of variables to be included in the system.
#' The forecast will be calculated using these values as starting values.
#' @param model A list of estimation results, just as if estimated from \code{FCVARestn}.
#' The parameters in \code{model} can also be set or adjusted by assigning new values.
#' @param NumPeriods The number of time periods in the simulation.
#' @return A \code{NumPeriods} \eqn{\times p} matrix \code{xf} of forecasted values.
#' @examples
#' \donttest{
#' opt <- FCVARoptions()
#' opt$gridSearch <- 0 # Disable grid search in optimization.
#' opt$dbMin <- c(0.01, 0.01) # Set lower bound for d,b.
#' opt$dbMax <- c(2.00, 2.00) # Set upper bound for d,b.
#' opt$constrained <- 0 # Impose restriction dbMax >= d >= b >= dbMin ? 1 <- yes, 0 <- no.
#' x <- votingJNP2014[, c("lib", "ir_can", "un_can")]
#' opt1 <- opt
#' opt1$R_Alpha <- matrix(c(0, 1, 0), nrow = 1, ncol = 3)
#' m1r4 <- FCVARestn(x, k = 2, r = 1, opt1)
#' xf <- FCVARforecast(x, m1r4, NumPeriods = 12)
#' }
#' @family FCVAR auxiliary functions
#' @seealso \code{FCVARoptions} to set default estimation options.
#' \code{FCVARestn} for the specification of the \code{model}.
#' \code{FCVARforecast} calls \code{FracDiff} and \code{Lbk} to calculate the forecast.
#' @export
#'
FCVARforecast <- function(x, model, NumPeriods) {
  # Recursive multi-step forecasts from an estimated FCVAR model.
  #
  # x:          matrix of observed data, used as starting values.
  # model:      list of estimation results from FCVARestn; model$coeffs
  #             and model$options are read below.
  # NumPeriods: number of periods to forecast.
  # Returns a NumPeriods x p matrix of forecasts.
  # Relies on the project helpers FracDiff (fractional difference) and
  # Lbk (fractional lag operator).
  #--------------------------------------------------------------------------------
  # Preliminary steps
  #--------------------------------------------------------------------------------
  p <- ncol(x)
  opt <- model$options
  cf <- model$coeffs
  # Fractional integration order d and cointegration order b.
  d <- cf$db[1]
  b <- cf$db[2]
  #--------------------------------------------------------------------------------
  # Recursively generate forecasts
  #--------------------------------------------------------------------------------
  # Each iteration appends one forecast period, computed from all
  # previously observed and forecasted values.
  xf <- x
  for (i in 1:NumPeriods) {
    # Append x with zeros to simplify calculations.
    xf <- rbind(xf, rep(0, p))
    cap_T <- nrow(xf)
    # Adjust by level parameter if present.
    if(opt$levelParam) {
      y <- xf - matrix(1, nrow = cap_T, ncol = 1) %*% cf$muHat
    } else {
      y <- xf
    }
    # Main term, take fractional lag.
    z <- Lbk(y, d, 1)
    # Error correction term.
    if(!is.null(cf$alphaHat)) {
      z <- z + FracDiff( Lbk(y, b, 1), d - b ) %*% t(cf$PiHat)
      # Restricted constant enters through the cointegrating relations.
      if(opt$rConstant) {
        z <- z + FracDiff( Lbk(matrix(1, nrow = cap_T, ncol = 1), b, 1), d - b ) %*%
          cf$rhoHat %*% t(cf$alphaHat)
      }
    }
    # Add unrestricted constant if present.
    if(opt$unrConstant) {
      z <- z + matrix(1, nrow = cap_T, ncol = 1) %*% t(cf$xiHat)
    }
    # Add lags if present.
    if(!is.null(cf$GammaHat)) {
      # k <- size(cf$GammaHat,2)/p
      # Number of lags is implied by the width of the stacked GammaHat.
      k <- ncol(cf$GammaHat)/p
      z <- z + FracDiff( Lbk( y , b, k) , d) %*% t(cf$GammaHat)
    }
    # Adjust by level parameter if present.
    if(opt$levelParam) {
      z <- z + matrix(1, nrow = cap_T, ncol = 1) %*% cf$muHat
    }
    # Append forecast to x matrix.
    # Only the last row of z is the new forecast; earlier rows are
    # recomputed values for existing periods and are discarded.
    xf <- rbind(xf[1:(cap_T-1),], z[cap_T, ])
  }
  #--------------------------------------------------------------------------------
  # Return forecasts.
  #--------------------------------------------------------------------------------
  # Trim off original data to return forecasts only.
  xf <- xf[(nrow(x)+1):nrow(xf), ]
  return(xf)
}
#' Roots of the Characteristic Polynomial
#'
#' \code{GetCharPolyRoots} computes the roots of the characteristic
#' polynomial of an estimated FCVAR model and, optionally, plots them
#' together with the unit circle transformed for the fractional model,
#' see Johansen (2008).
#'
#' @param coeffs A list of FCVAR coefficient estimates, as found in the
#' \code{coeffs} element of the estimation \code{results} from \code{FCVARestn}.
#' @param opt An S3 object of class \code{FCVAR_opt} of estimation options,
#' generated from \code{FCVARoptions()}; only \code{plotRoots} is used here.
#' @param k The number of lags in the system.
#' @param r The cointegrating rank.
#' @param p The number of variables in the system.
#' @return An S3 object of class \code{FCVAR_roots} with the elements:
#' \describe{
#' \item{\code{cPolyRoots}}{The roots of the characteristic polynomial,
#' sorted in decreasing order of modulus.}
#' \item{\code{b}}{A numeric value of the fractional cointegration parameter.}
#' }
#' @examples
#' \donttest{
#' opt <- FCVARoptions()
#' opt$gridSearch <- 0 # Disable grid search in optimization.
#' opt$dbMin <- c(0.01, 0.01) # Set lower bound for d,b.
#' opt$dbMax <- c(2.00, 2.00) # Set upper bound for d,b.
#' opt$constrained <- 0 # Impose restriction dbMax >= d >= b >= dbMin ? 1 <- yes, 0 <- no.
#' x <- votingJNP2014[, c("lib", "ir_can", "un_can")]
#' results <- FCVARestn(x, k = 2, r = 1, opt)
#' FCVAR_CharPoly <- GetCharPolyRoots(results$coeffs, opt, k = 2, r = 1, p = 3)
#' }
#' @family FCVAR postestimation functions
#' @seealso \code{FCVARoptions} to set default estimation options.
#' \code{FCVARestn} to estimate the model for which the roots are calculated.
#' \code{summary.FCVAR_roots} prints the roots to screen.
#' @note The roots are computed as the inverse eigenvalues of the
#' coefficient matrix of the companion form of the VAR.
#' @references Johansen, S. (2008). "A representation theory for a class of
#' vector autoregressive models for fractional processes,"
#' Econometric Theory 24, 651-676.
#' @export
#'
GetCharPolyRoots <- function(coeffs, opt, k, r, p) {

  b <- coeffs$db[2]

  # Top block row of the companion matrix: identity plus the long-run
  # matrix Pi = alpha * beta' when the cointegrating rank is positive.
  companion <- diag(p)
  if (r > 0) {
    companion <- companion + coeffs$alphaHat %*% t(coeffs$betaHat)
  }

  if (k > 0) {
    # Extract the i-th p x p block of the short-run coefficients GammaHat.
    gamma_block <- function(i) {
      coeffs$GammaHat[, seq((i - 1) * p + 1, i * p), drop = FALSE]
    }

    # First block also absorbs Gamma_1; subsequent blocks hold the
    # differences (Gamma_i - Gamma_{i-1}); the last block is -Gamma_k.
    companion <- companion + gamma_block(1)
    if (k > 1) {
      for (j in 2:k) {
        companion <- cbind(companion, gamma_block(j) - gamma_block(j - 1))
      }
    }
    companion <- cbind(companion, -gamma_block(k))

    # Identity rows carry the lagged state vector forward.
    companion <- rbind(companion,
                       cbind(diag(p * k),
                             matrix(0, nrow = p * k, ncol = p)))
  }

  # The roots of the characteristic polynomial are the inverse
  # eigenvalues, sorted in decreasing order of modulus.
  roots <- 1 / eigen(companion)$values
  roots <- roots[order(Mod(roots), decreasing = TRUE)]

  FCVAR_CharPoly <- structure(list(cPolyRoots = roots, b = b),
                              class = 'FCVAR_roots')

  # Optionally plot the roots against the transformed unit circle.
  if (opt$plotRoots) {
    graphics::plot(x = FCVAR_CharPoly)
  }

  return(FCVAR_CharPoly)
}
#' Print Summary of Roots of the Characteristic Polynomial
#'
#' \code{summary.FCVAR_roots} prints to screen the roots computed by
#' \code{GetCharPolyRoots}: for each root, its real part, imaginary part
#' and modulus, in a formatted table.
#'
#' @param object An S3 object of class \code{FCVAR_roots} with elements:
#' \describe{
#' \item{\code{cPolyRoots}}{A vector of the roots of the characteristic polynomial.}
#' \item{\code{b}}{A numeric value of the fractional cointegration parameter.}
#' }
#' @param ... additional arguments affecting the summary produced.
#' @return NULL
#' @examples
#' \donttest{
#' opt <- FCVARoptions()
#' opt$gridSearch <- 0 # Disable grid search in optimization.
#' x <- votingJNP2014[, c("lib", "ir_can", "un_can")]
#' results <- FCVARestn(x, k = 2, r = 1, opt)
#' FCVAR_CharPoly <- GetCharPolyRoots(results$coeffs, opt, k = 2, r = 1, p = 3)
#' summary(object = FCVAR_CharPoly)
#' }
#' @family FCVAR postestimation functions
#' @seealso \code{GetCharPolyRoots} computes the roots printed here.
#' \code{FCVARestn} to estimate the underlying model.
#' @references Johansen, S. (2008). "A representation theory for a class of
#' vector autoregressive models for fractional processes,"
#' Econometric Theory 24, 651-676.
#' @export
#'
summary.FCVAR_roots <- function(object, ...) {

  hline <- '--------------------------------------------------------------------------------\n'

  # Table header.
  cat(hline)
  cat( ' Roots of the characteristic polynomial \n')
  cat(hline)
  cat( ' Number Real part Imaginary part Modulus \n')
  cat(hline)

  # One row per root: real part, imaginary part, modulus.
  for (root_idx in seq_along(object$cPolyRoots)) {
    root <- object$cPolyRoots[root_idx]
    re_part <- Re(root)
    im_part <- Im(root)
    # Snap numerically-zero imaginary parts to exactly zero so that
    # platform-dependent signed zeros (especially 32-bit i386) do not
    # print as '-0.000'.
    if (abs(im_part) < 1e-6) {
      im_part <- 0
    }
    cat(sprintf( ' %2.0f %8.3f %8.3f %8.3f \n',
                 root_idx, re_part, im_part, Mod(root) ))
  }

  cat(hline)
}
#' Plot Roots of the Characteristic Polynomial
#'
#' \code{plot.FCVAR_roots} plots the roots computed by
#' \code{GetCharPolyRoots} together with the unit circle and its image
#' under the mapping for the fractional model, see Johansen (2008).
#'
#' @param x An S3 object of type \code{FCVAR_roots} with the following elements:
#' \describe{
#' \item{\code{cPolyRoots}}{A vector of the roots of the characteristic polynomial.}
#' \item{\code{b}}{A numeric value of the fractional cointegration parameter.}
#' }
#' @param y An argument for generic method \code{plot} that is not used in \code{plot.FCVAR_roots}.
#' @param ... Graphical parameters; \code{xlim}, \code{ylim} and \code{main}
#' are honored individually — any that is not supplied gets a default.
#' @return NULL
#' @examples
#' \donttest{
#' opt <- FCVARoptions()
#' opt$gridSearch <- 0 # Disable grid search in optimization.
#' x <- votingJNP2014[, c("lib", "ir_can", "un_can")]
#' results <- FCVARestn(x, k = 2, r = 1, opt)
#' FCVAR_CharPoly <- GetCharPolyRoots(results$coeffs, opt, k = 2, r = 1, p = 3)
#' graphics::plot(x = FCVAR_CharPoly)}
#' @family FCVAR postestimation functions
#' @seealso \code{GetCharPolyRoots} computes the roots plotted here.
#' \code{summary.FCVAR_roots} prints the roots to screen.
#' @note The roots are calculated from the companion form of the VAR,
#' where the roots are given as the inverse eigenvalues of the
#' coefficient matrix.
#' @references Johansen, S. (2008). "A representation theory for a class of
#' vector autoregressive models for fractional processes,"
#' Econometric Theory 24, 651-676.
#' @export
#'
plot.FCVAR_roots <- function(x, y = NULL, ...) {

  # Extract parameters from FCVAR_roots object.
  cPolyRoots <- x$cPolyRoots
  b <- x$b

  # Additional graphical parameters.
  dots <- list(...)

  # Now calculate the line for the transformed unit circle.
  # First do the negative half.
  unitCircle <- seq( pi, 0, by = - 0.001)
  psi <- - (pi - unitCircle)/2
  unitCircleX <- cos( - unitCircle)
  unitCircleY <- sin( - unitCircle)
  transformedUnitCircleX <- (1 - (2*cos(psi))^b*cos(b*psi))
  transformedUnitCircleY <- ( (2*cos(psi))^b*sin(b*psi))

  # Then do the positive half.
  unitCircle <- seq(0, pi, by = 0.001)
  psi <- (pi - unitCircle)/2
  unitCircleX <- c(unitCircleX, cos(unitCircle))
  unitCircleY <- c(unitCircleY, sin(unitCircle))
  transformedUnitCircleX <- c(transformedUnitCircleX, 1,
                              (1 - (2*cos(psi))^b*cos(b*psi)))
  transformedUnitCircleY <- c(transformedUnitCircleY, 0,
                              ( (2*cos(psi))^b*sin(b*psi)))

  # Determine axis limits. Any limit supplied through '...' is respected;
  # a missing limit defaults to a symmetric range around the largest
  # coordinate of either circle.
  # BUG FIX: previously, supplying exactly one of 'xlim'/'ylim' left the
  # other variable unassigned (the combined condition sent execution to the
  # else branch, which skipped the limit that WAS supplied), raising
  # "object 'xlim' not found". Each limit is now resolved independently.
  if (!('xlim' %in% names(dots)) || !('ylim' %in% names(dots))) {
    # Calculate parameters for the default axes.
    maxXYaxis <- max( c(transformedUnitCircleX, unitCircleX,
                        transformedUnitCircleY, unitCircleY) )
    minXYaxis <- min( c(transformedUnitCircleX, unitCircleX,
                        transformedUnitCircleY, unitCircleY) )
    maxXYaxis <- max( maxXYaxis, -minXYaxis )
  }
  if ('xlim' %in% names(dots)) {
    xlim <- dots$xlim
  } else {
    xlim <- 2*c(-maxXYaxis, maxXYaxis)
  }
  if ('ylim' %in% names(dots)) {
    ylim <- dots$ylim
  } else {
    ylim <- 2*c(-maxXYaxis, maxXYaxis)
  }

  # Default two-line title unless one is supplied.
  if ('main' %in% names(dots)) {
    main <- dots$main
  } else {
    main <- c('Roots of the characteristic polynomial',
              'with the image of the unit circle')
  }

  # Plot the image of the unit circle under the fractional mapping (red),
  # the unit circle itself (black), and the roots (blue points).
  graphics::plot(transformedUnitCircleX,
                 transformedUnitCircleY,
                 main = main,
                 xlab = 'Real Part of Root',
                 ylab = 'Imaginary Part of Root',
                 xlim = xlim,
                 ylim = ylim,
                 type = 'l',
                 lwd = 3,
                 col = 'red')
  graphics::lines(unitCircleX, unitCircleY, lwd = 3, col = 'black')
  graphics::points(Re(cPolyRoots), Im(cPolyRoots),
                   pch = 16, col = 'blue')
}
#' Multivariate White Noise Tests
#'
#' \code{MVWNtest} tests for white noise by running, on each series,
#' a Ljung-Box Q-test and an LM-test, plus a multivariate Q-test on the
#' whole system. \code{summary.MVWN_stats} prints these statistics.
#'
#' @param x A matrix of variables to be included in the system,
#' typically model residuals.
#' @param maxlag The number of lags for serial correlation tests.
#' @param printResults An indicator to print results to screen.
#' @return An S3 object of type \code{MVWN_stats} containing the test results,
#' including the following parameters:
#' \describe{
#' \item{\code{Q}}{A 1xp vector of Q statistics for individual series.}
#' \item{\code{pvQ}}{A 1xp vector of P-values for Q-test on individual series.}
#' \item{\code{LM}}{A 1xp vector of LM statistics for individual series.}
#' \item{\code{pvLM}}{A 1xp vector of P-values for LM-test on individual series.}
#' \item{\code{mvQ}}{A multivariate Q statistic.}
#' \item{\code{pvMVQ}}{A p-value for multivariate Q-statistic using \code{p^2*maxlag}
#' degrees of freedom.}
#' \item{\code{maxlag}}{The number of lags for serial correlation tests.}
#' \item{\code{p}}{The number of variables in the system.}
#' }
#' @examples
#' \donttest{
#' opt <- FCVARoptions()
#' opt$gridSearch <- 0 # Disable grid search in optimization.
#' x <- votingJNP2014[, c("lib", "ir_can", "un_can")]
#' results <- FCVARestn(x, k = 2, r = 1, opt)
#' MVWNtest_stats <- MVWNtest(x = results$Residuals, maxlag = 12, printResults = 1)
#' }
#' set.seed(27)
#' WN <- stats::rnorm(100)
#' RW <- cumsum(stats::rnorm(100))
#' MVWN_x <- as.matrix(data.frame(WN = WN, RW = RW))
#' MVWNtest_stats <- MVWNtest(x = MVWN_x, maxlag = 10, printResults = 1)
#' @family FCVAR postestimation functions
#' @seealso \code{FCVARestn} produces the residuals intended for this test.
#' \code{summary.MVWN_stats} prints a summary of the statistics to screen.
#' @note
#' The LM test is consistent for heteroskedastic series; the Q-test is not.
#' @export
#'
MVWNtest <- function(x, maxlag, printResults) {

  n_vars <- ncol(x)

  # Storage for the univariate statistics and p-values (1 x p each).
  Q <- matrix(0, nrow = 1, ncol = n_vars)
  LM <- matrix(0, nrow = 1, ncol = n_vars)
  pvQ <- matrix(1, nrow = 1, ncol = n_vars)
  pvLM <- matrix(1, nrow = 1, ncol = n_vars)

  # Univariate Ljung-Box Q-tests and LM-tests, one series at a time.
  for (series in seq_len(n_vars)) {

    q_res <- Qtest(x[, series, drop = FALSE], maxlag)
    Q[series] <- q_res$Qstat
    pvQ[series] <- q_res$pv

    lm_res <- LMtest(x[, series, drop = FALSE], maxlag)
    LM[series] <- lm_res$LMstat
    pvLM[series] <- lm_res$pv
  }

  # Multivariate Ljung-Box Q-test on the full system.
  mv_res <- Qtest(x[, , drop = FALSE], maxlag)

  # Collect everything in an MVWN_stats object.
  MVWNtest_stats <- structure(
    list(Q = Q,
         pvQ = pvQ,
         LM = LM,
         pvLM = pvLM,
         mvQ = mv_res$Qstat,
         pvMVQ = mv_res$pv,
         maxlag = maxlag,
         p = n_vars),
    class = 'MVWN_stats')

  if (printResults) {
    summary(MVWNtest_stats)
  }

  return(MVWNtest_stats)
}
#' Summarize Statistics for Multivariate White Noise Tests
#'
#' \code{summary.MVWN_stats} is an S3 method for objects of class
#' \code{MVWN_stats} that prints the white noise test statistics produced
#' by \code{MVWNtest} as a formatted table: one row for the multivariate
#' Q-test and one row per series for the univariate Q- and LM-tests.
#'
#' @param object An S3 object of type \code{MVWN_stats}, the output of
#' \code{MVWNtest}.
#' @param ... additional arguments affecting the summary produced.
#' @return NULL
#' @examples
#' \donttest{
#' set.seed(27)
#' WN <- stats::rnorm(100)
#' RW <- cumsum(stats::rnorm(100))
#' MVWN_x <- as.matrix(data.frame(WN = WN, RW = RW))
#' MVWNtest_stats <- MVWNtest(x = MVWN_x, maxlag = 10, printResults = 1)
#' summary(object = MVWNtest_stats)
#' }
#' @family FCVAR postestimation functions
#' @seealso \code{MVWNtest} computes the statistics printed here.
#' \code{FCVARestn} produces the residuals intended for the tests.
#' @note
#' The LM test is consistent for heteroskedastic series, the Q-test is not.
#' @export
#'
summary.MVWN_stats <- function(object, ...) {

  hline <- '---------------------------------------------\n'

  # Table header.
  cat(sprintf('\n White Noise Test Results (lag = %g)\n', object$maxlag))
  cat(hline)
  cat('Variable | Q P-val | LM P-val |\n')
  cat(hline)

  # System-wide Q-test (no multivariate LM statistic is computed).
  cat(sprintf('Multivar | %7.3f %4.3f | ---- ---- |\n', object$mvQ, object$pvMVQ))

  # One row per series: Q and LM statistics with p-values.
  for (series in seq_len(object$p)) {
    cat(sprintf('Var%g | %7.3f %4.3f | %7.3f %4.3f |\n',
                series, object$Q[series], object$pvQ[series],
                object$LM[series], object$pvLM[series] ))
  }

  cat(hline)
}
# Breusch-Godfrey Lagrange Multiplier Test for Serial Correlation
#
# \code{LMtest} performs a Breusch-Godfrey Lagrange Multiplier test
# for serial correlation.
# Note that Roxygen comments are excluded to keep this function internal.
# However, the contents of Roxygen comments are shown below for those who read the scripts.
#
# @param x A vector or Tx1 matrix of variables to be tested,
# typically model residuals.
# @param q The number of lags for the serial correlation tests.
# @return A list object \code{LMtest_out} containing the test results,
# including the following parameters:
# \describe{
# \item{\code{LM}}{The LM statistic for individual series.}
# \item{\code{pv}}{The p-value for LM-test on individual series.}
# }
# @examples
# opt <- FCVARoptions()
# opt$gridSearch <- 0 # Disable grid search in optimization.
# opt$dbMin <- c(0.01, 0.01) # Set lower bound for d,b.
# opt$dbMax <- c(2.00, 2.00) # Set upper bound for d,b.
# opt$constrained <- 0 # Impose restriction dbMax >= d >= b >= dbMin ? 1 <- yes, 0 <- no.
# x <- votingJNP2014[, c("lib", "ir_can", "un_can")]
# results <- FCVARestn(x, k = 2, r = 1, opt)
# MVWNtest_stats <- MVWNtest(x = results$Residuals, maxlag = 12, printResults = 1)
# LMtest(x = matrix(results$Residuals[, 1]), q = 12)
# LMtest(x = results$Residuals[,2, drop = FALSE], q = 12)
#
# set.seed(27)
# WN <- stats::rnorm(100)
# RW <- cumsum(stats::rnorm(100))
# LMtest(x = matrix(WN), q = 10)
# LMtest(x = matrix(RW), q = 10)
# MVWN_x <- as.matrix(data.frame(WN = WN, RW = RW))
# MVWNtest_stats <- MVWNtest(x = MVWN_x, maxlag = 10, printResults = 1)
# @family FCVAR postestimation functions
# @seealso \code{MVWNtest} calls this function to test residuals
# from the estimation results of \code{FCVARestn}.
# An alternative test is the Ljung-Box Q-test in \code{Qtest}.
# @note
# The LM test is consistent for heteroskedastic series.
# @export
#
LMtest <- function(x, q) {
  # Breusch-Godfrey Lagrange Multiplier test for serial correlation.
  #
  # x: T x 1 matrix (typically model residuals); q: number of lags to test.
  # Returns a list with the LM statistic ('LMstat', a 1 x 1 matrix) and its
  # p-value ('pv'), based on a chi-squared distribution with q degrees of
  # freedom. The LM test is consistent for heteroskedastic series.

  cap_T <- nrow(x)

  # Demean the series.
  x <- x - mean(x)

  # Dependent values: observations q+1, ..., T.
  y <- x[seq(q + 1, cap_T), , drop = FALSE]

  # Build the regressor matrix whose first q columns are lags 1, ..., q of x.
  # BUG FIX: the loop previously ran 'for (i in 1:(q - 1))', which for
  # q == 1 is the descending sequence 1:0 and prepended two spurious,
  # overlapping columns (the result was correct only by accident because
  # z[, 1:q] happened to select the right column). seq_len(q - 1) yields
  # an empty loop when q == 1.
  z <- x[seq(1, cap_T - q), , drop = FALSE]
  for (i in seq_len(q - 1)) {
    z <- cbind(x[seq(i + 1, cap_T - q + i), , drop = FALSE], z)
  }

  # Score contributions: each lag column weighted by the residual.
  e <- y
  s <- z[, 1:q, drop = FALSE] * kronecker(matrix(1, 1, q), e)
  sbar <- t(colMeans(s))

  # Center the scores and form their covariance S.
  s <- s - kronecker(matrix(1, nrow(s)), sbar)
  S <- t(s) %*% s/cap_T

  # LM statistic: T * sbar' S^{-1} sbar ~ chi-squared(q) under the null.
  LMstat <- cap_T*sbar %*% solve(S) %*% t(sbar)
  pv <- 1 - stats::pchisq(LMstat, q)

  # Output a list of results.
  LMtest_out <- list(
    LMstat = LMstat,
    pv = pv
  )
  return(LMtest_out)
}
# Ljung-Box Q-test for Serial Correlation
#
# \code{Qtest} performs a (multivariate) Ljung-Box Q-test for serial correlation; see
# Luetkepohl (2005, New Introduction to Multiple Time Series Analysis, p. 169).
#
# @param x A vector or Tx1 matrix of variables to be tested,
# typically model residuals.
# @param maxlag The number of lags for the serial correlation tests.
# @return A list object \code{LMtest_out} containing the test results,
# including the following parameters:
# \describe{
# \item{\code{Qstat}}{A 1xp vector of Q statistics for individual series.}
# \item{\code{pv}}{A 1xp vector of P-values for Q-test on individual series.}
# }
# @examples
# opt <- FCVARoptions()
# opt$gridSearch <- 0 # Disable grid search in optimization.
# opt$dbMin <- c(0.01, 0.01) # Set lower bound for d,b.
# opt$dbMax <- c(2.00, 2.00) # Set upper bound for d,b.
# opt$constrained <- 0 # Impose restriction dbMax >= d >= b >= dbMin ? 1 <- yes, 0 <- no.
# x <- votingJNP2014[, c("lib", "ir_can", "un_can")]
# results <- FCVARestn(x, k = 2, r = 1, opt)
# MVWNtest_stats <- MVWNtest(x = results$Residuals, maxlag = 12, printResults = 1)
# Qtest(x = results$Residuals, maxlag = 12)
# Qtest(x = matrix(results$Residuals[, 1]), maxlag = 12)
# Qtest(x = results$Residuals[,2, drop = FALSE], maxlag = 12)
#
# set.seed(27)
# WN <- stats::rnorm(100)
# RW <- cumsum(stats::rnorm(100))
# MVWN_x <- as.matrix(data.frame(WN = WN, RW = RW))
# Qtest(x = MVWN_x, maxlag = 10)
# Qtest(x = matrix(WN), maxlag = 10)
# Qtest(x = matrix(RW), maxlag = 10)
# @family FCVAR postestimation functions
# @seealso \code{MVWNtest} calls this function to test residuals
# from the estimation results of \code{FCVARestn}.
# An alternative test is the Breusch-Godfrey Lagrange Multiplier Test in \code{LMtest}.
# @note
# The LM test in \code{LMtest} is consistent for heteroskedastic series,
# while the Q-test is not.
# @references H. Luetkepohl (2005) "New Introduction to Multiple Time Series Analysis," Springer, Berlin.
# @export
#
Qtest <- function(x, maxlag) {
  # (Multivariate) Ljung-Box Q-test for serial correlation, following
  # Luetkepohl (2005, New Introduction to Multiple Time Series Analysis,
  # p. 169).
  #
  # x: T x p matrix (typically model residuals);
  # maxlag: number of autocorrelation lags included in the statistic.
  # Returns a list with the Q statistic ('Qstat') and its p-value ('pv'),
  # computed on p^2 * maxlag degrees of freedom.

  cap_T <- nrow(x)
  p <- ncol(x)

  # Contemporaneous (lag-0) covariance, normalized by T, and its inverse
  # (computed once and reused across lags).
  C0 <- crossprod(x) / cap_T
  C0inv <- solve(C0)

  # Accumulate the statistic lag by lag; the autocovariance at each lag
  # is normalized by (T - lag) rather than T.
  Qstat <- 0
  for (lag in seq_len(maxlag)) {
    Ci <- crossprod(x[seq(lag + 1, cap_T), , drop = FALSE],
                    x[seq(1, cap_T - lag), , drop = FALSE]) / (cap_T - lag)
    Qstat <- Qstat + sum(diag( (t(Ci) %*% C0inv) %*% (Ci %*% C0inv) )) / (cap_T - lag)
  }
  Qstat <- Qstat * cap_T * (cap_T + 2)

  # P-value is calculated with p^2 * maxlag degrees of freedom.
  pv <- 1 - stats::pchisq(Qstat, p * p * maxlag)

  # Output a list of results.
  Qtest_out <- list(
    Qstat = Qstat,
    pv = pv
  )
  return(Qtest_out)
}
|
# Fit an elastic-net regression on the NSCLC training set and append the
# fitted glmnet path to a log file.
library(glmnet)

# Training data: column 1 is the response, columns 4+ are the features.
# NOTE(review): columns 2-3 are skipped — presumably identifiers; confirm
# against the data file's layout.
mydata = read.table("./TrainingSet/RF/NSCLC.csv",head=T,sep=",")
x = as.matrix(mydata[,4:ncol(mydata)])
y = as.matrix(mydata[,1])

# Fixed seed so the 10-fold CV split is reproducible.
set.seed(123)

# 10-fold cross-validated elastic net (alpha = 0.55 mixes ridge and lasso),
# scored by mean absolute error; features are used unstandardized.
glm = cv.glmnet(x,y,nfolds=10,type.measure="mae",alpha=0.55,family="gaussian",standardize=FALSE)

# Redirect console output so the fitted path is appended to the log file.
sink('./Model/EN/Classifier/NSCLC/NSCLC_064.txt',append=TRUE)
print(glm$glmnet.fit)
sink()
|
/Model/EN/Classifier/NSCLC/NSCLC_064.R
|
no_license
|
leon1003/QSMART
|
R
| false
| false
| 348
|
r
|
# Fit an elastic-net regression on the NSCLC training set and append the
# fitted glmnet path to a log file.
library(glmnet)

# Training data: column 1 is the response, columns 4+ are the features.
# NOTE(review): columns 2-3 are skipped — presumably identifiers; confirm
# against the data file's layout.
mydata = read.table("./TrainingSet/RF/NSCLC.csv",head=T,sep=",")
x = as.matrix(mydata[,4:ncol(mydata)])
y = as.matrix(mydata[,1])

# Fixed seed so the 10-fold CV split is reproducible.
set.seed(123)

# 10-fold cross-validated elastic net (alpha = 0.55 mixes ridge and lasso),
# scored by mean absolute error; features are used unstandardized.
glm = cv.glmnet(x,y,nfolds=10,type.measure="mae",alpha=0.55,family="gaussian",standardize=FALSE)

# Redirect console output so the fitted path is appended to the log file.
sink('./Model/EN/Classifier/NSCLC/NSCLC_064.txt',append=TRUE)
print(glm$glmnet.fit)
sink()
|
# Fit Gaussian-process models (via Stan) of Russell Westbrook's shooting
# percentage over game time, plus a per-quarter grouped model, and plot both.
library(tidyverse)
library(lubridate)
library(rstan)
library(shinystan)

#df2 = read_csv('~/gp/discourse_westbrook/westbrook.csv')

# Shot log; GAME_CLOCK is kept as character so lubridate::ms() can parse it.
df0 = read_csv('~/gp/shots.csv', col_types = cols(.default = "?", GAME_CLOCK = "c"))

# Westbrook's shots in regulation (periods 1-4), with the clock converted
# to minutes remaining in the game and a binary made/missed outcome.
df = df0 %>% select(time = GAME_CLOCK, period = PERIOD, result = SHOT_RESULT, name = NAME, pts_type = PTS_TYPE) %>%
  filter(name == "Russell Westbrook" & period < 5) %>%
  mutate(resultBin = as.numeric(result == "made")) %>%
  mutate(time = ms(time)) %>%
  mutate(remaining_minutes = (4 - period) * 12 + as.numeric(as.duration(time) / 60.0))

# gtime: elapsed game time in [0, 48]; x: gtime rescaled to [-0.5, 0.5]
# as the GP covariate; y: made-shot indicator.
df2 = df %>% mutate(gtime = 48 - remaining_minutes,
                    x = -(remaining_minutes - 24) / 48.0,
                    y = resultBin) %>%
  select(gtime, period, x, y, result)

# Data for the GP model (M = 10 basis functions, length-scale 'scale').
sdata = list(N = dim(df2)[[1]],
             M = 10,
             scale = 0.25,
             x = df2$x,
             y = df2$y)
fit = stan('~/gp/models/westbrook.stan', data = sdata, chains = 4, cores = 4, iter = 2000)
launch_shinystan(fit)

# Exact GP fit on the same data, for comparison with the approximation.
fit_exact = stan('~/gp/models/westbrook_exact.stan', data = sdata, chains = 4, cores = 4, iter = 2000)

# Data for the grouped (per-quarter) model: T = 4 periods.
sdata = list(N = dim(df2)[[1]],
             T = 4,
             x = df2$period,
             y = df2$y)
fit2 = stan('~/gp/models/westbrook_grouped.stan', data = sdata, chains = 4, cores = 4)

# Draw n posterior sample paths of the named parameters from a stanfit
# object, returned as a long tibble with columns idx, data, name.
get_lines = function(fit, vnames, n = 100) {
  a = extract(fit, vnames)
  # Use the same n posterior draws for every requested parameter.
  idxs = sample(nrow(a[[vnames[[1]]]]), n)
  out = as_tibble()
  for(i in 1:length(vnames)) {
    vname = vnames[[i]];
    d = a[[vname]][idxs,]
    colnames(d) <- 1:ncol(a[[vnames[[1]]]])
    d = as_tibble(d) %>% gather(idx, data)
    d$name = vname
    out = bind_rows(out, d)
  }
  out %>% mutate(idx = as.numeric(idx))
}

# A small set of draws for plotting individual sample paths...
a = get_lines(fit, c("f"), 50) %>% rename(f = data) %>% select(idx, f)
b = as_tibble(list(idx = 1:nrow(df2), time = df2$gtime))
out2 = inner_join(a, b, by = "idx")

# ...and a larger set for computing posterior summaries.
a = get_lines(fit, c("f"), 1000) %>% rename(f = data) %>% select(idx, f)
b = as_tibble(list(idx = 1:nrow(df2), time = df2$gtime))
out = inner_join(a, b, by = "idx")

# Posterior mean, 95% interval, and spread of f at each game time.
summary = out %>% group_by(time) %>%
  summarize(mean = mean(f),
            m = quantile(f, 0.025),
            p = quantile(f, 0.975),
            sd = sd(f),
            esd = sqrt(mean * (1 - mean))) %>%
  ungroup()

# Per-quarter posterior summaries, placed at quarter midpoints (6/18/30/42).
out3 = extract(fit2, "f")$f
colnames(out3) = 1:4
summary2 = left_join(as_tibble(out3) %>% gather(period, f),
                     as_tibble(list(period = c("1", "2", "3", "4"), time = c(6.0, 18.0, 30.0, 42.0))),
                     by = "period") %>%
  group_by(time) %>%
  summarize(mean = mean(f),
            m = quantile(f, 0.025),
            p = quantile(f, 0.975)) %>%
  ungroup()

# Plot the GP estimate (ribbon + faint sample draws) with the per-quarter
# estimates overlaid as error bars.
summary %>% ggplot(aes(time, mean)) +
  geom_ribbon(aes(ymin = m, ymax = p), alpha = 0.25, fill = "blue") +
  geom_line() +
  geom_line(aes(time, p), col = "blue", alpha = 0.5) +
  geom_line(aes(time, m), col = "blue", alpha = 0.5) +
  geom_point(data = out2, aes(time, f), size = 0.1, alpha = 0.01) +
  #geom_boxplot(data = summary2, aes(time, f, group = time)) +
  geom_errorbar(data = summary2, aes(time, ymin = m, ymax = p)) +
  geom_point(data = summary2, aes(time, mean)) +
  xlab("Game time") +
  ylab("Shooting percentage") +
  ggtitle("Russell Westbrook's shooting percentage (w/ est. 95% conf. intervals)")

# Estimated shooting percentage at the start and end of the game.
summary %>% arrange(time) %>% slice(c(1, n()))
|
/westbrook.R
|
no_license
|
bbbales2/gp
|
R
| false
| false
| 3,333
|
r
|
# Fit Gaussian-process models (via Stan) of Russell Westbrook's shooting
# percentage over game time, plus a per-quarter grouped model, and plot both.
library(tidyverse)
library(lubridate)
library(rstan)
library(shinystan)

#df2 = read_csv('~/gp/discourse_westbrook/westbrook.csv')

# Shot log; GAME_CLOCK is kept as character so lubridate::ms() can parse it.
df0 = read_csv('~/gp/shots.csv', col_types = cols(.default = "?", GAME_CLOCK = "c"))

# Westbrook's shots in regulation (periods 1-4), with the clock converted
# to minutes remaining in the game and a binary made/missed outcome.
df = df0 %>% select(time = GAME_CLOCK, period = PERIOD, result = SHOT_RESULT, name = NAME, pts_type = PTS_TYPE) %>%
  filter(name == "Russell Westbrook" & period < 5) %>%
  mutate(resultBin = as.numeric(result == "made")) %>%
  mutate(time = ms(time)) %>%
  mutate(remaining_minutes = (4 - period) * 12 + as.numeric(as.duration(time) / 60.0))

# gtime: elapsed game time in [0, 48]; x: gtime rescaled to [-0.5, 0.5]
# as the GP covariate; y: made-shot indicator.
df2 = df %>% mutate(gtime = 48 - remaining_minutes,
                    x = -(remaining_minutes - 24) / 48.0,
                    y = resultBin) %>%
  select(gtime, period, x, y, result)

# Data for the GP model (M = 10 basis functions, length-scale 'scale').
sdata = list(N = dim(df2)[[1]],
             M = 10,
             scale = 0.25,
             x = df2$x,
             y = df2$y)
fit = stan('~/gp/models/westbrook.stan', data = sdata, chains = 4, cores = 4, iter = 2000)
launch_shinystan(fit)

# Exact GP fit on the same data, for comparison with the approximation.
fit_exact = stan('~/gp/models/westbrook_exact.stan', data = sdata, chains = 4, cores = 4, iter = 2000)

# Data for the grouped (per-quarter) model: T = 4 periods.
sdata = list(N = dim(df2)[[1]],
             T = 4,
             x = df2$period,
             y = df2$y)
fit2 = stan('~/gp/models/westbrook_grouped.stan', data = sdata, chains = 4, cores = 4)

# Draw n posterior sample paths of the named parameters from a stanfit
# object, returned as a long tibble with columns idx, data, name.
get_lines = function(fit, vnames, n = 100) {
  a = extract(fit, vnames)
  # Use the same n posterior draws for every requested parameter.
  idxs = sample(nrow(a[[vnames[[1]]]]), n)
  out = as_tibble()
  for(i in 1:length(vnames)) {
    vname = vnames[[i]];
    d = a[[vname]][idxs,]
    colnames(d) <- 1:ncol(a[[vnames[[1]]]])
    d = as_tibble(d) %>% gather(idx, data)
    d$name = vname
    out = bind_rows(out, d)
  }
  out %>% mutate(idx = as.numeric(idx))
}

# A small set of draws for plotting individual sample paths...
a = get_lines(fit, c("f"), 50) %>% rename(f = data) %>% select(idx, f)
b = as_tibble(list(idx = 1:nrow(df2), time = df2$gtime))
out2 = inner_join(a, b, by = "idx")

# ...and a larger set for computing posterior summaries.
a = get_lines(fit, c("f"), 1000) %>% rename(f = data) %>% select(idx, f)
b = as_tibble(list(idx = 1:nrow(df2), time = df2$gtime))
out = inner_join(a, b, by = "idx")

# Posterior mean, 95% interval, and spread of f at each game time.
summary = out %>% group_by(time) %>%
  summarize(mean = mean(f),
            m = quantile(f, 0.025),
            p = quantile(f, 0.975),
            sd = sd(f),
            esd = sqrt(mean * (1 - mean))) %>%
  ungroup()

# Per-quarter posterior summaries, placed at quarter midpoints (6/18/30/42).
out3 = extract(fit2, "f")$f
colnames(out3) = 1:4
summary2 = left_join(as_tibble(out3) %>% gather(period, f),
                     as_tibble(list(period = c("1", "2", "3", "4"), time = c(6.0, 18.0, 30.0, 42.0))),
                     by = "period") %>%
  group_by(time) %>%
  summarize(mean = mean(f),
            m = quantile(f, 0.025),
            p = quantile(f, 0.975)) %>%
  ungroup()

# Plot the GP estimate (ribbon + faint sample draws) with the per-quarter
# estimates overlaid as error bars.
summary %>% ggplot(aes(time, mean)) +
  geom_ribbon(aes(ymin = m, ymax = p), alpha = 0.25, fill = "blue") +
  geom_line() +
  geom_line(aes(time, p), col = "blue", alpha = 0.5) +
  geom_line(aes(time, m), col = "blue", alpha = 0.5) +
  geom_point(data = out2, aes(time, f), size = 0.1, alpha = 0.01) +
  #geom_boxplot(data = summary2, aes(time, f, group = time)) +
  geom_errorbar(data = summary2, aes(time, ymin = m, ymax = p)) +
  geom_point(data = summary2, aes(time, mean)) +
  xlab("Game time") +
  ylab("Shooting percentage") +
  ggtitle("Russell Westbrook's shooting percentage (w/ est. 95% conf. intervals)")

# Estimated shooting percentage at the start and end of the game.
summary %>% arrange(time) %>% slice(c(1, n()))
|
# argsto_cmd --- print arguments up to a delimiter
# Command entry point for "argsto": echoes command-line arguments, one per
# line, from the group selected by <count> delimiter occurrences.
# Usage: argsto <delim> [<count> [<start> [<levels>]]]
subroutine argsto_cmd
# i       : scratch index for ctoi
# count   : number of delimiter occurrences to skip before printing
# back_no : passed to access_arg; presumably how many invocation levels to
#           look back for arguments -- confirm against access_arg
# nd      : delimiters seen so far
# st      : current argument position
integer i, count, back_no, nd, st
integer getarg, ctoi
pointer ptr
pointer access_arg
character delim (MAXARG), arg (10)
character lscmpk
# The delimiter argument is mandatory; abort with a usage message otherwise.
if (getarg (1, delim, MAXARG) == EOF)
call error ("Usage: argsto <delim> [<count> [<start> [<levels>]]]"p)
# <count> defaults to 0 (print the first group).
if (getarg (2, arg, 10) == EOF)
count = 0
else {
i = 1
count = ctoi (arg, i)
}
# <start> defaults to argument position 1.
if (getarg (3, arg, 10) == EOF)
st = 1
else {
i = 1
st = ctoi (arg, i)
}
# <levels> defaults to 1.
if (getarg (4, arg, 10) == EOF)
back_no = 1
else {
i = 1
back_no = ctoi (arg, i)
}
nd = 0
ptr = access_arg (back_no, st)
# Walk arguments until EOF or past the selected group: count delimiter
# matches (lscmpk apparently yields '=' on equal strings -- confirm) and
# echo only the arguments in group number <count>.
while (ptr ~= EOF && nd <= count) {
if (lscmpk (ptr, delim) == '='c)
nd += 1
else if (nd == count) {
call lsputf (ptr, STDOUT)
call putch (NEWLINE, STDOUT)
}
st += 1
ptr = access_arg (back_no, st)
}
stop
end
|
/swt/src/lib/sh/src/intcmd.u/argsto_cmd.r
|
no_license
|
arnoldrobbins/gt-swt
|
R
| false
| false
| 1,008
|
r
|
# argsto_cmd --- print arguments up to a delimiter
# Command entry point for "argsto": echoes command-line arguments, one per
# line, from the group selected by <count> delimiter occurrences.
# Usage: argsto <delim> [<count> [<start> [<levels>]]]
subroutine argsto_cmd
# i       : scratch index for ctoi
# count   : number of delimiter occurrences to skip before printing
# back_no : passed to access_arg; presumably how many invocation levels to
#           look back for arguments -- confirm against access_arg
# nd      : delimiters seen so far
# st      : current argument position
integer i, count, back_no, nd, st
integer getarg, ctoi
pointer ptr
pointer access_arg
character delim (MAXARG), arg (10)
character lscmpk
# The delimiter argument is mandatory; abort with a usage message otherwise.
if (getarg (1, delim, MAXARG) == EOF)
call error ("Usage: argsto <delim> [<count> [<start> [<levels>]]]"p)
# <count> defaults to 0 (print the first group).
if (getarg (2, arg, 10) == EOF)
count = 0
else {
i = 1
count = ctoi (arg, i)
}
# <start> defaults to argument position 1.
if (getarg (3, arg, 10) == EOF)
st = 1
else {
i = 1
st = ctoi (arg, i)
}
# <levels> defaults to 1.
if (getarg (4, arg, 10) == EOF)
back_no = 1
else {
i = 1
back_no = ctoi (arg, i)
}
nd = 0
ptr = access_arg (back_no, st)
# Walk arguments until EOF or past the selected group: count delimiter
# matches (lscmpk apparently yields '=' on equal strings -- confirm) and
# echo only the arguments in group number <count>.
while (ptr ~= EOF && nd <= count) {
if (lscmpk (ptr, delim) == '='c)
nd += 1
else if (nd == count) {
call lsputf (ptr, STDOUT)
call putch (NEWLINE, STDOUT)
}
st += 1
ptr = access_arg (back_no, st)
}
stop
end
|
## Import the necessary libraries
library(ggplot2)
library(RColorBrewer)
library(plyr)
library(dplyr)
## Read the datasets
# NEI: PM2.5 emissions records; SCC: source classification lookup table.
NEI <- readRDS("summarySCC_PM25.rds")
SCC <- readRDS("Source_Classification_Code.rds")
## Plotting the graph
# All EI.Sector labels that mention "Vehicles" (case-insensitive).
vehicles1 <- unique(grep("Vehicles", SCC$EI.Sector, ignore.case = TRUE, value = TRUE))
# SCC codes belonging to those vehicle sectors (kept as a 1-column data frame).
vehicles <- SCC[SCC$EI.Sector %in% vehicles1, ]["SCC"]
# Restrict NEI to vehicle sources in Baltimore City (fips == "24510").
vehiclesBaltimore <- NEI[NEI$SCC %in% vehicles$SCC & NEI$fips == "24510",]
# Total vehicle emissions per year.
vehiclesBaltimoreYearly <- ddply(vehiclesBaltimore, .(year), function(x) sum(x$Emissions))
colnames(vehiclesBaltimoreYearly)[2] <- "emissions"
# NOTE(review): hard-coded absolute output path; consider a relative path.
png(file="J:/datasciencecoursera/Week4_Course4/Plot5.png")
qplot(year, emissions, data = vehiclesBaltimoreYearly, geom = "line", color = emissions, size = 1) + ggtitle("PM2.5 Emissions by Motor Vehicles in Baltimore City") + xlab("Year") + ylab("PM2.5 Emissions in Tons")
dev.off()
|
/Week4_Course4/Plot5.R
|
no_license
|
khushaliverma27/datasciencecoursera
|
R
| false
| false
| 876
|
r
|
## Import the necessary libraries
library(ggplot2)
library(RColorBrewer)
library(plyr)
library(dplyr)
## Read the datasets
# NEI: PM2.5 emissions records; SCC: source classification lookup table.
NEI <- readRDS("summarySCC_PM25.rds")
SCC <- readRDS("Source_Classification_Code.rds")
## Plotting the graph
# All EI.Sector labels that mention "Vehicles" (case-insensitive).
vehicles1 <- unique(grep("Vehicles", SCC$EI.Sector, ignore.case = TRUE, value = TRUE))
# SCC codes belonging to those vehicle sectors (kept as a 1-column data frame).
vehicles <- SCC[SCC$EI.Sector %in% vehicles1, ]["SCC"]
# Restrict NEI to vehicle sources in Baltimore City (fips == "24510").
vehiclesBaltimore <- NEI[NEI$SCC %in% vehicles$SCC & NEI$fips == "24510",]
# Total vehicle emissions per year.
vehiclesBaltimoreYearly <- ddply(vehiclesBaltimore, .(year), function(x) sum(x$Emissions))
colnames(vehiclesBaltimoreYearly)[2] <- "emissions"
# NOTE(review): hard-coded absolute output path; consider a relative path.
png(file="J:/datasciencecoursera/Week4_Course4/Plot5.png")
qplot(year, emissions, data = vehiclesBaltimoreYearly, geom = "line", color = emissions, size = 1) + ggtitle("PM2.5 Emissions by Motor Vehicles in Baltimore City") + xlab("Year") + ylab("PM2.5 Emissions in Tons")
dev.off()
|
-##This function is function for written for week 3 assignment, its called makeCacheMatrix gets a matrix as an input, set the value of the matrix,
-#get the value of the matrix, set the inverse Matrix and get the inverse Matrix. The matrix object
-#can cache its own object.
-
-#<<- operator is used to assign a value to an object in an environment that is different
-#from the current environment
-
-#take the matrix as an input
makeCacheMatrix <- function(x = matrix()) {
- invMatrix <- NULL
-
-#set the value of the Matrix
- setMatrix <- function(y) {
- x <<- y
- invMatrix <<- NULL
- }
-
- getMatrix <- function() x #get the value of the Matrix
- setInverse <- function(inverse) invMatrix <<- inverse #set the value of the invertible matrix
- getInverse <- function() invMatrix #get the value of the invertible matrix
- list(setMatrix = setMatrix, getMatrix = getMatrix,
- setInverse = setInverse, getInverse = getInverse)
}
-## The function cacheSolve takes the output of the previous matrix makeCacheMatrix(matrix) as an
-# input and checks inverse matrix from makeCacheMatrix(matrix) has any value in it or not.
-# In case inverse matrix from makeCacheMatrix((matrix) is empty, it gets the original matrix data from
-# and set the invertible matrix by using the solve function.
-# In case inverse matrix from makeCacheMatrix((matrix) has some value in it (always works
-#after running the code 1st time), it returns a message "Getting Cached Invertible Matrix"
-#and the cached object
-
-
+## Write a short comment describing this function
cacheSolve <- function(x, ...) {
-
-#get the value of the invertible matrix from the makeCacheMatrix function
- invMatrix <- x$getInverse()
- if(!is.null(invMatrix)) { #if inverse matrix is not NULL
- message("Getting Cached Invertible Matrix") #Type message: Getting Cached Invertible Matrix
- return(invMatrix) #return the invertible matrix
- }
-
-#if value of the invertible matrix is NULL then
- MatrixData <- x$getMatrix() #get the original Matrix Data
- invMatrix <- solve(MatrixData, ...) #use solve function to inverse the matrix
- x$setInverse(invMatrix) #set the invertible matrix
- return(invMatrix) #return the invertible matrix
+ ## Return a matrix that is the inverse of 'x'
}
|
/CacheMatrix.R
|
no_license
|
Pooyafarah/datasciencecoursera
|
R
| false
| false
| 2,617
|
r
|
-##This function is function for written for week 3 assignment, its called makeCacheMatrix gets a matrix as an input, set the value of the matrix,
-#get the value of the matrix, set the inverse Matrix and get the inverse Matrix. The matrix object
-#can cache its own object.
-
-#<<- operator is used to assign a value to an object in an environment that is different
-#from the current environment
-
-#take the matrix as an input
makeCacheMatrix <- function(x = matrix()) {
- invMatrix <- NULL
-
-#set the value of the Matrix
- setMatrix <- function(y) {
- x <<- y
- invMatrix <<- NULL
- }
-
- getMatrix <- function() x #get the value of the Matrix
- setInverse <- function(inverse) invMatrix <<- inverse #set the value of the invertible matrix
- getInverse <- function() invMatrix #get the value of the invertible matrix
- list(setMatrix = setMatrix, getMatrix = getMatrix,
- setInverse = setInverse, getInverse = getInverse)
}
-## The function cacheSolve takes the output of the previous matrix makeCacheMatrix(matrix) as an
-# input and checks inverse matrix from makeCacheMatrix(matrix) has any value in it or not.
-# In case inverse matrix from makeCacheMatrix((matrix) is empty, it gets the original matrix data from
-# and set the invertible matrix by using the solve function.
-# In case inverse matrix from makeCacheMatrix((matrix) has some value in it (always works
-#after running the code 1st time), it returns a message "Getting Cached Invertible Matrix"
-#and the cached object
-
-
+## Write a short comment describing this function
cacheSolve <- function(x, ...) {
-
-#get the value of the invertible matrix from the makeCacheMatrix function
- invMatrix <- x$getInverse()
- if(!is.null(invMatrix)) { #if inverse matrix is not NULL
- message("Getting Cached Invertible Matrix") #Type message: Getting Cached Invertible Matrix
- return(invMatrix) #return the invertible matrix
- }
-
-#if value of the invertible matrix is NULL then
- MatrixData <- x$getMatrix() #get the original Matrix Data
- invMatrix <- solve(MatrixData, ...) #use solve function to inverse the matrix
- x$setInverse(invMatrix) #set the invertible matrix
- return(invMatrix) #return the invertible matrix
+ ## Return a matrix that is the inverse of 'x'
}
|
library(nloptr)
library(modelsummary)
#4
# Simulate a linear model: N = 100,000 observations, K = 10 regressors
# (first column overwritten with 1 as an intercept), noise sd = 0.5.
set.seed(100)
N <-100000
K <-10
sigma <- 0.5
X<-matrix(rnorm(N*K,mean = 0,sd=sigma),N,K)
X[,1] <- 1
eps <- rnorm(N,mean=0,sd=sigma)
beta <- matrix(c(1.5 ,-1 ,-0.25 ,0.75 ,3.5 ,-2 ,0.5 ,1 ,1.25,2),K,1)
Y <- X%*%beta+eps
#5
# Closed-form OLS estimate: (X'X)^{-1} X'Y.
beta_OLS <- solve(t(X)%*%X)%*%(t(X)%*%Y)
beta_OLS
#6
# Sum-of-squared-residuals objective for least squares: ||y - x beta_h||^2.
objfun <- function(beta_h, y, x) {
  resid <- y - x %*% beta_h
  sum(resid^2)
}
# Analytic gradient of objfun with respect to beta_g: -2 * x'(y - x beta_g).
# Bug fix: the original referenced the global `X` in the residual instead of
# the argument `x`, so the gradient was wrong whenever x != X.
gradient <- function(beta_g, y, x) {
  as.vector(-2 * t(x) %*% (y - x %*% beta_g))
}
# initial values (normal draws, despite the original comment saying uniform)
beta0 <- matrix(rnorm(K,mean = 0,sd=sigma),K,1) #start at random normal draws, one per coefficient
# Batch gradient descent minimizing `object` starting from `beta_i`.
#
# y, x       : response and design matrix
# beta_i     : initial coefficient vector
# object     : objective, called as object(beta, y, x)
# grad       : gradient, called as grad(beta, y, x)
# learn_rate : step size
# tol        : stop when the objective improves by less than this amount
# max_iter   : hard cap on iterations
#
# Returns the coefficient vector at convergence (or at max_iter).
#
# Fixes relative to the original: the running objective value is now updated
# every iteration (it was frozen at its initial value, breaking the
# convergence test), and the unused `c`/`yhat` intermediates -- which leaked
# the globals `X` and `sigma` into the function -- were removed.
gradientDesc <- function(y, x, beta_i, object, grad, learn_rate, tol, max_iter) {
  m <- beta_i
  mse <- object(m, y, x)
  iterations <- 0
  repeat {
    # One gradient step.
    m <- m - learn_rate * grad(m, y, x)
    mse_new <- object(m, y, x)
    # Converged: improvement fell below tolerance.
    if (mse - mse_new <= tol) {
      return(m)
    }
    mse <- mse_new
    iterations <- iterations + 1
    if (iterations > max_iter) {
      return(m)
    }
  }
}
# Run the function
# Hand-rolled gradient descent with a tiny step size and tight tolerance.
beta_grad=gradientDesc(Y, X, beta0,objfun,gradient,0.0000003, 0.0000000001, 1000)
#7
# L-BFGS via nloptr, using the analytic gradient.
options <- list("algorithm"="NLOPT_LD_LBFGS","xtol_rel"=1.0e-6,"maxeval"=1e3)
beta_BFGS <- nloptr( x0=beta0,eval_f=objfun,eval_grad_f=gradient,opts=options,y=Y,x=X)
beta_BFGS$solution
# Derivative-free Nelder-Mead for comparison.
options <- list("algorithm"="NLOPT_LN_NELDERMEAD","xtol_rel"=1.0e-8)
beta_NM <- nloptr( x0=beta0,eval_f=objfun,opts=options,y=Y,x=X)
beta_NM$solution
#8
# Gradient of the negative Gaussian log-likelihood with respect to
# theta = c(beta, sigma).
# Bug fix: the original referenced the globals `X`/`Y` instead of the
# arguments `x`/`y`.
gradient_MLE <- function(theta, y, x) {
  grad <- as.vector(rep(0, length(theta)))
  beta <- theta[1:(length(theta) - 1)]
  sig <- theta[length(theta)]
  # d/dbeta: -x'(y - x beta) / sigma^2
  grad[1:(length(theta) - 1)] <- -t(x) %*% (y - x %*% beta) / (sig^2)
  # d/dsigma: n/sigma - ||y - x beta||^2 / sigma^3
  grad[length(theta)] <- dim(x)[1] / sig - crossprod(y - x %*% beta) / (sig^3)
  grad
}
# Least-squares objective over theta = c(beta, sigma): the trailing sigma
# element of beta_h is dropped and only the beta part enters the residual.
objfun_MLE <- function(beta_h, y, x) {
  beta_part <- beta_h[1:(length(beta_h) - 1)]
  sum((y - x %*% beta_part)^2)
}
#theta0 <- runif(dim(X)[2]+1)
# Start the MLE at the OLS coefficients plus a random initial sigma.
theta0 <- append(as.vector(summary(lm(Y~X-1))$coefficients[,1]),runif(1))
options <- list("algorithm"="NLOPT_LD_LBFGS","xtol_rel"=1.0e-6,"maxeval"=1e3)
beta_MLE <- nloptr( x0=theta0,eval_f=objfun_MLE,eval_grad_f=gradient_MLE,opts=options,y=Y,x=X)
round(beta_MLE$solution,3)
#9
# Export the OLS regression table to LaTeX.
estimate <-lm(Y~X-1)
modelsummary(estimate, output = 'latex')
|
/ProblemSets/PS8/PS8_Khademorezaian.R
|
permissive
|
town0041/DScourseS21
|
R
| false
| false
| 2,489
|
r
|
library(nloptr)
library(modelsummary)
#4
set.seed(100)
N <-100000
K <-10
sigma <- 0.5
X<-matrix(rnorm(N*K,mean = 0,sd=sigma),N,K)
X[,1] <- 1
eps <- rnorm(N,mean=0,sd=sigma)
beta <- matrix(c(1.5 ,-1 ,-0.25 ,0.75 ,3.5 ,-2 ,0.5 ,1 ,1.25,2),K,1)
Y <- X%*%beta+eps
#5
beta_OLS <- solve(t(X)%*%X)%*%(t(X)%*%Y)
beta_OLS
#6
# Sum-of-squared-residuals objective for least squares: ||y - x beta_h||^2.
objfun <- function(beta_h, y, x) {
  resid <- y - x %*% beta_h
  sum(resid^2)
}
# Analytic gradient of objfun with respect to beta_g: -2 * x'(y - x beta_g).
# Bug fix: the original referenced the global `X` in the residual instead of
# the argument `x`, so the gradient was wrong whenever x != X.
gradient <- function(beta_g, y, x) {
  as.vector(-2 * t(x) %*% (y - x %*% beta_g))
}
# initial values
beta0 <- matrix(rnorm(K,mean = 0,sd=sigma),K,1) #start at uniform random numbers equal to number of coefficients
# Batch gradient descent minimizing `object` starting from `beta_i`.
#
# y, x       : response and design matrix
# beta_i     : initial coefficient vector
# object     : objective, called as object(beta, y, x)
# grad       : gradient, called as grad(beta, y, x)
# learn_rate : step size
# tol        : stop when the objective improves by less than this amount
# max_iter   : hard cap on iterations
#
# Returns the coefficient vector at convergence (or at max_iter).
#
# Fixes relative to the original: the running objective value is now updated
# every iteration (it was frozen at its initial value, breaking the
# convergence test), and the unused `c`/`yhat` intermediates -- which leaked
# the globals `X` and `sigma` into the function -- were removed.
gradientDesc <- function(y, x, beta_i, object, grad, learn_rate, tol, max_iter) {
  m <- beta_i
  mse <- object(m, y, x)
  iterations <- 0
  repeat {
    # One gradient step.
    m <- m - learn_rate * grad(m, y, x)
    mse_new <- object(m, y, x)
    # Converged: improvement fell below tolerance.
    if (mse - mse_new <= tol) {
      return(m)
    }
    mse <- mse_new
    iterations <- iterations + 1
    if (iterations > max_iter) {
      return(m)
    }
  }
}
# Run the function
beta_grad=gradientDesc(Y, X, beta0,objfun,gradient,0.0000003, 0.0000000001, 1000)
#7
options <- list("algorithm"="NLOPT_LD_LBFGS","xtol_rel"=1.0e-6,"maxeval"=1e3)
beta_BFGS <- nloptr( x0=beta0,eval_f=objfun,eval_grad_f=gradient,opts=options,y=Y,x=X)
beta_BFGS$solution
options <- list("algorithm"="NLOPT_LN_NELDERMEAD","xtol_rel"=1.0e-8)
beta_NM <- nloptr( x0=beta0,eval_f=objfun,opts=options,y=Y,x=X)
beta_NM$solution
#8
# Gradient of the negative Gaussian log-likelihood with respect to
# theta = c(beta, sigma).
# Bug fix: the original referenced the globals `X`/`Y` instead of the
# arguments `x`/`y`.
gradient_MLE <- function(theta, y, x) {
  grad <- as.vector(rep(0, length(theta)))
  beta <- theta[1:(length(theta) - 1)]
  sig <- theta[length(theta)]
  # d/dbeta: -x'(y - x beta) / sigma^2
  grad[1:(length(theta) - 1)] <- -t(x) %*% (y - x %*% beta) / (sig^2)
  # d/dsigma: n/sigma - ||y - x beta||^2 / sigma^3
  grad[length(theta)] <- dim(x)[1] / sig - crossprod(y - x %*% beta) / (sig^3)
  grad
}
# Least-squares objective over theta = c(beta, sigma): the trailing sigma
# element of beta_h is dropped and only the beta part enters the residual.
objfun_MLE <- function(beta_h, y, x) {
  beta_part <- beta_h[1:(length(beta_h) - 1)]
  sum((y - x %*% beta_part)^2)
}
#theta0 <- runif(dim(X)[2]+1)
theta0 <- append(as.vector(summary(lm(Y~X-1))$coefficients[,1]),runif(1))
options <- list("algorithm"="NLOPT_LD_LBFGS","xtol_rel"=1.0e-6,"maxeval"=1e3)
beta_MLE <- nloptr( x0=theta0,eval_f=objfun_MLE,eval_grad_f=gradient_MLE,opts=options,y=Y,x=X)
round(beta_MLE$solution,3)
#9
estimate <-lm(Y~X-1)
modelsummary(estimate, output = 'latex')
|
# ------------------------------------------------------------------------------
# Book: MVA
# ------------------------------------------------------------------------------
# Quantlet: MVApcabankr
# ------------------------------------------------------------------------------
# Description: MVApcabankr performs a PCA for the rescaled Swiss bank notes
# (bank2.dat). X1, X2, X3, X6 are taken in cm instead of mm. It
# shows the first three principal components in two-dimensional
# scatterplots. Additionally, a screeplot of the eigenvalues is
# displayed.
# ------------------------------------------------------------------------------
# keywords: PCA, eigenvalues, scatterplot, screeplot
# ------------------------------------------------------------------------------
# Usage: -
# ------------------------------------------------------------------------------
# Inputs: None
# ------------------------------------------------------------------------------
# Output: Two dimensional scatterplots of the first three principal
# components of the rescaled Swiss bank notes data (bank2.dat).
# Additionally, a screeplot of the eigenvalues is displayed.
# ------------------------------------------------------------------------------
# Example: -
# ------------------------------------------------------------------------------
# Author: Zografia Anastasiadou
# ------------------------------------------------------------------------------
# Note: R decomposes matrices differently from Xplore (MVA book), and hence some
# of the eigenvectors have different signs. This does not change the
# results, but it does change the order of the graph by inverting the axes
# around the origin (not always, and not necessarily all of the axis; it
# depends on which eigenvectors we choose to plot).
# In this case, the plots are inverted (compared with plots in the book).
# NOTE(review): rm(list = ls()) and setwd() in a script are side effects on
# the caller's session; kept for fidelity with the published quantlet, but
# consider removing in reusable code.
rm(list = ls(all = TRUE))
setwd("/Users/ookhrin/Documents/Private/Lehre/MVA2/_excersises/PCA_D")
x = read.table("bank2.dat")
n = nrow(x)
# Rescale X1, X2, X3, X6 from mm to cm.
x[, 1] = x[, 1] / 10
x[, 2] = x[, 2] / 10
x[, 3] = x[, 3] / 10
x[, 6] = x[, 6] / 10
colMeans(x)
# Eigen-decomposition of the ML covariance (divisor n rather than n - 1).
e = eigen((n - 1) * cov(x) / n)
e1 = e$values
# Project the data onto the principal axes.
x = as.matrix(x) %*% e$vectors
par(mfrow = c(2, 2))
# plot of the first vs. second PC
plot(x[, 1], x[, 2], pch = c(rep(1, 100), rep(3, 100)),
     col = c(rep("blue", 100), rep("red", 100)),
     xlab = "PC1", ylab = "PC2", main = "First vs. Second PC",
     cex.lab = 1.2, cex.axis = 1.2, cex.main = 1.8)
# plot of the second vs. third PC
plot(x[, 2], x[, 3], pch = c(rep(1, 100), rep(3, 100)),
     col = c(rep("blue", 100), rep("red", 100)),
     xlab = "PC2", ylab = "PC3", main = "Second vs. Third PC",
     cex.lab = 1.2, cex.axis = 1.2, cex.main = 1.8)
# plot of the first vs. third PC
# Bug fix: the y axis shows the third PC, so label it "PC3" (was "PC2").
plot(x[, 1], x[, 3], pch = c(rep(1, 100), rep(3, 100)),
     col = c(rep("blue", 100), rep("red", 100)),
     xlab = "PC1", ylab = "PC3", main = "First vs. Third PC",
     cex.lab = 1.2, cex.axis = 1.2, cex.main = 1.8)
# plot of the eigenvalues (screeplot)
plot(e1, ylim = c(0, 2.5), xlab = "Index", ylab = "Lambda",
     main = "Eigenvalues of S", cex.lab = 1.2, cex.axis = 1.2,
     cex.main = 1.8)
|
/PCA_D/MVApcabankr.R
|
no_license
|
QuantLet/tu_dresden_mva2
|
R
| false
| false
| 3,415
|
r
|
# ------------------------------------------------------------------------------
# Book: MVA
# ------------------------------------------------------------------------------
# Quantlet: MVApcabankr
# ------------------------------------------------------------------------------
# Description: MVApcabankr performs a PCA for the rescaled Swiss bank notes
# (bank2.dat). X1, X2, X3, X6 are taken in cm instead of mm. It
# shows the first three principal components in two-dimensional
# scatterplots. Additionally, a screeplot of the eigenvalues is
# displayed.
# ------------------------------------------------------------------------------
# keywords: PCA, eigenvalues, scatterplot, screeplot
# ------------------------------------------------------------------------------
# Usage: -
# ------------------------------------------------------------------------------
# Inputs: None
# ------------------------------------------------------------------------------
# Output: Two dimensional scatterplots of the first three principal
# components of the rescaled Swiss bank notes data (bank2.dat).
# Additionally, a screeplot of the eigenvalues is displayed.
# ------------------------------------------------------------------------------
# Example: -
# ------------------------------------------------------------------------------
# Author: Zografia Anastasiadou
# ------------------------------------------------------------------------------
# Note: R decomposes matrices differently from Xplore (MVA book), and hence some
# of the eigenvectors have different signs. This does not change the
# results, but it does change the order of the graph by inverting the axes
# around the origin (not always, and not necessarily all of the axis; it
# depends on which eigenvectors we choose to plot).
# In this case, the plots are inverted (compared with plots in the book).
# NOTE(review): rm(list = ls()) and setwd() in a script are side effects on
# the caller's session; kept for fidelity with the published quantlet, but
# consider removing in reusable code.
rm(list = ls(all = TRUE))
setwd("/Users/ookhrin/Documents/Private/Lehre/MVA2/_excersises/PCA_D")
x = read.table("bank2.dat")
n = nrow(x)
# Rescale X1, X2, X3, X6 from mm to cm.
x[, 1] = x[, 1] / 10
x[, 2] = x[, 2] / 10
x[, 3] = x[, 3] / 10
x[, 6] = x[, 6] / 10
colMeans(x)
# Eigen-decomposition of the ML covariance (divisor n rather than n - 1).
e = eigen((n - 1) * cov(x) / n)
e1 = e$values
# Project the data onto the principal axes.
x = as.matrix(x) %*% e$vectors
par(mfrow = c(2, 2))
# plot of the first vs. second PC
plot(x[, 1], x[, 2], pch = c(rep(1, 100), rep(3, 100)),
     col = c(rep("blue", 100), rep("red", 100)),
     xlab = "PC1", ylab = "PC2", main = "First vs. Second PC",
     cex.lab = 1.2, cex.axis = 1.2, cex.main = 1.8)
# plot of the second vs. third PC
plot(x[, 2], x[, 3], pch = c(rep(1, 100), rep(3, 100)),
     col = c(rep("blue", 100), rep("red", 100)),
     xlab = "PC2", ylab = "PC3", main = "Second vs. Third PC",
     cex.lab = 1.2, cex.axis = 1.2, cex.main = 1.8)
# plot of the first vs. third PC
# Bug fix: the y axis shows the third PC, so label it "PC3" (was "PC2").
plot(x[, 1], x[, 3], pch = c(rep(1, 100), rep(3, 100)),
     col = c(rep("blue", 100), rep("red", 100)),
     xlab = "PC1", ylab = "PC3", main = "First vs. Third PC",
     cex.lab = 1.2, cex.axis = 1.2, cex.main = 1.8)
# plot of the eigenvalues (screeplot)
plot(e1, ylim = c(0, 2.5), xlab = "Index", ylab = "Lambda",
     main = "Eigenvalues of S", cex.lab = 1.2, cex.axis = 1.2,
     cex.main = 1.8)
|
# This is my first script
|
/varuntest.R
|
no_license
|
varungp/introtobda
|
R
| false
| false
| 25
|
r
|
# This is my first script
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/transparent_colors.R
\name{transparent.colors}
\alias{transparent.colors}
\title{Function to provide the color code of a transparent color}
\usage{
transparent.colors(front_color, back_color=NA,
front_alpha, back_alpha=NA,whole_background=NA,
output, simple_multicol_output=TRUE)
}
\arguments{
\item{front_color}{The color (without alpha value) to be transparent over the background. Can be a name, a RGB code, and RGB values (between 0 and 1, 0 and 100 or 0 and 255).}
\item{back_color}{The color (without alpha value) to be transparent under the front color and over the background. Can be a name, a RGB code, and RGB values (between 0 and 1, 0 and 100 or 0 and 255). Optional.}
\item{front_alpha}{The transparency (or alpha value) of the front color. Can be between 0 and 1 and 0 and 100.}
\item{back_alpha}{The transparency (or alpha value) of the back color. Can be between 0 and 1 and 0 and 100. Optional.}
\item{whole_background}{The background color to be under the front color and potentially the back color. Cannot be transparent. By default, set to white.}
\item{output}{The output of the color code(s). Can be "color name" (default value), being the color code in R, a "RGB 255 code" (values between 0 and 255) or a "RGB \% code" (values between 0 and 1).}
\item{simple_multicol_output}{Logical. The output of the function if you specify a front and a back colors. Set by default to TRUE, meaning the only output will be the "mixed" color. Otherwise, it will return a list of two elements containing (1) the front, back and mixed colors and (2) the mixed transparency.}
}
\value{
If there is a single color to be transparent, the color code or name.
If there are two colors to be transparent, returns a list: the first element will contain the color codes or names, the second will return the transparency of the two colors superimposed.
The output of this function can be used in a plot (or such) function to define a color (see example).
}
\description{
This function gives the color code (either the name or the RGB code of the color) of a color given its transparency, with or without a specified background.
It can also give the color codes of two colors given their transparency, with or without specified background, and of both transparent colors superimposed.
This function is particularly useful to plot transparent colors and to use their "real" transparent value to use in a legend.
}
\details{
The function can return the color code of a single color with a given transparency and a given background.
It can also return the color code of two colors with each a given transparency, and a given background. In this case, it can also return the transparency of the two colors superimposed.
}
\examples{
# For a single color to be transparent
random_alpha_value<-runif(1,0,1)
random_color<-rgb(red=runif(1,0,1),green=runif(1,0,1),blue=runif(1,0,1),alpha=random_alpha_value)
plot(1:1,type="n")
points(1,1,cex=20,pch=21,col=NA,bg=random_color)
transparent.colors(front_color=random_color,front_alpha=random_alpha_value, output="RGB 255 code")
# The returning RGB code corresponds to the color in the plot.
# For two colors overlapping
random_alpha_value<-runif(1,0,1)
random_colors<-c()
for(i in 1:2){random_colors<-c(random_colors,rgb(red=runif(1,0,1),green=runif(1,0,1),blue=runif(1,0,1),alpha=random_alpha_value))}
plot(1:8,1:8,type="n")
points(4:5,4:5,cex=20,pch=21,col=NA,bg=random_colors)
# The code for all colors
transparent.colors(front_color=random_colors,front_alpha=random_alpha_value,output = "RGB 255 code",simple_multicol_output = FALSE)
# For several colors successively overlapping
random_alpha_value<-runif(1,0,1)
random_colors<-c()
for(i in 1:5){random_colors<-c(random_colors,rgb(red=runif(1,0,1),green=runif(1,0,1),blue=runif(1,0,1),alpha=random_alpha_value))}
dev.new(width=8.958333,height=6.479167,unit="in",noRStudioGD = TRUE)
plot(1:1,1:1,type="n")
for(i in 1:5){points((1+(i-1)/15),1,cex=c(60-8*i),pch=21,col=NA,bg=random_colors[i])}
only_colors<-matrix(nrow=3,ncol=0,NA)
superimposed_colors<-matrix(nrow=3,ncol=0,NA)
for(i in 1:5){
only_colors<-cbind(only_colors,transparent.colors(front_color = random_colors[i],front_alpha=random_alpha_value,output="RGB 255 code"))
if(i>1){
superimposed_colors<-cbind(superimposed_colors,transparent.colors(front_color=random_colors[i],front_alpha=random_alpha_value,back_color=rgb(t(superimposed_colors[,i-1]/255)),back_alpha = 1,output="RGB 255 code"))
}
else{
superimposed_colors<-cbind(superimposed_colors,only_colors[,1])
}
}
# Each transparent color (visible on the top right of each circle, on the top of the first one and the right side of the last one)
only_colors
# Each successive superimposed color (the aggregation of all colors)
superimposed_colors
# Demonstrating the interest of transparent.colors, especially while dealing with superimposed transparent colors one has to legend
cols<-c("black","gray10","gray20","gray30","gray40","gray50","gray60","pink","magenta","red")
alpha<-0.2
new_cols<-character(length=10)
new_cols[10]<-transparent.colors(front_color = cols[10],front_alpha=alpha)
for(i in 9:1){
new_cols[i]<-transparent.colors(front_color=cols[i],front_alpha=alpha,back_color=new_cols[i+1],back_alpha=1)
}
par(mfrow=c(1,2))
plot(1:10,1:10,type="n",main="using 'transparent.colors'",axes=FALSE,bty="n",xlab="",ylab="")
for (i in 10:1){
polygon(c(1,1:10,10),c(1,seq(from=(5+i/2),to=4,length.out = 10),1),col=new_cols[i],border=NA)
}
legend("topright",legend=letters[1:10],pch=21,pt.cex=1.5,col=NA,pt.bg=new_cols,text.col="black",bty="n")
plot(1:10,1:10,type="n",main="using 'alpha' in the plot, not\nbeing able to correct this in the legend",axes=FALSE,bty="n",xlab="",ylab="")
for (i in 10:1){
polygon(c(1,1:10,10),c(1,seq(from=(5+i/2),to=4,length.out = 10),1),col=scales::alpha(cols[i],alpha),border=NA)
}
legend("topright",legend=letters[1:10],pch=21,pt.cex=1.5,col=NA,pt.bg=cols,text.col="black",bty="n")
}
|
/man/transparent.colors.Rd
|
no_license
|
jacobmaugoust/ULT
|
R
| false
| true
| 6,065
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/transparent_colors.R
\name{transparent.colors}
\alias{transparent.colors}
\title{Function to provide the color code of a transparent color}
\usage{
transparent.colors(front_color, back_color=NA,
front_alpha, back_alpha=NA,whole_background=NA,
output, simple_multicol_output=TRUE)
}
\arguments{
\item{front_color}{The color (without alpha value) to be transparent over the background. Can be a name, a RGB code, and RGB values (between 0 and 1, 0 and 100 or 0 and 255).}
\item{back_color}{The color (without alpha value) to be transparent under the front color and over the background. Can be a name, a RGB code, and RGB values (between 0 and 1, 0 and 100 or 0 and 255). Optional.}
\item{front_alpha}{The transparency (or alpha value) of the front color. Can be between 0 and 1 and 0 and 100.}
\item{back_alpha}{The transparency (or alpha value) of the back color. Can be between 0 and 1 and 0 and 100. Optional.}
\item{whole_background}{The background color to be under the front color and potentially the back color. Cannot be transparent. By default, set to white.}
\item{output}{The output of the color code(s). Can be "color name" (default value), being the color code in R, a "RGB 255 code" (values between 0 and 255) or a "RGB \% code" (values between 0 and 1).}
\item{simple_multicol_output}{Logical. The output of the function if you specify a front and a back colors. Set by default to TRUE, meaning the only output will be the "mixed" color. Otherwise, it will return a list of two elements containing (1) the front, back and mixed colors and (2) the mixed transparency.}
}
\value{
If there is a single color to be transparent, the color code or name.
If there are two colors to be transparent, returns a list: the first element will contain the color codes or names, the second will return the transparency of the two colors superimposed.
The output of this function can be used in a plot (or such) function to define a color (see example).
}
\description{
This function gives the color code (either the name or the RGB code of the color) of a color given its transparency, with or without a specified background.
It can also give the color codes of two colors given their transparency, with or without specified background, and of both transparent colors superimposed.
This function is particularily useful to plot transparent colors and to use their "real" transparent value to use in a legend.
}
\details{
The function can return the color code of a single color with a given transparency and a given background.
It can also return the color code of two colors with each a given transparency, and a given background. In this case, it can also return the transparency of the two colors superimposed.
}
\examples{
# For a single color to be transparent
random_alpha_value<-runif(1,0,1)
random_color<-rgb(red=runif(1,0,1),green=runif(1,0,1),blue=runif(1,0,1),alpha=random_alpha_value)
plot(1:1,type="n")
points(1,1,cex=20,pch=21,col=NA,bg=random_color)
transparent.colors(front_color=random_color,front_alpha=random_alpha_value, output="RGB 255 code")
# The returning RGB code corresponds to the color in the plot.
# For two colors overlapping
random_alpha_value<-runif(1,0,1)
random_colors<-c()
for(i in 1:2){random_colors<-c(random_colors,rgb(red=runif(1,0,1),green=runif(1,0,1),blue=runif(1,0,1),alpha=random_alpha_value))}
plot(1:8,1:8,type="n")
points(4:5,4:5,cex=20,pch=21,col=NA,bg=random_colors)
# The code for all colors
transparent.colors(front_color=random_colors,front_alpha=random_alpha_value,output = "RGB 255 code",simple_multicol_output = FALSE)
# For several colors successively overlapping
random_alpha_value<-runif(1,0,1)
random_colors<-c()
for(i in 1:5){random_colors<-c(random_colors,rgb(red=runif(1,0,1),green=runif(1,0,1),blue=runif(1,0,1),alpha=random_alpha_value))}
dev.new(width=8.958333,height=6.479167,unit="in",noRStudioGD = TRUE)
plot(1:1,1:1,type="n")
for(i in 1:5){points((1+(i-1)/15),1,cex=c(60-8*i),pch=21,col=NA,bg=random_colors[i])}
only_colors<-matrix(nrow=3,ncol=0,NA)
superimposed_colors<-matrix(nrow=3,ncol=0,NA)
for(i in 1:5){
only_colors<-cbind(only_colors,transparent.colors(front_color = random_colors[i],front_alpha=random_alpha_value,output="RGB 255 code"))
if(i>1){
superimposed_colors<-cbind(superimposed_colors,transparent.colors(front_color=random_colors[i],front_alpha=random_alpha_value,back_color=rgb(t(superimposed_colors[,i-1]/255)),back_alpha = 1,output="RGB 255 code"))
}
else{
superimposed_colors<-cbind(superimposed_colors,only_colors[,1])
}
}
# Each transparent color (visible on the top right of each circle, on the top of the first one and the right side of the last one)
only_colors
# Each successive superimposed color (the aggregation of all colors)
superimposed_colors
# Demonstrating the interest of transparent.colors, especially while dealing with superimposed transparent colors one has to legend
cols<-c("black","gray10","gray20","gray30","gray40","gray50","gray60","pink","magenta","red")
alpha<-0.2
new_cols<-character(length=10)
new_cols[10]<-transparent.colors(front_color = cols[10],front_alpha=alpha)
for(i in 9:1){
new_cols[i]<-transparent.colors(front_color=cols[i],front_alpha=alpha,back_color=new_cols[i+1],back_alpha=1)
}
par(mfrow=c(1,2))
plot(1:10,1:10,type="n",main="using 'transparent.colors'",axes=FALSE,bty="n",xlab="",ylab="")
for (i in 10:1){
polygon(c(1,1:10,10),c(1,seq(from=(5+i/2),to=4,length.out = 10),1),col=new_cols[i],border=NA)
}
legend("topright",legend=letters[1:10],pch=21,pt.cex=1.5,col=NA,pt.bg=new_cols,text.col="black",bty="n")
plot(1:10,1:10,type="n",main="using 'alpha' in the plot, not\nbeing able to correct this in the legend",axes=FALSE,bty="n",xlab="",ylab="")
for (i in 10:1){
polygon(c(1,1:10,10),c(1,seq(from=(5+i/2),to=4,length.out = 10),1),col=scales::alpha(cols[i],alpha),border=NA)
}
legend("topright",legend=letters[1:10],pch=21,pt.cex=1.5,col=NA,pt.bg=cols,text.col="black",bty="n")
}
|
\docType{data}
\name{traits_eco}
\alias{traits_eco}
\title{Set of functional traits to be retrieved by Ecoflora.}
\format{The format is: a list of the following 17 elements, where
each element is a pair of the form "traits":"code used in Ecoflora HTML code":\describe{
\item{height_max }{: num 3.05}
\item{height_min }{: num 3.06}
\item{leaf_area }{: num 3.17}
\item{leaf_longevity }{: num 3.22}
\item{Photosynthetic_pathway }{: num 4.02}
\item{life_form }{: num 5.01}
\item{Vegetative_reprod_method}{: num 5.05}
\item{Flowering_earliest_month}{: num 5.07}
\item{Flowering_latest_month }{: num 5.08}
\item{Pollen_vector }{: num 5.15}
\item{Seed_weight_mean }{: num 5.34}
\item{Method_of_propagation }{: num 5.52}
\item{Ellenberg_light_Eco }{: num 7.14}
\item{Ellenberg_moisture_Eco }{: num 7.15}
\item{Ellenberg_pH_Eco }{: num 7.16}
\item{Ellenberg_nitrogen_Eco }{: num 7.17}
\item{Ellenberg_salt_Eco }{: num 7.18}
}}
\description{
\code{traits_eco} defines a list containg pairs in the form
\emph{short_name_of_the_trait} =
\emph{corresponding_code_in_Ecoflora } At the moment the
package does not download all the traits available at
Ecoflora; curious users can expand the number of
downloadable traits simply extending the list with other
'pairs' (take care of using the right \code{Ecoflora codes}
as described in
\samp{http://www.ecoflora.org.uk/search_plantchar.php}).
}
\examples{
\dontrun{
data(traits_eco)
}
}
\references{
Fitter, A . H. and Peat , H. J., 1994, The Ecological Flora Database,
J. Ecol., 82, 415-425. \samp{http://www.ecoflora.co.uk}
}
\keyword{datasets}
|
/man/traits_eco.Rd
|
no_license
|
GioBo/TR8
|
R
| false
| false
| 1,598
|
rd
|
\docType{data}
\name{traits_eco}
\alias{traits_eco}
\title{Set of functional traits to be retrieved by Ecoflora.}
\format{The format is: a list of the following 17 elements, where
each element is a pair of the form "traits":"code used in Ecoflora HTML code":\describe{
\item{height_max }{: num 3.05}
\item{height_min }{: num 3.06}
\item{leaf_area }{: num 3.17}
\item{leaf_longevity }{: num 3.22}
\item{Photosynthetic_pathway }{: num 4.02}
\item{life_form }{: num 5.01}
\item{Vegetative_reprod_method}{: num 5.05}
\item{Flowering_earliest_month}{: num 5.07}
\item{Flowering_latest_month }{: num 5.08}
\item{Pollen_vector }{: num 5.15}
\item{Seed_weight_mean }{: num 5.34}
\item{Method_of_propagation }{: num 5.52}
\item{Ellenberg_light_Eco }{: num 7.14}
\item{Ellenberg_moisture_Eco }{: num 7.15}
\item{Ellenberg_pH_Eco }{: num 7.16}
\item{Ellenberg_nitrogen_Eco }{: num 7.17}
\item{Ellenberg_salt_Eco }{: num 7.18}
}}
\description{
\code{traits_eco} defines a list containing pairs in the form
\emph{short_name_of_the_trait} =
\emph{corresponding_code_in_Ecoflora}. At the moment the
package does not download all the traits available at
Ecoflora; curious users can expand the number of
downloadable traits simply extending the list with other
'pairs' (take care of using the right \code{Ecoflora codes}
as described in
\samp{http://www.ecoflora.org.uk/search_plantchar.php}).
}
\examples{
\dontrun{
data(traits_eco)
}
}
\references{
Fitter, A . H. and Peat , H. J., 1994, The Ecological Flora Database,
J. Ecol., 82, 415-425. \samp{http://www.ecoflora.co.uk}
}
\keyword{datasets}
|
# load libs
library(tidyverse)
library(rvest)
# get the section of the url that we need for as many cups as we want -- here I use the last five
wc <- c("brazil2014","southafrica2010","germany2006","koreajapan2002","france1998")
# function to get all columns
# Scrape all match results for one World Cup edition from the FIFA archive.
#
# wc: a single edition slug used in the archive URL, e.g. "brazil2014".
#
# Returns a tibble with one row per match: kickoff datetime, date, match
# number, round, home/away team names and codes, the scores parsed out of
# the score text, and the long/short tiebreak notes for matches decided
# after 90 minutes.
get_results <- function(wc) {
  url <- paste0("https://www.fifa.com/worldcup/archive/",wc,"/matches/index.html")
  # Download and parse the page ONCE and reuse the parsed document; the
  # original fetched and re-parsed the URL for every selector, i.e. eight
  # HTTP requests per edition instead of one.
  page <- read_html(url)
  # Helper: text content of every node matching a CSS selector.
  scrape <- function(css) html_text(html_nodes(page, css))
  all_datetimes <- scrape(".mu-i-datetime")
  all_dates <- scrape(".mu-i-date")
  all_match_numbers <- scrape(".mu-i-matchnum")
  all_rounds <- scrape(".mu-i-group")
  # long and short notes for matches that end aet or with pens
  all_tiebreakers <- scrape(".text-reasonwin")
  # scores as text, e.g. "2-1"
  all_scores <- scrape(".s-scoreText")
  all_teams <- scrape(".t-nText ")
  all_codes <- scrape(".t-nTri")
  # Home entries occupy the odd positions, away entries the even positions
  # of each scraped vector.
  home_index <- seq(1,length(all_teams)-1,2)
  away_index <- seq(2,length(all_teams),2)
  home_teams <- all_teams[home_index]
  away_teams <- all_teams[away_index]
  home_codes <- all_codes[home_index]
  away_codes <- all_codes[away_index]
  # The tiebreak notes alternate long/short in document order.
  tiebreak_long <- all_tiebreakers[home_index]
  tiebreak_short <- all_tiebreakers[away_index]
  tibble(
    edition = wc,
    datetime = all_datetimes,
    date = all_dates,
    match_no = all_match_numbers,
    round = all_rounds,
    home_team = home_teams,
    home_code = home_codes,
    # digit before the hyphen - start of the score string
    home_score = as.integer(str_extract(all_scores,"^[:digit:]")),
    score = all_scores,
    # digit after the hyphen - end of the score string
    away_score = as.integer(str_extract(all_scores,"[:digit:]$")),
    away_team = away_teams,
    away_code = away_codes,
    tiebreak_long = tiebreak_long,
    tiebreak_short = tiebreak_short
  )
}
# Map get_results over every edition slug in `wc`, row-binding the
# per-edition tibbles into one results table.
results <- map_df(wc, get_results)
# a few duplicate rows so remove those here
results <- distinct(results)
# Derive winner plus per-side W/L/D columns from the 90-minute score and,
# for drawn matches, from the tiebreak note scraped off the page.
results <- results %>%
  mutate(winner = case_when(
    home_score > away_score ~ home_team,
    away_score > home_score ~ away_team,
    # Drawn after 90': the winner's name precedes " win"/" wins" in the note.
    # NOTE(review): the lookahead "(?= win?)" asserts " wi" plus an optional
    # "n", so it matches before both " win ..." and " wins ..." -- confirm
    # against the actual .text-reasonwin strings.
    home_score == away_score ~ str_extract(tiebreak_long, "[:print:]+(?= win?)")
  ),
  home_result = case_when(
    home_score > away_score ~ 'W',
    away_score > home_score ~ 'L',
    # NOTE(review): a genuine draw is detected by the tiebreak note being a
    # single space -- verify that is what the scraper returns for group draws.
    home_score == away_score & tiebreak_long == " " ~ 'D',
    home_score == away_score & str_extract(tiebreak_long, "[:print:]+(?= win?)") == home_team ~ 'W',
    home_score == away_score & str_extract(tiebreak_long, "[:print:]+(?= win?)") != home_team ~ 'L'
  ),
  away_result = case_when(
    away_score > home_score ~ 'W',
    home_score > away_score ~ 'L',
    away_score == home_score & tiebreak_long == " " ~ 'D',
    away_score == home_score & str_extract(tiebreak_long, "[:print:]+(?= win?)") == away_team ~ 'W',
    away_score == home_score & str_extract(tiebreak_long, "[:print:]+(?= win?)") != away_team ~ 'L'
  )
  ## add in code to add a tag for expected result or upset (ask Joe about when a draw is an upset)
  )
## ratings which have to be gathered one by one because they are not stored uniformly in wikitables
# Brazil 2014:
url <- "https://en.wikipedia.org/wiki/2014_FIFA_World_Cup_seeding"
ratings <- url %>%
read_html() %>%
html_node(xpath = '//*[@id="mw-content-text"]/div/table[1]') %>%
html_table(fill = TRUE) %>%
as.tibble(
)
ratings <- ratings %>%
mutate(Team = str_extract(Team, '[^\\(]+'), Team = str_trim(Team, side = "right"), edition = "brazil2014") %>%
rename(team = Team, ranking = `FIFA Ranking\nOctober 2013`) %>%
mutate(team = case_when(
team == "United States" ~ "USA",
team == "Iran" ~ "IR Iran",
team == "South Korea" ~ "Korea Republic",
team == "Ivory Coast" ~ "Côte d'Ivoire",
TRUE ~ as.character(team)
))
ratings14 <- ratings
# South Africa 2010:
url <- "https://en.wikipedia.org/wiki/2010_FIFA_World_Cup_seeding"
ratings <- url %>%
read_html() %>%
html_node(xpath = '//*[@id="mw-content-text"]/div/table[1]') %>%
html_table(fill = TRUE) %>%
as.tibble(
)
ratings <- ratings %>%
mutate(Association = str_extract(Association, '[^\\(]+'), Association = str_trim(Association, side = "right"), edition = "southafrica2010") %>%
rename(team = Association, ranking = `FIFA Ranking\nOctober 2009`) %>%
mutate(team = case_when(
team == "United States" ~ "USA",
team == "South Korea" ~ "Korea Republic",
team == "North Korea" ~ "Korea DPR",
team == "Ivory Coast" ~ "Côte d'Ivoire",
TRUE ~ as.character(team)
))
ratings10 <- ratings
# Germany 2006:
url <- "https://en.wikipedia.org/wiki/2006_FIFA_World_Cup_seeding"
ratings <- url %>%
read_html() %>%
html_node(xpath = '//*[@id="mw-content-text"]/div/table') %>%
html_table(fill = TRUE)
ratings <- ratings[,c(2,12)]
colnames(ratings) <- c('team','ranking')
ratings <- ratings %>%
slice(3:34) %>%
mutate(edition = "germany2006", ranking = as.integer(ranking)) %>%
mutate(team = case_when(
team == "United States" ~ "USA",
team == "Iran" ~ "IR Iran",
team == "South Korea" ~ "Korea Republic",
team == "Ivory Coast" ~ "Côte d'Ivoire",
TRUE ~ as.character(team)
))
## add extra row for Iran name mismatch
rating_extra_iran_row <- tribble(
~team, ~ranking, ~edition,
"Iran", 19, "germany2006"
)
ratings06 <- ratings
# Korea/Japan 2002:
url <- 'https://en.wikipedia.org/wiki/2002_FIFA_World_Cup_seeding'
ratings <- url %>%
read_html() %>%
html_node(xpath = '//*[@id="mw-content-text"]/div/table') %>%
html_table(fill = TRUE)
ratings <- ratings[,c(2,12)]
colnames(ratings) <- c('team','ranking')
## results for 2002 have Iran listed as Iran and IR Iran
ratings <- ratings %>%
slice(3:34) %>%
mutate(edition = "koreajapan2002", ranking = as.integer(ranking)) %>%
mutate(team = case_when(
team == "United States" ~ "USA",
team == "Iran" ~ "IR Iran",
team == "South Korea" ~ "Korea Republic",
team == "Ivory Coast" ~ "Côte d'Ivoire",
TRUE ~ as.character(team)
))
ratings02 <- ratings
# France 1998:
url <- 'https://en.wikipedia.org/wiki/1998_FIFA_World_Cup_seeding'
ratings <- url %>%
read_html() %>%
html_node(xpath = '//*[@id="mw-content-text"]/div/table') %>%
html_table(fill = TRUE)
ratings <- ratings[,c(2,12)]
colnames(ratings) <- c('team','ranking')
ratings <- ratings %>%
slice(3:34) %>%
mutate(edition = "france1998", ranking = as.integer(ranking)) %>%
mutate(team = case_when(
team == "United States" ~ "USA",
team == "South Korea" ~ "Korea Republic",
team == "Ivory Coast" ~ "Côte d'Ivoire",
TRUE ~ as.character(team)
))
ratings98 <- ratings
# USA 1994:
# get these later - need to find a better table
# url <- 'https://en.wikipedia.org/wiki/1994_FIFA_World_Cup'
#
# ratings <- url %>%
# read_html() %>%
# html_node(xpath = '//*[@id="mw-content-text"]/div/table[3]') %>%
# html_table(fill = TRUE)
#
# ratings94 <- ratings
## bind all ratings tables
ratings <- bind_rows(ratings14,ratings10,ratings06,ratings02,ratings98,rating_extra_iran_row)
## Attach each side's FIFA ranking by joining the seeding table twice,
## once on the home team and once on the away team (matched per edition).
results <- results %>%
  left_join(ratings, by = c("home_team" = "team", "edition" = "edition")) %>%
  rename(home_rank = ranking) %>%
  left_join(ratings, by = c("away_team" = "team", "edition" = "edition")) %>%
  rename(away_rank = ranking)
## expected or upset
# when is an upset and upset -- when is a draw an upset (is it ever?)
results <- results %>%
  mutate(result_type = case_when(
    # "expected" when the better-ranked (lower rank number) side does not
    # lose; "upset" otherwise.
    home_result == 'W' & home_rank < away_rank ~ "expected",
    home_result == 'D' & home_rank < away_rank ~ "expected",
    home_result == 'W' & home_rank > away_rank ~ "upset",
    home_result == 'D' & home_rank > away_rank ~ "upset",
    home_result == 'L' & home_rank < away_rank ~ "upset",
    home_result == 'L' & home_rank > away_rank ~ "expected"
    # NOTE(review): equal ranks, or a rank left NA by the join, fall through
    # every branch and leave result_type NA -- confirm that is intended.
  ),
  rank_diff = abs(home_rank-away_rank),
  score_diff = abs(home_score-away_score)
  )
write_csv(results,"historical_results.csv")
### read in results after they have been created ###
# results <- read_csv("historical_results.csv")
## Per-edition, per-round tally of expected results, upsets and draws;
## `year` is the numeric suffix of the edition slug (e.g. "2014").
upsets_tally <- results %>%
  mutate(year = str_extract(edition,'[:digit:]+')) %>%
  group_by(edition, year, round) %>%
  summarize(expected = sum(if_else(result_type == "expected",1,0)),
            upsets = sum(if_else(result_type == "upset",1,0)),
            draws = sum(if_else(home_result == "D",1,0))) %>%
  arrange(desc(year)) %>%
  ungroup()
write_csv(upsets_tally,"upsets_tally_table.csv")
|
/get_results.R
|
no_license
|
joemgon/worldcup2018
|
R
| false
| false
| 9,105
|
r
|
# load libs
library(tidyverse)
library(rvest)
# get the section of the url that we need for as many cups as we want -- here I use the last five
wc <- c("brazil2014","southafrica2010","germany2006","koreajapan2002","france1998")
# function to get all columns
# Scrape all match results for one World Cup edition from the FIFA archive.
#
# wc: a single edition slug used in the archive URL, e.g. "brazil2014".
#
# Returns a tibble with one row per match: kickoff datetime, date, match
# number, round, home/away team names and codes, the scores parsed out of
# the score text, and the long/short tiebreak notes for matches decided
# after 90 minutes.
get_results <- function(wc) {
  url <- paste0("https://www.fifa.com/worldcup/archive/",wc,"/matches/index.html")
  # Download and parse the page ONCE and reuse the parsed document; the
  # original fetched and re-parsed the URL for every selector, i.e. eight
  # HTTP requests per edition instead of one.
  page <- read_html(url)
  # Helper: text content of every node matching a CSS selector.
  scrape <- function(css) html_text(html_nodes(page, css))
  all_datetimes <- scrape(".mu-i-datetime")
  all_dates <- scrape(".mu-i-date")
  all_match_numbers <- scrape(".mu-i-matchnum")
  all_rounds <- scrape(".mu-i-group")
  # long and short notes for matches that end aet or with pens
  all_tiebreakers <- scrape(".text-reasonwin")
  # scores as text, e.g. "2-1"
  all_scores <- scrape(".s-scoreText")
  all_teams <- scrape(".t-nText ")
  all_codes <- scrape(".t-nTri")
  # Home entries occupy the odd positions, away entries the even positions
  # of each scraped vector.
  home_index <- seq(1,length(all_teams)-1,2)
  away_index <- seq(2,length(all_teams),2)
  home_teams <- all_teams[home_index]
  away_teams <- all_teams[away_index]
  home_codes <- all_codes[home_index]
  away_codes <- all_codes[away_index]
  # The tiebreak notes alternate long/short in document order.
  tiebreak_long <- all_tiebreakers[home_index]
  tiebreak_short <- all_tiebreakers[away_index]
  tibble(
    edition = wc,
    datetime = all_datetimes,
    date = all_dates,
    match_no = all_match_numbers,
    round = all_rounds,
    home_team = home_teams,
    home_code = home_codes,
    # digit before the hyphen - start of the score string
    home_score = as.integer(str_extract(all_scores,"^[:digit:]")),
    score = all_scores,
    # digit after the hyphen - end of the score string
    away_score = as.integer(str_extract(all_scores,"[:digit:]$")),
    away_team = away_teams,
    away_code = away_codes,
    tiebreak_long = tiebreak_long,
    tiebreak_short = tiebreak_short
  )
}
# Map get_results over every edition slug in `wc`, row-binding the
# per-edition tibbles into one results table.
results <- map_df(wc, get_results)
# a few duplicate rows so remove those here
results <- distinct(results)
# Derive winner plus per-side W/L/D columns from the 90-minute score and,
# for drawn matches, from the tiebreak note scraped off the page.
results <- results %>%
  mutate(winner = case_when(
    home_score > away_score ~ home_team,
    away_score > home_score ~ away_team,
    # Drawn after 90': the winner's name precedes " win"/" wins" in the note.
    # NOTE(review): the lookahead "(?= win?)" asserts " wi" plus an optional
    # "n", so it matches before both " win ..." and " wins ..." -- confirm
    # against the actual .text-reasonwin strings.
    home_score == away_score ~ str_extract(tiebreak_long, "[:print:]+(?= win?)")
  ),
  home_result = case_when(
    home_score > away_score ~ 'W',
    away_score > home_score ~ 'L',
    # NOTE(review): a genuine draw is detected by the tiebreak note being a
    # single space -- verify that is what the scraper returns for group draws.
    home_score == away_score & tiebreak_long == " " ~ 'D',
    home_score == away_score & str_extract(tiebreak_long, "[:print:]+(?= win?)") == home_team ~ 'W',
    home_score == away_score & str_extract(tiebreak_long, "[:print:]+(?= win?)") != home_team ~ 'L'
  ),
  away_result = case_when(
    away_score > home_score ~ 'W',
    home_score > away_score ~ 'L',
    away_score == home_score & tiebreak_long == " " ~ 'D',
    away_score == home_score & str_extract(tiebreak_long, "[:print:]+(?= win?)") == away_team ~ 'W',
    away_score == home_score & str_extract(tiebreak_long, "[:print:]+(?= win?)") != away_team ~ 'L'
  )
  ## add in code to add a tag for expected result or upset (ask Joe about when a draw is an upset)
  )
## ratings which have to be gathered one by one because they are not stored uniformly in wikitables
# Brazil 2014:
url <- "https://en.wikipedia.org/wiki/2014_FIFA_World_Cup_seeding"
ratings <- url %>%
read_html() %>%
html_node(xpath = '//*[@id="mw-content-text"]/div/table[1]') %>%
html_table(fill = TRUE) %>%
as.tibble(
)
ratings <- ratings %>%
mutate(Team = str_extract(Team, '[^\\(]+'), Team = str_trim(Team, side = "right"), edition = "brazil2014") %>%
rename(team = Team, ranking = `FIFA Ranking\nOctober 2013`) %>%
mutate(team = case_when(
team == "United States" ~ "USA",
team == "Iran" ~ "IR Iran",
team == "South Korea" ~ "Korea Republic",
team == "Ivory Coast" ~ "Côte d'Ivoire",
TRUE ~ as.character(team)
))
ratings14 <- ratings
# South Africa 2010:
url <- "https://en.wikipedia.org/wiki/2010_FIFA_World_Cup_seeding"
ratings <- url %>%
read_html() %>%
html_node(xpath = '//*[@id="mw-content-text"]/div/table[1]') %>%
html_table(fill = TRUE) %>%
as.tibble(
)
ratings <- ratings %>%
mutate(Association = str_extract(Association, '[^\\(]+'), Association = str_trim(Association, side = "right"), edition = "southafrica2010") %>%
rename(team = Association, ranking = `FIFA Ranking\nOctober 2009`) %>%
mutate(team = case_when(
team == "United States" ~ "USA",
team == "South Korea" ~ "Korea Republic",
team == "North Korea" ~ "Korea DPR",
team == "Ivory Coast" ~ "Côte d'Ivoire",
TRUE ~ as.character(team)
))
ratings10 <- ratings
# Germany 2006:
url <- "https://en.wikipedia.org/wiki/2006_FIFA_World_Cup_seeding"
ratings <- url %>%
read_html() %>%
html_node(xpath = '//*[@id="mw-content-text"]/div/table') %>%
html_table(fill = TRUE)
ratings <- ratings[,c(2,12)]
colnames(ratings) <- c('team','ranking')
ratings <- ratings %>%
slice(3:34) %>%
mutate(edition = "germany2006", ranking = as.integer(ranking)) %>%
mutate(team = case_when(
team == "United States" ~ "USA",
team == "Iran" ~ "IR Iran",
team == "South Korea" ~ "Korea Republic",
team == "Ivory Coast" ~ "Côte d'Ivoire",
TRUE ~ as.character(team)
))
## add extra row for Iran name mismatch
rating_extra_iran_row <- tribble(
~team, ~ranking, ~edition,
"Iran", 19, "germany2006"
)
ratings06 <- ratings
# Korea/Japan 2002:
url <- 'https://en.wikipedia.org/wiki/2002_FIFA_World_Cup_seeding'
ratings <- url %>%
read_html() %>%
html_node(xpath = '//*[@id="mw-content-text"]/div/table') %>%
html_table(fill = TRUE)
ratings <- ratings[,c(2,12)]
colnames(ratings) <- c('team','ranking')
## results for 2002 have Iran listed as Iran and IR Iran
ratings <- ratings %>%
slice(3:34) %>%
mutate(edition = "koreajapan2002", ranking = as.integer(ranking)) %>%
mutate(team = case_when(
team == "United States" ~ "USA",
team == "Iran" ~ "IR Iran",
team == "South Korea" ~ "Korea Republic",
team == "Ivory Coast" ~ "Côte d'Ivoire",
TRUE ~ as.character(team)
))
ratings02 <- ratings
# France 1998:
url <- 'https://en.wikipedia.org/wiki/1998_FIFA_World_Cup_seeding'
ratings <- url %>%
read_html() %>%
html_node(xpath = '//*[@id="mw-content-text"]/div/table') %>%
html_table(fill = TRUE)
ratings <- ratings[,c(2,12)]
colnames(ratings) <- c('team','ranking')
ratings <- ratings %>%
slice(3:34) %>%
mutate(edition = "france1998", ranking = as.integer(ranking)) %>%
mutate(team = case_when(
team == "United States" ~ "USA",
team == "South Korea" ~ "Korea Republic",
team == "Ivory Coast" ~ "Côte d'Ivoire",
TRUE ~ as.character(team)
))
ratings98 <- ratings
# USA 1994:
# get these later - need to find a better table
# url <- 'https://en.wikipedia.org/wiki/1994_FIFA_World_Cup'
#
# ratings <- url %>%
# read_html() %>%
# html_node(xpath = '//*[@id="mw-content-text"]/div/table[3]') %>%
# html_table(fill = TRUE)
#
# ratings94 <- ratings
## bind all ratings tables
ratings <- bind_rows(ratings14,ratings10,ratings06,ratings02,ratings98,rating_extra_iran_row)
## Attach each side's FIFA ranking by joining the seeding table twice,
## once on the home team and once on the away team (matched per edition).
results <- results %>%
  left_join(ratings, by = c("home_team" = "team", "edition" = "edition")) %>%
  rename(home_rank = ranking) %>%
  left_join(ratings, by = c("away_team" = "team", "edition" = "edition")) %>%
  rename(away_rank = ranking)
## expected or upset
# when is an upset and upset -- when is a draw an upset (is it ever?)
results <- results %>%
  mutate(result_type = case_when(
    # "expected" when the better-ranked (lower rank number) side does not
    # lose; "upset" otherwise.
    home_result == 'W' & home_rank < away_rank ~ "expected",
    home_result == 'D' & home_rank < away_rank ~ "expected",
    home_result == 'W' & home_rank > away_rank ~ "upset",
    home_result == 'D' & home_rank > away_rank ~ "upset",
    home_result == 'L' & home_rank < away_rank ~ "upset",
    home_result == 'L' & home_rank > away_rank ~ "expected"
    # NOTE(review): equal ranks, or a rank left NA by the join, fall through
    # every branch and leave result_type NA -- confirm that is intended.
  ),
  rank_diff = abs(home_rank-away_rank),
  score_diff = abs(home_score-away_score)
  )
write_csv(results,"historical_results.csv")
### read in results after they have been created ###
# results <- read_csv("historical_results.csv")
## Per-edition, per-round tally of expected results, upsets and draws;
## `year` is the numeric suffix of the edition slug (e.g. "2014").
upsets_tally <- results %>%
  mutate(year = str_extract(edition,'[:digit:]+')) %>%
  group_by(edition, year, round) %>%
  summarize(expected = sum(if_else(result_type == "expected",1,0)),
            upsets = sum(if_else(result_type == "upset",1,0)),
            draws = sum(if_else(home_result == "D",1,0))) %>%
  arrange(desc(year)) %>%
  ungroup()
write_csv(upsets_tally,"upsets_tally_table.csv")
|
rm(list = ls(all = TRUE))
# install the required packages if needed
#install.packages("INLA", repos="http://www.math.ntnu.no/inla/R/testing")
#install.packages("bigmemory")
#install.packages("snow")
#install.packages("Rmpi")
#install.packages("ade4")
#install.packages("sp")
#install.packages("BAS")
#install.packages("https://github.com/aliaksah/EMJMCMC2016/files/270429/EMJMCMC_1.2.tar.gz", repos = NULL, type="source")
#install.packages("RCurl")
#install.packages("hash")
library(hash)
library(RCurl)
library(EMJMCMC)
library(sp)
library(INLA)
library(parallel)
library(bigmemory)
library(snow)
library(MASS)
library(ade4)
library(copula)
library(compiler)
library(BAS)
require(stats)
#define your working directory, where the data files are stored
workdir<-""
#prepare data
simx <- read.table(text = getURL("https://raw.githubusercontent.com/aliaksah/EMJMCMC2016/master/examples/Simulated%20Logistic%20Data%20With%20Multiple%20Modes%20%28Example%203%29/sim3-X.txt"),sep = ",")
simy <- read.table(text = getURL("https://raw.githubusercontent.com/aliaksah/EMJMCMC2016/master/examples/Simulated%20Logistic%20Data%20With%20Multiple%20Modes%20%28Example%203%29/sim3-Y.txt"),sep = ",")
data.example <- cbind(simy,simx)
names(data.example)[1]="Y1"
data.example$V2<-(data.example$V10+data.example$V14)*data.example$V9
data.example$V5<-(data.example$V11+data.example$V15)*data.example$V12
#fparam <- c("Const",colnames(data)[-1])
fparam.example <- colnames(data.example)[-1]
fobserved.example <- colnames(data.example)[1]
#dataframe for results; n/b +1 is required for the summary statistics
statistics1 <- big.matrix(nrow = 2 ^(length(fparam.example))+1, ncol = 15,init = NA, type = "double")
statistics <- describe(statistics1)
#create MySearch object with default parameters
mySearch = EMJMCMC2016()
# load functions for MLIK estimation
mySearch$estimator = estimate.bas.glm
mySearch$estimator.args = list(data = data.example,prior = aic.prior(),family = binomial(), logn = log(2000))
#play around with various methods in order to get used to them and see how they work
# carry out full enumeration (it is still feasible)
system.time(
FFF<-mySearch$full_selection(list(statid=6, totalit =2^20+1, ub = 36*20,mlikcur=-Inf,waiccur =100000))
)
# completed in 7889 for 1048576 models whilst BAS took 6954.101 seconds and thus the advantage of using C versus R is clearly seen as negligible (14688.209 user seconds)
# BAS completed the same job in
# check that all models are enumerated during the full search procedure
idn<-which(is.na(statistics1[,1]))
length(idn)
mySearch$visualize_results(statistics1, "test3", 1024, crit=list(mlik = T, waic = T, dic = T),draw_dist = FALSE)
# once full search is completed, get the truth for the experiment
ppp<-mySearch$post_proceed_results(statistics1 = statistics1)
truth = ppp$p.post # make sure it is equal to Truth column from the article
truth.m = ppp$m.post
truth.prob = ppp$s.mass
ordering = sort(ppp$p.post,index.return=T)
fake500 <- sum(exp(x = (sort(statistics1[,1],decreasing = T)[1:2^20] + 1)),na.rm = TRUE)/truth.prob
print("pi truth")
sprintf("%.10f",truth[ordering$ix])
#estimate best performance ever
min(statistics1[,1],na.rm = T)
idn<-which(is.na(statistics1[,1]))
2^20-length(idn)
statistics1[idn,1]<- -100000
iddx <- sort(statistics1[,1],decreasing = T,index.return=T,na.last = NA)$ix
# check that all models are enumerated during the full search procedure
# see the obtained maximum and minimum
min(statistics1[,1],na.rm = TRUE)
max(statistics1[,1],na.rm = TRUE)
# look at the best possible performance
statistics1[as.numeric(iddx[10001:2^20]),1:15]<-NA
ppp.best<-mySearch$post_proceed_results(statistics1 = statistics1)
best = ppp.best$p.post # make sure it is equal to Truth column from the article
bset.m = ppp.best$m.post
best.prob = ppp.best$s.mass/truth.prob
print("pi best")
sprintf("%.10f",best[ordering$ix])
# notice some interesting details on the posterior mass and number of models visited
# 50000 best models contain 100.0000% of mass 100.0000%
# 48300 best models contain 99.99995% of mass 100.0000%
# 48086 best models contain 99.99995% of mass 100.0000%
# 10000 best models contain 99.99990% of mass 99.99991%
# 5000 best models contain 93.83923% of mass 94.72895%
# 3500 best models contain 85.77979% of mass 87.90333%
# 1500 best models contain 63.33376% of mass 67.71380%
# 1000 best models contain 53.47534% of mass 57.91971%
# 500 best models contain 37.72771% of mass 42.62869%
# 100 best models contain 14.76030% of mass 17.71082%
# 50 best models contain 14.76030% of mass 11.36970%
# 10 best models contain 14.76030% of mass 3.911063%
# 5 best models contain 14.76030% of mass 2.351454%
# 1 best models contain 14.76030% of mass 0.595301%
best.bias.m<-sqrt(mean((bset.m - truth.m)^2,na.rm = TRUE))*100000
best.rmse.m<-sqrt(mean((bset.m - truth.m)^2,na.rm = TRUE))*100000
best.bias<- best - truth
best.rmse<- abs(best - truth)
# view results for the best possible performance model
View((cbind(best.bias[ordering$ix],best.rmse[ordering$ix])*100))
# proceed with the experiment
# set parameters of the search
mySearch$switch.type=as.integer(1)
mySearch$switch.type.glob=as.integer(1)
#mySearch$printable.opt = TRUE
mySearch$max.N.glob=as.integer(5)
mySearch$min.N.glob=as.integer(3)
mySearch$max.N=as.integer(1)
mySearch$min.N=as.integer(1)
mySearch$recalc.margin = as.integer(2^20)
distrib_of_proposals = c(76.91870,71.25264,87.68184,60.55921,15812.39852)
#distrib_of_proposals = c(0,0,0,0,10)
distrib_of_neighbourhoods=t(array(data = c(7.6651604,16.773326,14.541629,12.839445,2.964227,13.048343,7.165434,
0.9936905,15.942490,11.040131,3.200394,15.349051,5.466632,14.676458,
1.5184551,9.285762,6.125034,3.627547,13.343413,2.923767,15.318774,
14.5295380,1.521960,11.804457,5.070282,6.934380,10.578945,12.455602,
6.0826035,2.453729,14.340435,14.863495,1.028312,12.685017,13.806295),dim = c(7,5)))
distrib_of_neighbourhoods[7]=distrib_of_neighbourhoods[7]/100
distrib_of_neighbourhoods = array(data = 0, dim = c(5,7))
distrib_of_neighbourhoods[,3]=10
# proceed with the search
Niter <- 100
thining<-1
# Run the mode-jumping MCMC experiment Niter times, accumulating per-run
# inclusion-probability estimates, captured posterior mass, model-posterior
# errors against the full-enumeration truth, and iteration counts.
system.time({
  # Two estimates of the marginal inclusion probabilities per run:
  # vect from resm$bayes.results, vect.mc from the raw MC frequencies.
  vect <-array(data = 0,dim = c(length(fparam.example),Niter))
  vect.mc <-array(data = 0,dim = c(length(fparam.example),Niter))
  inits <-array(data = 0,dim = Niter)      # decimal code of each run's random start model
  freqs <-array(data = 100,dim = c(5,Niter))     # proposal distribution used per run
  freqs.p <-array(data = 100,dim = c(5,7,Niter)) # neighbourhood distribution used per run
  masses <- array(data = 0,dim = Niter)    # share of the enumerated total posterior mass recovered
  # Accumulated model-posterior deviations from truth.m (one cell per model, +1 for summary row).
  biases.m <- array(data = 0,dim = 2 ^(length(fparam.example))+1)
  biases.m.mc <- array(data = 0,dim = 2 ^(length(fparam.example))+1)
  rmse.m <- array(data = 0,dim = Niter)
  rmse.m.mc <- array(data = 0,dim = Niter)
  # Row 1: iteration counts; row 2: estimation counts (see the means printed below).
  iterats <- array(data = 0,dim = c(2,Niter))
  for(i in 1:Niter)
  {
    # Fresh shared results matrix for this run (+1 row for summary statistics).
    statistics1 <- big.matrix(nrow = 2 ^(length(fparam.example))+1, ncol = 15,init = NA, type = "double")
    statistics <- describe(statistics1)
    # Reset the search object's counters and inclusion probabilities.
    mySearch$g.results[4,1]<-0
    mySearch$g.results[4,2]<-0
    mySearch$p.add = array(data = 0.5,dim = length(fparam.example))
    print("BEGIN ITERATION!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!")
    print(i)
    # Reproducible random start model, seeded per run.
    set.seed(10*i)
    initsol=rbinom(n = length(fparam.example),size = 1,prob = 0.5)
    inits[i] <- mySearch$bittodec(initsol)
    freqs[,i]<- distrib_of_proposals
    resm<-mySearch$modejumping_mcmc(list(varcur=initsol,statid=5, distrib_of_proposals =distrib_of_proposals,distrib_of_neighbourhoods=distrib_of_neighbourhoods, eps = 0.000000001, trit = 999000, trest = 10000, burnin = 3, max.time = 30, maxit = 100000, print.freq =1000))
    vect[,i]<-resm$bayes.results$p.post
    vect.mc[,i]<-resm$p.post
    # Fraction of the total posterior mass (from the earlier full enumeration) captured.
    masses[i]<-resm$bayes.results$s.mass/truth.prob
    print(masses[i])
    freqs.p[,,i] <- distrib_of_neighbourhoods
    # Model-posterior errors vs truth.m; NA entries are unvisited models, counted as 0.
    cur.p.post <- resm$bayes.results$m.post
    cur.p.post[(which(is.na(cur.p.post)))]<-0
    rmse.m[i]<-mean((cur.p.post - truth.m)^2,na.rm = TRUE)
    biases.m<-biases.m + (cur.p.post - truth.m)
    cur.p.post.mc <- resm$m.post
    cur.p.post.mc[(which(is.na(cur.p.post.mc)))]<-0
    rmse.m.mc[i]<-mean((cur.p.post.mc - truth.m)^2,na.rm = TRUE)
    biases.m.mc<-biases.m.mc + (cur.p.post.mc - truth.m)
    iterats[1,i]<-mySearch$g.results[4,1]
    iterats[2,i]<-mySearch$g.results[4,2]
    print("COMPLETE ITERATION!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! with")
    print(iterats[2,i])
    # Drop the big.matrix and its descriptor before the next run.
    remove(statistics1)
    remove(statistics)
  }
}
)
Nlim <- 1
order.deviat <- sort(masses,decreasing = TRUE,index.return=T)
print("model bias rm")
sqrt(mean((biases.m/Niter)^2,na.rm = TRUE))*100000
print("model rmse rm")
sqrt(mean(rmse.m))*100000
print("model bias mc")
sqrt(mean((biases.m.mc/Niter)^2,na.rm = TRUE))*100000
print("model rmse mc")
sqrt(mean(rmse.m.mc))*100000
print("model coverages")
mean(masses)
median(masses)
print("mean # of iterations")# even smaller on average than in BAS
mean(iterats[1,])
print("mean # of estimations")# even smaller on average than in BAS
mean(iterats[2,])
hist(masses)
# correlation between the MSE and the masses, obviously almost minus 1
cor(rmse.m,masses)
cor(rmse.m.mc,masses)
cor(iterats[2,],masses)
truth.buf <- array(data = 0,dim = c(length(fparam.example),Niter))
truth.buf[,1:Niter]<-truth
bias <- vect - truth.buf
bias.mc <- vect.mc - truth.buf
rmse <- (vect^2 +truth.buf^2 - 2*vect*truth.buf)
rmse.mc <- (vect.mc^2 +truth.buf^2 - 2*vect.mc*truth.buf)
bias.avg.rm<-rowMeans(bias)
rmse.avg.rm <-sqrt(rowMeans(rmse))
bias.avg.mc<-rowMeans(bias.mc)
rmse.avg.mc <-sqrt(rowMeans(rmse.mc))
print("pi biases rm")
sprintf("%.10f",bias.avg.rm[ordering$ix]*100)
print("pi rmse rm")
sprintf("%.10f",rmse.avg.rm[ordering$ix]*100)
print("pi biases mc")
sprintf("%.10f",bias.avg.mc[ordering$ix]*100)
print("pi rmse mc")
sprintf("%.10f",rmse.avg.mc[ordering$ix]*100)
# view the final results
View((cbind(bias.avg.rm[ordering$ix],rmse.avg.rm[ordering$ix],bias.avg.mc[ordering$ix],rmse.avg.mc[ordering$ix])*100))
|
/supplementaries/Mode Jumping MCMC/supplementary/examples/Simulated Logistic Data With Multiple Modes (Example 3)/mode_jumping_package_class_example3_10000.r
|
no_license
|
aliaksah/EMJMCMC2016
|
R
| false
| false
| 10,027
|
r
|
rm(list = ls(all = TRUE))
# install the required packages if needed
#install.packages("INLA", repos="http://www.math.ntnu.no/inla/R/testing")
#install.packages("bigmemory")
#install.packages("snow")
#install.packages("Rmpi")
#install.packages("ade4")
#install.packages("sp")
#install.packages("BAS")
#install.packages("https://github.com/aliaksah/EMJMCMC2016/files/270429/EMJMCMC_1.2.tar.gz", repos = NULL, type="source")
#install.packages("RCurl")
#install.packages("hash")
library(hash)
library(RCurl)
library(EMJMCMC)
library(sp)
library(INLA)
library(parallel)
library(bigmemory)
library(snow)
library(MASS)
library(ade4)
library(copula)
library(compiler)
library(BAS)
require(stats)
#define your working directory, where the data files are stored
workdir<-""
#prepare data
simx <- read.table(text = getURL("https://raw.githubusercontent.com/aliaksah/EMJMCMC2016/master/examples/Simulated%20Logistic%20Data%20With%20Multiple%20Modes%20%28Example%203%29/sim3-X.txt"),sep = ",")
simy <- read.table(text = getURL("https://raw.githubusercontent.com/aliaksah/EMJMCMC2016/master/examples/Simulated%20Logistic%20Data%20With%20Multiple%20Modes%20%28Example%203%29/sim3-Y.txt"),sep = ",")
data.example <- cbind(simy,simx)
names(data.example)[1]="Y1"
data.example$V2<-(data.example$V10+data.example$V14)*data.example$V9
data.example$V5<-(data.example$V11+data.example$V15)*data.example$V12
#fparam <- c("Const",colnames(data)[-1])
fparam.example <- colnames(data.example)[-1]
fobserved.example <- colnames(data.example)[1]
#dataframe for results; n/b +1 is required for the summary statistics
statistics1 <- big.matrix(nrow = 2 ^(length(fparam.example))+1, ncol = 15,init = NA, type = "double")
statistics <- describe(statistics1)
#create MySearch object with default parameters
mySearch = EMJMCMC2016()
# load functions for MLIK estimation
mySearch$estimator = estimate.bas.glm
mySearch$estimator.args = list(data = data.example,prior = aic.prior(),family = binomial(), logn = log(2000))
#play around with various methods in order to get used to them and see how they work
# carry out full enumeration (it is still feasible)
system.time(
FFF<-mySearch$full_selection(list(statid=6, totalit =2^20+1, ub = 36*20,mlikcur=-Inf,waiccur =100000))
)
# completed in 7889 for 1048576 models whilst BAS took 6954.101 seconds and thus the advantage of using C versus R is clearly seen as negligible (14688.209 user seconds)
# BAS completed the same job in
# check that all models are enumerated during the full search procedure
idn<-which(is.na(statistics1[,1]))
length(idn)
mySearch$visualize_results(statistics1, "test3", 1024, crit=list(mlik = T, waic = T, dic = T),draw_dist = FALSE)
# once full search is completed, get the truth for the experiment
ppp<-mySearch$post_proceed_results(statistics1 = statistics1)
truth = ppp$p.post # make sure it is equal to Truth column from the article
truth.m = ppp$m.post
truth.prob = ppp$s.mass
ordering = sort(ppp$p.post,index.return=T)
fake500 <- sum(exp(x = (sort(statistics1[,1],decreasing = T)[1:2^20] + 1)),na.rm = TRUE)/truth.prob
print("pi truth")
sprintf("%.10f",truth[ordering$ix])
#estimate best performance ever
min(statistics1[,1],na.rm = T)
idn<-which(is.na(statistics1[,1]))
2^20-length(idn)
statistics1[idn,1]<- -100000
iddx <- sort(statistics1[,1],decreasing = T,index.return=T,na.last = NA)$ix
# check that all models are enumerated during the full search procedure
# see the obtained maximum and minimum
min(statistics1[,1],na.rm = TRUE)
max(statistics1[,1],na.rm = TRUE)
# look at the best possible performance
statistics1[as.numeric(iddx[10001:2^20]),1:15]<-NA
ppp.best<-mySearch$post_proceed_results(statistics1 = statistics1)
best = ppp.best$p.post # make sure it is equal to Truth column from the article
bset.m = ppp.best$m.post
best.prob = ppp.best$s.mass/truth.prob
print("pi best")
sprintf("%.10f",best[ordering$ix])
# notice some interesting details on the posterior mass and number of models visited
# 50000 best models contain 100.0000% of mass 100.0000%
# 48300 best models contain 99.99995% of mass 100.0000%
# 48086 best models contain 99.99995% of mass 100.0000%
# 10000 best models contain 99.99990% of mass 99.99991%
# 5000 best models contain 93.83923% of mass 94.72895%
# 3500 best models contain 85.77979% of mass 87.90333%
# 1500 best models contain 63.33376% of mass 67.71380%
# 1000 best models contain 53.47534% of mass 57.91971%
# 500 best models contain 37.72771% of mass 42.62869%
# 100 best models contain 14.76030% of mass 17.71082%
# 50 best models contain 14.76030% of mass 11.36970%
# 10 best models contain 14.76030% of mass 3.911063%
# 5 best models contain 14.76030% of mass 2.351454%
# 1 best models contain 14.76030% of mass 0.595301%
best.bias.m<-sqrt(mean((bset.m - truth.m)^2,na.rm = TRUE))*100000
best.rmse.m<-sqrt(mean((bset.m - truth.m)^2,na.rm = TRUE))*100000
best.bias<- best - truth
best.rmse<- abs(best - truth)
# view results for the best possible performance model
View((cbind(best.bias[ordering$ix],best.rmse[ordering$ix])*100))
# proceed with the experiment
# set parameters of the mode-jumping MCMC search
mySearch$switch.type=as.integer(1)
mySearch$switch.type.glob=as.integer(1)
#mySearch$printable.opt = TRUE
mySearch$max.N.glob=as.integer(5)
mySearch$min.N.glob=as.integer(3)
mySearch$max.N=as.integer(1)
mySearch$min.N=as.integer(1)
mySearch$recalc.margin = as.integer(2^20)
# mixture weights over the five proposal kernels of the mode-jumping MCMC
distrib_of_proposals = c(76.91870,71.25264,87.68184,60.55921,15812.39852)
#distrib_of_proposals = c(0,0,0,0,10)
# 5 x 7 weight matrix over neighbourhood types (rows = proposals,
# columns = neighbourhood kinds), transposed from a 7 x 5 layout
distrib_of_neighbourhoods=t(array(data = c(7.6651604,16.773326,14.541629,12.839445,2.964227,13.048343,7.165434,
                                           0.9936905,15.942490,11.040131,3.200394,15.349051,5.466632,14.676458,
                                           1.5184551,9.285762,6.125034,3.627547,13.343413,2.923767,15.318774,
                                           14.5295380,1.521960,11.804457,5.070282,6.934380,10.578945,12.455602,
                                           6.0826035,2.453729,14.340435,14.863495,1.028312,12.685017,13.806295),dim = c(7,5)))
distrib_of_neighbourhoods[7]=distrib_of_neighbourhoods[7]/100
# NOTE(review): the two lines below discard the tuned matrix above and
# replace it with a design that always picks neighbourhood type 3 -- confirm intent.
distrib_of_neighbourhoods = array(data = 0, dim = c(5,7))
distrib_of_neighbourhoods[,3]=10
# proceed with the search: Niter independent replications of the MCMC run
Niter <- 100
thining<-1
system.time({
  # per-replication accumulators: inclusion probabilities (RM and MC
  # estimators), initial solutions, proposal/neighbourhood frequencies,
  # captured mass, model-posterior biases/RMSEs, iteration counts
  vect <-array(data = 0,dim = c(length(fparam.example),Niter))
  vect.mc <-array(data = 0,dim = c(length(fparam.example),Niter))
  inits <-array(data = 0,dim = Niter)
  freqs <-array(data = 100,dim = c(5,Niter))
  freqs.p <-array(data = 100,dim = c(5,7,Niter))
  masses <- array(data = 0,dim = Niter)
  biases.m <- array(data = 0,dim = 2 ^(length(fparam.example))+1)
  biases.m.mc <- array(data = 0,dim = 2 ^(length(fparam.example))+1)
  rmse.m <- array(data = 0,dim = Niter)
  rmse.m.mc <- array(data = 0,dim = Niter)
  iterats <- array(data = 0,dim = c(2,Niter))
  for(i in 1:Niter)
  {
    # fresh statistics matrix and search counters for every replication
    statistics1 <- big.matrix(nrow = 2 ^(length(fparam.example))+1, ncol = 15,init = NA, type = "double")
    statistics <- describe(statistics1)
    mySearch$g.results[4,1]<-0
    mySearch$g.results[4,2]<-0
    mySearch$p.add = array(data = 0.5,dim = length(fparam.example))
    print("BEGIN ITERATION!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!")
    print(i)
    # reproducible random starting model for replication i
    set.seed(10*i)
    initsol=rbinom(n = length(fparam.example),size = 1,prob = 0.5)
    inits[i] <- mySearch$bittodec(initsol)
    freqs[,i]<- distrib_of_proposals
    resm<-mySearch$modejumping_mcmc(list(varcur=initsol,statid=5, distrib_of_proposals =distrib_of_proposals,distrib_of_neighbourhoods=distrib_of_neighbourhoods, eps = 0.000000001, trit = 999000, trest = 10000, burnin = 3, max.time = 30, maxit = 100000, print.freq =1000))
    # Renormalized (bayes.results) and raw Monte Carlo estimates
    vect[,i]<-resm$bayes.results$p.post
    vect.mc[,i]<-resm$p.post
    masses[i]<-resm$bayes.results$s.mass/truth.prob
    print(masses[i])
    freqs.p[,,i] <- distrib_of_neighbourhoods
    # model-posterior errors vs the enumerated truth (NAs -> 0 mass)
    cur.p.post <- resm$bayes.results$m.post
    cur.p.post[(which(is.na(cur.p.post)))]<-0
    rmse.m[i]<-mean((cur.p.post - truth.m)^2,na.rm = TRUE)
    biases.m<-biases.m + (cur.p.post - truth.m)
    cur.p.post.mc <- resm$m.post
    cur.p.post.mc[(which(is.na(cur.p.post.mc)))]<-0
    rmse.m.mc[i]<-mean((cur.p.post.mc - truth.m)^2,na.rm = TRUE)
    biases.m.mc<-biases.m.mc + (cur.p.post.mc - truth.m)
    # bookkeeping: MCMC iterations performed and estimations carried out
    iterats[1,i]<-mySearch$g.results[4,1]
    iterats[2,i]<-mySearch$g.results[4,2]
    print("COMPLETE ITERATION!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! with")
    print(iterats[2,i])
    remove(statistics1)
    remove(statistics)
  }
}
)
Nlim <- 1
order.deviat <- sort(masses,decreasing = TRUE,index.return=T)
# Aggregate model-space errors across the Niter replications,
# scaled by 1e5 for readability (matches the article's tables).
print("model bias rm")
sqrt(mean((biases.m/Niter)^2,na.rm = TRUE))*100000
print("model rmse rm")
sqrt(mean(rmse.m))*100000
print("model bias mc")
sqrt(mean((biases.m.mc/Niter)^2,na.rm = TRUE))*100000
print("model rmse mc")
sqrt(mean(rmse.m.mc))*100000
# Fraction of the true posterior mass captured per replication.
print("model coverages")
mean(masses)
median(masses)
print("mean # of iterations")# even smaller on average than in BAS
mean(iterats[1,])
print("mean # of estimations")# even smaller on average than in BAS
mean(iterats[2,])
hist(masses)
# correlation between the MSE and the masses, obviously almost minus 1
cor(rmse.m,masses)
cor(rmse.m.mc,masses)
cor(iterats[2,],masses)
# Per-covariate inclusion-probability errors: replicate the truth across
# columns, then compute bias and RMSE for both estimators.
truth.buf <- array(data = 0,dim = c(length(fparam.example),Niter))
truth.buf[,1:Niter]<-truth
bias <- vect - truth.buf
bias.mc <- vect.mc - truth.buf
rmse <- (vect^2 +truth.buf^2 - 2*vect*truth.buf)
rmse.mc <- (vect.mc^2 +truth.buf^2 - 2*vect.mc*truth.buf)
bias.avg.rm<-rowMeans(bias)
rmse.avg.rm <-sqrt(rowMeans(rmse))
bias.avg.mc<-rowMeans(bias.mc)
rmse.avg.mc <-sqrt(rowMeans(rmse.mc))
print("pi biases rm")
sprintf("%.10f",bias.avg.rm[ordering$ix]*100)
print("pi rmse rm")
sprintf("%.10f",rmse.avg.rm[ordering$ix]*100)
print("pi biases mc")
sprintf("%.10f",bias.avg.mc[ordering$ix]*100)
print("pi rmse mc")
sprintf("%.10f",rmse.avg.mc[ordering$ix]*100)
# view the final results
View((cbind(bias.avg.rm[ordering$ix],rmse.avg.rm[ordering$ix],bias.avg.mc[ordering$ix],rmse.avg.mc[ordering$ix])*100))
|
# pre-session options
# NOTE: rm(list = ls()) wipes the workspace; acceptable in a standalone
# demo script, but never do this inside reusable code.
rm(list = ls())
# getwd()
# setwd("C:/Users/your preferred path")
# load tidyr for pivoting (it also re-exports the %>% pipe used below):
library(tidyr)
# Read the installs-by-country/OS table straight from Dropbox.
df <- read.csv(url("https://www.dropbox.com/s/ylf9bkctp5byu2m/proptable.csv?dl=1"),
               skip = 0,
               header = TRUE,
               stringsAsFactors = FALSE)  # was `0`: use an explicit logical
# keep a local cache; row.names = FALSE avoids writing a stray index
# column that would reappear on re-import
write.csv(df, "proptable.csv", row.names = FALSE)
# first, create a pivot table: one row per country, one column per OS
df <- df %>%
  spread(OS, Installs)
# then create a prop table (proportion table) on rows (1) or on columns (2)
# VERY IMPORTANT! this only works on MATRICES, so you need to subset the table
# so only numeric columns will be in the subset!
df.prop.rows <- prop.table(as.matrix(df[, 2:3]), 1)
df.prop.rows
# now, let's re-attach the country names:
df.prop.rows <- cbind.data.frame("Country" = df[, 1], df.prop.rows)
df.prop.rows
# let's have a look at columns:
df.prop.cols <- prop.table(as.matrix(df[, 2:3]), 2)
df.prop.cols <- cbind.data.frame("Country" = df[, 1], df.prop.cols)
df.prop.cols
# check your totals: each row of the row-wise table and each column of
# the column-wise table should sum to 1 (i.e. 100%)
rowSums(df.prop.rows[, 2:3])
colSums(df.prop.cols[, 2:3])
|
/proptable.R
|
no_license
|
dbM11/excel2R
|
R
| false
| false
| 1,105
|
r
|
# pre-session housekeeping (clears the workspace before the demo)
rm(list = ls())
# getwd()
# setwd("C:/Users/your preferred path")
# tidyr supplies spread() for pivoting:
library(tidyr)
# pull the raw installs-by-country/OS table from Dropbox
df <- read.csv(url("https://www.dropbox.com/s/ylf9bkctp5byu2m/proptable.csv?dl=1"),
               skip = 0,
               header = TRUE,
               stringsAsFactors = FALSE)
write.csv(df, "proptable.csv")
# pivot into one row per country with one column per OS
df <- spread(df, OS, Installs)
# prop.table() operates on matrices only, so drop the non-numeric
# country column before converting; margin 1 = proportions within each
# row, margin 2 = proportions within each column
df.prop.rows <- prop.table(as.matrix(df[, 2:3]), 1)
df.prop.rows
# glue the country labels back on
df.prop.rows <- cbind.data.frame("Country" = df[, 1], df.prop.rows)
df.prop.rows
# same exercise column-wise
df.prop.cols <- prop.table(as.matrix(df[, 2:3]), 2)
df.prop.cols <- cbind.data.frame("Country" = df[, 1], df.prop.cols)
df.prop.cols
# sanity check: the proportions must total 100% along each margin
rowSums(df.prop.rows[, 2:3])
colSums(df.prop.cols[, 2:3])
|
# Linear discriminants and SVMs: practice
# ---------------------------------------
# Exploring the meta-parameters of an SVM
# ---------------------------------------
library(e1071)
library(tcltk)
# Load data into session.
train <- read.csv("assignment3/Letters/LettersTrain.csv")
test <- read.csv("assignment3/Letters/LettersTest.csv")
valid <- read.csv("assignment3/Letters/LettersValid.csv")
# Load the pre-drawn training-subset indices (`selected_indices`).
load("assignment3/selected_indices.RData")
# Compute various classifiers.
# Parameters: kernel, cost, degree, gamma.
# Predict `labels` from every feature column except the row-id column `X`.
formula <- labels ~ . - X
# Accuracy of `classifier` on data frame `set`: fraction of the
# confusion matrix lying on the diagonal. (The original passed
# type='labels' to predict(); predict.svm() has no `type` argument, so
# it was silently swallowed by `...` -- dropped here.)
compute_acc <- function(classifier, set) {
  t <- table(set$labels, predict(classifier, set))
  sum(diag(t))/sum(t)
}
# Q1
# --
# For 10 rounds x 6 training sizes, fit 8 SVM variants and record
# training/validation accuracy together with the meta-parameters used.
r <- c()
sizes <- c()
train_acc <- c()
valid_acc <- c()
ker <- c()
co <- c()
degr <- c()
gam <- c()
defaultco <- 1
defaultdegr <- 3
defaultgam <- 1/617   # e1071 default gamma = 1 / number of features
# Progress bar (480 = 10 rounds x 6 sizes x 8 models).
pb <- tkProgressBar(title = "SVM progress bar", label = "",
                    min = 0, max = 480, initial = 0, width = 300)
ctr <- 0
for (i in 1:10) {
  for (size in c(95, 189, 473, 710, 851, 937)) {
    # Log the meta-parameters of the 8 models fitted below.
    r <- c(r, rep(i, 8))
    sizes <- c(sizes, rep(size, 8))
    ker <- c(ker, rep("linear", 3), rep("polynomial", 2), rep("radial", 2), "sigmoid")
    co <- c(co, 0.1, 10, 1000, rep(defaultco, 5))
    degr <- c(degr, rep(defaultdegr, 3), 3, 9, rep(defaultdegr, 3))
    gam <- c(gam, rep(defaultgam, 6), 0.1, defaultgam)
    # Define training set.
    tset <- train[selected_indices[[i]][1:size], ]
    # Linear kernel tests.
    for (C in c(0.1, 10, 1000)) {
      classifier <- svm(formula, data=tset, kernel="linear", cost=C)
      train_acc <- c(train_acc, compute_acc(classifier, tset))
      valid_acc <- c(valid_acc, compute_acc(classifier, valid))
      ctr <- ctr + 1
      setTkProgressBar(pb, value=ctr, title = NULL, label = NULL)
    }
    # Polynomial kernel tests.
    for (deg in c(3, 9)) {
      classifier <- svm(formula, data=tset, kernel="polynomial", degree=deg)
      train_acc <- c(train_acc, compute_acc(classifier, tset))
      valid_acc <- c(valid_acc, compute_acc(classifier, valid))
      ctr <- ctr + 1
      setTkProgressBar(pb, value=ctr, title = NULL, label = NULL)
    }
    # Radial basis kernel tests.
    for (g in c(defaultgam, 0.1)) {
      classifier <- svm(formula, data=tset, kernel="radial", gamma=g)
      train_acc <- c(train_acc, compute_acc(classifier, tset))
      valid_acc <- c(valid_acc, compute_acc(classifier, valid))
      ctr <- ctr + 1
      setTkProgressBar(pb, value=ctr, title = NULL, label = NULL)
    }
    # Sigmoid kernel tests.
    classifier <- svm(formula, data=tset, kernel="sigmoid")
    train_acc <- c(train_acc, compute_acc(classifier, tset))
    valid_acc <- c(valid_acc, compute_acc(classifier, valid))
    ctr <- ctr + 1
    setTkProgressBar(pb, value=ctr, title = NULL, label = NULL)
  }
}
setTkProgressBar(pb, value=ctr, title = NULL, label = NULL)
# Save data frame.
student_frame <- data.frame(r, sizes, train_acc, valid_acc, ker, co, degr, gam)
names(student_frame) <- c("Round", "Train size", "Train acc", "Valid acc", "kernel", "cost", "degree", "gamma")
save(student_frame, file="assignment3/A3_2_1.RData")
close(pb)
# Q2
# --
# FIX: svm() takes `cost`, not `C`; the original `C = 1000` was silently
# absorbed by `...` so the default cost of 1 was used instead.
linclass <- svm(formula, data=train, kernel="linear", cost = 1000)
acc <- compute_acc(linclass, test)
# - A linear kernel seems like a good kernel choice.
# - Hard margin hyperplanes are learnt when the C parameter of the primal problem tends to infinity.
# - An RBF kernel with C = 1 and gamma = 1 is roughly equivalent to random guessing.
# - The validation set accuracy increases with the number of observations.
# - Choosing the meta-parameters that maximize the accuracy on the test set causes an optimistic bias.
#   That's why a validation set is used.
# - The accuracy on our specific test set is worse than the one on the validation set
#   for most of the meta-parameter choices.
#   It simply reflects that the data distribution is harder on the test set.
# Q3
# --
# Compare support-vector counts and accuracies of a linear SVM vs an
# RBF SVM with an intentionally huge gamma.
linclass <- svm(formula, data=train, kernel="linear")
lintrain_acc <- compute_acc(linclass, train)
lintest_acc <- compute_acc(linclass, test)
rbfclass <- svm(formula, data=train, kernel="radial", gamma = 1)
rbftrain_acc <- compute_acc(rbfclass, train)
rbftest_acc <- compute_acc(rbfclass, test)
sprintf(fmt = "%d, %g, %g, %d, %g, %g", linclass$tot.nSV, lintrain_acc, lintest_acc, rbfclass$tot.nSV, rbftrain_acc, rbftest_acc)
# Q4
# --
# For the second set of parameters (RBF kernel), each training point is essentially
# only similar to itself and dissimilar from all other points.
# Thus, the number of support vectors is maximal and generalization is impossible.
|
/assignment3/linear_discriminants_svms_practice.R
|
no_license
|
Peiffap/lingi2262-assignments
|
R
| false
| false
| 4,644
|
r
|
# Linear discriminants and SVMs: practice
# ---------------------------------------
# Exploring the meta-parameters of an SVM
# ---------------------------------------
library(e1071)
library(tcltk)
# Load data into session.
train <- read.csv("assignment3/Letters/LettersTrain.csv")
test <- read.csv("assignment3/Letters/LettersTest.csv")
valid <- read.csv("assignment3/Letters/LettersValid.csv")
# Load indices (`selected_indices`: pre-drawn training subsets).
load("assignment3/selected_indices.RData")
# Compute various classifiers.
# Parameters: kernel, cost, degree, gamma.
# Predict `labels` from every feature except the row-id column `X`.
formula = labels ~ . - X
# Accuracy of `classifier` on `set`: diagonal share of the confusion matrix.
# NOTE(review): predict.svm() has no `type` argument; type='labels' is
# silently ignored via `...`.
compute_acc <- function(classifier, set) {
  t <- table(set$labels, predict(classifier, set, type='labels'))
  sum(diag(t))/sum(t)
}
# Q1
# --
# For 10 rounds x 6 training sizes, fit 8 SVM variants each and record
# the meta-parameters with training/validation accuracies.
r <- c()
sizes <- c()
train_acc <- c()
valid_acc <- c()
ker <- c()
co <- c()
degr <- c()
gam <- c()
defaultco <- 1
defaultdegr <- 3
# e1071 default gamma = 1 / number of features
defaultgam <- 1/617
# Progress bar (480 = 10 rounds x 6 sizes x 8 models).
pb <- tkProgressBar(title = "SVM progress bar", label = "",
                    min = 0, max = 480, initial = 0, width = 300)
ctr <- 0
for (i in 1:10) {
  for (size in c(95, 189, 473, 710, 851, 937)) {
    # Record the meta-parameters of the 8 fits performed below.
    r <- c(r, rep(i, 8))
    sizes <- c(sizes, rep(size, 8))
    ker <- c(ker, rep("linear", 3), rep("polynomial", 2), rep("radial", 2), "sigmoid")
    co <- c(co, 0.1, 10, 1000, rep(defaultco, 5))
    degr <- c(degr, rep(defaultdegr, 3), 3, 9, rep(defaultdegr, 3))
    gam <- c(gam, rep(defaultgam, 6), 0.1, defaultgam)
    # Define training set.
    tset <- train[selected_indices[[i]][1:size], ]
    # Linear kernel tests.
    for (C in c(0.1, 10, 1000)) {
      classifier <- svm(formula, data=tset, kernel="linear", cost=C)
      train_acc <- c(train_acc, compute_acc(classifier, tset))
      valid_acc <- c(valid_acc, compute_acc(classifier, valid))
      ctr <- ctr + 1
      setTkProgressBar(pb, value=ctr, title = NULL, label = NULL)
    }
    # Polynomial kernel tests.
    for (deg in c(3, 9)) {
      classifier <- svm(formula, data=tset, kernel="polynomial", degree=deg)
      train_acc <- c(train_acc, compute_acc(classifier, tset))
      valid_acc <- c(valid_acc, compute_acc(classifier, valid))
      ctr <- ctr + 1
      setTkProgressBar(pb, value=ctr, title = NULL, label = NULL)
    }
    # Radial basis kernel tests.
    for (g in c(defaultgam, 0.1)) {
      classifier <- svm(formula, data=tset, kernel="radial", gamma=g)
      train_acc <- c(train_acc, compute_acc(classifier, tset))
      valid_acc <- c(valid_acc, compute_acc(classifier, valid))
      ctr <- ctr + 1
      setTkProgressBar(pb, value=ctr, title = NULL, label = NULL)
    }
    # Sigmoid kernel tests.
    classifier <- svm(formula, data=tset, kernel="sigmoid")
    train_acc <- c(train_acc, compute_acc(classifier, tset))
    valid_acc <- c(valid_acc, compute_acc(classifier, valid))
    ctr <- ctr + 1
    setTkProgressBar(pb, value=ctr, title = NULL, label = NULL)
  }
}
setTkProgressBar(pb, value=ctr, title = NULL, label = NULL)
# Save data frame.
student_frame <- data.frame(r, sizes, train_acc, valid_acc, ker, co, degr, gam)
names(student_frame) <- c("Round", "Train size", "Train acc", "Valid acc", "kernel", "cost", "degree", "gamma")
save(student_frame, file="assignment3/A3_2_1.RData")
close(pb)
# Q2
# --
# NOTE(review): svm() takes `cost`, not `C`; `C = 1000` falls into `...`
# and is ignored, so this actually fits with the default cost = 1 -- confirm.
linclass = svm(formula, data=train, kernel="linear", C = 1000)
acc = compute_acc(linclass, test)
# - A linear kernel seems like a good kernel choice.
# - Hard margin hyperplanes are learnt when the C parameter of the primal problem tends to infinity.
# - An RBF kernel with C = 1 and gamma = 1 is roughly equivalent to random guessing.
# - The validation set accuracy increases with the number of observations.
# - Choosing the meta-parameters that maximize the accuracy on the test set causes an optimistic bias.
#   That's why a validation set is used.
# - The accuracy on our specific test set is worse than the one on the validation set
#   for most of the meta-parameter choices.
#   It simply reflects that the data distribution is harder on the test set.
# Q3
# --
# Linear SVM vs an RBF SVM with deliberately huge gamma = 1.
linclass = svm(formula, data=train, kernel="linear")
lintrain_acc = compute_acc(linclass, train)
lintest_acc = compute_acc(linclass, test)
rbfclass = svm(formula, data=train, kernel="radial", gamma = 1)
rbftrain_acc = compute_acc(rbfclass, train)
rbftest_acc = compute_acc(rbfclass, test)
sprintf(fmt = "%d, %g, %g, %d, %g, %g", linclass$tot.nSV, lintrain_acc, lintest_acc, rbfclass$tot.nSV, rbftrain_acc, rbftest_acc)
# Q4
# --
# For the second set of parameters (RBF kernel), each training point is essentially
# only similar to itself and dissimilar from all other points.
# Thus, the number of support vectors is maximal and generalization is impossible.
|
###
# Ensemble of Segmented Functional Nonparametric Classifiers
# Robert Buscaglia, Nichola C. Garbett, Yiannis Kamarianakis
# June 20, 2017
#
# Functions and Packages
# File contains all required functions and packages for running the
# ESFuNC algorithm. Each function has been given a brief description.
# Please see ESFuNC_Analysis_Template.R for details on performing the
# algorithm.
### Required Packages
# Checks for and installs missing packages then loads all packages.
list.of.packages <- c("fda.usc", "foreach", "doParallel", "abind")
new.packages <- list.of.packages[!(list.of.packages %in% installed.packages()[,"Package"])]
# Explicit length comparison instead of relying on integer truthiness.
if (length(new.packages) > 0)
{
  cat("Missing Packages are Installing... \n")
  install.packages(new.packages)
}
library("fda.usc")
library("foreach")
library("doParallel")
library("abind")
### Kernels
# Provide weights for nonparametric classifiers. Required for probability calculations.
# Must be written so that they can work with both KNN and Parzen Window. Any additional
# kernels can be prepared but should retain this form to ensure functionality.
kern.tri<-function(x, h=1, method="wknn")
{
  # Triangular weights. "wknn": rescale distances to [0, 1] and weight
  # linearly (closest neighbour -> 2, farthest -> 0). "kernel": classic
  # triangular kernel with bandwidth h (zero outside h).
  if (method == "wknn") {
    shifted <- x - min(x)
    result <- 2 * (1 - shifted / max(shifted))
  }
  if (method == "kernel") {
    result <- ifelse(x <= h, (2 / h) * (1 - (x / h)), 0)
  }
  return(result)
}
kern.norm<-function(x, h=1, method="wknn")
{
  # Gaussian weights. "wknn": standard-normal density of the min-shifted
  # distances. "kernel": normal density with sd = h. Both are doubled so
  # the maximal weight matches kern.tri's scale.
  if (method == "wknn") {
    result <- 2 * dnorm(x - min(x))
  }
  if (method == "kernel") {
    result <- 2 * dnorm(x, 0, h)
  }
  return(result)
}
kern.unif<-function(x, h=1, method="wknn")
{
  # Uniform weights. "wknn": every neighbour gets the same density
  # 1 / (max(x) - min(x)). "kernel": boxcar kernel of width h.
  if (method == "wknn") {
    result <- dunif(x, min = min(x), max = max(x))
  }
  if (method == "kernel") {
    result <- ifelse(x <= h, 1 / h, 0)
  }
  return(result)
}
### createFolds
# This is the createFolds function from the package 'caret' (vendored
# verbatim to avoid the dependency).
# This is used to create stratified folds for cross validation.
# y           : outcome vector used for stratification
# k           : number of folds
# list        : TRUE -> list of index vectors; FALSE -> integer fold labels
# returnTrain : when list = TRUE, return training indices instead of test indices
createFolds<-function (y, k = 10, list = TRUE, returnTrain = FALSE)
{
  # Survival outcomes: stratify on the observed times.
  if (class(y)[1] == "Surv")
    y <- y[, "time"]
  # Numeric outcomes: bin into 2-5 quantile groups so folds are balanced.
  if (is.numeric(y)) {
    cuts <- floor(length(y)/k)
    if (cuts < 2)
      cuts <- 2
    if (cuts > 5)
      cuts <- 5
    breaks <- unique(quantile(y, probs = seq(0, 1, length = cuts)))
    y <- cut(y, breaks, include.lowest = TRUE)
  }
  if (k < length(y)) {
    # Assign folds class by class so each fold keeps the class mix.
    y <- factor(as.character(y))
    numInClass <- table(y)
    foldVector <- vector(mode = "integer", length(y))
    for (i in 1:length(numInClass)) {
      min_reps <- numInClass[i]%/%k
      if (min_reps > 0) {
        # Distribute full multiples of k evenly, spares at random.
        spares <- numInClass[i]%%k
        seqVector <- rep(1:k, min_reps)
        if (spares > 0)
          seqVector <- c(seqVector, sample(1:k, spares))
        foldVector[which(y == names(numInClass)[i])] <- sample(seqVector)
      }
      else {
        # Fewer members than folds: scatter them over random folds.
        foldVector[which(y == names(numInClass)[i])] <- sample(1:k,
                                                               size = numInClass[i])
      }
    }
  }
  else foldVector <- seq(along = y)
  if (list) {
    # Split indices by fold and give zero-padded names (Fold01, ...).
    out <- split(seq(along = y), foldVector)
    names(out) <- paste("Fold", gsub(" ", "0", format(seq(along = out))),
                        sep = "")
    if (returnTrain)
      out <- lapply(out, function(data, y) y[-data], y = seq(along = y))
  }
  else out <- foldVector
  out
}
### fold.creator
# Build `trials` independent stratified fold assignments (each an
# integer vector of fold labels, one per observation) for repeated CV.
fold.creator<-function(classes, folds, trials)
{
  lapply(seq_len(trials),
         function(trial) createFolds(factor(classes), k = folds, list = FALSE))
}
### create.cv.sum
# Summarize cross-validation results column-wise. Accepts a matrix or
# data frame of CV scores (rows = repetitions, columns = settings) and
# returns a 6-row matrix: median, MAD, mean + 2*SE, mean, SD, mean - 2*SE.
create.cv.sum<-function(cv.to.plot)
{
  n.runs <- nrow(as.matrix(cv.to.plot))
  col.med <- apply(cv.to.plot, 2, median)
  col.mad <- apply(cv.to.plot, 2, mad)
  col.mean <- apply(cv.to.plot, 2, mean)
  col.sd <- apply(cv.to.plot, 2, sd)
  col.se <- col.sd / sqrt(n.runs)
  # Row names kept identical to the original implementation.
  rbind(med.temp = col.med, mad.temp = col.mad,
        ul.temp = col.mean + 2 * col.se, mean.temp = col.mean,
        sd.temp = col.sd, ll.temp = col.mean - 2 * col.se)
}
### create.segments
# Creates unique fdata objects for each segment determined by (total.segments)
# total.segments = 1 refers to the undisturbed fdata object.
# fdata.object   : fdata object being analyzed (any derivative)
# total.segments : how many partitions are to be made
# indent         : discrete point indentations based on argvals, used to
#                  trim numerical noise at the domain edges
# density        : grid mesh point density controlling integration accuracy
#                  (defaults to the trimmed grid size)
create.segments<-function(fdata.object, total.segments, indent, density=NULL)
{
  argvals <- fdata.object$argvals
  size <- length(argvals)
  start <- argvals[1 + indent]
  end <- argvals[size - indent]
  # Proper scalar conditional (the original abused ifelse() with
  # side-effect assignments in its branches).
  if (is.null(density)) {
    int.density <- size - 2 * indent
  } else {
    int.density <- density
  }
  # Equally spaced segment boundaries over the trimmed range.
  lims.temp <- start + (0:total.segments) * (end - start) / total.segments
  # Convert to an fd object once, outside the loop (loop-invariant work).
  fd.rep <- fdata2fd(fdata.object)
  segs.list <- list()
  for (j in seq_len(total.segments))
  {
    # Evaluate the smooth on an int.density-point grid over segment j.
    seq.temp <- seq(lims.temp[j], lims.temp[j + 1],
                    (lims.temp[j + 1] - lims.temp[j]) / (int.density - 1))
    fd.temp <- eval.fd(seq.temp, fd.rep)
    segs.list[[j]] <- fdata(t(fd.temp), argvals = seq.temp)
  }
  return(segs.list)
}
### sort.models
# Sorts grid search results and orders results such that segments are minimized and neighbors are maximized
# grid.output     : resulting object from running sequential grid search
#                   (matrix: rows = neighbor counts, columns = segment counts)
# models.returned : how many models to be viewed as output
# do.print        : control whether resulting ordered table of values is printed to screen. Used in other functions.
sort.models<-function(grid.output, models.returned, do.print=TRUE)
{
  # Rank every (neighbors, segments) cell by decreasing accuracy.
  size<-dim(grid.output)[1]*dim(grid.output)[2]
  best.models<-arrayInd(order(grid.output, decreasing = TRUE), dim(grid.output))
  best.models.mat<-matrix(nrow=models.returned, ncol=3)
  best.models.mat.temp<-matrix(nrow=size, ncol=3)
  colnames(best.models.mat)<-c("Neighbors", "Segments", "Accuracy")
  # Full table of (neighbors, segments, accuracy) in ranked order.
  for(j in 1:size)
  {
    best.models.mat.temp[j,]<-c(rownames(grid.output)[best.models[j,1]],colnames(grid.output)[best.models[j,2]],as.numeric(round(grid.output[best.models[j,1],best.models[j,2]],6)))
  }
  # Within each tied accuracy level, reorder so larger neighbor indices
  # come first for the same segment count.
  # NOTE(review): `best.models[set.1[temp.1]]` indexes the 2-column matrix
  # linearly (column 1 only); confirm this tie-breaking is as intended.
  acc.levels<-as.numeric(levels(factor(best.models.mat.temp[,3])))
  for(j in 1:length(acc.levels))
  {
    set.1<-which(best.models.mat.temp[,3]==acc.levels[j])
    fac.levels<-as.numeric(levels(factor(best.models[set.1,2])))
    fac.len<-length(fac.levels)
    for(k in 1:fac.len)
    {
      temp.1<-which(best.models[set.1,2]==fac.levels[k])
      best.models[set.1[temp.1]]=sort(best.models[set.1[temp.1]], decreasing = TRUE)
    }
  }
  # Emit the top models.returned rows after tie-breaking.
  for(j in 1:models.returned)
  {
    best.models.mat[j,]<-c(rownames(grid.output)[best.models[j,1]],colnames(grid.output)[best.models[j,2]],as.numeric(round(grid.output[best.models[j,1],best.models[j,2]],6)))
  }
  if(do.print) print(best.models.mat, quote=FALSE)
  if(!do.print) return(best.models.mat)
}
### Stepwise Procedures ###
### all.segs.ensemble
# Does not do any stepwise selection. Uses all segments in final model.
# step.array : array of probabilities determined from distances, and dependent on which method and kernel chosen.
#              Dimensions: [object, class, neighbor/bandwidth, segment].
# segment.accuracies : accuracy of each segment included in step.array. Used for weighted combinations.
# classes    : classification identifiers for set being analyzed.
# seg.weight : Set to TRUE if combining segment probabilities should be weighted by individual segment accuracy
# thresh     : stepwise accuracy improvement threshold. Does not affect all.segs.ensemble.
# do.par     : Set to TRUE to run calculations in parallel (unused here; kept for a uniform interface).
# cores      : number of cores to use during parallel calculations (unused here).
all.segs.ensemble<-function(step.array, segment.accuracies, classes, seg.weight=FALSE, thresh=0.0001, do.par=FALSE, cores=2)
{
  n.objs<-dim(step.array)[1]
  class.levels<-as.numeric(levels(factor(classes)))
  class.levels.index<-seq(1:length(class.levels))
  knn.n<-dim(step.array)[3]
  segment.n<-dim(step.array)[4]
  seg.seq<-seq(1,segment.n,1)
  # Add Check that Dimensions are correct
  ens.acc<-numeric()
  ens.segs.used<-list()
  probs.ens<-array(dim=c(n.objs, length(class.levels.index), knn.n))
  classes.ens<-matrix(nrow=n.objs, ncol=knn.n)
  for (k in 1:knn.n) # Will be changed to bandwith
  {
    # Ensemble over ALL segments: average (optionally accuracy-weighted)
    # the per-segment class probabilities for neighbor setting k.
    segs.used<-seg.seq
    probs.update<-matrix(nrow=n.objs, ncol=length(class.levels.index))
    for (q in class.levels.index)
    {
      ifelse(seg.weight,
             probs.update[,q]<-rowSums(t(segment.accuracies[k,segs.used]*t(as.matrix(step.array[,q,k,segs.used])))/sum(segment.accuracies[k,segs.used])),
             probs.update[,q]<-rowSums(as.matrix(step.array[,q,k,segs.used]))/length(segs.used)
      )
    }
    #print(probs.update)
    # Predict the class with maximal ensemble probability; break ties
    # uniformly at random.
    est.classes.update<-numeric()
    for (q in 1:n.objs)
    {
      max.classes<-class.levels[as.numeric(which(probs.update[q,]==max(probs.update[q,])))]
      est.classes.update[q]<-ifelse(length(max.classes)==1, max.classes, sample(max.classes,1))
    }
    acc.new<-mean(est.classes.update==classes)
    ens.acc[k]<-acc.new
    ens.segs.used[[k]]<-segs.used
    probs.ens[,,k]<-probs.update
    classes.ens[,k]<-est.classes.update
  }
  return(list(ens.accuracies=ens.acc, ens.segments=ens.segs.used, ens.probs=probs.ens, ens.classes=classes.ens))
}
### forward.ensemble
# Evaluates the sequential addition of segments to the ensemble, evaluating if inclusion of a new segment
# improves leave-one-out accuracy.
# step.array : array of probabilities determined from distances, and dependent on which method and kernel chosen.
# Calculated frequently in other functions.
# segment.accuracies : accuracy of each segment included in step.array. Used for weighted combinations.
# classes : classification identifiers for the set being analyzed.
# seg.weight : Set to TRUE if combining segment probabilities should be weighted by individual segment accuracy.
# thresh : stepwise accuracy improvement threshold.
# do.par : Set to TRUE to run calculations in parallel.
# cores : number of cores to use during parallel calculations.
forward.ensemble<-function(step.array, segment.accuracies, classes, seg.weight=FALSE, thresh=0.0001, do.par=FALSE, cores=2)
{
  # Greedy forward selection of segments for every tuning parameter k.
  # For each k: seed the ensemble with the single most accurate segment
  # (ties broken at random), then repeatedly add the segment that most
  # improves leave-one-out accuracy, stopping once the improvement falls
  # below 'thresh' or no segments remain.
  # Returns: ens.accuracies (per-k LOO accuracy), ens.segments (per-k list of
  # selected segments), ens.probs (n.objs x classes x k), ens.classes.
  n.objs<-dim(step.array)[1]
  class.levels<-as.numeric(levels(factor(classes)))
  class.levels.index<-seq(1:length(class.levels))
  knn.n<-dim(step.array)[3]
  segment.n<-dim(step.array)[4]
  seg.seq<-seq(1,segment.n,1)
  ens.acc<-numeric()
  ens.segs.used<-list()
  probs.ens<-array(dim=c(n.objs, length(class.levels.index), knn.n))
  classes.ens<-matrix(nrow=n.objs, ncol=knn.n)
  if(do.par)
  {
    # BUG FIX: was min(knn.n, max.cores); 'max.cores' is not an argument of
    # this function -- the core cap is supplied through the 'cores' argument.
    use.cores<-min(knn.n, cores)
    cl.temp<-makeCluster(use.cores)
    registerDoParallel(cl.temp)
    k.result<-foreach(k=1:knn.n, .combine=append) %dopar%
    {
      acc.old<-0 # starting value for while
      # Seed with the best single segment (random tie-break).
      max.vals<-which(segment.accuracies[k,]==max(segment.accuracies[k,]))
      max.seg<-ifelse(length(max.vals)==1, max.vals, sample(max.vals,1))
      acc.new<-segment.accuracies[k,max.seg]
      segs.used<-max.seg
      segs.left<-seg.seq[-segs.used]
      probs.update<-step.array[,,k,segs.used]
      est.classes.update<-numeric()
      for(q in 1:n.objs)
      {
        max.classes<-class.levels[as.numeric(which(probs.update[q,]==max(probs.update[q,])))]
        est.classes.update[q]<-ifelse(length(max.classes)==1, max.classes, sample(max.classes,1))
      }
      # Cycle through remaining segments, adding the best one each pass.
      while((acc.new > acc.old+thresh) & (length(segs.left)!=0))
      {
        acc.old<-acc.new
        acc.update<-0
        seg.update<-NULL
        classes.cand<-NULL
        probs.cand<-NULL
        for(j in segs.left)
        {
          probs.temp<-matrix(nrow=n.objs, ncol=length(class.levels.index))
          for(q in class.levels.index)
          {
            ifelse(seg.weight,
                   probs.temp[,q]<-rowSums(t(segment.accuracies[k,c(segs.used,j)]*t(as.matrix(step.array[,q,k,c(segs.used,j)])))/sum(segment.accuracies[k,c(segs.used,j)])),
                   probs.temp[,q]<-rowSums(as.matrix(step.array[,q,k,c(segs.used,j)]))/length(c(segs.used,j))
            )
            if(anyNA(probs.temp[,q])) probs.temp[,q]<-rep(0, n.objs)
          }
          est.classes.temp<-numeric()
          for(q in 1:n.objs)
          {
            max.classes<-class.levels[as.numeric(which(probs.temp[q,]==max(probs.temp[q,])))]
            est.classes.temp[q]<-ifelse(length(max.classes)==1, max.classes, sample(max.classes,1))
          }
          acc.temp<-mean(est.classes.temp==classes)
          if(acc.temp>acc.update)
          {
            acc.update<-acc.temp
            seg.update<-j
            classes.cand<-est.classes.temp
            probs.cand<-probs.temp
          }
        }
        # BUG FIX: commit the candidate probabilities/classes only when the
        # segment is actually accepted; previously the returned probabilities
        # could correspond to a rejected candidate combination while
        # segs.used/accuracy described the accepted one.
        if(acc.update>acc.old+thresh)
        {
          acc.new<-acc.update
          segs.used<-c(segs.used,seg.update)
          segs.left<-seg.seq[-segs.used]
          est.classes.update<-classes.cand
          probs.update<-probs.cand
        }
      }
      return(list(list(ens.acc.out=acc.new, ens.segs.used.out=segs.used, probs.ens.out=probs.update, classes.ens.out=est.classes.update)))
    }
    stopCluster(cl.temp)
  }
  if(!do.par)
  {
    # Sequential version of the loop above (%do% instead of %dopar%).
    k.result<-foreach(k=1:knn.n, .combine=append) %do%
    {
      acc.old<-0 # starting value for while
      max.vals<-which(segment.accuracies[k,]==max(segment.accuracies[k,]))
      max.seg<-ifelse(length(max.vals)==1, max.vals, sample(max.vals,1))
      acc.new<-segment.accuracies[k,max.seg]
      segs.used<-max.seg
      segs.left<-seg.seq[-segs.used]
      probs.update<-step.array[,,k,segs.used]
      est.classes.update<-numeric()
      for(q in 1:n.objs)
      {
        max.classes<-class.levels[as.numeric(which(probs.update[q,]==max(probs.update[q,])))]
        est.classes.update[q]<-ifelse(length(max.classes)==1, max.classes, sample(max.classes,1))
      }
      # Cycle through remaining segments, adding the best one each pass.
      while((acc.new > acc.old+thresh) & (length(segs.left)!=0))
      {
        acc.old<-acc.new
        acc.update<-0
        seg.update<-NULL
        classes.cand<-NULL
        probs.cand<-NULL
        for(j in segs.left)
        {
          probs.temp<-matrix(nrow=n.objs, ncol=length(class.levels.index))
          for(q in class.levels.index)
          {
            ifelse(seg.weight,
                   probs.temp[,q]<-rowSums(t(segment.accuracies[k,c(segs.used,j)]*t(as.matrix(step.array[,q,k,c(segs.used,j)])))/sum(segment.accuracies[k,c(segs.used,j)])),
                   probs.temp[,q]<-rowSums(as.matrix(step.array[,q,k,c(segs.used,j)]))/length(c(segs.used,j))
            )
            if(anyNA(probs.temp[,q])) probs.temp[,q]<-rep(0, n.objs)
          }
          est.classes.temp<-numeric()
          for(q in 1:n.objs)
          {
            max.classes<-class.levels[as.numeric(which(probs.temp[q,]==max(probs.temp[q,])))]
            est.classes.temp[q]<-ifelse(length(max.classes)==1, max.classes, sample(max.classes,1))
          }
          acc.temp<-mean(est.classes.temp==classes)
          if(acc.temp>acc.update)
          {
            acc.update<-acc.temp
            seg.update<-j
            classes.cand<-est.classes.temp
            probs.cand<-probs.temp
          }
        }
        # BUG FIX: see parallel branch -- only commit on acceptance.
        if(acc.update>acc.old+thresh)
        {
          acc.new<-acc.update
          segs.used<-c(segs.used,seg.update)
          segs.left<-seg.seq[-segs.used]
          est.classes.update<-classes.cand
          probs.update<-probs.cand
        }
      }
      return(list(list(ens.acc.out=acc.new, ens.segs.used.out=segs.used, probs.ens.out=probs.update, classes.ens.out=est.classes.update)))
    }
  }
  # Unpack the per-k results into the output containers.
  for(k in 1:knn.n)
  {
    ens.acc[k]<-k.result[[k]]$ens.acc.out
    ens.segs.used[[k]]<-k.result[[k]]$ens.segs.used.out
    probs.ens[,,k]<-k.result[[k]]$probs.ens.out
    classes.ens[,k]<-k.result[[k]]$classes.ens.out
  }
  return(list(ens.accuracies=ens.acc, ens.segments=ens.segs.used, ens.probs=probs.ens, ens.classes=classes.ens))
}
### calc.distance.array
# Creates an array of distances for an fdata object for a provided number of segments.
# Segments are first constructed using indent and density inputs.
# Distances are calculated between each observation in the fdata object for each segment.
# Can be done (and should be done) in parallel.
# fdata.object : data set to be analyzed.
# indent : discrete point indentations to remove numerical derivative noise.
# density : integration grid mesh to control accuracy.
# total.segments : number of partitions, where 1 defaults to unparitioned data.
# do.par : Set to TRUE to run calculations in parallel.
# cores : number of cores to use during parallel calculations.
calc.distance.array<-function(fdata.object, indent=0, density=NULL, total.segments=1, do.par=FALSE, max.cores=2)
{
  # Builds the segment partitions of the functional data object, then computes
  # the pairwise L^p distance matrix (metric.lp) for every segment. The
  # per-segment matrices are stacked along the third dimension of the returned
  # array (n.objs x n.objs x total.segments).
  segs<-create.segments(fdata.object, total.segments, indent, density)
  acomb <- function(...) abind(..., along=3)  # combiner: bind matrices into a 3-d array
  if(do.par)
  {
    use.cores<-min(total.segments, max.cores)
    cl.temp<-makeCluster(use.cores)
    registerDoParallel(cl.temp)
    seg.distance.out<-foreach(q=1:total.segments, .packages = "fda.usc", .combine='acomb', .multicombine = TRUE) %dopar%
    {
      x<-metric.lp(segs[[q]])
      return(x)
    }
    stopCluster(cl.temp)
  }
  if(!do.par)
  {
    # BUG FIX: this serial branch previously used %dopar%, which emits a
    # warning and falls back to sequential execution when no parallel backend
    # is registered; %do% is the intended sequential operator.
    seg.distance.out<-foreach(q=1:total.segments, .packages = "fda.usc", .combine='acomb', .multicombine = TRUE) %do%
    {
      x<-metric.lp(segs[[q]])
      return(x)
    }
  }
  # Repack into a plain array (when total.segments == 1 the foreach result is
  # a single matrix; the assignment below promotes it to a 3-d array).
  dim.1<-dim(seg.distance.out)[1]
  dim.2<-dim(seg.distance.out)[2]
  dim.3<-total.segments
  export.array<-array(dim=c(dim.1, dim.2, dim.3))
  export.array[,,(1:total.segments)]<-seg.distance.out
  return(export.array)
}
### segment.class
# Returns accuracy, estimated classes, and probabilities for any single segment.
# segment.distances : should be a single matrix from the array of distances.
# classes : classification identifies for set being analyzed.
# k.grid : tuning parameteres to analyze. Either neighbor size (integers)
# or bandwidth constants (positive real values)
# class.method : nonparametric classification method. Either "wknn" or "kernel".
# ker : kernel to be used (see Kernels above)
segment.class<-function(segment.distances, classes, k.grid=2, class.method="wknn", ker=kern.tri)
{
# Leave-one-out classification of every object from one segment's distance
# matrix. For each object the remaining objects are ordered by distance;
# class probabilities are kernel-weighted neighbour votes, evaluated at
# every tuning value in k.grid.
# Returns: accuracy.est (LOO accuracy per tuning value), classes.est
# (n.objs x length(k.grid)), prob.array (n.objs x classes x length(k.grid)).
n.objs<-dim(segment.distances)[1]
closest.temp<-matrix(ncol=n.objs,nrow=n.objs-1)
classify.temp<-matrix(ncol=n.objs,nrow=n.objs-1)
dist.ord<-matrix(ncol=n.objs, nrow=n.objs-1)
kern.probs<-matrix(ncol=n.objs, nrow=n.objs-1)
class.levels<-as.numeric(levels(factor(classes)))
class.levels.index<-seq(1:length(class.levels))
met.temp<-segment.distances
# Order the neighbours of each object j by distance, excluding j itself
# (this is what makes the evaluation leave-one-out).
for(j in 1:n.objs)
{
or.temp<-order(met.temp[,j])
closest.temp[,j]<-or.temp[or.temp!=j]
classify.temp[,j]<-classes[closest.temp[,j]]
dist.ord[,j]<-met.temp[or.temp[or.temp!=j], j]
}
if(class.method=="wknn")
{
# Weighted k-NN: kernel weights are computed once per column with h=1;
# the kernel presumably rescales distances internally for method "wknn"
# -- confirm against the kernel definitions.
kern.probs<-apply(dist.ord, 2, ker, h=1, method=class.method)
prob.classes<-array(dim=c(n.objs, length(class.levels), length(k.grid)), dimnames = list(seq(1:n.objs), class.levels, seq(1:length(k.grid))))
for(q in 1:length(k.grid))
{
k=k.grid[q]
for(i in 1:length(class.levels.index))
{
for(j in 1:n.objs)
{
# Share of kernel weight carried by the k nearest neighbours of class i.
assign.temp<-which(classify.temp[(1:k),j]==class.levels[i])
total.prob<-sum(kern.probs[(1:k), j])
prob.classes[j,class.levels.index[i],q]=sum(kern.probs[assign.temp,j])/total.prob
}
}
}
# Assign each object the class of maximum probability; ties broken at random.
est.classes<-matrix(nrow=n.objs, ncol=length(k.grid))
for (k in 1:length(k.grid))
{
for (j in 1:n.objs)
{
max.classes<-class.levels[as.numeric(which(prob.classes[j,,k]==max(prob.classes[j,,k])))]
est.classes[j,k]<-ifelse(length(max.classes)==1, max.classes, sample(max.classes,1))
}
}
est.accuracy<-numeric()
for (k in 1:length(k.grid))
{
est.accuracy[k]<-mean(est.classes[,k]==classes)
}
}
if(class.method=="kernel")
{
# Kernel method: all n-1 neighbours contribute; each k.grid value is used as
# the kernel bandwidth h rather than a neighbour count.
prob.classes<-array(dim=c(n.objs, length(class.levels), length(k.grid)), dimnames = list(seq(1:n.objs), class.levels, seq(1:length(k.grid))))
for(q in 1:length(k.grid))
{
kern.probs<-apply(dist.ord, 2, ker, h=k.grid[q], method=class.method)
for(i in 1:length(class.levels.index))
{
for(j in 1:n.objs)
{
assign.temp<-which(classify.temp[,j]==class.levels[i])
total.prob<-sum(kern.probs[, j])
# When every weight is zero (bandwidth too small) fall back to a uniform
# probability over the classes.
ifelse(total.prob==0,
prob.classes[j,class.levels.index[i],q]<-1/length(class.levels.index),
prob.classes[j,class.levels.index[i],q]<-sum(kern.probs[assign.temp,j])/total.prob
)
}
}
}
# Assign each object the class of maximum probability; ties broken at random.
est.classes<-matrix(nrow=n.objs, ncol=length(k.grid))
for (k in 1:length(k.grid))
{
for (j in 1:n.objs)
{
max.classes<-class.levels[as.numeric(which(prob.classes[j,,k]==max(prob.classes[j,,k])))]
est.classes[j,k]<-ifelse(length(max.classes)==1, max.classes, sample(max.classes,1))
}
}
est.accuracy<-numeric()
for (k in 1:length(k.grid))
{
est.accuracy[k]<-mean(est.classes[,k]==classes)
}
}
# NOTE(review): if class.method is neither "wknn" nor "kernel" the return
# below fails with "object not found" -- an explicit stop() would be clearer.
return(list(accuracy.est=est.accuracy, classes.est=est.classes, prob.array=prob.classes))
}
### grid.class
# Performs the standard grid search over tuning parameters and segments.
# No sequential stepping. To be used if a predetermined grid of parameters is desired rather than using
# sequential stepping of segment sizes.
# distance.array : array of distances calculated from fdata object. Distances for each segmented FDO.
# classes : classification identifies for set being analyzed.
# segments.grid : segment sizes to analyzes. Should be vector of integers.
# k.grid : tuning parameteres to analyze. Either neighbor size (integers)
# or bandwidth constants (positive real values)
# class.method : nonparametric classification method. Either "wknn" or "kernel".
# ker : kernel to be used (see Kernels above)
# step.method : stepwise procedure. Either forward.ensemble or all.segs.ensemble
# seg.weight : Set to TRUE to weight combination of segments by individual segment LOOCV accuracy.
# thresh : stepwise accuracy improvement threshold. Does not affect all.segs.ensemble.
# do.par : Set to TRUE to run calculations in parallel.
# max.cores : number of cores to use during parallel calculations.
# output : Toggle for displaying information about calculation while calculation is running
grid.class<-function(distance.array, classes, segments.grid=1, k.grid=5, class.method="wknn", ker=kern.tri, step.method=forward.ensemble, seg.weight=TRUE, thresh=0.0001, do.par=FALSE, max.cores=2, output=FALSE)
{
# Grid search for a fixed segmentation: computes per-segment LOO
# probabilities via segment.class (optionally in parallel), combines them
# with 'step.method', and returns the ensemble accuracies as a matrix with
# rows labelled by tuning value and columns by segment count.
#if(output) cat("Evaluating", segments.grid, "Segment(s) \n")
if(sum(segments.grid)!=dim(distance.array)[3]) stop("Segments and Distance Array do not match!")
n.objs<-dim(distance.array)[1]
class.levels<-as.numeric(levels(factor(classes)))
class.levels.index<-seq(1:length(class.levels))
seg.start<-cumsum(segments.grid)-segments.grid
if(output) cat("Evaluating Segment Probabilities. \n")
# NOTE(review): only the first entry of segments.grid is processed (s is
# fixed at 1); callers in this file pass a single segment count -- confirm
# before passing a vector here.
s=1
segment.array<-array(dim=c(n.objs, length(class.levels), length(k.grid), segments.grid[s]))
accuracy.mat<-matrix(nrow=length(k.grid), ncol=segments.grid[s])
# NOTE(review): est.classes is allocated but never used in this function.
est.classes<-matrix(nrow=n.objs, ncol=length(k.grid))
if(do.par && segments.grid[s]!=1)
{
use.cores<-min(max.cores, segments.grid[s])
cl.max<-makeCluster(use.cores)
registerDoParallel(cl.max)
kernel.output<-foreach(q=1:segments.grid[s], .packages = "fda.usc", .export="segment.class", .combine=append) %dopar%
{
return(list(segment.class(distance.array[,,seg.start[s]+q], classes, k.grid, class.method, ker)))
}
stopCluster(cl.max)
}
if(!do.par || segments.grid[s]==1)
{
kernel.output<-foreach(q=1:segments.grid[s], .packages = "fda.usc", .export="segment.class", .combine=append) %do%
{
return(list(segment.class(distance.array[,,seg.start[s]+q], classes, k.grid, class.method, ker)))
}
}
# Collect per-segment probability arrays and LOO accuracies.
for(q in 1:segments.grid[s])
{
segment.array[,,,q]<-kernel.output[[q]]$prob.array
accuracy.mat[,q]<-kernel.output[[q]]$accuracy.est
}
if(output) cat("Finished Probabilities. \n")
grid.results<-step.method(segment.array, accuracy.mat, classes, seg.weight=seg.weight, thresh=thresh, do.par=do.par, cores=max.cores)$ens.accuracies
if(output) cat("Finished Ensembling. \n")
# Label rows by tuning value ("k=...") and columns by segment count ("N segs").
c.names<-NULL
for(j in 1:length(segments.grid)) c.names<-c(c.names, paste(segments.grid[j], "segs"))
r.names<-NULL
for(j in 1:length(k.grid)) r.names<-c(r.names, paste("k=", round(k.grid[j], 6), sep=""))
grid.results<-as.matrix(grid.results)
colnames(grid.results)<-c.names
rownames(grid.results)<-r.names
return(grid.results)
}
### seq.grid.class
# Evaluates a sequentially increasing segment size, evaluating if the top models (as determined by LOOCV)
# improve in accuracy as segment size increases. The top top.models.eval models must not change
# for seg.sep segments. Same functionality as grid.class but with improved selection of segment sizes.
# Can drastically improve computation times if segments to be analyzed is unknown.
# Has additionally been improved to run directly from the fdata.object of interest.
# Indent and deriv must be supplied if fdata.object is to be manipulated within the calculation,
# or fdata.object can be supplied as derivative of interest and set deriv=0.
# fdata.object : set to be analyzed. Default setting is original data curves, that can be differentiated
# if desired.
# classes : classification identifiers for set being analyzed.
# top.models : number of models to be considered when evaluating if segmentation has been optimized.
# seg.sep : distance between total segments evaluated and segment size of top models. Setting to 0 will
# stop calculation once min.segments has been analyzed.
# min.segments : initial segment sizes to be analyzed. Should be a minimum of 2, although will work from 1.
# max.segments : stopping parameter to ensure sequential increasing of segment size will stop.
# indent : numerical noise indentation.
# deriv : which derivative of fdata.object to be analyzed.
# k.grid : grid of tuning parameters
# class.method : nonparametric classification method. Either "wknn" or "kernel".
# ker : kernel to be used (see Kernels above)
# step.method : stewpwise procedure. Either forward.ensemble or all.segs.ensemble
# seg.weight : Set to TRUE to weight combination of segments by individual segment LOOCV accuracy
# thresh : stepwise accuracy improvement threshold. Does not affect all.segs.ensemble.
# density : integration grid mesh size to control accuracy
# do.par : Set to TRUE to run calculations in parallel.
# max.cores : number of cores to use during parallel calculations.
# output : Toggle for displaying information about calculation while calculation is running
# write.out : Toggle to write data to file as calculation is performed.
# write.name : File name to which data is sent if write.out = T.
seq.grid.class<-function(fdata.object, classes, top.models.eval=15, seg.sep=1, min.segments=5, max.segments=30,
indent=0, deriv=0, k.grid=c(1:10), class.method="wknn", ker=kern.tri,
step.method=forward.ensemble, seg.weight=FALSE, thresh=0.0001, density=NULL,
do.par=FALSE, max.cores=2, output=FALSE, write.out=FALSE, write.name=NULL)
{
# Sequential grid search over segment sizes. First evaluates segment counts
# 1..min.segments, then keeps adding one more segment while any of the top
# 'top.models.eval' models (ranked by LOOCV accuracy via sort.models) uses a
# segmentation within 'seg.sep' of the largest size analyzed, stopping at
# max.segments. Returns the accumulated accuracy grid (rows = tuning values,
# columns = segment counts).
if(is.null(write.name)) write.name="output"
fd.full<-fdata.object
# NOTE(review): argvals is extracted but not used in this function.
argvals<-fdata.object$argvals
if(deriv!=0) fd.full<-fdata.deriv(fd.full,deriv)
n.objs<-length(classes)
class.levels<-as.numeric(levels(factor(classes)))
class.levels.index<-seq(1:length(class.levels))
full.grid<-NULL
# Initial sweep: one accuracy column per segment size from 1 to min.segments.
for(seg.size in 1:min.segments)
{
if(output) cat("Calculating Distance for", seg.size, "segment(s). \n")
dist.temp<-calc.distance.array(fd.full, indent, density, total.segments=seg.size, do.par, max.cores)
grid.seg<-grid.class(dist.temp, classes, segments.grid=seg.size, k.grid=k.grid, class.method=class.method, ker=ker,
step.method=step.method, seg.weight=seg.weight, thresh=thresh, do.par=do.par, max.cores=max.cores, output=output)
full.grid<-cbind(full.grid, grid.seg)
}
if(write.out) write.csv(full.grid, paste(write.name, "_Grid.csv", sep=""))
largest.seg.anal<-min.segments
# Largest segment count among the current top models, parsed from the
# "N segs" column labels: nchar(...)-4 strips the trailing "segs"; the
# leftover trailing space is tolerated by as.numeric.
largest.seg.in.top<-max(as.numeric(substring(sort.models(full.grid, top.models.eval, do.print=FALSE)[1:top.models.eval,2], 1, nchar(sort.models(full.grid, top.models.eval, do.print=FALSE)[1:top.models.eval,2])-4)))
# Grow the segmentation while the top models sit near the analyzed boundary.
# Note: min.segments doubles as the running segment counter from here on.
while((largest.seg.in.top+seg.sep)>largest.seg.anal && min.segments<max.segments)
{
min.segments<-min.segments+1
if(output) cat("Segment size increased to ", min.segments, ".\n")
if(output) cat("Calculating Distance for", min.segments, "segment(s). \n")
dist.temp<-calc.distance.array(fd.full, indent, density, total.segments=min.segments, do.par, max.cores)
grid.seg<-grid.class(dist.temp, classes, segments.grid=min.segments, k.grid=k.grid, class.method=class.method, ker=ker,
step.method=step.method, seg.weight=seg.weight, thresh=thresh, do.par=do.par, max.cores=max.cores, output=output)
full.grid<-cbind(full.grid, grid.seg)
if(write.out) write.csv(full.grid, paste(write.name, "_Grid.csv", sep=""))
largest.seg.anal<-min.segments
largest.seg.in.top<-max(as.numeric(substring(sort.models(full.grid, top.models.eval, do.print=FALSE)[1:top.models.eval,2], 1, nchar(sort.models(full.grid, top.models.eval, do.print=FALSE)[1:top.models.eval,2])-4)))
}
if(output) cat("Finished Sequential Grid Analysis at a total of", min.segments, "Segments. \n")
return(full.grid)
}
### validation.probs
# Function that takes a truncated distance matrix (including only the testing data ) and a k.grid
# and returns a probability array for the test data. Used for Cross validation.
# dist.temp : temporary truncated matrix of distances (distances from training to test objects)
# training.classes : classification identifiers for the training set
# k.eval : tuning parameter
# class.method : nonparametric classification method. Either "wknn" or "kernel".
# ker : kernel to be used (see Kernels above)
validation.probs<-function(dist.temp, training.classes, k.eval, class.method="wknn", ker=kern.tri)
{
  # Class-probability matrix for a set of test objects, given the distances
  # from every training object (rows) to every test object (columns).
  # Neighbours are kernel-weighted in the same way as in segment.class:
  # "wknn" votes over the k.eval nearest training objects, "kernel" votes
  # over all training objects with bandwidth k.eval.
  # Returns a (test objects) x (class levels) probability matrix.
  n.test<-ncol(dist.temp)
  n.train<-nrow(dist.temp)
  lvls<-as.numeric(levels(factor(training.classes)))
  n.lvls<-length(lvls)
  # For every test column record the neighbours' classes and distances in
  # increasing order of distance.
  nbr.class<-matrix(ncol=n.test, nrow=n.train)
  nbr.dist<-matrix(ncol=n.test, nrow=n.train)
  for(col in 1:n.test)
  {
    ranks<-order(dist.temp[,col])
    nbr.class[,col]<-training.classes[ranks]
    nbr.dist[,col]<-dist.temp[ranks,col]
  }
  if(class.method=="wknn")
  {
    # Weighted k-NN: kernel weights with h=1, votes from the k.eval nearest.
    wts<-apply(nbr.dist, 2, ker, h=1, method=class.method)
    prob.classes<-matrix(nrow=n.test, ncol=n.lvls)
    for(col in 1:n.test)
    {
      denom<-sum(wts[(1:k.eval),col])
      for(lv in 1:n.lvls)
      {
        votes<-which(nbr.class[(1:k.eval),col]==lvls[lv])
        prob.classes[col,lv]<-sum(wts[votes,col])/denom
      }
    }
  }
  if(class.method=="kernel")
  {
    # Kernel method: all training objects vote, k.eval acts as bandwidth.
    wts<-apply(nbr.dist, 2, ker, h=k.eval, method=class.method)
    prob.classes<-matrix(nrow=n.test, ncol=n.lvls)
    for(col in 1:n.test)
    {
      denom<-sum(wts[,col])
      for(lv in 1:n.lvls)
      {
        if(denom==0)
        {
          # Zero total weight: fall back to a uniform class distribution.
          prob.classes[col,lv]<-1/n.lvls
        }
        else
        {
          votes<-which(nbr.class[,col]==lvls[lv])
          prob.classes[col,lv]<-sum(wts[votes,col])/denom
        }
      }
    }
  }
  return(prob.classes)
}
### grid.model.cv
# Runs cross validation of the top models.analyzed models from the results of a sequential grid search.
# fdata.object : set to be analyzed. Default setting is original data curves.
# classes : classification identifies for set being analyzed.
# grid.results : output from running a grid search
# models.analyzed : number of models to validated
# folds : number of folds
# trials : number of times cross validation should be performed.
# folds.list : a list of folds identifiers
# seg.sep : distance between total segments evaluated and segment size of top models. Setting to 0 will
# stop calculation once min.segments has been analyzed.
# min.segments : initial segments sizes to be analyzed. Should be a minimum of 2, although will work from 1.
# max.segments : stopping parameter to ensure sequential increasing of segment size will stop.
# indent : numerical noise indentation.
# deriv : which derivative of fdata.object to be analyzed.
# k.grid : grid of tuning parameters
# class.method : nonparametric classification method. Either "wknn" or "kernel".
# ker : kernel to be used (see Kernels above)
# step.method : stepwise procedure. Either forward.ensemble or all.segs.ensemble
# seg.weight : Set to TRUE to weight combination of segments by individual segment LOOCV accuracy
# thresh : stepwise accuracy improvement threshold. Does not affect all.segs.ensemble.
# density : integration grid mesh size to control accuracy
# do.par : Set to TRUE to run calculations in parallel.
# max.cores : number of cores to use during parallel calculations.
# large.set : A toggle for numerical stability if analyzing a dataset with extreme number of obersvations
# output : Toggle for displaying information about calculation while calculation is running
grid.model.cv<-function(fdata.object, classes, grid.results, models.analyzed=10, folds=10, trials=1,
                        folds.list=NULL, indent=0, deriv=0, density=NULL, class.method="wknn", ker=kern.tri,
                        step.method=forward.ensemble, seg.weight=FALSE, do.par=FALSE, max.cores=2,
                        large.set=FALSE, output=TRUE)
{
  # Cross-validates the top 'models.analyzed' models from a grid search.
  # For each distinct segment count among the top models: rebuild the distance
  # array, re-derive each model's ensemble segments on the full data, then
  # evaluate training and test accuracy over 'trials' repetitions of
  # 'folds'-fold cross validation.
  # Returns: test.accuracies / training.accuracies (rows = trial*fold,
  # columns = models) and a per-model summary of segments used.
  # BUG FIX: was fold.creator(classes.temp, ...); 'classes.temp' is not
  # defined in this function -- fold labels must come from 'classes'.
  if(is.null(folds.list)) folds.list<-fold.creator(classes, folds, trials)
  if(deriv!=0) fdata.object<-fdata.deriv(fdata.object, deriv)
  # Decode each top model's tuning value ("k=...") and segment count
  # ("N segs") from the labels produced by the grid search.
  new.k<-as.numeric(substring(sort.models(grid.results, models.analyzed, do.print=FALSE)[1:models.analyzed,1], 3))
  new.segs<-as.numeric(substring(sort.models(grid.results, models.analyzed, do.print=FALSE)[1:models.analyzed,2], 1, nchar(sort.models(grid.results, models.analyzed, do.print=FALSE)[1:models.analyzed,2])-4))
  seg.grid<-as.numeric(names(table(new.segs)))
  n.objs<-length(classes)
  class.levels<-as.numeric(levels(factor(classes)))
  class.levels.index<-seq(1:length(class.levels))
  train.all.acc.mat<-matrix(nrow=trials*folds, ncol=models.analyzed)
  test.all.acc.mat<-matrix(nrow=trials*folds, ncol=models.analyzed)
  models.out<-list()
  for(seg.chosen in 1:length(seg.grid))
  {
    total.segments=seg.grid[seg.chosen]
    models.to.analyze<-which(new.segs==total.segments)
    model.k<-new.k[models.to.analyze]
    if(output) cat("Segment", seg.chosen, "of ", length(seg.grid), "\n")
    distance.array<-calc.distance.array(fdata.object, indent, density, total.segments=total.segments, do.par, max.cores)
    # Full-data probabilities/accuracies per segment, then re-run the stepwise
    # ensemble to fix the segments each model uses during cross validation.
    prob.array.temp<-array(dim=c(n.objs, length(class.levels.index), length(model.k), total.segments))
    acc.mat<-matrix(ncol=length(model.k), nrow=total.segments)
    for(j in 1:total.segments)
    {
      temp.1<-segment.class(distance.array[,,j], classes, model.k, class.method, ker)
      prob.array.temp[,,,j]<-temp.1$prob.array
      acc.mat[j,]<-temp.1$accuracy.est
    }
    model.temp<-step.method(prob.array.temp, t(acc.mat), classes, seg.weight=seg.weight)
    model.segs.used<-model.temp$ens.segments
    models.out[[seg.chosen]]<-model.segs.used
    if(do.par && !large.set)
    {
      cl.red<-makeCluster(max.cores)
      registerDoParallel(cl.red)
      cv.out<-foreach(j.trial=1:trials, .packages=c("fda.usc"),.export=c("segment.class", "validation.probs", "kern.tri", "kern.norm", "kern.unif"),.combine=append) %dopar%
      {
        est.accuracy.train<-matrix(nrow=folds, ncol=length(model.k))
        est.accuracy.test<-matrix(nrow=folds, ncol=length(model.k))
        for(k.fold in 1:folds)
        {
          test.folds<-which(folds.list[[j.trial]]==k.fold)
          training.classes<-classes[-test.folds]
          test.classes<-classes[test.folds]
          test.objs=length(test.classes)
          training.objs<-length(training.classes)
          for(model in 1:length(model.k))
          {
            len.model.segs<-length(model.segs.used[[model]])
            ### Determine Training Set Accuracy and Segment LOO Accuracy for each Training Set ###
            prob.array.train<-array(dim=c(training.objs, length(class.levels.index), 1, len.model.segs))
            acc.mat.train<-matrix(ncol=1, nrow=len.model.segs)
            for(j in 1:len.model.segs)
            {
              temp.1<-segment.class(distance.array[-test.folds,-test.folds, model.segs.used[[model]][j]],
                                    training.classes, model.k[model], class.method, ker)
              prob.array.train[,,1,j]<-temp.1$prob.array
              acc.mat.train[j,]<-temp.1$accuracy.est
            }
            ens.probs.train<-matrix(nrow=training.objs, ncol=length(class.levels.index))
            for(q in class.levels.index)
            {
              ifelse(seg.weight,
                     ens.probs.train[,q]<-rowSums(t(acc.mat.train[1:len.model.segs,]*t(as.matrix(prob.array.train[,q,,1:len.model.segs]))))/sum(acc.mat.train[1:len.model.segs,]),
                     ens.probs.train[,q]<-rowSums(as.matrix(prob.array.train[,q,,1:len.model.segs]))/len.model.segs
              )
              # BUG FIX: previously wrote to undefined 'probs.temp'/'train.objs';
              # the NA guard must reset this ensemble-probability column.
              if(anyNA(ens.probs.train[,q])) ens.probs.train[,q]<-rep(0, training.objs)
            }
            est.classes.train<-numeric()
            for(q in 1:training.objs)
            {
              max.classes<-class.levels[as.numeric(which(ens.probs.train[q,]==max(ens.probs.train[q,])))]
              est.classes.train[q]<-ifelse(length(max.classes)==1, max.classes, sample(max.classes,1))
            }
            est.accuracy.train[k.fold, model]<-mean(est.classes.train==training.classes)
            # Test-set probabilities from training-to-test distances.
            prob.array.test<-array(dim=c(test.objs, length(class.levels.index), 1, len.model.segs))
            for(q in 1:len.model.segs)
              prob.array.test[,,1,q]<-validation.probs(distance.array[-test.folds,test.folds,model.segs.used[[model]][q]], training.classes, k.eval=model.k[model], class.method, ker)
            ens.probs.test<-matrix(nrow=test.objs, ncol=length(class.levels.index))
            for(q in class.levels.index)
            {
              ifelse(seg.weight,
                     ens.probs.test[,q]<-rowSums(t(acc.mat.train[1:len.model.segs,]*t(as.matrix(prob.array.test[,q,,1:len.model.segs]))))/sum(acc.mat.train[1:len.model.segs,]),
                     ens.probs.test[,q]<-rowSums(as.matrix(prob.array.test[,q,,1:len.model.segs]))/len.model.segs
              )
              # BUG FIX: previously wrote to undefined 'probs.temp'.
              if(anyNA(ens.probs.test[,q])) ens.probs.test[,q]<-rep(0, test.objs)
            }
            est.classes.test<-numeric()
            for(q in 1:test.objs)
            {
              max.classes<-class.levels[as.numeric(which(ens.probs.test[q,]==max(ens.probs.test[q,])))]
              est.classes.test[q]<-ifelse(length(max.classes)==1, max.classes, sample(max.classes,1))
            }
            est.accuracy.test[k.fold, model]<-mean(est.classes.test==test.classes)
          }
        }
        return(list(list(test.accuracy=est.accuracy.test, train.accuracy=est.accuracy.train)))
      }
      stopCluster(cl.red)
    }
    if(!do.par || large.set)
    {
      # Sequential version of the CV loop above (%do% instead of %dopar%).
      cv.out<-foreach(j.trial=1:trials, .packages=c("fda.usc"),.export=c("segment.class", "validation.probs", "kern.tri", "kern.norm", "kern.unif"),.combine=append) %do%
      {
        est.accuracy.train<-matrix(nrow=folds, ncol=length(model.k))
        est.accuracy.test<-matrix(nrow=folds, ncol=length(model.k))
        for(k.fold in 1:folds)
        {
          test.folds<-which(folds.list[[j.trial]]==k.fold)
          training.classes<-classes[-test.folds]
          test.classes<-classes[test.folds]
          test.objs=length(test.classes)
          training.objs<-length(training.classes)
          for(model in 1:length(model.k))
          {
            len.model.segs<-length(model.segs.used[[model]])
            prob.array.train<-array(dim=c(training.objs, length(class.levels.index), 1, len.model.segs))
            acc.mat.train<-matrix(ncol=1, nrow=len.model.segs)
            for(j in 1:len.model.segs)
            {
              temp.1<-segment.class(distance.array[-test.folds,-test.folds, model.segs.used[[model]][j]],
                                    training.classes, model.k[model], class.method, ker)
              prob.array.train[,,1,j]<-temp.1$prob.array
              acc.mat.train[j,]<-temp.1$accuracy.est
            }
            ens.probs.train<-matrix(nrow=training.objs, ncol=length(class.levels.index))
            for(q in class.levels.index)
            {
              ifelse(seg.weight,
                     ens.probs.train[,q]<-rowSums(t(acc.mat.train[1:len.model.segs,]*t(as.matrix(prob.array.train[,q,,1:len.model.segs]))))/sum(acc.mat.train[1:len.model.segs,]),
                     ens.probs.train[,q]<-rowSums(as.matrix(prob.array.train[,q,,1:len.model.segs]))/len.model.segs
              )
              # BUG FIX: previously wrote to undefined 'probs.temp'/'train.objs'.
              if(anyNA(ens.probs.train[,q])) ens.probs.train[,q]<-rep(0, training.objs)
            }
            est.classes.train<-numeric()
            for(q in 1:training.objs)
            {
              max.classes<-class.levels[as.numeric(which(ens.probs.train[q,]==max(ens.probs.train[q,])))]
              est.classes.train[q]<-ifelse(length(max.classes)==1, max.classes, sample(max.classes,1))
            }
            est.accuracy.train[k.fold, model]<-mean(est.classes.train==training.classes)
            prob.array.test<-array(dim=c(test.objs, length(class.levels.index), 1, len.model.segs))
            for(q in 1:len.model.segs)
              prob.array.test[,,1,q]<-validation.probs(distance.array[-test.folds,test.folds,model.segs.used[[model]][q]], training.classes, k.eval=model.k[model], class.method, ker)
            ens.probs.test<-matrix(nrow=test.objs, ncol=length(class.levels.index))
            for(q in class.levels.index)
            {
              ifelse(seg.weight,
                     ens.probs.test[,q]<-rowSums(t(acc.mat.train[1:len.model.segs,]*t(as.matrix(prob.array.test[,q,,1:len.model.segs]))))/sum(acc.mat.train[1:len.model.segs,]),
                     ens.probs.test[,q]<-rowSums(as.matrix(prob.array.test[,q,,1:len.model.segs]))/len.model.segs
              )
              # BUG FIX: previously wrote to undefined 'probs.temp'.
              if(anyNA(ens.probs.test[,q])) ens.probs.test[,q]<-rep(0, test.objs)
            }
            est.classes.test<-numeric()
            for(q in 1:test.objs)
            {
              max.classes<-class.levels[as.numeric(which(ens.probs.test[q,]==max(ens.probs.test[q,])))]
              est.classes.test[q]<-ifelse(length(max.classes)==1, max.classes, sample(max.classes,1))
            }
            est.accuracy.test[k.fold, model]<-mean(est.classes.test==test.classes)
          }
        }
        return(list(list(test.accuracy=est.accuracy.test, train.accuracy=est.accuracy.train)))
      }
    }
    # Stack per-trial fold accuracies and slot them into the model columns.
    test.accs.temp<-NULL
    train.accs.temp<-NULL
    for(j in 1:trials)
    {
      test.accs.temp<-rbind(test.accs.temp, cv.out[[j]]$test.accuracy)
      train.accs.temp<-rbind(train.accs.temp, cv.out[[j]]$train.accuracy)
    }
    for(j in 1:length(models.to.analyze))
    {
      train.all.acc.mat[,models.to.analyze[j]]<-train.accs.temp[,j]
      test.all.acc.mat[,models.to.analyze[j]]<-test.accs.temp[,j]
    }
  }
  # Summary block: row 1 = k, row 2 = segment count, then the segments used.
  new.comb<-cbind(new.k, new.segs)
  x<-NULL
  for(j in 1:length(models.out)) x<-c(x, lengths(models.out[[j]]))
  segments.used.grid<-matrix(NA, ncol=models.analyzed, nrow=max(x))
  for(j in 1:length(seg.grid))
  {
    col.temp<-which(new.comb[,2]==seg.grid[[j]])
    n.temp<-length(col.temp)
    for(k in 1:n.temp)
    {
      segs.used<-models.out[[j]][[k]]
      len.temp<-length(segs.used)
      # BUG FIX: was segments.used.grid[(1:len.temp),col.temp] -- that wrote
      # each model's segments into every column of the group, so all models
      # sharing a segment count reported the last model's segments.
      segments.used.grid[(1:len.temp),col.temp[k]]<-segs.used
    }
  }
  seg.sum<-rbind(t(new.comb), segments.used.grid)
  return(list(test.accuracies=test.all.acc.mat, training.accuracies=train.all.acc.mat, segment.summary=seg.sum))
}
### dist.to.prob
# Takes a distance array and produces probability array for given method, kernel, and turning parameter
# See segment.class
# distance.array : array of distances
# total.segments : total number of segments (should match array dimension)
# k.size : a single tuning parameter (not a vector!)
# classes : classification identifies for set being analyzed. Only used for dimensionality.
dist.to.prob<-function(distance.array, total.segments, k.size, classes, class.method="wknn", ker=kern.tri)
{
  # Converts a distance array into a per-segment probability array plus
  # per-segment LOO accuracies for a single tuning parameter 'k.size'.
  # BUG FIX: 'class.method' and 'ker' were previously free variables resolved
  # from the global environment; they are now explicit parameters with the
  # library's standard defaults (backward compatible for 4-argument callers).
  n.objs<-length(classes)
  class.levels<-as.numeric(levels(factor(classes)))
  class.levels.index<-seq(1:length(class.levels))
  temp.array<-array(dim=c(n.objs,n.objs,total.segments))
  temp.array[,,1:total.segments]<-distance.array
  prob.array.temp<-array(dim=c(n.objs, length(class.levels.index), 1, total.segments))
  acc.mat<-matrix(ncol=1, nrow=total.segments)
  for(j in 1:total.segments)
  {
    # BUG FIX: was segment.class(temp.array[,,j], classes.temp, ...);
    # 'classes.temp' is undefined here -- the parameter is 'classes'.
    temp.1<-segment.class(temp.array[,,j], classes, k.size, class.method, ker)
    prob.array.temp[,,,j]<-temp.1$prob.array
    acc.mat[j,]<-temp.1$accuracy.est
  }
  return(list(probability.array=prob.array.temp, accuracies=acc.mat))
}
### bestsub.ensemble
# Takes a probability array and evaluates all combinations reporting the top LOOCV models for each combination size
# step.array : array of probabilities determined from distances, and dependent on which method and kernel chosen.
# Calculated frequently in other functions.
# segment.accuracies : accuracy of each segment included in step.array. Used for weighted combinations.
# classes : classification identifiers for the set being analyzed.
# seg.weight : Set to TRUE if combining segment probabilities should be weighted by individual segment accuracy
# do.par : Set to TRUE to run calculations in parallel.
# max.cores : number of cores to use during parallel calculations.
# best.sub.max : maximum combination size to analyze (computational stability)
bestsub.ensemble<-function(step.array, segment.accuracies, classes, seg.weight=FALSE, do.par=FALSE,
max.cores=2, best.sub.max=10)
{
# Exhaustive ("best subset") segment selection: for every combination size
# m = 1..min(segment.n, best.sub.max), evaluate all choose(segment.n, m)
# segment subsets and keep the subset(s) with the highest leave-one-out
# ensemble accuracy.  Returns list(accuracies, segments.used) where
# segments.used is a matrix whose columns list each kept model's segments,
# padded with NA.
# NOTE(review): 'k' is fixed to 1 below, so only the first tuning-parameter
# slice of step.array / segment.accuracies is ever used -- confirm intended.
n.objs<-dim(step.array)[1]
class.levels<-as.numeric(levels(factor(classes)))
class.levels.index<-seq(1:length(class.levels))
knn.n<-dim(step.array)[3]
segment.n<-dim(step.array)[4]
seg.seq<-seq(1,segment.n,1)
ens.acc<-numeric()
ens.segs.used<-list()
probs.ens<-array(dim=c(n.objs, length(class.levels.index), knn.n))
classes.ens<-matrix(nrow=n.objs, ncol=knn.n)
k=1
# Size-1 "ensemble": pick the single most accurate segment, ties broken at
# random with sample().
max.vals<-which(segment.accuracies[k,]==max(segment.accuracies[k,]))
max.seg<-ifelse(length(max.vals)==1, max.vals, sample(max.vals,1))
segs.best<-max.seg
acc.best<-segment.accuracies[k,max.seg]
probs.best<-step.array[,,k,max.seg]
est.classes.best<-numeric()
for(q in 1:n.objs)
{
max.classes<-class.levels[as.numeric(which(probs.best[q,]==max(probs.best[q,])))]
est.classes.best[q]<-ifelse(length(max.classes)==1, max.classes, sample(max.classes,1)) #if tie sample from mean distances from the test objects. Mean distance over all segments?
}
best.sub.start<-list(acc=acc.best, segs=segs.best, probs=probs.best, class=est.classes.best)
# Cap the largest combination size at best.sub.max for tractability.
ifelse(segment.n>best.sub.max, use.segs.tot<-best.sub.max, use.segs.tot<-segment.n)
# Parallel branch: one foreach task per combination size m; the serial branch
# below is an exact duplicate using %do%.
if(do.par && segment.n!=1)
{
cl.max<-makeCluster(max.cores)
registerDoParallel(cl.max)
best.sub.list<-foreach(m=2:use.segs.tot, .combine=append) %dopar%
{
combs<-combn(segment.n,m)
best.sub.temp<-list()
best.acc=0
for(j in 1:ncol(combs))
{
segs.update<-combs[,j]
# Ensemble probability per class: mean over the chosen segments, optionally
# weighted by each segment's individual LOO accuracy.
probs.temp<-matrix(nrow=n.objs, ncol=length(class.levels.index))
for(q in class.levels.index)
{
ifelse(seg.weight,
probs.temp[,q]<-rowSums(t(segment.accuracies[k,segs.update]*t(as.matrix(step.array[,q,k,segs.update])))/sum(segment.accuracies[k,segs.update])),
probs.temp[,q]<-rowSums(as.matrix(step.array[,q,k,segs.update]))/length(segs.update)
)
}
est.classes.temp<-numeric()
for(q in 1:n.objs)
{
max.classes<-class.levels[as.numeric(which(probs.temp[q,]==max(probs.temp[q,])))]
est.classes.temp[q]<-ifelse(length(max.classes)==1, max.classes, sample(max.classes,1))
}
acc.temp<-mean(est.classes.temp==classes)
# Keep every subset tying the best accuracy seen so far at this size...
if(acc.temp==best.acc) #acc.temp==acc.best
{
best.sub.temp<-append(best.sub.temp, list(list(acc=acc.temp, segs=segs.update, probs=probs.temp, class=est.classes.temp)))
}
# ...and restart the list when a strictly better subset appears.
if(acc.temp>best.acc)
{
best.acc=acc.temp
best.sub.temp<-list(list(acc=acc.temp, segs=segs.update, probs=probs.temp, class=est.classes.temp))
}
}
return(list(best.sub.temp))
}
stopCluster(cl.max)
}
# Serial duplicate of the branch above (foreach %do%).
if(!do.par && segment.n!=1)
{
best.sub.list<-foreach(m=2:use.segs.tot, .combine=append) %do%
{
combs<-combn(segment.n,m)
best.sub.temp<-list()
best.acc=0
for(j in 1:ncol(combs))
{
segs.update<-combs[,j]
probs.temp<-matrix(nrow=n.objs, ncol=length(class.levels.index))
for(q in class.levels.index)
{
ifelse(seg.weight,
probs.temp[,q]<-rowSums(t(segment.accuracies[k,segs.update]*t(as.matrix(step.array[,q,k,segs.update])))/sum(segment.accuracies[k,segs.update])),
probs.temp[,q]<-rowSums(as.matrix(step.array[,q,k,segs.update]))/length(segs.update)
)
}
est.classes.temp<-numeric()
for(q in 1:n.objs)
{
max.classes<-class.levels[as.numeric(which(probs.temp[q,]==max(probs.temp[q,])))]
est.classes.temp[q]<-ifelse(length(max.classes)==1, max.classes, sample(max.classes,1))
}
acc.temp<-mean(est.classes.temp==classes)
if(acc.temp==best.acc) #acc.temp==acc.best
{
best.sub.temp<-append(best.sub.temp, list(list(acc=acc.temp, segs=segs.update, probs=probs.temp, class=est.classes.temp)))
}
if(acc.temp>best.acc)
{
best.acc=acc.temp
best.sub.temp<-list(list(acc=acc.temp, segs=segs.update, probs=probs.temp, class=est.classes.temp))
}
}
return(list(best.sub.temp))
}
}
# Count kept models (the size-1 starter plus every tied-best subset per size)
# so the output matrix can be sized, then flatten results into a vector of
# accuracies and an NA-padded segment matrix.
acc.tab<-numeric()
n.top.models=1
if(segment.n!=1)
{
for(k in 1:(use.segs.tot-1))
{
for(j in 1:length(best.sub.list[[k]]))
{
n.top.models=n.top.models+1
}
}
}
segs.tab<-matrix(NA, ncol=n.top.models, nrow=use.segs.tot)
acc.tab[1]<-best.sub.start$acc
segs.tab[1,1]<-best.sub.start$segs
count=1
if(segment.n!=1)
{
for(k in 1:(use.segs.tot-1))
{
for(j in 1:length(best.sub.list[[k]]))
{
count=count+1
acc.tab[count]<-best.sub.list[[k]][[j]]$acc
segs.tab[1:(k+1),count]<-best.sub.list[[k]][[j]]$segs
}
}
}
return(list(accuracies=acc.tab, segments.used=segs.tab))
}
### bss.single.model.cv
# Evaluates all best segment selection specifications for a given model (chosen segment size and neighbor size)
bss.single.model.cv<-function(fdata.object, classes, k.size=5, total.segments=1,
                              indent=0, deriv=0, best.sub.max=10, density=NULL, folds=10,
                              trials=1, folds.list=NULL, smooth=FALSE, class.method="wknn",
                              ker=kern.tri, seg.weight=FALSE, do.par=FALSE, max.cores=2, large.set=FALSE, output=FALSE)
{
  # Cross-validates every best-segment-selection ensemble size for one model
  # specification (a single segment count 'total.segments' and tuning value
  # 'k.size').  Pipeline:
  #   1. segment-wise distance array for the requested derivative,
  #   2. distances -> per-segment class probabilities (dist.to.prob),
  #   3. exhaustive best-subset selection on LOOCV accuracy (bestsub.ensemble),
  #   4. 'trials' repetitions of 'folds'-fold CV for every selected subset.
  # Returns list(test.accuracies, training.accuracies, bss).
  #
  # Fixes vs. the original: the global 'classes.temp' has been replaced by the
  # 'classes' argument throughout, and the NA guards now reset the ensemble
  # probability columns actually being built (they previously assigned to the
  # undefined 'probs.temp' / 'train.objs' and would have errored if reached).
  if(output) time.start<-Sys.time()
  n.objs<-length(classes)
  class.levels<-as.numeric(levels(factor(classes)))
  class.levels.index<-seq(1:length(class.levels))
  if(is.null(folds.list)) folds.list<-fold.creator(classes, folds, trials)
  if(deriv!=0) fdata.object<-fdata.deriv(fdata.object, deriv)
  distance.array<-calc.distance.array(fdata.object, indent, density, total.segments=total.segments, do.par, max.cores)
  if(output)
  {
    time.dist<-Sys.time()
    cat("Finished Distances.", time.dist-time.start, "\n")
  }
  temp.prob<-dist.to.prob(distance.array, total.segments, k.size, classes)
  temp<-bestsub.ensemble(step.array=temp.prob$probability.array, segment.accuracies=t(temp.prob$accuracies),
                         classes=classes, seg.weight=seg.weight, do.par=do.par,
                         max.cores=max.cores, best.sub.max=best.sub.max)
  if(output)
  {
    time.bss<-Sys.time()
    cat("Finished Best Segment Selection", time.bss-time.dist, "\n")
  }
  # One candidate model per best-subset column (NA padding stripped).
  segs.list<-list()
  for(j in 1:ncol(temp[[2]]))
  {
    segs.list[[j]]<-as.numeric(na.exclude(temp[[2]][,j]))
  }
  model.segs.used<-segs.list
  models.to.analyze<-1:length(model.segs.used)
  model.k<-rep(k.size, length(model.segs.used))
  train.all.acc.mat<-matrix(nrow=trials*folds, ncol=length(model.segs.used))
  test.all.acc.mat<-matrix(nrow=trials*folds, ncol=length(model.segs.used))
  # Parallel branch: one foreach task per CV trial; the serial branch below is
  # the same code under %do%.
  if(do.par && !large.set)
  {
    cl.red<-makeCluster(max.cores)
    registerDoParallel(cl.red)
    cv.out<-foreach(j.trial=1:trials, .packages=c("fda.usc"),.export=c("segment.class", "validation.probs", "kern.tri", "kern.norm", "kern.unif"),.combine=append) %dopar%
    {
      est.accuracy.train<-matrix(nrow=folds, ncol=length(model.k))
      est.accuracy.test<-matrix(nrow=folds, ncol=length(model.k))
      for(k.fold in 1:folds)
      {
        test.folds<-which(folds.list[[j.trial]]==k.fold)
        training.classes<-classes[-test.folds]
        test.classes<-classes[test.folds]
        test.objs=length(test.classes)
        training.objs<-length(training.classes)
        for(model in 1:length(model.k))
        {
          len.model.segs<-length(model.segs.used[[model]])
          ### Determine Training Set Accuracy and Segment LOO Accuracy for each Training Set ###
          prob.array.train<-array(dim=c(training.objs, length(class.levels.index), 1, len.model.segs))
          acc.mat.train<-matrix(ncol=1, nrow=len.model.segs)
          for(j in 1:len.model.segs)
          {
            temp.1<-segment.class(distance.array[-test.folds,-test.folds, model.segs.used[[model]][j]],
                                  training.classes, model.k[model], class.method, ker)
            prob.array.train[,,1,j]<-temp.1$prob.array
            acc.mat.train[j,]<-temp.1$accuracy.est
          }
          # Ensemble probability per class: mean over the model's segments,
          # optionally weighted by each segment's training LOO accuracy.
          ens.probs.train<-matrix(nrow=training.objs, ncol=length(class.levels.index))
          for(q in class.levels.index)
          {
            ifelse(seg.weight,
                   ens.probs.train[,q]<-rowSums(t(acc.mat.train[1:len.model.segs,]*t(as.matrix(prob.array.train[,q,,1:len.model.segs]))))/sum(acc.mat.train[1:len.model.segs,]),
                   ens.probs.train[,q]<-rowSums(as.matrix(prob.array.train[,q,,1:len.model.segs]))/len.model.segs
            )
            # Degenerate weights can yield NA probabilities; zero the column.
            if(anyNA(ens.probs.train[,q])) ens.probs.train[,q]<-rep(0, training.objs)
          }
          est.classes.train<-numeric()
          for(q in 1:training.objs)
          {
            max.classes<-class.levels[as.numeric(which(ens.probs.train[q,]==max(ens.probs.train[q,])))]
            est.classes.train[q]<-ifelse(length(max.classes)==1, max.classes, sample(max.classes,1))
          }
          est.accuracy.train[k.fold, model]<-mean(est.classes.train==training.classes)
          ### Determine Test Set Accuracy ###
          prob.array.test<-array(dim=c(test.objs, length(class.levels.index), 1, len.model.segs))
          for(q in 1:len.model.segs)
            prob.array.test[,,1,q]<-validation.probs(distance.array[-test.folds,test.folds,model.segs.used[[model]][q]], training.classes, k.eval=model.k[model], class.method, ker)
          ens.probs.test<-matrix(nrow=test.objs, ncol=length(class.levels.index))
          for(q in class.levels.index)
          {
            ifelse(seg.weight,
                   ens.probs.test[,q]<-rowSums(t(acc.mat.train[1:len.model.segs,]*t(as.matrix(prob.array.test[,q,,1:len.model.segs]))))/sum(acc.mat.train[1:len.model.segs,]),
                   ens.probs.test[,q]<-rowSums(as.matrix(prob.array.test[,q,,1:len.model.segs]))/len.model.segs
            )
            if(anyNA(ens.probs.test[,q])) ens.probs.test[,q]<-rep(0, test.objs)
          }
          est.classes.test<-numeric()
          for(q in 1:test.objs)
          {
            max.classes<-class.levels[as.numeric(which(ens.probs.test[q,]==max(ens.probs.test[q,])))]
            est.classes.test[q]<-ifelse(length(max.classes)==1, max.classes, sample(max.classes,1))
          }
          est.accuracy.test[k.fold, model]<-mean(est.classes.test==test.classes)
        }
      }
      return(list(list(test.accuracy=est.accuracy.test, train.accuracy=est.accuracy.train)))
    }
    stopCluster(cl.red)
  }
  # Serial (or memory-safe) duplicate of the branch above.
  if(!do.par || large.set)
  {
    cv.out<-foreach(j.trial=1:trials, .packages=c("fda.usc"),.export=c("segment.class", "validation.probs", "kern.tri", "kern.norm", "kern.unif"),.combine=append) %do%
    {
      est.accuracy.train<-matrix(nrow=folds, ncol=length(model.k))
      est.accuracy.test<-matrix(nrow=folds, ncol=length(model.k))
      for(k.fold in 1:folds)
      {
        test.folds<-which(folds.list[[j.trial]]==k.fold)
        training.classes<-classes[-test.folds]
        test.classes<-classes[test.folds]
        test.objs=length(test.classes)
        training.objs<-length(training.classes)
        for(model in 1:length(model.k))
        {
          len.model.segs<-length(model.segs.used[[model]])
          ### Determine Training Set Accuracy and Segment LOO Accuracy for each Training Set ###
          prob.array.train<-array(dim=c(training.objs, length(class.levels.index), 1, len.model.segs))
          acc.mat.train<-matrix(ncol=1, nrow=len.model.segs)
          for(j in 1:len.model.segs)
          {
            temp.1<-segment.class(distance.array[-test.folds,-test.folds, model.segs.used[[model]][j]],
                                  training.classes, model.k[model], class.method, ker)
            prob.array.train[,,1,j]<-temp.1$prob.array
            acc.mat.train[j,]<-temp.1$accuracy.est
          }
          ens.probs.train<-matrix(nrow=training.objs, ncol=length(class.levels.index))
          for(q in class.levels.index)
          {
            ifelse(seg.weight,
                   ens.probs.train[,q]<-rowSums(t(acc.mat.train[1:len.model.segs,]*t(as.matrix(prob.array.train[,q,,1:len.model.segs]))))/sum(acc.mat.train[1:len.model.segs,]),
                   ens.probs.train[,q]<-rowSums(as.matrix(prob.array.train[,q,,1:len.model.segs]))/len.model.segs
            )
            if(anyNA(ens.probs.train[,q])) ens.probs.train[,q]<-rep(0, training.objs)
          }
          est.classes.train<-numeric()
          for(q in 1:training.objs)
          {
            max.classes<-class.levels[as.numeric(which(ens.probs.train[q,]==max(ens.probs.train[q,])))]
            est.classes.train[q]<-ifelse(length(max.classes)==1, max.classes, sample(max.classes,1))
          }
          est.accuracy.train[k.fold, model]<-mean(est.classes.train==training.classes)
          ### Determine Test Set Accuracy ###
          prob.array.test<-array(dim=c(test.objs, length(class.levels.index), 1, len.model.segs))
          for(q in 1:len.model.segs)
            prob.array.test[,,1,q]<-validation.probs(distance.array[-test.folds,test.folds,model.segs.used[[model]][q]], training.classes, k.eval=model.k[model], class.method, ker)
          ens.probs.test<-matrix(nrow=test.objs, ncol=length(class.levels.index))
          for(q in class.levels.index)
          {
            ifelse(seg.weight,
                   ens.probs.test[,q]<-rowSums(t(acc.mat.train[1:len.model.segs,]*t(as.matrix(prob.array.test[,q,,1:len.model.segs]))))/sum(acc.mat.train[1:len.model.segs,]),
                   ens.probs.test[,q]<-rowSums(as.matrix(prob.array.test[,q,,1:len.model.segs]))/len.model.segs
            )
            if(anyNA(ens.probs.test[,q])) ens.probs.test[,q]<-rep(0, test.objs)
          }
          est.classes.test<-numeric()
          for(q in 1:test.objs)
          {
            max.classes<-class.levels[as.numeric(which(ens.probs.test[q,]==max(ens.probs.test[q,])))]
            est.classes.test[q]<-ifelse(length(max.classes)==1, max.classes, sample(max.classes,1))
          }
          est.accuracy.test[k.fold, model]<-mean(est.classes.test==test.classes)
        }
      }
      return(list(list(test.accuracy=est.accuracy.test, train.accuracy=est.accuracy.train)))
    }
  }
  # Stack the per-trial fold accuracies and store them per model.
  test.accs.temp<-NULL
  train.accs.temp<-NULL
  for(j in 1:trials)
  {
    test.accs.temp<-rbind(test.accs.temp, cv.out[[j]]$test.accuracy)
    train.accs.temp<-rbind(train.accs.temp, cv.out[[j]]$train.accuracy)
  }
  for(j in 1:length(models.to.analyze))
  {
    train.all.acc.mat[,models.to.analyze[j]]<-train.accs.temp[,j]
    test.all.acc.mat[,models.to.analyze[j]]<-test.accs.temp[,j]
  }
  return(list(test.accuracies=test.all.acc.mat, training.accuracies=train.all.acc.mat, bss=temp))
}
### bss.ens.model.cv
# Cross validates the top LOOCV accuracy segment combinations for all combination sizes using
# classifiers produced from a single curve or combination of multiple derivative orders.
# If a single curve is to be analyzed, k.size, seg.size, indent, deriv should be single elements.
# If multiple curves are to be combined, k.size, seg.size, indent, deriv should be vectors.
# best.sub.max can be used to control the size of the segment combinations evaluated.
# fdata.object : original data functional object.
# classes : classification identifies for set being analyzed.
# k.sizes : chosen model tuning parameters for each curve to be analyzed.
# seg.size : chosen model segment sizes for each curve to be analyzed.
# indent : numerical derivative noise indents for each derivative order.
# deriv : derivative orders. i.e c(0,1,2) would be original curve with first and second derivs.
# best.sub.max : maximum combination size.
# thresh : only if forward segment selection it to be used. Improvement in LOOCV to add segment to ensemble.
# density : integration grid mesh size to control accuracy
# folds : number of folds.
# trials : number of times cross validation should be performed.
# folds.list : a list of folds identifiers
# class.method : nonparametric classification method. Either "wknn" or "kernel".
# ker : kernel to be used (see Kernels above).
# seg.weight : Set to TRUE if combining segment probabilities should be weighted by individual segment accuracy.
# use.forward : Use FSS in place of BSS when evaluating final model. Reduces computational burden but
# does not guarantee the top segment combination will be found.
# do.par : Set to TRUE to run calculations in parallel.
# max.cores : number of cores to use during parallel calculations.
# large.set : A toggle for numerical stability if analyzing a dataset with extreme number of obersvations
# output : Toggle for displaying information about calculation while calculation is running
bss.ens.model.cv<-function(fdata.object, classes, k.sizes=c(5,5), seg.sizes=c(1,2),
                           indent=c(0,0), deriv=c(0,1), best.sub.max=10, thresh=1e-4, density=NULL, folds=10,
                           trials=1, folds.list=NULL, class.method="wknn", ker=kern.tri, seg.weight=FALSE,
                           use.forward=FALSE, do.par=FALSE, max.cores=2, large.set=FALSE, output=FALSE)
{
  # Cross-validates the top LOOCV segment combinations built from one or more
  # derivative orders of the same functional object.  Each element of
  # 'deriv'/'seg.sizes'/'k.sizes'/'indent' describes one curve; the segment
  # probabilities from all curves are pooled before best-subset (or forward)
  # selection, then every selected subset is re-scored with repeated k-fold CV.
  # Returns list(test.accuracies, training.accuracies, bss).
  #
  # Fixes vs. the original: the global 'classes.temp' has been replaced by the
  # 'classes' argument throughout, and the NA guards now reset the ensemble
  # probability columns actually being built (they previously assigned to the
  # undefined 'probs.temp' / 'train.objs' and would have errored if reached).
  if(output) time.start<-Sys.time()
  n.objs<-length(classes)
  class.levels<-as.numeric(levels(factor(classes)))
  class.levels.index<-seq(1:length(class.levels))
  if(is.null(folds.list)) folds.list<-fold.creator(classes, folds, trials)
  total.segments<-sum(seg.sizes)
  distance.array<-array(dim=c(n.objs, n.objs, total.segments))
  temp.prob.array<-array(dim=c(n.objs, length(class.levels.index), 1, total.segments))
  temp.prob.accur<-NULL
  # Segment index range for curve j is csum.seg[j]:csum[j].
  csum<-cumsum(seg.sizes)
  csum.seg<-(cumsum(seg.sizes)-seg.sizes)+1
  for(j in 1:length(seg.sizes))
  {
    ifelse(deriv[j]==0, fdata.object.temp<-fdata.object, fdata.object.temp<-fdata.deriv(fdata.object, deriv[j]))
    distance.array[,,csum.seg[j]:csum[j]]<-calc.distance.array(fdata.object.temp, indent[j], density, total.segments=seg.sizes[j], do.par, max.cores)
    temp.prob<-dist.to.prob(distance.array[,,csum.seg[j]:csum[j]], seg.sizes[j], k.sizes[j], classes)
    temp.prob.array[,,,csum.seg[j]:csum[j]]<-temp.prob$probability.array
    temp.prob.accur<-c(temp.prob.accur, temp.prob$accuracies)
  }
  if(output)
  {
    time.dist<-Sys.time()
    cat("Finished Distances.", time.dist-time.start, "\n")
  }
  # Segment selection: exhaustive best-subset search, or greedy forward
  # selection when use.forward=TRUE (cheaper, not guaranteed optimal).
  if(!use.forward)
  {
    temp<-bestsub.ensemble(step.array=temp.prob.array, segment.accuracies=t(as.matrix(temp.prob.accur)),
                           classes=classes, seg.weight=seg.weight, do.par=do.par,
                           max.cores=max.cores, best.sub.max=best.sub.max)
    segs.list<-list()
    for(j in 1:ncol(temp[[2]]))
    {
      segs.list[[j]]<-as.numeric(na.exclude(temp[[2]][,j]))
    }
    model.segs.used<-segs.list
    models.to.analyze<-1:length(model.segs.used)
  }
  if(use.forward)
  {
    temp.fss<-forward.ensemble(step.array=temp.prob.array, segment.accuracies=t(as.matrix(temp.prob.accur)),
                               classes=classes, seg.weight=seg.weight, thresh=thresh, do.par=do.par,
                               cores=max.cores)
    temp<-list(segments.used=as.matrix(temp.fss$ens.segments[[1]], ncol=1))
    model.segs.used<-list(temp.fss$ens.segments[[1]])
    models.to.analyze<-1
  }
  if(output)
  {
    time.bss<-Sys.time()
    cat("Finished BSS.", time.bss-time.dist, "\n")
  }
  # Per-segment lookup tables: which derivative and which tuning value each
  # pooled segment index belongs to.
  model.k<-NULL
  deriv.k<-NULL
  for(j in 1:length(seg.sizes))
  {
    deriv.k<-c(deriv.k, rep(deriv[j], seg.sizes[j]))
    model.k<-c(model.k, rep(k.sizes[j], seg.sizes[j]))
  }
  train.all.acc.mat<-matrix(nrow=trials*folds, ncol=length(model.segs.used))
  test.all.acc.mat<-matrix(nrow=trials*folds, ncol=length(model.segs.used))
  # Parallel branch: one foreach task per CV trial; serial duplicate below.
  if(do.par && !large.set)
  {
    cl.red<-makeCluster(max.cores)
    registerDoParallel(cl.red)
    cv.out<-foreach(j.trial=1:trials, .packages=c("fda.usc"),.export=c("segment.class", "validation.probs", "kern.tri", "kern.norm", "kern.unif"),.combine=append) %dopar%
    {
      est.accuracy.train<-matrix(nrow=folds, ncol=length(model.segs.used))
      est.accuracy.test<-matrix(nrow=folds, ncol=length(model.segs.used))
      for(k.fold in 1:folds)
      {
        test.folds<-which(folds.list[[j.trial]]==k.fold)
        training.classes<-classes[-test.folds]
        test.classes<-classes[test.folds]
        test.objs=length(test.classes)
        training.objs<-length(training.classes)
        for(model in 1:length(model.segs.used))
        {
          len.model.segs<-length(model.segs.used[[model]])
          prob.array.train<-array(dim=c(training.objs, length(class.levels.index), 1, len.model.segs))
          acc.mat.train<-matrix(ncol=1, nrow=len.model.segs)
          for(j in 1:len.model.segs)
          {
            # Note the per-segment tuning value model.k[<segment index>].
            temp.1<-segment.class(distance.array[-test.folds,-test.folds, model.segs.used[[model]][j]],
                                  training.classes, model.k[model.segs.used[[model]][j]], class.method, ker)
            prob.array.train[,,1,j]<-temp.1$prob.array
            acc.mat.train[j,]<-temp.1$accuracy.est
          }
          ens.probs.train<-matrix(nrow=training.objs, ncol=length(class.levels.index))
          for(q in class.levels.index)
          {
            ifelse(seg.weight,
                   ens.probs.train[,q]<-rowSums(t(acc.mat.train[1:len.model.segs,]*t(as.matrix(prob.array.train[,q,,1:len.model.segs]))))/sum(acc.mat.train[1:len.model.segs,]),
                   ens.probs.train[,q]<-rowSums(as.matrix(prob.array.train[,q,,1:len.model.segs]))/len.model.segs
            )
            # Degenerate weights can yield NA probabilities; zero the column.
            if(anyNA(ens.probs.train[,q])) ens.probs.train[,q]<-rep(0, training.objs)
          }
          est.classes.train<-numeric()
          for(q in 1:training.objs)
          {
            max.classes<-class.levels[as.numeric(which(ens.probs.train[q,]==max(ens.probs.train[q,])))]
            est.classes.train[q]<-ifelse(length(max.classes)==1, max.classes, sample(max.classes,1))
          }
          est.accuracy.train[k.fold, model]<-mean(est.classes.train==training.classes)
          prob.array.test<-array(dim=c(test.objs, length(class.levels.index), 1, len.model.segs))
          for(q in 1:len.model.segs)
            prob.array.test[,,1,q]<-validation.probs(distance.array[-test.folds,test.folds,model.segs.used[[model]][q]], training.classes, k.eval=model.k[model.segs.used[[model]][q]], class.method, ker)
          ens.probs.test<-matrix(nrow=test.objs, ncol=length(class.levels.index))
          for(q in class.levels.index)
          {
            ifelse(seg.weight,
                   ens.probs.test[,q]<-rowSums(t(acc.mat.train[1:len.model.segs,]*t(as.matrix(prob.array.test[,q,,1:len.model.segs]))))/sum(acc.mat.train[1:len.model.segs,]),
                   ens.probs.test[,q]<-rowSums(as.matrix(prob.array.test[,q,,1:len.model.segs]))/len.model.segs
            )
            if(anyNA(ens.probs.test[,q])) ens.probs.test[,q]<-rep(0, test.objs)
          }
          est.classes.test<-numeric()
          for(q in 1:test.objs)
          {
            max.classes<-class.levels[as.numeric(which(ens.probs.test[q,]==max(ens.probs.test[q,])))]
            est.classes.test[q]<-ifelse(length(max.classes)==1, max.classes, sample(max.classes,1))
          }
          est.accuracy.test[k.fold, model]<-mean(est.classes.test==test.classes)
        }
      }
      return(list(list(test.accuracy=est.accuracy.test, train.accuracy=est.accuracy.train)))
    }
    stopCluster(cl.red)
  }
  # Serial (or memory-safe) duplicate of the branch above.
  if(!do.par || large.set)
  {
    cv.out<-foreach(j.trial=1:trials, .packages=c("fda.usc"),.export=c("segment.class", "validation.probs", "kern.tri", "kern.norm", "kern.unif"),.combine=append) %do%
    {
      est.accuracy.train<-matrix(nrow=folds, ncol=length(model.segs.used))
      est.accuracy.test<-matrix(nrow=folds, ncol=length(model.segs.used))
      for(k.fold in 1:folds)
      {
        test.folds<-which(folds.list[[j.trial]]==k.fold)
        training.classes<-classes[-test.folds]
        test.classes<-classes[test.folds]
        test.objs=length(test.classes)
        training.objs<-length(training.classes)
        for(model in 1:length(model.segs.used))
        {
          len.model.segs<-length(model.segs.used[[model]])
          prob.array.train<-array(dim=c(training.objs, length(class.levels.index), 1, len.model.segs))
          acc.mat.train<-matrix(ncol=1, nrow=len.model.segs)
          for(j in 1:len.model.segs)
          {
            temp.1<-segment.class(distance.array[-test.folds,-test.folds, model.segs.used[[model]][j]],
                                  training.classes, model.k[model.segs.used[[model]][j]], class.method, ker)
            prob.array.train[,,1,j]<-temp.1$prob.array
            acc.mat.train[j,]<-temp.1$accuracy.est
          }
          ens.probs.train<-matrix(nrow=training.objs, ncol=length(class.levels.index))
          for(q in class.levels.index)
          {
            ifelse(seg.weight,
                   ens.probs.train[,q]<-rowSums(t(acc.mat.train[1:len.model.segs,]*t(as.matrix(prob.array.train[,q,,1:len.model.segs]))))/sum(acc.mat.train[1:len.model.segs,]),
                   ens.probs.train[,q]<-rowSums(as.matrix(prob.array.train[,q,,1:len.model.segs]))/len.model.segs
            )
            if(anyNA(ens.probs.train[,q])) ens.probs.train[,q]<-rep(0, training.objs)
          }
          est.classes.train<-numeric()
          for(q in 1:training.objs)
          {
            max.classes<-class.levels[as.numeric(which(ens.probs.train[q,]==max(ens.probs.train[q,])))]
            est.classes.train[q]<-ifelse(length(max.classes)==1, max.classes, sample(max.classes,1))
          }
          est.accuracy.train[k.fold, model]<-mean(est.classes.train==training.classes)
          prob.array.test<-array(dim=c(test.objs, length(class.levels.index), 1, len.model.segs))
          for(q in 1:len.model.segs)
            prob.array.test[,,1,q]<-validation.probs(distance.array[-test.folds,test.folds,model.segs.used[[model]][q]], training.classes, k.eval=model.k[model.segs.used[[model]][q]], class.method, ker)
          ens.probs.test<-matrix(nrow=test.objs, ncol=length(class.levels.index))
          for(q in class.levels.index)
          {
            ifelse(seg.weight,
                   ens.probs.test[,q]<-rowSums(t(acc.mat.train[1:len.model.segs,]*t(as.matrix(prob.array.test[,q,,1:len.model.segs]))))/sum(acc.mat.train[1:len.model.segs,]),
                   ens.probs.test[,q]<-rowSums(as.matrix(prob.array.test[,q,,1:len.model.segs]))/len.model.segs
            )
            if(anyNA(ens.probs.test[,q])) ens.probs.test[,q]<-rep(0, test.objs)
          }
          est.classes.test<-numeric()
          for(q in 1:test.objs)
          {
            max.classes<-class.levels[as.numeric(which(ens.probs.test[q,]==max(ens.probs.test[q,])))]
            est.classes.test[q]<-ifelse(length(max.classes)==1, max.classes, sample(max.classes,1))
          }
          est.accuracy.test[k.fold, model]<-mean(est.classes.test==test.classes)
        }
      }
      return(list(list(test.accuracy=est.accuracy.test, train.accuracy=est.accuracy.train)))
    }
  }
  # Stack the per-trial fold accuracies and store them per model.
  test.accs.temp<-NULL
  train.accs.temp<-NULL
  for(j in 1:trials)
  {
    test.accs.temp<-rbind(test.accs.temp, cv.out[[j]]$test.accuracy)
    train.accs.temp<-rbind(train.accs.temp, cv.out[[j]]$train.accuracy)
  }
  for(j in 1:length(models.to.analyze))
  {
    train.all.acc.mat[,models.to.analyze[j]]<-train.accs.temp[,j]
    test.all.acc.mat[,models.to.analyze[j]]<-test.accs.temp[,j]
  }
  return(list(test.accuracies=test.all.acc.mat, training.accuracies=train.all.acc.mat, bss=temp))
}
# ---------------------------------------------------------------------------
# Source metadata (extraction residue, commented out so the file parses as R):
#   path: /ESFuNC_Functions.R
#   license: no_license
#   repo: BuscagliaR/ESFuNC
#   language: R (78,815 bytes)
# ---------------------------------------------------------------------------
###
# Ensemble of Segmented Functional Nonparametric Classifiers
# Robert Buscaglia, Nichola C. Garbett, Yiannis Kamarianakis
# June 20, 2017
#
# Functions and Packages
# File contains all required functions and packages for running the
# ESFuNC algorithm. Each function has been given a brief description.
# Please see ESFuNC_Analysis_Template.R for details on performing the
# algorithm.
### Required Packages
# Install any missing dependencies, then attach everything the ESFuNC
# functions rely on: functional data tools (fda.usc), the foreach/doParallel
# parallel back end, and array binding helpers (abind).
list.of.packages <- c("fda.usc", "foreach", "doParallel", "abind")
new.packages <- setdiff(list.of.packages, rownames(installed.packages()))
if(length(new.packages) > 0)
{
  cat("Missing Packages are Installing... \n")
  install.packages(new.packages)
}
for(pkg in list.of.packages) library(pkg, character.only = TRUE)
### Kernels
# Provide weights for nonparametric classifiers. Required for probability calculations.
# Must be written so that they can work with both KNN and Parzen Window. Any additional
# kernels can be prepared but should retain this form to ensure functionality.
kern.tri<-function(x, h=1, method="wknn")
{
  # Triangular kernel weight function.
  # method = "wknn"   : distances are rescaled to [0, 1] over the neighbourhood
  #                     (min -> 0, max -> 1) and weighted linearly, 2*(1 - u),
  #                     so the closest neighbour gets weight 2 and the farthest 0.
  # method = "kernel" : fixed-bandwidth triangular kernel with support [0, h].
  if(method=="wknn")
  {
    rel.dist<-(x-min(x))/max(x-min(x))
    result<-2*(1-rel.dist)
  }
  if(method=="kernel")
  {
    result<-pmax(0, (2/h)*(1-(x/h)))
  }
  return(result)
}
kern.norm<-function(x, h=1, method="wknn")
{
  # Gaussian kernel weights (doubled normal density).
  # method = "wknn"   : standard normal density of the distance above the
  #                     neighbourhood minimum, times 2.
  # method = "kernel" : normal density with bandwidth h as standard deviation.
  if(method=="wknn") result<-2*dnorm(x-min(x))
  if(method=="kernel") result<-2*dnorm(x, mean=0, sd=h)
  return(result)
}
kern.unif<-function(x, h=1, method="wknn")
{
  # Uniform (flat) kernel weights.
  # method = "wknn"   : uniform density over the observed range of x, so every
  #                     neighbour receives the same weight 1/(max - min).
  # method = "kernel" : weight 1/h inside the bandwidth, zero outside.
  if(method=="wknn")
  {
    rng<-range(x)
    result<-dunif(x, min = rng[1], max = rng[2])
  }
  if(method=="kernel")
  {
    result<-(x<=h)/h
  }
  return(result)
}
### createFolds
# This is the createFolds function from the package 'caret'.
# This is used to create stratified folds for cross validation.
createFolds<-function (y, k = 10, list = TRUE, returnTrain = FALSE)
{
# Vendored copy of caret::createFolds -- kept verbatim so the package is not
# a hard dependency.  Produces k stratified cross-validation folds for the
# outcome y: each class (or quantile bin of a numeric y) is distributed as
# evenly as possible across folds.
# Returns a list of test-set index vectors (list=TRUE), a list of training-set
# index vectors (returnTrain=TRUE), or an integer fold-id vector (list=FALSE).
if (class(y)[1] == "Surv")
y <- y[, "time"]
# Numeric outcomes are stratified by binning into at most 5 quantile groups.
if (is.numeric(y)) {
cuts <- floor(length(y)/k)
if (cuts < 2)
cuts <- 2
if (cuts > 5)
cuts <- 5
breaks <- unique(quantile(y, probs = seq(0, 1, length = cuts)))
y <- cut(y, breaks, include.lowest = TRUE)
}
if (k < length(y)) {
y <- factor(as.character(y))
numInClass <- table(y)
foldVector <- vector(mode = "integer", length(y))
# Assign each class's members round-robin over the k folds, with any
# remainder placed in randomly sampled folds.
for (i in 1:length(numInClass)) {
min_reps <- numInClass[i]%/%k
if (min_reps > 0) {
spares <- numInClass[i]%%k
seqVector <- rep(1:k, min_reps)
if (spares > 0)
seqVector <- c(seqVector, sample(1:k, spares))
foldVector[which(y == names(numInClass)[i])] <- sample(seqVector)
}
else {
foldVector[which(y == names(numInClass)[i])] <- sample(1:k,
size = numInClass[i])
}
}
}
else foldVector <- seq(along = y)
if (list) {
out <- split(seq(along = y), foldVector)
names(out) <- paste("Fold", gsub(" ", "0", format(seq(along = out))),
sep = "")
if (returnTrain)
out <- lapply(out, function(data, y) y[-data], y = seq(along = y))
}
else out <- foldVector
out
}
### fold.creator
# A default setting for creating multiple sets of folds if not given manually.
fold.creator<-function(classes, folds, trials)
{
  # Build one stratified fold-assignment vector per cross-validation trial.
  # Each list element is an integer vector (length = length(classes)) giving
  # the fold id of every observation, as produced by createFolds(list=FALSE).
  lapply(seq_len(trials), function(trial.idx) createFolds(factor(classes), k=folds, list=FALSE))
}
### create.cv.sum
# A function for aiding in the visualization and summarization of final cross validation results. Takes
# the results of running any of the cross validation functions and produces summary statistics.
# cv.to.plot should be at least one set of cross validation results. This is a common output from
# cross validation functions.
create.cv.sum<-function(cv.to.plot)
{
  # Column-wise summary of cross-validation accuracies (rows = CV repetitions,
  # columns = models).  Returns a matrix with rows: median, MAD, upper limit
  # (mean + 2 SE), mean, SD, and lower limit (mean - 2 SE).
  col.stat<-function(stat) apply(cv.to.plot, 2, stat)
  med.temp<-col.stat(median)
  mad.temp<-col.stat(mad)
  mean.temp<-col.stat(mean)
  sd.temp<-col.stat(sd)
  # Approximate 95% interval half-width: two standard errors of the mean.
  se.temp<-sd.temp/sqrt(nrow(as.matrix(cv.to.plot)))
  ul.temp<-mean.temp+2*se.temp
  ll.temp<-mean.temp-2*se.temp
  # Variable names intentionally kept: rbind() uses them as row names.
  rbind(med.temp, mad.temp, ul.temp, mean.temp, sd.temp, ll.temp)
}
### create.segments
# Creates unique fdata objects for each segment determined by (total.segments)
# total.segments = 1 refers to the undisturbed fdata object.
# fdata.object should be fdata object being analyzed (any derivative)
# total.segments : how many partitions are to be made
# indent : discrete point indentations based on argvals. used to remove numerical noise.
# density : grid mesh point density to control integration accuracy.
create.segments<-function(fdata.object, total.segments, indent, density=NULL)
{
  # Partition a functional data object into 'total.segments' contiguous,
  # equally wide segments over its (possibly indented) domain, re-evaluating
  # the underlying basis representation on a fresh grid per segment.
  # indent  : number of argvals points trimmed from each end (derivative noise).
  # density : grid points per segment; defaults to the trimmed point count.
  # Returns a list of fdata objects, one per segment.
  argvals<-fdata.object$argvals
  n.points<-length(argvals)
  start<-argvals[1+indent]
  end<-argvals[n.points-indent]
  if(is.null(density)) int.density<-n.points-2*indent else int.density<-density
  # Equally spaced segment boundaries: start, start + w, ..., end.
  bounds<-start+(0:total.segments)*((end-start)/total.segments)
  segs.list<-list()
  for(j in 1:total.segments)
  {
    grid.j<-seq(bounds[j], bounds[j+1], (bounds[j+1]-bounds[j])/(int.density-1))
    evals<-eval.fd(grid.j, fdata2fd(fdata.object))
    segs.list[[j]]<-fdata(t(evals), argvals=grid.j)
  }
  return(segs.list)
}
### sort.models
# Sorts grid search results and orders results such that segments are minimized and neighbors are maximized
# grid.output : resulting object from running sequential grid search
# models.returned : how many models to be viewed as output
# do.print : control whether resulting ordered table of values is printed to screen. Used in other functions.
sort.models<-function(grid.output, models.returned, do.print=TRUE)
{
# Orders grid-search accuracies (rows = neighbor values, columns = segment
# counts) from best to worst and reports the top 'models.returned' models as a
# character matrix with columns Neighbors / Segments / Accuracy.  Within an
# accuracy tie, entries are rearranged so smaller segment counts / larger
# neighborhoods are preferred.
size<-dim(grid.output)[1]*dim(grid.output)[2]
# best.models: two-column (row, col) index matrix in decreasing accuracy order.
best.models<-arrayInd(order(grid.output, decreasing = TRUE), dim(grid.output))
best.models.mat<-matrix(nrow=models.returned, ncol=3)
best.models.mat.temp<-matrix(nrow=size, ncol=3)
colnames(best.models.mat)<-c("Neighbors", "Segments", "Accuracy")
for(j in 1:size)
{
best.models.mat.temp[j,]<-c(rownames(grid.output)[best.models[j,1]],colnames(grid.output)[best.models[j,2]],as.numeric(round(grid.output[best.models[j,1],best.models[j,2]],6)))
}
# Tie-breaking pass: within each distinct accuracy level, reorder the row
# indices of equal-segment entries in decreasing order.
acc.levels<-as.numeric(levels(factor(best.models.mat.temp[,3])))
for(j in 1:length(acc.levels))
{
set.1<-which(best.models.mat.temp[,3]==acc.levels[j])
fac.levels<-as.numeric(levels(factor(best.models[set.1,2])))
fac.len<-length(fac.levels)
for(k in 1:fac.len)
{
temp.1<-which(best.models[set.1,2]==fac.levels[k])
# NOTE(review): single-bracket linear indexing into the two-column index
# matrix only touches column 1 (the row/neighbor indices) -- confirm this is
# the intended tie-break rather than reordering whole (row, col) pairs.
best.models[set.1[temp.1]]=sort(best.models[set.1[temp.1]], decreasing = TRUE)
}
}
for(j in 1:models.returned)
{
best.models.mat[j,]<-c(rownames(grid.output)[best.models[j,1]],colnames(grid.output)[best.models[j,2]],as.numeric(round(grid.output[best.models[j,1],best.models[j,2]],6)))
}
if(do.print) print(best.models.mat, quote=FALSE)
if(!do.print) return(best.models.mat)
}
### Stepwise Procedures ###
### all.segs.ensemble
# Does not do any stepwise selection. Uses all segments in final model.
# step.array : array of probabilities determined from distances, and dependent on which method and kernel chosen.
# Calculated frequently in other functions.
# segment.accuracies : accuracy of each segment included in step.array. Used for weighted combinations.
# classes : classification identifiers for set being analyzed.
# seg.weight : Set to TRUE if combining segment probabilities should be weighted by individual segment accuracy
# thresh : stepwise accuracy improvement threshold. Does not affect all.segs.ensemble.
# do.par : Set to TRUE to run calculations in parallel.
# cores : number of cores to use during parallel calculations.
all.segs.ensemble<-function(step.array, segment.accuracies, classes, seg.weight=FALSE, thresh=0.0001, do.par=FALSE, cores=2)
{
  # Ensemble classifier that always combines ALL segments (no selection step;
  # 'thresh', 'do.par' and 'cores' are accepted for interface compatibility
  # with the stepwise variants but are not used here).
  # step.array : n.objs x n.classes x n.tuning x n.segments probability array.
  # Returns the ensemble LOO accuracy, the segments used, the combined
  # probabilities and the estimated classes, one entry per tuning value.
  n.objs<-dim(step.array)[1]
  class.levels<-as.numeric(levels(factor(classes)))
  class.levels.index<-seq(1:length(class.levels))
  knn.n<-dim(step.array)[3]
  segment.n<-dim(step.array)[4]
  all.segs<-seq(1,segment.n,1)
  ens.acc<-numeric()
  ens.segs.used<-list()
  probs.ens<-array(dim=c(n.objs, length(class.levels.index), knn.n))
  classes.ens<-matrix(nrow=n.objs, ncol=knn.n)
  for(k in 1:knn.n)
  {
    # Per-class ensemble probability: average over every segment, optionally
    # weighted by each segment's individual accuracy.
    probs.update<-matrix(nrow=n.objs, ncol=length(class.levels.index))
    for(q in class.levels.index)
    {
      if(seg.weight)
      {
        probs.update[,q]<-rowSums(t(segment.accuracies[k,all.segs]*t(as.matrix(step.array[,q,k,all.segs])))/sum(segment.accuracies[k,all.segs]))
      } else {
        probs.update[,q]<-rowSums(as.matrix(step.array[,q,k,all.segs]))/length(all.segs)
      }
    }
    # Predicted class = argmax probability; ties broken at random.
    est.classes.update<-numeric()
    for(q in 1:n.objs)
    {
      top.classes<-class.levels[as.numeric(which(probs.update[q,]==max(probs.update[q,])))]
      est.classes.update[q]<-if(length(top.classes)==1) top.classes else sample(top.classes,1)
    }
    ens.acc[k]<-mean(est.classes.update==classes)
    ens.segs.used[[k]]<-all.segs
    probs.ens[,,k]<-probs.update
    classes.ens[,k]<-est.classes.update
  }
  return(list(ens.accuracies=ens.acc, ens.segments=ens.segs.used, ens.probs=probs.ens, ens.classes=classes.ens))
}
### forward.ensemble
# Evaluates the sequential addition of segments to the ensemble, evaluating if inclusion of a new segment
# improves leave-one-out accuracy.
# step.array : array of probabilities determined from distances, and dependent on which method and kernel chosen.
# Calculated frequently in other functions.
# segment.accuracies : accuracy of each segment included in step.array. Used for weighted combinations.
# classes : classification identifiers for set being analyzed.
# seg.weight : Set to TRUE if combining segment probabilities should be weighted by individual segment accuracy.
# thresh : stepwise accuracy improvement threshold.
# do.par : Set to TRUE to run calculations in parallel.
# cores : number of cores to use during parallel calculations.
forward.ensemble<-function(step.array, segment.accuracies, classes, seg.weight=FALSE, thresh=0.0001, do.par=FALSE, cores=2)
{
  # Forward stepwise segment selection: for each tuning-parameter index k, start
  # from the individually most accurate segment, then greedily add the segment
  # that most improves LOOCV accuracy, stopping when the improvement is <= thresh
  # or no segments remain.
  # Returns per-k accuracies, selected segments, combined probabilities, and
  # estimated classes (same contract as all.segs.ensemble).
  n.objs<-dim(step.array)[1]
  class.levels<-as.numeric(levels(factor(classes)))
  class.levels.index<-seq(1:length(class.levels))
  knn.n<-dim(step.array)[3]
  segment.n<-dim(step.array)[4]
  seg.seq<-seq(1,segment.n,1)
  ens.acc<-numeric()
  ens.segs.used<-list()
  probs.ens<-array(dim=c(n.objs, length(class.levels.index), knn.n))
  classes.ens<-matrix(nrow=n.objs, ncol=knn.n)
  # Per-k worker. Previously this body was duplicated verbatim in the %dopar%
  # and %do% branches; it is now shared.
  step.one.k<-function(k)
  {
    acc.old<-0  # starting value for the while loop
    # Seed with the single most accurate segment (ties broken at random).
    max.vals<-which(segment.accuracies[k,]==max(segment.accuracies[k,]))
    max.seg<-ifelse(length(max.vals)==1, max.vals, sample(max.vals,1))
    acc.new<-segment.accuracies[k,max.seg]
    segs.used<-max.seg
    segs.left<-seg.seq[-segs.used]
    probs.update<-step.array[,,k,segs.used]
    est.classes.update<-numeric()
    for(q in 1:n.objs)
    {
      max.classes<-class.levels[as.numeric(which(probs.update[q,]==max(probs.update[q,])))]
      est.classes.update[q]<-ifelse(length(max.classes)==1, max.classes, sample(max.classes,1))
    }
    # Cycle through the remaining segments, adding the best one each pass.
    while((acc.new > acc.old+thresh) & (length(segs.left)!=0))
    {
      acc.old<-acc.new
      acc.update<-0
      seg.update<-NULL
      est.classes.update<-NULL
      for(j in segs.left)
      {
        probs.temp<-matrix(nrow=n.objs, ncol=length(class.levels.index))
        for(q in class.levels.index)
        {
          if(seg.weight)
          {
            probs.temp[,q]<-rowSums(t(segment.accuracies[k,c(segs.used,j)]*t(as.matrix(step.array[,q,k,c(segs.used,j)])))/sum(segment.accuracies[k,c(segs.used,j)]))
          }
          else
          {
            probs.temp[,q]<-rowSums(as.matrix(step.array[,q,k,c(segs.used,j)]))/length(c(segs.used,j))
          }
          # Guard against degenerate weights producing NAs.
          if(anyNA(probs.temp[,q])) probs.temp[,q]<-rep(0, n.objs)
        }
        est.classes.temp<-numeric()
        for(q in 1:n.objs)
        {
          max.classes<-class.levels[as.numeric(which(probs.temp[q,]==max(probs.temp[q,])))]
          est.classes.temp[q]<-ifelse(length(max.classes)==1, max.classes, sample(max.classes,1))
        }
        acc.temp<-mean(est.classes.temp==classes)
        if(acc.temp>acc.update)
        {
          acc.update<-acc.temp
          seg.update<-j
          est.classes.update<-est.classes.temp
          probs.update<-probs.temp
        }
      }
      if(acc.update>acc.old+thresh)
      {
        acc.new<-acc.update
        segs.used<-c(segs.used,seg.update)
        segs.left<-seg.seq[-segs.used]
      }
    }
    # Wrapped in an extra list() so foreach's .combine=append yields one
    # element per k.
    list(list(ens.acc.out=acc.new, ens.segs.used.out=segs.used, probs.ens.out=probs.update, classes.ens.out=est.classes.update))
  }
  if(do.par)
  {
    # BUG FIX: was min(knn.n, max.cores) — max.cores is not defined in this
    # function (the parameter is named cores), which errored when do.par=TRUE.
    use.cores<-min(knn.n, cores)
    cl.temp<-makeCluster(use.cores)
    registerDoParallel(cl.temp)
    k.result<-foreach(k=1:knn.n, .combine=append) %dopar% step.one.k(k)
    stopCluster(cl.temp)
  }
  if(!do.par)
  {
    k.result<-foreach(k=1:knn.n, .combine=append) %do% step.one.k(k)
  }
  # Collect the per-k results into the standard ensemble output shape.
  for(k in 1:knn.n)
  {
    ens.acc[k]<-k.result[[k]]$ens.acc.out
    ens.segs.used[[k]]<-k.result[[k]]$ens.segs.used.out
    probs.ens[,,k]<-k.result[[k]]$probs.ens.out
    classes.ens[,k]<-k.result[[k]]$classes.ens.out
  }
  return(list(ens.accuracies=ens.acc, ens.segments=ens.segs.used, ens.probs=probs.ens, ens.classes=classes.ens))
}
### calc.distance.array
# Creates an array of distances for an fdata object for a provided number of segments.
# Segments are first constructed using indent and density inputs.
# Distances are calculated between each observation in the fdata object for each segment.
# Can be done (and should be done) in parallel.
# fdata.object : data set to be analyzed.
# indent : discrete point indentations to remove numerical derivative noise.
# density : integration grid mesh to control accuracy.
# total.segments : number of partitions, where 1 defaults to unparitioned data.
# do.par : Set to TRUE to run calculations in parallel.
# cores : number of cores to use during parallel calculations.
calc.distance.array<-function(fdata.object, indent=0, density=NULL, total.segments=1, do.par=FALSE, max.cores=2)
{
  # Builds a 3-d array of pairwise L^p distances: one n.objs x n.objs distance
  # matrix (via fda.usc::metric.lp) per segment produced by create.segments().
  # total.segments=1 corresponds to the unpartitioned data.
  segs<-create.segments(fdata.object, total.segments, indent, density)
  # Stack per-segment distance matrices along a third dimension.
  acomb <- function(...) abind(..., along=3)
  if(do.par)
  {
    use.cores<-min(total.segments, max.cores)
    cl.temp<-makeCluster(use.cores)
    registerDoParallel(cl.temp)
    seg.distance.out<-foreach(q=1:total.segments, .packages = "fda.usc", .combine='acomb', .multicombine = TRUE) %dopar%
    {
      x<-metric.lp(segs[[q]])
      return(x)
    }
    stopCluster(cl.temp)
  }
  if(!do.par)
  {
    # BUG FIX: this sequential branch previously used %dopar% with no
    # registered backend, triggering a foreach warning and defeating the
    # do.par=FALSE contract; %do% runs it sequentially as intended.
    seg.distance.out<-foreach(q=1:total.segments, .packages = "fda.usc", .combine='acomb', .multicombine = TRUE) %do%
    {
      x<-metric.lp(segs[[q]])
      return(x)
    }
  }
  # Normalize the foreach result (a matrix when total.segments==1) into a
  # consistent 3-d array.
  dim.1<-dim(seg.distance.out)[1]
  dim.2<-dim(seg.distance.out)[2]
  export.array<-array(dim=c(dim.1, dim.2, total.segments))
  export.array[,,(1:total.segments)]<-seg.distance.out
  return(export.array)
}
### segment.class
# Returns accuracy, estimated classes, and probabilities for any single segment.
# segment.distances : should be a single matrix from the array of distances.
# classes : classification identifiers for set being analyzed.
# k.grid : tuning parameteres to analyze. Either neighbor size (integers)
# or bandwidth constants (positive real values)
# class.method : nonparametric classification method. Either "wknn" or "kernel".
# ker : kernel to be used (see Kernels above)
# Leave-one-out classification of a single segment from its distance matrix.
# For each object, the remaining objects are ordered by distance and a
# kernel-weighted class probability is formed; returns per-k accuracy,
# estimated classes, and the full probability array.
segment.class<-function(segment.distances, classes, k.grid=2, class.method="wknn", ker=kern.tri)
{
n.objs<-dim(segment.distances)[1]
# Per-object neighbor info; n.objs-1 rows because the object itself is excluded.
closest.temp<-matrix(ncol=n.objs,nrow=n.objs-1)
classify.temp<-matrix(ncol=n.objs,nrow=n.objs-1)
dist.ord<-matrix(ncol=n.objs, nrow=n.objs-1)
kern.probs<-matrix(ncol=n.objs, nrow=n.objs-1)
class.levels<-as.numeric(levels(factor(classes)))
class.levels.index<-seq(1:length(class.levels))
met.temp<-segment.distances
# Column j: indices, classes, and distances of all other objects ordered by
# increasing distance from object j (j itself removed — leave-one-out).
for(j in 1:n.objs)
{
or.temp<-order(met.temp[,j])
closest.temp[,j]<-or.temp[or.temp!=j]
classify.temp[,j]<-classes[closest.temp[,j]]
dist.ord[,j]<-met.temp[or.temp[or.temp!=j], j]
}
if(class.method=="wknn")
{
# Weighted k-NN: kernel weights computed once (h fixed at 1); k.grid varies
# the neighborhood size k.
kern.probs<-apply(dist.ord, 2, ker, h=1, method=class.method)
prob.classes<-array(dim=c(n.objs, length(class.levels), length(k.grid)), dimnames = list(seq(1:n.objs), class.levels, seq(1:length(k.grid))))
for(q in 1:length(k.grid))
{
k=k.grid[q]
for(i in 1:length(class.levels.index))
{
for(j in 1:n.objs)
{
# Probability of class i for object j: share of kernel weight carried by
# the k nearest neighbors belonging to class i.
assign.temp<-which(classify.temp[(1:k),j]==class.levels[i])
total.prob<-sum(kern.probs[(1:k), j])
prob.classes[j,class.levels.index[i],q]=sum(kern.probs[assign.temp,j])/total.prob
}
}
}
# Argmax class per object; ties broken uniformly at random.
est.classes<-matrix(nrow=n.objs, ncol=length(k.grid))
for (k in 1:length(k.grid))
{
for (j in 1:n.objs)
{
max.classes<-class.levels[as.numeric(which(prob.classes[j,,k]==max(prob.classes[j,,k])))]
est.classes[j,k]<-ifelse(length(max.classes)==1, max.classes, sample(max.classes,1))
}
}
# LOOCV accuracy per tuning value.
est.accuracy<-numeric()
for (k in 1:length(k.grid))
{
est.accuracy[k]<-mean(est.classes[,k]==classes)
}
}
if(class.method=="kernel")
{
prob.classes<-array(dim=c(n.objs, length(class.levels), length(k.grid)), dimnames = list(seq(1:n.objs), class.levels, seq(1:length(k.grid))))
for(q in 1:length(k.grid))
{
# Kernel method: k.grid entries are bandwidths, so weights are recomputed
# for each tuning value and all neighbors contribute.
kern.probs<-apply(dist.ord, 2, ker, h=k.grid[q], method=class.method)
for(i in 1:length(class.levels.index))
{
for(j in 1:n.objs)
{
assign.temp<-which(classify.temp[,j]==class.levels[i])
total.prob<-sum(kern.probs[, j])
# If no neighbor receives weight (bandwidth too small), fall back to a
# uniform distribution over classes.
ifelse(total.prob==0,
prob.classes[j,class.levels.index[i],q]<-1/length(class.levels.index),
prob.classes[j,class.levels.index[i],q]<-sum(kern.probs[assign.temp,j])/total.prob
)
}
}
}
# Argmax class per object; ties broken uniformly at random.
est.classes<-matrix(nrow=n.objs, ncol=length(k.grid))
for (k in 1:length(k.grid))
{
for (j in 1:n.objs)
{
max.classes<-class.levels[as.numeric(which(prob.classes[j,,k]==max(prob.classes[j,,k])))]
est.classes[j,k]<-ifelse(length(max.classes)==1, max.classes, sample(max.classes,1))
}
}
# LOOCV accuracy per tuning value.
est.accuracy<-numeric()
for (k in 1:length(k.grid))
{
est.accuracy[k]<-mean(est.classes[,k]==classes)
}
}
# NOTE(review): if class.method is neither "wknn" nor "kernel" this errors at
# return (prob.classes undefined) — confirm callers always pass a valid method.
return(list(accuracy.est=est.accuracy, classes.est=est.classes, prob.array=prob.classes))
}
### grid.class
# Performs the standard grid search over tuning parameters and segments.
# No sequential stepping. To be used if a predetermined grid of parameters is desired rather than using
# sequential stepping of segment sizes.
# distance.array : array of distances calculated from fdata object. Distances for each segmented FDO.
# classes : classification identifies for set being analyzed.
# segments.grid : segment sizes to analyzes. Should be vector of integers.
# k.grid : tuning parameteres to analyze. Either neighbor size (integers)
# or bandwidth constants (positive real values)
# class.method : nonparametric classification method. Either "wknn" or "kernel".
# ker : kernel to be used (see Kernels above)
# step.method : stepwise procedure. Either forward.ensemble or all.segs.ensemble
# seg.weight : Set to TRUE to weight combination of segments by individual segment LOOCV accuracy.
# thresh : stepwise accuracy improvement threshold. Does not affect all.segs.ensemble.
# do.par : Set to TRUE to run calculations in parallel.
# max.cores : number of cores to use during parallel calculations.
# output : Toggle for displaying information about calculation while calculation is running
grid.class<-function(distance.array, classes, segments.grid=1, k.grid=5, class.method="wknn", ker=kern.tri, step.method=forward.ensemble, seg.weight=TRUE, thresh=0.0001, do.par=FALSE, max.cores=2, output=FALSE)
{
  # Grid search over tuning parameters (k.grid) and segment sizes
  # (segments.grid). distance.array must contain the distance matrices of all
  # requested segment sizes, concatenated along the third dimension.
  # Returns a length(k.grid) x length(segments.grid) accuracy matrix with
  # "k=<v>" row names and "<n> segs" column names.
  if(sum(segments.grid)!=dim(distance.array)[3]) stop("Segments and Distance Array do not match!")
  n.objs<-dim(distance.array)[1]
  class.levels<-as.numeric(levels(factor(classes)))
  class.levels.index<-seq(1:length(class.levels))
  # Offset of each segment size's first distance matrix within distance.array.
  seg.start<-cumsum(segments.grid)-segments.grid
  grid.results<-matrix(nrow=length(k.grid), ncol=length(segments.grid))
  # BUG FIX: previously s was hard-coded to 1 so only the first entry of
  # segments.grid was ever evaluated (and the multi-column naming below would
  # error); the seg.start offsets show a loop over segments.grid was intended.
  # Behavior for the common scalar segments.grid call is unchanged.
  for(s in seq_along(segments.grid))
  {
    if(output) cat("Evaluating Segment Probabilities. \n")
    segment.array<-array(dim=c(n.objs, length(class.levels), length(k.grid), segments.grid[s]))
    accuracy.mat<-matrix(nrow=length(k.grid), ncol=segments.grid[s])
    if(do.par && segments.grid[s]!=1)
    {
      use.cores<-min(max.cores, segments.grid[s])
      cl.max<-makeCluster(use.cores)
      registerDoParallel(cl.max)
      kernel.output<-foreach(q=1:segments.grid[s], .packages = "fda.usc", .export="segment.class", .combine=append) %dopar%
      {
        return(list(segment.class(distance.array[,,seg.start[s]+q], classes, k.grid, class.method, ker)))
      }
      stopCluster(cl.max)
    }
    if(!do.par || segments.grid[s]==1)
    {
      kernel.output<-foreach(q=1:segments.grid[s], .packages = "fda.usc", .export="segment.class", .combine=append) %do%
      {
        return(list(segment.class(distance.array[,,seg.start[s]+q], classes, k.grid, class.method, ker)))
      }
    }
    for(q in 1:segments.grid[s])
    {
      segment.array[,,,q]<-kernel.output[[q]]$prob.array
      accuracy.mat[,q]<-kernel.output[[q]]$accuracy.est
    }
    if(output) cat("Finished Probabilities. \n")
    grid.results[,s]<-step.method(segment.array, accuracy.mat, classes, seg.weight=seg.weight, thresh=thresh, do.par=do.par, cores=max.cores)$ens.accuracies
    if(output) cat("Finished Ensembling. \n")
  }
  # Label the grid; paste() is vectorized so no accumulation loop is needed.
  colnames(grid.results)<-paste(segments.grid, "segs")
  rownames(grid.results)<-paste("k=", round(k.grid, 6), sep="")
  return(grid.results)
}
### seg.grid.class
# Evaluates a sequentially increasing segment size, evaluating if the top models (as determined by LOOCV)
# improve in accuracy as segment size increases. The top top.models.eval models must not change
# for seg.sep segments. Same functionality as grid.class but with improved selection of segment sizes.
# Can drastically improve computation times if segments to be analyzed is unknown.
# Has additionally been improved to run directly from the fdata.object of interest.
# Indent and deriv must be supplied if fdata.object is to be manipulated within the calculation,
# or fdata.object can be supplied as derivative of interest and set deriv=0.
# fdata.object : set to be analyzed. Default setting is original data curves, that can be differentiated
# if desired.
# classes : classification identifiers for set being analyzed.
# top.models : number of models to be considered when evaluating if segmentation has been optimized.
# seg.sep : distance between total segments evaluated and segment size of top models. Setting to 0 will
# stop calculation once min.segments has been analyzed.
# min.segments : initial segment sizes to be analyzed. Should be a minimum of 2, although will work from 1.
# max.segments : stopping parameter to ensure sequential increasing of segment size will stop.
# indent : numerical noise indentation.
# deriv : which derivative of fdata.object to be analyzed.
# k.grid : grid of tuning parameters
# class.method : nonparametric classification method. Either "wknn" or "kernel".
# ker : kernel to be used (see Kernels above)
# step.method : stepwise procedure. Either forward.ensemble or all.segs.ensemble
# seg.weight : Set to TRUE to weight combination of segments by individual segment LOOCV accuracy
# thresh : stepwise accuracy improvement threshold. Does not affect all.segs.ensemble.
# density : integration grid mesh size to control accuracy
# do.par : Set to TRUE to run calculations in parallel.
# max.cores : number of cores to use during parallel calculations.
# output : Toggle for displaying information about calculation while calculation is running
# write.out : Toggle to write data to file as calculation is performed.
# write.name : File name to which data is sent if write.out = T.
seq.grid.class<-function(fdata.object, classes, top.models.eval=15, seg.sep=1, min.segments=5, max.segments=30,
indent=0, deriv=0, k.grid=c(1:10), class.method="wknn", ker=kern.tri,
step.method=forward.ensemble, seg.weight=FALSE, thresh=0.0001, density=NULL,
do.par=FALSE, max.cores=2, output=FALSE, write.out=FALSE, write.name=NULL)
{
  # Sequentially increases the number of segments, stopping once the top
  # top.models.eval models (by LOOCV accuracy) no longer involve segment sizes
  # within seg.sep of the largest size analyzed, or max.segments is reached.
  # Returns the accumulated accuracy grid (rows = k.grid, columns = seg sizes).
  if(is.null(write.name)) write.name<-"output"
  fd.full<-fdata.object
  if(deriv!=0) fd.full<-fdata.deriv(fd.full,deriv)
  # Largest segment count appearing among the current top models. Column
  # labels are "<n> segs", so dropping the last 4 characters leaves "<n> ".
  # (Previously this sort.models(...) expression was evaluated twice per use.)
  largest.top.seg<-function(grid)
  {
    top.labels<-sort.models(grid, top.models.eval, do.print=FALSE)[1:top.models.eval, 2]
    max(as.numeric(substring(top.labels, 1, nchar(top.labels)-4)))
  }
  # Distance computation + grid evaluation for one segment size.
  # (Previously duplicated verbatim in the seed loop and the while loop.)
  eval.seg.size<-function(n.seg)
  {
    if(output) cat("Calculating Distance for", n.seg, "segment(s). \n")
    dist.temp<-calc.distance.array(fd.full, indent, density, total.segments=n.seg, do.par, max.cores)
    grid.class(dist.temp, classes, segments.grid=n.seg, k.grid=k.grid, class.method=class.method, ker=ker,
               step.method=step.method, seg.weight=seg.weight, thresh=thresh, do.par=do.par, max.cores=max.cores, output=output)
  }
  # Seed the grid with the initial range of segment sizes.
  full.grid<-NULL
  for(seg.size in 1:min.segments)
  {
    full.grid<-cbind(full.grid, eval.seg.size(seg.size))
  }
  if(write.out) write.csv(full.grid, paste(write.name, "_Grid.csv", sep=""))
  largest.seg.anal<-min.segments
  largest.seg.in.top<-largest.top.seg(full.grid)
  # Keep growing while the best models sit too close to the analyzed boundary.
  while((largest.seg.in.top+seg.sep)>largest.seg.anal && min.segments<max.segments)
  {
    min.segments<-min.segments+1
    if(output) cat("Segment size increased to ", min.segments, ".\n")
    full.grid<-cbind(full.grid, eval.seg.size(min.segments))
    if(write.out) write.csv(full.grid, paste(write.name, "_Grid.csv", sep=""))
    largest.seg.anal<-min.segments
    largest.seg.in.top<-largest.top.seg(full.grid)
  }
  if(output) cat("Finished Sequential Grid Analysis at a total of", min.segments, "Segments. \n")
  return(full.grid)
}
### validation.probs
# Function that takes a truncated distance matrix (including only the testing data ) and a k.grid
# and returns a probability array for the test data. Used for Cross validation.
# dist.temp : temporary truncated matrix of distances (distances from training to test objects)
# training.classes : classification identifiers for training set
# k.eval : tuning parameter
# class.method : nonparametric classification method. Either "wknn" or "kernel".
# ker : kernel to be used (see Kernels above)
validation.probs<-function(dist.temp, training.classes, k.eval, class.method="wknn", ker=kern.tri)
{
  # Class-probability matrix for test objects given a train-by-test distance
  # matrix (rows = training objects, columns = test objects). Used during
  # cross validation. Rows of the result are test objects, columns are the
  # class levels of training.classes.
  n.test<-ncol(dist.temp)
  n.train<-nrow(dist.temp)
  ord.classes<-matrix(ncol=n.test, nrow=n.train)
  ord.dists<-matrix(ncol=n.test, nrow=n.train)
  lvls<-as.numeric(levels(factor(training.classes)))
  lvl.idx<-seq_len(length(lvls))
  # Per test object: training classes and distances ordered by proximity.
  for(j in seq_len(n.test))
  {
    nearest.first<-order(dist.temp[,j])
    ord.classes[,j]<-training.classes[nearest.first]
    ord.dists[,j]<-dist.temp[nearest.first,j]
  }
  if(class.method=="wknn")
  {
    # Weighted k-NN: fixed bandwidth, k.eval nearest training objects vote.
    wts<-apply(ord.dists, 2, ker, h=1, method=class.method)
    prob.classes<-matrix(nrow=n.test, ncol=length(lvls))
    for(i in seq_len(length(lvl.idx)))
    {
      for(j in seq_len(n.test))
      {
        hits<-which(ord.classes[(1:k.eval),j]==lvls[i])
        prob.classes[j,lvl.idx[i]]<-sum(wts[hits,j])/sum(wts[(1:k.eval),j])
      }
    }
  }
  if(class.method=="kernel")
  {
    # Kernel method: k.eval is the bandwidth and all training objects vote.
    prob.classes<-matrix(nrow=n.test, ncol=length(lvls))
    wts<-apply(ord.dists, 2, ker, h=k.eval, method=class.method)
    for(i in seq_len(length(lvl.idx)))
    {
      for(j in seq_len(n.test))
      {
        hits<-which(ord.classes[,j]==lvls[i])
        total.wt<-sum(wts[,j])
        # Zero total weight (bandwidth too small) falls back to uniform.
        if(total.wt==0)
        {
          prob.classes[j,lvl.idx[i]]<-1/length(lvl.idx)
        }
        else
        {
          prob.classes[j,lvl.idx[i]]<-sum(wts[hits,j])/total.wt
        }
      }
    }
  }
  return(prob.classes)
}
### grid.model.cv
# Runs cross validation of the top models.analyzed models from the results of a sequential grid search.
# fdata.object : set to be analyzed. Default setting is original data curves.
# classes : classification identifies for set being analyzed.
# grid.results : output from running a grid search
# models.analyzed : number of models to validated
# folds : number of folds
# trials : number of times cross validation should be performed.
# folds.list : a list of folds identifiers
# seg.sep : distance between total segments evaluated and segment size of top models. Setting to 0 will
# stop calculation once min.segments has been analyzed.
# min.segments : initial segments sizes to be analyzed. Should be a minimum of 2, although will work from 1.
# max.segments : stopping parameter to ensure sequential increasing of segment size will stop.
# indent : numerical noise indentation.
# deriv : which derivative of fdata.object to be analyzed.
# k.grid : grid of tuning parameters
# class.method : nonparametric classification method. Either "wknn" or "kernel".
# ker : kernel to be used (see Kernels above)
# step.method : stepwise procedure. Either forward.ensemble or all.segs.ensemble
# seg.weight : Set to TRUE to weight combination of segments by individual segment LOOCV accuracy
# thresh : stepwise accuracy improvement threshold. Does not affect all.segs.ensemble.
# density : integration grid mesh size to control accuracy
# do.par : Set to TRUE to run calculations in parallel.
# max.cores : number of cores to use during parallel calculations.
# large.set : A toggle for numerical stability if analyzing a dataset with extreme number of obersvations
# output : Toggle for displaying information about calculation while calculation is running
grid.model.cv<-function(fdata.object, classes, grid.results, models.analyzed=10, folds=10, trials=1,
folds.list=NULL, indent=0, deriv=0, density=NULL, class.method="wknn", ker=kern.tri,
step.method=forward.ensemble, seg.weight=FALSE, do.par=FALSE, max.cores=2,
large.set=FALSE, output=TRUE)
{
  # Cross-validates the top models.analyzed models from a grid-search result.
  # Returns per-fold training/test accuracies (one column per model) plus a
  # summary of the segments each model's ensemble used.
  # BUG FIX: previously called fold.creator(classes.temp, ...) where
  # classes.temp is undefined in this scope.
  if(is.null(folds.list)) folds.list<-fold.creator(classes, folds, trials)
  if(deriv!=0) fdata.object<-fdata.deriv(fdata.object, deriv)
  # Decode the top models once ("k=<v>" row labels, "<n> segs" column labels);
  # previously sort.models() was re-evaluated three times here.
  top.models<-sort.models(grid.results, models.analyzed, do.print=FALSE)
  new.k<-as.numeric(substring(top.models[1:models.analyzed,1], 3))
  new.segs<-as.numeric(substring(top.models[1:models.analyzed,2], 1, nchar(top.models[1:models.analyzed,2])-4))
  seg.grid<-as.numeric(names(table(new.segs)))
  n.objs<-length(classes)
  class.levels<-as.numeric(levels(factor(classes)))
  class.levels.index<-seq(1:length(class.levels))
  train.all.acc.mat<-matrix(nrow=trials*folds, ncol=models.analyzed)
  test.all.acc.mat<-matrix(nrow=trials*folds, ncol=models.analyzed)
  models.out<-list()
  for(seg.chosen in 1:length(seg.grid))
  {
    total.segments=seg.grid[seg.chosen]
    models.to.analyze<-which(new.segs==total.segments)
    model.k<-new.k[models.to.analyze]
    if(output) cat("Segment", seg.chosen, "of ", length(seg.grid), "\n")
    distance.array<-calc.distance.array(fdata.object, indent, density, total.segments=total.segments, do.par, max.cores)
    # Re-derive each model's selected segments on the full data.
    prob.array.temp<-array(dim=c(n.objs, length(class.levels.index), length(model.k), total.segments))
    acc.mat<-matrix(ncol=length(model.k), nrow=total.segments)
    for(j in 1:total.segments)
    {
      temp.1<-segment.class(distance.array[,,j], classes, model.k, class.method, ker)
      prob.array.temp[,,,j]<-temp.1$prob.array
      acc.mat[j,]<-temp.1$accuracy.est
    }
    model.temp<-step.method(prob.array.temp, t(acc.mat), classes, seg.weight=seg.weight)
    model.segs.used<-model.temp$ens.segments
    models.out[[seg.chosen]]<-model.segs.used
    # One CV trial: train/test accuracy for every model at this segment size.
    # (Previously duplicated verbatim in the %dopar% and %do% branches.)
    cv.one.trial<-function(j.trial)
    {
      est.accuracy.train<-matrix(nrow=folds, ncol=length(model.k))
      est.accuracy.test<-matrix(nrow=folds, ncol=length(model.k))
      for(k.fold in 1:folds)
      {
        test.folds<-which(folds.list[[j.trial]]==k.fold)
        training.classes<-classes[-test.folds]
        test.classes<-classes[test.folds]
        test.objs<-length(test.classes)
        training.objs<-length(training.classes)
        for(model in 1:length(model.k))
        {
          len.model.segs<-length(model.segs.used[[model]])
          # Training-set LOOCV probabilities and per-segment accuracies.
          prob.array.train<-array(dim=c(training.objs, length(class.levels.index), 1, len.model.segs))
          acc.mat.train<-matrix(ncol=1, nrow=len.model.segs)
          for(j in 1:len.model.segs)
          {
            temp.1<-segment.class(distance.array[-test.folds,-test.folds, model.segs.used[[model]][j]],
                                  training.classes, model.k[model], class.method, ker)
            prob.array.train[,,1,j]<-temp.1$prob.array
            acc.mat.train[j,]<-temp.1$accuracy.est
          }
          ens.probs.train<-matrix(nrow=training.objs, ncol=length(class.levels.index))
          for(q in class.levels.index)
          {
            if(seg.weight)
            {
              ens.probs.train[,q]<-rowSums(t(acc.mat.train[1:len.model.segs,]*t(as.matrix(prob.array.train[,q,,1:len.model.segs]))))/sum(acc.mat.train[1:len.model.segs,])
            }
            else
            {
              ens.probs.train[,q]<-rowSums(as.matrix(prob.array.train[,q,,1:len.model.segs]))/len.model.segs
            }
            # BUG FIX: guard previously wrote to undefined probs.temp/train.objs,
            # so any NA crashed instead of being zeroed.
            if(anyNA(ens.probs.train[,q])) ens.probs.train[,q]<-rep(0, training.objs)
          }
          est.classes.train<-numeric()
          for(q in 1:training.objs)
          {
            max.classes<-class.levels[as.numeric(which(ens.probs.train[q,]==max(ens.probs.train[q,])))]
            est.classes.train[q]<-ifelse(length(max.classes)==1, max.classes, sample(max.classes,1))
          }
          est.accuracy.train[k.fold, model]<-mean(est.classes.train==training.classes)
          # Test-set probabilities from the train-to-test distance block.
          prob.array.test<-array(dim=c(test.objs, length(class.levels.index), 1, len.model.segs))
          for(q in 1:len.model.segs)
            prob.array.test[,,1,q]<-validation.probs(distance.array[-test.folds,test.folds,model.segs.used[[model]][q]], training.classes, k.eval=model.k[model], class.method, ker)
          ens.probs.test<-matrix(nrow=test.objs, ncol=length(class.levels.index))
          for(q in class.levels.index)
          {
            if(seg.weight)
            {
              ens.probs.test[,q]<-rowSums(t(acc.mat.train[1:len.model.segs,]*t(as.matrix(prob.array.test[,q,,1:len.model.segs]))))/sum(acc.mat.train[1:len.model.segs,])
            }
            else
            {
              ens.probs.test[,q]<-rowSums(as.matrix(prob.array.test[,q,,1:len.model.segs]))/len.model.segs
            }
            # BUG FIX: same undefined-variable guard as the training side.
            if(anyNA(ens.probs.test[,q])) ens.probs.test[,q]<-rep(0, test.objs)
          }
          est.classes.test<-numeric()
          for(q in 1:test.objs)
          {
            max.classes<-class.levels[as.numeric(which(ens.probs.test[q,]==max(ens.probs.test[q,])))]
            est.classes.test[q]<-ifelse(length(max.classes)==1, max.classes, sample(max.classes,1))
          }
          est.accuracy.test[k.fold, model]<-mean(est.classes.test==test.classes)
        }
      }
      # Extra list() wrapper so foreach's .combine=append yields one element
      # per trial.
      return(list(list(test.accuracy=est.accuracy.test, train.accuracy=est.accuracy.train)))
    }
    if(do.par && !large.set)
    {
      cl.red<-makeCluster(max.cores)
      registerDoParallel(cl.red)
      cv.out<-foreach(j.trial=1:trials, .packages=c("fda.usc"),.export=c("segment.class", "validation.probs", "kern.tri", "kern.norm", "kern.unif"),.combine=append) %dopar% cv.one.trial(j.trial)
      stopCluster(cl.red)
    }
    if(!do.par || large.set)
    {
      cv.out<-foreach(j.trial=1:trials, .combine=append) %do% cv.one.trial(j.trial)
    }
    # Stack per-trial fold accuracies and slot them into the model columns.
    test.accs.temp<-NULL
    train.accs.temp<-NULL
    for(j in 1:trials)
    {
      test.accs.temp<-rbind(test.accs.temp, cv.out[[j]]$test.accuracy)
      train.accs.temp<-rbind(train.accs.temp, cv.out[[j]]$train.accuracy)
    }
    for(j in 1:length(models.to.analyze))
    {
      train.all.acc.mat[,models.to.analyze[j]]<-train.accs.temp[,j]
      test.all.acc.mat[,models.to.analyze[j]]<-test.accs.temp[,j]
    }
  }
  # Summary block: one column per model, rows are (k, n.segs, segments used).
  new.comb<-cbind(new.k, new.segs)
  x<-NULL
  for(j in 1:length(models.out)) x<-c(x, lengths(models.out[[j]]))
  segments.used.grid<-matrix(NA, ncol=models.analyzed, nrow=max(x))
  for(j in 1:length(seg.grid))
  {
    col.temp<-which(new.comb[,2]==seg.grid[[j]])
    n.temp<-length(col.temp)
    for(k in 1:n.temp)
    {
      segs.used<-models.out[[j]][[k]]
      len.temp<-length(segs.used)
      # BUG FIX: previously indexed col.temp (all matching columns) so every
      # column was overwritten with the last model's segments each pass.
      segments.used.grid[(1:len.temp),col.temp[k]]<-segs.used
    }
  }
  seg.sum<-rbind(t(new.comb), segments.used.grid)
  return(list(test.accuracies=test.all.acc.mat, training.accuracies=train.all.acc.mat, segment.summary=seg.sum))
}
### dist.to.prob
# Takes a distance array and produces a probability array for a given method, kernel, and tuning parameter
# See segment.class
# distance.array : array of distances
# total.segments : total number of segments (should match array dimension)
# k.size : a single tuning parameter (not a vector!)
# classes : classification identifies for set being analyzed. Only used for dimensionality.
dist.to.prob<-function(distance.array, total.segments, k.size, classes)
{
n.objs<-length(classes)
class.levels<-as.numeric(levels(factor(classes)))
class.levels.index<-seq(1:length(class.levels))
temp.array<-array(dim=c(n.objs,n.objs,total.segments))
temp.array[,,1:total.segments]<-distance.array
prob.array.temp<-array(dim=c(n.objs, length(class.levels.index), 1, total.segments))
acc.mat<-matrix(ncol=1, nrow=total.segments)
for(j in 1:total.segments)
{
temp.1<-segment.class(temp.array[,,j], classes.temp, k.size, class.method, ker)
prob.array.temp[,,,j]<-temp.1$prob.array
acc.mat[j,]<-temp.1$accuracy.est
}
return(list(probability.array=prob.array.temp, accuracies=acc.mat))
}
### bestsub.ensemble
# Takes a probability array and evaluates all combinations reporting the top LOOCV models for each combination size.
# For combination size 1 it picks the single most accurate segment; for sizes 2..best.sub.max it
# exhaustively scores every segment combination (via combn) and keeps all ties for the best accuracy.
# step.array : array of probabilities determined from distances, and dependent on which method and kernel chosen.
# Calculated frequently in other functions. Dimensions: objects x classes x k x segments.
# segment.accuracies : accuracy of each segment included in step.array. Used for weighted combinations.
# classes : classification identifiers for set being analyzed.
# seg.weight : Set to TRUE if combining segment probabilities should be weighted by individual segment accuracy
# do.par : Set to TRUE to run calculations in parallel.
# max.cores : number of cores to use during parallel calculations.
# best.sub.max : maximum combination size to analyze (computational stability)
# Returns accuracies for each retained model plus a matrix of the segments each model used.
bestsub.ensemble<-function(step.array, segment.accuracies, classes, seg.weight=FALSE, do.par=FALSE,
max.cores=2, best.sub.max=10)
{
n.objs<-dim(step.array)[1]
class.levels<-as.numeric(levels(factor(classes)))
class.levels.index<-seq(1:length(class.levels))
knn.n<-dim(step.array)[3]
segment.n<-dim(step.array)[4]
seg.seq<-seq(1,segment.n,1)
ens.acc<-numeric()
ens.segs.used<-list()
probs.ens<-array(dim=c(n.objs, length(class.levels.index), knn.n))
classes.ens<-matrix(nrow=n.objs, ncol=knn.n)
# Only the first k slice is analyzed here; 'k' is reused as a loop index further below.
k=1
# Combination size 1: the single best segment (ties broken by random sampling).
max.vals<-which(segment.accuracies[k,]==max(segment.accuracies[k,]))
max.seg<-ifelse(length(max.vals)==1, max.vals, sample(max.vals,1))
segs.best<-max.seg
acc.best<-segment.accuracies[k,max.seg]
probs.best<-step.array[,,k,max.seg]
est.classes.best<-numeric()
for(q in 1:n.objs)
{
max.classes<-class.levels[as.numeric(which(probs.best[q,]==max(probs.best[q,])))]
est.classes.best[q]<-ifelse(length(max.classes)==1, max.classes, sample(max.classes,1)) #if tie sample from mean distances from the test objects. Mean distance over all segments?
}
best.sub.start<-list(acc=acc.best, segs=segs.best, probs=probs.best, class=est.classes.best)
# Cap the largest combination size at best.sub.max (side-effecting ifelse by design).
ifelse(segment.n>best.sub.max, use.segs.tot<-best.sub.max, use.segs.tot<-segment.n)
# Parallel exhaustive search over combination sizes 2..use.segs.tot.
if(do.par && segment.n!=1)
{
cl.max<-makeCluster(max.cores)
registerDoParallel(cl.max)
best.sub.list<-foreach(m=2:use.segs.tot, .combine=append) %dopar%
{
combs<-combn(segment.n,m)
best.sub.temp<-list()
# NOTE(review): best.acc starts at 0, so a first combination with accuracy
# exactly 0 is appended by the equality branch below — confirm intended.
best.acc=0
for(j in 1:ncol(combs))
{
segs.update<-combs[,j]
probs.temp<-matrix(nrow=n.objs, ncol=length(class.levels.index))
# Combine per-segment probabilities: accuracy-weighted mean or plain mean.
for(q in class.levels.index)
{
ifelse(seg.weight,
probs.temp[,q]<-rowSums(t(segment.accuracies[k,segs.update]*t(as.matrix(step.array[,q,k,segs.update])))/sum(segment.accuracies[k,segs.update])),
probs.temp[,q]<-rowSums(as.matrix(step.array[,q,k,segs.update]))/length(segs.update)
)
}
# Classify each object by the maximum combined probability (ties broken randomly).
est.classes.temp<-numeric()
for(q in 1:n.objs)
{
max.classes<-class.levels[as.numeric(which(probs.temp[q,]==max(probs.temp[q,])))]
est.classes.temp[q]<-ifelse(length(max.classes)==1, max.classes, sample(max.classes,1))
}
acc.temp<-mean(est.classes.temp==classes)
# Keep ties alongside the current best; replace the list on strict improvement.
if(acc.temp==best.acc) #acc.temp==acc.best
{
best.sub.temp<-append(best.sub.temp, list(list(acc=acc.temp, segs=segs.update, probs=probs.temp, class=est.classes.temp)))
}
if(acc.temp>best.acc)
{
best.acc=acc.temp
best.sub.temp<-list(list(acc=acc.temp, segs=segs.update, probs=probs.temp, class=est.classes.temp))
}
}
return(list(best.sub.temp))
}
stopCluster(cl.max)
}
# Serial version of the identical search (used when do.par is FALSE).
if(!do.par && segment.n!=1)
{
best.sub.list<-foreach(m=2:use.segs.tot, .combine=append) %do%
{
combs<-combn(segment.n,m)
best.sub.temp<-list()
best.acc=0
for(j in 1:ncol(combs))
{
segs.update<-combs[,j]
probs.temp<-matrix(nrow=n.objs, ncol=length(class.levels.index))
for(q in class.levels.index)
{
ifelse(seg.weight,
probs.temp[,q]<-rowSums(t(segment.accuracies[k,segs.update]*t(as.matrix(step.array[,q,k,segs.update])))/sum(segment.accuracies[k,segs.update])),
probs.temp[,q]<-rowSums(as.matrix(step.array[,q,k,segs.update]))/length(segs.update)
)
}
est.classes.temp<-numeric()
for(q in 1:n.objs)
{
max.classes<-class.levels[as.numeric(which(probs.temp[q,]==max(probs.temp[q,])))]
est.classes.temp[q]<-ifelse(length(max.classes)==1, max.classes, sample(max.classes,1))
}
acc.temp<-mean(est.classes.temp==classes)
if(acc.temp==best.acc) #acc.temp==acc.best
{
best.sub.temp<-append(best.sub.temp, list(list(acc=acc.temp, segs=segs.update, probs=probs.temp, class=est.classes.temp)))
}
if(acc.temp>best.acc)
{
best.acc=acc.temp
best.sub.temp<-list(list(acc=acc.temp, segs=segs.update, probs=probs.temp, class=est.classes.temp))
}
}
return(list(best.sub.temp))
}
}
# Count the retained models (1 for the single-segment start, plus all ties per size).
acc.tab<-numeric()
n.top.models=1
if(segment.n!=1)
{
for(k in 1:(use.segs.tot-1))
{
for(j in 1:length(best.sub.list[[k]]))
{
n.top.models=n.top.models+1
}
}
}
# Assemble the accuracy vector and a column-per-model matrix of segment indices
# (unused rows are left NA so ragged combination sizes fit one matrix).
segs.tab<-matrix(NA, ncol=n.top.models, nrow=use.segs.tot)
acc.tab[1]<-best.sub.start$acc
segs.tab[1,1]<-best.sub.start$segs
count=1
if(segment.n!=1)
{
for(k in 1:(use.segs.tot-1))
{
for(j in 1:length(best.sub.list[[k]]))
{
count=count+1
acc.tab[count]<-best.sub.list[[k]][[j]]$acc
segs.tab[1:(k+1),count]<-best.sub.list[[k]][[j]]$segs
}
}
}
return(list(accuracies=acc.tab, segments.used=segs.tab))
}
### bss.single.model.cv
# Evaluates all best-segment-selection specifications for a given model (chosen
# segment size and neighbor size): computes distances, runs best subset
# selection via bestsub.ensemble, then cross-validates every retained segment
# combination.
# fdata.object : functional data object to classify.
# classes : classification identifiers for the set being analyzed.
# k.size : tuning parameter shared by all retained models.
# total.segments : number of curve segments.
# indent / deriv : derivative noise indent and derivative order.
# best.sub.max : maximum combination size passed to bestsub.ensemble.
# density : integration grid mesh size.
# folds / trials / folds.list : cross-validation layout.
# smooth : currently unused (kept for signature compatibility).
# class.method / ker / seg.weight : classification options (see segment.class).
# do.par / max.cores / large.set : parallel execution controls.
# output : print timing information while running.
# Returns per-fold test and training accuracies plus the bestsub.ensemble output.
# BUG FIXES vs. the original:
#  * 'classes' argument is now used throughout (was the global 'classes.temp').
#  * the NA guard now resets the ensemble probability column that was actually
#    NA (original wrote to undefined 'probs.temp' / 'train.objs').
#  * removed an unreachable statement after return() in the parallel branch.
bss.single.model.cv<-function(fdata.object, classes, k.size=5, total.segments=1,
indent=0, deriv=0, best.sub.max=10, density=NULL, folds=10,
trials=1, folds.list=NULL, smooth=FALSE, class.method="wknn",
ker=kern.tri, seg.weight=FALSE, do.par=FALSE, max.cores=2, large.set=FALSE, output=FALSE)
{
if(output) time.start<-Sys.time()
n.objs<-length(classes)
class.levels<-as.numeric(levels(factor(classes)))
class.levels.index<-seq_along(class.levels)
if(is.null(folds.list)) folds.list<-fold.creator(classes, folds, trials)
if(deriv!=0) fdata.object<-fdata.deriv(fdata.object, deriv)
distance.array<-calc.distance.array(fdata.object, indent, density, total.segments=total.segments, do.par, max.cores)
if(output)
{
time.dist<-Sys.time()
cat("Finished Distances.", time.dist-time.start, "\n")
}
# Per-segment probabilities/accuracies, then exhaustive best-subset selection.
temp.prob<-dist.to.prob(distance.array, total.segments, k.size, classes)
temp<-bestsub.ensemble(step.array=temp.prob$probability.array, segment.accuracies=t(temp.prob$accuracies),
classes=classes, seg.weight=seg.weight, do.par=do.par,
max.cores=max.cores, best.sub.max=best.sub.max)
if(output)
{
time.bss<-Sys.time()
cat("Finished Best Segment Selection", time.bss-time.dist, "\n")
}
# One retained model per column of the BSS segment table (NA rows dropped).
segs.list<-list()
for(j in 1:ncol(temp[[2]]))
{
segs.list[[j]]<-as.numeric(na.exclude(temp[[2]][,j]))
}
model.segs.used<-segs.list
models.to.analyze<-1:length(model.segs.used)
model.k<-rep(k.size, length(model.segs.used))
train.all.acc.mat<-matrix(nrow=trials*folds, ncol=length(model.segs.used))
test.all.acc.mat<-matrix(nrow=trials*folds, ncol=length(model.segs.used))
# Evaluate one CV trial: per-fold training (LOO on the training split) and
# test accuracies for every retained model. Shared by the parallel and serial
# branches below (the original duplicated this body verbatim).
eval.trial<-function(j.trial)
{
est.accuracy.train<-matrix(nrow=folds, ncol=length(model.k))
est.accuracy.test<-matrix(nrow=folds, ncol=length(model.k))
for(k.fold in 1:folds)
{
test.folds<-which(folds.list[[j.trial]]==k.fold)
training.classes<-classes[-test.folds]
test.classes<-classes[test.folds]
test.objs<-length(test.classes)
training.objs<-length(training.classes)
for(model in 1:length(model.k))
{
len.model.segs<-length(model.segs.used[[model]])
### Determine Training Set Accuracy and Segment LOO Accuracy for each Training Set ###
prob.array.train<-array(dim=c(training.objs, length(class.levels.index), 1, len.model.segs))
acc.mat.train<-matrix(ncol=1, nrow=len.model.segs)
for(j in 1:len.model.segs)
{
temp.1<-segment.class(distance.array[-test.folds,-test.folds, model.segs.used[[model]][j]],
training.classes, model.k[model], class.method, ker)
prob.array.train[,,1,j]<-temp.1$prob.array
acc.mat.train[j,]<-temp.1$accuracy.est
}
# Combine per-segment probabilities (accuracy-weighted or plain mean).
ens.probs.train<-matrix(nrow=training.objs, ncol=length(class.levels.index))
for(q in class.levels.index)
{
if(seg.weight)
{
ens.probs.train[,q]<-rowSums(t(acc.mat.train[1:len.model.segs,]*t(as.matrix(prob.array.train[,q,,1:len.model.segs]))))/sum(acc.mat.train[1:len.model.segs,])
} else
{
ens.probs.train[,q]<-rowSums(as.matrix(prob.array.train[,q,,1:len.model.segs]))/len.model.segs
}
# Reset NA probability columns to zero so classification can proceed.
if(anyNA(ens.probs.train[,q])) ens.probs.train[,q]<-rep(0, training.objs)
}
est.classes.train<-numeric()
for(q in 1:training.objs)
{
max.classes<-class.levels[as.numeric(which(ens.probs.train[q,]==max(ens.probs.train[q,])))]
est.classes.train[q]<-ifelse(length(max.classes)==1, max.classes, sample(max.classes,1))
}
est.accuracy.train[k.fold, model]<-mean(est.classes.train==training.classes)
### Determine Test Set Accuracy ###
prob.array.test<-array(dim=c(test.objs, length(class.levels.index), 1, len.model.segs))
for(q in 1:len.model.segs)
prob.array.test[,,1,q]<-validation.probs(distance.array[-test.folds,test.folds,model.segs.used[[model]][q]], training.classes, k.eval=model.k[model], class.method, ker)
ens.probs.test<-matrix(nrow=test.objs, ncol=length(class.levels.index))
for(q in class.levels.index)
{
if(seg.weight)
{
ens.probs.test[,q]<-rowSums(t(acc.mat.train[1:len.model.segs,]*t(as.matrix(prob.array.test[,q,,1:len.model.segs]))))/sum(acc.mat.train[1:len.model.segs,])
} else
{
ens.probs.test[,q]<-rowSums(as.matrix(prob.array.test[,q,,1:len.model.segs]))/len.model.segs
}
if(anyNA(ens.probs.test[,q])) ens.probs.test[,q]<-rep(0, test.objs)
}
est.classes.test<-numeric()
for(q in 1:test.objs)
{
max.classes<-class.levels[as.numeric(which(ens.probs.test[q,]==max(ens.probs.test[q,])))]
est.classes.test[q]<-ifelse(length(max.classes)==1, max.classes, sample(max.classes,1))
}
est.accuracy.test[k.fold, model]<-mean(est.classes.test==test.classes)
}
}
list(list(test.accuracy=est.accuracy.test, train.accuracy=est.accuracy.train))
}
if(do.par && !large.set)
{
cl.red<-makeCluster(max.cores)
registerDoParallel(cl.red)
cv.out<-foreach(j.trial=1:trials, .packages=c("fda.usc"),.export=c("segment.class", "validation.probs", "kern.tri", "kern.norm", "kern.unif"),.combine=append) %dopar% eval.trial(j.trial)
stopCluster(cl.red)
}
if(!do.par || large.set)
{
cv.out<-foreach(j.trial=1:trials, .packages=c("fda.usc"),.combine=append) %do% eval.trial(j.trial)
}
# Stack per-trial fold accuracies and place them in the output matrices.
test.accs.temp<-NULL
train.accs.temp<-NULL
for(j in 1:trials)
{
test.accs.temp<-rbind(test.accs.temp, cv.out[[j]]$test.accuracy)
train.accs.temp<-rbind(train.accs.temp, cv.out[[j]]$train.accuracy)
}
for(j in 1:length(models.to.analyze))
{
train.all.acc.mat[,models.to.analyze[j]]<-train.accs.temp[,j]
test.all.acc.mat[,models.to.analyze[j]]<-test.accs.temp[,j]
}
return(list(test.accuracies=test.all.acc.mat, training.accuracies=train.all.acc.mat, bss=temp))
}
### bss.ens.model.cv
# Cross validates the top LOOCV accuracy segment combinations for all combination sizes using
# classifiers produced from a single curve or combination of multiple derivative orders.
# If a single curve is to be analyzed, k.sizes, seg.sizes, indent, deriv should be single elements.
# If multiple curves are to be combined, k.sizes, seg.sizes, indent, deriv should be vectors.
# best.sub.max can be used to control the size of the segment combinations evaluated.
# fdata.object : original data functional object.
# classes : classification identifiers for set being analyzed.
# k.sizes : chosen model tuning parameters for each curve to be analyzed.
# seg.sizes : chosen model segment sizes for each curve to be analyzed.
# indent : numerical derivative noise indents for each derivative order.
# deriv : derivative orders. i.e c(0,1,2) would be original curve with first and second derivs.
# best.sub.max : maximum combination size.
# thresh : only if forward segment selection is to be used. Improvement in LOOCV to add segment to ensemble.
# density : integration grid mesh size to control accuracy
# folds : number of folds.
# trials : number of times cross validation should be performed.
# folds.list : a list of folds identifiers
# class.method : nonparametric classification method. Either "wknn" or "kernel".
# ker : kernel to be used (see Kernels above).
# seg.weight : Set to TRUE if combining segment probabilities should be weighted by individual segment accuracy.
# use.forward : Use FSS in place of BSS when evaluating final model. Reduces computational burden but
# does not guarantee the top segment combination will be found.
# do.par : Set to TRUE to run calculations in parallel.
# max.cores : number of cores to use during parallel calculations.
# large.set : A toggle for numerical stability if analyzing a dataset with extreme number of observations
# output : Toggle for displaying information about calculation while calculation is running
# BUG FIXES vs. the original:
#  * 'classes' argument is used throughout (was the global 'classes.temp').
#  * the NA guard resets the actual ensemble probability column (original
#    wrote to undefined 'probs.temp' / 'train.objs').
bss.ens.model.cv<-function(fdata.object, classes, k.sizes=c(5,5), seg.sizes=c(1,2),
indent=c(0,0), deriv=c(0,1), best.sub.max=10, thresh=1e-4, density=NULL, folds=10,
trials=1, folds.list=NULL, class.method="wknn", ker=kern.tri, seg.weight=FALSE,
use.forward=FALSE, do.par=FALSE, max.cores=2, large.set=FALSE, output=FALSE)
{
if(output) time.start<-Sys.time()
n.objs<-length(classes)
class.levels<-as.numeric(levels(factor(classes)))
class.levels.index<-seq_along(class.levels)
if(is.null(folds.list)) folds.list<-fold.creator(classes, folds, trials)
total.segments<-sum(seg.sizes)
distance.array<-array(dim=c(n.objs, n.objs, total.segments))
temp.prob.array<-array(dim=c(n.objs, length(class.levels.index), 1, total.segments))
temp.prob.accur<-NULL
# Segment-index bounds for each curve/derivative block within the big arrays.
csum<-cumsum(seg.sizes)
csum.seg<-(cumsum(seg.sizes)-seg.sizes)+1
for(j in 1:length(seg.sizes))
{
if(deriv[j]==0) fdata.object.temp<-fdata.object else fdata.object.temp<-fdata.deriv(fdata.object, deriv[j])
distance.array[,,csum.seg[j]:csum[j]]<-calc.distance.array(fdata.object.temp, indent[j], density, total.segments=seg.sizes[j], do.par, max.cores)
temp.prob<-dist.to.prob(distance.array[,,csum.seg[j]:csum[j]], seg.sizes[j], k.sizes[j], classes)
temp.prob.array[,,,csum.seg[j]:csum[j]]<-temp.prob$probability.array
temp.prob.accur<-c(temp.prob.accur, temp.prob$accuracies)
}
if(output)
{
time.dist<-Sys.time()
cat("Finished Distances.", time.dist-time.start, "\n")
}
# Segment selection: exhaustive best subset, or greedy forward selection.
if(!use.forward)
{
temp<-bestsub.ensemble(step.array=temp.prob.array, segment.accuracies=t(as.matrix(temp.prob.accur)),
classes=classes, seg.weight=seg.weight, do.par=do.par,
max.cores=max.cores, best.sub.max=best.sub.max)
segs.list<-list()
for(j in 1:ncol(temp[[2]]))
{
segs.list[[j]]<-as.numeric(na.exclude(temp[[2]][,j]))
}
model.segs.used<-segs.list
models.to.analyze<-1:length(model.segs.used)
}
if(use.forward)
{
temp.fss<-forward.ensemble(step.array=temp.prob.array, segment.accuracies=t(as.matrix(temp.prob.accur)),
classes=classes, seg.weight=seg.weight, thresh=thresh, do.par=do.par,
cores=max.cores)
temp<-list(segments.used=as.matrix(temp.fss$ens.segments[[1]], ncol=1))
model.segs.used<-list(temp.fss$ens.segments[[1]])
models.to.analyze<-1
}
if(output)
{
time.bss<-Sys.time()
cat("Finished BSS.", time.bss-time.dist, "\n")
}
# Expand per-curve k and derivative order to one entry per segment so each
# segment index maps to its own tuning parameter.
model.k<-NULL
deriv.k<-NULL
for(j in 1:length(seg.sizes))
{
deriv.k<-c(deriv.k, rep(deriv[j], seg.sizes[j]))
model.k<-c(model.k, rep(k.sizes[j], seg.sizes[j]))
}
train.all.acc.mat<-matrix(nrow=trials*folds, ncol=length(model.segs.used))
test.all.acc.mat<-matrix(nrow=trials*folds, ncol=length(model.segs.used))
# Evaluate one CV trial (training LOO + test accuracy for each retained
# model). Shared by the parallel and serial branches; the original
# duplicated this body verbatim. Note k is indexed per segment here.
eval.trial<-function(j.trial)
{
est.accuracy.train<-matrix(nrow=folds, ncol=length(model.segs.used))
est.accuracy.test<-matrix(nrow=folds, ncol=length(model.segs.used))
for(k.fold in 1:folds)
{
test.folds<-which(folds.list[[j.trial]]==k.fold)
training.classes<-classes[-test.folds]
test.classes<-classes[test.folds]
test.objs<-length(test.classes)
training.objs<-length(training.classes)
for(model in 1:length(model.segs.used))
{
len.model.segs<-length(model.segs.used[[model]])
prob.array.train<-array(dim=c(training.objs, length(class.levels.index), 1, len.model.segs))
acc.mat.train<-matrix(ncol=1, nrow=len.model.segs)
for(j in 1:len.model.segs)
{
temp.1<-segment.class(distance.array[-test.folds,-test.folds, model.segs.used[[model]][j]],
training.classes, model.k[model.segs.used[[model]][j]], class.method, ker)
prob.array.train[,,1,j]<-temp.1$prob.array
acc.mat.train[j,]<-temp.1$accuracy.est
}
# Combine per-segment probabilities (accuracy-weighted or plain mean).
ens.probs.train<-matrix(nrow=training.objs, ncol=length(class.levels.index))
for(q in class.levels.index)
{
if(seg.weight)
{
ens.probs.train[,q]<-rowSums(t(acc.mat.train[1:len.model.segs,]*t(as.matrix(prob.array.train[,q,,1:len.model.segs]))))/sum(acc.mat.train[1:len.model.segs,])
} else
{
ens.probs.train[,q]<-rowSums(as.matrix(prob.array.train[,q,,1:len.model.segs]))/len.model.segs
}
# Reset NA probability columns to zero so classification can proceed.
if(anyNA(ens.probs.train[,q])) ens.probs.train[,q]<-rep(0, training.objs)
}
est.classes.train<-numeric()
for(q in 1:training.objs)
{
max.classes<-class.levels[as.numeric(which(ens.probs.train[q,]==max(ens.probs.train[q,])))]
est.classes.train[q]<-ifelse(length(max.classes)==1, max.classes, sample(max.classes,1))
}
est.accuracy.train[k.fold, model]<-mean(est.classes.train==training.classes)
prob.array.test<-array(dim=c(test.objs, length(class.levels.index), 1, len.model.segs))
for(q in 1:len.model.segs)
prob.array.test[,,1,q]<-validation.probs(distance.array[-test.folds,test.folds,model.segs.used[[model]][q]], training.classes, k.eval=model.k[model.segs.used[[model]][q]], class.method, ker)
ens.probs.test<-matrix(nrow=test.objs, ncol=length(class.levels.index))
for(q in class.levels.index)
{
if(seg.weight)
{
ens.probs.test[,q]<-rowSums(t(acc.mat.train[1:len.model.segs,]*t(as.matrix(prob.array.test[,q,,1:len.model.segs]))))/sum(acc.mat.train[1:len.model.segs,])
} else
{
ens.probs.test[,q]<-rowSums(as.matrix(prob.array.test[,q,,1:len.model.segs]))/len.model.segs
}
if(anyNA(ens.probs.test[,q])) ens.probs.test[,q]<-rep(0, test.objs)
}
est.classes.test<-numeric()
for(q in 1:test.objs)
{
max.classes<-class.levels[as.numeric(which(ens.probs.test[q,]==max(ens.probs.test[q,])))]
est.classes.test[q]<-ifelse(length(max.classes)==1, max.classes, sample(max.classes,1))
}
est.accuracy.test[k.fold, model]<-mean(est.classes.test==test.classes)
}
}
list(list(test.accuracy=est.accuracy.test, train.accuracy=est.accuracy.train))
}
if(do.par && !large.set)
{
cl.red<-makeCluster(max.cores)
registerDoParallel(cl.red)
cv.out<-foreach(j.trial=1:trials, .packages=c("fda.usc"),.export=c("segment.class", "validation.probs", "kern.tri", "kern.norm", "kern.unif"),.combine=append) %dopar% eval.trial(j.trial)
stopCluster(cl.red)
}
if(!do.par || large.set)
{
cv.out<-foreach(j.trial=1:trials, .packages=c("fda.usc"),.combine=append) %do% eval.trial(j.trial)
}
# Stack per-trial fold accuracies and place them in the output matrices.
test.accs.temp<-NULL
train.accs.temp<-NULL
for(j in 1:trials)
{
test.accs.temp<-rbind(test.accs.temp, cv.out[[j]]$test.accuracy)
train.accs.temp<-rbind(train.accs.temp, cv.out[[j]]$train.accuracy)
}
for(j in 1:length(models.to.analyze))
{
train.all.acc.mat[,models.to.analyze[j]]<-train.accs.temp[,j]
test.all.acc.mat[,models.to.analyze[j]]<-test.accs.temp[,j]
}
return(list(test.accuracies=test.all.acc.mat, training.accuracies=train.all.acc.mat, bss=temp))
}
|
# Analysis of ESA NO2 change from January to March 2020 (Sentinel-5P imagery).
library(raster)
# Exercise: load all thirteen images.
# NOTE(review): these raster() calls assume the EN_00xx.png files are already
# in the working directory — the setwd() calls only appear further down.
EN01 <- raster("EN_0001.png")
EN02 <- raster("EN_0002.png")
EN03 <- raster("EN_0003.png")
EN04 <- raster("EN_0004.png")
EN05 <- raster("EN_0005.png")
EN06 <- raster("EN_0006.png")
EN07 <- raster("EN_0007.png")
EN08 <- raster("EN_0008.png")
EN09 <- raster("EN_0009.png")
EN10 <- raster("EN_0010.png")
EN11 <- raster("EN_0011.png")
EN12 <- raster("EN_0012.png")
EN13 <- raster("EN_0013.png")
# Red-to-yellow palette for NO2 concentration.
cl <- colorRampPalette(c('red','orange','yellow'))(100)
plot(EN01, col=cl)
plot(EN13, col=cl)
# First vs last image side by side.
par(mfrow=c(1,2))
plot(EN01, col=cl)
plot(EN13, col=cl)
dev.off()
# Difference map: March minus January.
difno2 <- EN13 - EN01
cldif <- colorRampPalette(c('blue','black','yellow'))(100)
plot(difno2, col=cldif)
# Plot all images in a 4x4 grid.
par(mfrow=c(4,4))
plot(EN01, col=cl)
plot(EN02, col=cl)
plot(EN03, col=cl)
plot(EN04, col=cl)
plot(EN05, col=cl)
plot(EN06, col=cl)
plot(EN07, col=cl)
plot(EN08, col=cl)
plot(EN09, col=cl)
plot(EN10, col=cl)
plot(EN11, col=cl) # BUG FIX: closing parenthesis was missing here
plot(EN12, col=cl)
plot(EN13, col=cl)
setwd("C:/lab/")
setwd("C:/lab/esa_no2/")
# BUG FIX: anchored regex (".png" would also match names merely containing
# any character followed by "png").
rlist <- list.files(pattern="\\.png$")
# Load all images at once.
listafinale <- lapply(rlist, raster)
EN <- stack(listafinale)
cl <- colorRampPalette(c('red','orange','yellow'))(100)
plot(EN, col=cl)
setwd("C:/lab/esa_no2")
rlist <- list.files(pattern="\\.png$")
EN <- stack(listafinale)
# Difference between last and first layers of the stack.
difEN <- EN$EN_0013 - EN$EN_0001
cld <- colorRampPalette(c('blue','white','red'))(100)
plot(difEN, col=cld)
cl <- colorRampPalette(c('red','orange','yellow'))(100)
plot(EN, col=cl)
# Per-layer distributions (TRUE/FALSE spelled out; T/F are reassignable).
boxplot(EN)
boxplot(EN, horizontal=TRUE)
boxplot(EN, horizontal=TRUE, outline=FALSE)
boxplot(EN, horizontal=TRUE, outline=FALSE, axes=TRUE)
|
/R_code_multitemp_NO2.r
|
no_license
|
valerioamendola/ecologia_del_paesaggio
|
R
| false
| false
| 1,855
|
r
|
# Analysis of ESA NO2 change from January to March 2020 (Sentinel-5P imagery).
library(raster)
# Exercise: load all thirteen images.
# NOTE(review): these raster() calls assume the EN_00xx.png files are already
# in the working directory — the setwd() calls only appear further down.
EN01 <- raster("EN_0001.png")
EN02 <- raster("EN_0002.png")
EN03 <- raster("EN_0003.png")
EN04 <- raster("EN_0004.png")
EN05 <- raster("EN_0005.png")
EN06 <- raster("EN_0006.png")
EN07 <- raster("EN_0007.png")
EN08 <- raster("EN_0008.png")
EN09 <- raster("EN_0009.png")
EN10 <- raster("EN_0010.png")
EN11 <- raster("EN_0011.png")
EN12 <- raster("EN_0012.png")
EN13 <- raster("EN_0013.png")
# Red-to-yellow palette for NO2 concentration.
cl <- colorRampPalette(c('red','orange','yellow'))(100)
plot(EN01, col=cl)
plot(EN13, col=cl)
# First vs last image side by side.
par(mfrow=c(1,2))
plot(EN01, col=cl)
plot(EN13, col=cl)
dev.off()
# Difference map: March minus January.
difno2 <- EN13 - EN01
cldif <- colorRampPalette(c('blue','black','yellow'))(100)
plot(difno2, col=cldif)
# Plot all images in a 4x4 grid.
par(mfrow=c(4,4))
plot(EN01, col=cl)
plot(EN02, col=cl)
plot(EN03, col=cl)
plot(EN04, col=cl)
plot(EN05, col=cl)
plot(EN06, col=cl)
plot(EN07, col=cl)
plot(EN08, col=cl)
plot(EN09, col=cl)
plot(EN10, col=cl)
plot(EN11, col=cl) # BUG FIX: closing parenthesis was missing here
plot(EN12, col=cl)
plot(EN13, col=cl)
setwd("C:/lab/")
setwd("C:/lab/esa_no2/")
# BUG FIX: anchored regex (".png" would also match names merely containing
# any character followed by "png").
rlist <- list.files(pattern="\\.png$")
# Load all images at once.
listafinale <- lapply(rlist, raster)
EN <- stack(listafinale)
cl <- colorRampPalette(c('red','orange','yellow'))(100)
plot(EN, col=cl)
setwd("C:/lab/esa_no2")
rlist <- list.files(pattern="\\.png$")
EN <- stack(listafinale)
# Difference between last and first layers of the stack.
difEN <- EN$EN_0013 - EN$EN_0001
cld <- colorRampPalette(c('blue','white','red'))(100)
plot(difEN, col=cld)
cl <- colorRampPalette(c('red','orange','yellow'))(100)
plot(EN, col=cl)
# Per-layer distributions (TRUE/FALSE spelled out; T/F are reassignable).
boxplot(EN)
boxplot(EN, horizontal=TRUE)
boxplot(EN, horizontal=TRUE, outline=FALSE)
boxplot(EN, horizontal=TRUE, outline=FALSE, axes=TRUE)
|
### ---
### Extra analyses: dominance analyses for
### predictors other than Adaptive Performance
### ---
### ---
### Short aliases for the eight outcome columns used below
### ---
handlemerg    <- NCO_Promotion_Study[["HANDLINGEMERGENCIES"]]
interperson   <- NCO_Promotion_Study[["INTERPERSONALADAPTABILITY"]]
changecond    <- NCO_Promotion_Study[["UNPREDICTABLEWORKSIT"]]
physicaladapt <- NCO_Promotion_Study[["PHYSICALLYORIENTED"]]
learnnew      <- NCO_Promotion_Study[["LEARNINGNEWTECH"]]
handlestress  <- NCO_Promotion_Study[["HANDLINGWORKSTRESS"]]
cultadapt     <- NCO_Promotion_Study[["CULTURALADAPTABILITY"]]
probsolve     <- NCO_Promotion_Study[["SOLVINGPROBLEMSCREATIVELY"]]
## Shared helpers for the dominance analyses below ------------------------
# Mean-impute missing values column-wise (same sapply/ifelse imputation the
# original repeated nine times; returns a plain data.frame).
impute.means <- function(dat) {
  data.frame(
    sapply(dat,
           function(x) ifelse(is.na(x),
                              mean(x, na.rm = TRUE),
                              x)))
}
# Run the DW.accuracy dominance analysis of the six competencies on a
# mean-imputed data set whose last column is the outcome. All settings match
# the original calls; T/F spelled out as TRUE/FALSE.
run.dominance <- function(dat) {
  DW.accuracy(dat,
              iv.relia = c(.81, .81, .81, .81, .81, .81),
              dv.relia = .77,
              iv.names = c("posenviron",
                           "getresults",
                           "preplead",
                           "devlead",
                           "leadbyexp",
                           "comms"),
              whichCor = 0,
              spurIV = TRUE,
              epsilon = FALSE,
              n.sims = 100)
}
# 1. DA of Competencies on Handling Emergencies #
userDat2 <- data.frame(posenviron, getresults, preplead, devlead, leadbyexp, comms, handlemerg)
userDat3 <- impute.means(userDat2)
test2 <- run.dominance(userDat3)
print(test2)
View(test2$avg.weights)
# 2. Competencies with Interperson #
userDat5 <- data.frame(posenviron, getresults, preplead, devlead, leadbyexp, comms, interperson)
userDat4 <- impute.means(userDat5)
test3 <- run.dominance(userDat4)
print(test3)
View(test3$avg.weights)
# 3. Competencies with changecond #
userDat7 <- data.frame(posenviron, getresults, preplead, devlead, leadbyexp, comms, changecond)
userDat6 <- impute.means(userDat7)
test4 <- run.dominance(userDat6)
print(test4)
View(test4$avg.weights)
# 4. Competencies with physicaladapt #
userDat9 <- data.frame(posenviron, getresults, preplead, devlead, leadbyexp, comms, physicaladapt)
userDat8 <- impute.means(userDat9)
test5 <- run.dominance(userDat8)
print(test5)
View(test5$avg.weights)
# 5. Competencies with learnnew #
userDat11 <- data.frame(posenviron, getresults, preplead, devlead, leadbyexp, comms, learnnew)
userDat10 <- impute.means(userDat11)
test6 <- run.dominance(userDat10)
print(test6)
View(test6$avg.weights)
# 6. Competencies with handlestress #
userDat13 <- data.frame(posenviron, getresults, preplead, devlead, leadbyexp, comms, handlestress)
userDat12 <- impute.means(userDat13)
test7 <- run.dominance(userDat12)
print(test7)
View(test7$avg.weights)
# 7. Competencies with handlestress #
# NOTE(review): this section duplicates section 6 (handlestress appears twice
# and no analysis uses an eighth outcome) — presumably one outcome was meant
# here instead; kept as-is to preserve behavior. Confirm against the study plan.
userDat15 <- data.frame(posenviron, getresults, preplead, devlead, leadbyexp, comms, handlestress)
userDat14 <- impute.means(userDat15)
test8 <- run.dominance(userDat14)
print(test8)
View(test8$avg.weights)
# 8. Competencies with cultadapt #
userDat17 <- data.frame(posenviron, getresults, preplead, devlead, leadbyexp, comms, cultadapt)
userDat16 <- impute.means(userDat17)
test9 <- run.dominance(userDat16)
print(test9)
View(test9$avg.weights)
# 9. Competencies with probsolve #
userDat19 <- data.frame(posenviron, getresults, preplead, devlead, leadbyexp, comms, probsolve)
userDat18 <- impute.means(userDat19)
test10 <- run.dominance(userDat18)
print(test10)
View(test10$avg.weights)
|
/Dominance Analyses for Other Outcomes.R
|
no_license
|
colekm297/Monte-Carlo-Dominance-Analysis
|
R
| false
| false
| 9,862
|
r
|
### ---
### Extra analyses: Dominance analyses for
### other predictors besides Adaptive Performance
### ---
### ---
### Rename other outcome variables
### ---
# Pull each outcome column out of the study data frame under a short name.
handlemerg    <- NCO_Promotion_Study$HANDLINGEMERGENCIES
interperson   <- NCO_Promotion_Study$INTERPERSONALADAPTABILITY
changecond    <- NCO_Promotion_Study$UNPREDICTABLEWORKSIT
physicaladapt <- NCO_Promotion_Study$PHYSICALLYORIENTED
learnnew      <- NCO_Promotion_Study$LEARNINGNEWTECH
handlestress  <- NCO_Promotion_Study$HANDLINGWORKSTRESS
cultadapt     <- NCO_Promotion_Study$CULTURALADAPTABILITY
probsolve     <- NCO_Promotion_Study$SOLVINGPROBLEMSCREATIVELY

## ---------------------------------------------------------------------------
## Helpers shared by every outcome analysis below.  The original code repeated
## the same impute + DW.accuracy + print/View pattern verbatim per outcome.
## ---------------------------------------------------------------------------

# Mean-impute every column of `df`: each NA becomes its column mean.
impute_col_means <- function(df) {
  data.frame(sapply(df, function(x) ifelse(is.na(x), mean(x, na.rm = TRUE), x)))
}

# Dominance analysis of the six competencies against one (already imputed)
# outcome data set, with the settings shared by every analysis in this file:
# IV reliabilities .81, DV reliability .77, 100 Monte Carlo simulations.
run_dominance <- function(dat) {
  DW.accuracy(dat,
              iv.relia = rep(.81, 6),
              dv.relia = .77,
              iv.names = c("posenviron", "getresults", "preplead",
                           "devlead", "leadbyexp", "comms"),
              whichCor = 0,
              spurIV = TRUE,    # TRUE/FALSE spelled out (was T/F)
              epsilon = FALSE,
              n.sims = 100)
}

# DA of Competencies on Handling Emergencies #
userDat2 <- data.frame(posenviron, getresults, preplead, devlead,
                       leadbyexp, comms, handlemerg)
userDat3 <- impute_col_means(userDat2)
test2 <- run_dominance(userDat3)
print(test2)
View(test2$avg.weights)

# 2. Competencies with Interperson #
userDat5 <- data.frame(posenviron, getresults, preplead, devlead,
                       leadbyexp, comms, interperson)
userDat4 <- impute_col_means(userDat5)
test3 <- run_dominance(userDat4)
print(test3)
View(test3$avg.weights)

# 3. Competencies with changecond #
userDat7 <- data.frame(posenviron, getresults, preplead, devlead,
                       leadbyexp, comms, changecond)
userDat6 <- impute_col_means(userDat7)
test4 <- run_dominance(userDat6)
print(test4)
View(test4$avg.weights)

# 4. Competencies with physicaladapt #
userDat9 <- data.frame(posenviron, getresults, preplead, devlead,
                       leadbyexp, comms, physicaladapt)
userDat8 <- impute_col_means(userDat9)
test5 <- run_dominance(userDat8)
print(test5)
View(test5$avg.weights)

# 5. Competencies with learnnew #
userDat11 <- data.frame(posenviron, getresults, preplead, devlead,
                        leadbyexp, comms, learnnew)
userDat10 <- impute_col_means(userDat11)
test6 <- run_dominance(userDat10)
print(test6)
View(test6$avg.weights)

# 6. Competencies with handlestress #
userDat13 <- data.frame(posenviron, getresults, preplead, devlead,
                        leadbyexp, comms, handlestress)
userDat12 <- impute_col_means(userDat13)
test7 <- run_dominance(userDat12)
print(test7)
View(test7$avg.weights)

# 7. Competencies with handlestress #
# NOTE(review): this repeats the handlestress outcome from section 6, so
# test8 re-runs the same analysis as test7.  The outcome list at the top of
# the file suggests a different variable may have been intended -- confirm.
userDat15 <- data.frame(posenviron, getresults, preplead, devlead,
                        leadbyexp, comms, handlestress)
userDat14 <- impute_col_means(userDat15)
test8 <- run_dominance(userDat14)
print(test8)
View(test8$avg.weights)

# 8. Competencies with cultadapt #
userDat17 <- data.frame(posenviron, getresults, preplead, devlead,
                        leadbyexp, comms, cultadapt)
userDat16 <- impute_col_means(userDat17)
test9 <- run_dominance(userDat16)
print(test9)
View(test9$avg.weights)

# Competencies with probsolve #
userDat19 <- data.frame(posenviron, getresults, preplead, devlead,
                        leadbyexp, comms, probsolve)
userDat18 <- impute_col_means(userDat19)
test10 <- run_dominance(userDat18)
print(test10)
View(test10$avg.weights)
|
# Case Study - Denco
# Manufacturing firm with sales data of part number and customer,
# with region-wise sales.
# Demonstrates: import, table(), dplyr summarisation, aggregation.

# Read file, method 1: relative path inside the project folder
sales1 <- read.csv("./data/denco.csv")
str(sales1)   # see if data is loaded and check its structure
head(sales1)

# Read file, method 2: interactive file chooser (file outside the project)
sales2 <- read.csv(file.choose())
str(sales2)

# Read file, method 3: directly from a Google Sheet
# install.packages('gsheet')
library(gsheet)
url <- "https://docs.google.com/spreadsheets/d/1h7HU0X_Q4T5h5D1Q36qoK40Tplz94x_HZYHOJJC_edU/edit#gid=216113907"
sales3 <- as.data.frame(gsheet2tbl(url))
str(sales3)
head(sales3)

# However the data was read, work from the `sales` object
sales <- sales1            # keep sales1 as a backup
head(sales)                # top 6 rows
str(sales)                 # structure
class(sales)               # class type - data.frame
dim(sales)                 # rows & columns
summary(sales)             # per-column summary
names(sales)               # column names
head(sales)                # first 6 rows
tail(sales)                # last 6 rows

# Some other functions
unique(sales$custname)            # unique customer names
length(unique(sales$custname))    # number of unique customers
length(unique(sales$region))      # number of distinct regions

# Information required ----
# 1.  Who are the most loyal customers?  ->  improve repeat sales, target
#     customers with low sales volumes.
# 2.  Which customers contribute the most revenue?  ->  retain them and
#     target incentives.
# 3a. Which part numbers bring in a significant portion of revenue?
# 3b. Which parts have the highest profit margin?

names(sales)
library(dplyr)   # used for all the summarisation below

# Case 1: loyal customers ----
# Purchase frequency per customer, sorted in descending order.
names(sales)
(t1 <- table(sales$custname))   # frequency table for all customers
class(t1); length(t1)
head(t1)                        # top 6, but not sorted yet
t2 <- sort(t1, decreasing = TRUE)
head(t2)
# Ans: CHIZ BROS INC most loyal with 253 purchases

# Same answer via dplyr
sales %>% count(custname, sort = TRUE) %>% head(n = 5)
# Ans: CHIZ BROS INC most loyal with 253 purchases
sales %>%
  dplyr::group_by(custname) %>%
  dplyr::summarise(n = n()) %>%
  dplyr::arrange(desc(n)) %>%
  head(n = 5)
# Ans: CHIZ BROS INC most loyal with 253 purchases

# Case 2: customer with the highest revenue ----
# BUG FIX: the original piped into tail(n = 5), which shows the five *lowest*
# earners after the descending sort; head(n = 5) shows the top earners the
# accompanying answer refers to.
sales %>%
  group_by(custname) %>%
  summarize(Revenue = sum(revenue)) %>%
  arrange(desc(Revenue)) %>%
  head(n = 5)
# Ans 2: Triumph Insulation gave max revenue

# Save the summary object and inspect it
salesrevenue <- sales %>%
  group_by(custname) %>%
  summarize(Revenue = sum(revenue)) %>%
  arrange(desc(Revenue))
# dplyr returns tibbles, which truncate printing by default
options(tibble.width = Inf)        # display all columns
options(tibble.print_max = Inf)    # display all rows
salesrevenue[1:5, ]                # first 5 rows
# Ans 2: here also Triumph Insulation gives max revenue = 35592K

# Case 3: most profitable part numbers ----
# Look at frequency, revenue and/or profit margin.
sales %>%
  dplyr::group_by(partnum) %>%
  dplyr::summarise(n = n()) %>%
  dplyr::arrange(desc(n)) %>%
  head(n = 5)
# Ans 3a: part number 764821000 was purchased the most times - 122

# Summarise partnum by total profit margin
names(sales)
sales %>%
  group_by(partnum) %>%
  summarise(TotalMargin = sum(margin)) %>%
  arrange(desc(TotalMargin)) %>%
  head()
# Ans 3b: part number 733648000 gave max margin profit - 11003367
# Summarise partnum by revenue: left as an exercise.

# Case extra: region-wise sales ----
(salesregionrevenue <- sales %>%
   group_by(region) %>%
   summarise(Revenue = sum(revenue)) %>%
   arrange(desc(Revenue)))
# BUG FIX: label the pie slices from the summarised table itself;
# unique(sales$region) is in first-appearance order while the revenues are
# sorted descending, so the original labels could land on the wrong slices.
pie(x = salesregionrevenue$Revenue, labels = salesregionrevenue$region)
barplot(salesregionrevenue$Revenue, col = 1:4)

# Filter data ----
# Rows where revenue >= 700000, showing only custname and revenue
sales %>% filter(revenue >= 700000) %>% select(custname, revenue)
# 1% of rows at random
sales %>% sample_frac(0.01) %>% select(custname, revenue)
# 10 random rows, sorted by region
sales %>% sample_n(10) %>% select(custname, region) %>% arrange(region)
sample_n(sales, 10)   # same idea without the pipe

# End of Denco case (manufacturing firm): descriptive analysis of loyal
# customers, top-revenue customers, and profitable part numbers
# (frequency, revenue and margin).
|
/Denco Case-1.R
|
no_license
|
Vijay1204/FirstProject
|
R
| false
| false
| 4,406
|
r
|
# Case Study - Denco
# Manufacturing firm with sales data of part number and customer,
# with region-wise sales.
# Demonstrates: import, table(), dplyr summarisation, aggregation.

# Read file, method 1: relative path inside the project folder
sales1 <- read.csv("./data/denco.csv")
str(sales1)   # see if data is loaded and check its structure
head(sales1)

# Read file, method 2: interactive file chooser (file outside the project)
sales2 <- read.csv(file.choose())
str(sales2)

# Read file, method 3: directly from a Google Sheet
# install.packages('gsheet')
library(gsheet)
url <- "https://docs.google.com/spreadsheets/d/1h7HU0X_Q4T5h5D1Q36qoK40Tplz94x_HZYHOJJC_edU/edit#gid=216113907"
sales3 <- as.data.frame(gsheet2tbl(url))
str(sales3)
head(sales3)

# However the data was read, work from the `sales` object
sales <- sales1            # keep sales1 as a backup
head(sales)                # top 6 rows
str(sales)                 # structure
class(sales)               # class type - data.frame
dim(sales)                 # rows & columns
summary(sales)             # per-column summary
names(sales)               # column names
head(sales)                # first 6 rows
tail(sales)                # last 6 rows

# Some other functions
unique(sales$custname)            # unique customer names
length(unique(sales$custname))    # number of unique customers
length(unique(sales$region))      # number of distinct regions

# Information required ----
# 1.  Who are the most loyal customers?  ->  improve repeat sales, target
#     customers with low sales volumes.
# 2.  Which customers contribute the most revenue?  ->  retain them and
#     target incentives.
# 3a. Which part numbers bring in a significant portion of revenue?
# 3b. Which parts have the highest profit margin?

names(sales)
library(dplyr)   # used for all the summarisation below

# Case 1: loyal customers ----
# Purchase frequency per customer, sorted in descending order.
names(sales)
(t1 <- table(sales$custname))   # frequency table for all customers
class(t1); length(t1)
head(t1)                        # top 6, but not sorted yet
t2 <- sort(t1, decreasing = TRUE)
head(t2)
# Ans: CHIZ BROS INC most loyal with 253 purchases

# Same answer via dplyr
sales %>% count(custname, sort = TRUE) %>% head(n = 5)
# Ans: CHIZ BROS INC most loyal with 253 purchases
sales %>%
  dplyr::group_by(custname) %>%
  dplyr::summarise(n = n()) %>%
  dplyr::arrange(desc(n)) %>%
  head(n = 5)
# Ans: CHIZ BROS INC most loyal with 253 purchases

# Case 2: customer with the highest revenue ----
# BUG FIX: the original piped into tail(n = 5), which shows the five *lowest*
# earners after the descending sort; head(n = 5) shows the top earners the
# accompanying answer refers to.
sales %>%
  group_by(custname) %>%
  summarize(Revenue = sum(revenue)) %>%
  arrange(desc(Revenue)) %>%
  head(n = 5)
# Ans 2: Triumph Insulation gave max revenue

# Save the summary object and inspect it
salesrevenue <- sales %>%
  group_by(custname) %>%
  summarize(Revenue = sum(revenue)) %>%
  arrange(desc(Revenue))
# dplyr returns tibbles, which truncate printing by default
options(tibble.width = Inf)        # display all columns
options(tibble.print_max = Inf)    # display all rows
salesrevenue[1:5, ]                # first 5 rows
# Ans 2: here also Triumph Insulation gives max revenue = 35592K

# Case 3: most profitable part numbers ----
# Look at frequency, revenue and/or profit margin.
sales %>%
  dplyr::group_by(partnum) %>%
  dplyr::summarise(n = n()) %>%
  dplyr::arrange(desc(n)) %>%
  head(n = 5)
# Ans 3a: part number 764821000 was purchased the most times - 122

# Summarise partnum by total profit margin
names(sales)
sales %>%
  group_by(partnum) %>%
  summarise(TotalMargin = sum(margin)) %>%
  arrange(desc(TotalMargin)) %>%
  head()
# Ans 3b: part number 733648000 gave max margin profit - 11003367
# Summarise partnum by revenue: left as an exercise.

# Case extra: region-wise sales ----
(salesregionrevenue <- sales %>%
   group_by(region) %>%
   summarise(Revenue = sum(revenue)) %>%
   arrange(desc(Revenue)))
# BUG FIX: label the pie slices from the summarised table itself;
# unique(sales$region) is in first-appearance order while the revenues are
# sorted descending, so the original labels could land on the wrong slices.
pie(x = salesregionrevenue$Revenue, labels = salesregionrevenue$region)
barplot(salesregionrevenue$Revenue, col = 1:4)

# Filter data ----
# Rows where revenue >= 700000, showing only custname and revenue
sales %>% filter(revenue >= 700000) %>% select(custname, revenue)
# 1% of rows at random
sales %>% sample_frac(0.01) %>% select(custname, revenue)
# 10 random rows, sorted by region
sales %>% sample_n(10) %>% select(custname, region) %>% arrange(region)
sample_n(sales, 10)   # same idea without the pipe

# End of Denco case (manufacturing firm): descriptive analysis of loyal
# customers, top-revenue customers, and profitable part numbers
# (frequency, revenue and margin).
|
# One-sample z test: is the machine mean (sample mean 152, sigma 2, n = 100)
# significantly greater than the hypothesised mean of 150?
library(visualize)
zvalue <- (152 - 150) / (2 / sqrt(100))
zvalue               # observed z statistic (= 10)
qnorm(0.95)          # upper-tail critical value at alpha = .05
qnorm(0.05)          # lower-tail critical value
# Shade the upper-tail area beyond the observed statistic on a standard normal
visualize.norm(stat = zvalue, mu = 0, sd = 1, section = "upper")
library(BSDA)
# Formal one-sided z test on the raw Machine.1 measurements
z.test(x = D$Machine.1, alternative = "greater", sigma.x = 2, mu = 150)
|
/Lab7_Practice/7b_prac.r
|
no_license
|
nullvoiddeath/7th-R-008
|
R
| false
| false
| 215
|
r
|
# One-sample z test: is the machine mean (sample mean 152, sigma 2, n = 100)
# significantly greater than the hypothesised mean of 150?
library(visualize)
zvalue <- (152 - 150) / (2 / sqrt(100))
zvalue               # observed z statistic (= 10)
qnorm(0.95)          # upper-tail critical value at alpha = .05
qnorm(0.05)          # lower-tail critical value
# Shade the upper-tail area beyond the observed statistic on a standard normal
visualize.norm(stat = zvalue, mu = 0, sd = 1, section = "upper")
library(BSDA)
# Formal one-sided z test on the raw Machine.1 measurements
z.test(x = D$Machine.1, alternative = "greater", sigma.x = 2, mu = 150)
|
## Tests for write2
## Each test renders an object through a write2*() wrapper and compares the
## generated .Rmd against a reference file resolved relative to the test dir.
context("Testing the write2 output")
data(mockstudy)
# Render `object` with the given write2 function and compare the generated
# .Rmd against the stored reference file.
#
# FUN       - a write2* function (write2html/write2word/write2pdf), resolved
#             via match.fun so a name or a function both work.
# object    - the object to render.
# reference - path of the checked-in .Rmd to compare against.
# ...       - extra arguments forwarded to FUN.
expect_write2_worked <- function(FUN, object, reference, ...)
{
  FUN <- match.fun(FUN)
  filename <- tempfile()
  rmd <- paste0(filename, ".Rmd")
  if(!file.exists(reference)) skip("Couldn't find the reference file.")
  if(!file.create(rmd)) skip("Couldn't create the temporary file.")
  # FIX: register the .Rmd cleanup only after the file actually exists.  The
  # original registered it before the skip() checks above, so a skipped test
  # ran expect_true(file.remove(<nonexistent file>)) in its exit handler and
  # recorded a spurious failure.
  on.exit(expect_true(file.remove(rmd)))
  # expect_error(..., NA) asserts the render call completes without error.
  expect_error(FUN(object, file = filename, ..., render. = TRUE, keep.rmd = TRUE, append. = FALSE, quiet = TRUE), NA)
  # The render also produces the output file at `filename`; remove it too.
  on.exit(expect_true(file.remove(filename)), add = TRUE)
  generated <- readLines(rmd)
  expect_output_file(cat(generated, sep = "\n"), reference)
}
###########################################################################################################
#### Internal output
###########################################################################################################
# Each test below builds an arsenal object, renders it with a write2*()
# wrapper, and diffs the generated .Rmd against a checked-in reference.
test_that("write2.tableby -> HTML", {
  expect_write2_worked(write2html, tableby(arm ~ sex + age, data=mockstudy, numeric.stats = c("meansd", "q1q3", "range")),
                       reference = "write2.tableby.html.Rmd",
                       title = "My test table", labelTranslations = list(sex = "SEX", age ="Age, yrs"), total = FALSE)
})
test_that("write2.modelsum -> HTML", {
  expect_write2_worked(write2html, modelsum(alk.phos ~ arm + ps + hgb, adjust= ~ age + sex, family = "gaussian", data = mockstudy),
                       reference = "write2.modelsum.html.Rmd",
                       title = "My test table", show.intercept = FALSE, digits = 5)
})
# Label set passed to the freqlist() calls below so the stored references
# still match.
old.labs <- c(cumFreq = "cumFreq", freqPercent = "freqPercent", cumPercent = "cumPercent")
test_that("write2.freqlist -> HTML", {
  expect_write2_worked(write2html, freqlist(table(mockstudy[c("arm", "sex", "mdquality.s")], useNA = "ifany"), strata = c("arm", "sex")),
                       reference = "write2.freqlist.html.Rmd", single = TRUE, labelTranslations = old.labs)
})
test_that("write2.freqlist -> doc", {
  expect_write2_worked(write2word, freqlist(table(mockstudy[c("arm", "sex", "mdquality.s")], useNA = "ifany"), strata = c("arm", "sex")),
                       reference = "write2.freqlist.doc.Rmd", single = TRUE, title = "My cool title", labelTranslations = old.labs)
})
## From the vignette
test_that("write2.list (summary objects) -> PDF", {
  mylist6 <- list(
    summary(tableby(sex ~ age, data = mockstudy), title = "A Title for tableby"),
    summary(modelsum(age ~ sex, data = mockstudy), title = "A Title for modelsum"),
    summary(freqlist(~ sex, data = mockstudy, labelTranslations = old.labs), title = "A Title for freqlist")
  )
  expect_write2_worked(write2pdf, mylist6, reference = "write2.multititles.pdf.Rmd")
})
###########################################################################################################
#### External output, commented out on 11/9/17 because of external package changes
###########################################################################################################
#
# test_that("write2.knitr_kable -> HTML", {
#   if(require(knitr))
#   {
#     expect_write2_worked(write2html, knitr::kable(head(mockstudy)), reference = "write2.kable.html.Rmd")
#   } else skip("library(knitr) not available.")
# })
#
# test_that("write2.xtable -> HTML", {
#   if(require(xtable))
#   {
#     expect_write2_worked(write2html, xtable::xtable(head(mockstudy), caption = "My xtable"), reference = "write2.xtable.html.Rmd",
#                          type = "html", comment = FALSE, include.rownames = FALSE, caption.placement = 'top')
#   } else skip("library(xtable) not available.")
# })
#
# test_that("write2.character (pander) -> HTML", {
#   if(require(pander))
#   {
#     expect_write2_worked(write2html, pander::pander_return(head(mockstudy)), reference = "write2.pander.html.Rmd")
#   } else skip("library(pander) not available.")
# })
#
###########################################################################################################
#### List output
###########################################################################################################
# Mixed lists of renderable objects exercise the write2.list method.
mylist <- list(tableby(sex ~ age, data = mockstudy, numeric.stats = c("meansd", "q1q3", "range")),
               freqlist(table(mockstudy[, c("sex", "arm")]), labelTranslations = old.labs),
               knitr::kable(utils::head(mockstudy)))
mylist2 <- list("# Header 1",
                "This is a small paragraph.",
                tableby(sex ~ age, data = mockstudy, numeric.stats = c("meansd", "q1q3", "range")))
test_that("write2.list -> PDF", {
  expect_write2_worked(write2pdf, mylist, reference = "write2.mylist.pdf.Rmd")
})
test_that("write2.list -> Word", {
  expect_write2_worked(write2word, mylist2, reference = "write2.mylist2.doc.Rmd")
})
test_that("write2.list recursion -> PDF", {
  # NOTE(review): calls write2word although the test name says PDF and the
  # reference is *.pdf.Rmd -- confirm this is intentional.
  expect_write2_worked(write2word, list(mylist2, mylist),
                       reference = "write2.mylists.pdf.Rmd")
})
###########################################################################################################
#### verbatim output
###########################################################################################################
# A plain summary.lm object exercises the write2 default (verbatim) method.
my.lm <- summary(lm(age ~ sex, data = mockstudy))
test_that("write2.default -> PDF", {
  expect_write2_worked(write2pdf, my.lm,
                       reference = "write2.lm.pdf.Rmd")
})
test_that("write2.verbatim -> html", {
  # NOTE(review): calls write2pdf although the test name says html and the
  # reference is *.html.Rmd -- confirm this is intentional.
  expect_write2_worked(write2pdf, verbatim(paste0("Hi.", 1:5)),
                       reference = "write2.char.html.Rmd")
})
test_that("Writing HTML from PDF works (#162)", {
  expect_write2_worked(write2pdf, list(
    "hi there",
    code.chunk(
      arsenal::write2html("hi there", "hi_there.html", clean = TRUE)
    )
  ), reference = "write2.render.html.Rmd")
})
###########################################################################################################
#### YAML output
###########################################################################################################
# mylist3 scatters yaml() entries through the list; mylist4 front-loads one
# combined yaml() entry.  Both are checked against the same reference below.
mylist3 <- list(
  "# Header 1",
  "This is a small paragraph.",
  tableby(sex ~ age, data = mockstudy, numeric.stats = c("meansd", "q1q3", "range")),
  yaml(title = "My title"),
  my.lm,
  yaml(author = "Ethan P Heinzen"),
  yaml("header-includes" = list("\\usepackage[labelformat=empty]{caption}")),
  code.chunk(a <- 1, "b <- 2", a + b, "a - b", chunk.opts = "r echo = FALSE, eval = TRUE")
)
mylist4 <- list(
  yaml(title = "My title", author = "Ethan P Heinzen", "header-includes" = list("\\usepackage[labelformat=empty]{caption}")),
  "# Header 1",
  "This is a small paragraph.",
  tableby(sex ~ age, data = mockstudy, numeric.stats = c("meansd", "q1q3", "range")),
  my.lm,
  code.chunk(a <- 1, "b <- 2", a + b, "a - b", chunk.opts = "r echo = FALSE, eval = TRUE")
)
test_that("write2.yaml -> PDF", {
  expect_write2_worked(write2pdf, mylist3, reference = "write2.yaml.pdf.Rmd")
  expect_write2_worked(write2pdf, mylist4, reference = "write2.yaml.pdf.Rmd")
})
###########################################################################################################
#### Code used to generate the files
###########################################################################################################
#
# write2html(tableby(arm ~ sex + age, data=mockstudy, numeric.stats = c("meansd", "q1q3", "range")), "tests/testthat/write2.tableby.html",
# title = "My test table", labelTranslations = list(sex = "SEX", age ="Age, yrs"), total = FALSE, render. = FALSE)
#
# write2html(modelsum(alk.phos ~ arm + ps + hgb, adjust= ~ age + sex, family = "gaussian", data = mockstudy),
# "tests/testthat/write2.modelsum.html",
# title = "My test table", show.intercept = FALSE, digits = 5, render. = FALSE)
#
# write2html(freqlist(table(mockstudy[, c("arm", "sex", "mdquality.s")], useNA = "ifany"), groupBy = c("arm", "sex")),
# "tests/testthat/write2.freqlist.html", single = TRUE, render. = FALSE)
#
# write2word(freqlist(table(mockstudy[, c("arm", "sex", "mdquality.s")], useNA = "ifany"), groupBy = c("arm", "sex")),
# "tests/testthat/write2.freqlist.doc", single = TRUE, title = "My cool title", render. = FALSE)
#
# write2pdf(mylist6, "tests/testthat/write2.multititles.pdf", render. = FALSE)
#
## write2html(knitr::kable(head(mockstudy)),
## "tests/testthat/write2.kable.html", render. = FALSE)
##
## write2html(xtable::xtable(head(mockstudy), caption = "My xtable"),
## "tests/testthat/write2.xtable.html",
## type = "html", comment = FALSE, include.rownames = FALSE, caption.placement = "top", render. = FALSE)
##
## write2html(pander::pander_return(head(mockstudy)),
## "tests/testthat/write2.pander.html", render. = FALSE)
#
#
# write2pdf(mylist, "tests/testthat/write2.mylist.pdf", render. = FALSE)
# write2word(mylist2, "tests/testthat/write2.mylist2.doc", render. = FALSE)
# write2pdf(list(mylist2, mylist), "tests/testthat/write2.mylists.pdf", render. = FALSE)
#
# write2pdf(my.lm, "tests/testthat/write2.lm.pdf", render. = FALSE)
# write2html(verbatim(paste0("Hi.", 1:5)),
# "tests/testthat/write2.char.html", render. = FALSE)
# write2pdf(mylist3, "tests/testthat/write2.yaml.pdf", render. = FALSE)
###########################################################################################################
#### Reported bugs for write2
###########################################################################################################
|
/tests/testthat/test_write2.R
|
no_license
|
ifendo/arsenal
|
R
| false
| false
| 9,542
|
r
|
## Tests for write2
## Each test renders an object through a write2*() wrapper and compares the
## generated .Rmd against a reference file resolved relative to the test dir.
context("Testing the write2 output")
data(mockstudy)
# Render `object` with the given write2 function and compare the generated
# .Rmd against the stored reference file.
#
# FUN       - a write2* function (write2html/write2word/write2pdf), resolved
#             via match.fun so a name or a function both work.
# object    - the object to render.
# reference - path of the checked-in .Rmd to compare against.
# ...       - extra arguments forwarded to FUN.
expect_write2_worked <- function(FUN, object, reference, ...)
{
  FUN <- match.fun(FUN)
  filename <- tempfile()
  rmd <- paste0(filename, ".Rmd")
  if(!file.exists(reference)) skip("Couldn't find the reference file.")
  if(!file.create(rmd)) skip("Couldn't create the temporary file.")
  # FIX: register the .Rmd cleanup only after the file actually exists.  The
  # original registered it before the skip() checks above, so a skipped test
  # ran expect_true(file.remove(<nonexistent file>)) in its exit handler and
  # recorded a spurious failure.
  on.exit(expect_true(file.remove(rmd)))
  # expect_error(..., NA) asserts the render call completes without error.
  expect_error(FUN(object, file = filename, ..., render. = TRUE, keep.rmd = TRUE, append. = FALSE, quiet = TRUE), NA)
  # The render also produces the output file at `filename`; remove it too.
  on.exit(expect_true(file.remove(filename)), add = TRUE)
  generated <- readLines(rmd)
  expect_output_file(cat(generated, sep = "\n"), reference)
}
###########################################################################################################
#### Internal output
###########################################################################################################
# Each test below builds an arsenal object, renders it with a write2*()
# wrapper, and diffs the generated .Rmd against a checked-in reference.
test_that("write2.tableby -> HTML", {
  expect_write2_worked(write2html, tableby(arm ~ sex + age, data=mockstudy, numeric.stats = c("meansd", "q1q3", "range")),
                       reference = "write2.tableby.html.Rmd",
                       title = "My test table", labelTranslations = list(sex = "SEX", age ="Age, yrs"), total = FALSE)
})
test_that("write2.modelsum -> HTML", {
  expect_write2_worked(write2html, modelsum(alk.phos ~ arm + ps + hgb, adjust= ~ age + sex, family = "gaussian", data = mockstudy),
                       reference = "write2.modelsum.html.Rmd",
                       title = "My test table", show.intercept = FALSE, digits = 5)
})
# Label set passed to the freqlist() calls below so the stored references
# still match.
old.labs <- c(cumFreq = "cumFreq", freqPercent = "freqPercent", cumPercent = "cumPercent")
test_that("write2.freqlist -> HTML", {
  expect_write2_worked(write2html, freqlist(table(mockstudy[c("arm", "sex", "mdquality.s")], useNA = "ifany"), strata = c("arm", "sex")),
                       reference = "write2.freqlist.html.Rmd", single = TRUE, labelTranslations = old.labs)
})
test_that("write2.freqlist -> doc", {
  expect_write2_worked(write2word, freqlist(table(mockstudy[c("arm", "sex", "mdquality.s")], useNA = "ifany"), strata = c("arm", "sex")),
                       reference = "write2.freqlist.doc.Rmd", single = TRUE, title = "My cool title", labelTranslations = old.labs)
})
## From the vignette
test_that("write2.list (summary objects) -> PDF", {
  mylist6 <- list(
    summary(tableby(sex ~ age, data = mockstudy), title = "A Title for tableby"),
    summary(modelsum(age ~ sex, data = mockstudy), title = "A Title for modelsum"),
    summary(freqlist(~ sex, data = mockstudy, labelTranslations = old.labs), title = "A Title for freqlist")
  )
  expect_write2_worked(write2pdf, mylist6, reference = "write2.multititles.pdf.Rmd")
})
###########################################################################################################
#### External output, commented out on 11/9/17 because of external package changes
###########################################################################################################
#
# test_that("write2.knitr_kable -> HTML", {
#   if(require(knitr))
#   {
#     expect_write2_worked(write2html, knitr::kable(head(mockstudy)), reference = "write2.kable.html.Rmd")
#   } else skip("library(knitr) not available.")
# })
#
# test_that("write2.xtable -> HTML", {
#   if(require(xtable))
#   {
#     expect_write2_worked(write2html, xtable::xtable(head(mockstudy), caption = "My xtable"), reference = "write2.xtable.html.Rmd",
#                          type = "html", comment = FALSE, include.rownames = FALSE, caption.placement = 'top')
#   } else skip("library(xtable) not available.")
# })
#
# test_that("write2.character (pander) -> HTML", {
#   if(require(pander))
#   {
#     expect_write2_worked(write2html, pander::pander_return(head(mockstudy)), reference = "write2.pander.html.Rmd")
#   } else skip("library(pander) not available.")
# })
#
###########################################################################################################
#### List output
###########################################################################################################
# Mixed lists of renderable objects exercise the write2.list method.
mylist <- list(tableby(sex ~ age, data = mockstudy, numeric.stats = c("meansd", "q1q3", "range")),
               freqlist(table(mockstudy[, c("sex", "arm")]), labelTranslations = old.labs),
               knitr::kable(utils::head(mockstudy)))
mylist2 <- list("# Header 1",
                "This is a small paragraph.",
                tableby(sex ~ age, data = mockstudy, numeric.stats = c("meansd", "q1q3", "range")))
test_that("write2.list -> PDF", {
  expect_write2_worked(write2pdf, mylist, reference = "write2.mylist.pdf.Rmd")
})
test_that("write2.list -> Word", {
  expect_write2_worked(write2word, mylist2, reference = "write2.mylist2.doc.Rmd")
})
test_that("write2.list recursion -> PDF", {
  # NOTE(review): calls write2word although the test name says PDF and the
  # reference is *.pdf.Rmd -- confirm this is intentional.
  expect_write2_worked(write2word, list(mylist2, mylist),
                       reference = "write2.mylists.pdf.Rmd")
})
###########################################################################################################
#### verbatim output
###########################################################################################################
# A plain summary.lm object exercises the write2 default (verbatim) method.
my.lm <- summary(lm(age ~ sex, data = mockstudy))
test_that("write2.default -> PDF", {
  expect_write2_worked(write2pdf, my.lm,
                       reference = "write2.lm.pdf.Rmd")
})
test_that("write2.verbatim -> html", {
  # NOTE(review): calls write2pdf although the test name says html and the
  # reference is *.html.Rmd -- confirm this is intentional.
  expect_write2_worked(write2pdf, verbatim(paste0("Hi.", 1:5)),
                       reference = "write2.char.html.Rmd")
})
test_that("Writing HTML from PDF works (#162)", {
  expect_write2_worked(write2pdf, list(
    "hi there",
    code.chunk(
      arsenal::write2html("hi there", "hi_there.html", clean = TRUE)
    )
  ), reference = "write2.render.html.Rmd")
})
###########################################################################################################
#### YAML output
###########################################################################################################
# mylist3 scatters yaml() entries through the list; mylist4 front-loads one
# combined yaml() entry.  Both are checked against the same reference below.
mylist3 <- list(
  "# Header 1",
  "This is a small paragraph.",
  tableby(sex ~ age, data = mockstudy, numeric.stats = c("meansd", "q1q3", "range")),
  yaml(title = "My title"),
  my.lm,
  yaml(author = "Ethan P Heinzen"),
  yaml("header-includes" = list("\\usepackage[labelformat=empty]{caption}")),
  code.chunk(a <- 1, "b <- 2", a + b, "a - b", chunk.opts = "r echo = FALSE, eval = TRUE")
)
mylist4 <- list(
  yaml(title = "My title", author = "Ethan P Heinzen", "header-includes" = list("\\usepackage[labelformat=empty]{caption}")),
  "# Header 1",
  "This is a small paragraph.",
  tableby(sex ~ age, data = mockstudy, numeric.stats = c("meansd", "q1q3", "range")),
  my.lm,
  code.chunk(a <- 1, "b <- 2", a + b, "a - b", chunk.opts = "r echo = FALSE, eval = TRUE")
)
test_that("write2.yaml -> PDF", {
  expect_write2_worked(write2pdf, mylist3, reference = "write2.yaml.pdf.Rmd")
  expect_write2_worked(write2pdf, mylist4, reference = "write2.yaml.pdf.Rmd")
})
###########################################################################################################
#### Code used to generate the files
###########################################################################################################
#
# write2html(tableby(arm ~ sex + age, data=mockstudy, numeric.stats = c("meansd", "q1q3", "range")), "tests/testthat/write2.tableby.html",
# title = "My test table", labelTranslations = list(sex = "SEX", age ="Age, yrs"), total = FALSE, render. = FALSE)
#
# write2html(modelsum(alk.phos ~ arm + ps + hgb, adjust= ~ age + sex, family = "gaussian", data = mockstudy),
# "tests/testthat/write2.modelsum.html",
# title = "My test table", show.intercept = FALSE, digits = 5, render. = FALSE)
#
# write2html(freqlist(table(mockstudy[, c("arm", "sex", "mdquality.s")], useNA = "ifany"), groupBy = c("arm", "sex")),
# "tests/testthat/write2.freqlist.html", single = TRUE, render. = FALSE)
#
# write2word(freqlist(table(mockstudy[, c("arm", "sex", "mdquality.s")], useNA = "ifany"), groupBy = c("arm", "sex")),
# "tests/testthat/write2.freqlist.doc", single = TRUE, title = "My cool title", render. = FALSE)
#
# write2pdf(mylist6, "tests/testthat/write2.multititles.pdf", render. = FALSE)
#
## write2html(knitr::kable(head(mockstudy)),
## "tests/testthat/write2.kable.html", render. = FALSE)
##
## write2html(xtable::xtable(head(mockstudy), caption = "My xtable"),
## "tests/testthat/write2.xtable.html",
## type = "html", comment = FALSE, include.rownames = FALSE, caption.placement = "top", render. = FALSE)
##
## write2html(pander::pander_return(head(mockstudy)),
## "tests/testthat/write2.pander.html", render. = FALSE)
#
#
# write2pdf(mylist, "tests/testthat/write2.mylist.pdf", render. = FALSE)
# write2word(mylist2, "tests/testthat/write2.mylist2.doc", render. = FALSE)
# write2pdf(list(mylist2, mylist), "tests/testthat/write2.mylists.pdf", render. = FALSE)
#
# write2pdf(my.lm, "tests/testthat/write2.lm.pdf", render. = FALSE)
# write2html(verbatim(paste0("Hi.", 1:5)),
# "tests/testthat/write2.char.html", render. = FALSE)
# write2pdf(mylist3, "tests/testthat/write2.yaml.pdf", render. = FALSE)
###########################################################################################################
#### Reported bugs for write2
###########################################################################################################
|
## Functions in this file allows you to cache previously computed matrix inverse and
## return without extra calculation. First call makeCacheMatrix to get an obj for
## caching and use the functions in the returned obj to get/set matrix and get its inverse
## given a matrix, return the functions for setting/getting value and getting/setting inverse of the matrix;
## Build a cache-enabled wrapper around a matrix.
## Returns a list of accessors: set/get for the matrix itself and
## set_inverse/get_inverse for its cached inverse. Replacing the
## matrix via set() clears the cached inverse.
makeCacheMatrix <- function(x = matrix()) {
  cached_inverse <- NULL
  list(
    set = function(y) {
      x <<- y
      cached_inverse <<- NULL
    },
    get = function() x,
    get_inverse = function() cached_inverse,
    set_inverse = function(inv_mat) cached_inverse <<- inv_mat
  )
}
## Give a cacheMatrix returned by makeCacheMatrix,
## this functions first try to get the cached inv result,
## if there's not, calculate the inverse and cache & return it
## Return the inverse of the matrix held in a makeCacheMatrix wrapper.
## A previously computed inverse is reused (announced via a message);
## otherwise the inverse is computed with solve(), stored in the
## wrapper's cache, and returned. Extra arguments go to solve().
cacheSolve <- function(x, ...) {
  hit <- x$get_inverse()
  if (is.null(hit)) {
    result <- solve(x$get(), ...)
    x$set_inverse(result)
    return(result)
  }
  message("getting cached data")
  hit
}
|
/cachematrix.R
|
no_license
|
wzy0421/ProgrammingAssignment2
|
R
| false
| false
| 1,187
|
r
|
## Functions in this file allows you to cache previously computed matrix inverse and
## return without extra calculation. First call makeCacheMatrix to get an obj for
## caching and use the functions in the returned obj to get/set matrix and get its inverse
## given a matrix, return the functions for setting/getting value and getting/setting inverse of the matrix;
## Build a cache-enabled wrapper around a matrix.
## Returns a list of accessors: set/get for the matrix itself and
## set_inverse/get_inverse for its cached inverse. Replacing the
## matrix via set() clears the cached inverse.
makeCacheMatrix <- function(x = matrix()) {
  cached_inverse <- NULL
  list(
    set = function(y) {
      x <<- y
      cached_inverse <<- NULL
    },
    get = function() x,
    get_inverse = function() cached_inverse,
    set_inverse = function(inv_mat) cached_inverse <<- inv_mat
  )
}
## Give a cacheMatrix returned by makeCacheMatrix,
## this functions first try to get the cached inv result,
## if there's not, calculate the inverse and cache & return it
## Return the inverse of the matrix held in a makeCacheMatrix wrapper.
## A previously computed inverse is reused (announced via a message);
## otherwise the inverse is computed with solve(), stored in the
## wrapper's cache, and returned. Extra arguments go to solve().
cacheSolve <- function(x, ...) {
  hit <- x$get_inverse()
  if (is.null(hit)) {
    result <- solve(x$get(), ...)
    x$set_inverse(result)
    return(result)
  }
  message("getting cached data")
  hit
}
|
#:# libraries
library(digest)
library(mlr)
library(OpenML)
library(farff)
#:# config
# Fix the RNG seed so resampling folds are reproducible
set.seed(1)
#:# data
# Download the "cpu_small" dataset from OpenML (requires network access)
dataset <- getOMLDataSet(data.name = "cpu_small")
head(dataset$data)
#:# preprocessing
# NOTE(review): no preprocessing is actually performed here; this line
# only previews the data a second time
head(dataset$data)
#:# model
# Binary classification task with regularized discriminant analysis,
# predicting class probabilities (needed for AUC below)
task = makeClassifTask(id = "task", data = dataset$data, target = "binaryClass")
lrn = makeLearner("classif.rda", par.vals = list(), predict.type = "prob")
#:# hash
#:# 09e6998493569014538d9f10170109c8
# Digest of the task + learner pair, used as the model's identifier
hash <- digest(list(task, lrn))
hash
#:# audit
# 5-fold cross-validation with several performance measures
cv <- makeResampleDesc("CV", iters = 5)
r <- mlr::resample(lrn, task, cv, measures = list(acc, auc, tnr, tpr, ppv, f1))
# NOTE(review): 'ACC' holds ALL aggregated measures, not accuracy alone
ACC <- r$aggr
ACC
#:# session info
# Record the session for reproducibility
sink(paste0("sessionInfo.txt"))
sessionInfo()
sink()
|
/models/openml_cpu_small/classification_binaryClass/09e6998493569014538d9f10170109c8/code.R
|
no_license
|
pysiakk/CaseStudies2019S
|
R
| false
| false
| 686
|
r
|
#:# libraries
library(digest)
library(mlr)
library(OpenML)
library(farff)
#:# config
# Fix the RNG seed so resampling folds are reproducible
set.seed(1)
#:# data
# Download the "cpu_small" dataset from OpenML (requires network access)
dataset <- getOMLDataSet(data.name = "cpu_small")
head(dataset$data)
#:# preprocessing
# NOTE(review): no preprocessing is actually performed here; this line
# only previews the data a second time
head(dataset$data)
#:# model
# Binary classification task with regularized discriminant analysis,
# predicting class probabilities (needed for AUC below)
task = makeClassifTask(id = "task", data = dataset$data, target = "binaryClass")
lrn = makeLearner("classif.rda", par.vals = list(), predict.type = "prob")
#:# hash
#:# 09e6998493569014538d9f10170109c8
# Digest of the task + learner pair, used as the model's identifier
hash <- digest(list(task, lrn))
hash
#:# audit
# 5-fold cross-validation with several performance measures
cv <- makeResampleDesc("CV", iters = 5)
r <- mlr::resample(lrn, task, cv, measures = list(acc, auc, tnr, tpr, ppv, f1))
# NOTE(review): 'ACC' holds ALL aggregated measures, not accuracy alone
ACC <- r$aggr
ACC
#:# session info
# Record the session for reproducibility
sink(paste0("sessionInfo.txt"))
sessionInfo()
sink()
|
#
# Copyright 2007-2020 by the individuals mentioned in the source code history
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
## S4 class for the maximum-likelihood fit function.
## Inherits shared fit-function slots from MxBaseFitFunction (including
## vector and rowDiagnostics, which the initialize method below fills)
## and adds ML-specific options. 'components' holds fit-function indices
## of sub-models when the expectation is a mixture or hidden Markov
## expectation (see genericFitFunConvert below).
setClass(Class = "MxFitFunctionML",
contains = "MxBaseFitFunction",
representation = representation(
fellner = "logical",
verbose = "integer",
profileOut="MxOptionalChar",
rowwiseParallel="logical",
jointConditionOn="character",
components="MxCharOrNumber"),
)
## Initializer: callNextMethod() fills inherited slots, then the first
## seven positional arguments (..1 through ..7) are assigned in the exact
## order used by the new() call inside mxFitFunctionML():
## vector, rowDiagnostics, fellner, verbose, profileOut,
## rowwiseParallel, jointConditionOn. Positional -- do not reorder.
setMethod("initialize", "MxFitFunctionML",
function(.Object, ...) {
.Object <- callNextMethod()
.Object@vector <- ..1
.Object@rowDiagnostics <- ..2
.Object@fellner <- ..3
.Object@verbose <- ..4
.Object@profileOut <- ..5
.Object@rowwiseParallel <- ..6
.Object@jointConditionOn <- ..7
.Object
})
## Validation pass over the flattened model: 'vector' = TRUE and
## 'rowDiagnostics' = TRUE produce per-row results and therefore require
## raw (not covariance/summary) data. Errors out with a descriptive
## message otherwise; the flat model itself is returned unchanged.
setMethod("genericFitConvertEntities", "MxFitFunctionML",
function(.Object, flatModel, namespace, labelsData) {
name <- .Object@name
modelname <- imxReverseIdentifier(flatModel, .Object@name)[[1]]
expectName <- paste(modelname, "expectation", sep=".")
expectation <- flatModel@expectations[[expectName]]
dataname <- expectation@data
if (flatModel@datasets[[dataname]]@type != 'raw') {
if (.Object@vector) {
modelname <- getModelName(.Object)
msg <- paste("The ML fit function",
"in model", omxQuotes(modelname), "has specified",
"'vector' = TRUE, but the observed data is not raw data")
stop(msg, call.=FALSE)
}
if (.Object@rowDiagnostics) {
modelname <- getModelName(.Object)
msg <- paste("The ML fit function",
"in model", omxQuotes(modelname), "has specified",
"'rowDiagnostics' = TRUE, but the observed data is not raw data")
stop(msg, call.=FALSE)
}
}
return(flatModel)
})
## Backend conversion: resolve this fit function's expectation to an
## integer index (NA when the model has no matching expectation). For
## hidden-Markov and mixture expectations, additionally record the
## indices of the component models' fit functions in 'components'.
## Note the 1L + expectIndex: imxLocateIndex returns a 0-based index
## for the backend, while R lists are 1-based.
setMethod("genericFitFunConvert", "MxFitFunctionML",
function(.Object, flatModel, model, labelsData, dependencies) {
.Object <- callNextMethod()
name <- .Object@name
modelname <- imxReverseIdentifier(model, .Object@name)[[1]]
expectName <- paste(modelname, "expectation", sep=".")
if (expectName %in% names(flatModel@expectations)) {
expectIndex <- imxLocateIndex(flatModel, expectName, name)
ex <- flatModel@expectations[[1L + expectIndex]]
if (is(ex, "MxExpectationHiddenMarkov") || is(ex, "MxExpectationMixture")) {
.Object@components <-
sapply(paste(ex@components, "fitfunction", sep="."),
function(ff) imxLocateIndex(flatModel, ff, name),
USE.NAMES = FALSE)
}
} else {
expectIndex <- as.integer(NA)
}
.Object@expectation <- expectIndex
return(.Object)
})
## Placeholder for the fit result before the model is run.
## Normal case: a 1x1 NA matrix (a single fit value). With
## 'vector' = TRUE: one NA per data row, which requires an expectation
## with raw data -- each invalid combination gets its own error message.
setMethod("genericFitInitialMatrix", "MxFitFunctionML",
function(.Object, flatModel) {
flatFitFunction <- flatModel@fitfunctions[[.Object@name]]
if (flatFitFunction@vector == FALSE) {
return(matrix(as.double(NA), 1, 1))
} else {
modelname <- imxReverseIdentifier(flatModel, flatFitFunction@name)[[1]]
expectationName <- paste(modelname, "expectation", sep = ".")
expectation <- flatModel@expectations[[expectationName]]
if (is.null(expectation)) {
msg <- paste("The ML fit function has vector = TRUE",
"and a missing expectation in the model",
omxQuotes(modelname))
stop(msg, call.=FALSE)
}
if (is.na(expectation@data)) {
msg <- paste("The ML fit function has vector = TRUE",
"and an expectation function with no data in the model",
omxQuotes(modelname))
stop(msg, call.=FALSE)
}
mxDataObject <- flatModel@datasets[[expectation@data]]
if (mxDataObject@type != 'raw') {
msg <- paste("The dataset associated with the ML expectation function",
"in model", omxQuotes(modelname), "is not raw data.")
stop(msg, call.=FALSE)
}
rows <- nrow(mxDataObject@observed)
return(matrix(as.double(NA), rows, 1))
}
})
## Build reference (saturated/independence) models for this model's data.
## BA81 (IFA) expectations are delegated to generateIFAReferenceModels;
## GREML is unsupported. Otherwise the data are treated as multivariate
## normal. For a model that has been run, only the variables recorded in
## .runDims are used (covariance data are subset on both dimensions, raw
## data on columns); an unrun model uses all variables, with a message.
setMethod("generateReferenceModels", "MxFitFunctionML",
function(.Object, model, distribution, equateThresholds) {
modelName <- model@name
datasource <- model$data
if (is.null(datasource)) {
stop(paste("Model", omxQuotes(modelName), "does not contain any data"))
}
expectation <- model@expectation
if (is(expectation, "MxExpectationBA81")) {
return(generateIFAReferenceModels(model, distribution))
}
if(is(expectation, "MxExpectationGREML")){
stop("Reference models for GREML expectation are not implemented")
}
# assume it's multivariate Normal
datatype <- datasource@type
obsdata <- datasource@observed
datanobs <- datasource@numObs
wasRun <- model@.wasRun
if(wasRun) {
if (is.null(model@expectation@.runDims)) stop("Not clear which data were used to fit model")
selVars <- model@expectation@.runDims
if(nrow(obsdata) == ncol(obsdata)){
if(!single.na(model@expectation@.runDims)) { obsdata <- obsdata[selVars, selVars] }
#variable subsets are not run for covariance data
#consequently, selVars are only used when runDims are provided.
} else { obsdata <- obsdata[,selVars, drop=FALSE] }
} else {
message(paste("The model", omxQuotes(modelName), "has not been run. So reference models",
"of all the variables in the data will be made. For reference models",
"of only the variables used in the model, provide the model after it has been run."))
}
generateNormalReferenceModels(modelName, obsdata, datatype, any(!is.na(datasource@means)),
datanobs, datasource@means, distribution=distribution,
equateThresholds)
})
## Construct an MxFitFunctionML object after validating its arguments.
## 'vector' and 'rowDiagnostics' must be logical scalars, 'fellner'
## must have length one and is incompatible with the per-row options,
## and 'jointConditionOn' is matched against its allowed choices.
## Positional arguments beyond the first two are disallowed.
mxFitFunctionML <- function(vector = FALSE, rowDiagnostics=FALSE, ..., fellner=as.logical(NA),
                            verbose=0L, profileOut=c(), rowwiseParallel=as.logical(NA),
                            jointConditionOn=c('auto', 'ordinal', 'continuous')) {
  prohibitDotdotdot(list(...))
  # Same predicate the original checks spelled out inline:
  # reject length > 1 or non-logical storage
  notFlag <- function(arg) length(arg) > 1 || typeof(arg) != "logical"
  if (notFlag(vector)) {
    stop("'vector' argument is not a logical value")
  }
  if (notFlag(rowDiagnostics)) {
    stop("'rowDiagnostics' argument is not a logical value")
  }
  if (length(fellner) > 1) {
    stop("'fellner' argument must be one thing")
  }
  if (!is.na(fellner) && fellner && (vector || rowDiagnostics)) {
    stop("'fellner' cannot be combined with 'vector' or 'rowDiagnostics'")
  }
  jointConditionOn <- match.arg(jointConditionOn)
  # Positional order must match the initialize method (..1 .. ..7)
  new("MxFitFunctionML", vector, rowDiagnostics, fellner,
      as.integer(verbose), as.character(profileOut), rowwiseParallel,
      jointConditionOn)
}
## Print a human-readable summary of an MxFitFunctionML object's option
## slots and the head of its result vector (with "..." when longer than
## six elements); returns the object invisibly.
displayMxFitFunctionML <- function(fitfunction) {
cat("MxFitFunctionML", omxQuotes(fitfunction@name), '\n')
cat("$vector :", fitfunction@vector, '\n')
cat("$rowDiagnostics :", fitfunction@rowDiagnostics, '\n')
cat("$fellner :", fitfunction@fellner, '\n')
cat("$verbose :", fitfunction@verbose, '\n')
cat("$rowwiseParallel :", fitfunction@rowwiseParallel, '\n')
cat("$jointConditionOn :", fitfunction@jointConditionOn, '\n')
cat("$result :", head(fitfunction@result),
ifelse(length(fitfunction@result)>6, "...", ""), '\n')
invisible(fitfunction)
}
## Route both print() and show() through the shared display helper
setMethod("print", "MxFitFunctionML", function(x, ...) {
displayMxFitFunctionML(x)
})
setMethod("show", "MxFitFunctionML", function(object) {
displayMxFitFunctionML(object)
})
|
/R/MxFitFunctionML.R
|
no_license
|
mirkoruks/OpenMx
|
R
| false
| false
| 7,568
|
r
|
#
# Copyright 2007-2020 by the individuals mentioned in the source code history
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
## S4 class for the maximum-likelihood fit function.
## Inherits shared fit-function slots from MxBaseFitFunction (including
## vector and rowDiagnostics, which the initialize method below fills)
## and adds ML-specific options. 'components' holds fit-function indices
## of sub-models when the expectation is a mixture or hidden Markov
## expectation (see genericFitFunConvert below).
setClass(Class = "MxFitFunctionML",
contains = "MxBaseFitFunction",
representation = representation(
fellner = "logical",
verbose = "integer",
profileOut="MxOptionalChar",
rowwiseParallel="logical",
jointConditionOn="character",
components="MxCharOrNumber"),
)
## Initializer: callNextMethod() fills inherited slots, then the first
## seven positional arguments (..1 through ..7) are assigned in the exact
## order used by the new() call inside mxFitFunctionML():
## vector, rowDiagnostics, fellner, verbose, profileOut,
## rowwiseParallel, jointConditionOn. Positional -- do not reorder.
setMethod("initialize", "MxFitFunctionML",
function(.Object, ...) {
.Object <- callNextMethod()
.Object@vector <- ..1
.Object@rowDiagnostics <- ..2
.Object@fellner <- ..3
.Object@verbose <- ..4
.Object@profileOut <- ..5
.Object@rowwiseParallel <- ..6
.Object@jointConditionOn <- ..7
.Object
})
## Validation pass over the flattened model: 'vector' = TRUE and
## 'rowDiagnostics' = TRUE produce per-row results and therefore require
## raw (not covariance/summary) data. Errors out with a descriptive
## message otherwise; the flat model itself is returned unchanged.
setMethod("genericFitConvertEntities", "MxFitFunctionML",
function(.Object, flatModel, namespace, labelsData) {
name <- .Object@name
modelname <- imxReverseIdentifier(flatModel, .Object@name)[[1]]
expectName <- paste(modelname, "expectation", sep=".")
expectation <- flatModel@expectations[[expectName]]
dataname <- expectation@data
if (flatModel@datasets[[dataname]]@type != 'raw') {
if (.Object@vector) {
modelname <- getModelName(.Object)
msg <- paste("The ML fit function",
"in model", omxQuotes(modelname), "has specified",
"'vector' = TRUE, but the observed data is not raw data")
stop(msg, call.=FALSE)
}
if (.Object@rowDiagnostics) {
modelname <- getModelName(.Object)
msg <- paste("The ML fit function",
"in model", omxQuotes(modelname), "has specified",
"'rowDiagnostics' = TRUE, but the observed data is not raw data")
stop(msg, call.=FALSE)
}
}
return(flatModel)
})
## Backend conversion: resolve this fit function's expectation to an
## integer index (NA when the model has no matching expectation). For
## hidden-Markov and mixture expectations, additionally record the
## indices of the component models' fit functions in 'components'.
## Note the 1L + expectIndex: imxLocateIndex returns a 0-based index
## for the backend, while R lists are 1-based.
setMethod("genericFitFunConvert", "MxFitFunctionML",
function(.Object, flatModel, model, labelsData, dependencies) {
.Object <- callNextMethod()
name <- .Object@name
modelname <- imxReverseIdentifier(model, .Object@name)[[1]]
expectName <- paste(modelname, "expectation", sep=".")
if (expectName %in% names(flatModel@expectations)) {
expectIndex <- imxLocateIndex(flatModel, expectName, name)
ex <- flatModel@expectations[[1L + expectIndex]]
if (is(ex, "MxExpectationHiddenMarkov") || is(ex, "MxExpectationMixture")) {
.Object@components <-
sapply(paste(ex@components, "fitfunction", sep="."),
function(ff) imxLocateIndex(flatModel, ff, name),
USE.NAMES = FALSE)
}
} else {
expectIndex <- as.integer(NA)
}
.Object@expectation <- expectIndex
return(.Object)
})
## Placeholder for the fit result before the model is run.
## Normal case: a 1x1 NA matrix (a single fit value). With
## 'vector' = TRUE: one NA per data row, which requires an expectation
## with raw data -- each invalid combination gets its own error message.
setMethod("genericFitInitialMatrix", "MxFitFunctionML",
function(.Object, flatModel) {
flatFitFunction <- flatModel@fitfunctions[[.Object@name]]
if (flatFitFunction@vector == FALSE) {
return(matrix(as.double(NA), 1, 1))
} else {
modelname <- imxReverseIdentifier(flatModel, flatFitFunction@name)[[1]]
expectationName <- paste(modelname, "expectation", sep = ".")
expectation <- flatModel@expectations[[expectationName]]
if (is.null(expectation)) {
msg <- paste("The ML fit function has vector = TRUE",
"and a missing expectation in the model",
omxQuotes(modelname))
stop(msg, call.=FALSE)
}
if (is.na(expectation@data)) {
msg <- paste("The ML fit function has vector = TRUE",
"and an expectation function with no data in the model",
omxQuotes(modelname))
stop(msg, call.=FALSE)
}
mxDataObject <- flatModel@datasets[[expectation@data]]
if (mxDataObject@type != 'raw') {
msg <- paste("The dataset associated with the ML expectation function",
"in model", omxQuotes(modelname), "is not raw data.")
stop(msg, call.=FALSE)
}
rows <- nrow(mxDataObject@observed)
return(matrix(as.double(NA), rows, 1))
}
})
## Build reference (saturated/independence) models for this model's data.
## BA81 (IFA) expectations are delegated to generateIFAReferenceModels;
## GREML is unsupported. Otherwise the data are treated as multivariate
## normal. For a model that has been run, only the variables recorded in
## .runDims are used (covariance data are subset on both dimensions, raw
## data on columns); an unrun model uses all variables, with a message.
setMethod("generateReferenceModels", "MxFitFunctionML",
function(.Object, model, distribution, equateThresholds) {
modelName <- model@name
datasource <- model$data
if (is.null(datasource)) {
stop(paste("Model", omxQuotes(modelName), "does not contain any data"))
}
expectation <- model@expectation
if (is(expectation, "MxExpectationBA81")) {
return(generateIFAReferenceModels(model, distribution))
}
if(is(expectation, "MxExpectationGREML")){
stop("Reference models for GREML expectation are not implemented")
}
# assume it's multivariate Normal
datatype <- datasource@type
obsdata <- datasource@observed
datanobs <- datasource@numObs
wasRun <- model@.wasRun
if(wasRun) {
if (is.null(model@expectation@.runDims)) stop("Not clear which data were used to fit model")
selVars <- model@expectation@.runDims
if(nrow(obsdata) == ncol(obsdata)){
if(!single.na(model@expectation@.runDims)) { obsdata <- obsdata[selVars, selVars] }
#variable subsets are not run for covariance data
#consequently, selVars are only used when runDims are provided.
} else { obsdata <- obsdata[,selVars, drop=FALSE] }
} else {
message(paste("The model", omxQuotes(modelName), "has not been run. So reference models",
"of all the variables in the data will be made. For reference models",
"of only the variables used in the model, provide the model after it has been run."))
}
generateNormalReferenceModels(modelName, obsdata, datatype, any(!is.na(datasource@means)),
datanobs, datasource@means, distribution=distribution,
equateThresholds)
})
## Construct an MxFitFunctionML object after validating its arguments.
## 'vector' and 'rowDiagnostics' must be logical scalars, 'fellner'
## must have length one and is incompatible with the per-row options,
## and 'jointConditionOn' is matched against its allowed choices.
mxFitFunctionML <- function(vector = FALSE, rowDiagnostics=FALSE, ..., fellner=as.logical(NA),
verbose=0L, profileOut=c(), rowwiseParallel=as.logical(NA),
jointConditionOn=c('auto', 'ordinal', 'continuous')) {
prohibitDotdotdot(list(...))
if (length(vector) > 1 || typeof(vector) != "logical") {
stop("'vector' argument is not a logical value")
}
if (length(rowDiagnostics) > 1 || typeof(rowDiagnostics) != "logical") {
stop("'rowDiagnostics' argument is not a logical value")
}
if (length(fellner) > 1) {
stop("'fellner' argument must be one thing")
}
if (!is.na(fellner) && fellner && (vector || rowDiagnostics)) {
stop("'fellner' cannot be combined with 'vector' or 'rowDiagnostics'")
}
jointConditionOn <- match.arg(jointConditionOn)
# Positional order must match the initialize method (..1 .. ..7)
return(new("MxFitFunctionML", vector, rowDiagnostics, fellner,
as.integer(verbose), as.character(profileOut), rowwiseParallel,
jointConditionOn))
}
## Print a human-readable summary of an MxFitFunctionML object's option
## slots and the head of its result vector (with "..." when longer than
## six elements); returns the object invisibly.
displayMxFitFunctionML <- function(fitfunction) {
cat("MxFitFunctionML", omxQuotes(fitfunction@name), '\n')
cat("$vector :", fitfunction@vector, '\n')
cat("$rowDiagnostics :", fitfunction@rowDiagnostics, '\n')
cat("$fellner :", fitfunction@fellner, '\n')
cat("$verbose :", fitfunction@verbose, '\n')
cat("$rowwiseParallel :", fitfunction@rowwiseParallel, '\n')
cat("$jointConditionOn :", fitfunction@jointConditionOn, '\n')
cat("$result :", head(fitfunction@result),
ifelse(length(fitfunction@result)>6, "...", ""), '\n')
invisible(fitfunction)
}
## Route both print() and show() through the shared display helper
setMethod("print", "MxFitFunctionML", function(x, ...) {
displayMxFitFunctionML(x)
})
setMethod("show", "MxFitFunctionML", function(object) {
displayMxFitFunctionML(object)
})
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/query.site.R
\name{query.site}
\alias{query.site}
\title{Given site_id, return site table}
\usage{
query.site(site.id, con)
}
\arguments{
\item{site.id}{The id of the site}
\item{con}{Database connection.}
}
\description{
Given site_id, return site table
}
\author{
Betsy Cowdery
}
|
/base/db/man/query.site.Rd
|
permissive
|
PecanProject/pecan
|
R
| false
| true
| 362
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/query.site.R
\name{query.site}
\alias{query.site}
\title{Given site_id, return site table}
\usage{
query.site(site.id, con)
}
\arguments{
\item{site.id}{The id of the site}
\item{con}{Database connection.}
}
\description{
Given site_id, return site table
}
\author{
Betsy Cowdery
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/velox.R
\name{velox}
\alias{velox}
\title{Create a VeloxRaster object}
\usage{
velox(x, extent = NULL, res = NULL, crs = NULL)
}
\arguments{
\item{x}{A RasterLayer, RasterStack, matrix, list of matrices, list of VeloxRaster objects,
or character string pointing to a GDAL-readable file.}
\item{extent}{An \code{extent} object or a numeric vector of length 4. Required if \code{x} is a matrix or list
of matrices, ignored otherwise.}
\item{res}{The x and y resolution of the raster as a numeric vector of length 2. Required if \code{x} is a matrix or list
of matrices, ignored otherwise.}
\item{crs}{Optional. A character string describing a projection and datum in the PROJ.4 format.
Ignored if \code{x} is a Raster* object.}
}
\value{
A VeloxRaster object.
}
\description{
\code{velox} creates a VeloxRaster object.
}
\details{
Creates a VeloxRaster object. Note that VeloxRaster objects are Reference Class objects and thus mutable.
Hence, the usual R copy on modify semantics do not apply.
Note that if \code{x} is a list of VeloxRasters, the \code{extent} and \code{crs} attributes are copied
from the first list element.
}
\examples{
## Create VeloxRaster from list of matrices
mat1 <- matrix(1:100, 10, 10)
mat2 <- matrix(100:1, 10, 10)
mat.ls <- list(mat1, mat2)
vx <- velox(mat.ls, extent=c(0,1,0,1), res=c(0.1,0.1), crs="+proj=longlat +datum=WGS84 +no_defs")
}
|
/man/velox.Rd
|
no_license
|
markwestcott34/velox
|
R
| false
| true
| 1,454
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/velox.R
\name{velox}
\alias{velox}
\title{Create a VeloxRaster object}
\usage{
velox(x, extent = NULL, res = NULL, crs = NULL)
}
\arguments{
\item{x}{A RasterLayer, RasterStack, matrix, list of matrices, list of VeloxRaster objects,
or character string pointing to a GDAL-readable file.}
\item{extent}{An \code{extent} object or a numeric vector of length 4. Required if \code{x} is a matrix or list
of matrices, ignored otherwise.}
\item{res}{The x and y resolution of the raster as a numeric vector of length 2. Required if \code{x} is a matrix or list
of matrices, ignored otherwise.}
\item{crs}{Optional. A character string describing a projection and datum in the PROJ.4 format.
Ignored if \code{x} is a Raster* object.}
}
\value{
A VeloxRaster object.
}
\description{
\code{velox} creates a VeloxRaster object.
}
\details{
Creates a VeloxRaster object. Note that VeloxRaster objects are Reference Class objects and thus mutable.
Hence, the usual R copy on modify semantics do not apply.
Note that if \code{x} is a list of VeloxRasters, the \code{extent} and \code{crs} attributes are copied
from the first list element.
}
\examples{
## Create VeloxRaster from list of matrices
mat1 <- matrix(1:100, 10, 10)
mat2 <- matrix(100:1, 10, 10)
mat.ls <- list(mat1, mat2)
vx <- velox(mat.ls, extent=c(0,1,0,1), res=c(0.1,0.1), crs="+proj=longlat +datum=WGS84 +no_defs")
}
|
# Criminological investigation of forensic glass (fgl) data:
# compare random forest and SVM error rates via repeated cross-validation.

# Install required packages only when they are missing
for (pkg in c("randomForest", "MASS")) {
  if (!requireNamespace(pkg, quietly = TRUE)) install.packages(pkg)
}
library(randomForest)
library(MASS)
data(fgl)
# Checking the dataset
str(fgl)
# Setting the seed for reproducibility
set.seed(17)
# Fit a random forest on all predictors (progress printed every 100 trees)
fgl.rf <- randomForest(type ~ . , data = fgl, mtry = 2, importance = TRUE, do.trace = 100)
print(fgl.rf)
# ipred::errorest gives cross-validated misclassification estimates
library(ipred)
set.seed(131)
# 10 repetitions of 10-fold cross-validation for the random forest
error.RF <- numeric(10)
for (i in seq_len(10)) {
  error.RF[i] <- errorest(type ~ . , data = fgl, model = randomForest, mtry = 2)$error
}
summary(error.RF)
# e1071 provides the SVM implementation
library(e1071)
set.seed(563)
# 10 repetitions of 10-fold cross-validation for the SVM.
# Bug fix: the svm() tuning parameter is 'cost', not 'coat' -- the
# original typo meant the intended cost of 10 was never applied.
error.SVM <- numeric(10)
for (i in seq_len(10)) {
  error.SVM[i] <- errorest(type ~ . , data = fgl, model = svm, cost = 10, gamma = 1.5)$error
}
summary(error.SVM)
# Plot the four variable-importance measures to guide simpler models
# ('decreasing' spelled out instead of relying on partial matching)
par(mfrow = c(2, 2))
for (i in 1:4) {
  plot(sort(fgl.rf$importance[, i], decreasing = TRUE), type = "h", main = paste("Measure", i))
}
|
/Pred8_criminological investigation of glass.R
|
no_license
|
mayank100sharma/Criminological-investigation-of-glass
|
R
| false
| false
| 1,007
|
r
|
# Criminological investigation of forensic glass (fgl) data:
# compare random forest and SVM error rates via repeated cross-validation.

# Install required packages only when they are missing
for (pkg in c("randomForest", "MASS")) {
  if (!requireNamespace(pkg, quietly = TRUE)) install.packages(pkg)
}
library(randomForest)
library(MASS)
data(fgl)
# Checking the dataset
str(fgl)
# Setting the seed for reproducibility
set.seed(17)
# Fit a random forest on all predictors (progress printed every 100 trees)
fgl.rf <- randomForest(type ~ . , data = fgl, mtry = 2, importance = TRUE, do.trace = 100)
print(fgl.rf)
# ipred::errorest gives cross-validated misclassification estimates
library(ipred)
set.seed(131)
# 10 repetitions of 10-fold cross-validation for the random forest
error.RF <- numeric(10)
for (i in seq_len(10)) {
  error.RF[i] <- errorest(type ~ . , data = fgl, model = randomForest, mtry = 2)$error
}
summary(error.RF)
# e1071 provides the SVM implementation
library(e1071)
set.seed(563)
# 10 repetitions of 10-fold cross-validation for the SVM.
# Bug fix: the svm() tuning parameter is 'cost', not 'coat' -- the
# original typo meant the intended cost of 10 was never applied.
error.SVM <- numeric(10)
for (i in seq_len(10)) {
  error.SVM[i] <- errorest(type ~ . , data = fgl, model = svm, cost = 10, gamma = 1.5)$error
}
summary(error.SVM)
# Plot the four variable-importance measures to guide simpler models
# ('decreasing' spelled out instead of relying on partial matching)
par(mfrow = c(2, 2))
for (i in 1:4) {
  plot(sort(fgl.rf$importance[, i], decreasing = TRUE), type = "h", main = paste("Measure", i))
}
|
## Caching the Inverse of a matrix
## Matrix inversion is usually a costly computation and there may be some benefit to caching the inverse of a matrix rather than
## computing it repeatedly
## makeCacheMatrix creates a list containing a function to
## 1. set the value of the matrix
## 2. get the value of the matrix
## 3. set the value of inverse of the matrix
## 4. get the value of inverse of the matrix
## Create a special "matrix" object able to cache its inverse.
## The returned list exposes set/get for the matrix and
## setInverse/getInverse for the cached inverse; replacing the matrix
## via set() invalidates the cache.
makeCacheMatrix <- function(x = matrix()) {
  cached <- NULL
  list(
    set = function(y) {
      x <<- y
      cached <<- NULL
    },
    get = function() x,
    setInverse = function(inverse) cached <<- inverse,
    getInverse = function() cached
  )
}
## This function computes the inverse of the special "matrix" returned by makeCacheMatrix above. If the inverse has already been
## calculated (and the matrix has not changed), then the cachesolve should retrieve the inverse from the cache.
## This function assumes that the matrix is always invertible.
## Return the inverse of the special "matrix" made by makeCacheMatrix.
## A cached inverse is reused (announced with a message); otherwise the
## inverse is computed with solve(), stored, and returned. Assumes the
## matrix is invertible; extra arguments are passed to solve().
cacheSolve <- function(x, ...) {
  hit <- x$getInverse()
  if (is.null(hit)) {
    hit <- solve(x$get(), ...)
    x$setInverse(hit)
    return(hit)
  }
  message("Getting the cached data")
  hit
}
|
/cachematrix.R
|
no_license
|
ParamitaBasu/ProgrammingAssignment2
|
R
| false
| false
| 1,586
|
r
|
## Caching the Inverse of a matrix
## Matrix inversion is usually a costly computation and there may be some benefit to caching the inverse of a matrix rather than
## computing it repeatedly
## makeCacheMatrix creates a list containing a function to
## 1. set the value of the matrix
## 2. get the value of the matrix
## 3. set the value of inverse of the matrix
## 4. get the value of inverse of the matrix
## Create a special "matrix" object able to cache its inverse.
## The returned list exposes set/get for the matrix and
## setInverse/getInverse for the cached inverse; replacing the matrix
## via set() invalidates the cache.
makeCacheMatrix <- function(x = matrix()) {
  cached <- NULL
  list(
    set = function(y) {
      x <<- y
      cached <<- NULL
    },
    get = function() x,
    setInverse = function(inverse) cached <<- inverse,
    getInverse = function() cached
  )
}
## This function computes the inverse of the special "matrix" returned by makeCacheMatrix above. If the inverse has already been
## calculated (and the matrix has not changed), then the cachesolve should retrieve the inverse from the cache.
## This function assumes that the matrix is always invertible.
## Return the inverse of the special "matrix" made by makeCacheMatrix.
## A cached inverse is reused (announced with a message); otherwise the
## inverse is computed with solve(), stored, and returned. Assumes the
## matrix is invertible; extra arguments are passed to solve().
cacheSolve <- function(x, ...) {
  hit <- x$getInverse()
  if (is.null(hit)) {
    hit <- solve(x$get(), ...)
    x$setInverse(hit)
    return(hit)
  }
  message("Getting the cached data")
  hit
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/Xing.R
\name{XingMethod}
\alias{XingMethod}
\title{This function computes Xing's global distance metric learning classification
algorithm.}
\usage{
XingMethod(Y, X, S = NULL, D = NULL,
learning_rate = 0.1, epsilon = 0.01,
error = 1e-10, max_iterations = 1000)
}
\arguments{
\item{Y}{vector of non-negative integer labels corresponding to each data point.}
\item{X}{Input numeric matrix where each row is a data point whose
label is the corresponding entry in \code{Y} and each column is a
variable.}
\item{S}{A \code{n * 2} similarity matrix describing the constraints on pairs of data points
with the same class label.
Each row of the matrix is a pair of indices of two data points in \code{X}
which belong to the same class. For example, pair(1, 3)
says that the first data point is in the same class as the third
data point. Default value is \code{S = NULL} in which case
\code{S} is computed in full. Use this parameter to define a smaller
similarity matrix which is appropriate to your given problem e.g via sampling
methods. The indices in \code{S} should range between 1 and \code{nrow(X)}.}
\item{D}{A \code{n * 2} dissimilarity matrix describing the constraints on pairs of data points
with a different class label.
Each row of the matrix is a pair of indices of two data points in \code{X}
which belong to different classes. For example, pair(1, 3)
says that the first data point is in a different class than the third
data point. Default value is \code{D = NULL} in which case
\code{D} is computed in full. Use this parameter to define a smaller dissimilarity
matrix which is appropriate to your given problem e.g via sampling
methods. The indices in \code{D} should range between 1 and \code{nrow(X)}.}
\item{learning_rate}{The learning rate to be used in the solver. Default value is
is \code{learning_rate = 0.1}.}
\item{epsilon}{Threshold for convergence of the gradient method. Default value is
\code{epsilon = 0.01}.}
\item{error}{Threshold to be used when projecting onto the constraint
set. Default value is \code{error = 1e-10}.}
\item{max_iterations}{The maximum number of iterations to be processed
in the solver. Default value is \code{max_iterations = 1000}.}
}
\value{
This function returns a list with the following items:
\item{XingTransform}{The matrix under which the data was transformed. The
multiplication of this matrix with its transpose gives
the matrix used in the Mahalanobis
metric.}
\item{TransformedX}{The transformed original data \code{X} which was transformed
using the Xing Transform i.e \eqn{TransformedX = X *
XingTransform}}.
}
\description{
This function computes Xing's global distance metric learning
classification algorithm as described in [1]. See the Vignette
by using the command \code{browseVignette("DistanceLearning")}
for an introduction to using Xing's global
distance metric learning method.
}
\details{
See the Vignette by using the command
\code{browseVignette("DistanceLearning")}
for an introduction to using Xing's method.
}
\examples{
# Load data from package DistanceLearning
library(DistanceLearning)
library(class)
fname <- system.file("extdata", "example_data.csv", package="DistanceLearning")
df <- read.csv(fname)
Y <- as.integer(df$y)
X <- as.matrix(df[,c(2,3)])
sample_points <- sample(1:nrow(X), 180, replace = FALSE)
subX <- X[sample_points,]
subY <- Y[sample_points]
# Learn the metric, and get the transformed data
result <- XingMethod(subY, subX)
XingMetric <- result$XingTransform
transformedX <- result$TransformedX
# Get the accuracy of KNN classification without applying the new metric
yhat <- knn(subX, X[-sample_points,], subY, k = 5)
Accuracy <- length(which(Y[-sample_points] == yhat))/length(Y[-sample_points])
Accuracy
# Get the accuracy of KNN classification after applying the new metric
transformednewX <- X[-sample_points,] \%*\% XingMetric
yhat2 <- knn(transformedX, transformednewX, subY, k = 5)
Accuracy2 <- length(which(Y[-sample_points] == yhat2))/length(Y[-sample_points])
Accuracy2
}
\references{
[1] Eric P. Xing, Michael I. Jordan, Stuart J Russell, and
Andrew Y. Ng. Distance Metric Learning with Application
to Clustering with Side-Information. In S. Becker,
S. Thrun, and K. Obermayer, editors, Advances in Neural
Information Processing Systems 15, pages 521-528.
MIT Press, 2003.
}
\author{
Carl Tony Fakhry, Ping Chen, Rahul Kulkarni and Kourosh Zarringhalam
}
|
/man/XingMethod.Rd
|
no_license
|
carltonyfakhry/DistanceLearning
|
R
| false
| true
| 4,719
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/Xing.R
\name{XingMethod}
\alias{XingMethod}
\title{This function computes Xing's global distance metric learning classification
algorithm.}
\usage{
XingMethod(Y, X, S = NULL, D = NULL,
learning_rate = 0.1, epsilon = 0.01,
error = 1e-10, max_iterations = 1000)
}
\arguments{
\item{Y}{vector of non-negative integer labels corresponding to each data point.}
\item{X}{Input numeric matrix where each row is a data point whose
label is the corresponding entry in \code{Y} and each column is a
variable.}
\item{S}{A \code{n * 2} similarity matrix describing the constraints of data points
with the same class label.
Each row of the matrix is a pair of indices of two data points in \code{X}
which belong to the same class. For example, pair(1, 3)
says that the first data point is in the same class as the third
data point. Default value is \code{S = NULL} in which case
\code{S} is computed in full. Use this parameter to define a smaller
similarity matrix which is appropriate to your given problem e.g via sampling
methods. The indices in \code{S} should range between 1 and \code{nrow(X)}.}
\item{D}{A \code{n * 2} dissimilarity matrix describing the constraints of data points
with a different class label.
Each row of the matrix is a pair of indices of two data points in \code{X}
which belong to different classes. For example, pair(1, 3)
says that the first data point is in a different class than the third
data point. Default value is \code{D = NULL} in which case
\code{D} is computed in full. Use this parameter to define a smaller dissimilarity
matrix which is appropriate to your given problem e.g via sampling
methods. The indices in \code{D} should range between 1 and \code{nrow(X)}.}
\item{learning_rate}{The learning rate to be used in the solver. Default value
is \code{learning_rate = 0.1}.}
\item{epsilon}{Threshold for convergence of the gradient method. Default value is
\code{epsilon = 0.01}.}
\item{error}{Threshold to be used when projecting onto the constraint
set. Default value is \code{error = 1e-10}.}
\item{max_iterations}{The maximum number of iterations to be processed
in the solver. Default value is \code{max_iterations = 1000}.}
}
\value{
This function returns a list with the following items:
\item{XingTransform}{The matrix under which the data was transformed. The
multiplication of this matrix with its transpose gives
the matrix used in the Mahalanobis
metric.}
\item{TransformedX}{The transformed original data \code{X} which was transformed
using the Xing Transform i.e \eqn{TransformedX = X *
XingTransform}}.
}
\description{
This function computes Xing's global distance metric learning
classification algorithm as described in [1]. See the Vignette
by using the command \code{browseVignette("DistanceLearning")}
for an introduction to using Xing's global
distance metric learning method.
}
\details{
See the Vignette by using the command
\code{browseVignette("DistanceLearning")}
for an introduction to using Xing's method.
}
\examples{
# Load data from package DistanceLearning
library(DistanceLearning)
library(class)
fname <- system.file("extdata", "example_data.csv", package="DistanceLearning")
df <- read.csv(fname)
Y <- as.integer(df$y)
X <- as.matrix(df[,c(2,3)])
sample_points <- sample(1:nrow(X), 180, replace = FALSE)
subX <- X[sample_points,]
subY <- Y[sample_points]
# Learn the metric, and get the transformed data
result <- XingMethod(subY, subX)
XingMetric <- result$XingTransform
transformedX <- result$TransformedX
# Get the accuracy of KNN classification without applying the new metric
yhat <- knn(subX, X[-sample_points,], subY, k = 5)
Accuracy <- length(which(Y[-sample_points] == yhat))/length(Y[-sample_points])
Accuracy
# Get the accuracy of KNN classification after applying the new metric
transformednewX <- X[-sample_points,] \%*\% XingMetric
yhat2 <- knn(transformedX, transformednewX, subY, k = 5)
Accuracy2 <- length(which(Y[-sample_points] == yhat2))/length(Y[-sample_points])
Accuracy2
}
\references{
[1] Eric P. Xing, Michael I. Jordan, Stuart J Russell, and
Andrew Y. Ng. Distance Metric Learning with Application
to Clustering with Side-Information. In S. Becker,
S. Thrun, and K. Obermayer, editors, Advances in Neural
Information Processing Systems 15, pages 521-528.
MIT Press, 2003.
}
\author{
Carl Tony Fakhry, Ping Chen, Rahul Kulkarni and Kourosh Zarringhalam
}
|
# Basic walkthrough of the xgboost R package: training on sparse/dense/DMatrix
# inputs, prediction, model persistence (file and raw vector), watchlists,
# DMatrix save/load, tree dumping, and feature importance.
require(xgboost)
require(methods)
# we load in the agaricus dataset
# In this example, we are aiming to predict whether a mushroom can be eaten
data(agaricus.train, package='xgboost')
data(agaricus.test, package='xgboost')
train <- agaricus.train
test <- agaricus.test
# the loaded data is stored in sparseMatrix, and label is a numeric vector in {0,1}
class(train$label)
class(train$data)
#-------------Basic Training using XGBoost-----------------
# this is the basic usage of xgboost: you can put a matrix in the data field
# note: we are putting in a sparse matrix here; xgboost naturally handles sparse input
# use a sparse matrix when your features are sparse (e.g. one-hot encoded vectors)
print("training xgboost with sparseMatrix")
bst <- xgboost(data = train$data, label = train$label, max.depth = 2, eta = 1, nround = 2,
               nthread = 2, objective = "binary:logistic")
# alternatively, you can put in a dense matrix, i.e. a basic R matrix
print("training xgboost with Matrix")
bst <- xgboost(data = as.matrix(train$data), label = train$label, max.depth = 2, eta = 1, nround = 2,
               nthread = 2, objective = "binary:logistic")
# you can also put in an xgb.DMatrix object, which stores label, data and other
# metadata needed for advanced features
print("training xgboost with xgb.DMatrix")
dtrain <- xgb.DMatrix(data = train$data, label = train$label)
bst <- xgboost(data = dtrain, max.depth = 2, eta = 1, nround = 2, nthread = 2,
               objective = "binary:logistic")
# Verbose = 0,1,2 controls the amount of training output
print('train xgboost with verbose 0, no message')
bst <- xgboost(data = dtrain, max.depth = 2, eta = 1, nround = 2,
               nthread = 2, objective = "binary:logistic", verbose = 0)
print('train xgboost with verbose 1, print evaluation metric')
bst <- xgboost(data = dtrain, max.depth = 2, eta = 1, nround = 2,
               nthread = 2, objective = "binary:logistic", verbose = 1)
print('train xgboost with verbose 2, also print information about tree')
bst <- xgboost(data = dtrain, max.depth = 2, eta = 1, nround = 2,
               nthread = 2, objective = "binary:logistic", verbose = 2)
# you can also specify data as a file path to a LibSVM format input
# since we do not have this file with us, the following line is just for illustration
# bst <- xgboost(data = 'agaricus.train.svm', max.depth = 2, eta = 1, nround = 2, objective = "binary:logistic")
#--------------------basic prediction using xgboost--------------
# you can do prediction using the following line
# you can put in Matrix, sparseMatrix, or xgb.DMatrix
pred <- predict(bst, test$data)
err <- mean(as.numeric(pred > 0.5) != test$label)
print(paste("test-error=", err))
#-------------------save and load models-------------------------
# save model to a binary local file
xgb.save(bst, "xgboost.model")
# load the binary model back into R
bst2 <- xgb.load("xgboost.model")
pred2 <- predict(bst2, test$data)
# pred2 should be identical to pred
print(paste("sum(abs(pred2-pred))=", sum(abs(pred2-pred))))
# save model to R's raw vector
raw <- xgb.save.raw(bst)
# load the raw-vector model back into R
bst3 <- xgb.load(raw)
pred3 <- predict(bst3, test$data)
# pred3 should be identical to pred
# BUG FIX: the original computed sum(abs(pred2-pred)) here, so the printed
# value never actually checked pred3 as its label claims.
print(paste("sum(abs(pred3-pred))=", sum(abs(pred3-pred))))
#----------------Advanced features --------------
# to use advanced features, we need to put data in xgb.DMatrix
dtrain <- xgb.DMatrix(data = train$data, label=train$label)
dtest <- xgb.DMatrix(data = test$data, label=test$label)
#---------------Using watchlist----------------
# watchlist is a list of xgb.DMatrix objects, each tagged with a name
watchlist <- list(train=dtrain, test=dtest)
# to train with a watchlist, use xgb.train, which contains more advanced features
# the watchlist allows us to monitor the evaluation result on all data in the list
print('train xgboost using xgb.train with watchlist')
bst <- xgb.train(data=dtrain, max.depth=2, eta=1, nround=2, watchlist=watchlist,
                 nthread = 2, objective = "binary:logistic")
# we can change evaluation metrics, or use multiple evaluation metrics
print('train xgboost using xgb.train with watchlist, watch logloss and error')
bst <- xgb.train(data=dtrain, max.depth=2, eta=1, nround=2, watchlist=watchlist,
                 eval.metric = "error", eval.metric = "logloss",
                 nthread = 2, objective = "binary:logistic")
# xgb.DMatrix can also be saved using xgb.DMatrix.save
xgb.DMatrix.save(dtrain, "dtrain.buffer")
# to load it in, simply call xgb.DMatrix on the saved buffer
dtrain2 <- xgb.DMatrix("dtrain.buffer")
bst <- xgb.train(data=dtrain2, max.depth=2, eta=1, nround=2, watchlist=watchlist,
                 nthread = 2, objective = "binary:logistic")
# information can be extracted from xgb.DMatrix using getinfo
label <- getinfo(dtest, "label")
pred <- predict(bst, dtest)
err <- as.numeric(sum(as.integer(pred > 0.5) != label))/length(label)
print(paste("test-error=", err))
# You can dump the trees you learned using xgb.dump into a text file
xgb.dump(bst, "dump.raw.txt", with.stats = TRUE)
# Finally, you can check which features are the most important.
print("Most important features (look at column Gain):")
print(xgb.importance(feature_names = train$data@Dimnames[[2]], filename_dump = "dump.raw.txt"))
|
/tools/xgboost-0.40/R-package/demo/basic_walkthrough.R
|
permissive
|
hezila/kdd2015
|
R
| false
| false
| 5,212
|
r
|
# Basic walkthrough of the xgboost R package: training on sparse/dense/DMatrix
# inputs, prediction, model persistence (file and raw vector), watchlists,
# DMatrix save/load, tree dumping, and feature importance.
require(xgboost)
require(methods)
# we load in the agaricus dataset
# In this example, we are aiming to predict whether a mushroom can be eaten
data(agaricus.train, package='xgboost')
data(agaricus.test, package='xgboost')
train <- agaricus.train
test <- agaricus.test
# the loaded data is stored in sparseMatrix, and label is a numeric vector in {0,1}
class(train$label)
class(train$data)
#-------------Basic Training using XGBoost-----------------
# this is the basic usage of xgboost: you can put a matrix in the data field
# note: we are putting in a sparse matrix here; xgboost naturally handles sparse input
# use a sparse matrix when your features are sparse (e.g. one-hot encoded vectors)
print("training xgboost with sparseMatrix")
bst <- xgboost(data = train$data, label = train$label, max.depth = 2, eta = 1, nround = 2,
               nthread = 2, objective = "binary:logistic")
# alternatively, you can put in a dense matrix, i.e. a basic R matrix
print("training xgboost with Matrix")
bst <- xgboost(data = as.matrix(train$data), label = train$label, max.depth = 2, eta = 1, nround = 2,
               nthread = 2, objective = "binary:logistic")
# you can also put in an xgb.DMatrix object, which stores label, data and other
# metadata needed for advanced features
print("training xgboost with xgb.DMatrix")
dtrain <- xgb.DMatrix(data = train$data, label = train$label)
bst <- xgboost(data = dtrain, max.depth = 2, eta = 1, nround = 2, nthread = 2,
               objective = "binary:logistic")
# Verbose = 0,1,2 controls the amount of training output
print('train xgboost with verbose 0, no message')
bst <- xgboost(data = dtrain, max.depth = 2, eta = 1, nround = 2,
               nthread = 2, objective = "binary:logistic", verbose = 0)
print('train xgboost with verbose 1, print evaluation metric')
bst <- xgboost(data = dtrain, max.depth = 2, eta = 1, nround = 2,
               nthread = 2, objective = "binary:logistic", verbose = 1)
print('train xgboost with verbose 2, also print information about tree')
bst <- xgboost(data = dtrain, max.depth = 2, eta = 1, nround = 2,
               nthread = 2, objective = "binary:logistic", verbose = 2)
# you can also specify data as a file path to a LibSVM format input
# since we do not have this file with us, the following line is just for illustration
# bst <- xgboost(data = 'agaricus.train.svm', max.depth = 2, eta = 1, nround = 2, objective = "binary:logistic")
#--------------------basic prediction using xgboost--------------
# you can do prediction using the following line
# you can put in Matrix, sparseMatrix, or xgb.DMatrix
pred <- predict(bst, test$data)
err <- mean(as.numeric(pred > 0.5) != test$label)
print(paste("test-error=", err))
#-------------------save and load models-------------------------
# save model to a binary local file
xgb.save(bst, "xgboost.model")
# load the binary model back into R
bst2 <- xgb.load("xgboost.model")
pred2 <- predict(bst2, test$data)
# pred2 should be identical to pred
print(paste("sum(abs(pred2-pred))=", sum(abs(pred2-pred))))
# save model to R's raw vector
raw <- xgb.save.raw(bst)
# load the raw-vector model back into R
bst3 <- xgb.load(raw)
pred3 <- predict(bst3, test$data)
# pred3 should be identical to pred
# BUG FIX: the original computed sum(abs(pred2-pred)) here, so the printed
# value never actually checked pred3 as its label claims.
print(paste("sum(abs(pred3-pred))=", sum(abs(pred3-pred))))
#----------------Advanced features --------------
# to use advanced features, we need to put data in xgb.DMatrix
dtrain <- xgb.DMatrix(data = train$data, label=train$label)
dtest <- xgb.DMatrix(data = test$data, label=test$label)
#---------------Using watchlist----------------
# watchlist is a list of xgb.DMatrix objects, each tagged with a name
watchlist <- list(train=dtrain, test=dtest)
# to train with a watchlist, use xgb.train, which contains more advanced features
# the watchlist allows us to monitor the evaluation result on all data in the list
print('train xgboost using xgb.train with watchlist')
bst <- xgb.train(data=dtrain, max.depth=2, eta=1, nround=2, watchlist=watchlist,
                 nthread = 2, objective = "binary:logistic")
# we can change evaluation metrics, or use multiple evaluation metrics
print('train xgboost using xgb.train with watchlist, watch logloss and error')
bst <- xgb.train(data=dtrain, max.depth=2, eta=1, nround=2, watchlist=watchlist,
                 eval.metric = "error", eval.metric = "logloss",
                 nthread = 2, objective = "binary:logistic")
# xgb.DMatrix can also be saved using xgb.DMatrix.save
xgb.DMatrix.save(dtrain, "dtrain.buffer")
# to load it in, simply call xgb.DMatrix on the saved buffer
dtrain2 <- xgb.DMatrix("dtrain.buffer")
bst <- xgb.train(data=dtrain2, max.depth=2, eta=1, nround=2, watchlist=watchlist,
                 nthread = 2, objective = "binary:logistic")
# information can be extracted from xgb.DMatrix using getinfo
label <- getinfo(dtest, "label")
pred <- predict(bst, dtest)
err <- as.numeric(sum(as.integer(pred > 0.5) != label))/length(label)
print(paste("test-error=", err))
# You can dump the trees you learned using xgb.dump into a text file
xgb.dump(bst, "dump.raw.txt", with.stats = TRUE)
# Finally, you can check which features are the most important.
print("Most important features (look at column Gain):")
print(xgb.importance(feature_names = train$data@Dimnames[[2]], filename_dump = "dump.raw.txt"))
|
# A14: associate T-cell functional readouts with RNA-seq-derived scores, Olink
# proteomics, IgG/neutralizing antibody titers, clinical labs, and EVE cytokine
# panels in COVID-19 subjects. Produces correlation heatmaps, per-analyte
# regression results, and survival/trajectory plots under Result/.
# NOTE(review): objects ab1, lab1, eve1, olink, tcell, etc. are assumed to come
# from Result/A01_compiled_data.rda -- lab1 is used below but is not listed in
# the inventory comment; confirm it is present in the .rda.
library(data.table)
library(dplyr)
library(tidyr)
library(ggplot2)
library(pheatmap)
library(RColorBrewer)
library(survival)
library(fastcluster)
library(cluster)
library(survminer)
library(umap)
library(Rtsne)
library(enrichR)
library(DESeq2)
library(lme4)
library(lmerTest)
library(emmeans)
library(gplots)
library(stringr)
#library(xCell)
library(corrplot)
##### load data #####
# ab1,demo1,eve1,symp1,olink,RNAseq,timeTo1,tcell,go_df
load('Result/A01_compiled_data.rda')
RNAseq = fread('Result/A02_RNA_seq_derived.csv',data.table = F)
#t1 = c('Gene_CCR2','Gene_CSF1R','Gene_LIFR',
#      'Gene_OSMR','Gene_IL6ST','Gene_PDCD1','Gene_CXCR3')
# Restrict Gene_ columns to the top GO genes from A27; keep all non-gene columns.
geneDf = read.csv('A27_top_go_genes.csv')
t1 = paste0('Gene_',geneDf$gene_symbol)
RNAseq = RNAseq[,!grepl('^Gene',colnames(RNAseq))|colnames(RNAseq)%in%t1]
RNAseq=RNAseq%>%mutate(Info_id = as.character(Info_id))
# Prefix each assay's columns (olink_/IgG_/Lab_/EVE_) and left-join every
# modality onto RNAseq by subject id and sampling day.
colnames(olink)[-(1:2)]=paste0('olink_',colnames(olink)[-(1:2)])
RNAseq = left_join(RNAseq,olink,by=c('Info_id'='id','Info_day'='day'))
colnames(ab1)[-(1:2)]=paste0('IgG_',colnames(ab1)[-(1:2)])
ab1 = ab1 %>%mutate(id = as.character(id))
RNAseq = left_join(RNAseq,ab1,by=c('Info_id'='id','Info_day'='day'))
colnames(lab1)[-(1:2)]=paste0('Lab_',colnames(lab1)[-(1:2)])
lab1 = lab1 %>%mutate(id = as.character(id))%>%mutate(day = as.numeric(day))
RNAseq = left_join(RNAseq,lab1,by=c('Info_id'='id','Info_day'='day'))
colnames(eve1)[-(1:2)]=paste0('EVE_',colnames(eve1)[-(1:2)])
# EVE values arrive as character; convert and log2-transform before joining.
eve1[,-(1:2)] = apply(eve1[,-(1:2)],2,function(x){log2(as.numeric(x))})
eve1 = eve1 %>%mutate(id = as.character(id))%>%mutate(day = as.numeric(day))
RNAseq = left_join(RNAseq,eve1,by=c('Info_id'='id','Info_day'='day'))
##### combined with T cell data ######
# Keep only functional-marker T-cell columns (CD107a/IFNy/IL10/IL21/TNF) and
# strip the '_bgsub' (background-subtracted) suffix from their names.
t1 = "_CD107a$|_IFNy$|_IL10$|_IL21$|_TNF$"
t1 = which(grepl(t1,colnames(tcell)) )#& grepl('Memory',colnames(tcell))
tcellSub = tcell[,c(1,2,t1)]
colnames(tcellSub)=gsub('_bgsub','',colnames(tcellSub))
cor1 = cor(tcellSub[,-c(1:2)],use = 'pairwise.complete.obs',method = 'spearman')
heatmap(cor1)
pdf('Result/A14_Tcell_correlation.pdf',7,7)
corrplot(cor1,order = 'hclust')
dev.off()
###### combined with T cell data and Ab data #####
# Add day-28 and day-120 (month-4) IgG titers plus neutralization values,
# then redo the Spearman correlation heatmap across T-cell + antibody measures.
ab28 = ab1%>%filter(day ==28)%>%dplyr::select(-day)
colnames(ab28)[2]='IgG_D28'
ab120 = ab1%>%filter(day ==120)%>%dplyr::select(-day)
colnames(ab120)[2]='IgG_M4'
tcellSub = tcellSub%>%left_join(ab28,by= 'id')%>%left_join(ab120,by= 'id')
neut_ab = read.csv('Data/Antibodies/prelim %Neut at 1-50 for Pras.csv')
neut_ab = gather(neut_ab,key='day',value = "neut_value",-id)
neut_ab = neut_ab%>%
  mutate(day=gsub('D','',day))%>%
  mutate(day = as.integer(day))%>%
  mutate(id = as.character(id))%>%
  dplyr::select(-day)
tcellSub = tcellSub%>%left_join(neut_ab,by= 'id')
cor1 = cor(tcellSub[,-c(1:2)],use = 'pairwise.complete.obs',method = 'spearman')
pdf('Result/A14_Tcell_Ab_correlation.pdf',10,10)
corrplot(cor1,order = 'hclust')
dev.off()
# concise heatmap
tcellSmall = tcellSub[,!grepl('^MN|Memory',colnames(tcellSub))]
cor1 = cor(tcellSmall[,-c(1:2)],use = 'pairwise.complete.obs',method = 'spearman')
pdf('Result/A14_Tcell_Ab_correlation_small.pdf',5,5)
corrplot(cor1,order = 'hclust',hclust.method = 'ward.D2')
dev.off()
# Distribution check of CD4 columns.
tcellSmall = tcellSub[,grepl('CD4',colnames(tcellSub))]
plot(tcellSmall)
tcellSmall = tcellSmall%>%
  gather(key='cell',value = 'value')
p = ggplot(tcellSmall,aes(x=value))+
  geom_histogram()+theme_bw()+
  facet_wrap(~cell,scales = 'free')
plot(p)
##### correlate with t cell #####
# NOTE(review): the variable is named mCD8_CD107a but is bound to the
# `SS1_bgsub_Memory CD4p_IFNy` column -- confirm which marker was intended.
df1 = tcell%>%
  dplyr::select(Info_id=id,
                mCD8_CD107a=`SS1_bgsub_Memory CD4p_IFNy`)%>%
  inner_join(RNAseq,by='Info_id')%>%
  mutate(Info_onset = Info_day-Info_onset)%>%
  filter(Info_severitycat!='Asymptomatic')
# Clamp negative background-subtracted values to zero.
df1$mCD8_CD107a[df1$mCD8_CD107a<0]=0
plotDf = df1%>%dplyr::select(Info_id,Info_tx,mCD8_CD107a)%>%unique()
p = ggplot(plotDf,aes(x=Info_tx,y =mCD8_CD107a ))+
  geom_boxplot()+theme_bw()
pdf('Result/A14_tx_vs_tcell.pdf',3,3)
plot(p)
dev.off()
# For every GO score / olink protein / gene / EVE cytokine column: regress the
# T-cell response (LM1) and olink DDX58 (LM2, comparator) on that analyte,
# adjusting for days-since-onset with a quadratic term; collect coefficients.
dfResult = data.frame()
for (i in grep('GO_|^olink_|Gene_|EVE_',colnames(df1))) { #18:ncol(df1)
  t1 = df1[,c("mCD8_CD107a","Info_onset",'olink_DDX58_O95786_OID01018',
              colnames(df1)[i])]%>%na.omit()
  colnames(t1)[4]='y'
  LM1 = lm(mCD8_CD107a~poly(Info_onset,2)+y,t1)
  LM1 = summary(LM1)
  LM2 = lm(olink_DDX58_O95786_OID01018~poly(Info_onset,2)+y
           ,t1)
  LM2 = summary(LM2)
  t1 = data.frame(gene = colnames(df1)[i],
                  beta = LM1$coefficients['y','Estimate'],
                  t= LM1$coefficients['y','t value'],
                  p = LM1$coefficients['y','Pr(>|t|)'],
                  p_rig = LM2$coefficients['y','Pr(>|t|)'],
                  t_rig = LM2$coefficients['y','t value'])
  dfResult = rbind(dfResult,t1)
}
write.csv(dfResult,'Result/A14_t_cell_results.csv',row.names = F)
# Keep GO_ analytes with short names, FDR-adjust p-values.
dfResult = dfResult%>%
  #filter(grepl('olink|EVE',gene))%>%
  filter(grepl('GO_',gene))%>%
  #mutate(type = gsub('_.*','',gene))%>%
  #group_by(type)%>%
  mutate(pAdj = p.adjust(p,'fdr'))%>%
  mutate(nc = nchar(gene))%>%
  filter(nc<40)
protCL=read.csv('Result/A11_gene_CL.csv')
# Bar plots of t-statistics for the top 10 FDR-significant analytes.
dfPlot = dfResult%>%filter(pAdj<0.05)
dfPlot = dfPlot%>%#left_join(protCL,by=c('gene'='prot'))%>%
  mutate(pAdjRig = p.adjust(p_rig,'fdr'))%>%
  filter(!grepl('TBA',gene))%>%
  mutate(gene = gsub('olink_|EVE_|GO_','',gene))%>%
  mutate(gene = gsub('_.*','',gene))%>%
  group_by(gene)%>%summarise_all(function(x){x[1]})%>%
  top_n(10,-pAdj)
p = ggplot(dfPlot,
           aes(x=t,y=reorder(gene,t)))+
  #geom_point(aes(x=max(t)+1,color = pAdj>0.05),shape = 15)+
  geom_bar(stat = 'identity')+theme_bw()
pdf('Result/A14_t_cell_bar.pdf',width = 5,height = 3)
plot(p)
dev.off()
# Cap extreme DDX58-model t-statistics before plotting.
dfPlot$t_rig[dfPlot$t_rig>100]=0
p = ggplot(dfPlot,
           aes(x=t_rig,y=reorder(gene,t)))+
  geom_point(aes(x=max(t_rig)+1,color = pAdjRig>0.05),shape = 15)+
  geom_bar(stat = 'identity')+theme_bw()
pdf('Result/A14_t_cell_bar_rig.pdf',width =5,height = 3)
plot(p)
dev.off()
# Analytes passing a stricter FDR cutoff, used for the survival analyses below.
pValue = dfResult%>%
  filter(pAdj<0.01)
##### t cell vs time #####
df1 = tcell%>%
  dplyr::select(Info_id=id,mCD8_CD107a=`MN_bgsub_CD8p_CD107a`)%>%
  inner_join(RNAseq,by='Info_id')%>%
  mutate(Info_onset = 30-Info_onset)%>%
  filter(Info_severitycat!='Asymptomatic')%>%
  dplyr::select(mCD8_CD107a,Info_onset)%>%
  unique()
df1$mCD8_CD107a[df1$mCD8_CD107a<0]=0
p = ggplot(df1,aes(x=Info_onset,y= mCD8_CD107a))+
  geom_point()+geom_smooth(method = 'lm')+theme_bw()
pdf('Result/A14_time_vs_cd8.pdf',3,3)
plot(p)
dev.off()
##### plot trajectory #####
# Kaplan-Meier curves of time-to-prime stratified by above/below-median
# onset-adjusted residuals of each selected analyte.
df1 = RNAseq%>%
  mutate(Info_time2prime = Info_time2prime-Info_onset)%>%
  mutate(Info_onset = Info_day-Info_onset)%>%
  filter(Info_severitycat!='Asymptomatic')
df1 = cbind(Info_surv = Surv(df1$Info_time2prime,df1$Info_prime),
            df1)
dfPlot = df1[,c('Info_id','Info_time2prime','Info_prime','Info_onset',
                pValue$gene)]%>%
  gather(key='prot',value = 'value',-Info_time2prime,
         -Info_prime,-Info_onset,-Info_id)%>%
  na.omit()%>%
  group_by(prot)%>%
  mutate(value = lm(value~poly(Info_onset,2))$residuals)%>%
  mutate(value = value>median(value,na.rm = T))
fit2 <- survfit( Surv(Info_time2prime,Info_prime) ~ value+prot, data = dfPlot )
ggsurv <- ggsurvplot(fit2, conf.int = F)
surv_pvalue(fit2)
pdf('Result/A13_prime_survival.pdf',width = 5,height = 5)
plot(ggsurv$plot+facet_wrap(~prot,scales = 'free'))
dev.off()
# Analyte trajectories colored by fast vs slow clearance (median split).
dfPlot = df1[,c('Info_time2prime','Info_prime','Info_onset',pValue$gene)]%>%
  gather(key='prot',value = 'value',-Info_time2prime,-Info_prime,-Info_onset)%>%
  na.omit()%>%
  mutate(cleared = (Info_onset>Info_time2prime)&(Info_prime==1))%>%
  filter(Info_prime==1)%>%
  mutate(clear_cat = Info_time2prime>median(Info_time2prime))%>%
  group_by(prot)#%>%
  #mutate(value = lm(value~poly(Info_onset,2))$residuals)
p = ggplot(dfPlot,aes(x=Info_onset,y=value,color = clear_cat))+
  geom_point()+
  facet_wrap(~prot,scales = "free")+
  theme_bw()+geom_smooth(method = 'lm')
plot(p)
##### severity vs CD8+ T cell #####
df1 = tcell%>%
  dplyr::select(Info_id=id,mCD8_CD107a=`SS1_bgsub_Memory CD8p_CD107a`)%>%
  inner_join(RNAseq,by='Info_id')%>%
  dplyr::select(Info_severitycat,mCD8_CD107a)%>%
  unique()
df1$mCD8_CD107a[df1$mCD8_CD107a<0]=0
p = ggplot(df1,aes(x=Info_severitycat,y = mCD8_CD107a))+
  geom_boxplot(outlier.shape = NA)+
  geom_jitter(width = 0.2)+theme_bw()
pdf('Result/A14_mCD8_CD107a.pdf',width =3,height = 3)
plot(p)
dev.off()
kruskal.test(mCD8_CD107a ~ Info_severitycat, data = df1)
summary.aov(lm(mCD8_CD107a ~ Info_severitycat, data = df1))
##### DDX58 vs t cell #####
# Scatter of onset-adjusted DDX58 residuals vs memory CD4 IFNy response.
df1 = tcell%>%
  dplyr::select(Info_id=id,
                tcell=`SS1_bgsub_Memory CD4p_IFNy`)%>%
  inner_join(RNAseq,by='Info_id')%>%
  mutate(Info_onset = Info_day-Info_onset)%>%
  filter(Info_severitycat!='Asymptomatic')%>%
  mutate(DDX58=olink_DDX58_O95786_OID01018)%>%
  dplyr::select(Info_onset,DDX58,tcell)%>%
  na.omit()%>%
  mutate(DDX58 = lm(DDX58~poly(Info_onset,2))$residuals)
p = ggplot(df1,aes(x=DDX58,y=tcell))+
  geom_point()+geom_smooth(method = 'lm')+theme_bw()
pdf('Result/A15_DDX58_vs_tcell.pdf',3,3)
plot(p)
dev.off()
# Same analysis for EVE IL-9.
df1 = tcell%>%
  dplyr::select(Info_id=id,
                tcell=`SS1_bgsub_Memory CD4p_IFNy`)%>%
  inner_join(RNAseq,by='Info_id')%>%
  mutate(Info_onset = Info_day-Info_onset)%>%
  filter(Info_severitycat!='Asymptomatic')%>%
  mutate(IL9=`EVE_IL-9`)%>%
  dplyr::select(Info_onset,IL9,tcell)%>%
  na.omit()%>%
  mutate(IL9 = lm(IL9~poly(Info_onset,2))$residuals)
p = ggplot(df1,aes(x=IL9,y=tcell))+
  geom_point()+geom_smooth(method = 'lm')+theme_bw()
pdf('Result/A15_IL9_vs_tcell.pdf',3,3)
plot(p)
dev.off()
|
/Code/A14_T_cell_association.R
|
no_license
|
hzc363/COVID19_system_immunology
|
R
| false
| false
| 9,553
|
r
|
library(data.table)
library(dplyr)
library(tidyr)
library(ggplot2)
library(pheatmap)
library(RColorBrewer)
library(survival)
library(fastcluster)
library(cluster)
library(survminer)
library(umap)
library(Rtsne)
library(enrichR)
library(DESeq2)
library(lme4)
library(lmerTest)
library(emmeans)
library(gplots)
library(stringr)
#library(xCell)
library(corrplot)
##### load data #####
# ab1,demo1,eve1,symp1,olink,RNAseq,timeTo1,tcell,go_df
load('Result/A01_compiled_data.rda')
RNAseq = fread('Result/A02_RNA_seq_derived.csv',data.table = F)
#t1 = c('Gene_CCR2','Gene_CSF1R','Gene_LIFR',
# 'Gene_OSMR','Gene_IL6ST','Gene_PDCD1','Gene_CXCR3')
geneDf = read.csv('A27_top_go_genes.csv')
t1 = paste0('Gene_',geneDf$gene_symbol)
RNAseq = RNAseq[,!grepl('^Gene',colnames(RNAseq))|colnames(RNAseq)%in%t1]
RNAseq=RNAseq%>%mutate(Info_id = as.character(Info_id))
colnames(olink)[-(1:2)]=paste0('olink_',colnames(olink)[-(1:2)])
RNAseq = left_join(RNAseq,olink,by=c('Info_id'='id','Info_day'='day'))
colnames(ab1)[-(1:2)]=paste0('IgG_',colnames(ab1)[-(1:2)])
ab1 = ab1 %>%mutate(id = as.character(id))
RNAseq = left_join(RNAseq,ab1,by=c('Info_id'='id','Info_day'='day'))
colnames(lab1)[-(1:2)]=paste0('Lab_',colnames(lab1)[-(1:2)])
lab1 = lab1 %>%mutate(id = as.character(id))%>%mutate(day = as.numeric(day))
RNAseq = left_join(RNAseq,lab1,by=c('Info_id'='id','Info_day'='day'))
colnames(eve1)[-(1:2)]=paste0('EVE_',colnames(eve1)[-(1:2)])
eve1[,-(1:2)] = apply(eve1[,-(1:2)],2,function(x){log2(as.numeric(x))})
eve1 = eve1 %>%mutate(id = as.character(id))%>%mutate(day = as.numeric(day))
RNAseq = left_join(RNAseq,eve1,by=c('Info_id'='id','Info_day'='day'))
##### combined with T cell data ######
t1 = "_CD107a$|_IFNy$|_IL10$|_IL21$|_TNF$"
t1 = which(grepl(t1,colnames(tcell)) )#& grepl('Memory',colnames(tcell))
tcellSub = tcell[,c(1,2,t1)]
colnames(tcellSub)=gsub('_bgsub','',colnames(tcellSub))
cor1 = cor(tcellSub[,-c(1:2)],use = 'pairwise.complete.obs',method = 'spearman')
heatmap(cor1)
pdf('Result/A14_Tcell_correlation.pdf',7,7)
corrplot(cor1,order = 'hclust')
dev.off()
###### combined with T cell data and Ab data #####
ab28 = ab1%>%filter(day ==28)%>%dplyr::select(-day)
colnames(ab28)[2]='IgG_D28'
ab120 = ab1%>%filter(day ==120)%>%dplyr::select(-day)
colnames(ab120)[2]='IgG_M4'
tcellSub = tcellSub%>%left_join(ab28,by= 'id')%>%left_join(ab120,by= 'id')
neut_ab = read.csv('Data/Antibodies/prelim %Neut at 1-50 for Pras.csv')
neut_ab = gather(neut_ab,key='day',value = "neut_value",-id)
neut_ab = neut_ab%>%
mutate(day=gsub('D','',day))%>%
mutate(day = as.integer(day))%>%
mutate(id = as.character(id))%>%
dplyr::select(-day)
tcellSub = tcellSub%>%left_join(neut_ab,by= 'id')
cor1 = cor(tcellSub[,-c(1:2)],use = 'pairwise.complete.obs',method = 'spearman')
pdf('Result/A14_Tcell_Ab_correlation.pdf',10,10)
corrplot(cor1,order = 'hclust')
dev.off()
# concise heatmap
tcellSmall = tcellSub[,!grepl('^MN|Memory',colnames(tcellSub))]
cor1 = cor(tcellSmall[,-c(1:2)],use = 'pairwise.complete.obs',method = 'spearman')
pdf('Result/A14_Tcell_Ab_correlation_small.pdf',5,5)
corrplot(cor1,order = 'hclust',hclust.method = 'ward.D2')
dev.off()
tcellSmall = tcellSub[,grepl('CD4',colnames(tcellSub))]
plot(tcellSmall)
tcellSmall = tcellSmall%>%
gather(key='cell',value = 'value')
p = ggplot(tcellSmall,aes(x=value))+
geom_histogram()+theme_bw()+
facet_wrap(~cell,scales = 'free')
plot(p)
##### correlate with t cell #####
df1 = tcell%>%
dplyr::select(Info_id=id,
mCD8_CD107a=`SS1_bgsub_Memory CD4p_IFNy`)%>%
inner_join(RNAseq,by='Info_id')%>%
mutate(Info_onset = Info_day-Info_onset)%>%
filter(Info_severitycat!='Asymptomatic')
df1$mCD8_CD107a[df1$mCD8_CD107a<0]=0
plotDf = df1%>%dplyr::select(Info_id,Info_tx,mCD8_CD107a)%>%unique()
p = ggplot(plotDf,aes(x=Info_tx,y =mCD8_CD107a ))+
geom_boxplot()+theme_bw()
pdf('Result/A14_tx_vs_tcell.pdf',3,3)
plot(p)
dev.off()
dfResult = data.frame()
for (i in grep('GO_|^olink_|Gene_|EVE_',colnames(df1))) { #18:ncol(df1)
t1 = df1[,c("mCD8_CD107a","Info_onset",'olink_DDX58_O95786_OID01018',
colnames(df1)[i])]%>%na.omit()
colnames(t1)[4]='y'
LM1 = lm(mCD8_CD107a~poly(Info_onset,2)+y,t1)
LM1 = summary(LM1)
LM2 = lm(olink_DDX58_O95786_OID01018~poly(Info_onset,2)+y
,t1)
LM2 = summary(LM2)
t1 = data.frame(gene = colnames(df1)[i],
beta = LM1$coefficients['y','Estimate'],
t= LM1$coefficients['y','t value'],
p = LM1$coefficients['y','Pr(>|t|)'],
p_rig = LM2$coefficients['y','Pr(>|t|)'],
t_rig = LM2$coefficients['y','t value'])
dfResult = rbind(dfResult,t1)
}
write.csv(dfResult,'Result/A14_t_cell_results.csv',row.names = F)
dfResult = dfResult%>%
#filter(grepl('olink|EVE',gene))%>%
filter(grepl('GO_',gene))%>%
#mutate(type = gsub('_.*','',gene))%>%
#group_by(type)%>%
mutate(pAdj = p.adjust(p,'fdr'))%>%
mutate(nc = nchar(gene))%>%
filter(nc<40)
protCL=read.csv('Result/A11_gene_CL.csv')
dfPlot = dfResult%>%filter(pAdj<0.05)
dfPlot = dfPlot%>%#left_join(protCL,by=c('gene'='prot'))%>%
mutate(pAdjRig = p.adjust(p_rig,'fdr'))%>%
filter(!grepl('TBA',gene))%>%
mutate(gene = gsub('olink_|EVE_|GO_','',gene))%>%
mutate(gene = gsub('_.*','',gene))%>%
group_by(gene)%>%summarise_all(function(x){x[1]})%>%
top_n(10,-pAdj)
p = ggplot(dfPlot,
aes(x=t,y=reorder(gene,t)))+
#geom_point(aes(x=max(t)+1,color = pAdj>0.05),shape = 15)+
geom_bar(stat = 'identity')+theme_bw()
pdf('Result/A14_t_cell_bar.pdf',width = 5,height = 3)
plot(p)
dev.off()
dfPlot$t_rig[dfPlot$t_rig>100]=0
p = ggplot(dfPlot,
aes(x=t_rig,y=reorder(gene,t)))+
geom_point(aes(x=max(t_rig)+1,color = pAdjRig>0.05),shape = 15)+
geom_bar(stat = 'identity')+theme_bw()
pdf('Result/A14_t_cell_bar_rig.pdf',width =5,height = 3)
plot(p)
dev.off()
pValue = dfResult%>%
filter(pAdj<0.01)
##### t cell vs time #####
df1 = tcell%>%
dplyr::select(Info_id=id,mCD8_CD107a=`MN_bgsub_CD8p_CD107a`)%>%
inner_join(RNAseq,by='Info_id')%>%
mutate(Info_onset = 30-Info_onset)%>%
filter(Info_severitycat!='Asymptomatic')%>%
dplyr::select(mCD8_CD107a,Info_onset)%>%
unique()
df1$mCD8_CD107a[df1$mCD8_CD107a<0]=0
p = ggplot(df1,aes(x=Info_onset,y= mCD8_CD107a))+
geom_point()+geom_smooth(method = 'lm')+theme_bw()
pdf('Result/A14_time_vs_cd8.pdf',3,3)
plot(p)
dev.off()
##### plot trajectory #####
# Build a per-sample table with time-to-prime re-expressed relative to symptom
# onset, keeping symptomatic participants only.
df1 = RNAseq%>%
  mutate(Info_time2prime = Info_time2prime-Info_onset)%>%
  mutate(Info_onset = Info_day-Info_onset)%>%
  filter(Info_severitycat!='Asymptomatic')
# Attach a survival object (time-to-prime, prime event) as the first column.
df1 = cbind(Info_surv = Surv(df1$Info_time2prime,df1$Info_prime),
            df1)
# Long format over the significant genes (pValue built above); per protein,
# regress out a quadratic time-from-onset trend and dichotomize at the median.
dfPlot = df1[,c('Info_id','Info_time2prime','Info_prime','Info_onset',
                pValue$gene)]%>%
  gather(key='prot',value = 'value',-Info_time2prime,
         -Info_prime,-Info_onset,-Info_id)%>%
  na.omit()%>%
  group_by(prot)%>%
  mutate(value = lm(value~poly(Info_onset,2))$residuals)%>%
  mutate(value = value>median(value,na.rm = T))
# Kaplan-Meier curves stratified by high/low marker level within each protein.
fit2 <- survfit( Surv(Info_time2prime,Info_prime) ~ value+prot, data = dfPlot )
ggsurv <- ggsurvplot(fit2, conf.int = F)
surv_pvalue(fit2)
pdf('Result/A13_prime_survival.pdf',width = 5,height = 5)
plot(ggsurv$plot+facet_wrap(~prot,scales = 'free'))
dev.off()
# Second view: raw trajectories for participants who primed, colored by whether
# their time-to-prime is above the median (the residualization step is left
# commented out here).
dfPlot = df1[,c('Info_time2prime','Info_prime','Info_onset',pValue$gene)]%>%
  gather(key='prot',value = 'value',-Info_time2prime,-Info_prime,-Info_onset)%>%
  na.omit()%>%
  mutate(cleared = (Info_onset>Info_time2prime)&(Info_prime==1))%>%
  filter(Info_prime==1)%>%
  mutate(clear_cat = Info_time2prime>median(Info_time2prime))%>%
  group_by(prot)#%>%
  #mutate(value = lm(value~poly(Info_onset,2))$residuals)
p = ggplot(dfPlot,aes(x=Info_onset,y=value,color = clear_cat))+
  geom_point()+
  facet_wrap(~prot,scales = "free")+
  theme_bw()+geom_smooth(method = 'lm')
plot(p)
##### severity vs CD8+ T cell #####
# Memory CD8+ CD107a per participant vs disease-severity category.
df1 = tcell%>%
  dplyr::select(Info_id=id,mCD8_CD107a=`SS1_bgsub_Memory CD8p_CD107a`)%>%
  inner_join(RNAseq,by='Info_id')%>%
  dplyr::select(Info_severitycat,mCD8_CD107a)%>%
  unique()
# Floor negative background-subtracted values at zero.
df1$mCD8_CD107a[df1$mCD8_CD107a<0]=0
# Boxplot by severity with jittered points (box outliers suppressed to avoid
# double-plotting them).
p = ggplot(df1,aes(x=Info_severitycat,y = mCD8_CD107a))+
  geom_boxplot(outlier.shape = NA)+
  geom_jitter(width = 0.2)+theme_bw()
pdf('Result/A14_mCD8_CD107a.pdf',width =3,height = 3)
plot(p)
dev.off()
# Non-parametric and ANOVA tests of the severity effect (printed, not stored).
kruskal.test(mCD8_CD107a ~ Info_severitycat, data = df1)
summary.aov(lm(mCD8_CD107a ~ Info_severitycat, data = df1))
##### DDX58 vs t cell #####
# Relate olink DDX58 (time-adjusted) to memory CD4+ IFNy response.
df1 = tcell%>%
  dplyr::select(Info_id=id,
                tcell=`SS1_bgsub_Memory CD4p_IFNy`)%>%
  inner_join(RNAseq,by='Info_id')%>%
  mutate(Info_onset = Info_day-Info_onset)%>%
  filter(Info_severitycat!='Asymptomatic')%>%
  mutate(DDX58=olink_DDX58_O95786_OID01018)%>%
  dplyr::select(Info_onset,DDX58,tcell)%>%
  na.omit()%>%
  mutate(DDX58 = lm(DDX58~poly(Info_onset,2))$residuals)
# Scatter with linear fit: residualized DDX58 vs T-cell response.
p = ggplot(df1,aes(x=DDX58,y=tcell))+
  geom_point()+geom_smooth(method = 'lm')+theme_bw()
pdf('Result/A15_DDX58_vs_tcell.pdf',3,3)
plot(p)
dev.off()
# Same analysis as above but with EVE IL-9 in place of DDX58: regress out a
# quadratic time-from-onset trend, then plot against memory CD4+ IFNy response.
df1 = tcell%>%
  dplyr::select(Info_id=id,
                tcell=`SS1_bgsub_Memory CD4p_IFNy`)%>%
  inner_join(RNAseq,by='Info_id')%>%
  mutate(Info_onset = Info_day-Info_onset)%>%
  filter(Info_severitycat!='Asymptomatic')%>%
  mutate(IL9=`EVE_IL-9`)%>%
  dplyr::select(Info_onset,IL9,tcell)%>%
  na.omit()%>%
  mutate(IL9 = lm(IL9~poly(Info_onset,2))$residuals)
p = ggplot(df1,aes(x=IL9,y=tcell))+
  geom_point()+geom_smooth(method = 'lm')+theme_bw()
pdf('Result/A15_IL9_vs_tcell.pdf',3,3)
plot(p)
dev.off()
|
#' Create a Bootstrap 4 dashboard footer
#'
#' Build a blue dashboard footer.
#'
#' @param ... Slot for \link{blueFooterMenu}.
#' @param copyrights Left text, if any.
#' @param src Left text link target, if any.
#'
#' @author David Granjon, \email{dgranjon@@ymail.com}
#'
#' @export
blueDashFooter <- function(..., copyrights = NULL, src = NULL) {
  # Left column: the copyright text rendered as an external link.
  copyright_col <- shiny::tags$div(
    class = "col-xl-6",
    shiny::tags$div(
      class = "copyright text-center text-xl-left text-muted",
      shiny::a(
        class = "font-weight-bold ml-1",
        href = src,
        target = "_blank",
        copyrights
      )
    )
  )
  # Right column: the footer menu supplied by the caller.
  menu_col <- shiny::tags$div(
    class = "col-xl-6",
    ...
  )
  shiny::tags$footer(
    class = "footer",
    shiny::tags$div(
      class = "row align-items-center justify-content-xl-between",
      copyright_col,
      menu_col
    )
  )
}
#' Create a Bootstrap 4 dashboard footer menu
#'
#' Build a blue dashboard footer menu.
#'
#' @param ... Slot for \link{blueFooterItem}.
#'
#' @author David Granjon, \email{dgranjon@@ymail.com}
#'
#' @export
blueFooterMenu <- function(...) {
  menu_classes <- "nav nav-footer justify-content-center justify-content-xl-end"
  shiny::tags$ul(class = menu_classes, ...)
}
#' Create a Bootstrap 4 dashboard footer menu item
#'
#' Build a blue dashboard footer menu item.
#'
#' @param ... Item name.
#' @param src Item external link.
#'
#' @author David Granjon, \email{dgranjon@@ymail.com}
#'
#' @export
blueFooterItem <- function(..., src = NULL) {
  # The item content becomes an external link inside a nav list entry.
  link <- shiny::a(
    class = "nav-link",
    target = "_blank",
    href = src,
    ...
  )
  shiny::tags$li(class = "nav-item", link)
}
|
/R/dashboardFooter.R
|
no_license
|
tamas-olah/blueDash
|
R
| false
| false
| 1,659
|
r
|
#' Create a Bootstrap 4 dashboard footer
#'
#' Build a blue dashboard footer.
#'
#' @param ... Slot for \link{blueFooterMenu}.
#' @param copyrights Left text, if any.
#' @param src Left text link target, if any.
#'
#' @author David Granjon, \email{dgranjon@@ymail.com}
#'
#' @export
blueDashFooter <- function(..., copyrights = NULL, src = NULL) {
  # Left column: the copyright text rendered as an external link.
  copyright_col <- shiny::tags$div(
    class = "col-xl-6",
    shiny::tags$div(
      class = "copyright text-center text-xl-left text-muted",
      shiny::a(
        class = "font-weight-bold ml-1",
        href = src,
        target = "_blank",
        copyrights
      )
    )
  )
  # Right column: the footer menu supplied by the caller.
  menu_col <- shiny::tags$div(
    class = "col-xl-6",
    ...
  )
  shiny::tags$footer(
    class = "footer",
    shiny::tags$div(
      class = "row align-items-center justify-content-xl-between",
      copyright_col,
      menu_col
    )
  )
}
#' Create a Bootstrap 4 dashboard footer menu
#'
#' Build a blue dashboard footer menu.
#'
#' @param ... Slot for \link{blueFooterItem}.
#'
#' @author David Granjon, \email{dgranjon@@ymail.com}
#'
#' @export
blueFooterMenu <- function(...) {
  menu_classes <- "nav nav-footer justify-content-center justify-content-xl-end"
  shiny::tags$ul(class = menu_classes, ...)
}
#' Create a Bootstrap 4 dashboard footer menu item
#'
#' Build a blue dashboard footer menu item.
#'
#' @param ... Item name.
#' @param src Item external link.
#'
#' @author David Granjon, \email{dgranjon@@ymail.com}
#'
#' @export
blueFooterItem <- function(..., src = NULL) {
  # The item content becomes an external link inside a nav list entry.
  link <- shiny::a(
    class = "nav-link",
    target = "_blank",
    href = src,
    ...
  )
  shiny::tags$li(class = "nav-item", link)
}
|
## Matrix Cache
## Abstraction that speeds up repeated calculation of a matrix inverse.
##
## e.g.
##   # create cache matrix object, providing the matrix to invert
##   cm <- makeCacheMatrix(matrix(c(1, 2, 3, 4), nrow = 2, ncol = 2))
##
##   # the first call computes the inverse; later calls return the cached result
##   matrixInverse <- cacheSolve(cm)

## Given a matrix, return a cache-matrix object to be fed to cacheSolve().
##
## args:
##   x: matrix to be inverted
makeCacheMatrix <- function(x = matrix()) {
  # Cached inverse; NULL means "not computed yet". Replacing the matrix via
  ## set() invalidates the cache.
  cached <- NULL
  list(
    set = function(y) {
      x <<- y
      cached <<- NULL
    },
    get = function() x,
    setInverse = function(inverse) cached <<- inverse,
    getInverse = function() cached
  )
}
## Given a list built by makeCacheMatrix, return the inverse of the
## underlying matrix, computing it only on the first call.
##
## args:
##   x:   cache-matrix object created by makeCacheMatrix
##   ...: further arguments forwarded to solve()
cacheSolve <- function(x, ...) {
  cached <- x$getInverse()
  if (is.null(cached)) {
    # Cache miss: compute the inverse and store it for next time.
    cached <- solve(x$get(), ...)
    x$setInverse(cached)
  } else {
    message("getting cached data")
  }
  cached
}
|
/cachematrix.R
|
no_license
|
lrascao/ProgrammingAssignment2
|
R
| false
| false
| 1,130
|
r
|
## Matrix Cache
## Abstraction that speeds up repeated calculation of a matrix inverse.
##
## e.g.
##   # create cache matrix object, providing the matrix to invert
##   cm <- makeCacheMatrix(matrix(c(1, 2, 3, 4), nrow = 2, ncol = 2))
##
##   # the first call computes the inverse; later calls return the cached result
##   matrixInverse <- cacheSolve(cm)

## Given a matrix, return a cache-matrix object to be fed to cacheSolve().
##
## args:
##   x: matrix to be inverted
makeCacheMatrix <- function(x = matrix()) {
  # Cached inverse; NULL means "not computed yet". Replacing the matrix via
  ## set() invalidates the cache.
  cached <- NULL
  list(
    set = function(y) {
      x <<- y
      cached <<- NULL
    },
    get = function() x,
    setInverse = function(inverse) cached <<- inverse,
    getInverse = function() cached
  )
}
## Given a list built by makeCacheMatrix, return the inverse of the
## underlying matrix, computing it only on the first call.
##
## args:
##   x:   cache-matrix object created by makeCacheMatrix
##   ...: further arguments forwarded to solve()
cacheSolve <- function(x, ...) {
  cached <- x$getInverse()
  if (is.null(cached)) {
    # Cache miss: compute the inverse and store it for next time.
    cached <- solve(x$get(), ...)
    x$setInverse(cached)
  } else {
    message("getting cached data")
  }
  cached
}
|
# Run a SQL query against Hive over JDBC and return the result as a data.frame.
#
# args:
#   sql: query string to execute
# Driver jars are expected under ./dep; connects as user "hive" to the
# hard-coded eb-pmp HiveServer2 endpoint.
# NOTE(review): the JDBC connection is never disconnected — confirm whether the
# caller relies on it staying open.
query_hive <- function(sql) {
  library("RJDBC")
  library("rJava")
  # Collect every jar under ./dep onto the JVM classpath before init.
  hadoop.class.path = list.files(path=c("./dep"), pattern="jar", full.names=T);
  cp = c(hadoop.class.path)
  .jinit(classpath=cp)
  url.dbc = paste0("jdbc:hive2://eb-pmp:10000/default");
  drv=JDBC("org.apache.hive.jdbc.HiveDriver")
  conn <- dbConnect(drv, url.dbc, "hive", "hive")
  df <- dbGetQuery(conn, sql)
}
# Run a SQL query against Oracle and return the full result set as a data.frame.
#
# args:
#   sql: query string to execute
# Connects with the hard-coded CLR credentials; fetches all rows (n = -1).
query_oracle <- function(sql) {
  library(ROracle)
  drv <- dbDriver("Oracle")
  con <- dbConnect(drv, "CLR", "INTG_REF_CLR", "209.INTG")
  # Close the connection on exit even if the query fails (fixes a leak: the
  # original never disconnected or cleared the result set).
  on.exit(dbDisconnect(con), add = TRUE)
  res <- dbSendQuery(con, sql)
  # Clear the result set before the connection closes.
  on.exit(dbClearResult(res), add = TRUE, after = FALSE)
  fetch(res, n = -1)
}
|
/R/DBQuery.R
|
no_license
|
roxxette/selmodel
|
R
| false
| false
| 609
|
r
|
# Run a SQL query against Hive over JDBC and return the result as a data.frame.
#
# args:
#   sql: query string to execute
# Driver jars are expected under ./dep; connects as user "hive" to the
# hard-coded eb-pmp HiveServer2 endpoint.
# NOTE(review): the JDBC connection is never disconnected — confirm whether the
# caller relies on it staying open.
query_hive <- function(sql) {
  library("RJDBC")
  library("rJava")
  # Collect every jar under ./dep onto the JVM classpath before init.
  hadoop.class.path = list.files(path=c("./dep"), pattern="jar", full.names=T);
  cp = c(hadoop.class.path)
  .jinit(classpath=cp)
  url.dbc = paste0("jdbc:hive2://eb-pmp:10000/default");
  drv=JDBC("org.apache.hive.jdbc.HiveDriver")
  conn <- dbConnect(drv, url.dbc, "hive", "hive")
  df <- dbGetQuery(conn, sql)
}
# Run a SQL query against Oracle and return the full result set as a data.frame.
#
# args:
#   sql: query string to execute
# Connects with the hard-coded CLR credentials; fetches all rows (n = -1).
query_oracle <- function(sql) {
  library(ROracle)
  drv <- dbDriver("Oracle")
  con <- dbConnect(drv, "CLR", "INTG_REF_CLR", "209.INTG")
  # Close the connection on exit even if the query fails (fixes a leak: the
  # original never disconnected or cleared the result set).
  on.exit(dbDisconnect(con), add = TRUE)
  res <- dbSendQuery(con, sql)
  # Clear the result set before the connection closes.
  on.exit(dbClearResult(res), add = TRUE, after = FALSE)
  fetch(res, n = -1)
}
|
# Kickstarter project visualization: grouped bar charts of goal vs pledged
# dollars per main category, using plotly.
# NOTE(review): install.packages() runs on every execution; consider guarding
# with requireNamespace() checks.
install.packages("gapminder")
library(gapminder)
# ggplot2: use ggplot() to map data, geom_*() functions to choose the plot
# type, and `+` to chain functions and stack layers.
install.packages("tidyverse")
install.packages("ggplot2")
library(gapminder) # data
library(ggplot2) # plotting
install.packages("dplyr")
library(dplyr) # data manipulations
install.packages("plotly")
library(plotly)
# Load the Kickstarter projects CSV (absolute local path).
data<-read.csv("/Users/yulin/Documents/資訊類別/視覺化/期末專題/ks-projects.csv", header=T, sep=",")
View(data)
data$goal <- as.numeric(data$goal)
data$pledged <- as.numeric(data$pledged)
# Keep only the category plus goal/pledged columns for plotting.
data_select <- select(data,main_category,starts_with("goal"),starts_with("pledged"))
View(data_select)
# Per-category totals and the pledged/goal ratio (printed, not stored).
data_select %>%
  group_by(main_category) %>%
  summarise(goal = sum(goal), pledge = sum(pledged),percentage = pledge/goal)
# First version: default-styled grouped bar chart of goal vs pledged.
bar_plot <- plot_ly(data_select,x = ~main_category,y = ~goal,type = 'bar',name = 'goal') %>%
  add_trace(y = ~pledged,name = 'pledge') %>%
  layout(yaxis = list(title = 'dollars',barmode = 'group'))
bar_plot
library(plotly)
# Second version: custom colors, hover text, and axis titles.
p <- data_select %>%
  plot_ly() %>%
  add_trace(data_select,x = ~main_category, y = ~goal, type = 'bar', name = 'goal',
            text = data_select$goal,
            marker = list(color = 'rgb(255, 95, 95)',
                          line = list(color = 'rgb(255,95,95)', width = 0.5))) %>%
  add_trace(data_select,x = ~main_category, y = ~pledged, type = 'bar', name = 'pledge',
            text = data_select$pledged,
            marker = list(color = 'rgb(176, 176, 176)',
                          line = list(color = 'rgb(176, 176, 176)', width = 0.5))) %>%
  layout(title = "2009~2016 Category Goal and Pledge",
         barmode = 'group',
         xaxis = list(title = "Category"),
         yaxis = list(title = "US dollars"))
p
|
/其他練習/r/project.R
|
no_license
|
yenchungLin/study
|
R
| false
| false
| 1,806
|
r
|
# Kickstarter project visualization: grouped bar charts of goal vs pledged
# dollars per main category, using plotly.
# NOTE(review): install.packages() runs on every execution; consider guarding
# with requireNamespace() checks.
install.packages("gapminder")
library(gapminder)
# ggplot2: use ggplot() to map data, geom_*() functions to choose the plot
# type, and `+` to chain functions and stack layers.
install.packages("tidyverse")
install.packages("ggplot2")
library(gapminder) # data
library(ggplot2) # plotting
install.packages("dplyr")
library(dplyr) # data manipulations
install.packages("plotly")
library(plotly)
# Load the Kickstarter projects CSV (absolute local path).
data<-read.csv("/Users/yulin/Documents/資訊類別/視覺化/期末專題/ks-projects.csv", header=T, sep=",")
View(data)
data$goal <- as.numeric(data$goal)
data$pledged <- as.numeric(data$pledged)
# Keep only the category plus goal/pledged columns for plotting.
data_select <- select(data,main_category,starts_with("goal"),starts_with("pledged"))
View(data_select)
# Per-category totals and the pledged/goal ratio (printed, not stored).
data_select %>%
  group_by(main_category) %>%
  summarise(goal = sum(goal), pledge = sum(pledged),percentage = pledge/goal)
# First version: default-styled grouped bar chart of goal vs pledged.
bar_plot <- plot_ly(data_select,x = ~main_category,y = ~goal,type = 'bar',name = 'goal') %>%
  add_trace(y = ~pledged,name = 'pledge') %>%
  layout(yaxis = list(title = 'dollars',barmode = 'group'))
bar_plot
library(plotly)
# Second version: custom colors, hover text, and axis titles.
p <- data_select %>%
  plot_ly() %>%
  add_trace(data_select,x = ~main_category, y = ~goal, type = 'bar', name = 'goal',
            text = data_select$goal,
            marker = list(color = 'rgb(255, 95, 95)',
                          line = list(color = 'rgb(255,95,95)', width = 0.5))) %>%
  add_trace(data_select,x = ~main_category, y = ~pledged, type = 'bar', name = 'pledge',
            text = data_select$pledged,
            marker = list(color = 'rgb(176, 176, 176)',
                          line = list(color = 'rgb(176, 176, 176)', width = 0.5))) %>%
  layout(title = "2009~2016 Category Goal and Pledge",
         barmode = 'group',
         xaxis = list(title = "Category"),
         yaxis = list(title = "US dollars"))
p
|
# Install required packages, then render the pilot report to HTML.
source("install.R")
rmarkdown::render('pilotReport.Rmd', output_file='pilotReport.html')
|
/render.R
|
no_license
|
amehtaSF/reproducibility_assignment
|
R
| false
| false
| 91
|
r
|
# Install required packages, then render the pilot report to HTML.
source("install.R")
rmarkdown::render('pilotReport.Rmd', output_file='pilotReport.html')
|
# initModal = function() {
# showModal(modalDialog(size = "l", footer = actionButton("dismissInitModal", "Ok!"),
# #title = "Welcome to GMSE-GAME!",
# includeMarkdown("introduction.Rmd")
# ))
# }
# Introduction walkthrough, page 1 of 6: "Next" advances; left-aligned
# button skips the whole introduction.
initModal1 <- function() {
  nav <- tagList(
    actionButton("toInit2", "Next", class = "butt"),
    actionButton("dismissInitModal", "Skip introduction", class = "butt", style = "float:left")
  )
  showModal(modalDialog(size = "l", footer = nav, includeMarkdown("introduction1.Rmd")))
}
# Introduction walkthrough, page 2 of 6.
initModal2 <- function() {
  nav <- tagList(
    actionButton("toInit1", "Previous", class = "butt"),
    actionButton("toInit3", "Next", class = "butt"),
    actionButton("dismissInitModal", "Skip introduction", class = "butt", style = "float:left")
  )
  showModal(modalDialog(size = "l", footer = nav, includeMarkdown("introduction2.Rmd")))
}
# Introduction walkthrough, page 3 of 6.
# NOTE(review): "Previous" here is id "backtoInit2" while initModal2 uses
# "toInit1" — confirm the server observes both naming schemes.
initModal3 <- function() {
  nav <- tagList(
    actionButton("backtoInit2", "Previous", class = "butt"),
    actionButton("toInit4", "Next", class = "butt"),
    actionButton("dismissInitModal", "Skip introduction", class = "butt", style = "float:left")
  )
  showModal(modalDialog(size = "l", footer = nav, includeMarkdown("introduction3.Rmd")))
}
# Introduction walkthrough, page 4 of 6.
initModal4 <- function() {
  nav <- tagList(
    actionButton("backtoInit3", "Previous", class = "butt"),
    actionButton("toInit5", "Next", class = "butt"),
    actionButton("dismissInitModal", "Skip introduction", class = "butt", style = "float:left")
  )
  showModal(modalDialog(size = "l", footer = nav, includeMarkdown("introduction4.Rmd")))
}
# Introduction walkthrough, page 5 of 6.
initModal5 <- function() {
  nav <- tagList(
    actionButton("backtoInit4", "Previous", class = "butt"),
    actionButton("toInit6", "Next", class = "butt"),
    actionButton("dismissInitModal", "Skip introduction", class = "butt", style = "float:left")
  )
  showModal(modalDialog(size = "l", footer = nav, includeMarkdown("introduction5.Rmd")))
}
# Introduction walkthrough, final page: the dismiss button is styled as the
# primary call to action.
initModal6 <- function() {
  nav <- tagList(
    actionButton("backtoInit5", "Previous", class = "butt"),
    actionButton("dismissInitModal", "Okay, go!", class = "butt", style = "color: #fff; background-color: #D35E60; font-weight: bold")
  )
  showModal(modalDialog(size = "l", footer = nav, includeMarkdown("introduction6.Rmd")))
}
# Show all five introduction pages in a single scrollable, dismissable modal.
allIntroModal <- function() {
  intro_pages <- lapply(paste0("introduction", 1:5, ".Rmd"), includeMarkdown)
  showModal(do.call(modalDialog, c(
    list(size = "l", footer = tagList(modalButton("Close")), easyClose = TRUE),
    intro_pages
  )))
}
# Consent and player-name dialog shown before a game starts.
#
# args:
#   playername: initial value for the player-name text input
setPlayerModal <- function(playername) {
  consent_section <- div(
    style = "padding-left: 10%; padding-right: 10%",
    includeMarkdown("consentText.Rmd")
  )
  consent_tick <- div(
    style = "padding-left: 10%; padding-right: 10%; font-weight: bold;",
    checkboxInput("consentAgree", "I consent to the above", value = FALSE, width = NULL)
  )
  name_header <- div(
    style = "padding-left: 10%; padding-right: 10%;",
    h3("What is your player name?")
  )
  name_entry <- div(
    style = "padding-left: 10%; padding-right: 10%; font-size: 0.75em",
    textInput("playerName", label = NULL, value = playername, width = NULL, placeholder = NULL),
    "",
    tags$div(tags$ul(
      tags$li(tags$span("Letters and numbers only please; and no spaces.")),
      tags$li(tags$span("We only ask for a nickname so you can keep track of your game scores on the leaderboard.")),
      tags$li(tags$span("Using your real name means we will record this and that you consent to us doing so; if you do not, please use a nickname."))
    ))
  )
  showModal(modalDialog(
    size = "l",
    footer = actionButton("confirmStart", "Go!"),
    consent_section,
    consent_tick,
    hr(),
    name_header,
    name_entry
  ))
}
# Confirmation dialog shown before resetting a game in progress.
confirmResetModal <- function() {
  buttons <- tagList(
    actionButton("cancelReset", "Cancel", class = "butt"),
    actionButton("confirmReset", "Yes, reset.", class = "butt")
  )
  showModal(modalDialog(
    size = "m",
    footer = buttons,
    title = span(style = "font-family: Courier New; font-weight: bold; color:darkred", "Are you sure you want to reset?"),
    span(style = "font-family: Courier New;", "Resetting the game means you go back to the start!")
  ))
}
# End-of-game dialog: the player reached the final management year.
finishedModal <- function() {
  showModal(modalDialog(
    size = "m",
    footer = actionButton("confirmFinished", "Ok", class = "butt"),
    title = span(style = "font-family: Courier New; font-weight: bold; color:darkred", "You've reached the final management year!"),
    span(style = "font-family: Courier New", "Well done, you have reached the maximum number of management years. The grazing animal population has not gone extinct.")
  ))
}
# End-of-game dialog: the managed population went extinct.
extinctionModal <- function() {
  showModal(modalDialog(
    size = "m",
    footer = actionButton("confirmExtinction", "Ok", class = "butt"),
    title = span(style = "font-family: Courier New; font-weight: bold; color:darkred", "Population extinct!"),
    span(style = "font-family: Courier New", "No resources left to manage!")
  ))
}
# Leaderboard dialog. `score_display` selects either one combined table
# ("total") or side-by-side animal/farming tables ("split"); any other value
# shows nothing. `total_scores` is the number of recorded scores, used to give
# the player's rank some context.
scoresModal <- function(score_display = "total", total_scores = NULL) {
  new_game <- tagList(actionButton("closeScores", "New Game", class = "butt"))
  # "You are <rank> out of <total>!" line; `colour` highlights the rank output.
  rank_line <- function(colour, output_id) {
    span(
      style = "font-size:1.25em; font-weight: bold;",
      "You are",
      span(style = paste0("font-size:1.5em; color:", colour, ";"), textOutput(output_id, inline = T)),
      "out of ",
      span(style = "font-size:1.5em; color:darkred;", total_scores), "!"
    )
  }
  if (score_display == "total") {
    showModal(modalDialog(
      size = "l", footer = new_game, easyClose = TRUE,
      title = span(style = "font-size:1.75em; font-family: Courier New; font-weight: bold;", "Top 10 High scores: Total"),
      rank_line("red", "rank_total"),
      dataTableOutput("highScores")
    ))
  }
  if (score_display == "split") {
    animal_col <- div(
      style = "display: inline-block;vertical-align:top; width: 400px;",
      span(style = "font-size:1.5em;align:center; text-align:center;", "Animal score"),
      p(),
      rank_line("red", "rank_res"),
      dataTableOutput("highScores_res")
    )
    spacer <- div(style = "display: inline-block;vertical-align:top; width: 50px;", " ")
    farm_col <- div(
      style = "display: inline-block;vertical-align:top; width: 400px;",
      span(style = "font-size:1.5em;align:center; text-align:center; ", "Farming score"),
      p(),
      rank_line("green", "rank_yld"),
      dataTableOutput("highScores_yld")
    )
    showModal(modalDialog(
      size = "l", footer = new_game, easyClose = TRUE,
      title = span(style = "font-size:2em; font-family: Courier New; font-weight: bold;", "Top 10 High scores"),
      animal_col,
      spacer,
      farm_col
    ))
  }
}
|
/app/infoDialogs.R
|
no_license
|
jejoenje/GMSEGAME
|
R
| false
| false
| 9,086
|
r
|
# initModal = function() {
# showModal(modalDialog(size = "l", footer = actionButton("dismissInitModal", "Ok!"),
# #title = "Welcome to GMSE-GAME!",
# includeMarkdown("introduction.Rmd")
# ))
# }
# Introduction walkthrough, page 1 of 6: "Next" advances; left-aligned
# button skips the whole introduction.
initModal1 <- function() {
  nav <- tagList(
    actionButton("toInit2", "Next", class = "butt"),
    actionButton("dismissInitModal", "Skip introduction", class = "butt", style = "float:left")
  )
  showModal(modalDialog(size = "l", footer = nav, includeMarkdown("introduction1.Rmd")))
}
# Introduction walkthrough, page 2 of 6.
initModal2 <- function() {
  nav <- tagList(
    actionButton("toInit1", "Previous", class = "butt"),
    actionButton("toInit3", "Next", class = "butt"),
    actionButton("dismissInitModal", "Skip introduction", class = "butt", style = "float:left")
  )
  showModal(modalDialog(size = "l", footer = nav, includeMarkdown("introduction2.Rmd")))
}
# Introduction walkthrough, page 3 of 6.
# NOTE(review): "Previous" here is id "backtoInit2" while initModal2 uses
# "toInit1" — confirm the server observes both naming schemes.
initModal3 <- function() {
  nav <- tagList(
    actionButton("backtoInit2", "Previous", class = "butt"),
    actionButton("toInit4", "Next", class = "butt"),
    actionButton("dismissInitModal", "Skip introduction", class = "butt", style = "float:left")
  )
  showModal(modalDialog(size = "l", footer = nav, includeMarkdown("introduction3.Rmd")))
}
# Introduction walkthrough, page 4 of 6.
initModal4 <- function() {
  nav <- tagList(
    actionButton("backtoInit3", "Previous", class = "butt"),
    actionButton("toInit5", "Next", class = "butt"),
    actionButton("dismissInitModal", "Skip introduction", class = "butt", style = "float:left")
  )
  showModal(modalDialog(size = "l", footer = nav, includeMarkdown("introduction4.Rmd")))
}
# Introduction walkthrough, page 5 of 6.
initModal5 <- function() {
  nav <- tagList(
    actionButton("backtoInit4", "Previous", class = "butt"),
    actionButton("toInit6", "Next", class = "butt"),
    actionButton("dismissInitModal", "Skip introduction", class = "butt", style = "float:left")
  )
  showModal(modalDialog(size = "l", footer = nav, includeMarkdown("introduction5.Rmd")))
}
# Introduction walkthrough, final page: the dismiss button is styled as the
# primary call to action.
initModal6 <- function() {
  nav <- tagList(
    actionButton("backtoInit5", "Previous", class = "butt"),
    actionButton("dismissInitModal", "Okay, go!", class = "butt", style = "color: #fff; background-color: #D35E60; font-weight: bold")
  )
  showModal(modalDialog(size = "l", footer = nav, includeMarkdown("introduction6.Rmd")))
}
# Show all five introduction pages in a single scrollable, dismissable modal.
allIntroModal <- function() {
  intro_pages <- lapply(paste0("introduction", 1:5, ".Rmd"), includeMarkdown)
  showModal(do.call(modalDialog, c(
    list(size = "l", footer = tagList(modalButton("Close")), easyClose = TRUE),
    intro_pages
  )))
}
# Consent and player-name dialog shown before a game starts.
#
# args:
#   playername: initial value for the player-name text input
setPlayerModal <- function(playername) {
  consent_section <- div(
    style = "padding-left: 10%; padding-right: 10%",
    includeMarkdown("consentText.Rmd")
  )
  consent_tick <- div(
    style = "padding-left: 10%; padding-right: 10%; font-weight: bold;",
    checkboxInput("consentAgree", "I consent to the above", value = FALSE, width = NULL)
  )
  name_header <- div(
    style = "padding-left: 10%; padding-right: 10%;",
    h3("What is your player name?")
  )
  name_entry <- div(
    style = "padding-left: 10%; padding-right: 10%; font-size: 0.75em",
    textInput("playerName", label = NULL, value = playername, width = NULL, placeholder = NULL),
    "",
    tags$div(tags$ul(
      tags$li(tags$span("Letters and numbers only please; and no spaces.")),
      tags$li(tags$span("We only ask for a nickname so you can keep track of your game scores on the leaderboard.")),
      tags$li(tags$span("Using your real name means we will record this and that you consent to us doing so; if you do not, please use a nickname."))
    ))
  )
  showModal(modalDialog(
    size = "l",
    footer = actionButton("confirmStart", "Go!"),
    consent_section,
    consent_tick,
    hr(),
    name_header,
    name_entry
  ))
}
# Confirmation dialog shown before resetting a game in progress.
confirmResetModal <- function() {
  buttons <- tagList(
    actionButton("cancelReset", "Cancel", class = "butt"),
    actionButton("confirmReset", "Yes, reset.", class = "butt")
  )
  showModal(modalDialog(
    size = "m",
    footer = buttons,
    title = span(style = "font-family: Courier New; font-weight: bold; color:darkred", "Are you sure you want to reset?"),
    span(style = "font-family: Courier New;", "Resetting the game means you go back to the start!")
  ))
}
# End-of-game dialog: the player reached the final management year.
finishedModal <- function() {
  showModal(modalDialog(
    size = "m",
    footer = actionButton("confirmFinished", "Ok", class = "butt"),
    title = span(style = "font-family: Courier New; font-weight: bold; color:darkred", "You've reached the final management year!"),
    span(style = "font-family: Courier New", "Well done, you have reached the maximum number of management years. The grazing animal population has not gone extinct.")
  ))
}
# End-of-game dialog: the managed population went extinct.
extinctionModal <- function() {
  showModal(modalDialog(
    size = "m",
    footer = actionButton("confirmExtinction", "Ok", class = "butt"),
    title = span(style = "font-family: Courier New; font-weight: bold; color:darkred", "Population extinct!"),
    span(style = "font-family: Courier New", "No resources left to manage!")
  ))
}
# Leaderboard dialog. `score_display` selects either one combined table
# ("total") or side-by-side animal/farming tables ("split"); any other value
# shows nothing. `total_scores` is the number of recorded scores, used to give
# the player's rank some context.
scoresModal <- function(score_display = "total", total_scores = NULL) {
  new_game <- tagList(actionButton("closeScores", "New Game", class = "butt"))
  # "You are <rank> out of <total>!" line; `colour` highlights the rank output.
  rank_line <- function(colour, output_id) {
    span(
      style = "font-size:1.25em; font-weight: bold;",
      "You are",
      span(style = paste0("font-size:1.5em; color:", colour, ";"), textOutput(output_id, inline = T)),
      "out of ",
      span(style = "font-size:1.5em; color:darkred;", total_scores), "!"
    )
  }
  if (score_display == "total") {
    showModal(modalDialog(
      size = "l", footer = new_game, easyClose = TRUE,
      title = span(style = "font-size:1.75em; font-family: Courier New; font-weight: bold;", "Top 10 High scores: Total"),
      rank_line("red", "rank_total"),
      dataTableOutput("highScores")
    ))
  }
  if (score_display == "split") {
    animal_col <- div(
      style = "display: inline-block;vertical-align:top; width: 400px;",
      span(style = "font-size:1.5em;align:center; text-align:center;", "Animal score"),
      p(),
      rank_line("red", "rank_res"),
      dataTableOutput("highScores_res")
    )
    spacer <- div(style = "display: inline-block;vertical-align:top; width: 50px;", " ")
    farm_col <- div(
      style = "display: inline-block;vertical-align:top; width: 400px;",
      span(style = "font-size:1.5em;align:center; text-align:center; ", "Farming score"),
      p(),
      rank_line("green", "rank_yld"),
      dataTableOutput("highScores_yld")
    )
    showModal(modalDialog(
      size = "l", footer = new_game, easyClose = TRUE,
      title = span(style = "font-size:2em; font-family: Courier New; font-weight: bold;", "Top 10 High scores"),
      animal_col,
      spacer,
      farm_col
    ))
  }
}
|
# Recreate the four-panel summary plot (after Murphy): (1) annual concentration
# or flux with flow-normalized trend lines, (2) annual discharge statistics
# with linear trends, (3) concentration-discharge relationships by
# season/decade, and (4) discharge CDFs by season/decade. Saves the combined
# figure to `file_name`.
#
# Args:
#   gen:       eList with the general (time-varying) WRTDS fit.
#   fixed:     eList with the stationary (MTC) fit.
#   conc:      TRUE to plot concentration in panel 1, FALSE for flux.
#   file_name: output path passed to ggsave().
plot_four_panels <- function(gen, fixed, conc = TRUE, file_name) {
  #gen <- make('tp_wy_out', remake_file = '30_analyze_data_series.yml')
  #fixed <- make('tp_wy_out_stationary', remake_file = '30_analyze_data_series.yml')
  gen_annual <- tableResults(gen) %>%
    mutate(trend_type = 'Water quality trend')
  fix_annual <- tableResults(fixed) %>%
    mutate(trend_type = 'MTC')
  annuals <- bind_rows(gen_annual, fix_annual)
  # Label each row with a season/decade period; rows outside the four periods
  # are dropped. Shared by panels 3 and 4 (was duplicated inline).
  label_period <- function(df) {
    df %>%
      mutate(month = lubridate::month(Date)) %>%
      mutate(period = case_when(month %in% c(2:4) & waterYear <= 1999 ~ 'Feb-Apr 1990-1999',
                                month %in% c(2:4) & waterYear >= 2009 ~ 'Feb-Apr 2009-2018',
                                month %in% c(5:7) & waterYear <= 1999 ~ 'May-Jul 1990-1999',
                                # BUG FIX: was `waterYear > 2009`, which dropped
                                # water year 2009 despite the "2009-2018" label
                                # (the Feb-Apr branch uses >= 2009).
                                month %in% c(5:7) & waterYear >= 2009 ~ 'May-Jul 2009-2018')) %>%
      filter(!is.na(period))
  }
  if (conc == TRUE) {
    p1 <- # first plot annual estimates with lines
      ggplot(annuals, aes(x = Year, y = `FN Conc [mg/L]`)) +
      geom_line(aes(group = trend_type, color = trend_type)) +
      scale_color_manual(values = c('yellow3', 'purple4')) +
      geom_point(dat = gen_annual, aes(x = Year, y = `Conc [mg/L]`)) +
      labs(y = 'Conc [mg/L]', color = '', x = 'Water year') +
      theme_bw() +
      guides(color = guide_legend(nrow = 1))
  } else {
    # first plot annual estimates with lines
    p1 <- ggplot(annuals, aes(x = Year, y = `FN Flux [10^6kg/yr]`)) +
      geom_line(aes(group = trend_type, color = trend_type)) +
      scale_color_manual(values = c('yellow3', 'purple4')) +
      geom_point(dat = gen_annual, aes(x = Year, y = `Flux [10^6kg/yr]`)) +
      labs(y = 'Flux [10^6kg/yr]', color = '', x = 'Water year') +
      theme_bw() +
      guides(color = guide_legend(nrow = 1))
  }
  # plot annual Q statistics (10th percentile, median, max) with linear trends
  annual_Q <- gen$Daily %>%
    group_by(waterYear) %>%
    summarize(`10th percentile` = quantile(Q, probs = 0.1),
              `max day` = max(Q),
              `median` = median(Q)) %>%
    gather(key = 'variable', value = 'value', -waterYear)
  p2 <- ggplot(annual_Q, aes(x = waterYear, y = value)) +
    geom_point(aes(color = variable)) +
    geom_smooth(method = 'lm', se = FALSE, aes(group = variable, color = variable)) +
    scale_y_log10() +
    theme_bw() +
    labs(y = "Q [cms]", x = 'Water year', color = '') +
    guides(color = guide_legend(nrow = 1))
  # conc vs discharge for each season/decade period (log-log, linear fits)
  conc_q_period <- label_period(gen$Sample)
  p3 <- ggplot(conc_q_period, aes(x = Q, y = ConcAve)) +
    geom_point(color = 'darkgray', alpha = 0.5, show.legend = FALSE) +
    geom_smooth(se = FALSE, method = 'lm', aes(color = period, group= period), size = 1, show.legend = FALSE) +
    scale_x_log10() +
    scale_y_log10() +
    scale_color_manual(values = c('skyblue2', 'skyblue4', 'tomato', 'tomato4')) +
    theme_bw() +
    labs(y = 'Conc [mg/L]', x = 'Q [cms]')
  # cdf of daily Q per season/decade period
  q_period <- label_period(gen$Daily)
  p4 <- ggplot(q_period, aes(x = Q, color = period, group = period)) +
    stat_ecdf(geom = 'line', size = 1) +
    scale_x_log10() +
    scale_color_manual(values = c('skyblue2', 'skyblue4', 'tomato', 'tomato4')) +
    theme_bw() +
    labs(y = 'Cumulative probability', color = '', x = 'Q [cms]') +
    guides(color = guide_legend(nrow = 2))
  # put it all together: panels on top, shared legends underneath
  p1b <- p1 + theme(legend.position = 'none')
  p2b <- p2 + theme(legend.position = 'none')
  p3b <- p3 + theme(legend.position = 'none')
  p4b <- p4 + theme(legend.position = 'none')
  ptop <- plot_grid(p1b, p2b, p3b, p4b, align = 'h', ncol = 4, rel_widths = c(1.3, 1.3,1,1))
  pbottom <- plot_grid(get_legend(p1), get_legend(p2), get_legend(p4), ncol = 3, rel_widths = c(1.3, 1.3, 2))
  pall <- plot_grid(ptop, pbottom, align = 'v', ncol = 1, rel_heights = c(1, 0.2))
  ggsave(file_name, pall, height = 4, width = 12)
}
# recreate four-panel plot from murphy
|
/30_analyze_data/code/create_four_panel_summary.R
|
no_license
|
limnoliver/yahara-trends
|
R
| false
| false
| 4,395
|
r
|
plot_four_panels <- function(gen, fixed, conc = TRUE, file_name) {
  # Recreate the four-panel trend summary figure (after Murphy):
  #   p1: annual flow-normalized concentration (or flux) from the
  #       generalized WRTDS model vs. the stationary-flow-normalization
  #       ("MTC") model, with annual estimates shown as points
  #   p2: annual discharge statistics (10th percentile, median, max day)
  #   p3: concentration-discharge relationships by season and decade
  #   p4: cumulative distribution of daily discharge by season and decade
  #
  # Args:
  #   gen       EGRET eList holding generalized (non-stationary) results
  #   fixed     EGRET eList holding stationary ("MTC") results
  #   conc      TRUE to plot concentration in panel 1, FALSE for flux
  #   file_name output path passed to ggsave()
  #
  # Side effect: writes the combined 4-panel figure to file_name.
  #gen <- make('tp_wy_out', remake_file = '30_analyze_data_series.yml')
  #fixed <- make('tp_wy_out_stationary', remake_file = '30_analyze_data_series.yml')

  # Tag records with a season x decade period; rows outside the four
  # periods of interest come back NA and are dropped. Shared by p3 and p4
  # (the original repeated this case_when verbatim for Sample and Daily).
  assign_period <- function(df) {
    df %>%
      mutate(month = lubridate::month(Date)) %>%
      mutate(period = case_when(
        month %in% 2:4 & waterYear <= 1999 ~ 'Feb-Apr 1990-1999',
        month %in% 2:4 & waterYear >= 2009 ~ 'Feb-Apr 2009-2018',
        month %in% 5:7 & waterYear <= 1999 ~ 'May-Jul 1990-1999',
        # was `> 2009`, inconsistent with the `>= 2009` used for Feb-Apr;
        # use >= so water year 2009 is included in both, matching the
        # "2009-2018" label
        month %in% 5:7 & waterYear >= 2009 ~ 'May-Jul 2009-2018')) %>%
      filter(!is.na(period))
  }

  gen_annual <- tableResults(gen) %>%
    mutate(trend_type = 'Water quality trend')
  fix_annual <- tableResults(fixed) %>%
    mutate(trend_type = 'MTC')
  annuals <- bind_rows(gen_annual, fix_annual)

  # Panel 1: flow-normalized annual series (lines) + annual estimates
  # (points). Column names come from EGRET::tableResults(); selecting
  # them by string via .data[[ ]] (ggplot2 >= 3.0) collapses the two
  # near-identical conc/flux branches of the original.
  if (isTRUE(conc)) {
    fn_col <- 'FN Conc [mg/L]'
    est_col <- 'Conc [mg/L]'
  } else {
    fn_col <- 'FN Flux [10^6kg/yr]'
    est_col <- 'Flux [10^6kg/yr]'
  }
  p1 <- ggplot(annuals, aes(x = Year, y = .data[[fn_col]])) +
    geom_line(aes(group = trend_type, color = trend_type)) +
    scale_color_manual(values = c('yellow3', 'purple4')) +
    # was `dat =`, which only worked through partial argument matching
    geom_point(data = gen_annual, aes(x = Year, y = .data[[est_col]])) +
    labs(y = est_col, color = '', x = 'Water year') +
    theme_bw() +
    guides(color = guide_legend(nrow = 1))

  # Panel 2: annual flow statistics with linear trend lines, log y axis.
  annual_Q <- gen$Daily %>%
    group_by(waterYear) %>%
    summarize(`10th percentile` = quantile(Q, probs = 0.1),
              `max day` = max(Q),
              `median` = median(Q)) %>%
    gather(key = 'variable', value = 'value', -waterYear)
  p2 <- ggplot(annual_Q, aes(x = waterYear, y = value)) +
    geom_point(aes(color = variable)) +
    geom_smooth(method = 'lm', se = FALSE,
                aes(group = variable, color = variable)) +
    scale_y_log10() +
    theme_bw() +
    labs(y = "Q [cms]", x = 'Water year', color = '') +
    guides(color = guide_legend(nrow = 1))

  # One palette for both period-colored panels.
  period_colors <- c('skyblue2', 'skyblue4', 'tomato', 'tomato4')

  # Panel 3: C-Q relationships by period (log-log axes, lm fits).
  conc_q_period <- assign_period(gen$Sample)
  p3 <- ggplot(conc_q_period, aes(x = Q, y = ConcAve)) +
    geom_point(color = 'darkgray', alpha = 0.5, show.legend = FALSE) +
    geom_smooth(se = FALSE, method = 'lm',
                aes(color = period, group = period),
                size = 1, show.legend = FALSE) +
    scale_x_log10() +
    scale_y_log10() +
    scale_color_manual(values = period_colors) +
    theme_bw() +
    labs(y = 'Conc [mg/L]', x = 'Q [cms]')

  # Panel 4: empirical CDF of daily Q by period.
  q_period <- assign_period(gen$Daily)
  p4 <- ggplot(q_period, aes(x = Q, color = period, group = period)) +
    stat_ecdf(geom = 'line', size = 1) +
    scale_x_log10() +
    scale_color_manual(values = period_colors) +
    theme_bw() +
    labs(y = 'Cumulative probability', color = '', x = 'Q [cms]') +
    guides(color = guide_legend(nrow = 2))

  # Assemble: legend-free panels on top, shared legends underneath
  # (p3 shares p4's legend, so only three legends are shown).
  strip_legend <- function(p) p + theme(legend.position = 'none')
  ptop <- plot_grid(strip_legend(p1), strip_legend(p2),
                    strip_legend(p3), strip_legend(p4),
                    align = 'h', ncol = 4, rel_widths = c(1.3, 1.3, 1, 1))
  pbottom <- plot_grid(get_legend(p1), get_legend(p2), get_legend(p4),
                       ncol = 3, rel_widths = c(1.3, 1.3, 2))
  pall <- plot_grid(ptop, pbottom, align = 'v', ncol = 1,
                    rel_heights = c(1, 0.2))
  ggsave(file_name, pall, height = 4, width = 12)
}
# recreate four-panel plot from murphy
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/SeSAMeStr_utils.R
\name{plot_inferedstrains}
\alias{plot_inferedstrains}
\title{Model Inferred Strains}
\usage{
plot_inferedstrains(sdf, out_dir)
}
\arguments{
\item{out_dir}{Path to output directory}
\item{sdf}{list of data frames of intensity values for each replicate; output of get_sdfs}
}
\value{
Plots probabilities of inferred strains to /QC subdir
}
\description{
Plotting the inferred mouse strain for each replicate
}
|
/man/plot_inferedstrains.Rd
|
no_license
|
Stefanos-Apostle/SeSAMeStr
|
R
| false
| true
| 505
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/SeSAMeStr_utils.R
\name{plot_inferedstrains}
\alias{plot_inferedstrains}
\title{Model Inferred Strains}
\usage{
plot_inferedstrains(sdf, out_dir)
}
\arguments{
\item{out_dir}{Path to output directory}
\item{sdf}{list of data frames of intensity values for each replicate; output of get_sdfs}
}
\value{
Plots probabilities of inferred strains to /QC subdir
}
\description{
Plotting the inferred mouse strain for each replicate
}
|
#' ---
#' jupyter:
#' jupytext_format_version: '1.0'
#' jupytext_formats: ipynb,Rmd,R:spin
#' kernelspec:
#' display_name: R
#' language: R
#' name: ir
#' language_info:
#' codemirror_mode: r
#' file_extension: .r
#' mimetype: text/x-r-source
#' name: R
#' pygments_lexer: r
#' version: 3.5.1
#' toc:
#' base_numbering: 1
#' nav_menu: {}
#' number_sections: true
#' sideBar: true
#' skip_h1_title: false
#' title_cell: Table of Contents
#' title_sidebar: Contents
#' toc_cell: false
#' toc_position: {}
#' toc_section_display: true
#' toc_window_display: false
#' ---
#' ---
#' title: "R Data Wrangling"
#' output:
#' html_document:
#' highlight: tango
#' toc: true
#' toc_float:
#' collapsed: true
#' ---
#'
#'
#'
#' Workshop description
#' ====================
#'
#' Data scientists are known and celebrated for modeling and visually
#' displaying information, but down in the data science engine room there
#' is a lot of less glamorous work to be done. Before data can be used
#' effectively it must often be cleaned, corrected, and reformatted. This
#' workshop introduces the basic tools needed to make your data behave,
#' including data reshaping, regular expressions and other text
#' manipulation tools.
#'
#' This is an intermediate/advanced R course appropriate for those with
#' basic knowledge of R. If you need a refresher we recommend
#' [the IQSS R intro](http://tutorials.iq.harvard.edu/R/Rintro/Rintro.html).
#'
#' The lesson notes are available at
#' <http://tutorials.iq.harvard.edu/R/RDataManagement/RDataManagement.html>
#'
#' Prerequisites and Preparation
#' =============================
#'
#' Prior to the workshop you should:
#'
#' - install R from <https://cran.r-project.org/>
#' - install RStudio from <https://www.rstudio.com/products/rstudio/download/#download>
#' - install the tidyverse package in R with `install.packages("tidyverse")` and attach it with `library(tidyverse)`
#'
#' To start the workshop:
#' - Download and extract the materials from <http://tutorials.iq.harvard.edu/R/RDataManagement.zip>.
#' - Open Rstudio and create a new project (`File ==> New Project ==> Existing Directory` and select the `RDataManagement` folder you downloaded and extracted earlier.
#' - Open a new R script (`File ==> New File ==> R script`) and save it as `Notes.R`.
#'
#'
#' Example project overview
#' ========================
#'
#' It is common for data to be made available on a website somewhere, either by a
#' government agency, research group, or other organizations and entities. Often
#' the data you want is spread over many files, and retrieving it all one file at a
#' time is tedious and time consuming. Such is the case with the baby names data we
#' will be using today.
#'
#' The UK [Office for National Statistics](https://www.ons.gov.uk) provides yearly
#' data on the most popular baby names going back to 1996. The data is provided
#' separately for boys and girls and is stored in Excel spreadsheets.
#'
#' I have downloaded all the excel files containing boys names data from
#' https://www.ons.gov.uk/peoplepopulationandcommunity/birthsdeathsandmarriages/livebirths/datasets/babynamesenglandandwalesbabynamesstatisticsboys
#' and made them available at
#' http://tutorials.iq.harvard.edu/R/RDataManagement/data/boysNames.zip.
#'
#' Our mission is to extract and graph the **top 100** boys names in England
#' and Wales for every year since 1996. There are several things that
#' make this challenging.
#'
#' Problems with the data
#' ======================
#'
#' While it was good of the UK Office for National Statistics to provide
#' baby name data, they were not very diligent about arranging it in a
#' convenient or consistent format.
#'
#' Exercise 0
#' ----------
#' Our mission is to extract and graph the **top 100** boys names in England and Wales for every year since 1996. There are several things that make this challenging.
#'
#' 1. Locate the file named `1996boys_tcm77-254026.xlsx` and open it in
#' a spreadsheet. (If you don't have a spreadsheet program installed on
#' your computer you can download one from
#' https://www.libreoffice.org/download/download/). What issues can you
#' identify that might make working with these data more difficult?
#'
#' 2. Locate the file named `2015boysnamesfinal.xlsx` and open it in a
#' spreadsheet. In what ways is the format different than the format
#' of `1996boys_tcm77-254026.xlsx`? How might these differences make
#' it more difficult to work with these data?
#'
#' Exercise 0 Prototype
#' -------------------------------
#'
#' > 1. Locate the file named `1996boys_tcm77-254026.xlsx` and open it in
#' > a spreadsheet. (If you don't have a spreadsheet program installed on
#' > your computer you can download one from
#' > https://www.libreoffice.org/download/download/). What issues can you
#' > identify that might make working with these data more difficult?
#'
#' The data does not start on row one. Headers are on row 7, followed by
#' a blank line, followed by the actual data.
#'
#' The data is stored in an inconvenient way, with ranks 1-50 in the
#' first set of columns and ranks 51-100 in a separate set of columns.
#'
#' There are notes below the data.
#'
#' > 2. Locate the file named `2015boysnamesfinal.xlsx` and open it in a
#' > spreadsheet. In what ways is the format different than the format
#' > of `1996boys_tcm77-254026.xlsx`? How might these differences make
#' > it more difficult to work with these data?
#'
#' The worksheet containing the data of interest is in different
#' positions and has different names from one year to the next. However,
#' it always includes "Table 1" in the worksheet name.
#'
#' Some years include columns for "changes in rank", others do not.
#'
#' These differences will make it more difficult to automate
#' re-arranging the data since we have to write code that can handle
#' different input formats.
#'
#' Useful data manipulation packages
#' =================================
#'
#' As you can see, the data is in quite a messy state. Note that this is
#' not a contrived example; this is exactly the way the data came to us
#' from the UK government website! Let's start cleaning and organizing
#' it. The `tidyverse` suite of packages provides many modern
#' conveniences that will make this job easier.
library(tidyverse)
#' Working with Excel worksheets
#' =============================
#'
#' Each Excel file contains a worksheet with the baby names data we want.
#' Each file also contains additional supplemental worksheets that we are
#' not currently interested in. As noted above, the worksheet of interest
#' differs from year to year, but always has "Table 1" in the sheet name.
#'
#' The first step is to get a vector of file names.
boy.file.names <- list.files("data/boys", full.names = TRUE)
#' Now that we've told R the names of the data files we can start working
#' with them. For example, the first file is
boy.file.names[[1]]
#' and we can use the `excel_sheets` function from the *readxl* package
#' to list the worksheet names from this file.
library(readxl)
excel_sheets(boy.file.names[[1]])
#' Iterating over file names with `map`
#' -----------------------------------
#'
#' Now that we know how to retrieve the names of the worksheets in an
#' Excel file we could start writing code to extract the sheet names from
#' each file, e.g.,
excel_sheets(boy.file.names[[1]])
excel_sheets(boy.file.names[[2]])
## ...
excel_sheets(boy.file.names[[20]])
#' This is not a terrible idea for a small number of files, but it is
#' more convenient to let R do the iteration for us. We could use a `for`
#' loop, or `sapply`, but the `map` family of functions from the *purrr*
#' package gives us a more consistent alternative, so we'll use that.
library(purrr)
map(boy.file.names, excel_sheets)
#' Filtering strings using regular expressions
#' --------------------------------------------
#'
#' In order to extract the correct worksheet names we need a way to extract
#' strings containing "Table 1". Base R provides some string manipulation
#' capabilities (see `?regex`, `?sub` and `?grep`), but we will use the
#' *stringr* package because it is more user-friendly.
#'
#' The *stringr* package provides functions to *detect*, *locate*,
#' *extract*, *match*, *replace*, *combine* and *split* strings (among
#' other things).
#'
#' Here we want to detect the pattern "Table 1", and only
#' return elements with this pattern. We can do that using the
#' `str_subset` function. The first argument to `str_subset` is character
#' vector we want to search in. The second argument is a *regular
#' expression* matching the pattern we want to retain.
#'
#' If you are not familiar with regular expressions, <http://www.regexr.com/> is a
#' good place to start.
#'
#' Now that we know how to filter character vectors using `str_subset` we can
#' identify the correct sheet in a particular Excel file. For example,
library(stringr)
str_subset(excel_sheets(boy.file.names[[1]]), "Table 1")
#' Writing your own functions
#' --------------------------
#'
#' The `map*` functions are useful when you want to apply a function to a
#' list or vector of inputs and obtain the return values. This is very
#' convenient when a function already exists that does exactly what you
#' want. In the examples above we mapped the `excel_sheets` function to
#' the elements of a vector containing file names. But now there is no
#' function that both retrieves worksheet names and subsets them.
#' Fortunately, writing functions in R is easy.
get.data.sheet.name <- function(file, pattern) {
  ## Return the worksheet name(s) in `file` whose name matches the
  ## regular expression `pattern`.
  sheet.names <- excel_sheets(file)
  sheet.names[str_detect(sheet.names, pattern)]
}
#' Now we can map this new function over our vector of file names.
map(boy.file.names,
get.data.sheet.name,
pattern = "Table 1")
#' Reading Excel data files
#' ========================
#'
#' Now that we know the correct worksheet from each file we can actually
#' read those data into R. We can do that using the `read_excel`
#' function.
#'
#' We'll start by reading the data from the first file, just to check
#' that it works. Recall that the actual data starts on row 7, so we want
#' to skip the first 6 rows.
tmp <- read_excel(
boy.file.names[1],
sheet = get.data.sheet.name(boy.file.names[1],
pattern = "Table 1"),
skip = 6)
library(dplyr, quietly=TRUE)
glimpse(tmp)
#' Exercise 1
#' -----------
#'
#' 1. Write a function that takes a file name as an argument and reads
#' the worksheet containing "Table 1" from that file. Don't forget
#' to skip the first 6 rows.
#'
#' 2. Test your function by using it to read *one* of the boys names
#' Excel files.
#'
#' 3. Use the `map` function to read data from all the Excel files,
#' using the function you wrote in step 1.
#'
#'
#' Exercise 1 solution
#' ---------------------------
## 1. Write a function that takes a file name as an argument and reads
## the worksheet containing "Table 1" from that file.
read.baby.names <- function(file) {
  ## Read the boys-names worksheet (the one whose name contains
  ## "Table 1") from `file`; the data headers sit on row 7, so the
  ## first 6 rows are skipped.
  all.sheets <- excel_sheets(file)
  data.sheet <- all.sheets[str_detect(all.sheets, "Table 1")]
  read_excel(file, sheet = data.sheet, skip = 6)
}
## 2. Test your function by using it to read *one* of the boys names
## Excel files.
glimpse(read.baby.names(boy.file.names[1]))
## 3. Use the `map` function to read data from all the Excel files,
## using the function you wrote in step 1.
boysNames <- map(boy.file.names, read.baby.names)
#' Data cleanup
#' ================
#'
#' Now that we've read in the data we still have some cleanup to do.
#' Specifically, we need to:
#'
#' 1. fix column names
#' 2. get rid of blank row and the top and the notes at the bottom
#' 3. get rid of extraneous "changes in rank" columns if they exist
#' 4. transform the side-by-side tables layout to a single table.
#'
#' In short, we want to go from this:
#'
#' 
#'
#' to this:
#'
#' 
#'
#' There are many ways to do this kind of data manipulation in R. We're
#' going to use the *dplyr* and *tidyr* packages to make our lives
#' easier. (Both packages were installed as dependencies of the
#' *tidyverse* package.)
#'
#' Selecting columns
#' ---------------
#'
#' Next we want to retain just the `Name`, `Name__1` and `Count`,
#' `Count__1` columns. We can do that using the `select` function:
boysNames[[1]]
boysNames[[1]] <- select(boysNames[[1]], Name, Name__1, Count, Count__1)
boysNames[[1]]
#' Dropping missing values
#' -----------------------
#'
#' Next we want to remove blank rows and rows used for notes. An easy way
#' to do that is to use `drop_na` to remove rows with missing values.
boysNames[[1]]
boysNames[[1]] <- drop_na(boysNames[[1]])
boysNames[[1]]
#' Finally, we will want to perform this same cleanup for all the elements
#' in `boysNames`, a task I leave to you.
#'
#' Exercise 2
#' -----------
#'
#' 1. Write a function that takes a `data.frame` as an argument and
#' returns a modified version including only columns named "Name",
#' "Name\_\_1", "Count", or "Count\_\_1".
#'
#' 2. Test your function by using it to read *one* of the boys names
#' Excel files.
#'
#' 3. Use the `map` function to read data from all the Excel files,
#' using the function you wrote in step 1.
#'
#'
#' Exercise 2 solution<span class="tag" data-tag-name="prototype"></span>
#'
#'
#'
#'
#' Re-arranging into a single table
#' -----------------------------------------
#'
#' Our final task is to re-arrange to data so that it is all in a single
#' table instead of in two side-by-side tables. For many similar tasks
#' the `gather` function in the *tidyr* package is useful, but in this
#' case we will be better off using a combination of `select` and
#' `bind_rows`.
boysNames[[1]]
bind_rows(select(boysNames[[1]], Name, Count),
select(boysNames[[1]], Name = Name__1, Count = Count__1))
#' Exercise 3: Cleanup all the data
#' ------------------------------
#'
#' In the previous examples we learned how to drop empty rows with
#' `filter`, select only relevant columns with `select`, and re-arrange
#' our data with `select` and `bind_rows`. In each case we applied the
#' changes only to the first element of our `boysNames` list.
#'
#' Your task now is to use the `map` function to apply each of these
#' transformations to all the elements in `boysNames`.
#'
#' Exercise prototype
#' ------------------
#' There are different ways you can go about it. Here is one:
#'
## write a function that does all the cleanup
cleanupNamesData <- function(x) {
  ## Drop note/blank rows (no Name value), keep only the name and count
  ## columns, and stack the two side-by-side rank tables into one long
  ## two-column table.
  kept <- x %>%
    filter(!is.na(Name)) %>%
    select(Name, Count, Name__1, Count__1)
  first.half <- select(kept, Name, Count)
  second.half <- select(kept, Name = Name__1, Count = Count__1)
  bind_rows(first.half, second.half)
}
## test it out on the second data.frame in the list
glimpse(boysNames[[2]]) # before cleanup
glimpse(cleanupNamesData(boysNames[[2]])) # after cleanup
## apply the cleanup function to all the data.frames in the list
boysNames <- map(boysNames, cleanupNamesData)
#' Data organization and storage
#' ========================
#' Now that we have the data cleaned up and augmented, we can turn our attention to organizing and storing the data.
#'
#' One table for each year
#' ----------------------
#' Right now we have a list of tables, one for each year. This is not a bad way to go. It has the advantage of making it easy to work with individual years; it has the disadvantage of making it more difficult to examine questions that require data from multiple years. To make the arrangement of the data clearer it helps to name each element of the list with the year it corresponds too.
glimpse(head(boysNames))
years <- str_extract(boy.file.names, "[0-9]{4}")
boysNames <- setNames(boysNames, years)
glimpse(head(boysNames))
#' One big table
#' -------------
#'
#' While storing the data in separate tables by year makes some sense,
#' many operations will be easier if the data is simply stored in one big
#' table. We've already seen how to turn a list of data.frames into a
#' single data.frame using `bind_rows`, but there is a problem; The year
#' information is stored in the names of the list elements, and so
#' flattening the tables into one will result in losing the year
#' information! Fortunately it is not too much trouble to add the year
#' information to each table before flattening.
boysNames <- imap(boysNames,
function(data, name) {
mutate(data, Year = as.integer(name))
})
boysNames <- bind_rows(boysNames)
glimpse(boysNames)
#' Exercise: Make one big table
#' -------------------------
#'
#' Turn the list of boys names data.frames into a single table.
#'
#' Create a directory under `data/all` and write the data to a `.csv`
#' file.
#'
#'
#' Finally, repeat the previous exercise, this time working with the data
#' in one big table.
#'
#' Exercise prototype
#' ------------------
#'
#' Working with the data in one big table is often easier.
boysNames <- bind_rows(boysNames)
dir.create("data/all")
write_csv(boysNames, "data/all/boys_names.csv")
## What where the five most popular names in 2013?
slice(arrange(filter(boysNames, Year == 2013),
desc(Count)),
1:5)
## How has the popularity of the name "ANDREW" changed over time?
andrew <- filter(boysNames, Name == "ANDREW")
ggplot(andrew, aes(x = Year, y = Count)) +
geom_line() +
ggtitle("Popularity of \"Andrew\", over time")
#' Additional reading and resources
#' ================================
#'
#' - Learn from the best: <http://adv-r.had.co.nz/>;
#' <http://r4ds.had.co.nz/>
#' - R documentation: <http://cran.r-project.org/manuals.html>
#' - Collection of R tutorials:
#' <http://cran.r-project.org/other-docs.html>
#'
#' - R for Programmers (by Norman Matloff, UC--Davis)
#'
#' <http://heather.cs.ucdavis.edu/~matloff/R/RProg.pdf>
#'
#' - Calling C and Fortran from R (by Charles Geyer, UMinn)
#'
#' <http://www.stat.umn.edu/~charlie/rc/>
#'
#' - State of the Art in Parallel Computing with R (Schmidberger et al.)
#'
#' <http://www.jstatsoft.org/v31/i01/paper>
#'
#' - Institute for Quantitative Social Science: <http://iq.harvard.edu>
#' - IQSS Data Science Services: <http://dss.iq.harvard.edu/>
|
/R/RDataWrangling/RDataWrangling.R
|
permissive
|
hbs-rcs/workshops
|
R
| false
| false
| 19,114
|
r
|
#' ---
#' jupyter:
#' jupytext_format_version: '1.0'
#' jupytext_formats: ipynb,Rmd,R:spin
#' kernelspec:
#' display_name: R
#' language: R
#' name: ir
#' language_info:
#' codemirror_mode: r
#' file_extension: .r
#' mimetype: text/x-r-source
#' name: R
#' pygments_lexer: r
#' version: 3.5.1
#' toc:
#' base_numbering: 1
#' nav_menu: {}
#' number_sections: true
#' sideBar: true
#' skip_h1_title: false
#' title_cell: Table of Contents
#' title_sidebar: Contents
#' toc_cell: false
#' toc_position: {}
#' toc_section_display: true
#' toc_window_display: false
#' ---
#' ---
#' title: "R Data Wrangling"
#' output:
#' html_document:
#' highlight: tango
#' toc: true
#' toc_float:
#' collapsed: true
#' ---
#'
#'
#'
#' Workshop description
#' ====================
#'
#' Data scientists are known and celebrated for modeling and visually
#' displaying information, but down in the data science engine room there
#' is a lot of less glamorous work to be done. Before data can be used
#' effectively it must often be cleaned, corrected, and reformatted. This
#' workshop introduces the basic tools needed to make your data behave,
#' including data reshaping, regular expressions and other text
#' manipulation tools.
#'
#' This is an intermediate/advanced R course appropriate for those with
#' basic knowledge of R. If you need a refresher we recommend
#' [the IQSS R intro](http://tutorials.iq.harvard.edu/R/Rintro/Rintro.html).
#'
#' The lesson notes are available at
#' <http://tutorials.iq.harvard.edu/R/RDataManagement/RDataManagement.html>
#'
#' Prerequisites and Preparation
#' =============================
#'
#' Prior to the workshop you should:
#'
#' - install R from <https://cran.r-project.org/>
#' - install RStudio from <https://www.rstudio.com/products/rstudio/download/#download>
#' - install the tidyverse package in R with `install.packages("tidyverse")` and attach it with `library(tidyverse)`
#'
#' To start the workshop:
#' - Download and extract the materials from <http://tutorials.iq.harvard.edu/R/RDataManagement.zip>.
#' - Open Rstudio and create a new project (`File ==> New Project ==> Existing Directory` and select the `RDataManagement` folder you downloaded and extracted earlier.
#' - Open a new R script (`File ==> New File ==> R script`) and save it as `Notes.R`.
#'
#'
#' Example project overview
#' ========================
#'
#' It is common for data to be made available on a website somewhere, either by a
#' government agency, research group, or other organizations and entities. Often
#' the data you want is spread over many files, and retrieving it all one file at a
#' time is tedious and time consuming. Such is the case with the baby names data we
#' will be using today.
#'
#' The UK [Office for National Statistics](https://www.ons.gov.uk) provides yearly
#' data on the most popular baby names going back to 1996. The data is provided
#' separately for boys and girls and is stored in Excel spreadsheets.
#'
#' I have downloaded all the excel files containing boys names data from
#' https://www.ons.gov.uk/peoplepopulationandcommunity/birthsdeathsandmarriages/livebirths/datasets/babynamesenglandandwalesbabynamesstatisticsboys
#' and made them available at
#' http://tutorials.iq.harvard.edu/R/RDataManagement/data/boysNames.zip.
#'
#' Our mission is to extract and graph the **top 100** boys names in England
#' and Wales for every year since 1996. There are several things that
#' make this challenging.
#'
#' Problems with the data
#' ======================
#'
#' While it was good of the UK Office for National Statistics to provide
#' baby name data, they were not very diligent about arranging it in a
#' convenient or consistent format.
#'
#' Exercise 0
#' ----------
#' Our mission is to extract and graph the **top 100** boys names in England and Wales for every year since 1996. There are several things that make this challenging.
#'
#' 1. Locate the file named `1996boys_tcm77-254026.xlsx` and open it in
#' a spreadsheet. (If you don't have a spreadsheet program installed on
#' your computer you can download one from
#' https://www.libreoffice.org/download/download/). What issues can you
#' identify that might make working with these data more difficult?
#'
#' 2. Locate the file named `2015boysnamesfinal.xlsx` and open it in a
#' spreadsheet. In what ways is the format different than the format
#' of `1996boys_tcm77-254026.xlsx`? How might these differences make
#' it more difficult to work with these data?
#'
#' Exercise 0 Prototype
#' -------------------------------
#'
#' > 1. Locate the file named `1996boys_tcm77-254026.xlsx` and open it in
#' > a spreadsheet. (If you don't have a spreadsheet program installed on
#' > your computer you can download one from
#' > https://www.libreoffice.org/download/download/). What issues can you
#' > identify that might make working with these data more difficult?
#'
#' The data does not start on row one. Headers are on row 7, followed by
#' a blank line, followed by the actual data.
#'
#' The data is stored in an inconvenient way, with ranks 1-50 in the
#' first set of columns and ranks 51-100 in a separate set of columns.
#'
#' There are notes below the data.
#'
#' > 2. Locate the file named `2015boysnamesfinal.xlsx` and open it in a
#' > spreadsheet. In what ways is the format different than the format
#' > of `1996boys_tcm77-254026.xlsx`? How might these differences make
#' > it more difficult to work with these data?
#'
#' The worksheet containing the data of interest is in different
#' positions and has different names from one year to the next. However,
#' it always includes "Table 1" in the worksheet name.
#'
#' Some years include columns for "changes in rank", others do not.
#'
#' These differences will make it more difficult to automate
#' re-arranging the data since we have to write code that can handle
#' different input formats.
#'
#' Useful data manipulation packages
#' =================================
#'
#' As you can see, the data is in quite a messy state. Note that this is
#' not a contrived example; this is exactly the way the data came to us
#' from the UK government website! Let's start cleaning and organizing
#' it. The `tidyverse` suite of packages provides many modern
#' conveniences that will make this job easier.
library(tidyverse)
#' Working with Excel worksheets
#' =============================
#'
#' Each Excel file contains a worksheet with the baby names data we want.
#' Each file also contains additional supplemental worksheets that we are
#' not currently interested in. As noted above, the worksheet of interest
#' differs from year to year, but always has "Table 1" in the sheet name.
#'
#' The first step is to get a vector of file names.
boy.file.names <- list.files("data/boys", full.names = TRUE)
#' Now that we've told R the names of the data files we can start working
#' with them. For example, the first file is
boy.file.names[[1]]
#' and we can use the `excel_sheets` function from the *readxl* package
#' to list the worksheet names from this file.
library(readxl)
excel_sheets(boy.file.names[[1]])
#' Iterating over file names with `map`
#' -----------------------------------
#'
#' Now that we know how to retrieve the names of the worksheets in an
#' Excel file we could start writing code to extract the sheet names from
#' each file, e.g.,
excel_sheets(boy.file.names[[1]])
excel_sheets(boy.file.names[[2]])
## ...
excel_sheets(boy.file.names[[20]])
#' This is not a terrible idea for a small number of files, but it is
#' more convenient to let R do the iteration for us. We could use a `for`
#' loop, or `sapply`, but the `map` family of functions from the *purrr*
#' package gives us a more consistent alternative, so we'll use that.
library(purrr)
map(boy.file.names, excel_sheets)
#' Filtering strings using regular expressions
#' --------------------------------------------
#'
#' In order to extract the correct worksheet names we need a way to extract
#' strings containing "Table 1". Base R provides some string manipulation
#' capabilities (see `?regex`, `?sub` and `?grep`), but we will use the
#' *stringr* package because it is more user-friendly.
#'
#' The *stringr* package provides functions to *detect*, *locate*,
#' *extract*, *match*, *replace*, *combine* and *split* strings (among
#' other things).
#'
#' Here we want to detect the pattern "Table 1", and only
#' return elements with this pattern. We can do that using the
#' `str_subset` function. The first argument to `str_subset` is character
#' vector we want to search in. The second argument is a *regular
#' expression* matching the pattern we want to retain.
#'
#' If you are not familiar with regular expressions, <http://www.regexr.com/> is a
#' good place to start.
#'
#' Now that we know how to filter character vectors using `str_subset` we can
#' identify the correct sheet in a particular Excel file. For example,
library(stringr)
str_subset(excel_sheets(boy.file.names[[1]]), "Table 1")
#' Writing your own functions
#' --------------------------
#'
#' The `map*` functions are useful when you want to apply a function to a
#' list or vector of inputs and obtain the return values. This is very
#' convenient when a function already exists that does exactly what you
#' want. In the examples above we mapped the `excel_sheets` function to
#' the elements of a vector containing file names. But now there is no
#' function that both retrieves worksheet names and subsets them.
#' Fortunately, writing functions in R is easy.
## Return the name(s) of the worksheet(s) in `file` whose name matches
## `pattern`: list all worksheets, then keep the matching ones.
get.data.sheet.name <- function(file, pattern) {
  all.sheets <- excel_sheets(file)
  str_subset(all.sheets, pattern)
}
#' Now we can map this new function over our vector of file names.
map(boy.file.names,
get.data.sheet.name,
pattern = "Table 1")
#' Reading Excel data files
#' ========================
#'
#' Now that we know the correct worksheet from each file we can actually
#' read those data into R. We can do that using the `read_excel`
#' function.
#'
#' We'll start by reading the data from the first file, just to check
#' that it works. Recall that the actual data starts on row 7, so we want
#' to skip the first 6 rows.
tmp <- read_excel(
boy.file.names[1],
sheet = get.data.sheet.name(boy.file.names[1],
pattern = "Table 1"),
skip = 6)
library(dplyr, quietly=TRUE)
glimpse(tmp)
#' Exercise 1
#' -----------
#'
#' 1. Write a function that takes a file name as an argument and reads
#' the worksheet containing "Table 1" from that file. Don't forget
#' to skip the first 6 rows.
#'
#' 2. Test your function by using it to read *one* of the boys names
#' Excel files.
#'
#' 3. Use the `map` function to read data from all the Excel files,
#' using the function you wrote in step 1.
#'
#'
#' Exercise 1 solution
#' ---------------------------
## 1. Write a function that takes a file name as an argument and reads
## the worksheet containing "Table 1" from that file.
## Read the "Table 1" worksheet from one baby-names Excel file.
## The 6 rows above the data are header material, so they are skipped.
read.baby.names <- function(file) {
  table1.sheet <- str_subset(excel_sheets(file), "Table 1")
  read_excel(file, sheet = table1.sheet, skip = 6)
}
## 2. Test your function by using it to read *one* of the boys names
## Excel files.
glimpse(read.baby.names(boy.file.names[1]))
## 3. Use the `map` function to read data from all the Excel files,
## using the function you wrote in step 1.
boysNames <- map(boy.file.names, read.baby.names)
#' Data cleanup
#' ================
#'
#' Now that we've read in the data we still have some cleanup to do.
#' Specifically, we need to:
#'
#' 1. fix column names
#' 2. get rid of blank row and the top and the notes at the bottom
#' 3. get rid of extraneous "changes in rank" columns if they exist
#' 4. transform the side-by-side tables layout to a single table.
#'
#' In short, we want to go from this:
#'
#' 
#'
#' to this:
#'
#' 
#'
#' There are many ways to do this kind of data manipulation in R. We're
#' going to use the *dplyr* and *tidyr* packages to make our lives
#' easier. (Both packages were installed as dependencies of the
#' *tidyverse* package.)
#'
#' Selecting columns
#' ---------------
#'
#' Next we want to retain just the `Name`, `Name__1` and `Count`,
#' `Count__1` columns. We can do that using the `select` function:
boysNames[[1]]
boysNames[[1]] <- select(boysNames[[1]], Name, Name__1, Count, Count__1)
boysNames[[1]]
#' Dropping missing values
#' -----------------------
#'
#' Next we want to remove blank rows and rows used for notes. An easy way
#' to do that is to use `drop_na` to remove rows with missing values.
boysNames[[1]]
boysNames[[1]] <- drop_na(boysNames[[1]])
boysNames[[1]]
#' Finally, we will want to filter out missing values and do this for all
#' the elements in `boysNames`, a task I leave to you.
#'
#' Exercise 2
#' -----------
#'
#' 1. Write a function that takes a `data.frame` as an argument and
#' returns a modified version including only columns named "Name",
#' "Name\_\_1", "Count", or "Count\_\_1".
#'
#' 2. Test your function by using it to read *one* of the boys names
#' Excel files.
#'
#' 3. Use the `map` function to read data from all the Excel files,
#' using the function you wrote in step 1.
#'
#'
#' Exercise 2 solution<span class="tag" data-tag-name="prototype"></span>
#'
#'
#'
#'
#' Re-arranging into a single table
#' -----------------------------------------
#'
#' Our final task is to re-arrange to data so that it is all in a single
#' table instead of in two side-by-side tables. For many similar tasks
#' the `gather` function in the *tidyr* package is useful, but in this
#' case we will be better off using a combination of `select` and
#' `bind_rows`.
boysNames[[1]]
bind_rows(select(boysNames[[1]], Name, Count),
select(boysNames[[1]], Name = Name__1, Count = Count__1))
#' Exercise 3: Cleanup all the data
#' ------------------------------
#'
#' In the previous examples we learned how to drop empty rows with
#' `filter`, select only relevant columns with `select`, and re-arrange
#' our data with `select` and `bind_rows`. In each case we applied the
#' changes only to the first element of our `boysNames` list.
#'
#' Your task now is to use the `map` function to apply each of these
#' transformations to all the elements in `boysNames`.
#'
#' Exercise prototype
#' ------------------
#' There are different ways you can go about it. Here is one:
#'
## write a function that does all the cleanup
## Clean one year's raw baby-names table:
##  - drop rows with no Name value (blank separator rows and footnotes)
##  - keep only the name/count columns of the two side-by-side tables
##  - stack the left (Name, Count) and right (Name__1, Count__1) tables
##    into a single two-column (Name, Count) table
cleanupNamesData <- function(x) {
  no.blanks <- filter(x, !is.na(Name))
  name.counts <- select(no.blanks, Name, Count, Name__1, Count__1)
  left.table <- select(name.counts, Name, Count)
  right.table <- select(name.counts, Name = Name__1, Count = Count__1)
  bind_rows(left.table, right.table)
}
## test it out on the second data.frame in the list
glimpse(boysNames[[2]]) # before cleanup
glimpse(cleanupNamesData(boysNames[[2]])) # after cleanup
## apply the cleanup function to all the data.frames in the list
boysNames <- map(boysNames, cleanupNamesData)
#' Data organization and storage
#' ========================
#' Now that we have the data cleaned up and augmented, we can turn our attention to organizing and storing the data.
#'
#' One table for each year
#' ----------------------
#' Right now we have a list of tables, one for each year. This is not a bad way to go. It has the advantage of making it easy to work with individual years; it has the disadvantage of making it more difficult to examine questions that require data from multiple years. To make the arrangement of the data clearer it helps to name each element of the list with the year it corresponds too.
glimpse(head(boysNames))
years <- str_extract(boy.file.names, "[0-9]{4}")
boysNames <- setNames(boysNames, years)
glimpse(head(boysNames))
#' One big table
#' -------------
#'
#' While storing the data in separate tables by year makes some sense,
#' many operations will be easier if the data is simply stored in one big
#' table. We've already seen how to turn a list of data.frames into a
#' single data.frame using `bind_rows`, but there is a problem; The year
#' information is stored in the names of the list elements, and so
#' flattening the tables into one will result in losing the year
#' information! Fortunately it is not too much trouble to add the year
#' information to each table before flattening.
boysNames <- imap(boysNames,
function(data, name) {
mutate(data, Year = as.integer(name))
})
boysNames <- bind_rows(boysNames)
glimpse(boysNames)
#' Exercise: Make one big table
#' -------------------------
#'
#' Turn the list of boys names data.frames into a single table.
#'
#' Create a directory under `data/all` and write the data to a `.csv`
#' file.
#'
#'
#' Finally, repeat the previous exercise, this time working with the data
#' in one big table.
#'
#' Exercise prototype
#' ------------------
#'
#' Working with the data in one big table is often easier.
boysNames <- bind_rows(boysNames)
dir.create("data/all")
write_csv(boysNames, "data/all/boys_names.csv")
## What where the five most popular names in 2013?
slice(arrange(filter(boysNames, Year == 2013),
desc(Count)),
1:5)
## How has the popularity of the name "ANDREW" changed over time?
andrew <- filter(boysNames, Name == "ANDREW")
ggplot(andrew, aes(x = Year, y = Count)) +
geom_line() +
ggtitle("Popularity of \"Andrew\", over time")
#' Additional reading and resources
#' ================================
#'
#' - Learn from the best: <http://adv-r.had.co.nz/>;
#' <http://r4ds.had.co.nz/>
#' - R documentation: <http://cran.r-project.org/manuals.html>
#' - Collection of R tutorials:
#' <http://cran.r-project.org/other-docs.html>
#'
#' - R for Programmers (by Norman Matloff, UC--Davis)
#'
#' <http://heather.cs.ucdavis.edu/~matloff/R/RProg.pdf>
#'
#' - Calling C and Fortran from R (by Charles Geyer, UMinn)
#'
#' <http://www.stat.umn.edu/~charlie/rc/>
#'
#' - State of the Art in Parallel Computing with R (Schmidberger et al.)
#'
#'     <http://www.jstatsoft.org/v31/i01/paper>
#'
#' - Institute for Quantitative Social Science: <http://iq.harvard.edu>
#' - IQSS Data Science Services: <http://dss.iq.harvard.edu/>
|
# One-way ANOVA p-value for Counts ~ Diets.
#
# Args:
#   temp: data.frame with a numeric 'Counts' column and a 'Diets'
#         grouping column (factor or character).
# Returns:
#   The F-test p-value (Pr(>F)) for the Diets term.
pValue <- function(temp) {
  tempAOV <- aov(Counts ~ Diets, data = temp)
  # summary(aov) returns a one-element list holding the ANOVA table; the
  # first Pr(>F) entry is the Diets row (the residual row's entry is NA).
  # Returned explicitly instead of relying on the invisible value of an
  # assignment, as the original did.
  summary(tempAOV)[[1]][["Pr(>F)"]][[1]]
}

# Run the ANOVA for every matching file in the working directory and
# print one "file : p-value" line per file.
filenames <- list.files(pattern = "21_samples-HQ-LQ")
for (f in filenames) {
  fd <- read.table(f, header = TRUE)
  result <- pValue(fd)
  # paste0 yields a single readable message; the original c(f, ":", result)
  # printed an awkward three-element character vector.
  print(paste0(f, " : ", result))
}
|
/pValueFinder.R
|
no_license
|
zunjuu/pValueFinder
|
R
| false
| false
| 287
|
r
|
# One-way ANOVA p-value for Counts ~ Diets.
#
# Args:
#   temp: data.frame with a numeric 'Counts' column and a 'Diets'
#         grouping column (factor or character).
# Returns:
#   The F-test p-value (Pr(>F)) for the Diets term.
pValue <- function(temp) {
  tempAOV <- aov(Counts ~ Diets, data = temp)
  # summary(aov) returns a one-element list holding the ANOVA table; the
  # first Pr(>F) entry is the Diets row (the residual row's entry is NA).
  # Returned explicitly instead of relying on the invisible value of an
  # assignment, as the original did.
  summary(tempAOV)[[1]][["Pr(>F)"]][[1]]
}

# Run the ANOVA for every matching file in the working directory and
# print one "file : p-value" line per file.
filenames <- list.files(pattern = "21_samples-HQ-LQ")
for (f in filenames) {
  fd <- read.table(f, header = TRUE)
  result <- pValue(fd)
  # paste0 yields a single readable message; the original c(f, ":", result)
  # printed an awkward three-element character vector.
  print(paste0(f, " : ", result))
}
|
source('utils_base.R')
# own
## sprintf with the value first so it composes with pipes:
## sprintm(x, fmt, ...) is sprintf(fmt, x, ...).
sprintm = function(x, fmt, ...) {
  sprintf(fmt, x, ...)
}
# filter blank values out
compact = function(x) {
  # Keep only the non-NULL elements of x (NA and "" are preserved).
  Filter(Negate(is.null), x)
}
# test
# Build zero-padded data names: base_name followed by "001", "002", ..., end.
#
# Fixes:
#  - "%03d" instead of "%03s": the 0 (zero-pad) flag combined with %s is
#    platform-dependent in sprintf (ignored on some platforms), so the
#    original could yield " 1" rather than "001".
#  - seq_len(end) instead of 1:end: safe for end = 0 (empty result, where
#    1:0 would give c(1, 0)).
generate_data = function(base_name, end) {
  seq_len(end) %>%
    sprintm("%03d") %>%
    pre0(base_name)
}
# Thin wrappers over the project read.file()/write.file() helpers with the
# dataset name fixed to 'data_naming'.
read_data_naming = function(from = '', test = F) {
  read.file('data_naming', from = from, test = test)
}
write_data_naming = function(df, from = '', test = F) {
  write.file(df, 'data_naming', from = from, test = test)
}
|
/utils.R
|
no_license
|
expomini/data_analysis_presentations
|
R
| false
| false
| 479
|
r
|
source('utils_base.R')
# own
## sprintf with the value first so it composes with pipes:
## sprintm(x, fmt, ...) is sprintf(fmt, x, ...).
sprintm = function(x, fmt, ...) {
  sprintf(fmt, x, ...)
}
# filter blank values out
compact = function(x) {
  # Keep only the non-NULL elements of x (NA and "" are preserved).
  Filter(Negate(is.null), x)
}
# test
# Build zero-padded data names: base_name followed by "001", "002", ..., end.
#
# Fixes:
#  - "%03d" instead of "%03s": the 0 (zero-pad) flag combined with %s is
#    platform-dependent in sprintf (ignored on some platforms), so the
#    original could yield " 1" rather than "001".
#  - seq_len(end) instead of 1:end: safe for end = 0 (empty result, where
#    1:0 would give c(1, 0)).
generate_data = function(base_name, end) {
  seq_len(end) %>%
    sprintm("%03d") %>%
    pre0(base_name)
}
# Thin wrappers over the project read.file()/write.file() helpers with the
# dataset name fixed to 'data_naming'.
read_data_naming = function(from = '', test = F) {
  read.file('data_naming', from = from, test = test)
}
write_data_naming = function(df, from = '', test = F) {
  write.file(df, 'data_naming', from = from, test = test)
}
|
#agreement and correlation of pat vs patmat
# NOTE(review): pat.man / patmat.man are only created further down in this
# script (merges with `annotation`); these summary lines assume they already
# exist in the session — confirm the intended run order.
cor(pat.man$Effect,patmat.man$Effect)#0.97
# Scatter of unadjusted vs maternal-BMI-adjusted paternal effect estimates,
# with the y = x identity line for reference.
plot(pat.man$Effect,patmat.man$Effect,main="Correlation between paternal BMI effect estimates \nobtained before and after adjustment for maternal BMI: r=0.97",xlab="Unadjusted",ylab="Adjusted")
abline(0,1,col=wes_palette("Zissou1")[1])
# Distribution of the relative (%) change in effect size after adjustment.
summary(abs(abs(patmat.man$Effect*100 - pat.man$Effect*100)/(pat.man$Effect*100)))
#comparing pat vs mat effect sizes
# TRUE = paternal effect smaller in magnitude than the maternal effect.
summary(abs(list.of.results$covs.patmat$Effect)< abs(list.of.results$covs.matpat$Effect))
summary(abs(list.of.results$covs.pat$Effect)< abs(list.of.results$covs.mat$Effect))
#Pat vs Mat manhattan plots
require(meffil)
require(wesanderson)
## Desaturate colours: convert to HSV, scale the saturation channel by
## `sat` (0 = greyscale, 1 = unchanged), and convert back to hex colours.
desat <- function(cols, sat=0.5) {
  hsv.mat <- rgb2hsv(col2rgb(cols))
  hsv(hsv.mat[1, ], sat * hsv.mat[2, ], hsv.mat[3, ])
}
annotation <- meffil.get.features("450k")
source("~/EWAS/pat_bmi/manhattan_plot_function.r")
pat.man <- merge(list.of.results$covs.pat,annotation,by.x="MarkerName",by.y="name",all=F)
mat.man <- merge(list.of.results$covs.mat,annotation,by.x="MarkerName",by.y="name",all=F)
pat.man$CHR <- as.numeric(unlist(lapply(pat.man$chromosome,substring,first=4)))
mat.man$CHR <- as.numeric(unlist(lapply(mat.man$chromosome,substring,first=4)))
patmat.man <- merge(list.of.results$covs.patmat,annotation,by.x="MarkerName",by.y="name",all=F)
matpat.man <- merge(list.of.results$covs.matpat,annotation,by.x="MarkerName",by.y="name",all=F)
patmat.man$CHR <- as.numeric(unlist(lapply(patmat.man$chromosome,substring,first=4)))
matpat.man$CHR <- as.numeric(unlist(lapply(matpat.man$chromosome,substring,first=4)))
pat.man$abs.Effect <-abs(pat.man$Effect)*100
mat.man$abs.Effect <-abs(mat.man$Effect)*100
patmat.man$abs.Effect <-abs(patmat.man$Effect)*100
matpat.man$abs.Effect <-abs(matpat.man$Effect)*100
png("pat.manhattan.effect.png",width=1500,height=500)
manhattan (pat.man, chr = "CHR", bp = "position", p = "abs.Effect", snp = "MarkerName", col = c(wes_palette("Zissou1")[1],
desat(wes_palette("Zissou1")[1])), chrlabs = NULL, suggestiveline = NULL,
genomewideline = NULL,ylim_max=max(c(pat.man$abs.Effect,mat.man$abs.Effect,matpat.man$abs.Effect,patmat.man$abs.Effect))+0.2,logp=F)
dev.off()
png("mat.manhattan.effect.png",width=1500,height=500)
manhattan (mat.man, chr = "CHR", bp = "position", p = "abs.Effect", snp = "MarkerName", col = c(wes_palette("Zissou1")[5],
desat(wes_palette("Zissou1")[5])), chrlabs = NULL, suggestiveline = NULL,
genomewideline = NULL,ylim_max=max(c(pat.man$abs.Effect,mat.man$abs.Effect,matpat.man$abs.Effect,patmat.man$abs.Effect))+0.2,logp=F)
dev.off()
png("patmat.manhattan.effect.png",width=1500,height=500)
manhattan (patmat.man, chr = "CHR", bp = "position", p = "abs.Effect", snp = "MarkerName", col = c(wes_palette("Zissou1")[1],
desat(wes_palette("Zissou1")[1])), chrlabs = NULL, suggestiveline = NULL,
genomewideline = NULL,ylim_max=max(c(pat.man$abs.Effect,mat.man$abs.Effect,matpat.man$abs.Effect,patmat.man$abs.Effect))+0.2,logp=F)
dev.off()
png("matpat.manhattan.effect.png",width=1500,height=500)
manhattan (matpat.man, chr = "CHR", bp = "position", p = "abs.Effect", snp = "MarkerName", col = c(wes_palette("Zissou1")[5],
desat(wes_palette("Zissou1")[5])), chrlabs = NULL, suggestiveline = NULL,
genomewideline = NULL,ylim_max=max(c(pat.man$abs.Effect,mat.man$abs.Effect,matpat.man$abs.Effect,patmat.man$abs.Effect))+0.2,logp=F)
dev.off()
#mat vs pat meta-analysis
## Per-CpG fixed-effects meta-analysis across studies.
## Splits `data` by MarkerName and fits a weighted fixed-effect model
## (metafor::rma.uni) to each CpG's per-study effects and standard errors.
## Returns a named list of rma fits, one element per MarkerName.
fixed.effects.meta.analysis <- function(data){
  require(metafor)
  fit.one <- function(d) {
    rma.uni(slab = d$study, yi = d$Effect, sei = d$StdErr,
            method = "FE", weighted = TRUE)
  }
  lapply(split(data, f = data$MarkerName), fit.one)
}
CpGs <- list.of.results$covs.pat[which(list.of.results$covs.pat$Pvalue<1e-5),"MarkerName"]
PatMatComparison<-rbind(list.of.results$covs.pat[which(list.of.results$covs.pat$MarkerName %in% CpGs),],
list.of.results$covs.patmat[which(list.of.results$covs.patmat$MarkerName %in% CpGs),],
list.of.results$covs.mat[which(list.of.results$covs.mat$MarkerName %in% CpGs),],
list.of.results$covs.matpat[which(list.of.results$covs.matpat$MarkerName %in% CpGs),])
PatMatComparison$Model <- c(rep("Paternal",length(CpGs)),rep("Paternal adjusted for maternal",length(CpGs)),
rep("Maternal",length(CpGs)),rep("Maternal adjusted for paternal",length(CpGs)))
# Fit both fixed-effects meta-analyses first, THEN summarise. The original
# summarised results.patmat with ldply one line before results.patmat was
# created, which errors on a fresh run (statements were out of order).
results.patmat.adj<-fixed.effects.meta.analysis(data=PatMatComparison[which(PatMatComparison$Model %in% c("Paternal adjusted for maternal","Maternal adjusted for paternal")),])
results.patmat<-fixed.effects.meta.analysis(data=PatMatComparison[which(PatMatComparison$Model %in% c("Paternal","Maternal")),])
# Collapse each list of rma fits to per-CpG heterogeneity stats (QE, QEp, I2).
results.patmat<-ldply(lapply(results.patmat,function(x) unlist(c(x[c("QE","QEp","I2")]))))
results.patmat.adj<-ldply(lapply(results.patmat.adj,function(x) unlist(c(x[c("QE","QEp","I2")]))))
write.csv(results.patmat.adj,"matpatvspatmat.metaanalysis.birth.csv")
PatMatComparison$colour <-wes_palette("Zissou1")[1]
PatMatComparison$colour[which(PatMatComparison$Model %in% c("Maternal","Maternal adjusted for paternal"))]<-wes_palette("Zissou1")[5]
PatMatComparison$ci.lb<-PatMatComparison$Effect - (1.96* PatMatComparison$StdErr)
PatMatComparison$ci.ub<-PatMatComparison$Effect + (1.96* PatMatComparison$StdErr)
PatMatComparison$Model<-factor(PatMatComparison$Model,levels=c("Paternal","Paternal adjusted for maternal","Maternal","Maternal adjusted for paternal"),ordered=TRUE)
Order<-list.of.results$covs.patmat[which(list.of.results$covs.patmat$MarkerName %in% CpGs),]
Order<-Order[order(Order$Effect),]
PatMatComparison$MarkerName<-factor(PatMatComparison$MarkerName,levels=Order$MarkerName,ordered=TRUE)
PatMatComparison<-merge(PatMatComparison,annotation,by.x="MarkerName",by.y="name",all=FALSE)
new.annotations<-fread("/panfs/panasas01/sscm/gs8094/Common_files/enhanced_annotations.txt",stringsAsFactors=FALSE)
PatMatComparison<-merge(PatMatComparison,new.annotations,by.x="MarkerName",by.y="IlmnID",all.y=FALSE)
PatMatComparison$gene <- unlist(lapply(strsplit(PatMatComparison$gene.symbol,split=";"),"[",1))
PatMatComparison$gene[is.na(PatMatComparison$gene)]<-PatMatComparison$"UCSC KnownGene"[is.na(PatMatComparison$gene)]
PatMatComparison$CpG.Gene <- paste0(PatMatComparison$MarkerName,"\n",PatMatComparison$gene)
PatMatComparison<-PatMatComparison[order(PatMatComparison$MarkerName),]
PatMatComparison$CpG.Gene<-factor(PatMatComparison$CpG.Gene,levels=unique(PatMatComparison$CpG.Gene),ordered=TRUE)
P <- ggplot(PatMatComparison,aes(x=Model,y=Effect*100))+
geom_hline(yintercept=0,linetype="dashed")+
geom_errorbar(aes(colour=Model,ymin=ci.lb*100, ymax=ci.ub*100),width=0.5,size=1)+
geom_point(aes(shape=Model,colour=Model),fill="white",size=4)+
scale_shape_manual(values=c(15,22,19,21))+
scale_colour_manual(values=c(wes_palette("Zissou1")[1],wes_palette("Zissou1")[1],wes_palette("Zissou1")[5],wes_palette("Zissou1")[5]))+
facet_grid(.~CpG.Gene)+
theme_bw() + theme(legend.spacing.x = unit(1.0, 'cm'),legend.position = "bottom",legend.title=element_blank(),legend.text=element_text(size=14),axis.line.x=element_blank(),axis.ticks.x=element_blank(),axis.text.x=element_blank(),axis.text.y=element_text(size=12),axis.title.y=element_text(size=14),panel.grid.major.x = element_blank()) +
xlab("")+ylab("Effect estimate (difference in % methylation\nper 1SD increase in parental BMI)")+
ggtitle("CpGs showing strongest statistical evidence of association with paternal BMI\n") + theme(panel.spacing = unit(0.8, "lines"),panel.border = element_blank(),panel.background = element_rect(fill="grey95"),plot.title = element_text(hjust = 0.5,size=16),strip.background=element_rect(fill="grey95",colour=NA),strip.text = element_text(size=12,face = "italic"))
png("PatvsMat.coefplot.png",width=1000,height=500)
P
dev.off()
|
/Analyses/Birth/7. Comparison to maternal estimates.r
|
no_license
|
ammegandchips/PACE_Paternal_BMI
|
R
| false
| false
| 7,924
|
r
|
#agreement and correlation of pat vs patmat
# NOTE(review): pat.man / patmat.man are only created further down in this
# script (merges with `annotation`); these summary lines assume they already
# exist in the session — confirm the intended run order.
cor(pat.man$Effect,patmat.man$Effect)#0.97
# Scatter of unadjusted vs maternal-BMI-adjusted paternal effect estimates,
# with the y = x identity line for reference.
plot(pat.man$Effect,patmat.man$Effect,main="Correlation between paternal BMI effect estimates \nobtained before and after adjustment for maternal BMI: r=0.97",xlab="Unadjusted",ylab="Adjusted")
abline(0,1,col=wes_palette("Zissou1")[1])
# Distribution of the relative (%) change in effect size after adjustment.
summary(abs(abs(patmat.man$Effect*100 - pat.man$Effect*100)/(pat.man$Effect*100)))
#comparing pat vs mat effect sizes
# TRUE = paternal effect smaller in magnitude than the maternal effect.
summary(abs(list.of.results$covs.patmat$Effect)< abs(list.of.results$covs.matpat$Effect))
summary(abs(list.of.results$covs.pat$Effect)< abs(list.of.results$covs.mat$Effect))
#Pat vs Mat manhattan plots
require(meffil)
require(wesanderson)
## Desaturate colours: convert to HSV, scale the saturation channel by
## `sat` (0 = greyscale, 1 = unchanged), and convert back to hex colours.
desat <- function(cols, sat=0.5) {
  hsv.mat <- rgb2hsv(col2rgb(cols))
  hsv(hsv.mat[1, ], sat * hsv.mat[2, ], hsv.mat[3, ])
}
annotation <- meffil.get.features("450k")
source("~/EWAS/pat_bmi/manhattan_plot_function.r")
pat.man <- merge(list.of.results$covs.pat,annotation,by.x="MarkerName",by.y="name",all=F)
mat.man <- merge(list.of.results$covs.mat,annotation,by.x="MarkerName",by.y="name",all=F)
pat.man$CHR <- as.numeric(unlist(lapply(pat.man$chromosome,substring,first=4)))
mat.man$CHR <- as.numeric(unlist(lapply(mat.man$chromosome,substring,first=4)))
patmat.man <- merge(list.of.results$covs.patmat,annotation,by.x="MarkerName",by.y="name",all=F)
matpat.man <- merge(list.of.results$covs.matpat,annotation,by.x="MarkerName",by.y="name",all=F)
patmat.man$CHR <- as.numeric(unlist(lapply(patmat.man$chromosome,substring,first=4)))
matpat.man$CHR <- as.numeric(unlist(lapply(matpat.man$chromosome,substring,first=4)))
pat.man$abs.Effect <-abs(pat.man$Effect)*100
mat.man$abs.Effect <-abs(mat.man$Effect)*100
patmat.man$abs.Effect <-abs(patmat.man$Effect)*100
matpat.man$abs.Effect <-abs(matpat.man$Effect)*100
png("pat.manhattan.effect.png",width=1500,height=500)
manhattan (pat.man, chr = "CHR", bp = "position", p = "abs.Effect", snp = "MarkerName", col = c(wes_palette("Zissou1")[1],
desat(wes_palette("Zissou1")[1])), chrlabs = NULL, suggestiveline = NULL,
genomewideline = NULL,ylim_max=max(c(pat.man$abs.Effect,mat.man$abs.Effect,matpat.man$abs.Effect,patmat.man$abs.Effect))+0.2,logp=F)
dev.off()
png("mat.manhattan.effect.png",width=1500,height=500)
manhattan (mat.man, chr = "CHR", bp = "position", p = "abs.Effect", snp = "MarkerName", col = c(wes_palette("Zissou1")[5],
desat(wes_palette("Zissou1")[5])), chrlabs = NULL, suggestiveline = NULL,
genomewideline = NULL,ylim_max=max(c(pat.man$abs.Effect,mat.man$abs.Effect,matpat.man$abs.Effect,patmat.man$abs.Effect))+0.2,logp=F)
dev.off()
png("patmat.manhattan.effect.png",width=1500,height=500)
manhattan (patmat.man, chr = "CHR", bp = "position", p = "abs.Effect", snp = "MarkerName", col = c(wes_palette("Zissou1")[1],
desat(wes_palette("Zissou1")[1])), chrlabs = NULL, suggestiveline = NULL,
genomewideline = NULL,ylim_max=max(c(pat.man$abs.Effect,mat.man$abs.Effect,matpat.man$abs.Effect,patmat.man$abs.Effect))+0.2,logp=F)
dev.off()
png("matpat.manhattan.effect.png",width=1500,height=500)
manhattan (matpat.man, chr = "CHR", bp = "position", p = "abs.Effect", snp = "MarkerName", col = c(wes_palette("Zissou1")[5],
desat(wes_palette("Zissou1")[5])), chrlabs = NULL, suggestiveline = NULL,
genomewideline = NULL,ylim_max=max(c(pat.man$abs.Effect,mat.man$abs.Effect,matpat.man$abs.Effect,patmat.man$abs.Effect))+0.2,logp=F)
dev.off()
#mat vs pat meta-analysis
## Per-CpG fixed-effects meta-analysis across studies.
## Splits `data` by MarkerName and fits a weighted fixed-effect model
## (metafor::rma.uni) to each CpG's per-study effects and standard errors.
## Returns a named list of rma fits, one element per MarkerName.
fixed.effects.meta.analysis <- function(data){
  require(metafor)
  fit.one <- function(d) {
    rma.uni(slab = d$study, yi = d$Effect, sei = d$StdErr,
            method = "FE", weighted = TRUE)
  }
  lapply(split(data, f = data$MarkerName), fit.one)
}
CpGs <- list.of.results$covs.pat[which(list.of.results$covs.pat$Pvalue<1e-5),"MarkerName"]
PatMatComparison<-rbind(list.of.results$covs.pat[which(list.of.results$covs.pat$MarkerName %in% CpGs),],
list.of.results$covs.patmat[which(list.of.results$covs.patmat$MarkerName %in% CpGs),],
list.of.results$covs.mat[which(list.of.results$covs.mat$MarkerName %in% CpGs),],
list.of.results$covs.matpat[which(list.of.results$covs.matpat$MarkerName %in% CpGs),])
PatMatComparison$Model <- c(rep("Paternal",length(CpGs)),rep("Paternal adjusted for maternal",length(CpGs)),
rep("Maternal",length(CpGs)),rep("Maternal adjusted for paternal",length(CpGs)))
# Fit both fixed-effects meta-analyses first, THEN summarise. The original
# summarised results.patmat with ldply one line before results.patmat was
# created, which errors on a fresh run (statements were out of order).
results.patmat.adj<-fixed.effects.meta.analysis(data=PatMatComparison[which(PatMatComparison$Model %in% c("Paternal adjusted for maternal","Maternal adjusted for paternal")),])
results.patmat<-fixed.effects.meta.analysis(data=PatMatComparison[which(PatMatComparison$Model %in% c("Paternal","Maternal")),])
# Collapse each list of rma fits to per-CpG heterogeneity stats (QE, QEp, I2).
results.patmat<-ldply(lapply(results.patmat,function(x) unlist(c(x[c("QE","QEp","I2")]))))
results.patmat.adj<-ldply(lapply(results.patmat.adj,function(x) unlist(c(x[c("QE","QEp","I2")]))))
write.csv(results.patmat.adj,"matpatvspatmat.metaanalysis.birth.csv")
PatMatComparison$colour <-wes_palette("Zissou1")[1]
PatMatComparison$colour[which(PatMatComparison$Model %in% c("Maternal","Maternal adjusted for paternal"))]<-wes_palette("Zissou1")[5]
PatMatComparison$ci.lb<-PatMatComparison$Effect - (1.96* PatMatComparison$StdErr)
PatMatComparison$ci.ub<-PatMatComparison$Effect + (1.96* PatMatComparison$StdErr)
PatMatComparison$Model<-factor(PatMatComparison$Model,levels=c("Paternal","Paternal adjusted for maternal","Maternal","Maternal adjusted for paternal"),ordered=TRUE)
Order<-list.of.results$covs.patmat[which(list.of.results$covs.patmat$MarkerName %in% CpGs),]
Order<-Order[order(Order$Effect),]
PatMatComparison$MarkerName<-factor(PatMatComparison$MarkerName,levels=Order$MarkerName,ordered=TRUE)
PatMatComparison<-merge(PatMatComparison,annotation,by.x="MarkerName",by.y="name",all=FALSE)
new.annotations<-fread("/panfs/panasas01/sscm/gs8094/Common_files/enhanced_annotations.txt",stringsAsFactors=FALSE)
PatMatComparison<-merge(PatMatComparison,new.annotations,by.x="MarkerName",by.y="IlmnID",all.y=FALSE)
PatMatComparison$gene <- unlist(lapply(strsplit(PatMatComparison$gene.symbol,split=";"),"[",1))
PatMatComparison$gene[is.na(PatMatComparison$gene)]<-PatMatComparison$"UCSC KnownGene"[is.na(PatMatComparison$gene)]
PatMatComparison$CpG.Gene <- paste0(PatMatComparison$MarkerName,"\n",PatMatComparison$gene)
PatMatComparison<-PatMatComparison[order(PatMatComparison$MarkerName),]
PatMatComparison$CpG.Gene<-factor(PatMatComparison$CpG.Gene,levels=unique(PatMatComparison$CpG.Gene),ordered=TRUE)
P <- ggplot(PatMatComparison,aes(x=Model,y=Effect*100))+
geom_hline(yintercept=0,linetype="dashed")+
geom_errorbar(aes(colour=Model,ymin=ci.lb*100, ymax=ci.ub*100),width=0.5,size=1)+
geom_point(aes(shape=Model,colour=Model),fill="white",size=4)+
scale_shape_manual(values=c(15,22,19,21))+
scale_colour_manual(values=c(wes_palette("Zissou1")[1],wes_palette("Zissou1")[1],wes_palette("Zissou1")[5],wes_palette("Zissou1")[5]))+
facet_grid(.~CpG.Gene)+
theme_bw() + theme(legend.spacing.x = unit(1.0, 'cm'),legend.position = "bottom",legend.title=element_blank(),legend.text=element_text(size=14),axis.line.x=element_blank(),axis.ticks.x=element_blank(),axis.text.x=element_blank(),axis.text.y=element_text(size=12),axis.title.y=element_text(size=14),panel.grid.major.x = element_blank()) +
xlab("")+ylab("Effect estimate (difference in % methylation\nper 1SD increase in parental BMI)")+
ggtitle("CpGs showing strongest statistical evidence of association with paternal BMI\n") + theme(panel.spacing = unit(0.8, "lines"),panel.border = element_blank(),panel.background = element_rect(fill="grey95"),plot.title = element_text(hjust = 0.5,size=16),strip.background=element_rect(fill="grey95",colour=NA),strip.text = element_text(size=12,face = "italic"))
png("PatvsMat.coefplot.png",width=1000,height=500)
P
dev.off()
|
# Regression tests for SWC(): each call must reproduce the reference
# output stored in the packaged dataTestthat object (see the commented-out
# generation code below this test).
test_that('SoilHyP', {
  data(dataTestthat)
  suction <- seq(1, 1000, by = 1)
  # --------------------------------------------
  # Unimodal van Genuchten
  # --------------------------------------------
  expect_equal(
    SWC(suc = suction,
        par.shp = c(ths = 0.4, thr = 0, alfa = 0.02, n = 1.5),
        FUN.shp = 'vG', modality = 'uni', suc.negativ = FALSE),
    dataTestthat$th.vgm.uni)
  # --------------------------------------------
  # Bimodal van Genuchten
  # --------------------------------------------
  expect_equal(
    SWC(suc = suction,
        par.shp = c(ths = 0.4, thr = 0, alfa = 0.02, n = 2, w2 = 0.2, alfa2 = 1, n2 = 10),
        FUN.shp = 'vG', modality = 'bi', suc.negativ = FALSE),
    dataTestthat$th.vgm.bi)
  # --------------------------------------------
  # Unimodal PDI
  # --------------------------------------------
  expect_equal(
    SWC(suc = suction,
        par.shp = list(ths = 0.4, thr = 0, alfa = 0.02, n = 1.5),
        FUN.shp = 'pdi', modality = 'uni', suc.negativ = FALSE),
    dataTestthat$th.pdi.uni)
  # --------------------------------------------
  # bimodal PDI
  # --------------------------------------------
  expect_equal(
    SWC(suc = suction,
        par.shp = list(ths = 0.4, thr = 0, alfa = 0.02, n = 2, w2 = 0.2, alfa2 = 1, n2 = 10),
        FUN.shp = 'pdi', modality = 'bi', suc.negativ = FALSE),
    dataTestthat$th.pdi.bi)
})
# dataTestthat <- list(th.vgm.uni = SWC(suc = seq(1, 1000, by = 1), par.shp = c(ths = 0.4, thr = 0, alfa = 0.02, n = 1.5),
# FUN.shp = c('vG'), modality = 'uni', suc.negativ = FALSE),
# th.vgm.bi = SWC(suc = seq(1, 1000, by = 1),
# par.shp = c(ths = 0.4, thr = 0, alfa = 0.02, n = 2, w2 = 0.2, alfa2 = 1, n2 = 10),
# FUN.shp = c('vG'), modality = c('bi'), suc.negativ = FALSE),
# th.pdi.uni = SWC(suc = seq(1, 1000, by = 1), par.shp = list(ths = 0.4, thr = 0, alfa = 0.02, n = 1.5),
# FUN.shp = c('pdi'), modality = c('uni'), suc.negativ = FALSE),
# th.pdi.bi = SWC(suc = seq(1, 1000, by = 1),
# par.shp = list(ths = 0.4, thr = 0, alfa = 0.02, n = 2, w2 = 0.2, alfa2 = 1, n2 = 10),
# FUN.shp = c('pdi'), modality = c('bi'), suc.negativ = FALSE),
# ku.vgm.uni = Ku(suc = seq(1, 1000, by = 1), FUN.shp = 'vGM',
# par.shp = list(Ks = 10, ths = 0.5, thr = 0, alfa = 0.02, n = 1.5, tau = 0.5),
# modality = 'uni', suc.negativ = FALSE),
# ku.vgm.bi = Ku(suc = seq(1, 1000, by = 1), FUN.shp = 'vGM',
# par.shp = list(Ks = 10, ths = 0.5, thr = 0, alfa = 0.02,
# n = 1.5, tau = 0.5, w2 = 0.1, alfa2 = 0.1, n2 = 3),
# modality = 'bi', suc.negativ = FALSE),
# ku.pdi.uni = Ku(suc = seq(1, 1000, by = 1), FUN.shp = 'PDI', modality = 'uni',
# par.shp = list(Ks = 10, ths = 0.5, thr = 0, alfa = 0.02, n = 1.5, tau = 0.5, omega = 0.001),
# suc.negativ = FALSE),
# ku.pdi.bi = Ku(suc = seq(1, 1000, by = 1), FUN.shp = 'PDI', modality = 'uni',
# par.shp = list(Ks = 10, ths = 0.5, thr = 0, alfa = 0.02, n = 1.5, tau = 0.5, omega = 0.001, w2 = 0.2, alfa2 = 1, n2 = 10),
# suc.negativ = FALSE)
# )
# getwd()
#save(dataTestthat, file = 'data/dataTestthat.RData')
#rm(dataTestthat)
|
/tests/testthat/test_SWC.R
|
no_license
|
cran/SoilHyP
|
R
| false
| false
| 3,858
|
r
|
# Regression tests for SWC(): each call must reproduce the reference
# output stored in the packaged dataTestthat object (see the commented-out
# generation code below this test).
test_that('SoilHyP', {
  data(dataTestthat)
  suction <- seq(1, 1000, by = 1)
  # --------------------------------------------
  # Unimodal van Genuchten
  # --------------------------------------------
  expect_equal(
    SWC(suc = suction,
        par.shp = c(ths = 0.4, thr = 0, alfa = 0.02, n = 1.5),
        FUN.shp = 'vG', modality = 'uni', suc.negativ = FALSE),
    dataTestthat$th.vgm.uni)
  # --------------------------------------------
  # Bimodal van Genuchten
  # --------------------------------------------
  expect_equal(
    SWC(suc = suction,
        par.shp = c(ths = 0.4, thr = 0, alfa = 0.02, n = 2, w2 = 0.2, alfa2 = 1, n2 = 10),
        FUN.shp = 'vG', modality = 'bi', suc.negativ = FALSE),
    dataTestthat$th.vgm.bi)
  # --------------------------------------------
  # Unimodal PDI
  # --------------------------------------------
  expect_equal(
    SWC(suc = suction,
        par.shp = list(ths = 0.4, thr = 0, alfa = 0.02, n = 1.5),
        FUN.shp = 'pdi', modality = 'uni', suc.negativ = FALSE),
    dataTestthat$th.pdi.uni)
  # --------------------------------------------
  # bimodal PDI
  # --------------------------------------------
  expect_equal(
    SWC(suc = suction,
        par.shp = list(ths = 0.4, thr = 0, alfa = 0.02, n = 2, w2 = 0.2, alfa2 = 1, n2 = 10),
        FUN.shp = 'pdi', modality = 'bi', suc.negativ = FALSE),
    dataTestthat$th.pdi.bi)
})
# dataTestthat <- list(th.vgm.uni = SWC(suc = seq(1, 1000, by = 1), par.shp = c(ths = 0.4, thr = 0, alfa = 0.02, n = 1.5),
# FUN.shp = c('vG'), modality = 'uni', suc.negativ = FALSE),
# th.vgm.bi = SWC(suc = seq(1, 1000, by = 1),
# par.shp = c(ths = 0.4, thr = 0, alfa = 0.02, n = 2, w2 = 0.2, alfa2 = 1, n2 = 10),
# FUN.shp = c('vG'), modality = c('bi'), suc.negativ = FALSE),
# th.pdi.uni = SWC(suc = seq(1, 1000, by = 1), par.shp = list(ths = 0.4, thr = 0, alfa = 0.02, n = 1.5),
# FUN.shp = c('pdi'), modality = c('uni'), suc.negativ = FALSE),
# th.pdi.bi = SWC(suc = seq(1, 1000, by = 1),
# par.shp = list(ths = 0.4, thr = 0, alfa = 0.02, n = 2, w2 = 0.2, alfa2 = 1, n2 = 10),
# FUN.shp = c('pdi'), modality = c('bi'), suc.negativ = FALSE),
# ku.vgm.uni = Ku(suc = seq(1, 1000, by = 1), FUN.shp = 'vGM',
# par.shp = list(Ks = 10, ths = 0.5, thr = 0, alfa = 0.02, n = 1.5, tau = 0.5),
# modality = 'uni', suc.negativ = FALSE),
# ku.vgm.bi = Ku(suc = seq(1, 1000, by = 1), FUN.shp = 'vGM',
# par.shp = list(Ks = 10, ths = 0.5, thr = 0, alfa = 0.02,
# n = 1.5, tau = 0.5, w2 = 0.1, alfa2 = 0.1, n2 = 3),
# modality = 'bi', suc.negativ = FALSE),
# ku.pdi.uni = Ku(suc = seq(1, 1000, by = 1), FUN.shp = 'PDI', modality = 'uni',
# par.shp = list(Ks = 10, ths = 0.5, thr = 0, alfa = 0.02, n = 1.5, tau = 0.5, omega = 0.001),
# suc.negativ = FALSE),
# ku.pdi.bi = Ku(suc = seq(1, 1000, by = 1), FUN.shp = 'PDI', modality = 'uni',
# par.shp = list(Ks = 10, ths = 0.5, thr = 0, alfa = 0.02, n = 1.5, tau = 0.5, omega = 0.001, w2 = 0.2, alfa2 = 1, n2 = 10),
# suc.negativ = FALSE)
# )
# getwd()
#save(dataTestthat, file = 'data/dataTestthat.RData')
#rm(dataTestthat)
|
# Pre-compute the aggregated emissions data frames used by plot1..plot6.
# Requires `NEI` (PM2.5 emissions records) and `SCC` (source classification
# table) to already be loaded in the workspace; both are removed at the end
# to free memory.

# plot1: total emissions per year, all sources
emissions_per_year <- aggregate(Emissions ~ year, data = NEI, sum)
# plot2: total emissions per year for Baltimore City (fips 24510)
emissions_per_year_balt <- aggregate(Emissions ~ year, data = NEI[NEI$fips == "24510",], sum)
# plot3: Baltimore emissions per year, split by source type
emissions_per_year_type_balt <- aggregate(Emissions ~ year + type, data = NEI[NEI$fips == "24510",], sum)
# plot4: coal-combustion sources, matched on the SCC short name
SCC_coal <- as.character(SCC$SCC[grep("(Comb).*(Coal)", SCC$Short.Name)])
emissions_per_year_coal <- aggregate(Emissions ~ year, data = NEI[NEI$SCC %in% SCC_coal,], sum)
# plot5 + plot6: motor-vehicle sources (highway and off-highway);
# computed once here (the original recomputed the identical vector twice)
SCC_vehicles <- as.character(SCC$SCC[grep("(Highway Veh)|(Off-highway)", SCC$Short.Name)])
emissions_per_year_vehicles_balt <- aggregate(Emissions ~ year, data = NEI[NEI$SCC %in% SCC_vehicles & NEI$fips == "24510",], sum)
# plot6: Baltimore (24510) vs Los Angeles County (06037)
emissions_per_year_vehicles_balt_la <- aggregate(Emissions ~ year + fips, data = NEI[NEI$SCC %in% SCC_vehicles & NEI$fips %in% c("24510","06037"),], sum)
bl <- emissions_per_year_vehicles_balt_la
# Emissions relative to each county's own 1999 baseline, so the two
# counties can be compared on a single scale.
bl$RelEmissions[bl$fips == "06037"] <-
  (bl$Emissions[bl$fips == "06037"] /
     bl$Emissions[bl$year == 1999 & bl$fips == "06037"])
bl$RelEmissions[bl$fips == "24510"] <-
  (bl$Emissions[bl$fips == "24510"] /
     bl$Emissions[bl$year == 1999 & bl$fips == "24510"])
# Free the large raw tables once the aggregates are built
rm(NEI, SCC)
|
/compute graph vars and free up mem.R
|
no_license
|
rubenogit/ExData_Plotting2
|
R
| false
| false
| 1,303
|
r
|
# Pre-computes the aggregated emissions data frames used by plot1..plot6.
# NOTE(review): requires `NEI` and `SCC` to already be loaded in the
# workspace; this block appears to be an exact duplicate of the one above.
#plot1
emissions_per_year <- aggregate(Emissions ~ year, data = NEI, sum)
#plot2
# Baltimore City only (fips 24510)
emissions_per_year_balt <- aggregate(Emissions ~ year, data = NEI[NEI$fips == "24510",], sum)
#plot3
# Baltimore, split by source type
emissions_per_year_type_balt <- aggregate(Emissions ~ year + type, data = NEI[NEI$fips == "24510",], sum)
#plot4
# coal-combustion source codes, matched on the SCC short name
SCC_coal <- as.character(SCC$SCC[grep("(Comb).*(Coal)", SCC$Short.Name)])
emissions_per_year_coal <- aggregate(Emissions ~ year, data = NEI[NEI$SCC %in% SCC_coal,], sum)
#plot5
# motor-vehicle source codes (highway and off-highway)
SCC_vehicles <- as.character(SCC$SCC[grep("(Highway Veh)|(Off-highway)", SCC$Short.Name)])
emissions_per_year_vehicles_balt <- aggregate(Emissions ~ year, data = NEI[NEI$SCC %in% SCC_vehicles & NEI$fips == "24510",], sum)
#plot6
# NOTE(review): SCC_vehicles is recomputed identically here — harmless but redundant
SCC_vehicles <- as.character(SCC$SCC[grep("(Highway Veh)|(Off-highway)", SCC$Short.Name)])
emissions_per_year_vehicles_balt_la <- aggregate(Emissions ~ year + fips, data = NEI[NEI$SCC %in% SCC_vehicles & NEI$fips %in% c("24510","06037"),], sum)
bl <- emissions_per_year_vehicles_balt_la
#calculate relative emissions
# each county's emissions scaled by its own 1999 baseline
bl$RelEmissions[bl$fips == "06037"] <-
  (bl$Emissions[bl$fips == "06037"] /
     bl$Emissions[bl$year == 1999 & bl$fips == "06037"])
bl$RelEmissions[bl$fips == "24510"] <-
  (bl$Emissions[bl$fips == "24510"] /
     bl$Emissions[bl$year == 1999 & bl$fips == "24510"])
# free the large raw tables
rm(NEI,SCC)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/clustMD_UtilityFunctions.R
\name{modal.value}
\alias{modal.value}
\title{Calculate the mode of a sample}
\usage{
modal.value(x)
}
\arguments{
\item{x}{a vector containing the sample values.}
}
\value{
The mode of the sample. In the case of a tie, the minimum is
returned.
}
\description{
Calculate the mode of a sample
}
\keyword{internal}
|
/man/modal.value.Rd
|
no_license
|
cran/clustMD
|
R
| false
| true
| 423
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/clustMD_UtilityFunctions.R
\name{modal.value}
\alias{modal.value}
\title{Calculate the mode of a sample}
\usage{
modal.value(x)
}
\arguments{
\item{x}{a vector containing the sample values.}
}
\value{
The mode of the sample. In the case of a tie, the minimum is
returned.
}
\description{
Calculate the mode of a sample
}
\keyword{internal}
|
#' writeDataFrameAsTsv
#'
#' @description Saves a data frame on disk in TSV format. A ".tsv"
#' extension is appended to the given path and any spaces in the path
#' are replaced by underscores.
#'
#' @param data.frame.to.save the data frame to store on disk
#' @param file.name.path the file path (without extension) where to store
#' the dataset
#' @param col.names see ?write.table description (default is NA)
#' @param row.names see ?write.table description (default is TRUE)
#'
#' @return none
#' @export
#'
#' @examples
#' df <- data.frame(c1 = 1, c2 = 1:10,
#'                  row.names = paste0(rep("rn", 10), 1:10))
#' writeDataFrameAsTsv(df)
writeDataFrameAsTsv <- function(data.frame.to.save=NULL,
                            file.name.path=file.path(tempdir(),tempfile()),
                            col.names=NA, row.names=TRUE)
{
    if(is.null(data.frame.to.save)) stop("Please provide a data frame!")
    ## normalize the path so the written file name contains no spaces
    file.name.path <- gsub(pattern=" ", replacement="_", x=file.name.path)
    file.name <- paste0(file.name.path, ".tsv")
    write.table(x=data.frame.to.save, file=file.name, quote=FALSE, sep="\t",
                col.names=col.names, row.names=row.names)
    message(file.name, " written on disk as TSV file!\n")
}
#' ReadDataFrameFromTsv
#'
#' Reads a tabular TSV file from disk into a data frame.
#'
#' @param file.name.path a TSV format filename with path
#' @param row.names.col the column number where the rownames are located (def.1)
#' @param header.flag is an HEADER present? (default is TRUE)
#' @param sep the column separator (default is "\t")
#' @param quote.char the character used for quoting characters (default is none)
#'
#' @return a dataframe
#' @export
#'
#' @examples
#' TBW
readDataFrameFromTsv <- function(file.name.path, row.names.col=1,
                                header.flag=TRUE, sep="\t", quote.char="")
{
    if(!file.exists(file.name.path)) stop("Please provide an existing filename")
    ## delegate the parsing to read.table with the requested options
    loaded.df <- read.table(file=file.name.path, sep=sep, header=header.flag,
                            row.names=row.names.col, quote=quote.char)
    message(file.name.path, " read from disk!\n")
    return(loaded.df)
}
#' convertGenesViaMouseDb
#'
#' @description converts genes from SYMBOL or ENTREZID to SYMBOL or ENTREZID for
#' mouse genome as defined in org.Mm.eg.db
#'
#' @param gene.list a list of genes
#' @param fromType one of SYMBOL or ENTREZID
#' @param toType one of SYMBOL or ENTREZID
#'
#' @return the gene.map with only converted values.
#'
#' @export
#' @import org.Mm.eg.db
#' @importFrom AnnotationDbi select
#' @examples
convertGenesViaMouseDb <- function(gene.list, fromType=c("SYMBOL", "ENTREZID"),
                            toType=c("SYMBOL", "ENTREZID"))
{
    ## NOTE(review): fromType is not narrowed with match.arg(), so calling
    ## without an explicit fromType passes the whole default vector to
    ## keytype= — confirm callers always supply fromType. toType is unused.
    require("org.Mm.eg.db")
    ## map the requested keys to both SYMBOL and ENTREZID columns
    annotated.map <- AnnotationDbi::select(org.Mm.eg.db,
                                        keys=gene.list,
                                        keytype=fromType,
                                        columns=c("SYMBOL", "ENTREZID"))
    ## the column(s) other than the source identifier column
    col.check <- colnames(annotated.map)[-which(colnames(annotated.map) ==
                                                    fromType)]
    ## drop rows whose converted value is NA (no mapping found);
    ## which() on the is.na() matrix yields row indices because col.check
    ## selects a single column here
    indsna <- is.na(annotated.map[col.check])
    if(sum(indsna) > 0) annotated.map <- annotated.map[-which(indsna),]
    return(annotated.map)
}
#' convertGenesViaBiomart
#' @description Converts a list of genes using biomaRt package. (See biomaRt for
#' further usage description)
#' @param specie one of "hg38", "mm10", "rnor6".
#' @param attrs one or more attributes to be returned
#' as defined in biomaRt::listAttributes
#' @param filter one or more filters as defined in biomaRt::listFilters
#' (optional; must also appear among attrs and requires filter.values)
#' @param filter.values a list of values for the defined filters
#'
#' @return the gene.map of attributes
#'
#' @importFrom biomaRt useMart getBM
#' @export
#'
#' @examples
#' TBW
convertGenesViaBiomart <- function(specie=c("hg38", "mm10", "rnor6"),
                                attrs=NULL, filter=NULL, filter.values=NULL)
{
    specie <- match.arg(specie)
    stopifnot((length(attrs) != 0))
    ## filter and filter.values must be given together (or both omitted)
    stopifnot( is.null(filter) || (!is.null(filter.values)) )
    stopifnot( (!is.null(filter)) || is.null(filter.values) )
    ## fix: require the filter to appear among the attributes only when a
    ## filter is actually given — previously a NULL filter (explicitly
    ## permitted by the checks above) always triggered this stop()
    if(!is.null(filter) && length(which(attrs %in% filter))==0)
    {
        stop("please use a filter matching the attributes!")
    }
    switch (specie,
        "mm10"={ ds <- "mmusculus_gene_ensembl"},
        "hg38"={ ds <- "hsapiens_gene_ensembl"},
        "rnor6"={ ds <- "rnorvegicus_gene_ensembl"}
    )
    mart <- biomaRt::useMart("ensembl", dataset=ds)
    # listAttributes(mart)[grep("external", listAttributes(mart)[,1]),1]
    # listFilters(mart)[grep("external", listFilters(mart)[,1]),1]
    # attrs <- c("ensembl_gene_id", "external_gene_name", "entrezgene")
    gene.map <- biomaRt::getBM(attributes=attrs, mart=mart,
                                filters=filter, values=filter.values)
    if(!is.null(filter))
    {
        ## keep one row per filter value, sorted by the filter column
        idx.dp <- which(duplicated(gene.map[[filter]]))
        if(length(idx.dp) > 0 )
        {
            gene.map <- gene.map[-idx.dp,]
        }
        gene.map <- gene.map[order(gene.map[[filter]]),]
    }
    ## normalize any entrezgene column(s) to character
    idx.entr <- grep(pattern="entrezgene", x=colnames(gene.map))
    if(length(idx.entr) > 0 )
    {
        gene.map[,idx.entr] <- as.character(gene.map[,idx.entr])
    }
    return(gene.map)
}
#' attachGeneColumnToDf
#'
#' Attaches two columns to a dataframe by mapping its rownames through a
#' gene map: `check` holds the matched source identifier, `gene` the
#' corresponding target identifier. Rows whose rowname is not present in
#' the map get NA in both columns.
#'
#' @param mainDf the dataframe whose rownames are the identifiers to map
#' @param genesMap a dataframe holding the identifier mapping
#' @param rowNamesIdentifier the identifier type of the rownames
#' (validated only; one of "entrezgene", "ensembl", "symbol")
#' @param mapFromIdentifier the genesMap column matching the rownames
#' @param mapToIdentifier the genesMap column with the target identifiers
#'
#' @return mainDf (sorted by rownames) with `check` and `gene` columns added
#' @export
#'
#' @examples
attachGeneColumnToDf <- function(mainDf, genesMap,
                    rowNamesIdentifier=c("entrezgene", "ensembl", "symbol"),
                    mapFromIdentifier=NULL, mapToIdentifier=NULL)
{
    match.arg(rowNamesIdentifier)
    stopifnot(!is.null(mapFromIdentifier))
    stopifnot(!is.null(mapToIdentifier))
    mainDf <- mainDf[order(rownames(mainDf)),]
    ## fix: match() aligns each rowname with its own row in genesMap.
    ## The previous which()/which() pairing silently mis-aligned values
    ## whenever genesMap contained duplicated or unmatched extra entries.
    map.idx <- match(rownames(mainDf), genesMap[[mapFromIdentifier]])
    mainDf$check <- genesMap[[mapFromIdentifier]][map.idx]
    mainDf$gene <- genesMap[[mapToIdentifier]][map.idx]
    return(mainDf)
}
#' subsetDfByCol
#'
#' @description Subsets a dataframe by a list of identifiers, matched
#' either against one of its columns or against its rownames.
#'
#' @param df a dataframe.
#' @param list a list of identifiers.
#' @param colname the df column where to check the list in the df.
#' If NULL, the rownames will be used.
#'
#' @return the subsetted df
#' @export
#'
#' @examples
subsetDfByCol <- function(df, list, colname=NULL)
{
    ## match against rownames when no column is given
    keys <- if (is.null(colname)) rownames(df) else df[[colname]]
    keep <- which(keys %in% list)
    df <- df[keep, , drop=FALSE]
}
#' isCol
#' @description checks if a column is present in a dataframe/matrix.
#' @param df.to.check the dataframe/matrix
#' @param colname the colname to check
#'
#' @return boolean value
#' @export
#'
#' @examples
#'
isCol <- function(df.to.check, colname)
{
    ## TRUE as soon as any column name matches
    return(any(colnames(df.to.check) %in% colname))
}
#' generatePlotStrings
#' @description generates a list of title, plot.folder and plot.file.name
#' strings. Typically used in plot functions.
#'
#' @param path the starting path (if not NULL, the plot folder is also
#' created on disk)
#' @param prefix the prefix for the file
#' @param plot.type the type of the plot
#'
#' @return a list of strings title, plot.folder and plot.file.name.
#' @keywords internal
#'
#' @examples
generatePlotStrings <- function(path=NULL, prefix, plot.type)
{
    ## fix: the helper is named updatePrefix (lowercase u); calling the
    ## non-existent UpdatePrefix aborted every caller at runtime
    title <- gsub(pattern="_", replacement=" ",
                x=updatePrefix(prefix, plot.type))
    plot.folder <- gsub(pattern=" ", replacement="_",
                        x=file.path(path, plot.type))
    plot.file.name <- gsub(pattern=" ", replacement="_",
                        x=updatePrefix(prefix, plot.type))
    if(!is.null(path)) dir.create(plot.folder, showWarnings=FALSE,
                                recursive=TRUE)
    return(list("title"= title,
                "plot.folder"=plot.folder,
                "plot.file.name"=plot.file.name))
}
#' updatePrefix
#' @description given an input string, it appends one or more strings to it,
#' separated by spaces. (typically used by generatePlotStrings function)
#' @param prefix the string of the prefix to update
#' @param ... a list of strings to append to the prefix
#'
#' @return the updated prefix
#' @export
#'
#' @examples
#'
updatePrefix <- function(prefix, ...)
{
    dots <- unlist(list(...))
    if(length(dots) != 0)
    {
        for (str in dots)
        {
            prefix <- paste(prefix, str, sep=" ")
        }
    } else {
        ## fix: the message previously referenced the undefined `new.prefix`,
        ## which itself raised an error inside stop()
        stop("provide a string to append to ", prefix)
    }
    return(prefix)
}
#' updateFolderPath
#' @description appends one or more strings to the path and creates all
#' the directories in the paths recursively.
#' Additionally, replaces all the whitespaces with underscores.
#' @param path a string representing a path
#' @param ... a list of one or more elements to append to the path
#'
#' @return the string of the updated path
#' @export
#'
#' @examples
#' pth <- "./old/path"
#' updateFolderPath(path=pth, c("new", "directories", "in_the", "path"))
#'
updateFolderPath <- function(path, ...)
{
    parts <- unlist(list(...))
    if (length(parts) == 0)
    {
        stop("provide a string to append to ", path)
    }
    ## append each component with whitespace normalized to underscores
    for (part in parts)
    {
        path <- file.path(path, gsub(pattern=" ", replacement="_", part))
    }
    dir.create(path, recursive=TRUE, showWarnings=FALSE)
    message("Recursively created ", path, " on disk")
    return(path)
}
#' updateFilename
#' @description appends one or more strings to a filename string, joined
#' by underscores; spaces in the base filename are replaced by underscores.
#' @param filename the string representing the starting filename
#' @param ... a list of strings to append to filename
#' @param extension an extension to add to the filename without . (optional)
#'
#' @return a string with the updated filename
#' @export
#'
#' @examples
#' fn <- "file1"
#' newfn <- updateFilename(filename=fn, c("with", "more", "informations"),
#'                         extension="pdf")
#' print(newfn)
#'
updateFilename <- function(filename, ..., extension=NULL)
{
    dots <- unlist(list(...))
    ## fix: removed leftover debug print(dots)
    filename <- gsub(pattern=" ", replacement="_", x=filename)
    if(length(dots) != 0)
    {
        for (str in dots) filename <- paste(filename, str, sep="_")
    } else {
        stop("provide a string to append to ", filename)
    }
    if(!is.null(extension))
    {
        filename <- paste0(filename, ".", extension)
    }
    return(filename)
}
|
/R/utilities.R
|
no_license
|
drighelli/ticorser
|
R
| false
| false
| 11,038
|
r
|
#' writeDataFrameAsTsv
#'
#' @description Saves a data frame on disk in TSV format. A ".tsv"
#' extension is appended to the given path and any spaces in the path
#' are replaced by underscores.
#'
#' @param data.frame.to.save the data frame to store on disk
#' @param file.name.path the file path (without extension) where to store
#' the dataset
#' @param col.names see ?write.table description (default is NA)
#' @param row.names see ?write.table description (default is TRUE)
#'
#' @return none
#' @export
#'
#' @examples
#' df <- data.frame(c1 = 1, c2 = 1:10,
#'                  row.names = paste0(rep("rn", 10), 1:10))
#' writeDataFrameAsTsv(df)
writeDataFrameAsTsv <- function(data.frame.to.save=NULL,
                            file.name.path=file.path(tempdir(),tempfile()),
                            col.names=NA, row.names=TRUE)
{
    if(is.null(data.frame.to.save)) stop("Please provide a data frame!")
    ## normalize the path so the written file name contains no spaces
    file.name.path <- gsub(pattern=" ", replacement="_", x=file.name.path)
    file.name <- paste0(file.name.path, ".tsv")
    write.table(x=data.frame.to.save, file=file.name, quote=FALSE, sep="\t",
                col.names=col.names, row.names=row.names)
    message(file.name, " written on disk as TSV file!\n")
}
#' ReadDataFrameFromTsv
#'
#' Reads a tabular TSV file from disk into a data frame.
#'
#' @param file.name.path a TSV format filename with path
#' @param row.names.col the column number where the rownames are located (def.1)
#' @param header.flag is an HEADER present? (default is TRUE)
#' @param sep the column separator (default is "\t")
#' @param quote.char the character used for quoting characters (default is none)
#'
#' @return a dataframe
#' @export
#'
#' @examples
#' TBW
readDataFrameFromTsv <- function(file.name.path, row.names.col=1,
                                header.flag=TRUE, sep="\t", quote.char="")
{
    if(!file.exists(file.name.path)) stop("Please provide an existing filename")
    loaded.df <- read.table(file=file.name.path, sep=sep, header=header.flag,
                            row.names=row.names.col, quote=quote.char)
    message(file.name.path, " read from disk!\n")
    return(loaded.df)
}
#' convertGenesViaMouseDb
#'
#' @description converts genes from SYMBOL or ENTREZID to SYMBOL or ENTREZID for
#' mouse genome as defined in org.Mm.eg.db
#'
#' @param gene.list a list of genes
#' @param fromType one of SYMBOL or ENTREZID
#' @param toType one of SYMBOL or ENTREZID
#'
#' @return the gene.map with only converted values.
#'
#' @export
#' @import org.Mm.eg.db
#' @importFrom AnnotationDbi select
#' @examples
convertGenesViaMouseDb <- function(gene.list, fromType=c("SYMBOL", "ENTREZID"),
                            toType=c("SYMBOL", "ENTREZID"))
{
    ## NOTE(review): fromType is not narrowed with match.arg(); callers
    ## should always supply it explicitly. toType is unused.
    require("org.Mm.eg.db")
    annotated.map <- AnnotationDbi::select(org.Mm.eg.db,
                                        keys=gene.list,
                                        keytype=fromType,
                                        columns=c("SYMBOL", "ENTREZID"))
    ## the column(s) other than the source identifier column
    col.check <- colnames(annotated.map)[-which(colnames(annotated.map) ==
                                                    fromType)]
    ## drop rows whose converted value is NA (no mapping found)
    indsna <- is.na(annotated.map[col.check])
    if(sum(indsna) > 0) annotated.map <- annotated.map[-which(indsna),]
    return(annotated.map)
}
#' convertGenesViaBiomart
#' @description Converts a list of genes using biomaRt package. (See biomaRt for
#' further usage description)
#' @param specie one of "hg38", "mm10", "rnor6".
#' @param attrs one or more attributes to be returned
#' as defined in biomaRt::listAttributes
#' @param filter one or more filters as defined in biomaRt::listFilters
#' (optional; must also appear among attrs and requires filter.values)
#' @param filter.values a list of values for the defined filters
#'
#' @return the gene.map of attributes
#'
#' @importFrom biomaRt useMart getBM
#' @export
#'
#' @examples
#' TBW
convertGenesViaBiomart <- function(specie=c("hg38", "mm10", "rnor6"),
                                attrs=NULL, filter=NULL, filter.values=NULL)
{
    specie <- match.arg(specie)
    stopifnot((length(attrs) != 0))
    ## filter and filter.values must be given together (or both omitted)
    stopifnot( is.null(filter) || (!is.null(filter.values)) )
    stopifnot( (!is.null(filter)) || is.null(filter.values) )
    ## fix: only require the filter to appear among the attributes when a
    ## filter is actually given — previously a NULL filter always stopped
    if(!is.null(filter) && length(which(attrs %in% filter))==0)
    {
        stop("please use a filter matching the attributes!")
    }
    switch (specie,
        "mm10"={ ds <- "mmusculus_gene_ensembl"},
        "hg38"={ ds <- "hsapiens_gene_ensembl"},
        "rnor6"={ ds <- "rnorvegicus_gene_ensembl"}
    )
    mart <- biomaRt::useMart("ensembl", dataset=ds)
    gene.map <- biomaRt::getBM(attributes=attrs, mart=mart,
                                filters=filter, values=filter.values)
    if(!is.null(filter))
    {
        ## keep one row per filter value, sorted by the filter column
        idx.dp <- which(duplicated(gene.map[[filter]]))
        if(length(idx.dp) > 0 )
        {
            gene.map <- gene.map[-idx.dp,]
        }
        gene.map <- gene.map[order(gene.map[[filter]]),]
    }
    ## normalize any entrezgene column(s) to character
    idx.entr <- grep(pattern="entrezgene", x=colnames(gene.map))
    if(length(idx.entr) > 0 )
    {
        gene.map[,idx.entr] <- as.character(gene.map[,idx.entr])
    }
    return(gene.map)
}
#' attachGeneColumnToDf
#'
#' Attaches two columns to a dataframe by mapping its rownames through a
#' gene map: `check` holds the matched source identifier, `gene` the
#' corresponding target identifier. Unmatched rows get NA in both columns.
#'
#' @param mainDf the dataframe whose rownames are the identifiers to map
#' @param genesMap a dataframe holding the identifier mapping
#' @param rowNamesIdentifier the identifier type of the rownames (validated only)
#' @param mapFromIdentifier the genesMap column matching the rownames
#' @param mapToIdentifier the genesMap column with the target identifiers
#'
#' @return mainDf (sorted by rownames) with `check` and `gene` columns added
#' @export
#'
#' @examples
attachGeneColumnToDf <- function(mainDf, genesMap,
                    rowNamesIdentifier=c("entrezgene", "ensembl", "symbol"),
                    mapFromIdentifier=NULL, mapToIdentifier=NULL)
{
    match.arg(rowNamesIdentifier)
    stopifnot(!is.null(mapFromIdentifier))
    stopifnot(!is.null(mapToIdentifier))
    mainDf <- mainDf[order(rownames(mainDf)),]
    ## fix: match() aligns each rowname with its own genesMap row; the old
    ## which()/which() pairing mis-aligned values for duplicated/extra entries
    map.idx <- match(rownames(mainDf), genesMap[[mapFromIdentifier]])
    mainDf$check <- genesMap[[mapFromIdentifier]][map.idx]
    mainDf$gene <- genesMap[[mapToIdentifier]][map.idx]
    return(mainDf)
}
#' subsetDfByCol
#'
#' @description Subsets a dataframe by a list of identifiers, matched
#' either against one of its columns or against its rownames.
#'
#' @param df a dataframe.
#' @param list a list of identifiers.
#' @param colname the df column where to check the list in the df.
#' If NULL, the rownames will be used.
#'
#' @return the subsetted df
#' @export
#'
#' @examples
subsetDfByCol <- function(df, list, colname=NULL)
{
    keys <- if (is.null(colname)) rownames(df) else df[[colname]]
    keep <- which(keys %in% list)
    df <- df[keep, , drop=FALSE]
}
#' isCol
#' @description checks if a column is present in a dataframe/matrix.
#' @param df.to.check the dataframe/matrix
#' @param colname the colname to check
#'
#' @return boolean value
#' @export
#'
#' @examples
#'
isCol <- function(df.to.check, colname)
{
    return(any(colnames(df.to.check) %in% colname))
}
#' generatePlotStrings
#' @description generates a list of title, plot.folder and plot.file.name
#' strings. Typically used in plot functions.
#'
#' @param path the starting path (if not NULL, the plot folder is created)
#' @param prefix the prefix for the file
#' @param plot.type the type of the plot
#'
#' @return a list of strings title, plot.folder and plot.file.name.
#' @keywords internal
#'
#' @examples
generatePlotStrings <- function(path=NULL, prefix, plot.type)
{
    ## fix: the helper is named updatePrefix (lowercase u); the previous
    ## call to UpdatePrefix aborted every caller at runtime
    title <- gsub(pattern="_", replacement=" ",
                x=updatePrefix(prefix, plot.type))
    plot.folder <- gsub(pattern=" ", replacement="_",
                        x=file.path(path, plot.type))
    plot.file.name <- gsub(pattern=" ", replacement="_",
                        x=updatePrefix(prefix, plot.type))
    if(!is.null(path)) dir.create(plot.folder, showWarnings=FALSE,
                                recursive=TRUE)
    return(list("title"= title,
                "plot.folder"=plot.folder,
                "plot.file.name"=plot.file.name))
}
#' updatePrefix
#' @description given an input string, it appends one or more strings to it,
#' separated by spaces. (typically used by generatePlotStrings function)
#' @param prefix the string of the prefix to update
#' @param ... a list of strings to append to the prefix
#'
#' @return the updated prefix
#' @export
#'
#' @examples
#'
updatePrefix <- function(prefix, ...)
{
    dots <- unlist(list(...))
    if(length(dots) != 0)
    {
        for (str in dots)
        {
            prefix <- paste(prefix, str, sep=" ")
        }
    } else {
        ## fix: the message previously referenced the undefined `new.prefix`
        stop("provide a string to append to ", prefix)
    }
    return(prefix)
}
#' updateFolderPath
#' @description appends one or more strings to the path and creates all
#' the directories in the paths recursively.
#' Additionally, replaces all the whitespaces with underscores.
#' @param path a string representing a path
#' @param ... a list of one or more elements to append to the path
#'
#' @return the string of the updated path
#' @export
#'
#' @examples
#' pth <- "./old/path"
#' updateFolderPath(path=pth, c("new", "directories", "in_the", "path"))
#'
updateFolderPath <- function(path, ...)
{
    parts <- unlist(list(...))
    if (length(parts) == 0)
    {
        stop("provide a string to append to ", path)
    }
    for (part in parts)
    {
        path <- file.path(path, gsub(pattern=" ", replacement="_", part))
    }
    dir.create(path, recursive=TRUE, showWarnings=FALSE)
    message("Recursively created ", path, " on disk")
    return(path)
}
#' updateFilename
#' @description appends one or more strings to a filename string, joined
#' by underscores; spaces in the base filename are replaced by underscores.
#' @param filename the string representing the starting filename
#' @param ... a list of strings to append to filename
#' @param extension an extension to add to the filename without . (optional)
#'
#' @return a string with the updated filename
#' @export
#'
#' @examples
#' fn <- "file1"
#' newfn <- updateFilename(filename=fn, c("with", "more", "informations"),
#'                         extension="pdf")
#' print(newfn)
#'
updateFilename <- function(filename, ..., extension=NULL)
{
    dots <- unlist(list(...))
    ## fix: removed leftover debug print(dots)
    filename <- gsub(pattern=" ", replacement="_", x=filename)
    if(length(dots) != 0)
    {
        for (str in dots) filename <- paste(filename, str, sep="_")
    } else {
        stop("provide a string to append to ", filename)
    }
    if(!is.null(extension))
    {
        filename <- paste0(filename, ".", extension)
    }
    return(filename)
}
|
# ################################################################################# #
# SCRIPT_NAME: Bag of little bootstraps (BLB)
# The BLB is a procedure to produce robust, computationally efficient
# bootstrap estimates for a statistic of interest.
# INPUTS
# - data: x_1, x_2,..., x_n
# - theta_hat: estimator of interest
# - m: subset size
# - s: number of sampled subsets
# - B: number of Monte Carlo iterations
# - q: estimator of quality assessment
# OUTPUTS
# - estimate of q, i.e., q(Q_n(P))
# ################################################################################# #
## BEGIN SCRIPT ------------------------------------------------------------------- #
# --------------------------------------------------------------------------------- #
# regular bootstrap
# regular bootstrap
#
# Residual bootstrap for the least-squares fit y ~ X (no intercept column
# added). Returns the bootstrap SDs, the 2.5%/97.5% percentile bounds,
# the analytic standard errors, the replicates and the original fit.
# Note: set.seed(5) is called inside, so the replicates are reproducible
# but the caller's RNG state is altered.
#
# Args:
#   X: design matrix (n x d)
#   y: response vector of length n
#   B: number of bootstrap replicates
reg_boot <- function(X, y, B){
   set.seed(5)
   n <- nrow(X)   # fix: n was previously read from the global environment
   d <- ncol(X)
   beta_ls_boots <- matrix(rep(0, B*d), ncol = d)
   ls_fit <- lm(y ~ X - 1)
   beta_ls <- ls_fit$coefficients
   res_ls <- ls_fit$residuals
   std_err <- summary(ls_fit)$coefficients[,2]
   for(i in 1:B){
      # resample rows and residuals, then refit by least squares
      idx <- sample(n, n, replace = TRUE)
      y_boot <- X[idx,] %*% beta_ls + res_ls[idx]
      beta_ls_boots[i,] <- lsfit(X[idx,], y_boot,
                                 intercept = FALSE)$coefficients
   }
   q_boot <- apply(beta_ls_boots, 2, sd)
   q_hi <- apply(beta_ls_boots, 2, quantile, probs = 0.975)
   q_lo <- apply(beta_ls_boots, 2, quantile, probs = 0.025)
   return(list(q_boot = q_boot, q_hi = q_hi, q_lo = q_lo, std_err = std_err,
               beta_ls_boots = beta_ls_boots, beta_ls = beta_ls))
}
|
/reg_boot.R
|
no_license
|
jakeknigge/bag-of-little-boots
|
R
| false
| false
| 1,685
|
r
|
# ################################################################################# #
# SCRIPT_NAME: Bag of little bootstraps (BLB)
# The BLB is a procedure to produce robust, computationally efficient
# bootstrap estimates for a statistic of interest.
# INPUTS
# - data: x_1, x_2,..., x_n
# - theta_hat: estimator of interest
# - m: subset size
# - s: number of sampled subsets
# - B: number of Monte Carlo iterations
# - q: estimator of quality assessment
# OUTPUTS
# - estimate of q, i.e., q(Q_n(P))
# ################################################################################# #
## BEGIN SCRIPT ------------------------------------------------------------------- #
# --------------------------------------------------------------------------------- #
# regular bootstrap
# regular bootstrap
#
# Residual bootstrap for the least-squares fit y ~ X (no intercept column
# added). Returns the bootstrap SDs, the 2.5%/97.5% percentile bounds,
# the analytic standard errors, the replicates and the original fit.
# Note: set.seed(5) is called inside, so the replicates are reproducible
# but the caller's RNG state is altered.
#
# Args:
#   X: design matrix (n x d)
#   y: response vector of length n
#   B: number of bootstrap replicates
reg_boot <- function(X, y, B){
   set.seed(5)
   n <- nrow(X)   # fix: n was previously read from the global environment
   d <- ncol(X)
   beta_ls_boots <- matrix(rep(0, B*d), ncol = d)
   ls_fit <- lm(y ~ X - 1)
   beta_ls <- ls_fit$coefficients
   res_ls <- ls_fit$residuals
   std_err <- summary(ls_fit)$coefficients[,2]
   for(i in 1:B){
      # resample rows and residuals, then refit by least squares
      idx <- sample(n, n, replace = TRUE)
      y_boot <- X[idx,] %*% beta_ls + res_ls[idx]
      beta_ls_boots[i,] <- lsfit(X[idx,], y_boot,
                                 intercept = FALSE)$coefficients
   }
   q_boot <- apply(beta_ls_boots, 2, sd)
   q_hi <- apply(beta_ls_boots, 2, quantile, probs = 0.975)
   q_lo <- apply(beta_ls_boots, 2, quantile, probs = 0.025)
   return(list(q_boot = q_boot, q_hi = q_hi, q_lo = q_lo, std_err = std_err,
               beta_ls_boots = beta_ls_boots, beta_ls = beta_ls))
}
|
# -----------------------------------------------------------------------------------------------------------
# Research: A comparison of meta-analysis methods: Understanding the influence of experiments'
# statistical parameters
# File name: svc-functions.R
# File type: secondary R Script
# Date: March 2015
# R Script contributors: Oscar Dieste, Omar S. Gomez
# Purpose: functions to perform meta-analysis using SVC
# ----------------------------------------------------------------------------------------------------------
#
#
# The vote from a single study: 1 if the two-sample t-test between the
# control and treatment groups is significant, 0 otherwise.
# NOTE(review): the significance cutoff is hard-coded at 0.05; `level`
# only sets the CI level of the t-test — confirm this is intended.
tell_vote <-
    function (control, treatment, level) {
        ## fix idiom: [[ ]] extracts the numeric p-value itself rather than
        ## a one-element list (the list form only compared via coercion)
        p <- t.test(x=control, y=treatment, conf.level=level)[["p.value"]]
        if (p > 0.05) return (0)
        else return(1)
    }
#
# Auxiliary functions
# Effective per-study sample size combining the control (ncs) and
# treatment (nts) group sizes.
calculate_nis <-
    function (ncs, nts)
        return (ncs * nts / (ncs + nts))
# Log-likelihood of the observed significance votes given effect size delta
likelihood_function <-
    function (votes, nis, delta) {
        z <- -1*sqrt(nis)*delta
        sum(votes*log(1-pnorm(z)) + (1-votes)*log(pnorm(z)))
    }
# Scaled normal density of the vote statistic at effect size delta
tell_Di <-
    function (nis, delta) sqrt(nis/(2*pi)) * exp(-0.5*nis*(delta^2))
# Probability that a study with effective size nis casts a significant vote
tell_pi <-
    function (nis, delta) 1 - pnorm(-1*sqrt(nis)*delta)
#
# Calculate the most probable effect size by maximum likelihood:
# a coarse grid search over [-10, 10] with step `int` (= 1), followed by
# a finer search with step 0.01 in a window around the coarse optimum.
svc_calculate_delta <-
    function (votes, ncs, nts){
        nis <- calculate_nis(ncs, nts)
        int <- 1
        #
        # coarse iteration: evaluate the likelihood on the grid
        # -10, -10 + int, ..., 10 and keep the maximizing delta
        max_l <- -1 * .Machine$double.xmax
        value_delta <- -10
        num_values <- (-1*2*value_delta*(1/int))+1
        deltas <- vector(length=num_values)
        deltas[1] = value_delta
        for (i in 2:num_values) deltas[i] <- deltas[i-1]+int
        for (i in 1:num_values) {
            l <- likelihood_function(votes, nis, deltas[i])
            # NaN likelihoods (log of 0 probabilities) are skipped
            if (!is.nan(l)) {
                if (max_l < l) {
                    max_l <- l
                    value_delta <- deltas[i]
                }
            }
        }
        #
        # finer iteration: re-scan [coarse optimum - int, coarse optimum + int]
        # in steps of 0.01
        max_l <- -1 * .Machine$double.xmax
        value_delta <- value_delta - int
        num_values <- (100 * int * 2) + 1
        deltas <- vector(length=num_values)
        deltas[1] = value_delta
        for (i in 2:num_values) deltas[i] <- deltas[i-1]+0.01
        for (i in 1:num_values) {
            l <- likelihood_function(votes, nis, deltas[i])
            if (!is.nan(l)) {
                if (max_l < l) {
                    max_l <- l
                    value_delta <- deltas[i]
                }
            }
        }
        return(value_delta)
    }
#
# Accuracy calculation: 1 if the d-th true effect size falls inside the
# estimated confidence interval around delta, 0 otherwise.
tell_svc_accuracy <-
    function (ncs, nts, delta, cutoff,d,list_effect_sizes) {
        nis <- calculate_nis(ncs, nts)
        svc_var <- 1/sum((tell_Di(nis, delta)^2)/(tell_pi(nis, delta)*(1-tell_pi(nis, delta))))
        half_width <- sqrt(svc_var)*cutoff
        covered <- (list_effect_sizes[d] >= delta - half_width) &&
                   (list_effect_sizes[d] <= delta + half_width)
        return(if(covered) 1 else 0)
    }
#
# Empirical power calculation: 1 if the lower confidence bound around
# delta excludes zero, 0 otherwise.
tell_svc_emp_power <-
    function (ncs, nts, delta, cutoff) {
        nis <- calculate_nis(ncs, nts)
        svc_var <- 1/sum((tell_Di(nis, delta)^2)/(tell_pi(nis, delta)*(1-tell_pi(nis, delta))))
        lower_bound <- delta - sqrt(svc_var)*cutoff
        return(if(lower_bound > 0) 1 else 0)
    }
|
/R/svc-functions.R
|
no_license
|
GRISE-UPM/comparison.of.meta.analysis.methods
|
R
| false
| false
| 3,174
|
r
|
# -----------------------------------------------------------------------------------------------------------
# Research: A comparison of meta-analysis methods: Understanding the influence of experiments'
# statistical parameters
# File name: svc-functions.R
# File type: secondary R Script
# Date: March 2015
# R Script contributors: Oscar Dieste, Omar S. Gomez
# Purpose: functions to perform meta-analysis using SVC
# ----------------------------------------------------------------------------------------------------------
#
#
# The vote from a single study: 1 if the two-sample t-test between the
# control and treatment groups is significant, 0 otherwise.
# NOTE(review): the significance cutoff is hard-coded at 0.05; `level`
# only sets the CI level of the t-test — confirm this is intended.
tell_vote <-
    function (control, treatment, level) {
        ## fix idiom: [[ ]] extracts the numeric p-value itself rather than
        ## a one-element list (the list form only compared via coercion)
        p <- t.test(x=control, y=treatment, conf.level=level)[["p.value"]]
        if (p > 0.05) return (0)
        else return(1)
    }
#
# Auxiliary functions
# Effective per-study sample size combining both group sizes
calculate_nis <-
    function (ncs, nts)
        return ((ncs*nts)/(ncs+nts))
# Log-likelihood of the observed votes given effect size delta
likelihood_function <-
    function (votes, nis, delta) (sum((votes*log(1-pnorm(-1*sqrt(nis)*delta))) + ((1-votes)*log(pnorm(-1*sqrt(nis)*delta)))))
# Scaled normal density of the vote statistic at delta
tell_Di <-
    function (nis, delta) (sqrt(nis/(2*pi))*exp(-0.5*nis*(delta^2)))
# Probability that a study with effective size nis casts a significant vote
tell_pi <-
    function (nis, delta) (1-pnorm(-1*sqrt(nis)*delta))
#
# Calculate the most probable effect size: coarse grid search over
# [-10, 10] (step 1), then a finer scan (step 0.01) around the optimum.
svc_calculate_delta <-
    function (votes, ncs, nts){
        nis <- calculate_nis(ncs, nts)
        int <- 1
        #
        # coarse iteration
        max_l <- -1 * .Machine$double.xmax
        value_delta <- -10
        num_values <- (-1*2*value_delta*(1/int))+1
        deltas <- vector(length=num_values)
        deltas[1] = value_delta
        for (i in 2:num_values) deltas[i] <- deltas[i-1]+int
        for (i in 1:num_values) {
            l <- likelihood_function(votes, nis, deltas[i])
            if (!is.nan(l)) {
                if (max_l < l) {
                    max_l <- l
                    value_delta <- deltas[i]
                }
            }
        }
        #
        # finer iteration
        max_l <- -1 * .Machine$double.xmax
        value_delta <- value_delta - int
        num_values <- (100 * int * 2) + 1
        deltas <- vector(length=num_values)
        deltas[1] = value_delta
        for (i in 2:num_values) deltas[i] <- deltas[i-1]+0.01
        for (i in 1:num_values) {
            l <- likelihood_function(votes, nis, deltas[i])
            if (!is.nan(l)) {
                if (max_l < l) {
                    max_l <- l
                    value_delta <- deltas[i]
                }
            }
        }
        return(value_delta)
    }
#
# Accuracy calculation: 1 if the d-th true effect size falls inside the
# estimated confidence interval around delta, 0 otherwise.
tell_svc_accuracy <-
    function (ncs, nts, delta, cutoff,d,list_effect_sizes) {
        nis <- calculate_nis(ncs, nts)
        svc_var <- 1/sum((tell_Di(nis, delta)^2)/(tell_pi(nis, delta)*(1-tell_pi(nis, delta))))
        left_side <- delta - sqrt(svc_var)*cutoff
        right_side <- delta + sqrt(svc_var)*cutoff
        return(if((list_effect_sizes[d] >= left_side) && (list_effect_sizes[d] <= right_side)) 1 else 0)
    }
#
# Empirical power calculation: 1 if the lower confidence bound around
# delta excludes zero, 0 otherwise.
tell_svc_emp_power <-
    function (ncs, nts, delta, cutoff) {
        nis <- calculate_nis(ncs, nts)
        svc_var <- 1/sum((tell_Di(nis, delta)^2)/(tell_pi(nis, delta)*(1-tell_pi(nis, delta))))
        left_side <- delta - sqrt(svc_var)*cutoff
        return(if(0 < left_side) 1 else 0)
    }
|
# Install and load packages
# NOTE(review): this is a generated MEPS summary-table template. The tokens
# .FYC., .year. and .yy. are placeholders substituted per survey year before
# the script runs; the file is not runnable as-is.
package_names <- c("survey","dplyr","foreign","devtools")
# Install any package not yet present, then attach them all.
lapply(package_names, function(x) if(!x %in% installed.packages()) install.packages(x))
lapply(package_names, require, character.only=T)
install_github("e-mitchell/meps_r_pkg/MEPS")
library(MEPS)
# Lonely-PSU adjustment so single-PSU strata do not error during variance estimation.
options(survey.lonely.psu="adjust")
# Load FYC file
FYC <- read.xport('C:/MEPS/.FYC..ssp');
year <- .year.
# Recode negative ages (MEPS missing-value codes) to NA, then take the most
# recent non-missing age per person (end-of-year first, then rounds 4/2, 3/1).
FYC <- FYC %>%
mutate_at(vars(starts_with("AGE")),funs(replace(., .< 0, NA))) %>%
mutate(AGELAST = coalesce(AGE.yy.X, AGE42X, AGE31X))
FYC$ind = 1
# Reason for difficulty receiving needed dental care
# Collapse the delayed/unable items into 0/1 indicators (via *1):
#   delay_DN  - delayed or unable to get dental care
#   afford_DN - reason was cost (code 1)
#   insure_DN - reason was insurance-related (codes 2-3)
#   other_DN  - any other reason (codes > 3)
FYC <- FYC %>%
mutate(delay_DN = (DNUNAB42 == 1 | DNDLAY42 == 1)*1,
afford_DN = (DNDLRS42 == 1 | DNUNRS42 == 1)*1,
insure_DN = (DNDLRS42 %in% c(2,3) | DNUNRS42 %in% c(2,3))*1,
other_DN = (DNDLRS42 > 3 | DNUNRS42 > 3)*1)
# Sex
FYC <- FYC %>%
mutate(sex = recode_factor(SEX, .default = "Missing", .missing = "Missing",
"1" = "Male",
"2" = "Female"))
# Complex survey design: PSU, stratum, and person weight from the FYC file.
FYCdsgn <- svydesign(
id = ~VARPSU,
strata = ~VARSTR,
weights = ~PERWT.yy.F,
data = FYC,
nest = TRUE)
# Weighted population totals of each reason indicator by sex, restricted to
# persons eligible for the access supplement (ACCELI42==1) who reported a delay.
results <- svyby(~afford_DN + insure_DN + other_DN, FUN = svytotal, by = ~sex, design = subset(FYCdsgn, ACCELI42==1 & delay_DN==1))
print(results)
|
/mepstrends/hc_care/json/code/r/totPOP__sex__rsn_DN__.r
|
permissive
|
RandomCriticalAnalysis/MEPS-summary-tables
|
R
| false
| false
| 1,295
|
r
|
# Install and load packages
# NOTE(review): this is a generated MEPS summary-table template. The tokens
# .FYC., .year. and .yy. are placeholders substituted per survey year before
# the script runs; the file is not runnable as-is.
package_names <- c("survey","dplyr","foreign","devtools")
# Install any package not yet present, then attach them all.
lapply(package_names, function(x) if(!x %in% installed.packages()) install.packages(x))
lapply(package_names, require, character.only=T)
install_github("e-mitchell/meps_r_pkg/MEPS")
library(MEPS)
# Lonely-PSU adjustment so single-PSU strata do not error during variance estimation.
options(survey.lonely.psu="adjust")
# Load FYC file
FYC <- read.xport('C:/MEPS/.FYC..ssp');
year <- .year.
# Recode negative ages (MEPS missing-value codes) to NA, then take the most
# recent non-missing age per person (end-of-year first, then rounds 4/2, 3/1).
FYC <- FYC %>%
mutate_at(vars(starts_with("AGE")),funs(replace(., .< 0, NA))) %>%
mutate(AGELAST = coalesce(AGE.yy.X, AGE42X, AGE31X))
FYC$ind = 1
# Reason for difficulty receiving needed dental care
# Collapse the delayed/unable items into 0/1 indicators (via *1):
#   delay_DN  - delayed or unable to get dental care
#   afford_DN - reason was cost (code 1)
#   insure_DN - reason was insurance-related (codes 2-3)
#   other_DN  - any other reason (codes > 3)
FYC <- FYC %>%
mutate(delay_DN = (DNUNAB42 == 1 | DNDLAY42 == 1)*1,
afford_DN = (DNDLRS42 == 1 | DNUNRS42 == 1)*1,
insure_DN = (DNDLRS42 %in% c(2,3) | DNUNRS42 %in% c(2,3))*1,
other_DN = (DNDLRS42 > 3 | DNUNRS42 > 3)*1)
# Sex
FYC <- FYC %>%
mutate(sex = recode_factor(SEX, .default = "Missing", .missing = "Missing",
"1" = "Male",
"2" = "Female"))
# Complex survey design: PSU, stratum, and person weight from the FYC file.
FYCdsgn <- svydesign(
id = ~VARPSU,
strata = ~VARSTR,
weights = ~PERWT.yy.F,
data = FYC,
nest = TRUE)
# Weighted population totals of each reason indicator by sex, restricted to
# persons eligible for the access supplement (ACCELI42==1) who reported a delay.
results <- svyby(~afford_DN + insure_DN + other_DN, FUN = svytotal, by = ~sex, design = subset(FYCdsgn, ACCELI42==1 & delay_DN==1))
print(results)
|
! This rule file was created from: n-iris-aca.rul and from: m-iris.d
! --------------------------------------------------------
1, 50, 57
(petal_width,0.1..1) -> (class,Iris-setosa)
2, 45, 46
(petal_width,1..1.7) & (petal_length,1..4.8) -> (class,Iris-versicolor)
3, 4, 5
(petal_length,4.8..5.1) & (sepal_length,6.2..7.9) & (petal_width,1..1.7)
-> (class,Iris-versicolor)
3, 2, 3
(petal_length,4.8..5.1) & (sepal_width,3..3.1) & (sepal_length,6.2..7.9)
-> (class,Iris-versicolor)
2, 2, 4
(petal_length,4.8..5.1) & (sepal_width,3.1..4.4) -> (class,Iris-versicolor)
3, 20, 20
(sepal_width,2.7..3) & (petal_width,1..1.7) & (sepal_length,4.3..6.2)
-> (class,Iris-versicolor)
2, 38, 38
(petal_width,1.7..2.5) & (petal_length,5.1..6.9) -> (class,Iris-viginica)
3, 3, 3
(petal_length,4.8..5.1) & (sepal_length,4.3..6.2) & (sepal_width,3..3.1)
-> (class,Iris-viginica)
2, 24, 25
(petal_width,1.7..2.5) & (sepal_width,2.7..3) -> (class,Iris-viginica)
3, 4, 5
(sepal_width,2..2.7) & (sepal_length,4.3..6.2) & (petal_length,4.8..5.1)
-> (class,Iris-viginica)
2, 9, 9
(sepal_width,2..2.7) & (petal_width,1.7..2.5) -> (class,Iris-viginica)
2, 36, 36
(petal_length,5.1..6.9) & (sepal_length,6.2..7.9) -> (class,Iris-viginica)
2, 6, 7
(sepal_width,2..2.7) & (petal_length,5.1..6.9) -> (class,Iris-viginica)
|
/m-iris.r
|
no_license
|
Nidhimidha/Special-Data-Mining-EECS-839
|
R
| false
| false
| 1,330
|
r
|
! This rule file was created from: n-iris-aca.rul and from: m-iris.d
! --------------------------------------------------------
1, 50, 57
(petal_width,0.1..1) -> (class,Iris-setosa)
2, 45, 46
(petal_width,1..1.7) & (petal_length,1..4.8) -> (class,Iris-versicolor)
3, 4, 5
(petal_length,4.8..5.1) & (sepal_length,6.2..7.9) & (petal_width,1..1.7)
-> (class,Iris-versicolor)
3, 2, 3
(petal_length,4.8..5.1) & (sepal_width,3..3.1) & (sepal_length,6.2..7.9)
-> (class,Iris-versicolor)
2, 2, 4
(petal_length,4.8..5.1) & (sepal_width,3.1..4.4) -> (class,Iris-versicolor)
3, 20, 20
(sepal_width,2.7..3) & (petal_width,1..1.7) & (sepal_length,4.3..6.2)
-> (class,Iris-versicolor)
2, 38, 38
(petal_width,1.7..2.5) & (petal_length,5.1..6.9) -> (class,Iris-viginica)
3, 3, 3
(petal_length,4.8..5.1) & (sepal_length,4.3..6.2) & (sepal_width,3..3.1)
-> (class,Iris-viginica)
2, 24, 25
(petal_width,1.7..2.5) & (sepal_width,2.7..3) -> (class,Iris-viginica)
3, 4, 5
(sepal_width,2..2.7) & (sepal_length,4.3..6.2) & (petal_length,4.8..5.1)
-> (class,Iris-viginica)
2, 9, 9
(sepal_width,2..2.7) & (petal_width,1.7..2.5) -> (class,Iris-viginica)
2, 36, 36
(petal_length,5.1..6.9) & (sepal_length,6.2..7.9) -> (class,Iris-viginica)
2, 6, 7
(sepal_width,2..2.7) & (petal_length,5.1..6.9) -> (class,Iris-viginica)
|
library(crimeutils)
library(dplyr)
library(readr)
library(DT)
library(scales)
library(tidyr)
library(ggplot2)
library(readr)
library(mapproj)
library(lubridate)
devtools::install_github("wmurphyrd/fiftystater")
library(fiftystater)
# Build a frequency table for one column of `data`, also recording the
# first year each distinct value appears.
#
# Args:
#   data:      data.frame containing the target column and a `year` column.
#   column:    name (string) of the column to tabulate.
#   col_names: names to assign to the output columns, in order
#              (value, first_year, number, percent).
#
# Returns: data.frame sorted by count then first year, both descending,
#          with values capitalized via crimeutils::capitalize_words().
make_frequency_table_year <- function(data, column, col_names) {
  values <- unique(data[, column])
  values <- values[!is.na(values)]
  temp_df <- data.frame(col1 = values,
                        first_year = NA,
                        number = NA)
  # seq_len() is safe when there are zero distinct values; the original
  # 1:nrow(temp_df) would iterate over c(1, 0) and fail.
  for (i in seq_len(nrow(temp_df))) {
    loop_value <- temp_df$col1[i]
    matching <- data[data[, column] %in% loop_value, ]
    temp_df$number[i] <- nrow(matching)
    temp_df$first_year[i] <- min(matching$year)
  }
  temp_df <-
    temp_df %>%
    mutate(percent = number / sum(number)) %>%
    arrange(desc(number),
            desc(first_year)) %>%
    mutate(col1 = crimeutils::capitalize_words(col1))
  names(temp_df) <- col_names
  return(temp_df)
}
# Build a simple frequency table (count and percent) for one column.
#
# Args:
#   data:      data.frame containing the target column.
#   column:    name (string) of the column to tabulate.
#   col_names: names for the output columns (value, number, percent).
#
# Returns: data.frame sorted by count descending, with values capitalized
#          via crimeutils::capitalize_words().
make_frequency_table <- function(data, column, col_names) {
  values <- unique(data[, column])
  values <- values[!is.na(values)]
  temp_df <- data.frame(col1 = values,
                        number = NA)
  # seq_len() handles the zero-distinct-values case; 1:nrow() would not.
  for (i in seq_len(nrow(temp_df))) {
    loop_value <- temp_df$col1[i]
    matching <- data[data[, column] %in% loop_value, ]
    temp_df$number[i] <- nrow(matching)
  }
  temp_df <-
    temp_df %>%
    mutate(percent = number / sum(number)) %>%
    arrange(desc(number)) %>%
    mutate(col1 = crimeutils::capitalize_words(col1))
  names(temp_df) <- col_names
  return(temp_df)
}
|
/_common.R
|
no_license
|
amyfkaplan/ucrbook
|
R
| false
| false
| 1,507
|
r
|
library(crimeutils)
library(dplyr)
library(readr)
library(DT)
library(scales)
library(tidyr)
library(ggplot2)
library(readr)
library(mapproj)
library(lubridate)
devtools::install_github("wmurphyrd/fiftystater")
library(fiftystater)
# Build a frequency table for one column of `data`, also recording the
# first year each distinct value appears.
#
# Args:
#   data:      data.frame containing the target column and a `year` column.
#   column:    name (string) of the column to tabulate.
#   col_names: names to assign to the output columns, in order
#              (value, first_year, number, percent).
#
# Returns: data.frame sorted by count then first year, both descending,
#          with values capitalized via crimeutils::capitalize_words().
make_frequency_table_year <- function(data, column, col_names) {
  values <- unique(data[, column])
  values <- values[!is.na(values)]
  temp_df <- data.frame(col1 = values,
                        first_year = NA,
                        number = NA)
  # seq_len() is safe when there are zero distinct values; the original
  # 1:nrow(temp_df) would iterate over c(1, 0) and fail.
  for (i in seq_len(nrow(temp_df))) {
    loop_value <- temp_df$col1[i]
    matching <- data[data[, column] %in% loop_value, ]
    temp_df$number[i] <- nrow(matching)
    temp_df$first_year[i] <- min(matching$year)
  }
  temp_df <-
    temp_df %>%
    mutate(percent = number / sum(number)) %>%
    arrange(desc(number),
            desc(first_year)) %>%
    mutate(col1 = crimeutils::capitalize_words(col1))
  names(temp_df) <- col_names
  return(temp_df)
}
# Build a simple frequency table (count and percent) for one column.
#
# Args:
#   data:      data.frame containing the target column.
#   column:    name (string) of the column to tabulate.
#   col_names: names for the output columns (value, number, percent).
#
# Returns: data.frame sorted by count descending, with values capitalized
#          via crimeutils::capitalize_words().
make_frequency_table <- function(data, column, col_names) {
  values <- unique(data[, column])
  values <- values[!is.na(values)]
  temp_df <- data.frame(col1 = values,
                        number = NA)
  # seq_len() handles the zero-distinct-values case; 1:nrow() would not.
  for (i in seq_len(nrow(temp_df))) {
    loop_value <- temp_df$col1[i]
    matching <- data[data[, column] %in% loop_value, ]
    temp_df$number[i] <- nrow(matching)
  }
  temp_df <-
    temp_df %>%
    mutate(percent = number / sum(number)) %>%
    arrange(desc(number)) %>%
    mutate(col1 = crimeutils::capitalize_words(col1))
  names(temp_df) <- col_names
  return(temp_df)
}
|
######################################################################################
#
# Predict House Sales
#
# This script loads the master house observations file, filters it based on year and
# excludes observations with key missing features (e.g., if there's no house value,
# the the record is probably the results of a failed extrapolation, such as no prior
# transaction to extrapolate from).
#
# It then does some basic imputation
#
#
######################################################################################
# tClean is the whole data from 2006 onwards.
# Missingness:
# some rows are omitted (e.g., no Yrssincesold values,etc)
# Others use averages or regressions
# Some missing remain in cols that aren't important and not used in modeling
library(dplyr)
library(tidyr)
library(ROCR)
library(caret)
library(randomForest)
library(glmnet)
library(data.table)
library(Matrix)
library(xgboost)
library(Metrics)
library(ROSE)
t2 = read.csv("allHouseObsAndDemo20161213.csv", row.names = 1)
t2$X = NULL
tClean = t2 %>% filter(ObsYear>2009 & !is.na(HouseValPctDevPrevSale) & !is.na(TrueHouseValue))
filterYear = 0
# tClean = t2 %>% filter(ObsYear==filterYear & !is.na(HouseValPctDevPrevSale) & !is.na(TrueHouseValue))
tClean$NumBeds[is.na(tClean$NumBeds)]=3
tClean$NumBaths[is.na(tClean$NumBaths)]=2
tClean$RaceOther = log(1.01-tClean$RaceWhite)
tClean$RaceWhite = NULL
tClean= tClean %>% mutate(Sqft=ifelse(is.na(Sqft), -100 + 690*NumBeds,Sqft))
tClean$SoldPrevYear = as.integer(tClean$YrsSinceSoldLag<1)
tClean$LotSize[is.na(tClean$LotSize)]=median(tClean$LotSize, na.rm=TRUE)
tClean$YrsSinceSoldLag[is.na(tClean$YrsSinceSoldLag)]=mean(tClean$YrsSinceSoldLag, na.rm=TRUE)
#tClean$YrsOld[is.na(tClean$YrsOld)]=26
tClean$YrsSinceRemodel[is.na(tClean$YrsSinceRemodel)]=22
tClean$HHMedIncome[is.na(tClean$HHMedIncome)]=mean(tClean$HHMedIncome, na.rm=TRUE)
#filter out other missingness
#tClean <- tClean[rowSums(is.na(tClean)) == 0,]
tClean$LotSize = log(tClean$LotSize)
tClean$Sqft = log(tClean$Sqft)
tClean$HouseValPrevYear = log(tClean$HouseValPrevYear)
# tClean at this point should match up to tClean in order to later extract address data
# treg = tClean %>%
# select(LotSize,NumBeds,SaleFlg,
# YrsSinceRemodel,YrsSinceSoldLag, SoldPrevYear,
# HouseValPrevYear, HouseValPctDevPrevSale, ZipCode,
# YoYPctChgLaborForce, YoYChgUnempRate,
# HHMChildUnder18,
# MortAnd2ndOrEq, RaceOther, YoYChg)
# logistic regression significant features
#
# treg = tClean %>%
# select(LotSize, NumBaths, NumBeds, Sqft, YrsSinceRemodel, YrsSinceSoldLag, SoldPrevYear, HouseValPctDevPrevSale, ZipCode, YoYPctChgLaborForce, YoYChgUnempRate, EduAttainBach, HHMChildUnder18, MedValue, YoYChg, RaceOther
# )
# full version
treg = tClean %>%
select(LotSize,NumBaths,NumBeds,SaleFlg,
Sqft, YrsSinceRemodel,YrsSinceSoldLag, SoldPrevYear,
HouseValPrevYear, HouseValPctDevPrevSale, ZipCode, unempRate,
YoYPctChgLaborForce, YoYChgUnempRate,
EduAttainBach,HHMedIncome, HHMChildUnder18,HHMSeniorsOver65,MedValue,
Mortgage,MortAnd2ndAndEq,MortAnd2ndOrEq, RaceOther, HHwRetIncome, YoYChg, Rate)
# lasso removed:
#NumBaths, Sqft, unempRate, EduAttainBach, HHMedIncome, HHMSeniorsOver65, MedValue, Mortgage, MortAnd2ndOrEq, HHwRetIncome, Rate
######################################################################################
#
# Build Test and Training sets
#
######################################################################################
y = treg[,"SaleFlg", drop=FALSE]
treg$SaleFlg = NULL
treg = scale(treg)
treg = cbind(treg, SaleFlag=y)
tregX = treg
tregX$SaleFlg = NULL
#write.csv(treg, "allHouseObsAndDemoNoNAs.csv")
#treg$SaleFlg = as.integer(as.character(treg$SaleFlg)) # for regression
treg$SaleFlg = as.factor(treg$SaleFlg)
set.seed(999)
train = createDataPartition(treg$SaleFlg, p = 0.7, list=FALSE)
tregTest = treg[-train,]
tregTestX = tregX[-train,]
tregTrain = treg[train,]
# do overtraining
tregTrainOver = ROSE(SaleFlg ~ ., data=tregTrain, p=0.3)$data
######################################################################################
#
# Logistic Regresssion
#
#
######################################################################################
logit.sat = glm(SaleFlg ~.,
family = "binomial",
data = tregTrainOver, control = list(maxit = 200))
summary(logit.sat)
lsumAll = summary(logit.sat)
logit.featToKeep = paste(names(which(lsumAll$coefficients[,4]<0.1))[-1],sep = "", collapse = ", ")
logit.featToKeep # this can be used to modify the feature set
pchisq(logit.sat$deviance, logit.sat$df.residual, lower.tail = FALSE)
# explains 3% of the variance
1 - logit.sat$deviance/logit.sat$null.deviance
################################
# reduced set of features
################################
tregTrainOver.reduced = tregTrainOver %>% select(
LotSize,
NumBaths,
NumBeds,
Sqft,
YrsSinceRemodel,
YrsSinceSoldLag,
SoldPrevYear,
HouseValPctDevPrevSale,
YoYPctChgLaborForce,
YoYChgUnempRate,
EduAttainBach,
HHMedIncome,
HHMChildUnder18,
RaceOther,
YoYChg,
SaleFlg
)
tregTest.reduced = tregTest %>% select(
LotSize, NumBaths, NumBeds, Sqft, YrsSinceRemodel, YrsSinceSoldLag, SoldPrevYear, HouseValPctDevPrevSale, YoYPctChgLaborForce, YoYChgUnempRate, EduAttainBach, HHMedIncome, HHMChildUnder18, RaceOther, YoYChg, SaleFlg
)
logit.reduced = glm(SaleFlg ~.,
family = "binomial",
data = tregTrainOver.reduced, control = list(maxit = 200))
summary(logit.reduced)
################################
# Lasso Regression
################################
grid = 10^seq(1, -5, length = 100)
# need matrices for glmnet
x_train = model.matrix(SaleFlg ~ ., tregTrainOver)[,-1]
y_train = as.integer(as.character(tregTrainOver$SaleFlg))
x_test = model.matrix(SaleFlg ~ ., tregTest)[,-1]
y_test = as.integer(as.character(tregTest$SaleFlg))
lasso.models.train = glmnet(x_train, y_train, alpha = 1, lambda = grid)
#Running 5-fold cross validation.
set.seed(0)
cv.lasso.out = cv.glmnet(x_train, y_train,
lambda = grid, alpha = 1, nfolds = 5)
bestlambda.lasso = cv.lasso.out$lambda.min
# who got shrunk
lasso.coeff = coef(cv.lasso.out, s = "lambda.min")
################################
# Random Forest
################################
# convert to integer so RF does a regression
tregTrainOver$SaleFlg = as.integer(as.character(tregTrainOver$SaleFlg))
rf.houses = randomForest(SaleFlg ~ ., data = tregTrainOver, importance = TRUE, ntree=500)
################################
# Summarize
################################
# Locate a point on a ROCR performance curve: given an x-axis value
# (e.g. FPR), return the index of the first matching curve point, the
# alpha (score cutoff) at that point, and the y value (e.g. TPR).
#
# Args:
#   perf:   ROCR performance object (slots x.values, y.values, alpha.values).
#   cutoff: x-axis value to look up (matched after rounding to 4 decimals).
#
# Returns: named numeric vector c(cutoffIndex, cutoffVal, cutoffY);
#          components are NA when no curve point matches.
getCutoffs = function(perf, cutoff) {
  x = perf@x.values[[1]]
  cutoffIndex = which(round(x,4)==round(cutoff,4))[1]
  a = perf@alpha.values[[1]]
  cutoffVal = a[cutoffIndex]
  y = perf@y.values[[1]]
  # BUG FIX: the original indexed y with the undefined variable `xidx`,
  # which errored (or silently used a stale global). Use the index
  # computed above.
  cutoffY = y[cutoffIndex]
  return (c("cutoffIndex"=cutoffIndex, "cutoffVal"=cutoffVal, "cutoffY"=cutoffY))
}
# logistic
lr.pval = predict(logit.sat, tregTest, type = "response")
lr.predict = prediction(lr.pval, tregTest$SaleFlg)
lr.sat.roc.perf = performance(lr.predict, measure = 'tpr', x.measure = 'fpr')
lr.cutoffs = getCutoffs(lr.sat.roc.perf, 0.5)
sale.predicted = ifelse(lr.pval>lr.cutoffs["cutoffVal"], 1, 0)
logRes = data.frame("ObsNum"=row.names(tregTest), 'type'="LogitFull", "truth"= as.character(tregTest$SaleFlg), "Predict" = sale.predicted, 'Prob'=lr.pval)
table(logRes[,3:4])
# logistic reduced
# Evaluate the reduced-feature logistic model on the held-out test set.
# BUG FIX: the original called predict(logit.sig, ...) but no object named
# logit.sig is ever created; the reduced model fitted above is logit.reduced.
lr.red.pval = predict(logit.reduced, tregTest, type = "response")
lr.red.predict = prediction(lr.red.pval, tregTest$SaleFlg)
lr.red.roc.perf = performance(lr.red.predict, measure = 'tpr', x.measure = 'fpr')
# Find TPR/alpha at FPR = 0.5 and classify using that score threshold.
lr.red.cutoffs = getCutoffs(lr.red.roc.perf, 0.5)
sale.predicted = ifelse(lr.red.pval>lr.red.cutoffs["cutoffVal"], 1, 0)
logRes.red = data.frame("ObsNum"=row.names(tregTest), 'type'="LogitRed", "truth"= as.character(tregTest$SaleFlg), "Predict" = sale.predicted, 'Prob'=lr.red.pval)
table(logRes.red[,3:4])
# lasso
las.pval = predict(lasso.models.train, s = bestlambda.lasso, newx = x_test)
las.predict = prediction(las.pval, tregTest$SaleFlg)
las.roc.perf = performance(las.predict, measure = 'tpr', x.measure = 'fpr')
las.cutoffs = getCutoffs(las.roc.perf, 0.5)
sale.predicted = ifelse(las.pval>las.cutoffs["cutoffVal"], 1, 0)
lasRes = data.frame("ObsNum"=row.names(tregTest), 'type'="Lasso", "truth"= as.character(tregTest$SaleFlg), "Predict" = as.numeric(sale.predicted), 'Prob'=as.numeric(las.pval))
table(lasRes[,3:4])
# random forest
rf.pval = log(predict(rf.houses, tregTest))
rf.predict = prediction(rf.pval, tregTest$SaleFlg)
rf.roc.perf = performance(rf.predict, measure = 'tpr', x.measure = 'fpr')
rf.cutoffs = getCutoffs(rf.roc.perf, 0.5)
sale.predicted = ifelse(rf.pval>rf.cutoffs["cutoffVal"], 1, 0)
rfRes = data.frame("ObsNum"=row.names(tregTest), 'type'="RF", "truth"= as.character(tregTest$SaleFlg), "Predict" = as.numeric(sale.predicted), 'Prob'=as.numeric(rf.pval))
table(rfRes[,3:4])
allRes = rbind(rfRes, logRes, logRes.red)
allRes$Correct = allRes$truth == allRes$Predict
allRes %>% group_by(truth, Correct, ObsNum) %>% summarize(cnt=sum(Predict)) %>%
group_by(truth, Correct, cnt) %>% count() %>% ungroup()
agg1 = allRes %>% select(-Prob, -Correct) %>% spread(type, Predict)
agg2 = allRes %>% select(-Prob, -Predict) %>% spread(type, Correct)
colnames(agg2) <- c("ObsNum","truth","RF.Cor","LogitFull.Cor","LogitRed.Cor")
# Combine per-model predictions (agg1) with correctness flags (agg2), then
# roll hit counts up to one row per truth class.
# SYNTAX FIX: a line may not begin with %>% in R -- the pipe operator must
# end the preceding line, otherwise `agg = cbind(...)` parses as a complete
# statement and the next line is a parse error.
agg = cbind(agg1, agg2[,3:5]) %>%
group_by(ObsNum, truth) %>% summarize(RF=sum(RF, na.rm=T), LogitFull=sum(LogitFull, na.rm=T), LogitRed=sum(LogitRed, na.rm=T)) %>%
group_by(truth) %>% summarize(RF=sum(RF), LogitFull=sum(LogitFull), LogitRed=sum(LogitRed))
consensus = allRes %>% group_by(ObsNum) %>% summarize(cnt=sum(Predict)) %>% ungroup()
consensus = cbind(logRes, consensus)
consensus[which(consensus$cnt %in% c(1)),"Predict"]=0
table(consensus[,3:4])
plot(las.roc.perf, colorize = TRUE, main="ROC for 4 Models")
plot(lr.red.roc.perf, add = TRUE, colorize = TRUE)
plot(lr.sat.roc.perf, add = TRUE, colorize = TRUE)
plot(rf.roc.perf, add = TRUE, colorize = TRUE)
abline(a=0, b= 1, col='grey')
abline(a=y[xidx], b= 0, col='red')
abline(v= x[xidx],col='red')
logRes$truth = as.factor(logRes$truth)
ggplot(logRes, aes(x=Prob, fill=truth)) + geom_histogram(bins = 60, position='identity') + geom_vline(xintercept=lr.cutoffs["cutoffVal"]) +
theme_minimal() + xlab('Prediction (prob)') + ggtitle("Sale vs Non-Sale Logit Prediction") +
scale_fill_manual(values=c("paleturquoise3", "red4"),
name=NULL,
breaks=c(0,1 ),
labels=c("No Sale", "Sale"))
lasRes$truth = as.factor(lasRes$truth)
ggplot(lasRes, aes(x=Prob, fill=truth)) + geom_histogram(bins = 60, position='identity') + geom_vline(xintercept=las.cutoffs["cutoffVal"]) +
theme_minimal() + xlab('Prediction (prob)') + ggtitle("Sale vs Non-Sale Logit Prediction") +
scale_fill_manual(values=c("paleturquoise3", "red4"),
name=NULL,
breaks=c(0,1 ),
labels=c("No Sale", "Sale"))
plot(lr.sat.roc.perf, main="ROC for 4 Models")
abline(a=0, b= 1)
abline(a=y[xidx], b= 0)
abline(v= x[xidx])
sale.predicted = ifelse(pval>lr.cutoff, 1, 0)
logRes = data.frame("truth"= as.character(tregTest$SaleFlg), "Predict" = sale.predicted, 'Prob'=pval)
table(logRes[,1:2])
logRes$truth = as.factor(logRes$truth)
ggplot(subset(logRes,truth==0), aes(x=Prob)) + geom_histogram(bins = 60, fill='cadetblue4') +
geom_histogram(data=subset(logRes,truth==1), aes(x=Prob), fill='brown4', bins = 60) + geom_vline(xintercept=lr.cutoff) +
theme_minimal() + xlab('Prediction (prob)') + ggtitle("Sale(blue) vs Non-Sale(red) Logit Prediction")
summary(logRes$truth)
rfRes$truth = as.factor(rfRes$truth)
ggplot(subset(rfRes,truth==0), aes(x=log(Prob))) + geom_histogram(bins = 60, fill='blue') +
geom_histogram(data=subset(rfRes,truth==1), aes(x=log(Prob)), fill='red', bins = 60) +
geom_vline(xintercept=log(rf.cutoff)) +
theme_minimal()
importance(rf.houses, type=1)
varImpPlot(rf.houses, type=1, main="Variable Importance for RF")
# combine Logistic and RF
logRes = data.frame("truth"= as.character(tregTest$SaleFlg), "Predict" = sale.predicted, 'Prob'=pval)
rfRes =data.frame('truth'=tregTest$SaleFlg, "Predict" = sale.rf.predicted,"Prob"=rf.pval)
comp.pred = cbind(logRes, rfRes)
comp.pred[,4]=NULL
names(comp.pred) <- c("Truth", "LR.pred", "LR.Prob", "RF.Pred","RF.Prob")
comp.pred$RF.LogProb = log(comp.pred$RF.Prob)
comp.pred$combPred = comp.pred$RF.LogProb > lr.cutoff | comp.pred$LR.Prob > lr.cutoff
comp.pred$RF.LogProb = scale(comp.pred$RF.LogProb)
cp =comp.pred[,c( 3, 5)]
cp = scale(cp)
cpp = comp.pred[,1, drop=F]
cpp = cbind(cp, cpp)
ggplot(subset(comp.pred,Truth==0), aes(x=RF.LogProb)) + geom_histogram(alpha=0.3, bins = 60, fill='blue') +
geom_histogram(data=subset(comp.pred,Truth==1), aes(x=RF.LogProb), fill='red', alpha=0.3, bins = 60) +
theme_minimal()
table(treg$SaleFlg, treg$LotSize<=0.46)
630/9859
library(parallel)
# Calculate the number of cores
no_cores <- detectCores() - 1
# Initiate cluster
cl <- makeCluster(no_cores)
tregTrainOverX = tregTrainOver
y_train = as.integer(as.character(tregTrainOverX$SaleFlg))
tregTrainOverX[,"SaleFlg"] = NULL
tregTestX = tregTest
y_test = as.integer(as.character(tregTestX$SaleFlg))
tregTestX[,"SaleFlg"] = NULL
dtrain = xgb.DMatrix(as.matrix(tregTrainOverX), label=y_train)
dtest = xgb.DMatrix(as.matrix(tregTestX), label = y_test)
xgb_params = list(
colsample_bytree = 1,
subsample = 1,
eta = 0.04,
objective = 'binary:logistic', #reg:linear'
max_depth = 5,
num_parallel_tree = 1,
min_child_weight = 1
)
res = xgboost(dtrain,
y_train,
params= xgb_params,
nrounds=500,
early_stopping_rounds=15,
print_every_n = 50)
test.predict = predict(res, dtest)
test.predictBin = ifelse(test.predict<0.75,0, 1)
xgbRes = data.frame("truth"=y_test, "predict"=test.predictBin, "Prob"=test.predict)
table(xgbRes[,1:2])
xgbRes$truth = as.factor(xgbRes$truth)
ggplot(subset(xgbRes,truth==0), aes(x=Prob)) + geom_histogram(bins = 70, fill='blue') +
geom_histogram(data=subset(xgbRes,truth==1), aes(x=Prob), fill='red', bins = 70) + geom_vline(xintercept=0.14) +
theme_minimal()
# double hit ratio with 500 rounds, eta 0.04, max depth 5 and predict threshold of 0.15
# plot
model <- xgb.dump(res, with.stats = T)
names = colnames(tregTrainOverX)
importance_matrix <- xgb.importance(names, model = res)
# Nice graph
library(Ckmeans.1d.dp)
xgb.plot.importance(importance_matrix[1:15,])
tregFeat = treg
tregFeat$SaleFlg = NULL
treg$TrueHouseValue
# cross=validate XGB
xgb_params = list(
seed = 0,
colsample_bytree = 1,
subsample = 1,
eta = 0.075,
objective = 'binary:logistic',
max_depth = 6,
num_parallel_tree = 1,
min_child_weight = 1,
base_score = 0.5
)
watchList = list(train=dtrain, test=dtest)
res = xgb.train(
data=dtrain,
max.depth=6, eta=0.075, nthread = 2,
nround=1000,
watchList = watchList,
base_score=5,
print.every.n = 1,
verbose = 1,
eval.metric="error")
test.predict = predict(res, dtest)
plot(test.predict, y_test)
test.predictBin = ifelse(test.predict<0.02, 0, 1)
x = data.frame("truth"=y_test, "predict"=test.predictBin)
table(x)
x1 = data.frame("truth"=y_test, "predict"=test.predict)
x1$truth = as.factor(x1$truth)
ggplot(subset(x1,truth==0), aes(x=predict)) + geom_histogram(bins = 70, fill='blue') +
geom_histogram(data=subset(x1,truth==1), aes(x=predict), fill='red', bins = 70)
names = colnames(tregTrainOverX)
importance_matrix <- xgb.importance(names, model = res)
xgb.plot.importance(importance_matrix = importance_matrix)
# create unique list of addresses for submission to census
allRec1 = read.csv("ncresidents_zip28786.csv", stringsAsFactors = F)
allRec2 = read.csv("ncresidents_zip28803.csv", stringsAsFactors = F)
allRec = rbind(allRec1, allRec2)
uniqueAddr = allRec %>% group_by(addrQuery) %>%
summarize(zipCode=min(zipCode),
cityName=min(cityName),
streetAddr=min(streetAddr),
countyName=min(countyName),
houseID=min(rowID)) %>% arrange(houseID)
# only pull off addresses we actually use
a = data.frame(addrQuery = unique(t$AddrQuery))
uaf = a %>% inner_join(uniqueAddr, by="addrQuery")
write.csv(uaf, file="addr.csv", row.names = F)
library(ggplot2)
t$HouseID = as.factor(t$HouseID)
t$YrsSinceSoldF = as.factor(t$YrsSinceSold)
tCut$SaleFlg = as.factor(tCut$SaleFlg)
tCut$ObsYearF = as.factor(tCut$ObsYear)
t$SaleFlgF = as.factor(t$SaleFlg)
ggplot(t, aes(y=TrueHouseValue, x=ObsYear, color=SaleFlgF)) +
geom_point(alpha=0.5) + theme(legend.position="none")
tClean$SaleFlgF = as.integer(as.character(tClean$SaleFlg))
tClean$OwnershipID = as.factor(tClean$OwnershipID)
temp = unique(tClean$HouseID)
temp2 = sample(temp, 20)
temp3= tClean[tClean$HouseID %in% temp2,]
#temp3$OwnershipID = as.factor(temp3$OwnershipID)
tempSales= temp3[temp3$SaleFlg==1,]
ggplot(temp3, aes(x=ObsYear, y=log(TrueHouseValueAdj), color=HouseID)) +
geom_line() + theme(legend.position="none") +
geom_point(data=tempSales, aes(x=ObsYear, y=log(TrueHouseValueAdj)))
# box plots
ggplot(treg, aes(x=SaleFlg, y=log(LotSize), color=SaleFlg)) +
geom_boxplot() + theme(legend.position="none") + ylim(c(-5,5))
test = t.test(log(LotSize)~SaleFlg, data=treg)
print(test)
ggplot(subset(treg, SaleFlg ==0), aes(x=log(LotSize))) + geom_histogram(bins = 70, fill='blue') +
geom_histogram(data=subset(treg, SaleFlg ==1), aes(x=log(LotSize)), fill='red', bins = 70) +
theme_minimal()
|
/Project5-Capstone/JasonSippie/predictHouseSales.R
|
no_license
|
vuchau/bootcamp007_project
|
R
| false
| false
| 17,938
|
r
|
######################################################################################
#
# Predict House Sales
#
# This script loads the master house observations file, filters it based on year and
# excludes observations with key missing features (e.g., if there's no house value,
# the the record is probably the results of a failed extrapolation, such as no prior
# transaction to extrapolate from).
#
# It then does some basic imputation
#
#
######################################################################################
# tClean is the whole data from 2006 onwards.
# Missingness:
# some rows are omitted (e.g., no Yrssincesold values,etc)
# Others use averages or regressions
# Some missing remain in cols that aren't important and not used in modeling
library(dplyr)
library(tidyr)
library(ROCR)
library(caret)
library(randomForest)
library(glmnet)
library(data.table)
library(Matrix)
library(xgboost)
library(Metrics)
library(ROSE)
t2 = read.csv("allHouseObsAndDemo20161213.csv", row.names = 1)
t2$X = NULL
tClean = t2 %>% filter(ObsYear>2009 & !is.na(HouseValPctDevPrevSale) & !is.na(TrueHouseValue))
filterYear = 0
# tClean = t2 %>% filter(ObsYear==filterYear & !is.na(HouseValPctDevPrevSale) & !is.na(TrueHouseValue))
tClean$NumBeds[is.na(tClean$NumBeds)]=3
tClean$NumBaths[is.na(tClean$NumBaths)]=2
tClean$RaceOther = log(1.01-tClean$RaceWhite)
tClean$RaceWhite = NULL
tClean= tClean %>% mutate(Sqft=ifelse(is.na(Sqft), -100 + 690*NumBeds,Sqft))
tClean$SoldPrevYear = as.integer(tClean$YrsSinceSoldLag<1)
tClean$LotSize[is.na(tClean$LotSize)]=median(tClean$LotSize, na.rm=TRUE)
tClean$YrsSinceSoldLag[is.na(tClean$YrsSinceSoldLag)]=mean(tClean$YrsSinceSoldLag, na.rm=TRUE)
#tClean$YrsOld[is.na(tClean$YrsOld)]=26
tClean$YrsSinceRemodel[is.na(tClean$YrsSinceRemodel)]=22
tClean$HHMedIncome[is.na(tClean$HHMedIncome)]=mean(tClean$HHMedIncome, na.rm=TRUE)
#filter out other missingness
#tClean <- tClean[rowSums(is.na(tClean)) == 0,]
tClean$LotSize = log(tClean$LotSize)
tClean$Sqft = log(tClean$Sqft)
tClean$HouseValPrevYear = log(tClean$HouseValPrevYear)
# tClean at this point should match up to tClean in order to later extract address data
# treg = tClean %>%
# select(LotSize,NumBeds,SaleFlg,
# YrsSinceRemodel,YrsSinceSoldLag, SoldPrevYear,
# HouseValPrevYear, HouseValPctDevPrevSale, ZipCode,
# YoYPctChgLaborForce, YoYChgUnempRate,
# HHMChildUnder18,
# MortAnd2ndOrEq, RaceOther, YoYChg)
# logistic regression significant features
#
# treg = tClean %>%
# select(LotSize, NumBaths, NumBeds, Sqft, YrsSinceRemodel, YrsSinceSoldLag, SoldPrevYear, HouseValPctDevPrevSale, ZipCode, YoYPctChgLaborForce, YoYChgUnempRate, EduAttainBach, HHMChildUnder18, MedValue, YoYChg, RaceOther
# )
# full version
treg = tClean %>%
select(LotSize,NumBaths,NumBeds,SaleFlg,
Sqft, YrsSinceRemodel,YrsSinceSoldLag, SoldPrevYear,
HouseValPrevYear, HouseValPctDevPrevSale, ZipCode, unempRate,
YoYPctChgLaborForce, YoYChgUnempRate,
EduAttainBach,HHMedIncome, HHMChildUnder18,HHMSeniorsOver65,MedValue,
Mortgage,MortAnd2ndAndEq,MortAnd2ndOrEq, RaceOther, HHwRetIncome, YoYChg, Rate)
# lasso removed:
#NumBaths, Sqft, unempRate, EduAttainBach, HHMedIncome, HHMSeniorsOver65, MedValue, Mortgage, MortAnd2ndOrEq, HHwRetIncome, Rate
######################################################################################
#
# Build Test and Training sets
#
######################################################################################
y = treg[,"SaleFlg", drop=FALSE]
treg$SaleFlg = NULL
treg = scale(treg)
treg = cbind(treg, SaleFlag=y)
tregX = treg
tregX$SaleFlg = NULL
#write.csv(treg, "allHouseObsAndDemoNoNAs.csv")
#treg$SaleFlg = as.integer(as.character(treg$SaleFlg)) # for regression
treg$SaleFlg = as.factor(treg$SaleFlg)
set.seed(999)
train = createDataPartition(treg$SaleFlg, p = 0.7, list=FALSE)
tregTest = treg[-train,]
tregTestX = tregX[-train,]
tregTrain = treg[train,]
# do overtraining
tregTrainOver = ROSE(SaleFlg ~ ., data=tregTrain, p=0.3)$data
######################################################################################
#
# Logistic Regresssion
#
#
######################################################################################
logit.sat = glm(SaleFlg ~.,
family = "binomial",
data = tregTrainOver, control = list(maxit = 200))
summary(logit.sat)
lsumAll = summary(logit.sat)
logit.featToKeep = paste(names(which(lsumAll$coefficients[,4]<0.1))[-1],sep = "", collapse = ", ")
logit.featToKeep # this can be used to modify the feature set
pchisq(logit.sat$deviance, logit.sat$df.residual, lower.tail = FALSE)
# explains 3% of the variance
1 - logit.sat$deviance/logit.sat$null.deviance
################################
# reduced set of features
################################
tregTrainOver.reduced = tregTrainOver %>% select(
LotSize,
NumBaths,
NumBeds,
Sqft,
YrsSinceRemodel,
YrsSinceSoldLag,
SoldPrevYear,
HouseValPctDevPrevSale,
YoYPctChgLaborForce,
YoYChgUnempRate,
EduAttainBach,
HHMedIncome,
HHMChildUnder18,
RaceOther,
YoYChg,
SaleFlg
)
tregTest.reduced = tregTest %>% select(
LotSize, NumBaths, NumBeds, Sqft, YrsSinceRemodel, YrsSinceSoldLag, SoldPrevYear, HouseValPctDevPrevSale, YoYPctChgLaborForce, YoYChgUnempRate, EduAttainBach, HHMedIncome, HHMChildUnder18, RaceOther, YoYChg, SaleFlg
)
logit.reduced = glm(SaleFlg ~.,
family = "binomial",
data = tregTrainOver.reduced, control = list(maxit = 200))
summary(logit.reduced)
################################
# Lasso Regression
################################
# Lambda grid spanning 10^1 down to 10^-5.
grid = 10^seq(1, -5, length = 100)
# need matrices for glmnet
# model.matrix expands factors to dummies; [,-1] drops the intercept column.
x_train = model.matrix(SaleFlg ~ ., tregTrainOver)[,-1]
y_train = as.integer(as.character(tregTrainOver$SaleFlg))
x_test = model.matrix(SaleFlg ~ ., tregTest)[,-1]
y_test = as.integer(as.character(tregTest$SaleFlg))
# alpha = 1 selects the lasso penalty.
lasso.models.train = glmnet(x_train, y_train, alpha = 1, lambda = grid)
#Running 5-fold cross validation.
set.seed(0)
cv.lasso.out = cv.glmnet(x_train, y_train,
lambda = grid, alpha = 1, nfolds = 5)
bestlambda.lasso = cv.lasso.out$lambda.min
# who got shrunk
# Coefficients at the CV-optimal lambda; zeros indicate dropped features.
lasso.coeff = coef(cv.lasso.out, s = "lambda.min")
################################
# Random Forest
################################
# convert to integer so RF does a regression
# NOTE: this mutates tregTrainOver in place for all downstream code.
tregTrainOver$SaleFlg = as.integer(as.character(tregTrainOver$SaleFlg))
rf.houses = randomForest(SaleFlg ~ ., data = tregTrainOver, importance = TRUE, ntree=500)
################################
# Summarize
################################
getCutoffs = function(perf, cutoff) {
  # Locate the point on a ROCR performance curve whose x value matches
  # `cutoff` (after rounding both to 4 decimals) and report that point.
  #
  # Args:
  #   perf:   a ROCR `performance` object (S4 slots x.values, y.values,
  #           alpha.values, each a one-element list).
  #   cutoff: x-axis value to look up.
  # Returns:
  #   named numeric vector c(cutoffIndex, cutoffVal, cutoffY) with the
  #   matched index, the threshold (alpha) there, and the y value there.
  x = perf@x.values[[1]]
  cutoffIndex = which(round(x,4)==round(cutoff,4))[1]
  a = perf@alpha.values[[1]]
  cutoffVal = a[cutoffIndex]
  y = perf@y.values[[1]]
  # BUG FIX: the original indexed with the undefined global `xidx`
  # (cutoffY = y[xidx]); use the index computed above so the returned y
  # actually corresponds to the located cutoff.
  cutoffY = y[cutoffIndex]
  return (c("cutoffIndex"=cutoffIndex, "cutoffVal"=cutoffVal, "cutoffY"=cutoffY))
}
# --- Score each model on the test set and tabulate confusion counts ----------
# Each model gets: predicted scores, a ROCR prediction/performance object,
# a cutoff via getCutoffs(), and a results data.frame (truth vs predicted).
# logistic
lr.pval = predict(logit.sat, tregTest, type = "response")
lr.predict = prediction(lr.pval, tregTest$SaleFlg)
lr.sat.roc.perf = performance(lr.predict, measure = 'tpr', x.measure = 'fpr')
lr.cutoffs = getCutoffs(lr.sat.roc.perf, 0.5)
sale.predicted = ifelse(lr.pval>lr.cutoffs["cutoffVal"], 1, 0)
logRes = data.frame("ObsNum"=row.names(tregTest), 'type'="LogitFull", "truth"= as.character(tregTest$SaleFlg), "Predict" = sale.predicted, 'Prob'=lr.pval)
table(logRes[,3:4])
# logistic reduced
# BUG FIX: the original called predict(logit.sig, ...) but no `logit.sig`
# exists; the reduced-feature model fitted above is `logit.reduced`.
lr.red.pval = predict(logit.reduced, tregTest, type = "response")
lr.red.predict = prediction(lr.red.pval, tregTest$SaleFlg)
lr.red.roc.perf = performance(lr.red.predict, measure = 'tpr', x.measure = 'fpr')
lr.red.cutoffs = getCutoffs(lr.red.roc.perf, 0.5)
sale.predicted = ifelse(lr.red.pval>lr.red.cutoffs["cutoffVal"], 1, 0)
logRes.red = data.frame("ObsNum"=row.names(tregTest), 'type'="LogitRed", "truth"= as.character(tregTest$SaleFlg), "Predict" = sale.predicted, 'Prob'=lr.red.pval)
table(logRes.red[,3:4])
# lasso
las.pval = predict(lasso.models.train, s = bestlambda.lasso, newx = x_test)
las.predict = prediction(las.pval, tregTest$SaleFlg)
las.roc.perf = performance(las.predict, measure = 'tpr', x.measure = 'fpr')
las.cutoffs = getCutoffs(las.roc.perf, 0.5)
sale.predicted = ifelse(las.pval>las.cutoffs["cutoffVal"], 1, 0)
lasRes = data.frame("ObsNum"=row.names(tregTest), 'type'="Lasso", "truth"= as.character(tregTest$SaleFlg), "Predict" = as.numeric(sale.predicted), 'Prob'=as.numeric(las.pval))
table(lasRes[,3:4])
# random forest
# log() of the RF regression output is a monotone transform, so the ROC is
# unchanged; it only rescales the score axis for plotting.
rf.pval = log(predict(rf.houses, tregTest))
rf.predict = prediction(rf.pval, tregTest$SaleFlg)
rf.roc.perf = performance(rf.predict, measure = 'tpr', x.measure = 'fpr')
rf.cutoffs = getCutoffs(rf.roc.perf, 0.5)
sale.predicted = ifelse(rf.pval>rf.cutoffs["cutoffVal"], 1, 0)
rfRes = data.frame("ObsNum"=row.names(tregTest), 'type'="RF", "truth"= as.character(tregTest$SaleFlg), "Predict" = as.numeric(sale.predicted), 'Prob'=as.numeric(rf.pval))
table(rfRes[,3:4])
# Stack per-model results (lasso intentionally excluded here) and count how
# often each observation was predicted correctly across models.
allRes = rbind(rfRes, logRes, logRes.red)
allRes$Correct = allRes$truth == allRes$Predict
allRes %>% group_by(truth, Correct, ObsNum) %>% summarize(cnt=sum(Predict)) %>%
group_by(truth, Correct, cnt) %>% count() %>% ungroup()
# --- Cross-model agreement and simple majority-vote consensus ----------------
# agg1: one row per observation, one column of 0/1 predictions per model.
agg1 = allRes %>% select(-Prob, -Correct) %>% spread(type, Predict)
# agg2: same layout but TRUE/FALSE correctness per model.
agg2 = allRes %>% select(-Prob, -Predict) %>% spread(type, Correct)
colnames(agg2) <- c("ObsNum","truth","RF.Cor","LogitFull.Cor","LogitRed.Cor")
# BUG FIX: the original placed `%>%` at the start of the next statement,
# which is a syntax error in R (the pipe must end the previous line).
# The chain below is what the layout implies: per-observation sums, then
# per-class totals of positive predictions.
agg = cbind(agg1, agg2[,3:5]) %>%
group_by(ObsNum, truth) %>% summarize(RF=sum(RF, na.rm=TRUE), LogitFull=sum(LogitFull, na.rm=TRUE), LogitRed=sum(LogitRed, na.rm=TRUE)) %>%
group_by(truth) %>% summarize(RF=sum(RF), LogitFull=sum(LogitFull), LogitRed=sum(LogitRed))
# Consensus: count positive votes per observation; an observation with only
# one positive vote is downgraded to a 0 prediction.
consensus = allRes %>% group_by(ObsNum) %>% summarize(cnt=sum(Predict)) %>% ungroup()
consensus = cbind(logRes, consensus)
consensus[which(consensus$cnt %in% c(1)),"Predict"]=0
table(consensus[,3:4])
# --- ROC overlays and score-distribution plots -------------------------------
plot(las.roc.perf, colorize = TRUE, main="ROC for 4 Models")
plot(lr.red.roc.perf, add = TRUE, colorize = TRUE)
plot(lr.sat.roc.perf, add = TRUE, colorize = TRUE)
plot(rf.roc.perf, add = TRUE, colorize = TRUE)
abline(a=0, b= 1, col='grey')
# NOTE(review): `y`, `x` and `xidx` are not defined in this file's visible
# scope -- these ablines only work after interactively creating them.
abline(a=y[xidx], b= 0, col='red')
abline(v= x[xidx],col='red')
logRes$truth = as.factor(logRes$truth)
# Overlaid histograms of predicted probability by true class, with the
# chosen cutoff as a vertical line.
ggplot(logRes, aes(x=Prob, fill=truth)) + geom_histogram(bins = 60, position='identity') + geom_vline(xintercept=lr.cutoffs["cutoffVal"]) +
theme_minimal() + xlab('Prediction (prob)') + ggtitle("Sale vs Non-Sale Logit Prediction") +
scale_fill_manual(values=c("paleturquoise3", "red4"),
name=NULL,
breaks=c(0,1 ),
labels=c("No Sale", "Sale"))
lasRes$truth = as.factor(lasRes$truth)
# Same plot for the lasso scores (title reuses the logit wording).
ggplot(lasRes, aes(x=Prob, fill=truth)) + geom_histogram(bins = 60, position='identity') + geom_vline(xintercept=las.cutoffs["cutoffVal"]) +
theme_minimal() + xlab('Prediction (prob)') + ggtitle("Sale vs Non-Sale Logit Prediction") +
scale_fill_manual(values=c("paleturquoise3", "red4"),
name=NULL,
breaks=c(0,1 ),
labels=c("No Sale", "Sale"))
plot(lr.sat.roc.perf, main="ROC for 4 Models")
abline(a=0, b= 1)
# NOTE(review): same undefined `y`/`x`/`xidx` issue as above.
abline(a=y[xidx], b= 0)
abline(v= x[xidx])
# NOTE(review): `pval` and `lr.cutoff` (singular) are not defined here --
# presumably leftovers from an earlier interactive session; the rest of the
# file uses lr.pval / lr.cutoffs["cutoffVal"].
sale.predicted = ifelse(pval>lr.cutoff, 1, 0)
logRes = data.frame("truth"= as.character(tregTest$SaleFlg), "Predict" = sale.predicted, 'Prob'=pval)
table(logRes[,1:2])
logRes$truth = as.factor(logRes$truth)
ggplot(subset(logRes,truth==0), aes(x=Prob)) + geom_histogram(bins = 60, fill='cadetblue4') +
geom_histogram(data=subset(logRes,truth==1), aes(x=Prob), fill='brown4', bins = 60) + geom_vline(xintercept=lr.cutoff) +
theme_minimal() + xlab('Prediction (prob)') + ggtitle("Sale(blue) vs Non-Sale(red) Logit Prediction")
summary(logRes$truth)
rfRes$truth = as.factor(rfRes$truth)
# NOTE(review): `rf.cutoff` is also undefined; the computed value lives in
# rf.cutoffs["cutoffVal"].
ggplot(subset(rfRes,truth==0), aes(x=log(Prob))) + geom_histogram(bins = 60, fill='blue') +
geom_histogram(data=subset(rfRes,truth==1), aes(x=log(Prob)), fill='red', bins = 60) +
geom_vline(xintercept=log(rf.cutoff)) +
theme_minimal()
# Permutation importance (type=1) for the random forest.
importance(rf.houses, type=1)
varImpPlot(rf.houses, type=1, main="Variable Importance for RF")
# combine Logistic and RF
# NOTE(review): `sale.rf.predicted` is never defined in this file -- confirm
# where it comes from before running this section.
logRes = data.frame("truth"= as.character(tregTest$SaleFlg), "Predict" = sale.predicted, 'Prob'=pval)
rfRes =data.frame('truth'=tregTest$SaleFlg, "Predict" = sale.rf.predicted,"Prob"=rf.pval)
comp.pred = cbind(logRes, rfRes)
# Drop the duplicated truth column introduced by cbind.
comp.pred[,4]=NULL
names(comp.pred) <- c("Truth", "LR.pred", "LR.Prob", "RF.Pred","RF.Prob")
comp.pred$RF.LogProb = log(comp.pred$RF.Prob)
# "Either model fires" combined prediction.
comp.pred$combPred = comp.pred$RF.LogProb > lr.cutoff | comp.pred$LR.Prob > lr.cutoff
comp.pred$RF.LogProb = scale(comp.pred$RF.LogProb)
cp =comp.pred[,c( 3, 5)]
cp = scale(cp)
cpp = comp.pred[,1, drop=F]
cpp = cbind(cp, cpp)
ggplot(subset(comp.pred,Truth==0), aes(x=RF.LogProb)) + geom_histogram(alpha=0.3, bins = 60, fill='blue') +
geom_histogram(data=subset(comp.pred,Truth==1), aes(x=RF.LogProb), fill='red', alpha=0.3, bins = 60) +
theme_minimal()
# Quick contingency check of SaleFlg against a LotSize threshold.
table(treg$SaleFlg, treg$LotSize<=0.46)
630/9859
# --- XGBoost: first training run ---------------------------------------------
library(parallel)
# Calculate the number of cores
no_cores <- detectCores() - 1
# Initiate cluster
# NOTE(review): `cl` is created but never used below and never stopped with
# stopCluster() -- confirm whether it is needed at all.
cl <- makeCluster(no_cores)
# Separate feature matrices from the SaleFlg label for xgboost.
tregTrainOverX = tregTrainOver
y_train = as.integer(as.character(tregTrainOverX$SaleFlg))
tregTrainOverX[,"SaleFlg"] = NULL
tregTestX = tregTest
y_test = as.integer(as.character(tregTestX$SaleFlg))
tregTestX[,"SaleFlg"] = NULL
dtrain = xgb.DMatrix(as.matrix(tregTrainOverX), label=y_train)
dtest = xgb.DMatrix(as.matrix(tregTestX), label = y_test)
xgb_params = list(
colsample_bytree = 1,
subsample = 1,
eta = 0.04,
objective = 'binary:logistic', #reg:linear'
max_depth = 5,
num_parallel_tree = 1,
min_child_weight = 1
)
# NOTE(review): dtrain already carries the label; y_train is passed as a
# second positional argument as well -- verify this matches the xgboost()
# signature in the installed version.
res = xgboost(dtrain,
y_train,
params= xgb_params,
nrounds=500,
early_stopping_rounds=15,
print_every_n = 50)
test.predict = predict(res, dtest)
# Hard 0/1 classification at a 0.75 probability threshold.
test.predictBin = ifelse(test.predict<0.75,0, 1)
xgbRes = data.frame("truth"=y_test, "predict"=test.predictBin, "Prob"=test.predict)
table(xgbRes[,1:2])
xgbRes$truth = as.factor(xgbRes$truth)
ggplot(subset(xgbRes,truth==0), aes(x=Prob)) + geom_histogram(bins = 70, fill='blue') +
geom_histogram(data=subset(xgbRes,truth==1), aes(x=Prob), fill='red', bins = 70) + geom_vline(xintercept=0.14) +
theme_minimal()
# double hit ratio with 500 rounds, eta 0.04, max depth 5 and predict threshold of 0.15
# plot
model <- xgb.dump(res, with.stats = T)
names = colnames(tregTrainOverX)
importance_matrix <- xgb.importance(names, model = res)
# Nice graph
library(Ckmeans.1d.dp)
xgb.plot.importance(importance_matrix[1:15,])
# --- XGBoost: second run via xgb.train with a watchlist ----------------------
tregFeat = treg
tregFeat$SaleFlg = NULL
treg$TrueHouseValue
# cross=validate XGB
# NOTE(review): xgb_params is rebuilt here but never passed to xgb.train
# below -- the training call repeats its settings inline instead.
xgb_params = list(
seed = 0,
colsample_bytree = 1,
subsample = 1,
eta = 0.075,
objective = 'binary:logistic',
max_depth = 6,
num_parallel_tree = 1,
min_child_weight = 1,
base_score = 0.5
)
watchList = list(train=dtrain, test=dtest)
# NOTE(review): xgb.train's parameter is spelled `watchlist` (lowercase);
# `watchList = watchList` is likely swallowed by `...` and ignored. Also
# base_score=5 conflicts with the 0.5 above and is outside (0,1), which is
# suspect for binary:logistic -- confirm intent.
res = xgb.train(
data=dtrain,
max.depth=6, eta=0.075, nthread = 2,
nround=1000,
watchList = watchList,
base_score=5,
print.every.n = 1,
verbose = 1,
eval.metric="error")
test.predict = predict(res, dtest)
plot(test.predict, y_test)
test.predictBin = ifelse(test.predict<0.02, 0, 1)
x = data.frame("truth"=y_test, "predict"=test.predictBin)
table(x)
x1 = data.frame("truth"=y_test, "predict"=test.predict)
x1$truth = as.factor(x1$truth)
ggplot(subset(x1,truth==0), aes(x=predict)) + geom_histogram(bins = 70, fill='blue') +
geom_histogram(data=subset(x1,truth==1), aes(x=predict), fill='red', bins = 70)
# NOTE(review): `names` shadows base::names within this script.
names = colnames(tregTrainOverX)
importance_matrix <- xgb.importance(names, model = res)
xgb.plot.importance(importance_matrix = importance_matrix)
# --- Address deduplication for census submission + exploratory plots ---------
# create unique list of addresses for submission to census
allRec1 = read.csv("ncresidents_zip28786.csv", stringsAsFactors = F)
allRec2 = read.csv("ncresidents_zip28803.csv", stringsAsFactors = F)
allRec = rbind(allRec1, allRec2)
# One row per addrQuery; min() picks an arbitrary-but-deterministic value
# for the other columns.
uniqueAddr = allRec %>% group_by(addrQuery) %>%
summarize(zipCode=min(zipCode),
cityName=min(cityName),
streetAddr=min(streetAddr),
countyName=min(countyName),
houseID=min(rowID)) %>% arrange(houseID)
# only pull off addresses we actually use
# NOTE(review): `t` (also `tCut`, `tClean` below) is defined elsewhere and
# shadows base::t -- fragile naming; confirm what frame it holds.
a = data.frame(addrQuery = unique(t$AddrQuery))
uaf = a %>% inner_join(uniqueAddr, by="addrQuery")
write.csv(uaf, file="addr.csv", row.names = F)
library(ggplot2)
t$HouseID = as.factor(t$HouseID)
t$YrsSinceSoldF = as.factor(t$YrsSinceSold)
tCut$SaleFlg = as.factor(tCut$SaleFlg)
tCut$ObsYearF = as.factor(tCut$ObsYear)
t$SaleFlgF = as.factor(t$SaleFlg)
# House value over time, colored by sale flag.
ggplot(t, aes(y=TrueHouseValue, x=ObsYear, color=SaleFlgF)) +
geom_point(alpha=0.5) + theme(legend.position="none")
tClean$SaleFlgF = as.integer(as.character(tClean$SaleFlg))
tClean$OwnershipID = as.factor(tClean$OwnershipID)
# Sample 20 random houses and plot their value trajectories; dots mark years
# with a sale.
temp = unique(tClean$HouseID)
temp2 = sample(temp, 20)
temp3= tClean[tClean$HouseID %in% temp2,]
#temp3$OwnershipID = as.factor(temp3$OwnershipID)
tempSales= temp3[temp3$SaleFlg==1,]
ggplot(temp3, aes(x=ObsYear, y=log(TrueHouseValueAdj), color=HouseID)) +
geom_line() + theme(legend.position="none") +
geom_point(data=tempSales, aes(x=ObsYear, y=log(TrueHouseValueAdj)))
# box plots
ggplot(treg, aes(x=SaleFlg, y=log(LotSize), color=SaleFlg)) +
geom_boxplot() + theme(legend.position="none") + ylim(c(-5,5))
# Two-sample t-test of log lot size by sale flag.
test = t.test(log(LotSize)~SaleFlg, data=treg)
print(test)
ggplot(subset(treg, SaleFlg ==0), aes(x=log(LotSize))) + geom_histogram(bins = 70, fill='blue') +
geom_histogram(data=subset(treg, SaleFlg ==1), aes(x=log(LotSize)), fill='red', bins = 70) +
theme_minimal()
|
## Plot 4
# Read data
# Reads the UCI household power consumption file straight from the zip.
# The header row is read separately because the data read below skips ahead.
header <- read.table(unz("exdata_data_household_power_consumption.zip", "household_power_consumption.txt"), nrows = 1, header = FALSE, sep =';', stringsAsFactors = FALSE)
# skip=66636 / nrows=2880 selects exactly the 2880 minute-rows covering
# 2007-02-01 and 2007-02-02 (2 days x 1440 minutes).
data <- read.table(unz("exdata_data_household_power_consumption.zip", "household_power_consumption.txt"), header=T, sep=";", skip=66636, nrows=2880, na.strings="?")
colnames(data)<-unlist(header)
data$Date <- as.Date(data$Date,format = c("%d/%m/%Y"))
data$Time <- strptime(data$Time,format = "%H:%M:%S")
# Re-stamp the parsed times with the correct calendar dates (first 1440
# rows are Feb 1, the rest Feb 2).
data[1:1440,"Time"] <- format(data[1:1440,"Time"],"2007-02-01 %H:%M:%S")
data[1441:2880,"Time"] <- format(data[1441:2880,"Time"],"2007-02-02 %H:%M:%S")
# Graph
# 2x2 panel of time-series plots written to plot4.png.
png("plot4.png")
#TopLeft
par(mfrow=c(2,2))
plot(data$Time,data$Global_active_power,type="l",ylab="Global Active Power",xlab="")
#TopRight
plot(data$Time,data$Voltage,type="l",ylab="Voltage",xlab="datetime")
#BottomLeft
plot(data$Time,data$Sub_metering_1,type="l",ylab="Energy Sub Metering",xlab="")
lines(data$Time,data$Sub_metering_2,col="red")
lines(data$Time,data$Sub_metering_3,col="blue")
legend("topright",col=c("black","red","blue"),legend=c("Sub_metering_1","Sub_metering_2","Sub_metering_3"),lty=1,bty="n")
#BottomRight
plot(data$Time,data$Global_reactive_power,type="l",ylab="Global_reactive_power",xlab="datetime")
dev.off()
|
/plot4.R
|
no_license
|
jlrichards2121/ExData_Plotting1
|
R
| false
| false
| 1,299
|
r
|
## Plot 4
# Read data
# Header parsed separately; the data read skips to the two target days.
header <- read.table(unz("exdata_data_household_power_consumption.zip", "household_power_consumption.txt"), nrows = 1, header = FALSE, sep =';', stringsAsFactors = FALSE)
# skip=66636 / nrows=2880 = 2007-02-01 and 2007-02-02 (2 x 1440 minutes).
data <- read.table(unz("exdata_data_household_power_consumption.zip", "household_power_consumption.txt"), header=T, sep=";", skip=66636, nrows=2880, na.strings="?")
colnames(data)<-unlist(header)
data$Date <- as.Date(data$Date,format = c("%d/%m/%Y"))
data$Time <- strptime(data$Time,format = "%H:%M:%S")
# Re-stamp parsed times with the correct calendar dates.
data[1:1440,"Time"] <- format(data[1:1440,"Time"],"2007-02-01 %H:%M:%S")
data[1441:2880,"Time"] <- format(data[1441:2880,"Time"],"2007-02-02 %H:%M:%S")
# Graph
# 2x2 panel of time-series plots written to plot4.png.
png("plot4.png")
#TopLeft
par(mfrow=c(2,2))
plot(data$Time,data$Global_active_power,type="l",ylab="Global Active Power",xlab="")
#TopRight
plot(data$Time,data$Voltage,type="l",ylab="Voltage",xlab="datetime")
#BottomLeft
plot(data$Time,data$Sub_metering_1,type="l",ylab="Energy Sub Metering",xlab="")
lines(data$Time,data$Sub_metering_2,col="red")
lines(data$Time,data$Sub_metering_3,col="blue")
legend("topright",col=c("black","red","blue"),legend=c("Sub_metering_1","Sub_metering_2","Sub_metering_3"),lty=1,bty="n")
#BottomRight
plot(data$Time,data$Global_reactive_power,type="l",ylab="Global_reactive_power",xlab="datetime")
dev.off()
|
# Overall Description:
# A pair of functions that create and cache the inverse of a matrix. The functions
# take advantage of R's scoping rules to store/cache inverses of a supplied
# matrix once it has been computed and provides retrieval functions to reduce
# computation times rather than requiring inverses be calculated for each matrix.
makeCacheMatrix <- function(x = matrix()) {
  # Description:
  #   This function creates a special "matrix" object that can cache its inverse.
  # Args:
  #   x: a square invertible matrix, default equals empty matrix
  # Returns:
  #   a list of accessor functions (set/get/setCachedMatrix/getCachedMatrix)
  unCachedMatrix <- x
  cachedMatrix <- NULL
  set <- function(y){
    # BUG FIX: the original never stored the new matrix, so set() cleared
    # the cache but get() kept returning the old matrix. Store it and then
    # invalidate the cached inverse.
    unCachedMatrix <<- y
    cachedMatrix <<- NULL
  }
  get <- function() unCachedMatrix
  setCachedMatrix <- function(invertedMatrix) cachedMatrix <<- invertedMatrix
  getCachedMatrix <- function() cachedMatrix
  methods = list(set = set, get = get, setCachedMatrix = setCachedMatrix,
                 getCachedMatrix = getCachedMatrix)
}
cacheSolve <- function(x, ...) {
  # Description:
  #   Computes the inverse of the "matrix" object built by makeCacheMatrix,
  #   returning the previously cached inverse when one is available.
  # Args:
  #   x:   a cache-matrix object created with makeCacheMatrix
  #   ...: additional arguments (currently unused)
  # Returns:
  #   the inverted matrix, either freshly solved (and cached) or from cache
  hit <- x$getCachedMatrix()
  if (is.null(hit)) {
    # Cache miss: solve, store for next time, and return the fresh inverse.
    message("cached matrix not found, solving inverse and cacheing")
    fresh <- solve(x$get())
    x$setCachedMatrix(fresh)
    return(fresh)
  }
  # Cache hit: skip the solve entirely.
  message("getting cached matrix")
  hit
}
### CODE BELOW FOR TESTING ###
# Manual smoke tests: the second cacheSolve() call should print the
# "getting cached matrix" message instead of re-solving.
# Test making matrix
newMatrix <- matrix(c(-1, -2, 1, 1), 2,2)
m <- makeCacheMatrix(newMatrix)
m$get()
# Test creating inverse matrix
inv <- cacheSolve(m)
inv
# Test retrieving cached matrix
inv <- cacheSolve(m)
inv
|
/cachematrix.R
|
no_license
|
stugorf/ProgrammingAssignment2
|
R
| false
| false
| 2,102
|
r
|
# Overall Description:
# A pair of functions that create and cache the inverse of a matrix. The functions
# take advantage of R's scoping rules to store/cache inverses of a supplied
# matrix once it has been computed and provides retrieval functions to reduce
# computation times rather than requiring inverses be calculated for each matrix.
makeCacheMatrix <- function(x = matrix()) {
  # Description:
  #   This function creates a special "matrix" object that can cache its inverse.
  # Args:
  #   x: a square invertible matrix, default equals empty matrix
  # Returns:
  #   a list of accessor functions (set/get/setCachedMatrix/getCachedMatrix)
  unCachedMatrix <- x
  cachedMatrix <- NULL
  set <- function(y){
    # BUG FIX: the original never stored the new matrix, so set() cleared
    # the cache but get() kept returning the old matrix. Store it and then
    # invalidate the cached inverse.
    unCachedMatrix <<- y
    cachedMatrix <<- NULL
  }
  get <- function() unCachedMatrix
  setCachedMatrix <- function(invertedMatrix) cachedMatrix <<- invertedMatrix
  getCachedMatrix <- function() cachedMatrix
  methods = list(set = set, get = get, setCachedMatrix = setCachedMatrix,
                 getCachedMatrix = getCachedMatrix)
}
cacheSolve <- function(x, ...) {
  # Description:
  #   Computes the inverse of the "matrix" object built by makeCacheMatrix,
  #   returning the previously cached inverse when one is available.
  # Args:
  #   x:   a cache-matrix object created with makeCacheMatrix
  #   ...: additional arguments (currently unused)
  # Returns:
  #   the inverted matrix, either freshly solved (and cached) or from cache
  hit <- x$getCachedMatrix()
  if (is.null(hit)) {
    # Cache miss: solve, store for next time, and return the fresh inverse.
    message("cached matrix not found, solving inverse and cacheing")
    fresh <- solve(x$get())
    x$setCachedMatrix(fresh)
    return(fresh)
  }
  # Cache hit: skip the solve entirely.
  message("getting cached matrix")
  hit
}
### CODE BELOW FOR TESTING ###
# Manual smoke tests: the second cacheSolve() call should print the
# "getting cached matrix" message instead of re-solving.
# Test making matrix
newMatrix <- matrix(c(-1, -2, 1, 1), 2,2)
m <- makeCacheMatrix(newMatrix)
m$get()
# Test creating inverse matrix
inv <- cacheSolve(m)
inv
# Test retrieving cached matrix
inv <- cacheSolve(m)
inv
|
#' Complex coefficient for irregularly sampled data at frequency `o`.
#'
#' For o != 1 evaluates
#'   2 * (n * Conj(zeta) - Conj(iota2) * zeta) / (n^2 - Conj(iota2) * iota2)
#' where zeta = sum(Y * exp(-1i*o*X)) and iota2 = sum(exp(-2i*o*X));
#' for o == 1 it simply returns mean(Y).
nurealcoeff <- function(X, Y, o) {
  if (o == 1) {
    return(mean(Y))
  }
  n <- length(X)
  zeta <- sum(Y * exp(-1i * o * X))
  iota2 <- sum(exp(-2i * o * X))
  2 * (n * Conj(zeta) - Conj(iota2) * zeta) / (n * n - Conj(iota2) * iota2)
}
|
/R/nurealcoeff.R
|
no_license
|
nickmckay/nuspectral
|
R
| false
| false
| 251
|
r
|
#' Complex coefficient for irregularly sampled data at frequency `o`:
#' 2*(n*Conj(zeta) - Conj(iota2)*zeta) / (n^2 - Conj(iota2)*iota2) with
#' zeta = sum(Y*exp(-1i*o*X)) and iota2 = sum(exp(-2i*o*X)); for o == 1
#' it reduces to mean(Y).
"nurealcoeff" <-
function(X,Y,o)
{ if(o!=1)
{ n <- length(X)
zeta <- sum(Y*exp(-1i*o*X))
iota2 <- sum(exp(-2i*o*X))
2*(n*Conj(zeta)-Conj(iota2)*zeta)/(n*n-Conj(iota2)*iota2)
}
else
{ mean(Y) }
}
|
#### Practice II
#___________________________________________UNDERTANDING FUNCTIONAL TRAIT SPACE
rm(list = ls())
# install.packages(ggplot2)
# install.packages(dplyr)
# install.packages(reshape2)
library(ggplot2)
library(dplyr)
library(reshape2)
# NOTE(review): hardcoded absolute Windows path -- not portable.
load("C:/Users/iq37aceh/Desktop/Lecture/vt_traits_2.RData")
# vt_traits # traits are scaled and ready for analysis
#______________________________________________species are grouped/marked by the vegetation types they occur
# PCA on the first six (pre-scaled) trait columns.
PCA <- prcomp(vt_traits[1:6])
# One row per species: vegetation-type membership flags plus PC scores.
# NOTE(review): columns T/C/L/P/S/R -- `T` as a column name works via `$`
# but shadows the TRUE shorthand if used bare.
PCAvalues <- data.frame(Species = vt_traits$spp,
Coast = vt_traits$C,
ThermFor = vt_traits$T,
Laurel = vt_traits$L,
Pinar = vt_traits$P,
Summit = vt_traits$S,
Rocks = vt_traits$R,
PCA$x)
PCAloadings <- data.frame(Variables = rownames(PCA$rotation), PCA$rotation)
# Long format: one row per species x vegetation type.
dat = reshape2::melt(data = PCAvalues, id.vars = c("Species", "PC1", "PC2", "PC3"),
measure.vars = c("Coast", "ThermFor", "Laurel", "Pinar", "Summit", "Rocks" ))
dat = na.omit(dat)
#______________________________________________ Using a nice plot to interpret the trait combinations of two different vegetation types
# Coast (Coastal scrub), Laurel (Laurel Forest), Summit (summit scrub)
trait_space <- dplyr::filter(dat, variable == "Laurel") # Change here the vegetation type
# Density plot
# 2D density of the selected vegetation type in PC1/PC2 space, with PCA
# loading arrows overlaid (scaled by 4.2 for visibility); both axes are
# reversed for orientation.
ggplot2::ggplot(trait_space, aes(PC1, PC2)) +
stat_density_2d(geom = "polygon", contour = TRUE,
aes(fill = after_stat(level)), colour = "gray", bins = 5) +
scale_fill_distiller(palette = "Greens", direction = 1) +
geom_jitter(alpha=0.3, size =1.3, colour = "gray40") +
geom_segment(data = PCAloadings, size = 0.2,
aes(x = 0, xend = PC1*4.2, y = 0, yend = PC2*4.2),
arrow = arrow(length = unit(0.1, "cm")),colour = "black") +
geom_text(data = PCAloadings, aes(x = PC1*4.4, y = PC2*4.4, label = Variables), size = 4) +
ggtitle("Laurel") + # Change the title depending on the vegetation type filtered.
scale_y_reverse () +
scale_x_reverse () +
theme_minimal()
|
/week 1/5 - Friday/Class_MSc_FD_2.R
|
permissive
|
chase-lab/biodiv-patterns-course-2021
|
R
| false
| false
| 2,257
|
r
|
#### Practice II
#___________________________________________UNDERTANDING FUNCTIONAL TRAIT SPACE
rm(list = ls())
# install.packages(ggplot2)
# install.packages(dplyr)
# install.packages(reshape2)
library(ggplot2)
library(dplyr)
library(reshape2)
# NOTE(review): hardcoded absolute Windows path -- not portable.
load("C:/Users/iq37aceh/Desktop/Lecture/vt_traits_2.RData")
# vt_traits # traits are scaled and ready for analysis
#______________________________________________species are grouped/marked by the vegetation types they occur
# PCA on the first six (pre-scaled) trait columns.
PCA <- prcomp(vt_traits[1:6])
PCAvalues <- data.frame(Species = vt_traits$spp,
Coast = vt_traits$C,
ThermFor = vt_traits$T,
Laurel = vt_traits$L,
Pinar = vt_traits$P,
Summit = vt_traits$S,
Rocks = vt_traits$R,
PCA$x)
PCAloadings <- data.frame(Variables = rownames(PCA$rotation), PCA$rotation)
# Long format: one row per species x vegetation type.
dat = reshape2::melt(data = PCAvalues, id.vars = c("Species", "PC1", "PC2", "PC3"),
measure.vars = c("Coast", "ThermFor", "Laurel", "Pinar", "Summit", "Rocks" ))
dat = na.omit(dat)
#______________________________________________ Using a nice plot to interpret the trait combinations of two different vegetation types
# Coast (Coastal scrub), Laurel (Laurel Forest), Summit (summit scrub)
trait_space <- dplyr::filter(dat, variable == "Laurel") # Change here the vegetation type
# Density plot
# 2D density of the selected vegetation type in PC1/PC2 space, with PCA
# loading arrows overlaid; both axes reversed for orientation.
ggplot2::ggplot(trait_space, aes(PC1, PC2)) +
stat_density_2d(geom = "polygon", contour = TRUE,
aes(fill = after_stat(level)), colour = "gray", bins = 5) +
scale_fill_distiller(palette = "Greens", direction = 1) +
geom_jitter(alpha=0.3, size =1.3, colour = "gray40") +
geom_segment(data = PCAloadings, size = 0.2,
aes(x = 0, xend = PC1*4.2, y = 0, yend = PC2*4.2),
arrow = arrow(length = unit(0.1, "cm")),colour = "black") +
geom_text(data = PCAloadings, aes(x = PC1*4.4, y = PC2*4.4, label = Variables), size = 4) +
ggtitle("Laurel") + # Change the title depending on the vegetation type filtered.
scale_y_reverse () +
scale_x_reverse () +
theme_minimal()
|
# Plumber API exposing decryptr captcha-solving models as POST endpoints.
library(decryptr)
library(base64enc)
# Load each captcha model once at startup so requests don't pay the cost.
model_rfb <- load_model("rfb")
model_rsc <- load_model("rsc")
model_cadesp <- load_model("cadesp")
model_nfesp <- load_model("nfesp")
# Authorized API keys read from keys.yaml; endpoints compare
# openssl::sha256(key) against this list, so these are presumably the
# sha256 hashes of the issued keys -- TODO confirm.
keys <- yaml::read_yaml("keys.yaml")
#* @get /
root <- function() {
"hello world"
}
# Shared authorization gate for all captcha endpoints: the supplied key is
# hashed and must match one of the hashes loaded from keys.yaml.
# FIX: the original chained vectorized `|` and checked is.null/is.na AFTER
# hashing, but openssl::sha256(NULL) errors before the check could run.
# Validate first, with short-circuit ||, so bad keys always get the clean
# "Not authorized" error. Stops on failure; returns TRUE invisibly otherwise.
check_key <- function(key) {
  if (is.null(key) || is.na(key) || !(openssl::sha256(key) %in% keys)) {
    stop("Not authorized. Get an api key from decryptr.com.br")
  }
  invisible(TRUE)
}
#* @post /rfb
rfb <- function(img, key){
  # img: base64-encoded captcha image; returns the decoded text.
  check_key(key)
  img_decoded <- base64enc::base64decode(img)
  message(Sys.time(), ": rfb")
  decrypt(img_decoded, model_rfb)
}
#* @post /rsc
rsc <- function(img, key){
  check_key(key)
  img_decoded <- base64enc::base64decode(img)
  message(Sys.time(), ": rsc")
  decrypt(img_decoded, model_rsc)
}
#* @post /cadesp
cadesp <- function(img, key){
  check_key(key)
  img_decoded <- base64enc::base64decode(img)
  message(Sys.time(), ": cadesp")
  decrypt(img_decoded, model_cadesp)
}
#* @post /nfesp
nfesp <- function(img, key){
  check_key(key)
  img_decoded <- base64enc::base64decode(img)
  message(Sys.time(), ": nfesp")
  decrypt(img_decoded, model_nfesp)
}
|
/api.R
|
no_license
|
decryptr/api
|
R
| false
| false
| 1,504
|
r
|
# Plumber API exposing decryptr captcha-solving models as POST endpoints.
library(decryptr)
library(base64enc)
# Load each captcha model once at startup so requests don't pay the cost.
model_rfb <- load_model("rfb")
model_rsc <- load_model("rsc")
model_cadesp <- load_model("cadesp")
model_nfesp <- load_model("nfesp")
# Presumably sha256 hashes of issued API keys -- TODO confirm.
keys <- yaml::read_yaml("keys.yaml")
#* @get /
root <- function() {
"hello world"
}
# Shared authorization gate for all captcha endpoints: the supplied key is
# hashed and must match one of the hashes loaded from keys.yaml.
# FIX: the original chained vectorized `|` and checked is.null/is.na AFTER
# hashing, but openssl::sha256(NULL) errors before the check could run.
# Validate first, with short-circuit ||, so bad keys always get the clean
# "Not authorized" error. Stops on failure; returns TRUE invisibly otherwise.
check_key <- function(key) {
  if (is.null(key) || is.na(key) || !(openssl::sha256(key) %in% keys)) {
    stop("Not authorized. Get an api key from decryptr.com.br")
  }
  invisible(TRUE)
}
#* @post /rfb
rfb <- function(img, key){
  # img: base64-encoded captcha image; returns the decoded text.
  check_key(key)
  img_decoded <- base64enc::base64decode(img)
  message(Sys.time(), ": rfb")
  decrypt(img_decoded, model_rfb)
}
#* @post /rsc
rsc <- function(img, key){
  check_key(key)
  img_decoded <- base64enc::base64decode(img)
  message(Sys.time(), ": rsc")
  decrypt(img_decoded, model_rsc)
}
#* @post /cadesp
cadesp <- function(img, key){
  check_key(key)
  img_decoded <- base64enc::base64decode(img)
  message(Sys.time(), ": cadesp")
  decrypt(img_decoded, model_cadesp)
}
#* @post /nfesp
nfesp <- function(img, key){
  check_key(key)
  img_decoded <- base64enc::base64decode(img)
  message(Sys.time(), ": nfesp")
  decrypt(img_decoded, model_nfesp)
}
|
# Density of a K-component univariate Gaussian mixture, as a NIMBLE
# user-defined distribution. Evaluated on the log scale using the
# log-sum-exp trick (the max term `a` is factored out before
# exponentiating) for numerical stability.
#   x:     scalar evaluation point
#   p:     mixture weights (length K)
#   mu:    component means (length K)
#   sigma: component standard deviations (length K)
#   log:   if nonzero, return the log-density
dmixNorm <- nimbleFunction(
run = function(x = double(), p = double(1), mu = double(1), sigma = double(1), log = logical(0, default=0)) {
returnType(double())
K = length(p)
# out = 0
# for (j in 1:d) {
# out = out + p[j] * dnorm(x, mu[j], sigma[j])
# }
# tmp[j] = log weight + log component density; out = logsumexp(tmp).
tmp = rep(0, K)
tmp[1:K] = log(p[1:K]) + dnorm(x, mu[1:K], sigma[1:K], log=TRUE)
a = max(tmp[1:K])
out = a + log(sum(exp(tmp[1:K] - a)))
# out = sum(p * dnorm(x, mu, sigma))
# if (log) {
# return(log(out))
# } else {
# return(out)
# }
if (log) {
return(out)
} else {
return(exp(out))
}
})
# Earlier loop-based implementation of the same log-sum-exp density,
# kept commented out for reference.
# dmixNorm <- nimbleFunction(
# run = function(x = double(), p = double(1), mu = double(1), sigma = double(1), log = logical(0, default=0)) {
# returnType(double())
# d = length(p)
# tmp = numeric(length=d)
# for (j in 1:d) {
# tmp[j] = log(p[j]) + dnorm(x, mu[j], sigma[j], log = TRUE)
# }
# A = max(tmp)
# out <- A + log(sum(exp(tmp[j] - A)))
# if (log) {
# return(out)
# } else {
# return(exp(out))
# }
# })
# Random draw from the Gaussian mixture: pick a component index from
# categorical(p), then return a normal draw from that component.
# NOTE(review): this simulates a draw from EVERY component and discards
# all but the selected one -- wasteful and it consumes d+1 RNG draws per
# sample; left unchanged to preserve the existing RNG stream.
rmixNorm <- nimbleFunction(
run = function(n = double(), p = double(1), mu = double(1), sigma = double(1)) {
returnType(double())
d = length(p)
out = rep(0, d)
idx <- rcat(1, prob=p)
for (j in 1:d) {
out[j] = rnorm(1, mu[j], sigma[j])
}
# return(sum(p * rnorm(1, mu, sigma)))
return(out[idx])
})
# Register dmixNorm/rmixNorm so `dmixNorm(p, mu, sigma)` can be used
# directly inside NIMBLE/BUGS model code.
registerDistributions(list(
dmixNorm = list(
BUGSdist = "dmixNorm(p, mu, sigma)",
discrete = FALSE,
range = c(-Inf, Inf),
types = c('value = double()', 'p = double(1)', 'mu = double(1)', 'sigma = double(1)')
)))
|
/R/dmixnorm.R
|
permissive
|
jtipton25/mixing-manuscript
|
R
| false
| false
| 1,687
|
r
|
# NIMBLE user-defined density of a K-component Gaussian mixture, computed
# on the log scale with the log-sum-exp trick for numerical stability.
dmixNorm <- nimbleFunction(
run = function(x = double(), p = double(1), mu = double(1), sigma = double(1), log = logical(0, default=0)) {
returnType(double())
K = length(p)
# out = 0
# for (j in 1:d) {
# out = out + p[j] * dnorm(x, mu[j], sigma[j])
# }
# tmp[j] = log weight + log component density; out = logsumexp(tmp).
tmp = rep(0, K)
tmp[1:K] = log(p[1:K]) + dnorm(x, mu[1:K], sigma[1:K], log=TRUE)
a = max(tmp[1:K])
out = a + log(sum(exp(tmp[1:K] - a)))
# out = sum(p * dnorm(x, mu, sigma))
# if (log) {
# return(log(out))
# } else {
# return(out)
# }
if (log) {
return(out)
} else {
return(exp(out))
}
})
# Earlier loop-based implementation, kept commented out for reference.
# dmixNorm <- nimbleFunction(
# run = function(x = double(), p = double(1), mu = double(1), sigma = double(1), log = logical(0, default=0)) {
# returnType(double())
# d = length(p)
# tmp = numeric(length=d)
# for (j in 1:d) {
# tmp[j] = log(p[j]) + dnorm(x, mu[j], sigma[j], log = TRUE)
# }
# A = max(tmp)
# out <- A + log(sum(exp(tmp[j] - A)))
# if (log) {
# return(out)
# } else {
# return(exp(out))
# }
# })
# Random draw from the mixture: categorical component pick, then a normal
# draw. NOTE(review): simulates every component and keeps only the selected
# one -- wasteful; left as-is to preserve the RNG stream.
rmixNorm <- nimbleFunction(
run = function(n = double(), p = double(1), mu = double(1), sigma = double(1)) {
returnType(double())
d = length(p)
out = rep(0, d)
idx <- rcat(1, prob=p)
for (j in 1:d) {
out[j] = rnorm(1, mu[j], sigma[j])
}
# return(sum(p * rnorm(1, mu, sigma)))
return(out[idx])
})
# Register dmixNorm/rmixNorm for use inside NIMBLE/BUGS model code.
registerDistributions(list(
dmixNorm = list(
BUGSdist = "dmixNorm(p, mu, sigma)",
discrete = FALSE,
range = c(-Inf, Inf),
types = c('value = double()', 'p = double(1)', 'mu = double(1)', 'sigma = double(1)')
)))
|
# Compare valuation ratios (PE/PB) of two NSE indices; config and DB
# credentials come from the sourced config.r.
library('RODBC')
library('quantmod')
library('PerformanceAnalytics')
library('lubridate')
library('ggplot2')
library('ggthemes')
library('reshape2')
library('viridis')
source("d:/stockviz/r/config.r")
options(stringsAsFactors = FALSE)
options("scipen"=100)
reportPath <- "."
indexName1<-"NIFTY 50"
indexName2<-"NIFTY MIDCAP 50"
startDate<-as.Date("2004-01-01")
endDate<-as.Date("2020-09-30")
# Rolling window: ~220 trading days per year x 5 years.
mavgYrs <- 5
lb<-220*mavgYrs
# SQL Server connection; ldb* variables are defined in config.r.
lcon <- odbcDriverConnect(sprintf("Driver={ODBC Driver 17 for SQL Server};Server=%s;Database=%s;Uid=%s;Pwd=%s;", ldbserver, ldbname, ldbuser, ldbpassword), case = "nochange", believeNRows = TRUE)
plotRatio<-function(ratioName){
nDf1<-sqlQuery(lcon, sprintf("select TIME_STAMP, %s from INDEX_NSE_VALUATION where index_name='%s' and time_stamp >= '%s' and time_stamp <= '%s'", ratioName, indexName1, startDate, endDate))
nXts1<-xts(nDf1[,2], as.Date(nDf1[,1]))
nDf2<-sqlQuery(lcon, sprintf("select TIME_STAMP, %s from INDEX_NSE_VALUATION where index_name='%s' and time_stamp >= '%s' and time_stamp <= '%s'", ratioName, indexName2, startDate, endDate))
nXts2<-xts(nDf2[,2], as.Date(nDf2[,1]))
allXts<-merge(nXts1, nXts2)
names(allXts)<-c(indexName1, indexName2)
allXts[,1] <- na.locf(allXts[,1])
allXts[,2] <- na.locf(allXts[,2])
relXts<-nXts2/nXts1
names(relXts)<-c('RELATIVE')
relXts$avg <-rollapply(relXts$RELATIVE, lb, mean)
relXts$avgPsd <- relXts$avg + rollapply(relXts$RELATIVE, lb, sd)
relXts$avgMsd<- relXts$avg - rollapply(relXts$RELATIVE, lb, sd)
mavg <- merge(rollapply(allXts, lb, mean), rollapply(allXts, lb, sd))
allXts <- merge(allXts, mavg[,1], mavg[,2], mavg[,1]+mavg[,3], mavg[,1]-mavg[,3], mavg[,2]+mavg[,4], mavg[,2]-mavg[,4])
names(allXts) <- c(indexName1, indexName2, paste0(indexName1, '-', mavgYrs, 'yrs'), paste0(indexName2, '-', mavgYrs, 'yrs'), 'i1a', 'i1b', 'i2a', 'i2b')
############
firstDate<-first(index(allXts))
lastDate<-last(index(allXts))
xAxisTicks<-seq(from=firstDate, to=lastDate, length.out=10)
ctr2Df<-data.frame(allXts)
ctr2Df$T<-as.Date(index(allXts))
ctr2Names <- names(ctr2Df)
pdf(NULL)
ggplot(ctr2Df, aes(x=T)) +
theme_economist() +
scale_color_viridis() +
scale_fill_viridis() +
geom_line(data=ctr2Df[, c('T', ctr2Names[1])], aes_string(y=ctr2Names[1], color='1')) +
geom_line(data=ctr2Df[, c('T', ctr2Names[2])], aes_string(y=ctr2Names[2], color='2')) +
geom_line(data=ctr2Df[, c('T', ctr2Names[3])], aes_string(y=ctr2Names[3], color='1'), linetype = "dashed") +
geom_line(data=ctr2Df[, c('T', ctr2Names[4])], aes_string(y=ctr2Names[4], color='2'), linetype = "dashed") +
geom_ribbon(data=ctr2Df[, c('T', 'i1a', 'i1b')], aes_string(ymin = 'i1b', ymax='i1a', fill='1'), alpha=0.3) +
geom_ribbon(data=ctr2Df[, c('T', 'i2a', 'i2b')], aes_string(ymin = 'i2b', ymax='i2a', fill='2'), alpha=0.3) +
scale_x_date(breaks = xAxisTicks) +
guides(color=F, fill=F) +
labs(x='', y=ratioName, color='', title=sprintf("%s/%s %s Ratio", indexName1, indexName2, ratioName), subtitle=sprintf("[%s:%s]", firstDate, lastDate)) +
annotate("text", x=lastDate, y=min(allXts, na.rm=T), label = "@StockViz", hjust=1.1, vjust=-1.1, col="white", cex=6, fontface = "bold", alpha = 0.8)
ggsave(sprintf("%s/%s.%s.%s.png", reportPath, indexName1, indexName2, ratioName), width=16, height=8, units="in")
#############
ctr2Df<-data.frame(relXts)
ctr2Df$T<-as.Date(index(relXts))
pdf(NULL)
ggplot(ctr2Df, aes(x=T)) +
theme_economist() +
geom_line(aes(y=RELATIVE)) +
geom_line(aes(y=avg), color='grey') +
geom_ribbon(aes(ymin = avgMsd, ymax=avgPsd), fill='grey70', alpha=0.5) +
scale_x_date(breaks = xAxisTicks) +
labs(x='', y="ratio", color='', title=sprintf("%s/%s Relative %s Ratio", indexName1, indexName2, ratioName), subtitle=sprintf("[%s:%s]", firstDate, lastDate)) +
annotate("text", x=lastDate, y=min(relXts, na.rm=T), label = "@StockViz", hjust=1.1, vjust=-1.1, col="white", cex=6, fontface = "bold", alpha = 0.8)
ggsave(sprintf("%s/%s.%s.%s.relative.png", reportPath, indexName1, indexName2, ratioName), width=16, height=8, units="in")
}
# Generate the charts for both valuation ratios (plotRatio is defined earlier in this file)
plotRatio("PE")
plotRatio("PB")
|
/entries and exits/valuation/plot.diff.R
|
no_license
|
stockviz/blog
|
R
| false
| false
| 4,116
|
r
|
# Setup for the NIFTY valuation-ratio comparison script:
# loads dependencies, global configuration and opens the database connection.
library('RODBC')
library('quantmod')
library('PerformanceAnalytics')
library('lubridate')
library('ggplot2')
library('ggthemes')
library('reshape2')
library('viridis')
# config.r defines the DB credentials used below (ldbserver, ldbname, ldbuser, ldbpassword)
source("d:/stockviz/r/config.r")
options(stringsAsFactors = FALSE)
options("scipen"=100)
# directory where the generated PNG charts are written
reportPath <- "."
# the two indices whose valuation ratios are compared
indexName1<-"NIFTY 50"
indexName2<-"NIFTY MIDCAP 50"
startDate<-as.Date("2004-01-01")
endDate<-as.Date("2020-09-30")
# rolling-statistics look-back: mavgYrs years at ~220 trading days/year
mavgYrs <- 5
lb<-220*mavgYrs
lcon <- odbcDriverConnect(sprintf("Driver={ODBC Driver 17 for SQL Server};Server=%s;Database=%s;Uid=%s;Pwd=%s;", ldbserver, ldbname, ldbuser, ldbpassword), case = "nochange", believeNRows = TRUE)
# Plot valuation-ratio charts (e.g. "PE" or "PB") for the two globally
# configured indices (indexName1 / indexName2).
#
# Writes two PNGs under reportPath:
#   <i1>.<i2>.<ratio>.png          - both absolute ratio series with rolling
#                                    mean +/- 1 sd bands
#   <i1>.<i2>.<ratio>.relative.png - index2/index1 relative ratio with its band
#
# Depends on globals defined above: lcon, indexName1, indexName2, startDate,
# endDate, mavgYrs, lb, reportPath.
plotRatio <- function(ratioName) {
# fetch the ratio series for both indices from the valuation table
nDf1 <- sqlQuery(lcon, sprintf("select TIME_STAMP, %s from INDEX_NSE_VALUATION where index_name='%s' and time_stamp >= '%s' and time_stamp <= '%s'", ratioName, indexName1, startDate, endDate))
nXts1 <- xts(nDf1[,2], as.Date(nDf1[,1]))
nDf2 <- sqlQuery(lcon, sprintf("select TIME_STAMP, %s from INDEX_NSE_VALUATION where index_name='%s' and time_stamp >= '%s' and time_stamp <= '%s'", ratioName, indexName2, startDate, endDate))
nXts2 <- xts(nDf2[,2], as.Date(nDf2[,1]))
# align both series on a common date index and forward-fill gaps
allXts <- merge(nXts1, nXts2)
names(allXts) <- c(indexName1, indexName2)
allXts[,1] <- na.locf(allXts[,1])
allXts[,2] <- na.locf(allXts[,2])
# relative ratio (index2/index1) with rolling mean +/- 1 sd band
relXts <- nXts2/nXts1
names(relXts) <- c('RELATIVE')
relSd <- rollapply(relXts$RELATIVE, lb, sd) # computed once, used for both bands
relXts$avg <- rollapply(relXts$RELATIVE, lb, mean)
relXts$avgPsd <- relXts$avg + relSd
relXts$avgMsd <- relXts$avg - relSd
# rolling mean/sd bands for the absolute ratio of each index
mavg <- merge(rollapply(allXts, lb, mean), rollapply(allXts, lb, sd))
allXts <- merge(allXts, mavg[,1], mavg[,2], mavg[,1]+mavg[,3], mavg[,1]-mavg[,3], mavg[,2]+mavg[,4], mavg[,2]-mavg[,4])
names(allXts) <- c(indexName1, indexName2, paste0(indexName1, '-', mavgYrs, 'yrs'), paste0(indexName2, '-', mavgYrs, 'yrs'), 'i1a', 'i1b', 'i2a', 'i2b')
############
firstDate <- first(index(allXts))
lastDate <- last(index(allXts))
xAxisTicks <- seq(from=firstDate, to=lastDate, length.out=10)
# absolute-ratio chart
ctr2Df <- data.frame(allXts)
ctr2Df$T <- as.Date(index(allXts))
ctr2Names <- names(ctr2Df)
pdf(NULL) # suppress on-screen device; ggsave() writes the file
ggplot(ctr2Df, aes(x=T)) +
theme_economist() +
scale_color_viridis() +
scale_fill_viridis() +
geom_line(data=ctr2Df[, c('T', ctr2Names[1])], aes_string(y=ctr2Names[1], color='1')) +
geom_line(data=ctr2Df[, c('T', ctr2Names[2])], aes_string(y=ctr2Names[2], color='2')) +
geom_line(data=ctr2Df[, c('T', ctr2Names[3])], aes_string(y=ctr2Names[3], color='1'), linetype = "dashed") +
geom_line(data=ctr2Df[, c('T', ctr2Names[4])], aes_string(y=ctr2Names[4], color='2'), linetype = "dashed") +
geom_ribbon(data=ctr2Df[, c('T', 'i1a', 'i1b')], aes_string(ymin = 'i1b', ymax='i1a', fill='1'), alpha=0.3) +
geom_ribbon(data=ctr2Df[, c('T', 'i2a', 'i2b')], aes_string(ymin = 'i2b', ymax='i2a', fill='2'), alpha=0.3) +
scale_x_date(breaks = xAxisTicks) +
guides(color=FALSE, fill=FALSE) +
labs(x='', y=ratioName, color='', title=sprintf("%s/%s %s Ratio", indexName1, indexName2, ratioName), subtitle=sprintf("[%s:%s]", firstDate, lastDate)) +
annotate("text", x=lastDate, y=min(allXts, na.rm=TRUE), label = "@StockViz", hjust=1.1, vjust=-1.1, col="white", cex=6, fontface = "bold", alpha = 0.8)
ggsave(sprintf("%s/%s.%s.%s.png", reportPath, indexName1, indexName2, ratioName), width=16, height=8, units="in")
#############
# relative-ratio chart
ctr2Df <- data.frame(relXts)
ctr2Df$T <- as.Date(index(relXts))
pdf(NULL)
ggplot(ctr2Df, aes(x=T)) +
theme_economist() +
geom_line(aes(y=RELATIVE)) +
geom_line(aes(y=avg), color='grey') +
geom_ribbon(aes(ymin = avgMsd, ymax=avgPsd), fill='grey70', alpha=0.5) +
scale_x_date(breaks = xAxisTicks) +
labs(x='', y="ratio", color='', title=sprintf("%s/%s Relative %s Ratio", indexName1, indexName2, ratioName), subtitle=sprintf("[%s:%s]", firstDate, lastDate)) +
annotate("text", x=lastDate, y=min(relXts, na.rm=TRUE), label = "@StockViz", hjust=1.1, vjust=-1.1, col="white", cex=6, fontface = "bold", alpha = 0.8)
ggsave(sprintf("%s/%s.%s.%s.relative.png", reportPath, indexName1, indexName2, ratioName), width=16, height=8, units="in")
}
plotRatio("PE")
plotRatio("PB")
|
#' Viterbi algorithm
#'
#' For a given model, reconstructs the most probable states sequence,
#' using the Viterbi algorithm.
#'
#' @param m An object \code{moveHMM}
#'
#' @return The sequence of most probable states.
#'
#' @examples
#' # m is a moveHMM object (as returned by fitHMM), automatically loaded with the package
#' m <- example$m
#'
#' # reconstruction of states sequence
#' states <- viterbi(m)
#'
#' @references
#' Zucchini, W. and MacDonald, I.L. 2009.
#' Hidden Markov Models for Time Series: An Introduction Using R.
#' Chapman & Hall (London).
#'
#' @export
viterbi <- function(m)
{
  if(!is.moveHMM(m))
    stop("'m' must be a moveHMM object (as output by fitHMM)")

  # unpack fitted-model components
  data <- m$data
  nbStates <- ncol(m$mle$stepPar)
  beta <- m$mle$beta
  delta <- m$mle$delta
  stepDist <- m$conditions$stepDist
  angleDist <- m$conditions$angleDist
  stepPar <- m$mle$stepPar
  anglePar <- m$mle$anglePar
  zeroInflation <- m$conditions$zeroInflation

  if(nbStates==1)
    stop("No states to decode (nbStates=1)")

  # identify covariate columns (everything except ID, coordinates, step, angle)
  covsCol <- which(names(data)!="ID" & names(data)!="x" & names(data)!="y" &
                     names(data)!="step" & names(data)!="angle")
  covs <- data[,covsCol]

  # state-dependent observation probabilities and per-observation transition matrices
  allProbs <- allProbs(data,nbStates,stepDist,angleDist,stepPar,anglePar,zeroInflation)
  trMat <- trMatrix_rcpp(nbStates,beta,as.matrix(covs))

  # index of the first observation of each animal
  nbAnimals <- length(unique(data$ID))
  aInd <- NULL
  for(i in 1:nbAnimals)
    aInd <- c(aInd,which(data$ID==unique(data$ID)[i])[1])

  allStates <- NULL
  for(zoo in 1:nbAnimals) {
    nbObs <- length(which(data$ID==unique(data$ID)[zoo])) # nb of observations for animal zoo

    # slice out this animal's observation probabilities and transition matrices
    if(zoo!=nbAnimals) {
      p <- allProbs[aInd[zoo]:(aInd[zoo+1]-1),]
      tm <- trMat[,,aInd[zoo]:(aInd[zoo+1]-1)]
    }
    else {
      p <- allProbs[aInd[zoo]:nrow(allProbs),]
      tm <- trMat[,,aInd[zoo]:nrow(allProbs)]
    }

    # forward pass: xi[i,] holds the (rescaled) maximal path probabilities at time i
    xi <- matrix(NA,nbObs,nbStates)
    foo <- delta*p[1,]
    xi[1,] <- foo/sum(foo)
    for(i in 2:nbObs) {
      foo <- apply(xi[i-1,]*tm[,,i],2,max)*p[i,]
      xi[i,] <- foo/sum(foo)
    }

    # backward pass: trace back the most probable state sequence
    stSeq <- rep(NA,nbObs)
    stSeq[nbObs] <- which.max(xi[nbObs,])
    for(i in (nbObs-1):1)
      stSeq[i] <- which.max(tm[,stSeq[i+1],i+1]*xi[i,])
    allStates <- c(allStates,stSeq)
  }

  return(allStates)
}
|
/moveHMM/R/viterbi.R
|
no_license
|
ingted/R-Examples
|
R
| false
| false
| 2,407
|
r
|
#' Viterbi algorithm
#'
#' For a given model, reconstructs the most probable states sequence,
#' using the Viterbi algorithm.
#'
#' @param m An object \code{moveHMM}
#'
#' @return The sequence of most probable states.
#'
#' @examples
#' # m is a moveHMM object (as returned by fitHMM), automatically loaded with the package
#' m <- example$m
#'
#' # reconstruction of states sequence
#' states <- viterbi(m)
#'
#' @references
#' Zucchini, W. and MacDonald, I.L. 2009.
#' Hidden Markov Models for Time Series: An Introduction Using R.
#' Chapman & Hall (London).
#'
#' @export
viterbi <- function(m)
{
if(!is.moveHMM(m))
stop("'m' must be a moveHMM object (as output by fitHMM)")
# unpack fitted-model components
data <- m$data
nbStates <- ncol(m$mle$stepPar)
beta <- m$mle$beta
delta <- m$mle$delta
stepDist <- m$conditions$stepDist
angleDist <- m$conditions$angleDist
stepPar <- m$mle$stepPar
anglePar <- m$mle$anglePar
zeroInflation <- m$conditions$zeroInflation
if(nbStates==1)
stop("No states to decode (nbStates=1)")
# identify covariates
covsCol <- which(names(data)!="ID" & names(data)!="x" & names(data)!="y" &
names(data)!="step" & names(data)!="angle")
# NOTE(review): nbCovs is computed but never used below
nbCovs <- length(covsCol)-1 # subtract intercept column
covs <- data[,covsCol]
# state-dependent observation probabilities and per-observation transition matrices
allProbs <- allProbs(data,nbStates,stepDist,angleDist,stepPar,anglePar,zeroInflation)
trMat <- trMatrix_rcpp(nbStates,beta,as.matrix(covs))
# index of the first observation of each animal
nbAnimals <- length(unique(data$ID))
aInd <- NULL
for(i in 1:nbAnimals)
aInd <- c(aInd,which(data$ID==unique(data$ID)[i])[1])
allStates <- NULL
for(zoo in 1:nbAnimals) {
nbObs <- length(which(data$ID==unique(data$ID)[zoo])) # nb of observations for animal zoo
# NOTE(review): obsInd is computed but never used below
obsInd <- which(!is.na(data$step) & !is.na(data$angle))
# slice out this animal's observation probabilities and transition matrices
if(zoo!=nbAnimals) {
p <- allProbs[aInd[zoo]:(aInd[zoo+1]-1),]
tm <- trMat[,,aInd[zoo]:(aInd[zoo+1]-1)]
}
else {
p <- allProbs[aInd[zoo]:nrow(allProbs),]
tm <- trMat[,,aInd[zoo]:nrow(allProbs)]
}
# forward pass: xi[i,] holds the rescaled maximal path probabilities at time i
xi <- matrix(NA,nbObs,nbStates)
foo <- delta*p[1,]
xi[1,] <- foo/sum(foo)
for(i in 2:nbObs) {
foo <- apply(xi[i-1,]*tm[,,i],2,max)*p[i,]
xi[i,] <- foo/sum(foo)
}
# backward pass: trace back the most probable state sequence
stSeq <- rep(NA,nbObs)
stSeq[nbObs] <- which.max(xi[nbObs,])
for(i in (nbObs-1):1)
stSeq[i] <- which.max(tm[,stSeq[i+1],i+1]*xi[i,])
allStates <- c(allStates,stSeq)
}
return(allStates)
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/value.R
\name{value}
\alias{value}
\title{Retrieve the value from an object}
\usage{
value(x, ...)
}
\arguments{
\item{x}{object from which to retrieve the value}
\item{...}{additional arguments passed to methods}
}
\description{
Retrieve the value from an object
}
|
/man/value.Rd
|
permissive
|
r-lib/covr
|
R
| false
| true
| 345
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/value.R
\name{value}
\alias{value}
\title{Retrieve the value from an object}
\usage{
value(x, ...)
}
\arguments{
\item{x}{object from which to retrieve the value}
\item{...}{additional arguments passed to methods}
}
\description{
Retrieve the value from an object
}
|
## this scales fairly well:
## n = (100, 1000, 10000) --> (0.53, 4.21, 38) seconds
## TODO: this function cannot deal with overlapping horizons (usually an error): > 1 row / slice
## it would be useful to support these kind of data, as many lab-sampled sites have sub-samples

# Extract the horizon rows that intersect a single depth slice.
#
# h       : data.frame of horizon data
# id      : name of the profile-ID column
# top     : name of the horizon top-depth column
# bottom  : name of the horizon bottom-depth column
# vars    : character vector of variable names to keep
# z       : slice depth
# include : which interval boundary contains z: 'top' -> z in [top, bottom),
#           'bottom' -> z in (top, bottom]
# strict  : if TRUE, overlapping horizons (> 1 row per ID) raise an error;
#           otherwise a warning is issued and all matching rows are returned
#
# Returns a data.frame with the ID column, the requested vars, and
# .pctMissing = fraction of vars that are NA in each row (1 when all are NA).
get.slice <- function(h, id, top, bottom, vars, z, include='top', strict=TRUE) {
  # fail fast on an unsupported 'include' value; previously this surfaced as an
  # obscure "object 'idx' not found" error further down
  if(! include %in% c('top', 'bottom'))
    stop("'include' must be either 'top' or 'bottom'", call.=FALSE)

  # 1. get indices to rows matching the current depth slice (z)
  # 'top' is the default method
  if(include == 'top')
    idx <- which(z >= h[[top]] & z < h[[bottom]])
  else
    idx <- which(z > h[[top]] & z <= h[[bottom]])

  # 2. extract data.frame along slice: named vars + id
  h <- h[idx, c(id, vars)]

  # 2.5 compute the fraction of requested vars that are NA in each row
  h$.pctMissing <- apply(as.matrix(h[, vars]), 1, function(i, n=length(vars)) length(which(is.na(i))) / n)

  # 3. QA/QC: more result rows than unique IDs --> overlapping (bad) horizonation
  l.ids <- length(unique(h[[id]]))
  n.res <- nrow(h)
  if(l.ids != n.res) {
    if(isTRUE(strict)) {
      # report the offending IDs
      id.tab <- table(h[[id]])
      bad.ids <- paste(names(id.tab)[which(id.tab > 1)], collapse=', ')
      stop(paste('bad horizonation in IDs:', bad.ids), call.=FALSE)
    }
    # looser interpretation of the data... issue warning and return multiple rows/ID
    # join(..., match='first') will correct the problem
    else
      warning('Bad horizonation detected, first matching horizon selected. Use strict=TRUE to enforce QA/QC.')
  }

  # done: return subset of original data + pct not NA
  return(h)
}
## TODO: further optimization should be possible.
## this is a much more robust + fast version of slice.slow
## needs a little more testing, and then will be ready

# Slice a SoilProfileCollection along fixed depths.
#
# object        : a SoilProfileCollection
# fm            : formula `depths ~ var1 + var2 + ...`; the LHS evaluates to a
#                 numeric vector of slice depths, the RHS lists horizon
#                 variables ('.' selects all of them)
# top.down      : if TRUE (default) slice z spans [z, z+1), otherwise (z-1, z]
# just.the.data : if TRUE, return only the sliced horizon data as a data.frame
# strict        : passed to get.slice(); TRUE stops on overlapping horizons
#
# Returns a SoilProfileCollection of unit slices; a SpatialPointsDataFrame
# when the object has point coordinates and a single slice was requested; or
# a data.frame when just.the.data=TRUE.
slice.fast <- function(object, fm, top.down=TRUE, just.the.data=FALSE, strict=TRUE){
## important: change the default behavior of data.frame and melt
opt.original <- options(stringsAsFactors = FALSE)
# test for logical input
if(! inherits(fm, "formula"))
stop('must provide a valid formula: ~ var1 + var2 + ...', call.=FALSE)
# extract components of the formula:
formula <- str_c(deparse(fm, 500), collapse="")
elements <- str_split(formula, fixed("~"))[[1]]
formula <- lapply(str_split(elements, "[+*]"), str_trim)
# TODO: this will have to be changed when we implement no LHS = all slices
if (length(formula) > 2)
stop("please provide a valid formula", call.=FALSE)
# extract parsed formula components
vars <- formula[[2]] # RHS, simple enough
# LHS: could be either single integer or vector of slices
z <- as.numeric(eval(parse(text=formula[[1]])))
# get horizons + depth column names + ID column name
h <- horizons(object)
hd <- horizonDepths(object)
top <- hd[1] ; bottom <- hd[2] # convenience vars
id <- idname(object)
id.order <- profile_id(object) # this is the original ordering of profiles
# check for bogus left/right side problems with the formula
if(any(z < 0) | any(is.na(z)))
stop('z-slice must be >= 1', call.=FALSE)
## TODO: this will have to be updated for z-slices defined by data in @site
if(! class(z) %in% c('numeric','integer')) # bogus z-slice
stop('z-slice must be either numeric or integer', call.=FALSE)
# check for '.' --> all variables, minus ID/depths
if(any(vars == '.')) {
nh <- names(h)
cols.to.remove.idx <- match(c(id, top, bottom), nh)
vars <- nh[-cols.to.remove.idx]
}
# check for column names that don't exist
if(any(vars %in% names(h)) == FALSE) # bogus column names in right-hand side
stop('column names in formula do not match any horizon data', call.=FALSE)
## extract all vars by slice_i
# pre-allocate storage as list
hd.slices <- vector(mode='list', length=length(z))
# prepare an index for the list
slice.idx <- seq_along(z)
# convert h into an immutable data.frame for speed
# h <- idata.frame(h)
# iterate over this index
for(slice.i in slice.idx) {
# extract all vars for current slice
# TODO: this is wasteful as the entire pile of horizons is passed to get.slice in each iteration of the loop
m.i.sub <- get.slice(h, id=id, top=top, bottom=bottom, vars=vars, z=z[slice.i], strict=strict)
# join with original IDs in order to account for NA, or bad horizonation
d <- data.frame(temp_id=id.order)
names(d) <- id
## BUG: join doesn't work when ID is a factor
m.i <- join(d, m.i.sub, by=id, type='left', match='first')
# add depth range:
# top-down, means that the slice starts from the user-defined depths (default)
if(top.down) {
m.i[[top]] <- z[slice.i] # "top"
m.i[[bottom]] <- z[slice.i] + 1 # "bottom"
}
# otherwise, the slice starts at the bottom (why would someone do this?)
else {
m.i[[top]] <- z[slice.i] - 1 # "top"
m.i[[bottom]] <- z[slice.i] # "bottom"
}
# save to the list
hd.slices[[slice.i]] <- m.i
}
# convert list into DF
hd.slices <- ldply(hd.slices)
# re-order by id, then top
# keep only data we care about
# note that we have a new column in there used to store pct not NA
hd.slices <- hd.slices[order(match(hd.slices[[id]], id.order), hd.slices[[top]]), c(id, top, bottom, vars, '.pctMissing')]
# if we just want the data:
if(just.the.data)
return(hd.slices)
# if spatial data and only a single slice: SPDF
if(nrow(coordinates(object)) == length(object) & length(z) == 1) {
cat('result is a SpatialPointsDataFrame object\n')
# check for site data, if present - join to our sliced data
if(nrow(site(object)) > 0 )
hd.slices <- join(hd.slices, site(object), by=id)
# since the order of our slices and coordinates are the same
# it is safe to use 'match.ID=FALSE'
# this gets around a potential problem when dimnames(object)[[1]] aren't consecutive
# values-- often the case when subsetting has been performed
return(SpatialPointsDataFrame(coordinates(object), data=hd.slices, match.ID=FALSE))
}
# otherwise return an SPC, be sure to copy over the spatial data
depths(hd.slices) <- as.formula(paste(id, '~', top, '+', bottom))
hd.slices@sp <- object@sp
# if site data: return an SPC + @site
# note that we should have a proper setter for this
if(nrow(site(object)) > 0 )
hd.slices@site <- site(object)
# copy over any diagnostic features
diagnostic_hz(hd.slices) <- diagnostic_hz(object)
# copy over metadata
metadata(hd.slices) <- metadata(object)
# reset options:
options(opt.original)
# done
return(hd.slices)
}
## slice:
# register 'slice' as an S4 generic (unless already defined elsewhere) and
# attach slice.fast as the method for SoilProfileCollection objects
if (!isGeneric("slice"))
setGeneric("slice", function(object, fm, top.down=TRUE, just.the.data=FALSE, strict=TRUE) standardGeneric("slice"))
## TODO: allow the use of site data (PSC etc.) to determine the z-slice
setMethod(f='slice', signature='SoilProfileCollection', slice.fast)
|
/R/SoilProfileCollection-slice-methods.R
|
no_license
|
IKWENZI/aqp
|
R
| false
| false
| 7,389
|
r
|
## this scales fairly well:
## n = (100, 1000, 10000) --> (0.53, 4.21, 38) seconds
## TODO: this function cannot deal with overlapping horizons (usually an error): > 1 row / slice
## it would be useful to support these kind of data, as many lab-sampled sites have sub-samples

# Pull out the horizon rows intersecting one depth slice (z), keeping the ID
# column plus the requested variables, and annotate each row with .pctMissing
# (fraction of requested variables that are NA). With strict=TRUE, more than
# one matching horizon per profile ID is treated as an error.
get.slice <- function(h, id, top, bottom, vars, z, include='top', strict=TRUE) {
  # rows whose depth interval contains z; 'top' closes the interval at the
  # upper boundary ([top, bottom)), 'bottom' at the lower one ((top, bottom])
  if(include == 'top')
    idx <- which(h[[top]] <= z & h[[bottom]] > z)
  if(include == 'bottom')
    idx <- which(h[[top]] < z & h[[bottom]] >= z)

  # restrict to the sliced rows and the columns of interest
  sliced <- h[idx, c(id, vars)]

  # per-row fraction of requested variables that are missing
  n.vars <- length(vars)
  sliced$.pctMissing <- apply(as.matrix(sliced[, vars]), 1, function(row.vals) sum(is.na(row.vals)) / n.vars)

  # QA/QC: more result rows than unique IDs implies overlapping horizons
  n.ids <- length(unique(sliced[[id]]))
  if(n.ids != nrow(sliced)) {
    if(strict == TRUE) {
      # name the profiles with duplicated rows in the error
      id.counts <- table(sliced[[id]])
      offenders <- paste(names(id.counts)[which(id.counts > 1)], collapse=', ')
      stop(paste('bad horizonation in IDs:', offenders), call.=FALSE)
    } else {
      # lenient mode: report and carry on; join(..., match='first') downstream
      # keeps only the first matching horizon
      warning('Bad horizonation detected, first matching horizon selected. Use strict=TRUE to enforce QA/QC.')
    }
  }

  sliced
}
## TODO: further optimization should be possible.
## this is a much more robust + fast version of slice.slow
## needs a little more testing, and then will be ready

# Slice a SoilProfileCollection along fixed depths.
#
# object        : a SoilProfileCollection
# fm            : formula `depths ~ var1 + var2 + ...`; the LHS evaluates to a
#                 numeric vector of slice depths, the RHS lists horizon
#                 variables ('.' selects all of them)
# top.down      : if TRUE (default) slice z spans [z, z+1), otherwise (z-1, z]
# just.the.data : if TRUE, return only the sliced horizon data as a data.frame
# strict        : passed to get.slice(); TRUE stops on overlapping horizons
#
# Returns a SoilProfileCollection of unit slices; a SpatialPointsDataFrame
# when the object has point coordinates and a single slice was requested; or
# a data.frame when just.the.data=TRUE.
slice.fast <- function(object, fm, top.down=TRUE, just.the.data=FALSE, strict=TRUE){
## important: change the default behavior of data.frame and melt
opt.original <- options(stringsAsFactors = FALSE)
# test for logical input
if(! inherits(fm, "formula"))
stop('must provide a valid formula: ~ var1 + var2 + ...', call.=FALSE)
# extract components of the formula:
formula <- str_c(deparse(fm, 500), collapse="")
elements <- str_split(formula, fixed("~"))[[1]]
formula <- lapply(str_split(elements, "[+*]"), str_trim)
# TODO: this will have to be changed when we implement no LHS = all slices
if (length(formula) > 2)
stop("please provide a valid formula", call.=FALSE)
# extract parsed formula components
vars <- formula[[2]] # RHS, simple enough
# LHS: could be either single integer or vector of slices
z <- as.numeric(eval(parse(text=formula[[1]])))
# get horizons + depth column names + ID column name
h <- horizons(object)
hd <- horizonDepths(object)
top <- hd[1] ; bottom <- hd[2] # convenience vars
id <- idname(object)
id.order <- profile_id(object) # this is the original ordering of profiles
# check for bogus left/right side problems with the formula
if(any(z < 0) | any(is.na(z)))
stop('z-slice must be >= 1', call.=FALSE)
## TODO: this will have to be updated for z-slices defined by data in @site
if(! class(z) %in% c('numeric','integer')) # bogus z-slice
stop('z-slice must be either numeric or integer', call.=FALSE)
# check for '.' --> all variables, minus ID/depths
if(any(vars == '.')) {
nh <- names(h)
cols.to.remove.idx <- match(c(id, top, bottom), nh)
vars <- nh[-cols.to.remove.idx]
}
# check for column names that don't exist
if(any(vars %in% names(h)) == FALSE) # bogus column names in right-hand side
stop('column names in formula do not match any horizon data', call.=FALSE)
## extract all vars by slice_i
# pre-allocate storage as list
hd.slices <- vector(mode='list', length=length(z))
# prepare an index for the list
slice.idx <- seq_along(z)
# convert h into an immutable data.frame for speed
# h <- idata.frame(h)
# iterate over this index
for(slice.i in slice.idx) {
# extract all vars for current slice
# TODO: this is wasteful as the entire pile of horizons is passed to get.slice in each iteration of the loop
m.i.sub <- get.slice(h, id=id, top=top, bottom=bottom, vars=vars, z=z[slice.i], strict=strict)
# join with original IDs in order to account for NA, or bad horizonation
d <- data.frame(temp_id=id.order)
names(d) <- id
## BUG: join doesn't work when ID is a factor
m.i <- join(d, m.i.sub, by=id, type='left', match='first')
# add depth range:
# top-down, means that the slice starts from the user-defined depths (default)
if(top.down) {
m.i[[top]] <- z[slice.i] # "top"
m.i[[bottom]] <- z[slice.i] + 1 # "bottom"
}
# otherwise, the slice starts at the bottom (why would someone do this?)
else {
m.i[[top]] <- z[slice.i] - 1 # "top"
m.i[[bottom]] <- z[slice.i] # "bottom"
}
# save to the list
hd.slices[[slice.i]] <- m.i
}
# convert list into DF
hd.slices <- ldply(hd.slices)
# re-order by id, then top
# keep only data we care about
# note that we have a new column in there used to store pct not NA
hd.slices <- hd.slices[order(match(hd.slices[[id]], id.order), hd.slices[[top]]), c(id, top, bottom, vars, '.pctMissing')]
# if we just want the data:
if(just.the.data)
return(hd.slices)
# if spatial data and only a single slice: SPDF
if(nrow(coordinates(object)) == length(object) & length(z) == 1) {
cat('result is a SpatialPointsDataFrame object\n')
# check for site data, if present - join to our sliced data
if(nrow(site(object)) > 0 )
hd.slices <- join(hd.slices, site(object), by=id)
# since the order of our slices and coordinates are the same
# it is safe to use 'match.ID=FALSE'
# this gets around a potential problem when dimnames(object)[[1]] aren't consecutive
# values-- often the case when subsetting has been performed
return(SpatialPointsDataFrame(coordinates(object), data=hd.slices, match.ID=FALSE))
}
# otherwise return an SPC, be sure to copy over the spatial data
depths(hd.slices) <- as.formula(paste(id, '~', top, '+', bottom))
hd.slices@sp <- object@sp
# if site data: return an SPC + @site
# note that we should have a proper setter for this
if(nrow(site(object)) > 0 )
hd.slices@site <- site(object)
# copy over any diagnostic features
diagnostic_hz(hd.slices) <- diagnostic_hz(object)
# copy over metadata
metadata(hd.slices) <- metadata(object)
# reset options:
options(opt.original)
# done
return(hd.slices)
}
## slice:
# register 'slice' as an S4 generic (unless already defined elsewhere) and
# attach slice.fast as the method for SoilProfileCollection objects
if (!isGeneric("slice"))
setGeneric("slice", function(object, fm, top.down=TRUE, just.the.data=FALSE, strict=TRUE) standardGeneric("slice"))
## TODO: allow the use of site data (PSC etc.) to determine the z-slice
setMethod(f='slice', signature='SoilProfileCollection', slice.fast)
|
# The "100 doors" puzzle.
# The occupant in room #1 exits, and opens the closed door to every room in the
# hotel (starting with his/her own). The occupant in room #2 exits, and closes
# the now open door to every second room, leaving the others alone. The occupant
# in room #3 exits, and now finding some doors open and some closed, visits every
# third room and toggles its door, and so on for every occupant.
# A door ends up open exactly when its room number is toggled an odd number of
# times, i.e. has an odd number of divisors.
n <- 100
doors <- rep(0, n) # 0 = closed, 1 = open

# occupant i toggles every i-th door; each pass is vectorized
for (i in seq_len(n)) {
hits <- seq(i, n, by = i)
doors[hits] <- 1 - doors[hits]
}

# report the room numbers whose door is left open
for (i in seq_len(n)) {
if (doors[i] == 1)
print(i)
}
print(doors)
print("1=open 0=close")

# Another way of doing this program: same toggling rule on 10 doors,
# printing the state vector after each occupant's pass
g <- rep("close", 10)
for (k in seq_len(10)) {
for (j in seq_len(10)) {
if (j %% k == 0) {
g[j] <- if (g[j] == "close") "open" else "close"
}
}
print(g)
}
|
/hotel_doors.R
|
no_license
|
BajiShaida/CourseWork
|
R
| false
| false
| 1,462
|
r
|
# The "100 doors" puzzle:
# The occupant in room #1 exits, and opens the closed door to every room in the hotel (starting with his/her own). The occupant in room
# #2 exits, and closes the now open door to every second room in the hotel leaving the others alone. The occupant in room #3 exits, and now finding some doors open and some closed, visits every third room in the hotel and closes the door if it is open, opens it if it is closed, leaving the others alone.
n=100
doors<-rep(0,n)
# occupant i toggles every i-th door (0 = closed, 1 = open)
for(i in c(1:n))
{
act=seq(i,n,i)
for(j in act){
if(doors[j]==0)
{
doors[j]=1
}
else
{
doors[j]=0
}
}
#num<-"the door number"
#cat(num,i,"\n")
#print(doors)
}
# print the room numbers whose door is left open
for(i in c(1:n))
{
if(doors[i]==1)
print(i)
}
print(doors)
print("1=open 0=close")
# Another way of doing this program: same toggling rule on 10 doors,
# printing the state vector after each occupant's pass
g<-rep("close",10)
for(k in c(1:10))
{
for(j in c(1:10))
{
if(j%%k==0)
{
if(g[j]=="close")
{
g[j]="open"
}
else
{
g[j]="close"
}
}
}
#num="persen"
#print(cat(num,i))
print(g)
}
|
# devtools workflow: the standard package-development steps, run in the order
# they are usually performed before a release.
library(devtools)
library(roxygen2)
# create documentation (roxygen comments -> man/*.Rd + NAMESPACE)
devtools::document()
# check documentation
devtools::check_man()
# run tests
devtools::test()
# create vignettes
devtools::build_vignettes()
# create bundle (source package)
devtools::build()
# install package
devtools::install()
|
/roller/devtools-flow.R
|
no_license
|
Derek-Shue/stat133-hws-fall18
|
R
| false
| false
| 288
|
r
|
# devtools workflow: the standard package-development steps, run in the order
# they are usually performed before a release.
library(devtools)
library(roxygen2)
# create documentation (roxygen comments -> man/*.Rd + NAMESPACE)
devtools::document()
# check documentation
devtools::check_man()
# run tests
devtools::test()
# create vignettes
devtools::build_vignettes()
# create bundle (source package)
devtools::build()
# install package
devtools::install()
|
# Analysis script
library(haven)
library(tidyverse)
# Data cleaning ----
# Scale computation ----
# Analysis ----
# * Analysis 1 ----
# One-sample t-test: does variable x1 (built-in anscombe data) differ from 0?
print("Hypothese 1")
t.test(anscombe$x1)
# Plot creation ----
|
/AnalyseScript.R
|
no_license
|
rocknromeo/rstudiotest
|
R
| false
| false
| 269
|
r
|
# Analysis script
library(haven)
library(tidyverse)
# Data cleaning ----
# Scale computation ----
# Analysis ----
# * Analysis 1 ----
# One-sample t-test: does variable x1 (built-in anscombe data) differ from 0?
print("Hypothese 1")
t.test(anscombe$x1)
# Plot creation ----
|
# Grid-search driver for the MACD/KDJ strategy: backtests every combination of
# the MACD and KDJ parameter sequences below and records each run's PD ratio.
source('framework/data.R');
source('framework/backtester.R')
source('framework/processResults.R');
source('strategies/macd-kdj.R')
numOfDays <- 100
dataList <- getData(directory="PART1")
# truncate every series to the first numOfDays rows
dataList <- lapply(dataList, function(x) x[1:numOfDays])
sMult <- 0.2 # slippage multiplier
#MACD seqs
slowSeq <- seq(from=26,to=26,by=20)
fastSeq <- seq(from=20,to=20,by=15)
sigSeq <- seq(from=10,to=10,by=15)
#KDJ seqs
slowDseq<- seq(from=26,to=26,by=20)
fastKSeq<- seq(from=12,to=12,by=15)
fastDseq<- seq(from=9,to=9,by=15)
paramsList <- list(slowSeq,fastSeq,sigSeq,slowDseq,fastKSeq,fastDseq)
# total number of parameter combinations in the grid
numberComb <- prod(sapply(paramsList,length))
# one row per combination: the six parameters + the resulting PD ratio
resultsMatrix <- matrix(nrow=numberComb,ncol=7)
colnames(resultsMatrix) <- c("slow","fast","signal","slowD","fastk","fastD","PD Ratio")
pfolioPnLList <- vector(mode="list",length=numberComb)
count <- 1
# exhaustive nested loop over the six parameter sequences
for (sl in slowSeq) {
for (fa in fastSeq) {
for (sig in sigSeq) {
for (sd in slowDseq) {
for (fk in fastKSeq) {
for (fd in fastDseq) {
params <- list(series=1,
slow=sl,fast=fa,signal=sig,
nSlowD=sd,nFastK=fk,nFastD=fd,
posSizes=c(92,23,25,1,11,20,6759,1,43,7),
lookback=50,size=300000)
results <- backtest(dataList, getOrders, params, sMult)
pfolioPnL <- plotResults(dataList,results)
resultsMatrix[count,] <- c(sl,fa,sig,sd,fk,fd,pfolioPnL$fitAgg)
pfolioPnLList[[count]]<- pfolioPnL
cat("Just completed",count,"out of",numberComb,"\n")
print(resultsMatrix[count,])
count <- count + 1
}}}}}}
# show all combinations ordered by PD ratio
print(resultsMatrix[order(resultsMatrix[,"PD Ratio"]),])
|
/main_optimaize.R
|
no_license
|
TANKERS888/COMP396
|
R
| false
| false
| 1,742
|
r
|
# Grid-search driver for the MACD/KDJ strategy: backtests every combination of
# the MACD and KDJ parameter sequences below and records each run's PD ratio.
source('framework/data.R');
source('framework/backtester.R')
source('framework/processResults.R');
source('strategies/macd-kdj.R')
numOfDays <- 100
dataList <- getData(directory="PART1")
# truncate every series to the first numOfDays rows
dataList <- lapply(dataList, function(x) x[1:numOfDays])
sMult <- 0.2 # slippage multiplier
#MACD seqs
slowSeq <- seq(from=26,to=26,by=20)
fastSeq <- seq(from=20,to=20,by=15)
sigSeq <- seq(from=10,to=10,by=15)
#KDJ seqs
slowDseq<- seq(from=26,to=26,by=20)
fastKSeq<- seq(from=12,to=12,by=15)
fastDseq<- seq(from=9,to=9,by=15)
paramsList <- list(slowSeq,fastSeq,sigSeq,slowDseq,fastKSeq,fastDseq)
# total number of parameter combinations in the grid
numberComb <- prod(sapply(paramsList,length))
# one row per combination: the six parameters + the resulting PD ratio
resultsMatrix <- matrix(nrow=numberComb,ncol=7)
colnames(resultsMatrix) <- c("slow","fast","signal","slowD","fastk","fastD","PD Ratio")
pfolioPnLList <- vector(mode="list",length=numberComb)
count <- 1
# exhaustive nested loop over the six parameter sequences
for (sl in slowSeq) {
for (fa in fastSeq) {
for (sig in sigSeq) {
for (sd in slowDseq) {
for (fk in fastKSeq) {
for (fd in fastDseq) {
params <- list(series=1,
slow=sl,fast=fa,signal=sig,
nSlowD=sd,nFastK=fk,nFastD=fd,
posSizes=c(92,23,25,1,11,20,6759,1,43,7),
lookback=50,size=300000)
results <- backtest(dataList, getOrders, params, sMult)
pfolioPnL <- plotResults(dataList,results)
resultsMatrix[count,] <- c(sl,fa,sig,sd,fk,fd,pfolioPnL$fitAgg)
pfolioPnLList[[count]]<- pfolioPnL
cat("Just completed",count,"out of",numberComb,"\n")
print(resultsMatrix[count,])
count <- count + 1
}}}}}}
# show all combinations ordered by PD ratio
print(resultsMatrix[order(resultsMatrix[,"PD Ratio"]),])
|
#' Read RDS file
#' @description Read R data - RDS file from anywhere
#' @param file path of the file to be read (extension must be .rds, case-insensitive)
#' @param FUN the function using which the file is to be read
#' @param data_source the name of the data source, if not set globally. s3, gcs or local
#' @param bucket the name of the bucket, if not set globally
#' @param dir the directory to store intermediate files
#' @param delete_file logical. to delete the file downloaded
#' @param show_progress logical. Shows progress of the download operation
#' @param ... other parameters for the FUN function defined above
#' @export "import_rds"
#' @return the output of the FUN function
#'
#' @examples
#' \dontrun{
#' # Import RDS from Google Cloud
#' flyio_set_datasource("gcs")
#' flyio_set_bucket("your-bucket-name")
#' import_rds("rds-on-cloud.rds", readRDS, dir = tempdir())
#' }
import_rds <- function(file, FUN = readRDS, data_source = flyio_get_datasource(),
                       bucket = flyio_get_bucket(data_source), dir = flyio_get_dir(), delete_file = TRUE, show_progress = FALSE, ...){
  # checking if the file is valid; the extension test is case-insensitive so
  # that ".rds", ".RDS" and ".Rds" are all accepted
  assert_that(tolower(tools::file_ext(file)) == "rds", msg = "Please input a valid path")
  # local source: read the file in place, no staging needed
  if(data_source == "local"){
    t = FUN(file, ...)
    return(t)
  }
  # a temporary local copy with the same file name, staged under `dir`
  temp <- paste0(dir, "/", basename(file))
  # remove the staged copy on exit (success or error) when requested
  if(isTRUE(delete_file)){on.exit(unlink(temp))}
  # collapse any duplicated slashes in the bucket path, then download
  file = gsub("\\/+","/",file)
  downlogical = import_file(bucketpath = file, localfile = temp, bucket = bucket, show_progress = show_progress)
  assert_that(is.character(downlogical), msg = "Downloading of file failed")
  # loading the file to the memory using user defined function
  result = FUN(temp, ...)
  return(result)
}
|
/R/import_rds.R
|
no_license
|
intiluna/flyio
|
R
| false
| false
| 1,754
|
r
|
#' Read RDS file
#' @description Read an R data (RDS) file from any configured source
#' @param file path of the file to be read
#' @param FUN the function used to read the file
#' @param data_source the name of the data source, if not set globally. s3, gcs or local
#' @param bucket the name of the bucket, if not set globally
#' @param dir the directory to store intermediate files
#' @param delete_file logical. to delete the file downloaded
#' @param show_progress logical. Shows progress of the download operation
#' @param ... other parameters passed on to FUN
#' @export "import_rds"
#' @return the output of the FUN function
#'
#' @examples
#' \dontrun{
#' # Import RDS from Google Cloud
#' flyio_set_datasource("gcs")
#' flyio_set_bucket("your-bucket-name")
#' import_rds("rds-on-cloud.rds", readRDS, dir = tempdir())
#' }
import_rds <- function(file, FUN = readRDS, data_source = flyio_get_datasource(),
                       bucket = flyio_get_bucket(data_source), dir = flyio_get_dir(), delete_file = TRUE, show_progress = FALSE, ...){
  # reject anything that is not an .rds/.RDS path
  assert_that(tools::file_ext(file) %in% c("RDS", "rds"), msg = "Please input a valid path")
  # local files need no staging: hand the path straight to the reader
  if(data_source == "local"){
    return(FUN(file, ...))
  }
  # stage the remote object under `dir`, keeping the original file name
  staged <- paste0(dir, "/", basename(file))
  if(isTRUE(delete_file)){
    # remove the staged copy when this function exits
    on.exit(unlink(staged))
  }
  # normalize repeated slashes in the bucket path before downloading
  file = gsub("\\/+","/",file)
  status = import_file(bucketpath = file, localfile = staged, bucket = bucket, show_progress = show_progress)
  assert_that(is.character(status), msg = "Downloading of file failed")
  # read the staged copy with the user-supplied function
  FUN(staged, ...)
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/bwplot.r
\name{bwplot.mids}
\alias{bwplot.mids}
\alias{bwplot}
\title{Box-and-whisker plot of observed and imputed data}
\usage{
\method{bwplot}{mids}(x, data, na.groups = NULL, groups = NULL,
as.table = TRUE, theme = mice.theme(), mayreplicate = TRUE,
allow.multiple = TRUE, outer = TRUE,
drop.unused.levels = lattice::lattice.getOption("drop.unused.levels"), ...,
subscripts = TRUE, subset = TRUE)
}
\arguments{
\item{x}{A \code{mids} object, typically created by \code{mice()} or
\code{mice.mids()}.}
\item{data}{Formula that selects the data to be plotted. This argument
follows the \pkg{lattice} rules for \emph{formulas}, describing the primary
variables (used for the per-panel display) and the optional conditioning
variables (which define the subsets plotted in different panels) to be used
in the plot.
The formula is evaluated on the complete data set in the \code{long} form.
Legal variable names for the formula include \code{names(x$data)} plus the
two administrative factors \code{.imp} and \code{.id}.
\bold{Extended formula interface:} The primary variable terms (both the LHS
\code{y} and RHS \code{x}) may consist of multiple terms separated by a
\sQuote{+} sign, e.g., \code{y1 + y2 ~ x | a * b}. This formula would be
taken to mean that the user wants to plot both \code{y1 ~ x | a * b} and
\code{y2 ~ x | a * b}, but with the \code{y1 ~ x} and \code{y2 ~ x} in
\emph{separate panels}. This behavior differs from standard \pkg{lattice}.
\emph{Only combine terms of the same type}, i.e. only factors or only
numerical variables. Mixing numerical and categorical data occasionally
produces odd labeling of the vertical axis.
For convenience, in \code{stripplot()} and \code{bwplot} the formula
\code{y~.imp} may be abbreviated as \code{y}. This applies only to a single
\code{y}, and does not (yet) work for \code{y1+y2~.imp}.}
\item{na.groups}{An expression evaluating to a logical vector indicating
which two groups are distinguished (e.g. using different colors) in the
display. The environment in which this expression is evaluated in the
response indicator \code{is.na(x$data)}.
The default \code{na.groups = NULL} contrasts the observed and missing data
in the LHS \code{y} variable of the display, i.e. groups created by
\code{is.na(y)}. The expression \code{y} creates the groups according to
\code{is.na(y)}. The expression \code{y1 & y2} creates groups by
\code{is.na(y1) & is.na(y2)}, and \code{y1 | y2} creates groups as
\code{is.na(y1) | is.na(y2)}, and so on.}
\item{groups}{This is the usual \code{groups} arguments in \pkg{lattice}. It
differs from \code{na.groups} because it evaluates in the completed data
\code{data.frame(complete(x, "long", inc=TRUE))} (as usual), whereas
\code{na.groups} evaluates in the response indicator. See
\code{\link{xyplot}} for more details. When both \code{na.groups} and
\code{groups} are specified, \code{na.groups} takes precedence, and
\code{groups} is ignored.}
\item{as.table}{See \code{\link[lattice:xyplot]{xyplot}}.}
\item{theme}{A named list containing the graphical parameters. The default
function \code{mice.theme} produces a short list of default colors, line
width, and so on. The extensive list may be obtained from
\code{trellis.par.get()}. Global graphical parameters like \code{col} or
\code{cex} in high-level calls are still honored, so first experiment with
the global parameters. Many settings consist of a pair. For example,
\code{mice.theme} defines two symbol colors. The first is for the observed
data, the second for the imputed data. The theme settings only exist during
the call, and do not affect the trellis graphical parameters.}
\item{mayreplicate}{A logical indicating whether color, line widths, and so
on, may be replicated. The graphical functions attempt to choose
"intelligent" graphical parameters. For example, the same color can be
replicated for different elements, e.g. use all reds for the imputed data.
Replication may be switched off by setting the flag to \code{FALSE}, in order
to allow the user to gain full control.}
\item{allow.multiple}{See \code{\link[lattice:xyplot]{xyplot}}.}
\item{outer}{See \code{\link[lattice:xyplot]{xyplot}}.}
\item{drop.unused.levels}{See \code{\link[lattice:xyplot]{xyplot}}.}
\item{\dots}{Further arguments, usually not directly processed by the
high-level functions documented here, but instead passed on to other
functions.}
\item{subscripts}{See \code{\link[lattice:xyplot]{xyplot}}.}
\item{subset}{See \code{\link[lattice:xyplot]{xyplot}}.}
}
\value{
The high-level functions documented here, as well as other high-level
Lattice functions, return an object of class \code{"trellis"}. The
\code{\link[lattice:update.trellis]{update}} method can be used to
subsequently update components of the object, and the
\code{\link[lattice:print.trellis]{print}} method (usually called by default)
will plot it on an appropriate plotting device.
}
\description{
Plotting methods for imputed data using \pkg{lattice}. \code{bwplot}
produces box-and-whisker plots. The function
automatically separates the observed and imputed data. The
functions extend the usual features of \pkg{lattice}.
}
\details{
The argument \code{na.groups} may be used to specify (combinations of)
missingness in any of the variables. The argument \code{groups} can be used
to specify groups based on the variable values themselves. Only one of both
may be active at the same time. When both are specified, \code{na.groups}
takes precedence over \code{groups}.
Use the \code{subset} and \code{na.groups} together to plot parts of the
data. For example, select the first imputed data set by
\code{subset=.imp==1}.
Graphical parameters like \code{col}, \code{pch} and \code{cex} can be
specified in the arguments list to alter the plotting symbols. If
\code{length(col)==2}, the color specification to define the observed and
missing groups. \code{col[1]} is the color of the 'observed' data,
\code{col[2]} is the color of the missing or imputed data. A convenient color
choice is \code{col=mdc(1:2)}, a transparent blue color for the observed
data, and a transparent red color for the imputed data. A good choice is
\code{col=mdc(1:2), pch=20, cex=1.5}. These choices can be set for the
duration of the session by running \code{mice.theme()}.
}
\note{
The first two arguments (\code{x} and \code{data}) are reversed
compared to the standard Trellis syntax implemented in \pkg{lattice}. This
reversal was necessary in order to benefit from automatic method dispatch.
In \pkg{mice} the argument \code{x} is always a \code{mids} object, whereas
in \pkg{lattice} the argument \code{x} is always a formula.
In \pkg{mice} the argument \code{data} is always a formula object, whereas in
\pkg{lattice} the argument \code{data} is usually a data frame.
All other arguments have identical interpretation.
}
\examples{
imp <- mice(boys, maxit=1)
### box-and-whisker plot per imputation of all numerical variables
bwplot(imp)
### tv (testicular volume), conditional on region
bwplot(imp, tv~.imp|reg)
### same data, organized in a different way
bwplot(imp, tv~reg|.imp, theme=list())
}
\references{
Sarkar, Deepayan (2008) \emph{Lattice: Multivariate Data
Visualization with R}, Springer.
van Buuren S and Groothuis-Oudshoorn K (2011). \code{mice}: Multivariate
Imputation by Chained Equations in \code{R}. \emph{Journal of Statistical
Software}, \bold{45}(3), 1-67. \url{http://www.jstatsoft.org/v45/i03/}
}
\seealso{
\code{\link{mice}}, \code{\link{xyplot}}, \code{\link{densityplot}},
\code{\link{stripplot}}, \code{\link{lattice}} for an overview of the
package, as well as \code{\link[lattice:bwplot]{bwplot}},
\code{\link[lattice:panel.bwplot]{panel.bwplot}},
\code{\link[lattice:print.trellis]{print.trellis}},
\code{\link[lattice:trellis.par.set]{trellis.par.set}}
}
\author{
Stef van Buuren
}
\keyword{hplot}
|
/man/bwplot.mids.Rd
|
no_license
|
moreno-betancur/mice
|
R
| false
| true
| 7,942
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/bwplot.r
\name{bwplot.mids}
\alias{bwplot.mids}
\alias{bwplot}
\title{Box-and-whisker plot of observed and imputed data}
\usage{
\method{bwplot}{mids}(x, data, na.groups = NULL, groups = NULL,
as.table = TRUE, theme = mice.theme(), mayreplicate = TRUE,
allow.multiple = TRUE, outer = TRUE,
drop.unused.levels = lattice::lattice.getOption("drop.unused.levels"), ...,
subscripts = TRUE, subset = TRUE)
}
\arguments{
\item{x}{A \code{mids} object, typically created by \code{mice()} or
\code{mice.mids()}.}
\item{data}{Formula that selects the data to be plotted. This argument
follows the \pkg{lattice} rules for \emph{formulas}, describing the primary
variables (used for the per-panel display) and the optional conditioning
variables (which define the subsets plotted in different panels) to be used
in the plot.
The formula is evaluated on the complete data set in the \code{long} form.
Legal variable names for the formula include \code{names(x$data)} plus the
two administrative factors \code{.imp} and \code{.id}.
\bold{Extended formula interface:} The primary variable terms (both the LHS
\code{y} and RHS \code{x}) may consist of multiple terms separated by a
\sQuote{+} sign, e.g., \code{y1 + y2 ~ x | a * b}. This formula would be
taken to mean that the user wants to plot both \code{y1 ~ x | a * b} and
\code{y2 ~ x | a * b}, but with the \code{y1 ~ x} and \code{y2 ~ x} in
\emph{separate panels}. This behavior differs from standard \pkg{lattice}.
\emph{Only combine terms of the same type}, i.e. only factors or only
numerical variables. Mixing numerical and categorical data occasionally
produces odd labeling of the vertical axis.
For convenience, in \code{stripplot()} and \code{bwplot} the formula
\code{y~.imp} may be abbreviated as \code{y}. This applies only to a single
\code{y}, and does not (yet) work for \code{y1+y2~.imp}.}
\item{na.groups}{An expression evaluating to a logical vector indicating
which two groups are distinguished (e.g. using different colors) in the
display. The environment in which this expression is evaluated in the
response indicator \code{is.na(x$data)}.
The default \code{na.group = NULL} contrasts the observed and missing data
in the LHS \code{y} variable of the display, i.e. groups created by
\code{is.na(y)}. The expression \code{y} creates the groups according to
\code{is.na(y)}. The expression \code{y1 & y2} creates groups by
\code{is.na(y1) & is.na(y2)}, and \code{y1 | y2} creates groups as
\code{is.na(y1) | is.na(y2)}, and so on.}
\item{groups}{This is the usual \code{groups} arguments in \pkg{lattice}. It
differs from \code{na.groups} because it evaluates in the completed data
\code{data.frame(complete(x, "long", inc=TRUE))} (as usual), whereas
\code{na.groups} evaluates in the response indicator. See
\code{\link{xyplot}} for more details. When both \code{na.groups} and
\code{groups} are specified, \code{na.groups} takes precedence, and
\code{groups} is ignored.}
\item{as.table}{See \code{\link[lattice:xyplot]{xyplot}}.}
\item{theme}{A named list containing the graphical parameters. The default
function \code{mice.theme} produces a short list of default colors, line
width, and so on. The extensive list may be obtained from
\code{trellis.par.get()}. Global graphical parameters like \code{col} or
\code{cex} in high-level calls are still honored, so first experiment with
the global parameters. Many settings consist of a pair. For example,
\code{mice.theme} defines two symbol colors. The first is for the observed
data, the second for the imputed data. The theme settings only exist during
the call, and do not affect the trellis graphical parameters.}
\item{mayreplicate}{A logical indicating whether color, line widths, and so
on, may be replicated. The graphical functions attempt to choose
"intelligent" graphical parameters. For example, the same color can be
replicated for different elements, e.g. use all reds for the imputed data.
Replication may be switched off by setting the flag to \code{FALSE}, in order
to allow the user to gain full control.}
\item{allow.multiple}{See \code{\link[lattice:xyplot]{xyplot}}.}
\item{outer}{See \code{\link[lattice:xyplot]{xyplot}}.}
\item{drop.unused.levels}{See \code{\link[lattice:xyplot]{xyplot}}.}
\item{\dots}{Further arguments, usually not directly processed by the
high-level functions documented here, but instead passed on to other
functions.}
\item{subscripts}{See \code{\link[lattice:xyplot]{xyplot}}.}
\item{subset}{See \code{\link[lattice:xyplot]{xyplot}}.}
}
\value{
The high-level functions documented here, as well as other high-level
Lattice functions, return an object of class \code{"trellis"}. The
\code{\link[lattice:update.trellis]{update}} method can be used to
subsequently update components of the object, and the
\code{\link[lattice:print.trellis]{print}} method (usually called by default)
will plot it on an appropriate plotting device.
}
\description{
Plotting methods for imputed data using \pkg{lattice}. \code{bwplot}
produces box-and-whisker plots. The function
automatically separates the observed and imputed data. The
functions extend the usual features of \pkg{lattice}.
}
\details{
The argument \code{na.groups} may be used to specify (combinations of)
missingness in any of the variables. The argument \code{groups} can be used
to specify groups based on the variable values themselves. Only one of both
may be active at the same time. When both are specified, \code{na.groups}
takes precedence over \code{groups}.
Use the \code{subset} and \code{na.groups} together to plot parts of the
data. For example, select the first imputed data set by
\code{subset=.imp==1}.
Graphical parameters like \code{col}, \code{pch} and \code{cex} can be
specified in the arguments list to alter the plotting symbols. If
\code{length(col)==2}, the color specification to define the observed and
missing groups. \code{col[1]} is the color of the 'observed' data,
\code{col[2]} is the color of the missing or imputed data. A convenient color
choice is \code{col=mdc(1:2)}, a transparent blue color for the observed
data, and a transparent red color for the imputed data. A good choice is
\code{col=mdc(1:2), pch=20, cex=1.5}. These choices can be set for the
duration of the session by running \code{mice.theme()}.
}
\note{
The first two arguments (\code{x} and \code{data}) are reversed
compared to the standard Trellis syntax implemented in \pkg{lattice}. This
reversal was necessary in order to benefit from automatic method dispatch.
In \pkg{mice} the argument \code{x} is always a \code{mids} object, whereas
in \pkg{lattice} the argument \code{x} is always a formula.
In \pkg{mice} the argument \code{data} is always a formula object, whereas in
\pkg{lattice} the argument \code{data} is usually a data frame.
All other arguments have identical interpretation.
}
\examples{
imp <- mice(boys, maxit=1)
### box-and-whisker plot per imputation of all numerical variables
bwplot(imp)
### tv (testicular volume), conditional on region
bwplot(imp, tv~.imp|reg)
### same data, organized in a different way
bwplot(imp, tv~reg|.imp, theme=list())
}
\references{
Sarkar, Deepayan (2008) \emph{Lattice: Multivariate Data
Visualization with R}, Springer.
van Buuren S and Groothuis-Oudshoorn K (2011). \code{mice}: Multivariate
Imputation by Chained Equations in \code{R}. \emph{Journal of Statistical
Software}, \bold{45}(3), 1-67. \url{http://www.jstatsoft.org/v45/i03/}
}
\seealso{
\code{\link{mice}}, \code{\link{xyplot}}, \code{\link{densityplot}},
\code{\link{stripplot}}, \code{\link{lattice}} for an overview of the
package, as well as \code{\link[lattice:bwplot]{bwplot}},
\code{\link[lattice:panel.bwplot]{panel.bwplot}},
\code{\link[lattice:print.trellis]{print.trellis}},
\code{\link[lattice:trellis.par.set]{trellis.par.set}}
}
\author{
Stef van Buuren
}
\keyword{hplot}
|
## Closed-population abundance estimation ("M0"-type model) fitted in JAGS
## via R2jags, combining two data streams:
##   1. netting capture histories of untagged fish, handled with
##      parameter-expanded data augmentation (z[i] ~ dbern(omega)), and
##   2. capture histories of acoustically tagged fish known to be present.
## Abundance is derived as N = sum(z) + Ncha.
library(R2jags)
# NOTE(review): hard-coded absolute Windows path; breaks on any other machine.
setwd("C:/Users/mcolvin/Documents/projects/Paddlefish/analysis")
# JAGS model definition. Although written as an R function, the body is
# parsed by JAGS as BUGS code -- it is never executed by R itself.
mod<- function()
{
for(i in 1:M)
{
z[i]~dbern(omega) # LATENT VARIABLE, DATA AUGMENTATION (1 = fish is real/in pool)
for(j in 1:Nocc)
{
p_eff[i,j]<- z[i]*p_cap[j] # CONDITIONAL CAPTURE PROBABILITY (0 if z[i]=0)
ch[i,j]~dbern(p_eff[i,j])
}#j  (closes occasion loop)
}#i  (closes individual loop)
# ACOUSTICALLY TAGGED FISH: known to be present, so z is not needed;
# their captures inform p_cap directly
for(i in 1:Ncha)
{
for(j in 1:Nocc)
{
cha[i,j]~dbern(p_cap[j])
}
}
# # CAPTURE PROBABILITY AS A FUNCTION OF EFFORT
for(occ in 1:Nocc)
{
y[occ]<- a # a + b*effort[occ]  (effort covariate currently disabled)
p_cap[occ]<- exp(y[occ])/(1+exp(y[occ])) # convert to probability (inverse logit)
}
# DERIVED PARAMETERS: augmented-population total plus acoustically tagged fish
N<-sum(z[]) + Ncha
# PRIORS
omega~dunif(0,1)
a~dnorm(0,0.37) # vague prior on the logit-scale intercept; JAGS dnorm uses precision, not SD
}
## number of rows to augment capture histories
## this should be more than you expect the population to be
dat_aug<- 200
xxx<- read.csv("./Copy of Population Matrix from 4-1-16.csv")
# clean up data: keep ID column plus the capture-occasion columns
xxx<- xxx[,c(1:8)]
# data stream 1: acoustically tagged fish (dat == 1)
cha<- as.matrix(subset(xxx,dat==1)[,-1])# subset and drop first column
# data stream 2: untagged fish (dat == 2)
ch<- as.matrix(subset(xxx,dat==2)[,-1])# subset and drop first column
## vectors of 0s and 1s indicating whether fish is
## in the pool or not (not acoustically tagged fish);
## computed BEFORE ch is augmented, so length(z) == dat_aug
z<- c(rep(1,nrow(ch)),rep(0,dat_aug-nrow(ch)))
## capture history plus extra rows of 0s
ch<-rbind(ch,
matrix(0,nrow=dat_aug-nrow(ch) ,ncol=ncol(ch)))# data augmentation needs to be matrix of 0s not NAs for JAGs
## bundle up data for JAGs
dat<- list(ch=ch,# capture history of non-acoustic fish
cha=cha, # capture history of acoustic fish
Ncha=nrow(cha), # number of acoustically tagged fish in pool at sampling
M=nrow(ch),# Number of rows in the augmented capture history matrix
Nocc=ncol(cha))# Number of netting occasions
## initial values
## set for each chain (observed fish start at z=1, augmented rows at z=0)
inits<- function()
{
list(a=-2.5,omega=0.5,z=z)
}
## WHAT PARAMETERS TO KEEP TRACK OF DURING ESTIMATION
params<-c("a","N","omega","p_cap")
# THIS WILL ONLY RUN IF YOU HAVE JAGS INSTALLED
# AND THE R2jags PACKAGE
out <- jags(data=dat,
inits=inits,
parameters=params,
model.file=mod,
n.chains = 3,
n.iter = 15000,
n.burnin = 6000,
n.thin=2,
working.directory=getwd())
out # OUTPUT
out$BUGSoutput$mean$N # ESTIMATED NUMBER OF FISH IN POOL
traceplot(out)# LOOK AT TRACEPLOTS FOR CONVERGENCE.
|
/_model-verifications/Jags-Closed-Population-Estimates/20160303-closedN (2).R
|
no_license
|
mcolvin/paddlefish-RD
|
R
| false
| false
| 2,289
|
r
|
## Closed-population abundance estimation ("M0"-type model) fitted in JAGS
## via R2jags, combining two data streams:
##   1. netting capture histories of untagged fish, handled with
##      parameter-expanded data augmentation (z[i] ~ dbern(omega)), and
##   2. capture histories of acoustically tagged fish known to be present.
## Abundance is derived as N = sum(z) + Ncha.
library(R2jags)
# NOTE(review): hard-coded absolute Windows path; breaks on any other machine.
setwd("C:/Users/mcolvin/Documents/projects/Paddlefish/analysis")
# JAGS model definition. Although written as an R function, the body is
# parsed by JAGS as BUGS code -- it is never executed by R itself.
mod<- function()
{
for(i in 1:M)
{
z[i]~dbern(omega) # LATENT VARIABLE, DATA AUGMENTATION (1 = fish is real/in pool)
for(j in 1:Nocc)
{
p_eff[i,j]<- z[i]*p_cap[j] # CONDITIONAL CAPTURE PROBABILITY (0 if z[i]=0)
ch[i,j]~dbern(p_eff[i,j])
}#j  (closes occasion loop)
}#i  (closes individual loop)
# ACOUSTICALLY TAGGED FISH: known to be present, so z is not needed;
# their captures inform p_cap directly
for(i in 1:Ncha)
{
for(j in 1:Nocc)
{
cha[i,j]~dbern(p_cap[j])
}
}
# # CAPTURE PROBABILITY AS A FUNCTION OF EFFORT
for(occ in 1:Nocc)
{
y[occ]<- a # a + b*effort[occ]  (effort covariate currently disabled)
p_cap[occ]<- exp(y[occ])/(1+exp(y[occ])) # convert to probability (inverse logit)
}
# DERIVED PARAMETERS: augmented-population total plus acoustically tagged fish
N<-sum(z[]) + Ncha
# PRIORS
omega~dunif(0,1)
a~dnorm(0,0.37) # vague prior on the logit-scale intercept; JAGS dnorm uses precision, not SD
}
## number of rows to augment capture histories
## this should be more than you expect the population to be
dat_aug<- 200
xxx<- read.csv("./Copy of Population Matrix from 4-1-16.csv")
# clean up data: keep ID column plus the capture-occasion columns
xxx<- xxx[,c(1:8)]
# data stream 1: acoustically tagged fish (dat == 1)
cha<- as.matrix(subset(xxx,dat==1)[,-1])# subset and drop first column
# data stream 2: untagged fish (dat == 2)
ch<- as.matrix(subset(xxx,dat==2)[,-1])# subset and drop first column
## vectors of 0s and 1s indicating whether fish is
## in the pool or not (not acoustically tagged fish);
## computed BEFORE ch is augmented, so length(z) == dat_aug
z<- c(rep(1,nrow(ch)),rep(0,dat_aug-nrow(ch)))
## capture history plus extra rows of 0s
ch<-rbind(ch,
matrix(0,nrow=dat_aug-nrow(ch) ,ncol=ncol(ch)))# data augmentation needs to be matrix of 0s not NAs for JAGs
## bundle up data for JAGs
dat<- list(ch=ch,# capture history of non-acoustic fish
cha=cha, # capture history of acoustic fish
Ncha=nrow(cha), # number of acoustically tagged fish in pool at sampling
M=nrow(ch),# Number of rows in the augmented capture history matrix
Nocc=ncol(cha))# Number of netting occasions
## initial values
## set for each chain (observed fish start at z=1, augmented rows at z=0)
inits<- function()
{
list(a=-2.5,omega=0.5,z=z)
}
## WHAT PARAMETERS TO KEEP TRACK OF DURING ESTIMATION
params<-c("a","N","omega","p_cap")
# THIS WILL ONLY RUN IF YOU HAVE JAGS INSTALLED
# AND THE R2jags PACKAGE
out <- jags(data=dat,
inits=inits,
parameters=params,
model.file=mod,
n.chains = 3,
n.iter = 15000,
n.burnin = 6000,
n.thin=2,
working.directory=getwd())
out # OUTPUT
out$BUGSoutput$mean$N # ESTIMATED NUMBER OF FISH IN POOL
traceplot(out)# LOOK AT TRACEPLOTS FOR CONVERGENCE.
|
% Generated by roxygen2 (4.1.0): do not edit by hand
% Please edit documentation in R/vulnpryr.R
\docType{package}
\name{vulnpryr}
\alias{my_env}
\alias{vulnpryr}
\alias{vulnpryr-package}
\title{vulnpryr: A package for prying additional utility into your CVSS scores.}
\format{\preformatted{<environment: 0x0000000008e45f80>
}}
\usage{
my_env
vulnpryr(cve_id, cvss_base, avg_cvss_score = 6.2, msp_factor = 2.5,
edb_factor = 1.5, private_exploit_factor = 0.5,
network_vector_factor = 2, impact_factor = 3)
}
\arguments{
\item{cve_id}{ID of the CVE in question}
\item{cvss_base}{The current CVSS rating of the vuln in question}
\item{avg_cvss_score}{Mean CVSS score of the population}
\item{msp_factor}{Amount to adjust CVSS if Metasploit module is present}
\item{edb_factor}{Amount to adjust CVSS if ExploitDB is present}
\item{private_exploit_factor}{Factor if private exploit exists}
\item{network_vector_factor}{Amount to adjust if not a network vuln}
\item{impact_factor}{Amount to adjust if impact is not confidentiality}
}
\value{
Dataframe with the adjusted vuln
}
\description{
vulnpryr: A package for prying additional utility into your CVSS scores.
Rescale vulnerabilities.
}
\section{vulnpryr functions}{
The vulnpryer functions ...
}
\keyword{datasets}
|
/man/vulnpryr.Rd
|
no_license
|
pombreda/vulnpryr
|
R
| false
| false
| 1,328
|
rd
|
% Generated by roxygen2 (4.1.0): do not edit by hand
% Please edit documentation in R/vulnpryr.R
\docType{package}
\name{vulnpryr}
\alias{my_env}
\alias{vulnpryr}
\alias{vulnpryr-package}
\title{vulnpryr: A package for prying additional utility into your CVSS scores.}
\format{\preformatted{<environment: 0x0000000008e45f80>
}}
\usage{
my_env
vulnpryr(cve_id, cvss_base, avg_cvss_score = 6.2, msp_factor = 2.5,
edb_factor = 1.5, private_exploit_factor = 0.5,
network_vector_factor = 2, impact_factor = 3)
}
\arguments{
\item{cve_id}{ID of the CVE in question}
\item{cvss_base}{The current CVSS rating of the vuln in question}
\item{avg_cvss_score}{Mean CVSS score of the population}
\item{msp_factor}{Amount to adjust CVSS if Metasploit module is present}
\item{edb_factor}{Amount to adjust CVSS if ExploitDB is present}
\item{private_exploit_factor}{Factor if private exploit exists}
\item{network_vector_factor}{Amount to adjust if not a network vuln}
\item{impact_factor}{Amount to adjust if impact is not confidentiality}
}
\value{
Dataframe with the adjusted vuln
}
\description{
vulnpryr: A package for prying additional utility into your CVSS scores.
Rescale vulnerabilities.
}
\section{vulnpryr functions}{
The vulnpryer functions ...
}
\keyword{datasets}
|
# lookup:
# Returns the index (key) of `word` in the global dictionary `dict`
# (a named vector/list), or NULL when the word is not present.
#
# BUG FIX: the original looped over a free variable `n` that is not
# defined inside the function and is not guaranteed to equal
# length(dict) (cleanInput() only sets a *local* `n`).  match() finds
# the first exact name match directly and never reads stale globals.
lookup <- function(word) {
  i <- match(word, names(dict))
  if (is.na(i)) {
    return(NULL)
  }
  i
}
# cleanInput:
# Pre-processes raw user input for N-gram next-word prediction.  Steps:
#   * drop non-ASCII characters,
#   * expand the symbols "&" and "/" to the words "and" / "or",
#   * strip the trailing period from abbreviations (capitalised words of
#     up to four letters, e.g. "Mr.", "U."),
#   * lower-case the text,
#   * replace sentence-ending punctuation (: . ? !) with the <eos> tag and
#     delete all other punctuation except apostrophes,
#   * remove errant (free-standing) apostrophes,
#   * replace digit runs with the <num> tag,
#   * trim a leading/trailing space,
#   * when more than two words remain, keep only the last two (the model
#     conditions on at most a bigram context).
# Errors when the cleaned input contains no words at all.
cleanInput <- function(input) {
  # remove non-ASCII characters
  input <- iconv(input, "latin1", "ASCII", sub = "")
  # revert symbols & and / to words
  input <- gsub("\\&", " and ", input)
  input <- gsub("\\/", " or ", input)
  # remove the period in abbreviations (mid-string and string-initial forms)
  input <- gsub("\\s([A-Z])\\.\\s", " \\1", input)
  input <- gsub("\\s([A-Z][a-z]{1,3})\\.\\s", " \\1", input)
  input <- gsub("^([A-Z])\\.\\s", " \\1", input)
  input <- gsub("^([A-Z][a-z]{1,3})\\.\\s", " \\1", input)
  input <- tolower(input)
  # replace : . ? ! with end-of-sentence tags <eos>; the inner gsub's
  # negative lookahead removes every other punctuation char except
  # apostrophes, leaving :.?! for the outer substitution
  input <- gsub("[:.?!]+", " <eos> ", gsub("(?![:.?!'])[[:punct:]]", " ", input, perl = TRUE))
  # remove errant apostrophes
  input <- gsub(" ' ", " ", input)
  input <- gsub("\\' ", " ", input)
  input <- gsub("^'", "", input)
  # replace numbers with the number tag <num>
  input <- gsub("[0-9]+", " <num> ", input)
  # remove a single leading/trailing space, if present
  input <- gsub("^[ ]", "", input)
  input <- gsub("[ ]$", "", input)
  splitInput <- unlist(strsplit(input, " "))
  n <- length(splitInput)
  if (n == 0) {
    stop("input something..")
  }
  # if more than 2 words in input, keep only the last two.
  # BUG FIX: the original called paste0(..., sep = "") -- paste0() has no
  # `sep` argument, so sep was silently matched into `...` and pasted as
  # an extra (empty) string.  Harmless here, but wrong usage.
  if (n > 2) {
    input <- paste0(splitInput[n - 1], " ", splitInput[n])
  }
  return(input)
}
# predict:
# Given a character string, returns up to `max` candidate next words,
# ranked by the pre-built N-gram tables, using backoff:
# trigram -> bigram -> unigram.
#
# Relies on globals built elsewhere in the app:
#   dict                     named vector mapping words to integer indices
#   uniGram/biGram/triGram   frequency tables with integer word columns
#                            w1/w2/w3; the `biGram[w1 == ind]` subsetting
#                            style suggests data.tables -- TODO confirm.
# Assumes the tables are already sorted by frequency so head() yields the
# most likely continuations -- verify against the table-building script.
# NOTE(review): this definition masks the S3 generic stats::predict();
# consider renaming (e.g. predictNext) to avoid surprising dispatch.
predict <-function(input, max = 15){
input <- cleanInput(input)
inputSplit<- unlist(strsplit(input, " "))
inputSize<-length(inputSplit)
# if input has one word
if(inputSize == 1){
ind<- lookup(input)
# if input not found in dictionary, fall back to the overall most
# frequent words (unigram model)
if (is.null(ind)){
result<- head(uniGram, max)$w1
}
else {
# most frequent words following `input` in the bigram table
result<- head(biGram[w1 == ind], max)$w2
}
}
# if input has two words
else{
indw1<- lookup(inputSplit[1])
indw2<- lookup(inputSplit[2])
subTri<- triGram[w1 == indw1 & w2 == indw2]
if(nrow(subTri) == 0){
# if w1w2 not found in trigram, backoff to bigram on the last word
subBi<- biGram[w1 == indw2]
if (nrow(subBi) == 0){
result<- head(uniGram, max)$w1
}
else {
result<- head(subBi, max)$w2
}
}
else {
result<- head(subTri, max)$w3
}
}
# map the integer indices back to the words themselves
resultWord<- names(dict[result])
resultWord
}
|
/global.R
|
no_license
|
lifengleaf/Next-Word-Predictor
|
R
| false
| false
| 2,890
|
r
|
# lookup:
# Returns the index (key) of `word` in the global dictionary `dict`
# (a named vector/list), or NULL when the word is not present.
#
# BUG FIX: the original looped over a free variable `n` that is not
# defined inside the function and is not guaranteed to equal
# length(dict) (cleanInput() only sets a *local* `n`).  match() finds
# the first exact name match directly and never reads stale globals.
lookup <- function(word) {
  i <- match(word, names(dict))
  if (is.na(i)) {
    return(NULL)
  }
  i
}
# cleanInput:
# Pre-processes raw user input for N-gram next-word prediction.  Steps:
#   * drop non-ASCII characters,
#   * expand the symbols "&" and "/" to the words "and" / "or",
#   * strip the trailing period from abbreviations (capitalised words of
#     up to four letters, e.g. "Mr.", "U."),
#   * lower-case the text,
#   * replace sentence-ending punctuation (: . ? !) with the <eos> tag and
#     delete all other punctuation except apostrophes,
#   * remove errant (free-standing) apostrophes,
#   * replace digit runs with the <num> tag,
#   * trim a leading/trailing space,
#   * when more than two words remain, keep only the last two (the model
#     conditions on at most a bigram context).
# Errors when the cleaned input contains no words at all.
cleanInput <- function(input) {
  # remove non-ASCII characters
  input <- iconv(input, "latin1", "ASCII", sub = "")
  # revert symbols & and / to words
  input <- gsub("\\&", " and ", input)
  input <- gsub("\\/", " or ", input)
  # remove the period in abbreviations (mid-string and string-initial forms)
  input <- gsub("\\s([A-Z])\\.\\s", " \\1", input)
  input <- gsub("\\s([A-Z][a-z]{1,3})\\.\\s", " \\1", input)
  input <- gsub("^([A-Z])\\.\\s", " \\1", input)
  input <- gsub("^([A-Z][a-z]{1,3})\\.\\s", " \\1", input)
  input <- tolower(input)
  # replace : . ? ! with end-of-sentence tags <eos>; the inner gsub's
  # negative lookahead removes every other punctuation char except
  # apostrophes, leaving :.?! for the outer substitution
  input <- gsub("[:.?!]+", " <eos> ", gsub("(?![:.?!'])[[:punct:]]", " ", input, perl = TRUE))
  # remove errant apostrophes
  input <- gsub(" ' ", " ", input)
  input <- gsub("\\' ", " ", input)
  input <- gsub("^'", "", input)
  # replace numbers with the number tag <num>
  input <- gsub("[0-9]+", " <num> ", input)
  # remove a single leading/trailing space, if present
  input <- gsub("^[ ]", "", input)
  input <- gsub("[ ]$", "", input)
  splitInput <- unlist(strsplit(input, " "))
  n <- length(splitInput)
  if (n == 0) {
    stop("input something..")
  }
  # if more than 2 words in input, keep only the last two.
  # BUG FIX: the original called paste0(..., sep = "") -- paste0() has no
  # `sep` argument, so sep was silently matched into `...` and pasted as
  # an extra (empty) string.  Harmless here, but wrong usage.
  if (n > 2) {
    input <- paste0(splitInput[n - 1], " ", splitInput[n])
  }
  return(input)
}
# predict:
# Given a character string, returns up to `max` candidate next words,
# ranked by the pre-built N-gram tables, using backoff:
# trigram -> bigram -> unigram.
#
# Relies on globals built elsewhere in the app:
#   dict                     named vector mapping words to integer indices
#   uniGram/biGram/triGram   frequency tables with integer word columns
#                            w1/w2/w3; the `biGram[w1 == ind]` subsetting
#                            style suggests data.tables -- TODO confirm.
# Assumes the tables are already sorted by frequency so head() yields the
# most likely continuations -- verify against the table-building script.
# NOTE(review): this definition masks the S3 generic stats::predict();
# consider renaming (e.g. predictNext) to avoid surprising dispatch.
predict <-function(input, max = 15){
input <- cleanInput(input)
inputSplit<- unlist(strsplit(input, " "))
inputSize<-length(inputSplit)
# if input has one word
if(inputSize == 1){
ind<- lookup(input)
# if input not found in dictionary, fall back to the overall most
# frequent words (unigram model)
if (is.null(ind)){
result<- head(uniGram, max)$w1
}
else {
# most frequent words following `input` in the bigram table
result<- head(biGram[w1 == ind], max)$w2
}
}
# if input has two words
else{
indw1<- lookup(inputSplit[1])
indw2<- lookup(inputSplit[2])
subTri<- triGram[w1 == indw1 & w2 == indw2]
if(nrow(subTri) == 0){
# if w1w2 not found in trigram, backoff to bigram on the last word
subBi<- biGram[w1 == indw2]
if (nrow(subBi) == 0){
result<- head(uniGram, max)$w1
}
else {
result<- head(subBi, max)$w2
}
}
else {
result<- head(subTri, max)$w3
}
}
# map the integer indices back to the words themselves
resultWord<- names(dict[result])
resultWord
}
|
# AFL-generated fuzz-test case (valgrind run): invokes the internal
# (unexported, hence ':::') compiled routine with a degenerate argument
# list -- m = NULL, zero repetitions, and a 5x1 matrix of extreme double
# values -- to probe the C++ code for crashes/memory errors.
# str() prints the structure of whatever comes back.
testlist <- list(m = NULL, repetitions = 0L, in_m = structure(c(2.31584307392677e+77, 9.53818252170339e+295, 1.22810536108214e+146, 4.77900238332834e-299, 0), .Dim = c(5L, 1L)))
result <- do.call(CNull:::communities_individual_based_sampling_beta,testlist)
str(result)
|
/CNull/inst/testfiles/communities_individual_based_sampling_beta/AFL_communities_individual_based_sampling_beta/communities_individual_based_sampling_beta_valgrind_files/1615835295-test.R
|
no_license
|
akhikolla/updatedatatype-list2
|
R
| false
| false
| 270
|
r
|
# AFL-generated fuzz-test case (valgrind run): invokes the internal
# (unexported, hence ':::') compiled routine with a degenerate argument
# list -- m = NULL, zero repetitions, and a 5x1 matrix of extreme double
# values -- to probe the C++ code for crashes/memory errors.
# str() prints the structure of whatever comes back.
testlist <- list(m = NULL, repetitions = 0L, in_m = structure(c(2.31584307392677e+77, 9.53818252170339e+295, 1.22810536108214e+146, 4.77900238332834e-299, 0), .Dim = c(5L, 1L)))
result <- do.call(CNull:::communities_individual_based_sampling_beta,testlist)
str(result)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/cloudbuild_objects.R
\name{Operation}
\alias{Operation}
\title{Operation Object}
\usage{
Operation(Operation.metadata = NULL, Operation.response = NULL,
error = NULL, done = NULL, metadata = NULL, response = NULL,
name = NULL)
}
\arguments{
\item{Operation.metadata}{The \link{Operation.metadata} object or list of objects}
\item{Operation.response}{The \link{Operation.response} object or list of objects}
\item{error}{The error result of the operation in case of failure}
\item{done}{If the value is `false`, it means the operation is still in progress}
\item{metadata}{Service-specific metadata associated with the operation}
\item{response}{The normal response of the operation in case of success}
\item{name}{The server-assigned name, which is only unique within the same service that}
}
\value{
Operation object
}
\description{
Operation Object
}
\details{
Autogenerated via \code{\link[googleAuthR]{gar_create_api_objects}}
This resource represents a long-running operation that is the result of a network API call.
}
\seealso{
Other Operation functions: \code{\link{Operation.metadata}},
\code{\link{Operation.response}}
}
|
/googlecloudbuildv1.auto/man/Operation.Rd
|
permissive
|
Phippsy/autoGoogleAPI
|
R
| false
| true
| 1,221
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/cloudbuild_objects.R
\name{Operation}
\alias{Operation}
\title{Operation Object}
\usage{
Operation(Operation.metadata = NULL, Operation.response = NULL,
error = NULL, done = NULL, metadata = NULL, response = NULL,
name = NULL)
}
\arguments{
\item{Operation.metadata}{The \link{Operation.metadata} object or list of objects}
\item{Operation.response}{The \link{Operation.response} object or list of objects}
\item{error}{The error result of the operation in case of failure}
\item{done}{If the value is `false`, it means the operation is still in progress}
\item{metadata}{Service-specific metadata associated with the operation}
\item{response}{The normal response of the operation in case of success}
\item{name}{The server-assigned name, which is only unique within the same service that}
}
\value{
Operation object
}
\description{
Operation Object
}
\details{
Autogenerated via \code{\link[googleAuthR]{gar_create_api_objects}}
This resource represents a long-running operation that is the result of a network API call.
}
\seealso{
Other Operation functions: \code{\link{Operation.metadata}},
\code{\link{Operation.response}}
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/number_facets.R
\name{number_facets}
\alias{number_facets}
\title{number_facets}
\usage{
number_facets(string)
}
\arguments{
\item{string}{character vector of facet labels}
}
\description{
automatically labels facets with number before the label
}
|
/man/number_facets.Rd
|
no_license
|
padpadpadpad/MicrobioUoE
|
R
| false
| true
| 326
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/number_facets.R
\name{number_facets}
\alias{number_facets}
\title{number_facets}
\usage{
number_facets(string)
}
\arguments{
\item{string}{character vector of facet labels}
}
\description{
automatically labels facets with number before the label
}
|
# Restricted mean time lost (RMTL) ratios by Elixhauser comorbidity index
# (ECI) stratum, relative to the ECI=0 reference group, plotted over years
# since total hip arthroplasty (THA).
library(ProjectTemplate)
load.project()
# `rmst_rmtl` is created by load.project(); assumed to be a nested data
# frame with a `strata` column and a list-column `data` holding columns
# years/rmtl/se -- TODO confirm against the munge scripts.
rmst_rmtl <- mutate(rmst_rmtl, strata = as.factor(strata))
# Reference values from ECI = 0
rmtl_ref <-
rmst_rmtl %>%
filter(strata == "ECI=0") %>%
select(data) %>%
pluck(1, 1) %>%
select(years, rmtl, se)
# Relative RMTL compared to ECI = 0: join the reference curve onto every
# other stratum by year, then form the ratio and a 95% Wald interval
rmtl_rr <-
rmst_rmtl %>%
filter(strata != "ECI=0") %>%
mutate(
data = map(data, left_join, rmtl_ref, "years")
) %>%
unnest(data) %>%
transmute(
strata,
years,
# ratio of each stratum's RMTL (rmtl.x) to the reference RMTL (rmtl.y)
rr = rmtl.x / rmtl.y,
# standard error of the ratio -- appears to be a delta-method
# approximation; verify the derivation
std = sqrt((se.x ^ 2 + rr ^ 2 * se.y ^ 2) / rmtl.y ^ 2),
ll = rr - 1.96 * std,
ul = rr + 1.96 * std,
Elixhauser = factor(strata, levels(strata), gsub("ECI=", "", levels(strata))),
) %>%
filter(years >= 1)
cache("rmtl_rr")
# RMTL Figure -----------------------------------------------------------------
rmtl_rr %>%
ggplot(aes(years, rr)) +
geom_line(aes(col = Elixhauser)) +
# reference line at ratio 1 (no difference from ECI=0)
geom_hline(yintercept = 1, color = scales::hue_pal()(1)) +
geom_ribbon(
# NOTE(review): alpha = 0.1 inside aes() maps a constant to the alpha
# aesthetic; transparency settings normally go outside aes() -- confirm
# the rendered opacity is as intended.
aes(ymin = ll, ymax = ul, fill = Elixhauser, alpha = 0.1),
show.legend = FALSE
) +
ylab("Restricted Mean Time Lost Ratio") +
xlab("Years since THA") +
theme_minimal() +
theme(
legend.position = c(1, 1),
legend.justification = c(1, 1)
) +
scale_color_discrete(drop = FALSE) +
scale_fill_discrete(drop = FALSE) +
scale_x_continuous(breaks = 0:10, minor_breaks = NULL) +
scale_y_continuous(breaks = 0:8, minor_breaks = NULL)
ggsave("graphs/rmtl.png", height = 10, width = 10, units = "cm")
ggsave("graphs/rmtl.tiff", height = 10, width = 10, units = "cm", dpi = 1200, compression = "lzw")
|
/src/rmtl.R
|
no_license
|
eribul/rmst
|
R
| false
| false
| 1,670
|
r
|
# Restricted mean time lost (RMTL) ratios by Elixhauser comorbidity index
# (ECI) stratum, relative to the ECI=0 reference group, plotted over years
# since total hip arthroplasty (THA).
library(ProjectTemplate)
load.project()
# `rmst_rmtl` is created by load.project(); assumed to be a nested data
# frame with a `strata` column and a list-column `data` holding columns
# years/rmtl/se -- TODO confirm against the munge scripts.
rmst_rmtl <- mutate(rmst_rmtl, strata = as.factor(strata))
# Reference values from ECI = 0
rmtl_ref <-
rmst_rmtl %>%
filter(strata == "ECI=0") %>%
select(data) %>%
pluck(1, 1) %>%
select(years, rmtl, se)
# Relative RMTL compared to ECI = 0: join the reference curve onto every
# other stratum by year, then form the ratio and a 95% Wald interval
rmtl_rr <-
rmst_rmtl %>%
filter(strata != "ECI=0") %>%
mutate(
data = map(data, left_join, rmtl_ref, "years")
) %>%
unnest(data) %>%
transmute(
strata,
years,
# ratio of each stratum's RMTL (rmtl.x) to the reference RMTL (rmtl.y)
rr = rmtl.x / rmtl.y,
# standard error of the ratio -- appears to be a delta-method
# approximation; verify the derivation
std = sqrt((se.x ^ 2 + rr ^ 2 * se.y ^ 2) / rmtl.y ^ 2),
ll = rr - 1.96 * std,
ul = rr + 1.96 * std,
Elixhauser = factor(strata, levels(strata), gsub("ECI=", "", levels(strata))),
) %>%
filter(years >= 1)
cache("rmtl_rr")
# RMTL Figure -----------------------------------------------------------------
rmtl_rr %>%
ggplot(aes(years, rr)) +
geom_line(aes(col = Elixhauser)) +
# reference line at ratio 1 (no difference from ECI=0)
geom_hline(yintercept = 1, color = scales::hue_pal()(1)) +
geom_ribbon(
# NOTE(review): alpha = 0.1 inside aes() maps a constant to the alpha
# aesthetic; transparency settings normally go outside aes() -- confirm
# the rendered opacity is as intended.
aes(ymin = ll, ymax = ul, fill = Elixhauser, alpha = 0.1),
show.legend = FALSE
) +
ylab("Restricted Mean Time Lost Ratio") +
xlab("Years since THA") +
theme_minimal() +
theme(
legend.position = c(1, 1),
legend.justification = c(1, 1)
) +
scale_color_discrete(drop = FALSE) +
scale_fill_discrete(drop = FALSE) +
scale_x_continuous(breaks = 0:10, minor_breaks = NULL) +
scale_y_continuous(breaks = 0:8, minor_breaks = NULL)
ggsave("graphs/rmtl.png", height = 10, width = 10, units = "cm")
ggsave("graphs/rmtl.tiff", height = 10, width = 10, units = "cm", dpi = 1200, compression = "lzw")
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/Fecundity.R
\name{Fecundity}
\alias{Fecundity}
\title{Kernel Functions - Fecundity}
\usage{
Fecundity(z, pars, date)
}
\arguments{
\item{z}{Size at the beginning of the timestep [float]}
\item{pars}{Data.frame containing the date-indexed parameters[data.frame]}
\item{date}{Ordinal day to reference proper 'pars' date-indexed parameters [integer]}
}
\value{
Number of hatchlings per spawn[binary]
}
\description{
If an individual reaches 'z_repro' they reproduce with probability provided by the \code{\link{Spawning}} function, and have 'hatch_per_spawn' hatchlings
}
\seealso{
Other Kernel Functions:
\code{\link{HatchlingGrowth}()},
\code{\link{Spawning}()},
\code{\link{Survival}()}
}
\concept{Kernel Functions}
|
/man/Fecundity.Rd
|
no_license
|
npollesch/FishToxTranslator
|
R
| false
| true
| 797
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/Fecundity.R
\name{Fecundity}
\alias{Fecundity}
\title{Kernel Functions - Fecundity}
\usage{
Fecundity(z, pars, date)
}
\arguments{
\item{z}{Size at the beginning of the timestep [float]}
\item{pars}{Data.frame containing the date-indexed parameters[data.frame]}
\item{date}{Ordinal day to reference proper 'pars' date-indexed parameters [integer]}
}
\value{
Number of hatchlings per spawn[binary]
}
\description{
If an individual reaches 'z_repro' they reproduce with probability provided by the \code{\link{Spawning}} function, and have 'hatch_per_spawn' hatchlings
}
\seealso{
Other Kernel Functions:
\code{\link{HatchlingGrowth}()},
\code{\link{Spawning}()},
\code{\link{Survival}()}
}
\concept{Kernel Functions}
|
library(shiny)
library(leaflet)
library(RColorBrewer)
library(scales)
library(lattice)
library(dplyr)
shinyServer(function(input, output, session) {
## Interactive Map ###########################################
# Minimum circle radius (in map units) so that municipalities with few
# travellers are still visible on the map.
threshold <- 4000
# Create the base map (original Spanish: "Creamos el mapa"):
# a Leaflet map with Mapbox tiles, initially centred on Madrid.
output$map <- renderLeaflet({
leaflet() %>%
addTiles(
urlTemplate = "//{s}.tiles.mapbox.com/v3/jcheng.map-5ebohr46/{z}/{x}/{y}.png",
attribution = 'Maps by <a href="http://www.mapbox.com/">Mapbox</a>'
) %>%
setView(lng = -3.70, lat = 40.41, zoom = 6) #Madrid
})
# Filter the data according to the input parameters (original Spanish:
# "FILTRAMOS LOS DATOS EN FUNCION DE LOS PARAMETROS DE ENTRADA") and
# redraw the circles on the map whenever month/country inputs change.
observe({
# NOTE(review): Ft_res_sal is read here before it is assigned below, so
# this resolves to a global Ft_res_sal if one exists -- and errors
# otherwise.  Confirm a global of that name is created at app start-up.
if (nrow(Ft_res_sal) > 0 )
{
leafletProxy("map", data = Ft_res_sal) %>% clearShapes() %>% clearControls()
}
# selected month
Mes <- input$mes
# selected country of residence
Pais <- input$pais
# Pais <- "Alemania"
PaisNum <- ListPais[ListPais$Pais == Pais, ]$Clave.Pais
# PaisNum <- 401
# Mes <- 1
Ft_res_sal <- Ft_res[Ft_res$MES == Mes & Ft_res$PAIS.RESIDENCIAS == PaisNum ,]
# circle radius proportional to traveller count, floored at `threshold`
radius <- Ft_res_sal$VIAJEROS / max(Ft_res_sal$VIAJEROS) * 50000
radius <- ifelse(radius <= threshold, threshold, radius)
if (nrow(Ft_res_sal) > 10 )
{
colorData <- Ft_res_sal$VIAJEROS
pal <- colorBin("Spectral", colorData, 7, pretty = FALSE)
# draw the circles, coloured by traveller count
leafletProxy("map", data = Ft_res_sal) %>%
clearShapes() %>%
addCircles(~longitude, ~latitude, radius=radius, layerId=~VIAJEROS,
stroke=FALSE, fillOpacity=0.4, fillColor=pal(colorData))
# draw the legend (only when the user has enabled it)
proxy <- leafletProxy("map", data = Ft_res_sal)
proxy %>% clearControls()
if (input$legend)
{
colorData <- Ft_res_sal$VIAJEROS
pal <- colorBin("Spectral", colorData, 3, pretty = FALSE)
proxy %>% addLegend(position = "bottomright",
pal = pal, values = Ft_res_sal$VIAJEROS)
}
}
else
{
# few rows (<= 10): draw the circles in a single fixed colour, no legend
leafletProxy("map", data = Ft_res_sal) %>%
clearShapes() %>%
addCircles(~longitude, ~latitude, radius=radius, layerId=~VIAJEROS,
stroke=FALSE, fillOpacity=0.7, fillColor= "#c39bcc")
}
})
mostrarDetalle <- function(id, lat, lng, mes, pais) {
PaisNum <- ListPais[ListPais$Pais == pais, ]$Clave.Pais
selectedFT <- Ft_res[Ft_res$longitude == lng & Ft_res$latitude == lat & Ft_res$MES == mes & Ft_res$PAIS.RESIDENCIAS == PaisNum, ]
content <- as.character(tagList(
tags$h4("MUNICIPIO:", selectedFT$NOMBRE),
tags$br(),
sprintf("Viajeros: %s", selectedFT$VIAJEROS),
tags$br()
# sprintf("Media de noches : %s%%", ____), tags$br()
))
leafletProxy("map") %>% addPopups(lng, lat, content, layerId =selectedFT)
}
# Mostramos el pop-up de informacion
observe({
leafletProxy("map") %>% clearPopups()
event <- input$map_shape_click
if (is.null(event))
return()
isolate({
mostrarDetalle(event$id, event$lat, event$lng, input$mes, input$pais)
})
})
})
|
/Frontur_CM/server.R
|
no_license
|
dherranznavio/KSCHOOL-TFM
|
R
| false
| false
| 3,276
|
r
|
library(shiny)
library(leaflet)
library(RColorBrewer)
library(scales)
library(lattice)
library(dplyr)
# Shiny server: interactive Leaflet map of travellers (VIAJEROS) per
# municipality, filtered by month (input$mes) and country of residence
# (input$pais). Relies on the globals `Ft_res` (fact table with columns
# MES, PAIS.RESIDENCIAS, VIAJEROS, longitude, latitude, NOMBRE) and
# `ListPais` (country name -> Clave.Pais lookup) being loaded elsewhere,
# presumably in global.R -- TODO confirm.
shinyServer(function(input, output, session) {
## Interactive Map ###########################################
# Minimum circle radius so low-traffic municipalities stay visible.
threshold <- 4000
# Create the base map
output$map <- renderLeaflet({
leaflet() %>%
addTiles(
urlTemplate = "//{s}.tiles.mapbox.com/v3/jcheng.map-5ebohr46/{z}/{x}/{y}.png",
attribution = 'Maps by <a href="http://www.mapbox.com/">Mapbox</a>'
) %>%
setView(lng = -3.70, lat = 40.41, zoom = 6) #Madrid
})
# Filter the data according to the input parameters
observe({
# NOTE(review): `Ft_res_sal` is read here before it is assigned below;
# this only works if a variable of that name already exists in the
# enclosing/global environment -- verify.
if (nrow(Ft_res_sal) > 0 )
{
leafletProxy("map", data = Ft_res_sal) %>% clearShapes() %>% clearControls()
}
# Selected month
Mes <- input$mes
# Selected country
Pais <- input$pais
# Pais <- "Alemania"
PaisNum <- ListPais[ListPais$Pais == Pais, ]$Clave.Pais
# PaisNum <- 401
# Mes <- 1
Ft_res_sal <- Ft_res[Ft_res$MES == Mes & Ft_res$PAIS.RESIDENCIAS == PaisNum ,]
# Circle radius proportional to traveller count, floored at `threshold`
radius <- Ft_res_sal$VIAJEROS / max(Ft_res_sal$VIAJEROS) * 50000
radius <- ifelse(radius <= threshold, threshold, radius)
if (nrow(Ft_res_sal) > 10 )
{
colorData <- Ft_res_sal$VIAJEROS
pal <- colorBin("Spectral", colorData, 7, pretty = FALSE)
# Draw the circles, coloured by traveller count
leafletProxy("map", data = Ft_res_sal) %>%
clearShapes() %>%
addCircles(~longitude, ~latitude, radius=radius, layerId=~VIAJEROS,
stroke=FALSE, fillOpacity=0.4, fillColor=pal(colorData))
# Draw the legend (optional, controlled by input$legend)
proxy <- leafletProxy("map", data = Ft_res_sal)
proxy %>% clearControls()
if (input$legend)
{
colorData <- Ft_res_sal$VIAJEROS
pal <- colorBin("Spectral", colorData, 3, pretty = FALSE)
proxy %>% addLegend(position = "bottomright",
pal = pal, values = Ft_res_sal$VIAJEROS)
}
}
else
{
# Few data points: draw plain circles in a single fixed colour
leafletProxy("map", data = Ft_res_sal) %>%
clearShapes() %>%
addCircles(~longitude, ~latitude, radius=radius, layerId=~VIAJEROS,
stroke=FALSE, fillOpacity=0.7, fillColor= "#c39bcc")
}
})
# Build and show a popup with the details of the clicked municipality.
mostrarDetalle <- function(id, lat, lng, mes, pais) {
PaisNum <- ListPais[ListPais$Pais == pais, ]$Clave.Pais
selectedFT <- Ft_res[Ft_res$longitude == lng & Ft_res$latitude == lat & Ft_res$MES == mes & Ft_res$PAIS.RESIDENCIAS == PaisNum, ]
content <- as.character(tagList(
tags$h4("MUNICIPIO:", selectedFT$NOMBRE),
tags$br(),
sprintf("Viajeros: %s", selectedFT$VIAJEROS),
tags$br()
# sprintf("Media de noches : %s%%", ____), tags$br()
))
leafletProxy("map") %>% addPopups(lng, lat, content, layerId =selectedFT)
}
# Show the information pop-up when a map shape is clicked
observe({
leafletProxy("map") %>% clearPopups()
event <- input$map_shape_click
if (is.null(event))
return()
isolate({
mostrarDetalle(event$id, event$lat, event$lng, input$mes, input$pais)
})
})
})
|
#PLOT 4
# Reads the UCI household power consumption data and draws a 2x2 panel of
# time-series plots (active power, voltage, energy sub-metering, reactive
# power) for 2007-02-01/02 into plot4.png. Expects
# household_power_consumption.txt in the working directory.
the_data <- read.table("household_power_consumption.txt", sep=";", header=TRUE, na.strings = "?")
# keep only the two days of interest (Date column is d/m/Y text)
newdata <- subset(the_data, Date == '1/2/2007' | Date == '2/2/2007')
#let's have 2 rows of plots and one column of plots
png('plot4.png')
par(mfrow = c(2,2))
par(mar=rep(3.9,4))
# combine Date + Time into POSIXlt timestamps for the x axis
dates <- strptime(paste(as.Date(newdata$Date, "%d/%m/%Y"), newdata$Time), format="%Y-%m-%d %H:%M:%S")
# Global active power line plot
# NOTE(review): the source column is already expressed in kilowatts, so
# dividing by 1000 looks suspicious -- confirm against the expected figure.
plot(dates, as.numeric(newdata$Global_active_power)/1000, type="l", ylab="Global Active Power (kilowatts)", xlab="")
#Voltage Plot
plot(dates, as.numeric(newdata$Voltage), type="l", ylab="Voltage", xlab="datetime")
# Sub-metering panel: plot series 1, then overlay series 2 and 3
plot(dates,newdata$Sub_metering_1,type="l", ylab="Energy sub metering", xlab="")
# overlay the remaining sub-metering series as lines
points(dates, newdata$Sub_metering_2, col="red", type="l")
points(dates, newdata$Sub_metering_3, col="blue", type="l")
legend("topright",
legend=c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"),
lty=c(1,1,1), # gives the legend appropriate symbols (lines)
col=c("black","red","blue"), bty="n", cex=0.8) # gives the legend lines the correct color and width
#Global Reactive Power Plot
plot(dates, as.numeric(newdata$Global_reactive_power), type="l", ylab="Global_reactive_power", xlab="datetime")
dev.off()
|
/plot4.R
|
no_license
|
kukocCoursera/ExData_Plotting1
|
R
| false
| false
| 1,365
|
r
|
#PLOT 4
# Reads the UCI household power consumption data and draws a 2x2 panel of
# time-series plots (active power, voltage, energy sub-metering, reactive
# power) for 2007-02-01/02 into plot4.png. Expects
# household_power_consumption.txt in the working directory.
the_data <- read.table("household_power_consumption.txt", sep=";", header=TRUE, na.strings = "?")
# keep only the two days of interest (Date column is d/m/Y text)
newdata <- subset(the_data, Date == '1/2/2007' | Date == '2/2/2007')
#let's have 2 rows of plots and one column of plots
png('plot4.png')
par(mfrow = c(2,2))
par(mar=rep(3.9,4))
# combine Date + Time into POSIXlt timestamps for the x axis
dates <- strptime(paste(as.Date(newdata$Date, "%d/%m/%Y"), newdata$Time), format="%Y-%m-%d %H:%M:%S")
# Global active power line plot
# NOTE(review): the source column is already expressed in kilowatts, so
# dividing by 1000 looks suspicious -- confirm against the expected figure.
plot(dates, as.numeric(newdata$Global_active_power)/1000, type="l", ylab="Global Active Power (kilowatts)", xlab="")
#Voltage Plot
plot(dates, as.numeric(newdata$Voltage), type="l", ylab="Voltage", xlab="datetime")
# Sub-metering panel: plot series 1, then overlay series 2 and 3
plot(dates,newdata$Sub_metering_1,type="l", ylab="Energy sub metering", xlab="")
# overlay the remaining sub-metering series as lines
points(dates, newdata$Sub_metering_2, col="red", type="l")
points(dates, newdata$Sub_metering_3, col="blue", type="l")
legend("topright",
legend=c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"),
lty=c(1,1,1), # gives the legend appropriate symbols (lines)
col=c("black","red","blue"), bty="n", cex=0.8) # gives the legend lines the correct color and width
#Global Reactive Power Plot
plot(dates, as.numeric(newdata$Global_reactive_power), type="l", ylab="Global_reactive_power", xlab="datetime")
dev.off()
|
# Package hook: unload the compiled rankdist shared library when the
# package namespace is unloaded.
.onUnload <- function (libpath) {
library.dynam.unload("rankdist", libpath)
}
# Convert a parameter vector into its reverse-cumulative weight form:
# w[i] = sum(param.true[i:n]). Inverse of wToparam().
paramTow = function(param.true){
# reverse, take the running sum, and reverse back (index-based rev)
reversed_cumulative <- cumsum(param.true[rev(seq_along(param.true))])
reversed_cumulative[rev(seq_along(reversed_cumulative))]
}
# Inverse of paramTow(): recover the parameter vector from the weight
# vector, where param[i] = w[i] - w[i+1] for i < n and param[n] = w[n].
#
# Fix: the original indexed with 1:(n-1) which for a length-1 input is
# 1:0 and made `param.true[1:0] <- numeric(0)` error ("replacement has
# length zero"). Guarding on length keeps multi-element behaviour
# unchanged and makes length-1 (and empty) inputs work.
#
# @param w.true Numeric weight vector.
# @return Numeric vector of the same length as w.true.
wToparam = function(w.true){
param.true = numeric(length(w.true))
if (length(w.true) > 1) {
# successive differences of the weights fill the first n-1 entries
param.true[1:(length(w.true)-1)] = -diff(w.true)
}
if (length(w.true) > 0) {
# the last entry carries the remaining weight
param.true[length(param.true)] = w.true[length(w.true)]
}
param.true
}
# Replace the count vector of a RankData object and refresh the derived
# fields: total observation count (nobs) and, for top-q data with more
# than one q level, the per-level observation totals (subobs).
#
# @param dat A RankData S4 object (slots used: count, nobs, topq, nobj,
#   q_ind, subobs).
# @param count New vector of ranking counts aligned with the rows of
#   dat@ranking.
# @return The updated RankData object.
UpdateCount = function(dat,count){
dat@count = count
dat@nobs = sum(count)
# only top-q (incomplete) data with several q levels keeps per-level totals
if (length(dat@topq)!=1 && min(dat@topq) < dat@nobj-1){
dat@subobs = numeric(length(dat@topq))
for (i in 1:length(dat@topq)){
# rows q_ind[i]:(q_ind[i+1]-1) belong to q level i
dat@subobs[i] = sum(dat@count[ dat@q_ind[i]: (dat@q_ind[i+1]-1) ])
}
}
dat
}
# used in SearchPi0: attach data-set metadata (observation and object
# counts) and the modal ranking pi0 to an optimisation result, turning it
# into a model object.
AddInfo=function(solveres,dat,pi0){
solveres[["nobs"]] <- dat@nobs
solveres[["nobj"]] <- dat@nobj
solveres[["pi0.ranking"]] <- pi0
solveres
}
# neighbour for incomplete rankings
#
# Greedy local search for the modal ranking pi0. Starting from the
# initial ranking, repeatedly fit a single-cluster model on each
# Kendall/Cayley neighbour and move to a neighbour that improves the
# goodness criterion (ctrl@SearchPi0_FUN), until no neighbour improves,
# all neighbours have been visited, or the step limit is reached.
# Already-visited rankings are memoised in a hash table so each is
# fitted at most once.
#
# @param dat RankData object holding the observed rankings.
# @param init RankInit object supplying modal_ranking.init.
# @param ctrl RankControl object with the SearchPi0_* tuning fields.
# @return The best single-cluster model found, with $SearchPi0_step added.
SearchPi0=function(dat,init,ctrl){
# if (class(ctrl)=="RankControlWtau"){
# mod <- SingleClusterModel(dat,init,ctrl,0)
# return(mod)
# }
n = dat@nobj
curr_best_ranking = init@modal_ranking.init[[1]]
if (ctrl@SearchPi0_show_message){
message("<<< initial ranking ",curr_best_ranking," >>>")
}
# for top-q data, collapse all ranks beyond q+1 into the tie value q+1
if (max(dat@topq) < n-1){
curr_best_ranking[curr_best_ranking>max(dat@topq)+1]=max(dat@topq)+1
}
curr_solve <- SingleClusterModel(dat,init,ctrl,curr_best_ranking)
curr_model = AddInfo(curr_solve,dat,curr_best_ranking)
FUN = ctrl@SearchPi0_FUN
curr_goodness = FUN(curr_model)
# memoise visited rankings (keyed by RanktoHash) to avoid refitting
hashtable = hash::hash()
hash::.set(hashtable,keys = RanktoHash(curr_best_ranking),values=TRUE)
SearchPi0_step = 0
while(TRUE){
SearchPi0_step = SearchPi0_step+1
if (SearchPi0_step > ctrl@SearchPi0_limit){
if (ctrl@SearchPi0_show_message){
# NOTE(review): `this_ranking` is undefined if the limit is hit
# before the neighbour loop below has ever run, which would make
# this message() call error -- confirm intended behaviour.
message("Search Pi0 limit has been reached. Stop at current best: ",this_ranking)
}
break
}
# enumerate the neighbourhood of the current best ranking
if (ctrl@SearchPi0_neighbour=="Cayley"){
neighbours = CayleyNeighbour(curr_best_ranking)
} else {
neighbours = KendallNeighbour(curr_best_ranking)
}
testkeys = RanktoHash(neighbours)
tested = hash::has.key(testkeys,hashtable)
# all neighbours already visited: local optimum reached
if (all(tested)) break
hash::.set(hashtable,keys=testkeys[!tested],values=rep(TRUE,length(testkeys[!tested])))
for (i in 1:nrow(neighbours)){
# tested neighbours cannot be better
if (tested[i]) next
this_ranking = neighbours[i,]
if (ctrl@SearchPi0_show_message){
message("\tNow Checking Neighbour ",this_ranking)
}
this_solve <- SingleClusterModel(dat,init,ctrl,this_ranking)
this_model = AddInfo(this_solve,dat,this_ranking)
this_goodness = FUN(this_model)
if (this_goodness > curr_goodness){
# neighbour improves the criterion: adopt it as the new best
curr_goodness = this_goodness
curr_best_ranking = this_ranking
curr_model = this_model
if (ctrl@SearchPi0_show_message){
message("***Best changed to ",curr_best_ranking,"***")
}
# fast traversal: move immediately instead of scanning all neighbours
if (ctrl@SearchPi0_fast_traversal)
break
}
}
if (ctrl@SearchPi0_show_message){
message("--> Moved to ",curr_best_ranking," <--")
}
}
curr_model$SearchPi0_step = SearchPi0_step
curr_model
}
# TODO need to change: does not work for d==1
# Build the list of d lower-triangular T matrices used by GHC().
# Element d is the base pattern (row r holds 1..r); each earlier element
# is obtained by shifting the next one down a row and refilling the
# diagonal.
t.gen = function(d){
# sub-diagonal identity: left-multiplying shifts rows down by one
shift <- matrix(0, nrow = d, ncol = d)
shift[2:d, 1:(d - 1)] <- diag(rep(1, d - 1))
out <- vector("list", d)
base <- matrix(rep(1:d, d), nrow = d, ncol = d, byrow = TRUE)
base[upper.tri(base)] <- 0
out[[d]] <- base
for (k in (d - 1):1) {
nxt <- shift %*% out[[k + 1]]
# diagonal: leading zeros, then 1..k
diag(nxt) <- c(rep(0, d - k), 1:k)
nxt[upper.tri(nxt)] <- 0
out[[k]] <- nxt
}
out
}
# Gradient of the normalising-constant term with respect to the parameter
# vector, using the T-matrix list produced by t.gen().
#
# @param param Numeric parameter vector of length d (d = t - 1).
# @param t.lst List of d lower-triangular matrices from t.gen().
# @return Numeric gradient vector of length d.
GHC = function(param,t.lst){
d = length(param) # d = t - 1
# K[r, c] = exp(-sum_i param[i] * t.lst[[i]][r, c]), lower triangle only
exponent <- Reduce(`+`, lapply(seq_len(d), function(i) -param[i] * t.lst[[i]]))
K <- exp(exponent)
K[upper.tri(K)] <- 0
denom <- rowSums(K) + 1
# column i holds the row sums of -K * t.lst[[i]]
B <- vapply(seq_len(d), function(i) rowSums(-K * t.lst[[i]]), numeric(d))
# gradient[i] = sum over rows of B[, i] / denom
colSums(B / denom)
}
# Weighted Kendall distance between two orderings p1 and p2, using the
# position-weight vector w. For each item, the number of items lying on
# opposite sides of it in the two orderings determines the span of
# weights charged; each item contributes half the summed weight cost.
KwDist = function(p1, p2, w){
n <- length(p1)
ord1 <- order(p1)
ord2 <- order(p2)
total <- 0
for (item in p2) {
loc1 <- which(p1 == item)
loc2 <- which(p2 == item)
# signed offsets of every position relative to this item, in rank order
off1 <- (seq_len(n) - loc1)[ord1]
off2 <- (seq_len(n) - loc2)[ord2]
# items sitting on opposite sides of `item` in the two orderings
disagree <- sum(off1 * off2 < 0)
mid <- (loc1 + loc2 + disagree) / 2
cost1 <- if (loc1 <= mid - 1) sum(w[loc1:(mid - 1)]) else 0
cost2 <- if (loc2 <= mid - 1) sum(w[loc2:(mid - 1)]) else 0
total <- total + (cost1 + cost2) / 2
}
total
}
# Expand a top-q (partially tied) ranking data set into complete rankings
# by splitting each top-q ranking's count equally among all consistent
# tie-breakings. Complete rankings already present keep their counts and
# receive the fractional increments.
#
# @param dat A RankData S4 object (slots used: topq, nobj, q_ind, ranking,
#   count).
# @return A new RankData object of complete rankings with non-zero counts.
#   NOTE(review): the value is returned via the final assignment, so it is
#   returned invisibly -- confirm callers do not rely on auto-printing.
BreakTieEqualProb <- function(dat){
ind_comp <- which(dat@topq == dat@nobj-1)
if (max(dat@topq) == dat@nobj-1){
# the data already contain a block of complete rankings: start from it
ind_comp_start <- dat@q_ind[ind_comp]
ind_comp_end <- dat@q_ind[ind_comp + 1] - 1
comp_ranking <- dat@ranking[ind_comp_start:ind_comp_end, ]
comp_count <- dat@count[ind_comp_start:ind_comp_end]
} else {
# otherwise enumerate every complete ranking with zero initial count
comp_ranking <- permute::allPerms(dat@nobj)
comp_ranking <- rbind(1:dat@nobj, comp_ranking)
comp_count <- rep(0, nrow(comp_ranking))
}
comp_hash <- RanktoHash(comp_ranking)
for (i in 1:length(dat@topq)){
# complete-ranking block needs no tie breaking
if(dat@topq[i] == dat@nobj-1)
next
ind_start <- dat@q_ind[i]
ind_end <- dat@q_ind[i+1] - 1
this_q <- dat@topq[i]
# each tie-breaking receives an equal share of the original count
this_inc <- 1/factorial(dat@nobj - this_q)
# generate permutations for tied group
tie_perm <- permute::allPerms((this_q+1):dat@nobj) + this_q
tie_perm <- rbind(tie_perm, (this_q+1):dat@nobj)
# iterate through top-q rankings
for (this_partial_ind in ind_start:ind_end){
this_partial <- dat@ranking[this_partial_ind, ]
this_count <- dat@count[this_partial_ind]
ind_tie <- which(this_partial == this_q + 1)
# iterate through possible tie-breakings
for (ind_break in 1:nrow(tie_perm)){
copy_partial <- this_partial
this_break <- tie_perm[ind_break, ]
ptr_break <- 1
# iterate through tied positions
for (this_tie_ind in ind_tie){
copy_partial[this_tie_ind] = this_break[ptr_break]
ptr_break <- ptr_break + 1
}
# credit the matching complete ranking with the fractional count
this_hash <- rankdist::RanktoHash(copy_partial)
ind_incre <- which(comp_hash == this_hash)
comp_count[ind_incre] = comp_count[ind_incre] + this_inc*this_count
}
}
}
# handle complete rankings
# drop rankings that never received any count
# (note: `!= 0` and `> 0` select the same rows since counts are
# non-negative -- kept as written)
ind_nonempty_count = which(comp_count != 0)
comp_count = comp_count[comp_count > 0]
comp_ranking = comp_ranking[ind_nonempty_count, ]
comp_dat <- new("RankData", ranking=comp_ranking, count=comp_count)
}
|
/rankdist/R/utils.R
|
no_license
|
ingted/R-Examples
|
R
| false
| false
| 7,475
|
r
|
# Utility functions for the rankdist package: package unload hook,
# parameter/weight conversions, greedy modal-ranking search (SearchPi0)
# with helpers, weighted Kendall distance, and equal-probability tie
# breaking for top-q rankings.
# Package hook: unload the compiled rankdist shared library.
.onUnload <- function (libpath) {
library.dynam.unload("rankdist", libpath)
}
# w[i] = sum(param.true[i:n]); inverse of wToparam()
paramTow = function(param.true){
w.true = rev(cumsum(rev(param.true)))
w.true
}
# param[i] = w[i] - w[i+1] for i < n; param[n] = w[n]
# NOTE(review): errors for length-1 input because 1:(1-1) is 1:0 -- verify.
wToparam = function(w.true){
param.true = numeric(length(w.true))
param.true[1:(length(w.true)-1)] = -diff(w.true)
param.true[length(param.true)] = w.true[length(w.true)]
param.true
}
# Replace the count slot of a RankData object and refresh nobs/subobs.
UpdateCount = function(dat,count){
dat@count = count
dat@nobs = sum(count)
if (length(dat@topq)!=1 && min(dat@topq) < dat@nobj-1){
dat@subobs = numeric(length(dat@topq))
for (i in 1:length(dat@topq)){
dat@subobs[i] = sum(dat@count[ dat@q_ind[i]: (dat@q_ind[i+1]-1) ])
}
}
dat
}
# used in SearchPi0: make a optimization result into a model
AddInfo=function(solveres,dat,pi0){
solveres$nobs = dat@nobs
solveres$nobj = dat@nobj
solveres$pi0.ranking = pi0
solveres
}
# neighbour for incomplete rankings
# Greedy local search for the modal ranking: repeatedly fit each
# Kendall/Cayley neighbour and move while the goodness criterion
# (ctrl@SearchPi0_FUN) improves; visited rankings are memoised.
SearchPi0=function(dat,init,ctrl){
# if (class(ctrl)=="RankControlWtau"){
# mod <- SingleClusterModel(dat,init,ctrl,0)
# return(mod)
# }
n = dat@nobj
curr_best_ranking = init@modal_ranking.init[[1]]
if (ctrl@SearchPi0_show_message){
message("<<< initial ranking ",curr_best_ranking," >>>")
}
if (max(dat@topq) < n-1){
curr_best_ranking[curr_best_ranking>max(dat@topq)+1]=max(dat@topq)+1
}
curr_solve <- SingleClusterModel(dat,init,ctrl,curr_best_ranking)
curr_model = AddInfo(curr_solve,dat,curr_best_ranking)
FUN = ctrl@SearchPi0_FUN
curr_goodness = FUN(curr_model)
hashtable = hash::hash()
hash::.set(hashtable,keys = RanktoHash(curr_best_ranking),values=TRUE)
SearchPi0_step = 0
while(TRUE){
SearchPi0_step = SearchPi0_step+1
if (SearchPi0_step > ctrl@SearchPi0_limit){
if (ctrl@SearchPi0_show_message){
# NOTE(review): `this_ranking` is undefined if the limit is hit
# before the neighbour loop has ever run -- confirm.
message("Search Pi0 limit has been reached. Stop at current best: ",this_ranking)
}
break
}
if (ctrl@SearchPi0_neighbour=="Cayley"){
neighbours = CayleyNeighbour(curr_best_ranking)
} else {
neighbours = KendallNeighbour(curr_best_ranking)
}
testkeys = RanktoHash(neighbours)
tested = hash::has.key(testkeys,hashtable)
if (all(tested)) break
hash::.set(hashtable,keys=testkeys[!tested],values=rep(TRUE,length(testkeys[!tested])))
for (i in 1:nrow(neighbours)){
# tested neighbours cannot be better
if (tested[i]) next
this_ranking = neighbours[i,]
if (ctrl@SearchPi0_show_message){
message("\tNow Checking Neighbour ",this_ranking)
}
this_solve <- SingleClusterModel(dat,init,ctrl,this_ranking)
this_model = AddInfo(this_solve,dat,this_ranking)
this_goodness = FUN(this_model)
if (this_goodness > curr_goodness){
curr_goodness = this_goodness
curr_best_ranking = this_ranking
curr_model = this_model
if (ctrl@SearchPi0_show_message){
message("***Best changed to ",curr_best_ranking,"***")
}
if (ctrl@SearchPi0_fast_traversal)
break
}
}
if (ctrl@SearchPi0_show_message){
message("--> Moved to ",curr_best_ranking," <--")
}
}
curr_model$SearchPi0_step = SearchPi0_step
curr_model
}
# TODO need to change: does not work for d==1
# Build the list of d lower-triangular T matrices used by GHC().
t.gen = function(d){
t.lst = list()
t.lst[[d]] = matrix(rep(1:d,d),ncol = d, nrow = d,byrow=T)
left.mask = matrix(rep(0,d^2),ncol = d, nrow = d)
left.mask[2:d,1:(d-1)] = diag(rep(1,d-1))
t.lst[[d]][upper.tri(left.mask)] = 0
for ( i in 1:(d-1)){
t.lst[[d-i]] = left.mask%*%t.lst[[d-i+1]]
diag(t.lst[[d-i]]) = c(rep(0,i),1:(d-i))
t.lst[[d-i]][upper.tri(left.mask)] = 0
}
t.lst
}
# Gradient of the normalising-constant term w.r.t. the parameter vector,
# using the T-matrix list from t.gen().
GHC = function(param,t.lst){
d = length(param) # d = t - 1
K = matrix(rep(0,d^2),ncol = d, nrow = d)
for ( i in 1:d){
K = -1 * param[i] * t.lst[[i]] + K
}
K = exp(K)
K[upper.tri(K)] = 0
gradiant = numeric(d)
ones = rep(1,d)
denom = rowSums(K) + ones
B = matrix(ncol=d,nrow=d)
for (i in 1:d){
B[,i] = rowSums(-1 * K * t.lst[[i]])
}
for ( i in 1:d){
gradiant[i] = sum(B[,i] / denom)
}
gradiant
}
# find the weighted kendall distance between p1 and p2
# p1 and p2 are orderings
KwDist = function(p1, p2,w){
n = length(p1)
distance = 0
for (i in p2){
pos1 = which(p1 == i)
pos2 = which(p2 == i)
relative_pos1 = (1:n - pos1)[order(p1)]
relative_pos2 = (1:n - pos2)[order(p2)]
Ji = which(relative_pos1 * relative_pos2 < 0)
Ii = length(Ji)
Li = (pos1 + pos2 + Ii)/2
c1 = ifelse(pos1<=(Li-1), sum(w[pos1:(Li-1)]),0)
c2 = ifelse(pos2<=(Li-1), sum(w[pos2:(Li-1)]),0)
distance = distance + (c1 + c2)/2
}
distance
}
# Expand top-q (tied) rankings into complete rankings, splitting each
# count equally among all consistent tie-breakings.
BreakTieEqualProb <- function(dat){
ind_comp <- which(dat@topq == dat@nobj-1)
if (max(dat@topq) == dat@nobj-1){
ind_comp_start <- dat@q_ind[ind_comp]
ind_comp_end <- dat@q_ind[ind_comp + 1] - 1
comp_ranking <- dat@ranking[ind_comp_start:ind_comp_end, ]
comp_count <- dat@count[ind_comp_start:ind_comp_end]
} else {
comp_ranking <- permute::allPerms(dat@nobj)
comp_ranking <- rbind(1:dat@nobj, comp_ranking)
comp_count <- rep(0, nrow(comp_ranking))
}
comp_hash <- RanktoHash(comp_ranking)
for (i in 1:length(dat@topq)){
if(dat@topq[i] == dat@nobj-1)
next
ind_start <- dat@q_ind[i]
ind_end <- dat@q_ind[i+1] - 1
this_q <- dat@topq[i]
this_inc <- 1/factorial(dat@nobj - this_q)
# generate permutations for tied group
tie_perm <- permute::allPerms((this_q+1):dat@nobj) + this_q
tie_perm <- rbind(tie_perm, (this_q+1):dat@nobj)
# iterate through top-q rankings
for (this_partial_ind in ind_start:ind_end){
this_partial <- dat@ranking[this_partial_ind, ]
this_count <- dat@count[this_partial_ind]
ind_tie <- which(this_partial == this_q + 1)
# iterate through possible tie-breakings
for (ind_break in 1:nrow(tie_perm)){
copy_partial <- this_partial
this_break <- tie_perm[ind_break, ]
ptr_break <- 1
# iterate through tied positions
for (this_tie_ind in ind_tie){
copy_partial[this_tie_ind] = this_break[ptr_break]
ptr_break <- ptr_break + 1
}
this_hash <- rankdist::RanktoHash(copy_partial)
ind_incre <- which(comp_hash == this_hash)
comp_count[ind_incre] = comp_count[ind_incre] + this_inc*this_count
}
}
}
# handle complete rankings
ind_nonempty_count = which(comp_count != 0)
comp_count = comp_count[comp_count > 0]
comp_ranking = comp_ranking[ind_nonempty_count, ]
comp_dat <- new("RankData", ranking=comp_ranking, count=comp_count)
}
|
# Fit a cross-validated elastic-net regression (alpha = 0.01, MAE loss)
# on the central_nervous_system training set and append the fitted glmnet
# path summary to the corresponding model report file.
library(glmnet)
training_set <- read.table("./TrainingSet/RF/central_nervous_system.csv",
header = TRUE, sep = ",")
# column 1 is the response; predictors start at column 4
feature_matrix <- as.matrix(training_set[, 4:ncol(training_set)])
response <- as.matrix(training_set[, 1])
set.seed(123) # reproducible CV folds
cv_fit <- cv.glmnet(feature_matrix, response, nfolds = 10,
type.measure = "mae", alpha = 0.01,
family = "gaussian", standardize = FALSE)
sink('./Model/EN/Classifier/central_nervous_system/central_nervous_system_008.txt', append = TRUE)
print(cv_fit$glmnet.fit)
sink()
|
/Model/EN/Classifier/central_nervous_system/central_nervous_system_008.R
|
no_license
|
leon1003/QSMART
|
R
| false
| false
| 399
|
r
|
# Fit a cross-validated elastic-net regression (alpha = 0.01, MAE loss)
# on the central_nervous_system training set and append the fitted glmnet
# path summary to the corresponding model report file.
library(glmnet)
mydata = read.table("./TrainingSet/RF/central_nervous_system.csv",head=T,sep=",")
# column 1 is the response; predictors start at column 4
x = as.matrix(mydata[,4:ncol(mydata)])
y = as.matrix(mydata[,1])
set.seed(123) # reproducible CV folds
glm = cv.glmnet(x,y,nfolds=10,type.measure="mae",alpha=0.01,family="gaussian",standardize=FALSE)
sink('./Model/EN/Classifier/central_nervous_system/central_nervous_system_008.txt',append=TRUE)
print(glm$glmnet.fit)
sink()
|
#read data into dataframe
# Draws a 2x2 panel of household power time-series plots for
# 2007-02-01/02 on the active graphics device (no png() call here).
# No na.strings is supplied, so "?" entries force the data columns to be
# read as factors -- hence the as.numeric(as.character(...)) conversions.
power <- read.table("C:/Users/michael.breecher/Desktop/data/household_power_consumption.txt",
sep = ";",
header = TRUE)
#create an additional field for a date converted from text.
power$date_corrected <- as.Date(power$Date, format = "%d/%m/%Y")
#subset power information to include only 2/1/2007 and 2/2/2007
working_power <- subset(power, power$date_corrected >= "2007-02-01" & power$date_corrected <= "2007-02-02")
#convert global active power string to numeric value.
working_power$gap_corr <- as.numeric(as.character(working_power$Global_active_power))
#convert metering vectors from factors to numeric
working_power$sm1 <- as.numeric(as.character(working_power$Sub_metering_1))
working_power$sm2 <- as.numeric(as.character(working_power$Sub_metering_2))
working_power$sm3 <- as.numeric(as.character(working_power$Sub_metering_3))
#create additional field for datetime
working_power$datetime <- as.POSIXct(paste(working_power$date_corrected, working_power$Time))
par(mfrow = c(2, 2), mar = c(4,4,.5,.5), oma = c(0,0,0,0), cex.axis = .8, cex.lab = .8)
plot(working_power$datetime, working_power$gap_corr,
type = "l", #line type
xlab = "",
ylab = "Global Active Power",
)
# NOTE(review): Voltage is plotted without the factor-to-numeric
# conversion applied to the other columns, so factor level codes are
# plotted and the hand-tuned axis below relabels them -- verify.
plot(working_power$datetime, working_power$Voltage,
type = "l", #line type
xlab = "datetime",
ylab = "Voltage",
yaxt = 'n'
)
#custom voltage axis (hand-tuned tick positions over the level codes)
axis(2, at = 1:7*195+700, lab = c("234","","238","","242","","246"))
plot(working_power$datetime, working_power$sm1,
type = "l", #line type
xlab = "",
ylab = "Energy sub metering"
)
# overlay the remaining sub-metering series
points(working_power$datetime,
working_power$sm2,
type = "l", col = "red")
points(working_power$datetime, working_power$sm3, type = "l", col = "blue")
#add legend
par(xpd = TRUE)
# NOTE(review): colour order c(black, blue, red) does not match the
# plotted series (sm2 = red, sm3 = blue); legend colours look swapped.
legend("topright", lwd = 1, col = c("black", "blue", "red"),
legend = c("Sub_metering_1 ", "Sub_metering_2 ", "Sub_metering_3 "),
cex = .7, bty = 'n', ncol = 1)
# NOTE(review): Global_reactive_power is also still a factor here; the
# custom axis below maps its level codes back to 0.0-0.5 -- verify.
plot(working_power$datetime, working_power$Global_reactive_power,
type = "l", #line type
xlab = "datetime",
ylab = "Global_reactive_power",
yaxt = 'n',
)
axis(2, at = 0:5*45, lab = c(0.0, 0.1, 0.2, 0.3, 0.4, 0.5))
|
/plot4.R
|
no_license
|
mbreecher/ExData_Plotting1
|
R
| false
| false
| 2,247
|
r
|
#read data into dataframe
# Draws a 2x2 panel of household power time-series plots for
# 2007-02-01/02 on the active graphics device (no png() call here).
# No na.strings is supplied, so "?" entries force the data columns to be
# read as factors -- hence the as.numeric(as.character(...)) conversions.
power <- read.table("C:/Users/michael.breecher/Desktop/data/household_power_consumption.txt",
sep = ";",
header = TRUE)
#create an additional field for a date converted from text.
power$date_corrected <- as.Date(power$Date, format = "%d/%m/%Y")
#subset power information to include only 2/1/2007 and 2/2/2007
working_power <- subset(power, power$date_corrected >= "2007-02-01" & power$date_corrected <= "2007-02-02")
#convert global active power string to numeric value.
working_power$gap_corr <- as.numeric(as.character(working_power$Global_active_power))
#convert metering vectors from factors to numeric
working_power$sm1 <- as.numeric(as.character(working_power$Sub_metering_1))
working_power$sm2 <- as.numeric(as.character(working_power$Sub_metering_2))
working_power$sm3 <- as.numeric(as.character(working_power$Sub_metering_3))
#create additional field for datetime
working_power$datetime <- as.POSIXct(paste(working_power$date_corrected, working_power$Time))
par(mfrow = c(2, 2), mar = c(4,4,.5,.5), oma = c(0,0,0,0), cex.axis = .8, cex.lab = .8)
plot(working_power$datetime, working_power$gap_corr,
type = "l", #line type
xlab = "",
ylab = "Global Active Power",
)
# NOTE(review): Voltage is plotted without the factor-to-numeric
# conversion applied to the other columns, so factor level codes are
# plotted and the hand-tuned axis below relabels them -- verify.
plot(working_power$datetime, working_power$Voltage,
type = "l", #line type
xlab = "datetime",
ylab = "Voltage",
yaxt = 'n'
)
#custom voltage axis (hand-tuned tick positions over the level codes)
axis(2, at = 1:7*195+700, lab = c("234","","238","","242","","246"))
plot(working_power$datetime, working_power$sm1,
type = "l", #line type
xlab = "",
ylab = "Energy sub metering"
)
# overlay the remaining sub-metering series
points(working_power$datetime,
working_power$sm2,
type = "l", col = "red")
points(working_power$datetime, working_power$sm3, type = "l", col = "blue")
#add legend
par(xpd = TRUE)
# NOTE(review): colour order c(black, blue, red) does not match the
# plotted series (sm2 = red, sm3 = blue); legend colours look swapped.
legend("topright", lwd = 1, col = c("black", "blue", "red"),
legend = c("Sub_metering_1 ", "Sub_metering_2 ", "Sub_metering_3 "),
cex = .7, bty = 'n', ncol = 1)
# NOTE(review): Global_reactive_power is also still a factor here; the
# custom axis below maps its level codes back to 0.0-0.5 -- verify.
plot(working_power$datetime, working_power$Global_reactive_power,
type = "l", #line type
xlab = "datetime",
ylab = "Global_reactive_power",
yaxt = 'n',
)
axis(2, at = 0:5*45, lab = c(0.0, 0.1, 0.2, 0.3, 0.4, 0.5))
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.