content large_stringlengths 0 6.46M | path large_stringlengths 3 331 | license_type large_stringclasses 2 values | repo_name large_stringlengths 5 125 | language large_stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 4 6.46M | extension large_stringclasses 75 values | text stringlengths 0 6.46M |
|---|---|---|---|---|---|---|---|---|---|
# Unit tests for calcModelRmst() -- restricted-mean-survival-time tables from
# parametric survival fits. createSurvivalDataObject(), fitModels() and
# calcModelRmst() are package/test helpers defined elsewhere in the suite.
context("parametricRmst")
# RMST is only supported for fits made with armAsFactor=FALSE
test_that("error_thrown_if_armAsFactor_TRUE",{
sD <- createSurvivalDataObject()
fit <- fitModels(sD, armAsFactor=TRUE, endPoint="relapse")
expect_error(calcModelRmst(fit, model="weibull", times=50))
})
# covariate-adjusted fits are rejected
test_that("error_thrown_if_fit_contains_covariates",{
sD <- createSurvivalDataObject()
fit <- fitModels(sD, armAsFactor=FALSE, endPoint="relapse", covariate="age")
expect_error(calcModelRmst(fit, model="weibull", times=50))
})
# asking for a model that was not fitted is an error
test_that("error_thrown_if_invalid_model",{
sD <- createSurvivalDataObject()
fit <- fitModels(sD, armAsFactor=FALSE, endPoint="relapse",model="weibull")
expect_error(calcModelRmst(fit, model="exponential", times=50))
})
# times must be numeric (a character element should be rejected)
test_that("error_thrown_if_invalid_time",{
sD <- createSurvivalDataObject()
fit <- fitModels(sD, armAsFactor=FALSE, endPoint="relapse",model="weibull")
expect_error(calcModelRmst(fit, model="weibull", times=c("dsfwe",50)))
})
# output shape: one column per requested time (Inf allowed) plus the arm-name column
test_that("one_column_is_generated_per_time_requested",{
sD <- createSurvivalDataObject()
fit <- fitModels(sD, armAsFactor=FALSE, endPoint="relapse",model="weibull")
tab <- calcModelRmst(fit, model="weibull", times=c(67,Inf, 10))
expect_equal(tab$numcol,4) #1 for each time + 1 for arm names
})
# output shape: one row per arm plus the difference row and the "time" header row
test_that("one_row_is_generated_per_arm",{
sD <- createSurvivalDataObject()
fit <- fitModels(sD, armAsFactor=FALSE, endPoint="relapse",model="weibull")
tab <- calcModelRmst(fit, model="weibull", times=c(67,Inf, 10))
expect_equal(tab$numrow, 4) # 1 for difference, 1 for "time" + 2 for arm
})
# RMST up to Inf for an exponential model equals the distribution mean, 1/rate
test_that("time_inf_gives_mean_of_distribution",{
sD <- createSurvivalDataObject()
fit <- fitModels(sD, armAsFactor=FALSE, endPoint="relapse")
tab <- calcModelRmst(fit, model="exponential", times=Inf, class="data.frame")
expect_equal(tab[[1]][2], 1/fit@models$exponential[[1]]$res[1,"est"])
expect_equal(tab[[1]][3], 1/fit@models$exponential[[2]]$res[1,"est"])
})
# the final row holds arm2 - arm1 for every time column
test_that("difference_row_is_difference_of_values_of_each_arm",{
sD <- createSurvivalDataObject()
fit <- fitModels(sD, armAsFactor=FALSE, endPoint="relapse")
tab <- calcModelRmst(fit, model="llogis", times=c(5,10,20), class="data.frame")
expect_equal(as.numeric(tab[4,]),as.numeric(tab[3,])-as.numeric(tab[2,]))
})
| /tests/testthat/test-rmst.R | no_license | scientific-computing-solutions/sibyl | R | false | false | 2,244 | r | context("parametricRmst")
# Tests for calcModelRmst(); helper constructors (createSurvivalDataObject,
# fitModels) are defined elsewhere in the test suite.
# calcModelRmst only supports fits with armAsFactor=FALSE
test_that("error_thrown_if_armAsFactor_TRUE",{
sD <- createSurvivalDataObject()
fit <- fitModels(sD, armAsFactor=TRUE, endPoint="relapse")
expect_error(calcModelRmst(fit, model="weibull", times=50))
})
# covariate-adjusted fits are rejected
test_that("error_thrown_if_fit_contains_covariates",{
sD <- createSurvivalDataObject()
fit <- fitModels(sD, armAsFactor=FALSE, endPoint="relapse", covariate="age")
expect_error(calcModelRmst(fit, model="weibull", times=50))
})
# asking for a model that was not fitted is an error
test_that("error_thrown_if_invalid_model",{
sD <- createSurvivalDataObject()
fit <- fitModels(sD, armAsFactor=FALSE, endPoint="relapse",model="weibull")
expect_error(calcModelRmst(fit, model="exponential", times=50))
})
# times must be numeric
test_that("error_thrown_if_invalid_time",{
sD <- createSurvivalDataObject()
fit <- fitModels(sD, armAsFactor=FALSE, endPoint="relapse",model="weibull")
expect_error(calcModelRmst(fit, model="weibull", times=c("dsfwe",50)))
})
# shape check: columns = requested times + arm-name column
test_that("one_column_is_generated_per_time_requested",{
sD <- createSurvivalDataObject()
fit <- fitModels(sD, armAsFactor=FALSE, endPoint="relapse",model="weibull")
tab <- calcModelRmst(fit, model="weibull", times=c(67,Inf, 10))
expect_equal(tab$numcol,4) #1 for each time + 1 for arm names
})
# shape check: rows = arms + difference row + time header row
test_that("one_row_is_generated_per_arm",{
sD <- createSurvivalDataObject()
fit <- fitModels(sD, armAsFactor=FALSE, endPoint="relapse",model="weibull")
tab <- calcModelRmst(fit, model="weibull", times=c(67,Inf, 10))
expect_equal(tab$numrow, 4) # 1 for difference, 1 for "time" + 2 for arm
})
# RMST to Inf under an exponential model is the mean 1/rate
test_that("time_inf_gives_mean_of_distribution",{
sD <- createSurvivalDataObject()
fit <- fitModels(sD, armAsFactor=FALSE, endPoint="relapse")
tab <- calcModelRmst(fit, model="exponential", times=Inf, class="data.frame")
expect_equal(tab[[1]][2], 1/fit@models$exponential[[1]]$res[1,"est"])
expect_equal(tab[[1]][3], 1/fit@models$exponential[[2]]$res[1,"est"])
})
# last row is arm2 - arm1 for each time column
test_that("difference_row_is_difference_of_values_of_each_arm",{
sD <- createSurvivalDataObject()
fit <- fitModels(sD, armAsFactor=FALSE, endPoint="relapse")
tab <- calcModelRmst(fit, model="llogis", times=c(5,10,20), class="data.frame")
expect_equal(as.numeric(tab[4,]),as.numeric(tab[3,])-as.numeric(tab[2,]))
})
|
source('~/code/util/mtx.r')
# Run scanpy on a [cells x features] matrix by shelling out to
# ~/code/single_cell/run_scanpy.py and reading its output files back.
#
# data:        [cells x feats] matrix. For 'dmap'/'leiden' use t(obj$data) or
#              obj$pca.rot; for 'dca' use t(obj$counts).
# n_pcs:       number of principal components passed to the python script.
# method:      one of 'leiden' (clustering), 'dmap' (diffusion map),
#              'dca' (deep count autoencoder imputation).
# dpt:         (dmap only) also compute diffusion pseudotime.
# root_cell:   rowname of the root cell for pseudotime (defaults to cell 0).
# n_neighbors: k for the neighbourhood graph.
# paga:        (dmap only) also compute PAGA graph abstraction; requires `clusters`.
# clusters:    per-cell cluster labels, written to disk for --paga.
# cleanup:     delete intermediate files after loading results.
# out:         output prefix; a tempfile under ~/tmp is used when NULL.
# sparse:      write the matrix as sparse .mtx (via write_mtx) or dense text.
# resolution:  leiden clustering resolution.
#
# Returns a named list containing whichever result files the python run produced
# (leiden labels, diffusion map, pseudotime, PAGA matrices, DCA imputation).
#
# NOTE(review): fread() (data.table) and write_mtx() are assumed to be available
# from the sourced ~/code/util/mtx.r -- confirm before using standalone.
run_scanpy = function(data, n_pcs=0, method='dmap', dpt=FALSE, root_cell=NULL, n_neighbors=10, paga=FALSE, clusters=NULL, cleanup=TRUE, out=NULL, sparse=TRUE, resolution=1){
    if(!method %in% c('leiden', 'dmap', 'dca')){
        stop("method must be one of 'leiden', 'dmap', 'dca'", call.=FALSE)
    }
    # Filter data (drop genes detected in fewer than 3 cells)
    data = data[, colSums(data > 0) >= 3]
    # Choose the data file path
    if(is.null(out)){
        out = tempfile(pattern='scanpy.', tmpdir='~/tmp', fileext='.data.txt')
    } else {
        out = paste0(out, '.data.txt')
    }
    # Derive the shared file prefix. fixed=TRUE so the '.' in the suffixes is
    # matched literally (previously the unescaped '.' matched any character).
    prefix = gsub('.data.txt', '', gsub('.matrix.mtx', '', out, fixed=TRUE), fixed=TRUE)
    # Write the expression matrix (dense text or sparse .mtx)
    if(sparse == FALSE){
        write.table(as.matrix(data), file=out, sep='\t', quote=FALSE, row.names=FALSE, col.names=FALSE)
        print(paste0('Writing dense matrix to ', out))
    } else {
        out = write_mtx(data, prefix=prefix)$data
        print(paste0('Writing sparse matrix to ', out))
    }
    # Write cluster labels to disk (only needed by --paga)
    clusters_fn = NULL
    if(!is.null(clusters)){
        clusters_fn = paste0(prefix, '.clusters.txt')
        write.table(clusters, clusters_fn, quote=FALSE, row.names=FALSE, col.names=FALSE)
    }
    # 0-based root cell index for diffusion pseudotime
    if(is.null(root_cell)){iroot = 0} else {iroot = match(root_cell, rownames(data))-1}
    # Build the python command line for the requested method
    if(method == 'leiden'){
        print('Clustering with leiden. Make sure data = t(obj$data) or obj$pca.rot')
        command = paste('python ~/code/single_cell/run_scanpy.py --data', out, '--leiden', '--n_pcs', n_pcs, '--n_neighbors', n_neighbors, '--resolution', resolution, '--out', prefix)
    }
    if(method == 'dmap'){
        print('Calculating diffusion map. Make sure data = t(obj$data) or obj$pca.rot')
        command = paste('python ~/code/single_cell/run_scanpy.py --data', out, '--dmap', '--n_pcs', n_pcs, '--n_neighbors', n_neighbors, '--iroot', iroot, '--out', prefix)
        # Pseudotime
        if(dpt == TRUE){command = paste(command, '--dpt')}
        # Approximate graph abstraction (previously crashed with
        # "object 'clusters_fn' not found" when clusters was NULL)
        if(paga == TRUE){
            if(is.null(clusters_fn)){stop("paga=TRUE requires 'clusters'", call.=FALSE)}
            command = paste(command, '--paga', '--clusters', clusters_fn)
        }
    }
    if(method == 'dca'){
        print('Imputing data with DCA. Make sure data = t(obj$counts)')
        command = paste('python ~/code/single_cell/run_scanpy.py --data', out, '--dca', '--out', prefix)
    }
    system(command)
    # Load results; map of output name -> file suffix written by the python script
    res = list(leiden='leiden', dmap='X_diffmap', dpt_time='dpt_pseudotime', paga_full='connectivities', paga_tree='connectivities_tree', categories='categories', dca='dca')
    for(name in names(res)){
        fn = paste(prefix, res[[name]], 'txt', sep='.')
        if(file.exists(fn)){
            res[[name]] = as.data.frame(fread(fn, header=FALSE))
            # Re-attach dimnames when the result is cell- or gene-aligned
            if(nrow(data) == nrow(res[[name]])){
                rownames(res[[name]]) = rownames(data)
            }
            if(ncol(data) == ncol(res[[name]])){
                colnames(res[[name]]) = colnames(data)
            }
            # file.remove is portable and handles paths with spaces
            # (previously shelled out to 'rm')
            if(cleanup == TRUE){file.remove(fn)}
        }
    }
    # Label PAGA matrices with cluster names when categories are available
    tryCatch({for(name in c('paga_tree', 'paga_full')){rownames(res[[name]]) = colnames(res[[name]]) = res[['categories']][,1]}}, error=function(e){})
    if(cleanup == TRUE){file.remove(out)}
    return(res)
}
| /single_cell/scanpy.r | no_license | cssmillie/code | R | false | false | 3,363 | r | source('~/code/util/mtx.r')
# Run scanpy (via ~/code/single_cell/run_scanpy.py) on a [cells x feats] matrix
# and read the result files back into a named list. Relies on fread()
# (data.table) and write_mtx() being available from the sourced mtx.r.
run_scanpy = function(data, n_pcs=0, method='dmap', dpt=FALSE, root_cell=NULL, n_neighbors=10, paga=FALSE, clusters=NULL, cleanup=TRUE, out=NULL, sparse=TRUE, resolution=1){
# -----------------------------------------
# Run scanpy on [cells x feats] data matrix
# -----------------------------------------
# For dmap, use t(obj$data) or obj$pca.rot
# For dca, use t(obj$counts)
# method should be 'leiden', 'dmap' or 'dca'
# Filter data (remove genes detected in fewer than 3 cells)
data = data[, colSums(data > 0) >= 3]
# Write data (tempfile under ~/tmp when no prefix is given)
if(is.null(out)){
out = tempfile(pattern='scanpy.', tmpdir='~/tmp', fileext='.data.txt')
} else {
out = paste0(out, '.data.txt')
}
# Get prefix
# NOTE(review): the '.' characters in these patterns are unescaped regex dots
# ('.data.txt' also matches e.g. 'Xdata.txt') -- consider fixed=TRUE.
prefix = gsub('.data.txt', '', gsub('.matrix.mtx', '', out))
if(sparse == FALSE){
write.table(as.matrix(data), file=out, sep='\t', quote=F, row.names=FALSE, col.names=FALSE)
print(paste0('Writing dense matrix to ', out))
} else {
out = write_mtx(data, prefix=prefix)$data
print(paste0('Writing sparse matrix to ', out))
}
# Write clusters (needed by --paga)
# NOTE(review): when clusters is NULL, clusters_fn is never defined, so
# paga=TRUE then fails with "object 'clusters_fn' not found".
if(is.null(clusters)){clusters = 'louvain_groups'} else {
clusters_fn = paste0(prefix, '.clusters.txt')
write.table(clusters, clusters_fn, quote=F, row.names=FALSE, col.names=FALSE)
}
# Root cell index (0-based, for diffusion pseudotime)
if(is.null(root_cell)){iroot = 0} else {iroot = match(root_cell, rownames(data))-1}
if(method == 'leiden'){
print('Clustering with leiden. Make sure data = t(obj$data) or obj$pca.rot')
# Clustering
command = paste('python ~/code/single_cell/run_scanpy.py --data', out, '--leiden', '--n_pcs', n_pcs, '--n_neighbors', n_neighbors, '--resolution', resolution, '--out', prefix)
}
if(method == 'dmap'){
print('Calculating diffusion map. Make sure data = t(obj$data) or obj$pca.rot')
# Diffusion map
command = paste('python ~/code/single_cell/run_scanpy.py --data', out, '--dmap', '--n_pcs', n_pcs, '--n_neighbors', n_neighbors, '--iroot', iroot, '--out', prefix)
# Pseudotime
if(dpt == TRUE){command = paste(command, '--dpt')}
# Approximate graph abstraction
if(paga == TRUE){command = paste(command, '--paga', '--clusters', clusters_fn)}
}
if(method == 'dca'){
print('Imputing data with DCA. Make sure data = t(obj$counts)')
# deep count autoencoder
command = paste('python ~/code/single_cell/run_scanpy.py --data', out, '--dca', '--out', prefix)
}
# NOTE(review): if method is none of the three, `command` is undefined here.
system(command)
# Load & cleanup results: output name -> file suffix written by the python script
res = list(leiden='leiden', dmap='X_diffmap', dpt_time='dpt_pseudotime', paga_full='connectivities', paga_tree='connectivities_tree', categories='categories', dca='dca')
for(name in names(res)){
fn = paste(prefix, res[[name]], 'txt', sep='.')
if(file.exists(fn)){
res[[name]] = as.data.frame(fread(fn, header=FALSE))
# re-attach dimnames when the result is cell- or gene-aligned
if(nrow(data) == nrow(res[[name]])){
rownames(res[[name]]) = rownames(data)
}
if(ncol(data) == ncol(res[[name]])){
colnames(res[[name]]) = colnames(data)
}
# NOTE(review): shelling out to 'rm' is not portable; file.remove() is safer.
if(cleanup == TRUE){system(paste('rm', fn))}
}
}
# Label PAGA matrices with cluster names when categories were produced
tryCatch({for(name in c('paga_tree', 'paga_full')){rownames(res[[name]]) = colnames(res[[name]]) = res[['categories']][,1]}}, error=function(e){})
if(cleanup == TRUE){system(paste('rm', out))}
return(res)
}
|
#' Generate the base plot of inflation curves for all tire sizes
#'
#' Builds a ggplot of tire pressure vs wheel load, one curve per tire size,
#' with direct labels at the end of each curve.
#'
#' @param base_inflation the inflation data to use (needs columns
#'   \code{wheel_load_lbs}, \code{tire_pressure_psi}, \code{tire_size_mm},
#'   \code{tire_size_text}).
#'
#' @param plot_theme the ggplot theme to use.
#'
#' @return ggplot object
#'
#' @include plot_helpers.R
#' @include generate-inflation-curve-data.R
#' @include theme_dg_pale.R
#'
#' @import ggplot2
#' @importFrom directlabels geom_dl
#' @export
generate_base_pressure_plot <- function(
base_inflation = generate_inflation_data(),
plot_theme = theme_dg_pale()
) {
# One colour per tire size. wesanderson is accessed via :: (not listed in the
# roxygen imports above); tire_sizes_mm(), x_max_wheel_load, max_tire_psi,
# dual_weight/dual_pressure and plot_title() come from the package helpers
# @include'd above.
tire_palette <- wesanderson::wes_palette(
n = length(tire_sizes_mm()),
name = "GrandBudapest2",
type = "continuous"
)
ggplot(
base_inflation,
aes(
wheel_load_lbs,
tire_pressure_psi,
group = tire_size_mm,
color = tire_size_text
)
) +
plot_theme +
theme(
plot.caption = element_text(color = "#bbbbbb"),
aspect.ratio = 0.8
) +
plot_title() +
labs(caption = "Original data from Bicycle Quarterly/Frank Berto and Jan Heine") +
scale_x_continuous(
name = "Wheel Load",
# breaks run from the data minimum (rounded down to a multiple of 10)
# up to the package-level x_max_wheel_load, in steps of 10
breaks = seq(
floor(min(base_inflation$wheel_load_lbs) / 10) * 10,
x_max_wheel_load, 10
),
labels = dual_weight
) +
scale_y_continuous(
name = "Tire Pressure",
# add check so min - max tire psi is mod 10
breaks = seq(20, max_tire_psi, 10),
labels = dual_pressure
) +
scale_color_manual(values = tire_palette) +
# coord_cartesian(ylim = c(20, 150)) +
# coord_cartesian(ylim = c(min_tire_psi, max_tire_psi)) +
geom_line(size = 0.85, alpha = 0.95) +
expand_limits(x = 158) + # to fit tire size label - make programmatic
geom_dl(
aes(label = tire_size_text),
method = list("last.points", cex = 1.0, hjust = -0.05),
color = "#333333"
)
}
#' Base pressure chart
#'
#' This chart corresponds to the printed chart, updated for
#' current tire sizes. The \code{\link{plot_bike_inflation}} functions superimpose
#' the front and rear calculated pressures for a bike setup.
#'
#' @export
base_pressure_plot <- generate_base_pressure_plot()
| /R/generate_base_plot.R | permissive | dgabbe/btpress | R | false | false | 2,098 | r | #' Generate the base plot of inflation curves for all tire sizes
#'
#' @param base_inflation the inflation data to use.
#'
#' @param plot_theme the ggplot theme to use.
#'
#' @return ggplot object
#'
#' @include plot_helpers.R
#' @include generate-inflation-curve-data.R
#' @include theme_dg_pale.R
#'
#' @import ggplot2
#' @importFrom directlabels geom_dl
#' @export
# Build the base ggplot of tire pressure vs wheel load, one curve per tire
# size, directly labelled at the curve ends. tire_sizes_mm(),
# x_max_wheel_load, max_tire_psi, dual_weight/dual_pressure and plot_title()
# are package helpers defined elsewhere; wesanderson is used via ::.
generate_base_pressure_plot <- function(
base_inflation = generate_inflation_data(),
plot_theme = theme_dg_pale()
) {
# one colour per tire size
tire_palette <- wesanderson::wes_palette(
n = length(tire_sizes_mm()),
name = "GrandBudapest2",
type = "continuous"
)
ggplot(
base_inflation,
aes(
wheel_load_lbs,
tire_pressure_psi,
group = tire_size_mm,
color = tire_size_text
)
) +
plot_theme +
theme(
plot.caption = element_text(color = "#bbbbbb"),
aspect.ratio = 0.8
) +
plot_title() +
labs(caption = "Original data from Bicycle Quarterly/Frank Berto and Jan Heine") +
scale_x_continuous(
name = "Wheel Load",
# breaks: data minimum rounded down to a multiple of 10, up to
# x_max_wheel_load, step 10
breaks = seq(
floor(min(base_inflation$wheel_load_lbs) / 10) * 10,
x_max_wheel_load, 10
),
labels = dual_weight
) +
scale_y_continuous(
name = "Tire Pressure",
# add check so min - max tire psi is mod 10
breaks = seq(20, max_tire_psi, 10),
labels = dual_pressure
) +
scale_color_manual(values = tire_palette) +
# coord_cartesian(ylim = c(20, 150)) +
# coord_cartesian(ylim = c(min_tire_psi, max_tire_psi)) +
geom_line(size = 0.85, alpha = 0.95) +
expand_limits(x = 158) + # to fit tire size label - make programmatic
geom_dl(
aes(label = tire_size_text),
method = list("last.points", cex = 1.0, hjust = -0.05),
color = "#333333"
)
}
#' Base pressure chart
#'
#' This chart corresponds to the printed chart, updated for
#' current tire sizes. The \code{\link{plot_bike_inflation}} functions superimpose
#' the front and rear calculated pressures for a bike setup.
#'
#' @export
base_pressure_plot <- generate_base_pressure_plot()
|
% Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/ikk1.R
\name{generateIKK1}
\alias{IKK1}
\alias{generateIKK1}
\alias{ikk1}
\title{IKK1 test function generator.}
\usage{
generateIKK1(in.dim = 2L, out.dim = 3L)
}
\arguments{
\item{in.dim}{[\code{integer(1)}] \cr
Size of parameter space. Must be two.}
\item{out.dim}{[\code{integer(1)}] \cr
Size of target space. Must be three.}
}
\value{
A \code{mooFunction}.
}
\description{
IKK1 test function generator.
}
\references{
wfg [51]
}
| /man/generateIKK1.Rd | no_license | danielhorn/moobench | R | false | false | 520 | rd | % Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/ikk1.R
\name{generateIKK1}
\alias{IKK1}
\alias{generateIKK1}
\alias{ikk1}
\title{IKK1 test function generator.}
\usage{
generateIKK1(in.dim = 2L, out.dim = 3L)
}
\arguments{
\item{in.dim}{[\code{integer(1)}] \cr
Size of parameter space. Must be two.}
\item{out.dim}{[\code{integer(1)}] \cr
Size of target space. Must be three.}
}
\value{
A \code{mooFunction}.
}
\description{
IKK1 test function generator.
}
\references{
wfg [51]
}
|
\name{centralValue}
\alias{centralValue}
\title{
Obtain statistic of centrality
}
\description{
This function obtains a statistic of centrality of a variable given a
sample of its values.
}
\usage{
centralValue(x, ws = NULL)
}
\arguments{
\item{x}{
A vector of values (the sample).
}
\item{ws}{
A vector of case weights (defaulting to NULL, i.e. no case weights).
}
}
\details{
If the variable is numeric it returns the median of the given sample; if it
is a factor it returns the mode. In other cases it
tries to convert to a factor and then returns the mode.
}
\value{
A number if the variable is numeric. A string with the name of the
most frequent nominal value, otherwise.
}
\references{
Torgo, L. (2016) \emph{Data Mining using R: learning with case studies,
second edition},
Chapman & Hall/CRC (ISBN-13: 978-1482234893).
\url{http://ltorgo.github.io/DMwR2}
}
\author{ Luis Torgo \email{ltorgo@dcc.fc.up.pt} }
\seealso{
\code{\link{mean}}, \code{\link{median}}
}
\examples{
# An example with numerical data
x <- rnorm(100)
centralValue(x)
# An example with nominal data
y <-
factor(sample(1:10,200,replace=TRUE),levels=1:10,labels=paste('v',1:10,sep=''))
centralValue(y)
}
% Add one or more standard keywords, see file 'KEYWORDS' in the
% R documentation directory.
\keyword{univar}
| /man/centralValue.Rd | no_license | gmaubach/DMwR2 | R | false | false | 1,318 | rd | \name{centralValue}
\alias{centralValue}
\title{
Obtain statistic of centrality
}
\description{
This function obtains a statistic of centrality of a variable given a
sample of its values.
}
\usage{
centralValue(x, ws = NULL)
}
\arguments{
\item{x}{
A vector of values (the sample).
}
\item{ws}{
A vector of case weights (defaulting to NULL, i.e. no case weights).
}
}
\details{
If the variable is numeric it returns the median of the given sample; if it
is a factor it returns the mode. In other cases it
tries to convert to a factor and then returns the mode.
}
\value{
A number if the variable is numeric. A string with the name of the
most frequent nominal value, otherwise.
}
\references{
Torgo, L. (2016) \emph{Data Mining using R: learning with case studies,
second edition},
Chapman & Hall/CRC (ISBN-13: 978-1482234893).
\url{http://ltorgo.github.io/DMwR2}
}
\author{ Luis Torgo \email{ltorgo@dcc.fc.up.pt} }
\seealso{
\code{\link{mean}}, \code{\link{median}}
}
\examples{
# An example with numerical data
x <- rnorm(100)
centralValue(x)
# An example with nominal data
y <-
factor(sample(1:10,200,replace=TRUE),levels=1:10,labels=paste('v',1:10,sep=''))
centralValue(y)
}
% Add one or more standard keywords, see file 'KEYWORDS' in the
% R documentation directory.
\keyword{univar}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/Styles.R
\name{getVisualStyleJSON}
\alias{getVisualStyleJSON}
\title{Get Visual Style JSON}
\usage{
getVisualStyleJSON(styleName, css = FALSE, base.url = .defaultBaseUrl)
}
\arguments{
\item{styleName}{name of style}
\item{css}{TRUE to create a CytoscapeJS CSS style, FALSE to create
a generic JSON version. Default is FALSE.}
\item{base.url}{(optional) Ignore unless you need to specify a custom domain,
port or version to connect to the CyREST API. Default is http://localhost:1234
and the latest version of the CyREST API supported by this version of RCy3.}
}
\value{
List of visual style properties
}
\description{
Get all defaults and mappings for a visual style
}
\examples{
\donttest{
getVisualStyleJSON()
}
}
| /man/getVisualStyleJSON.Rd | permissive | cytoscape/RCy3 | R | false | true | 798 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/Styles.R
\name{getVisualStyleJSON}
\alias{getVisualStyleJSON}
\title{Get Visual Style JSON}
\usage{
getVisualStyleJSON(styleName, css = FALSE, base.url = .defaultBaseUrl)
}
\arguments{
\item{styleName}{name of style}
\item{css}{TRUE to create a CytoscapeJS CSS style, FALSE to create
a generic JSON version. Default is FALSE.}
\item{base.url}{(optional) Ignore unless you need to specify a custom domain,
port or version to connect to the CyREST API. Default is http://localhost:1234
and the latest version of the CyREST API supported by this version of RCy3.}
}
\value{
List of visual style properties
}
\description{
Get all defaults and mappings for a visual style
}
\examples{
\donttest{
getVisualStyleJSON()
}
}
|
# --- write the perceptual mapping func ---
# Draw a 2-D perceptual map from a perception matrix.
#
# inp.mat: numeric matrix with brands/units of analysis in rows and attributes
#   in columns; row and column names are used as plot labels.
# k: scaling factor applied to the brand coordinates.
#
# Side effects only: plots on the active graphics device -- attribute loadings
# as blue arrows and brands as red points on the first two principal
# components. Note it also sets par(pty="m") for the session.
pmap <- function(inp.mat, k=1) {
  par(pty="m")
  fit = prcomp(inp.mat, scale.=TRUE)  # principal components on scaled data
  # (removed a leftover `str(fit)` debug dump that printed the whole fit
  # structure to the console on every call)
  # Empty plot frame spanned by the top 2 principal-component loadings
  plot(fit$rotation[,1:2],
       type ="n",
       xlim=c(-1.5,1.5), ylim=c(-1.5,1.5),
       main ="Perceptual map ")
  abline(h=0); abline(v=0) # horizontal & vertical axes
  attribnames = colnames(inp.mat)
  brdnames = rownames(inp.mat)
  # Attribute vectors as arrows, scaled by the component standard deviations
  for (i1 in seq_len(nrow(fit$rotation))){
    arrows(0, 0, x1=fit$rotation[i1,1]*fit$sdev[1], y1=fit$rotation[i1,2]*fit$sdev[2], col = "blue", lwd = 1.5)
    text(x = fit$rotation[i1,1]*fit$sdev[1], y = fit$rotation[i1,2]*fit$sdev[2], labels = attribnames[i1], col = "blue", cex = 1.1)
  }
  # Rescale scores so each coordinate lies within (-1, 1)
  fit1 = fit
  fit1$x[,1] = fit$x[,1]/apply(abs(fit$x),2,sum)[1]
  fit1$x[,2] = fit$x[,2]/apply(abs(fit$x),2,sum)[2]
  points(x = fit1$x[,1]*k, y = fit1$x[,2]*k, pch = 19, col ="red")
  text(x = fit1$x[,1]*k, y = fit1$x[,2]*k, labels = brdnames, col ="black", cex = 1.1)
}
## Test example
# Fetches a small demo perception matrix over the network (requires internet
# access) and echoes it; runs at source() time.
officestar.percep = read.table("https://raw.githubusercontent.com/sudhir-voleti/sample-data-sets/master/JSM%20example%20data/officestar%20perceptual.txt", header=TRUE)
dim(officestar.percep); officestar.percep
pmap(t(officestar.percep), k=1.5) | /general R funcs.R | no_license | sudhir-voleti/code-chunks | R | false | false | 1,701 | r | # --- write the perceptual mapping func ---
# Draw a 2-D perceptual map: attribute loadings as blue arrows, brands as red
# points, on the first two principal components. Side effects only (plots on
# the active device; also sets par(pty="m") for the session).
pmap <- function(inp.mat, # input matrix with R rows and C colms
k=1) # scaling factor
{
# inp.mat = perception matrix with row and column headers
# brands or units of analysis in rows and attributes in columns
par(pty="m")
fit = prcomp(inp.mat, scale.=TRUE) # extract prin compts
# NOTE(review): leftover debug dump -- prints the whole prcomp structure
str(fit)
plot(fit$rotation[,1:2], # use only top 2 prinComps
type ="n",
xlim=c(-1.5,1.5), ylim=c(-1.5,1.5), # plot parms
main ="Perceptual map ") # plot title
abline(h=0); abline(v=0) # build horiz & vert axes
attribnames = colnames(inp.mat)
brdnames = rownames(inp.mat)
# <-- insert attrib vectors as arrows, scaled by component std devs --
for (i1 in 1:nrow(fit$rotation)){
arrows(0, 0, x1=fit$rotation[i1,1]*fit$sdev[1], y1=fit$rotation[i1,2]*fit$sdev[2], col = "blue", lwd = 1.5);
text(x = fit$rotation[i1,1]*fit$sdev[1], y = fit$rotation[i1,2]*fit$sdev[2], labels = attribnames[i1], col = "blue", cex = 1.1)
}
# <--- rescale scores so co-ords lie within (-1,1) frame #
fit1 = fit
fit1$x[,1] = fit$x[,1]/apply(abs(fit$x),2,sum)[1]
fit1$x[,2] = fit$x[,2]/apply(abs(fit$x),2,sum)[2]
points(x = fit1$x[,1]*k, y = fit1$x[,2]*k, pch = 19, col ="red")
text(x = fit1$x[,1]*k, y = fit1$x[,2]*k, labels = brdnames,col ="black", cex = 1.1)
} # JSM func ends
## Test example
# Fetches a demo perception matrix over the network at source() time.
officestar.percep = read.table("https://raw.githubusercontent.com/sudhir-voleti/sample-data-sets/master/JSM%20example%20data/officestar%20perceptual.txt", header=TRUE)
dim(officestar.percep); officestar.percep
pmap(t(officestar.percep), k=1.5) |
#### helper functions ####
#install.packages("RTextTools")
#install.packages("topicmodels")
# Load libraries
require(RTextTools)
library(topicmodels)
library(rhdf5)
# Manually adjusted probabilities for all terms
# Compute per-class term proportions from a lyrics bag-of-words table.
#
# lyr: data frame of word counts. Row 237 is dropped (hard-coded -- presumably
#   a known bad row; TODO confirm) and columns 3:5002 are assumed to hold the
#   5000 word-count columns, so the input schema is fixed.
# label: class label (integer-like) per remaining row of lyr.
# Returns a [terms x classes] matrix of within-class term proportions with the
# term strings prepended as the first column; rows -c(2,3,6:30) are dropped
# afterwards (apparently filler/stop-word entries -- TODO confirm).
adjust_prob <- function(lyr, label) {
new_lyr <- lyr[-237,]
df <- cbind(label, new_lyr)
# seq(20) only seeds the data.frame shape; every row is overwritten below
result <- data.frame(matrix(seq(20), nrow = max(as.numeric(label)), ncol = 5000))
for (lb in 1:max(as.numeric(label))) {
holder <- subset(df, df[,1] == lb)
# column sums of the word-count block for this class
holder <- colSums(holder[,3:5002])
tot <- sum(holder)
prop <- holder / tot
result[lb,] <- prop
}
result <- t(result)
result <- cbind(colnames(new_lyr)[2:5001], result)
result <- result[-c(2,3,6:30),]
return(result)
}
# Runs at source() time; lyr, output_label and data_output_path must already
# exist in the calling environment. (The "propotion" typo is part of the
# actual output filename.)
write.csv(adjust_prob(lyr,output_label),file = paste(data_output_path, "adjust_propotion_20.csv", sep=""))
# Convert tokenized terms to single string
# Expand each bag-of-words row back into a single space-separated string:
# every column name is repeated by its count. Columns 1 and ncol(lyr) are
# skipped -- presumably id/label columns (TODO confirm). Note paste(NULL, kw)
# recycles the zero-length value to "", so each output string carries a
# leading space. result grows row by row (O(n^2) copying) -- slow on large input.
string_convert <- function(lyr) {
result <- data.frame()
for (row in 1:(nrow(lyr))) {
words <- NULL
for (col in 2:(ncol(lyr)-1)) {
if (lyr[row,col] != 0) {
# repeat the word `count` times, joined by single spaces
kw <- rep(colnames(lyr)[col],lyr[row,col])
kw <- paste(kw, collapse=' ')
words <- paste(words, kw)
}
}
result[row,1] <- words
}
return(result)
}
# Convert strings to matrix
# Build a document-term matrix from a vector of text strings and drop empty
# documents (rows whose term counts sum to zero). create_matrix() comes from
# RTextTools, loaded at the top of this file.
#
# text_string: character vector of documents.
# language: language passed through to create_matrix() (stemming/stopwords).
# Returns the document-term matrix restricted to non-empty rows.
matrix_convert <- function(text_string, language) {
  # (removed a dead `text <- as.vector(text_string)` assignment that was
  # computed but never used)
  matx <- create_matrix(text_string, language = language)
  rowTotals <- apply(matx, 1, sum)
  matx.new <- matx[rowTotals > 0, ]
  return(matx.new)
}
| /lib/helper.r | no_license | TZstatsADS/Fall2016-proj4-TianShengTS | R | false | false | 1,820 | r | #### helper functions ####
#install.packages("RTextTools")
#install.packages("topicmodels")
# Load libraries
# Load libraries. library() (not require()) so a missing package fails
# loudly instead of returning FALSE and letting later calls error obscurely.
library(RTextTools)
library(topicmodels)
library(rhdf5)
# Manually adjusted probabilities for all terms
# Compute, for each label group, the proportion of each of the 5000 terms
# among all term occurrences in that group.
# NOTE(review): hard-coded indices (row 237 dropped, columns 3:5002,
# rows -c(2,3,6:30) of the transposed result) are specific to the lyrics
# data set this pipeline was written for -- confirm before reusing.
# Returns a character matrix: term names in column 1, one proportion
# column per label, with a fixed set of term rows removed.
adjust_prob <- function(lyr, label) {
  # Drop the known-bad observation (row 237) before aggregating.
  new_lyr <- lyr[-237, ]
  df <- cbind(label, new_lyr)
  n_groups <- max(as.numeric(label))
  # Placeholder frame: one row per label, one column per term.
  result <- data.frame(matrix(seq(20), nrow = n_groups, ncol = 5000))
  for (lb in 1:n_groups) {
    grp <- subset(df, df[, 1] == lb)
    term_counts <- colSums(grp[, 3:5002])
    result[lb, ] <- term_counts / sum(term_counts)
  }
  out <- t(result)
  out <- cbind(colnames(new_lyr)[2:5001], out)
  out[-c(2, 3, 6:30), ]
}
# Export the per-label term proportions to CSV.
# NOTE(review): relies on globals `lyr`, `output_label` and
# `data_output_path` being defined by the calling script; the "propotion"
# typo in the file name is left as-is because downstream code may expect it.
write.csv(adjust_prob(lyr,output_label),file = paste(data_output_path, "adjust_propotion_20.csv", sep=""))
# Convert tokenized terms to single string
# Expand a bag-of-words count table back into one text string per row.
# Columns 2:(ncol-1) are assumed to be term counts named by the term
# (column 1 is an id, the last column is skipped) -- TODO confirm layout.
# Each term is repeated `count` times; the resulting string keeps the
# leading space the original paste() chain produced, for compatibility.
# Fixes: the original grew a data.frame cell-by-cell (O(n^2) copies) and
# errored on rows with no non-zero counts; this version preallocates and
# returns "" for such rows. Returns a one-column data.frame (column "V1").
string_convert <- function(lyr) {
  term_cols <- 2:(ncol(lyr) - 1)
  term_names <- colnames(lyr)
  docs <- vapply(seq_len(nrow(lyr)), function(r) {
    counts <- unlist(lyr[r, term_cols], use.names = FALSE)
    nz <- which(counts != 0)
    chunks <- vapply(nz, function(j) {
      paste(rep(term_names[term_cols[j]], counts[j]), collapse = " ")
    }, character(1))
    # Leading "" reproduces the leading space emitted by paste(NULL, kw).
    paste(c("", chunks), collapse = " ")
  }, character(1))
  data.frame(V1 = docs, stringsAsFactors = FALSE)
}
# Convert strings to matrix
# Build a document-term matrix from a vector of text strings and drop
# empty documents (rows whose term counts sum to zero).
# `language` is forwarded to RTextTools::create_matrix.
matrix_convert <- function(text_string, language) {
  # Coerce to a plain vector (e.g. factor -> character) before tokenizing.
  text <- as.vector(text_string)
  # Fix: pass the coerced `text` on; the original computed it and then
  # passed the raw input instead, leaving `text` dead.
  matx <- create_matrix(text, language = language)
  rowTotals <- apply(matx, 1, sum)
  matx.new <- matx[rowTotals > 0, ]
  return(matx.new)
}
|
#-------------------------------------------------------------------------------
# Cylindrical Bessel Derivatives for J
#-------------------------------------------------------------------------------
# Evaluate the cylindrical Bessel function of the first kind, J_n(x).
# NOTE(review): despite the "Derivatives" section header this returns
# besselJ itself, not its derivative -- confirm against callers.
reff.cjn <- function(x, n) {
  besselJ(x, n)
}
| /R/reff.cjn.r | no_license | aneves76/rvswf | R | false | false | 252 | r | #-------------------------------------------------------------------------------
# Cylindrical Bessel Derivatives for J
#-------------------------------------------------------------------------------
# Evaluate the cylindrical Bessel function of the first kind, J_n(x).
# NOTE(review): despite the "Derivatives" section header this returns
# besselJ itself, not its derivative -- confirm against callers.
reff.cjn <- function(x, n) {
  besselJ(x, n)
}
|
/LSED_Lab_10.R | no_license | Samox1/R_LAB_LSED_MSR_TEXT | R | false | false | 2,574 | r | ||
#' Create AGS from a mutation matrix
#'
#' Imports a TAB-delimited file with mutations.
#' This function creates a new list of AGSs from a table listing point (or otherwise qualitatively defined) mutations. Such a matrix M typically has size Ngenes x Nsamples, so that the current function returns a list of \code{length=ncol(M)}. For each of the Nsamples, AGSs are created as simple lists of all mutated genes G in a given sample S, i.e. any value X in the matrix M that satisfies condition \code{!is.na(X)} would be treated as a mutation. Eventual mutation types / categories are ignored. Wild type gene states in the original TAB-delimited file should be represented with NAs.
#' @param MUT Matrix of size \emph{Ngenes x Nsamples} (the both Ns are positive integers, depending on the screen scale).
#' @param col.mask To include only columns with IDs that contain the specified mask. This parameter is aware of regular expression syntax, i.e. uses \code{grep(..., fixed = FALSE)}.
#' @param namesFromColumn Number of the column (if any) that contains the gene/protein names. Note that it is only necessary if the latter are NOT the unique rownames of the matrix. This could be sometimes useful for processing redundant gene profiles with one-to-many mapping etc. Otherwise (i.e. the default), rownames shall contain gene IDs.
#' @param permute If the list of AGSs should be created via random permutation of sample labels. This might be needed for testing the null hypothesis that mutated genes are randomly combined into individual genomes, while having the same frequency distribution as in the actual cohort. Since reproducing the original distribution of AGS sizes is a non-trivial set theoretical problem, the procedure is accompanied by plotting gene set sizes in actual vs. permuted AGS (the latter is usually smaller, which might be unavoidable without a sophisticated algortihm...).
#'
#' @examples
#' data("tcga.gbm",package="NEArender")
#' dim(tcga.gbm)
#' ags.list <- mutations2ags(tcga.gbm, col.mask="[-.]01$")
#' length(ags.list)
#' length(unique(unlist(ags.list)))
#' @export
# Build one altered-gene-set (AGS) per sample from a genes x samples
# mutation matrix: every non-NA entry in a column counts as a mutation, and
# the AGS for that sample is the unique set of (lower-cased) gene names.
# Mutation types are ignored; wild type must be encoded as NA.
mutations2ags <- function(MUT, col.mask = NA, namesFromColumn = NA, permute = FALSE) {
  if (is.null(MUT)) {
    stop("Not enough parameters...")
  }
  # Optionally skip leading annotation columns.
  m1 <- if (is.na(namesFromColumn)) MUT else MUT[, (namesFromColumn + 1):ncol(MUT)]
  # Optionally keep only samples whose ID matches the regex mask.
  if (!is.na(col.mask)) {
    keep <- grep(col.mask, colnames(m1))
    m1 <- m1[, colnames(m1)[keep]]
  }
  # One gene set per column: lower-cased row names of the non-NA entries.
  ags <- apply(m1, 2, function(profile) {
    unique(tolower(names(profile))[which(!is.na(profile))])
  })
  if (permute) {
    ags <- permute.gs(ags)
  }
  return(ags)
}
permute.gs <- function (GS, Plot=FALSE) {
pmgs <- as.list(NULL); mmgs <- unlist(GS);
fmgs <- table(mmgs) / length(GS); # fmgs <- fmgs / sum(fmgs);
for (m in names(GS)) {
pmgs[[m]] <- sample(x = names(fmgs), size = length(GS[[m]]), replace = FALSE, prob = fmgs);
}
print("Gene set permutation done.");
if (Plot) {
plot(table(unlist(GS))[names(fmgs)], table(unlist(pmgs))[names(fmgs)], xlab="Original", ylab="Permuted", main="#Samples / gene");
abline(0,1,lty=2, col="grey");
plot(sapply(GS, length)[names(GS)], sapply(pmgs, length)[names(GS)], xlab="Original", ylab="Permuted", main="#Genes / sample")
abline(0,1,lty=2, col="grey");
}
return(pmgs);
}
| /R/mutations2ags.r | no_license | ashwini06/NEArender | R | false | false | 3,249 | r | #' Create AGS from a mutation matrix
#'
#' Imports a TAB-delimited file with mutations.
#' This function creates a new list of AGSs from a table listing point (or otherwise qualitatively defined) mutations. Such a matrix M typically has size Ngenes x Nsamples, so that the current function returns a list of \code{length=ncol(M)}. For each of the Nsamples, AGSs are created as simple lists of all mutated genes G in a given sample S, i.e. any value X in the matrix M that satisfies condition \code{!is.na(X)} would be treated as a mutation. Eventual mutation types / categories are ignored. Wild type gene states in the original TAB-delimited file should be represented with NAs.
#' @param MUT Matrix of size \emph{Ngenes x Nsamples} (the both Ns are positive integers, depending on the screen scale).
#' @param col.mask To include only columns with IDs that contain the specified mask. This parameter is aware of regular expression syntax, i.e. uses \code{grep(..., fixed = FALSE)}.
#' @param namesFromColumn Number of the column (if any) that contains the gene/protein names. Note that it is only necessary if the latter are NOT the unique rownames of the matrix. This could be sometimes useful for processing redundant gene profiles with one-to-many mapping etc. Otherwise (i.e. the default), rownames shall contain gene IDs.
#' @param permute If the list of AGSs should be created via random permutation of sample labels. This might be needed for testing the null hypothesis that mutated genes are randomly combined into individual genomes, while having the same frequency distribution as in the actual cohort. Since reproducing the original distribution of AGS sizes is a non-trivial set theoretical problem, the procedure is accompanied by plotting gene set sizes in actual vs. permuted AGS (the latter is usually smaller, which might be unavoidable without a sophisticated algortihm...).
#'
#' @examples
#' data("tcga.gbm",package="NEArender")
#' dim(tcga.gbm)
#' ags.list <- mutations2ags(tcga.gbm, col.mask="[-.]01$")
#' length(ags.list)
#' length(unique(unlist(ags.list)))
#' @export
# Build one altered-gene-set (AGS) per sample from a genes x samples
# mutation matrix: every non-NA entry in a column counts as a mutation, and
# the AGS for that sample is the unique set of (lower-cased) gene names.
# Mutation types are ignored; wild type must be encoded as NA.
mutations2ags <- function(MUT, col.mask = NA, namesFromColumn = NA, permute = FALSE) {
  if (is.null(MUT)) {
    stop("Not enough parameters...")
  }
  # Optionally skip leading annotation columns.
  m1 <- if (is.na(namesFromColumn)) MUT else MUT[, (namesFromColumn + 1):ncol(MUT)]
  # Optionally keep only samples whose ID matches the regex mask.
  if (!is.na(col.mask)) {
    keep <- grep(col.mask, colnames(m1))
    m1 <- m1[, colnames(m1)[keep]]
  }
  # One gene set per column: lower-cased row names of the non-NA entries.
  ags <- apply(m1, 2, function(profile) {
    unique(tolower(names(profile))[which(!is.na(profile))])
  })
  if (permute) {
    ags <- permute.gs(ags)
  }
  return(ags)
}
permute.gs <- function (GS, Plot=FALSE) {
pmgs <- as.list(NULL); mmgs <- unlist(GS);
fmgs <- table(mmgs) / length(GS); # fmgs <- fmgs / sum(fmgs);
for (m in names(GS)) {
pmgs[[m]] <- sample(x = names(fmgs), size = length(GS[[m]]), replace = FALSE, prob = fmgs);
}
print("Gene set permutation done.");
if (Plot) {
plot(table(unlist(GS))[names(fmgs)], table(unlist(pmgs))[names(fmgs)], xlab="Original", ylab="Permuted", main="#Samples / gene");
abline(0,1,lty=2, col="grey");
plot(sapply(GS, length)[names(GS)], sapply(pmgs, length)[names(GS)], xlab="Original", ylab="Permuted", main="#Genes / sample")
abline(0,1,lty=2, col="grey");
}
return(pmgs);
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/stat-fit-broom.R
\name{stat_fit_tidy}
\alias{stat_fit_tidy}
\title{Return one row data frame with fitted parameter estimates.}
\usage{
stat_fit_tidy(mapping = NULL, data = NULL, geom = "null", method = "lm",
method.args = list(formula = y ~ x), label.x.npc = "left",
label.y.npc = "top", label.x = NULL, label.y = NULL,
position = "identity", na.rm = FALSE, show.legend = FALSE,
inherit.aes = TRUE, ...)
}
\arguments{
\item{mapping}{The aesthetic mapping, usually constructed with
\code{\link[ggplot2]{aes}} or \code{\link[ggplot2]{aes_string}}. Only needs
to be set at the layer level if you are overriding the plot defaults.}
\item{data}{A layer specific dataset - only needed if you want to override
the plot defaults.}
\item{geom}{The geometric object to use to display the data}
\item{method}{character.}
\item{method.args}{list of arguments to pass to \code{method}.}
\item{label.x.npc, label.y.npc}{\code{numeric} with range 0..1 or character.
Coordinates to be used for positioning the output, expressed in "normalized
parent coordinates" or character string. If too short they will be recycled.}
\item{label.x, label.y}{\code{numeric} Coordinates (in data units) to be used
for absolute positioning of the output. If too short they will be recycled.}
\item{position}{The position adjustment to use for overlapping points on this
layer}
\item{na.rm}{a logical indicating whether NA values should be stripped
before the computation proceeds.}
\item{show.legend}{logical. Should this layer be included in the legends?
\code{NA}, the default, includes if any aesthetics are mapped. \code{FALSE}
never includes, and \code{TRUE} always includes.}
\item{inherit.aes}{If \code{FALSE}, overrides the default aesthetics, rather
than combining with them. This is most useful for helper functions that
define both data and aesthetics and shouldn't inherit behaviour from the
default plot specification, e.g. \code{\link[ggplot2]{borders}}.}
\item{...}{other arguments passed on to \code{\link[ggplot2]{layer}}. This
can include aesthetics whose values you want to set, not map. See
\code{\link[ggplot2]{layer}} for more details.}
}
\description{
\code{stat_fit_tidy} fits a model and returns a "tidy" version of the
model's summary, using package 'broom'.
}
\section{Computed variables}{
The output of \code{\link[broom]{tidy}} is returned after reshaping it
into a single row.
}
| /man/stat_fit_tidy.Rd | no_license | kassambara/ggpmisc | R | false | true | 2,539 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/stat-fit-broom.R
\name{stat_fit_tidy}
\alias{stat_fit_tidy}
\title{Return one row data frame with fitted parameter estimates.}
\usage{
stat_fit_tidy(mapping = NULL, data = NULL, geom = "null", method = "lm",
method.args = list(formula = y ~ x), label.x.npc = "left",
label.y.npc = "top", label.x = NULL, label.y = NULL,
position = "identity", na.rm = FALSE, show.legend = FALSE,
inherit.aes = TRUE, ...)
}
\arguments{
\item{mapping}{The aesthetic mapping, usually constructed with
\code{\link[ggplot2]{aes}} or \code{\link[ggplot2]{aes_string}}. Only needs
to be set at the layer level if you are overriding the plot defaults.}
\item{data}{A layer specific dataset - only needed if you want to override
the plot defaults.}
\item{geom}{The geometric object to use display the data}
\item{method}{character.}
\item{method.args}{list of arguments to pass to \code{method}.}
\item{label.x.npc, label.y.npc}{\code{numeric} with range 0..1 or character.
Coordinates to be used for positioning the output, expressed in "normalized
parent coordinates" or character string. If too short they will be recycled.}
\item{label.x, label.y}{\code{numeric} Coordinates (in data units) to be used
for absolute positioning of the output. If too short they will be recycled.}
\item{position}{The position adjustment to use for overlapping points on this
layer}
\item{na.rm}{a logical indicating whether NA values should be stripped
before the computation proceeds.}
\item{show.legend}{logical. Should this layer be included in the legends?
\code{NA}, the default, includes if any aesthetics are mapped. \code{FALSE}
never includes, and \code{TRUE} always includes.}
\item{inherit.aes}{If \code{FALSE}, overrides the default aesthetics, rather
than combining with them. This is most useful for helper functions that
define both data and aesthetics and shouldn't inherit behaviour from the
default plot specification, e.g. \code{\link[ggplot2]{borders}}.}
\item{...}{other arguments passed on to \code{\link[ggplot2]{layer}}. This
can include aesthetics whose values you want to set, not map. See
\code{\link[ggplot2]{layer}} for more details.}
}
\description{
\code{stat_fit_tidy} fits a model and returns a "tidy" version of the
model's summary, using package 'broom'.
}
\section{Computed variables}{
The output of \code{\link[broom]{tidy}} is returned after reshaping it
into a single row.
}
|
# Single Decoction UI
singleDecoctionUI <- function(){
tabPanel("Single Decoction",
sidebarLayout(
sidebarPanel(width = 4,
h3("Inputs:"),
fluidRow(width = 4,
column(width = 4,
numericInput(inputId = "mashGrainTemp",label = paste0("Grain Temp(","\U00B0","F)"),value = 70)
),
column(width = 4,
numericInput(inputId = "mashSaccRestTemp",label = paste0("Sacc. Rest Temp (","\U00B0","F)"),value = 150)
)
),
numericInput(inputId = "mashDuration",label = "Sacc Rest Duration (min)",value = 60),
h3("Outputs:"),
fluidRow(width = 4,
column(width = 4,
strong("Mash Vol (Gal):"),
verbatimTextOutput(outputId = "singleDecoctionMashVol")
),
column(width = 4,
strong(paste0("Infusion Temp(","\U00B0","F)")),
verbatimTextOutput(outputId = "singleDecoctionInfusionTemp")
)
),
fluidRow(width = 4,
column(width = 4,
strong("Thickness (Qts/Lb):"),
verbatimTextOutput(outputId = "singleDecoctionMashThickness")
),
column(width = 4,
strong("Total Grain (lbs):"),
verbatimTextOutput(outputId = "singleDecoctionTotalGrain")
)
),
fluidRow(
column(width = 4,
strong("Mash Out Vol (Gal):"),
verbatimTextOutput(outputId = "singleDecoctionMashOutVolume")
),
column(width = 4,
strong("Boil Time (min):"),
verbatimTextOutput(outputId = "singleDecoctionBoilTime")
)
)
),
mainPanel(width = 8,
plotOutput(outputId = "singleDecoctionStepMashPlot")
)
)
)
} | /App/Mash/SingleDecoction/SingleDecoctionUI.R | no_license | BenjaminBearce/BK_Brew | R | false | false | 3,518 | r | # Single Decoction UI
# Build the "Single Decoction" tab: mash inputs and computed outputs in the
# sidebar, step-mash temperature plot in the main panel. Returns a shiny
# tabPanel tag list; all input/output IDs are consumed by the server side.
singleDecoctionUI <- function(){
  # Row of temperature inputs.
  input_row <- fluidRow(width = 4,
    column(width = 4,
      numericInput(inputId = "mashGrainTemp",
                   label = paste0("Grain Temp(", "\U00B0", "F)"),
                   value = 70)
    ),
    column(width = 4,
      numericInput(inputId = "mashSaccRestTemp",
                   label = paste0("Sacc. Rest Temp (", "\U00B0", "F)"),
                   value = 150)
    )
  )
  # Rows of computed read-only outputs.
  out_row_1 <- fluidRow(width = 4,
    column(width = 4,
      strong("Mash Vol (Gal):"),
      verbatimTextOutput(outputId = "singleDecoctionMashVol")
    ),
    column(width = 4,
      strong(paste0("Infusion Temp(", "\U00B0", "F)")),
      verbatimTextOutput(outputId = "singleDecoctionInfusionTemp")
    )
  )
  out_row_2 <- fluidRow(width = 4,
    column(width = 4,
      strong("Thickness (Qts/Lb):"),
      verbatimTextOutput(outputId = "singleDecoctionMashThickness")
    ),
    column(width = 4,
      strong("Total Grain (lbs):"),
      verbatimTextOutput(outputId = "singleDecoctionTotalGrain")
    )
  )
  out_row_3 <- fluidRow(
    column(width = 4,
      strong("Mash Out Vol (Gal):"),
      verbatimTextOutput(outputId = "singleDecoctionMashOutVolume")
    ),
    column(width = 4,
      strong("Boil Time (min):"),
      verbatimTextOutput(outputId = "singleDecoctionBoilTime")
    )
  )
  tabPanel("Single Decoction",
    sidebarLayout(
      sidebarPanel(width = 4,
        h3("Inputs:"),
        input_row,
        numericInput(inputId = "mashDuration",
                     label = "Sacc Rest Duration (min)",
                     value = 60),
        h3("Outputs:"),
        out_row_1,
        out_row_2,
        out_row_3
      ),
      mainPanel(width = 8,
        plotOutput(outputId = "singleDecoctionStepMashPlot")
      )
    )
  )
}
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/CDFFunctions.R
\name{plotCDF}
\alias{plotCDF}
\title{Title}
\usage{
plotCDF(..., realData = NULL, plotPars = NULL, linesPars = NULL)
}
\arguments{
\item{...}{Parameters for \code{plotCDFValues}}
\item{realData}{A data frame with 2 columns: the first should be an
\code{rt} column and the second should be a \code{response} column that
has a 0 for each lower boundary response and a 1 for each upper boundary
response.}
\item{plotPars}{A named list of arguments to pass to the \code{plot()}
call.}
\item{linesPars}{A named list of arguments to pass to the \code{lines()}
call that plots the \code{realData} cdf (if provided).}
}
\description{
Title
}
| /man/plotCDF.Rd | no_license | Cmell/diffR | R | false | true | 732 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/CDFFunctions.R
\name{plotCDF}
\alias{plotCDF}
\title{Title}
\usage{
plotCDF(..., realData = NULL, plotPars = NULL, linesPars = NULL)
}
\arguments{
\item{...}{Parameters for \code{plotCDFValues}}
\item{realData}{A data frame with 2 columns: the first should be an
\code{rt} column and the second should be a \code{response} column that
has a 0 for each lower boundary response and a 1 for each upper boundary
response.}
\item{plotPars}{A named list of arguments to pass to the \code{plot()}
call.}
\item{linesPars}{A named list of arguments to pass to the \code{lines()}
call that plots the \code{realData} cdf (if provided).}
}
\description{
Title
}
|
#' First variant of M-GRAF model
#'
#' \code{MGRAF1} returns the estimated common structure Z and subject-specific
#' low rank components \eqn{Q_i} and \eqn{\Lambda_i} for multiple undirected
#' graphs.
#'
#' The subject-specific deviation \eqn{D_i} is decomposed into \deqn{D_i = Q_i
#' * \Lambda_i * Q_i^{\top},} where each \eqn{Q_i} is a VxK orthonormal matrix and
#' each \eqn{\Lambda_i} is a KxK diagonal matrix.
#'
#' @param A Binary array with size VxVxn storing the VxV symmetric adjacency
#' matrices of n graphs.
#' @param K An integer that specifies the latent dimension of the graphs
#' @param tol A numeric scalar that specifies the convergence threshold of CISE
#' algorithm. CISE iteration continues until the absolute percent change in
#' joint log-likelihood is smaller than this value. Default is tol = 0.01.
#' @param maxit An integer that specifies the maximum number of iterations.
#' Default is maxit = 5.
#'
#' @return A list is returned containing the ingredients below from M-GRAF1
#' model corresponding to the largest log-likelihood over iterations.
#' \item{Z}{A numeric vector containing the lower triangular entries in the
#' estimated matrix Z.} \item{Lambda}{Kxn matrix where each column stores the
#' diagonal entries in \eqn{\Lambda_i}.} \item{Q}{VxKxn array containing the
#' estimated VxK orthonormal matrix \eqn{Q_i}, i=1,...,n.} \item{D_LT}{Lxn
#' matrix where each column stores the lower triangular entries in \eqn{D_i =
#' Q_i * \Lambda_i * Q_i^{\top}}; L=V(V-1)/2.} \item{LL_max}{Maximum
#' log-likelihood across iterations.} \item{LL}{Joint log-likelihood at each
#' iteration.}
#'
#' @examples
#' data(A)
#' n = dim(A)[3]
#' subs = sample.int(n=n,size=30)
#' A_sub = A[ , , subs]
#' res = MGRAF1(A=A_sub, K=3, tol=0.01, maxit=5)
#'
#' @import gdata
#' @import Matrix
#' @import glmnet
#' @import rARPACK
#' @importFrom stats coef sd
#' @export
####################################################################################################################################### CISE algorithm to estimate common structure and low-dimensional individual
####################################################################################################################################### structure of multiple undirected binary networks #####
##------ M-GRAF Model ----------------------------------------------##
## A_i ~ Bernoulli(\Pi_i) logit(\Pi_i) = Z + D_i = Z + Q_i %*% \Lambda_i %*%
## t(Q_i) The algorithm iterate between the following steps until convergence 1.
## Given Z, \Lambda_i, solve for Q_i by doing eigen-decomposition on (A_i-P_0),
## where P_0 = 1/(1+exp(-Z)) 2. Given Q_i, solve Z and \Lambda_i by logistic
## regression
# CISE algorithm for the first M-GRAF variant:
#   A_i ~ Bernoulli(Pi_i),  logit(Pi_i) = Z + Q_i %*% diag(Lambda_i) %*% t(Q_i)
# Alternates between (1) updating Q_i by eigen-decomposition of (A_i - P0)
# and (2) updating Z and Lambda_i by ridge-penalized logistic regression.
# Fixes vs. original: the duplicated missing(tol)/missing(maxit) checks are
# done once, the unused `ptm` timer is removed, and the best-so-far solution
# is initialized before the loop so the function no longer errors when no
# iteration improves the joint log-likelihood.
MGRAF1 <- function(A, K, tol, maxit) {
    n <- dim(A)[3]           # number of graphs
    V <- dim(A)[1]           # number of nodes
    L <- V * (V - 1)/2       # number of lower-triangular entries
    # Defaults and validation.
    if (missing(tol)) {
        tol <- 0.01
    }
    if (tol <= 0) {
        stop("threshold tol should be positive")
    }
    if (missing(maxit)) {
        maxit <- 5
    }
    ###### Initialization ----------------------------------------------
    ## P0 starts at the mean adjacency matrix A_bar.
    P0 <- apply(A, c(1, 2), sum)/n
    ## Z starts at the log odds of P0; probabilities are clipped into the
    ## open interval (0, 1) so the logit stays finite.
    vec_P0 <- lowerTriangle(P0)
    vec_P0[which(vec_P0 == 1)] <- 1 - (1e-16)  # note 1 - (1-(1e-17)) == 0 in R
    vec_P0[which(vec_P0 == 0)] <- 1e-16
    Z <- log(vec_P0/(1 - vec_P0))
    ## Lambda_i / Q_i start from the K largest-in-magnitude eigenpairs of
    ## (A_i - P0).
    Lambda <- matrix(0, nrow = K, ncol = n)
    Q <- array(0, c(V, K, n))
    for (i in 1:n) {
        ED <- eigs_sym(A = A[, , i] - P0, k = K, which = "LM")
        Lambda[, i] <- ED$values
        Q[, , i] <- ED$vectors
    }
    ###---------- compute initial log-likelihood -----------------------
    A_LT <- apply(A, 3, lowerTriangle)
    ## M_array[, k, i] = lower triangle of Q[, k, i] %*% t(Q[, k, i]).
    M_array <- apply(Q, c(2, 3), function(x) {
        lowerTriangle(tcrossprod(x))
    })  # LxKxn
    D_LT <- matrix(0, nrow = L, ncol = n)
    LL_A <- 0
    for (i in 1:n) {
        if (K == 1) {
            D_LT[, i] <- M_array[, , i] * Lambda[, i]
        } else {
            D_LT[, i] <- M_array[, , i] %*% Lambda[, i]
        }
        vec_Pi <- 1/(1 + exp(-Z - D_LT[, i]))
        vec_Pi[which(vec_Pi == 1)] <- 1 - (1e-16)
        vec_Pi[which(vec_Pi == 0)] <- 1e-16
        LL_A <- LL_A + sum(A_LT[, i] * log(vec_Pi) + (1 - A_LT[, i]) * log(1 - vec_Pi))
    }
    ###### Tune the glmnet ridge penalty -------------------------------
    y <- factor(c(A_LT))
    ### Penalty factors for Z and Lambda (prior precisions).
    phi_z <- 0.01
    s_l <- 2.5  # prior scale of lambda
    phi_lambda <- 1/(s_l^2)
    pen_fac <- c(rep(phi_z, L), rep(phi_lambda, n * K))
    # Normalize so that sum(pen_fac) equals the number of variables, L + n*K.
    const_pf <- sum(pen_fac)/(L + n * K)
    pen_fac <- pen_fac/const_pf
    # Candidate glmnet penalties.
    lambda_glm <- c(10^(0:-8), 0) * const_pf
    ### Design matrix: n stacked LxL identities for the shared Z ...
    design_int <- Diagonal(L)
    for (i in 2:n) {
        design_int <- rbind(design_int, Diagonal(L))
    }
    ## ... plus a block-diagonal predictor part: each M_array[, k, i]
    ## scaled to have sd 0.5.
    sd_M <- apply(M_array, c(2, 3), sd)  # Kxn
    M_list <- lapply(1:n, function(i) {
        if (K == 1) {
            M_array[, , i]/2/sd_M[, i]  # Lx1
        } else {
            sweep(M_array[, , i], 2, 2 * sd_M[, i], FUN = "/")  # LxK
        }
    })
    design_mat <- cbind(design_int, bdiag(M_list))
    rm(M_list)
    ## Cross-validate the ridge penalty.
    rglmModel <- cv.glmnet(x = design_mat, y = y, family = "binomial", alpha = 0,
        lambda = lambda_glm, standardize = FALSE, intercept = FALSE,
        penalty.factor = pen_fac, maxit = 200, nfolds = 5, parallel = FALSE)
    ind_lambda_opt <- which(lambda_glm == rglmModel$lambda.min)
    glm_coef <- coef(rglmModel, s = "lambda.min")[-1]
    ##----- update Z and P0 -----##
    Z <- glm_coef[1:L]  # Lx1
    P0 <- matrix(0, nrow = V, ncol = V)
    lowerTriangle(P0) <- 1/(1 + exp(-Z))
    P0 <- P0 + t(P0)
    ##----- update Lambda -----##
    Lambda <- matrix(glm_coef[(L + 1):(L + n * K)], nrow = K, ncol = n)  # Kxn
    Lambda <- Lambda/sd_M/2  # undo the sd-0.5 scaling of M
    if (K > 1) {
        Lambda <- apply(Lambda, 2, sort, decreasing = TRUE)  # Kxn
    }
    #################### 2-step Iterative Algorithm ####################
    LL_seq <- numeric(maxit + 1)
    LL_seq[1] <- LL_A
    ## Fallback "best" solution so that *_best is always defined at return
    ## time even if no iteration improves the likelihood.
    D_LT_best <- D_LT
    Q_best <- Q
    Lambda_best <- Lambda
    Z_best <- Z
    LL_max <- LL_seq[1]
    for (st in 1:maxit) {
        ########### Update Q -------------------------------------------
        Q <- array(0, c(V, K, n))
        for (i in 1:n) {
            j <- sum(Lambda[, i] >= 0)  # number of non-negative lambdas for i
            if (j == 0) {
                ED <- eigs_sym(A = A[, , i] - P0, k = K, which = "SA")
                Q[, , i] <- ED$vectors
            } else if (j == K) {
                ED <- eigs_sym(A = A[, , i] - P0, k = K, which = "LA")
                Q[, , i] <- ED$vectors
            } else {
                # Mixed signs: take j largest and K-j smallest eigenpairs.
                ED1 <- eigs_sym(A = A[, , i] - P0, k = j, which = "LA")
                ED2 <- eigs_sym(A = A[, , i] - P0, k = K - j, which = "SA")
                Q[, , i] <- cbind(ED1$vectors, ED2$vectors)
            }
        }
        ######### joint log-likelihood ---------------------------------
        M_array <- apply(Q, c(2, 3), function(x) {
            lowerTriangle(tcrossprod(x))
        })  # LxKxn
        D_LT <- matrix(0, nrow = L, ncol = n)
        LL_A <- 0
        for (i in 1:n) {
            if (K == 1) {
                D_LT[, i] <- M_array[, , i] * Lambda[, i]
            } else {
                D_LT[, i] <- M_array[, , i] %*% Lambda[, i]
            }
            vec_Pi <- 1/(1 + exp(-Z - D_LT[, i]))
            vec_Pi[which(vec_Pi == 1)] <- 1 - (1e-16)
            vec_Pi[which(vec_Pi == 0)] <- 1e-16
            LL_A <- LL_A + sum(A_LT[, i] * log(vec_Pi) + (1 - A_LT[, i]) * log(1 - vec_Pi))
        }
        LL_seq[st + 1] <- LL_A
        print(st)  # progress indicator
        # Keep the best solution seen so far.
        if (LL_seq[st + 1] > max(LL_seq[1:st])) {
            D_LT_best <- D_LT
            Q_best <- Q
            Lambda_best <- Lambda
            Z_best <- Z
            LL_max <- LL_seq[st + 1]
        }
        # Stop when the relative change in log-likelihood falls below tol.
        if (abs(LL_seq[st + 1] - LL_seq[st])/abs(LL_seq[st]) < tol) {
            break
        }
        ############ rebuild the predictor block of the design matrix --
        sd_M <- apply(M_array, c(2, 3), sd)  # Kxn
        M_list <- lapply(1:n, function(i) {
            if (K == 1) {
                M_array[, , i]/2/sd_M[, i]  # Lx1
            } else {
                sweep(M_array[, , i], 2, 2 * sd_M[, i], FUN = "/")  # LxK
            }
        })
        design_mat <- cbind(design_int, bdiag(M_list))
        rm(M_list)
        ########## ridge logistic regression ---------------------------
        ## The lambda path down to the CV-optimal value provides warm
        ## starts; this is more robust than fitting at lambda_opt alone.
        rglmModel <- glmnet(x = design_mat, y = y, family = "binomial", alpha = 0,
            lambda = lambda_glm[1:ind_lambda_opt], standardize = FALSE,
            intercept = FALSE, penalty.factor = pen_fac, maxit = 200)
        ind_beta <- dim(rglmModel$beta)[2]
        ##----- update Z and P0 -----##
        Z <- rglmModel$beta[1:L, ind_beta]  # Lx1
        P0 <- matrix(0, nrow = V, ncol = V)
        lowerTriangle(P0) <- 1/(1 + exp(-Z))
        P0 <- P0 + t(P0)
        ##----- update Lambda -----##
        Lambda <- matrix(rglmModel$beta[(L + 1):(L + n * K), ind_beta],
            nrow = K, ncol = n)  # Kxn
        Lambda <- Lambda/sd_M/2  # undo the sd-0.5 scaling of M
        if (K > 1) {
            Lambda <- apply(Lambda, 2, sort, decreasing = TRUE)  # Kxn
        }
    }
    results <- list(Z = Z_best, Lambda = Lambda_best, Q = Q_best, D_LT = D_LT_best,
        LL_max = LL_max, LL = LL_seq)
    return(results)
}
| /R/MGRAF1.R | no_license | cran/CISE | R | false | false | 11,798 | r | #' First variant of M-GRAF model
#'
#' \code{MGRAF1} returns the estimated common structure Z and subject-specific
#' low rank components \eqn{Q_i} and \eqn{\Lambda_i} for multiple undirected
#' graphs.
#'
#' The subject-specific deviation \eqn{D_i} is decomposed into \deqn{D_i = Q_i
#' * \Lambda_i * Q_i^{\top},} where each \eqn{Q_i} is a VxK orthonormal matrix and
#' each \eqn{\Lambda_i} is a KxK diagonal matrix.
#'
#' @param A Binary array with size VxVxn storing the VxV symmetric adjacency
#' matrices of n graphs.
#' @param K An integer that specifies the latent dimension of the graphs
#' @param tol A numeric scalar that specifies the convergence threshold of CISE
#' algorithm. CISE iteration continues until the absolute percent change in
#' joint log-likelihood is smaller than this value. Default is tol = 0.01.
#' @param maxit An integer that specifies the maximum number of iterations.
#' Default is maxit = 5.
#'
#' @return A list is returned containing the ingredients below from M-GRAF1
#' model corresponding to the largest log-likelihood over iterations.
#' \item{Z}{A numeric vector containing the lower triangular entries in the
#' estimated matrix Z.} \item{Lambda}{Kxn matrix where each column stores the
#' diagonal entries in \eqn{\Lambda_i}.} \item{Q}{VxKxn array containing the
#' estimated VxK orthonormal matrix \eqn{Q_i}, i=1,...,n.} \item{D_LT}{Lxn
#' matrix where each column stores the lower triangular entries in \eqn{D_i =
#' Q_i * \Lambda_i * Q_i^{\top}}; L=V(V-1)/2.} \item{LL_max}{Maximum
#' log-likelihood across iterations.} \item{LL}{Joint log-likelihood at each
#' iteration.}
#'
#' @examples
#' data(A)
#' n = dim(A)[3]
#' subs = sample.int(n=n,size=30)
#' A_sub = A[ , , subs]
#' res = MGRAF1(A=A_sub, K=3, tol=0.01, maxit=5)
#'
#' @import gdata
#' @import Matrix
#' @import glmnet
#' @import rARPACK
#' @importFrom stats coef sd
#' @export
####################################################################################################################################### CISE algorithm to estimate common structure and low-dimensional individual
####################################################################################################################################### structure of multiple undirected binary networks #####
##------ M-GRAF Model ----------------------------------------------##
## A_i ~ Bernoulli(\Pi_i) logit(\Pi_i) = Z + D_i = Z + Q_i %*% \Lambda_i %*%
## t(Q_i) The algorithm iterate between the following steps until convergence 1.
## Given Z, \Lambda_i, solve for Q_i by doing eigen-decomposition on (A_i-P_0),
## where P_0 = 1/(1+exp(-Z)) 2. Given Q_i, solve Z and \Lambda_i by logistic
## regression
MGRAF1 = function(A, K, tol, maxit) {
n = dim(A)[3]
V = dim(A)[1]
L = V * (V - 1)/2
if (missing(tol)) {
tol = 0.01
}
if (tol<=0){
stop("threshold tol should be positive")
}
if (missing(maxit)) {
maxit = 5
}
###### Initialization ----------------------------------------------------------------
## initialize P_0 by A_bar
## -----------------------------------------------------------
P0 = apply(A, c(1, 2), sum)/n
## initialize Z by log odds of P0
vec_P0 = lowerTriangle(P0)
vec_P0[which(vec_P0 == 1)] = 1 - (1e-16) # note 1 - (1-(1e-17)) == 0 in R
vec_P0[which(vec_P0 == 0)] = 1e-16
Z = log(vec_P0/(1 - vec_P0))
## select the first K largest eigenvalue *in magnitude*
## ------------------------------------- initialize Lambda_i by eigenvalues of
## (A_i-P0)
Lambda = matrix(0, nrow = K, ncol = n)
# initialize Q_i by eigenvectors correspond to Lambda_i
Q = array(0, c(V, K, n))
for (i in 1:n) {
# select K largest eigenvalues in magnitude automatically sort them decreasingly
ED = eigs_sym(A = A[, , i] - P0, k = K, which = "LM")
Lambda[, i] = ED$values
Q[, , i] = ED$vectors
}
if (missing(tol)) {
tol = 0.01
}
if (missing(maxit)) {
maxit = 5
}
###---------- compute initial log-likelihood ---------------------------------------------------
A_LT = apply(A, 3, lowerTriangle)
## an array of lower-triangle of principal matrices specific to subject
M_array = apply(Q, c(2, 3), function(x) {
lowerTriangle(tcrossprod(x))
}) # LxKxn
D_LT = matrix(0, nrow = L, ncol = n)
LL_A = 0
for (i in 1:n) {
if (K == 1) {
D_LT[, i] = M_array[, , i] * Lambda[, i]
} else {
D_LT[, i] = M_array[, , i] %*% Lambda[, i]
}
vec_Pi = 1/(1 + exp(-Z - D_LT[, i]))
vec_Pi[which(vec_Pi == 1)] = 1 - (1e-16) # note 1 - (1-(1e-17)) == 0 in R
vec_Pi[which(vec_Pi == 0)] = 1e-16
LL_A = LL_A + sum(A_LT[, i] * log(vec_Pi) + (1 - A_LT[, i]) * log(1 - vec_Pi))
}
###################################################################################### TUNE PENALTY PARAMETER LAMBDA IN GLMNET ptm = proc.time() CONSTRUCT Y
###################################################################################### ----------------------------------------------------------------------
y = factor(c(A_LT))
### CONSTRUCT PENALTY FACTORS FOR Z AND LAMBDA ---------------------------------------
### prior precision of Z
phi_z = 0.01
# prior precision of lambda
s_l = 2.5 # prior scale
phi_lambda = 1/(s_l^2)
# penalty factor
pen_fac = c(rep(phi_z, L), rep(phi_lambda, n * K))
# normalize to ensure sum(pen_fac) = L+n*K, #variables
const_pf = sum(pen_fac)/(L + n * K)
pen_fac = pen_fac/const_pf
# glmnet penalty factor
lambda_glm = c(10^(0:-8), 0) * const_pf
### CONSTRUCT DESIGN-MATRIX ----------------------------------------------------------
### construct intercept part of design matrix
### -----------------------------------------
design_int = Diagonal(L)
for (i in 2:n) {
design_int = rbind(design_int, Diagonal(L))
}
## construct predictors M part of design matrix
## -------------------------------------- scale M
sd_M = apply(M_array, c(2, 3), sd) # Kxn
M_list = lapply(1:n, function(i) {
# scale M_array[,k,i] to have sd 0.5
if (K == 1) {
temp_M = M_array[, , i]/2/sd_M[, i] # Lx1
} else {
temp_M = sweep(M_array[, , i], 2, 2 * sd_M[, i], FUN = "/") # LxK
}
})
design_mat = cbind(design_int, bdiag(M_list))
rm(M_list)
## run cv.glmnet to determine optimal penalty lambda
rglmModel = cv.glmnet(x = design_mat, y = y, family = "binomial", alpha = 0, lambda = lambda_glm,
standardize = FALSE, intercept = FALSE, penalty.factor = pen_fac, maxit = 200,
nfolds = 5, parallel = FALSE) # type.measure=deviance
ind_lambda_opt = which(lambda_glm == rglmModel$lambda.min)
glm_coef = coef(rglmModel, s = "lambda.min")[-1]
##----- update Z and P0 -----##
Z = glm_coef[1:L] # Lx1
P0 = matrix(0, nrow = V, ncol = V)
lowerTriangle(P0) = 1/(1 + exp(-Z))
P0 = P0 + t(P0)
##----- update Lambda -----##
Lambda = matrix(glm_coef[(L + 1):(L + n * K)], nrow = K, ncol = n) # Kxn
# unscale Lambda
Lambda = Lambda/sd_M/2
# sort lambda
if (K > 1) {
Lambda = apply(Lambda, 2, sort, decreasing = TRUE) # Kxn
}
#################################################### 2-step Iterative Algorithm #####################
LL_seq = numeric(maxit + 1)
LL_seq[1] = LL_A
# elapse_time = numeric(maxit)
for (st in 1:maxit) {
ptm = proc.time()
########### Update Q ------------------------------------------------------------------
Q = array(0, c(V, K, n))
for (i in 1:n) {
# ED = eigen( A[,,i]-P0, symmetric=T) # evals sorted decreasingly
j = sum(Lambda[, i] >= 0) # number of lambda >0 for i
if (j == 0) {
ED = eigs_sym(A = A[, , i] - P0, k = K, which = "SA")
Q[, , i] = ED$vectors
} else if (j == K) {
ED = eigs_sym(A = A[, , i] - P0, k = K, which = "LA")
Q[, , i] = ED$vectors
} else {
ED1 = eigs_sym(A = A[, , i] - P0, k = j, which = "LA")
ED2 = eigs_sym(A = A[, , i] - P0, k = K - j, which = "SA")
Q[, , i] = cbind(ED1$vectors, ED2$vectors)
}
}
######### COMPUTE JOINT LOGLIKELIHOOD -----------------------------------------------------
######### an array of lower-triangle of principal matrices specific to subject
M_array = apply(Q, c(2, 3), function(x) {
lowerTriangle(tcrossprod(x))
}) # LxKxn
D_LT = matrix(0, nrow = L, ncol = n)
LL_A = 0
for (i in 1:n) {
if (K == 1) {
D_LT[, i] = M_array[, , i] * Lambda[, i]
} else {
D_LT[, i] = M_array[, , i] %*% Lambda[, i]
}
vec_Pi = 1/(1 + exp(-Z - D_LT[, i]))
vec_Pi[which(vec_Pi == 1)] = 1 - (1e-16) # note 1 - (1-(1e-17)) == 0 in R
vec_Pi[which(vec_Pi == 0)] = 1e-16
LL_A = LL_A + sum(A_LT[, i] * log(vec_Pi) + (1 - A_LT[, i]) * log(1 - vec_Pi))
}
LL_seq[st + 1] = LL_A
print(st)
if (LL_seq[st + 1] > max(LL_seq[1:st])) {
D_LT_best = D_LT
Q_best = Q
Lambda_best = Lambda
Z_best = Z
LL_max = LL_seq[st + 1]
}
if (abs(LL_seq[st + 1] - LL_seq[st])/abs(LL_seq[st]) < tol) {
break
}
############ CONSTRUCT DESIGN-MATRIX FOR LOGISTIC REGRESSION ----------------------------------
############ intercept part of design matrix has been constructed
############ -------------------------------------- construct predictors M part of design
############ matrix -------------------------------------- scale M
sd_M = apply(M_array, c(2, 3), sd) # Kxn
M_list = lapply(1:n, function(i) {
# scale M_array[,k,i] to have sd 0.5
if (K == 1) {
temp_M = M_array[, , i]/2/sd_M[, i] # Lx1
} else {
temp_M = sweep(M_array[, , i], 2, 2 * sd_M[, i], FUN = "/") # LxK
}
})
design_mat = cbind(design_int, bdiag(M_list))
rm(M_list)
########## LOGISTIC REGRESSION ------------------------------------------------------------
########## run a penalized logistic regression (ridge regression) Instead of setting penalty
########## = lambda_opt, we use a sequence of larger penalty parameters as warm starts. This
########## is more robust though may take longer time.
rglmModel = glmnet(x = design_mat, y = y, family = "binomial", alpha = 0, lambda = lambda_glm[1:ind_lambda_opt],
standardize = FALSE, intercept = FALSE, penalty.factor = pen_fac, maxit = 200)
ind_beta = dim(rglmModel$beta)[2]
##----- update Z and P0 -----##
Z = rglmModel$beta[1:L, ind_beta] # Lx1
P0 = matrix(0, nrow = V, ncol = V)
lowerTriangle(P0) = 1/(1 + exp(-Z))
P0 = P0 + t(P0)
##----- update Lambda -----##
Lambda = matrix(rglmModel$beta[(L + 1):(L + n * K), ind_beta], nrow = K, ncol = n) # Kxn
# unscale Lambda
Lambda = Lambda/sd_M/2
# sort lambda
if (K > 1) {
Lambda = apply(Lambda, 2, sort, decreasing = TRUE) # Kxn
}
# elapse_time[st] = as.numeric((proc.time()-ptm))[3]
}
results = list(Z = Z_best, Lambda = Lambda_best, Q = Q_best, D_LT = D_LT_best, LL_max = LL_max,
LL = LL_seq)
return(results)
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/predict.R
\name{predict.ranger}
\alias{predict.ranger}
\title{Ranger prediction}
\usage{
\method{predict}{ranger}(object, data = NULL, predict.all = FALSE,
num.trees = object$num.trees, type = "response",
se.method = "infjack", quantiles = c(0.1, 0.5, 0.9), seed = NULL,
num.threads = NULL, verbose = TRUE, ...)
}
\arguments{
\item{object}{Ranger \code{ranger} object.}
\item{data}{New test data of class \code{data.frame} or \code{gwaa.data} (GenABEL).}
\item{predict.all}{Return individual predictions for each tree instead of aggregated predictions for all trees. Return a matrix (sample x tree) for classification and regression, a 3d array for probability estimation (sample x class x tree) and survival (sample x time x tree).}
\item{num.trees}{Number of trees used for prediction. The first \code{num.trees} in the forest are used.}
\item{type}{Type of prediction. One of 'response', 'se', 'terminalNodes', 'quantiles' with default 'response'. See below for details.}
\item{se.method}{Method to compute standard errors. One of 'jack', 'infjack' with default 'infjack'. Only applicable if type = 'se'. See below for details.}
\item{quantiles}{Vector of quantiles for quantile prediction. Set \code{type = 'quantiles'} to use.}
\item{seed}{Random seed. Default is \code{NULL}, which generates the seed from \code{R}. Set to \code{0} to ignore the \code{R} seed. The seed is used in case of ties in classification mode.}
\item{num.threads}{Number of threads. Default is number of CPUs available.}
\item{verbose}{Verbose output on or off.}
\item{...}{further arguments passed to or from other methods.}
}
\value{
Object of class \code{ranger.prediction} with elements
\tabular{ll}{
\code{predictions} \tab Predicted classes/values (only for classification and regression) \cr
\code{unique.death.times} \tab Unique death times (only for survival). \cr
\code{chf} \tab Estimated cumulative hazard function for each sample (only for survival). \cr
\code{survival} \tab Estimated survival function for each sample (only for survival). \cr
\code{num.trees} \tab Number of trees. \cr
\code{num.independent.variables} \tab Number of independent variables. \cr
\code{treetype} \tab Type of forest/tree. Classification, regression or survival. \cr
\code{num.samples} \tab Number of samples.
}
}
\description{
Prediction with new data and a saved forest from Ranger.
}
\details{
For \code{type = 'response'} (the default), the predicted classes (classification), predicted numeric values (regression), predicted probabilities (probability estimation) or survival probabilities (survival) are returned.
For \code{type = 'se'}, the standard error of the predictions are returned (regression only). The jackknife-after-bootstrap or infinitesimal jackknife for bagging is used to estimate the standard errors based on out-of-bag predictions. See Wager et al. (2014) for details.
For \code{type = 'terminalNodes'}, the IDs of the terminal node in each tree for each observation in the given dataset are returned.
For \code{type = 'quantiles'}, the selected quantiles for each observation are estimated. See Meinshausen (2006) for details.
If \code{type = 'se'} is selected, the method to estimate the variances can be chosen with \code{se.method}. Set \code{se.method = 'jack'} for jackknife-after-bootstrap and \code{se.method = 'infjack'} for the infinitesimal jackknife for bagging.
For classification and \code{predict.all = TRUE}, factor levels are returned as numerics.
To retrieve the corresponding factor levels, use \code{rf$forest$levels}, if \code{rf} is the ranger object.
}
\references{
\itemize{
\item Wright, M. N. & Ziegler, A. (2017). ranger: A Fast Implementation of Random Forests for High Dimensional Data in C++ and R. J Stat Softw 77:1-17. \url{https://doi.org/10.18637/jss.v077.i01}.
\item Wager, S., Hastie T., & Efron, B. (2014). Confidence Intervals for Random Forests: The Jackknife and the Infinitesimal Jackknife. J Mach Learn Res 15:1625-1651. \url{http://jmlr.org/papers/v15/wager14a.html}.
\item Meinshausen (2006). Quantile Regression Forests. J Mach Learn Res 7:983-999. \url{http://www.jmlr.org/papers/v7/meinshausen06a.html}.
}
}
\seealso{
\code{\link{ranger}}
}
\author{
Marvin N. Wright
}
| /rangerts/man/predict.ranger.Rd | no_license | BenjaminGoehry/BlocRF | R | false | true | 4,389 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/predict.R
\name{predict.ranger}
\alias{predict.ranger}
\title{Ranger prediction}
\usage{
\method{predict}{ranger}(object, data = NULL, predict.all = FALSE,
num.trees = object$num.trees, type = "response",
se.method = "infjack", quantiles = c(0.1, 0.5, 0.9), seed = NULL,
num.threads = NULL, verbose = TRUE, ...)
}
\arguments{
\item{object}{Ranger \code{ranger} object.}
\item{data}{New test data of class \code{data.frame} or \code{gwaa.data} (GenABEL).}
\item{predict.all}{Return individual predictions for each tree instead of aggregated predictions for all trees. Return a matrix (sample x tree) for classification and regression, a 3d array for probability estimation (sample x class x tree) and survival (sample x time x tree).}
\item{num.trees}{Number of trees used for prediction. The first \code{num.trees} in the forest are used.}
\item{type}{Type of prediction. One of 'response', 'se', 'terminalNodes', 'quantiles' with default 'response'. See below for details.}
\item{se.method}{Method to compute standard errors. One of 'jack', 'infjack' with default 'infjack'. Only applicable if type = 'se'. See below for details.}
\item{quantiles}{Vector of quantiles for quantile prediction. Set \code{type = 'quantiles'} to use.}
\item{seed}{Random seed. Default is \code{NULL}, which generates the seed from \code{R}. Set to \code{0} to ignore the \code{R} seed. The seed is used in case of ties in classification mode.}
\item{num.threads}{Number of threads. Default is number of CPUs available.}
\item{verbose}{Verbose output on or off.}
\item{...}{further arguments passed to or from other methods.}
}
\value{
Object of class \code{ranger.prediction} with elements
\tabular{ll}{
\code{predictions} \tab Predicted classes/values (only for classification and regression) \cr
\code{unique.death.times} \tab Unique death times (only for survival). \cr
\code{chf} \tab Estimated cumulative hazard function for each sample (only for survival). \cr
\code{survival} \tab Estimated survival function for each sample (only for survival). \cr
\code{num.trees} \tab Number of trees. \cr
\code{num.independent.variables} \tab Number of independent variables. \cr
\code{treetype} \tab Type of forest/tree. Classification, regression or survival. \cr
\code{num.samples} \tab Number of samples.
}
}
\description{
Prediction with new data and a saved forest from Ranger.
}
\details{
For \code{type = 'response'} (the default), the predicted classes (classification), predicted numeric values (regression), predicted probabilities (probability estimation) or survival probabilities (survival) are returned.
For \code{type = 'se'}, the standard error of the predictions are returned (regression only). The jackknife-after-bootstrap or infinitesimal jackknife for bagging is used to estimate the standard errors based on out-of-bag predictions. See Wager et al. (2014) for details.
For \code{type = 'terminalNodes'}, the IDs of the terminal node in each tree for each observation in the given dataset are returned.
For \code{type = 'quantiles'}, the selected quantiles for each observation are estimated. See Meinshausen (2006) for details.
If \code{type = 'se'} is selected, the method to estimate the variances can be chosen with \code{se.method}. Set \code{se.method = 'jack'} for jackknife-after-bootstrap and \code{se.method = 'infjack'} for the infinitesimal jackknife for bagging.
For classification and \code{predict.all = TRUE}, factor levels are returned as numerics.
To retrieve the corresponding factor levels, use \code{rf$forest$levels}, if \code{rf} is the ranger object.
}
\references{
\itemize{
\item Wright, M. N. & Ziegler, A. (2017). ranger: A Fast Implementation of Random Forests for High Dimensional Data in C++ and R. J Stat Softw 77:1-17. \url{https://doi.org/10.18637/jss.v077.i01}.
\item Wager, S., Hastie T., & Efron, B. (2014). Confidence Intervals for Random Forests: The Jackknife and the Infinitesimal Jackknife. J Mach Learn Res 15:1625-1651. \url{http://jmlr.org/papers/v15/wager14a.html}.
\item Meinshausen (2006). Quantile Regression Forests. J Mach Learn Res 7:983-999. \url{http://www.jmlr.org/papers/v7/meinshausen06a.html}.
}
}
\seealso{
\code{\link{ranger}}
}
\author{
Marvin N. Wright
}
|
\name{GRCdata-package}
\alias{GRCdata-package}
\alias{GRCdata}
\docType{package}
\title{
Parameter inference and optimal designs for grouped and/or right-censored count data
}
\description{
This package consists of two main functions:
The first function uses a given grouped and/or right-censored grouping scheme and empirical data to infer parameters, and implements chi-square goodness-of-fit tests;
The second function searches for the global optimal grouping scheme of grouped and/or right-censored count responses in surveys.
This R package is designed to implement methods and algorithms developed in the following papers and please cite these articles at your convenience:
Qiang Fu, Xin Guo and Kenneth C. Land. Forthcoming. "A Poisson-Multinomial Mixture Approach to Grouped and Right-Censored Counts." Communications in Statistics -- Theory and Methods.
DOI: 10.1080/03610926.2017.1303736
(mainly about the first function for aggregate-level parameter inference)
Qiang Fu, Xin Guo and Kenneth C. Land. Conditionally accepted. "Optimizing Count Responses in Surveys: A Machine-Learning Approach." Sociological Methods & Research.
(mainly about the second function for finding optimal grouping schemes)
To install the package "GRCdata_1.0.tar.gz", one may place this
file in the working directory/folder of R, and type
\code{install.packages("GRCdata", repos = NULL, type = "source")}
To check the current working directory of R, one may type
\code{getwd()}
To see the source code, one could extract the package ``GRCdata_1.0.tar.gz''.
There would be two directories/folders: \code{man} and \code{R}. The source code
is under the \code{R} directory/folder.
}
\details{
\tabular{ll}{
Package: \tab GRCdata\cr
Type: \tab Package\cr
Version: \tab 1.0\cr
Date: \tab July 28, 2017\cr
License: \tab GPLv3\cr
}
%~~ An overview of how to use the package, including the most important functions ~~
}
\author{
Authors: Xin Guo <x.guo@polyu.edu.hk>, Qiang Fu <qiang.fu@ubc.ca>
Maintainers: Xin Guo <x.guo@polyu.edu.hk>
}
\references{
Qiang Fu, Xin Guo and Kenneth C. Land. Conditionally accepted. "Optimizing Count Responses in Surveys: A Machine-Learning Approach." Sociological Methods & Research.
Qiang Fu, Xin Guo and Kenneth C. Land. Forthcoming. "A Poisson-Multinomial Mixture Approach to Grouped and Right-Censored Counts." Communications in Statistics -- Theory and Methods.
DOI: 10.1080/03610926.2017.1303736
}
| /man/GRCdata-package.Rd | no_license | cran/GRCdata | R | false | false | 2,448 | rd | \name{GRCdata-package}
\alias{GRCdata-package}
\alias{GRCdata}
\docType{package}
\title{
Parameter inference and optimal designs for grouped and/or right-censored count data
}
\description{
This package consists of two main functions:
The first function uses a given grouped and/or right-censored grouping scheme and empirical data to infer parameters, and implements chi-square goodness-of-fit tests;
The second function searches for the global optimal grouping scheme of grouped and/or right-censored count responses in surveys.
This R package is designed to implement methods and algorithms developed in the following papers and please cite these articles at your convenience:
Qiang Fu, Xin Guo and Kenneth C. Land. Forthcoming. "A Poisson-Multinomial Mixture Approach to Grouped and Right-Censored Counts." Communications in Statistics -- Theory and Methods.
DOI: 10.1080/03610926.2017.1303736
(mainly about the first function for aggregate-level parameter inference)
Qiang Fu, Xin Guo and Kenneth C. Land. Conditionally accepted. "Optimizing Count Responses in Surveys: A Machine-Learning Approach." Sociological Methods & Research.
(mainly about the second function for finding optimal grouping schemes)
To install the package "GRCdata_1.0.tar.gz", one may place this
file in the working directory/folder of R, and type
\code{install.packages("GRCdata", repos = NULL, type = "source")}
To check the current working directory of R, one may type
\code{getwd()}
To see the source code, one could extract the package ``GRCdata_1.0.tar.gz''.
There would be two directories/folders: \code{man} and \code{R}. The source code
is under the \code{R} directory/folder.
}
\details{
\tabular{ll}{
Package: \tab GRCdata\cr
Type: \tab Package\cr
Version: \tab 1.0\cr
Date: \tab July 28, 2017\cr
License: \tab GPLv3\cr
}
%~~ An overview of how to use the package, including the most important functions ~~
}
\author{
Authors: Xin Guo <x.guo@polyu.edu.hk>, Qiang Fu <qiang.fu@ubc.ca>
Maintainers: Xin Guo <x.guo@polyu.edu.hk>
}
\references{
Qiang Fu, Xin Guo and Kenneth C. Land. Conditionally accepted. "Optimizing Count Responses in Surveys: A Machine-Learning Approach." Sociological Methods & Research.
Qiang Fu, Xin Guo and Kenneth C. Land. Forthcoming. "A Poisson-Multinomial Mixture Approach to Grouped and Right-Censored Counts." Communications in Statistics -- Theory and Methods.
DOI: 10.1080/03610926.2017.1303736
}
|
################################ nc_na ################################
## substitute NA for NC
# we asked poeople to mark non-recorded variables with "NC", but this was a mistake.
nc_na <- function(x) {
if (is.character(x[1])) {
x[x == "NC" | x == "nc"] <- NA
}
return(x)
}
################################ col_char ################################
## coerce column to character, if it exists
col_char <- function(col, d) {
if (quo_name(col) %in% names(d)) {
d <- d %>%
mutate(!! quo_name(col) := as.character(!! col))
}
return(d)
}
################################ clean_names ################################
# clean column names uniformly
clean_names <- function(cnames) {
cnames <- tolower(cnames)
cnames <- str_replace_all(cnames, " ","_")
cnames <- str_replace_all(cnames, "__","_")
cnames <- str_replace_all(cnames, "optional_","")
}
################################ read_trial_file ################################
# reads trials files and cleans them up
read_trial_file <- function(fname) {
print(paste0("reading ", fname))
td <- read_multiformat_file(path = "processed_data/trials_cleaned/",
fname = fname)
originaltd_rows = nrow(td)
# do some cleanup on variable names that were mangled by data entry
names(td) <- tolower(names(td))
names(td) <- str_replace_all(names(td), " ","_")
# vars that should be numeric
numeric_vars <- intersect(c("trial_num","total_trial_time","looking_time"),
names(td))
all_vars <- intersect(c("lab", "subid", "trial_type", "stimulus", "trial_num", "looking_time",
"total_trial_time", "trial_error", "trial_error_type"),
names(td))
# get rid of missing columns
# NC was used for non-recorded data, but in practice this destroys numeric columns
td_clean <- td %>%
select(-starts_with("x")) %>%
mutate_all(as.character) %>%
mutate_all(nc_na) %>%
mutate_at(vars(numeric_vars), as.numeric) %>%
select(all_vars) %>%
mutate(file = fname,
lab = lab[1]) # some lab fields have gaps, for some reason
lost_cols <- names(td)[!(names(td) %in% names(td_clean)) & (names(td) != "notes")]
row_msg = validate_that(originaltd_rows == nrow(td_clean))
if (is.character(row_msg)) {
print(paste0('----->WARNING: DROPPED ROWS: ', originaltd_rows - nrow(td_clean)))
}
if (length(lost_cols) > 0) {
print('----->WARNING: DROPPED COLS: ')
print(lost_cols)
}
return(td_clean)
}
################################ read_multiformat_file ################################
# reads files in various formats
read_multiformat_file <- function(path, fname) {
full_path <- paste0(path,fname)
if (str_detect(fname, ".xlsx")) {
d <- read_xlsx(full_path)
} else if (str_detect(fname, ".xls")) {
d <- read_xls(full_path)
} else if (str_detect(fname, ".csv")) {
# https://stackoverflow.com/questions/33417242/how-to-check-if-csv-file-has-a-comma-or-a-semicolon-as-separator
L <- readLines(full_path, n = 1)
numfields <- count.fields(textConnection(L), sep = ";")
if (numfields == 1) {
d <- read_csv(full_path, col_types = cols(parentA_gender = col_character(), ParentA_gender = col_character()))
} else {
d <- read_csv2(full_path, col_types = cols(parentA_gender = col_character(), ParentA_gender = col_character()))
}
} else if (str_detect(fname, ".txt")) {
d <- read_tsv(full_path, col_types = cols(parentA_gender = col_character(), ParentA_gender = col_character()))
}
return(d)
}
################################ clean_participant_file ################################
# cleans up files - this one was painful
clean_participant_file <- function(fname) {
print(paste0("reading ", fname))
pd <- read_multiformat_file(path = "processed_data/participants_cleaned/",
fname = fname)
# mark the original row/col names
originalpd_rows = nrow(pd)
originalpd_cols = ncol(pd)
# do some cleanup on variable names that were mangled by data entry
names(pd) <- clean_names(names(pd))
# get rid of missing columns
# NC was used for non-recorded data, but in practice this destroys numeric columns
pd <- pd %>%
select(-starts_with("x")) %>%
mutate_all(as.character) %>%
mutate_all(nc_na)
# remove duplicated columns (yes, this is a thing)
pd <- pd[!duplicated(names(pd), fromLast = TRUE)]
# get col names
these_cols <- filter(participants_columns, column %in% names(pd))
# rename, select, and re-type these
# note, NA coercion will lose bad data here, CHECK THIS.
pd_clean <- pd %>%
rename_at(vars(these_cols$column[these_cols$status == "sub"]),
~ these_cols$substitution[these_cols$status == "sub"])
# get them again post-cleaning
these_cols <- filter(participants_columns,
column %in% names(pd_clean))
# report what columns you lost
lost_cols <- names(pd_clean)[!(names(pd_clean) %in%
participants_columns$column[
participants_columns$status == "include"])]
# select and clean the included columns
pd_clean %<>%
select_at(vars(these_cols$column[these_cols$status == "include"])) %>%
mutate_at(vars(these_cols$column[these_cols$type == "numeric" &
!is.na(these_cols$type)]), as.numeric) %>%
filter(!is.na(lab))
# throw a message if rows are being dropped
row_msg = validate_that(originalpd_rows == nrow(pd_clean))
if (is.character(row_msg)) {
print(paste0('----->WARNING: DROPPED ROWS: ', originalpd_rows - nrow(pd_clean)))
}
# throw a message if columns are being dropped
col_msg = validate_that(originalpd_cols == ncol(pd_clean))
if (is.character(col_msg)) {
print(paste0('----->WARNING: DROPPED COLS: ', originalpd_cols - ncol(pd_clean)))
}
if (length(lost_cols) > 0) {
print(lost_cols)
}
# add filename last so it doesn't mess up column checking
pd_clean %<>%
mutate(file = fname)
return(pd_clean)
}
################################ lang_exp_to_numeric ################################
## removes excess text from language exposure columns and converts to numeric
lang_exp_to_numeric <- function(my_column){
x <- as.numeric(str_replace_all(my_column, "%", "") %>%
str_replace_all("% in books", ""))
}
################################ exclude_by ################################
# excludes on a column and outputs the percentages for exclusion
# - flag return_pcts means you get a list with the data and other exclusion stats
# - flag action flips the column polarity
exclude_by <- function(d, col, setting = "all", action = "exclude",
return_pcts = FALSE, quiet = TRUE) {
# if this is an include-by variable, flip polarity
if (action == "include") {
d <- d %>%
mutate(!! quo_name(col) := ! (!! col))
}
if (!quiet) print(paste("filtering by", quo_name(col)))
percent_trial <- d %>%
ungroup %>%
summarise(trial_sum = sum(!! col, na.rm=TRUE),
trial_mean = mean(!! col, na.rm=TRUE))
percent_sub <- d %>%
group_by(lab, subid) %>%
summarise(any = any(!! col),
all = all(!! col)) %>%
ungroup %>%
summarise(any_sum = sum(any, na.rm=TRUE),
any_mean = mean(any, na.rm=TRUE),
all_sum = sum(all, na.rm=TRUE),
all_mean = mean(all, na.rm=TRUE))
if (!quiet) {
print(paste("This variable excludes", percent_trial$trial_sum, "trials, which is ",
round(percent_trial$trial_mean*100, digits = 1), "% of all trials."))
if (setting == "any") {
print(paste(percent_sub$any_sum, " subjects,",
round(percent_sub$any_mean*100, digits = 1),
"%, have any trials where", quo_name(col), "is true."))
} else if (setting == "all") {
print(paste(percent_sub$all_sum, " subjects,",
round(percent_sub$all_mean*100, digits = 1),
"%, have all trials where", quo_name(col), "is",
ifelse(action == "include", "true:", "false:"),
action))
}
}
if (action=="NA out") {
d <- mutate(d,
looking_time = ifelse(!! col, NA, looking_time))
} else {
d <- filter(d, !( !! col))
}
if (return_pcts) {
return(list(data = d,
percents = percent_sub,
percent_trials = percent_trial))
} else {
return(d)
}
} | /helper/preprocessing_helper.R | permissive | jkosie/mb1-analysis-public | R | false | false | 8,671 | r | ################################ nc_na ################################
## substitute NA for NC
# we asked poeople to mark non-recorded variables with "NC", but this was a mistake.
nc_na <- function(x) {
if (is.character(x[1])) {
x[x == "NC" | x == "nc"] <- NA
}
return(x)
}
################################ col_char ################################
## coerce column to character, if it exists
col_char <- function(col, d) {
if (quo_name(col) %in% names(d)) {
d <- d %>%
mutate(!! quo_name(col) := as.character(!! col))
}
return(d)
}
################################ clean_names ################################
# clean column names uniformly
clean_names <- function(cnames) {
cnames <- tolower(cnames)
cnames <- str_replace_all(cnames, " ","_")
cnames <- str_replace_all(cnames, "__","_")
cnames <- str_replace_all(cnames, "optional_","")
}
################################ read_trial_file ################################
# reads trials files and cleans them up
read_trial_file <- function(fname) {
print(paste0("reading ", fname))
td <- read_multiformat_file(path = "processed_data/trials_cleaned/",
fname = fname)
originaltd_rows = nrow(td)
# do some cleanup on variable names that were mangled by data entry
names(td) <- tolower(names(td))
names(td) <- str_replace_all(names(td), " ","_")
# vars that should be numeric
numeric_vars <- intersect(c("trial_num","total_trial_time","looking_time"),
names(td))
all_vars <- intersect(c("lab", "subid", "trial_type", "stimulus", "trial_num", "looking_time",
"total_trial_time", "trial_error", "trial_error_type"),
names(td))
# get rid of missing columns
# NC was used for non-recorded data, but in practice this destroys numeric columns
td_clean <- td %>%
select(-starts_with("x")) %>%
mutate_all(as.character) %>%
mutate_all(nc_na) %>%
mutate_at(vars(numeric_vars), as.numeric) %>%
select(all_vars) %>%
mutate(file = fname,
lab = lab[1]) # some lab fields have gaps, for some reason
lost_cols <- names(td)[!(names(td) %in% names(td_clean)) & (names(td) != "notes")]
row_msg = validate_that(originaltd_rows == nrow(td_clean))
if (is.character(row_msg)) {
print(paste0('----->WARNING: DROPPED ROWS: ', originaltd_rows - nrow(td_clean)))
}
if (length(lost_cols) > 0) {
print('----->WARNING: DROPPED COLS: ')
print(lost_cols)
}
return(td_clean)
}
################################ read_multiformat_file ################################
# reads files in various formats
read_multiformat_file <- function(path, fname) {
full_path <- paste0(path,fname)
if (str_detect(fname, ".xlsx")) {
d <- read_xlsx(full_path)
} else if (str_detect(fname, ".xls")) {
d <- read_xls(full_path)
} else if (str_detect(fname, ".csv")) {
# https://stackoverflow.com/questions/33417242/how-to-check-if-csv-file-has-a-comma-or-a-semicolon-as-separator
L <- readLines(full_path, n = 1)
numfields <- count.fields(textConnection(L), sep = ";")
if (numfields == 1) {
d <- read_csv(full_path, col_types = cols(parentA_gender = col_character(), ParentA_gender = col_character()))
} else {
d <- read_csv2(full_path, col_types = cols(parentA_gender = col_character(), ParentA_gender = col_character()))
}
} else if (str_detect(fname, ".txt")) {
d <- read_tsv(full_path, col_types = cols(parentA_gender = col_character(), ParentA_gender = col_character()))
}
return(d)
}
################################ clean_participant_file ################################
# cleans up files - this one was painful
# Read one participant file, normalize its columns against the global
# `participants_columns` spec (columns: column, status, substitution, type),
# and print warnings for any rows/columns that get dropped.
#
# Depends on objects defined elsewhere in this project: participants_columns,
# clean_names(), nc_na(), read_multiformat_file() -- TODO confirm all are in
# scope wherever this is sourced.
#
# Returns a tibble with renamed/re-typed "include" columns plus a `file`
# column recording the source filename.
clean_participant_file <- function(fname) {
  print(paste0("reading ", fname))
  pd <- read_multiformat_file(path = "processed_data/participants_cleaned/",
                              fname = fname)
  # mark the original row/col counts so we can report drops at the end
  originalpd_rows = nrow(pd)
  originalpd_cols = ncol(pd)
  # do some cleanup on variable names that were mangled by data entry
  names(pd) <- clean_names(names(pd))
  # get rid of missing columns (clean_names maps unnamed ones to "x...")
  # NC was used for non-recorded data, but in practice this destroys numeric
  # columns, so everything is coerced to character first and NC -> NA applied
  pd <- pd %>%
    select(-starts_with("x")) %>%
    mutate_all(as.character) %>%
    mutate_all(nc_na)
  # remove duplicated columns (yes, this is a thing);
  # fromLast = TRUE keeps the LAST occurrence of each duplicated name
  pd <- pd[!duplicated(names(pd), fromLast = TRUE)]
  # spec rows for the columns actually present in this file
  these_cols <- filter(participants_columns, column %in% names(pd))
  # rename, select, and re-type these
  # note, NA coercion will lose bad data here, CHECK THIS.
  pd_clean <- pd %>%
    rename_at(vars(these_cols$column[these_cols$status == "sub"]),
              ~ these_cols$substitution[these_cols$status == "sub"])
  # get them again post-cleaning (renames changed the column names)
  these_cols <- filter(participants_columns,
                       column %in% names(pd_clean))
  # report what columns you lost (present but not status == "include")
  lost_cols <- names(pd_clean)[!(names(pd_clean) %in%
                                   participants_columns$column[
                                     participants_columns$status == "include"])]
  # select and clean the included columns; rows without a lab ID are dropped
  pd_clean %<>%
    select_at(vars(these_cols$column[these_cols$status == "include"])) %>%
    mutate_at(vars(these_cols$column[these_cols$type == "numeric" &
                                       !is.na(these_cols$type)]), as.numeric) %>%
    filter(!is.na(lab))
  # throw a message if rows are being dropped
  # (validate_that returns TRUE on success, a character message on failure)
  row_msg = validate_that(originalpd_rows == nrow(pd_clean))
  if (is.character(row_msg)) {
    print(paste0('----->WARNING: DROPPED ROWS: ', originalpd_rows - nrow(pd_clean)))
  }
  # throw a message if columns are being dropped
  col_msg = validate_that(originalpd_cols == ncol(pd_clean))
  if (is.character(col_msg)) {
    print(paste0('----->WARNING: DROPPED COLS: ', originalpd_cols - ncol(pd_clean)))
  }
  if (length(lost_cols) > 0) {
    print(lost_cols)
  }
  # add filename last so it doesn't mess up column checking
  pd_clean %<>%
    mutate(file = fname)
  return(pd_clean)
}
################################ lang_exp_to_numeric ################################
## removes excess text from language exposure columns and converts to numeric
# Strip "%" / "% in books" annotations from a character vector of language
# exposure values and coerce the remainder to numeric.
#
# Fixes: the original removed "%" first, which made the subsequent
# "% in books" pattern unmatchable and left " in books" behind, so values
# like "30% in books" coerced to NA. The longer pattern is now removed
# first; fixed = TRUE avoids treating the text as a regex.
#
# @param my_column Character vector, e.g. c("50%", "30% in books").
# @return Numeric vector the same length as `my_column`.
lang_exp_to_numeric <- function(my_column) {
  cleaned <- gsub("% in books", "", my_column, fixed = TRUE)
  cleaned <- gsub("%", "", cleaned, fixed = TRUE)
  as.numeric(trimws(cleaned))
}
################################ exclude_by ################################
# excludes on a column and outputs the percentages for exclusion
# - flag return_pcts means you get a list with the data and other exclusion stats
# - flag action flips the column polarity
# Exclude (or NA-out) trials flagged by logical column `col` (passed as an
# rlang quosure) and optionally report exclusion stats.
#
#   setting     - "any"/"all": which subject-level stat to report when !quiet
#   action      - "exclude" (drop flagged rows), "include" (flip polarity
#                 first, so TRUE means keep), or "NA out" (keep rows but
#                 blank looking_time)
#   return_pcts - if TRUE, return list(data, percents, percent_trials)
#                 instead of just the filtered data
#   quiet       - if FALSE, print a report of trial/subject exclusion rates
#
# Assumes `d` has columns lab, subid, looking_time -- TODO confirm callers.
exclude_by <- function(d, col, setting = "all", action = "exclude",
                       return_pcts = FALSE, quiet = TRUE) {
  # if this is an include-by variable, flip polarity
  if (action == "include") {
    d <- d %>%
      mutate(!! quo_name(col) := ! (!! col))
  }
  if (!quiet) print(paste("filtering by", quo_name(col)))
  # trial-level: number and proportion of flagged trials
  percent_trial <- d %>%
    ungroup %>%
    summarise(trial_sum = sum(!! col, na.rm=TRUE),
              trial_mean = mean(!! col, na.rm=TRUE))
  # subject-level: subjects with any / all of their trials flagged
  percent_sub <- d %>%
    group_by(lab, subid) %>%
    summarise(any = any(!! col),
              all = all(!! col)) %>%
    ungroup %>%
    summarise(any_sum = sum(any, na.rm=TRUE),
              any_mean = mean(any, na.rm=TRUE),
              all_sum = sum(all, na.rm=TRUE),
              all_mean = mean(all, na.rm=TRUE))
  if (!quiet) {
    print(paste("This variable excludes", percent_trial$trial_sum, "trials, which is ",
                round(percent_trial$trial_mean*100, digits = 1), "% of all trials."))
    if (setting == "any") {
      print(paste(percent_sub$any_sum, " subjects,",
                  round(percent_sub$any_mean*100, digits = 1),
                  "%, have any trials where", quo_name(col), "is true."))
    } else if (setting == "all") {
      print(paste(percent_sub$all_sum, " subjects,",
                  round(percent_sub$all_mean*100, digits = 1),
                  "%, have all trials where", quo_name(col), "is",
                  ifelse(action == "include", "true:", "false:"),
                  action))
    }
  }
  # apply the action: blank looking_time, or drop the flagged rows
  if (action=="NA out") {
    d <- mutate(d,
                looking_time = ifelse(!! col, NA, looking_time))
  } else {
    d <- filter(d, !( !! col))
  }
  if (return_pcts) {
    return(list(data = d,
                percents = percent_sub,
                percent_trials = percent_trial))
  } else {
    return(d)
  }
}
# Module UI
#' @title mod_my_first_module_ui and mod_my_first_module_server
#' @description A shiny Module.
#'
#' @param id shiny id
#' @param input internal
#' @param output internal
#' @param session internal
#'
#' @rdname mod_my_first_module
#'
#' @keywords internal
#' @export
#' @importFrom shiny NS tagList
mod_my_first_module_ui <- function(id){
  ns <- NS(id)

  # Static image panel.
  image_panel <- col_6(
    tags$div(
      align = "center",
      tags$img(
        src = "www/guit.jpg", width = "50%", align = "center"
      )
    )
  )

  # Table rendered by the module server.
  table_panel <- col_6(
    tableOutput(ns("df"))
  )

  # Plain HTML button wired to a JS handler defined in the app assets.
  alert_panel <- col_6(
    tags$div(
      align = "center",
      tags$button("Alert!", onclick = "alertme();")
    )
  )

  # Shiny action button observed by the module server.
  go_panel <- col_6(
    tags$div(
      align = "center",
      actionButton(ns("go"), "Go!")
    )
  )

  tagList(image_panel, table_panel, alert_panel, go_panel)
}
# Module Server
#' @rdname mod_my_first_module
#' @export
#' @keywords internal
mod_my_first_module_server <- function(input, output, session, r){
  ns <- session$ns

  # NOTE(review): `dataset` is not defined in this module or its arguments;
  # presumably a package-level or global object -- confirm it exists when
  # the app runs.
  output$df <- renderTable({
    dataset
  })

  # Invoke the "alertarg" JS handler (registered via golem) on button press.
  observeEvent( input$go , {
    golem::invoke_js("alertarg", "12")
  })
}
#' my_first_module UI Function
#'
#' @description A shiny Module.
#'
#' @param id,input,output,session Internal parameters for {shiny}.
#'
#' @noRd
#'
#' @importFrom shiny NS tagList
mod_my_first_module_ui <- function(id){
  # NOTE(review): this empty stub redefines mod_my_first_module_ui from
  # earlier in this file; when sourced top-to-bottom it masks the working
  # UI above -- confirm which definition is intended to win.
  ns <- NS(id)
  tagList(
  )
}
#' my_first_module Server Function
#'
#' @noRd
mod_my_first_module_server <- function(input, output, session){
  # NOTE(review): empty stub; redefines (and masks) the server function
  # defined earlier in this file -- confirm which definition is intended.
  ns <- session$ns
}
## To be copied in the UI
# mod_my_first_module_ui("my_first_module_ui_1")
## To be copied in the server
# callModule(mod_my_first_module_server, "my_first_module_ui_1")
| /examples/10-golemexample-master/R/mod_my_first_module.R | permissive | Zen-Reportz/ZenPublish | R | false | false | 1,723 | r | # Module UI
#' @title mod_my_first_module_ui and mod_my_first_module_server
#' @description A shiny Module.
#'
#' @param id shiny id
#' @param input internal
#' @param output internal
#' @param session internal
#'
#' @rdname mod_my_first_module
#'
#' @keywords internal
#' @export
#' @importFrom shiny NS tagList
mod_my_first_module_ui <- function(id){
ns <- NS(id)
tagList(
col_6(
tags$div(
align = "center",
tags$img(
src = "www/guit.jpg", width = "50%", align = "center"
)
)
),
col_6(
tableOutput(ns("df"))
),
col_6(
tags$div(
align = "center",
tags$button("Alert!", onclick = "alertme();")
)
),
col_6(
tags$div(
align = "center",
actionButton(ns("go"), "Go!")
)
)
)
}
# Module Server
#' @rdname mod_my_first_module
#' @export
#' @keywords internal
mod_my_first_module_server <- function(input, output, session, r){
ns <- session$ns
output$df <- renderTable({
dataset
})
observeEvent( input$go , {
golem::invoke_js("alertarg", "12")
})
}
#' my_first_module UI Function
#'
#' @description A shiny Module.
#'
#' @param id,input,output,session Internal parameters for {shiny}.
#'
#' @noRd
#'
#' @importFrom shiny NS tagList
mod_my_first_module_ui <- function(id){
ns <- NS(id)
tagList(
)
}
#' my_first_module Server Function
#'
#' @noRd
mod_my_first_module_server <- function(input, output, session){
ns <- session$ns
}
## To be copied in the UI
# mod_my_first_module_ui("my_first_module_ui_1")
## To be copied in the server
# callModule(mod_my_first_module_server, "my_first_module_ui_1")
|
testlist <- list(x = c(1409286143L, 805306146L, -2130706433L, -1L, -160L, 439353343L, 570490879L, -1L, -1L, 450244607L, -14548779L, 16711973L, 30464L, -268435712L, -1L, -15007745L, -13631744L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L), y = integer(0))
result <- do.call(diffrprojects:::dist_mat_absolute,testlist)
str(result) | /diffrprojects/inst/testfiles/dist_mat_absolute/libFuzzer_dist_mat_absolute/dist_mat_absolute_valgrind_files/1609963211-test.R | no_license | akhikolla/updated-only-Issues | R | false | false | 558 | r | testlist <- list(x = c(1409286143L, 805306146L, -2130706433L, -1L, -160L, 439353343L, 570490879L, -1L, -1L, 450244607L, -14548779L, 16711973L, 30464L, -268435712L, -1L, -15007745L, -13631744L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L), y = integer(0))
result <- do.call(diffrprojects:::dist_mat_absolute,testlist)
str(result) |
testlist <- list(type = 0L, z = 4.46235150667355e-319)
result <- do.call(esreg::G1_fun,testlist)
str(result) | /esreg/inst/testfiles/G1_fun/libFuzzer_G1_fun/G1_fun_valgrind_files/1609892916-test.R | no_license | akhikolla/updated-only-Issues | R | false | false | 108 | r | testlist <- list(type = 0L, z = 4.46235150667355e-319)
result <- do.call(esreg::G1_fun,testlist)
str(result) |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/rm.R
\name{step_rm}
\alias{step_rm}
\title{General Variable Filter}
\usage{
step_rm(
recipe,
...,
role = NA,
trained = FALSE,
removals = NULL,
skip = FALSE,
id = rand_id("rm")
)
}
\arguments{
\item{recipe}{A recipe object. The step will be added to the
sequence of operations for this recipe.}
\item{...}{One or more selector functions to choose variables
for this step. See \code{\link[=selections]{selections()}} for more details.}
\item{role}{Not used by this step since no new variables are
created.}
\item{trained}{A logical to indicate if the quantities for
preprocessing have been estimated.}
\item{removals}{A character string that contains the names of
columns that should be removed. These values are not determined
until \code{\link[=prep]{prep()}} is called.}
\item{skip}{A logical. Should the step be skipped when the
recipe is baked by \code{\link[=bake]{bake()}}? While all operations are baked
when \code{\link[=prep]{prep()}} is run, some operations may not be able to be
conducted on new data (e.g. processing the outcome variable(s)).
Care should be taken when using \code{skip = TRUE} as it may affect
the computations for subsequent operations.}
\item{id}{A character string that is unique to this step to identify it.}
}
\value{
An updated version of \code{recipe} with the new step added to the
sequence of any existing operations.
}
\description{
\code{step_rm} creates a \emph{specification} of a recipe step
that will remove variables based on their name, type, or role.
}
\details{
This step can potentially remove columns from the data set. This may
cause issues for subsequent steps in your recipe if the missing columns are
specifically referenced by name. To avoid this, see the advice in the
\emph{Tips for saving recipes and filtering columns} section of \link{selections}.
}
\section{Tidying}{
When you \code{\link[=tidy.recipe]{tidy()}} this step, a tibble with column
\code{terms} (the columns that will be removed) is returned.
}
\section{Case weights}{
The underlying operation does not allow for case weights.
}
\examples{
\dontshow{if (rlang::is_installed("modeldata")) (if (getRversion() >= "3.4") withAutoprint else force)(\{ # examplesIf}
data(biomass, package = "modeldata")
biomass_tr <- biomass[biomass$dataset == "Training", ]
biomass_te <- biomass[biomass$dataset == "Testing", ]
rec <- recipe(
HHV ~ carbon + hydrogen + oxygen + nitrogen + sulfur,
data = biomass_tr
)
library(dplyr)
smaller_set <- rec \%>\%
step_rm(contains("gen"))
smaller_set <- prep(smaller_set, training = biomass_tr)
filtered_te <- bake(smaller_set, biomass_te)
filtered_te
tidy(smaller_set, number = 1)
\dontshow{\}) # examplesIf}
}
\seealso{
Other variable filter steps:
\code{\link{step_corr}()},
\code{\link{step_filter_missing}()},
\code{\link{step_lincomb}()},
\code{\link{step_nzv}()},
\code{\link{step_select}()},
\code{\link{step_zv}()}
}
\concept{variable filter steps}
| /man/step_rm.Rd | permissive | DavisVaughan/recipes | R | false | true | 3,016 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/rm.R
\name{step_rm}
\alias{step_rm}
\title{General Variable Filter}
\usage{
step_rm(
recipe,
...,
role = NA,
trained = FALSE,
removals = NULL,
skip = FALSE,
id = rand_id("rm")
)
}
\arguments{
\item{recipe}{A recipe object. The step will be added to the
sequence of operations for this recipe.}
\item{...}{One or more selector functions to choose variables
for this step. See \code{\link[=selections]{selections()}} for more details.}
\item{role}{Not used by this step since no new variables are
created.}
\item{trained}{A logical to indicate if the quantities for
preprocessing have been estimated.}
\item{removals}{A character string that contains the names of
columns that should be removed. These values are not determined
until \code{\link[=prep]{prep()}} is called.}
\item{skip}{A logical. Should the step be skipped when the
recipe is baked by \code{\link[=bake]{bake()}}? While all operations are baked
when \code{\link[=prep]{prep()}} is run, some operations may not be able to be
conducted on new data (e.g. processing the outcome variable(s)).
Care should be taken when using \code{skip = TRUE} as it may affect
the computations for subsequent operations.}
\item{id}{A character string that is unique to this step to identify it.}
}
\value{
An updated version of \code{recipe} with the new step added to the
sequence of any existing operations.
}
\description{
\code{step_rm} creates a \emph{specification} of a recipe step
that will remove variables based on their name, type, or role.
}
\details{
This step can potentially remove columns from the data set. This may
cause issues for subsequent steps in your recipe if the missing columns are
specifically referenced by name. To avoid this, see the advice in the
\emph{Tips for saving recipes and filtering columns} section of \link{selections}.
}
\section{Tidying}{
When you \code{\link[=tidy.recipe]{tidy()}} this step, a tibble with column
\code{terms} (the columns that will be removed) is returned.
}
\section{Case weights}{
The underlying operation does not allow for case weights.
}
\examples{
\dontshow{if (rlang::is_installed("modeldata")) (if (getRversion() >= "3.4") withAutoprint else force)(\{ # examplesIf}
data(biomass, package = "modeldata")
biomass_tr <- biomass[biomass$dataset == "Training", ]
biomass_te <- biomass[biomass$dataset == "Testing", ]
rec <- recipe(
HHV ~ carbon + hydrogen + oxygen + nitrogen + sulfur,
data = biomass_tr
)
library(dplyr)
smaller_set <- rec \%>\%
step_rm(contains("gen"))
smaller_set <- prep(smaller_set, training = biomass_tr)
filtered_te <- bake(smaller_set, biomass_te)
filtered_te
tidy(smaller_set, number = 1)
\dontshow{\}) # examplesIf}
}
\seealso{
Other variable filter steps:
\code{\link{step_corr}()},
\code{\link{step_filter_missing}()},
\code{\link{step_lincomb}()},
\code{\link{step_nzv}()},
\code{\link{step_select}()},
\code{\link{step_zv}()}
}
\concept{variable filter steps}
|
### =========================================================================
### FilterRules objects
### -------------------------------------------------------------------------
# A rule is either an unevaluated expression or a predicate function.
setClassUnion("expression_OR_function", c("expression", "function"))

# FilterRules: a SimpleList of rules plus a parallel logical vector marking
# which rules are currently active.
setClass("FilterRules", representation(active = "logical"),
         prototype(elementType = "expression_OR_function"),
         contains = "SimpleList")

# 'active' travels with the list elements during subsetting/combination.
setMethod("parallelSlotNames", "FilterRules",
          function(x) c("active", callNextMethod()))
### - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
### Accessors.
###
setGeneric("active", function(x) standardGeneric("active"))

# Accessor: the logical activation flags, labelled with the rule names.
setMethod("active", "FilterRules", function(x) {
  setNames(x@active, names(x))
})
setGeneric("active<-", signature="x",
           function(x, value) standardGeneric("active<-")
)

# Replace the set of active rules. 'value' may be:
#   - numeric:   indices of the rules to activate (all others deactivated)
#   - character: names of the rules to activate (all others deactivated)
#   - logical:   flag vector recycled over all rules
setReplaceMethod("active", "FilterRules", function(x, value) {
  if (is.numeric(value)) {
    # translate indices to names, then fall through to the character branch
    value <- as.integer(value)[!is.na(value)]
    if (any(value < 1) || any(value > length(x)))
      stop("filter index out of range")
    value <- names(x)[value]
  }
  if (is.character(value)) {
    value <- value[!is.na(value)] ## NA's are dropped
    filterNames <- names(x)
    if (length(filterNames) == 0)
      stop("there are no filter names")
    if (any(!(value %in% filterNames)))
      stop("'value' contains invalid filter names")
    x@active <- filterNames %in% value
    x
  } else if (is.logical(value)) {
    nfilters <- length(x)
    if (length(value) > nfilters)
      stop("length of 'value' must not be greater than that of 'filters'")
    if (anyMissing(value))
      stop("'value' cannot contain NA's")
    # recycling is only allowed when it divides evenly
    if (nfilters && (nfilters %% length(value) != 0))
      stop("number of filters not a multiple of 'value' length")
    x@active <- rep(value, length.out = nfilters)
    x
  } else stop("unsupported type of 'value'")
})
### - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
### Constructor.
###
# Normalize a single rule to one of the two supported representations:
# character -> parsed expression; language/logical -> expression;
# function -> FilterClosure. Anything else is an error.
#
# Fixes: try() returns an object of class "try-error" on failure, not a
# character vector, so the original `is.character(expr)` check never fired
# and a broken try-error object was silently returned. Parse failures now
# raise an error as intended.
FilterRules.parseRule <- function(expr) {
  if (is.character(expr)) {
    parsed <- try(parse(text = expr, srcfile = NULL), silent = TRUE)
    if (inherits(parsed, "try-error"))
      stop("failed to parse filter expression: ", expr)
    parsed
  } else if (is.language(expr) || is.logical(expr))
    as.expression(expr)
  else if (is.function(expr))
    as(expr, "FilterClosure")
  else stop("would not evaluate to logical: ", expr)
}
## takes logical expressions, character vectors, or functions to parse
# Constructor. Rules may come in via the 'exprs' list or as bare (unevaluated)
# expressions in '...'. When the combined set is unnamed, non-function rules
# are named by their deparsed form (character rules by their literal text).
# 'active' is recycled to one flag per rule.
FilterRules <- function(exprs = list(), ..., active = TRUE) {
  # capture ... unevaluated so bare expressions can be used as rules
  exprs <- c(exprs, as.list(substitute(list(...)))[-1L])
  if (length(names(exprs)) == 0) {
    funs <- as.logical(sapply(exprs, is.function))
    nonfuns <- exprs[!funs]
    names(nonfuns) <- unlist(lapply(nonfuns, deparse))
    chars <- as.logical(sapply(nonfuns, is.character))
    names(nonfuns)[chars] <- unlist(nonfuns[chars])
    names(exprs)[!funs] <- names(nonfuns)
  }
  exprs <- lapply(exprs, FilterRules.parseRule)
  active <- rep(active, length.out = length(exprs))
  if (!is.logical(active) || anyMissing(active))
    stop("'active' must be logical without any missing values")
  if (length(active) > length(exprs))
    stop("length of 'active' is greater than number of rules")
  if (length(exprs) && length(exprs) %% length(active) > 0)
    stop("number of rules must be a multiple of length of 'active'")
  ans <- new_SimpleList_from_list("FilterRules", exprs,
                                  active = active)
  validObject(ans)
  ans
}
### - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
### Coercion.
###
# Route all coercions through the validating constructor.
setAs("ANY", "FilterRules", function(from) FilterRules(from))

### Coercion from SimpleList to FilterRules works out-of-the-box but silently
### returns a broken object! The problem is that this coercion is performed by
### one of the dummy coercion methods that are automatically defined by the
### methods package and that often do the wrong thing (like here). Furthermore,
### they don't bother to validate the object they return. So we overwrite it.
setAs("SimpleList", "FilterRules", function(from) FilterRules(from))
### - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
### Subsetting.
###
# Assign a rule by index or name. The new rule is parsed first and marked
# active; assigning a character value at a numeric index also names the
# rule after its own text.
setReplaceMethod("[[", "FilterRules",
                 function(x, i, j, ..., value)
                 {
                   if (!missing(j) || length(list(...)) > 0)
                     warning("arguments beyond 'i' ignored")
                   if (missing(i))
                     stop("subscript is missing")
                   rule <- FilterRules.parseRule(value)
                   x <- callNextMethod(x, i, value = rule)
                   if (is.numeric(i) && is.character(value))
                     names(x)[i] <- value
                   active <- x@active ## in case we expanded
                   names(active) <- names(x)[seq_along(active)]
                   active[[i]] <- TRUE
                   names(active) <- NULL
                   x@active <- active
                   x
                 })
### - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
### Combination
###
# Combining two FilterRules with `&` is plain concatenation: evaluating the
# result applies every rule from both operands (logical AND semantics).
setMethod("&", c("FilterRules", "FilterRules"), function(e1, e2) c(e1, e2))
### - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
### Validity.
###
# The 'active' slot must be an NA-free logical vector parallel to the rules.
.valid.FilterRules.active <- function(x) {
  if (length(active(x)) != length(x))
    "length of 'active' must match length of 'filters'"
  else if (!identical(names(active(x)), names(x)))
    "names of 'active' must match those of 'filters'"
  else if (anyMissing(active(x)))
    "'active' cannot contain NA's"
  else NULL
}

# Function rules must accept the object being filtered as an argument.
.valid.FilterRules.rules <- function(x) {
  unlist(lapply(x, function(rule) {
    if (is.function(rule) && length(formals(rule)) < 1)
      "function rule must take at least one parameter"
    else NULL
  }))
}

.valid.FilterRules <- function(x)
  c(.valid.FilterRules.active(x), .valid.FilterRules.rules(x))

setValidity2("FilterRules", .valid.FilterRules)
### - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
### Evaluating
###
# Evaluate all active rules against 'envir' (typically a data-frame-like
# object) and AND the results into one logical vector of length NROW(envir).
# Rules are applied sequentially: after each rule, 'envir' is narrowed to
# the survivors so later rules only see rows that passed earlier ones.
# NA results never pass.
#
# Fixes: the inconsistent-length error message used names(rule)[i] --
# 'rule' is the single extracted element, so the message always dropped the
# rule's name. It now uses names(rules)[i] like the other messages.
setMethod("eval", signature(expr="FilterRules", envir="ANY"),
          function(expr, envir = parent.frame(),
                   enclos = if(is.list(envir) || is.pairlist(envir))
                     parent.frame() else baseenv())
          {
            result <- rep.int(TRUE, NROW(envir))
            rules <- as.list(expr)[active(expr)]
            for (i in seq_along(rules)) {
              rule <- rules[[i]]
              val <- tryCatch(if (is.expression(rule))
                                eval(rule, envir, enclos)
                              else rule(envir),
                              error = function(e) {
                                stop("Filter '", names(rules)[i], "' failed: ",
                                     e$message)
                              })
              if (is(val, "Rle"))
                val <- as.vector(val)
              if (!is.logical(val))
                stop("filter rule evaluated to non-logical: ",
                     names(rules)[i])
              # the rule's result must tile evenly against the data rows
              if ((NROW(envir) == 0L && length(val) > 0L) ||
                  (NROW(envir) > 0L && length(val) == 0L) ||
                  (NROW(envir) > 0L &&
                   (max(NROW(envir), length(val)) %%
                    min(NROW(envir), length(val)) != 0)))
                stop("filter rule evaluated to inconsistent length: ",
                     names(rules)[i])
              # NA means "did not pass"
              if (anyNA(val)) {
                val[is.na(val)] <- FALSE
              }
              # narrow the data for the next rule (skip when only one rule)
              if (length(rules) > 1L)
                envir <- extractROWS(envir, val)
              result[result] <- val
            }
            result
          })
# Evaluate each active rule independently and return a FilterMatrix with
# one logical column per rule. With serial = TRUE, each rule only sees the
# rows that passed all previous rules, and each column reports cumulative
# passes.
setGeneric("evalSeparately",
           function(expr, envir = parent.frame(),
                    enclos = if (is.list(envir) ||
                                 is.pairlist(envir)) parent.frame() else baseenv(),
                    ...)
             standardGeneric("evalSeparately"))

setMethod("evalSeparately", "FilterRules",
          function(expr, envir = parent.frame(),
                   enclos = if (is.list(envir) ||
                                is.pairlist(envir)) parent.frame() else baseenv(),
                   serial = FALSE)
          {
            if (!isTRUEorFALSE(serial))
              stop("'serial' must be TRUE or FALSE")
            inds <- seq_len(length(expr))
            names(inds) <- names(expr)
            passed <- rep.int(TRUE, NROW(envir))
            m <- do.call(cbind, lapply(inds, function(i) {
              result <- eval(expr[i], envir = envir, enclos = enclos)
              if (serial) {
                # narrow the data to survivors and record cumulative passes
                envir <<- subset(envir, .(result))
                passed[passed] <<- result
                passed
              } else result
            }))
            FilterMatrix(matrix = m, filterRules = expr)
          })

# Convenience: subset any vector-like 'x' by the rows passing 'filter'.
setGeneric("subsetByFilter",
           function(x, filter, ...) standardGeneric("subsetByFilter"))
setMethod("subsetByFilter", c("ANY", "FilterRules"), function(x, filter) {
  extractROWS(x, eval(filter, x))
})
### - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
### Summary
###
# Per-rule pass counts for 'subject'; delegates to summary,FilterResults
# after evaluating the rules column-by-column.
setMethod("summary", "FilterRules",
          function(object, subject, serial = FALSE, discarded = FALSE,
                   percent = FALSE)
          {
            if (!isTRUEorFALSE(serial))
              stop("'serial' must be TRUE or FALSE")
            mat <- evalSeparately(object, subject, serial = serial)
            summary(mat, discarded = discarded, percent = percent)
          })
### - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
### FilterRule closures
###
# FilterClosure wraps a predicate function used as a rule; the subclasses
# preserve (standard)generic-ness through coercion.
setClass("FilterClosure", contains = "function")
setClass("GenericFilterClosure", contains = "FilterClosure")
setClass("StandardGenericFilterClosure",
         contains = c("GenericFilterClosure", "standardGeneric"))

setAs("standardGeneric", "FilterClosure", function(from) {
  new("StandardGenericFilterClosure", from)
})
setAs("function", "FilterClosure", function(from) {
  new("FilterClosure", from)
})

# params(): a filter's tuning parameters, taken from its closure environment.
setGeneric("params", function(x, ...) standardGeneric("params"))
setMethod("params", "FilterClosure",
          function(x) {
            as.list(environment(x))
          })

# Display as "filter (name = value, ...)" followed by the function body.
setMethod("show", "FilterClosure", function(object) {
  p <- params(object)
  cat("filter (",
      paste(names(p), "=", sapply(p, deparse, control = NULL),
            collapse = ", "),
      ")\n", sep = "")
  print(body(object))
})
### -------------------------------------------------------------------------
### FilterResults: coordinates results from multiple filters
###
# FilterResults carries the rules that produced a set of results.
setClass("FilterResults",
         representation(filterRules = "FilterRules"))

# Validity: a FilterMatrix is a logical matrix with exactly one (unnamed)
# rule per column.
#
# Fixes: the original referenced the free variable `filterRules` (undefined
# in this scope) instead of `object@filterRules`, so validity checking
# errored instead of returning a diagnostic message.
.valid.FilterMatrix <- function(object)
{
    c(if (!is.logical(object))
          "values must be logical",
      if (!is.null(names(object@filterRules)))
          "filterRules must not be named",
      if (length(object@filterRules) != ncol(object))
          "length(filterRules) must equal ncol(object)")
}

setClass("FilterMatrix",
         contains = c("matrix", "FilterResults"),
         validity = .valid.FilterMatrix)
# Accessor: the rules that produced these results, labelled by column name.
setGeneric("filterRules", function(x, ...) standardGeneric("filterRules"))
setMethod("filterRules", "FilterResults", function(x) {
  setNames(x@filterRules, colnames(x))
})

# Subsetting keeps the rules aligned with whatever columns are retained;
# dropping to a vector loses the rules (plain subscript result).
setMethod("[", "FilterMatrix", function(x, i, j, ..., drop = TRUE) {
  if (!missing(i))
    i <- as.vector(i)
  if (!missing(j))
    j <- as.vector(j)
  ans <- callNextMethod()
  if (is.matrix(ans)) {
    filterRules <- filterRules(x)
    if (!missing(j))
      filterRules <- filterRules[j]
    ans <- FilterMatrix(matrix = ans, filterRules = filterRules)
  }
  ans
})
# rbind: stack results that were computed with identical rule sets.
setMethod("rbind", "FilterMatrix", function(..., deparse.level = 1) {
  args <- list(...)
  ans <- do.call(rbind, lapply(args, as, "matrix"))
  rulesList <- lapply(args, filterRules)
  if (any(!sapply(rulesList, identical, rulesList[[1]])))
    stop("cannot rbind filter matrices with non-identical rule sets")
  FilterMatrix(matrix = ans, filterRules = rulesList[[1]])
})

# cbind: place results from different rule sets side by side, concatenating
# the rules.
setMethod("cbind", "FilterMatrix", function(..., deparse.level = 1) {
  args <- list(...)
  ans <- do.call(cbind, lapply(args, as, "matrix"))
  rules <- do.call(c, lapply(args, function(x) x@filterRules))
  FilterMatrix(matrix = ans, filterRules = rules)
})
# Construct a FilterMatrix, reconciling names(filterRules) with
# colnames(matrix); rule names end up only in the colnames (the validity
# method requires the stored rules themselves to be unnamed).
FilterMatrix <- function(matrix, filterRules) {
  stopifnot(ncol(matrix) == length(filterRules))
  if (is.null(colnames(matrix)))
    colnames(matrix) <- names(filterRules)
  else if (!is.null(names(filterRules)) &&
           !identical(names(filterRules), colnames(matrix)))
    stop("if names(filterRules) and colnames(matrix) are both not NULL,",
         " the names must match")
  names(filterRules) <- NULL
  new("FilterMatrix", matrix, filterRules = filterRules)
}
# Compact display: header with dimensions, then a truncated matrix view.
setMethod("show", "FilterMatrix", function(object) {
  cat(class(object), " (", nrow(object), " x ", ncol(object), ")\n", sep = "")
  mat <- makePrettyMatrixForCompactPrinting(object, function(x) x@.Data)
  print(mat, quote = FALSE, right = TRUE)
})

# Per-rule pass counts bracketed by <initial> (all rows) and <final> (rows
# passing every rule). 'discarded' flips to failure counts; 'percent'
# reports proportions of the initial row count (rounded to 3 digits).
setMethod("summary", "FilterResults",
          function(object, discarded = FALSE, percent = FALSE)
          {
            if (!isTRUEorFALSE(discarded))
              stop("'discarded' must be TRUE or FALSE")
            if (!isTRUEorFALSE(percent))
              stop("'percent' must be TRUE or FALSE")
            counts <- c("<initial>" = nrow(object), colSums(object),
                        "<final>" = sum(rowSums(object) == ncol(object)))
            if (discarded) {
              counts <- nrow(object) - counts
            }
            if (percent) {
              round(counts / nrow(object), 3)
            } else counts
          })
| /R/FilterRules-class.R | no_license | EugOT/S4Vectors | R | false | false | 13,761 | r | ### =========================================================================
### FilterRules objects
### -------------------------------------------------------------------------
setClassUnion("expression_OR_function", c("expression", "function"))
setClass("FilterRules", representation(active = "logical"),
prototype(elementType = "expression_OR_function"),
contains = "SimpleList")
setMethod("parallelSlotNames", "FilterRules",
function(x) c("active", callNextMethod()))
### - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
### Accessors.
###
setGeneric("active", function(x) standardGeneric("active"))
setMethod("active", "FilterRules", function(x) {
a <- x@active
names(a) <- names(x)
a
})
setGeneric("active<-", signature="x",
function(x, value) standardGeneric("active<-")
)
setReplaceMethod("active", "FilterRules", function(x, value) {
if (is.numeric(value)) {
value <- as.integer(value)[!is.na(value)]
if (any(value < 1) || any(value > length(x)))
stop("filter index out of range")
value <- names(x)[value]
}
if (is.character(value)) {
value <- value[!is.na(value)] ## NA's are dropped
filterNames <- names(x)
if (length(filterNames) == 0)
stop("there are no filter names")
if (any(!(value %in% filterNames)))
stop("'value' contains invalid filter names")
x@active <- filterNames %in% value
x
} else if (is.logical(value)) {
nfilters <- length(x)
if (length(value) > nfilters)
stop("length of 'value' must not be greater than that of 'filters'")
if (anyMissing(value))
stop("'value' cannot contain NA's")
if (nfilters && (nfilters %% length(value) != 0))
stop("number of filters not a multiple of 'value' length")
x@active <- rep(value, length.out = nfilters)
x
} else stop("unsupported type of 'value'")
})
### - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
### Constructor.
###
FilterRules.parseRule <- function(expr) {
if (is.character(expr)) {
expr <- try(parse(text = expr, srcfile = NULL), silent = TRUE)
if (is.character(expr))
stop("failed to parse filter expression: ", expr)
expr
} else if (is.language(expr) || is.logical(expr))
as.expression(expr)
else if (is.function(expr))
as(expr, "FilterClosure")
else stop("would not evaluate to logical: ", expr)
}
## takes logical expressions, character vectors, or functions to parse
FilterRules <- function(exprs = list(), ..., active = TRUE) {
exprs <- c(exprs, as.list(substitute(list(...)))[-1L])
if (length(names(exprs)) == 0) {
funs <- as.logical(sapply(exprs, is.function))
nonfuns <- exprs[!funs]
names(nonfuns) <- unlist(lapply(nonfuns, deparse))
chars <- as.logical(sapply(nonfuns, is.character))
names(nonfuns)[chars] <- unlist(nonfuns[chars])
names(exprs)[!funs] <- names(nonfuns)
}
exprs <- lapply(exprs, FilterRules.parseRule)
active <- rep(active, length.out = length(exprs))
if (!is.logical(active) || anyMissing(active))
stop("'active' must be logical without any missing values")
if (length(active) > length(exprs))
stop("length of 'active' is greater than number of rules")
if (length(exprs) && length(exprs) %% length(active) > 0)
stop("number of rules must be a multiple of length of 'active'")
ans <- new_SimpleList_from_list("FilterRules", exprs,
active = active)
validObject(ans)
ans
}
### - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
### Coercion.
###
setAs("ANY", "FilterRules", function(from) FilterRules(from))
### Coercion from SimpleList to FilterRules works out-of-the-box but silently
### returns a broken object! The problem is that this coercion is performed by
### one of the dummy coercion methods that are automatically defined by the
### methods package and that often do the wrong thing (like here). Furthermore,
### they don't bother to validate the object they return. So we overwrite it.
setAs("SimpleList", "FilterRules", function(from) FilterRules(from))
### - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
### Subsetting.
###
setReplaceMethod("[[", "FilterRules",
function(x, i, j, ..., value)
{
if (!missing(j) || length(list(...)) > 0)
warning("arguments beyond 'i' ignored")
if (missing(i))
stop("subscript is missing")
rule <- FilterRules.parseRule(value)
x <- callNextMethod(x, i, value = rule)
if (is.numeric(i) && is.character(value))
names(x)[i] <- value
active <- x@active ## in case we expanded
names(active) <- names(x)[seq_along(active)]
active[[i]] <- TRUE
names(active) <- NULL
x@active <- active
x
})
### - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
### Combination
###
setMethod("&", c("FilterRules", "FilterRules"), function(e1, e2) {
c(e1, e2)
})
### - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
### Validity.
###
.valid.FilterRules.active <- function(x) {
if (length(active(x)) != length(x))
"length of 'active' must match length of 'filters'"
else if (!identical(names(active(x)), names(x)))
"names of 'active' must match those of 'filters'"
else if (anyMissing(active(x)))
"'active' cannot contain NA's"
else NULL
}
.valid.FilterRules.rules <- function(x) {
unlist(lapply(x, function(rule) {
if (is.function(rule) && length(formals(rule)) < 1)
"function rule must take at least one parameter"
else NULL
}))
}
.valid.FilterRules <- function(x)
c(.valid.FilterRules.active(x), .valid.FilterRules.rules(x))
setValidity2("FilterRules", .valid.FilterRules)
### - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
### Evaluating
###
setMethod("eval", signature(expr="FilterRules", envir="ANY"),
function(expr, envir = parent.frame(),
enclos = if(is.list(envir) || is.pairlist(envir))
parent.frame() else baseenv())
{
result <- rep.int(TRUE, NROW(envir))
rules <- as.list(expr)[active(expr)]
for (i in seq_along(rules)) {
rule <- rules[[i]]
val <- tryCatch(if (is.expression(rule))
eval(rule, envir, enclos)
else rule(envir),
error = function(e) {
stop("Filter '", names(rules)[i], "' failed: ",
e$message)
})
if (is(val, "Rle"))
val <- as.vector(val)
if (!is.logical(val))
stop("filter rule evaluated to non-logical: ",
names(rules)[i])
if ((NROW(envir) == 0L && length(val) > 0L) ||
(NROW(envir) > 0L && length(val) == 0L) ||
(NROW(envir) > 0L &&
(max(NROW(envir), length(val)) %%
min(NROW(envir), length(val)) != 0)))
stop("filter rule evaluated to inconsistent length: ",
names(rule)[i])
if (anyNA(val)) {
val[is.na(val)] <- FALSE
}
if (length(rules) > 1L)
envir <- extractROWS(envir, val)
result[result] <- val
}
result
})
### evalSeparately(): evaluates each rule independently and returns a
### FilterMatrix recording, per row of 'envir', which rules passed.
setGeneric("evalSeparately",
           function(expr, envir = parent.frame(),
                    enclos = if (is.list(envir) ||
                                 is.pairlist(envir)) parent.frame() else baseenv(),
                    ...)
             standardGeneric("evalSeparately"))

### When 'serial' is TRUE each rule only sees the rows that survived the
### preceding rules (matching eval()'s sequential semantics); when FALSE each
### rule is applied to the full 'envir' independently.
setMethod("evalSeparately", "FilterRules",
          function(expr, envir = parent.frame(),
                   enclos = if (is.list(envir) ||
                                is.pairlist(envir)) parent.frame() else baseenv(),
                   serial = FALSE)
          {
            if (!isTRUEorFALSE(serial))
              stop("'serial' must be TRUE or FALSE")
            inds <- seq_len(length(expr))
            names(inds) <- names(expr)
            passed <- rep.int(TRUE, NROW(envir))
            m <- do.call(cbind, lapply(inds, function(i) {
              ## expr[i] (single bracket) keeps the FilterRules class so the
              ## eval() method above is dispatched.
              result <- eval(expr[i], envir = envir, enclos = enclos)
              if (serial) {
                ## '<<-' updates the shared state across loop iterations.
                ## NOTE(review): '.(result)' relies on a non-standard
                ## subset() method being in scope -- confirm.
                envir <<- subset(envir, .(result))
                ## Expand the per-subset result back to original row space.
                passed[passed] <<- result
                passed
              } else result
            }))
            FilterMatrix(matrix = m, filterRules = expr)
          })
### subsetByFilter(): keep only the rows of 'x' that pass all active rules.
setGeneric("subsetByFilter",
           function(x, filter, ...) standardGeneric("subsetByFilter"))

setMethod("subsetByFilter", c("ANY", "FilterRules"), function(x, filter) {
  extractROWS(x, eval(filter, x))
})
### - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
### Summary
###
### summary() on FilterRules: evaluate the rules against 'subject' and
### delegate to the FilterResults summary (per-rule pass counts).
### 'serial', 'discarded' and 'percent' are forwarded unchanged.
setMethod("summary", "FilterRules",
          function(object, subject, serial = FALSE, discarded = FALSE,
                   percent = FALSE)
          {
            if (!isTRUEorFALSE(serial))
              stop("'serial' must be TRUE or FALSE")
            mat <- evalSeparately(object, subject, serial = serial)
            summary(mat, discarded = discarded, percent = percent)
          })
### - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
### FilterRule closures
###
### FilterClosure: a function used as a filter rule, with its parameters
### captured in the function's enclosing environment.
setClass("FilterClosure", contains = "function")
setClass("GenericFilterClosure", contains = "FilterClosure")
setClass("StandardGenericFilterClosure",
         contains = c("GenericFilterClosure", "standardGeneric"))

### Coercions: wrap a plain function (or a standard generic) so it can carry
### the FilterClosure class.
setAs("standardGeneric", "FilterClosure", function(from) {
  new("StandardGenericFilterClosure", from)
})
setAs("function", "FilterClosure", function(from) {
  new("FilterClosure", from)
})

### params(): the filter's parameters, i.e. the bindings of the closure's
### environment, as a named list.
setGeneric("params", function(x, ...) standardGeneric("params"))
setMethod("params", "FilterClosure",
          function(x) {
            as.list(environment(x))
          })

### Compact display: "filter (param = value, ...)" followed by the body.
setMethod("show", "FilterClosure", function(object) {
  p <- params(object)
  cat("filter (",
      paste(names(p), "=", sapply(p, deparse, control = NULL),
            collapse = ", "),
      ")\n", sep = "")
  print(body(object))
})
### -------------------------------------------------------------------------
### FilterResults: coordinates results from multiple filters
###
### FilterResults: base class coordinating results from multiple filters;
### stores the rules that produced them.
setClass("FilterResults",
         representation(filterRules = "FilterRules"))

### Validity for FilterMatrix: a logical matrix with exactly one column per
### stored rule; rule names live in colnames, so the slot itself is unnamed.
.valid.FilterMatrix <- function(object)
{
  c(if (!is.logical(object))
      "values must be logical",
    ## BUG FIX: the original tested the undefined bare symbol 'filterRules'
    ## instead of the slot, so this check either errored or consulted an
    ## unrelated global variable.
    if (!is.null(names(object@filterRules)))
      "filterRules must not be named",
    if (length(object@filterRules) != ncol(object))
      "length(filterRules) must equal ncol(object)")
}

setClass("FilterMatrix",
         contains = c("matrix", "FilterResults"),
         validity = .valid.FilterMatrix)
### filterRules(): accessor for the rules behind a FilterResults object,
### re-named to match the result's column names.
setGeneric("filterRules", function(x, ...) standardGeneric("filterRules"))
setMethod("filterRules", "FilterResults", function(x) {
  setNames(x@filterRules, colnames(x))
})

### Subsetting a FilterMatrix: defer to the matrix method, then restore the
### FilterMatrix class (with the matching subset of rules) whenever the
### result is still a matrix; with drop = TRUE a single row/column falls
### through as a plain vector.
setMethod("[", "FilterMatrix", function(x, i, j, ..., drop = TRUE) {
  if (!missing(i))
    i <- as.vector(i)
  if (!missing(j))
    j <- as.vector(j)
  ans <- callNextMethod()
  if (is.matrix(ans)) {
    filterRules <- filterRules(x)
    if (!missing(j))
      filterRules <- filterRules[j]
    ans <- FilterMatrix(matrix = ans, filterRules = filterRules)
  }
  ans
})
### rbind: stack results for additional rows; every argument must have been
### produced by an identical rule set.
setMethod("rbind", "FilterMatrix", function(..., deparse.level = 1) {
  args <- list(...)
  ans <- do.call(rbind, lapply(args, as, "matrix"))
  rulesList <- lapply(args, filterRules)
  if (any(!sapply(rulesList, identical, rulesList[[1]])))
    stop("cannot rbind filter matrices with non-identical rule sets")
  FilterMatrix(matrix = ans, filterRules = rulesList[[1]])
})

### cbind: combine results from different rule sets over the same rows; the
### rule lists are concatenated to match the new set of columns.
setMethod("cbind", "FilterMatrix", function(..., deparse.level = 1) {
  args <- list(...)
  ans <- do.call(cbind, lapply(args, as, "matrix"))
  rules <- do.call(c, lapply(args, function(x) x@filterRules))
  FilterMatrix(matrix = ans, filterRules = rules)
})
### Constructor.  'matrix' holds one logical column of results per rule in
### 'filterRules'.  Rule names are carried as the matrix's colnames; the
### stored rules themselves are unnamed (see .valid.FilterMatrix).
FilterMatrix <- function(matrix, filterRules) {
  stopifnot(ncol(matrix) == length(filterRules))
  if (is.null(colnames(matrix)))
    colnames(matrix) <- names(filterRules)
  else if (!is.null(names(filterRules)) &&
           !identical(names(filterRules), colnames(matrix)))
    stop("if names(filterRules) and colnames(matrix) are both not NULL,",
         " the names must match")
  names(filterRules) <- NULL
  new("FilterMatrix", matrix, filterRules = filterRules)
}
### Compact display of a FilterMatrix: header with dimensions, then the
### underlying logical data pretty-printed.
setMethod("show", "FilterMatrix", function(object) {
  cat(class(object), " (", nrow(object), " x ", ncol(object), ")\n", sep = "")
  mat <- makePrettyMatrixForCompactPrinting(object, function(x) x@.Data)
  print(mat, quote = FALSE, right = TRUE)
})

### summary(): per-rule pass counts, bracketed by the initial row count and
### the number of rows that pass every rule.  With 'discarded' the counts are
### inverted to failures; with 'percent' they are scaled by the initial row
### count and rounded to 3 digits.
setMethod("summary", "FilterResults",
          function(object, discarded = FALSE, percent = FALSE)
          {
            if (!isTRUEorFALSE(discarded))
              stop("'discarded' must be TRUE or FALSE")
            if (!isTRUEorFALSE(percent))
              stop("'percent' must be TRUE or FALSE")
            counts <- c("<initial>" = nrow(object), colSums(object),
                        "<final>" = sum(rowSums(object) == ncol(object)))
            if (discarded) {
              counts <- nrow(object) - counts
            }
            if (percent) {
              round(counts / nrow(object), 3)
            } else counts
          })
|
## Kaggle "can we predict voting outcomes?" exploration script, part 1:
## load imputed data, screen predictors by correlation, split, and fit a
## random forest baseline.
## NOTE(review): assumes the imputed CSVs from a previous step sit in the
## working directory.
main_cols=c("USER_ID","YOB","Gender","Income","HouseholdStatus","EducationLevel","Party")
TrainImp = read.csv("pollTrain_imputed.csv")
table(TrainImp$YOB)
otrain1 = read.csv("train2016.csv")
nrow(otrain1)
## Labels for the rows kept after dropping implausible birth years.
trlab = subset(otrain1$Party,!otrain1$YOB %in% c("2011","2013","2039","1880","1881","1896"))
length(trlab)
## Recode the outcome: 1 = Democrat, 0 = otherwise.
TrainImp$Party = ifelse(TrainImp$Party=="Democrat",1,0)
table(TrainImp$Party)
t=c()
t
## Correlation of every predictor (as numeric codes) with the outcome.
## NOTE(review): c(col=val) names every element literally "col"; harmless
## only because the names are overwritten right after the loop.
for (col in setdiff(names(TrainImp),c("USER_ID","Party"))){
#if mean(is.na(pollTrain[,col])
cat(col,"cor:",cor(as.numeric(TrainImp[,col]),as.numeric(TrainImp$Party)),"\n")
val = cor(as.numeric(TrainImp[,col]),as.numeric(TrainImp$Party))
t = c(t,c(col=val))
#print(paste("The year is", year))
}
names(t) = setdiff(names(TrainImp),c("USER_ID","Party"))
## Predictors clearing the ad-hoc |correlation| > 0.09 cutoff.
t1=which(abs(t)>0.09)
names(t1)
TrainImp$Party = as.factor(TrainImp$Party)
library(caTools)
## 75/25 train/test split, stratified on the outcome.
## NOTE(review): no set.seed(), so the split is not reproducible.
split = sample.split(TrainImp$Party, SplitRatio = 0.75)
Train = subset(TrainImp, split == TRUE)
Test = subset(TrainImp, split == FALSE)
library(rpart)
library(e1071)
library(randomForest)
## Random forest on the strongest predictors; accuracy on train and test
## (sum(diag(t))/sum(t) is the confusion-matrix accuracy).
rfModel1 = randomForest(Party~ Gender+Q115611+Q113181+Q109244+Q98197, data = Train)#, ntree = 1000, sampsize = 300)
predTrain = predict(rfModel1)
t = table(Train$Party,predTrain)
t
sum(diag(t))/sum(t)
predTest = predict(rfModel1,newdata = Test)
t = table(Test$Party,predTest)
t
sum(diag(t))/sum(t)
## Part 2: logistic regression, a CART tree, cross-validated cp tuning, and
## the Kaggle submission file.
LogModel = glm(Party~YOB+Gender+HouseholdStatus+EducationLevel+Q98197+Q98078+
Q98869+Q99480+Q100689+Q101163+Q105840+Q107869+Q109244, data = Train, family=binomial)
summary(LogModel)
predLogTrain = predict(LogModel,type = "response")
t = table(Train$Party,predLogTrain>0.5)
t
sum(diag(t))/sum(t)
predLogTest = predict(LogModel,newdata = Test,type="response")
t = table(Test$Party,predLogTest>0.5)
t
sum(diag(t))/sum(t)
## CART tree over the full predictor set.
Tree1 = rpart(Party~YOB+Gender+Income+HouseholdStatus+EducationLevel+Q122771+Q121699+
Q120379+Q120472+Q119851+Q116881+Q115611+Q115899+Q113181+Q110740+Q109244+
Q107869+Q106272+Q106042+Q105840+Q102089+Q101163+Q101596+
Q100689+Q100680+Q99716+Q99480+Q98869+Q98078+Q98197, data = Train, method="class")
predtreeTest = predict(Tree1,newdata = Test, type = "class")
t = table(Test$Party,predtreeTest)
t
sum(diag(t))/sum(t)
library(e1071)
library(caret)
## 10-fold cross-validation over the rpart complexity parameter.
numFolds = trainControl(method = "cv",number = 10)
cpGrid = expand.grid(.cp=seq(0.001,0.01,0.001))
summary(cpGrid)
train(Party~ YOB+Gender+Income+HouseholdStatus+EducationLevel+Q122771+Q121699+Q120379+Q120472+Q119851+Q116881+Q115611+Q115899+Q113181+Q110740+Q109244+Q107869+Q106272+Q106042+Q105840+Q102089+Q101163+Q101596+
Q100689+Q100680+Q99716+Q99480+Q98869+Q98078+Q98197,data = Train, method = "rpart", trControl = numFolds, tuneGrid = cpGrid)
## Score the held-out Kaggle test set and write the submission file
## (map 1 back to "Democrat", everything else to "Republican").
TestFinal = read.csv("pollTest_imputed.csv")
PredTreeTestFin = predict(Tree1,newdata = TestFinal,type = "class")
PredTreeTestFin = as.factor(ifelse(PredTreeTestFin==1, "Democrat", "Republican"))
table(PredTreeTestFin)
MySubmission = data.frame(USER_ID = TestFinal$USER_ID, Predictions = PredTreeTestFin)
str(MySubmission)
write.csv(MySubmission, "SubmissionRPART_IMP_FE_01.csv", row.names=FALSE)
## Part 3: cluster-then-predict -- k-means (k = 2) on the demographic
## variables only, then split train/test by assigned cluster.
Train2 = Train[,c("YOB","Gender","Income","HouseholdStatus","EducationLevel")]
Test2 = Test[,c("YOB","Gender","Income","HouseholdStatus","EducationLevel")]
setdiff(names(Train2),names(Test2))
## Coerce factors to their numeric codes so they can be scaled and clustered.
for (col in setdiff(names(Train2),c("Party"))){
#if mean(is.na(pollTrain[,col])
Train2[,col] = as.numeric(Train2[,col])
Test2[,col] = as.numeric(Test2[,col])
#print(paste("The year is", year))
}
str(Train2)
## Center/scale using training-set statistics only.
preproc = preProcess(Train2)
normTrain = predict(preproc, Train2)
normTest = predict(preproc, Test2)
str(normTest)
## NOTE(review): kmeans is run without set.seed(), so cluster labels can
## flip between runs.
km = kmeans(normTrain, centers = 2)
table(km$cluster)
library(flexclust)
## kcca wrapper lets the fitted k-means centroids score new observations.
km.kcca = as.kcca(km,normTrain)
clusterTrain = predict(km.kcca)
clusterTest = predict(km.kcca, newdata = normTest)
table(clusterTest)
length(clusterTest)
length(clusterTrain)
## Split the original (un-normalized) frames by assigned cluster.
cTrain1 = subset(Train, clusterTrain == 1)
cTrain2 = subset(Train, clusterTrain == 2)
names(cTrain1)
cTest1 = subset(Test, clusterTest == 1)
cTest2 = subset(Test, clusterTest == 2)
## Part 4: per-cluster CART and logistic models, each scored on the matching
## train/test cluster subsets.
cTree1 = rpart(Party~YOB+Gender+Income+HouseholdStatus+EducationLevel+Q122771+Q121699+Q120379+Q120472+Q119851+Q116881+Q115611+Q115899+Q113181+Q110740+Q109244+Q107869+Q106272+Q106042+Q105840+Q102089+Q101163+Q101596+
Q100689+Q100680+Q99716+Q99480+Q98869+Q98078+Q98197, data=cTrain1, method = "class",cp=0.006)
t= table(cTrain1$Party,predict(cTree1,type="class"))
t
sum(diag(t))/sum(t)
t= table(cTest1$Party,predict(cTree1,newdata = cTest1,type="class"))
t
sum(diag(t))/sum(t)
cTree2 = rpart(Party~YOB+Gender+Income+HouseholdStatus+EducationLevel+Q122771+Q121699+Q120379+Q120472+Q119851+Q116881+Q115611+Q115899+Q113181+Q110740+Q109244+Q107869+Q106272+Q106042+Q105840+Q102089+Q101163+Q101596+
Q100689+Q100680+Q99716+Q99480+Q98869+Q98078+Q98197, data=cTrain2, method = "class")
t= table(cTrain2$Party,predict(cTree2,type="class"))
t
sum(diag(t))/sum(t)
t= table(cTest2$Party,predict(cTree2,newdata = cTest2,type="class"))
t
sum(diag(t))/sum(t)
cLog1 = glm(Party~YOB+Gender+Income+HouseholdStatus+EducationLevel+Q122771+Q121699+Q120379+Q120472+Q119851+Q116881+Q115611+Q115899+Q113181+Q110740+Q109244+Q107869+Q106272+Q106042+Q105840+Q102089+Q101163+Q101596+
Q100689+Q100680+Q99716+Q99480+Q98869+Q98078+Q98197, data=cTrain1, family=binomial)
t= table(cTrain1$Party,predict(cLog1,type="response")>0.5)
t
sum(diag(t))/sum(t)
t= table(cTest1$Party,predict(cLog1,newdata = cTest1,type="response")>0.5)
t
sum(diag(t))/sum(t)
cLog2 = glm(Party~YOB+Gender+Income+HouseholdStatus+EducationLevel+Q122771+Q121699+Q120379+Q120472+Q119851+Q116881+Q115611+Q115899+Q113181+Q110740+Q109244+Q107869+Q106272+Q106042+Q105840+Q102089+Q101163+Q101596+
Q100689+Q100680+Q99716+Q99480+Q98869+Q98078+Q98197, data=cTrain2, family=binomial)
t= table(cTrain2$Party,predict(cLog2,type="response")>0.5)
t
sum(diag(t))/sum(t)
## NOTE(review): this test-set cutoff is 0.6 while every other threshold in
## the script is 0.5 -- confirm the asymmetry is intentional.
t= table(cTest2$Party,predict(cLog2,newdata = cTest2,type="response")>0.6)
t
sum(diag(t))/sum(t)
library(class)
## NOTE(review): knn() is fed the raw factor data frames (outcome column
## included) and the result is discarded -- looks like a scratch experiment.
knn(cTrain1,cTest1,cTrain1$Party)
## Part 5: k-NN (k = 40) on all predictors after numeric coercion and
## centering/scaling; labels are held aside before dropping the outcome.
Train3 = Train
Test3 = Test
tr_lab= Train3$Party
ts_lab = Test3$Party
Train3$USER_ID = NULL
Test3$USER_ID = NULL
Train3$Party =NULL
Test3$Party = NULL
library(caret)
library(class)
for (col in setdiff(names(Train3),c("Party"))){
#if mean(is.na(pollTrain[,col])
Train3[,col] = as.numeric(Train3[,col])
Test3[,col] = as.numeric(Test3[,col])
#print(paste("The year is", year))
}
## Normalize with training-set statistics, then classify and report accuracy.
preproc1 = preProcess(Train3)
Train4 = predict(preproc1,Train3)
Test4= predict(preproc1,Test3)
str(Train4)
PredKnn=knn(Train4,Test4,tr_lab,k=40)
t=table(ts_lab,PredKnn)
t
sum(diag(t))/sum(t)
?knn
| /Analytics_Edge/Kaggle_Competition/part_impute.R.r | no_license | lastworden/Statistics | R | false | false | 6,750 | r |
main_cols=c("USER_ID","YOB","Gender","Income","HouseholdStatus","EducationLevel","Party")
TrainImp = read.csv("pollTrain_imputed.csv")
table(TrainImp$YOB)
otrain1 = read.csv("train2016.csv")
nrow(otrain1)
trlab = subset(otrain1$Party,!otrain1$YOB %in% c("2011","2013","2039","1880","1881","1896"))
length(trlab)
TrainImp$Party = ifelse(TrainImp$Party=="Democrat",1,0)
table(TrainImp$Party)
t=c()
t
for (col in setdiff(names(TrainImp),c("USER_ID","Party"))){
#if mean(is.na(pollTrain[,col])
cat(col,"cor:",cor(as.numeric(TrainImp[,col]),as.numeric(TrainImp$Party)),"\n")
val = cor(as.numeric(TrainImp[,col]),as.numeric(TrainImp$Party))
t = c(t,c(col=val))
#print(paste("The year is", year))
}
names(t) = setdiff(names(TrainImp),c("USER_ID","Party"))
t1=which(abs(t)>0.09)
names(t1)
TrainImp$Party = as.factor(TrainImp$Party)
library(caTools)
split = sample.split(TrainImp$Party, SplitRatio = 0.75)
Train = subset(TrainImp, split == TRUE)
Test = subset(TrainImp, split == FALSE)
library(rpart)
library(e1071)
library(randomForest)
rfModel1 = randomForest(Party~ Gender+Q115611+Q113181+Q109244+Q98197, data = Train)#, ntree = 1000, sampsize = 300)
predTrain = predict(rfModel1)
t = table(Train$Party,predTrain)
t
sum(diag(t))/sum(t)
predTest = predict(rfModel1,newdata = Test)
t = table(Test$Party,predTest)
t
sum(diag(t))/sum(t)
LogModel = glm(Party~YOB+Gender+HouseholdStatus+EducationLevel+Q98197+Q98078+
Q98869+Q99480+Q100689+Q101163+Q105840+Q107869+Q109244, data = Train, family=binomial)
summary(LogModel)
predLogTrain = predict(LogModel,type = "response")
t = table(Train$Party,predLogTrain>0.5)
t
sum(diag(t))/sum(t)
predLogTest = predict(LogModel,newdata = Test,type="response")
t = table(Test$Party,predLogTest>0.5)
t
sum(diag(t))/sum(t)
Tree1 = rpart(Party~YOB+Gender+Income+HouseholdStatus+EducationLevel+Q122771+Q121699+
Q120379+Q120472+Q119851+Q116881+Q115611+Q115899+Q113181+Q110740+Q109244+
Q107869+Q106272+Q106042+Q105840+Q102089+Q101163+Q101596+
Q100689+Q100680+Q99716+Q99480+Q98869+Q98078+Q98197, data = Train, method="class")
predtreeTest = predict(Tree1,newdata = Test, type = "class")
t = table(Test$Party,predtreeTest)
t
sum(diag(t))/sum(t)
library(e1071)
library(caret)
numFolds = trainControl(method = "cv",number = 10)
cpGrid = expand.grid(.cp=seq(0.001,0.01,0.001))
summary(cpGrid)
train(Party~ YOB+Gender+Income+HouseholdStatus+EducationLevel+Q122771+Q121699+Q120379+Q120472+Q119851+Q116881+Q115611+Q115899+Q113181+Q110740+Q109244+Q107869+Q106272+Q106042+Q105840+Q102089+Q101163+Q101596+
Q100689+Q100680+Q99716+Q99480+Q98869+Q98078+Q98197,data = Train, method = "rpart", trControl = numFolds, tuneGrid = cpGrid)
TestFinal = read.csv("pollTest_imputed.csv")
PredTreeTestFin = predict(Tree1,newdata = TestFinal,type = "class")
PredTreeTestFin = as.factor(ifelse(PredTreeTestFin==1, "Democrat", "Republican"))
table(PredTreeTestFin)
MySubmission = data.frame(USER_ID = TestFinal$USER_ID, Predictions = PredTreeTestFin)
str(MySubmission)
write.csv(MySubmission, "SubmissionRPART_IMP_FE_01.csv", row.names=FALSE)
Train2 = Train[,c("YOB","Gender","Income","HouseholdStatus","EducationLevel")]
Test2 = Test[,c("YOB","Gender","Income","HouseholdStatus","EducationLevel")]
setdiff(names(Train2),names(Test2))
for (col in setdiff(names(Train2),c("Party"))){
#if mean(is.na(pollTrain[,col])
Train2[,col] = as.numeric(Train2[,col])
Test2[,col] = as.numeric(Test2[,col])
#print(paste("The year is", year))
}
str(Train2)
preproc = preProcess(Train2)
normTrain = predict(preproc, Train2)
normTest = predict(preproc, Test2)
str(normTest)
km = kmeans(normTrain, centers = 2)
table(km$cluster)
library(flexclust)
km.kcca = as.kcca(km,normTrain)
clusterTrain = predict(km.kcca)
clusterTest = predict(km.kcca, newdata = normTest)
table(clusterTest)
length(clusterTest)
length(clusterTrain)
cTrain1 = subset(Train, clusterTrain == 1)
cTrain2 = subset(Train, clusterTrain == 2)
names(cTrain1)
cTest1 = subset(Test, clusterTest == 1)
cTest2 = subset(Test, clusterTest == 2)
cTree1 = rpart(Party~YOB+Gender+Income+HouseholdStatus+EducationLevel+Q122771+Q121699+Q120379+Q120472+Q119851+Q116881+Q115611+Q115899+Q113181+Q110740+Q109244+Q107869+Q106272+Q106042+Q105840+Q102089+Q101163+Q101596+
Q100689+Q100680+Q99716+Q99480+Q98869+Q98078+Q98197, data=cTrain1, method = "class",cp=0.006)
t= table(cTrain1$Party,predict(cTree1,type="class"))
t
sum(diag(t))/sum(t)
t= table(cTest1$Party,predict(cTree1,newdata = cTest1,type="class"))
t
sum(diag(t))/sum(t)
cTree2 = rpart(Party~YOB+Gender+Income+HouseholdStatus+EducationLevel+Q122771+Q121699+Q120379+Q120472+Q119851+Q116881+Q115611+Q115899+Q113181+Q110740+Q109244+Q107869+Q106272+Q106042+Q105840+Q102089+Q101163+Q101596+
Q100689+Q100680+Q99716+Q99480+Q98869+Q98078+Q98197, data=cTrain2, method = "class")
t= table(cTrain2$Party,predict(cTree2,type="class"))
t
sum(diag(t))/sum(t)
t= table(cTest2$Party,predict(cTree2,newdata = cTest2,type="class"))
t
sum(diag(t))/sum(t)
cLog1 = glm(Party~YOB+Gender+Income+HouseholdStatus+EducationLevel+Q122771+Q121699+Q120379+Q120472+Q119851+Q116881+Q115611+Q115899+Q113181+Q110740+Q109244+Q107869+Q106272+Q106042+Q105840+Q102089+Q101163+Q101596+
Q100689+Q100680+Q99716+Q99480+Q98869+Q98078+Q98197, data=cTrain1, family=binomial)
t= table(cTrain1$Party,predict(cLog1,type="response")>0.5)
t
sum(diag(t))/sum(t)
t= table(cTest1$Party,predict(cLog1,newdata = cTest1,type="response")>0.5)
t
sum(diag(t))/sum(t)
cLog2 = glm(Party~YOB+Gender+Income+HouseholdStatus+EducationLevel+Q122771+Q121699+Q120379+Q120472+Q119851+Q116881+Q115611+Q115899+Q113181+Q110740+Q109244+Q107869+Q106272+Q106042+Q105840+Q102089+Q101163+Q101596+
Q100689+Q100680+Q99716+Q99480+Q98869+Q98078+Q98197, data=cTrain2, family=binomial)
t= table(cTrain2$Party,predict(cLog2,type="response")>0.5)
t
sum(diag(t))/sum(t)
t= table(cTest2$Party,predict(cLog2,newdata = cTest2,type="response")>0.6)
t
sum(diag(t))/sum(t)
library(class)
knn(cTrain1,cTest1,cTrain1$Party)
Train3 = Train
Test3 = Test
tr_lab= Train3$Party
ts_lab = Test3$Party
Train3$USER_ID = NULL
Test3$USER_ID = NULL
Train3$Party =NULL
Test3$Party = NULL
library(caret)
library(class)
for (col in setdiff(names(Train3),c("Party"))){
#if mean(is.na(pollTrain[,col])
Train3[,col] = as.numeric(Train3[,col])
Test3[,col] = as.numeric(Test3[,col])
#print(paste("The year is", year))
}
preproc1 = preProcess(Train3)
Train4 = predict(preproc1,Train3)
Test4= predict(preproc1,Test3)
str(Train4)
PredKnn=knn(Train4,Test4,tr_lab,k=40)
t=table(ts_lab,PredKnn)
t
sum(diag(t))/sum(t)
?knn
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/FitAcfCoef.R
\name{FitAcfCoef}
\alias{FitAcfCoef}
\title{Fits an AR1 AutoCorrelation Function Using the Cardano Formula}
\usage{
FitAcfCoef(a, b)
}
\arguments{
\item{a}{Coefficient a : first estimate of the autocorrelation at lag 1.}
\item{b}{Coefficient b : first estimate of the autocorrelation at lag 2.}
}
\value{
Best estimate of the autocorrelation at lag 1.
}
\description{
This function finds the minimum point of the fourth order polynomial
(a - x)^2 + 0.25(b - x^2)^2 written to fit the two autoregression coefficients
a and b.\cr
A consequence of the Cardano formula is that, provided a and b are in [0 1],
the problem is well posed, delta > 0 and there is only one minimum.\cr\cr
This function is called in Alpha() to minimize the mean square differences
between the theoretical autocorrelation function of an AR1 and the first
guess of the estimated autocorrelation function estacf, using only the
first two lags.
}
\examples{
series <- GenSeries(1000, 0.35, 2, 1)
estacf <- acf(series[951:1000], plot = FALSE)$acf
alpha <- FitAcfCoef(max(estacf[2], 0), max(estacf[3], 0))
print(alpha)
}
\author{
History:\cr
0.1 - 2012-06 (L. Auger, \email{ludovic.auger@meteo.fr}) - Original code\cr
1.0 - 2013-09 (N. Manubens, \email{nicolau.manubens@ic3.cat}) - Formatting to CRAN
}
\keyword{datagen}
| /man/FitAcfCoef.Rd | no_license | rpkgs/s2dverification | R | false | true | 1,391 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/FitAcfCoef.R
\name{FitAcfCoef}
\alias{FitAcfCoef}
\title{Fits an AR1 AutoCorrelation Function Using the Cardano Formula}
\usage{
FitAcfCoef(a, b)
}
\arguments{
\item{a}{Coefficient a : first estimate of the autocorrelation at lag 1.}
\item{b}{Coefficient b : first estimate of the autocorrelation at lag 2.}
}
\value{
Best estimate of the autocorrelation at lag 1.
}
\description{
This function finds the minimum point of the fourth order polynom
(a - x)2 + 0.25(b - x2)2 written to fit the two autoregression coefficients
a and b.\cr
A consequence of the Cardano formula is that, provided a and b are in [0 1],
the problem is well posed, delta > 0 and there is only one minimum.\cr\cr
This function is called in Alpha() to minimize the mean square differences
between the theoretical autocorrelation function of an AR1 and the first
guess of the estimated autocorrelation function estacf, using only the
first two lags.
}
\examples{
series <- GenSeries(1000, 0.35, 2, 1)
estacf <- acf(series[951:1000], plot = FALSE)$acf
alpha <- FitAcfCoef(max(estacf[2], 0), max(estacf[3], 0))
print(alpha)
}
\author{
History:\cr
0.1 - 2012-06 (L. Auger, \email{ludovic.auger@meteo.fr}) - Original code\cr
1.0 - 2013-09 (N. Manubens, \email{nicolau.manubens@ic3.cat}) - Formatting to CRAN
}
\keyword{datagen}
|
## Metagene line plots of H3K36me3 ChIP signal minus matched input (reads per
## million) for six samples (three T* and three SL1_* libraries) over a
## scaled gene profile: 5 kb upstream, gene body, 5 kb downstream in 101 bins.
## Usage: Rscript <this script> <IP bin table> <input bin table>
args<-commandArgs(TRUE)
file<- args[1]
bins<-as.data.frame(read.table(file,header=TRUE))
colnames(bins)
input<-args[2]
input_bins<-as.data.frame(read.table(input,header=TRUE))
colnames(input_bins)
# SL1_213_input-3 BIN SL1_418_input-7 BIN SL1_485_input-12 BIN T5_input-7 BIN T6_input-2 BIN T7_input-2
#SL1_213_K36me3-7,SL1_418_H3K36me3-6","SL1_485_H3K36me3-11","T5_H3K36me3-6","T6_H3K36me3-1","T7_H3K36me3-1"
library("ggplot2")
# NOTE(review): ggplot2 is loaded but everything below is base graphics.
#xLabAt=c(0,20,50,80,100)
#xLabel=c("-5Kb","TSS","Genebody","TES","+5Kb")
xLabAt=c(0,20,50,80,100)
xLabel=c("-5Kb","TSS","Genebody","TES","+5Kb")
label=c("T5_H3K36me3","T6_H3K36me3","T7_H3K36me3","SL1_213_K36me3","SL1_418_H3K36me3","SL1_485_H3K36me3")
# The first sample sets up the plot region (axes drawn manually below); the
# remaining samples are overlaid as lines via points(type = "l").
plot(x=c(1:101),y=(bins$T5_H3K36me3.6-input_bins$T5_input.7),lty=1,type="l",col="blue",ylim=c(0,0.5),axes=FALSE,ylab="IP-INPUT(Reads per million)",xlab="Genomic region",lwd=2)
points(x=c(1:101),y=(bins$T6_H3K36me3.1-input_bins$T6_input.2),lty=1,type="l",col="blue",lwd=2)
points(x=c(1:101),y=(bins$T7_H3K36me3.1-input_bins$T7_input.2),lty=1,type="l",col="blue",lwd=2)
points(x=c(1:101),y=(bins$SL1_418_H3K36me3.6-input_bins$SL1_418_input.7),lty=1,type="l",col="red",lwd=2)
points(x=c(1:101),y=(bins$SL1_213_K36me3.7-input_bins$SL1_213_input.3),lty=1,type="l",col="darkorange",lwd=2)
points(x=c(1:101),y=(bins$SL1_485_H3K36me3.11-input_bins$SL1_485_input.12),lty=1,type="l",col="darkgreen",lwd=2)
#
legend(0,0.5,label,col=c("blue","blue","blue","red","darkorange","darkgreen"), lty=c(1,1,1,1),cex=0.75,lwd=2)
# 'lab=' partially matches axis()'s 'labels' argument.
axis(1,at=c(xLabAt),lab=c(xLabel),lwd=1,cex=1, cex.lab=1, cex.axis=1)
axis(2,lwd=1,cex=1, cex.lab=1, cex.axis=1)
| /viz/get_lineplots_bin_TcellK36me3.R | no_license | kgaonkar6/chipseq-analysis | R | false | false | 1,654 | r | args<-commandArgs(TRUE)
file<- args[1]
bins<-as.data.frame(read.table(file,header=TRUE))
colnames(bins)
input<-args[2]
input_bins<-as.data.frame(read.table(input,header=TRUE))
colnames(input_bins)
# SL1_213_input-3 BIN SL1_418_input-7 BIN SL1_485_input-12 BIN T5_input-7 BIN T6_input-2 BIN T7_input-2
#SL1_213_K36me3-7,SL1_418_H3K36me3-6","SL1_485_H3K36me3-11","T5_H3K36me3-6","T6_H3K36me3-1","T7_H3K36me3-1"
library("ggplot2")
#xLabAt=c(0,20,50,80,100)
#xLabel=c("-5Kb","TSS","Genebody","TES","+5Kb")
xLabAt=c(0,20,50,80,100)
xLabel=c("-5Kb","TSS","Genebody","TES","+5Kb")
label=c("T5_H3K36me3","T6_H3K36me3","T7_H3K36me3","SL1_213_K36me3","SL1_418_H3K36me3","SL1_485_H3K36me3")
plot(x=c(1:101),y=(bins$T5_H3K36me3.6-input_bins$T5_input.7),lty=1,type="l",col="blue",ylim=c(0,0.5),axes=FALSE,ylab="IP-INPUT(Reads per million)",xlab="Genomic region",lwd=2)
points(x=c(1:101),y=(bins$T6_H3K36me3.1-input_bins$T6_input.2),lty=1,type="l",col="blue",lwd=2)
points(x=c(1:101),y=(bins$T7_H3K36me3.1-input_bins$T7_input.2),lty=1,type="l",col="blue",lwd=2)
points(x=c(1:101),y=(bins$SL1_418_H3K36me3.6-input_bins$SL1_418_input.7),lty=1,type="l",col="red",lwd=2)
points(x=c(1:101),y=(bins$SL1_213_K36me3.7-input_bins$SL1_213_input.3),lty=1,type="l",col="darkorange",lwd=2)
points(x=c(1:101),y=(bins$SL1_485_H3K36me3.11-input_bins$SL1_485_input.12),lty=1,type="l",col="darkgreen",lwd=2)
#
legend(0,0.5,label,col=c("blue","blue","blue","red","darkorange","darkgreen"), lty=c(1,1,1,1),cex=0.75,lwd=2)
axis(1,at=c(xLabAt),lab=c(xLabel),lwd=1,cex=1, cex.lab=1, cex.axis=1)
axis(2,lwd=1,cex=1, cex.lab=1, cex.axis=1)
|
## plot3.R
## Plots the three energy sub-metering series for 2007-02-01/02 from the UCI
## "Individual household electric power consumption" data set to plot3.png.

# a vector of classes for the columns: Date/Time as factor, the rest numeric
mycols <- c(rep("factor", 2), rep("numeric", 7))
# reading the *.txt file from working directory
# (spell out header = TRUE; the original relied on partial matching of
# 'head' and on the reassignable alias T)
hhpc <- read.csv("data/household_power_consumption.txt", header = TRUE,
                 sep = ";", na.strings = "?", colClasses = mycols)
# two variable for date limits, min and max, in the specific format
datamin <- as.Date("2007-02-01", "%Y-%m-%d")
datamax <- as.Date("2007-02-02", "%Y-%m-%d")
# convert the Date variable in date format
hhpc$Date <- as.Date(hhpc$Date, "%d/%m/%Y")
# convert the Time variable in time format (combined with the date)
hhpc$Time <- paste(hhpc$Date, hhpc$Time, sep = " ")
hhpc$Time <- strptime(hhpc$Time, "%Y-%m-%d %H:%M:%S")
# selection of observation with Date between datamin and datamax
selection <- hhpc$Date >= datamin & hhpc$Date <= datamax
hhpc <- hhpc[selection, ]
# set the image dimension
png("plot3.png", width = 480, height = 480, units = "px")
ylimits <- range(c(hhpc$Sub_metering_1, hhpc$Sub_metering_2, hhpc$Sub_metering_3))
# draw the first series, then overlay the others with lines() instead of the
# original repeated plot()/par(new = TRUE) calls (same result, idiomatic)
plot(hhpc$Time, hhpc$Sub_metering_1, xlab = "", ylab = "Energy sub metering",
     type = "l", ylim = ylimits, col = "black")
lines(hhpc$Time, hhpc$Sub_metering_2, col = "red")
lines(hhpc$Time, hhpc$Sub_metering_3, col = "blue")
legend("topright",
       legend = c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"),
       lty = c(1, 1, 1), col = c("black", "red", "blue"))
dev.off()
| /plot3.R | no_license | alexcipro/ExData_Plotting1 | R | false | false | 1,501 | r | ## plot3.R
# a vector of classes for the columns
mycols <- c(rep("factor", 2), rep("numeric", 7))
# reading the *.txt file from working directory
hhpc <- read.csv("data/household_power_consumption.txt", head=T, sep=";", na.strings = "?", colClasses = mycols)
# two variable for date limits, min and max, in the specific format
datamin <- as.Date("2007-02-01", "%Y-%m-%d")
datamax <- as.Date("2007-02-02", "%Y-%m-%d")
# convert the Date variable in date format
hhpc$Date <- as.Date(hhpc$Date, "%d/%m/%Y")
# convert the Time variable in time format
hhpc$Time <- paste(hhpc$Date, hhpc$Time, sep=" ")
hhpc$Time <- strptime(hhpc$Time, "%Y-%m-%d %H:%M:%S")
# selection of observation with Date between datamin and datamax
selection <- hhpc$Date >= datamin & hhpc$Date <= datamax
hhpc <- hhpc[selection, ]
# set the image dimension
png("plot3.png", width=480, height=480, units="px")
ylimits = range(c(hhpc$Sub_metering_1, hhpc$Sub_metering_2, hhpc$Sub_metering_3))
plot(hhpc$Time, hhpc$Sub_metering_1, xlab = "", ylab = "Energy sub metering", type = "l", ylim = ylimits, col = "black")
par(new = TRUE)
plot(hhpc$Time, hhpc$Sub_metering_2, xlab = "", axes = FALSE, ylab = "", type = "l", ylim = ylimits, col = "red")
par(new = TRUE)
plot(hhpc$Time, hhpc$Sub_metering_3, xlab = "", axes = FALSE, ylab = "", type = "l", ylim = ylimits, col = "blue")
legend("topright", legend = c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"), lty = c(1, 1, 1),
col = c("black", "red", "blue"))
dev.off()
|
#email lindsay olson w/ questions: olsonlindsaya@gmail.com
#
# Renders per-variable cluster plots for a k-means analysis: boxplots for the
# 2-cluster solution and barplots of cluster means for the 3-cluster solution.
# NOTE(review): depends on `clusteringData`, `clusterMeans3` and the color
# palette `coul` already being defined in the workspace -- confirm before
# sourcing.

clusteringData$cluster2 <- as.factor(clusteringData$cluster2)

## --- Boxplots, 2-cluster solution ---------------------------------------
plotNames = names(clusteringData)
name = c("1", "2")
for (i in 1:14) {
  mypath <- file.path("/Users/lolson/Documents/001_JDP/001_BDIL/006_Projects/14_ParentalStress/kmeansClustering/2Cluster",
                      paste("boxPlot_", plotNames[i], ".tiff", sep = ""))
  myVar = plotNames[i]
  # Open the device once per file.  (The original called tiff() twice per
  # iteration, leaking an unclosed graphics device each time.)
  tiff(file = mypath, width = 4, height = 3, units = 'in', res = 300)
  # ggplot objects must be print()ed inside a loop, otherwise nothing is
  # drawn to the device and the tiff comes out empty.
  print(
    ggplot(clusteringData,
           aes(x = cluster2, y = as.matrix(clusteringData[i]),
               color = cluster2), lwd = 4) +
      geom_boxplot(outlier.shape = NA) +
      theme(text = element_text(size = 20)) +
      scale_color_manual(values = c("green", "orange")) +
      scale_fill_manual(values = c("green", "orange")) +
      xlab("Cluster") +
      ylab(myVar) +
      coord_cartesian(ylim = c(10, 90))
  )
  dev.off()
}  # BUG FIX: this closing brace was missing, so the original file did not parse

## --- Barplots of cluster means, 3-cluster solution ----------------------
plotNames = names(clusterMeans3)
name = c("1", "2", "3")
for (i in 2:15) {
  mypath <- file.path("/Users/lolson/Documents/001_JDP/001_BDIL/006_Projects/14_ParentalStress/kmeansClustering/3Cluster",
                      paste("barPlot_", plotNames[i], ".tiff", sep = ""))
  mytitle = paste(plotNames[i])
  myVar = plotNames[i]
  # (Removed the duplicate tiff() call and the unused `thisData` assignment,
  # which referenced the undefined variable `clusterMeans`.)
  tiff(file = mypath, width = 4, height = 3, units = 'in', res = 300)
  barplot(as.matrix(clusterMeans3[i]), main = mytitle,
          ylim = c(0, 70),
          xlab = mytitle, beside = TRUE, col = coul,
          names.arg = name, cex.axis = 1.5, cex.names = 1.5, cex.lab = 1.5,
          cex.main = 1.5)
  dev.off()
}
| /automatePlotting.R | no_license | olsonlindsaya/automated-reg-models | R | false | false | 1,598 | r | #email lindsay olson w/ questions: olsonlindsaya@gmail.com
# Generates one boxplot per variable for the 2-cluster solution and one
# barplot of cluster means per variable for the 3-cluster solution, saving
# each figure as a 300-dpi TIFF.
clusteringData$cluster2 <- as.factor(clusteringData$cluster2)

## 2-cluster boxplots --------------------------------------------------------
plotNames <- names(clusteringData)
for (i in seq_len(14)) {
  mypath <- file.path(
    "/Users/lolson/Documents/001_JDP/001_BDIL/006_Projects/14_ParentalStress/kmeansClustering/2Cluster",
    paste0("boxPlot_", plotNames[i], ".tiff")
  )
  myVar <- plotNames[i]
  # Open the graphics device exactly once; the original called tiff() twice
  # per iteration but dev.off() only once, leaking a device each pass.
  tiff(file = mypath, width = 4, height = 3, units = 'in', res = 300)
  # ggplot objects are only rendered when print()ed explicitly inside a
  # loop; without print() every TIFF comes out blank.
  print(
    ggplot(clusteringData,
           aes(x = cluster2, y = .data[[myVar]], color = cluster2)) +
      geom_boxplot(outlier.shape = NA) +
      theme(text = element_text(size = 20)) +
      scale_color_manual(values = c("green", "orange")) +
      scale_fill_manual(values = c("green", "orange")) +
      xlab("Cluster") +
      ylab(myVar) +
      coord_cartesian(ylim = c(10, 90))
  )
  dev.off()
}  # NOTE: this closing brace was missing in the original, which nested the
   # two loops inside each other and left the braces unbalanced.

## 3-cluster barplots --------------------------------------------------------
plotNames <- names(clusterMeans3)
name <- c("1", "2", "3")
for (i in 2:15) {
  mypath <- file.path(
    "/Users/lolson/Documents/001_JDP/001_BDIL/006_Projects/14_ParentalStress/kmeansClustering/3Cluster",
    paste0("barPlot_", plotNames[i], ".tiff")
  )
  mytitle <- plotNames[i]
  # `coul` is a palette assumed to be defined earlier in the session —
  # TODO confirm it exists before running this loop.
  tiff(file = mypath, width = 4, height = 3, units = 'in', res = 300)
  barplot(as.matrix(clusterMeans3[i]), main = mytitle,
          ylim = c(0, 70),
          xlab = mytitle, beside = TRUE, col = coul,
          names.arg = name, cex.axis = 1.5, cex.names = 1.5,
          cex.lab = 1.5, cex.main = 1.5)
  dev.off()
}
|
# Filename: mission_launch_multi.R
# A script to upload new missions to iButton Thermochron temperature
# dataloggers. See the comments below for the additional external software
# needed to make this run. This version will loop continuously and upload the
# same mission to multiple iButtons so that they all start at the same
# absolute time and sample at the same frequency.
# Author: Luke Miller Mar 23, 2012
###############################################################################
setwd('D:/R/ibuttons') # Alter this for your needs, thermoms.exe must be in the
# current R working directory.
# The thermoms.exe file was originally downloaded as part of the Maxim iButton
# 1-Wire Public Domain Kit.
# There are several versions of the Kit available, including
# versions with pre-compiled binaries (executables) for Windows/Linux/OSX.
# http://www.maxim-ic.com/products/ibutton/software/1wire/wirekit.cfm
# On my Windows 7 x64 computer using the DS9490B USB ibutton adapter, I used the
# precompiled binary build for Win64 USB (DS9490 + WinUSB) Preliminary Version
# 3.11 Beta 2,
# filename: winusb64vc311Beta2_r2.zip, downloaded 2012-03-15
# Unzip this file and find the .exe files thermoms.exe (and thermodl.exe) in the
# builds\winusb64vc\release folder. Copy these to your R working directory.
# The drivers for the DS9490 USB iButton adapter must also be downloaded and
# installed:
# http://www.maxim-ic.com/products/ibutton/software/tmex/
# I downloaded and installed the file "install_1_wire_drivers_x64_v403.msi"
# The Java OneWireViewer app can also be downloaded and used to verify that your
# drivers work and that you can communicate with iButtons successfully through
# the USB adapter. You can download this app here:
# http://www.maxim-ic.com/products/ibutton/software/1wire/OneWireViewer.cfm
cat('*************************************************************\n\a')
cat('************RESET COMPUTER CLOCK TO STANDARD TIME************\n')
Sys.sleep(2)
cat('\a')
Sys.sleep(2)
cat('************SERIOUSLY, RESET YOUR CLOCK TO STANDARD TIME*********\a\n')
cat('*****************************************************************\n')
cat('Enter a desired mission start time as YYYY-MM-DD HH:MM\n')
cat('Or else enter 0 for immediate start. Delay must be less than 45.5 days.\n')
time.delay = scan(file = '', what = character(), n = 1, sep = ',')
# The sep value in scan is necessary so that spaces are not interpreted as the
# default record delimiter.
# Decide whether the mission launch can proceed. Sets `launch` and, for a
# delayed start, converts `time.delay` from the user's string into a POSIXct
# timestamp. Delay must fit the iButton's 16-bit minute counter
# (0 to 65535 minutes, ~45.5 days).
if (time.delay == '0') {
  # Immediate start requested; nothing further to validate.
  launch <- TRUE
} else {
  # Parse the user's date/time string into a POSIXct object.
  time.delay <- as.POSIXct(strptime(time.delay, format = '%Y-%m-%d %H:%M'))
  if (is.na(time.delay)) {
    # The string could not be parsed as a date/time, so give up.
    cat('Time could not be interpreted\a\n')
    cat('Quitting now\n')
    launch <- FALSE
  } else {
    # Valid timestamp: check the implied delay against the allowed range.
    curr.time <- as.POSIXct(Sys.time())
    # Difference between requested start and now, in minutes.
    t.diff <- (as.numeric(time.delay) - as.numeric(curr.time)) / 60
    if (t.diff < 0) {
      cat('Time delay is less than zero. Check your watch.\a\n')
      cat('Quitting now\n')
      launch <- FALSE
    } else if (t.diff > 65535) {
      cat('Time delay is longer than 45.5 days. You are bad at math.\a\n')
      cat('Quitting now\n')
      launch <- FALSE
    } else if (t.diff > 0 & t.diff < 1) {
      cat('Time delay is being set to 0 for immediate launch.\a\n')
      launch <- TRUE
    } else {
      # Readable time with a delay between 1 and 65535 minutes.
      launch <- TRUE
    }
  }
} # end of time.delay if-statement
################################################################################
## Start the launch loop.
if (launch) { # only do this part if launch == TRUE
cat('Enter the desired sampling frequency in minutes (1 to 255):\n')
freq = scan(file = '', what = numeric(), n = 1)
freq = as.character(freq) # convert to character
loop = TRUE # Set loop continuation variable to TRUE initially
# This while loop will repeat continuously to launch multiple iButtons.
# The same parameters will be used to launch every iButton, except that the
# start delay (if >0) will automatically adjust as time elapses so that each
# iButton will start at the same absolute time.
while (loop) {
# Recompute the start delay on every loop pass so that each iButton
# launched in this session starts at the same absolute time.
if (time.delay != '0') {
  curr.time <- as.POSIXct(Sys.time())  # current time
  # Minutes remaining until the requested start, rounded down: iButtons
  # only resolve their start delay to whole minutes.
  time.diff <- floor((as.numeric(time.delay) - as.numeric(curr.time)) / 60)
  # If enough wall-clock time elapsed while swapping iButtons, the delay
  # can shrink to (or below) zero; fall back to an immediate launch.
  if (time.diff < 1) {
    time.diff <- 0  # set for immediate launch.
    cat('*********************************\n')
    cat('Delay has shrunk to zero. Setting for immediate launch.')
    cat('*********************************\a\n')
  }
  time.diff <- as.character(time.diff)  # thermoms.exe expects a string
} else {
  # BUG FIX: the original wrote `time.diff == '0'` — a comparison whose
  # result was discarded — so time.diff was never assigned and the
  # immediate-launch path crashed at the cat() that follows this block.
  time.diff <- '0'
}
cat('Calculated delay: ', time.diff, ' minutes\n')
# The thermoms.exe program expects a series of inputs in order to establish the
# mission parameters. Rgui doesn't work all that well with interactive command
# line programs (Rterm is just fine, but our goal is to not have to interact),
# so instead we'll create a character vector of answers to thermoms.exe's
# queries and supply those via the input option of the system() function.
# The parameters are as follows:
# Erase current mission (0) yes, (1) no. Answer: 0
# Enter start delay in minutes (0 to 65535). Answer: whatever you choose
# Enter sample rate in minutes (1 to 255). Answer: whatever you choose
# Enable roll-over (0) yes, (1) no. Answer: 1
# Enter high temperature threshold in Celsius (-40 to 70). Answer: 70
# Enter low temperature threshold in Celsius (-40 to 70). Answer: -40
mission.params = c('0', # erase current mission
time.diff, # start delay in minutes (0 to 65535)
freq, # sample rate in minutes (1 to 255)
'1', # enable roll-over? 1 = no roll over
'70', # high temperature threshold
'-40') # low temperature threshold
# Launch thermoms.exe
# The 1st argument supplied to thermoms needs to be the location of the iButton
# in the system. If using a DS9490B USB reader on Windows, you will probably get
# away with using ds2490-0. The DS9490 USB reader uses a ds2490 chip internally,
# so you need to tell thermoms.exe to look for a ds2490.
out = system('thermoms.exe ds2490-0', intern = TRUE, wait = TRUE,
input = mission.params)
# Check the output from the mission launch to ensure that the correct parameters
# were registered. Occasionally the time delay will not be properly registered
# on the first launch, so the loops below will immediately relaunch the mission
# to get the time delay to register properly.
# If no iButton is plugged in, this should be the failure message
if (out[7] == 'Thermochron not present on 1-Wire') {
cat('******************************************\n')
cat(out[7],'\n')
cat('******************************************\n\a')
} else { # if out[7] is blank, a mission was probably uploaded
for (i in 73:90) { # Display read-back from mission upload
cat(out[i],'\n')
}
# Make sure the delay was actually entered correctly if it's >0
# This while loop will run a maximum of 3 times. Each time through
# it will compare the output in out[73] to make sure the correct
# delay was returned from the iButton. If not, it will re-launch the
# mission up to two more times before sending a failure message
retry = 0
while (retry < 3) {
setting = out[79]
nums = gregexpr('[0-9]', setting) # Find numbers in the string
digs = substr(setting, nums[[1]][1],
nums[[1]][length(nums[[1]])]) # Extract delay value
if (digs != time.diff & retry < 2) {
# If delay value returned by iButton doesn't match the
# programmed delay, warn the user and re-launch the mission
cat('*****************************************\n')
cat('****Launch did not work, re-launching****\a\n')
cat('*****************************************\n')
out = system('thermoms.exe ds2490-0', intern = TRUE,
wait = TRUE, input = mission.params)
for (i in 73:90) { # Display the newly returned values
cat(out[i],'\n')
}
retry = retry + 1 # increment the loop counter
cat('---------------------------\n')
} else if (digs != time.diff & retry == 2) {
# If the returned delay still doesn't match the programmed
# delay after two more iterations, send a failure message
# to the user and let them deal with this issue.
# A common failure mode is due to a dead battery, which will
# keep returning a clock time of 01/01/1970 00:00:00
retry = 3
cat('****************************************\n')
cat('*****Uploaded failed, check iButton*****\n')
cat('****************************************\n')
for (i in 73:90) {
cat(out[i],'\n')
}
answer = out[85] # Check the iButton's internal clock
# Find the location of the date in this line (if present)
d1 = regexpr('[0-9]{2}/[0-9]{2}/[0-9]{4}', answer)
# Extract the date as a character string
button.date = substr(answer, d1,
d1 + attr(d1,'match.length') - 1)
# If the iButton date returns 01/01/1970, the iButton
# battery is probably dead
if (button.date == '01/01/1970') {
cat('********************************\n')
cat('The iButton battery may be dead.\n')
cat('********************************\n')
}
} else if (digs == time.diff) { # iButton mission launch worked
cat('\n----------Success---------\n')
retry = 3
}
} # End of retry while-loop
} # End of if (out[7]... if-else statements
# Ask the user to load the next iButton or quit.
cat('Swap next iButton and press Enter to launch. Enter q to quit.\a\n')
user.input = scan(file = '', what = character(), n = 1)
if (length(user.input) > 0) { # Allows user to not type anything
if (user.input == 'q') loop = FALSE # Kills loop
} else {loop = TRUE}
} # End of 'loop' while-loop
} # End of 'launch' if-statement
cat('Finished \a\n')
| /mission_launch_multi.R | no_license | mltConsEcol/ibuttons | R | false | false | 10,855 | r | # Filename: mission_launch_multi.R
# A script to upload new missions to iButton Thermochron temperature
# dataloggers. See the comments below for the additional external software
# needed to make this run. This version will loop continuously and upload the
# same mission to multiple iButtons so that they all start at the same
# absolute time and sample at the same frequency.
# Author: Luke Miller Mar 23, 2012
###############################################################################
setwd('D:/R/ibuttons') # Alter this for your needs, thermoms.exe must be in the
# current R working directory.
# The thermoms.exe file was originally downloaded as part of the Maxim iButton
# 1-Wire Public Domain Kit.
# There are several versions of the Kit available, including
# versions with pre-compiled binaries (executables) for Windows/Linux/OSX.
# http://www.maxim-ic.com/products/ibutton/software/1wire/wirekit.cfm
# On my Windows 7 x64 computer using the DS9490B USB ibutton adapter, I used the
# precompiled binary build for Win64 USB (DS9490 + WinUSB) Preliminary Version
# 3.11 Beta 2,
# filename: winusb64vc311Beta2_r2.zip, downloaded 2012-03-15
# Unzip this file and find the .exe files thermoms.exe (and thermodl.exe) in the
# builds\winusb64vc\release folder. Copy these to your R working directory.
# The drivers for the DS9490 USB iButton adapter must also be downloaded and
# installed:
# http://www.maxim-ic.com/products/ibutton/software/tmex/
# I downloaded and installed the file "install_1_wire_drivers_x64_v403.msi"
# The Java OneWireViewer app can also be downloaded and used to verify that your
# drivers work and that you can communicate with iButtons successfully through
# the USB adapter. You can download this app here:
# http://www.maxim-ic.com/products/ibutton/software/1wire/OneWireViewer.cfm
cat('*************************************************************\n\a')
cat('************RESET COMPUTER CLOCK TO STANDARD TIME************\n')
Sys.sleep(2)
cat('\a')
Sys.sleep(2)
cat('************SERIOUSLY, RESET YOUR CLOCK TO STANDARD TIME*********\a\n')
cat('*****************************************************************\n')
cat('Enter a desired mission start time as YYYY-MM-DD HH:MM\n')
cat('Or else enter 0 for immediate start. Delay must be less than 45.5 days.\n')
time.delay = scan(file = '', what = character(), n = 1, sep = ',')
# The sep value in scan is necessary so that spaces are not interpreted as the
# default record delimiter.
# Decide whether the mission launch can proceed. Sets `launch` and, for a
# delayed start, converts `time.delay` from the user's string into a POSIXct
# timestamp. Delay must fit the iButton's 16-bit minute counter
# (0 to 65535 minutes, ~45.5 days).
if (time.delay == '0') {
  # Immediate start requested; nothing further to validate.
  launch <- TRUE
} else {
  # Parse the user's date/time string into a POSIXct object.
  time.delay <- as.POSIXct(strptime(time.delay, format = '%Y-%m-%d %H:%M'))
  if (is.na(time.delay)) {
    # The string could not be parsed as a date/time, so give up.
    cat('Time could not be interpreted\a\n')
    cat('Quitting now\n')
    launch <- FALSE
  } else {
    # Valid timestamp: check the implied delay against the allowed range.
    curr.time <- as.POSIXct(Sys.time())
    # Difference between requested start and now, in minutes.
    t.diff <- (as.numeric(time.delay) - as.numeric(curr.time)) / 60
    if (t.diff < 0) {
      cat('Time delay is less than zero. Check your watch.\a\n')
      cat('Quitting now\n')
      launch <- FALSE
    } else if (t.diff > 65535) {
      cat('Time delay is longer than 45.5 days. You are bad at math.\a\n')
      cat('Quitting now\n')
      launch <- FALSE
    } else if (t.diff > 0 & t.diff < 1) {
      cat('Time delay is being set to 0 for immediate launch.\a\n')
      launch <- TRUE
    } else {
      # Readable time with a delay between 1 and 65535 minutes.
      launch <- TRUE
    }
  }
} # end of time.delay if-statement
################################################################################
## Start the launch loop.
if (launch) { # only do this part if launch == TRUE
cat('Enter the desired sampling frequency in minutes (1 to 255):\n')
freq = scan(file = '', what = numeric(), n = 1)
freq = as.character(freq) # convert to character
loop = TRUE # Set loop continuation variable to TRUE initially
# This while loop will repeat continuously to launch multiple iButtons.
# The same parameters will be used to launch every iButton, except that the
# start delay (if >0) will automatically adjust as time elapses so that each
# iButton will start at the same absolute time.
while (loop) {
# Recompute the start delay on every loop pass so that each iButton
# launched in this session starts at the same absolute time.
if (time.delay != '0') {
  curr.time <- as.POSIXct(Sys.time())  # current time
  # Minutes remaining until the requested start, rounded down: iButtons
  # only resolve their start delay to whole minutes.
  time.diff <- floor((as.numeric(time.delay) - as.numeric(curr.time)) / 60)
  # If enough wall-clock time elapsed while swapping iButtons, the delay
  # can shrink to (or below) zero; fall back to an immediate launch.
  if (time.diff < 1) {
    time.diff <- 0  # set for immediate launch.
    cat('*********************************\n')
    cat('Delay has shrunk to zero. Setting for immediate launch.')
    cat('*********************************\a\n')
  }
  time.diff <- as.character(time.diff)  # thermoms.exe expects a string
} else {
  # BUG FIX: the original wrote `time.diff == '0'` — a comparison whose
  # result was discarded — so time.diff was never assigned and the
  # immediate-launch path crashed at the cat() that follows this block.
  time.diff <- '0'
}
cat('Calculated delay: ', time.diff, ' minutes\n')
# The thermoms.exe program expects a series of inputs in order to establish the
# mission parameters. Rgui doesn't work all that well with interactive command
# line programs (Rterm is just fine, but our goal is to not have to interact),
# so instead we'll create a character vector of answers to thermoms.exe's
# queries and supply those via the input option of the system() function.
# The parameters are as follows:
# Erase current mission (0) yes, (1) no. Answer: 0
# Enter start delay in minutes (0 to 65535). Answer: whatever you choose
# Enter sample rate in minutes (1 to 255). Answer: whatever you choose
# Enable roll-over (0) yes, (1) no. Answer: 1
# Enter high temperature threshold in Celsius (-40 to 70). Answer: 70
# Enter low temperature threshold in Celsius (-40 to 70). Answer: -40
mission.params = c('0', # erase current mission
time.diff, # start delay in minutes (0 to 65535)
freq, # sample rate in minutes (1 to 255)
'1', # enable roll-over? 1 = no roll over
'70', # high temperature threshold
'-40') # low temperature threshold
# Launch thermoms.exe
# The 1st argument supplied to thermoms needs to be the location of the iButton
# in the system. If using a DS9490B USB reader on Windows, you will probably get
# away with using ds2490-0. The DS9490 USB reader uses a ds2490 chip internally,
# so you need to tell thermoms.exe to look for a ds2490.
out = system('thermoms.exe ds2490-0', intern = TRUE, wait = TRUE,
input = mission.params)
# Check the output from the mission launch to ensure that the correct parameters
# were registered. Occasionally the time delay will not be properly registered
# on the first launch, so the loops below will immediately relaunch the mission
# to get the time delay to register properly.
# If no iButton is plugged in, this should be the failure message
if (out[7] == 'Thermochron not present on 1-Wire') {
cat('******************************************\n')
cat(out[7],'\n')
cat('******************************************\n\a')
} else { # if out[7] is blank, a mission was probably uploaded
for (i in 73:90) { # Display read-back from mission upload
cat(out[i],'\n')
}
# Make sure the delay was actually entered correctly if it's >0
# This while loop will run a maximum of 3 times. Each time through
# it will compare the output in out[73] to make sure the correct
# delay was returned from the iButton. If not, it will re-launch the
# mission up to two more times before sending a failure message
retry = 0
while (retry < 3) {
setting = out[79]
nums = gregexpr('[0-9]', setting) # Find numbers in the string
digs = substr(setting, nums[[1]][1],
nums[[1]][length(nums[[1]])]) # Extract delay value
if (digs != time.diff & retry < 2) {
# If delay value returned by iButton doesn't match the
# programmed delay, warn the user and re-launch the mission
cat('*****************************************\n')
cat('****Launch did not work, re-launching****\a\n')
cat('*****************************************\n')
out = system('thermoms.exe ds2490-0', intern = TRUE,
wait = TRUE, input = mission.params)
for (i in 73:90) { # Display the newly returned values
cat(out[i],'\n')
}
retry = retry + 1 # increment the loop counter
cat('---------------------------\n')
} else if (digs != time.diff & retry == 2) {
# If the returned delay still doesn't match the programmed
# delay after two more iterations, send a failure message
# to the user and let them deal with this issue.
# A common failure mode is due to a dead battery, which will
# keep returning a clock time of 01/01/1970 00:00:00
retry = 3
cat('****************************************\n')
cat('*****Uploaded failed, check iButton*****\n')
cat('****************************************\n')
for (i in 73:90) {
cat(out[i],'\n')
}
answer = out[85] # Check the iButton's internal clock
# Find the location of the date in this line (if present)
d1 = regexpr('[0-9]{2}/[0-9]{2}/[0-9]{4}', answer)
# Extract the date as a character string
button.date = substr(answer, d1,
d1 + attr(d1,'match.length') - 1)
# If the iButton date returns 01/01/1970, the iButton
# battery is probably dead
if (button.date == '01/01/1970') {
cat('********************************\n')
cat('The iButton battery may be dead.\n')
cat('********************************\n')
}
} else if (digs == time.diff) { # iButton mission launch worked
cat('\n----------Success---------\n')
retry = 3
}
} # End of retry while-loop
} # End of if (out[7]... if-else statements
# Ask the user to load the next iButton or quit.
cat('Swap next iButton and press Enter to launch. Enter q to quit.\a\n')
user.input = scan(file = '', what = character(), n = 1)
if (length(user.input) > 0) { # Allows user to not type anything
if (user.input == 'q') loop = FALSE # Kills loop
} else {loop = TRUE}
} # End of 'loop' while-loop
} # End of 'launch' if-statement
cat('Finished \a\n')
|
### NFL Big Data Bowl 2020-21
# Robert Bernhardt, Andrew Rogan, Daniel Weiss
# November 2020
## User-Defined Functions for Data Visualization
# Plot Play-Specific Data
## Data Visualization Preparation Source Function
# Required Inputs: TrackingPlayData , input_game , input_play
# Outputs: Offense , Defense , Ball , BallX , FirstDownLine
## Data Visualization Preparation Source Function
# Standardizes one play's tracking data so the offense always moves toward
# increasing x, splits rows into offense / defense / ball, and derives the
# first-down line from the snap location plus yards to go.
#
# Args:
#   GeneralPlayData:  play-level row(s); must contain `yardsToGo`.
#   TrackingPlayData: frame-by-frame tracking rows for the same play; must
#                     contain x, y, dir, o, playDirection, team, position,
#                     and frameId.
# Returns: named list with Offense, Defense, Ball, BallX, FirstDownLine.
prep_vis_data <- function(GeneralPlayData, TrackingPlayData) {
  # Flip coordinates and angles for leftward plays. Base `(a + 180) %% 360`
  # replaces the non-base mod() helper the original depended on.
  TrackingPlayData <- TrackingPlayData %>%
    mutate(x   = case_when(playDirection == "right" ~ x,
                           playDirection == "left"  ~ 120 - x),
           y   = case_when(playDirection == "right" ~ y,
                           playDirection == "left"  ~ 160 / 3 - y),
           dir = case_when(playDirection == "right" ~ dir,
                           playDirection == "left"  ~ (dir + 180) %% 360),
           o   = case_when(playDirection == "right" ~ o,
                           playDirection == "left"  ~ (o + 180) %% 360))

  # The QB's team identifies the offense. Extract the team as a plain
  # string: the original coerced a one-row tibble with as.character(),
  # which is fragile and breaks when more than one QB row is present.
  qb_rows <- TrackingPlayData %>%
    filter(position == "QB", frameId == 1)
  qb_team <- qb_rows$team[1]

  Offense <- TrackingPlayData %>%
    filter(team == qb_team)
  Defense <- TrackingPlayData %>%
    filter(team != qb_team, team != "football")
  Ball <- TrackingPlayData %>%
    filter(team == "football")

  # Ball x-position at the first frame (snap), kept as a one-cell data
  # frame as downstream consumers expect.
  BallX <- Ball %>%
    filter(frameId == 1) %>%
    select(x)
  YardsToGo <- GeneralPlayData %>%
    select(yardsToGo)
  FirstDownLine <- BallX + YardsToGo

  list(Offense = Offense, Defense = Defense, Ball = Ball,
       BallX = BallX, FirstDownLine = FirstDownLine)
}
#
## Throw-Based Preparation Source Function
# Required Inputs: input_data , input_game , input_play
# Outputs: Offense_throw , Defense_throw , Ball_throw , BallX , FirstDownLine
# Identifying Players , Play Information
## Throw-Based Preparation Source Function
# Identifies the "critical moment" of a play (pass, spike, sack, or tackle),
# locates the targeted receiver at that moment — falling back to the
# offensive player nearest the ball when no target is recorded — and returns
# offense / defense / ball positions at that frame plus the first-down line.
#
# Args:
#   GeneralPlayData:  play-level row(s); must contain `yardsToGo`.
#   PlayerTargeted:   row with `targetNflId` (may be NA).
# Returns: named list with Offense_throw, Defense_throw, Ball_throw, BallX,
#   FirstDownLine, target_player_location.
prep_throw_vis_data <- function(GeneralPlayData, TrackingPlayData, PlayerTargeted) {
  # Standardize so the offense always moves toward increasing x; base %%
  # replaces the non-base mod() helper.
  TrackingPlayData <- TrackingPlayData %>%
    mutate(x   = case_when(playDirection == "right" ~ x,
                           playDirection == "left"  ~ 120 - x),
           y   = case_when(playDirection == "right" ~ y,
                           playDirection == "left"  ~ 160 / 3 - y),
           dir = case_when(playDirection == "right" ~ dir,
                           playDirection == "left"  ~ (dir + 180) %% 360),
           o   = case_when(playDirection == "right" ~ o,
                           playDirection == "left"  ~ (o + 180) %% 360))

  # The QB's team identifies the offense (plain string, not a coerced
  # tibble; the original mixed as.character(qb_team) and qb_team$team).
  qb_rows <- TrackingPlayData %>%
    filter(position == "QB", frameId == 1)
  qb_team <- qb_rows$team[1]

  # Pick the first event present in priority order; fail loudly if none
  # occurred (the original left critical_moment undefined in that case and
  # crashed later with an unrelated "object not found" error).
  event_priority <- c("pass_forward", "pass_shovel", "qb_spike",
                      "qb_sack", "qb_strip_sack", "tackle")
  present <- event_priority[event_priority %in% TrackingPlayData$event]
  if (length(present) == 0) {
    stop("prep_throw_vis_data: no pass/sack/tackle event found in this play",
         call. = FALSE)
  }
  critical_moment <- present[1]
  critical_frame <- TrackingPlayData %>%
    filter(event == critical_moment)

  # Location of the recorded target at the critical moment, if any.
  target_player_location <- critical_frame %>%
    filter(nflId == PlayerTargeted$targetNflId) %>%
    select(x, y, s, a, o, dis, nflId, displayName)
  if (is.na(PlayerTargeted$targetNflId) || nrow(target_player_location) == 0) {
    # No recorded target: fall back to the offensive player closest to the
    # ball when the critical event occurs.
    ball_arrive <- critical_frame %>%
      filter(displayName == "Football")
    target_player <- critical_frame %>%
      mutate(ballDistance = sqrt((as.numeric(x) - as.numeric(ball_arrive$x))^2 +
                                 (as.numeric(y) - as.numeric(ball_arrive$y))^2)) %>%
      filter(displayName != "Football") %>%
      filter(team == qb_team) %>%
      filter(ballDistance == min(ballDistance)) %>%
      select(nflId, displayName)
    target_player_location <- critical_frame %>%
      filter(nflId == target_player$nflId) %>%
      select(x, y, s, a, o, dis, nflId, displayName)
  }

  # Offense / defense / ball rows at the critical moment.
  Offense_throw <- TrackingPlayData %>%
    filter(team == qb_team, event == critical_moment)
  Defense_throw <- TrackingPlayData %>%
    filter(team != qb_team, team != "football", event == critical_moment)
  Ball_throw <- TrackingPlayData %>%
    filter(team == "football", event == critical_moment)

  # First-down line: ball x at the snap plus yards to go.
  BallX <- TrackingPlayData %>%
    filter(team == "football", frameId == 1) %>%
    select(x)
  YardsToGo <- GeneralPlayData %>%
    select(yardsToGo)
  FirstDownLine <- BallX + YardsToGo

  list(Offense_throw = Offense_throw, Defense_throw = Defense_throw,
       Ball_throw = Ball_throw, BallX = BallX, FirstDownLine = FirstDownLine,
       target_player_location = target_player_location)
}
#
## Adds Shell ID Visualization
prep_shell_vis <- function(TrackingPlayData){
dat1 <- TrackingPlayData
#flip x,y,direction, orientation
dat1 <- dat1 %>%
mutate(x = case_when(playDirection == "right" ~ x ,
playDirection == "left" ~ 120 - x)) %>%
mutate(y = case_when(playDirection == "right" ~ y ,
playDirection == "left" ~ 53.33 - y)) %>%
mutate(o = case_when(playDirection == "right" ~ o,
playDirection == "left" ~ mod(o+180, 360))) %>%
mutate(dir = case_when(playDirection == "right" ~ dir,
playDirection == "left" ~ mod(dir+180, 360)))
##ball location
#y
dat1 <- dat1 %>%
group_by(gameId, playId) %>%
mutate(BallSnapY = y[match('Football ball_snap', paste(displayName, event))]) %>%
ungroup
#x
dat1 <- dat1 %>%
group_by(gameId, playId) %>%
mutate(BallSnapX = x[match('Football ball_snap', paste(displayName, event))]) %>%
ungroup
###Deleting problematic plays/frames
#week 2
dat1 <- dat1 %>%
filter(!(gameId == 2018091605 & playId == 2715))
#week 3
dat1 <- dat1 %>%
filter(!(gameId == 2018092301))
#week 4
dat1 <- dat1 %>%
filter(!(gameId == 2018093011))
##week 14
#fix week 14 issue where there are two different frames with ball_snap
dat1$event[dat1$gameId == 2018120905 & dat1$playId == 1426 & dat1$event == 'ball_snap' & dat1$frameId == 12] = 'None'
#two frame 92s in week 14
dat1 <- dat1 %>%
filter(!(gameId == 2018120905 & playId == 1426))
#week 15
dat1 <- dat1 %>%
filter(!(gameId == 2018121605))
#week 16
dat1 <- dat1 %>%
filter(!(gameId == 2018123001 & playId == 435))
dat1 <- dat1 %>%
filter(!(gameId == 2018123006))
dat1 <- dat1 %>%
filter(!(gameId == 2018123000 & playId == 131))
#snap frame
dat1 <- dat1 %>%
group_by(gameId, playId) %>%
mutate(SnapFrame = frameId[match('ball_snap', event)])
#is it the snap frame?
dat1 <- dat1 %>%
group_by(gameId, playId) %>%
mutate(isSnapFrame = case_when(frameId==SnapFrame ~ 1,
TRUE ~0))
#frames since the snap
dat1 <- dat1 %>%
group_by(gameId, playId) %>%
mutate(FramesSinceSnap = frameId - SnapFrame)
##side of player relative to spot where ball is snapped from (changes continuously)
dat1 <- dat1 %>%
mutate(SideDuring = case_when(BallSnapY < y ~ "Left",
BallSnapY > y ~ "Right",
BallSnapY == y ~ "OTB"))
#side at the snap (should be SideDuring for that player when the ball was snapped)
dat1 <- dat1 %>%
group_by(gameId, playId) %>%
mutate(SideSnap = case_when(event == 'ball_snap' & y >= BallSnapY ~ 'Left',
event == 'ball_snap' & y < BallSnapY ~ 'Right',
event == 'ball_snap' & y == BallSnapY ~ 'Center')) %>%
mutate(SideSnap = replace(SideSnap, event != 'ball_snap', SideSnap[event == 'ball_snap'])) %>%
ungroup
##eliminate plays in opposing redzone
dat1 <- filter(dat1, BallSnapX < 90)
#needs to be 90 to account for left endzone
##location of each player at snap
#Y
dat1 <- dat1 %>%
group_by(displayName, gameId, playId) %>%
mutate(SnapY = y[event == "ball_snap"]) %>%
ungroup
#X
dat1 <- dat1 %>%
group_by(displayName, gameId, playId) %>%
mutate(SnapX = x[event == "ball_snap"]) %>%
ungroup
#ID correct QB
dat1 <- dat1 %>%
group_by(gameId , playId) %>%
mutate(ball_snap_y = y[match('Football ball_snap' , paste(displayName, event))]) %>%
mutate(ydist2ball_snap = abs(SnapY - BallSnapY)) %>%
mutate( isBackfieldQB = case_when(position == "QB" & ydist2ball_snap < 2 ~ 1,
TRUE ~ 0)) %>%
mutate(ball_snap_y = NULL) %>% mutate(dist2ball_snap = NULL)
#backfield count
dat1 <- dat1 %>%
group_by(gameId, playId, frameId) %>%
mutate(BackfieldQBCount = sum(isBackfieldQB))
dattest <- filter(dat1, BackfieldQBCount == 0 & FramesSinceSnap==0)
#eliminate no QB under center
dat1 <- filter(dat1, BackfieldQBCount != 0)
#eliminate 2 QBs under center
dat1 <- filter(dat1, BackfieldQBCount != 2)
##is player on offense or defense
#create qb team
dat1 <- dat1 %>%
group_by(gameId, playId, frameId) %>%
mutate(qb_team = team[match(1, isBackfieldQB)]) %>%
ungroup
#is player on QB's team?
dat1 <- dat1 %>%
mutate(OffDef = case_when(team == qb_team ~ "Offense",
team != qb_team ~ "Defense")) %>%
mutate(OffDef = case_when(displayName != "Football" ~ OffDef,
displayName == "Football" ~ "Football"))
##delete plays where QB isn't directly behind center
#create QB team
dat1 <- dat1 %>%
group_by(gameId, playId) %>%
mutate(QBTeam = case_when(isBackfieldQB == 1 ~ "QB",
TRUE ~ OffDef))
##number receivers on each side
dat1 <- dat1 %>%
group_by(gameId, playId, QBTeam) %>%
mutate(Num = local({
fsnap <- isSnapFrame == 1
y <- y[fsnap]
left <- SideSnap[fsnap] == "Left"
right <- !left
x <- integer(length(y))
names(x) <- displayName[fsnap]
x[left] <- rank(-y[left], ties.method = "min")
x[right] <- rank(y[right], ties.method = "min")
unname(x[displayName])
}))
###location of each player at various frames
# delete plays less than 1.5 seconds
#dat1 <- dat1 %>%
#group_by(gameId, playId) %>% mutate(MaxFrame = max(frameId)) %>%
#filter(MaxFrame >= 26)
# get rid of plays where pass happens before frame 26
dat1 <- dat1 %>%
group_by(gameId, playId) %>%
mutate(ThrowFrame = frameId[match('pass_forward', event)]) %>%
mutate(ShovelThrowFrame = frameId[match('pass_shovel', event)]) %>%
filter(ThrowFrame >= 26 | is.na(ThrowFrame) | ShovelThrowFrame >= 26)
# get rid of plays where theres not 15 frames after snap
dat1 <- dat1 %>%
group_by(gameId, playId) %>%
mutate(MaxFramesSinceSnap = max(FramesSinceSnap)) %>%
filter(MaxFramesSinceSnap >= 15)
##half-second into the play
#Y
dat1 <- dat1 %>%
group_by(displayName, gameId, playId) %>%
mutate(HalfSecondY = y[FramesSinceSnap == 5]) %>%
ungroup
#X
dat1 <- dat1 %>%
group_by(displayName, gameId, playId) %>%
mutate(HalfSecond = x[FramesSinceSnap == 5]) %>%
ungroup
##8/10s of second into the play
# Same per-player snapshot pattern as above, at 8 frames (~0.8 s) and
# 15 frames (~1.5 s) after the snap.
#Y
dat1 <- dat1 %>%
group_by(displayName, gameId, playId) %>%
mutate(EightTenthsY = y[FramesSinceSnap == 8]) %>%
ungroup
#X
dat1 <- dat1 %>%
group_by(displayName, gameId, playId) %>%
mutate(EightTenthsX = x[FramesSinceSnap == 8]) %>%
ungroup
##1.5 seconds into the play
#Y
dat1 <- dat1 %>%
group_by(displayName, gameId, playId) %>%
mutate(SecondAndHalfY = y[FramesSinceSnap == 15]) %>%
ungroup
#X
dat1 <- dat1 %>%
group_by(displayName, gameId, playId) %>%
mutate(SecondAndHalfX = x[FramesSinceSnap == 15]) %>%
ungroup
#new db position name
# Collapse detailed positions into coarse buckets: all deep-secondary
# positions -> "DB", all linebacker positions -> "LB", everything else kept.
dat1 <- dat1 %>%
mutate(DBPos = case_when(position == "DB" |position == "CB"| position == "FS" | position == "SS" |position == "S" ~ "DB",
position == "MLB" |position == "LB" | position == "OLB" |position == "ILB"~"LB",
TRUE ~ position))
##L and R
dat1 <- dat1%>%
mutate(LR = case_when(SideSnap == "Left" ~ "L",
SideSnap == "Right" ~ "R",
SideSnap == "OTB" ~ "C"))
## Build the receiver label, e.g. "L1Offense" / "R2Defense".
# Previously two chained paste(..., sep="") calls; paste0() performs the
# same concatenation (including NA -> "NA" coercion) in one step.
dat1$ReceiverNumber <- paste0(dat1$LR, dat1$Num, dat1$QBTeam)
#unique playid
# NOTE(review): assumes playId < 1000 -- a 4-digit playId would collide
# across games; confirm against the tracking data's playId range.
dat1 <- dat1 %>%
mutate(uniqueplay = (gameId*1000)+ playId)
###location of each player by number
## For each labelled receiver (L1..L4, R1..R4 on offense), record his
## lateral (y) position at the ball_snap event and broadcast it to every
## row of the play. Plays without that receiver get NA from match().
##Left
#L1
dat1 <- dat1 %>%
group_by(gameId, playId) %>%
mutate(L1OffY = y[match('L1Offense ball_snap', paste(ReceiverNumber, event))]) %>%
ungroup
#L2
dat1 <- dat1 %>%
group_by(gameId, playId) %>%
mutate(L2OffY = y[match('L2Offense ball_snap', paste(ReceiverNumber, event))]) %>%
ungroup
#L3
dat1 <- dat1 %>%
group_by(gameId, playId) %>%
mutate(L3OffY = y[match('L3Offense ball_snap', paste(ReceiverNumber, event))]) %>%
ungroup
#L4
dat1 <- dat1 %>%
group_by(gameId, playId) %>%
mutate(L4OffY = y[match('L4Offense ball_snap', paste(ReceiverNumber, event))]) %>%
ungroup
##Right
#R1
dat1 <- dat1 %>%
group_by(gameId, playId) %>%
mutate(R1OffY = y[match('R1Offense ball_snap', paste(ReceiverNumber, event))]) %>%
ungroup
#R2
dat1 <- dat1 %>%
group_by(gameId, playId) %>%
mutate(R2OffY = y[match('R2Offense ball_snap', paste(ReceiverNumber, event))]) %>%
ungroup
#R3
dat1 <- dat1 %>%
group_by(gameId, playId) %>%
mutate(R3OffY = y[match('R3Offense ball_snap', paste(ReceiverNumber, event))]) %>%
ungroup
#R4
dat1 <- dat1 %>%
group_by(gameId, playId) %>%
mutate(R4OffY = y[match('R4Offense ball_snap', paste(ReceiverNumber, event))]) %>%
ungroup
## creating column
## Lateral spacing between the two widest receivers on each side; used to
## place the "column" boundary line below.
#L1 - L2
dat1 <- dat1 %>%
mutate(L1L2Diff = L1OffY - L2OffY)
#R1 - R2 (opposite--has to be R2-R1)
dat1 <- dat1 %>%
mutate(R1R2Diff = R2OffY - R1OffY)
##create bounds of field
# Field is 53.33 yards wide; y = 53.33 is the left sideline, y = 0 right.
#left side
dat1 <- dat1 %>%
mutate(LeftSideline = 53.33)
#right side
dat1 <- dat1 %>%
mutate(RightSideline = 0)
##drop obs where there's no L1 or R1
dat1 <- dat1[!is.na(dat1$L1OffY), ]
dat1 <- dat1[!is.na(dat1$R1OffY), ]
##Create column lines
# Boundary between the outside "column" zone and the rest of the field:
# no #2 receiver -> halfway between #1 and the tackle (BallSnapY +/- 4);
# wide #1/#2 split (> 3 yds) -> 1.5 yds inside #1;
# otherwise 75% of the way from #2 out to #1 (mirrored on the right).
#left
dat1 <- dat1 %>%
group_by(gameId, playId) %>%
mutate(LeftColumnLine = case_when(is.na(L1L2Diff) ~ (L1OffY + BallSnapY + 4) * .5,
L1L2Diff > 3 ~ L1OffY - 1.5,
L1L2Diff <=3 ~ (L1OffY-L2OffY)*.75 + L2OffY))
#right
dat1 <- dat1 %>%
group_by(gameId, playId) %>%
mutate(RightColumnLine = case_when(is.na(R1R2Diff) ~ (R1OffY + BallSnapY - 4) * .5,
R1R2Diff > 3 ~ R1OffY + 1.5,
R1R2Diff <=3 ~ (R2OffY-R1OffY)*.25 + R1OffY))
##account for tight splits
# A "tight split" is #1 aligned within 6 yds of the ball laterally.
#create indicator
#left
dat1 <- dat1 %>%
group_by(gameId, playId) %>%
mutate(TightSplitLeft = case_when(L1OffY <= BallSnapY + 6 ~ 1,
TRUE ~ 0))
#right
dat1 <- dat1 %>%
group_by(gameId, playId) %>%
mutate(TightSplitRight = case_when(R1OffY >= BallSnapY - 6 ~ 1,
TRUE ~ 0))
##change column line accordingly
# With a tight split (and at least 1 yd of #1/#2 separation) pull the
# column line to half a yard inside #1.
#left
dat1 <- dat1 %>%
group_by(gameId, playId) %>%
mutate(LeftColumnLine = case_when(TightSplitLeft==1 & L1L2Diff >=1 ~ L1OffY - .5,
TRUE ~ LeftColumnLine))
#right
dat1 <- dat1 %>%
group_by(gameId, playId) %>%
mutate(RightColumnLine = case_when(TightSplitRight==1 & R1R2Diff >=1 ~ R1OffY + .5,
TRUE ~ RightColumnLine))
##create column indicator
# A defender is "in the column" when his snap position lies between the
# column line and the sideline.
#left
dat1 <- dat1 %>%
group_by(gameId, playId) %>%
mutate(LeftColumn = case_when(SnapY >= LeftColumnLine & SnapY < LeftSideline & OffDef=="Defense" ~ 1,
TRUE ~ 0))
#right
dat1 <- dat1 %>%
group_by(gameId, playId) %>%
mutate(RightColumn = case_when(SnapY <= RightColumnLine & SnapY > RightSideline & OffDef=="Defense" ~ 1,
TRUE ~ 0))
##column count(all positions)
#left
dat1 <- dat1 %>%
group_by(gameId, playId, frameId) %>%
mutate(LeftColumnCount = sum(LeftColumn))
#right
dat1 <- dat1 %>%
group_by(gameId, playId, frameId) %>%
mutate(RightColumnCount = sum(RightColumn))
##account for when column player is a little more than a yard inside number 1
# If the column came up empty with a normal (non-tight) wide split, widen
# the zone to 2.5 yds inside #1 and retest below.
dat1 <- dat1 %>%
group_by(gameId, playId) %>%
mutate(LeftColumnLine = case_when(LeftColumnCount==0 & TightSplitLeft == 0 & L1L2Diff > 4 ~ L1OffY - 2.5,
TRUE ~ LeftColumnLine))
dat1 <- dat1 %>%
group_by(gameId, playId) %>%
mutate(RightColumnLine = case_when(RightColumnCount==0 & TightSplitRight == 0 & R1R2Diff > 4 ~ R1OffY + 2.5,
TRUE ~ RightColumnLine))
##redo column indicator
#left
dat1 <- dat1 %>%
group_by(gameId, playId) %>%
mutate(LeftColumn = case_when(SnapY >= LeftColumnLine & SnapY < LeftSideline & OffDef=="Defense" ~ 1,
TRUE ~ LeftColumn))
#right
dat1 <- dat1 %>%
group_by(gameId, playId) %>%
mutate(RightColumn = case_when(SnapY <= RightColumnLine & SnapY > RightSideline & OffDef=="Defense" ~ 1,
TRUE ~ RightColumn))
###update column
##eliminate blitzers
# A player who is behind the ball (x is depth downfield) 1.5 s after the
# snap is treated as a rusher, not a column defender.
#left
dat1 <- dat1 %>%
group_by(gameId, playId) %>%
mutate(LeftColumn = case_when(SecondAndHalfX < BallSnapX ~ 0,
TRUE ~ LeftColumn))
#right
dat1 <- dat1 %>%
group_by(gameId, playId) %>%
mutate(RightColumn = case_when(SecondAndHalfX < BallSnapX ~ 0,
TRUE ~RightColumn))
##Eliminate DL in column
#left
dat1 <- dat1 %>%
group_by(gameId, playId) %>%
mutate(LeftColumn = case_when(LeftColumn == 1 & position == 'DL' ~ 0,
TRUE ~ LeftColumn))
#right
dat1 <- dat1 %>%
group_by(gameId, playId) %>%
mutate(RightColumn = case_when(RightColumn == 1 & position == 'DL' ~ 0,
TRUE ~ RightColumn))
##if there is a tight split, eliminate LBs
# With a tight split and more than one column defender, LBs are assumed to
# be box players rather than column defenders.
#left
dat1 <- dat1 %>%
group_by(gameId, playId) %>%
mutate(LeftColumn = case_when(L1OffY <= BallSnapY + 6 & DBPos =="LB" & LeftColumnCount > 1~ 0,
TRUE ~ LeftColumn))
#right
dat1 <- dat1 %>%
group_by(gameId, playId) %>%
mutate(RightColumn = case_when(R1OffY >= BallSnapY - 6 & DBPos =="LB" & RightColumnCount > 1 ~ 0,
TRUE ~ RightColumn))
#redo column count
#left
dat1 <- dat1 %>%
group_by(gameId, playId, frameId) %>%
mutate(LeftColumnCount = sum(LeftColumn))
#right
dat1 <- dat1 %>%
group_by(gameId, playId, frameId) %>%
mutate(RightColumnCount = sum(RightColumn))
##count number of DBs in column (just DBs)
#left
dat1 <- dat1 %>%
group_by(gameId, playId, frameId) %>%
mutate(LeftColumnDBCount = sum(DBPos=="DB" & LeftColumn==1))
#right
dat1 <- dat1 %>%
group_by(gameId, playId, frameId) %>%
mutate(RightColumnDBCount = sum(DBPos=="DB" & RightColumn==1))
## alley
# Spacing between the #2 and #3 receivers, used for alley placement.
#L2 - L3
dat1 <- dat1 %>%
mutate(L2L3Diff = L2OffY - L3OffY)
#R2 - R3 (opposite--has to be R3-R2)
dat1 <- dat1 %>%
mutate(R2R3Diff = R3OffY - R2OffY)
##is #2 inside tackle?
# Tackle is modelled at BallSnapY +/- 4 yards laterally.
#left
dat1 <- dat1 %>%
mutate(L2OffInsideOT = case_when(is.na(L2OffY) ~ 0,
L2OffY < (BallSnapY + 4) ~ 1,
L2OffY >= (BallSnapY + 4) ~ 0))
#right
dat1 <- dat1 %>%
mutate(R2OffInsideOT = case_when(is.na(R2OffY) ~ 0,
R2OffY < (BallSnapY - 4) ~ 1,
R2OffY >= (BallSnapY - 4) ~ 0))
#what number is the QB
# The backfield QB got swept into the receiver numbering; relabel any
# numbered slot occupied by the QB as plain "QB".
dat1 <- dat1 %>%
mutate(ReceiverNumber = case_when(ReceiverNumber == "L2Offense" & isBackfieldQB == 1 ~ "QB",
ReceiverNumber == "L3Offense" & isBackfieldQB == 1 ~ "QB",
ReceiverNumber == "L4Offense" & isBackfieldQB == 1 ~ "QB",
ReceiverNumber == "L5Offense" & isBackfieldQB == 1 ~ "QB",
ReceiverNumber == "L6Offense" & isBackfieldQB == 1 ~ "QB",
ReceiverNumber == "R2Offense" & isBackfieldQB == 1 ~ "QB",
ReceiverNumber == "R3Offense" & isBackfieldQB == 1 ~ "QB",
ReceiverNumber == "R4Offense" & isBackfieldQB == 1 ~ "QB",
ReceiverNumber == "R5Offense" & isBackfieldQB == 1 ~ "QB",
ReceiverNumber == "R6Offense" & isBackfieldQB == 1 ~ "QB",
TRUE ~ ReceiverNumber))
#Receiver count by side
# Count non-NA snap positions among the four tracked slots per side.
dat1$LeftReceiverCount <- rowSums(!is.na(dat1[c('L1OffY', 'L2OffY', 'L3OffY', "L4OffY")]))
dat1$RightReceiverCount <- rowSums(!is.na(dat1[c('R1OffY', 'R2OffY', 'R3OffY', "R4OffY")]))
##ID players inside tight end
# 1 when the numbered receiver aligned inside the tackle-box edge
# (BallSnapY +/- 4); NA slots count as 0.
# NOTE(review): L3/L4 (and R3/R4) also test is.na of the lower-numbered
# slots -- presumably a receiver can't exist without the inner slots
# filled; confirm that assumption holds for this dataset.
#Left
#L1 inside tight?
dat1 <- dat1 %>%
mutate(L1InsideTE = case_when(L1OffY <= (BallSnapY + 4) ~ 1,
L1OffY > (BallSnapY + 4) ~ 0))
#L2 inside tight?
dat1 <- dat1 %>%
mutate(L2InsideTE = case_when(is.na(L2OffY) ~ 0,
L2OffY <= (BallSnapY + 4) ~ 1,
L2OffY > (BallSnapY + 4) ~ 0))
#L3 inside tight?
dat1 <- dat1 %>%
mutate(L3InsideTE = case_when(is.na(L2OffY) ~ 0,
is.na(L3OffY) ~ 0,
L3OffY <= (BallSnapY + 4) ~ 1,
L3OffY > (BallSnapY + 4) ~ 0))
#L4 inside tight?
dat1 <- dat1 %>%
mutate(L4InsideTE = case_when(is.na(L2OffY) ~ 0,
is.na(L3OffY) ~ 0,
is.na(L4OffY) ~ 0,
L4OffY <= (BallSnapY + 4) ~ 1,
L4OffY > (BallSnapY + 4) ~ 0))
#Right
#R1 inside tight?
dat1 <- dat1 %>%
mutate(R1InsideTE = case_when(R1OffY >= (BallSnapY - 4) ~ 1,
R1OffY < (BallSnapY - 4) ~ 0))
#R2 inside tight?
dat1 <- dat1 %>%
mutate(R2InsideTE = case_when(is.na(R2OffY) ~ 0,
R2OffY >= (BallSnapY - 4) ~ 1,
R2OffY < (BallSnapY - 4) ~ 0))
#R3 inside tight?
dat1 <- dat1 %>%
mutate(R3InsideTE = case_when(is.na(R2OffY) ~ 0,
is.na(R3OffY) ~ 0,
R3OffY >= (BallSnapY - 4) ~ 1,
R3OffY < (BallSnapY - 4) ~ 0))
#R4 inside tight?
dat1 <- dat1 %>%
mutate(R4InsideTE = case_when(is.na(R2OffY) ~ 0,
is.na(R3OffY) ~ 0,
is.na(R4OffY) ~ 0,
R4OffY >= (BallSnapY - 4) ~ 1,
R4OffY < (BallSnapY - 4) ~ 0))
#drop plays where number 1 is inside tackle
dat1 <- filter(dat1, L1InsideTE != 1)
dat1 <- filter(dat1, R1InsideTE != 1)
###subtract count if inside tackle
## Receivers aligned inside the tackle box don't count toward the outside
## receiver total; subtract each side's four inside-tackle indicators from
## that side's receiver count in a single vectorized step.
#Left
dat1 <- dat1 %>%
mutate(LeftReceiverCountOutsideTackle =
LeftReceiverCount - L4InsideTE - L3InsideTE - L2InsideTE - L1InsideTE)
#Right
dat1 <- dat1 %>%
mutate(RightReceiverCountOutsideTackle =
RightReceiverCount - R4InsideTE - R3InsideTE - R2InsideTE - R1InsideTE)
#Create indicator if first player outside is outside 5 yards from ball laterally
# Classify the widest receiver remaining outside the tackle relative to a
# 5-yard lateral buffer around the ball.
# FIX: the Count == 4 rows previously tested L3OffY/R3OffY (copy-paste
# error) while labelling L4/R4; they now test L4OffY/R4OffY, consistent
# with the alley-line math below that uses L4OffY/R4OffY for those labels.
#left
dat1 <- dat1 %>%
group_by(gameId, playId) %>%
mutate(LeftAlleyID = case_when(LeftReceiverCountOutsideTackle == 1 & L1OffY > (BallSnapY + 5) ~ 'L1Plus5',
LeftReceiverCountOutsideTackle == 1 & L1OffY <= (BallSnapY + 5) ~ 'L1Minus5',
LeftReceiverCountOutsideTackle == 2 & L2OffY > (BallSnapY + 5) ~ 'L2Plus5',
LeftReceiverCountOutsideTackle == 2 & L2OffY <= (BallSnapY + 5) ~ 'L2Minus5',
LeftReceiverCountOutsideTackle == 3 & L3OffY > (BallSnapY + 5) ~ 'L3Plus5',
LeftReceiverCountOutsideTackle == 3 & L3OffY <= (BallSnapY + 5) ~ 'L3Minus5',
LeftReceiverCountOutsideTackle == 4 & L4OffY > (BallSnapY + 5) ~ 'L4Plus5',
LeftReceiverCountOutsideTackle == 4 & L4OffY <= (BallSnapY + 5) ~ 'L4Minus5'))
#right #NOTE--"plus 5" means receiver is more than five yards from the ball
dat1 <- dat1 %>%
group_by(gameId, playId) %>%
mutate(RightAlleyID = case_when(RightReceiverCountOutsideTackle == 1 & R1OffY < (BallSnapY - 5) ~ 'R1Plus5',
RightReceiverCountOutsideTackle == 1 & R1OffY >= (BallSnapY - 5) ~ 'R1Minus5',
RightReceiverCountOutsideTackle == 2 & R2OffY < (BallSnapY - 5) ~ 'R2Plus5',
RightReceiverCountOutsideTackle == 2 & R2OffY >= (BallSnapY - 5) ~ 'R2Minus5',
RightReceiverCountOutsideTackle == 3 & R3OffY < (BallSnapY - 5) ~ 'R3Plus5',
RightReceiverCountOutsideTackle == 3 & R3OffY >= (BallSnapY - 5) ~ 'R3Minus5',
RightReceiverCountOutsideTackle == 4 & R4OffY < (BallSnapY - 5) ~ 'R4Plus5',
RightReceiverCountOutsideTackle == 4 & R4OffY >= (BallSnapY - 5) ~ 'R4Minus5'))
##Create alley lines
# Inner boundary of the "alley" zone: 5 yds off the ball when the keyed
# receiver is wide ("Plus5"), otherwise halfway between that receiver and
# the tackle-box edge (BallSnapY +/- 4).
#left
dat1 <- dat1 %>%
group_by(gameId, playId) %>%
mutate(LeftAlleyLine = case_when(LeftAlleyID == 'L1Plus5' ~ BallSnapY + 5,
LeftAlleyID == 'L1Minus5' ~ (L1OffY + BallSnapY + 4)* .5,
LeftAlleyID == 'L2Plus5' ~ BallSnapY + 5,
LeftAlleyID == 'L2Minus5' ~ (L2OffY + (BallSnapY + 4))*.5,
LeftAlleyID == 'L3Plus5' ~ BallSnapY + 5,
LeftAlleyID == 'L3Minus5' ~ (L3OffY + (BallSnapY + 4))*.5,
LeftAlleyID == 'L4Plus5' ~ BallSnapY + 5,
LeftAlleyID == 'L4Minus5' ~ (L4OffY + (BallSnapY + 4))*.5))
#right
dat1 <- dat1 %>%
group_by(gameId, playId) %>%
mutate(RightAlleyLine = case_when(RightAlleyID == 'R1Plus5' ~ BallSnapY - 5,
RightAlleyID == 'R1Minus5' ~ (R1OffY + (BallSnapY - 4))* .5,
RightAlleyID == 'R2Plus5' ~ BallSnapY - 5,
RightAlleyID == 'R2Minus5' ~ (R2OffY + (BallSnapY - 4))*.5,
RightAlleyID == 'R3Plus5' ~ BallSnapY - 5,
RightAlleyID == 'R3Minus5' ~ (R3OffY + (BallSnapY - 4))*.5,
RightAlleyID == 'R4Plus5' ~ BallSnapY - 5,
RightAlleyID == 'R4Minus5' ~ (R4OffY + (BallSnapY - 4))*.5))
#replace obs where alley line is wider than L1 or L2
# Clamp the alley line so it never lies outside the column line.
#left
dat1 <- dat1 %>%
group_by(gameId, playId) %>%
mutate(LeftAlleyLine = case_when(LeftAlleyLine > LeftColumnLine ~ LeftColumnLine,
TRUE ~ LeftAlleyLine))
#right
dat1 <- dat1 %>%
group_by(gameId, playId) %>%
mutate(RightAlleyLine = case_when(RightAlleyLine < RightColumnLine ~ RightColumnLine,
TRUE ~ RightAlleyLine))
#create alley indicator
# Defender between the alley line and the column line is "in the alley".
dat1 <- dat1 %>%
group_by(gameId, playId) %>%
mutate(LeftAlley = case_when(SnapY < LeftColumnLine & SnapY >= LeftAlleyLine & OffDef=="Defense" ~ 1,
TRUE ~ 0))
#right
dat1 <- dat1 %>%
group_by(gameId, playId) %>%
mutate(RightAlley = case_when(SnapY > RightColumnLine & SnapY <= RightAlleyLine & OffDef == "Defense" ~ 1,
TRUE ~ 0))
###identify chute
# The "chute" is the interior zone between the two column lines.
dat1 <- dat1 %>%
group_by(gameId, playId) %>%
mutate(Chute = case_when(SnapY < LeftColumnLine & SnapY > RightColumnLine & OffDef == "Defense" ~ 1,
TRUE ~ 0))
#indicate highest DB in column
# Sort so that in-column DBs come first, deepest (largest SnapX) first;
# the first row of each frame group is then flagged as the highest player.
# Note: the flag lands on some row of every frame even when no DB is in
# the column -- downstream checks pair it with LeftColumnDBCount.
#left
dat1 <- dat1 %>%
arrange(gameId, playId, frameId, LeftColumn != 1, DBPos != 'DB', desc(SnapX)) %>%
group_by(gameId, playId, frameId) %>%
mutate(LeftColumnHighest = c(1, rep(0, n() - 1)))
#right
dat1 <- dat1 %>%
arrange(gameId, playId, frameId, RightColumn != 1, DBPos != 'DB', desc(SnapX)) %>%
group_by(gameId, playId, frameId) %>%
mutate(RightColumnHighest = c(1, rep(0, n() - 1)))
##include safeties in the column
# A deep (>7 yds downfield) highest player in a 2-DB column is treated as
# part of the chute (a potential deep safety), not the column.
#left
dat1 <- dat1 %>%
group_by(gameId, playId) %>%
mutate(Chute = case_when(LeftColumnDBCount==2 & LeftColumnHighest ==1 & SnapX > BallSnapX + 7 ~ 1,
TRUE ~ Chute))
#right
dat1 <- dat1 %>%
group_by(gameId, playId) %>%
mutate(Chute = case_when(RightColumnDBCount==2 & RightColumnHighest ==1 & SnapX > BallSnapX + 7 ~ 1,
TRUE ~ Chute))
#name these players "columnchutes"
# Players flagged as both column and chute -- disambiguated later as
# column-safeties vs high corners.
#left
dat1 <- dat1 %>%
group_by(gameId, playId) %>%
mutate(LeftColumnChute = case_when(LeftColumn == 1 & Chute == 1 ~ 1,
TRUE ~ 0))
#right
dat1 <- dat1 %>%
group_by(gameId, playId) %>%
mutate(RightColumnChute = case_when(RightColumn==1 & Chute ==1 ~ 1,
TRUE ~ 0))
#####defining the window
## define two highest DBs
# Sort chute DBs first, deepest first; label the two deepest rows per
# frame 'A' (deepest) and 'B', everything else '-'.
# NOTE(review): c('A','B', rep('-', n() - 2)) errors if any frame group
# has fewer than 2 rows -- presumed impossible for full tracking frames.
dat1 <- dat1 %>%
arrange(gameId, playId, frameId, Chute != 1,DBPos != 'DB', desc(SnapX)) %>%
group_by(gameId, playId, frameId) %>%
mutate(Highest = c('A','B', rep('-', n() - 2)))
#new columns for higher DB
# Broadcast the A/B players' 0.8 s-after-snap positions to every row.
#Y
dat1 <- dat1 %>%
group_by(gameId, playId) %>%
mutate(HighDBY = EightTenthsY[match('A', Highest)]) %>%
ungroup
#X
dat1 <- dat1 %>%
group_by(gameId, playId) %>%
mutate(HighDBX = EightTenthsX[match('A', Highest)]) %>%
ungroup
#new column for lower DB
#Y
dat1 <- dat1 %>%
group_by(gameId, playId) %>%
mutate(LowDBY = EightTenthsY[match('B', Highest)]) %>%
ungroup
#X
dat1 <- dat1 %>%
group_by(gameId, playId) %>%
mutate(LowDBX = EightTenthsX[match('B', Highest)]) %>%
ungroup
# If B is actually deeper than A at 0.8 s, swap the labels.
dat1 <- dat1 %>%
group_by(gameId, playId) %>%
mutate(Highest = case_when(Highest =="A" & LowDBX > HighDBX ~ "B",
Highest =="B" & LowDBX > HighDBX ~ "A",
TRUE ~ Highest))
## account for observations where the safeties flipped depth
# Recompute the A/B coordinates after the possible swap above.
#Y
dat1 <- dat1 %>%
group_by(gameId, playId) %>%
mutate(HighDBY = EightTenthsY[match('A', Highest)]) %>%
ungroup
#X
dat1 <- dat1 %>%
group_by(gameId, playId) %>%
mutate(HighDBX = EightTenthsX[match('A', Highest)]) %>%
ungroup
#new column for lower DB
#Y
dat1 <- dat1 %>%
group_by(gameId, playId) %>%
mutate(LowDBY = EightTenthsY[match('B', Highest)]) %>%
ungroup
#X
dat1 <- dat1 %>%
group_by(gameId, playId) %>%
mutate(LowDBX = EightTenthsX[match('B', Highest)]) %>%
ungroup
#create channel safety (middle safety on the play?)
# 1 when the deepest DB sits laterally between the two alley lines,
# i.e. plays as a middle-of-field safety.
dat1 <- dat1 %>%
group_by(gameId, playId) %>%
mutate(ChannelSafety = case_when(HighDBY < LeftAlleyLine & HighDBY > RightAlleyLine ~ 1,
TRUE ~ 0))
##determine if column-safeties or high corners
# Column-chute players are "column safeties" only when there is no middle
# safety; rows that are not column-chutes are left NA here (case_when has
# no TRUE branch), which the == 1 tests below treat as no-match.
#left
dat1 <- dat1 %>%
group_by(gameId, playId) %>%
mutate(LeftColumnSafety = case_when(LeftColumnChute == 1 & ChannelSafety == 1 ~ 0,
LeftColumnChute == 1 & Highest=="A" & ChannelSafety == 0 ~ 1,
LeftColumnChute == 1 & Highest=="B" & ChannelSafety == 0 ~ 1))
#right
dat1 <- dat1 %>%
group_by(gameId, playId) %>%
mutate(RightColumnSafety = case_when(RightColumnChute == 1 & ChannelSafety == 1 ~ 0,
RightColumnChute == 1 & Highest=="A" & ChannelSafety == 0 ~ 1,
RightColumnChute == 1 & Highest=="B" & ChannelSafety == 0 ~ 1))
##eliminate these column-safeties from columns
#left
dat1 <- dat1 %>%
group_by(gameId, playId) %>%
mutate(LeftColumn = case_when(LeftColumnSafety == 1 ~ 0,
TRUE ~ LeftColumn))
#Right
dat1 <- dat1 %>%
group_by(gameId, playId) %>%
mutate(RightColumn = case_when(RightColumnSafety == 1 ~ 0,
TRUE ~ RightColumn))
#remove columnchute corners from the chute
# A column-chute player who is NOT a safety is a high corner: keep him in
# the column, drop him from the chute.
#left
dat1 <- dat1 %>%
group_by(gameId, playId) %>%
mutate(Chute = case_when(LeftColumnChute ==1 & LeftColumnSafety == 0 ~ 0,
TRUE ~ Chute))
#right
dat1 <- dat1 %>%
group_by(gameId, playId) %>%
mutate(Chute = case_when(RightColumnChute==1 & RightColumnSafety == 0 ~ 0,
TRUE ~ Chute))
#redo highest player ID
##define two highest DBs
# Re-run the A/B labelling now that column corners have been removed from
# the chute; then refresh the A/B coordinates.
dat1 <- dat1 %>%
arrange(gameId, playId, frameId, Chute != 1,DBPos != 'DB', desc(SnapX)) %>%
group_by(gameId, playId, frameId) %>%
mutate(Highest = c('A','B', rep('-', n() - 2)))
#create new columns for higher DB
#Y
dat1 <- dat1 %>%
group_by(gameId, playId) %>%
mutate(HighDBY = EightTenthsY[match('A', Highest)]) %>%
ungroup
#X
dat1 <- dat1 %>%
group_by(gameId, playId) %>%
mutate(HighDBX = EightTenthsX[match('A', Highest)]) %>%
ungroup
#new column for lower DB
#Y
dat1 <- dat1 %>%
group_by(gameId, playId) %>%
mutate(LowDBY = EightTenthsY[match('B', Highest)]) %>%
ungroup
#X
dat1 <- dat1 %>%
group_by(gameId, playId) %>%
mutate(LowDBX = EightTenthsX[match('B', Highest)]) %>%
ungroup
#count column again
#left
dat1 <- dat1 %>%
group_by(gameId, playId, frameId) %>%
mutate(LeftColumnCount = sum(LeftColumn))
#right
dat1 <- dat1 %>%
group_by(gameId, playId, frameId) %>%
mutate(RightColumnCount = sum(RightColumn))
# NOTE(review): dat2leftcolumn / dat2rightcolumn are never used in the
# rest of this function -- they look like debugging snapshots and could
# be removed.
dat2leftcolumn <- filter(dat1, LeftColumnCount==2)
dat2rightcolumn <- filter(dat1, RightColumnCount==2)
### if still 2 in column, make the column defender the widest player
##create the indicator
# Sort in-column players first, widest (largest SnapY on the left /
# smallest on the right) first; flag the first row of each frame group.
#left
dat1 <- dat1 %>%
arrange(gameId, playId, frameId, LeftColumn != 1, desc(SnapY)) %>%
group_by(gameId, playId, frameId) %>%
mutate(LeftColumnWidest = c(1, rep(0, n() - 1)))
#right
dat1 <- dat1 %>%
arrange(gameId, playId, frameId, RightColumn != 1, SnapY) %>%
group_by(gameId, playId, frameId) %>%
mutate(RightColumnWidest = c(1, rep(0, n() - 1)))
## rename the inner player conflict defender (column label removed in next step)
dat1 <- dat1 %>%
group_by(gameId, playId) %>%
mutate(LeftAlley = case_when(LeftColumn==1 & LeftColumnCount ==2 & LeftColumnWidest==0 ~ 1,
TRUE ~ LeftAlley))
dat1 <- dat1 %>%
group_by(gameId, playId) %>%
mutate(RightAlley = case_when(RightColumn==1 & RightColumnCount ==2 & RightColumnWidest==0 ~ 1,
TRUE ~ RightAlley))
##in remaining observations with two players in column, make the widest DB the column player--set all others to 0
#left
dat1 <- dat1 %>%
group_by(gameId, playId) %>%
mutate(LeftColumn = case_when(LeftColumnCount==2 & LeftColumnWidest==1 ~ 1,
LeftColumnCount==2 & LeftColumnWidest==0 ~ 0,
TRUE ~ LeftColumn))
#right
dat1 <- dat1 %>%
group_by(gameId, playId) %>%
mutate(RightColumn = case_when(RightColumnCount==2 & RightColumnWidest==1 ~ 1,
RightColumnCount==2 & RightColumnWidest==0 ~ 0,
TRUE ~ RightColumn))
##count again
#left
dat1 <- dat1 %>%
group_by(gameId, playId, frameId) %>%
mutate(LeftAlleyCount = sum(LeftAlley))
#right
dat1 <- dat1 %>%
group_by(gameId, playId, frameId) %>%
mutate(RightAlleyCount = sum(RightAlley))
# NOTE(review): datleftalley / datrightalley are unused below -- apparent
# debugging snapshots.
datleftalley <- filter(dat1, LeftAlley == 1)
datrightalley <- filter(dat1, RightAlley == 1)
#count column again
#left
dat1 <- dat1 %>%
group_by(gameId, playId, frameId) %>%
mutate(LeftColumnCount = sum(LeftColumn))
#right
dat1 <- dat1 %>%
group_by(gameId, playId, frameId) %>%
mutate(RightColumnCount = sum(RightColumn))
#delete three in column
dat1 <- filter(dat1, RightColumnCount != 3)
dat1 <- filter(dat1, LeftColumnCount != 3)
#delete none in column
dat1 <- filter(dat1, RightColumnCount != 0)
dat1 <- filter(dat1, LeftColumnCount != 0)
# Reference point for the safety geometry: the right-angle corner of the
# triangle formed by the two deepest DBs -- the low DB's depth (x) paired
# with the high DB's lateral spot (y). (Not a midpoint, despite the old
# comment.)
dat1$SafetyPointX <- dat1$LowDBX
dat1$SafetyPointY <- dat1$HighDBY
###Creating the contours
##create line segments
#lateral leg: |LowDBY - HighDBY| (computed but not used in the angle below)
dat1$LowDBtoRefPointSegment <- sqrt((dat1$LowDBX - dat1$SafetyPointX)^2 + (dat1$LowDBY - dat1$SafetyPointY)^2)
#depth leg: |HighDBX - LowDBX|
dat1$HighDBtoRefPointSegment <- sqrt((dat1$HighDBX - dat1$SafetyPointX)^2 + (dat1$HighDBY - dat1$SafetyPointY)^2)
#hypotenuse: low DB to high DB
dat1$LowDBHighDBSegment <- sqrt((dat1$HighDBX - dat1$LowDBX)^2 + (dat1$HighDBY - dat1$LowDBY)^2)
##determine the angle
# asin(depth leg / hypotenuse): angle of the safety-to-safety line off the
# lateral axis -- small means the safeties are level (two-high look),
# large means stacked (one-high look).
dat1 <- dat1 %>%
mutate(Sine = HighDBtoRefPointSegment/LowDBHighDBSegment) %>%
mutate(SafetyAngle = asin(Sine))
# convert from radians to degrees
dat1$SafetyAngle <- (dat1$SafetyAngle*180)/pi
#above 7 yards indicator
# Classify each of the two deepest DBs as deep (>= 7 yds downfield) or not.
dat1 <- dat1 %>%
mutate(HighSafetyDepth = case_when(HighDBX >= BallSnapX + 7 ~ 'HighSafeHigh',
HighDBX < BallSnapX + 7 ~ 'HighSafeLow'))
dat1 <- dat1 %>%
mutate(LowSafetyDepth = case_when(LowDBX >= BallSnapX + 7 ~ 'LowSafeHigh',
LowDBX < BallSnapX + 7 ~ 'LowSafeLow'))
dat1 <- dat1 %>%
mutate(SafetyOver7Count = case_when(HighSafetyDepth == 'HighSafeHigh' & LowSafetyDepth == 'LowSafeHigh' ~ 2,
HighSafetyDepth == 'HighSafeHigh' & LowSafetyDepth == 'LowSafeLow'~ 1,
HighSafetyDepth == 'HighSafeLow' & LowSafetyDepth == 'LowSafeLow' ~ 0))
##safeties in window
#difference on each side
#ID rushers and eliminate from alley
# A defender behind the ball 1.5 s after the snap is a pass rusher; only
# "Coverage" defenders count toward the shields below.
dat1 <- dat1 %>%
group_by(gameId, playId) %>%
mutate(PassRusher = case_when(SecondAndHalfX < BallSnapX & OffDef == "Defense" ~ "PassRusher",
SecondAndHalfX >= BallSnapX & OffDef == "Defense" ~ "Coverage",
TRUE ~ "Offense"))
dat1 <- dat1 %>%
group_by(gameId, playId) %>%
mutate(LeftAlley = case_when(LeftAlley == 1 & PassRusher == "PassRusher" ~ 0,
TRUE ~ LeftAlley))
dat1 <- dat1 %>%
group_by(gameId, playId) %>%
mutate(RightAlley = case_when(RightAlley == 1 & PassRusher == "PassRusher" ~ 0,
TRUE ~ RightAlley))
##left difference
#defense left alley count
dat1 <- dat1 %>%
group_by(gameId, playId, frameId) %>%
mutate(LeftAlleyCount = sum(LeftAlley))
#defense left column count
dat1 <- dat1 %>%
group_by(gameId, playId, frameId) %>%
mutate(NewLeftColumnCount = sum(SnapY >= LeftColumnLine & OffDef == "Defense" & PassRusher=="Coverage"))
#sum defense left column and alley (alley tube + column tube = 'shield')
dat1 <- dat1 %>%
group_by(gameId, playId) %>%
mutate(LeftShield = LeftAlleyCount + NewLeftColumnCount)
#offense
#offense left column count
dat1 <- dat1 %>%
group_by(gameId, playId, frameId) %>%
mutate(LeftOffColumnCount = sum(SnapY >= LeftColumnLine & OffDef == "Offense"))
#offense left alley count
dat1 <- dat1 %>%
group_by(gameId, playId, frameId) %>%
mutate(LeftOffAlleyCount = sum(SnapY < LeftColumnLine & SnapY >= LeftAlleyLine & OffDef == "Offense"))
#sum offense left shield
dat1 <- dat1 %>%
group_by(gameId, playId) %>%
mutate(LeftOffShield = LeftOffColumnCount + LeftOffAlleyCount)
#left defense minus offensive players outside tackle
# Positive means the defense outnumbers the offense in the left shield.
dat1 <- dat1 %>%
group_by(gameId, playId) %>%
mutate(LeftDBDiff = LeftShield - LeftOffShield)
##right diff
# Mirror of the left-shield computation above.
#defense
#defense right alley count
dat1 <- dat1 %>%
group_by(gameId, playId, frameId) %>%
mutate(RightAlleyCount = sum(RightAlley))
#defense right column count
dat1 <- dat1 %>%
group_by(gameId, playId, frameId) %>%
mutate(NewRightColumnCount = sum(SnapY <= RightColumnLine & OffDef == "Defense" & PassRusher =="Coverage"))
#sum right column and alley
dat1 <- dat1 %>%
group_by(gameId, playId) %>%
mutate(RightShield = RightAlleyCount + NewRightColumnCount)
#offense
#offense right column count
dat1 <- dat1 %>%
group_by(gameId, playId, frameId) %>%
mutate(RightOffColumnCount = sum(SnapY <= RightColumnLine & OffDef == "Offense"))
#offense right alley count
dat1 <- dat1 %>%
group_by(gameId, playId, frameId) %>%
mutate(RightOffAlleyCount = sum(SnapY > RightColumnLine & SnapY <= RightAlleyLine & OffDef == "Offense"))
#sum offense right shield
dat1 <- dat1 %>%
group_by(gameId, playId) %>%
mutate(RightOffShield = RightOffColumnCount + RightOffAlleyCount)
#right defense minus offensive players outside tackle
dat1 <- dat1 %>%
group_by(gameId, playId) %>%
mutate(RightDBDiff = RightShield - RightOffShield)
#label diffs
# A side where the defense does not outnumber the offense suggests a
# one-high shell; an advantage suggests two-high.
dat1 <- dat1 %>%
group_by(gameId, playId) %>%
mutate(LeftDiffLabel = case_when(LeftDBDiff < 1 ~ "OneHighIndicator",
LeftDBDiff >= 1 ~ "TwoHighIndicator"))
dat1 <- dat1 %>%
group_by(gameId, playId) %>%
mutate(RightDiffLabel = case_when(RightDBDiff < 1 ~ "OneHighIndicator",
RightDBDiff >= 1 ~ "TwoHighIndicator"))
#indicator
dat1 <- dat1 %>%
group_by(gameId, playId) %>%
mutate(DiffLabel = case_when(LeftDiffLabel == "OneHighIndicator" & RightDiffLabel == "OneHighIndicator" ~ "One",
LeftDiffLabel == "OneHighIndicator" & RightDiffLabel == "TwoHighIndicator" ~ "Mix",
LeftDiffLabel == "TwoHighIndicator" & RightDiffLabel == "OneHighIndicator" ~ "Mix",
LeftDiffLabel == "TwoHighIndicator" & RightDiffLabel == "TwoHighIndicator" ~ "Two"))
#add channel into the diff indicator
# Mixed sides are split by whether a middle safety is present.
dat1 <- dat1 %>%
group_by(gameId, playId) %>%
mutate(DiffLabel = case_when(ChannelSafety == 1 & DiffLabel=="Mix" ~ "MixOne",
ChannelSafety == 0 & DiffLabel=="Mix" ~ "MixTwo",
TRUE ~ DiffLabel))
###using the safety angle, depth, and player difference to label the window
# Window = number of deep-safety "windows": 2 when both safeties are deep
# and level enough (angle threshold varies by DiffLabel), 1 when stacked
# or only one is deep, 0 when neither is deep.
dat1 <- dat1 %>%
mutate(Window = case_when(SafetyOver7Count == 2 & DiffLabel == "Two" & SafetyAngle <= 30 ~ 2,
SafetyOver7Count == 2 & DiffLabel == "Two" & SafetyAngle > 30 ~ 1,
SafetyOver7Count == 2 & DiffLabel == "One" & SafetyAngle <= 20 ~ 2,
SafetyOver7Count == 2 & DiffLabel == "One" & SafetyAngle > 20 ~ 1,
SafetyOver7Count == 2 & DiffLabel == "MixOne" & SafetyAngle <= 22.5 ~ 2,
SafetyOver7Count == 2 & DiffLabel == "MixOne" & SafetyAngle > 22.5 ~ 1,
SafetyOver7Count == 2 & DiffLabel == "MixTwo" & SafetyAngle <= 27.5 ~ 2,
SafetyOver7Count == 2 & DiffLabel == "MixTwo" & SafetyAngle > 27.5 ~ 1,
SafetyOver7Count == 1 ~ 1,
SafetyOver7Count == 0 ~ 0))
# When the lower safety is very deep (>= 12 yds of field x) loosen the
# "Two"/"One" angle thresholds; Mix thresholds are unchanged.
dat1 <- dat1 %>%
mutate(Window = case_when(SafetyOver7Count == 2 & LowDBX >= 12 & DiffLabel == "Two" & SafetyAngle <= 35 ~ 2,
SafetyOver7Count == 2 & LowDBX >= 12 & DiffLabel == "Two" & SafetyAngle > 35 ~ 1,
SafetyOver7Count == 2 & LowDBX >= 12 & DiffLabel == "One" & SafetyAngle <= 25 ~ 2,
SafetyOver7Count == 2 & LowDBX >= 12 & DiffLabel == "One" & SafetyAngle > 25 ~ 1,
SafetyOver7Count == 2 & LowDBX >= 12 & DiffLabel == "MixOne" & SafetyAngle <= 22.5 ~ 2,
SafetyOver7Count == 2 & LowDBX >= 12 & DiffLabel == "MixOne" & SafetyAngle > 22.5 ~ 1,
SafetyOver7Count == 2 & LowDBX >= 12 & DiffLabel == "MixTwo" & SafetyAngle <= 27.5 ~ 2,
SafetyOver7Count == 2 & LowDBX >= 12 & DiffLabel == "MixTwo" & SafetyAngle > 27.5 ~ 1,
TRUE ~ Window))
## How many coverage defenders?
dat1 <- dat1 %>%
group_by(gameId, playId, frameId) %>%
mutate(CoverageDefenders = sum(SecondAndHalfX > BallSnapX & OffDef == 'Defense'))
## Creating the shell
## corner depth (depth of column defenders)
#Left
dat1 <- dat1 %>%
group_by(gameId, playId) %>%
mutate(LeftCornerDepth = SnapX[match(1, LeftColumn)]) %>%
ungroup()
#Right
dat1 <- dat1 %>%
group_by(gameId, playId) %>%
mutate(RightCornerDepth = SnapX[match(1, RightColumn)]) %>%
ungroup()
##identify the shell
# Shell label: 0/3 from the window count; with two windows the shell is
# 2 (both corners pressed), 4 (both corners off), or 6 (one of each),
# with "off" meaning >= 6 yds deeper than the ball.
dat1 <- dat1 %>%
group_by(gameId, playId) %>%
mutate(Shell = case_when(Window == 0 ~ '0',
Window == 1 ~ '3',
Window == 2 & LeftCornerDepth < BallSnapX + 6 & RightCornerDepth < BallSnapX + 6 ~ '2',
Window == 2 & LeftCornerDepth >= BallSnapX + 6 & RightCornerDepth < BallSnapX + 6 ~ '6',
Window == 2 & LeftCornerDepth < BallSnapX + 6 & RightCornerDepth >= BallSnapX + 6 ~ '6',
Window == 2 & LeftCornerDepth >= BallSnapX + 6 & RightCornerDepth >= BallSnapX + 6 ~ '4'))
##identify positions redo
# Role labels: column players are Corners, the deepest chute DBs are
# Safeties (the second one only definitively so in a two-window shell),
# alley players are Conflict defenders, interior players Adjacent.
dat1 <- dat1 %>%
mutate(GamePosition = case_when(LeftColumn == 1 ~ "Corner",
RightColumn == 1 ~ "Corner",
Highest == "A" ~ "Safety",
Highest == "B" & Window == 2 ~ "Safety",
Highest == "B" & Window == 1 ~ "SafetyTBD",
Highest == "B" & Window == 0 ~ "Safety",
LeftAlley == 1 & Highest == "-" ~ "Conflict",
RightAlley == 1 & Highest == "-" ~ "Conflict",
LeftAlleyLine > SnapY & SnapY > RightAlleyLine & Highest == "-" ~ "Adjacent"))
# Determine SafetyTBD
dat1 <- dat1 %>%
mutate(GamePosition = case_when(GamePosition == "SafetyTBD" & LeftAlley==1 ~ "SafetyConflict",
GamePosition == "SafetyTBD" & RightAlley==1 ~ "SafetyConflict",
GamePosition == "SafetyTBD" & LeftAlleyLine > SnapY & SnapY > RightAlleyLine ~ "SafetyAdjacent",
TRUE ~ GamePosition))
# Remove offense and pass rushers
dat1 <- dat1 %>%
mutate(GamePosition = case_when(OffDef=="Offense" ~ "Offense",
PassRusher=="PassRusher" ~ "PassRusher",
TRUE ~ GamePosition))
#delete obsolete columns
# Drop the intermediate columns; keep only the final features.
dat1 <- dat1 %>%
select(-time, -SideDuring, -qb_team, -QBTeam, -uniqueplay, -L1OffY, -L2OffY, -L3OffY, -L4OffY,
-R1OffY, -R2OffY, -R3OffY, -R4OffY, -L1L2Diff, -R1R2Diff, -LeftSideline, -RightSideline,
-TightSplitLeft, -TightSplitRight, -LeftColumnDBCount, -RightColumnDBCount, -L2L3Diff,
-R2R3Diff, -L2OffInsideOT, -R2OffInsideOT, -LeftReceiverCount, -RightReceiverCount,
-L1InsideTE, -L2InsideTE, -L3InsideTE, -L4InsideTE, -R1InsideTE, -R2InsideTE, -R3InsideTE,
-R4InsideTE, -LeftReceiverCountOutsideTackle, -RightReceiverCountOutsideTackle, -LeftAlleyID,
-RightAlleyID, -LeftColumnHighest, -RightColumnHighest, -LeftColumnChute, -RightColumnChute,
-LeftColumnSafety, -RightColumnSafety, -LeftColumnWidest, -RightColumnWidest,
-NewLeftColumnCount, -LeftShield, -LeftOffColumnCount, -LeftOffAlleyCount, -LeftOffShield,
-LeftDBDiff, -NewRightColumnCount, -RightShield, -RightOffColumnCount, -RightOffAlleyCount,
-RightOffShield, -RightDBDiff)
return(dat1)
}
#
## Adding Play Data
## Adds the per-frame player/ball layers plus the line of scrimmage and the
## first-down line to a ggplot. Argument names must match the list returned by
## prep_vis_data() exactly.
AddsPlayData <- function(Offense, Defense, Ball, BallX, FirstDownLine) {
  los <- as.numeric(BallX)
  first_down <- as.numeric(FirstDownLine)
  list(
    # Player and ball markers
    geom_point(data = Offense, mapping = aes(x = x, y = y),
               shape = 1, size = 1.5, color = "orange"),
    geom_point(data = Defense, mapping = aes(x = x, y = y),
               shape = 4, size = 1.5, color = "purple"),
    geom_point(data = Ball, mapping = aes(x = x, y = y),
               shape = 21, fill = "brown", alpha = 1.0, size = 2),
    # Line of scrimmage (blue) and first-down marker (yellow)
    geom_segment(aes(x = los, y = 0, xend = los, yend = 53.33),
                 color = "blue", alpha = 1, size = 1),
    geom_segment(aes(x = first_down, y = 0, xend = first_down, yend = 53.33),
                 color = "yellow", alpha = 1, size = 1)
  )
}
#
## Adding Throw Data
## Adds the player/ball layers at the throw frame (larger markers than the
## play-level version), the scrimmage/first-down lines, and two concentric
## ggforce::geom_circle rings highlighting the targeted receiver.
AddsThrowData <- function(Offense_throw, Defense_throw, Ball_throw, BallX, FirstDownLine, target_player_location) {
  los <- as.numeric(BallX)
  first_down <- as.numeric(FirstDownLine)
  target_x <- target_player_location$x
  target_y <- target_player_location$y
  list(
    geom_point(data = Offense_throw, mapping = aes(x = x, y = y),
               shape = 1, size = 3, color = "orange"),
    geom_point(data = Defense_throw, mapping = aes(x = x, y = y),
               shape = 4, size = 3, color = "purple"),
    geom_point(data = Ball_throw, mapping = aes(x = x, y = y),
               shape = 21, fill = "brown", alpha = 1.0, size = 4),
    geom_segment(aes(x = los, y = 0, xend = los, yend = 53.33),
                 color = "blue", alpha = 1, size = 1),
    geom_segment(aes(x = first_down, y = 0, xend = first_down, yend = 53.33),
                 color = "yellow", alpha = 1, size = 1),
    # Target highlight: outer (r = 10) and inner (r = 5) translucent rings
    geom_circle(aes(x0 = target_x, y0 = target_y, r = 10), fill = "red", alpha = 0.15),
    geom_circle(aes(x0 = target_x, y0 = target_y, r = 5), fill = "red", alpha = 0.25)
  )
}
#
## Adding Shell ID Markers
## Draws the four horizontal column/alley boundary lines (from the snap x out
## to x = 120) and a summary label of the detected shell features.
AddShellMarkers <- function(shell_features) {
  # One boundary segment per line column in shell_features
  boundary_cols <- c("LeftColumnLine", "RightColumnLine",
                     "LeftAlleyLine", "RightAlleyLine")
  boundary_layers <- lapply(boundary_cols, function(line_col) {
    geom_segment(aes(x = shell_features$BallSnapX,
                     y = shell_features[[line_col]],
                     xend = 120,
                     yend = shell_features[[line_col]]),
                 alpha = 0.5, color = "black")
  })
  # NOTE(review): Window/SafetyAngle are indexed with [1] but Shell and
  # CoverageDefenders are not — confirm shell_features is single-row, else
  # paste() will recycle.
  summary_label <- annotate("label", x = 60, y = -5, size = 2, label = paste(
    "Window:", shell_features$Window[1],
    ", Safety Angle:", round(shell_features$SafetyAngle[1], 0),
    ", Shell:", shell_features$Shell,
    ", Coverage Defenders:", shell_features$CoverageDefenders))
  c(boundary_layers, list(summary_label))
}
#
# Code Based in Part On (https://www.kaggle.com/tombliss/additional-data-coverage-schemes-for-week-1?select=targetedReceiver.csv)
# Field geometry constants: playing surface spans x in [10, 110],
# y in [0, 160/3] (53.33 yards wide).
xmin <- 10
xmax <- 110
ymin <- 0
ymax <- 160 / 3
hash.right <- 5
hash.left <- 5
hash.width <- 1
# One "|" hash glyph per yard (x = 10.88 .. 108.88) at four y positions:
# just inside each sideline and at the two inbound hash rows.
df.hash <- expand.grid(
  x = seq(11, 109) - 0.12,
  y = c(0 + 1, 23.36667 - 0.25, 29.96667 - 0.20, 160 / 3 - 1.6)
)
## Draws the white yard lines, hash marks, plot limits, and the midfield NFL
## logo. Relies on the module-level df.hash table and "Data/nfl-logo.png".
AddFieldLines <- function() {
  NFLlogo <- rasterGrob(readPNG("Data/nfl-logo.png"), interpolate = TRUE)
  # Vertical yard lines every 5 yards from x = 15 to x = 105.
  # 10-yard lines are drawn thicker (0.4); the 50-yard line (x = 60) thickest (0.5).
  yard_x <- seq(15, 105, by = 5)
  yard_size <- ifelse(yard_x == 60, 0.5, ifelse(yard_x %% 10 == 0, 0.4, 0.3))
  yard_lines <- Map(function(xv, sz) {
    geom_segment(aes(x = xv, y = 0, xend = xv, yend = 53.33),
                 color = "white", alpha = 1, size = sz)
  }, yard_x, yard_size)
  c(yard_lines,
    list(
      xlim(-10, 130),
      ylim(-10, 63.3333),
      # Hash-mark glyphs, split at x = 10 (20/2) as in the original layout.
      # NOTE(review): df.hash$x starts at 10.88, so the "< 20/2" branch selects
      # no rows — confirm the split point is intended.
      annotate("text", x = df.hash$x[df.hash$x < 20/2], color = "white", size = 2,
               y = df.hash$y[df.hash$x < 20/2], label = "|", hjust = 0, vjust = -0),
      annotate("text", x = df.hash$x[df.hash$x > 20/2], color = "white", size = 2,
               y = df.hash$y[df.hash$x > 20/2], label = "|", hjust = 0, vjust = -0),
      # NFL logo centered at midfield
      annotation_custom(NFLlogo, xmin = 56.9, xmax = 62.9, ymin = 24, ymax = 29.5)
    ))
}
## Colors the playing surface green and both end zones blue.
## `x` is unused but kept for call-site compatibility.
AddFieldColor <- function(x) {
  field <- geom_rect(mapping = aes(xmin = 0, xmax = 120, ymin = 0, ymax = 53.33),
                     color = "black", fill = "#00614c", alpha = 0.7)
  left_end_zone <- geom_rect(mapping = aes(xmin = 0, xmax = 10, ymin = 0, ymax = 53.33),
                             color = "black", fill = "#2F3BE4", alpha = 0.4)
  right_end_zone <- geom_rect(mapping = aes(xmin = 110, xmax = 120, ymin = 0, ymax = 53.33),
                              color = "black", fill = "#2F3BE4", alpha = 0.4)
  list(field, left_end_zone, right_end_zone)
}
## Yard-marker numbers along the bottom of the field ("1 0" up to "5 0" and
## back down). `x` is unused but kept for call-site compatibility.
AddYardNumbers <- function(x) {
  markers <- c("1 0", "2 0", "3 0", "4 0", "5 0", "4 0", "3 0", "2 0", "1 0")
  Map(function(xpos, lbl) {
    annotate(geom = "text", x = xpos, y = 5, label = lbl, color = "white")
  }, seq(20, 100, by = 10), markers)
}
## Mirrored yard numbers along the top of the field, rotated 180 degrees so
## they read correctly from the far sideline. `x` is unused but kept for
## call-site compatibility.
AddUpsideDownYardNumbers <- function(x) {
  markers <- c("1 0", "2 0", "3 0", "4 0", "5 0", "4 0", "3 0", "2 0", "1 0")
  Map(function(xpos, lbl) {
    annotate(geom = "text", x = xpos, y = 48.33, label = lbl,
             color = "white", angle = 180)
  }, seq(20, 100, by = 10), markers)
}
## Labels the two end zones. `x` is unused but kept for call-site compatibility.
AddEndzones <- function(x) {
  list(
    annotate(geom = "text", x = 5, y = 27, label = "OFFENSE",
             color = "white", angle = 90),
    annotate(geom = "text", x = 115, y = 27, label = "DEFENSE",
             color = "white", angle = 270)
  )
}
#
## Clear Figure Background
## Strips the panel border and all grid lines from the default bw theme.
ClearFigBackground <- function() {
  list(
    theme_bw(),
    theme(panel.border = element_blank()),
    theme(panel.grid.major = element_blank(),
          panel.grid.minor = element_blank())
  )
}
#
## Add a Circle (https://stackoverflow.com/questions/6862742/draw-a-circle-with-ggplot2)
# Builds a data frame of `npoints` (x, y) points approximating a circle,
# suitable for geom_path()/geom_polygon(). First and last points coincide
# (angles run from 0 to 2*pi inclusive).
createCircle <- function(center = c(0,0), diameter = 1, npoints = 100){
  radius <- diameter / 2
  angles <- seq(0, 2 * pi, length.out = npoints)
  data.frame(
    x = center[1] + radius * cos(angles),
    y = center[2] + radius * sin(angles)
  )
}
| /Analysis/Functions/visualization_functions.r | no_license | rbernhardt12/Big-Data-Bowl-20-Public | R | false | false | 60,379 | r | ### NFL Big Data Bowl 2020-21
# Robert Bernhardt, Andrew Rogan, Daniel Weiss
# November 2020
## User-Defined Functions for Data Visualization
# Plot Play-Specific Data
## Data Visualization Preparation Source Function
# Required Inputs: TrackingPlayData , input_game , input_play
# Outputs: Offense , Defense , Ball , BallX , FirstDownLine
# Prepares one play's tracking data for plotting: standardizes every play to
# move left-to-right, splits rows into offense / defense / ball, and computes
# the line of scrimmage and first-down marker.
#
# Args:
#   GeneralPlayData:  the play's row from the plays table (must have yardsToGo).
#   TrackingPlayData: tracking rows for the play (x, y, o, dir, team, position,
#                     frameId, playDirection, ...).
# Returns: list(Offense, Defense, Ball, BallX, FirstDownLine). BallX and
#          FirstDownLine are 1x1 data frames, not bare numbers.
prep_vis_data <- function(GeneralPlayData, TrackingPlayData) {
# Flip coordinates and rotate angles by 180 for leftward plays so all plays
# read left-to-right. NOTE(review): mod() is not base R — presumably
# matlab/pracma::mod loaded by the caller; confirm.
TrackingPlayData <- TrackingPlayData %>%
mutate( x = case_when( playDirection == "right" ~ x ,
playDirection == "left" ~ 120 - x)) %>%
mutate( y = case_when( playDirection == "right" ~ y ,
playDirection == "left" ~ 160/3 - y)) %>%
mutate( dir = case_when( playDirection == "right" ~ dir ,
playDirection == "left" ~ mod(dir+180,360))) %>%
mutate( o = case_when( playDirection == "right" ~ o ,
playDirection == "left" ~ mod(o+180,360)))
# The offense is whichever team the QB is on at frame 1. qb_team is a
# data frame; as.character() below collapses it to the team string.
# NOTE(review): assumes exactly one QB row at frame 1 — confirm for
# multi-QB formations.
qb_team <- TrackingPlayData %>%
filter(position == "QB") %>%
filter(frameId == 1) %>%
select(team)
Offense <- TrackingPlayData %>%
filter(team == as.character(qb_team))
Defense <- TrackingPlayData %>%
filter(team != as.character(qb_team)) %>%
filter(team != "football")
Ball <- TrackingPlayData %>%
filter(team == "football")
# Line of scrimmage: ball x-position at the first frame (1x1 data frame).
BallX <- Ball %>%
filter(frameId == 1) %>%
select(x)
YardsToGo <- GeneralPlayData %>%
select(yardsToGo)
# Data-frame addition: 1x1 + 1x1 yields a 1x1 data frame named after BallX.
FirstDownLine <- BallX + YardsToGo
vis_data <- list("Offense" = Offense, "Defense" = Defense, "Ball" = Ball, "BallX" = BallX, "FirstDownLine" = FirstDownLine)
return(vis_data)
}
#
## Throw-Based Preparation Source Function
# Required Inputs: input_data , input_game , input_play
# Outputs: Offense_throw , Defense_throw , Ball_throw , BallX , FirstDownLine
# Identifying Players , Play Information
# Prepares one play's tracking data at the "critical moment" (pass / sack /
# tackle) for plotting: standardizes direction, finds the throw frame, locates
# the targeted receiver (with a nearest-to-ball fallback), and splits rows into
# offense / defense / ball at that frame.
#
# Args:
#   GeneralPlayData:  the play's row from the plays table (must have yardsToGo).
#   TrackingPlayData: tracking rows for the play.
#   PlayerTargeted:   row with the play's targetNflId (may be NA).
# Returns: list(Offense_throw, Defense_throw, Ball_throw, BallX,
#          FirstDownLine, target_player_location).
prep_throw_vis_data <- function(GeneralPlayData , TrackingPlayData , PlayerTargeted) {
# Flip coordinates/angles for leftward plays (same normalization as
# prep_vis_data). NOTE(review): mod() is presumably matlab/pracma::mod —
# confirm the package is loaded by the caller.
TrackingPlayData <- TrackingPlayData %>%
mutate( x = case_when( playDirection == "right" ~ x ,
playDirection == "left" ~ 120 - x)) %>%
mutate( y = case_when( playDirection == "right" ~ y ,
playDirection == "left" ~ 160/3 - y)) %>%
mutate( dir = case_when( playDirection == "right" ~ dir ,
playDirection == "left" ~ mod(dir+180,360))) %>%
mutate( o = case_when( playDirection == "right" ~ o ,
playDirection == "left" ~ mod(o+180,360)))
# Offense = the QB's team at frame 1 (1x1 data frame).
qb_team <- TrackingPlayData %>%
filter(position == "QB") %>%
filter(frameId == 1) %>%
select(team)
# Identifying Critical Moment: first matching event in priority order.
# NOTE(review): if none of these events occurs on the play, critical_moment
# is never assigned and the filter below errors — confirm callers pre-filter.
if (sum(TrackingPlayData$event == "pass_forward") > 0) {
critical_moment <- "pass_forward"
} else if (sum(TrackingPlayData$event == "pass_shovel") > 0) {
critical_moment <- "pass_shovel"
} else if (sum(TrackingPlayData$event == "qb_spike") > 0) {
critical_moment <- "qb_spike"
} else if (sum(TrackingPlayData$event == "qb_sack") > 0) {
critical_moment <- "qb_sack"
} else if (sum(TrackingPlayData$event == "qb_strip_sack") > 0) {
critical_moment <- "qb_strip_sack"
} else if (sum(TrackingPlayData$event == "tackle") > 0) {
critical_moment <- "tackle"
}
# All rows at the critical-moment frame.
critical_frame <- TrackingPlayData %>%
filter(event == critical_moment )
# Identifying Targeted Player: try the labeled targetNflId first.
target_player_location <- critical_frame %>%
filter(nflId == PlayerTargeted$targetNflId) %>%
select(x , y , s , a , o , dis , nflId , displayName)
# Fallback when the target is missing: take the offensive player (QB's team,
# excluding the ball) closest to the ball at the critical frame.
if (is.na(PlayerTargeted$targetNflId) | nrow(target_player_location) == 0) {
ball_arrive <- critical_frame %>%
filter(displayName == "Football")
target_player <- critical_frame %>%
mutate(ballDistance = sqrt( (as.numeric(critical_frame$x) - as.numeric(ball_arrive$x))^2 + (as.numeric(critical_frame$y) - as.numeric(ball_arrive$y))^2 )) %>%
filter(displayName != "Football") %>%
filter(team == qb_team$team) %>%
filter(ballDistance == min(ballDistance)) %>%
select(nflId , displayName)
target_player_location <- critical_frame %>%
filter(nflId == target_player$nflId) %>%
select(x , y , s , a , o , dis , nflId , displayName)
}
# Preparing Offense/Defense/Ball Data at Critical Moment
Offense_throw <- TrackingPlayData %>%
filter(team == as.character(qb_team)) %>%
filter(event == critical_moment)
Defense_throw <- TrackingPlayData %>%
filter(team != as.character(qb_team)) %>%
filter(team != "football") %>%
filter(event == critical_moment)
Ball_throw <- TrackingPlayData %>%
filter(team == "football") %>%
filter(event == critical_moment)
# Line of scrimmage: ball x-position at the first frame (1x1 data frame).
BallX <- TrackingPlayData %>%
filter(team == "football") %>%
filter(frameId == 1) %>%
select(x)
YardsToGo <- GeneralPlayData %>%
select(yardsToGo)
# 1x1 data-frame addition, as in prep_vis_data.
FirstDownLine <- BallX + YardsToGo
# Generating Function Output
throw_vis_data <- list("Offense_throw" = Offense_throw, "Defense_throw" = Defense_throw,
"Ball_throw" = Ball_throw, "BallX" = BallX, "FirstDownLine" = FirstDownLine,
"target_player_location" = target_player_location)
return(throw_vis_data)
}
#
## Adds Shell ID Visualization
prep_shell_vis <- function(TrackingPlayData){
dat1 <- TrackingPlayData
#flip x,y,direction, orientation
dat1 <- dat1 %>%
mutate(x = case_when(playDirection == "right" ~ x ,
playDirection == "left" ~ 120 - x)) %>%
mutate(y = case_when(playDirection == "right" ~ y ,
playDirection == "left" ~ 53.33 - y)) %>%
mutate(o = case_when(playDirection == "right" ~ o,
playDirection == "left" ~ mod(o+180, 360))) %>%
mutate(dir = case_when(playDirection == "right" ~ dir,
playDirection == "left" ~ mod(dir+180, 360)))
##ball location
#y
dat1 <- dat1 %>%
group_by(gameId, playId) %>%
mutate(BallSnapY = y[match('Football ball_snap', paste(displayName, event))]) %>%
ungroup
#x
dat1 <- dat1 %>%
group_by(gameId, playId) %>%
mutate(BallSnapX = x[match('Football ball_snap', paste(displayName, event))]) %>%
ungroup
###Deleting problematic plays/frames
#week 2
dat1 <- dat1 %>%
filter(!(gameId == 2018091605 & playId == 2715))
#week 3
dat1 <- dat1 %>%
filter(!(gameId == 2018092301))
#week 4
dat1 <- dat1 %>%
filter(!(gameId == 2018093011))
##week 14
#fix week 14 issue where there are two different frames with ball_snap
dat1$event[dat1$gameId == 2018120905 & dat1$playId == 1426 & dat1$event == 'ball_snap' & dat1$frameId == 12] = 'None'
#two frame 92s in week 14
dat1 <- dat1 %>%
filter(!(gameId == 2018120905 & playId == 1426))
#week 15
dat1 <- dat1 %>%
filter(!(gameId == 2018121605))
#week 16
dat1 <- dat1 %>%
filter(!(gameId == 2018123001 & playId == 435))
dat1 <- dat1 %>%
filter(!(gameId == 2018123006))
dat1 <- dat1 %>%
filter(!(gameId == 2018123000 & playId == 131))
#snap frame
dat1 <- dat1 %>%
group_by(gameId, playId) %>%
mutate(SnapFrame = frameId[match('ball_snap', event)])
#is it the snap frame?
dat1 <- dat1 %>%
group_by(gameId, playId) %>%
mutate(isSnapFrame = case_when(frameId==SnapFrame ~ 1,
TRUE ~0))
#frames since the snap
dat1 <- dat1 %>%
group_by(gameId, playId) %>%
mutate(FramesSinceSnap = frameId - SnapFrame)
##side of player relative to spot where ball is snapped from (changes continuously)
dat1 <- dat1 %>%
mutate(SideDuring = case_when(BallSnapY < y ~ "Left",
BallSnapY > y ~ "Right",
BallSnapY == y ~ "OTB"))
#side at the snap (should be SideDuring for that player when the ball was snapped)
dat1 <- dat1 %>%
group_by(gameId, playId) %>%
mutate(SideSnap = case_when(event == 'ball_snap' & y >= BallSnapY ~ 'Left',
event == 'ball_snap' & y < BallSnapY ~ 'Right',
event == 'ball_snap' & y == BallSnapY ~ 'Center')) %>%
mutate(SideSnap = replace(SideSnap, event != 'ball_snap', SideSnap[event == 'ball_snap'])) %>%
ungroup
##eliminate plays in opposing redzone
dat1 <- filter(dat1, BallSnapX < 90)
#needs to be 90 to account for left endzone
##location of each player at snap
#Y
dat1 <- dat1 %>%
group_by(displayName, gameId, playId) %>%
mutate(SnapY = y[event == "ball_snap"]) %>%
ungroup
#X
dat1 <- dat1 %>%
group_by(displayName, gameId, playId) %>%
mutate(SnapX = x[event == "ball_snap"]) %>%
ungroup
#ID correct QB
dat1 <- dat1 %>%
group_by(gameId , playId) %>%
mutate(ball_snap_y = y[match('Football ball_snap' , paste(displayName, event))]) %>%
mutate(ydist2ball_snap = abs(SnapY - BallSnapY)) %>%
mutate( isBackfieldQB = case_when(position == "QB" & ydist2ball_snap < 2 ~ 1,
TRUE ~ 0)) %>%
mutate(ball_snap_y = NULL) %>% mutate(dist2ball_snap = NULL)
#backfield count
dat1 <- dat1 %>%
group_by(gameId, playId, frameId) %>%
mutate(BackfieldQBCount = sum(isBackfieldQB))
dattest <- filter(dat1, BackfieldQBCount == 0 & FramesSinceSnap==0)
#eliminate no QB under center
dat1 <- filter(dat1, BackfieldQBCount != 0)
#eliminate 2 QBs under center
dat1 <- filter(dat1, BackfieldQBCount != 2)
##is player on offense or defense
#create qb team
dat1 <- dat1 %>%
group_by(gameId, playId, frameId) %>%
mutate(qb_team = team[match(1, isBackfieldQB)]) %>%
ungroup
#is player on QB's team?
dat1 <- dat1 %>%
mutate(OffDef = case_when(team == qb_team ~ "Offense",
team != qb_team ~ "Defense")) %>%
mutate(OffDef = case_when(displayName != "Football" ~ OffDef,
displayName == "Football" ~ "Football"))
##delete plays where QB isn't directly behind center
#create QB team
dat1 <- dat1 %>%
group_by(gameId, playId) %>%
mutate(QBTeam = case_when(isBackfieldQB == 1 ~ "QB",
TRUE ~ OffDef))
##number receivers on each side
dat1 <- dat1 %>%
group_by(gameId, playId, QBTeam) %>%
mutate(Num = local({
fsnap <- isSnapFrame == 1
y <- y[fsnap]
left <- SideSnap[fsnap] == "Left"
right <- !left
x <- integer(length(y))
names(x) <- displayName[fsnap]
x[left] <- rank(-y[left], ties.method = "min")
x[right] <- rank(y[right], ties.method = "min")
unname(x[displayName])
}))
###location of each player at various frames
# delete plays less than 1.5 seconds
#dat1 <- dat1 %>%
#group_by(gameId, playId) %>% mutate(MaxFrame = max(frameId)) %>%
#filter(MaxFrame >= 26)
# get rid of plays where pass happens before frame 26
dat1 <- dat1 %>%
group_by(gameId, playId) %>%
mutate(ThrowFrame = frameId[match('pass_forward', event)]) %>%
mutate(ShovelThrowFrame = frameId[match('pass_shovel', event)]) %>%
filter(ThrowFrame >= 26 | is.na(ThrowFrame) | ShovelThrowFrame >= 26)
# get rid of plays where theres not 15 frames after snap
dat1 <- dat1 %>%
group_by(gameId, playId) %>%
mutate(MaxFramesSinceSnap = max(FramesSinceSnap)) %>%
filter(MaxFramesSinceSnap >= 15)
##half-second into the play
#Y
dat1 <- dat1 %>%
group_by(displayName, gameId, playId) %>%
mutate(HalfSecondY = y[FramesSinceSnap == 5]) %>%
ungroup
#X
dat1 <- dat1 %>%
group_by(displayName, gameId, playId) %>%
mutate(HalfSecond = x[FramesSinceSnap == 5]) %>%
ungroup
##8/10s of second into the play
#Y
dat1 <- dat1 %>%
group_by(displayName, gameId, playId) %>%
mutate(EightTenthsY = y[FramesSinceSnap == 8]) %>%
ungroup
#X
dat1 <- dat1 %>%
group_by(displayName, gameId, playId) %>%
mutate(EightTenthsX = x[FramesSinceSnap == 8]) %>%
ungroup
##1.5 seconds into the play
#Y
dat1 <- dat1 %>%
group_by(displayName, gameId, playId) %>%
mutate(SecondAndHalfY = y[FramesSinceSnap == 15]) %>%
ungroup
#X
dat1 <- dat1 %>%
group_by(displayName, gameId, playId) %>%
mutate(SecondAndHalfX = x[FramesSinceSnap == 15]) %>%
ungroup
#new db position name
dat1 <- dat1 %>%
mutate(DBPos = case_when(position == "DB" |position == "CB"| position == "FS" | position == "SS" |position == "S" ~ "DB",
position == "MLB" |position == "LB" | position == "OLB" |position == "ILB"~"LB",
TRUE ~ position))
##L and R
dat1 <- dat1%>%
mutate(LR = case_when(SideSnap == "Left" ~ "L",
SideSnap == "Right" ~ "R",
SideSnap == "OTB" ~ "C"))
##paste
dat1$ReceiverNumber <- paste(dat1$LR, dat1$Num, sep="")
##paste again
dat1$ReceiverNumber <- paste(dat1$ReceiverNumber, dat1$QBTeam, sep="")
#unique playid
dat1 <- dat1 %>%
mutate(uniqueplay = (gameId*1000)+ playId)
###location of each player by number
##Left
#L1
dat1 <- dat1 %>%
group_by(gameId, playId) %>%
mutate(L1OffY = y[match('L1Offense ball_snap', paste(ReceiverNumber, event))]) %>%
ungroup
#L2
dat1 <- dat1 %>%
group_by(gameId, playId) %>%
mutate(L2OffY = y[match('L2Offense ball_snap', paste(ReceiverNumber, event))]) %>%
ungroup
#L3
dat1 <- dat1 %>%
group_by(gameId, playId) %>%
mutate(L3OffY = y[match('L3Offense ball_snap', paste(ReceiverNumber, event))]) %>%
ungroup
#L4
dat1 <- dat1 %>%
group_by(gameId, playId) %>%
mutate(L4OffY = y[match('L4Offense ball_snap', paste(ReceiverNumber, event))]) %>%
ungroup
##Right
#R1
dat1 <- dat1 %>%
group_by(gameId, playId) %>%
mutate(R1OffY = y[match('R1Offense ball_snap', paste(ReceiverNumber, event))]) %>%
ungroup
#R2
dat1 <- dat1 %>%
group_by(gameId, playId) %>%
mutate(R2OffY = y[match('R2Offense ball_snap', paste(ReceiverNumber, event))]) %>%
ungroup
#R3
dat1 <- dat1 %>%
group_by(gameId, playId) %>%
mutate(R3OffY = y[match('R3Offense ball_snap', paste(ReceiverNumber, event))]) %>%
ungroup
#R4
dat1 <- dat1 %>%
group_by(gameId, playId) %>%
mutate(R4OffY = y[match('R4Offense ball_snap', paste(ReceiverNumber, event))]) %>%
ungroup
## creating column
#L1 - L2
dat1 <- dat1 %>%
mutate(L1L2Diff = L1OffY - L2OffY)
#R1 - R2 (opposite--has to be R2-R1)
dat1 <- dat1 %>%
mutate(R1R2Diff = R2OffY - R1OffY)
##create bounds of field
#left side
dat1 <- dat1 %>%
mutate(LeftSideline = 53.33)
#right side
dat1 <- dat1 %>%
mutate(RightSideline = 0)
##drop obs where there's no L1 or R1
dat1 <- dat1[!is.na(dat1$L1OffY), ]
dat1 <- dat1[!is.na(dat1$R1OffY), ]
##Create column lines
#left
dat1 <- dat1 %>%
group_by(gameId, playId) %>%
mutate(LeftColumnLine = case_when(is.na(L1L2Diff) ~ (L1OffY + BallSnapY + 4) * .5,
L1L2Diff > 3 ~ L1OffY - 1.5,
L1L2Diff <=3 ~ (L1OffY-L2OffY)*.75 + L2OffY))
#right
dat1 <- dat1 %>%
group_by(gameId, playId) %>%
mutate(RightColumnLine = case_when(is.na(R1R2Diff) ~ (R1OffY + BallSnapY - 4) * .5,
R1R2Diff > 3 ~ R1OffY + 1.5,
R1R2Diff <=3 ~ (R2OffY-R1OffY)*.25 + R1OffY))
##account for tight splits
#create indicator
#left
dat1 <- dat1 %>%
group_by(gameId, playId) %>%
mutate(TightSplitLeft = case_when(L1OffY <= BallSnapY + 6 ~ 1,
TRUE ~ 0))
#right
dat1 <- dat1 %>%
group_by(gameId, playId) %>%
mutate(TightSplitRight = case_when(R1OffY >= BallSnapY - 6 ~ 1,
TRUE ~ 0))
##change column line accordingly
#left
dat1 <- dat1 %>%
group_by(gameId, playId) %>%
mutate(LeftColumnLine = case_when(TightSplitLeft==1 & L1L2Diff >=1 ~ L1OffY - .5,
TRUE ~ LeftColumnLine))
#right
dat1 <- dat1 %>%
group_by(gameId, playId) %>%
mutate(RightColumnLine = case_when(TightSplitRight==1 & R1R2Diff >=1 ~ R1OffY + .5,
TRUE ~ RightColumnLine))
##create column indicator
#left
dat1 <- dat1 %>%
group_by(gameId, playId) %>%
mutate(LeftColumn = case_when(SnapY >= LeftColumnLine & SnapY < LeftSideline & OffDef=="Defense" ~ 1,
TRUE ~ 0))
#right
dat1 <- dat1 %>%
group_by(gameId, playId) %>%
mutate(RightColumn = case_when(SnapY <= RightColumnLine & SnapY > RightSideline & OffDef=="Defense" ~ 1,
TRUE ~ 0))
##column count(all positions)
#left
dat1 <- dat1 %>%
group_by(gameId, playId, frameId) %>%
mutate(LeftColumnCount = sum(LeftColumn))
#right
dat1 <- dat1 %>%
group_by(gameId, playId, frameId) %>%
mutate(RightColumnCount = sum(RightColumn))
##account for when column player is a little more than a yard inside number 1
dat1 <- dat1 %>%
group_by(gameId, playId) %>%
mutate(LeftColumnLine = case_when(LeftColumnCount==0 & TightSplitLeft == 0 & L1L2Diff > 4 ~ L1OffY - 2.5,
TRUE ~ LeftColumnLine))
dat1 <- dat1 %>%
group_by(gameId, playId) %>%
mutate(RightColumnLine = case_when(RightColumnCount==0 & TightSplitRight == 0 & R1R2Diff > 4 ~ R1OffY + 2.5,
TRUE ~ RightColumnLine))
##redo column indicator
#left
dat1 <- dat1 %>%
group_by(gameId, playId) %>%
mutate(LeftColumn = case_when(SnapY >= LeftColumnLine & SnapY < LeftSideline & OffDef=="Defense" ~ 1,
TRUE ~ LeftColumn))
#right
dat1 <- dat1 %>%
group_by(gameId, playId) %>%
mutate(RightColumn = case_when(SnapY <= RightColumnLine & SnapY > RightSideline & OffDef=="Defense" ~ 1,
TRUE ~ RightColumn))
###update column
##eliminate blitzers
#left
dat1 <- dat1 %>%
group_by(gameId, playId) %>%
mutate(LeftColumn = case_when(SecondAndHalfX < BallSnapX ~ 0,
TRUE ~ LeftColumn))
#right
dat1 <- dat1 %>%
group_by(gameId, playId) %>%
mutate(RightColumn = case_when(SecondAndHalfX < BallSnapX ~ 0,
TRUE ~RightColumn))
##Eliminate DL in column
#left
dat1 <- dat1 %>%
group_by(gameId, playId) %>%
mutate(LeftColumn = case_when(LeftColumn == 1 & position == 'DL' ~ 0,
TRUE ~ LeftColumn))
#right
dat1 <- dat1 %>%
group_by(gameId, playId) %>%
mutate(RightColumn = case_when(RightColumn == 1 & position == 'DL' ~ 0,
TRUE ~ RightColumn))
##if there is a tight split, eliminate LBs
#left
dat1 <- dat1 %>%
group_by(gameId, playId) %>%
mutate(LeftColumn = case_when(L1OffY <= BallSnapY + 6 & DBPos =="LB" & LeftColumnCount > 1~ 0,
TRUE ~ LeftColumn))
#right
dat1 <- dat1 %>%
group_by(gameId, playId) %>%
mutate(RightColumn = case_when(R1OffY >= BallSnapY - 6 & DBPos =="LB" & RightColumnCount > 1 ~ 0,
TRUE ~ RightColumn))
#redo column count
#left
dat1 <- dat1 %>%
group_by(gameId, playId, frameId) %>%
mutate(LeftColumnCount = sum(LeftColumn))
#right
dat1 <- dat1 %>%
group_by(gameId, playId, frameId) %>%
mutate(RightColumnCount = sum(RightColumn))
##count number of DBs in column (just DBs)
#left
dat1 <- dat1 %>%
group_by(gameId, playId, frameId) %>%
mutate(LeftColumnDBCount = sum(DBPos=="DB" & LeftColumn==1))
#right
dat1 <- dat1 %>%
group_by(gameId, playId, frameId) %>%
mutate(RightColumnDBCount = sum(DBPos=="DB" & RightColumn==1))
## alley
#L2 - L3
dat1 <- dat1 %>%
mutate(L2L3Diff = L2OffY - L3OffY)
#R2 - R3 (opposite--has to be R3-R2)
dat1 <- dat1 %>%
mutate(R2R3Diff = R3OffY - R2OffY)
##is #2 inside tackle?
#left
dat1 <- dat1 %>%
mutate(L2OffInsideOT = case_when(is.na(L2OffY) ~ 0,
L2OffY < (BallSnapY + 4) ~ 1,
L2OffY >= (BallSnapY + 4) ~ 0))
#right
dat1 <- dat1 %>%
mutate(R2OffInsideOT = case_when(is.na(R2OffY) ~ 0,
R2OffY < (BallSnapY - 4) ~ 1,
R2OffY >= (BallSnapY - 4) ~ 0))
#what number is the QB
dat1 <- dat1 %>%
mutate(ReceiverNumber = case_when(ReceiverNumber == "L2Offense" & isBackfieldQB == 1 ~ "QB",
ReceiverNumber == "L3Offense" & isBackfieldQB == 1 ~ "QB",
ReceiverNumber == "L4Offense" & isBackfieldQB == 1 ~ "QB",
ReceiverNumber == "L5Offense" & isBackfieldQB == 1 ~ "QB",
ReceiverNumber == "L6Offense" & isBackfieldQB == 1 ~ "QB",
ReceiverNumber == "R2Offense" & isBackfieldQB == 1 ~ "QB",
ReceiverNumber == "R3Offense" & isBackfieldQB == 1 ~ "QB",
ReceiverNumber == "R4Offense" & isBackfieldQB == 1 ~ "QB",
ReceiverNumber == "R5Offense" & isBackfieldQB == 1 ~ "QB",
ReceiverNumber == "R6Offense" & isBackfieldQB == 1 ~ "QB",
TRUE ~ ReceiverNumber))
#Receiver count by side
dat1$LeftReceiverCount <- rowSums(!is.na(dat1[c('L1OffY', 'L2OffY', 'L3OffY', "L4OffY")]))
dat1$RightReceiverCount <- rowSums(!is.na(dat1[c('R1OffY', 'R2OffY', 'R3OffY', "R4OffY")]))
##ID players inside tight end
#Left
#L1 inside tight?
dat1 <- dat1 %>%
mutate(L1InsideTE = case_when(L1OffY <= (BallSnapY + 4) ~ 1,
L1OffY > (BallSnapY + 4) ~ 0))
#L2 inside tight?
dat1 <- dat1 %>%
mutate(L2InsideTE = case_when(is.na(L2OffY) ~ 0,
L2OffY <= (BallSnapY + 4) ~ 1,
L2OffY > (BallSnapY + 4) ~ 0))
#L3 inside tight?
dat1 <- dat1 %>%
mutate(L3InsideTE = case_when(is.na(L2OffY) ~ 0,
is.na(L3OffY) ~ 0,
L3OffY <= (BallSnapY + 4) ~ 1,
L3OffY > (BallSnapY + 4) ~ 0))
#L4 inside tight?
dat1 <- dat1 %>%
mutate(L4InsideTE = case_when(is.na(L2OffY) ~ 0,
is.na(L3OffY) ~ 0,
is.na(L4OffY) ~ 0,
L4OffY <= (BallSnapY + 4) ~ 1,
L4OffY > (BallSnapY + 4) ~ 0))
#Right
#R1 inside tight?
dat1 <- dat1 %>%
mutate(R1InsideTE = case_when(R1OffY >= (BallSnapY - 4) ~ 1,
R1OffY < (BallSnapY - 4) ~ 0))
#R2 inside tight?
dat1 <- dat1 %>%
mutate(R2InsideTE = case_when(is.na(R2OffY) ~ 0,
R2OffY >= (BallSnapY - 4) ~ 1,
R2OffY < (BallSnapY - 4) ~ 0))
#R3 inside tight?
dat1 <- dat1 %>%
mutate(R3InsideTE = case_when(is.na(R2OffY) ~ 0,
is.na(R3OffY) ~ 0,
R3OffY >= (BallSnapY - 4) ~ 1,
R3OffY < (BallSnapY - 4) ~ 0))
#R4 inside tight?
dat1 <- dat1 %>%
mutate(R4InsideTE = case_when(is.na(R2OffY) ~ 0,
is.na(R3OffY) ~ 0,
is.na(R4OffY) ~ 0,
R4OffY >= (BallSnapY - 4) ~ 1,
R4OffY < (BallSnapY - 4) ~ 0))
#drop plays where number 1 is inside tackle
dat1 <- filter(dat1, L1InsideTE != 1)
dat1 <- filter(dat1, R1InsideTE != 1)
###subtract count if inside tackle
#Left
#L4
dat1 <- dat1 %>%
mutate(LeftReceiverCountOutsideTackle = LeftReceiverCount - L4InsideTE)
#L3
dat1$LeftReceiverCountOutsideTackle <- dat1$LeftReceiverCountOutsideTackle - dat1$L3InsideTE
#L2
dat1$LeftReceiverCountOutsideTackle <- dat1$LeftReceiverCountOutsideTackle - dat1$L2InsideTE
#L1
dat1$LeftReceiverCountOutsideTackle <- dat1$LeftReceiverCountOutsideTackle - dat1$L1InsideTE
#Right
#R4
dat1 <- dat1 %>%
mutate(RightReceiverCountOutsideTackle = RightReceiverCount - R4InsideTE)
#R3
dat1$RightReceiverCountOutsideTackle <- dat1$RightReceiverCountOutsideTackle - dat1$R3InsideTE
#R2
dat1$RightReceiverCountOutsideTackle <- dat1$RightReceiverCountOutsideTackle - dat1$R2InsideTE
#R1
dat1$RightReceiverCountOutsideTackle <- dat1$RightReceiverCountOutsideTackle - dat1$R1InsideTE
#Create indicator if first player outside is outside 5 yards from ball laterally
#left
dat1 <- dat1 %>%
group_by(gameId, playId) %>%
mutate(LeftAlleyID = case_when(LeftReceiverCountOutsideTackle == 1 & L1OffY > (BallSnapY + 5) ~ 'L1Plus5',
LeftReceiverCountOutsideTackle == 1 & L1OffY <= (BallSnapY + 5) ~ 'L1Minus5',
LeftReceiverCountOutsideTackle == 2 & L2OffY > (BallSnapY + 5) ~ 'L2Plus5',
LeftReceiverCountOutsideTackle == 2 & L2OffY <= (BallSnapY + 5) ~ 'L2Minus5',
LeftReceiverCountOutsideTackle == 3 & L3OffY > (BallSnapY + 5) ~ 'L3Plus5',
LeftReceiverCountOutsideTackle == 3 & L3OffY <= (BallSnapY + 5) ~ 'L3Minus5',
LeftReceiverCountOutsideTackle == 4 & L3OffY > (BallSnapY + 5) ~ 'L4Plus5',
LeftReceiverCountOutsideTackle == 4 & L3OffY <= (BallSnapY + 5) ~ 'L4Minus5'))
#right #NOTE--"plus 5" means receiver is more than five yards from the ball
dat1 <- dat1 %>%
group_by(gameId, playId) %>%
mutate(RightAlleyID = case_when(RightReceiverCountOutsideTackle == 1 & R1OffY < (BallSnapY - 5) ~ 'R1Plus5',
RightReceiverCountOutsideTackle == 1 & R1OffY >= (BallSnapY - 5) ~ 'R1Minus5',
RightReceiverCountOutsideTackle == 2 & R2OffY < (BallSnapY - 5) ~ 'R2Plus5',
RightReceiverCountOutsideTackle == 2 & R2OffY >= (BallSnapY - 5) ~ 'R2Minus5',
RightReceiverCountOutsideTackle == 3 & R3OffY < (BallSnapY - 5) ~ 'R3Plus5',
RightReceiverCountOutsideTackle == 3 & R3OffY >= (BallSnapY - 5) ~ 'R3Minus5',
RightReceiverCountOutsideTackle == 4 & R3OffY < (BallSnapY - 5) ~ 'R4Plus5',
RightReceiverCountOutsideTackle == 4 & R3OffY >= (BallSnapY - 5) ~ 'R4Minus5'))
##Create alley lines
#left
dat1 <- dat1 %>%
group_by(gameId, playId) %>%
mutate(LeftAlleyLine = case_when(LeftAlleyID == 'L1Plus5' ~ BallSnapY + 5,
LeftAlleyID == 'L1Minus5' ~ (L1OffY + BallSnapY + 4)* .5,
LeftAlleyID == 'L2Plus5' ~ BallSnapY + 5,
LeftAlleyID == 'L2Minus5' ~ (L2OffY + (BallSnapY + 4))*.5,
LeftAlleyID == 'L3Plus5' ~ BallSnapY + 5,
LeftAlleyID == 'L3Minus5' ~ (L3OffY + (BallSnapY + 4))*.5,
LeftAlleyID == 'L4Plus5' ~ BallSnapY + 5,
LeftAlleyID == 'L4Minus5' ~ (L4OffY + (BallSnapY + 4))*.5))
#right
dat1 <- dat1 %>%
group_by(gameId, playId) %>%
mutate(RightAlleyLine = case_when(RightAlleyID == 'R1Plus5' ~ BallSnapY - 5,
RightAlleyID == 'R1Minus5' ~ (R1OffY + (BallSnapY - 4))* .5,
RightAlleyID == 'R2Plus5' ~ BallSnapY - 5,
RightAlleyID == 'R2Minus5' ~ (R2OffY + (BallSnapY - 4))*.5,
RightAlleyID == 'R3Plus5' ~ BallSnapY - 5,
RightAlleyID == 'R3Minus5' ~ (R3OffY + (BallSnapY - 4))*.5,
RightAlleyID == 'R4Plus5' ~ BallSnapY - 5,
RightAlleyID == 'R4Minus5' ~ (R4OffY + (BallSnapY - 4))*.5))
#replace obs where alley line is wider than L1 or L2
#left
dat1 <- dat1 %>%
group_by(gameId, playId) %>%
mutate(LeftAlleyLine = case_when(LeftAlleyLine > LeftColumnLine ~ LeftColumnLine,
TRUE ~ LeftAlleyLine))
#right
dat1 <- dat1 %>%
group_by(gameId, playId) %>%
mutate(RightAlleyLine = case_when(RightAlleyLine < RightColumnLine ~ RightColumnLine,
TRUE ~ RightAlleyLine))
#create alley indicator
dat1 <- dat1 %>%
group_by(gameId, playId) %>%
mutate(LeftAlley = case_when(SnapY < LeftColumnLine & SnapY >= LeftAlleyLine & OffDef=="Defense" ~ 1,
TRUE ~ 0))
#right
dat1 <- dat1 %>%
group_by(gameId, playId) %>%
mutate(RightAlley = case_when(SnapY > RightColumnLine & SnapY <= RightAlleyLine & OffDef == "Defense" ~ 1,
TRUE ~ 0))
###identify chute
dat1 <- dat1 %>%
group_by(gameId, playId) %>%
mutate(Chute = case_when(SnapY < LeftColumnLine & SnapY > RightColumnLine & OffDef == "Defense" ~ 1,
TRUE ~ 0))
#indicate highest DB in column
#left
dat1 <- dat1 %>%
arrange(gameId, playId, frameId, LeftColumn != 1, DBPos != 'DB', desc(SnapX)) %>%
group_by(gameId, playId, frameId) %>%
mutate(LeftColumnHighest = c(1, rep(0, n() - 1)))
#right
dat1 <- dat1 %>%
arrange(gameId, playId, frameId, RightColumn != 1, DBPos != 'DB', desc(SnapX)) %>%
group_by(gameId, playId, frameId) %>%
mutate(RightColumnHighest = c(1, rep(0, n() - 1)))
##include safeties in the column
#left
dat1 <- dat1 %>%
group_by(gameId, playId) %>%
mutate(Chute = case_when(LeftColumnDBCount==2 & LeftColumnHighest ==1 & SnapX > BallSnapX + 7 ~ 1,
TRUE ~ Chute))
#right
dat1 <- dat1 %>%
group_by(gameId, playId) %>%
mutate(Chute = case_when(RightColumnDBCount==2 & RightColumnHighest ==1 & SnapX > BallSnapX + 7 ~ 1,
TRUE ~ Chute))
#name these players "columnchutes"
#left
dat1 <- dat1 %>%
group_by(gameId, playId) %>%
mutate(LeftColumnChute = case_when(LeftColumn == 1 & Chute == 1 ~ 1,
TRUE ~ 0))
#right
dat1 <- dat1 %>%
group_by(gameId, playId) %>%
mutate(RightColumnChute = case_when(RightColumn==1 & Chute ==1 ~ 1,
TRUE ~ 0))
#####defining the window
## define two highest DBs
dat1 <- dat1 %>%
arrange(gameId, playId, frameId, Chute != 1,DBPos != 'DB', desc(SnapX)) %>%
group_by(gameId, playId, frameId) %>%
mutate(Highest = c('A','B', rep('-', n() - 2)))
#new columns for higher DB
#Y
dat1 <- dat1 %>%
group_by(gameId, playId) %>%
mutate(HighDBY = EightTenthsY[match('A', Highest)]) %>%
ungroup
#X
dat1 <- dat1 %>%
group_by(gameId, playId) %>%
mutate(HighDBX = EightTenthsX[match('A', Highest)]) %>%
ungroup
#new column for lower DB
#Y
dat1 <- dat1 %>%
group_by(gameId, playId) %>%
mutate(LowDBY = EightTenthsY[match('B', Highest)]) %>%
ungroup
#X
dat1 <- dat1 %>%
group_by(gameId, playId) %>%
mutate(LowDBX = EightTenthsX[match('B', Highest)]) %>%
ungroup
#
dat1 <- dat1 %>%
group_by(gameId, playId) %>%
mutate(Highest = case_when(Highest =="A" & LowDBX > HighDBX ~ "B",
Highest =="B" & LowDBX > HighDBX ~ "A",
TRUE ~ Highest))
## account for observations where the safeties flipped depth
#Y
dat1 <- dat1 %>%
group_by(gameId, playId) %>%
mutate(HighDBY = EightTenthsY[match('A', Highest)]) %>%
ungroup
#X
dat1 <- dat1 %>%
group_by(gameId, playId) %>%
mutate(HighDBX = EightTenthsX[match('A', Highest)]) %>%
ungroup
#new column for lower DB
#Y
dat1 <- dat1 %>%
group_by(gameId, playId) %>%
mutate(LowDBY = EightTenthsY[match('B', Highest)]) %>%
ungroup
#X
dat1 <- dat1 %>%
group_by(gameId, playId) %>%
mutate(LowDBX = EightTenthsX[match('B', Highest)]) %>%
ungroup
#create channel safety (middle safety on the play?)
dat1 <- dat1 %>%
group_by(gameId, playId) %>%
mutate(ChannelSafety = case_when(HighDBY < LeftAlleyLine & HighDBY > RightAlleyLine ~ 1,
TRUE ~ 0))
##determine if column-safeties or high corners
#left
dat1 <- dat1 %>%
group_by(gameId, playId) %>%
mutate(LeftColumnSafety = case_when(LeftColumnChute == 1 & ChannelSafety == 1 ~ 0,
LeftColumnChute == 1 & Highest=="A" & ChannelSafety == 0 ~ 1,
LeftColumnChute == 1 & Highest=="B" & ChannelSafety == 0 ~ 1))
#right
dat1 <- dat1 %>%
group_by(gameId, playId) %>%
mutate(RightColumnSafety = case_when(RightColumnChute == 1 & ChannelSafety == 1 ~ 0,
RightColumnChute == 1 & Highest=="A" & ChannelSafety == 0 ~ 1,
RightColumnChute == 1 & Highest=="B" & ChannelSafety == 0 ~ 1))
##eliminate these column-safeties from columns
#left
dat1 <- dat1 %>%
group_by(gameId, playId) %>%
mutate(LeftColumn = case_when(LeftColumnSafety == 1 ~ 0,
TRUE ~ LeftColumn))
#Right
dat1 <- dat1 %>%
group_by(gameId, playId) %>%
mutate(RightColumn = case_when(RightColumnSafety == 1 ~ 0,
TRUE ~ RightColumn))
#remove columnchute corners from the chute
#left
dat1 <- dat1 %>%
group_by(gameId, playId) %>%
mutate(Chute = case_when(LeftColumnChute ==1 & LeftColumnSafety == 0 ~ 0,
TRUE ~ Chute))
#right
dat1 <- dat1 %>%
group_by(gameId, playId) %>%
mutate(Chute = case_when(RightColumnChute==1 & RightColumnSafety == 0 ~ 0,
TRUE ~ Chute))
#redo highest player ID
##define two highest DBs
dat1 <- dat1 %>%
arrange(gameId, playId, frameId, Chute != 1,DBPos != 'DB', desc(SnapX)) %>%
group_by(gameId, playId, frameId) %>%
mutate(Highest = c('A','B', rep('-', n() - 2)))
#create new columns for higher DB
#Y
dat1 <- dat1 %>%
group_by(gameId, playId) %>%
mutate(HighDBY = EightTenthsY[match('A', Highest)]) %>%
ungroup
#X
dat1 <- dat1 %>%
group_by(gameId, playId) %>%
mutate(HighDBX = EightTenthsX[match('A', Highest)]) %>%
ungroup
#new column for lower DB
#Y
dat1 <- dat1 %>%
group_by(gameId, playId) %>%
mutate(LowDBY = EightTenthsY[match('B', Highest)]) %>%
ungroup
#X
dat1 <- dat1 %>%
group_by(gameId, playId) %>%
mutate(LowDBX = EightTenthsX[match('B', Highest)]) %>%
ungroup
#count column again
#left
dat1 <- dat1 %>%
group_by(gameId, playId, frameId) %>%
mutate(LeftColumnCount = sum(LeftColumn))
#right
dat1 <- dat1 %>%
group_by(gameId, playId, frameId) %>%
mutate(RightColumnCount = sum(RightColumn))
dat2leftcolumn <- filter(dat1, LeftColumnCount==2)
dat2rightcolumn <- filter(dat1, RightColumnCount==2)
### if still 2 in column, make the column defender the widest player
##create the indicator
#left
dat1 <- dat1 %>%
arrange(gameId, playId, frameId, LeftColumn != 1, desc(SnapY)) %>%
group_by(gameId, playId, frameId) %>%
mutate(LeftColumnWidest = c(1, rep(0, n() - 1)))
#right
dat1 <- dat1 %>%
arrange(gameId, playId, frameId, RightColumn != 1, SnapY) %>%
group_by(gameId, playId, frameId) %>%
mutate(RightColumnWidest = c(1, rep(0, n() - 1)))
## rename the inner player conflict defender (column label removed in next step)
dat1 <- dat1 %>%
group_by(gameId, playId) %>%
mutate(LeftAlley = case_when(LeftColumn==1 & LeftColumnCount ==2 & LeftColumnWidest==0 ~ 1,
TRUE ~ LeftAlley))
dat1 <- dat1 %>%
group_by(gameId, playId) %>%
mutate(RightAlley = case_when(RightColumn==1 & RightColumnCount ==2 & RightColumnWidest==0 ~ 1,
TRUE ~ RightAlley))
##in remaining observations with two players in column, make the widest DB the column player--set all others to 0
#left
dat1 <- dat1 %>%
group_by(gameId, playId) %>%
mutate(LeftColumn = case_when(LeftColumnCount==2 & LeftColumnWidest==1 ~ 1,
LeftColumnCount==2 & LeftColumnWidest==0 ~ 0,
TRUE ~ LeftColumn))
#right
dat1 <- dat1 %>%
group_by(gameId, playId) %>%
mutate(RightColumn = case_when(RightColumnCount==2 & RightColumnWidest==1 ~ 1,
RightColumnCount==2 & RightColumnWidest==0 ~ 0,
TRUE ~ RightColumn))
##count again
#left
dat1 <- dat1 %>%
group_by(gameId, playId, frameId) %>%
mutate(LeftAlleyCount = sum(LeftAlley))
#right
dat1 <- dat1 %>%
group_by(gameId, playId, frameId) %>%
mutate(RightAlleyCount = sum(RightAlley))
datleftalley <- filter(dat1, LeftAlley == 1)
datrightalley <- filter(dat1, RightAlley == 1)
#count column again
#left
dat1 <- dat1 %>%
group_by(gameId, playId, frameId) %>%
mutate(LeftColumnCount = sum(LeftColumn))
#right
dat1 <- dat1 %>%
group_by(gameId, playId, frameId) %>%
mutate(RightColumnCount = sum(RightColumn))
#delete three in column
dat1 <- filter(dat1, RightColumnCount != 3)
dat1 <- filter(dat1, LeftColumnCount != 3)
#delete none in column
dat1 <- filter(dat1, RightColumnCount != 0)
dat1 <- filter(dat1, LeftColumnCount != 0)
#midpoint of safety x values
dat1$SafetyPointX <- dat1$LowDBX
dat1$SafetyPointY <- dat1$HighDBY
###Creating the contours
##create line segments
#low DB to sideline
dat1$LowDBtoRefPointSegment <- sqrt((dat1$LowDBX - dat1$SafetyPointX)^2 + (dat1$LowDBY - dat1$SafetyPointY)^2)
#high DB to sideline
dat1$HighDBtoRefPointSegment <- sqrt((dat1$HighDBX - dat1$SafetyPointX)^2 + (dat1$HighDBY - dat1$SafetyPointY)^2)
#low DB to high DB
dat1$LowDBHighDBSegment <- sqrt((dat1$HighDBX - dat1$LowDBX)^2 + (dat1$HighDBY - dat1$LowDBY)^2)
##determine the angle
dat1 <- dat1 %>%
mutate(Sine = HighDBtoRefPointSegment/LowDBHighDBSegment) %>%
mutate(SafetyAngle = asin(Sine))
# convert to degrees
dat1$SafetyAngle <- (dat1$SafetyAngle*180)/pi
#above 7 yards indicator
dat1 <- dat1 %>%
mutate(HighSafetyDepth = case_when(HighDBX >= BallSnapX + 7 ~ 'HighSafeHigh',
HighDBX < BallSnapX + 7 ~ 'HighSafeLow'))
dat1 <- dat1 %>%
mutate(LowSafetyDepth = case_when(LowDBX >= BallSnapX + 7 ~ 'LowSafeHigh',
LowDBX < BallSnapX + 7 ~ 'LowSafeLow'))
dat1 <- dat1 %>%
mutate(SafetyOver7Count = case_when(HighSafetyDepth == 'HighSafeHigh' & LowSafetyDepth == 'LowSafeHigh' ~ 2,
HighSafetyDepth == 'HighSafeHigh' & LowSafetyDepth == 'LowSafeLow'~ 1,
HighSafetyDepth == 'HighSafeLow' & LowSafetyDepth == 'LowSafeLow' ~ 0))
##safeties in window
#difference on each side
#ID rushers and eliminate from alley
dat1 <- dat1 %>%
group_by(gameId, playId) %>%
mutate(PassRusher = case_when(SecondAndHalfX < BallSnapX & OffDef == "Defense" ~ "PassRusher",
SecondAndHalfX >= BallSnapX & OffDef == "Defense" ~ "Coverage",
TRUE ~ "Offense"))
dat1 <- dat1 %>%
group_by(gameId, playId) %>%
mutate(LeftAlley = case_when(LeftAlley == 1 & PassRusher == "PassRusher" ~ 0,
TRUE ~ LeftAlley))
dat1 <- dat1 %>%
group_by(gameId, playId) %>%
mutate(RightAlley = case_when(RightAlley == 1 & PassRusher == "PassRusher" ~ 0,
TRUE ~ RightAlley))
##left difference
#defense left alley count
dat1 <- dat1 %>%
group_by(gameId, playId, frameId) %>%
mutate(LeftAlleyCount = sum(LeftAlley))
#defense left column count
dat1 <- dat1 %>%
group_by(gameId, playId, frameId) %>%
mutate(NewLeftColumnCount = sum(SnapY >= LeftColumnLine & OffDef == "Defense" & PassRusher=="Coverage"))
#sum defense left column and alley (alley tube + column tube = 'shield')
dat1 <- dat1 %>%
group_by(gameId, playId) %>%
mutate(LeftShield = LeftAlleyCount + NewLeftColumnCount)
#offense
#offense left column count
dat1 <- dat1 %>%
group_by(gameId, playId, frameId) %>%
mutate(LeftOffColumnCount = sum(SnapY >= LeftColumnLine & OffDef == "Offense"))
#offense left alley count
dat1 <- dat1 %>%
group_by(gameId, playId, frameId) %>%
mutate(LeftOffAlleyCount = sum(SnapY < LeftColumnLine & SnapY >= LeftAlleyLine & OffDef == "Offense"))
#sum offense left shield
dat1 <- dat1 %>%
group_by(gameId, playId) %>%
mutate(LeftOffShield = LeftOffColumnCount + LeftOffAlleyCount)
#left defense minus offensive players outside tackle
dat1 <- dat1 %>%
group_by(gameId, playId) %>%
mutate(LeftDBDiff = LeftShield - LeftOffShield)
##right diff
#defense
#defense right alley count
dat1 <- dat1 %>%
group_by(gameId, playId, frameId) %>%
mutate(RightAlleyCount = sum(RightAlley))
#defense right column count
dat1 <- dat1 %>%
group_by(gameId, playId, frameId) %>%
mutate(NewRightColumnCount = sum(SnapY <= RightColumnLine & OffDef == "Defense" & PassRusher =="Coverage"))
#sum right column and alley
dat1 <- dat1 %>%
group_by(gameId, playId) %>%
mutate(RightShield = RightAlleyCount + NewRightColumnCount)
#offense
#offense right column count
dat1 <- dat1 %>%
group_by(gameId, playId, frameId) %>%
mutate(RightOffColumnCount = sum(SnapY <= RightColumnLine & OffDef == "Offense"))
#offense right alley count
dat1 <- dat1 %>%
group_by(gameId, playId, frameId) %>%
mutate(RightOffAlleyCount = sum(SnapY > RightColumnLine & SnapY <= RightAlleyLine & OffDef == "Offense"))
#sum offense right shield
dat1 <- dat1 %>%
group_by(gameId, playId) %>%
mutate(RightOffShield = RightOffColumnCount + RightOffAlleyCount)
#left defense minus offensive players outside tackle
dat1 <- dat1 %>%
group_by(gameId, playId) %>%
mutate(RightDBDiff = RightShield - RightOffShield)
#label diffs
dat1 <- dat1 %>%
group_by(gameId, playId) %>%
mutate(LeftDiffLabel = case_when(LeftDBDiff < 1 ~ "OneHighIndicator",
LeftDBDiff >= 1 ~ "TwoHighIndicator"))
dat1 <- dat1 %>%
group_by(gameId, playId) %>%
mutate(RightDiffLabel = case_when(RightDBDiff < 1 ~ "OneHighIndicator",
RightDBDiff >= 1 ~ "TwoHighIndicator"))
#indicator
dat1 <- dat1 %>%
group_by(gameId, playId) %>%
mutate(DiffLabel = case_when(LeftDiffLabel == "OneHighIndicator" & RightDiffLabel == "OneHighIndicator" ~ "One",
LeftDiffLabel == "OneHighIndicator" & RightDiffLabel == "TwoHighIndicator" ~ "Mix",
LeftDiffLabel == "TwoHighIndicator" & RightDiffLabel == "OneHighIndicator" ~ "Mix",
LeftDiffLabel == "TwoHighIndicator" & RightDiffLabel == "TwoHighIndicator" ~ "Two"))
#add channel into the diff indicator
dat1 <- dat1 %>%
group_by(gameId, playId) %>%
mutate(DiffLabel = case_when(ChannelSafety == 1 & DiffLabel=="Mix" ~ "MixOne",
ChannelSafety == 0 & DiffLabel=="Mix" ~ "MixTwo",
TRUE ~ DiffLabel))
###using the safety angle, depth, and player difference to label the window
dat1 <- dat1 %>%
mutate(Window = case_when(SafetyOver7Count == 2 & DiffLabel == "Two" & SafetyAngle <= 30 ~ 2,
SafetyOver7Count == 2 & DiffLabel == "Two" & SafetyAngle > 30 ~ 1,
SafetyOver7Count == 2 & DiffLabel == "One" & SafetyAngle <= 20 ~ 2,
SafetyOver7Count == 2 & DiffLabel == "One" & SafetyAngle > 20 ~ 1,
SafetyOver7Count == 2 & DiffLabel == "MixOne" & SafetyAngle <= 22.5 ~ 2,
SafetyOver7Count == 2 & DiffLabel == "MixOne" & SafetyAngle > 22.5 ~ 1,
SafetyOver7Count == 2 & DiffLabel == "MixTwo" & SafetyAngle <= 27.5 ~ 2,
SafetyOver7Count == 2 & DiffLabel == "MixTwo" & SafetyAngle > 27.5 ~ 1,
SafetyOver7Count == 1 ~ 1,
SafetyOver7Count == 0 ~ 0))
dat1 <- dat1 %>%
mutate(Window = case_when(SafetyOver7Count == 2 & LowDBX >= 12 & DiffLabel == "Two" & SafetyAngle <= 35 ~ 2,
SafetyOver7Count == 2 & LowDBX >= 12 & DiffLabel == "Two" & SafetyAngle > 35 ~ 1,
SafetyOver7Count == 2 & LowDBX >= 12 & DiffLabel == "One" & SafetyAngle <= 25 ~ 2,
SafetyOver7Count == 2 & LowDBX >= 12 & DiffLabel == "One" & SafetyAngle > 25 ~ 1,
SafetyOver7Count == 2 & LowDBX >= 12 & DiffLabel == "MixOne" & SafetyAngle <= 22.5 ~ 2,
SafetyOver7Count == 2 & LowDBX >= 12 & DiffLabel == "MixOne" & SafetyAngle > 22.5 ~ 1,
SafetyOver7Count == 2 & LowDBX >= 12 & DiffLabel == "MixTwo" & SafetyAngle <= 27.5 ~ 2,
SafetyOver7Count == 2 & LowDBX >= 12 & DiffLabel == "MixTwo" & SafetyAngle > 27.5 ~ 1,
TRUE ~ Window))
## How many coverage defenders?
dat1 <- dat1 %>%
group_by(gameId, playId, frameId) %>%
mutate(CoverageDefenders = sum(SecondAndHalfX > BallSnapX & OffDef == 'Defense'))
## Creating the shell
## corner depth (depth of column defenders)
#Left
dat1 <- dat1 %>%
group_by(gameId, playId) %>%
mutate(LeftCornerDepth = SnapX[match(1, LeftColumn)]) %>%
ungroup()
#Right
dat1 <- dat1 %>%
group_by(gameId, playId) %>%
mutate(RightCornerDepth = SnapX[match(1, RightColumn)]) %>%
ungroup()
##identify the shell
dat1 <- dat1 %>%
group_by(gameId, playId) %>%
mutate(Shell = case_when(Window == 0 ~ '0',
Window == 1 ~ '3',
Window == 2 & LeftCornerDepth < BallSnapX + 6 & RightCornerDepth < BallSnapX + 6 ~ '2',
Window == 2 & LeftCornerDepth >= BallSnapX + 6 & RightCornerDepth < BallSnapX + 6 ~ '6',
Window == 2 & LeftCornerDepth < BallSnapX + 6 & RightCornerDepth >= BallSnapX + 6 ~ '6',
Window == 2 & LeftCornerDepth >= BallSnapX + 6 & RightCornerDepth >= BallSnapX + 6 ~ '4'))
##identify positions redo
dat1 <- dat1 %>%
mutate(GamePosition = case_when(LeftColumn == 1 ~ "Corner",
RightColumn == 1 ~ "Corner",
Highest == "A" ~ "Safety",
Highest == "B" & Window == 2 ~ "Safety",
Highest == "B" & Window == 1 ~ "SafetyTBD",
Highest == "B" & Window == 0 ~ "Safety",
LeftAlley == 1 & Highest == "-" ~ "Conflict",
RightAlley == 1 & Highest == "-" ~ "Conflict",
LeftAlleyLine > SnapY & SnapY > RightAlleyLine & Highest == "-" ~ "Adjacent"))
# Determine SafetyTBD
dat1 <- dat1 %>%
mutate(GamePosition = case_when(GamePosition == "SafetyTBD" & LeftAlley==1 ~ "SafetyConflict",
GamePosition == "SafetyTBD" & RightAlley==1 ~ "SafetyConflict",
GamePosition == "SafetyTBD" & LeftAlleyLine > SnapY & SnapY > RightAlleyLine ~ "SafetyAdjacent",
TRUE ~ GamePosition))
# Remove offense and pass rushers
dat1 <- dat1 %>%
mutate(GamePosition = case_when(OffDef=="Offense" ~ "Offense",
PassRusher=="PassRusher" ~ "PassRusher",
TRUE ~ GamePosition))
#delete obsolete columns
dat1 <- dat1 %>%
select(-time, -SideDuring, -qb_team, -QBTeam, -uniqueplay, -L1OffY, -L2OffY, -L3OffY, -L4OffY,
-R1OffY, -R2OffY, -R3OffY, -R4OffY, -L1L2Diff, -R1R2Diff, -LeftSideline, -RightSideline,
-TightSplitLeft, -TightSplitRight, -LeftColumnDBCount, -RightColumnDBCount, -L2L3Diff,
-R2R3Diff, -L2OffInsideOT, -R2OffInsideOT, -LeftReceiverCount, -RightReceiverCount,
-L1InsideTE, -L2InsideTE, -L3InsideTE, -L4InsideTE, -R1InsideTE, -R2InsideTE, -R3InsideTE,
-R4InsideTE, -LeftReceiverCountOutsideTackle, -RightReceiverCountOutsideTackle, -LeftAlleyID,
-RightAlleyID, -LeftColumnHighest, -RightColumnHighest, -LeftColumnChute, -RightColumnChute,
-LeftColumnSafety, -RightColumnSafety, -LeftColumnWidest, -RightColumnWidest,
-NewLeftColumnCount, -LeftShield, -LeftOffColumnCount, -LeftOffAlleyCount, -LeftOffShield,
-LeftDBDiff, -NewRightColumnCount, -RightShield, -RightOffColumnCount, -RightOffAlleyCount,
-RightOffShield, -RightDBDiff)
return(dat1)
}
#
## Adding Play Data
## Adds the pre-snap play layers to a field plot: offense/defense/ball point
## markers plus the line of scrimmage (blue) and the first-down line (yellow).
# IMPORTANT: Names must be exact for this to work. Using prep_vis_data.r will ensure this.
AddsPlayData <- function(Offense, Defense, Ball, BallX, FirstDownLine) {
  offense_layer <- geom_point(data = Offense, mapping = aes(x = x, y = y),
                              shape = 1, size = 1.5, color = "orange")
  defense_layer <- geom_point(data = Defense, mapping = aes(x = x, y = y),
                              shape = 4, size = 1.5, color = "purple")
  ball_layer <- geom_point(data = Ball, mapping = aes(x = x, y = y),
                           shape = 21, fill = "brown", alpha = 1.0, size = 2)
  # Full-width vertical reference lines at the snap and first-down x-positions.
  scrimmage_layer <- geom_segment(aes(x = as.numeric(BallX), y = 0,
                                      xend = as.numeric(BallX), yend = 53.33),
                                  color = "blue", alpha = 1, size = 1)
  first_down_layer <- geom_segment(aes(x = as.numeric(FirstDownLine), y = 0,
                                       xend = as.numeric(FirstDownLine), yend = 53.33),
                                   color = "yellow", alpha = 1, size = 1)
  list(offense_layer, defense_layer, ball_layer, scrimmage_layer, first_down_layer)
}
#
## Adding Throw Data
## Adds the at-throw play layers: larger offense/defense/ball markers, the
## scrimmage and first-down reference lines, and two shaded rings centered on
## the targeted receiver's location.
AddsThrowData <- function(Offense_throw, Defense_throw, Ball_throw, BallX, FirstDownLine, target_player_location) {
  offense_layer <- geom_point(data = Offense_throw, mapping = aes(x = x, y = y),
                              shape = 1, size = 3, color = "orange")
  defense_layer <- geom_point(data = Defense_throw, mapping = aes(x = x, y = y),
                              shape = 4, size = 3, color = "purple")
  ball_layer <- geom_point(data = Ball_throw, mapping = aes(x = x, y = y),
                           shape = 21, fill = "brown", alpha = 1.0, size = 4)
  scrimmage_layer <- geom_segment(aes(x = as.numeric(BallX), y = 0,
                                      xend = as.numeric(BallX), yend = 53.33),
                                  color = "blue", alpha = 1, size = 1)
  first_down_layer <- geom_segment(aes(x = as.numeric(FirstDownLine), y = 0,
                                       xend = as.numeric(FirstDownLine), yend = 53.33),
                                   color = "yellow", alpha = 1, size = 1)
  # Concentric target zones; geom_circle is presumably ggforce's (loaded
  # elsewhere) -- TODO confirm. Inner ring is shaded slightly darker.
  outer_ring <- geom_circle(aes(x0 = target_player_location$x,
                                y0 = target_player_location$y, r = 10),
                            fill = "red", alpha = 0.15)
  inner_ring <- geom_circle(aes(x0 = target_player_location$x,
                                y0 = target_player_location$y, r = 5),
                            fill = "red", alpha = 0.25)
  list(offense_layer, defense_layer, ball_layer, scrimmage_layer,
       first_down_layer, outer_ring, inner_ring)
}
#
## Adding Shell ID Markers
## Overlays the shell-identification reference lines (left/right column and
## alley boundaries) and a text label summarizing the derived shell features.
##
## shell_features: data with columns BallSnapX, LeftColumnLine,
##   RightColumnLine, LeftAlleyLine, RightAlleyLine, Window, SafetyAngle,
##   Shell, CoverageDefenders.
## Returns a list of ggplot layers to add to a plot with "+".
AddShellMarkers <- function(shell_features) {
  # One horizontal reference line per boundary, drawn from the snap
  # x-position out to the defensive end of the field (x = 120). Generated in
  # a loop instead of four copy-pasted geom_segment() calls.
  boundary_cols <- c("LeftColumnLine", "RightColumnLine",
                     "LeftAlleyLine", "RightAlleyLine")
  boundary_layers <- lapply(boundary_cols, function(col) {
    geom_segment(aes(x = shell_features$BallSnapX,
                     y = shell_features[[col]],
                     xend = 120,
                     yend = shell_features[[col]]),
                 alpha = 0.5, color = "black")
  })
  # Summary label below the field. Bug fix: subset every column with [1] --
  # the original subset only Window and SafetyAngle, so with multi-row input
  # Shell/CoverageDefenders made paste() vectorize and stack duplicate labels.
  label_layer <- annotate(
    "label", x = 60, y = -5, size = 2,
    label = paste(
      "Window:", shell_features$Window[1],
      ", Safety Angle:", round(shell_features$SafetyAngle[1], 0),
      ", Shell:", shell_features$Shell[1],
      ", Coverage Defenders:", shell_features$CoverageDefenders[1]
    )
  )
  c(boundary_layers, list(label_layer))
}
#
# Code Based in Part On (https://www.kaggle.com/tombliss/additional-data-coverage-schemes-for-week-1?select=targetedReceiver.csv)
# Playing-field extent in plot coordinates: x spans the goal lines
# (10 to 110 yards), y spans the field width (53.33 yards).
xmin <- 10
xmax <- 110
ymin <- 0
ymax <- 160 / 3
# Hash-mark placement parameters (yards from each sideline, mark width).
hash.right <- 5
hash.left <- 5
hash.width <- 1
# One row per hash mark: an x position for every yard between the 1- and
# 99-yard lines (offset -0.12 for centering), crossed with four y rows --
# just inside each sideline and the two inbound hash rows.
df.hash <- expand.grid(x = c(11:109 - 0.12) , y = c(0+1, 23.36667-0.25, 29.96667-0.20, 160/3-1.6))
## Field overlay layers: the 5-yard lines, hash marks, axis limits, and the
## NFL logo at midfield. Returns a list of ggplot layers to add with "+".
## Reads "Data/nfl-logo.png" relative to the working directory (unchanged
## from the original) and uses the module-level df.hash for hash positions.
AddFieldLines <- function() {
  NFLlogo <- readPNG("Data/nfl-logo.png")
  NFLlogo <- rasterGrob(NFLlogo, interpolate = TRUE)
  # One vertical line per 5-yard increment between the goal lines; this
  # replaces 19 copy-pasted geom_segment() calls. Multiples of 10 yards are
  # drawn slightly heavier (0.4) and the 50-yard line heaviest (0.5).
  yard_xs <- seq(15, 105, by = 5)
  yard_sizes <- ifelse(yard_xs == 60, 0.5,
                       ifelse(yard_xs %% 10 == 0, 0.4, 0.3))
  yard_lines <- Map(function(x0, sz) {
    force(x0); force(sz) # pin values before lazy aes() capture
    geom_segment(aes(x = x0, y = 0, xend = x0, yend = 53.33),
                 color = "white", alpha = 1, size = sz)
  }, yard_xs, yard_sizes)
  c(yard_lines,
    list(
      xlim(-10, 130),
      ylim(-10, 63.3333),
      # Hash marks, split into the near-goal-line group (x < 10) and the rest.
      annotate("text", x = df.hash$x[df.hash$x < 20/2], color = "white", size = 2,
               y = df.hash$y[df.hash$x < 20/2], label = "|", hjust = 0, vjust = -0),
      annotate("text", x = df.hash$x[df.hash$x > 20/2], color = "white", size = 2,
               y = df.hash$y[df.hash$x > 20/2], label = "|", hjust = 0, vjust = -0),
      annotation_custom(NFLlogo, xmin = 56.9, xmax = 62.9, ymin = 24, ymax = 29.5)
    ))
}
## Paints the field background: green playing surface plus blue end zones.
## The x argument is unused but kept for interface compatibility.
AddFieldColor <- function(x) {
  field <- geom_rect(mapping = aes(xmin = 0, xmax = 120, ymin = 0, ymax = 53.33),
                     color = "black", fill = "#00614c", alpha = 0.7)
  left_endzone <- geom_rect(mapping = aes(xmin = 0, xmax = 10, ymin = 0, ymax = 53.33),
                            color = "black", fill = "#2F3BE4", alpha = 0.4)
  right_endzone <- geom_rect(mapping = aes(xmin = 110, xmax = 120, ymin = 0, ymax = 53.33),
                             color = "black", fill = "#2F3BE4", alpha = 0.4)
  list(field, left_endzone, right_endzone)
}
## Yard numbers along the near sideline (y = 5), mirrored around the 50:
## 10, 20, 30, 40, 50, 40, 30, 20, 10, rendered with NFL-style spaced digits
## ("1 0", "2 0", ...). Generated in a loop instead of nine copy-pasted
## annotate() calls. The x argument is unused but kept for interface
## compatibility. Returns a list of ggplot layers.
AddYardNumbers <- function(x) {
  xs <- seq(20, 100, by = 10)
  # Tens digit: counts up to the 50 (x = 60), then mirrors back down.
  tens <- ifelse(xs <= 60, xs / 10 - 1, (120 - xs) / 10 - 1)
  labels <- paste(tens, "0")
  lapply(seq_along(xs), function(i) {
    annotate(geom = "text", x = xs[i], y = 5, label = labels[i], color = "white")
  })
}
## Yard numbers for the far sideline (y = 48.33), rotated 180 degrees so the
## digits read correctly from the opposite side of the field. Same mirrored
## 10..50..10 sequence with NFL-style spaced digits, generated in a loop
## instead of nine copy-pasted annotate() calls. The x argument is unused but
## kept for interface compatibility. Returns a list of ggplot layers.
AddUpsideDownYardNumbers <- function(x) {
  xs <- seq(20, 100, by = 10)
  # Tens digit: counts up to the 50 (x = 60), then mirrors back down.
  tens <- ifelse(xs <= 60, xs / 10 - 1, (120 - xs) / 10 - 1)
  labels <- paste(tens, "0")
  lapply(seq_along(xs), function(i) {
    annotate(geom = "text", x = xs[i], y = 48.33, label = labels[i],
             color = "white", angle = 180)
  })
}
## Labels the two end zones: "OFFENSE" on the left (rotated to read upward)
## and "DEFENSE" on the right (rotated to read downward). The x argument is
## unused but kept for interface compatibility.
AddEndzones <- function(x) {
  offense_label <- annotate(geom = "text", x = 5, y = 27, label = "OFFENSE",
                            color = "white", angle = 90)
  defense_label <- annotate(geom = "text", x = 115, y = 27, label = "DEFENSE",
                            color = "white", angle = 270)
  list(offense_label, defense_label)
}
#
## Clear Figure Background
## Strips ggplot's default decorations: white background theme, no panel
## border, and no major/minor grid lines. Returned as a list of three theme
## layers to add to a plot with "+".
ClearFigBackground <- function(){
  base_theme <- theme_bw()
  no_border <- theme(panel.border = element_blank())
  no_grid <- theme(panel.grid.major = element_blank(),
                   panel.grid.minor = element_blank())
  list(base_theme, no_border, no_grid)
}
#
## Add a Circle (https://stackoverflow.com/questions/6862742/draw-a-circle-with-ggplot2)
## Build the vertices of a circle as a data frame, suitable for drawing with
## geom_path()/geom_polygon(). Generates npoints equally spaced points; the
## first and last rows coincide (angles 0 and 2*pi), which closes the outline
## when plotted.
## (https://stackoverflow.com/questions/6862742/draw-a-circle-with-ggplot2)
##
## center:   numeric length-2 vector, circle center (x, y).
## diameter: circle diameter.
## npoints:  number of vertices to generate.
## Returns a data.frame with columns x and y, one row per vertex.
createCircle <- function(center = c(0, 0), diameter = 1, npoints = 100) {
  r <- diameter / 2  # was assigned with "=", the non-idiomatic form
  tt <- seq(0, 2 * pi, length.out = npoints)
  data.frame(
    x = center[1] + r * cos(tt),
    y = center[2] + r * sin(tt)
  )
}
|
# Plot 1: histogram of Global Active Power saved as a 480x480 PNG.
summdata <- "ec/newecdata.rds"
# Bug fix: the original called readRDS(data), but no object named `data` is
# defined in this script (the name resolves to the utils::data function), so
# readRDS() fails. The intended input is the path stored in summdata.
ecdata <- readRDS(summdata)
png("ec/plot1.png", width = 480, height = 480)
hist(ecdata$Global_active_power, main = "Global Active Power", xlab = "Global Active Power (kilowatts)", ylab = "Frequency", col = "red")
dev.off() | /plot1.R | no_license | rajathithan/ExData_Plotting1 | R | false | false | 249 | r | summdata <- "ec/newecdata.rds"
ecdata <- readRDS(data)
png("ec/plot1.png", width = 480, height = 480)
hist(ecdata$Global_active_power, main = "Global Active Power", xlab = "Global Active Power (kilowatts)", ylab = "Frequency", col = "red")
dev.off() |
# Fit a cross-validated elastic-net model (alpha = 0.03) to the liver
# training set and append the fitted coefficient path to this run's log file.
library(glmnet)
# header = TRUE spelled out: the original used the fragile partially matched
# form `head = T`, where T is a reassignable alias for TRUE.
mydata <- read.table("./TrainingSet/LassoBIC/liver.csv", header = TRUE, sep = ",")
# Column 1 is the response; columns 4+ are the predictors.
x <- as.matrix(mydata[, 4:ncol(mydata)])
y <- as.matrix(mydata[, 1])
# Fixed seed so the 10-fold CV split is reproducible.
set.seed(123)
glm <- cv.glmnet(x, y, nfolds = 10, type.measure = "mse", alpha = 0.03,
                 family = "gaussian", standardize = FALSE)
# Divert console output to the run's log file, print the path, then restore.
sink('./Model/EN/Lasso/liver/liver_014.txt', append = TRUE)
print(glm$glmnet.fit)
sink()
| /Model/EN/Lasso/liver/liver_014.R | no_license | leon1003/QSMART | R | false | false | 349 | r | library(glmnet)
mydata = read.table("./TrainingSet/LassoBIC/liver.csv",head=T,sep=",")
x = as.matrix(mydata[,4:ncol(mydata)])
y = as.matrix(mydata[,1])
set.seed(123)
glm = cv.glmnet(x,y,nfolds=10,type.measure="mse",alpha=0.03,family="gaussian",standardize=FALSE)
sink('./Model/EN/Lasso/liver/liver_014.txt',append=TRUE)
print(glm$glmnet.fit)
sink()
|
# internal constant
phen_names <- c('fam', 'id', 'pheno')
#' Read *.phen files
#'
#' This function reads a standard *.phen file into a tibble.
#' It uses readr::read_table2 to do it efficiently.
#' GCTA and EMMAX use this format.
#'
#' @param file Input file (whatever is accepted by readr::read_table2).
#' If file as given does not exist and is missing the expected *.phen extension, the function adds the .phen extension and uses that path if that file exists.
#' Additionally, the .gz extension is added automatically if the file (after *.phen extension is added as needed) is still not found and did not already contained the .gz extension and adding it points to an existing file.
#' @param verbose If TRUE (default) function reports the path of the file being loaded (after autocompleting the extensions).
#'
#' @return A tibble with columns: fam, id, pheno.
#'
#' @examples
#' # read an existing plink *.phen file
#' file <- system.file("extdata", 'sample.phen', package = "genio", mustWork = TRUE)
#' phen <- read_phen(file)
#' phen
#'
#' # can specify without extension
#' file <- sub('\\.phen$', '', file) # remove extension from this path on purpose
#' file # verify .phen is missing
#' phen <- read_phen(file) # load it anyway!
#' phen
#'
#' @seealso
#' GCTA PHEN format reference:
#' \url{https://cnsgenomics.com/software/gcta/#GREMLanalysis}
#'
#' @export
read_phen <- function(file, verbose = TRUE) {
  # All of the heavy lifting -- extension autocompletion (.phen, .gz),
  # reading, and column naming -- happens in the shared generic reader; this
  # wrapper only pins down the PHEN-specific settings.
  phen_spec <- list(
    file = file,
    ext = 'phen',
    tib_names = phen_names, # fam, id, pheno
    col_types = 'ccd',      # character fam/id, double phenotype
    verbose = verbose
  )
  do.call(read_tab_generic, phen_spec)
}
| /fuzzedpackages/genio/R/read_phen.R | no_license | akhikolla/testpackages | R | false | false | 1,622 | r | # internal constant
phen_names <- c('fam', 'id', 'pheno')
#' Read *.phen files
#'
#' This function reads a standard *.phen file into a tibble.
#' It uses readr::read_table2 to do it efficiently.
#' GCTA and EMMAX use this format.
#'
#' @param file Input file (whatever is accepted by readr::read_table2).
#' If file as given does not exist and is missing the expected *.phen extension, the function adds the .phen extension and uses that path if that file exists.
#' Additionally, the .gz extension is added automatically if the file (after *.phen extension is added as needed) is still not found and did not already contained the .gz extension and adding it points to an existing file.
#' @param verbose If TRUE (default) function reports the path of the file being loaded (after autocompleting the extensions).
#'
#' @return A tibble with columns: fam, id, pheno.
#'
#' @examples
#' # read an existing plink *.phen file
#' file <- system.file("extdata", 'sample.phen', package = "genio", mustWork = TRUE)
#' phen <- read_phen(file)
#' phen
#'
#' # can specify without extension
#' file <- sub('\\.phen$', '', file) # remove extension from this path on purpose
#' file # verify .phen is missing
#' phen <- read_phen(file) # load it anyway!
#' phen
#'
#' @seealso
#' GCTA PHEN format reference:
#' \url{https://cnsgenomics.com/software/gcta/#GREMLanalysis}
#'
#' @export
read_phen <- function(file, verbose = TRUE) {
# this generic reader does all the magic
read_tab_generic(
file = file,
ext = 'phen',
tib_names = phen_names,
col_types = 'ccd',
verbose = verbose
)
}
|
##########################################################################################
# filename: PhDThesis_get-first-sentence-from-title-text.R
# Modified from: D:\Now\library_genetics_epidemiology\Chang_PhD_thesis\scripts\PhDThesis99_rename_files_for_thesis.R
# program author: Chang
# purpose: Get first sentences from supplementary table titles
# date created: 20190725
# file directory:
# note:
# ref:
#-----------------------------------------------------------------------------------------
# Type FilePath
# Input paste0(dir.scripts,"supp-table_title_text.csv")
# Outpu paste0(dir.scripts,"supp-table_table-of-contents.tsv")
#-----------------------------------------------------------------------------------------
# CHANGE HISTORY :
#-----------------------------------------------------------------------------------------
# Sys.time() Update
#-----------------------------------------------------------------------------------------
# 20191007 Exported supp-table_table-of-contents.tsv
# 20190920 Exported supp-table_table-of-contents.tsv
# 20190728 Exported supp-table_table-of-contents.tsv
#-----------------------------------------------------------------------------------------
# Main local folders
localMainDir <- "D:/Now/library_genetics_epidemiology/"
dir.scripts <- paste0(localMainDir,"Chang_PhD_thesis/scripts/")
dir.R.functions <- paste0(localMainDir,"/GWAS/scripts/RFunctions/")
# Import external functions
source(paste0(dir.R.functions,"RFunction_import_export_single_file.R"))
# Import thesis supplementary table title CSV file. Values are in double quotes
ImportACSVValuesInDoubleQuotes(input.file.path = paste0(dir.scripts,"supp-table_title_text.csv")
                               ,data.name = "data") # dim(data) 19 4
# Clean the imported header: drop the prefix "X.." and the suffix "..".
# FIX: glob2rx("X..|..") produced the fully anchored literal pattern
# "^X\\.\\.\\|\\.\\.$" (the "|" is escaped), which matched nothing, so the
# prefix/suffix were never stripped.  Use the intended regex alternation.
colnames(data) <- gsub("^X\\.\\.|\\.\\.$", "", colnames(data))
# Collect one table-of-contents row per input row in a preallocated list and
# bind once at the end (avoids the O(n^2) rbind-in-loop of the original).
toc.rows <- vector("list", nrow(data))
# Get first sentence from the column text
for (i in seq_len(nrow(data))){
  study.code <- data[i,"study_code"]
  manuscript.section <- data[i,"manuscript_section"] # read but unused downstream
  item.order <- data[i,"item_order"]
  text <- data[i,"text"]
  # Manipulate string in the text column: (1) delete the markup prefix
  # "^{style [fontweight=bold]" (backslashes escape the regex metacharacters),
  # (2) replace "R^{unicode 00B2}" with "R-squared" as unicodes cause errors
  # in the SAS table of contents, (3) drop the closing ".} " marker.
  s1 <- gsub(text, pattern="\\^\\{style \\[fontweight\\=bold\\]",replacement = "")
  s2 <- gsub(s1,pattern="R\\^\\{unicode 00B2\\}",replacement = "R-squared")
  s3 <- gsub(s2,pattern=".\\} ",replacement = ". ")
  # Get the first two sentences for the table of contents in SAS.
  # Split once and reuse (the original split the same string twice).
  frases <- unlist(stringr::str_split(s3, "[.]"))
  first.sentence <- paste0(frases[1],".",frases[2],".","\"")
  # Store the result as a temporary data.frame row.
  toc.rows[[i]] <- data.frame(study_code= study.code
                              ,manuscript_section= "\"suppTableContent\""
                              ,item_order=item.order
                              ,TOC_text=first.sentence
                              ,stringsAsFactors = F)
}
base.data <- do.call(rbind, toc.rows)
# dim(base.data) 19 4
# Export data as a TSV file in a format similar to the imported file
write.table(base.data
            ,file=paste0(dir.scripts,"supp-table_table-of-contents.tsv")
            ,row.names = F
            ,sep = "\t"
            ,quote=F)
#-----------------------------------------------------------------------------------------#
#------------------------This is the end of this file-------------------------------------#
#-----------------------------------------------------------------------------------------# | /scripts_master/PhDThesis_get-first-sentence-from-title-text.R | no_license | luenhchang/PhD_reporting-with-SAS-tables | R | false | false | 3,930 | r | ##########################################################################################
# filename: PhDThesis_get-first-sentence-from-title-text.R
# Modified from: D:\Now\library_genetics_epidemiology\Chang_PhD_thesis\scripts\PhDThesis99_rename_files_for_thesis.R
# program author: Chang
# purpose: Get first sentences from supplementary table titles
# date created: 20190725
# file directory:
# note:
# ref:
#-----------------------------------------------------------------------------------------
# Type FilePath
# Input paste0(dir.scripts,"supp-table_title_text.csv")
# Outpu paste0(dir.scripts,"supp-table_table-of-contents.tsv")
#-----------------------------------------------------------------------------------------
# CHANGE HISTORY :
#-----------------------------------------------------------------------------------------
# Sys.time() Update
#-----------------------------------------------------------------------------------------
# 20191007 Exported supp-table_table-of-contents.tsv
# 20190920 Exported supp-table_table-of-contents.tsv
# 20190728 Exported supp-table_table-of-contents.tsv
#-----------------------------------------------------------------------------------------
# Main local folders
localMainDir <- "D:/Now/library_genetics_epidemiology/"
dir.scripts <- paste0(localMainDir,"Chang_PhD_thesis/scripts/")
dir.R.functions <- paste0(localMainDir,"/GWAS/scripts/RFunctions/")
# Import external functions
source(paste0(dir.R.functions,"RFunction_import_export_single_file.R"))
# Import thesis supplementary table title CSV file. Values are in double quotes
ImportACSVValuesInDoubleQuotes(input.file.path = paste0(dir.scripts,"supp-table_title_text.csv")
                               ,data.name = "data") # dim(data) 19 4
# Clean the imported header: drop the prefix "X.." and the suffix "..".
# FIX: glob2rx("X..|..") produced the fully anchored literal pattern
# "^X\\.\\.\\|\\.\\.$" (the "|" is escaped), which matched nothing, so the
# prefix/suffix were never stripped.  Use the intended regex alternation.
colnames(data) <- gsub("^X\\.\\.|\\.\\.$", "", colnames(data))
# Collect one table-of-contents row per input row in a preallocated list and
# bind once at the end (avoids the O(n^2) rbind-in-loop of the original).
toc.rows <- vector("list", nrow(data))
# Get first sentence from the column text
for (i in seq_len(nrow(data))){
  study.code <- data[i,"study_code"]
  manuscript.section <- data[i,"manuscript_section"] # read but unused downstream
  item.order <- data[i,"item_order"]
  text <- data[i,"text"]
  # Manipulate string in the text column: (1) delete the markup prefix
  # "^{style [fontweight=bold]" (backslashes escape the regex metacharacters),
  # (2) replace "R^{unicode 00B2}" with "R-squared" as unicodes cause errors
  # in the SAS table of contents, (3) drop the closing ".} " marker.
  s1 <- gsub(text, pattern="\\^\\{style \\[fontweight\\=bold\\]",replacement = "")
  s2 <- gsub(s1,pattern="R\\^\\{unicode 00B2\\}",replacement = "R-squared")
  s3 <- gsub(s2,pattern=".\\} ",replacement = ". ")
  # Get the first two sentences for the table of contents in SAS.
  # Split once and reuse (the original split the same string twice).
  frases <- unlist(stringr::str_split(s3, "[.]"))
  first.sentence <- paste0(frases[1],".",frases[2],".","\"")
  # Store the result as a temporary data.frame row.
  toc.rows[[i]] <- data.frame(study_code= study.code
                              ,manuscript_section= "\"suppTableContent\""
                              ,item_order=item.order
                              ,TOC_text=first.sentence
                              ,stringsAsFactors = F)
}
base.data <- do.call(rbind, toc.rows)
# dim(base.data) 19 4
# Export data as a TSV file in a format similar to the imported file
write.table(base.data
            ,file=paste0(dir.scripts,"supp-table_table-of-contents.tsv")
            ,row.names = F
            ,sep = "\t"
            ,quote=F)
#-----------------------------------------------------------------------------------------#
#------------------------This is the end of this file-------------------------------------#
#-----------------------------------------------------------------------------------------# |
imi.t.test.more <- function(data.miss,data.imp0,max.M=500,epsilon,method='mvn',
  x, y = NULL, alternative='two.sided',mu,paired = FALSE, var.equal = FALSE,
  conf.level = 0.95,conv.plot=TRUE,successive.valid=3,
  print.progress=TRUE){
  # Iteratively add single imputations (Amelia when method == 'mvn', mice
  # otherwise, mice defaults when method == 'auto') until the pooled
  # multiple-imputation t-test stabilises: the loop stops once the last
  # `successive.valid` successive changes in the pooled p-value all fall
  # below `epsilon`, or once `max.M` imputations have been reached.
  #
  # Args:
  #   data.miss        data set with missing values, used to draw imputations.
  #   data.imp0        list of initial imputed data sets.
  #   max.M            hard cap on the total number of imputations.
  #   epsilon          convergence threshold on successive p-value changes.
  #   method           'mvn' (Amelia), 'auto' (mice defaults), or any method
  #                    name accepted by mice().
  #   x, y, alternative, mu, paired, var.equal, conf.level
  #                    passed straight through to mi.t.test().
  #   conv.plot        if TRUE, plot the distance trace at the end.
  #   successive.valid number of successive sub-epsilon changes required to
  #                    declare convergence (positive integer).
  #   print.progress   if TRUE, emit progress messages.
  #
  # Returns a list: test.result (last pooled test), data.imp (all imputed
  # data sets), dis.steps (trace of p-value changes), conv.status (1 when
  # converged, 0 otherwise), and M (number of imputations used).
  if (length(mu)>1 ){
    stop('Please give a scalar mu')
  }
  # successive.valid must be a positive integer (|| short-circuits so the
  # numeric comparisons are never applied to a character value).
  if (is.character(successive.valid) || successive.valid < 1 ||
      floor(successive.valid) != successive.valid){
    stop('Please enter a positive integer successive.valid')
  }
  M <- length(data.imp0)
  if (isTRUE(print.progress)){
    cat('We are working on performing the test on the initial imputed datasets.',fill = TRUE)
  }
  # Pooled test on the initial set of imputations.
  comb.mi0 <- mi.t.test(data.imp0, x=x, y = y, alternative = alternative,
    mu = mu, paired = paired, var.equal = var.equal, conf.level = conf.level)
  data.imp <- data.imp0
  # Seed the "recent distances" window above epsilon so the loop runs at
  # least once; dis.all starts with a placeholder 0 that is dropped later.
  dis.extra <- epsilon + 1
  dis.all <- 0
  while (sum(dis.extra > epsilon) > 0 && M < max.M){
    M <- M + 1
    # FIX: the original compared the logical flag against the string 'TRUE'.
    if (isTRUE(print.progress)){
      cat(paste('We are working on imputed datasets number ',M),fill = TRUE)
    }
    # Draw one additional single imputation with the requested engine.
    if (method == 'mvn'){
      data.imp[[M]] <- amelia(data.miss, m = 1, p2s = 0)$imputations$imp1
    } else if (method == 'auto'){
      data.imp[[M]] <- complete(mice(data.miss, m = 1, print = FALSE), action = 1)
    } else {
      data.imp[[M]] <- complete(mice(data.miss, m = 1, method = method, print = FALSE), action = 1)
    }
    # Re-pool the test including the new imputation.
    comb.mi1 <- mi.t.test(data.imp, x=x, y = y, alternative = alternative,
      mu = mu, paired = paired, var.equal = var.equal, conf.level = conf.level)
    # Absolute change of the pooled p-value (the original's sqrt((.)^2)).
    dis <- abs(comb.mi1$p.value - comb.mi0$p.value)
    comb.mi0 <- comb.mi1
    dis.all <- c(dis.all, dis)
    dis.extra <- tail(dis.all, n = successive.valid)
  }
  dis.all <- dis.all[-1]  # drop the placeholder used to seed the trace
  if (conv.plot==TRUE){
    plot(dis.all,xlab='Number of imputations',ylab='Distance')
  }
  conv.status <- 0
  if (sum(dis.extra < epsilon) > 0){
    conv.status <- 1
  }
  if (isTRUE(print.progress)){
    if (conv.status==1){
      cat(paste('We are done! The convergence is achieved and the sufficient number of imputations is ',M),
        fill = TRUE)
    }
    if (conv.status==0){
      cat('The convergence could not be achieved, please increase max.M or decrease epsilon or the
      number of successive validations steps.',
        fill = TRUE)
    }
  }
  return(list(test.result=comb.mi1,data.imp=data.imp,dis.steps=dis.all,conv.status=conv.status,M=M))
}
| /R/imi.t.test.more.R | no_license | vahidnassiri/imi | R | false | false | 3,561 | r | imi.t.test.more <- function(data.miss,data.imp0,max.M=500,epsilon,method='mvn',
x, y = NULL, alternative='two.sided',mu,paired = FALSE, var.equal = FALSE,
conf.level = 0.95,conv.plot=TRUE,successive.valid=3,
print.progress=TRUE){
# dis.measure= "p.value", "half.ci", "df".
# possible values for method:
# all possible things in MICE,
# in case mvn is chosen, then it switchs to amelia.
# possibilities for dis.method and mah.scale
# possible methods for distance
# dis.method='euclidean'
# dis.method='inf.norm'
# dis.method='mahalanobis'
# # possible methods for scale in Mahalanobis
# mah.scale='within'
# mah.scale='between'
# mah.scale='combined'
# successive.valid: length of distances successively smaller than epsilon to stop, minimum is zero
if (length(mu)>1 ){
stop('Please give a scalar mu')
}
# successive.valid: length of distances successively smaller than epsilon to stop, minimum is 1
if (is.character(successive.valid)==FALSE){
if (successive.valid<1 | floor(successive.valid)!=successive.valid){
stop('Please enter a postive integer successive.valid')
}
}
if (is.character(successive.valid)==TRUE){
stop('Please enter a postive integer successive.valid')
}
# selecting the x part
#miss.x=data.miss[-which(names(data.miss)==resp),]
M=length(data.imp0)
if (print.progress==TRUE){
cat('We are working on performing the test on the initial imputed datasets.',fill = TRUE)
}
comb.mi0=mi.t.test(data.imp0, x=x, y = y, alternative = alternative,
mu = mu, paired = paired, var.equal = var.equal, conf.level = conf.level)
data.imp=data.imp0
#max.M=500
dis.extra=epsilon+1
dis.all=0
while(sum(dis.extra>epsilon)>0 & M<max.M){
M=M+1
if (print.progress=='TRUE'){
cat(paste('We are working on imputed datasets number ',M),fill = TRUE)
}
if (method=='mvn'){
data.imp1=amelia(data.miss,m=1,p2s =0)$imputations$imp1
data.imp[[M]]=data.imp1
}
if (method!='mvn' & method!='auto'){
data.imp1_ini=mice(data.miss,m=1,method=method,print=FALSE)
data.imp1=complete(data.imp1_ini, action = 1)
data.imp[[M]]=data.imp1
}
if (method=='auto'){
data.imp1_ini=mice(data.miss,m=1,print=FALSE)
data.imp1=complete(data.imp1_ini, action = 1)
data.imp[[M]]=data.imp1
}
# now fit the model to the new imputed data
comb.mi1=mi.t.test(data.imp, x=x, y = y, alternative = alternative,
mu = mu, paired = paired, var.equal = var.equal, conf.level = conf.level)
# now compute the distance between those two based on the specified method
dis=sqrt((comb.mi1$p.value-comb.mi0$p.value)^2)
comb.mi0=comb.mi1
dis.all=c(dis.all,dis)
dis.extra=tail(dis.all,n=successive.valid)
}
dis.all=dis.all[-1]
if (conv.plot==TRUE){
plot(dis.all,xlab='Number of imputations',ylab='Distance')
}
conv.status=0
if (sum(dis.extra<epsilon)>0){
conv.status=1
}
if (print.progress==TRUE){
if (conv.status==1){
cat(paste('We are done! The convergence is acheived and the sufficient number of imputations is ',M),
fill = TRUE)
}
if (conv.status==0){
cat('The convergence could not be achieved, please increase max.M or deacrese espsilon or
number of successive validations steps.',
fill = TRUE)
}
}
return(list(test.result=comb.mi1,data.imp=data.imp,dis.steps=dis.all,conv.status=conv.status,M=M))
}
|
# myfile.R
#* @get /mean
normalMean <- function(samples=10){
  # Mean of `samples` standard-normal draws.  Plumber delivers query-string
  # parameters as character strings, so coerce before sampling (consistent
  # with addTwo below); the numeric default is unaffected by as.numeric().
  samples <- as.numeric(samples)
  data <- rnorm(samples)
  mean(data)
}
#* @get /sum
addTwo <- function(a, b){
  # Coerce first: plumber hands query-string parameters over as strings.
  parcela.a <- as.numeric(a)
  parcela.b <- as.numeric(b)
  parcela.a + parcela.b
}
| /myfile.R | no_license | miguel-conde/plumber | R | false | false | 176 | r | # myfile.R
#* @get /mean
normalMean <- function(samples=10){
  # Mean of `samples` standard-normal draws.  Plumber delivers query-string
  # parameters as character strings, so coerce before sampling (consistent
  # with addTwo below); the numeric default is unaffected by as.numeric().
  samples <- as.numeric(samples)
  data <- rnorm(samples)
  mean(data)
}
#* @get /sum
addTwo <- function(a, b){
  # Coerce first: plumber hands query-string parameters over as strings.
  parcela.a <- as.numeric(a)
  parcela.b <- as.numeric(b)
  parcela.a + parcela.b
}
|
abrirArquivo <- function(fn = NULL){
    # Open a saved portfolio (.RData): prompt for a file when none is given,
    # replace the current global `carteira`, rebuild one notebook tab per
    # symbol, refresh series marked "keep updated", and redraw each chart.
    if(is.null(fn)){
        setStatusbar('Escolha o arquivo...')
        fn <- openFileDialog()
    }
    if((length(fn) > 0) && file.exists(fn)){
        setStatusbar('Carregando carteira...')
        # Remove every notebook tab left over from the previous portfolio.
        n <- gtkNotebookGetNPages(nb)
        for(i in seq_len(n)){
            gtkNotebookRemovePage(nb, 0)
        }
        # FIX: the original called rm(carteira) without envir=, which tried
        # to remove a (non-existent) local binding instead of the global one.
        if(exists('carteira', globalenv())){ rm(carteira, envir = globalenv()) }
        load(fn, envir = globalenv())
        changed <<- FALSE
        setFilename(fn)
        gerenciaRecentes(filename)
        setTitleJanelaPrincipal(filename)
        tot <- length(ls(carteira))
        i <- 1
        for(simbolo in ls(carteira)){
            # One drawing area per symbol, with pointer tracking for the
            # candle-info tooltip.
            da <- gtkDrawingArea(show=FALSE)
            gSignalConnect(da, 'motion-notify-event', callbackPointerInfo)
            da$setEvents(GdkEventMask['pointer-motion-mask'] + GdkEventMask['pointer-motion-hint-mask'])
            asCairoDevice(da)
            gtkNotebookAppendPage(nb, da, gtkLabel(simbolo))
            # Refresh series flagged for auto-update when online.
            if(attr(carteira[[simbolo]], 'manterAtualizado') && checkInternetConnection()){
                z <- getZoomSimbolo(simbolo)
                de <- z[1]
                ate <- as.character(as.Date(Sys.time()))
                getSimbolo(simbolo, de, ate, TRUE)
                changed <<- TRUE
            }
            analise.tecnica(simbolo)
            da$show()
            setStatusbar('Carregando carteira: %s%%', 100*i/tot)
            # FIX: i was never incremented, so progress was stuck at 1/tot.
            i <- i + 1
            Sys.sleep(.5)
        }
    } else {
        setStatusbar(paste('Carregamento cancelado.', 'Arquivo inexistente.', collapse=' '))
    }
}
analise.tecnica <- function(simbolo, da){
    # Redraw the technical-analysis chart for `simbolo` over its current zoom
    # window, attaching the studies enabled in the global `statusEstudos`
    # (or a full default set when that flag vector does not exist).
    Sys.sleep(.1)
    de <- as.Date(attr(carteira[[simbolo]], 'zoom.de'))
    ate <- as.Date(attr(carteira[[simbolo]], 'zoom.ate'))
    # Support and resistance levels for the charted series.
    p <- pivot(carteira[[simbolo]])
    attr(carteira[[simbolo]],'pivot') <- p
    # Build the TA expression string, one ";"-separated study at a time.
    estudos <- ''
    if(exists('statusEstudos')){
        if(statusEstudos['opcaoVF']){
            estudos <- sprintf("%s;addVo()", estudos)
        }
        if(statusEstudos['opcaoSupRes']){
            estudos <- sprintf("%s;addTA(Resistencia1(), on = 1, col='red', lwd=2);addTA(Suporte1(), on = 1, col='green', lwd=2)", estudos)
        }
        if(statusEstudos['opcaoBB']){
            estudos <- sprintf("%s;addTA(xts(matrix(BBands(HLC(quantmod:::get.current.chob()@xdata),n=20)[,c(1,3)], ncol=2,dimnames=list(NULL,c('Bollinger Sup 20','Bollinger Inf 20'))),index(quantmod:::get.current.chob()@xdata)), on = 1, col=c('gray','gray'), lwd=c(2,2))", estudos)
        }
        if(statusEstudos['opcaoMME']){
            estudos <- sprintf("%s;addEMA(9, col = 'cyan');addEMA(21, col = 'red')", estudos)
        }
        if(statusEstudos['opcaoMMVF'] && statusEstudos['opcaoVF']){
            estudos <- sprintf("%s;addTA(SMA(Vo(quantmod:::get.current.chob()@xdata)/quantmod:::get.current.chob()@passed.args$TA[[1]]@params$vol.scale[[1]], n = 21), legend = 'MMA 21', on = 2, col=c('cyan'), lwd=2)", estudos)
        }
        if(statusEstudos['opcaoIFR']){
            estudos <- sprintf("%s;addTA(RSI(Cl(quantmod:::get.current.chob()@xdata),n=2), yrange = c(0,100), legend = paste(paste(rep(' ', 30), collapse=''), 'IFR 2'),col='blue', lwd=2);addTA(ifr.lim.sup(), legend='', on = 3, col='red');addTA(ifr.lim.med(), legend='', on = 3, col='black');addTA(ifr.lim.inf(), legend='', on = 3, col='green')", estudos)
        }
        if(statusEstudos['opcaoChAD']){
            estudos <- sprintf("%s;addChAD()", estudos)
        }
        if(statusEstudos['opcaoDPO']){
            estudos <- sprintf("%s;addDPO()", estudos)
        }
        if(estudos == ''){ estudos <- NULL }
        estudos <- gsub('^ *;', '', estudos)  # drop the leading separator
    } else {
        estudos <- "addVo();addTA(Resistencia1(), on = 1, col='red', lwd=2);addTA(Suporte1(), on = 1, col='green', lwd=2);addTA(xts(matrix(BBands(HLC(quantmod:::get.current.chob()@xdata),n=20)[,c(1,3)], ncol=2,dimnames=list(NULL,c('Bollinger Sup 20','Bollinger Inf 20'))),index(quantmod:::get.current.chob()@xdata)), on = 1, col=c('gray','gray'), lwd=c(2,2));addEMA(9, col = 'cyan');addEMA(21, col = 'red');addTA(SMA(Vo(quantmod:::get.current.chob()@xdata)/quantmod:::get.current.chob()@passed.args$TA[[1]]@params$vol.scale[[1]], n = 21), legend = 'MMA 21', on = 2, col=c('cyan'), lwd=2);addTA(RSI(Cl(quantmod:::get.current.chob()@xdata),n=2), yrange = c(0,100), legend = paste(paste(rep(' ', 30), collapse=''), 'IFR 2'),col='blue', lwd=2);addTA(ifr.lim.sup(), legend='', on = 3, col='red');addTA(ifr.lim.med(), legend='', on = 3, col='black');addTA(ifr.lim.inf(), legend='', on = 3, col='green');addChAD();"
    }
    chartSeries(carteira[[simbolo]], type='candlesticks', theme = 'white', multi.col=FALSE, show.grid=FALSE, name = simbolo,
            subset=paste(de,ate, sep='::'),
            TA=estudos)
    # FIX: abrirArquivo() calls analise.tecnica(simbolo) with no drawing
    # area, so evaluating `da` here raised "argument 'da' is missing".
    # Only reset the cursor when a drawing area was actually supplied.
    if(!missing(da)){
        gdkWindowSetCursor(gtkWidgetGetWindow(da),gdkCursorNewFromName(gtkWidgetGetDisplay(da), "default"))
    }
}
checkInternetConnection <- function(){
  # Connectivity is assumed available: the real DNS probe below was
  # deliberately disabled by the author and the function always reports TRUE.
  #!is.null(suppressWarnings(nsl('br.finance.yahoo.com')))
  TRUE
}
gerenciaRecentes <- function(fn){
  # Maintain the "recent portfolios" submenu: insert `fn` at the top, remove
  # any stale duplicate entry, and trim the list to NRECENTES items.
  submenu <- gtkMenuItemGetSubmenu(gtkMenuGetAttachWidget(subMenuCarteiraRecentes))
  # Locate an existing entry for fn, if any.
  insert.pos <- 0
  # FIX: seq_along() instead of 1:length(); with an empty submenu the old
  # 1:0 sequence iterated over c(1, 0) and indexed out of range.
  for(i in seq_along(submenu$getChildren())){
    if(submenu[[i]]$label == fn){
      insert.pos <- i
      break
    }
  }
  mi <- gtkImageMenuItem(fn)
  subMenuCarteiraRecentes$insert(mi,0)
  gSignalConnect(mi, 'activate', function(...){ abrirArquivo(fn) })
  # Remove the stale duplicate (its index shifted by one after the insert).
  if(insert.pos != 0){
    submenu[[insert.pos+1]]$destroy()
  }
  # Keep at most NRECENTES recent entries (+2 accounts for the fixed items).
  while(length(submenu$getChildren()) > (NRECENTES+2)){
    submenu[[NRECENTES+1]]$destroy()
  }
}
getCandleByLocator <- function(simbolo, coord){
  # Map an x position (user coordinates) on the active chart onto the candle
  # drawn there, returning that single row of the zoomed series.
  usr <- par('usr')                                # plot region, user units
  mai <- grconvertX(par('mai'), 'inches', 'user')  # margins, user units
  largura <- usr[2] + mai[4]                       # total drawable width
  # Restrict the series to the current zoom window.
  de <- as.Date(attr(carteira[[simbolo]], 'zoom.de'))
  ate <- as.Date(attr(carteira[[simbolo]], 'zoom.ate'))
  recorte <- carteira[[simbolo]][paste(de, ate, sep='::')]
  total <- as.numeric(nrow(recorte))
  passo <- largura / total                         # width of one candle
  n <- ceiling(coord / passo)
  # Clamp to the visible range (upper bound first, as the original did).
  if(n > nrow(recorte)){ n <- nrow(recorte) }
  if(n < 1){ n <- 1 }
  recorte[n]
}
getManterAtualizado <- function(simbolo){
  # Read the per-symbol "keep updated" flag stored as an attribute on the
  # symbol's series inside the global `carteira` environment.
  serie <- carteira[[simbolo]]
  attr(serie, 'manterAtualizado')
}
getSimbolo <- function(mySymbol, de=as.Date(Sys.time())-180, ate=as.Date(Sys.time()), manterAtualizado=TRUE){
  # Download (or incrementally update) quotes for `mySymbol` into the global
  # `carteira` environment, then stamp the update flag and zoom window.
  #
  # Args:
  #   mySymbol         ticker understood by quantmod::getSymbols.
  #   de, ate          zoom window (defaults to the last 180 days).
  #   manterAtualizado when TRUE and the symbol already exists, only rows
  #                    after the last stored date are fetched and appended.
  # Create the portfolio container on first use.
  if(!exists('carteira', envir = globalenv())){ carteira <<- new.env() }
  if(exists(mySymbol, envir = carteira) && manterAtualizado){
    # Incremental update: fetch from the day after the last stored bar into
    # a scratch environment, then append to the existing series.
    s.from <- as.Date(index(last(carteira[[mySymbol]])))+1
    tmp <- new.env()
    suppressWarnings(getSymbols(mySymbol, from=s.from, auto.assign = TRUE, env = tmp))
    carteira[[mySymbol]] <- rbind(carteira[[mySymbol]], tmp[[mySymbol]])
    rm(tmp)
  } else {
    # Full (re)download straight into the portfolio environment.
    suppressWarnings(getSymbols(mySymbol, auto.assign = TRUE, env = carteira))
  }
  # Append today's quote when the stored history does not include it yet.
  today <- getQuote(mySymbol)#, what = yahooQuote.EOD)
  if(index(carteira[[mySymbol]])[nrow(carteira[[mySymbol]])] < as.Date(today[1,1])){
    carteira[[mySymbol]] <- rbind(carteira[[mySymbol]], xts(cbind(today$Open,today$High,today$Low,today$Last,today$Volume,today$Last), as.Date(today[['Trade Time']])))
  }
  setManterAtualizado(mySymbol, manterAtualizado)
  setZoomSimbolo(mySymbol, de, ate)
}
getTopoFundo <- function(simbolo){
  # Detect local tops (p) and bottoms (v) of a candle series: a candle whose
  # body extreme exceeds both neighbours' is flagged, and the flag is lagged
  # by one bar so the signal appears on the following candle.
  # Body top: max(open, close) per candle.
  mx.x <- ifelse(Op(carteira[[simbolo]]) < Cl(carteira[[simbolo]]), Cl(carteira[[simbolo]]), Op(carteira[[simbolo]]))
  xp <- cbind(mx.x, lag(mx.x,-1), lag(mx.x,-2))
  xp <- xp[1:(nrow(xp)-2),]
  fp <- ifelse(xp[,2] > xp[,1] & xp[,2] > xp[,3], TRUE, FALSE)
  p <- mx.x[lag(fp),]
  # Body bottom: min(open, close) per candle.
  mn.x <- ifelse(Op(carteira[[simbolo]]) < Cl(carteira[[simbolo]]), Op(carteira[[simbolo]]), Cl(carteira[[simbolo]]))
  # FIX: the valley branch was a copy-paste of the peak branch — it built xv
  # from mx.x and then overwrote it with xp, so valleys were computed from
  # the tops series.  Build it from mn.x instead.
  xv <- cbind(mn.x, lag(mn.x,-1), lag(mn.x,-2))
  xv <- xv[1:(nrow(xv)-2),]
  fv <- ifelse(xv[,2] < xv[,1] & xv[,2] < xv[,3], TRUE, FALSE)
  v <- mn.x[lag(fv),]
  list(p=p, v=v)
}
getVariance <- function(candle1, candle2){
  # Percentage change between two candles' bodies, returned as
  # c(minimum, mid-body, maximum), each rounded to one decimal place.
  corpo1 <- c(Op(candle1), Cl(candle1))
  corpo2 <- c(Op(candle2), Cl(candle2))
  # Mid-body change: mean of open/close of each candle.
  media <- round(100 * (mean(corpo2) - mean(corpo1)) / mean(corpo2), 1)
  topo2 <- max(c(Cl(candle2), Op(candle2)))
  base2 <- min(c(Cl(candle2), Op(candle2)))
  # Best case: second body's top versus first body's bottom.
  maxima <- round(100 * (topo2 - min(corpo1)) / topo2, 1)
  # Worst case: second body's bottom versus first body's top.
  minima <- round(100 * (base2 - max(corpo1)) / base2, 1)
  c(minima, media, maxima)
}
getZoomSimbolo <- function(simbolo){
  # Current zoom window for a symbol, read back from the attributes written
  # by setZoomSimbolo(); returned as a named vector c(de=, ate=) of Dates.
  serie <- carteira[[simbolo]]
  c(de = as.Date(attr(serie, 'zoom.de')),
    ate = as.Date(attr(serie, 'zoom.ate')))
}
# Fixed horizontal guide lines for the RSI(2) ("IFR") indicator panel.  Each
# function returns a constant series aligned with the data of the chart
# currently being drawn (quantmod's current chob).
.ifr.linha.constante <- function(nivel){
  dados <- quantmod:::get.current.chob()@xdata
  xts(rep(nivel, nrow(dados)), index(dados))
}
ifr.lim.inf <- function(){
  .ifr.linha.constante(3)    # oversold guide
}
ifr.lim.med <- function(){
  .ifr.linha.constante(50)   # midline
}
ifr.lim.sup <- function(){
  .ifr.linha.constante(97)   # overbought guide
}
pivot <- function(a, n=1){
  # Derive support/resistance levels from an OHLC series: ZigZag turning
  # points sufficiently far from the last close become the two nearest
  # resistances (above) and supports (below).
  # NOTE(review): the `n` argument is never used in the body.
  # Support and resistance
  a <- na.omit(a)
  # 5%-change ZigZag over the high/low channel; confirmed peaks/valleys
  # (lagged by one bar) are the candidate levels.
  z <- ZigZag(cbind(Hi(a),Lo(a)), change = 5, percent = TRUE)
  p <- z[findPeaks(lag(z,-1))]
  v <- z[findValleys(lag(z,-1))]
  # Reference price: last closing value.
  pivot <- as.numeric(Cl(last(a)))
  x <- rbind(p,v)
  # Absolute distance of every candidate level from the reference price.
  # NOTE(review): dp and dv are computed identically, so one is redundant;
  # also the 0.05 cut-off below is an absolute price distance, not a
  # percentage — confirm this is intended.
  dp <- abs(as.numeric(x-pivot))
  dv <- abs(as.numeric(x-pivot))
  # Nearest and second-nearest levels above (r1, r2) and below (s1, s2).
  r1 <- sort(as.numeric(x[dp>.05 & x>pivot]))[1]
  s1 <- sort(as.numeric(x[dv>.05 & x<pivot]), decreasing=TRUE)[1]
  r2 <- sort(as.numeric(x[dp>.05 & x>pivot]))[2]
  s2 <- sort(as.numeric(x[dv>.05 & x<pivot]), decreasing=TRUE)[2]
  list(r2=r2,r1=r1,pivot=pivot,s1=s1,s2=s2)
}
# Pivot-derived support/resistance levels, one constant series per level.
# The levels were precomputed by pivot() and stored in the 'pivot' attribute
# of the series currently being charted (quantmod's current chob).
.nivel.pivot <- function(campo){
  dados <- quantmod:::get.current.chob()@xdata
  niveis <- attr(dados, 'pivot')
  xts(rep(niveis[[campo]], nrow(dados)), index(dados))
}
Resistencia1 <- function(){
  .nivel.pivot('r1')
}
Resistencia2 <- function(){
  .nivel.pivot('r2')
}
Suporte1 <- function(){
  .nivel.pivot('s1')
}
Suporte2 <- function(){
  .nivel.pivot('s2')
}
salvaCarteira <- function(){
  # Persist the global portfolio to `filename`, appending the .RData
  # extension when absent, and refresh the window title via setFilename().
  # FIX: the original pattern '.RData$' left the dot unescaped, so any name
  # merely ending in "RData" (e.g. "fooXRData") skipped the append.
  if(!grepl('\\.RData$', filename)){
    filename <<- paste(filename, '.RData', sep='', collapse='')
  }
  save(carteira, file = filename)
  setFilename(filename)
}
setFilename <- function(fn = character(0)){
  # Remember the active portfolio path in the global `filename` and refresh
  # the main-window title accordingly.
  filename <<- fn
  setTitleJanelaPrincipal(fn)
}
setManterAtualizado <- function(simbolo, status){
  # Store the per-symbol "keep updated" flag as an attribute on the series;
  # `carteira` is an environment, so the replacement takes effect globally.
  attr(carteira[[simbolo]], 'manterAtualizado') <- status
}
setStatusbar <- function(fmt, ...){
  # Replace the current statusbar message (context id 0) with a
  # sprintf()-formatted string; returns NULL invisibly.
  texto <- sprintf(fmt, ...)
  gtkStatusbarPop(sb, 0)
  gtkStatusbarPush(sb, 0, texto)
  invisible(NULL)
}
setTitleJanelaPrincipal <- function(fn = ''){
  # Show "<file> @ Analise Tecnica" in the main window title, prefixed with
  # "* " when there are unsaved changes (global `changed`).
  # FIX: escape the dot so only a real ".RData" suffix is stripped (the old
  # pattern '.RData$' also matched names like "xRData").
  fn <- sub('\\.RData$', '', fn)
  gtkWindowSetTitle(janelaPrincipal, paste(ifelse(changed, '* ', ''),fn, ' @ Analise Tecnica', sep=''))
}
setZoomSimbolo <- function(simbolo, de, ate){
  # Persist the zoom window (coerced to Date) as attributes on the symbol's
  # series; read back by getZoomSimbolo().
  attr(carteira[[simbolo]], 'zoom.de') <- as.Date(de)
  attr(carteira[[simbolo]], 'zoom.ate') <- as.Date(ate)
}
| /src/function.R | no_license | juscelino-jr/CandleStudy | R | false | false | 10,975 | r | abrirArquivo <- function(fn = NULL){
if(is.null(fn)){
setStatusbar('Escolha o arquivo...')
fn <- openFileDialog()
}
if((length(fn) > 0) && file.exists(fn)){
setStatusbar('Carregando carteira...')
n <- gtkNotebookGetNPages(nb)
for(i in 1:n){
gtkNotebookRemovePage(nb, 0)
}
if(exists('carteira', globalenv())){ rm(carteira) }
load(fn, envir = globalenv())
changed <<- FALSE
setFilename(fn)
gerenciaRecentes(filename)
setTitleJanelaPrincipal(filename)
tot <- length(ls(carteira))
i <- 1
for(simbolo in ls(carteira)){
da <- gtkDrawingArea(show=FALSE)
gSignalConnect(da, 'motion-notify-event', callbackPointerInfo)
da$setEvents(GdkEventMask['pointer-motion-mask'] + GdkEventMask['pointer-motion-hint-mask'])
asCairoDevice(da)
gtkNotebookAppendPage(nb, da, gtkLabel(simbolo))
if(attr(carteira[[simbolo]], 'manterAtualizado') && checkInternetConnection()){
z <- getZoomSimbolo(simbolo)
de <- z[1]
ate <- as.character(as.Date(Sys.time()))
getSimbolo(simbolo, de, ate, TRUE)
changed <<- TRUE
}
analise.tecnica(simbolo)
da$show()
setStatusbar('Carregando carteira: %s%%', 100*i/tot)
Sys.sleep(.5)
}
} else {
setStatusbar(paste('Carregamento cancelado.', 'Arquivo inexistente.', collapse=' '))
}
}
analise.tecnica <- function(simbolo, da){
Sys.sleep(.1)
de <- as.Date(attr(carteira[[simbolo]], 'zoom.de'))
ate <- as.Date(attr(carteira[[simbolo]], 'zoom.ate'))
# Suporte e Resistência
p <- pivot(carteira[[simbolo]])
attr(carteira[[simbolo]],'pivot') <- p
estudos <- ''
if(exists('statusEstudos')){
if(statusEstudos['opcaoVF']){
estudos <- sprintf("%s;addVo()", estudos)
}
if(statusEstudos['opcaoSupRes']){
estudos <- sprintf("%s;addTA(Resistencia1(), on = 1, col='red', lwd=2);addTA(Suporte1(), on = 1, col='green', lwd=2)", estudos)
}
if(statusEstudos['opcaoBB']){
estudos <- sprintf("%s;addTA(xts(matrix(BBands(HLC(quantmod:::get.current.chob()@xdata),n=20)[,c(1,3)], ncol=2,dimnames=list(NULL,c('Bollinger Sup 20','Bollinger Inf 20'))),index(quantmod:::get.current.chob()@xdata)), on = 1, col=c('gray','gray'), lwd=c(2,2))", estudos)
}
if(statusEstudos['opcaoMME']){
estudos <- sprintf("%s;addEMA(9, col = 'cyan');addEMA(21, col = 'red')", estudos)
}
if(statusEstudos['opcaoMMVF'] && statusEstudos['opcaoVF']){
estudos <- sprintf("%s;addTA(SMA(Vo(quantmod:::get.current.chob()@xdata)/quantmod:::get.current.chob()@passed.args$TA[[1]]@params$vol.scale[[1]], n = 21), legend = 'MMA 21', on = 2, col=c('cyan'), lwd=2)", estudos)
}
if(statusEstudos['opcaoIFR']){
estudos <- sprintf("%s;addTA(RSI(Cl(quantmod:::get.current.chob()@xdata),n=2), yrange = c(0,100), legend = paste(paste(rep(' ', 30), collapse=''), 'IFR 2'),col='blue', lwd=2);addTA(ifr.lim.sup(), legend='', on = 3, col='red');addTA(ifr.lim.med(), legend='', on = 3, col='black');addTA(ifr.lim.inf(), legend='', on = 3, col='green')", estudos)
}
if(statusEstudos['opcaoChAD']){
estudos <- sprintf("%s;addChAD()", estudos)
}
if(statusEstudos['opcaoDPO']){
estudos <- sprintf("%s;addDPO()", estudos)
}
if(estudos == ''){ estudos <- NULL }
estudos <- gsub('^ *;', '', estudos)
} else {
estudos <- "addVo();addTA(Resistencia1(), on = 1, col='red', lwd=2);addTA(Suporte1(), on = 1, col='green', lwd=2);addTA(xts(matrix(BBands(HLC(quantmod:::get.current.chob()@xdata),n=20)[,c(1,3)], ncol=2,dimnames=list(NULL,c('Bollinger Sup 20','Bollinger Inf 20'))),index(quantmod:::get.current.chob()@xdata)), on = 1, col=c('gray','gray'), lwd=c(2,2));addEMA(9, col = 'cyan');addEMA(21, col = 'red');addTA(SMA(Vo(quantmod:::get.current.chob()@xdata)/quantmod:::get.current.chob()@passed.args$TA[[1]]@params$vol.scale[[1]], n = 21), legend = 'MMA 21', on = 2, col=c('cyan'), lwd=2);addTA(RSI(Cl(quantmod:::get.current.chob()@xdata),n=2), yrange = c(0,100), legend = paste(paste(rep(' ', 30), collapse=''), 'IFR 2'),col='blue', lwd=2);addTA(ifr.lim.sup(), legend='', on = 3, col='red');addTA(ifr.lim.med(), legend='', on = 3, col='black');addTA(ifr.lim.inf(), legend='', on = 3, col='green');addChAD();"
}
chartSeries(carteira[[simbolo]], type='candlesticks', theme = 'white', multi.col=FALSE, show.grid=FALSE, name = simbolo,
subset=paste(de,ate, sep='::'),
TA=estudos)
gdkWindowSetCursor(gtkWidgetGetWindow(da),gdkCursorNewFromName(gtkWidgetGetDisplay(da), "default"))
}
checkInternetConnection <- function(){
#!is.null(suppressWarnings(nsl('br.finance.yahoo.com')))
TRUE
}
gerenciaRecentes <- function(fn){
  # Maintain the "recent portfolios" submenu: insert `fn` at the top, remove
  # any stale duplicate entry, and trim the list to NRECENTES items.
  submenu <- gtkMenuItemGetSubmenu(gtkMenuGetAttachWidget(subMenuCarteiraRecentes))
  # Locate an existing entry for fn, if any.
  insert.pos <- 0
  # FIX: seq_along() instead of 1:length(); with an empty submenu the old
  # 1:0 sequence iterated over c(1, 0) and indexed out of range.
  for(i in seq_along(submenu$getChildren())){
    if(submenu[[i]]$label == fn){
      insert.pos <- i
      break
    }
  }
  mi <- gtkImageMenuItem(fn)
  subMenuCarteiraRecentes$insert(mi,0)
  gSignalConnect(mi, 'activate', function(...){ abrirArquivo(fn) })
  # Remove the stale duplicate (its index shifted by one after the insert).
  if(insert.pos != 0){
    submenu[[insert.pos+1]]$destroy()
  }
  # Keep at most NRECENTES recent entries (+2 accounts for the fixed items).
  while(length(submenu$getChildren()) > (NRECENTES+2)){
    submenu[[NRECENTES+1]]$destroy()
  }
}
getCandleByLocator <- function(simbolo, coord){
usr <- par('usr') # User System Coordinate
mai <- par('mai') # Inches
mai <- grconvertX(mai, 'inches', 'user') # Inches -> User System Coordinate
width <- usr[2]+mai[4]
de <- as.Date(attr(carteira[[simbolo]], 'zoom.de'))
ate <- as.Date(attr(carteira[[simbolo]], 'zoom.ate'))
subset <- carteira[[simbolo]][paste(de, ate,sep='::')]
nrsub <- nrow(subset)
nCandle <- as.numeric(nrsub)
eachCandle <- width/nCandle
n <- ceiling(coord/eachCandle)
if(n > nrsub){ n <- nrsub }
if(n < 1){ n <- 1 }
subset[n]
}
getManterAtualizado <- function(simbolo){
attr(carteira[[simbolo]], 'manterAtualizado')
}
getSimbolo <- function(mySymbol, de=as.Date(Sys.time())-180, ate=as.Date(Sys.time()), manterAtualizado=TRUE){
if(!exists('carteira', envir = globalenv())){ carteira <<- new.env() }
if(exists(mySymbol, envir = carteira) && manterAtualizado){
s.from <- as.Date(index(last(carteira[[mySymbol]])))+1
tmp <- new.env()
suppressWarnings(getSymbols(mySymbol, from=s.from, auto.assign = TRUE, env = tmp))
carteira[[mySymbol]] <- rbind(carteira[[mySymbol]], tmp[[mySymbol]])
rm(tmp)
} else {
suppressWarnings(getSymbols(mySymbol, auto.assign = TRUE, env = carteira))
}
today <- getQuote(mySymbol)#, what = yahooQuote.EOD)
if(index(carteira[[mySymbol]])[nrow(carteira[[mySymbol]])] < as.Date(today[1,1])){
carteira[[mySymbol]] <- rbind(carteira[[mySymbol]], xts(cbind(today$Open,today$High,today$Low,today$Last,today$Volume,today$Last), as.Date(today[['Trade Time']])))
}
setManterAtualizado(mySymbol, manterAtualizado)
setZoomSimbolo(mySymbol, de, ate)
}
getTopoFundo <- function(simbolo){
  # Detect local tops (p) and bottoms (v) of a candle series: a candle whose
  # body extreme exceeds both neighbours' is flagged, and the flag is lagged
  # by one bar so the signal appears on the following candle.
  # Body top: max(open, close) per candle.
  mx.x <- ifelse(Op(carteira[[simbolo]]) < Cl(carteira[[simbolo]]), Cl(carteira[[simbolo]]), Op(carteira[[simbolo]]))
  xp <- cbind(mx.x, lag(mx.x,-1), lag(mx.x,-2))
  xp <- xp[1:(nrow(xp)-2),]
  fp <- ifelse(xp[,2] > xp[,1] & xp[,2] > xp[,3], TRUE, FALSE)
  p <- mx.x[lag(fp),]
  # Body bottom: min(open, close) per candle.
  mn.x <- ifelse(Op(carteira[[simbolo]]) < Cl(carteira[[simbolo]]), Op(carteira[[simbolo]]), Cl(carteira[[simbolo]]))
  # FIX: the valley branch was a copy-paste of the peak branch — it built xv
  # from mx.x and then overwrote it with xp, so valleys were computed from
  # the tops series.  Build it from mn.x instead.
  xv <- cbind(mn.x, lag(mn.x,-1), lag(mn.x,-2))
  xv <- xv[1:(nrow(xv)-2),]
  fv <- ifelse(xv[,2] < xv[,1] & xv[,2] < xv[,3], TRUE, FALSE)
  v <- mn.x[lag(fv),]
  list(p=p, v=v)
}
getVariance <- function(candle1, candle2){
m1 <- mean(c(Op(candle1), Cl(candle1)))
m2 <- mean(c(Op(candle2), Cl(candle2)))
m <- round(100*(m2-m1)/m2,1)
mx <- round(100*(max(c(Cl(candle2),Op(candle2)))-min(c(Cl(candle1),Op(candle1))))/max(c(Cl(candle2),Op(candle2))),1)
mn <- round(100*(min(c(Cl(candle2),Op(candle2)))-max(c(Cl(candle1),Op(candle1))))/min(c(Cl(candle2),Op(candle2))),1)
c(mn,m,mx)
}
getZoomSimbolo <- function(simbolo){
c(de=as.Date(attr(carteira[[simbolo]],'zoom.de')), ate=as.Date(attr(carteira[[simbolo]],'zoom.ate')))
}
ifr.lim.inf <- function(){
idx <- index(quantmod:::get.current.chob()@xdata)
nrw <- nrow(quantmod:::get.current.chob()@xdata)
xts(rep(3,nrw), idx)
}
ifr.lim.med <- function(){
idx <- index(quantmod:::get.current.chob()@xdata)
nrw <- nrow(quantmod:::get.current.chob()@xdata)
xts(rep(50,nrw), idx)
}
ifr.lim.sup <- function(){
idx <- index(quantmod:::get.current.chob()@xdata)
nrw <- nrow(quantmod:::get.current.chob()@xdata)
xts(rep(97,nrw), idx)
}
pivot <- function(a, n=1){
# Suporte e Resistencia
a <- na.omit(a)
z <- ZigZag(cbind(Hi(a),Lo(a)), change = 5, percent = TRUE)
p <- z[findPeaks(lag(z,-1))]
v <- z[findValleys(lag(z,-1))]
pivot <- as.numeric(Cl(last(a)))
x <- rbind(p,v)
dp <- abs(as.numeric(x-pivot))
dv <- abs(as.numeric(x-pivot))
r1 <- sort(as.numeric(x[dp>.05 & x>pivot]))[1]
s1 <- sort(as.numeric(x[dv>.05 & x<pivot]), decreasing=TRUE)[1]
r2 <- sort(as.numeric(x[dp>.05 & x>pivot]))[2]
s2 <- sort(as.numeric(x[dv>.05 & x<pivot]), decreasing=TRUE)[2]
list(r2=r2,r1=r1,pivot=pivot,s1=s1,s2=s2)
}
Resistencia1 <- function(){
p <- attr(quantmod:::get.current.chob()@xdata, 'pivot')
idx <- index(quantmod:::get.current.chob()@xdata)
nrw <- nrow(quantmod:::get.current.chob()@xdata)
xts(rep(p$r1,nrw), idx)
}
Resistencia2 <- function(){
p <- attr(quantmod:::get.current.chob()@xdata, 'pivot')
idx <- index(quantmod:::get.current.chob()@xdata)
nrw <- nrow(quantmod:::get.current.chob()@xdata)
xts(rep(p$r2,nrw), idx)
}
Suporte1 <- function(){
p <- attr(quantmod:::get.current.chob()@xdata, 'pivot')
idx <- index(quantmod:::get.current.chob()@xdata)
nrw <- nrow(quantmod:::get.current.chob()@xdata)
xts(rep(p$s1,nrw), idx)
}
Suporte2 <- function(){
p <- attr(quantmod:::get.current.chob()@xdata, 'pivot')
idx <- index(quantmod:::get.current.chob()@xdata)
nrw <- nrow(quantmod:::get.current.chob()@xdata)
xts(rep(p$s2,nrw), idx)
}
salvaCarteira <- function(){
  # Persist the global portfolio to `filename`, appending the .RData
  # extension when absent, and refresh the window title via setFilename().
  # FIX: the original pattern '.RData$' left the dot unescaped, so any name
  # merely ending in "RData" (e.g. "fooXRData") skipped the append.
  if(!grepl('\\.RData$', filename)){
    filename <<- paste(filename, '.RData', sep='', collapse='')
  }
  save(carteira, file = filename)
  setFilename(filename)
}
setFilename <- function(fn = character(0)){
filename <<- fn
setTitleJanelaPrincipal(fn)
}
setManterAtualizado <- function(simbolo, status){
attr(carteira[[simbolo]], 'manterAtualizado') <- status
}
setStatusbar <- function(fmt, ...){
msg <- sprintf(fmt, ...)
gtkStatusbarPop(sb, 0)
gtkStatusbarPush(sb, 0, msg)
invisible(NULL)
}
setTitleJanelaPrincipal <- function(fn = ''){
  # Show "<file> @ Analise Tecnica" in the main window title, prefixed with
  # "* " when there are unsaved changes (global `changed`).
  # FIX: escape the dot so only a real ".RData" suffix is stripped (the old
  # pattern '.RData$' also matched names like "xRData").
  fn <- sub('\\.RData$', '', fn)
  gtkWindowSetTitle(janelaPrincipal, paste(ifelse(changed, '* ', ''),fn, ' @ Analise Tecnica', sep=''))
}
setZoomSimbolo <- function(simbolo, de, ate){
# Store the chart zoom window (from/to dates) for a symbol as attributes
# on its entry in the global 'carteira' list. Inputs are coerced to Date.
attr(carteira[[simbolo]], 'zoom.de') <- as.Date(de)
attr(carteira[[simbolo]], 'zoom.ate') <- as.Date(ate)
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/replace.R
\name{str_replace_na}
\alias{str_replace_na}
\title{Turn NA into "NA"}
\usage{
str_replace_na(string, replacement = "NA")
}
\arguments{
\item{string}{Input vector. Either a character vector, or something
coercible to one.}
\item{replacement}{A single string.}
}
\description{
Turn NA into "NA"
}
\examples{
str_replace_na(c(NA, "abc", "def"))
}
| /man/str_replace_na.Rd | permissive | tidyverse/stringr | R | false | true | 434 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/replace.R
\name{str_replace_na}
\alias{str_replace_na}
\title{Turn NA into "NA"}
\usage{
str_replace_na(string, replacement = "NA")
}
\arguments{
\item{string}{Input vector. Either a character vector, or something
coercible to one.}
\item{replacement}{A single string.}
}
\description{
Turn NA into "NA"
}
\examples{
str_replace_na(c(NA, "abc", "def"))
}
|
\name{externalFormats}
\alias{readHB}
\alias{readMM}
% \alias{writeHB}
\alias{writeMM}
\alias{writeMM,CsparseMatrix-method}
\alias{writeMM,sparseMatrix-method}
\title{Read and write external matrix formats}
\description{
Read matrices stored in the Harwell-Boeing or MatrixMarket formats
or write \code{\linkS4class{sparseMatrix}} objects to one of these
formats.
}
\usage{
readHB(file)
readMM(file)
writeMM(obj, file, \dots)
}
\arguments{
\item{obj}{a real sparse matrix}
\item{file}{for \code{writeMM} - the name of the file to be written.
For \code{readHB} and \code{readMM} the name of the file to read, as
a character scalar. The names of files storing matrices in the
Harwell-Boeing format usually end in \code{".rua"} or \code{".rsa"}.
Those storing matrices in the MatrixMarket format usually end in
\code{".mtx"}.
Alternatively, \code{readHB} and \code{readMM} accept connection objects.}
\item{\dots}{optional additional arguments. Currently none are used in
any methods.}
}
\value{
The \code{readHB} and \code{readMM} functions return an object that
inherits from the \code{"\linkS4class{Matrix}"} class. Methods for the
\code{writeMM} generic functions usually return
\code{\link{NULL}} and, as a side effect, the matrix \code{obj} is
written to \code{file} in the MatrixMarket format (writeMM).
}
\note{
The Harwell-Boeing format is older and less flexible than the
MatrixMarket format. The function \code{writeHB} was deprecated and
has now been removed. Please use \code{writeMM} instead.
A very simple way to export small sparse matrices \code{S}, is to use
\code{summary(S)} which returns a \code{\link{data.frame}} with
columns \code{i}, \code{j}, and possibly \code{x}, see \code{summary} in
\code{\link{sparseMatrix-class}}, and an example below.
}
\references{
\url{http://math.nist.gov/MatrixMarket}
\url{http://www.cise.ufl.edu/research/sparse/matrices}
}
\examples{
str(pores <- readMM(system.file("external/pores_1.mtx",
package = "Matrix")))
str(utm <- readHB(system.file("external/utm300.rua",
package = "Matrix")))
str(lundA <- readMM(system.file("external/lund_a.mtx",
package = "Matrix")))
str(lundA <- readHB(system.file("external/lund_a.rsa",
package = "Matrix")))
\dontrun{
## NOTE: The following examples take quite some time
## ---- even on a fast internet connection:
if(FALSE) # the URL has been corrected, but we need an un-tar step!
str(sm <-
readHB(gzcon(url("http://www.cise.ufl.edu/research/sparse/RB/Boeing/msc00726.tar.gz"))))
str(jgl009 <-
readMM(gzcon(url("ftp://math.nist.gov/pub/MatrixMarket2/Harwell-Boeing/counterx/jgl009.mtx.gz"))))
}
data(KNex)
writeMM(KNex$mm, "mmMM.mtx")
## very simple export - in triplet format - to text file:
data(CAex)
s.CA <- summary(CAex)
message("writing to ", outf <- tempfile())
write.table(s.CA, file = outf, row.names=FALSE)
## and read it back -- showing off sparseMatrix():
dd <- read.table(outf, header=TRUE)
mm <- do.call(sparseMatrix, dd)
stopifnot(all.equal(mm, CAex, tolerance=1e-15))
}
\keyword{IO}
\keyword{array}
\keyword{algebra}
| /branches/Matrix_1.1-5-branch/man/externalFormats.Rd | no_license | LTLA/Matrix | R | false | false | 3,223 | rd | \name{externalFormats}
\alias{readHB}
\alias{readMM}
% \alias{writeHB}
\alias{writeMM}
\alias{writeMM,CsparseMatrix-method}
\alias{writeMM,sparseMatrix-method}
\title{Read and write external matrix formats}
\description{
Read matrices stored in the Harwell-Boeing or MatrixMarket formats
or write \code{\linkS4class{sparseMatrix}} objects to one of these
formats.
}
\usage{
readHB(file)
readMM(file)
writeMM(obj, file, \dots)
}
\arguments{
\item{obj}{a real sparse matrix}
\item{file}{for \code{writeMM} - the name of the file to be written.
For \code{readHB} and \code{readMM} the name of the file to read, as
a character scalar. The names of files storing matrices in the
Harwell-Boeing format usually end in \code{".rua"} or \code{".rsa"}.
Those storing matrices in the MatrixMarket format usually end in
\code{".mtx"}.
Alternatively, \code{readHB} and \code{readMM} accept connection objects.}
\item{\dots}{optional additional arguments. Currently none are used in
any methods.}
}
\value{
The \code{readHB} and \code{readMM} functions return an object that
inherits from the \code{"\linkS4class{Matrix}"} class. Methods for the
\code{writeMM} generic functions usually return
\code{\link{NULL}} and, as a side effect, the matrix \code{obj} is
written to \code{file} in the MatrixMarket format (writeMM).
}
\note{
The Harwell-Boeing format is older and less flexible than the
MatrixMarket format. The function \code{writeHB} was deprecated and
has now been removed. Please use \code{writeMM} instead.
A very simple way to export small sparse matrices \code{S}, is to use
\code{summary(S)} which returns a \code{\link{data.frame}} with
columns \code{i}, \code{j}, and possibly \code{x}, see \code{summary} in
\code{\link{sparseMatrix-class}}, and an example below.
}
\references{
\url{http://math.nist.gov/MatrixMarket}
\url{http://www.cise.ufl.edu/research/sparse/matrices}
}
\examples{
str(pores <- readMM(system.file("external/pores_1.mtx",
package = "Matrix")))
str(utm <- readHB(system.file("external/utm300.rua",
package = "Matrix")))
str(lundA <- readMM(system.file("external/lund_a.mtx",
package = "Matrix")))
str(lundA <- readHB(system.file("external/lund_a.rsa",
package = "Matrix")))
\dontrun{
## NOTE: The following examples take quite some time
## ---- even on a fast internet connection:
if(FALSE) # the URL has been corrected, but we need an un-tar step!
str(sm <-
readHB(gzcon(url("http://www.cise.ufl.edu/research/sparse/RB/Boeing/msc00726.tar.gz"))))
str(jgl009 <-
readMM(gzcon(url("ftp://math.nist.gov/pub/MatrixMarket2/Harwell-Boeing/counterx/jgl009.mtx.gz"))))
}
data(KNex)
writeMM(KNex$mm, "mmMM.mtx")
## very simple export - in triplet format - to text file:
data(CAex)
s.CA <- summary(CAex)
message("writing to ", outf <- tempfile())
write.table(s.CA, file = outf, row.names=FALSE)
## and read it back -- showing off sparseMatrix():
dd <- read.table(outf, header=TRUE)
mm <- do.call(sparseMatrix, dd)
stopifnot(all.equal(mm, CAex, tolerance=1e-15))
}
\keyword{IO}
\keyword{array}
\keyword{algebra}
|
library(plotly)
library(tidyverse)
library(readr)
# read the data from my folder
# NOTE(review): hard-coded, machine-specific path -- this only runs on the
# author's machine; consider a relative path.
NHtemp <- read_csv("~/OneDrive/GitHub/NASADatanauts/data/NHtemp.csv",
na = "empty", comment = "*")
# Pull out the monthly data for each year. Removing 2017 since it has missing data.
# Tidy the data, gather the months and mean temperatures into columns.
# Arrange the data by year, ascending.
# The month means for 1880 - 2016
NHyearlyTemp <- NHtemp %>%
select(Year:Dec) %>%
filter(Year != 2017) %>%
gather(Months, MeanTemp, -Year) %>%
arrange(Year)
# The summer month means for 1880 - 2016
NHsummerTemp <- NHtemp %>%
select(Year, Jun:Aug) %>%
filter(Year != 2017) %>%
gather(Months, MeanTemp, -Year) %>%
arrange(Year)
# Adding a column for the Year groups. This will be used as the frame parameter
# for the animation, shown below.
# NOTE(review): years 1981-1982 match none of the listed ranges and fall
# through to the "1880-1950" label; downstream code filters them out
# explicitly, so verify this is intentional.
library(data.table)
NHsummerTemp <- data.table(NHsummerTemp)
NHsummerTemp[, Group := ifelse(Year %in% c(1951:1980), "1951-1980",
ifelse(Year %in% c(1983:1993), "1983-1993",
ifelse(Year %in% c(1994:2004), "1994-2004",
ifelse(Year %in% c(2005:2015), "2005-2015", "1880-1950"))))]
# Probability histogram of monthly mean temperatures, 1951-2015.
nhYr_p <- plot_ly(data = filter(NHyearlyTemp, between(Year, 1951, 2015)), x = ~MeanTemp,
type = "histogram", histnorm = "probability", nbinsx = 25) %>%
layout(xaxis = list(range = c(-1, 1.3)))
# Overlaid histograms: an all-months baseline plus one summer-months
# histogram per era.
nh_p <- plot_ly(alpha = 0.6, histnorm = "probability", nbinsx = 25) %>%
add_histogram(data = filter(NHyearlyTemp, between(Year, 1951, 2015)), x = ~MeanTemp, alpha = .2, fill = "black") %>%
add_histogram(data = filter(NHsummerTemp, between(Year, 1951, 1980)), x = ~MeanTemp ) %>%
add_histogram(data = filter(NHsummerTemp, between(Year, 1983, 1993)), x = ~MeanTemp ) %>%
add_histogram(data = filter(NHsummerTemp, between(Year, 1994, 2004)), x = ~MeanTemp ) %>%
add_histogram(data = filter(NHsummerTemp, between(Year, 2005, 2015)), x = ~MeanTemp ) %>%
layout(barmode = "overlay", xaxis = list(range = c(-1, 1.3)))
# Animated version: one frame per era, driven by the Group column.
nh_p2 <- plot_ly(alpha = 0.6, frame = ~Group, histnorm = "probability", nbinsx = 25, color = ~Group) %>%
add_histogram(data = filter(NHsummerTemp, between(Year, 1951, 1980)), x = ~MeanTemp ) %>%
add_histogram(data = filter(NHsummerTemp, between(Year, 1983, 1993)), x = ~MeanTemp ) %>%
add_histogram(data = filter(NHsummerTemp, between(Year, 1994, 2004)), x = ~MeanTemp ) %>%
add_histogram(data = filter(NHsummerTemp, between(Year, 2005, 2015)), x = ~MeanTemp ) %>%
layout(barmode = "overlay", xaxis = list(range = c(-1, 1.3))) %>%
animation_opts(redraw = TRUE)
nh_p2
################################
accumulate_by <- function(dat, var) {
  # Build cumulative copies of `dat` for plotly frame-by-frame animation:
  # frame k contains every row whose `var` value (evaluated as a lazyeval
  # formula against `dat`) lies among the first k levels, tagged with a
  # `frame` column.
  vals <- lazyeval::f_eval(var, dat)
  lvls <- plotly:::getLevels(vals)
  frames <- lapply(seq_len(length(lvls)), function(k) {
    keep <- vals %in% lvls[seq_len(k)]
    cbind(dat[keep, ], frame = lvls[[k]])
  })
  dplyr::bind_rows(frames)
}
# Cumulative data for the animated histogram: restrict to 1951-2015,
# dropping the 1981-1982 gap years, then expand via accumulate_by.
df <- NHsummerTemp %>%
filter(between(Year, 1951, 2015), Year != 1981, Year != 1982) %>%
accumulate_by(~MeanTemp)
# NOTE(review): the four identical add_histogram calls add the same trace
# four times -- presumably a copy-paste remnant of nh_p2; confirm intent.
nh_p3 <- df %>%
plot_ly(alpha = 0.6, frame = ~Group, histnorm = "probability", nbinsx = 25, color = ~Group) %>%
add_histogram(x = ~MeanTemp ) %>%
add_histogram(x = ~MeanTemp ) %>%
add_histogram(x = ~MeanTemp ) %>%
add_histogram(x = ~MeanTemp ) %>%
layout(barmode = "overlay", xaxis = list(range = c(-1, 1.3))) %>%
animation_opts(frame = 300, transition = 0, redraw = TRUE)
| /R/northernHemipherePlotly.R | permissive | davidmeza1/NASADatanauts | R | false | false | 3,518 | r | library(plotly)
library(tidyverse)
library(readr)
# read the data from my folder
# NOTE(review): hard-coded, machine-specific path.
NHtemp <- read_csv("~/OneDrive/GitHub/NASADatanauts/data/NHtemp.csv",
na = "empty", comment = "*")
# Pull out the monthly data for each year. Removing 2017 since it has missing data.
# Tidy the data, gather the months and mean temperatures into columns.
# Arrange the data by year, ascending.
# The month means for 1880 - 2016
NHyearlyTemp <- NHtemp %>%
select(Year:Dec) %>%
filter(Year != 2017) %>%
gather(Months, MeanTemp, -Year) %>%
arrange(Year)
# The summer month means for 1880 - 2016
NHsummerTemp <- NHtemp %>%
select(Year, Jun:Aug) %>%
filter(Year != 2017) %>%
gather(Months, MeanTemp, -Year) %>%
arrange(Year)
# Adding a column for the Year groups. This will be used as the frame parameter
# for the animation, shown below.
# NOTE(review): years 1981-1982 fall through to the "1880-1950" label.
library(data.table)
NHsummerTemp <- data.table(NHsummerTemp)
NHsummerTemp[, Group := ifelse(Year %in% c(1951:1980), "1951-1980",
ifelse(Year %in% c(1983:1993), "1983-1993",
ifelse(Year %in% c(1994:2004), "1994-2004",
ifelse(Year %in% c(2005:2015), "2005-2015", "1880-1950"))))]
# Probability histogram of monthly mean temperatures, 1951-2015.
nhYr_p <- plot_ly(data = filter(NHyearlyTemp, between(Year, 1951, 2015)), x = ~MeanTemp,
type = "histogram", histnorm = "probability", nbinsx = 25) %>%
layout(xaxis = list(range = c(-1, 1.3)))
# Overlaid histograms: all-months baseline plus one summer histogram per era.
nh_p <- plot_ly(alpha = 0.6, histnorm = "probability", nbinsx = 25) %>%
add_histogram(data = filter(NHyearlyTemp, between(Year, 1951, 2015)), x = ~MeanTemp, alpha = .2, fill = "black") %>%
add_histogram(data = filter(NHsummerTemp, between(Year, 1951, 1980)), x = ~MeanTemp ) %>%
add_histogram(data = filter(NHsummerTemp, between(Year, 1983, 1993)), x = ~MeanTemp ) %>%
add_histogram(data = filter(NHsummerTemp, between(Year, 1994, 2004)), x = ~MeanTemp ) %>%
add_histogram(data = filter(NHsummerTemp, between(Year, 2005, 2015)), x = ~MeanTemp ) %>%
layout(barmode = "overlay", xaxis = list(range = c(-1, 1.3)))
# Animated version: one frame per era, driven by the Group column.
nh_p2 <- plot_ly(alpha = 0.6, frame = ~Group, histnorm = "probability", nbinsx = 25, color = ~Group) %>%
add_histogram(data = filter(NHsummerTemp, between(Year, 1951, 1980)), x = ~MeanTemp ) %>%
add_histogram(data = filter(NHsummerTemp, between(Year, 1983, 1993)), x = ~MeanTemp ) %>%
add_histogram(data = filter(NHsummerTemp, between(Year, 1994, 2004)), x = ~MeanTemp ) %>%
add_histogram(data = filter(NHsummerTemp, between(Year, 2005, 2015)), x = ~MeanTemp ) %>%
layout(barmode = "overlay", xaxis = list(range = c(-1, 1.3))) %>%
animation_opts(redraw = TRUE)
nh_p2
################################
accumulate_by <- function(dat, var) {
# Cumulative framing helper for plotly animations: for each level of `var`
# (a lazyeval formula evaluated against `dat`), emit every row whose value
# is among the levels seen so far, tagged with a `frame` column.
var <- lazyeval::f_eval(var, dat)
lvls <- plotly:::getLevels(var)
dats <- lapply(seq_along(lvls), function(x) {
cbind(dat[var %in% lvls[seq(1, x)], ], frame = lvls[[x]])
})
dplyr::bind_rows(dats)
}
# Cumulative data for the animated histogram: restrict to 1951-2015,
# dropping the 1981-1982 gap years, then expand via accumulate_by.
df <- NHsummerTemp %>%
filter(between(Year, 1951, 2015), Year != 1981, Year != 1982) %>%
accumulate_by(~MeanTemp)
# NOTE(review): the four identical add_histogram calls add the same trace
# four times -- likely a copy-paste remnant; confirm intent.
nh_p3 <- df %>%
plot_ly(alpha = 0.6, frame = ~Group, histnorm = "probability", nbinsx = 25, color = ~Group) %>%
add_histogram(x = ~MeanTemp ) %>%
add_histogram(x = ~MeanTemp ) %>%
add_histogram(x = ~MeanTemp ) %>%
add_histogram(x = ~MeanTemp ) %>%
layout(barmode = "overlay", xaxis = list(range = c(-1, 1.3))) %>%
animation_opts(frame = 300, transition = 0, redraw = TRUE)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/controlchart.R
\name{KRDetect.outliers.controlchart}
\alias{KRDetect.outliers.controlchart}
\title{Identification of outliers using control charts}
\usage{
KRDetect.outliers.controlchart(x, perform.smoothing = TRUE,
bandwidth.type = "local", bandwidth.value = NULL, kernel.order = 2,
method = "range", group.size.x = 3, group.size.R = 3,
group.size.s = 3, L.x = 3, L.R = 3, L.s = 3)
}
\arguments{
\item{x}{data values.
Supported data types
\itemize{
\item{a numeric vector}
\item{a time series object \code{ts}}
\item{a time series object \code{xts}}
\item{a time series object \code{zoo}}
}}
\item{perform.smoothing}{a logical value specifying if data smoothing is performed. If \code{TRUE} (default), data are smoothed.}
\item{bandwidth.type}{a character string specifying the type of bandwidth.
Possible options are
\itemize{
\item{\code{"local"}} {(default) to use local bandwidth}
\item{\code{"global"}} {to use global bandwidth}
}}
\item{bandwidth.value}{a local bandwidth array (for \code{bandwidth.type = "local"}) or global bandwidth value (for \code{bandwidth.type = "global"}) for kernel regression estimation. If \code{bandwidth.type = "NULL"} (default) a data-adaptive local plug-in (Herrmann, 1997) (for \code{bandwidth.type = "local"}) or data-adaptive global plug-in (Gasser et al., 1991) (for \code{bandwidth.type = "global"}) bandwidth is used instead.}
\item{kernel.order}{a nonnegative integer giving the order of the optimal kernel (Gasser et al., 1985) used for smoothing.
Possible options are
\itemize{
\item{\code{kernel.order = 2}} {(default)}
\item{\code{kernel.order = 4}}
}}
\item{method}{a character string specifying the preferred estimate of standard deviation parameter.
Possible options are
\itemize{
\item{\code{"range"}} {(default) for estimation based on sample ranges}
\item{\code{"sd"}} {for estimation based on sample standard deviations}
}}
\item{group.size.x}{a positive integer giving the number of observations in individual segments used for computation of \emph{x} chart control limits.
If the data can not be equidistantly divided, the first extra values will be excluded from the analysis. Default is \code{group.size.x = 3}.}
\item{group.size.R}{a positive integer giving the number of observations in individual segments used for computation of \emph{R} chart control limits.
If the data can not be equidistantly divided, the first extra values will be excluded from the analysis. Default is \code{group.size.R = 3}.}
\item{group.size.s}{a positive integer giving the number of observations in individual segments used for computation of \emph{s} chart control limits.
If the data can not be equidistantly divided, the first extra values will be excluded from the analysis. Default is \code{group.size.s = 3}.}
\item{L.x}{a positive numeric value giving parameter \code{L} specifying the width of \emph{x} chart control limits. Default is \code{L.x = 3}.}
\item{L.R}{a positive numeric value giving parameter \code{L} specifying the width of \emph{R} chart control limits. Default is \code{L.R = 3}.}
\item{L.s}{a positive numeric value giving parameter \code{L} specifying the width of \emph{s} chart control limits. Default is \code{L.s = 3}.}
}
\value{
A \code{"KRDetect"} object which contains a list with elements:
\item{method.type}{a character string giving the type of method used for outlier identification}
\item{x}{a numeric vector of observations}
\item{index}{a numeric vector of index design points assigned to individual observations}
\item{smoothed}{a numeric vector of estimates of the kernel regression function (smoothed data)}
\item{outlier.x}{a logical vector specifying the identified outliers based on limits of control chart \emph{x}, \code{TRUE} means that corresponding observation from vector \code{x} is detected as outlier}
\item{outlier.R}{a logical vector specifying the identified outliers based on limits of control chart \emph{R}, \code{TRUE} means that corresponding observation from vector \code{x} is detected as outlier}
\item{outlier.s}{a logical vector specifying the identified outliers based on limits of control chart \emph{s}, \code{TRUE} means that corresponding observation from vector \code{x} is detected as outlier}
\item{outlier}{a logical vector specifying the identified outliers based on at least one type of control limits. \code{TRUE} means that corresponding observation from vector \code{x} is detected as outlier}
\item{LCL.x}{a numeric value giving lower control limit of control chart \emph{x}}
\item{UCL.x}{a numeric value giving upper control limit of control chart \emph{x}}
\item{LCL.s}{a numeric value giving lower control limit of control chart \emph{s}}
\item{UCL.s}{a numeric value giving upper control limit of control chart \emph{s}}
\item{LCL.R}{a numeric value giving lower control limit of control chart \emph{R}}
\item{UCL.R}{a numeric value giving upper control limit of control chart \emph{R}}
}
\description{
Identification of outliers in environmental data using two-step method based on kernel smoothing and control charts (Campulova et al., 2017).
The outliers are identified as observations corresponding to segments of smoothing residuals exceeding control charts limits.
}
\details{
This function identifies outliers in environmental data using a two-step procedure (Campulova et al., 2017).
The procedure consists of kernel smoothing and subsequent identification of observations corresponding to segments of smoothing residuals exceeding control chart limits.
This way the method does not identify individual outliers but segments of observations where the outliers occur.
The output of the method are three logical vectors specifying the outliers identified based on each of the three control charts.
Besides that, a logical vector specifying the outliers identified based on at least one type of control limits is returned.
Crucial for the method is the choice of parameters \code{L.x}, \code{L.R} and \code{L.s} specifying the width of control limits.
Different values of the parameters determine different criteria for outlier detection. For more information see (Campulova et al., 2017).
}
\examples{
data("mydata", package = "openair")
x = mydata$o3[format(mydata$date, "\%m \%Y") == "12 2002"]
result = KRDetect.outliers.controlchart(x)
summary(result)
plot(result)
plot(result, plot.type = "x")
plot(result, plot.type = "R")
plot(result, plot.type = "s")
}
\references{
Campulova M, Veselik P, Michalek J (2017). Control chart and Six sigma based algorithms for identification of outliers in experimental data, with an application to particulate matter PM10. Atmospheric Pollution Research. Doi=10.1016/j.apr.2017.01.004.
Shewhart W (1931). Quality control chart. Bell System Technical Journal, 5, 593–603.
SAS/QC User's Guide, Version 8, 1999. SAS Institute, Cary, N.C.
Wild C, Seber G (2000). Chance encounters: A first course in data analysis and inference. New York: John Wiley.
Joglekar, Anand M. Statistical methods for six sigma: in R&D and manufacturing. Hoboken, NJ: Wiley-Interscience. ISBN sbn0-471-20342-4.
Gasser T, Kneip A, Kohler W (1991). A flexible and fast method for automatic smoothing. Journal of the American Statistical Association, 86, 643–652.
Herrmann E (1997). Local bandwidth choice in kernel regression estimation. Journal of Computational and Graphical Statistics, 6(1), 35–54.
Eva Herrmann; Packaged for R and enhanced by Martin Maechler (2016). lokern: Kernel Regression Smoothing with Local or Global Plug-in Bandwidth. R package version 1.1-8. https://CRAN.R-project.org/package=lokern
}
| /man/KRDetect.outliers.controlchart.Rd | no_license | cran/envoutliers | R | false | true | 7,805 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/controlchart.R
\name{KRDetect.outliers.controlchart}
\alias{KRDetect.outliers.controlchart}
\title{Identification of outliers using control charts}
\usage{
KRDetect.outliers.controlchart(x, perform.smoothing = TRUE,
bandwidth.type = "local", bandwidth.value = NULL, kernel.order = 2,
method = "range", group.size.x = 3, group.size.R = 3,
group.size.s = 3, L.x = 3, L.R = 3, L.s = 3)
}
\arguments{
\item{x}{data values.
Supported data types
\itemize{
\item{a numeric vector}
\item{a time series object \code{ts}}
\item{a time series object \code{xts}}
\item{a time series object \code{zoo}}
}}
\item{perform.smoothing}{a logical value specifying if data smoothing is performed. If \code{TRUE} (default), data are smoothed.}
\item{bandwidth.type}{a character string specifying the type of bandwidth.
Possible options are
\itemize{
\item{\code{"local"}} {(default) to use local bandwidth}
\item{\code{"global"}} {to use global bandwidth}
}}
\item{bandwidth.value}{a local bandwidth array (for \code{bandwidth.type = "local"}) or global bandwidth value (for \code{bandwidth.type = "global"}) for kernel regression estimation. If \code{bandwidth.type = "NULL"} (default) a data-adaptive local plug-in (Herrmann, 1997) (for \code{bandwidth.type = "local"}) or data-adaptive global plug-in (Gasser et al., 1991) (for \code{bandwidth.type = "global"}) bandwidth is used instead.}
\item{kernel.order}{a nonnegative integer giving the order of the optimal kernel (Gasser et al., 1985) used for smoothing.
Possible options are
\itemize{
\item{\code{kernel.order = 2}} {(default)}
\item{\code{kernel.order = 4}}
}}
\item{method}{a character string specifying the preferred estimate of standard deviation parameter.
Possible options are
\itemize{
\item{\code{"range"}} {(default) for estimation based on sample ranges}
\item{\code{"sd"}} {for estimation based on sample standard deviations}
}}
\item{group.size.x}{a positive integer giving the number of observations in individual segments used for computation of \emph{x} chart control limits.
If the data can not be equidistantly divided, the first extra values will be excluded from the analysis. Default is \code{group.size.x = 3}.}
\item{group.size.R}{a positive integer giving the number of observations in individual segments used for computation of \emph{R} chart control limits.
If the data can not be equidistantly divided, the first extra values will be excluded from the analysis. Default is \code{group.size.R = 3}.}
\item{group.size.s}{a positive integer giving the number of observations in individual segments used for computation of \emph{s} chart control limits.
If the data can not be equidistantly divided, the first extra values will be excluded from the analysis. Default is \code{group.size.s = 3}.}
\item{L.x}{a positive numeric value giving parameter \code{L} specifying the width of \emph{x} chart control limits. Default is \code{L.x = 3}.}
\item{L.R}{a positive numeric value giving parameter \code{L} specifying the width of \emph{R} chart control limits. Default is \code{L.R = 3}.}
\item{L.s}{a positive numeric value giving parameter \code{L} specifying the width of \emph{s} chart control limits. Default is \code{L.s = 3}.}
}
\value{
A \code{"KRDetect"} object which contains a list with elements:
\item{method.type}{a character string giving the type of method used for outlier identification}
\item{x}{a numeric vector of observations}
\item{index}{a numeric vector of index design points assigned to individual observations}
\item{smoothed}{a numeric vector of estimates of the kernel regression function (smoothed data)}
\item{outlier.x}{a logical vector specifying the identified outliers based on limits of control chart \emph{x}, \code{TRUE} means that corresponding observation from vector \code{x} is detected as outlier}
\item{outlier.R}{a logical vector specifying the identified outliers based on limits of control chart \emph{R}, \code{TRUE} means that corresponding observation from vector \code{x} is detected as outlier}
\item{outlier.s}{a logical vector specifying the identified outliers based on limits of control chart \emph{s}, \code{TRUE} means that corresponding observation from vector \code{x} is detected as outlier}
\item{outlier}{a logical vector specifying the identified outliers based on at least one type of control limits. \code{TRUE} means that corresponding observation from vector \code{x} is detected as outlier}
\item{LCL.x}{a numeric value giving lower control limit of control chart \emph{x}}
\item{UCL.x}{a numeric value giving upper control limit of control chart \emph{x}}
\item{LCL.s}{a numeric value giving lower control limit of control chart \emph{s}}
\item{UCL.s}{a numeric value giving upper control limit of control chart \emph{s}}
\item{LCL.R}{a numeric value giving lower control limit of control chart \emph{R}}
\item{UCL.R}{a numeric value giving upper control limit of control chart \emph{R}}
}
\description{
Identification of outliers in environmental data using two-step method based on kernel smoothing and control charts (Campulova et al., 2017).
The outliers are identified as observations corresponding to segments of smoothing residuals exceeding control charts limits.
}
\details{
This function identifies outliers in environmental data using a two-step procedure (Campulova et al., 2017).
The procedure consists of kernel smoothing and subsequent identification of observations corresponding to segments of smoothing residuals exceeding control chart limits.
This way the method does not identify individual outliers but segments of observations where the outliers occur.
The output of the method are three logical vectors specifying the outliers identified based on each of the three control charts.
Besides that, a logical vector specifying the outliers identified based on at least one type of control limits is returned.
Crucial for the method is the choice of parameters \code{L.x}, \code{L.R} and \code{L.s} specifying the width of control limits.
Different values of the parameters determine different criteria for outlier detection. For more information see (Campulova et al., 2017).
}
\examples{
data("mydata", package = "openair")
x = mydata$o3[format(mydata$date, "\%m \%Y") == "12 2002"]
result = KRDetect.outliers.controlchart(x)
summary(result)
plot(result)
plot(result, plot.type = "x")
plot(result, plot.type = "R")
plot(result, plot.type = "s")
}
\references{
Campulova M, Veselik P, Michalek J (2017). Control chart and Six sigma based algorithms for identification of outliers in experimental data, with an application to particulate matter PM10. Atmospheric Pollution Research. Doi=10.1016/j.apr.2017.01.004.
Shewhart W (1931). Quality control chart. Bell System Technical Journal, 5, 593–603.
SAS/QC User's Guide, Version 8, 1999. SAS Institute, Cary, N.C.
Wild C, Seber G (2000). Chance encounters: A first course in data analysis and inference. New York: John Wiley.
Joglekar, Anand M. Statistical methods for six sigma: in R&D and manufacturing. Hoboken, NJ: Wiley-Interscience. ISBN sbn0-471-20342-4.
Gasser T, Kneip A, Kohler W (1991). A flexible and fast method for automatic smoothing. Journal of the American Statistical Association, 86, 643–652.
Herrmann E (1997). Local bandwidth choice in kernel regression estimation. Journal of Computational and Graphical Statistics, 6(1), 35–54.
Eva Herrmann; Packaged for R and enhanced by Martin Maechler (2016). lokern: Kernel Regression Smoothing with Local or Global Plug-in Bandwidth. R package version 1.1-8. https://CRAN.R-project.org/package=lokern
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/write.R
\name{write.ntk}
\alias{write.ntk}
\title{Write a SaTScan ntk (network) file}
\usage{
write.ntk(x, location, filename, userownames = FALSE)
}
\arguments{
\item{x}{Your data frame.}
\item{location}{Directory location where the file should be written}
\item{filename}{Name for the output file in the OS; .ntk will be added.}
\item{userownames}{If TRUE, will write the row names into the file.}
}
\description{
Write a SaTScan ntk (network) file
}
\details{
Writes the input data frame to the OS, using the .ntk extension. Contents of the data
frame should be only what you want SaTScan to see.
This is a simple function that calls write.table, since SaTScan just needs ASCII files.
}
| /man/write.ntk.Rd | no_license | cran/rsatscan | R | false | true | 771 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/write.R
\name{write.ntk}
\alias{write.ntk}
\title{Write a SaTScan ntk (network) file}
\usage{
write.ntk(x, location, filename, userownames = FALSE)
}
\arguments{
\item{x}{Your data frame.}
\item{location}{Directory location where the file should be written}
\item{filename}{Name for the output file in the OS; .ntk will be added.}
\item{userownames}{If TRUE, will write the row names into the file.}
}
\description{
Write a SaTScan ntk (network) file
}
\details{
Writes the input data frame to the OS, using the .ntk extension. Contents of the data
frame should be only what you want SaTScan to see.
This is a simple function that calls write.table, since SaTScan just needs ASCII files.
}
|
# Grab user information
# Load packages
library(twitteR)
library(dplyr)
library(rjson)
library(lubridate)
# Load data
today <- format(Sys.time(), '%Y-%m-%d')
searchstring <- 'microsoft'
# Load tweet file (from process_tweets.R)
# Picks the first matching processed-tweets file in data/.
files <- list.files('data',paste0('proc_',searchstring))
files
selectedfile <- paste0('data/',files[1])
statuses <- readRDS(file=selectedfile)
# Set up authorization
# Twitter API credentials come from a git-ignored JSON file.
secrets <- fromJSON(file='twitter_secrets.json.nogit')
setup_twitter_oauth(secrets$api_key,
secrets$api_secret,
secrets$access_token,
secrets$access_token_secret)
# Grab user info
userlist <- sapply(unique(statuses$user), as.character)
allusers <- lookupUsers(userlist)
# Date since founding of twitter (March 21, 2006)
# NOTE(review): twitter_date is not referenced anywhere below -- possibly
# dead code left from an earlier version.
twitter_date <- mdy_hm('03-21-2006 9:50 PM PST')
# Gather all the user info in a data frame
# One row per looked-up user: profile counts, verification flag, and
# account age in years (lubridate interval from creation to now).
userinfo <- data.frame(user=sapply(allusers, function(x) x$screenName),
realname=sapply(allusers, function(x) x$name),
numstatuses=sapply(allusers, function(x) x$statusesCount),
followers=sapply(allusers, function(x) x$followersCount),
friends=sapply(allusers, function(x) x$friendsCount),
favorites=sapply(allusers, function(x) x$favoritesCount),
account_created=sapply(allusers, function(x) format(x$created,
format='%F %T')),
verified=sapply(allusers, function(x) x$verified),
numlists=sapply(allusers, function(x) x$listedCount)) %>%
mutate(user=as.character(user)) %>%
mutate(twitter_years=interval(account_created, Sys.time()) / dyears(1)) %>%
select(-account_created)
# Group tweet data by user
newstatuses <-
statuses %>%
group_by(user) %>%
summarize(numTopicTweets=n(),
positivity=mean(positivity),
PC1=mean(PC1),
PC2=mean(PC2),
PC3=mean(PC3),
PC4=mean(PC4),
PC5=mean(PC5),
client=rownames(sort(table(client), decreasing = T))[1], #most common client
anger=mean(anger), anticipation=mean(anticipation),
disgust=mean(disgust), fear=mean(fear), joy=mean(joy),
sadness=mean(sadness), surprise=mean(surprise), trust=mean(trust)) %>%
mutate(user=as.character(user))
# Join the data together
alldata <- inner_join(userinfo, newstatuses, by='user')
# looks like some users were lost
# Save to a file
savename <- paste0('data/users_',searchstring,'_',
nrow(alldata),'_',today,'.Rda')
saveRDS(alldata, file=savename)
| /scripts/load_users.R | permissive | dkrathi457/Twitter-Sentiment-PCA | R | false | false | 2,730 | r | # Grab user information
# Load packages
# (Second copy of the same user-collection script in this concatenated dump.)
library(twitteR)
library(dplyr)
library(rjson)
library(lubridate)
# Load data
today <- format(Sys.time(), '%Y-%m-%d')
searchstring <- 'microsoft'
# Load tweet file (from process_tweets.R)
# NOTE(review): only the first matching file is read (files[1]).
files <- list.files('data',paste0('proc_',searchstring))
files
selectedfile <- paste0('data/',files[1])
statuses <- readRDS(file=selectedfile)
# Set up authorization
secrets <- fromJSON(file='twitter_secrets.json.nogit')
setup_twitter_oauth(secrets$api_key,
secrets$api_secret,
secrets$access_token,
secrets$access_token_secret)
# Grab user info
# Fetch full profiles for every distinct author (network call to the API).
userlist <- sapply(unique(statuses$user), as.character)
allusers <- lookupUsers(userlist)
# Date since founding of twitter (March 21, 2006)
# NOTE(review): twitter_date is never referenced below in this script.
twitter_date <- mdy_hm('03-21-2006 9:50 PM PST')
# Gather all the user info in a data frame
userinfo <- data.frame(user=sapply(allusers, function(x) x$screenName),
realname=sapply(allusers, function(x) x$name),
numstatuses=sapply(allusers, function(x) x$statusesCount),
followers=sapply(allusers, function(x) x$followersCount),
friends=sapply(allusers, function(x) x$friendsCount),
favorites=sapply(allusers, function(x) x$favoritesCount),
account_created=sapply(allusers, function(x) format(x$created,
format='%F %T')),
verified=sapply(allusers, function(x) x$verified),
numlists=sapply(allusers, function(x) x$listedCount)) %>%
mutate(user=as.character(user)) %>%
mutate(twitter_years=interval(account_created, Sys.time()) / dyears(1)) %>%
select(-account_created)
# Group tweet data by user
# Collapse per-tweet scores (sentiment, PCs, emotions) to per-user means.
newstatuses <-
statuses %>%
group_by(user) %>%
summarize(numTopicTweets=n(),
positivity=mean(positivity),
PC1=mean(PC1),
PC2=mean(PC2),
PC3=mean(PC3),
PC4=mean(PC4),
PC5=mean(PC5),
client=rownames(sort(table(client), decreasing = T))[1], #most common client
anger=mean(anger), anticipation=mean(anticipation),
disgust=mean(disgust), fear=mean(fear), joy=mean(joy),
sadness=mean(sadness), surprise=mean(surprise), trust=mean(trust)) %>%
mutate(user=as.character(user))
# Join the data together
# inner_join drops users present in only one of the two tables.
alldata <- inner_join(userinfo, newstatuses, by='user')
# looks like some users were lost
# Save to a file
savename <- paste0('data/users_',searchstring,'_',
nrow(alldata),'_',today,'.Rda')
saveRDS(alldata, file=savename)
|
# Return the name of the hospital holding a given mortality rank in a state.
#
# state:   state abbreviation, matched against the CSV's State column.
# outcome: outcome name (e.g. "heart attack"); spaces are converted to dots
#          so it can be matched against the CSV column names.
# num:     rank to return: an integer, "best" (rank 1) or "worst" (last).
#          An out-of-range integer yields NA via standard R subsetting.
# Returns the hospital name (column 2 of the CSV) at that rank.
# NOTE(review): ties in the mortality rate are left in file order, not
# broken alphabetically by hospital name — confirm against the assignment.
rankhospital<-function(state, outcome, num="best"){
## Read outcome data
data<-read.csv("outcome-of-care-measures.csv",na.strings="Not Available")
outcome<-gsub(" ", ".", outcome)
coln<-colnames(data)
##Check that state and outcome are valid
if (!any(data$State == state)){
stop("Invalid State")
}
len<-length(grep(outcome, coln, ignore.case=TRUE))
if ( len == 0){
stop("Invalid outcome")
}
##return hospital name with lowest 30-day death rate in state
if(num=="best"){
num<-1
}
# Locate the 30-day mortality column for the requested outcome by regex.
search<-paste(c("^Hospital.30.Day.Death..Mortality..Rates.from"),outcome, sep=".")
col<-as.integer(grep(search,colnames(data),ignore.case=TRUE))
# Keep only this state's hospitals with a non-missing rate.
data.s<-subset(data, (data$State==state) & (!is.na(data[,col])))
if(num=="worst"){
num<-length(data.s$State)
}
# Sort ascending by rate and pick the requested rank; column 2 is the name.
as.character(data.s[ order(data.s[col]), ][num,2])
} | /rprog_data_ProgAssignment3-data/rankhospital.R | no_license | emilliman5/datasciencecoursera | R | false | false | 878 | r | rankhospital<-function(state, outcome, num="best"){
## Read outcome data
# (Second copy of the same function in this concatenated dump.)
data<-read.csv("outcome-of-care-measures.csv",na.strings="Not Available")
outcome<-gsub(" ", ".", outcome)
coln<-colnames(data)
##Check that state and outcome are valid
if (!any(data$State == state)){
stop("Invalid State")
}
len<-length(grep(outcome, coln, ignore.case=TRUE))
if ( len == 0){
stop("Invalid outcome")
}
##return hospital name with lowest 30-day death rate in state
if(num=="best"){
num<-1
}
search<-paste(c("^Hospital.30.Day.Death..Mortality..Rates.from"),outcome, sep=".")
col<-as.integer(grep(search,colnames(data),ignore.case=TRUE))
data.s<-subset(data, (data$State==state) & (!is.na(data[,col])))
if(num=="worst"){
num<-length(data.s$State)
}
as.character(data.s[ order(data.s[col]), ][num,2])
}
library(tidyverse)
library(here)
library(DESeq2)
# Replicate-level QC for an RNA-seq experiment: downloads counts/metadata
# from Synapse, then runs PCA and sample cross-correlation diagnostics.
synapser::synLogin()
syn <- synExtra::synDownloader(here("tempdl"), followLink = TRUE)
wd <- here("replicate_analysis")
dir.create(wd, recursive = TRUE, showWarnings = FALSE)
# set directories, import files ------------------------------------------------
###############################################################################T
# NOTE(review): raw_counts is loaded but not referenced later in this script.
raw_counts <- syn("syn21411551") %>%
read_tsv()
# Sample metadata; number replicates within each experimental condition.
meta <- syn("syn21432975") %>%
read_csv() %>%
group_by(condition) %>%
mutate(
replicate = seq_len(n())
) %>%
ungroup()
deseq_pairwise <- syn("syn21432187") %>%
read_rds()
# Unload Synapse packages so they do not interfere with the rest of the session.
unloadNamespace("synExtra")
unloadNamespace("synapser")
unloadNamespace("PythonEmbedInR")
# Normalize counts -------------------------------------------------------------
###############################################################################T
counts_vst <- deseq_pairwise %>%
varianceStabilizingTransformation() %>%
assay()
# PCA with normalized counts ---------------------------------------------------
###############################################################################T
# NOTE(review): pca_data is not used below — pca_plot() runs its own prcomp().
pca_data <- counts_vst %>%
prcomp()
pca_plot <- function (data, meta, aes = ggplot2::aes(PC1, PC2), extra_layers = NULL, ...) {
  # Run PCA on `data` and plot the scores joined with sample metadata.
  #
  # data:         numeric matrix with samples in rows (passed to prcomp).
  # meta:         data frame with a `condition` column matching rownames(data).
  # aes:          aesthetic mapping applied to the PCA score table.
  # extra_layers: optional (list of) ggplot layers appended to the plot.
  # ...:          forwarded to prcomp (e.g. center =, scale =).
  #
  # Returns the ggplot object.
  #
  # Dead code removed (review): the original also built a variance table
  # (summary(p)$importance -> gridExtra::tableGrob) that was never used —
  # the patchwork composition consuming it was commented out and only the
  # plot was returned. Recompute from prcomp(data, ...) if it is needed again.
  p <- prcomp(data, ...)
  # Join PCA scores with metadata on the sample/condition identifier.
  ploadings <- p$x %>%
    as.data.frame() %>%
    tibble::rownames_to_column("condition") %>%
    tibble::as_tibble() %>%
    dplyr::inner_join(meta, by = "condition")
  p_plot <- ggplot(ploadings, aes)
  if (!is.null(extra_layers))
    p_plot <- p_plot + extra_layers
  p_plot
}
## Save all combinations of PC1-PC5
pca_plot_pc_param <- function(matrix, x, y, col_annotation, facet = NULL) {
  # Wrapper around pca_plot() that plots two chosen principal components
  # (x, y as bare column names) and encodes ERKi (fill), Time (size) and
  # DOX (outline color) from the sample annotation.
  # NOTE(review): `facet` is currently unused — faceting is fixed to
  # Time (rows) x ERKi (columns).
  layers <- list(
    geom_point(shape = 21, stroke = 1.5),
    # DOX == 0 gets a black outline; DOX == 1 a fully transparent one.
    scale_color_manual(values = c("0" = "#000000", "1" = "#00000000")),
    scale_fill_viridis_d(),
    guides(fill = guide_legend(override.aes = list(color = "#00000000"))),
    facet_grid(rows = vars(Time), cols = vars(ERKi)),
    # Label each point with its replicate number.
    ggrepel::geom_text_repel(aes(label = replicate), size = 4, color = "black", max.overlaps = Inf),
    theme_minimal(),
    theme(panel.grid.minor = element_blank())
  )
  pca_plot(
    matrix,
    arrange(col_annotation, condition),
    aes(
      !!ensym(x), !!ensym(y),
      fill = as.factor(ERKi), size = as.factor(Time), color = as.character(DOX)
    ),
    center = FALSE, scale = FALSE,
    extra_layers = layers
  )
}
# PC1 vs PC2 for every sample, faceted by time and ERKi concentration.
x <- pca_plot_pc_param(
t(counts_vst),
PC1, PC2,
meta %>%
mutate(condition = Sample_ID, across(replicate, as.character)),
facet = "Time"
) +
labs(fill = "ERKi\nconcentration", size = "Time", color = "DOX")
cowplot::ggsave2(
file.path(wd, "pca_all_replicates_minimal.pdf"),
x, width = 12, height = 10
)
# pca_plots <- combn(paste0("PC", 1:6), 2) %>%
# t() %>%
# `colnames<-`(c("x", "y")) %>%
# as_tibble() %>%
# crossing(
# tibble(facet = list(NULL, "Time", "ERKi"))
# ) %>%
# mutate(
# plot = pmap(
# .,
# pca_plot_pc_param,
# col_annotation = col_annotation
# )
# )
# Cross-correlation -----------------------------------------------------------
###############################################################################T
# Per-gene coefficient of variation of normalized counts.
# NOTE(review): coeff_variance is not referenced later in this script.
coeff_variance <- deseq_pairwise %>%
counts(normalized = TRUE) %>%
apply(MARGIN = 1, function(x) sd(x) / mean(x))
# Likelihood-ratio test against a Repeat-only model to rank genes that
# change across conditions (used below to select the top 1000 genes).
most_changing_genes <- DESeq(deseq_pairwise, test = "LRT", reduced = ~Repeat)
most_changing_genes_res <- most_changing_genes %>%
results(tidy = TRUE) %>%
as_tibble() %>%
arrange(padj)
# Pairwise Pearson correlation between samples, computed on the 1000 genes
# most significant in the LRT above; result is a long table
# (Sample_1, Sample_2, correlation).
correlations <- deseq_pairwise %>%
counts(normalized = TRUE) %>%
{
.[
most_changing_genes_res %>%
arrange(padj) %>%
head(n = 1000) %>%
pull(row),
]
} %>%
# scale() %>%
cor() %>%
as.table() %>%
as.data.frame() %>%
as_tibble() %>%
magrittr::set_colnames(c("Sample_1", "Sample_2", "correlation"))
# correlations <- counts_vst %>%
# cor() %>%
# as.table() %>%
# as.data.frame() %>%
# as_tibble() %>%
# magrittr::set_colnames(c("Sample_1", "Sample_2", "correlation"))
openxlsx::write.xlsx(
correlations,
file.path(wd, "sample_pairwise_correlations_normalized_counts.xlsx")
)
# Ordered sample sheet for heatmap axes; factors fix the display order.
# slice_head keeps one row per condition/Repeat pair.
meta_correlation <- meta %>%
arrange(
ERKi, Time, DOX, Repeat, replicate
) %>%
mutate(
sample_name = paste0(condition, "_", replicate) %>%
fct_inorder(),
across(c(Sample_ID, condition), fct_inorder)
) %>%
group_by(condition, Repeat) %>%
slice_head(n = 1) %>%
ungroup()
# Attach display names/conditions to both samples of every correlation pair.
correlation_heatmap_data <- correlations %>%
inner_join(
meta_correlation %>%
# filter(DOX == 1) %>%
dplyr::select(Sample_ID, sample_name_1 = sample_name, condition_1 = condition),
by = c("Sample_1" = "Sample_ID")
) %>%
inner_join(
meta_correlation %>%
# filter(DOX == 1) %>%
dplyr::select(Sample_ID, sample_name_2 = sample_name, condition_2 = condition),
by = c("Sample_2" = "Sample_ID")
) %>%
mutate(across(where(is.factor), fct_drop)) %>%
arrange(condition_1, condition_2, sample_name_1, sample_name_2)
# Tile heatmap of sample-sample correlations; black rectangles outline
# each condition-by-condition block. Correlations below 0.9 are squished
# to the color-scale floor.
correlation_heatmap <- correlation_heatmap_data %>%
ggplot(
aes(sample_name_1, sample_name_2, fill = correlation)
) +
geom_tile() +
scale_fill_viridis_c(direction = 1, limits = c(0.9, 1), oob = scales::squish) +
# scale_fill_distiller(palette = "RdBu", direction = -1, limits = c(0.90, 1), oob = scales::squish) +
geom_rect(
aes(
xmin = xmin, xmax = xmax,
# NOTE(review): ymin/ymax are mapped to the xmin/xmax columns —
# presumably intentional for a symmetric matrix, but verify.
ymin = xmin, ymax = xmax
),
inherit.aes = FALSE,
color = "black",
fill = NA,
data = correlation_heatmap_data %>%
mutate(across(starts_with("sample_name"), as.integer)) %>%
group_by(condition_1, condition_2) %>%
summarize(
xmin = head(sample_name_1, n = 1) - 0.5, xmax = tail(sample_name_2, n = 1) + 0.5,
ymin = head(sample_name_2, n = 1) - 0.5, ymax = tail(sample_name_1, n = 1) + 0.5,
) %>%
ungroup()
) +
theme(
axis.text.x = element_text(angle = 45, hjust = 1)
)
ggsave(file.path(wd, "correlation_heatmap_most_changing_genes.pdf"), correlation_heatmap, width = 15, height = 12)
# Heatmap with ComplexHeatmap so we can show time and ERKi concentration
library(ComplexHeatmap)
# Rebuild the sample sheet restricted to DOX-treated samples.
meta_correlation <- meta %>%
arrange(
ERKi, Time, DOX, Repeat, replicate
) %>%
filter(DOX == 1) %>%
mutate(
sample_name = paste0(condition, "_", replicate) %>%
fct_inorder(),
across(c(Sample_ID, condition), fct_inorder)
) %>%
group_by(condition, Repeat) %>%
slice_head(n = 1) %>%
ungroup()
# Column-side annotation: ERKi (viridis) and Time (magma) color bars;
# `a` is an empty spacer track.
annotation_col <- HeatmapAnnotation(
df = meta_correlation %>%
select(ERKi, Time) %>%
mutate(across(.fns = ~fct_inorder(as.character(.x)))) %>%
as.data.frame(),
a = anno_empty(border = FALSE, height = unit(5, "points")),
col = list(
ERKi = unique(meta_correlation[["ERKi"]]) %>% {
set_names(
viridisLite::viridis(n = length(.), direction = -1),
.
)
},
Time = unique(meta_correlation[["Time"]]) %>% {
set_names(
viridisLite::magma(n = length(.), direction = -1),
.
)
}
)
)
# Row-side annotation: same tracks, in reversed order to match the flipped
# row order of the matrix below; legend suppressed (shared with columns).
annotation_row <- HeatmapAnnotation(
df = meta_correlation %>%
select(ERKi, Time) %>%
mutate(across(.fns = ~fct_inorder(as.character(.x)))) %>%
arrange(desc(row_number())) %>%
as.data.frame(),
a = anno_empty(border = FALSE, width = unit(5, "points")),
col = list(
ERKi = unique(meta_correlation[["ERKi"]]) %>% {
set_names(
viridisLite::viridis(n = length(.), direction = -1),
.
)
},
Time = unique(meta_correlation[["Time"]]) %>% {
set_names(
viridisLite::magma(n = length(.), direction = -1),
.
)
}
),
which = "row",
show_legend = FALSE
)
# Correlation pairs restricted to DOX-treated samples, with display names.
correlation_heatmap_data <- correlations %>%
inner_join(
meta_correlation %>%
filter(DOX == 1) %>%
dplyr::select(Sample_ID, sample_name_1 = sample_name, condition_1 = condition),
by = c("Sample_1" = "Sample_ID")
) %>%
inner_join(
meta_correlation %>%
filter(DOX == 1) %>%
dplyr::select(Sample_ID, sample_name_2 = sample_name, condition_2 = condition),
by = c("Sample_2" = "Sample_ID")
) %>%
mutate(across(where(is.factor), fct_drop)) %>%
arrange(condition_1, condition_2, sample_name_1, sample_name_2)
# Wide matrix (correlations floored at 0.90), rows reversed, split into
# condition blocks; diagonal cells get a white outline via cell_fun.
complex_correlation_heatmap <- correlation_heatmap_data %>%
mutate(correlation = pmax(correlation, 0.90)) %>%
pivot_wider(id_cols = c("Sample_1"), names_from = "Sample_2", values_from = "correlation") %>%
arrange(desc(row_number())) %>%
column_to_rownames("Sample_1") %>%
as.matrix() %>%
magrittr::set_colnames(NULL) %>%
magrittr::set_rownames(NULL) %>%
Heatmap(
col = viridis::viridis(100, direction = 1),
cluster_rows = FALSE,
cluster_columns = FALSE,
row_split = meta_correlation[["condition"]],
column_split = meta_correlation[["condition"]],
row_gap = unit(0, "points"),
column_gap = unit(0, "points"),
# border = TRUE,
show_row_names = FALSE,
show_column_names = FALSE,
top_annotation = annotation_col,
left_annotation = annotation_row,
row_title = NULL,
column_title = NULL,
heatmap_legend_param = list(title = "Correlation"),
cell_fun = function(j, i, x, y, width, height, fill) {
if (i == j)
# NOTE(review): gpar() takes `col`, not `color` (the analogous
# cell_fun later in this file uses `col = "white"`), and no
# x/y/width/height are passed here — verify this draws as intended.
grid.rect(gp = gpar(fill = "transparent", lwd = 2, color = "white"))
}
)
withr::with_pdf(
file.path(wd, "correlation_heatmap_annotation_with_gap_most_changing_genes.pdf"),
draw(complex_correlation_heatmap),
width = 15, height = 12
)
# Pairwise cross-correlation ---------------------------------------------------
###############################################################################T
# correlations <- deseq_pairwise %>%
# counts(normalized = TRUE) %>%
# cor() %>%
# as.table() %>%
# as.data.frame() %>%
# as_tibble() %>%
# magrittr::set_colnames(c("Sample_1", "Sample_2", "correlation"))
# Build every Repeat_1 x Repeat_2 sample pairing: pivot the sample sheet
# wide by Repeat, expand the full cross of IDs, then join the per-repeat
# names/conditions back on.
meta_correlation <- meta %>%
group_by(condition, Repeat) %>%
slice_head(n = 1) %>%
ungroup() %>%
arrange(
ERKi, Time, DOX, Repeat, replicate
) %>%
transmute(
condition,
Sample_ID,
sample_name = paste0(condition, "_", Repeat) %>%
fct_inorder(),
Repeat = paste0("Repeat_", Repeat)
) %>% {
# browser()
df <- .
pivot_wider(
.,
id_cols = "condition",
names_from = "Repeat",
values_from = c("sample_name", "Sample_ID")
) %>%
tidyr::expand(
Sample_ID_Repeat_1, Sample_ID_Repeat_2
) %>%
inner_join(
rename_with(df, ~paste0(.x, "_Repeat_1")),
by = "Sample_ID_Repeat_1"
) %>%
inner_join(
rename_with(df, ~paste0(.x, "_Repeat_2")),
by = c("Sample_ID_Repeat_2")
)
}
# Keep only Repeat_1-vs-Repeat_2 correlations and attach display names.
correlation_heatmap_data <- correlations %>%
inner_join(
meta_correlation,
by = c("Sample_1" = "Sample_ID_Repeat_1", "Sample_2" = "Sample_ID_Repeat_2")
)
# Repeat 1 vs repeat 2 correlation heatmap (floor of color scale at 0.90).
correlation_heatmap <- correlation_heatmap_data %>%
ggplot(
aes(sample_name_Repeat_1, sample_name_Repeat_2, fill = correlation)
) +
geom_tile() +
scale_fill_viridis_c(direction = 1, limits = c(0.90, 1), oob = scales::squish) +
theme(
axis.text.x = element_text(angle = 45, hjust = 1)
)
ggsave(file.path(wd, "correlation_heatmap_pairwise.pdf"), correlation_heatmap, width = 12, height = 10)
# Heatmap with ComplexHeatmap so we can show time and ERKi concentration
library(ComplexHeatmap)
# NOTE(review): this meta_correlation is immediately overwritten by the
# next assignment and never used — dead code.
meta_correlation <- meta %>%
arrange(
ERKi, Time, DOX, Repeat, replicate
) %>%
mutate(
sample_name = paste0(condition, "_", replicate) %>%
fct_inorder(),
across(c(Sample_ID, condition), fct_inorder)
) %>%
group_by(condition, Repeat) %>%
slice_head(n = 1) %>%
ungroup()
# One row per condition/Repeat pair, ordered for the heatmap axes
# (DOX descending so treated samples come first).
meta_correlation <- meta %>%
group_by(condition, Repeat) %>%
slice_head(n = 1) %>%
ungroup() %>%
arrange(
ERKi, Time, desc(DOX), Repeat, replicate
)
# Column annotation built from Repeat-1 samples: ERKi, Time and DOX bars.
annotation_col <- HeatmapAnnotation(
df = meta_correlation %>%
filter(Repeat == 1) %>%
select(ERKi, Time, DOX) %>%
mutate(across(.fns = ~fct_inorder(as.character(.x)))) %>%
as.data.frame(),
a = anno_empty(border = FALSE, height = unit(5, "points")),
col = list(
ERKi = unique(meta_correlation[["ERKi"]]) %>% {
set_names(
viridisLite::viridis(n = length(.), direction = -1),
.
)
},
Time = unique(meta_correlation[["Time"]]) %>% {
set_names(
viridisLite::magma(n = length(.), direction = -1),
.
)
},
DOX = c("1" = "black", "0" = "white")
)
)
# Row annotation built from Repeat-2 samples, reversed to match the
# flipped row order of the matrix; legend shared with the columns.
annotation_row <- HeatmapAnnotation(
df = meta_correlation %>%
filter(Repeat == 2) %>%
select(ERKi, Time, DOX) %>%
mutate(across(.fns = ~fct_inorder(as.character(.x)))) %>%
arrange(desc(row_number())) %>%
as.data.frame(),
a = anno_empty(border = FALSE, width = unit(5, "points")),
col = list(
ERKi = unique(meta_correlation[["ERKi"]]) %>% {
set_names(
viridisLite::viridis(n = length(.), direction = -1),
.
)
},
Time = unique(meta_correlation[["Time"]]) %>% {
set_names(
viridisLite::magma(n = length(.), direction = -1),
.
)
},
DOX = c("1" = "black", "0" = "white")
),
which = "row",
show_legend = FALSE
)
# Full cross of Repeat-1 (columns) vs Repeat-2 (rows) samples, ordered to
# match meta_correlation, then joined to the correlation table.
correlation_heatmap_data <- crossing(
meta_correlation %>%
filter(Repeat == 1) %>%
select(Sample_1 = Sample_ID),
meta_correlation %>%
filter(Repeat == 2) %>%
select(Sample_2 = Sample_ID),
) %>%
arrange(
match(Sample_1, meta_correlation$Sample_ID),
match(Sample_2, meta_correlation$Sample_ID)
) %>%
inner_join(
correlations
)
# Repeat-1 x Repeat-2 matrix (floored at 0.90), rows reversed; cells on the
# anti-diagonal (matching conditions) get a white outline.
complex_correlation_heatmap <- correlation_heatmap_data %>%
mutate(correlation = pmax(correlation, 0.90)) %>%
pivot_wider(names_from = Sample_1, values_from = correlation) %>%
arrange(desc(row_number())) %>%
column_to_rownames("Sample_2") %>%
as.matrix() %>%
# magrittr::set_colnames(NULL) %>%
# magrittr::set_rownames(NULL) %>%
Heatmap(
col = viridis::viridis(100, direction = 1),
cluster_rows = FALSE,
cluster_columns = FALSE,
# row_split = filter(meta_correlation, Repeat == 1)[["condition"]],
# column_split = filter(meta_correlation, Repeat == 2)[["condition"]],
row_gap = unit(0, "points"),
column_gap = unit(0, "points"),
# border = TRUE,
show_row_names = FALSE,
show_column_names = FALSE,
top_annotation = annotation_col,
left_annotation = annotation_row,
row_title = NULL,
column_title = NULL,
heatmap_legend_param = list(title = "Correlation"),
cell_fun = function(j, i, x, y, width, height, fill) {
# NOTE(review): 44 is presumably the matrix dimension + 1 (hard-coded
# for the reversed row order) — confirm and derive from the data.
if (44 - i == j)
grid.rect(x, y, width, height, gp = gpar(fill = "transparent", lwd = 2, col = "white"))
}
)
withr::with_pdf(
file.path(wd, "correlation_heatmap_pairwise_annotation_most_changing_genes.pdf"),
draw(complex_correlation_heatmap),
width = 15, height = 12
)
| /analysis/replicate_similarity.R | no_license | clemenshug/erk_senescence | R | false | false | 16,233 | r | library(tidyverse)
library(here)
library(DESeq2)
# (Second copy of the replicate-analysis script in this concatenated dump;
# its leading library(tidyverse) line sits on the separator line above.)
synapser::synLogin()
syn <- synExtra::synDownloader(here("tempdl"), followLink = TRUE)
wd <- here("replicate_analysis")
dir.create(wd, recursive = TRUE, showWarnings = FALSE)
# set directories, import files ------------------------------------------------
###############################################################################T
# NOTE(review): raw_counts is loaded but not referenced later in this script.
raw_counts <- syn("syn21411551") %>%
read_tsv()
meta <- syn("syn21432975") %>%
read_csv() %>%
group_by(condition) %>%
mutate(
replicate = seq_len(n())
) %>%
ungroup()
deseq_pairwise <- syn("syn21432187") %>%
read_rds()
# Unload Synapse packages so they do not interfere with the rest of the session.
unloadNamespace("synExtra")
unloadNamespace("synapser")
unloadNamespace("PythonEmbedInR")
# Normalize counts -------------------------------------------------------------
###############################################################################T
counts_vst <- deseq_pairwise %>%
varianceStabilizingTransformation() %>%
assay()
# PCA with normalized counts ---------------------------------------------------
###############################################################################T
# NOTE(review): pca_data is not used below — pca_plot() runs its own prcomp().
pca_data <- counts_vst %>%
prcomp()
pca_plot <- function (data, meta, aes = ggplot2::aes(PC1, PC2), extra_layers = NULL, ...) {
  # Run PCA on `data` and plot the scores joined with sample metadata.
  #
  # data:         numeric matrix with samples in rows (passed to prcomp).
  # meta:         data frame with a `condition` column matching rownames(data).
  # aes:          aesthetic mapping applied to the PCA score table.
  # extra_layers: optional (list of) ggplot layers appended to the plot.
  # ...:          forwarded to prcomp (e.g. center =, scale =).
  #
  # Returns the ggplot object.
  #
  # Dead code removed (review): the original also built a variance table
  # (summary(p)$importance -> gridExtra::tableGrob) that was never used —
  # the patchwork composition consuming it was commented out and only the
  # plot was returned. Recompute from prcomp(data, ...) if it is needed again.
  p <- prcomp(data, ...)
  # Join PCA scores with metadata on the sample/condition identifier.
  ploadings <- p$x %>%
    as.data.frame() %>%
    tibble::rownames_to_column("condition") %>%
    tibble::as_tibble() %>%
    dplyr::inner_join(meta, by = "condition")
  p_plot <- ggplot(ploadings, aes)
  if (!is.null(extra_layers))
    p_plot <- p_plot + extra_layers
  p_plot
}
## Save all combinations of PC1-PC5
pca_plot_pc_param <- function(matrix, x, y, col_annotation, facet = NULL) {
  # Wrapper around pca_plot() that plots two chosen principal components
  # (x, y as bare column names) and encodes ERKi (fill), Time (size) and
  # DOX (outline color) from the sample annotation.
  # NOTE(review): `facet` is currently unused — faceting is fixed to
  # Time (rows) x ERKi (columns).
  layers <- list(
    geom_point(shape = 21, stroke = 1.5),
    # DOX == 0 gets a black outline; DOX == 1 a fully transparent one.
    scale_color_manual(values = c("0" = "#000000", "1" = "#00000000")),
    scale_fill_viridis_d(),
    guides(fill = guide_legend(override.aes = list(color = "#00000000"))),
    facet_grid(rows = vars(Time), cols = vars(ERKi)),
    # Label each point with its replicate number.
    ggrepel::geom_text_repel(aes(label = replicate), size = 4, color = "black", max.overlaps = Inf),
    theme_minimal(),
    theme(panel.grid.minor = element_blank())
  )
  pca_plot(
    matrix,
    arrange(col_annotation, condition),
    aes(
      !!ensym(x), !!ensym(y),
      fill = as.factor(ERKi), size = as.factor(Time), color = as.character(DOX)
    ),
    center = FALSE, scale = FALSE,
    extra_layers = layers
  )
}
# PC1 vs PC2 for every sample, faceted by time and ERKi concentration.
x <- pca_plot_pc_param(
t(counts_vst),
PC1, PC2,
meta %>%
mutate(condition = Sample_ID, across(replicate, as.character)),
facet = "Time"
) +
labs(fill = "ERKi\nconcentration", size = "Time", color = "DOX")
cowplot::ggsave2(
file.path(wd, "pca_all_replicates_minimal.pdf"),
x, width = 12, height = 10
)
# pca_plots <- combn(paste0("PC", 1:6), 2) %>%
# t() %>%
# `colnames<-`(c("x", "y")) %>%
# as_tibble() %>%
# crossing(
# tibble(facet = list(NULL, "Time", "ERKi"))
# ) %>%
# mutate(
# plot = pmap(
# .,
# pca_plot_pc_param,
# col_annotation = col_annotation
# )
# )
# Cross-correlation -----------------------------------------------------------
###############################################################################T
# Per-gene coefficient of variation of normalized counts.
# NOTE(review): coeff_variance is not referenced later in this script.
coeff_variance <- deseq_pairwise %>%
counts(normalized = TRUE) %>%
apply(MARGIN = 1, function(x) sd(x) / mean(x))
# Likelihood-ratio test against a Repeat-only model to rank genes that
# change across conditions (used below to select the top 1000 genes).
most_changing_genes <- DESeq(deseq_pairwise, test = "LRT", reduced = ~Repeat)
most_changing_genes_res <- most_changing_genes %>%
results(tidy = TRUE) %>%
as_tibble() %>%
arrange(padj)
# Pairwise Pearson correlation between samples on the 1000 most significant
# LRT genes; long table (Sample_1, Sample_2, correlation).
correlations <- deseq_pairwise %>%
counts(normalized = TRUE) %>%
{
.[
most_changing_genes_res %>%
arrange(padj) %>%
head(n = 1000) %>%
pull(row),
]
} %>%
# scale() %>%
cor() %>%
as.table() %>%
as.data.frame() %>%
as_tibble() %>%
magrittr::set_colnames(c("Sample_1", "Sample_2", "correlation"))
# correlations <- counts_vst %>%
# cor() %>%
# as.table() %>%
# as.data.frame() %>%
# as_tibble() %>%
# magrittr::set_colnames(c("Sample_1", "Sample_2", "correlation"))
openxlsx::write.xlsx(
correlations,
file.path(wd, "sample_pairwise_correlations_normalized_counts.xlsx")
)
# Ordered sample sheet for heatmap axes; factors fix the display order.
meta_correlation <- meta %>%
arrange(
ERKi, Time, DOX, Repeat, replicate
) %>%
mutate(
sample_name = paste0(condition, "_", replicate) %>%
fct_inorder(),
across(c(Sample_ID, condition), fct_inorder)
) %>%
group_by(condition, Repeat) %>%
slice_head(n = 1) %>%
ungroup()
# Attach display names/conditions to both samples of every correlation pair.
correlation_heatmap_data <- correlations %>%
inner_join(
meta_correlation %>%
# filter(DOX == 1) %>%
dplyr::select(Sample_ID, sample_name_1 = sample_name, condition_1 = condition),
by = c("Sample_1" = "Sample_ID")
) %>%
inner_join(
meta_correlation %>%
# filter(DOX == 1) %>%
dplyr::select(Sample_ID, sample_name_2 = sample_name, condition_2 = condition),
by = c("Sample_2" = "Sample_ID")
) %>%
mutate(across(where(is.factor), fct_drop)) %>%
arrange(condition_1, condition_2, sample_name_1, sample_name_2)
# Tile heatmap of sample-sample correlations; black rectangles outline
# each condition-by-condition block. Values below 0.9 are squished.
correlation_heatmap <- correlation_heatmap_data %>%
ggplot(
aes(sample_name_1, sample_name_2, fill = correlation)
) +
geom_tile() +
scale_fill_viridis_c(direction = 1, limits = c(0.9, 1), oob = scales::squish) +
# scale_fill_distiller(palette = "RdBu", direction = -1, limits = c(0.90, 1), oob = scales::squish) +
geom_rect(
aes(
xmin = xmin, xmax = xmax,
# NOTE(review): ymin/ymax are mapped to the xmin/xmax columns —
# presumably intentional for a symmetric matrix, but verify.
ymin = xmin, ymax = xmax
),
inherit.aes = FALSE,
color = "black",
fill = NA,
data = correlation_heatmap_data %>%
mutate(across(starts_with("sample_name"), as.integer)) %>%
group_by(condition_1, condition_2) %>%
summarize(
xmin = head(sample_name_1, n = 1) - 0.5, xmax = tail(sample_name_2, n = 1) + 0.5,
ymin = head(sample_name_2, n = 1) - 0.5, ymax = tail(sample_name_1, n = 1) + 0.5,
) %>%
ungroup()
) +
theme(
axis.text.x = element_text(angle = 45, hjust = 1)
)
ggsave(file.path(wd, "correlation_heatmap_most_changing_genes.pdf"), correlation_heatmap, width = 15, height = 12)
# Heatmap with ComplexHeatmap so we can show time and ERKi concentration
library(ComplexHeatmap)
# Rebuild the sample sheet restricted to DOX-treated samples.
meta_correlation <- meta %>%
arrange(
ERKi, Time, DOX, Repeat, replicate
) %>%
filter(DOX == 1) %>%
mutate(
sample_name = paste0(condition, "_", replicate) %>%
fct_inorder(),
across(c(Sample_ID, condition), fct_inorder)
) %>%
group_by(condition, Repeat) %>%
slice_head(n = 1) %>%
ungroup()
# Column-side annotation: ERKi (viridis) and Time (magma) color bars;
# `a` is an empty spacer track.
annotation_col <- HeatmapAnnotation(
df = meta_correlation %>%
select(ERKi, Time) %>%
mutate(across(.fns = ~fct_inorder(as.character(.x)))) %>%
as.data.frame(),
a = anno_empty(border = FALSE, height = unit(5, "points")),
col = list(
ERKi = unique(meta_correlation[["ERKi"]]) %>% {
set_names(
viridisLite::viridis(n = length(.), direction = -1),
.
)
},
Time = unique(meta_correlation[["Time"]]) %>% {
set_names(
viridisLite::magma(n = length(.), direction = -1),
.
)
}
)
)
# Row-side annotation: same tracks, reversed to match the flipped row
# order of the matrix below; legend suppressed (shared with columns).
annotation_row <- HeatmapAnnotation(
df = meta_correlation %>%
select(ERKi, Time) %>%
mutate(across(.fns = ~fct_inorder(as.character(.x)))) %>%
arrange(desc(row_number())) %>%
as.data.frame(),
a = anno_empty(border = FALSE, width = unit(5, "points")),
col = list(
ERKi = unique(meta_correlation[["ERKi"]]) %>% {
set_names(
viridisLite::viridis(n = length(.), direction = -1),
.
)
},
Time = unique(meta_correlation[["Time"]]) %>% {
set_names(
viridisLite::magma(n = length(.), direction = -1),
.
)
}
),
which = "row",
show_legend = FALSE
)
# Correlation pairs restricted to DOX-treated samples, with display names.
correlation_heatmap_data <- correlations %>%
inner_join(
meta_correlation %>%
filter(DOX == 1) %>%
dplyr::select(Sample_ID, sample_name_1 = sample_name, condition_1 = condition),
by = c("Sample_1" = "Sample_ID")
) %>%
inner_join(
meta_correlation %>%
filter(DOX == 1) %>%
dplyr::select(Sample_ID, sample_name_2 = sample_name, condition_2 = condition),
by = c("Sample_2" = "Sample_ID")
) %>%
mutate(across(where(is.factor), fct_drop)) %>%
arrange(condition_1, condition_2, sample_name_1, sample_name_2)
# Wide matrix (correlations floored at 0.90), rows reversed, split into
# condition blocks; diagonal cells get a white outline via cell_fun.
complex_correlation_heatmap <- correlation_heatmap_data %>%
mutate(correlation = pmax(correlation, 0.90)) %>%
pivot_wider(id_cols = c("Sample_1"), names_from = "Sample_2", values_from = "correlation") %>%
arrange(desc(row_number())) %>%
column_to_rownames("Sample_1") %>%
as.matrix() %>%
magrittr::set_colnames(NULL) %>%
magrittr::set_rownames(NULL) %>%
Heatmap(
col = viridis::viridis(100, direction = 1),
cluster_rows = FALSE,
cluster_columns = FALSE,
row_split = meta_correlation[["condition"]],
column_split = meta_correlation[["condition"]],
row_gap = unit(0, "points"),
column_gap = unit(0, "points"),
# border = TRUE,
show_row_names = FALSE,
show_column_names = FALSE,
top_annotation = annotation_col,
left_annotation = annotation_row,
row_title = NULL,
column_title = NULL,
heatmap_legend_param = list(title = "Correlation"),
cell_fun = function(j, i, x, y, width, height, fill) {
if (i == j)
# NOTE(review): gpar() takes `col`, not `color` (the analogous
# cell_fun later in this file uses `col = "white"`), and no
# x/y/width/height are passed here — verify this draws as intended.
grid.rect(gp = gpar(fill = "transparent", lwd = 2, color = "white"))
}
)
withr::with_pdf(
file.path(wd, "correlation_heatmap_annotation_with_gap_most_changing_genes.pdf"),
draw(complex_correlation_heatmap),
width = 15, height = 12
)
# Pairwise cross-correlation ---------------------------------------------------
###############################################################################T
# correlations <- deseq_pairwise %>%
# counts(normalized = TRUE) %>%
# cor() %>%
# as.table() %>%
# as.data.frame() %>%
# as_tibble() %>%
# magrittr::set_colnames(c("Sample_1", "Sample_2", "correlation"))
# Build every Repeat_1 x Repeat_2 sample pairing: pivot the sample sheet
# wide by Repeat, expand the full cross of IDs, then join the per-repeat
# names/conditions back on.
meta_correlation <- meta %>%
group_by(condition, Repeat) %>%
slice_head(n = 1) %>%
ungroup() %>%
arrange(
ERKi, Time, DOX, Repeat, replicate
) %>%
transmute(
condition,
Sample_ID,
sample_name = paste0(condition, "_", Repeat) %>%
fct_inorder(),
Repeat = paste0("Repeat_", Repeat)
) %>% {
# browser()
df <- .
pivot_wider(
.,
id_cols = "condition",
names_from = "Repeat",
values_from = c("sample_name", "Sample_ID")
) %>%
tidyr::expand(
Sample_ID_Repeat_1, Sample_ID_Repeat_2
) %>%
inner_join(
rename_with(df, ~paste0(.x, "_Repeat_1")),
by = "Sample_ID_Repeat_1"
) %>%
inner_join(
rename_with(df, ~paste0(.x, "_Repeat_2")),
by = c("Sample_ID_Repeat_2")
)
}
# Keep only Repeat_1-vs-Repeat_2 correlations and attach display names.
correlation_heatmap_data <- correlations %>%
inner_join(
meta_correlation,
by = c("Sample_1" = "Sample_ID_Repeat_1", "Sample_2" = "Sample_ID_Repeat_2")
)
# Repeat 1 vs repeat 2 correlation heatmap (floor of color scale at 0.90).
correlation_heatmap <- correlation_heatmap_data %>%
ggplot(
aes(sample_name_Repeat_1, sample_name_Repeat_2, fill = correlation)
) +
geom_tile() +
scale_fill_viridis_c(direction = 1, limits = c(0.90, 1), oob = scales::squish) +
theme(
axis.text.x = element_text(angle = 45, hjust = 1)
)
ggsave(file.path(wd, "correlation_heatmap_pairwise.pdf"), correlation_heatmap, width = 12, height = 10)
# Heatmap with ComplexHeatmap so we can show time and ERKi concentration
library(ComplexHeatmap)
meta_correlation <- meta %>%
arrange(
ERKi, Time, DOX, Repeat, replicate
) %>%
mutate(
sample_name = paste0(condition, "_", replicate) %>%
fct_inorder(),
across(c(Sample_ID, condition), fct_inorder)
) %>%
group_by(condition, Repeat) %>%
slice_head(n = 1) %>%
ungroup()
meta_correlation <- meta %>%
group_by(condition, Repeat) %>%
slice_head(n = 1) %>%
ungroup() %>%
arrange(
ERKi, Time, desc(DOX), Repeat, replicate
)
# Column (top) annotation: ERKi / Time / DOX tracks for Repeat 1 samples,
# plus a small empty spacer track.
annotation_col <- HeatmapAnnotation(
  df = meta_correlation %>%
    filter(Repeat == 1) %>%
    select(ERKi, Time, DOX) %>%
    mutate(across(.fns = ~fct_inorder(as.character(.x)))) %>%
    as.data.frame(),
  a = anno_empty(border = FALSE, height = unit(5, "points")),
  col = list(
    # one reversed-viridis color per distinct ERKi concentration
    ERKi = unique(meta_correlation[["ERKi"]]) %>% {
      set_names(
        viridisLite::viridis(n = length(.), direction = -1),
        .
      )
    },
    # one reversed-magma color per distinct time point
    Time = unique(meta_correlation[["Time"]]) %>% {
      set_names(
        viridisLite::magma(n = length(.), direction = -1),
        .
      )
    },
    DOX = c("1" = "black", "0" = "white")
  )
)
# Row (left) annotation: the same tracks for Repeat 2 samples; rows are
# reversed to match the flipped row order of the heatmap matrix below.
annotation_row <- HeatmapAnnotation(
  df = meta_correlation %>%
    filter(Repeat == 2) %>%
    select(ERKi, Time, DOX) %>%
    mutate(across(.fns = ~fct_inorder(as.character(.x)))) %>%
    arrange(desc(row_number())) %>%
    as.data.frame(),
  a = anno_empty(border = FALSE, width = unit(5, "points")),
  col = list(
    # color maps intentionally identical to annotation_col so legends match
    ERKi = unique(meta_correlation[["ERKi"]]) %>% {
      set_names(
        viridisLite::viridis(n = length(.), direction = -1),
        .
      )
    },
    Time = unique(meta_correlation[["Time"]]) %>% {
      set_names(
        viridisLite::magma(n = length(.), direction = -1),
        .
      )
    },
    DOX = c("1" = "black", "0" = "white")
  ),
  which = "row",
  show_legend = FALSE
)
# All Repeat 1 x Repeat 2 sample pairs, ordered to match the annotations,
# joined to their correlation values (join columns inferred by inner_join's
# natural join on Sample_1/Sample_2).
correlation_heatmap_data <- crossing(
  meta_correlation %>%
    filter(Repeat == 1) %>%
    select(Sample_1 = Sample_ID),
  meta_correlation %>%
    filter(Repeat == 2) %>%
    select(Sample_2 = Sample_ID),
) %>%
  arrange(
    match(Sample_1, meta_correlation$Sample_ID),
    match(Sample_2, meta_correlation$Sample_ID)
  ) %>%
  inner_join(
    correlations
  )
# Wide matrix of correlations (floored at 0.90), rows reversed so Repeat 2
# samples run bottom-to-top; drawn without clustering to preserve the
# annotation ordering.
complex_correlation_heatmap <- correlation_heatmap_data %>%
  mutate(correlation = pmax(correlation, 0.90)) %>%
  pivot_wider(names_from = Sample_1, values_from = correlation) %>%
  arrange(desc(row_number())) %>%
  column_to_rownames("Sample_2") %>%
  as.matrix() %>%
  # magrittr::set_colnames(NULL) %>%
  # magrittr::set_rownames(NULL) %>%
  Heatmap(
    col = viridis::viridis(100, direction = 1),
    cluster_rows = FALSE,
    cluster_columns = FALSE,
    # row_split = filter(meta_correlation, Repeat == 1)[["condition"]],
    # column_split = filter(meta_correlation, Repeat == 2)[["condition"]],
    row_gap = unit(0, "points"),
    column_gap = unit(0, "points"),
    # border = TRUE,
    show_row_names = FALSE,
    show_column_names = FALSE,
    top_annotation = annotation_col,
    left_annotation = annotation_row,
    row_title = NULL,
    column_title = NULL,
    heatmap_legend_param = list(title = "Correlation"),
    # Outline the anti-diagonal cells in white. The hard-coded 44 assumes a
    # 43-row matrix (rows were reversed above) -- TODO confirm sample count.
    cell_fun = function(j, i, x, y, width, height, fill) {
      if (44 - i == j)
        grid.rect(x, y, width, height, gp = gpar(fill = "transparent", lwd = 2, col = "white"))
    }
  )
# Render to PDF; withr::with_pdf opens the device, evaluates draw(), and
# guarantees the device is closed afterwards.
withr::with_pdf(
  file.path(wd, "correlation_heatmap_pairwise_annotation_most_changing_genes.pdf"),
  draw(complex_correlation_heatmap),
  width = 15, height = 12
)
|
## CLIME precision-matrix estimator using a Kendall rank correlation matrix.
##
## For each penalty lambda, estimates a sparse precision matrix Omega by
## solving min ||omega_j||_1 s.t. ||Sigma %*% omega_j - e_j||_inf <= lambda
## column by column, then symmetrizes by keeping, for every (i, j), the entry
## of smaller magnitude among Omega[i, j] and Omega[j, i].
##
## Args:
##   x          n x p data matrix, or a covariance-type matrix if sigma = TRUE.
##   lambda     optional penalty path; when NULL, `nlambda` values in
##              [lambda.min, lambda.max] are generated (log-spaced if
##              `logspaced`).
##   sigma      TRUE when `x` already is a covariance/correlation matrix.
##   perturb    TRUE for an automatic diagonal ridge, FALSE for none, or a
##              numeric amount to add to the diagonal.
##   standardize  scale columns of x before computing the Kendall correlation.
##   linsolver  "primaldual" (linprogPD) or "simplex" (linprogS).
##   pdtol, pdmaxiter  tolerances passed through to linprogPD.
## Returns: a list of class "clime" with Omegalist, x, lambda, perturb,
##   standardize, and lpfun.
clime2 <- function (x, lambda = NULL, nlambda = ifelse(is.null(lambda), 100, length(lambda)), lambda.max = 0.8,
    lambda.min = ifelse(nrow(x) > ncol(x), 1e-04, 0.01), sigma = FALSE, perturb = TRUE, standardize = TRUE,
    logspaced = TRUE, linsolver = c("primaldual", "simplex"), pdtol = 0.001, pdmaxiter = 50)
{
  lpfun <- match.arg(linsolver, c("primaldual", "simplex"))
  ## Build the default lambda path up front. The original generated it only
  ## inside the sigma = FALSE branch, so sigma = TRUE with lambda = NULL
  ## failed later when indexing lambda[jl]. Doing it here (before x may be
  ## set to NULL) also keeps the lambda.min default well defined.
  if (is.null(lambda)) {
    if (logspaced) {
      lambda <- 10^(seq(log10(lambda.min), log10(lambda.max),
                        length.out = nlambda))
    }
    else {
      lambda <- seq(lambda.min, lambda.max, length.out = nlambda)
    }
  }
  if (sigma) {
    ## x already is (or is coercible to) a covariance/correlation matrix.
    Sigma <- if (is.matrix(x)) x else as.matrix(x)
    p <- ncol(Sigma)
    x <- NULL
  }
  else {
    p <- ncol(x)
    if (standardize) {
      x <- scale(x)
    }
    ## Rank-based (Kendall) correlation; unit diagonal enforced explicitly.
    Cov <- cov(x, method = "kendall")
    diag(Cov) <- 1
    Sigma <- Cov
  }
  eigvals <- eigen(Sigma, only.values = TRUE)$values
  if (is.logical(perturb)) {
    if (perturb) {
      ## Automatic ridge that makes the perturbed Sigma safely invertible.
      perturb <- max(max(eigvals) - p * min(eigvals), 0)/(p - 1)
    }
    else {
      perturb <- 0
    }
  }
  Sigma <- Sigma + diag(p) * perturb
  emat <- diag(p)
  Omegalist <- vector("list", nlambda)
  if (lpfun == "simplex") {
    for (jl in 1:nlambda) {
      Omega <- matrix(0, nrow = p, ncol = p)
      lam <- lambda[jl]
      for (j in 1:p) {
        beta <- linprogS(Sigma, emat[, j], lam)
        Omega[, j] <- beta
      }
      ## Symmetrize: keep the smaller-magnitude entry of each (i, j) pair.
      Omegalist[[jl]] <- Omega * (abs(Omega) <= abs(t(Omega))) +
        t(Omega) * (abs(Omega) > abs(t(Omega)))
    }
  }
  if (lpfun == "primaldual") {
    ## Warm start every column solve from the inverse of the perturbed Sigma.
    Omega0 <- solve(Sigma)
    for (jl in 1:nlambda) {
      Omega <- matrix(0, nrow = p, ncol = p)
      lam <- lambda[jl]
      for (j in 1:p) {
        beta <- linprogPD(Omega0[, j], Sigma, emat[, j], lam, pdtol, pdmaxiter)
        Omega[, j] <- beta
      }
      Omegalist[[jl]] <- Omega * (abs(Omega) <= abs(t(Omega))) +
        t(Omega) * (abs(Omega) > abs(t(Omega)))
    }
  }
  outlist <- list(Omegalist = Omegalist, x = x, lambda = lambda,
    perturb = perturb, standardize = standardize, lpfun = lpfun)
  class(outlist) <- "clime"
  outlist
}
linprogPD <- function(x0, A, b, epsilon, pdtol=1e-3, pdmaxiter=50) {
  ## Solves
  ##   min ||x||_1  subject to  ||Ax - b||_inf <= epsilon
  ## with a primal-dual interior-point method.
  ## Adapted from Matlab code for Dantzig Selector by J. Romberg.
  ##
  ## Args:
  ##   x0        feasible starting point (||A x0 - b||_inf <= epsilon).
  ##   A, b      constraint matrix and target vector.
  ##   epsilon   constraint slack.
  ##   pdtol     surrogate-duality-gap tolerance.
  ##   pdmaxiter maximum number of primal-dual iterations.
  ## Returns the (approximate) minimizer as an N x 1 matrix.
  N <- length(x0)
  x0 <- matrix(x0, nrow=N)
  b <- matrix(b, nrow=N)
  alpha <- 0.01          # backtracking sufficient-decrease parameter
  beta <- 0.5            # backtracking step shrink factor
  mu <- 10               # barrier update factor
  gradf0 <- matrix(rep(c(0,1), each=N), nrow=2*N)
  if (max(abs(A%*%x0 - b)) > epsilon) {
    stop("Infeasible starting point!")
  }
  x <- x0
  ## Auxiliary bound variables u >= |x| (strictly, to stay interior).
  u <- 0.95*abs(x0) + 0.1*max(abs(x0))
  Atr <- A%*%x - b
  fu1 <- x - u
  fu2 <- -x - u
  fe1 <- Atr - epsilon
  fe2 <- -Atr - epsilon
  lamu1 <- -1/fu1
  lamu2 <- -1/fu2
  lame1 <- -1/fe1
  lame2 <- -1/fe2
  AtAv <- t(A)%*%(lame1 - lame2)
  ## Surrogate duality gap used both for the stopping rule and for tau.
  sdg <- - sum(c(fu1, fu2, fe1, fe2)* c(lamu1, lamu2, lame1, lame2))
  tau <- mu*(4*N)/sdg
  rdual <- gradf0 + c(lamu1 - lamu2 + AtAv, -lamu1 - lamu2)
  rcent <- -c(lamu1*fu1, lamu2*fu2, lame1*fe1, lame2*fe2) - 1/tau
  resnorm <- sqrt(sum(rdual^2, rcent^2))
  pditer <- 0
  ## Initialize xp so the (rare) zero-iteration case returns x0 instead of
  ## erroring on an undefined variable.
  xp <- x
  done <- (sdg < pdtol) | (pditer >= pdmaxiter)
  while(!done) {
    ## Assemble the reduced Newton system for the search direction.
    w2 <- -1 - (1/fu1 + 1/fu2)/tau
    sig11 <- -lamu1/fu1 - lamu2/fu2
    sig12 <- lamu1/fu1 - lamu2/fu2
    siga <- -(lame1/fe1 + lame2/fe2)
    sigx <- sig11 - sig12^2/sig11
    w1 <- -( t(A)%*%(1/fe2 - 1/fe1) + 1/fu2 - 1/fu1)/tau
    w1p <- w1 - (sig12/sig11)*w2
    Hp <- t(A)%*%diag(as.vector(siga))%*%A + diag(as.vector(sigx))
    ## Check conditioning BEFORE solving: the original called solve() first,
    ## so a singular Hp raised an error before this graceful fallback could
    ## ever trigger.
    if (rcond(Hp) < 1e-14) {
      warning("Ill conditioned matrix. Previous iterate matrix returned! (May increase perturb/lambda.)")
      xp <- x
      return(xp)
    }
    dx <- solve(Hp, w1p)
    AtAdx <- A%*%dx
    du <- w2/sig11 - (sig12/sig11)*dx
    dlamu1 <- -(lamu1/fu1)*(dx-du) - lamu1 - 1/(fu1*tau)
    dlamu2 <- -(lamu2/fu2)*(-dx-du) - lamu2 - 1/(fu2*tau)
    dlame1 <- -(lame1/fe1)*(AtAdx) - lame1 - 1/(fe1*tau)
    dlame2 <- (lame2/fe2)*(AtAdx) - lame2 - 1/(fe2*tau)
    AtAdv <- t(A)%*%(dlame1 - dlame2)
    ## Largest step keeping all multipliers and slacks strictly feasible.
    iu1 <- dlamu1 < 0
    iu2 <- dlamu2 < 0
    ie1 <- dlame1 < 0
    ie2 <- dlame2 < 0
    ifu1 <- (dx-du) > 0
    ifu2 <- (-dx-du) > 0
    ife1 <- AtAdx > 0
    ife2 <- AtAdx < 0
    smax <- min( -lamu1[iu1]/dlamu1[iu1], -lamu2[iu2]/dlamu2[iu2], -lame1[ie1]/dlame1[ie1], -lame2[ie2]/dlame2[ie2], -fu1[ifu1]/(dx[ifu1] - du[ifu1]), -fu2[ifu2]/(-dx[ifu2] -du[ifu2]), -fe1[ife1]/AtAdx[ife1], -fe2[ife2]/( - AtAdx[ife2]) )
    smax <- min(1, smax)
    s <- 0.99*smax
    ## Backtracking line search on the primal-dual residual norm.
    suffdec <- FALSE
    backiter <- 0
    while(!suffdec) {
      xp <- x + s*dx
      up <- u + s*du
      Atrp <- Atr + s*AtAdx
      AtAvp <- AtAv + s*AtAdv
      fu1p <- fu1 + s*(dx - du)
      fu2p <- fu2 + s*(-dx-du)
      fe1p <- fe1 + s*AtAdx
      fe2p <- fe2 + s*(-AtAdx)
      lamu1p <- lamu1 + s*dlamu1
      lamu2p <- lamu2 + s*dlamu2
      lame1p <- lame1 + s*dlame1
      lame2p <- lame2 + s*dlame2
      rdp <- gradf0 + c(lamu1p - lamu2p + AtAvp, -lamu1p - lamu2p)
      rcp <- -c(lamu1p*fu1p, lamu2p*fu2p, lame1p*fe1p, lame2p*fe2p) - 1/tau
      suffdec <- sqrt(sum(rdp^2, rcp^2)) < (1 - alpha*s)*resnorm
      s <- beta*s
      backiter <- backiter + 1
      if (backiter > 32) {
        warning("Backtracking stuck. Previous iterate matrix returned!")
        xp <- x
        return(xp)
      }
    }
    ## Accept the step and refresh all state for the next iteration.
    x <- xp
    u <- up
    Atr <- Atrp
    AtAv <- AtAvp
    fu1 <- fu1p
    fu2 <- fu2p
    fe1 <- fe1p
    fe2 <- fe2p
    lamu1 <- lamu1p
    lamu2 <- lamu2p
    lame1 <- lame1p
    lame2 <- lame2p
    sdg <- -sum( c(fu1, fu2, fe1, fe2)*c(lamu1, lamu2, lame1, lame2))
    tau <- mu*(4*N)/sdg
    rdual <- rdp
    rcent <- -c(fu1, fu2, fe1, fe2)*c(lamu1, lamu2, lame1, lame2) - 1/tau
    resnorm <- sqrt(sum(rdual^2, rcent^2))
    pditer <- pditer + 1
    done <- (sdg < pdtol) | (pditer >= pdmaxiter)
  }
  return(xp)
}
## Cross-validate a fitted clime object over its lambda path and pick the
## lambda minimizing the mean held-out loss ("likelihood" or "tracel2",
## looked up by name via match.fun).
cv.clime2 <- function (clime.obj, loss = c("likelihood", "tracel2"), fold = 5)
{
    dat <- clime.obj$x
    if (is.null(dat))
        stop("No x in clime object. Use x instead of sigma for computing clime!")
    folds <- cv.part(nrow(dat), fold)
    lam.path <- clime.obj$lambda
    solver <- clime.obj$lpfun
    n.lam <- length(lam.path)
    lossname <- match.arg(loss, c("likelihood", "tracel2"))
    lossfun <- match.fun(lossname)
    loss.re <- matrix(0, nrow = fold, ncol = n.lam)
    for (k in seq_len(fold)) {
        ## Refit on the training rows with the same lambda path and settings.
        fit.k <- clime2(dat[folds$trainMat[, k], ], lam.path, standardize = FALSE,
            perturb = clime.obj$perturb, linsolver = solver)
        held.out <- dat[folds$testMat[, k], ]
        ## MLE (divide-by-n) covariance of the held-out rows.
        scov <- cov(held.out) * (1 - 1/nrow(held.out))
        for (l in seq_len(n.lam)) {
            loss.re[k, l] <- loss.re[k, l] + lossfun(scov, fit.k$Omegalist[[l]])
        }
    }
    loss.mean <- colMeans(loss.re)
    loss.sd <- apply(loss.re, 2, sd)
    out <- list(lambdaopt = lam.path[which.min(loss.mean)], loss = lossname,
        lambda = lam.path, loss.mean = loss.mean, loss.sd = loss.sd,
        lpfun = solver)
    class(out) <- "cv.clime"
    out
}
## Split n observations into k train/test index sets for cross-validation.
## The observations are shuffled once; fold j's test set is the j-th block of
## floor(n/k) shuffled indices and its training set is every remaining
## shuffled index. Returns list(trainMat, testMat), one column per fold.
cv.part <- function(n, k) {
  n.test <- floor(n/k)
  shuffled <- sample(n)
  test.cols <- vapply(
    seq_len(k),
    function(j) shuffled[(j - 1)*n.test + seq_len(n.test)],
    integer(n.test)
  )
  train.cols <- vapply(
    seq_len(k),
    function(j) shuffled[-((j - 1)*n.test + seq_len(n.test))],
    integer(n - n.test)
  )
  list(trainMat = train.cols, testMat = test.cols)
}
| /Graph Recovery/clime_kendall.R | no_license | xiangyh/Nonparametric-Graphical-Modeling | R | false | false | 7,855 | r | clime2 <- function (x, lambda = NULL, nlambda = ifelse(is.null(lambda), 100, length(lambda)), lambda.max = 0.8,
lambda.min = ifelse(nrow(x) > ncol(x), 1e-04, 0.01), sigma = FALSE, perturb = TRUE, standardize = TRUE,
logspaced = TRUE, linsolver = c("primaldual", "simplex"), pdtol = 0.001, pdmaxiter = 50)
{
lpfun <- match.arg(linsolver, c("primaldual", "simplex"))
if (sigma) {
if (is.matrix(x)) {
Sigma <- x
}
else {
Sigma <- as.matrix(x)
}
p <- ncol(Sigma)
x <- NULL
}
else {
n <- nrow(x)
p <- ncol(x)
if (is.null(lambda)) {
if (logspaced) {
lambda <- 10^(seq(log10(lambda.min), log10(lambda.max),
length.out = nlambda))
}
else {
lambda <- seq(lambda.min, lambda.max, length.out = nlambda)
}
}
if (standardize) {x <- scale(x)}
Cov <- cov(x, method = "kendall")
diag(Cov) <- 1
Sigma <- Cov
}
eigvals <- eigen(Sigma, only.values = T)$values
if (is.logical(perturb)) {
if (perturb) {
perturb <- max(max(eigvals) - p * min(eigvals), 0)/(p -
1)
}
else {
perturb <- 0
}
}
Sigma <- Sigma + diag(p) * perturb
emat <- diag(p)
Omegalist <- vector("list", nlambda)
if (lpfun == "simplex") {
for (jl in 1:nlambda) {
Omega <- matrix(0, nrow = p, ncol = p)
lam <- lambda[jl]
for (j in 1:p) {
beta <- linprogS(Sigma, emat[, j], lam)
Omega[, j] <- beta
}
Omegalist[[jl]] <- Omega * (abs(Omega) <= abs(t(Omega))) +
t(Omega) * (abs(Omega) > abs(t(Omega)))
}
}
if (lpfun == "primaldual") {
Omega0 <- solve(Sigma)
for (jl in 1:nlambda) {
Omega <- matrix(0, nrow = p, ncol = p)
lam <- lambda[jl]
for (j in 1:p) {
beta <- linprogPD(Omega0[, j], Sigma, emat[,j], lam, pdtol, pdmaxiter)
Omega[, j] <- beta
}
Omegalist[[jl]] <- Omega * (abs(Omega) <= abs(t(Omega))) +
t(Omega) * (abs(Omega) > abs(t(Omega)))
}
}
outlist <- list(Omegalist = Omegalist, x = x, lambda = lambda,
perturb = perturb, standardize = standardize, lpfun = lpfun)
class(outlist) <- c("clime")
return(outlist)
}
linprogPD <- function(x0, A, b, epsilon, pdtol=1e-3, pdmaxiter=50) {
  ## Solves
  ##   min ||x||_1  subject to  ||Ax - b||_inf <= epsilon
  ## with a primal-dual interior-point method.
  ## Adapted from Matlab code for Dantzig Selector by J. Romberg.
  ##
  ## Args:
  ##   x0        feasible starting point (||A x0 - b||_inf <= epsilon).
  ##   A, b      constraint matrix and target vector.
  ##   epsilon   constraint slack.
  ##   pdtol     surrogate-duality-gap tolerance.
  ##   pdmaxiter maximum number of primal-dual iterations.
  ## Returns the (approximate) minimizer as an N x 1 matrix.
  N <- length(x0)
  x0 <- matrix(x0, nrow=N)
  b <- matrix(b, nrow=N)
  alpha <- 0.01          # backtracking sufficient-decrease parameter
  beta <- 0.5            # backtracking step shrink factor
  mu <- 10               # barrier update factor
  gradf0 <- matrix(rep(c(0,1), each=N), nrow=2*N)
  if (max(abs(A%*%x0 - b)) > epsilon) {
    stop("Infeasible starting point!")
  }
  x <- x0
  ## Auxiliary bound variables u >= |x| (strictly, to stay interior).
  u <- 0.95*abs(x0) + 0.1*max(abs(x0))
  Atr <- A%*%x - b
  fu1 <- x - u
  fu2 <- -x - u
  fe1 <- Atr - epsilon
  fe2 <- -Atr - epsilon
  lamu1 <- -1/fu1
  lamu2 <- -1/fu2
  lame1 <- -1/fe1
  lame2 <- -1/fe2
  AtAv <- t(A)%*%(lame1 - lame2)
  ## Surrogate duality gap used both for the stopping rule and for tau.
  sdg <- - sum(c(fu1, fu2, fe1, fe2)* c(lamu1, lamu2, lame1, lame2))
  tau <- mu*(4*N)/sdg
  rdual <- gradf0 + c(lamu1 - lamu2 + AtAv, -lamu1 - lamu2)
  rcent <- -c(lamu1*fu1, lamu2*fu2, lame1*fe1, lame2*fe2) - 1/tau
  resnorm <- sqrt(sum(rdual^2, rcent^2))
  pditer <- 0
  ## Initialize xp so the (rare) zero-iteration case returns x0 instead of
  ## erroring on an undefined variable.
  xp <- x
  done <- (sdg < pdtol) | (pditer >= pdmaxiter)
  while(!done) {
    ## Assemble the reduced Newton system for the search direction.
    w2 <- -1 - (1/fu1 + 1/fu2)/tau
    sig11 <- -lamu1/fu1 - lamu2/fu2
    sig12 <- lamu1/fu1 - lamu2/fu2
    siga <- -(lame1/fe1 + lame2/fe2)
    sigx <- sig11 - sig12^2/sig11
    w1 <- -( t(A)%*%(1/fe2 - 1/fe1) + 1/fu2 - 1/fu1)/tau
    w1p <- w1 - (sig12/sig11)*w2
    Hp <- t(A)%*%diag(as.vector(siga))%*%A + diag(as.vector(sigx))
    ## Check conditioning BEFORE solving: the original called solve() first,
    ## so a singular Hp raised an error before this graceful fallback could
    ## ever trigger.
    if (rcond(Hp) < 1e-14) {
      warning("Ill conditioned matrix. Previous iterate matrix returned! (May increase perturb/lambda.)")
      xp <- x
      return(xp)
    }
    dx <- solve(Hp, w1p)
    AtAdx <- A%*%dx
    du <- w2/sig11 - (sig12/sig11)*dx
    dlamu1 <- -(lamu1/fu1)*(dx-du) - lamu1 - 1/(fu1*tau)
    dlamu2 <- -(lamu2/fu2)*(-dx-du) - lamu2 - 1/(fu2*tau)
    dlame1 <- -(lame1/fe1)*(AtAdx) - lame1 - 1/(fe1*tau)
    dlame2 <- (lame2/fe2)*(AtAdx) - lame2 - 1/(fe2*tau)
    AtAdv <- t(A)%*%(dlame1 - dlame2)
    ## Largest step keeping all multipliers and slacks strictly feasible.
    iu1 <- dlamu1 < 0
    iu2 <- dlamu2 < 0
    ie1 <- dlame1 < 0
    ie2 <- dlame2 < 0
    ifu1 <- (dx-du) > 0
    ifu2 <- (-dx-du) > 0
    ife1 <- AtAdx > 0
    ife2 <- AtAdx < 0
    smax <- min( -lamu1[iu1]/dlamu1[iu1], -lamu2[iu2]/dlamu2[iu2], -lame1[ie1]/dlame1[ie1], -lame2[ie2]/dlame2[ie2], -fu1[ifu1]/(dx[ifu1] - du[ifu1]), -fu2[ifu2]/(-dx[ifu2] -du[ifu2]), -fe1[ife1]/AtAdx[ife1], -fe2[ife2]/( - AtAdx[ife2]) )
    smax <- min(1, smax)
    s <- 0.99*smax
    ## Backtracking line search on the primal-dual residual norm.
    suffdec <- FALSE
    backiter <- 0
    while(!suffdec) {
      xp <- x + s*dx
      up <- u + s*du
      Atrp <- Atr + s*AtAdx
      AtAvp <- AtAv + s*AtAdv
      fu1p <- fu1 + s*(dx - du)
      fu2p <- fu2 + s*(-dx-du)
      fe1p <- fe1 + s*AtAdx
      fe2p <- fe2 + s*(-AtAdx)
      lamu1p <- lamu1 + s*dlamu1
      lamu2p <- lamu2 + s*dlamu2
      lame1p <- lame1 + s*dlame1
      lame2p <- lame2 + s*dlame2
      rdp <- gradf0 + c(lamu1p - lamu2p + AtAvp, -lamu1p - lamu2p)
      rcp <- -c(lamu1p*fu1p, lamu2p*fu2p, lame1p*fe1p, lame2p*fe2p) - 1/tau
      suffdec <- sqrt(sum(rdp^2, rcp^2)) < (1 - alpha*s)*resnorm
      s <- beta*s
      backiter <- backiter + 1
      if (backiter > 32) {
        warning("Backtracking stuck. Previous iterate matrix returned!")
        xp <- x
        return(xp)
      }
    }
    ## Accept the step and refresh all state for the next iteration.
    x <- xp
    u <- up
    Atr <- Atrp
    AtAv <- AtAvp
    fu1 <- fu1p
    fu2 <- fu2p
    fe1 <- fe1p
    fe2 <- fe2p
    lamu1 <- lamu1p
    lamu2 <- lamu2p
    lame1 <- lame1p
    lame2 <- lame2p
    sdg <- -sum( c(fu1, fu2, fe1, fe2)*c(lamu1, lamu2, lame1, lame2))
    tau <- mu*(4*N)/sdg
    rdual <- rdp
    rcent <- -c(fu1, fu2, fe1, fe2)*c(lamu1, lamu2, lame1, lame2) - 1/tau
    resnorm <- sqrt(sum(rdual^2, rcent^2))
    pditer <- pditer + 1
    done <- (sdg < pdtol) | (pditer >= pdmaxiter)
  }
  return(xp)
}
## Cross-validate a fitted clime object over its lambda path and pick the
## lambda minimizing the mean held-out loss ("likelihood" or "tracel2",
## looked up by name via match.fun).
cv.clime2 <- function (clime.obj, loss = c("likelihood", "tracel2"), fold = 5)
{
    dat <- clime.obj$x
    if (is.null(dat))
        stop("No x in clime object. Use x instead of sigma for computing clime!")
    folds <- cv.part(nrow(dat), fold)
    lam.path <- clime.obj$lambda
    solver <- clime.obj$lpfun
    n.lam <- length(lam.path)
    lossname <- match.arg(loss, c("likelihood", "tracel2"))
    lossfun <- match.fun(lossname)
    loss.re <- matrix(0, nrow = fold, ncol = n.lam)
    for (k in seq_len(fold)) {
        ## Refit on the training rows with the same lambda path and settings.
        fit.k <- clime2(dat[folds$trainMat[, k], ], lam.path, standardize = FALSE,
            perturb = clime.obj$perturb, linsolver = solver)
        held.out <- dat[folds$testMat[, k], ]
        ## MLE (divide-by-n) covariance of the held-out rows.
        scov <- cov(held.out) * (1 - 1/nrow(held.out))
        for (l in seq_len(n.lam)) {
            loss.re[k, l] <- loss.re[k, l] + lossfun(scov, fit.k$Omegalist[[l]])
        }
    }
    loss.mean <- colMeans(loss.re)
    loss.sd <- apply(loss.re, 2, sd)
    out <- list(lambdaopt = lam.path[which.min(loss.mean)], loss = lossname,
        lambda = lam.path, loss.mean = loss.mean, loss.sd = loss.sd,
        lpfun = solver)
    class(out) <- "cv.clime"
    out
}
## Split n observations into k train/test index sets for cross-validation.
## The observations are shuffled once; fold j's test set is the j-th block of
## floor(n/k) shuffled indices and its training set is every remaining
## shuffled index. Returns list(trainMat, testMat), one column per fold.
cv.part <- function(n, k) {
  n.test <- floor(n/k)
  shuffled <- sample(n)
  test.cols <- vapply(
    seq_len(k),
    function(j) shuffled[(j - 1)*n.test + seq_len(n.test)],
    integer(n.test)
  )
  train.cols <- vapply(
    seq_len(k),
    function(j) shuffled[-((j - 1)*n.test + seq_len(n.test))],
    integer(n - n.test)
  )
  list(trainMat = train.cols, testMat = test.cols)
}
|
\name{beanplot-package}
\alias{beanplot-package}
\docType{package}
\title{
Visualization via Beanplots (like Boxplot/Stripchart/Violin Plot)
}
\description{
Plots univariate comparison graphs, alternative to boxplot/stripchart/violin plot
}
\details{
\tabular{ll}{
Package: \tab beanplot\cr
Type: \tab Package\cr
Version: \tab 1.2\cr
Date: \tab 2014-09-15\cr
License: \tab GPL-2\cr
}
The function \code{\link{beanplot}} does all the work.
}
\author{
Peter Kampstra <beanplot@kampstra.net>
}
\references{
Kampstra, P. (2008) Beanplot: A Boxplot Alternative for Visual Comparison of Distributions.
\emph{Journal of Statistical Software, Code Snippets}, \bold{28}(1), 1-9.
URL \url{http://www.jstatsoft.org/v28/c01/}
}
\seealso{
\code{\link[graphics:graphics-package]{graphics}}
\code{\link[vioplot:vioplot]{vioplot}}
}
\examples{
beanplot(rnorm(100), runif(100))
vignette("beanplot", package = "beanplot")
}
\keyword{package}
| /man/beanplot-package.Rd | no_license | a3cel2/beanplot | R | false | false | 993 | rd | \name{beanplot-package}
\alias{beanplot-package}
\docType{package}
\title{
Visualization via Beanplots (like Boxplot/Stripchart/Violin Plot)
}
\description{
Plots univariate comparison graphs, alternative to boxplot/stripchart/violin plot
}
\details{
\tabular{ll}{
Package: \tab beanplot\cr
Type: \tab Package\cr
Version: \tab 1.2\cr
Date: \tab 2014-09-15\cr
License: \tab GPL-2\cr
}
The function \code{\link{beanplot}} does all the work.
}
\author{
Peter Kampstra <beanplot@kampstra.net>
}
\references{
Kampstra, P. (2008) Beanplot: A Boxplot Alternative for Visual Comparison of Distributions.
\emph{Journal of Statistical Software, Code Snippets}, \bold{28}(1), 1-9.
URL \url{http://www.jstatsoft.org/v28/c01/}
}
\seealso{
\code{\link[graphics:graphics-package]{graphics}}
\code{\link[vioplot:vioplot]{vioplot}}
}
\examples{
beanplot(rnorm(100), runif(100))
vignette("beanplot", package = "beanplot")
}
\keyword{package}
|
library("EML") ## ick, create_methods actually uses true slots to get slots and contains slots
library("XML")
library("xml2")
library("purrr")
library("dplyr")
## Filepaths assume working directory is package root
source("inst/create-package/post_process.R")
source("inst/create-package/create_classes.R")
classes_file <- "R/classes.R"
methods_file <- "R/methods.R"
## Start clean
file.remove(classes_file)
file.remove(methods_file)
## Create some boilerplate classes:
## Append boilerplate S4 class definitions that the generated schema classes
## build on: ordering/attribute helpers, the root 'eml-2.1.1' class, and one R
## class per primitive XSD type mapped onto the corresponding base R type.
## `methods_file` is accepted for call-site symmetry with create_classes but
## is not used here -- TODO confirm it can be dropped.
xs_base_classes <- function(file = "R/classes.R", methods_file = "R/methods.R"){
  write("
setClass('slot_order', contains = 'character')
setClass('xml_attribute', contains = 'character')
setClass('eml-2.1.1', slots = c('schemaLocation' = 'xml_attribute', 'lang' = 'xml_attribute', slot_order = 'character'))
setClass('i18nNonEmptyStringType', slots = c('value' = 'ListOfvalue', 'lang' = 'xml_attribute'), contains = c('eml-2.1.1', 'character'))
setClass('InlineType', contains=c('list'))",
        file, append = TRUE)
  ## Map each primitive XSD schema type to the base R class it should
  ## contain, emitting one setClass() line per row.
  data.frame(class = c("xs:float", "xs:string", "xs:anyURI", "xs:time", "xs:decimal", "xs:int", "xs:unsignedInt", "xs:unsignedLong", "xs:long", "xs:integer", "xs:boolean", "xs:date", "xs:positiveInteger"),
             contains = c("numeric", "character", "character", "character", "numeric", "integer", "integer", "integer", "integer", "integer", "logical", "Date", "integer")) %>%
    purrr::by_row(function(x)
      write(sprintf("setClass('%s', contains = '%s')", x[["class"]], x[["contains"]]), file, append = TRUE)
    )
}
## Seed the generated files with the hand-written base classes.
xs_base_classes(classes_file)
## This call to create_classes needs to pass a custom namespace
#stmml_ns <- xml_ns_rename( xml_ns(read_xml("inst/xsd/stmml.xsd")), xsd = "xs")
#create_classes("inst/xsd/stmml.xsd", classes_file, methods_file, ns = stmml_ns)
## Collate list -- avoid errors by manually setting the order of XSD file parsing
## (each schema must be processed after the schemas it depends on).
collate <- c(
  "eml-text.xsd",
  "eml-documentation.xsd",
  "eml-unitTypeDefinitions.xsd",
  "eml-party.xsd",
  "eml-resource.xsd",
  "eml-spatialReference.xsd",
  "eml-access.xsd",
  "eml-constraint.xsd",
  "eml-literature.xsd",
  "eml-coverage.xsd",
  "eml-physical.xsd",
  "eml-project.xsd",
  "eml-software.xsd",
  "eml-protocol.xsd",
  "eml-methods.xsd",
  "eml-attribute.xsd",
  "eml-entity.xsd",
  "eml-dataTable.xsd",
  "eml-view.xsd",
  "eml-storedProcedure.xsd",
  "eml-spatialVector.xsd",
  "eml-spatialRaster.xsd",
  "eml-dataset.xsd",
  "eml.xsd"
)
## Okay, here we go! Create all the classes...
paste0("inst/xsd/", collate) %>% map(create_classes, cf = classes_file, mf = methods_file)
fix_protected(classes_file)
## Goodness grief, fix ordering so that classes can load with no errors or warning (dependent classes must be defined first!)
move_to_end("proceduralStep", classes_file)
move_to_end("methodStep", classes_file)
move_to_end("dataSource", classes_file)
## Hand-written overrides for classes the generator gets wrong.
replace_class("keyword", "setClass('keyword', slots = c('keywordType' = 'xml_attribute'), contains = c('i18nNonEmptyStringType', 'eml-2.1.1'))", classes_file)
replace_class("coverage", "setClass('coverage', contains=c('Coverage'))", classes_file)
replace_class("temporalCoverage", "setClass('temporalCoverage', slots = c('system' = 'xml_attribute', 'scope' = 'xml_attribute'), contains = c('TemporalCoverage'))", classes_file)
replace_class("taxonomicCoverage", "setClass('taxonomicCoverage', slots = c('system' = 'xml_attribute', 'scope' = 'xml_attribute'), contains = c('TaxonomicCoverage'))", classes_file)
replace_class("geographicCoverage", "setClass('geographicCoverage', slots = c('system' = 'xml_attribute', 'scope' = 'xml_attribute'), contains = c('GeographicCoverage'))", classes_file)
replace_class('size', "setClass('size', slots = c('character' = 'character', 'unit' = 'xml_attribute'), contains = c('eml-2.1.1', 'character'))", classes_file)
replace_class('metadata', "setClass('metadata', contains='InlineType')", classes_file)
replace_class('inline', "setClass('inline', contains='InlineType')", classes_file)
replace_class('InlineType', "setClass('InlineType', contains=c('list'))", classes_file)
replace_class('parameter', "setClass('parameter', slots = c(name = 'character', value = 'character', 'domainDescription' = 'character', 'required' = 'character', 'repeats' = 'character'), contains = 'eml-2.1.1')", classes_file)
replace_class('PhysicalOnlineType', "setClass('PhysicalOnlineType', slots = c('onlineDescription' = 'i18nNonEmptyStringType', 'url' = 'UrlType', 'connection' = 'ConnectionType', 'connectionDefinition' = 'ConnectionDefinitionType'), contains = c('eml-2.1.1', 'character'))", classes_file)
replace_class('online', "setClass('online', contains = c('PhysicalOnlineType', 'OnlineType', 'eml-2.1.1'))", classes_file)
## The replaced classes depend on types defined later, so push them to the end.
move_to_end("coverage", classes_file)
move_to_end("temporalCoverage", classes_file)
move_to_end("taxonomicCoverage", classes_file)
move_to_end("geographicCoverage", classes_file)
move_to_end("inline", classes_file)
move_to_end("parameter", classes_file)
move_to_end("online", classes_file)
move_to_end("metadata", classes_file)
move_to_end("additionalMetadata", classes_file)
## Fine-grained, empirically determined reordering: each offset shifts a
## setClass() definition above/below the classes that reference it.
move_up_n("UrlType", classes_file, 8)
move_up_n("schemeName", classes_file, 3)
move_up_n("ConnectionDefinitionType", classes_file, 4)
move_up_n("ConnectionType", classes_file, 5)
move_up_n("OfflineType", classes_file, 3)
move_up_n("OnlineType", classes_file, 3)
move_down_n("SpatialReferenceType", classes_file, 14)
move_down_n("foreignKey", classes_file, 10)
move_down_n("joinCondition", classes_file, 8)
move_down_n("ConstraintType", classes_file, 7)
move_up_n("CardinalityChildOccurancesType", classes_file, 12)
move_up_n("SingleDateTimeType", classes_file, 5)
move_up_n("alternativeTimeScale", classes_file, 5)
move_up_n("PhysicalOnlineType", classes_file, 2)
move_up_n("Action", classes_file, 6)
move_down_n("CitationType", classes_file, 14)
move_up_n("NonNumericDomainType", classes_file, 18)
move_up_n("NumericDomainType", classes_file, 18)
move_up_n("DateTimeDomainType", classes_file, 18)
move_up_n("UnitType", classes_file, 12)
move_up_n("BoundsGroup", classes_file, 25)
move_up_n("BoundsDateGroup", classes_file, 25)
move_down_n("AttributeType", classes_file, 3)
move_down_n("OtherEntityType", classes_file, 2)
move_down_n("SpatialVectorType", classes_file, 2)
move_down_n("DatasetType", classes_file, 2)
## Base/enumeration types with no dependencies go first in the file.
move_to_top("MaintUpFreqType", classes_file)
move_to_top("CellGeometryType", classes_file)
move_to_top("ImagingConditionCode", classes_file)
move_to_top("GeometryType", classes_file)
move_to_top("TopologyLevel", classes_file)
move_to_top("NumberType", classes_file)
move_to_top("PrecisionType", classes_file)
move_to_top("GRingType", classes_file)
move_to_top("ConstraintBaseGroup", classes_file)
move_to_top("constraintDescription", classes_file)
move_to_top("Coverage", classes_file)
move_to_top("ListOftaxonomicCoverage", classes_file)
move_to_top("ListOfgeographicCoverage", classes_file)
move_to_top("ListOftemporalCoverage", classes_file)
move_to_top("ReferencesGroup", classes_file)
move_to_top("references", classes_file)
move_to_top("ListOfvalue", classes_file)
move_to_top("eml-2.1.1", classes_file)
move_to_top("xml_attribute", classes_file)
## Textual surgery on the generated classes.R: inline the *Group pseudo-types
## into their users and normalize slot/contains declarations.
R <- readLines(classes_file)
R <- gsub("'language' = 'i18nNonEmptyStringType'", "'language' = 'character'", R)
## Drop the bare 'title' class (redefined elsewhere).
R <- gsub("setClass\\('title', contains = c\\('eml-2.1.1', 'character'\\)\\).*", "", R)
## Consistent Ordering please
R <- gsub("contains = c\\('character', 'eml-2.1.1'\\)", "contains = c('eml-2.1.1', 'character')", R)
## avoid ReferencesGroup as separate class.
R <- gsub("'ReferencesGroup' = 'ReferencesGroup'", "'references' = 'references'", R)
## Expand the ResourceGroup/EntityGroup/ResponsibleParty placeholder slots
## into their full slot lists.
resource_group_slots <- "'alternateIdentifier' = 'ListOfalternateIdentifier', 'shortName' = 'character', 'title' = 'ListOftitle', 'creator' = 'ListOfcreator', 'metadataProvider' = 'ListOfmetadataProvider', 'associatedParty' = 'ListOfassociatedParty', 'pubDate' = 'yearDate', 'language' = 'character', 'series' = 'character', 'abstract' = 'TextType', 'keywordSet' = 'ListOfkeywordSet', 'additionalInfo' = 'ListOfadditionalInfo', 'intellectualRights' = 'TextType', 'distribution' = 'ListOfdistribution', 'coverage' = 'Coverage'"
R <- gsub("'ResourceGroup' = 'ResourceGroup'", resource_group_slots, R)
entity_group_slots <- "'alternateIdentifier' = 'ListOfalternateIdentifier', 'entityName' = 'character', 'entityDescription' = 'character', 'physical' = 'ListOfphysical', 'coverage' = 'Coverage', 'methods' = 'MethodsType', 'additionalInfo' = 'ListOfadditionalInfo'"
R <- gsub("'EntityGroup' = 'EntityGroup'", entity_group_slots, R)
responsible_party_slots = "'individualName' = 'ListOfindividualName', 'organizationName' = 'ListOforganizationName', 'positionName' = 'ListOfpositionName', 'address' = 'ListOfaddress', 'phone' = 'ListOfphone', 'electronicMailAddress' = 'ListOfelectronicMailAddress', 'onlineUrl' = 'ListOfonlineUrl', 'userId' = 'ListOfuserId', 'references' = 'references', 'id' = 'xml_attribute', 'system' = 'xml_attribute', 'scope' = 'xml_attribute'"
R <- gsub("'ResponsibleParty' = 'ResponsibleParty'", responsible_party_slots, R)
R <- gsub("'ProcedureStepType' = 'ProcedureStepType'", "'description' = 'TextType', 'citation' = 'ListOfcitation', 'protocol' = 'ListOfprotocol', 'instrumentation' = 'ListOfinstrumentation', 'software' = 'ListOfsoftware', 'subStep' = 'ListOfsubStep'", R)
## Deduplicate lines (safe because each setClass line is unique) and rewrite.
R <- unique(R)
write(R, classes_file)
## Fix methods: the 'complex' slot was renamed to 'eml:complex', so patch the
## generated methods.R to match, then drop methods superseded by the
## hand-written class overrides above.
M <- readLines(methods_file)
M <- gsub("'complex'", "'eml:complex'", M)
M <- gsub("signature\\('language'", "signature('eml:language'", M)
M <- gsub(".Object@complex", "slot(.Object, 'eml:complex')", M)
write(M, methods_file)
replace_class("ParagraphType", "setClass('ParagraphType', contains=c('InlineType'))", classes_file)
replace_class("SectionType", "setClass('SectionType', contains=c('InlineType'))", classes_file)
replace_class("eml:language", "setClass('eml:language', slots = c('LanguageValue' = 'character', 'LanguageCodeStandard' = 'character'), contains = c('eml-2.1.1', 'character', 'i18nNonEmptyStringType'))", classes_file)
drop_method('ParagraphType', methods_file)
drop_method('SectionType', methods_file)
drop_method('UrlType', methods_file)
## Remove the remaining generated methods for types handled manually.
sapply(c("array", "table", "matrix", "list", "description", "keyword", "unit", "eml:complex", "language", "parameter", "parameterDefinition"), drop_method, methods_file)
| /inst/create-package/create_package.R | no_license | mbjones/reml | R | false | false | 10,407 | r | library("EML") ## ick, create_methods actually uses true slots to get slots and contains slots
library("XML")
library("xml2")
library("purrr")
library("dplyr")
## Filepaths assume working directory is package root
source("inst/create-package/post_process.R")
source("inst/create-package/create_classes.R")
classes_file <- "R/classes.R"
methods_file <- "R/methods.R"
## Start clean
file.remove(classes_file)
file.remove(methods_file)
## Create some boilerplate classes:
## Append boilerplate S4 class definitions that the generated schema classes
## build on: ordering/attribute helpers, the root 'eml-2.1.1' class, and one R
## class per primitive XSD type mapped onto the corresponding base R type.
## `methods_file` is accepted for call-site symmetry with create_classes but
## is not used here -- TODO confirm it can be dropped.
xs_base_classes <- function(file = "R/classes.R", methods_file = "R/methods.R"){
  write("
setClass('slot_order', contains = 'character')
setClass('xml_attribute', contains = 'character')
setClass('eml-2.1.1', slots = c('schemaLocation' = 'xml_attribute', 'lang' = 'xml_attribute', slot_order = 'character'))
setClass('i18nNonEmptyStringType', slots = c('value' = 'ListOfvalue', 'lang' = 'xml_attribute'), contains = c('eml-2.1.1', 'character'))
setClass('InlineType', contains=c('list'))",
        file, append = TRUE)
  ## Map each primitive XSD schema type to the base R class it should
  ## contain, emitting one setClass() line per row.
  data.frame(class = c("xs:float", "xs:string", "xs:anyURI", "xs:time", "xs:decimal", "xs:int", "xs:unsignedInt", "xs:unsignedLong", "xs:long", "xs:integer", "xs:boolean", "xs:date", "xs:positiveInteger"),
             contains = c("numeric", "character", "character", "character", "numeric", "integer", "integer", "integer", "integer", "integer", "logical", "Date", "integer")) %>%
    purrr::by_row(function(x)
      write(sprintf("setClass('%s', contains = '%s')", x[["class"]], x[["contains"]]), file, append = TRUE)
    )
}
xs_base_classes(classes_file)
## This call to create_classes needs to pass a custom namespace
#stmml_ns <- xml_ns_rename( xml_ns(read_xml("inst/xsd/stmml.xsd")), xsd = "xs")
#create_classes("inst/xsd/stmml.xsd", classes_file, methods_file, ns = stmml_ns)
## Collate list -- avoid errors by manually setting the order of XSD file parsing
collate <- c(
"eml-text.xsd",
"eml-documentation.xsd",
"eml-unitTypeDefinitions.xsd",
"eml-party.xsd",
"eml-resource.xsd",
"eml-spatialReference.xsd",
"eml-access.xsd",
"eml-constraint.xsd",
"eml-literature.xsd",
"eml-coverage.xsd",
"eml-physical.xsd",
"eml-project.xsd",
"eml-software.xsd",
"eml-protocol.xsd",
"eml-methods.xsd",
"eml-attribute.xsd",
"eml-entity.xsd",
"eml-dataTable.xsd",
"eml-view.xsd",
"eml-storedProcedure.xsd",
"eml-spatialVector.xsd",
"eml-spatialRaster.xsd",
"eml-dataset.xsd",
"eml.xsd"
)
## Okay, here we go! Create all the classes...
paste0("inst/xsd/", collate) %>% map(create_classes, cf = classes_file, mf = methods_file)
fix_protected(classes_file)
## Goodness grief, fix ordering so that classes can load with no errors or warning (dependent classes must be defined first!)
move_to_end("proceduralStep", classes_file)
move_to_end("methodStep", classes_file)
move_to_end("dataSource", classes_file)
replace_class("keyword", "setClass('keyword', slots = c('keywordType' = 'xml_attribute'), contains = c('i18nNonEmptyStringType', 'eml-2.1.1'))", classes_file)
replace_class("coverage", "setClass('coverage', contains=c('Coverage'))", classes_file)
replace_class("temporalCoverage", "setClass('temporalCoverage', slots = c('system' = 'xml_attribute', 'scope' = 'xml_attribute'), contains = c('TemporalCoverage'))", classes_file)
replace_class("taxonomicCoverage", "setClass('taxonomicCoverage', slots = c('system' = 'xml_attribute', 'scope' = 'xml_attribute'), contains = c('TaxonomicCoverage'))", classes_file)
replace_class("geographicCoverage", "setClass('geographicCoverage', slots = c('system' = 'xml_attribute', 'scope' = 'xml_attribute'), contains = c('GeographicCoverage'))", classes_file)
replace_class('size', "setClass('size', slots = c('character' = 'character', 'unit' = 'xml_attribute'), contains = c('eml-2.1.1', 'character'))", classes_file)
replace_class('metadata', "setClass('metadata', contains='InlineType')", classes_file)
replace_class('inline', "setClass('inline', contains='InlineType')", classes_file)
replace_class('InlineType', "setClass('InlineType', contains=c('list'))", classes_file)
replace_class('parameter', "setClass('parameter', slots = c(name = 'character', value = 'character', 'domainDescription' = 'character', 'required' = 'character', 'repeats' = 'character'), contains = 'eml-2.1.1')", classes_file)
replace_class('PhysicalOnlineType', "setClass('PhysicalOnlineType', slots = c('onlineDescription' = 'i18nNonEmptyStringType', 'url' = 'UrlType', 'connection' = 'ConnectionType', 'connectionDefinition' = 'ConnectionDefinitionType'), contains = c('eml-2.1.1', 'character'))", classes_file)
replace_class('online', "setClass('online', contains = c('PhysicalOnlineType', 'OnlineType', 'eml-2.1.1'))", classes_file)
move_to_end("coverage", classes_file)
move_to_end("temporalCoverage", classes_file)
move_to_end("taxonomicCoverage", classes_file)
move_to_end("geographicCoverage", classes_file)
move_to_end("inline", classes_file)
move_to_end("parameter", classes_file)
move_to_end("online", classes_file)
move_to_end("metadata", classes_file)
move_to_end("additionalMetadata", classes_file)
move_up_n("UrlType", classes_file, 8)
move_up_n("schemeName", classes_file, 3)
move_up_n("ConnectionDefinitionType", classes_file, 4)
move_up_n("ConnectionType", classes_file, 5)
move_up_n("OfflineType", classes_file, 3)
move_up_n("OnlineType", classes_file, 3)
move_down_n("SpatialReferenceType", classes_file, 14)
move_down_n("foreignKey", classes_file, 10)
move_down_n("joinCondition", classes_file, 8)
move_down_n("ConstraintType", classes_file, 7)
move_up_n("CardinalityChildOccurancesType", classes_file, 12)
move_up_n("SingleDateTimeType", classes_file, 5)
move_up_n("alternativeTimeScale", classes_file, 5)
move_up_n("PhysicalOnlineType", classes_file, 2)
move_up_n("Action", classes_file, 6)
move_down_n("CitationType", classes_file, 14)
move_up_n("NonNumericDomainType", classes_file, 18)
move_up_n("NumericDomainType", classes_file, 18)
move_up_n("DateTimeDomainType", classes_file, 18)
move_up_n("UnitType", classes_file, 12)
move_up_n("BoundsGroup", classes_file, 25)
move_up_n("BoundsDateGroup", classes_file, 25)
move_down_n("AttributeType", classes_file, 3)
move_down_n("OtherEntityType", classes_file, 2)
move_down_n("SpatialVectorType", classes_file, 2)
move_down_n("DatasetType", classes_file, 2)
move_to_top("MaintUpFreqType", classes_file)
move_to_top("CellGeometryType", classes_file)
move_to_top("ImagingConditionCode", classes_file)
move_to_top("GeometryType", classes_file)
move_to_top("TopologyLevel", classes_file)
move_to_top("NumberType", classes_file)
move_to_top("PrecisionType", classes_file)
move_to_top("GRingType", classes_file)
move_to_top("ConstraintBaseGroup", classes_file)
move_to_top("constraintDescription", classes_file)
move_to_top("Coverage", classes_file)
move_to_top("ListOftaxonomicCoverage", classes_file)
move_to_top("ListOfgeographicCoverage", classes_file)
move_to_top("ListOftemporalCoverage", classes_file)
move_to_top("ReferencesGroup", classes_file)
move_to_top("references", classes_file)
move_to_top("ListOfvalue", classes_file)
move_to_top("eml-2.1.1", classes_file)
move_to_top("xml_attribute", classes_file)
R <- readLines(classes_file)
R <- gsub("'language' = 'i18nNonEmptyStringType'", "'language' = 'character'", R)
R <- gsub("setClass\\('title', contains = c\\('eml-2.1.1', 'character'\\)\\).*", "", R)
## Consistent Ordering please
R <- gsub("contains = c\\('character', 'eml-2.1.1'\\)", "contains = c('eml-2.1.1', 'character')", R)
## avoid ReferencesGroup as separate class.
R <- gsub("'ReferencesGroup' = 'ReferencesGroup'", "'references' = 'references'", R)
resource_group_slots <- "'alternateIdentifier' = 'ListOfalternateIdentifier', 'shortName' = 'character', 'title' = 'ListOftitle', 'creator' = 'ListOfcreator', 'metadataProvider' = 'ListOfmetadataProvider', 'associatedParty' = 'ListOfassociatedParty', 'pubDate' = 'yearDate', 'language' = 'character', 'series' = 'character', 'abstract' = 'TextType', 'keywordSet' = 'ListOfkeywordSet', 'additionalInfo' = 'ListOfadditionalInfo', 'intellectualRights' = 'TextType', 'distribution' = 'ListOfdistribution', 'coverage' = 'Coverage'"
R <- gsub("'ResourceGroup' = 'ResourceGroup'", resource_group_slots, R)
entity_group_slots <- "'alternateIdentifier' = 'ListOfalternateIdentifier', 'entityName' = 'character', 'entityDescription' = 'character', 'physical' = 'ListOfphysical', 'coverage' = 'Coverage', 'methods' = 'MethodsType', 'additionalInfo' = 'ListOfadditionalInfo'"
R <- gsub("'EntityGroup' = 'EntityGroup'", entity_group_slots, R)
responsible_party_slots = "'individualName' = 'ListOfindividualName', 'organizationName' = 'ListOforganizationName', 'positionName' = 'ListOfpositionName', 'address' = 'ListOfaddress', 'phone' = 'ListOfphone', 'electronicMailAddress' = 'ListOfelectronicMailAddress', 'onlineUrl' = 'ListOfonlineUrl', 'userId' = 'ListOfuserId', 'references' = 'references', 'id' = 'xml_attribute', 'system' = 'xml_attribute', 'scope' = 'xml_attribute'"
R <- gsub("'ResponsibleParty' = 'ResponsibleParty'", responsible_party_slots, R)
R <- gsub("'ProcedureStepType' = 'ProcedureStepType'", "'description' = 'TextType', 'citation' = 'ListOfcitation', 'protocol' = 'ListOfprotocol', 'instrumentation' = 'ListOfinstrumentation', 'software' = 'ListOfsoftware', 'subStep' = 'ListOfsubStep'", R)
R <- unique(R)
write(R, classes_file)
## Fix methods
M <- readLines(methods_file)
M <- gsub("'complex'", "'eml:complex'", M)
M <- gsub("signature\\('language'", "signature('eml:language'", M)
M <- gsub(".Object@complex", "slot(.Object, 'eml:complex')", M)
write(M, methods_file)
replace_class("ParagraphType", "setClass('ParagraphType', contains=c('InlineType'))", classes_file)
replace_class("SectionType", "setClass('SectionType', contains=c('InlineType'))", classes_file)
replace_class("eml:language", "setClass('eml:language', slots = c('LanguageValue' = 'character', 'LanguageCodeStandard' = 'character'), contains = c('eml-2.1.1', 'character', 'i18nNonEmptyStringType'))", classes_file)
drop_method('ParagraphType', methods_file)
drop_method('SectionType', methods_file)
drop_method('UrlType', methods_file)
sapply(c("array", "table", "matrix", "list", "description", "keyword", "unit", "eml:complex", "language", "parameter", "parameterDefinition"), drop_method, methods_file)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/helpers.R
\name{ht}
\alias{ht}
\title{Display the top and bottom of a data frame}
\usage{
ht(data, n = 5)
}
\arguments{
\item{data}{data frame}
\item{n}{number of rows to display from head and tail}
}
\examples{
\dontrun{
ht(mtcars)
}
}
\description{
Quickly display the top and bottom of a data frame
}
| /man/ht.Rd | no_license | nrkoehler/xyzfuns | R | false | true | 374 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/helpers.R
\name{ht}
\alias{ht}
\title{{Display the top and bottom of a data frame}}
\usage{
ht(data, n = 5)
}
\arguments{
\item{data}{data frame}
\item{n}{number of rows to display from head and tail
\dontrun{
ht(mtcars)
}}
}
\description{
{Quickly display the top and bottom of a data frame}
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/network-tools.r
\name{jtt}
\alias{jtt}
\title{Number of JTTs in a graph}
\usage{
jtt(data, from_id, to_id)
}
\arguments{
\item{data}{data set}
\item{from_id}{name of the variable of edge tails}
\item{to_id}{name of the variable of edge heads}
}
\description{
Number of jumping transitive triplets (JTT) in a graph. A JTT between three nodes i,j,
and k is defined as the situation that when there is a (directed) edge from i to j and an
edge from j to k there is also a direct edge from i to k.
In an undirected situation we can think of any undirected edge as two directed edges between the two nodes involved.
}
\examples{
data(blood)
ggplot(data= blood$edges) + geom_net(aes(from_id=from, to_id=to), directed=TRUE) + theme_net()
jtt(blood$edges, "from", "to")
# this number is very high compared to the overall number of edges that are not self-loops
nrow(subset(blood$edges, from != to))
}
| /man/jtt.Rd | no_license | cran/geomnet | R | false | true | 973 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/network-tools.r
\name{jtt}
\alias{jtt}
\title{Number of JTTs in a graph}
\usage{
jtt(data, from_id, to_id)
}
\arguments{
\item{data}{data set}
\item{from_id}{name of the variable of edge tails}
\item{to_id}{name of the variable of edge heads}
}
\description{
Number of jumping transitive triplets (JTT) in a graph. A JTT between three nodes i,j,
and k is defined as the situation that when there is a (directed) edge from i to j and an
edge from j to k there is also a direct edge from i to k.
In an undirected situation we can think of any undirected edge as two directed edges between the two nodes involved.
}
\examples{
data(blood)
ggplot(data= blood$edges) + geom_net(aes(from_id=from, to_id=to), directed=TRUE) + theme_net()
jtt(blood$edges, "from", "to")
# this number is very high compared to the overall number of edges that are not self-loops
nrow(subset(blood$edges, from != to))
}
|
# Recommender-system script: for target customers (clusters 1 and 2), train a
# matrix-factorization recommender (LIBMF) on shampoo / hair-conditioner
# purchase quantities and pick the top-rated NIVEA product per customer.
# Inputs : target_customers.csv, product_table.csv, transaction_table.csv
# Output : recommend_product.csv (cust_id, prod_id)
library(recommenderlab)
library(data.table)
#read in the data
customer <- fread('target_customers.csv')
# keep only the two target clusters
target_customer <- customer[customer$cluster==1 | customer$cluster==2,]
product <- fread('product_table.csv')
head(product)
transaction <- fread('transaction_table.csv')
tran <- transaction[,c('cust_id','prod_id','tran_prod_sale_qty')]
head(tran)
#join the datasets
# inner joins via data.table's X[Y, on=, nomatch=0] syntax
cust_tran <- target_customer[tran,on='cust_id',nomatch=0]
cust_tran <- cust_tran[,c('cust_id','prod_id','tran_prod_sale_qty')]
head(cust_tran)
cust_tran_prod <- cust_tran[product,on='prod_id',nomatch=0]
head(cust_tran_prod)
#filter out transactions related to shampoo or hair conditioners products
nivea_shampoo <- cust_tran_prod[cust_tran_prod$category_desc_eng %in% c('SHAMPOO','HAIR CONDITIONERS')]
head(nivea_shampoo)
#pivot into matrix
# NOTE(review): unconditional install.packages() in a script re-installs on
# every run; prefer if (!requireNamespace("reshape")) install.packages("reshape")
install.packages('reshape')
library(reshape)
matrix_data <- nivea_shampoo[,c('cust_id','prod_id','tran_prod_sale_qty')]
matrix_data$cust_id <- as.factor(matrix_data$cust_id)
matrix_data$prod_id <- as.factor(matrix_data$prod_id)
matrix_data$tran_prod_sale_qty <- as.numeric(matrix_data$tran_prod_sale_qty)
# coerce (user, item, rating) triples into recommenderlab's sparse rating matrix
matrix1 <- as(matrix_data, "realRatingMatrix")
#build the recommender model
recomModel <- Recommender(matrix1, method = "LIBMF")
# NOTE(review): the variable name `predict` shadows stats::predict within this
# script; consider renaming (e.g. pred_ratings) to avoid confusion
predict <- predict(recomModel, matrix1, type='ratingMatrix')
result<-as(predict, "matrix")
#find out the list of NIVEA products
nivea<-product[product$brand_desc=='NIVEA',prod_id]
# restrict the predicted-ratings matrix to NIVEA product columns
nivea_result<-result[,colnames(result) %in% nivea]
top_product<-apply(nivea_result, 1, max)
# spot check: which column holds the max rating for the first customer
nivea_result[1,]==top_product[1]
#find out the top rated NIVEA product for each of our target customer
recommend_product<-apply(nivea_result, 1, function(t) colnames(nivea_result)[which.max(t)])
recommend_product<-as.data.frame(recommend_product)
recommend_product$prod_id<-recommend_product$recommend_product
recommend_product$cust_id<-rownames(recommend_product)
recommend_product<-recommend_product[c('cust_id','prod_id')]
# NOTE(review): View() is interactive-only; harmless but no-op in batch runs
View(recommend_product)
unique(recommend_product$prod_id)
fwrite(recommend_product,'recommend_product.csv')
| /recommenderSystem.R | no_license | danieldiii/Marketing-Analytics | R | false | false | 2,128 | r | library(recommenderlab)
library(data.table)
#read in the data
customer <- fread('target_customers.csv')
target_customer <- customer[customer$cluster==1 | customer$cluster==2,]
product <- fread('product_table.csv')
head(product)
transaction <- fread('transaction_table.csv')
tran <- transaction[,c('cust_id','prod_id','tran_prod_sale_qty')]
head(tran)
#join the datasets
cust_tran <- target_customer[tran,on='cust_id',nomatch=0]
cust_tran <- cust_tran[,c('cust_id','prod_id','tran_prod_sale_qty')]
head(cust_tran)
cust_tran_prod <- cust_tran[product,on='prod_id',nomatch=0]
head(cust_tran_prod)
#filter out transactions related to shampoo or hair conditioners products
nivea_shampoo <- cust_tran_prod[cust_tran_prod$category_desc_eng %in% c('SHAMPOO','HAIR CONDITIONERS')]
head(nivea_shampoo)
#pivot into matrix
install.packages('reshape')
library(reshape)
matrix_data <- nivea_shampoo[,c('cust_id','prod_id','tran_prod_sale_qty')]
matrix_data$cust_id <- as.factor(matrix_data$cust_id)
matrix_data$prod_id <- as.factor(matrix_data$prod_id)
matrix_data$tran_prod_sale_qty <- as.numeric(matrix_data$tran_prod_sale_qty)
matrix1 <- as(matrix_data, "realRatingMatrix")
#build the recommender model
recomModel <- Recommender(matrix1, method = "LIBMF")
predict <- predict(recomModel, matrix1, type='ratingMatrix')
result<-as(predict, "matrix")
#find out the list of NIVEA products
nivea<-product[product$brand_desc=='NIVEA',prod_id]
nivea_result<-result[,colnames(result) %in% nivea]
top_product<-apply(nivea_result, 1, max)
nivea_result[1,]==top_product[1]
#find out the top rated NIVEA product for each of our target customer
recommend_product<-apply(nivea_result, 1, function(t) colnames(nivea_result)[which.max(t)])
recommend_product<-as.data.frame(recommend_product)
recommend_product$prod_id<-recommend_product$recommend_product
recommend_product$cust_id<-rownames(recommend_product)
recommend_product<-recommend_product[c('cust_id','prod_id')]
View(recommend_product)
unique(recommend_product$prod_id)
fwrite(recommend_product,'recommend_product.csv')
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/prism_subset_folders.R
\name{prism_subset_folders}
\alias{prism_subset_folders}
\title{Subsets PRISM folders on the disk}
\usage{
prism_subset_folders(type, temp_period, years = NULL, mon = NULL,
minDate = NULL, maxDate = NULL, dates = NULL, resolution = NULL)
}
\arguments{
\item{type}{The type of data you want to subset. Must be "ppt", "tmean",
"tmin", "tmax", "tdmean", "vpdmin", or "vpdmax".}
\item{temp_period}{The temporal period to subset. Must be "annual",
"monthly", "daily", "monthly normals", or "annual normals".}
\item{years}{Valid numeric year, or vector of years.}
\item{mon}{Valid numeric month, or vector of months.}
\item{minDate}{Date to start subsetting daily data. Must be specified in
a valid iso-8601 (e.g. YYYY-MM-DD) format. May be provided as either a
character or \link[base:Date]{base::Date} class.}
\item{maxDate}{Date to end subsetting daily data. Must be specified in
a valid iso-8601 (e.g. YYYY-MM-DD) format. May be provided as either a
character or \link[base:Date]{base::Date} class.}
\item{dates}{A vector of daily dates to subset. Must be specified in
a valid iso-8601 (e.g. YYYY-MM-DD) format. May be provided as either a
character or \link[base:Date]{base::Date} class.}
\item{resolution}{The spatial resolution of the data, must be either "4km" or
"800m". Should only be specified for \code{temp_period} of "normals".}
}
\value{
A character vector of the folders that meet the type and temporal
period specified. \code{character(0)} is returned if no folders are found that
meet the specifications.
}
\description{
\code{prism_subset_folders()} subsets the PRISM folders stored on disk by type,
temporal period, and date. It looks through all of the PRISM data that have
been downloaded in the "prism.path" and returns the subset based on
\code{type}, \code{temp_period}, and specified dates.
}
\details{
\code{temp_period} must be specified so the function can distinguish between
wanting annual data or wanting monthly data for a specified year. For example
\code{prism_subset_folders("tmean", "annual", years = 2012)} would provide only
one folder: the annual average temperature for 2012. However,
\code{prism_subset_folders("tmean", "monthly", years = 2012)} would provide 12
folders: each monthly tmean folder for 2012.
\code{temp_period}, \code{years}, and \code{mon} can be combined in various different ways
to obtain different groupings of data. \code{years}, \code{mon}, and the daily
specifiers (\code{minDate}/\code{maxDate} or \code{dates}) are optional. Not specifying any
of those would result in getting all annual, monthly, or daily data.
\code{minDate}/\code{maxDate} or \code{dates} should only be specified for a \code{temp_period}
of "daily". Additionally, only \code{dates}, or \code{minDate} and \code{maxDate}, should be
specified, but all three should not be specified. Nor should the daily
specifiers be combined with \code{years} and/or \code{mon}. For example, if daily
folders are desired, then specify \code{years} and/or \code{mon} to get all days for
those years and months \strong{or} specify the specific dates using
\code{minDate}/\code{maxDate} or \code{dates}
}
\examples{
\dontrun{
# get all annual tmin
prism_subset_folders("tmin", "annual")
# get only 2000-2015 annual tmin
prism_subset_folders("tmin", "annual", years = 2000:2015)
# get monthly precipitation for 2000-2010
prism_subset_folders("ppt", "monthly", years = 2000:2010)
# get only June-August monthly precip data for 2000-2010
prism_subset_folders("ppt", "monthly", years = 2000:2010, mon = 6:8)
# get all daily tmax for July-August in 2010
prism_subset_folders("tmax", "daily", years = 2010, mon = 7:8)
# same as:
prism_subset_folders(
"tmax",
"daily",
minDate = "2010-07-01",
maxDate = "2010-08-31"
)
# get the 4km 30-year average precip for January and February
prism_subset_folders("ppt", "monthly normals", mon = 1:2, resolution = "4km")
}
}
\seealso{
\code{\link[=ls_prism_data]{ls_prism_data()}}
}
| /man/prism_subset_folders.Rd | no_license | zejiang-unsw/prism | R | false | true | 4,051 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/prism_subset_folders.R
\name{prism_subset_folders}
\alias{prism_subset_folders}
\title{Subsets PRISM folders on the disk}
\usage{
prism_subset_folders(type, temp_period, years = NULL, mon = NULL,
minDate = NULL, maxDate = NULL, dates = NULL, resolution = NULL)
}
\arguments{
\item{type}{The type of data you want to subset. Must be "ppt", "tmean",
"tmin", "tmax", "tdmean", "vpdmin", or "vpdmax".}
\item{temp_period}{The temporal period to subset. Must be "annual",
"monthly", "daily", "monthly normals", or "annual normals".}
\item{years}{Valid numeric year, or vector of years.}
\item{mon}{Valid numeric month, or vector of months.}
\item{minDate}{Date to start subsetting daily data. Must be specified in
a valid iso-8601 (e.g. YYYY-MM-DD) format. May be provided as either a
character or \link[base:Date]{base::Date} class.}
\item{maxDate}{Date to end subsetting daily data. Must be specified in
a valid iso-8601 (e.g. YYYY-MM-DD) format. May be provided as either a
character or \link[base:Date]{base::Date} class.}
\item{dates}{A vector of daily dates to subset. Must be specified in
a valid iso-8601 (e.g. YYYY-MM-DD) format. May be provided as either a
character or \link[base:Date]{base::Date} class.}
\item{resolution}{The spatial resolution of the data, must be either "4km" or
"800m". Should only be specified for \code{temp_period} of "normals".}
}
\value{
A character vector of the folders that meet the type and temporal
period specified. \code{character(0)} is returned if no folders are found that
meet the specifications.
}
\description{
\code{prism_subset_folders()} subsets the PRISM folders stored on disk by type,
temporal period, and date. It looks through all of the PRISM data that have
been downloaded in the "prism.path" and returns the subset based on
\code{type}, \code{temp_period}, and specified dates.
}
\details{
\code{temp_period} must be specifed so the function can distinguish between
wanting annual data or wanting monthly data for a specified year. For example
\code{prism_subset_folders("tmean", "annual", years = 2012)} would provide only
one folder: the annual average temperature for 2012. However,
\code{prism_subset_folders("tmean", "monthly", years = 2012)} would provide 12
folders: each monthly tmean folder for 2012.
\code{temp_period}, \code{years}, and \code{mon} can be combined in various different ways
to obtain different groupings of data. \code{years}, \code{mon}, and the daily
specifiers (\code{minDate}/\code{maxDate} or \code{dates}) are optional. Not specifying any
of those would result in getting all annual, monthly, or daily data.
\code{minDate}/\code{maxDate} or \code{dates} should only be specified for a \code{temp_period}
of "daily". Additionally, only \code{dates}, or \code{minDate} and \code{maxDate}, should be
specified, but all three should not be specified. Nor should the daily
specifies be combined with \code{years} and/or \code{mon}. For example, if daily
folders are desired, then specify \code{years} and/or \code{mon} to get all days for
those years and months \strong{or} specify the specific dates using
\code{minDate}/\code{maxDate} or \code{dates}
}
\examples{
\dontrun{
# get all annual tmin
prism_subset_folders("tmin", "annual")
# get only 2000-2015 annual tmin
prism_subset_folder("tmin", "annual", years = 2000-2015)
# get monthly precipitation for 2000-2010
prism_subset_folders("ppt", "monthly", years = 2000-2010)
# get only June-August monthly precip data for 2000-2010
prism_subset_folders("ppt", "monthly", years = 2000-2010, mon = 6:8)
# get all daily tmax for July-August in 2010
prism_subset_folders("tmax", "daily", years = 2010, mon = 7:8)
# same as:
prism_subset_folders(
"tmax",
"daily",
minDate = "2010-07-01",
maxDate = "2010-08-31"
)
# get the 4km 30-year average precip for January and February
prism_subset_folders("ppt", "monthly normals", mon = 1:2, resolution = "4km")
}
}
\seealso{
\code{\link[=ls_prism_data]{ls_prism_data()}}
}
|
\name{export2csv}
\alias{export2csv}
\title{
Exporting descriptives table to plain text (CSV) format
}
\description{
This function takes the result of \code{createTable} and exports the tables to plain text (CSV) format.
}
\usage{
export2csv(x, file, which.table="descr", sep=",", nmax = TRUE, header.labels = c(), ...)
}
\arguments{
\item{x}{an object of class 'createTable'.}
\item{file}{file where table in CSV format will be written. Also, another file with the extension '_appendix' is written with the available data table.}
\item{which.table}{character indicating which table is printed. Possible values are 'descr', 'avail' or 'both' (partial matching allowed), exporting descriptives by groups table, available data table or both tables, respectively. Default value is 'descr'.}
\item{sep}{character. The variable separator, same as 'sep' argument from \code{\link[utils]{write.table}}. Default value is ','.}
\item{nmax}{logical, indicating whether to show the number of subjects with at least one valid value across all row-variables. Default value is TRUE.}
\item{header.labels}{see the 'header.labels' argument from \code{\link{createTable}}.}
\item{\ldots}{other arguments passed to \code{\link[utils]{write.table}}.}
}
\note{
The way to compute the 'N' shown in the bivariate table header, controlled by 'nmax' argument, has been changed from previous versions (<1.3). In the older versions 'N' was computed as the maximum across the cells within each column (group) from the 'available data' table ('avail').
}
\seealso{
\code{\link{createTable}}, \code{\link{export2latex}}, \code{\link{export2pdf}}, \code{\link{export2html}}, \code{\link{export2md}}, \code{\link{export2word}}
}
\examples{
\dontrun{
require(compareGroups)
data(regicor)
res <- compareGroups(sex ~. -id-todeath-death-tocv-cv, regicor)
export2csv(createTable(res, hide.no = 'n'), file=tempfile(fileext=".csv"))
}
}
\keyword{utilities}
| /man/export2csv.Rd | no_license | isubirana/compareGroups | R | false | false | 2,112 | rd | \name{export2csv}
\alias{export2csv}
\title{
Exporting descriptives table to plain text (CSV) format
}
\description{
This function takes the result of \code{createTable} and exports the tables to plain text (CSV) format.
}
\usage{
export2csv(x, file, which.table="descr", sep=",", nmax = TRUE, header.labels = c(), ...)
}
\arguments{
\item{x}{an object of class 'createTable'.}
\item{file}{file where table in CSV format will be written. Also, another file with the extension '_appendix' is written with the available data table.}
\item{which.table}{character indicating which table is printed. Possible values are 'descr', 'avail' or 'both' (partial matching allowed), exporting descriptives by groups table, available data table or both tables, respectively. Default value is 'descr'.}
\item{sep}{character. The variable separator, same as 'sep' argument from \code{\link[utils]{write.table}}. Default value is ','.}
\item{nmax}{logical, indicating whether to show the number of subjects with at least one valid value across all row-variables. Default value is TRUE.}
\item{header.labels}{see the 'header.labels' argument from \code{\link{createTable}}.}
\item{\ldots}{other arguments passed to \code{\link[utils]{write.table}}.}
}
\note{
The way to compute the 'N' shown in the bivariate table header, controlled by 'nmax' argument, has been changed from previous versions (<1.3). In the older versions 'N' was computed as the maximum across the cells withing each column (group) from the 'available data' table ('avail').
}
\seealso{
\code{\link{createTable}}, \code{\link{export2latex}}, \code{\link{export2pdf}}, \code{\link{export2html}}, \code{\link{export2md}}, \code{\link{export2word}}
}
\examples{
\dontrun{
require(compareGroups)
data(regicor)
res <- compareGroups(sex ~. -id-todeath-death-tocv-cv, regicor)
export2csv(createTable(res, hide.no = 'n'), file=tempfile(fileext=".csv"))
}
}
\keyword{utilities}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/z2r.r
\name{z2r}
\alias{z2r}
\title{Reverse Fisher's transformation of correlation coefficient to z-score}
\usage{
z2r(z)
}
\description{
Reverse Fisher's transformation of correlation coefficient to z-score
}
| /man/z2r.Rd | no_license | devincaughey/CaugheyTools | R | false | true | 286 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/z2r.r
\name{z2r}
\alias{z2r}
\title{Reverse Fisher's transformation of correlation cofficient to z-score}
\usage{
z2r(z)
}
\description{
Reverse Fisher's transformation of correlation cofficient to z-score
}
|
#' Transform a GCT object in to a long form \code{\link{data.table}} (aka 'melt')
#'
#' @description Utilizes the \code{\link{data.table::melt}} function to transform the
#' matrix into long form. Optionally can include the row and column
#' annotations in the transformed \code{\link{data.table}}.
#'
#' @param g the GCT object
#' @param keep_rdesc boolean indicating whether to keep the row
#' descriptors in the final result
#' @param keep_cdesc boolean indicating whether to keep the column
#' descriptors in the final result
#' @param remove_symmetries boolean indicating whether to remove
#' the lower triangle of the matrix (only applies if \code{g@mat} is symmetric)
#' @param suffixes the character suffixes to be applied if there are
#' collisions between the names of the row and column descriptors
#'
#' @return a \code{\link{data.table}} object with the row and column ids and the matrix
#' values and (optinally) the row and column descriptors
#'
#' @examples
#' # simple melt, keeping both row and column meta
#' head(melt.gct(ds))
#'
#' # update row/colum suffixes to indicate rows are genes, columns experiments
#' head(melt.gct(ds, suffixes = c("_gene", "_experiment")))
#'
#' # ignore row/column meta
#' head(melt.gct(ds, keep_rdesc = F, keep_cdesc = F))
#'
#' @family GCT utilities
#' @export
## Generic for melting a GCT object's matrix into long form.
## Defaults use TRUE/FALSE rather than the reassignable shortcuts T/F.
setGeneric("melt.gct", function(g, suffixes = NULL, remove_symmetries = FALSE,
                                keep_rdesc = TRUE, keep_cdesc = TRUE) {
  standardGeneric("melt.gct")
})
setMethod("melt.gct", signature("GCT"),
          function(g, suffixes, remove_symmetries, keep_rdesc, keep_cdesc) {
  # Melt a GCT object's matrix into a long-form data.table and merge the row
  # and/or column annotations back in, as indicated by keep_rdesc / keep_cdesc.
  # Assumes the rdesc and cdesc data.frames both have an 'id' field.
  # If remove_symmetries is TRUE and the matrix is symmetric, the upper
  # triangle is masked to NA so only one copy of each pair is returned.
  message("melting GCT object...")
  mat <- g@mat
  # && short-circuits, so isSymmetric() is only evaluated when requested
  if (remove_symmetries && isSymmetric(mat)) {
    mat[upper.tri(mat, diag = FALSE)] <- NA
  }
  mat <- data.table(mat)
  mat$rid <- g@rid
  d <- melt(mat, id.vars = "rid")
  setattr(d, "names", c("id.x", "id.y", "value"))
  d$id.x <- as.character(d$id.x)
  d$id.y <- as.character(d$id.y)
  # drop the NA cells introduced by the symmetry masking above
  # standard data.frame subset here to comply with testthat
  d <- d[!is.na(d$value), , with = FALSE]
  if (keep_rdesc && keep_cdesc) {
    # merge back in both row and column descriptors
    setattr(d, "names", c("id", "id.y", "value"))
    d <- merge(d, data.table(g@rdesc), by = "id")
    setnames(d, "id", "id.x")
    setnames(d, "id.y", "id")
    # BUG FIX: the column descriptors were previously never merged in this
    # branch (the two setnames calls just renamed id.y -> id -> id.y);
    # merge cdesc while the column ids are named 'id', then restore 'id.y'.
    d <- merge(d, data.table(g@cdesc), by = "id")
    setnames(d, "id", "id.y")
  } else if (keep_rdesc) {
    # keep only row descriptors
    rdesc <- data.table(g@rdesc)
    setnames(rdesc, "id", "id.x")
    d <- merge(d, rdesc, by = "id.x")
  } else if (keep_cdesc) {
    # keep only column descriptors
    cdesc <- data.table(g@cdesc)
    setnames(cdesc, "id", "id.y")
    d <- merge(d, cdesc, by = "id.y")
  }
  # replace the default .x/.y suffixes with user-supplied ones, if provided
  if (!is.null(suffixes) && length(suffixes) == 2) {
    newnames <- gsub("\\.x", suffixes[1], names(d))
    newnames <- gsub("\\.y", suffixes[2], newnames)
    setattr(d, "names", newnames)
  }
  message("done")
  return(d)
})
#' Check if x is a whole number
#'
#' @param x number (or numeric vector) to test
#' @param tol the allowed tolerance
#' @return boolean indicating whether x is within tol of a whole-number value
#' @examples
#' is.wholenumber(1)
#' is.wholenumber(0.5)
#' @export
is.wholenumber <- function(x, tol = .Machine$double.eps^0.5) {
  # distance from the nearest integer, compared against the tolerance
  dist_to_int <- abs(x - round(x))
  dist_to_int < tol
}
#' Check whether \code{test_names} are columns in the \code{\link{data.frame}} df
#' @param test_names a vector of column names to test
#' @param df the \code{\link{data.frame}} to test against
#' @param throw_error boolean indicating whether to throw an error if
#'   any \code{test_names} are not found in \code{df}
#' @return boolean indicating whether or not all \code{test_names} are
#'   columns of \code{df}
#' @examples
#' check_colnames(c("pert_id", "pert_iname"), cdesc_char) # TRUE
#' check_colnames(c("pert_id", "foobar"), cdesc_char, throw_error=FALSE) # FALSE, suppress error
#' @export
check_colnames <- function(test_names, df, throw_error=TRUE) {
  # check whether test_names are valid names in df; throw error if specified.
  # T/F replaced with TRUE/FALSE: the single-letter forms are ordinary
  # variables and can be reassigned by user code.
  diffs <- setdiff(test_names, names(df))
  if (length(diffs) > 0) {
    if (throw_error) {
      stop(paste("the following column names are not found in", deparse(substitute(df)), ":",
                 paste(diffs, collapse=" "), "\n"))
    } else {
      return(FALSE)
    }
  } else {
    return(TRUE)
  }
}
#' Do a robust \code{\link{data.frame}} subset to a set of ids
#' @param df \code{\link{data.frame}} to subset
#' @param ids the ids to subset to
#' @return a subset version of \code{df}
#' @keywords internal
subset_to_ids <- function(df, ids) {
  # fail fast if df does not carry an 'id' column
  check_colnames("id", df)
  # match() keeps the requested id order; ids with no match yield NA rows
  row_idx <- match(ids, df$id)
  subset_df <- data.frame(df[row_idx, ])
  # re-apply the original column names: data.frame() can mangle them
  names(subset_df) <- names(df)
  return(subset_df)
}
#' Subset a gct object using the provided row and column ids
#'
#' @param g a gct object
#' @param rid a vector of character ids or integer indices for ROWS
#' @param cid a vector of character ids or integer indices for COLUMNS
#' @return a GCT object containing only the requested rows and columns,
#'   with the matrix and the row/column annotations subset to match
#' @examples
#' # first 10 rows and columns by index
#' (a <- subset.gct(ds, rid=1:10, cid=1:10))
#'
#' # first 10 rows and columns using character ids
#' (b <- subset.gct(ds, rid=ds@rid[1:10], cid=ds@cid[1:10]))
#'
#' identical(a, b) # TRUE
#'
#' @family GCT utilities
#' @export
setGeneric("subset.gct", function(g, rid=NULL, cid=NULL) {
standardGeneric("subset.gct")
})
setMethod("subset.gct", signature("GCT"),
function(g, rid, cid) {
# ids can either be a vector of character strings corresponding
# to row / column ids in the gct object, or integer vectors
# corresponding to row / column indices
if (is.null(rid)) rid <- g@rid
if (is.null(cid)) cid <- g@cid
# see whether we were given characters or integers
# and handle accordingly
process_ids <- function(ids, ref_ids, param) {
# simple helper function to handle id/idx conversion
# for character or integer ids
if (is.character(ids)) {
idx <- which(ref_ids %in% ids)
} else if (all(is.wholenumber(ids))) {
idx <- ids
ids <- ref_ids[idx]
} else {
stop(paste(param, "must be character or ingeter"))
}
return(list(ids=ids, idx=idx))
}
processed_rid <- process_ids(rid, g@rid, "rid")
processed_cid <- process_ids(cid, g@cid, "cid")
rid <- processed_rid$ids
ridx <- processed_rid$idx
cid <- processed_cid$ids
cidx <- processed_cid$idx
sdrow <- setdiff(rid, g@rid)
sdcol <- setdiff(cid, g@cid)
if (length(sdrow) > 0) warning("the following rids were not found:\n", paste(sdrow, collapse="\n"))
if (length(sdcol) > 0) warning("the following cids were not found:\n", paste(sdcol, collapse="\n"))
newg <- g
# make sure ordering is right
rid <- g@rid[ridx]
cid <- g@cid[cidx]
newg@mat <- matrix(g@mat[ridx, cidx], nrow=length(rid), ncol=length(cid))
colnames(newg@mat) <- cid
rownames(newg@mat) <- rid
# cdesc <- data.frame(g@cdesc)
# rdesc <- data.frame(g@rdesc)
# make sure annotations row ordering matches
# matrix, rid, and cid
newg@cdesc <- subset_to_ids(g@cdesc, cid)
newg@rdesc <- subset_to_ids(g@rdesc, rid)
newg@rid <- rid
newg@cid <- cid
if (any(dim(newg@mat) == 0)) {
warning("one or more returned dimension is length 0
check that at least some of the provided rid and/or
cid values have matches in the GCT object supplied")
}
return(newg)
})
#' Merge two GCT objects together
#'
#' @param g1 the first GCT object
#' @param g2 the second GCT object
#' @param dimension the dimension on which to merge (row or column)
#' @param matrix_only boolean indicating whether to keep only the
#' data matrices from \code{g1} and \code{g2} and ignore their
#' row and column meta data
#' @return a GCT object containing the concatenation of \code{g1} and
#'   \code{g2} along the requested dimension
#' @examples
#' # take the first 10 and last 10 rows of an object
#' # and merge them back together
#' (a <- subset.gct(ds, rid=1:10))
#' (b <- subset.gct(ds, rid=969:978))
#' (merged <- merge.gct(a, b, dimension="row"))
#'
#' @family GCT utilities
#' @export
setGeneric("merge.gct", function(g1, g2, dimension="row", matrix_only=F) {
standardGeneric("merge.gct")
})
setMethod("merge.gct", signature("GCT", "GCT"),
function(g1, g2, dimension, matrix_only) {
# given two gcts objects g1 and g2, merge them
# on the specified dimension
if (dimension == "column") dimension <- "col"
if (dimension == "row") {
message("appending rows...")
newg <- g1
# we're just appending rows so don't need to do anything
# special with the rid or rdesc. just cat them
newg@rid <- c(g1@rid, g2@rid)
newg@rdesc <- data.frame(rbind(data.table(g1@rdesc), data.table(g2@rdesc), fill=T))
# need figure out the index for how to sort the columns of
# g2@mat so that they are in sync with g1@mat
idx <- match(g1@cid, g2@cid)
newg@mat <- rbind(g1@mat, g2@mat[, idx])
if (!matrix_only) {
# apply the same sort order to the rows of g2@cdesc so that
# it's in sync with the final merged matrix
# figure out which fields are common and keep from the first gct
cmn_names <- intersect(names(g1@cdesc), names(g2@cdesc))
newg@cdesc <- cbind(g1@cdesc, g2@cdesc[idx, !(names(g2@cdesc) %in% cmn_names)])
} else {
newg@cdesc <- data.frame()
}
}
else if (dimension == "col") {
message("appending columns...")
newg <- g1
# we're just appending columns so don't need to do anything
# special with cid or cdesc. just cat them
newg@cid <- c(g1@cid, g2@cid)
newg@cdesc <- data.frame(rbind(data.table(g1@cdesc), data.table(g2@cdesc), fill=T))
# need figure out the index for how to sort the rows of
# g2@mat so that they are in sync with g1@mat
idx <- match(g1@rid, g2@rid)
newg@mat <- cbind(g1@mat, g2@mat[idx, ])
if (!matrix_only) {
# apply the same sort order to the rows of g2@rdesc so that
# it's in sync with the final merged matrix
# figure out which fields are common and keep from the first gct
cmn_names <- intersect(names(g1@rdesc), names(g2@rdesc))
newg@rdesc <- cbind(g1@rdesc, g2@rdesc[idx, !(names(g2@rdesc) %in% cmn_names)])
} else {
newg@rdesc <- data.frame()
}
} else {
stop("dimension must be either row or col")
}
return(newg)
})
#' Merge two \code{\link{data.frame}}s, but where there are common fields
#' those in \code{x} are retained and those in \code{y} are dropped.
#'
#' @param x the \code{\link{data.frame}} whose columns take precedence
#' @param y another \code{\link{data.frame}}
#' @param by a vector of column names to merge on
#' @param allow.cartesian boolean indicating whether it's ok
#' for repeated values in either table to merge with each other
#' over and over again.
#' @param as_data_frame boolean indicating whether to ensure
#' the returned object is a \code{\link{data.frame}} instead of a \code{\link{data.table}}.
#' This ensures compatibility with GCT object conventions,
#' that is, the \code{rdesc} and \code{cdesc} slots must be strictly
#' \code{\link{data.frame}} objects.
#'
#' @return a \code{\link{data.frame}} or \code{\link{data.table}} object
#'
#' @examples
#' (x <- data.table(foo=letters[1:10], bar=1:10))
#' (y <- data.table(foo=letters[1:10], bar=11:20, baz=LETTERS[1:10]))
#' # the 'bar' column from y will be dropped on merge
#' merge_with_precedence(x, y, by="foo")
#'
#' @keywords internal
#' @seealso data.table::merge
merge_with_precedence <- function(x, y, by, allow.cartesian=TRUE,
                                  as_data_frame = TRUE) {
  # TRUE/FALSE spelled out instead of T/F so the defaults cannot be
  # shadowed by variables named T or F
  trash <- check_colnames(by, x)
  trash <- check_colnames(by, y)
  # cast as data.tables
  x <- data.table(x)
  y <- data.table(y)
  # get rid of row names
  # NOTE(review): the row-names attribute is spelled "row.names";
  # setting "rownames" is likely a no-op, kept here for behavior parity
  setattr(x, "rownames", NULL)
  setattr(y, "rownames", NULL)
  # keep the join keys plus any columns unique to y; shared non-key
  # columns are dropped from y so x's version takes precedence
  common_cols <- intersect(names(x), names(y))
  y_keepcols <- unique(c(by, setdiff(names(y), common_cols)))
  y <- y[, y_keepcols, with=FALSE]
  # if not all ids match, issue a warning
  # (assumes by is a single column name here -- TODO confirm callers)
  if (!all(x[[by]] %in% y[[by]])) {
    warning("not all rows of x had a match in y. some columns may contain NA")
  }
  # left join keeping all the values in x, making sure that the
  # resulting data.table is sorted in the same order as the
  # original object x
  merged <- merge(x, y, by=by, allow.cartesian=allow.cartesian, all.x=TRUE)
  if (as_data_frame) {
    # cast back to a data.frame if requested
    merged <- data.frame(merged)
  }
  return(merged)
}
#' Add annotations to a GCT object
#'
#' @description Given a GCT object and either a \code{\link{data.frame}} or
#' a path to an annotation table, apply the annotations to the
#' gct using the given \code{keyfield}.
#'
#' @param g a GCT object
#' @param annot a \code{\link{data.frame}} or path to text table of annotations
#' @param dimension either 'row' or 'column' indicating which dimension
#' of \code{g} to annotate
#' @param keyfield the character name of the column in \code{annot} that
#' matches the row or column identifiers in \code{g}
#'   (this column is copied to an \code{id} column internally before merging)
#'
#' @return a GCT object with annotations applied to the specified
#' dimension
#'
#' @examples
#' \dontrun{
#' g <- parse.gctx('/path/to/gct/file')
#' g <- annotate.gct(g, '/path/to/annot')
#' }
#'
#' @family GCT utilities
#' @export
setGeneric("annotate.gct", function(g, annot, dimension="row", keyfield="id") {
standardGeneric("annotate.gct")
})
setMethod("annotate.gct", signature("GCT"),
function(g, annot, dimension, keyfield) {
if (!(any(class(annot) == "data.frame"))) {
# given a file path, try to read it in
annot <- fread(annot)
} else {
# convert to data.table
annot <- data.table(annot)
}
# convert the keyfield column to id for merging
# assumes the gct object has an id field in its existing annotations
if (!(keyfield %in% names(annot))) {
stop(paste("column", keyfield, "not found in annotations"))
}
# rename the column to id so we can do the merge
annot$id <- annot[[keyfield]]
if (dimension == "column") dimension <- "col"
if (dimension == "row") {
orig_id <- g@rdesc$id
merged <- merge_with_precedence(g@rdesc, annot, by="id", allow.cartesian=T,
as_data_frame=T)
idx <- match(orig_id, merged$id)
merged <- merged[idx, ]
g@rdesc <- merged
} else if (dimension == "col") {
orig_id <- g@cdesc$id
merged <- merge_with_precedence(g@cdesc, annot, by="id", allow.cartesian=T,
as_data_frame=T)
idx <- match(orig_id, merged$id)
merged <- merged[idx, ]
g@cdesc <- merged
} else {
stop("dimension must be either row or column")
}
return(g)
})
#' Transpose a GCT object
#'
#' @param g the \code{GCT} object
#'
#' @return a modified version of the input \code{GCT} object
#' where the matrix has been transposed and the row and column
#' ids and annotations have been swapped.
#'
#' @examples
#' transpose.gct(ds)
#'
#' @family GCT utilities
#' @export
setGeneric("transpose.gct", function(g) {
standardGeneric("transpose.gct")
})
setMethod("transpose.gct", signature("GCT"), function(g) {
# transpose matrix
g@mat <- t(g@mat)
# create new data
rid.new <- g@cid
cid.new <- g@rid
rdesc.new <- g@cdesc
cdesc.new <- g@rdesc
# overwrite g
g@rid <- rid.new
g@cid <- cid.new
g@rdesc <- rdesc.new
g@cdesc <- cdesc.new
return(g)
})
#' Convert a GCT object's matrix to ranks
#'
#' @param g the \code{GCT} object to rank
#' @param dim the dimension along which to rank
#' (row or column)
#'
#' @return a modified version of \code{g}, with the
#' values in the matrix converted to ranks
#' (descending: the largest value in each row/column receives rank 1,
#' since ranking is applied to the negated values)
#'
#' @examples
#' (ranked <- rank.gct(ds, dim="column"))
#' # scatter rank vs. score for a few columns
#' plot(ds@mat[, 1:3], ranked@mat[, 1:3],
#' xlab="score", ylab="rank")
#'
#' @family GCT utilities
#' @export
setGeneric("rank.gct", function(g, dim="row") {
standardGeneric("rank.gct")
})
setMethod("rank.gct", signature("GCT"), function(g, dim) {
# check to make sure dim is allowed
if (dim=="column") dim <- "col"
if (!(dim %in% c("row","col"))){
stop('Dim must be one of row, col')
}
# rank along the specified axis. transpose if ranking rows so that the data
# comes back in the correct format
if (dim == 'row'){
g@mat <- t(apply(g@mat, 1, function(x) rank(-1*x)))
} else {
g@mat <- (apply(g@mat, 2, function(x) rank(-1*x)))
}
# done
return(g)
})
| /R/utils.R | permissive | minghao2016/cmapR | R | false | false | 18,367 | r | #' Transform a GCT object in to a long form \code{\link{data.table}} (aka 'melt')
#'
#' @description Utilizes the \code{\link[data.table]{melt}} function to transform the
#' matrix into long form. Optionally can include the row and column
#' annotations in the transformed \code{\link{data.table}}.
#'
#' @param g the GCT object
#' @param keep_rdesc boolean indicating whether to keep the row
#' descriptors in the final result
#' @param keep_cdesc boolean indicating whether to keep the column
#' descriptors in the final result
#' @param remove_symmetries boolean indicating whether to drop the
#' duplicated entries of a symmetric matrix (the implementation masks the
#' upper triangle; only applies if \code{g@mat} is symmetric)
#' @param suffixes the character suffixes to be applied if there are
#' collisions between the names of the row and column descriptors
#'
#' @return a \code{\link{data.table}} object with the row and column ids and the matrix
#' values and (optionally) the row and column descriptors
#'
#' @examples
#' # simple melt, keeping both row and column meta
#' head(melt.gct(ds))
#'
#' # update row/colum suffixes to indicate rows are genes, columns experiments
#' head(melt.gct(ds, suffixes = c("_gene", "_experiment")))
#'
#' # ignore row/column meta
#' head(melt.gct(ds, keep_rdesc = FALSE, keep_cdesc = FALSE))
#'
#' @family GCT utilities
#' @export
setGeneric("melt.gct", function(g, suffixes=NULL, remove_symmetries=FALSE,
                                keep_rdesc=TRUE, keep_cdesc=TRUE) {
  standardGeneric("melt.gct")
})
setMethod("melt.gct", signature("GCT"),
function(g, suffixes, remove_symmetries, keep_rdesc, keep_cdesc) {
# melt a gct object's matrix into a data.frame and merge row and column
# annotations back in, using the provided suffixes
# assumes rdesc and cdesc data.frames both have an 'id' field.
# merges row and/or column annotations into the melted matrix as indicated by
# keep_rdesc and keep_cdesc, respectively.
# if remove_symmetries, will check whether matrix is symmetric
# and return only values corresponding to the upper triangle
# g@rdesc$id <- rownames(g@rdesc)
# g@cdesc$id <- rownames(g@cdesc)
# first, check if matrix is symmetric
# if it is, use only the upper triangle
message("melting GCT object...")
mat <- g@mat
if (remove_symmetries & isSymmetric(mat)) {
mat[upper.tri(mat, diag=F)] <- NA
}
mat <- data.table(mat)
mat$rid <- g@rid
d <- melt(mat, id.vars="rid")
setattr(d, "names", c("id.x", "id.y", "value"))
d$id.x <- as.character(d$id.x)
d$id.y <- as.character(d$id.y)
# standard data.frame subset here to comply with testthat
d <- d[!is.na(d$value), , with=F]
if (keep_rdesc & keep_cdesc) {
# merge back in both row and column descriptors
setattr(d, "names", c("id", "id.y", "value"))
d <- merge(d, data.table(g@rdesc), by="id")
setnames(d, "id", "id.x")
setnames(d, "id.y", "id")
setnames(d, "id", "id.y")
} else if (keep_rdesc) {
# keep only row descriptors
rdesc <- data.table(g@rdesc)
setnames(rdesc, "id", "id.x")
d <- merge(d, rdesc, by="id.x")
} else if (keep_cdesc) {
# keep only column descriptors
cdesc <- data.table(g@cdesc)
setnames(cdesc, "id", "id.y")
d <- merge(d, cdesc, by="id.y")
}
# use suffixes if provided
if (!is.null(suffixes) & length(suffixes) == 2) {
newnames <- gsub("\\.x", suffixes[1], names(d))
newnames <- gsub("\\.y", suffixes[2], newnames)
setattr(d, "names", newnames)
}
message("done")
return(d)
})
#' Check if x is a whole number
#'
#' @param x number to test
#' @param tol the allowed tolerance
#' @return boolean indicating whether x is tol away from a whole number value
#' @examples
#' is.wholenumber(1)
#' is.wholenumber(0.5)
#' @export
is.wholenumber <- function(x, tol = .Machine$double.eps^0.5) {
  # vectorized: compare each value against its rounded counterpart
  return(abs(round(x) - x) < tol)
}
#' Check whether \code{test_names} are columns in the \code{\link{data.frame}} df
#' @param test_names a vector of column names to test
#' @param df the \code{\link{data.frame}} to test against
#' @param throw_error boolean indicating whether to throw an error if
#' any \code{test_names} are not found in \code{df}
#' @return boolean indicating whether or not all \code{test_names} are
#' columns of \code{df}
#' @examples
#' check_colnames(c("pert_id", "pert_iname"), cdesc_char) # TRUE
#' check_colnames(c("pert_id", "foobar"), cdesc_char, throw_error=FALSE) # FALSE, suppress error
#' @export
check_colnames <- function(test_names, df, throw_error=TRUE) {
  # check whether test_names are valid names in df
  # throw error if specified
  # (TRUE/FALSE spelled out: T/F are ordinary variables that can be
  # reassigned, silently changing the default behavior)
  diffs <- setdiff(test_names, names(df))
  if (length(diffs) > 0) {
    if (throw_error) {
      # deparse(substitute(df)) recovers the caller's expression for df
      # so the error message names the offending object
      stop(paste("the following column names are not found in", deparse(substitute(df)), ":",
                 paste(diffs, collapse=" "), "\n"))
    } else {
      return(FALSE)
    }
  } else {
    return(TRUE)
  }
}
#' Do a robust \code{\link{data.frame}} subset to a set of ids
#' @param df \code{\link{data.frame}} to subset
#' @param ids the ids to subset to
#' @return a subset version of \code{df}
#' @keywords internal
subset_to_ids <- function(df, ids) {
  # validate that df carries an 'id' column before indexing on it
  check_colnames("id", df)
  matched <- data.frame(df[match(ids, df$id), ])
  # restore the original column names (data.frame() may alter them)
  names(matched) <- names(df)
  return(matched)
}
#' Subset a gct object using the provided row and column ids
#'
#' @param g a gct object
#' @param rid a vector of character ids or integer indices for ROWS
#' @param cid a vector of character ids or integer indices for COLUMNS
#' @return a GCT object containing only the requested rows and columns,
#'   with the matrix and the row/column annotations subset to match
#' @examples
#' # first 10 rows and columns by index
#' (a <- subset.gct(ds, rid=1:10, cid=1:10))
#'
#' # first 10 rows and columns using character ids
#' (b <- subset.gct(ds, rid=ds@rid[1:10], cid=ds@cid[1:10]))
#'
#' identical(a, b) # TRUE
#'
#' @family GCT utilities
#' @export
setGeneric("subset.gct", function(g, rid=NULL, cid=NULL) {
standardGeneric("subset.gct")
})
setMethod("subset.gct", signature("GCT"),
function(g, rid, cid) {
# ids can either be a vector of character strings corresponding
# to row / column ids in the gct object, or integer vectors
# corresponding to row / column indices
if (is.null(rid)) rid <- g@rid
if (is.null(cid)) cid <- g@cid
# see whether we were given characters or integers
# and handle accordingly
process_ids <- function(ids, ref_ids, param) {
# simple helper function to handle id/idx conversion
# for character or integer ids
if (is.character(ids)) {
idx <- which(ref_ids %in% ids)
} else if (all(is.wholenumber(ids))) {
idx <- ids
ids <- ref_ids[idx]
} else {
stop(paste(param, "must be character or ingeter"))
}
return(list(ids=ids, idx=idx))
}
processed_rid <- process_ids(rid, g@rid, "rid")
processed_cid <- process_ids(cid, g@cid, "cid")
rid <- processed_rid$ids
ridx <- processed_rid$idx
cid <- processed_cid$ids
cidx <- processed_cid$idx
sdrow <- setdiff(rid, g@rid)
sdcol <- setdiff(cid, g@cid)
if (length(sdrow) > 0) warning("the following rids were not found:\n", paste(sdrow, collapse="\n"))
if (length(sdcol) > 0) warning("the following cids were not found:\n", paste(sdcol, collapse="\n"))
newg <- g
# make sure ordering is right
rid <- g@rid[ridx]
cid <- g@cid[cidx]
newg@mat <- matrix(g@mat[ridx, cidx], nrow=length(rid), ncol=length(cid))
colnames(newg@mat) <- cid
rownames(newg@mat) <- rid
# cdesc <- data.frame(g@cdesc)
# rdesc <- data.frame(g@rdesc)
# make sure annotations row ordering matches
# matrix, rid, and cid
newg@cdesc <- subset_to_ids(g@cdesc, cid)
newg@rdesc <- subset_to_ids(g@rdesc, rid)
newg@rid <- rid
newg@cid <- cid
if (any(dim(newg@mat) == 0)) {
warning("one or more returned dimension is length 0
check that at least some of the provided rid and/or
cid values have matches in the GCT object supplied")
}
return(newg)
})
#' Merge two GCT objects together
#'
#' @param g1 the first GCT object
#' @param g2 the second GCT object
#' @param dimension the dimension on which to merge (row or column)
#' @param matrix_only boolean indicating whether to keep only the
#' data matrices from \code{g1} and \code{g2} and ignore their
#' row and column meta data
#' @return a GCT object containing the concatenation of \code{g1} and
#'   \code{g2} along the requested dimension
#' @examples
#' # take the first 10 and last 10 rows of an object
#' # and merge them back together
#' (a <- subset.gct(ds, rid=1:10))
#' (b <- subset.gct(ds, rid=969:978))
#' (merged <- merge.gct(a, b, dimension="row"))
#'
#' @family GCT utilities
#' @export
setGeneric("merge.gct", function(g1, g2, dimension="row", matrix_only=F) {
standardGeneric("merge.gct")
})
setMethod("merge.gct", signature("GCT", "GCT"),
function(g1, g2, dimension, matrix_only) {
# given two gcts objects g1 and g2, merge them
# on the specified dimension
if (dimension == "column") dimension <- "col"
if (dimension == "row") {
message("appending rows...")
newg <- g1
# we're just appending rows so don't need to do anything
# special with the rid or rdesc. just cat them
newg@rid <- c(g1@rid, g2@rid)
newg@rdesc <- data.frame(rbind(data.table(g1@rdesc), data.table(g2@rdesc), fill=T))
# need figure out the index for how to sort the columns of
# g2@mat so that they are in sync with g1@mat
idx <- match(g1@cid, g2@cid)
newg@mat <- rbind(g1@mat, g2@mat[, idx])
if (!matrix_only) {
# apply the same sort order to the rows of g2@cdesc so that
# it's in sync with the final merged matrix
# figure out which fields are common and keep from the first gct
cmn_names <- intersect(names(g1@cdesc), names(g2@cdesc))
newg@cdesc <- cbind(g1@cdesc, g2@cdesc[idx, !(names(g2@cdesc) %in% cmn_names)])
} else {
newg@cdesc <- data.frame()
}
}
else if (dimension == "col") {
message("appending columns...")
newg <- g1
# we're just appending columns so don't need to do anything
# special with cid or cdesc. just cat them
newg@cid <- c(g1@cid, g2@cid)
newg@cdesc <- data.frame(rbind(data.table(g1@cdesc), data.table(g2@cdesc), fill=T))
# need figure out the index for how to sort the rows of
# g2@mat so that they are in sync with g1@mat
idx <- match(g1@rid, g2@rid)
newg@mat <- cbind(g1@mat, g2@mat[idx, ])
if (!matrix_only) {
# apply the same sort order to the rows of g2@rdesc so that
# it's in sync with the final merged matrix
# figure out which fields are common and keep from the first gct
cmn_names <- intersect(names(g1@rdesc), names(g2@rdesc))
newg@rdesc <- cbind(g1@rdesc, g2@rdesc[idx, !(names(g2@rdesc) %in% cmn_names)])
} else {
newg@rdesc <- data.frame()
}
} else {
stop("dimension must be either row or col")
}
return(newg)
})
#' Merge two \code{\link{data.frame}}s, but where there are common fields
#' those in \code{x} are retained and those in \code{y} are dropped.
#'
#' @param x the \code{\link{data.frame}} whose columns take precedence
#' @param y another \code{\link{data.frame}}
#' @param by a vector of column names to merge on
#' @param allow.cartesian boolean indicating whether it's ok
#' for repeated values in either table to merge with each other
#' over and over again.
#' @param as_data_frame boolean indicating whether to ensure
#' the returned object is a \code{\link{data.frame}} instead of a \code{\link{data.table}}.
#' This ensures compatibility with GCT object conventions,
#' that is, the \code{rdesc} and \code{cdesc} slots must be strictly
#' \code{\link{data.frame}} objects.
#'
#' @return a \code{\link{data.frame}} or \code{\link{data.table}} object
#'
#' @examples
#' (x <- data.table(foo=letters[1:10], bar=1:10))
#' (y <- data.table(foo=letters[1:10], bar=11:20, baz=LETTERS[1:10]))
#' # the 'bar' column from y will be dropped on merge
#' merge_with_precedence(x, y, by="foo")
#'
#' @keywords internal
#' @seealso data.table::merge
merge_with_precedence <- function(x, y, by, allow.cartesian=TRUE,
                                  as_data_frame = TRUE) {
  # TRUE/FALSE spelled out instead of T/F so the defaults cannot be
  # shadowed by variables named T or F
  trash <- check_colnames(by, x)
  trash <- check_colnames(by, y)
  # cast as data.tables
  x <- data.table(x)
  y <- data.table(y)
  # get rid of row names
  # NOTE(review): the row-names attribute is spelled "row.names";
  # setting "rownames" is likely a no-op, kept here for behavior parity
  setattr(x, "rownames", NULL)
  setattr(y, "rownames", NULL)
  # keep the join keys plus any columns unique to y; shared non-key
  # columns are dropped from y so x's version takes precedence
  common_cols <- intersect(names(x), names(y))
  y_keepcols <- unique(c(by, setdiff(names(y), common_cols)))
  y <- y[, y_keepcols, with=FALSE]
  # if not all ids match, issue a warning
  # (assumes by is a single column name here -- TODO confirm callers)
  if (!all(x[[by]] %in% y[[by]])) {
    warning("not all rows of x had a match in y. some columns may contain NA")
  }
  # left join keeping all the values in x, making sure that the
  # resulting data.table is sorted in the same order as the
  # original object x
  merged <- merge(x, y, by=by, allow.cartesian=allow.cartesian, all.x=TRUE)
  if (as_data_frame) {
    # cast back to a data.frame if requested
    merged <- data.frame(merged)
  }
  return(merged)
}
#' Add annotations to a GCT object
#'
#' @description Given a GCT object and either a \code{\link{data.frame}} or
#' a path to an annotation table, apply the annotations to the
#' gct using the given \code{keyfield}.
#'
#' @param g a GCT object
#' @param annot a \code{\link{data.frame}} or path to text table of annotations
#' @param dimension either 'row' or 'column' indicating which dimension
#' of \code{g} to annotate
#' @param keyfield the character name of the column in \code{annot} that
#' matches the row or column identifiers in \code{g}
#'   (this column is copied to an \code{id} column internally before merging)
#'
#' @return a GCT object with annotations applied to the specified
#' dimension
#'
#' @examples
#' \dontrun{
#' g <- parse.gctx('/path/to/gct/file')
#' g <- annotate.gct(g, '/path/to/annot')
#' }
#'
#' @family GCT utilities
#' @export
setGeneric("annotate.gct", function(g, annot, dimension="row", keyfield="id") {
standardGeneric("annotate.gct")
})
setMethod("annotate.gct", signature("GCT"),
function(g, annot, dimension, keyfield) {
if (!(any(class(annot) == "data.frame"))) {
# given a file path, try to read it in
annot <- fread(annot)
} else {
# convert to data.table
annot <- data.table(annot)
}
# convert the keyfield column to id for merging
# assumes the gct object has an id field in its existing annotations
if (!(keyfield %in% names(annot))) {
stop(paste("column", keyfield, "not found in annotations"))
}
# rename the column to id so we can do the merge
annot$id <- annot[[keyfield]]
if (dimension == "column") dimension <- "col"
if (dimension == "row") {
orig_id <- g@rdesc$id
merged <- merge_with_precedence(g@rdesc, annot, by="id", allow.cartesian=T,
as_data_frame=T)
idx <- match(orig_id, merged$id)
merged <- merged[idx, ]
g@rdesc <- merged
} else if (dimension == "col") {
orig_id <- g@cdesc$id
merged <- merge_with_precedence(g@cdesc, annot, by="id", allow.cartesian=T,
as_data_frame=T)
idx <- match(orig_id, merged$id)
merged <- merged[idx, ]
g@cdesc <- merged
} else {
stop("dimension must be either row or column")
}
return(g)
})
#' Transpose a GCT object
#'
#' @param g the \code{GCT} object
#'
#' @return a modified version of the input \code{GCT} object
#' where the matrix has been transposed and the row and column
#' ids and annotations have been swapped.
#'
#' @examples
#' transpose.gct(ds)
#'
#' @family GCT utilities
#' @export
setGeneric("transpose.gct", function(g) {
standardGeneric("transpose.gct")
})
setMethod("transpose.gct", signature("GCT"), function(g) {
# transpose matrix
g@mat <- t(g@mat)
# create new data
rid.new <- g@cid
cid.new <- g@rid
rdesc.new <- g@cdesc
cdesc.new <- g@rdesc
# overwrite g
g@rid <- rid.new
g@cid <- cid.new
g@rdesc <- rdesc.new
g@cdesc <- cdesc.new
return(g)
})
#' Convert a GCT object's matrix to ranks
#'
#' @param g the \code{GCT} object to rank
#' @param dim the dimension along which to rank
#' (row or column)
#'
#' @return a modified version of \code{g}, with the
#' values in the matrix converted to ranks
#' (descending: the largest value in each row/column receives rank 1,
#' since ranking is applied to the negated values)
#'
#' @examples
#' (ranked <- rank.gct(ds, dim="column"))
#' # scatter rank vs. score for a few columns
#' plot(ds@mat[, 1:3], ranked@mat[, 1:3],
#' xlab="score", ylab="rank")
#'
#' @family GCT utilities
#' @export
setGeneric("rank.gct", function(g, dim="row") {
standardGeneric("rank.gct")
})
setMethod("rank.gct", signature("GCT"), function(g, dim) {
# check to make sure dim is allowed
if (dim=="column") dim <- "col"
if (!(dim %in% c("row","col"))){
stop('Dim must be one of row, col')
}
# rank along the specified axis. transpose if ranking rows so that the data
# comes back in the correct format
if (dim == 'row'){
g@mat <- t(apply(g@mat, 1, function(x) rank(-1*x)))
} else {
g@mat <- (apply(g@mat, 2, function(x) rank(-1*x)))
}
# done
return(g)
})
|
#' Supporting function
#'
#' Computes partial autocorrelations of the series \code{x} via the
#' Yule-Walker equations, one Toeplitz solve per lag.
#'
#' @param x numeric vector, the observed series
#' @param m integer, the number of PACF lags to compute
#' @return an object of class \code{acfpacf.pacf}: a list with element
#'   \code{pacf}, a 1 x m matrix of partial autocorrelations
#' @importFrom stats toeplitz
acfpacf.pacf <-
function(x, m) {
  # rho: autocorrelations from the companion acfpacf.acf routine;
  # rho[1] is presumably lag 0 (rho[2] is used as the lag-1 value) -- verify
  r <- acfpacf.acf(x, 1)
  rho <- as.matrix(r$acf)
  pr <- matrix(0, 1, m)
  # the lag-1 partial autocorrelation equals the lag-1 autocorrelation
  pr[1] <- rho[2]
  # guard: the original 2:m counted DOWN (2, 1) when m == 1 and indexed
  # past the end of rho
  if (m >= 2) {
    for (k in 2:m) {
      # solve the order-k Yule-Walker system R * phi = rho; the k-th
      # coefficient phi[k] is the lag-k partial autocorrelation
      pmat <- toeplitz(rho[1:k])
      rhovec <- rho[2:(k + 1)]
      phi <- qr.solve(pmat) %*% rhovec
      pr[k] <- phi[k]
    }
  }
  result <- list(pacf = pr)
  class(result) <- "acfpacf.pacf"
  result
}
| /perARMA/R/acfpacf.pacf.R | no_license | ingted/R-Examples | R | false | false | 455 | r | #' Supporting function
#'
#' Computes partial autocorrelations of the series x via the Yule-Walker
#' equations, one Toeplitz solve per lag.
#'
#' @param x numeric vector, the observed series
#' @param m integer (>= 2), the number of PACF lags to compute
#' @return an object of class \code{acfpacf.pacf}: a list with element
#'   \code{pacf}, a 1 x m matrix of partial autocorrelations
#' @importFrom stats toeplitz
acfpacf.pacf <-
function(x,m){
# (nx is computed but never used)
nx=length(x)
# rho: autocorrelations from the companion acfpacf.acf routine;
# rho[1] is presumably lag 0 (rho[2] is used as the lag-1 value) -- verify
r<-acfpacf.acf(x,1)
rho=r$acf
rho=as.matrix(rho)
pr<-matrix(0,1,m)
# lag-1 partial autocorrelation equals the lag-1 autocorrelation
pr[1]=rho[2]
# NOTE(review): when m == 1 this loop still runs (2:1 counts down) and
# indexes past rho -- callers must pass m >= 2
for (k in 2:m)
# solve the order-k Yule-Walker system R * phi = rho; the k-th
# coefficient phi[k] is the lag-k partial autocorrelation
{pmat=toeplitz(rho[1:k])
rhovec=rho[2:(k+1)]
phi=qr.solve(pmat)%*%rhovec
pr[k]=phi[k] }
pacf=pr
result = list(pacf=pacf)
class(result) = "acfpacf.pacf"
result
}
|
load("evan_enrichments_new.Rdata")
source("go_enrichments.R")
genes$stable_id = genes$human_protein
for (iea in c('incl_iea')) {
for (list in c('list7','list8','list9')) {
print(list)
if (iea == 'incl_iea') {
go.data = get("go.hs.iea")
} else {
go.data = get("go.hs")
}
data = read.csv(paste("evan_",list,".txt",sep=""),header=F)
names(data) = 'name'
data$name = gsub("(ENSG.*)_(ENST.*)","\\1",data$name)
merge.names = merge(genes,data,by='name')
merge.ensg = merge(genes,data,by.x='human_gene',by.y='name')
merged = rbind(merge.names,merge.ensg)
print(paste("before",nrow(data),"after",nrow(merged)))
tbl = get.enrich.by.subset(
subset=merged$stable_id,
all=genes$stable_id,
go.df=go.data,
go.field.name='stable_id'
)
write.csv(tbl,file=paste("evan_enrich_",list,"_",iea,".csv",sep=""),row.names=F)
}
} | /projects/2xmammals/evan_enrichments.R | no_license | gjuggler/greg-ensembl | R | false | false | 908 | r | load("evan_enrichments_new.Rdata")
source("go_enrichments.R")
genes$stable_id = genes$human_protein
for (iea in c('incl_iea')) {
for (list in c('list7','list8','list9')) {
print(list)
if (iea == 'incl_iea') {
go.data = get("go.hs.iea")
} else {
go.data = get("go.hs")
}
data = read.csv(paste("evan_",list,".txt",sep=""),header=F)
names(data) = 'name'
data$name = gsub("(ENSG.*)_(ENST.*)","\\1",data$name)
merge.names = merge(genes,data,by='name')
merge.ensg = merge(genes,data,by.x='human_gene',by.y='name')
merged = rbind(merge.names,merge.ensg)
print(paste("before",nrow(data),"after",nrow(merged)))
tbl = get.enrich.by.subset(
subset=merged$stable_id,
all=genes$stable_id,
go.df=go.data,
go.field.name='stable_id'
)
write.csv(tbl,file=paste("evan_enrich_",list,"_",iea,".csv",sep=""),row.names=F)
}
} |
library(RNetCDF)
library(stringr)
# Fill NAs in the surface level (level 1) of a 3-d array by copying the
# value from the shallowest level that has data at the same cell, then
# return the surface slice as a plain vector.
# myMatrix: 3-d array indexed [longitude, latitude, level] -- TODO confirm
fillMissVar=function(myMatrix){
  naLoc=which(is.na(myMatrix[,,1]),arr.ind=TRUE)
  # seq_len() instead of 1:nrow(): the original 1:nrow(naLoc) evaluated
  # to c(1, 0) when there were no surface NAs, causing a subscript error
  for (i in seq_len(nrow(naLoc))){
    longi_index=naLoc[i,1]
    lati_index=naLoc[i,2]
    nonMissing=which(!is.na(myMatrix[longi_index,lati_index,]))
    # if every level is NA there is nothing to copy; leave the NA in place
    # (the original code errored here with an Inf subscript)
    if (length(nonMissing) > 0) {
      level_search=min(nonMissing)
      myMatrix[longi_index,lati_index,1]=myMatrix[longi_index,lati_index,level_search]
    }
  }
  surf_m=myMatrix[,,1]
  return(as.vector(surf_m))
}
# Read one NetCDF file and return a matrix with one row per grid cell:
# longitude, latitude, surface temperature ("t") and surface relative
# humidity ("rh"), with surface NAs filled from deeper levels.
CDFtoMatrix=function(fileNameCDF){
  nc = open.nc(fileNameCDF)
  # fix: the original never released the NetCDF handle; close it even if
  # one of the reads below fails
  on.exit(close.nc(nc), add = TRUE)
  long_coord=var.get.nc(nc,"longitude")
  lat_coord=var.get.nc(nc,"latitude")
  temp=var.get.nc(nc,"t")
  surf_temp=fillMissVar(temp)
  humid=var.get.nc(nc,"rh")
  surf_humid=fillMissVar(humid)
  # grid expansion: longitude varies fastest, matching as.vector() of the
  # [lon, lat] surface slice produced by fillMissVar
  final_m=cbind(rep(long_coord,length(lat_coord)),rep(lat_coord,each=length(long_coord)),surf_temp,surf_humid)
  colnames(final_m)=c("longitude","latitude","Temperature","RelativeHumidity")
  return(final_m)
}
# Download the NetCDF files listed in NetCDF_urlList into a local cache,
# skipping files already present, then combine them into one data frame.
#download files
dir.create("Cache")
dir.create("Cache/CDFfiles")
urlList=read.table("NetCDF_urlList",stringsAsFactors=FALSE)
# derive the cache file name from each URL, e.g. "123456.SUB.nc"
urlList=cbind(urlList,str_extract(urlList[,1],"[0-9]+.SUB.nc"))
colnames(urlList)=c("URLs","fileNames")
# NOTE(review): 1:nrow(urlList) iterates c(1, 0) if the url list is
# empty; seq_len(nrow(urlList)) would be safer
for(i in 1:nrow(urlList)){
if(urlList$fileNames[i] %in% list.files("Cache/CDFfiles/")){
print("Target file already exists, moving to the next file")
}else{
download.file(urlList$URLs[i],paste0("Cache/CDFfiles/",urlList$fileNames[i]))
}
}
#process files and combine to final dataframe
fileNameList=sort(paste0("Cache/CDFfiles/",urlList$fileNames))
# six-digit date token taken from the file path -- presumably yyyymm; verify
dateList=str_extract(fileNameList,"[0-9]{6}")
finalData=do.call(rbind,lapply(fileNameList,CDFtoMatrix))
finalData=as.data.frame(finalData)
# 4896L is assumed to be the number of grid cells each file contributes
# (rows emitted by CDFtoMatrix) -- TODO confirm against the NetCDF grid
finalData=cbind(rep(dateList,each=4896L),finalData)
colnames(finalData)[1]="RecordDate"
save(finalData,file="finalData.rda")
| /CDFprocessing.R | no_license | flycat1989/RFinalProject | R | false | false | 1,731 | r | library(RNetCDF)
library(stringr)
# Fill the surface level (level 1) of a 3-D array: every cell that is NA at
# level 1 takes the value from the lowest level where data is present, and
# the filled surface slice is returned as a plain vector (column-major order).
# Assumes myMatrix is indexed [longitude, latitude, level] -- TODO confirm
# against the NetCDF variable layout.
fillMissVar=function(myMatrix){
  naLoc=which(is.na(myMatrix[,,1]),arr.ind=T)
  # BUG FIX: the original looped over 1:nrow(naLoc), which iterates c(1, 0)
  # and raises a subscript error when there are no NAs at the surface.
  for (i in seq_len(nrow(naLoc))){
    longi_index=naLoc[i,1]
    lati_index=naLoc[i,2]
    level_ok=which(!is.na(myMatrix[longi_index,lati_index,]))
    # Guard: if the cell is NA at every level, leave it NA instead of
    # indexing with Inf (min(integer(0)) is Inf with a warning).
    if (length(level_ok) > 0){
      myMatrix[longi_index,lati_index,1]=myMatrix[longi_index,lati_index,level_ok[1]]
    }
  }
  surf_m=myMatrix[,,1]
  return(as.vector(surf_m))
}
# Read one NetCDF file and build a matrix with one row per grid cell:
# longitude, latitude, surface temperature ("t") and surface relative
# humidity ("rh"). Row order is longitude-fastest, matching fillMissVar's
# column-major output.
CDFtoMatrix=function(fileNameCDF){
  nc = open.nc(fileNameCDF)
  # FIX: close the NetCDF handle when the function exits (the original
  # never closed it, leaking one open file per call).
  on.exit(close.nc(nc), add = TRUE)
  long_coord=var.get.nc(nc,"longitude")
  lat_coord=var.get.nc(nc,"latitude")
  temp=var.get.nc(nc,"t")
  surf_temp=fillMissVar(temp)
  humid=var.get.nc(nc,"rh")
  surf_humid=fillMissVar(humid)
  # Expand the coordinate axes into a full grid, then attach the variables
  final_m=cbind(rep(long_coord,length(lat_coord)),rep(lat_coord,each=length(long_coord)),surf_temp,surf_humid)
  colnames(final_m)=c("longitude","latitude","Temperature","RelativeHumidity")
  return(final_m)
}
#download files
dir.create("Cache")
dir.create("Cache/CDFfiles")
# Each row of NetCDF_urlList is a download URL; the local cache file name is
# the trailing "<digits>.SUB.nc" portion of that URL.
urlList=read.table("NetCDF_urlList",stringsAsFactors=FALSE)
urlList=cbind(urlList,str_extract(urlList[,1],"[0-9]+.SUB.nc"))
colnames(urlList)=c("URLs","fileNames")
# Download each file unless it is already present in the local cache
for(i in 1:nrow(urlList)){
if(urlList$fileNames[i] %in% list.files("Cache/CDFfiles/")){
print("Target file already exists, moving to the next file")
}else{
download.file(urlList$URLs[i],paste0("Cache/CDFfiles/",urlList$fileNames[i]))
}
}
#process files and combine to final dataframe
fileNameList=sort(paste0("Cache/CDFfiles/",urlList$fileNames))
# The record date is the 6-digit stamp embedded in each file name
dateList=str_extract(fileNameList,"[0-9]{6}")
finalData=do.call(rbind,lapply(fileNameList,CDFtoMatrix))
finalData=as.data.frame(finalData)
# 4896 rows per file -- presumably the fixed longitude x latitude grid size
# of this dataset; TODO confirm against the NetCDF dimensions.
finalData=cbind(rep(dateList,each=4896L),finalData)
colnames(finalData)[1]="RecordDate"
save(finalData,file="finalData.rda")
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/graph-conversion.R
\name{dodgr_to_tidygraph}
\alias{dodgr_to_tidygraph}
\title{dodgr_to_tidygraph}
\usage{
dodgr_to_tidygraph(graph)
}
\arguments{
\item{graph}{A \code{dodgr} graph}
}
\value{
The \code{tidygraph} equivalent of the input
}
\description{
Convert a \code{dodgr} graph to a \pkg{tidygraph} object.
}
\examples{
graph <- weight_streetnet (hampi)
grapht <- dodgr_to_tidygraph (graph)
}
| /fuzzedpackages/dodgr/man/dodgr_to_tidygraph.Rd | no_license | akhikolla/testpackages | R | false | true | 469 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/graph-conversion.R
\name{dodgr_to_tidygraph}
\alias{dodgr_to_tidygraph}
\title{dodgr_to_tidygraph}
\usage{
dodgr_to_tidygraph(graph)
}
\arguments{
\item{graph}{A \code{dodgr} graph}
}
\value{
The \code{tidygraph} equivalent of the input
}
\description{
Convert a \code{dodgr} graph to a \pkg{tidygraph} object.
}
\examples{
graph <- weight_streetnet (hampi)
grapht <- dodgr_to_tidygraph (graph)
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/exposure-package.R
\name{\%>\%}
\alias{\%>\%}
\title{Pipe operator}
\usage{
lhs \%>\% rhs
}
\description{
Pipe operator
}
\keyword{internal}
| /man/pipe.Rd | no_license | ATFutures/exposure | R | false | true | 219 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/exposure-package.R
\name{\%>\%}
\alias{\%>\%}
\title{Pipe operator}
\usage{
lhs \%>\% rhs
}
\description{
Pipe operator
}
\keyword{internal}
|
\name{controlled}
\alias{controlled}
\title{controlled}
\usage{
controlled(gate,n,cQubits,tQubit)
}
\arguments{
\item{gate}{ single qubit gate to create controlled version of }
\item{n}{ Number of qubits in the ket, including target, control, and all other qubits}
\item{cQubits}{Vector of qubit indices. There can be between 1 and n-1 control qubits, where n is the number of qubits in the ket. Qubits are indexed from 0, starting at the most significant qubit}
\item{tQubit}{Index of the target qubit. Qubits are indexed from 0, starting at the most significant qubit}
}
\value{
A matrix representing the operation of a controlled qubit gate on any subset of the input ket
}
\description{
Creates a matrix representing a controlled gate on a system of qubits. The target and control qubits can be located at arbitrary positions.
}
\examples{
controlled(X(),n=2,cQubits=0,tQubit=1)
controlled(X(),n=4,cQubits=c(0,1,2),tQubit=3)
}
| /man/controlled.Rd | no_license | cran/QuantumOps | R | false | false | 949 | rd | \name{controlled}
\alias{controlled}
\title{controlled}
\usage{
controlled(gate,n,cQubits,tQubit)
}
\arguments{
\item{gate}{ single qubit gate to create controlled version of }
\item{n}{ Number of qubits in the ket, including target, control, and all other qubits}
\item{cQubits}{Vector of qubit indices. There can be between 1 and n-1 control qubits, where n is the number of qubits in the ket. Qubits are indexed from 0, starting at the most significant qubit}
\item{tQubit}{Index of the target qubit. Qubits are indexed from 0, starting at the most significant qubit}
}
\value{
A matrix representing the operation of a controlled qubit gate on any subset of the input ket
}
\description{
Creates a matrix representing a controlled gate on a system of qubits. The target and control qubits can be located at arbitrary positions.
}
\examples{
controlled(X(),n=2,cQubits=0,tQubit=1)
controlled(X(),n=4,cQubits=c(0,1,2),tQubit=3)
}
|
source('util.R')
# Build a thresholded sign function: the returned closure maps a scalar to
# 1 when it exceeds epsilon, -1 when it is below -epsilon, and 0 inside the
# dead zone [-epsilon, epsilon].
sigEpsilon <- function(epsilon) {
  function(val) {
    if (val > epsilon) return(1)
    if (val < -epsilon) return(-1)
    0
  }
}
# Reproducible example: a random series f_i and the sign of its first
# difference b_i at two dead-zone widths (epsilon = 0.5 and 0.05).
set.seed(6)
n <- 10
x <- runif(n, min=3, max=6)
d <- diff(x)
# Plot the raw series f_i
ggplot(data.frame(i = 1:n, x=x), aes(i, x)) +
geom_line() +
geom_point() +
ylab(expression(f[i])) +
scale_x_continuous(breaks=seq(1, n, by=1)) +
theme_minimal()
# width3/height3 come from util.R -- presumably shared figure dimensions;
# TODO confirm.
ggsave('picExample-f.pdf', width=width3, height=height3)
# b_i with a wide dead zone (epsilon = 0.5)
ggplot(data.frame(i=1:(n-1), x=sapply(d, sigEpsilon(0.5))), aes(i, x)) +
geom_segment(aes(i, x, xend=i, yend=x, y=0)) +
geom_point() +
ylab(expression(b[i])) +
scale_x_continuous(breaks=seq(1, n-1, by=1)) +
scale_y_continuous(breaks=c(-1, 0, 1)) +
theme_minimal()
ggsave('picExample-eps05.pdf', width=width3, height=height3)
# b_i with a narrow dead zone (epsilon = 0.05)
ggplot(data.frame(i=1:(n-1), x=sapply(d, sigEpsilon(0.05))), aes(i, x)) +
geom_segment(aes(i, x, xend=i, yend=x, y=0)) +
geom_point() +
ylab(expression(b[i])) +
scale_x_continuous(breaks=seq(1, n-1, by=1)) +
scale_y_continuous(breaks=c(-1, 0, 1)) +
theme_minimal()
ggsave('picExample-eps005.pdf', width=width3, height=height3)
| /picExample.R | permissive | bgraf/mt-scripts | R | false | false | 1,154 | r | source('util.R')
# Build a thresholded sign function: the returned closure maps a scalar to
# 1 when it exceeds epsilon, -1 when it is below -epsilon, and 0 inside the
# dead zone [-epsilon, epsilon].
sigEpsilon <- function(epsilon) {
  function(val) {
    if (val > epsilon) return(1)
    if (val < -epsilon) return(-1)
    0
  }
}
# Reproducible example: a random series f_i and the sign of its first
# difference b_i at two dead-zone widths (epsilon = 0.5 and 0.05).
set.seed(6)
n <- 10
x <- runif(n, min=3, max=6)
d <- diff(x)
# Plot the raw series f_i
ggplot(data.frame(i = 1:n, x=x), aes(i, x)) +
geom_line() +
geom_point() +
ylab(expression(f[i])) +
scale_x_continuous(breaks=seq(1, n, by=1)) +
theme_minimal()
# width3/height3 come from util.R -- presumably shared figure dimensions;
# TODO confirm.
ggsave('picExample-f.pdf', width=width3, height=height3)
# b_i with a wide dead zone (epsilon = 0.5)
ggplot(data.frame(i=1:(n-1), x=sapply(d, sigEpsilon(0.5))), aes(i, x)) +
geom_segment(aes(i, x, xend=i, yend=x, y=0)) +
geom_point() +
ylab(expression(b[i])) +
scale_x_continuous(breaks=seq(1, n-1, by=1)) +
scale_y_continuous(breaks=c(-1, 0, 1)) +
theme_minimal()
ggsave('picExample-eps05.pdf', width=width3, height=height3)
# b_i with a narrow dead zone (epsilon = 0.05)
ggplot(data.frame(i=1:(n-1), x=sapply(d, sigEpsilon(0.05))), aes(i, x)) +
geom_segment(aes(i, x, xend=i, yend=x, y=0)) +
geom_point() +
ylab(expression(b[i])) +
scale_x_continuous(breaks=seq(1, n-1, by=1)) +
scale_y_continuous(breaks=c(-1, 0, 1)) +
theme_minimal()
ggsave('picExample-eps005.pdf', width=width3, height=height3)
|
#' Plot network, highlighting communities
#'
#' In a social network, community-detection algorithms can be used to find
#' cliques that are more strongly connected to each other than to the rest
#' of the network. This generates a plot similar to sma_plot(), except that
#' the largest communities in the network are highlighted.
#'
#' @param g An igraph graph object.
#' @param c An igraph communities object.
#' @param n The number of mayors to find. By default, returns mayors for the
#' largest n communities.
#' @param layout Results of an igraph layout routine.
#' (layout.fruchterman.reingold is called if nothing is passed). Because these
#' calculations can be time consuming, it makes sense to obtain the layout in
#' advance if you'll be making several versions of the same plot.
#' @param showMayors If true, highlight the location of mayors for the
#' communities shown.
#' @param extra Additional communities that should be highlighted.
#'
#' @examples
#' library(dplyr)
#' library(igraph)
#' g <- sample_tweets %>%
#' ws_to_graph() %>%
#' graph_lcc()
#' fg <- g %>%
#' as.undirected() %>%
#' simplify() %>%
#' fastgreedy.community()
#' community_plot(g,fg)
#'
#' @import ggplot2
#' @import RColorBrewer
#' @import igraph
#' @export
community_plot <- function(g,c,n=8,layout=NULL,showMayors=TRUE,extra=NULL) {
  # Plot graph g, highlighting the n largest communities of c by coloring the
  # edges whose two endpoints fall in the same highlighted community. Extra
  # community ids passed via `extra` are highlighted as well, and community
  # "mayors" are drawn as colored circles when showMayors is TRUE.
  # NOTE: the parameter `c` shadows base::c inside this function; the name is
  # kept for backward compatibility with existing callers.
  if (is.null(layout)) {
    layout <- layout.fruchterman.reingold(g)
  }
  if (max(membership(c)) < n) {
    n <- max(membership(c))
  }
  # Community sizes, sorted largest-first; keep the top n
  t_all <- membership(c) %>% table() %>% as.data.frame()
  t_all <- t_all[order(t_all$Freq,decreasing=TRUE),]
  t <- head(t_all,n)
  # BUG FIX: the original used 1:length(extra), which iterates c(1, 0) when
  # extra is NULL (the default); seq_along() skips the loop for empty input.
  for (i in seq_along(extra)) {
    t <- rbind(t,t_all[t_all$.==extra[i],])
  }
  col_n <- brewer.pal(nrow(t),'Dark2')
  # Hide all vertices by default; mayors are re-enabled below
  V(g)$shape <- "none"
  V(g)$size <- 0
  V(g)$color <- "gray"
  # Classify each edge: intra-community edges get their community id,
  # cross-community edges keep -1 (and stay gray)
  source_nodes <- tail_of(g,E(g))
  target_nodes <- head_of(g,E(g))
  source_m <- membership(c)[source_nodes]
  target_m <- membership(c)[target_nodes]
  both_m <- rep(-1,length(E(g)))
  both_m[source_m==target_m] <- source_m[source_m==target_m]
  edge_colors <- rep("gray",length(E(g)))
  for (i in seq_len(nrow(t))) {
    edge_colors[both_m==t[i,1]] <- col_n[i]
  }
  E(g)$color <- edge_colors
  if (showMayors) {
    m <- mayors(g,c,n=n,extra=extra)
    # seq_along() guards against an empty mayor list (1:0 would misiterate)
    for (i in seq_along(m)) {
      x <- m[i]
      V(g)[x]$shape <- "circle"
      V(g)[x]$size <- 4
      V(g)[x]$color <- col_n[i]
    }
  }
  plot(g,
       layout=layout,
       vertex.label=NA,
       vertex.shape="none",
       vertex.size=0,
       edge.arrow.mode=0,
       edge.width=1)
  # plot again, hiding the gray edges so that the colored ones are visible
  E(g)[E(g)$color=="gray"]$color <- NA
  plot(g,
       layout=layout,
       add=TRUE,
       vertex.label=NA,
       edge.arrow.mode=0,
       edge.width=1)
}
# g_layout <- layout.fruchterman.reingold(g)
# community_plot(g,fg,layout=g_layout)
| /R/community_plot.R | no_license | ccjolley/cRimson | R | false | false | 2,944 | r | #' Plot network, highlighting communities
#'
#' In a social network, community-detection algorithms can be used to find
#' cliques that are more strongly connected to each other than to the rest
#' of the network. This generates a plot similar to sma_plot(), except that
#' the largest communities in the network are highlighted.
#'
#' @param g An igraph graph object.
#' @param c An igraph communities object.
#' @param n The number of mayors to find. By default, returns mayors for the
#' largest n communities.
#' @param layout Results of an igraph layout routine.
#' (layout.fruchterman.reingold is called if nothing is passed). Because these
#' calculations can be time consuming, it makes sense to obtain the layout in
#' advance if you'll be making several versions of the same plot.
#' @param showMayors If true, highlight the location of mayors for the
#' communities shown.
#' @param extra Additional communities that should be highlighted.
#'
#' @examples
#' library(dplyr)
#' library(igraph)
#' g <- sample_tweets %>%
#' ws_to_graph() %>%
#' graph_lcc()
#' fg <- g %>%
#' as.undirected() %>%
#' simplify() %>%
#' fastgreedy.community()
#' community_plot(g,fg)
#'
#' @import ggplot2
#' @import RColorBrewer
#' @import igraph
#' @export
community_plot <- function(g,c,n=8,layout=NULL,showMayors=TRUE,extra=NULL) {
  # Plot graph g, highlighting the n largest communities of c by coloring the
  # edges whose two endpoints fall in the same highlighted community. Extra
  # community ids passed via `extra` are highlighted as well, and community
  # "mayors" are drawn as colored circles when showMayors is TRUE.
  # NOTE: the parameter `c` shadows base::c inside this function; the name is
  # kept for backward compatibility with existing callers.
  if (is.null(layout)) {
    layout <- layout.fruchterman.reingold(g)
  }
  if (max(membership(c)) < n) {
    n <- max(membership(c))
  }
  # Community sizes, sorted largest-first; keep the top n
  t_all <- membership(c) %>% table() %>% as.data.frame()
  t_all <- t_all[order(t_all$Freq,decreasing=TRUE),]
  t <- head(t_all,n)
  # BUG FIX: the original used 1:length(extra), which iterates c(1, 0) when
  # extra is NULL (the default); seq_along() skips the loop for empty input.
  for (i in seq_along(extra)) {
    t <- rbind(t,t_all[t_all$.==extra[i],])
  }
  col_n <- brewer.pal(nrow(t),'Dark2')
  # Hide all vertices by default; mayors are re-enabled below
  V(g)$shape <- "none"
  V(g)$size <- 0
  V(g)$color <- "gray"
  # Classify each edge: intra-community edges get their community id,
  # cross-community edges keep -1 (and stay gray)
  source_nodes <- tail_of(g,E(g))
  target_nodes <- head_of(g,E(g))
  source_m <- membership(c)[source_nodes]
  target_m <- membership(c)[target_nodes]
  both_m <- rep(-1,length(E(g)))
  both_m[source_m==target_m] <- source_m[source_m==target_m]
  edge_colors <- rep("gray",length(E(g)))
  for (i in seq_len(nrow(t))) {
    edge_colors[both_m==t[i,1]] <- col_n[i]
  }
  E(g)$color <- edge_colors
  if (showMayors) {
    m <- mayors(g,c,n=n,extra=extra)
    # seq_along() guards against an empty mayor list (1:0 would misiterate)
    for (i in seq_along(m)) {
      x <- m[i]
      V(g)[x]$shape <- "circle"
      V(g)[x]$size <- 4
      V(g)[x]$color <- col_n[i]
    }
  }
  plot(g,
       layout=layout,
       vertex.label=NA,
       vertex.shape="none",
       vertex.size=0,
       edge.arrow.mode=0,
       edge.width=1)
  # plot again, hiding the gray edges so that the colored ones are visible
  E(g)[E(g)$color=="gray"]$color <- NA
  plot(g,
       layout=layout,
       add=TRUE,
       vertex.label=NA,
       edge.arrow.mode=0,
       edge.width=1)
}
# g_layout <- layout.fruchterman.reingold(g)
# community_plot(g,fg,layout=g_layout)
|
# Process the UCI HAR Dataset:
#  1. merge the training and test sets
#  2. keep only mean/std measurements
#  3. label activities with descriptive names
#  4. label variables with descriptive names
#  5. write a tidy data set of per-subject/per-activity averages
# NOTE(review): rm(list=ls()) and setwd() mutate the global session; they are
# kept to preserve this script's original behavior, but avoid them in reusable code.
# Clean all variables
rm(list=ls())
# Set working directory
setwd("~/Documentos/Master/courses-master/03_GettingData/project")
# Read and gather test data
subjectTest <- read.table(file = "UCI HAR Dataset/test/subject_test.txt")
activityTest <- read.table(file = "UCI HAR Dataset/test/y_test.txt")
dataTest <- read.table(file = "UCI HAR Dataset/test/X_test.txt")
dfTest = data.frame(subjectTest, activityTest, dataTest)
# Read and gather training data
subjectTrain <- read.table(file = "UCI HAR Dataset/train/subject_train.txt")
activityTrain <- read.table(file = "UCI HAR Dataset/train/y_train.txt")
dataTrain <- read.table(file = "UCI HAR Dataset/train/X_train.txt")
dfTrain = data.frame(subjectTrain, activityTrain, dataTrain)
###########################################################################################
#### 1.- Merges the training and the test sets to create one data set.
###########################################################################################
df <- rbind(dfTest, dfTrain)
###########################################################################################
### 2.- Extracts only the measurements on the mean and standard deviation for each measurement.
###########################################################################################
# get a table displaying all the features
AllFeatures <- read.table(file = "UCI HAR Dataset/features.txt")
# select the measurements on the mean and standard deviation for each measurement
# (note: "mean|std" also matches meanFreq() features)
myFeatures <- which(grepl("mean|std", x = as.character(AllFeatures$V2)))
# BUG FIX: myFeatures are positions among the 561 feature columns, but in df
# those columns start at position 3 (columns 1-2 are subject and activity).
# The original selected df[, myFeatures] directly, picking the wrong columns
# (and re-selecting subject/activity); offset by 2 so the data columns line
# up with their labels in step 4.
myDataFrame <- df[, c(1, 2, myFeatures + 2)]
###########################################################################################
### 3.- Uses descriptive activity names to name the activities in the data set
###########################################################################################
# get the activity labels
allActivities <- read.table(file = "UCI HAR Dataset/activity_labels.txt")
activityName = as.character(allActivities$V2)
# in the raw data the activity is coded with a number and this column is numeric.
# First convert this column in a factor
myDataFrame[ ,2] <- factor(myDataFrame[ ,2])
# and then relabel the levels of the factor
levels(myDataFrame[ ,2])<-activityName
###########################################################################################
#### 4.- Appropriately labels the data set with descriptive variable names.
###########################################################################################
# chose the right labels for the selected features
measuramentNames <- as.character(AllFeatures[myFeatures, 2])
# set the column names
myNames<- c("Subject", "Activity", measuramentNames)
colnames(myDataFrame) <- myNames
###########################################################################################
##### 5.- From the data set in step 4, creates a second, independent tidy data set with
##### the average of each variable for each activity and each subject
###########################################################################################
# BUG FIX: the original wrote the un-aggregated data set; compute the mean of
# every measurement per (Subject, Activity) pair as the banner above requires.
tidyData <- aggregate(myDataFrame[, -c(1, 2)],
by = list(Subject = myDataFrame$Subject, Activity = myDataFrame$Activity),
FUN = mean)
write.table(tidyData, file = "./tidyData.txt", row.names = FALSE)
| /run_analysis.R | no_license | marcosmarva/GettingCleaningData-Coursera | R | false | false | 3,217 | r | # Clean all variables
# Process the UCI HAR Dataset:
#  1. merge the training and test sets
#  2. keep only mean/std measurements
#  3. label activities with descriptive names
#  4. label variables with descriptive names
#  5. write a tidy data set of per-subject/per-activity averages
# NOTE(review): rm(list=ls()) and setwd() mutate the global session; they are
# kept to preserve this script's original behavior, but avoid them in reusable code.
rm(list=ls())
# Set working directory
setwd("~/Documentos/Master/courses-master/03_GettingData/project")
# Read and gather test data
subjectTest <- read.table(file = "UCI HAR Dataset/test/subject_test.txt")
activityTest <- read.table(file = "UCI HAR Dataset/test/y_test.txt")
dataTest <- read.table(file = "UCI HAR Dataset/test/X_test.txt")
dfTest = data.frame(subjectTest, activityTest, dataTest)
# Read and gather training data
subjectTrain <- read.table(file = "UCI HAR Dataset/train/subject_train.txt")
activityTrain <- read.table(file = "UCI HAR Dataset/train/y_train.txt")
dataTrain <- read.table(file = "UCI HAR Dataset/train/X_train.txt")
dfTrain = data.frame(subjectTrain, activityTrain, dataTrain)
###########################################################################################
#### 1.- Merges the training and the test sets to create one data set.
###########################################################################################
df <- rbind(dfTest, dfTrain)
###########################################################################################
### 2.- Extracts only the measurements on the mean and standard deviation for each measurement.
###########################################################################################
# get a table displaying all the features
AllFeatures <- read.table(file = "UCI HAR Dataset/features.txt")
# select the measurements on the mean and standard deviation for each measurement
# (note: "mean|std" also matches meanFreq() features)
myFeatures <- which(grepl("mean|std", x = as.character(AllFeatures$V2)))
# BUG FIX: myFeatures are positions among the 561 feature columns, but in df
# those columns start at position 3 (columns 1-2 are subject and activity).
# The original selected df[, myFeatures] directly, picking the wrong columns
# (and re-selecting subject/activity); offset by 2 so the data columns line
# up with their labels in step 4.
myDataFrame <- df[, c(1, 2, myFeatures + 2)]
###########################################################################################
### 3.- Uses descriptive activity names to name the activities in the data set
###########################################################################################
# get the activity labels
allActivities <- read.table(file = "UCI HAR Dataset/activity_labels.txt")
activityName = as.character(allActivities$V2)
# in the raw data the activity is coded with a number and this column is numeric.
# First convert this column in a factor
myDataFrame[ ,2] <- factor(myDataFrame[ ,2])
# and then relabel the levels of the factor
levels(myDataFrame[ ,2])<-activityName
###########################################################################################
#### 4.- Appropriately labels the data set with descriptive variable names.
###########################################################################################
# chose the right labels for the selected features
measuramentNames <- as.character(AllFeatures[myFeatures, 2])
# set the column names
myNames<- c("Subject", "Activity", measuramentNames)
colnames(myDataFrame) <- myNames
###########################################################################################
##### 5.- From the data set in step 4, creates a second, independent tidy data set with
##### the average of each variable for each activity and each subject
###########################################################################################
# BUG FIX: the original wrote the un-aggregated data set; compute the mean of
# every measurement per (Subject, Activity) pair as the banner above requires.
tidyData <- aggregate(myDataFrame[, -c(1, 2)],
by = list(Subject = myDataFrame$Subject, Activity = myDataFrame$Activity),
FUN = mean)
write.table(tidyData, file = "./tidyData.txt", row.names = FALSE)
|
# A plotting R script produced by the REVIGO server at http://revigo.irb.hr/
# If you found REVIGO useful in your work, please cite the following reference:
# Supek F et al. "REVIGO summarizes and visualizes long lists of Gene Ontology
# terms" PLoS ONE 2011. doi:10.1371/journal.pone.0021800
# --------------------------------------------------------------------------
# If you don't have the ggplot2 package installed, uncomment the following line:
# install.packages( "ggplot2" );
library( ggplot2 );
# --------------------------------------------------------------------------
# If you don't have the scales package installed, uncomment the following line:
# install.packages( "scales" );
library( scales );
# --------------------------------------------------------------------------
# Here is your data from REVIGO. Scroll down for plot configuration options.
revigo.names <- c("term_ID","description","frequency_%","plot_X","plot_Y","plot_size","value","uniqueness","dispensability");
revigo.data <- rbind(c("GO:0005793","endoplasmic reticulum-Golgi intermediate compartment",0.691,-6.075,3.125,2.117,-3.964,0.941,0.000),
c("GO:0044450","(obsolete) microtubule organizing center part",0.501,-6.566,-0.206,1.924,-1.371,1.000,0.000),
c("GO:0044451","(obsolete) nucleoplasm part",0.501,-6.219,-2.485,1.924,-0.544,1.000,0.000),
c("GO:0031089","platelet dense granule lumen",0.074,6.310,0.196,1.176,-3.228,0.710,0.028),
c("GO:0005814","centriole",0.755,-1.432,5.907,2.155,-1.595,0.838,0.029),
c("GO:0033116","endoplasmic reticulum-Golgi intermediate compartment membrane",0.399,-0.223,-0.304,1.881,-3.606,0.887,0.040),
c("GO:0048471","perinuclear region of cytoplasm",3.813,-5.177,1.067,2.856,-0.589,0.953,0.042),
c("GO:1990454","L-type voltage-gated calcium channel complex",0.064,-3.622,-3.881,1.114,-1.743,0.833,0.050),
c("GO:0044666","MLL3/4 complex",0.069,0.627,-5.983,1.146,-1.549,0.750,0.120),
c("GO:0043202","lysosomal lumen",0.510,4.703,-6.010,1.987,-0.697,0.870,0.144),
c("GO:0000781","chromosome, telomeric region",0.787,0.456,7.136,2.173,-0.613,0.915,0.187),
c("GO:0030134","COPII-coated ER to Golgi transport vesicle",0.495,5.679,3.414,1.973,-1.788,0.772,0.287),
c("GO:0005798","Golgi-associated vesicle",0.489,6.215,3.306,1.968,-0.853,0.791,0.338),
c("GO:0030173","integral component of Golgi membrane",0.324,2.973,-1.390,1.792,-1.004,0.807,0.342),
c("GO:0005686","U2 snRNP",0.106,-0.881,-6.349,1.322,-1.274,0.758,0.361),
c("GO:0030135","coated vesicle",1.574,6.418,2.757,2.473,-0.772,0.768,0.379),
c("GO:0005796","Golgi lumen",0.532,5.402,-3.095,2.004,-0.647,0.828,0.409),
c("GO:0042827","platelet dense granule",0.112,6.531,1.288,1.342,-2.870,0.748,0.423),
c("GO:0097431","mitotic spindle pole",0.181,-2.165,5.769,1.544,-1.200,0.833,0.446),
c("GO:0005871","kinesin complex",0.266,-2.069,4.055,1.708,-0.913,0.778,0.462),
c("GO:0005689","U12-type spliceosomal complex",0.154,-1.276,-6.196,1.477,-1.168,0.750,0.480),
c("GO:0035579","specific granule membrane",0.484,5.067,1.100,1.964,-0.675,0.678,0.488),
c("GO:0005819","spindle",2.026,-1.343,6.221,2.582,-0.414,0.825,0.569),
c("GO:1904813","ficolin-1-rich granule lumen",0.659,5.846,0.004,2.097,-0.561,0.687,0.572),
c("GO:0005681","spliceosomal complex",1.016,-1.456,-5.754,2.283,-0.589,0.736,0.578),
c("GO:0042581","specific granule",0.851,6.061,1.234,2.207,-0.466,0.704,0.605),
c("GO:0005775","vacuolar lumen",0.915,4.997,-5.777,2.238,-0.464,0.865,0.606),
c("GO:0032281","AMPA glutamate receptor complex",0.133,-3.213,-3.747,1.415,-1.339,0.812,0.628),
c("GO:0101002","ficolin-1-rich granule",0.984,5.907,1.502,2.270,-0.418,0.701,0.630),
c("GO:0072686","mitotic spindle",0.835,-1.856,5.954,2.199,-0.706,0.813,0.688));
# Convert the REVIGO table (all-character after rbind) into typed columns.
one.data <- data.frame(revigo.data);
names(one.data) <- revigo.names;
# Drop terms REVIGO could not place in the semantic plot ("null" coordinates)
one.data <- one.data [(one.data$plot_X != "null" & one.data$plot_Y != "null"), ];
one.data$plot_X <- as.numeric( as.character(one.data$plot_X) );
one.data$plot_Y <- as.numeric( as.character(one.data$plot_Y) );
one.data$plot_size <- as.numeric( as.character(one.data$plot_size) );
one.data$value <- as.numeric( as.character(one.data$value) );
# NOTE(review): the header is "frequency_%", not "frequency" -- the read on
# the right-hand side only works via $ partial matching, and the assignment
# creates a new "frequency" column instead of converting the original one.
# Auto-generated REVIGO code; left as-is.
one.data$frequency <- as.numeric( as.character(one.data$frequency) );
one.data$uniqueness <- as.numeric( as.character(one.data$uniqueness) );
one.data$dispensability <- as.numeric( as.character(one.data$dispensability) );
#head(one.data);
# --------------------------------------------------------------------------
# Names of the axes, sizes of the numbers and letters, names of the columns,
# etc. can be changed below
# Bubble scatter in REVIGO's 2-D semantic space: bubble size tracks plot_size
# and color tracks the (negative) enrichment value.
p1 <- ggplot( data = one.data );
p1 <- p1 + geom_point( aes( plot_X, plot_Y, colour = value, size = plot_size), alpha = I(0.6) ) + scale_size_area();
p1 <- p1 + scale_colour_gradientn( colours = c("red", "yellow", "green", "blue"), limits = c( min(one.data$value), 0) );
# Overlay a translucent black outline on each bubble
p1 <- p1 + geom_point( aes(plot_X, plot_Y, size = plot_size), shape = 21, fill = "transparent", colour = I (alpha ("black", 0.6) )) + scale_size_area();
p1 <- p1 + scale_size( range=c(5, 30)) + theme_bw(); # + scale_fill_gradientn(colours = heat_hcl(7), limits = c(-300, 0) );
# Label only terms with low dispensability (the most representative ones)
ex <- one.data [ one.data$dispensability < 0.15, ];
p1 <- p1 + geom_text( data = ex, aes(plot_X, plot_Y, label = description), colour = I(alpha("black", 0.85)), size = 2.3 );
# NOTE(review): the axis labels look swapped (y labelled "x" and vice versa);
# this is how the REVIGO generator emits them and the semantic axes are
# arbitrary, so it is left unchanged.
p1 <- p1 + labs (y = "semantic space x", x = "semantic space y");
p1 <- p1 + theme(legend.key = element_blank()) ;
# Pad the plot limits by 10% of each axis range
one.x_range = max(one.data$plot_X) - min(one.data$plot_X);
one.y_range = max(one.data$plot_Y) - min(one.data$plot_Y);
p1 <- p1 + xlim(min(one.data$plot_X)-one.x_range/10,max(one.data$plot_X)+one.x_range/10);
p1 <- p1 + ylim(min(one.data$plot_Y)-one.y_range/10,max(one.data$plot_Y)+one.y_range/10);
# --------------------------------------------------------------------------
# Output the plot to screen
p1;
# Save the plot to a file (the file type depends on the extension, default=pdf).
# Note: unlike the stock REVIGO template, this ggsave line is active here.
ggsave("/home/aleix/Bioinfo/TFG/results/plots/BD1_CC.pdf", height = 7, width = 10);
| /Rscripts/RevigoBD1CC.R | no_license | aleixyuki/FGP | R | false | false | 5,977 | r | # A plotting R script produced by the REVIGO server at http://revigo.irb.hr/
# If you found REVIGO useful in your work, please cite the following reference:
# Supek F et al. "REVIGO summarizes and visualizes long lists of Gene Ontology
# terms" PLoS ONE 2011. doi:10.1371/journal.pone.0021800
# --------------------------------------------------------------------------
# If you don't have the ggplot2 package installed, uncomment the following line:
# install.packages( "ggplot2" );
library( ggplot2 );
# --------------------------------------------------------------------------
# If you don't have the scales package installed, uncomment the following line:
# install.packages( "scales" );
library( scales );
# --------------------------------------------------------------------------
# Here is your data from REVIGO. Scroll down for plot configuration options.
revigo.names <- c("term_ID","description","frequency_%","plot_X","plot_Y","plot_size","value","uniqueness","dispensability");
revigo.data <- rbind(c("GO:0005793","endoplasmic reticulum-Golgi intermediate compartment",0.691,-6.075,3.125,2.117,-3.964,0.941,0.000),
c("GO:0044450","(obsolete) microtubule organizing center part",0.501,-6.566,-0.206,1.924,-1.371,1.000,0.000),
c("GO:0044451","(obsolete) nucleoplasm part",0.501,-6.219,-2.485,1.924,-0.544,1.000,0.000),
c("GO:0031089","platelet dense granule lumen",0.074,6.310,0.196,1.176,-3.228,0.710,0.028),
c("GO:0005814","centriole",0.755,-1.432,5.907,2.155,-1.595,0.838,0.029),
c("GO:0033116","endoplasmic reticulum-Golgi intermediate compartment membrane",0.399,-0.223,-0.304,1.881,-3.606,0.887,0.040),
c("GO:0048471","perinuclear region of cytoplasm",3.813,-5.177,1.067,2.856,-0.589,0.953,0.042),
c("GO:1990454","L-type voltage-gated calcium channel complex",0.064,-3.622,-3.881,1.114,-1.743,0.833,0.050),
c("GO:0044666","MLL3/4 complex",0.069,0.627,-5.983,1.146,-1.549,0.750,0.120),
c("GO:0043202","lysosomal lumen",0.510,4.703,-6.010,1.987,-0.697,0.870,0.144),
c("GO:0000781","chromosome, telomeric region",0.787,0.456,7.136,2.173,-0.613,0.915,0.187),
c("GO:0030134","COPII-coated ER to Golgi transport vesicle",0.495,5.679,3.414,1.973,-1.788,0.772,0.287),
c("GO:0005798","Golgi-associated vesicle",0.489,6.215,3.306,1.968,-0.853,0.791,0.338),
c("GO:0030173","integral component of Golgi membrane",0.324,2.973,-1.390,1.792,-1.004,0.807,0.342),
c("GO:0005686","U2 snRNP",0.106,-0.881,-6.349,1.322,-1.274,0.758,0.361),
c("GO:0030135","coated vesicle",1.574,6.418,2.757,2.473,-0.772,0.768,0.379),
c("GO:0005796","Golgi lumen",0.532,5.402,-3.095,2.004,-0.647,0.828,0.409),
c("GO:0042827","platelet dense granule",0.112,6.531,1.288,1.342,-2.870,0.748,0.423),
c("GO:0097431","mitotic spindle pole",0.181,-2.165,5.769,1.544,-1.200,0.833,0.446),
c("GO:0005871","kinesin complex",0.266,-2.069,4.055,1.708,-0.913,0.778,0.462),
c("GO:0005689","U12-type spliceosomal complex",0.154,-1.276,-6.196,1.477,-1.168,0.750,0.480),
c("GO:0035579","specific granule membrane",0.484,5.067,1.100,1.964,-0.675,0.678,0.488),
c("GO:0005819","spindle",2.026,-1.343,6.221,2.582,-0.414,0.825,0.569),
c("GO:1904813","ficolin-1-rich granule lumen",0.659,5.846,0.004,2.097,-0.561,0.687,0.572),
c("GO:0005681","spliceosomal complex",1.016,-1.456,-5.754,2.283,-0.589,0.736,0.578),
c("GO:0042581","specific granule",0.851,6.061,1.234,2.207,-0.466,0.704,0.605),
c("GO:0005775","vacuolar lumen",0.915,4.997,-5.777,2.238,-0.464,0.865,0.606),
c("GO:0032281","AMPA glutamate receptor complex",0.133,-3.213,-3.747,1.415,-1.339,0.812,0.628),
c("GO:0101002","ficolin-1-rich granule",0.984,5.907,1.502,2.270,-0.418,0.701,0.630),
c("GO:0072686","mitotic spindle",0.835,-1.856,5.954,2.199,-0.706,0.813,0.688));
# Convert the REVIGO table (all-character after rbind) into typed columns.
one.data <- data.frame(revigo.data);
names(one.data) <- revigo.names;
# Drop terms REVIGO could not place in the semantic plot ("null" coordinates)
one.data <- one.data [(one.data$plot_X != "null" & one.data$plot_Y != "null"), ];
one.data$plot_X <- as.numeric( as.character(one.data$plot_X) );
one.data$plot_Y <- as.numeric( as.character(one.data$plot_Y) );
one.data$plot_size <- as.numeric( as.character(one.data$plot_size) );
one.data$value <- as.numeric( as.character(one.data$value) );
# NOTE(review): the header is "frequency_%", not "frequency" -- the read on
# the right-hand side only works via $ partial matching, and the assignment
# creates a new "frequency" column instead of converting the original one.
# Auto-generated REVIGO code; left as-is.
one.data$frequency <- as.numeric( as.character(one.data$frequency) );
one.data$uniqueness <- as.numeric( as.character(one.data$uniqueness) );
one.data$dispensability <- as.numeric( as.character(one.data$dispensability) );
#head(one.data);
# --------------------------------------------------------------------------
# Names of the axes, sizes of the numbers and letters, names of the columns,
# etc. can be changed below
p1 <- ggplot( data = one.data );
p1 <- p1 + geom_point( aes( plot_X, plot_Y, colour = value, size = plot_size), alpha = I(0.6) ) + scale_size_area();
p1 <- p1 + scale_colour_gradientn( colours = c("red", "yellow", "green", "blue"), limits = c( min(one.data$value), 0) );
p1 <- p1 + geom_point( aes(plot_X, plot_Y, size = plot_size), shape = 21, fill = "transparent", colour = I (alpha ("black", 0.6) )) + scale_size_area();
p1 <- p1 + scale_size( range=c(5, 30)) + theme_bw(); # + scale_fill_gradientn(colours = heat_hcl(7), limits = c(-300, 0) );
ex <- one.data [ one.data$dispensability < 0.15, ];
p1 <- p1 + geom_text( data = ex, aes(plot_X, plot_Y, label = description), colour = I(alpha("black", 0.85)), size = 2.3 );
p1 <- p1 + labs (y = "semantic space x", x = "semantic space y");
p1 <- p1 + theme(legend.key = element_blank()) ;
one.x_range = max(one.data$plot_X) - min(one.data$plot_X);
one.y_range = max(one.data$plot_Y) - min(one.data$plot_Y);
p1 <- p1 + xlim(min(one.data$plot_X)-one.x_range/10,max(one.data$plot_X)+one.x_range/10);
p1 <- p1 + ylim(min(one.data$plot_Y)-one.y_range/10,max(one.data$plot_Y)+one.y_range/10);
# --------------------------------------------------------------------------
# Output the plot to screen
p1;
# Uncomment the line below to also save the plot to a file.
# The file type depends on the extension (default=pdf).
ggsave("/home/aleix/Bioinfo/TFG/results/plots/BD1_CC.pdf", height = 7, width = 10);
|
# Process the UCI HAR Dataset and calculate the average of mean and standard
# deviation values by subject and activity.
# Precondition: the UCI HAR Dataset zip file should be unzipped into the
# working directory.
if (!dir.exists("./UCI HAR Dataset")) stop("Please unzip the UCI HAR Dataset file into your working directory")
# Work inside the subdirectory where the data has been unzipped.
setwd("./UCI HAR Dataset")
# Load the training data: measurements (x), activity codes (y), subject ids.
xtrain <- read.table("./train/x_train.txt")
ytrain <- read.table("./train/y_train.txt")
subjecttrain <- read.table("./train/subject_train.txt")
# Load the test data.
xtest <- read.table("./test/x_test.txt")
ytest <- read.table("./test/y_test.txt")
subjecttest <- read.table("./test/subject_test.txt")
# Load the activity labels.
activitylabels <- read.table("activity_labels.txt")
# Load the feature list (feature names live in column V2).
features <- read.table("features.txt")
# Merge the training and test data by concatenation of the rows.
# IMPORTANT: all row merge ops must occur in the same order. We append in the
# order: test, train.
xmerged <- rbind(xtest, xtrain)
ymerged <- rbind(ytest, ytrain)
subjectmerged <- rbind(subjecttest, subjecttrain)
# Label the subject column.
colnames(subjectmerged) <- c('subject')
# Use the feature names as the column names of the measurement data.
xcolheader <- as.vector(features$V2)
colnames(xmerged) <- xcolheader
library("dplyr")
# activityMethods implements a lookup function getActivity.
source("activityMethods.R")
tblymerged <- tbl_df(ymerged)
yactivities <- mutate(tblymerged, activity = getActivity(V1))
tblxmerged <- tbl_df(xmerged)
# Merge the x and y data, then prepend the subject column to get the full set.
xydata <- cbind(yactivities, tblxmerged)
complete <- cbind(subjectmerged, xydata)
# Keep subject (1), activity code/name (2, 3), and every mean/std measurement
# column; grepping the column names also keeps the means and stds grouped.
selected <- c(1, 2, 3, grep("mean", names(complete)), grep("std", names(complete)))
meanstd <- complete[, selected]
# Average every measurement column for each subject/activity combination.
# Use ncol() instead of the original hard-coded 4:82 / 3:81 ranges so the
# script keeps working if the number of matched mean/std features changes.
measure.cols <- 4:ncol(meanstd)
avgbysubjectandactivity <- aggregate(meanstd[, measure.cols],
                                     list(meanstd$activity, meanstd$subject),
                                     mean)
# Name the grouping columns produced by aggregate().
colnames(avgbysubjectandactivity)[1:2] <- c("activity", "subject")
# Prefix the averaged columns so the names reflect that they are summaries.
avg.cols <- 3:ncol(avgbysubjectandactivity)
colnames(avgbysubjectandactivity)[avg.cols] <- paste0("Avg_", colnames(avgbysubjectandactivity)[avg.cols])
# FIX: the original wrote an undefined object `agg`, which errors; write the
# summary table actually computed above.
write.table(avgbysubjectandactivity, file = "avgbysubjectactivity.txt", row.names = FALSE)
| /run_analysis.R | no_license | jmichael1/gettingcleaningdata | R | false | false | 2,965 | r | # Process the UCI HAR Dataset and calculate the average of mean and standard deviation values by subject and activity
# Precondition: the UCI HAR Dataset zip file should be unzipped into the
# working directory.
if (!dir.exists("./UCI HAR Dataset")) stop("Please unzip the UCI HAR Dataset file into your working directory")
# Work inside the subdirectory where the data has been unzipped.
setwd("./UCI HAR Dataset")
# Load the training data: measurements (x), activity codes (y), subject ids.
xtrain <- read.table("./train/x_train.txt")
ytrain <- read.table("./train/y_train.txt")
subjecttrain <- read.table("./train/subject_train.txt")
# Load the test data.
xtest <- read.table("./test/x_test.txt")
ytest <- read.table("./test/y_test.txt")
subjecttest <- read.table("./test/subject_test.txt")
# Load the activity labels.
activitylabels <- read.table("activity_labels.txt")
# Load the feature list (feature names live in column V2).
features <- read.table("features.txt")
# Merge the training and test data by concatenation of the rows.
# IMPORTANT: all row merge ops must occur in the same order. We append in the
# order: test, train.
xmerged <- rbind(xtest, xtrain)
ymerged <- rbind(ytest, ytrain)
subjectmerged <- rbind(subjecttest, subjecttrain)
# Label the subject column.
colnames(subjectmerged) <- c('subject')
# Use the feature names as the column names of the measurement data.
xcolheader <- as.vector(features$V2)
colnames(xmerged) <- xcolheader
library("dplyr")
# activityMethods implements a lookup function getActivity.
source("activityMethods.R")
tblymerged <- tbl_df(ymerged)
yactivities <- mutate(tblymerged, activity = getActivity(V1))
tblxmerged <- tbl_df(xmerged)
# Merge the x and y data, then prepend the subject column to get the full set.
xydata <- cbind(yactivities, tblxmerged)
complete <- cbind(subjectmerged, xydata)
# Keep subject (1), activity code/name (2, 3), and every mean/std measurement
# column; grepping the column names also keeps the means and stds grouped.
selected <- c(1, 2, 3, grep("mean", names(complete)), grep("std", names(complete)))
meanstd <- complete[, selected]
# Average every measurement column for each subject/activity combination.
# Use ncol() instead of the original hard-coded 4:82 / 3:81 ranges so the
# script keeps working if the number of matched mean/std features changes.
measure.cols <- 4:ncol(meanstd)
avgbysubjectandactivity <- aggregate(meanstd[, measure.cols],
                                     list(meanstd$activity, meanstd$subject),
                                     mean)
# Name the grouping columns produced by aggregate().
colnames(avgbysubjectandactivity)[1:2] <- c("activity", "subject")
# Prefix the averaged columns so the names reflect that they are summaries.
avg.cols <- 3:ncol(avgbysubjectandactivity)
colnames(avgbysubjectandactivity)[avg.cols] <- paste0("Avg_", colnames(avgbysubjectandactivity)[avg.cols])
# FIX: the original wrote an undefined object `agg`, which errors; write the
# summary table actually computed above.
write.table(avgbysubjectandactivity, file = "avgbysubjectactivity.txt", row.names = FALSE)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/splitcombine.R
\name{ilply}
\alias{ilply}
\title{Split an image along axis, apply function, return a list}
\usage{
ilply(im, axis, fun, ...)
}
\arguments{
\item{im}{image}
\item{axis}{axis for the split (e.g. "c")}
\item{fun}{function to apply}
\item{...}{extra arguments for function fun}
}
\description{
Shorthand for imsplit followed by llply
}
\examples{
parrots <- load.example("parrots")
ilply(parrots,"c",mean) #mean luminance per colour channel
}
| /man/ilply.Rd | no_license | anishsingh20/imager | R | false | true | 536 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/splitcombine.R
\name{ilply}
\alias{ilply}
\title{Split an image along axis, apply function, return a list}
\usage{
ilply(im, axis, fun, ...)
}
\arguments{
\item{im}{image}
\item{axis}{axis for the split (e.g "c")}
\item{fun}{function to apply}
\item{...}{extra arguments for function fun}
}
\description{
Shorthand for imsplit followed by llply
}
\examples{
parrots <- load.example("parrots")
ilply(parrots,"c",mean) #mean luminance per colour channel
}
|
71a0289d3c827e7014c3f130ee6a7ed1 query05_eequery_1344n.qdimacs 441 1259 | /code/dcnf-ankit-optimized/Results/QBFLIB-2018/E1/Database/Jordan-Kaiser/reduction-finding-full-set-params-k1c3n4/query05_eequery_1344n/query05_eequery_1344n.R | no_license | arey0pushpa/dcnf-autarky | R | false | false | 71 | r | 71a0289d3c827e7014c3f130ee6a7ed1 query05_eequery_1344n.qdimacs 441 1259 |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/ped_utils.R
\name{print.nucleus}
\alias{print.nucleus}
\title{S3 methods}
\usage{
\method{print}{nucleus}(x, ...)
}
\arguments{
\item{x}{An object}
\item{...}{Not used}
}
\description{
S3 methods
}
| /man/print.nucleus.Rd | no_license | cran/pedtools | R | false | true | 293 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/ped_utils.R
\name{print.nucleus}
\alias{print.nucleus}
\title{S3 methods}
\usage{
\method{print}{nucleus}(x, ...)
}
\arguments{
\item{x}{An object}
\item{...}{Not used}
}
\description{
S3 methods
}
|
###############################################################################
#
# Project: UVA Projects
# Script: server.R
# Version:
# Created:
# Updated: Dec 3, 2015
# Author: RY5T
# Copyright University of Virginia, 2014
###############################################################################
library(shiny)
# Minimal demo app: show the mtcars data set as an interactive table and stop
# the app (making runApp() return 7) once the Exit button has been clicked.
exitValue <- runApp(list(
  ui = basicPage(
    h2('Cars'),
    dataTableOutput('carsTable'),
    actionButton("exitBtn", "Exit")
  ),
  server = function(input, output) {
    # Render the full mtcars data set.
    output$carsTable <- renderDataTable(mtcars)
    # actionButton counts clicks, so any positive count means "exit now".
    observe({
      if (input$exitBtn > 0) stopApp(7)
    })
  }
))
| /Tools/Shiny/example_1.R | no_license | ryerex/Research_and_Methods | R | false | false | 674 | r | ###############################################################################
#
# Project: UVA Projects
# Script: server.R
# Version:
# Created:
# Updated: Dec 3, 2015
# Author: RY5T
# Copyright University of Virginia, 2014
###############################################################################
library(shiny)
# Minimal demo app: show the mtcars data set as an interactive table and stop
# the app (making runApp() return 7) once the Exit button has been clicked.
exitValue <- runApp(list(
  ui = basicPage(
    h2('Cars'),
    dataTableOutput('carsTable'),
    actionButton("exitBtn", "Exit")
  ),
  server = function(input, output) {
    # Render the full mtcars data set.
    output$carsTable <- renderDataTable(mtcars)
    # actionButton counts clicks, so any positive count means "exit now".
    observe({
      if (input$exitBtn > 0) stopApp(7)
    })
  }
))
|
#' Write gene set file with GMT extension from a list
#'
#' Write Gene Matrix Transposed (gmt) file from a list. The first column of
#' the GMT are gene set names, the second are descriptions, then are genes.
#'
#' @param gmt.lst List of gene sets. Each element has a gene set \code{name}, \code{description}, and \code{genes}.
#' @param file.gmt Name of the GMT file to write to.
#' @return Invisibly, \code{gmt.lst}.
#' @details This function was adapted from \code{gmtlist2file} in the \pkg{cogena} package.
# returns data input, as per https://style.tidyverse.org/functions.html#return
write_gmt <- function(gmt.lst, file.gmt){
  # Input must be a non-empty list of gene sets.
  stopifnot(length(gmt.lst) > 0, is.list(gmt.lst))
  # Emit one GMT row per gene set: name, description, then the genes,
  # all separated by tabs and terminated by a newline.
  for (gene.set in gmt.lst) {
    row <- paste(c(gene.set$name, gene.set$description, gene.set$genes), collapse = "\t")
    cat(row, "\n", file = file.gmt, append = TRUE, sep = "")
  }
  # Return the input invisibly so the call composes in pipelines.
  invisible(gmt.lst)
}
| /R/write_gmt.R | permissive | jdreyf/ezlimma | R | false | false | 970 | r | #' Write gene set file with GMT extension from a list
#'
#' Write Gene Matrix Transposed (gmt) file from a list. The first column of
#' the GMT are gene set names, the second are descriptions, then are genes.
#'
#' @param gmt.lst List of gene sets. Each element has a gene set \code{name}, \code{description}, and \code{genes}.
#' @param file.gmt Name of the GMT file to write to.
#' @return Invisibly, \code{gmt.lst}.
#' @details This function was adapted from \code{gmtlist2file} in the \pkg{cogena} package.
# returns data input, as per https://style.tidyverse.org/functions.html#return
write_gmt <- function(gmt.lst, file.gmt){
  stopifnot(length(gmt.lst) > 0, is.list(gmt.lst))
  # FIX: the original cat(..., append = TRUE) appended to any pre-existing
  # file, so re-running a script silently accumulated duplicate gene sets.
  # Writing through a fresh (truncating) connection makes the write
  # idempotent; on.exit guarantees the connection is closed even on error.
  con <- file(file.gmt, open = "wt")
  on.exit(close(con), add = TRUE)
  for (gene.set in gmt.lst) {
    # GMT row: set name, description, then genes, all tab-separated.
    cat(gene.set$name, gene.set$description, gene.set$genes, file = con, sep = "\t")
    cat("\n", file = con)
  }
  # returns data input, as per https://style.tidyverse.org/functions.html#return
  return(invisible(gmt.lst))
}
|
# Examples of different loess smooths: print every ggplot saved in
# matlabplots.Rda, one after another.
load("matlabplots.Rda")
allplots <- unlist(matlabplots, recursive = FALSE)
# Replace the original 37 hard-coded `allplots[[i]]` lines with a loop:
# inside a loop plots must be print()ed explicitly, and this generalizes
# to however many plots the .Rda file contains.
for (i in seq_along(allplots)) {
  print(allplots[[i]])
}
| /test_loess_plots.R | no_license | kaijagahm/BurstSpeed | R | false | false | 699 | r | # Examples of different loess smooths
load("matlabplots.Rda")
allplots <- unlist(matlabplots, recursive = FALSE)
# Replace the original 37 hard-coded `allplots[[i]]` lines with a loop:
# inside a loop plots must be print()ed explicitly, and this generalizes
# to however many plots the .Rda file contains.
for (i in seq_along(allplots)) {
  print(allplots[[i]])
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/nonstdMetrics.R
\name{ser}
\alias{ser}
\title{Non-Standard Evaluation Metrics}
\usage{
ser(trues, preds, phi.trues = NULL, ph = NULL, t = 0)
}
\arguments{
\item{trues}{Target values from a test set of a given data set. Should be a vector and have the same size as the variable preds}
\item{preds}{Predicted values given a certain test set of a given data set. Should be a vector and have the same size as the variable preds}
\item{phi.trues}{Relevance of the values in the parameter trues. Use ??phi() for more information. Defaults to NULL}
\item{ph}{The relevance function providing the data points where the pairs of values-relevance are known. Default is NULL}
\item{t}{Relevance cut-off. Default is 0.}
}
\value{
Squared error for cases where the relevance of the true value is greater than t (SER)
}
\description{
Obtains the squared error of predictions for a given subset of relevance
}
\details{
Squared Error-Relevance Metric (SER)
}
\examples{
\dontrun{
library(IRon)
library(rpart)
data(accel)
form <- acceleration ~ .
ind <- sample(1:nrow(accel),0.75*nrow(accel))
train <- accel[ind,]
test <- accel[-ind,]
ph <- phi.control(accel$acceleration)
m <- rpart::rpart(form, train)
preds <- as.vector(predict(m,test))
trues <- test$acceleration
phi.trues <- phi(test$acceleration,ph)
ser(trues,preds,phi.trues)
}
}
| /man/ser.Rd | permissive | Yuanproj/IRon | R | false | true | 1,416 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/nonstdMetrics.R
\name{ser}
\alias{ser}
\title{Non-Standard Evaluation Metrics}
\usage{
ser(trues, preds, phi.trues = NULL, ph = NULL, t = 0)
}
\arguments{
\item{trues}{Target values from a test set of a given data set. Should be a vector and have the same size as the variable preds}
\item{preds}{Predicted values given a certain test set of a given data set. Should be a vector and have the same size as the variable preds}
\item{phi.trues}{Relevance of the values in the parameter trues. Use ??phi() for more information. Defaults to NULL}
\item{ph}{The relevance function providing the data points where the pairs of values-relevance are known. Default is NULL}
\item{t}{Relevance cut-off. Default is 0.}
}
\value{
Squared error for cases where the relevance of the true value is greater than t (SER)
}
\description{
Obtains the squared error of predictions for a given subset of relevance
}
\details{
Squared Error-Relevance Metric (SER)
}
\examples{
\dontrun{
library(IRon)
library(rpart)
data(accel)
form <- acceleration ~ .
ind <- sample(1:nrow(accel),0.75*nrow(accel))
train <- accel[ind,]
test <- accel[-ind,]
ph <- phi.control(accel$acceleration)
m <- rpart::rpart(form, train)
preds <- as.vector(predict(m,test))
trues <- test$acceleration
phi.trues <- phi(test$acceleration,ph)
ser(trues,preds,phi.trues)
}
}
|
library(gtrendsR)
library(leaflet)
library(dplyr)
# NOTE(review): hard-coded user-specific path; adjust for your machine.
setwd("C:/Users/blake/OneDrive/Stryker Project/Country Leaflet Information Dashboard/Building Leaflet Final Interactive Map/Leaflet Map App")
uscities <- read.csv("uscities.csv")
# Google search interest for "Dominos Pizza" by Michigan city.
MI <- gtrends("Dominos Pizza", geo = "US-MI", time = "all")$interest_by_city
MI <- data.frame(MI); colnames(MI)[1] <- "city"
# Keep Michigan city metadata and restrict both tables to Traverse City.
uscitiesMI <- uscities[uscities$state_id == "MI", ]
uscitiesMI <- uscitiesMI[uscitiesMI$city == "Traverse City", ]
MI2 <- MI[MI$location == "Traverse City", ]
# Join city metadata with search-interest hits; drop rows with no hits.
merge1 <- merge(uscitiesMI, MI, by = c("city"), all.x = TRUE)
merge2 <- merge1[!is.na(merge1$hits), ]
# FIX: the original first ran `ifelse(merge2$city)` -- an incomplete call
# that errors at runtime -- and then assigned a whole mutated data frame into
# merge2$color, producing a nested data-frame column that had to be accessed
# as merge2$color$color. Bin the hits into a plain character column instead.
merge2 <- merge2 %>%
  mutate(color = case_when(hits <= 20 ~ 'Low',
                           hits >= 20 & hits < 50 ~ "med",
                           hits >= 50 & hits < 80 ~ "med high",
                           hits >= 80 ~ "High"))
merge2
# Map each hits bin to a marker colour.
pal <- colorFactor(palette = c("red","yellow","blue","green"),
                   levels = c("Low","med","med high","High"))
# Interactive map: one circle per city, sized by hits, coloured by bin.
leaflet() %>%
  addProviderTiles("CartoDB") %>%
  addCircleMarkers(data = merge2,
                   lng = ~lng,
                   lat = ~lat,
                   popup = ~paste0("<b>","<Strong>","City: ",city, "<b>", "<br/>","<Strong>","Population Density: ",density,"<br/>","<b>","<Strong>","Google Searches: ",hits,"<b>"), # making bold and add break
                   color = ~pal(color),
                   radius = ~hits/10)
| /GOOGLETRENDSBYCITY.R | no_license | btindol178/Customer-Intelligence-Platform | R | false | false | 1,527 | r | library(gtrendsR)
library(leaflet)
library(dplyr)
# NOTE(review): hard-coded user-specific path; breaks on any other machine.
setwd("C:/Users/blake/OneDrive/Stryker Project/Country Leaflet Information Dashboard/Building Leaflet Final Interactive Map/Leaflet Map App")
uscities <- read.csv("uscities.csv")
# Google search interest for "Dominos Pizza" by Michigan city.
MI <- gtrends("Dominos Pizza" ,geo = "US-MI",time = "all" )$interest_by_city
uscitiesMI <- uscities[uscities$state_id =="MI",]
MI <- data.frame(MI); colnames(MI)[1]<- "city"
# Restrict both tables to Traverse City.
uscitiesMI <- uscitiesMI[uscitiesMI$city =="Traverse City",]
MI2 <- MI[MI$location == "Traverse City",]
# Join city metadata with search-interest hits; drop rows with no hits.
merge1 <-merge(uscitiesMI,MI,by = c("city"),all.x = TRUE)
merge2 <- merge1[! is.na(merge1$hits),]
# NOTE(review): BUG -- ifelse() requires three arguments; this call errors at
# runtime and its result is immediately overwritten below anyway.
merge2$color <- ifelse(merge2$city)
# NOTE(review): BUG -- this assigns an entire mutated data frame into the
# `color` column, creating a nested data-frame column; that is why the pal()
# call below has to reach into merge2$color$color.
merge2$color <- merge2 %>%
  mutate(color = case_when(hits <= 20 ~ 'Low',
                           hits >= 20 & hits < 50 ~ "med",
                           hits >= 50 & hits < 80 ~ "med high",
                           hits >= 80 ~ "High"))
merge2
# Map each hits bin to a marker colour.
pal <- colorFactor(palette = c("red","yellow","blue","green"),
                   levels = c("Low","med","med high","High"))
# Interactive map: one circle per city, sized by hits, coloured by bin.
leaflet() %>%
  addProviderTiles("CartoDB") %>%
  addCircleMarkers(data = merge2,
                   lng = ~lng,
                   lat = ~lat,
                   popup = ~paste0("<b>","<Strong>","City: ",city, "<b>", "<br/>","<Strong>","Population Density: ",density,"<br/>","<b>","<Strong>","Google Searches: ",hits,"<b>"), # making bold and add break
                   color = ~pal(merge2$color$color),
                   radius = ~hits/10)
|
#Installing packages - one time
# One-time package installation. NOTE(review): leaving these live reinstalls
# the packages on every run; comment them out after the first execution.
install.packages("dplyr")
install.packages("mvnormtest")
install.packages("pwr")
install.packages("sm")
#### INFM 600 Project ####
# FIX: the original read the CSV into `df` but every subsequent statement
# referenced an undefined `FinalDataset`; load the data under the name that
# is actually used.
FinalDataset <- read.csv("FinalDataset.csv")
View(FinalDataset)
df4 <- na.omit(FinalDataset)
# Attach the packages needed below (library() errors loudly if one is
# missing, unlike require()).
library(dplyr)
library(mvnormtest)
library(pwr)
library(sm)
# Separate variables into standalone objects: factors for the identifiers,
# matrices for the numeric responses (column positions follow the CSV layout).
INSTNM <- as.factor(FinalDataset[, 1])
YEAR <- as.factor(FinalDataset[, 3])
C100_4 <- as.matrix(FinalDataset[, 4])
C150_4 <- as.matrix(FinalDataset[, 5])
D150_4 <- as.matrix(FinalDataset[, 6])
COMPL_RPY_3YR_RT <- as.matrix(FinalDataset[, 8])
COMPL_RPY_5YR_RT <- as.matrix(FinalDataset[, 9])
DEBT_N <- as.matrix(FinalDataset[, 10])
DEP_DEBT_MDN <- as.matrix(FinalDataset[, 11])
PCTFLOAN <- as.matrix(FinalDataset[, 12])
NPT41_PUB <- as.matrix(FinalDataset[, 13])
NPT42_PUB <- as.matrix(FinalDataset[, 14])
NPT43_PUB <- as.matrix(FinalDataset[, 15])
NPT44_PUB <- as.matrix(FinalDataset[, 16])
NPT45_PUB <- as.matrix(FinalDataset[, 17])
head(INSTNM)
head(YEAR)
# Residual-normality diagnostics: for each response, fit a one-way model on
# institution and draw a normal Q-Q plot of the residuals. This loop replaces
# the original's eight copy-pasted res* assignments and qqnorm/qqline pairs.
diagnostic.vars <- c("C100_4", "C150_4", "D150_4", "COMPL_RPY_3YR_RT",
                     "COMPL_RPY_5YR_RT", "DEBT_N", "DEP_DEBT_MDN", "PCTFLOAN")
for (v in diagnostic.vars) {
  res <- lm(FinalDataset[[v]] ~ FinalDataset$INSTNM)$residuals
  qqnorm(res, main = paste0("res", v), col = 4)
  qqline(res, lwd = 2, col = "gray86")
}
# MANOVA: test whether the response variables jointly differ by institution.
x <- manova(cbind(
  FinalDataset$C100_4,
  FinalDataset$C150_4,
  FinalDataset$D150_4,
  FinalDataset$COMPL_RPY_3YR_RT,
  FinalDataset$COMPL_RPY_5YR_RT,
  FinalDataset$DEBT_N,
  FinalDataset$DEP_DEBT_MDN,
  FinalDataset$PCTFLOAN,
  FinalDataset$NPT41_PUB,
  FinalDataset$NPT42_PUB,
  FinalDataset$NPT43_PUB,
  FinalDataset$NPT44_PUB,
  FinalDataset$NPT45_PUB
) ~ FinalDataset$INSTNM, data = FinalDataset)
# Multivariate test statistic (Wilks' lambda).
summary(x, test = "Wilks")
# Univariate follow-up ANOVAs for each response.
summary.aov(x)
| /drafts/TeamPEDS_Manova.R | no_license | annlaurawalker/INFM_600 | R | false | false | 5,318 | r | #Installing packages - one time
# One-time package installation. NOTE(review): leaving these live reinstalls
# the packages on every run; comment them out after the first execution.
install.packages("dplyr")
install.packages("mvnormtest")
install.packages("pwr")
install.packages("sm")
#### INFM 600 Project ####
# FIX: the original read the CSV into `df` but every subsequent statement
# referenced an undefined `FinalDataset`; load the data under the name that
# is actually used.
FinalDataset <- read.csv("FinalDataset.csv")
View(FinalDataset)
df4 <- na.omit(FinalDataset)
# Attach the packages needed below (library() errors loudly if one is
# missing, unlike require()).
library(dplyr)
library(mvnormtest)
library(pwr)
library(sm)
# Separate variables into standalone objects: factors for the identifiers,
# matrices for the numeric responses (column positions follow the CSV layout).
INSTNM <- as.factor(FinalDataset[, 1])
YEAR <- as.factor(FinalDataset[, 3])
C100_4 <- as.matrix(FinalDataset[, 4])
C150_4 <- as.matrix(FinalDataset[, 5])
D150_4 <- as.matrix(FinalDataset[, 6])
COMPL_RPY_3YR_RT <- as.matrix(FinalDataset[, 8])
COMPL_RPY_5YR_RT <- as.matrix(FinalDataset[, 9])
DEBT_N <- as.matrix(FinalDataset[, 10])
DEP_DEBT_MDN <- as.matrix(FinalDataset[, 11])
PCTFLOAN <- as.matrix(FinalDataset[, 12])
NPT41_PUB <- as.matrix(FinalDataset[, 13])
NPT42_PUB <- as.matrix(FinalDataset[, 14])
NPT43_PUB <- as.matrix(FinalDataset[, 15])
NPT44_PUB <- as.matrix(FinalDataset[, 16])
NPT45_PUB <- as.matrix(FinalDataset[, 17])
head(INSTNM)
head(YEAR)
# Residual-normality diagnostics: for each response, fit a one-way model on
# institution and draw a normal Q-Q plot of the residuals. This loop replaces
# the original's eight copy-pasted res* assignments and qqnorm/qqline pairs.
diagnostic.vars <- c("C100_4", "C150_4", "D150_4", "COMPL_RPY_3YR_RT",
                     "COMPL_RPY_5YR_RT", "DEBT_N", "DEP_DEBT_MDN", "PCTFLOAN")
for (v in diagnostic.vars) {
  res <- lm(FinalDataset[[v]] ~ FinalDataset$INSTNM)$residuals
  qqnorm(res, main = paste0("res", v), col = 4)
  qqline(res, lwd = 2, col = "gray86")
}
# MANOVA: test whether the response variables jointly differ by institution.
x <- manova(cbind(
  FinalDataset$C100_4,
  FinalDataset$C150_4,
  FinalDataset$D150_4,
  FinalDataset$COMPL_RPY_3YR_RT,
  FinalDataset$COMPL_RPY_5YR_RT,
  FinalDataset$DEBT_N,
  FinalDataset$DEP_DEBT_MDN,
  FinalDataset$PCTFLOAN,
  FinalDataset$NPT41_PUB,
  FinalDataset$NPT42_PUB,
  FinalDataset$NPT43_PUB,
  FinalDataset$NPT44_PUB,
  FinalDataset$NPT45_PUB
) ~ FinalDataset$INSTNM, data = FinalDataset)
# Multivariate test statistic (Wilks' lambda).
summary(x, test = "Wilks")
# Univariate follow-up ANOVAs for each response.
summary.aov(x)
|
#' Pipe operator
#'
#' See \code{magrittr::\link[magrittr:pipe]{\%>\%}} for details.
#'
#' @name %>%
#' @rdname pipe
#' @keywords internal
#' @export
#' @importFrom magrittr %>%
#' @usage lhs \%>\% rhs
NULL
# Raw per-case COVID-19 data published by the Covid19Canada project.
url <- "https://raw.githubusercontent.com/ishaberry/Covid19Canada/master/cases.csv"
# Lookup table mapping the province labels used in the source data to
# ISO 3166-2 location codes (joined onto the data in clean_data()).
# NOTE(review): "Monitoba" looks like a typo for "Manitoba" -- if the source
# CSV spells the province correctly, the left join in clean_data() will
# produce NA codes for that province; confirm against the upstream data.
# NOTE(review): "Repatriated" is paired with "CA-NU" (Nunavut) -- verify that
# this mapping is intentional.
iso.location <- data.frame(
  location_code = c("CA-AB", "CA-BC", "CA-MB", "CA-NB", "CA-NL", "CA-NS", "CA-ON", "CA-PE", "CA-QC", "CA-SK", "CA-NT", "CA-NU", "CA-YT"),
  location_code_type = "iso_3166_2",
  province = c("Alberta", "BC", "Monitoba", "New Brunswick", "NL", "Nova Scotia", "Ontario", "PEI", "Quebec", "Saskatchewan", "NWT", "Repatriated", "Yukon")
)
# Download the raw Covid19Canada case-level CSV (network I/O; reads the
# module-level `url` constant). Dates in the source are day-month-year, so
# both date columns get an explicit parse format; remaining columns are
# type-guessed by readr. Parsing warnings are deliberately suppressed
# because the upstream file contains some unparseable values.
read_data <- function() {
  suppressWarnings(
    readr::read_csv(
      url,
      col_types =
        readr::cols(
          date_report = readr::col_date(format = "%d-%m-%Y"),
          report_week = readr::col_date(format = "%d-%m-%Y")
        )
    )
  )
}
# Reshape the raw Covid19Canada case table into the covid19R standard format:
# add constant metadata columns, rename/reorder the columns of interest, sort
# by case id, attach ISO 3166-2 codes from `iso.location`, and tag every row
# with value = 1 (each row represents a single reported case).
clean_data <- function(tbl) {
  suppressWarnings({
    annotated <- dplyr::mutate(
      tbl,
      location_type = "state",
      data_type = "cases_new",
      data_url = "https://github.com/ishaberry/Covid19Canada"
    )
    selected <- dplyr::select(
      annotated,
      case_id,
      provincial_case_id,
      date = date_report,
      location = province,
      location_type,
      data_type,
      travel_yn,
      locally_acquired,
      data_details = case_source,
      data_url,
      additional_info,
      travel_history_country,
      age,
      sex
    )
    ordered <- dplyr::arrange(selected, case_id)
    coded <- dplyr::left_join(ordered, iso.location, by = c("location" = "province"))
    dplyr::mutate(coded, value = 1)
  })
}
| /R/utils.R | permissive | Covid19R/CanadaC19 | R | false | false | 1,681 | r | #' Pipe operator
#'
#' See \code{magrittr::\link[magrittr:pipe]{\%>\%}} for details.
#'
#' @name %>%
#' @rdname pipe
#' @keywords internal
#' @export
#' @importFrom magrittr %>%
#' @usage lhs \%>\% rhs
NULL
url <- "https://raw.githubusercontent.com/ishaberry/Covid19Canada/master/cases.csv"
iso.location <- data.frame(
location_code = c("CA-AB", "CA-BC", "CA-MB", "CA-NB", "CA-NL", "CA-NS", "CA-ON", "CA-PE", "CA-QC", "CA-SK", "CA-NT", "CA-NU", "CA-YT"),
location_code_type = "iso_3166_2",
province = c("Alberta", "BC", "Monitoba", "New Brunswick", "NL", "Nova Scotia", "Ontario", "PEI", "Quebec", "Saskatchewan", "NWT", "Repatriated", "Yukon")
)
read_data <- function() {
suppressWarnings(
readr::read_csv(
url,
col_types =
readr::cols(
date_report = readr::col_date(format = "%d-%m-%Y"),
report_week = readr::col_date(format = "%d-%m-%Y")
)
)
)
}
clean_data <- function(tbl) {
suppressWarnings(
tbl %>%
dplyr::mutate(
location_type = "state",
data_type = "cases_new",
data_url = "https://github.com/ishaberry/Covid19Canada"
) %>%
dplyr::select(
case_id,
provincial_case_id,
date = date_report,
location = province,
location_type,
data_type,
travel_yn,
locally_acquired,
data_details = case_source,
data_url,
additional_info,
travel_history_country,
age,
sex
) %>%
dplyr::arrange(
case_id
) %>%
dplyr::left_join(iso.location, by = c("location" = "province")) %>%
dplyr::mutate(
value = 1
)
)
}
|
library(dplyr)
library(extrafont)
library(waffle)
library(patchwork)
days_climb <- data.frame(
date = seq(as.Date("2020-12-20"), as.Date("2021-04-12"), by="days")
) %>%
mutate(label =
case_when(date <= Sys.Date() ~ "Days I've spent waiting to climb again",
TRUE ~ "Days left until I can climb again")
) %>%
group_by(label) %>%
summarise(count = n()) %>%
mutate(label = factor(label, levels = c(
"Days left until I can climb again",
"Days I've spent waiting to climb again")))
picto <- days_climb %>%
arrange(label) %>%
ggplot(aes(label = label, values = count, color = label)) +
geom_pictogram(n_rows = 10,
family = "Font Awesome 5 Free Solid",
flip = TRUE) +
scale_color_manual(
name = NULL,
labels = c(
"DAYS LEFT UNTIL I CAN CLIMB AGAIN",
"DAYS I'VE SPENT WAITING TO CLIMB AGAIN"
),
values = c(
"#2F3C7E",
"#979ebf"
)) +
scale_label_pictogram(
name = NULL,
labels = c(
"DAYS LEFT UNTIL I CAN CLIMB AGAIN",
"DAYS I'VE SPENT WAITING TO CLIMB AGAIN"
),
values = c(
"calendar-alt",
"calendar-alt"
)
) +
coord_equal() +
theme_enhance_waffle() +
theme(legend.key.height = unit(2, "line"),
legend.text = element_text(size = 8, hjust = 0, vjust = 0.75, family = "Verdana"),
axis.ticks.x = element_blank(),
axis.ticks.y = element_blank(),
panel.background = element_rect(fill = "#FBEAEB"),
plot.background = element_rect(fill = "#FBEAEB"),
legend.background = element_rect(fill = "#FBEAEB"),
legend.position = "bottom",
legend.direction = "vertical")
header <- ggdraw() +
draw_text("LIFTING LOCKDOWN", x= 0.5, y = 0.8, size = 20, family = "Verdana") +
draw_text("11 days until climbing centrEs and gyms reopen in England (hopefully)",
x = 0.5, y = 0.6, size = 12, family = "Verdana Pro Light") +
theme(panel.background = element_rect(fill = "#FBEAEB"),
plot.background = element_rect(fill = "#FBEAEB"))
final_picto <- picto + plot_annotation(
title = "LIFTING LOCKDOWN",
subtitle = "10 days until climbing centres and gyms reopen in England (hopefully)",
caption = "Data: self collected (based on date London went into Tier 4) | #30DayChartChallenge") &
theme(rect = element_rect(fill = "#FBEAEB"),
panel.background = element_rect(fill = "#FBEAEB", color = "#FBEAEB"),
plot.background = element_rect(fill = "#FBEAEB", color = "#FBEAEB"),
text = element_text(family = "Verdana"),
plot.title = element_text(color = "#2F3C7E", face = "bold", size = 22, hjust = .5),
plot.subtitle = element_text(color = "#2F3C7E", size = 11, hjust = .5),
plot.caption = element_text(color = "#2F3C7E", size = 9, hjust = .5),
plot.margin = margin(t = 15, b = 15, l = 15, r = 15),
)
ggsave(
"day2-pictogram.png", final_picto, width = 6, height=9, dpi = 300, limitsize = FALSE)
| /Comparisons/Day 2 - Pictogram/day2-pictogram.R | no_license | linda-bennett/30-day-chart-challenge | R | false | false | 3,047 | r | library(dplyr)
library(extrafont)
library(waffle)
library(patchwork)
days_climb <- data.frame(
date = seq(as.Date("2020-12-20"), as.Date("2021-04-12"), by="days")
) %>%
mutate(label =
case_when(date <= Sys.Date() ~ "Days I've spent waiting to climb again",
TRUE ~ "Days left until I can climb again")
) %>%
group_by(label) %>%
summarise(count = n()) %>%
mutate(label = factor(label, levels = c(
"Days left until I can climb again",
"Days I've spent waiting to climb again")))
picto <- days_climb %>%
arrange(label) %>%
ggplot(aes(label = label, values = count, color = label)) +
geom_pictogram(n_rows = 10,
family = "Font Awesome 5 Free Solid",
flip = TRUE) +
scale_color_manual(
name = NULL,
labels = c(
"DAYS LEFT UNTIL I CAN CLIMB AGAIN",
"DAYS I'VE SPENT WAITING TO CLIMB AGAIN"
),
values = c(
"#2F3C7E",
"#979ebf"
)) +
scale_label_pictogram(
name = NULL,
labels = c(
"DAYS LEFT UNTIL I CAN CLIMB AGAIN",
"DAYS I'VE SPENT WAITING TO CLIMB AGAIN"
),
values = c(
"calendar-alt",
"calendar-alt"
)
) +
coord_equal() +
theme_enhance_waffle() +
theme(legend.key.height = unit(2, "line"),
legend.text = element_text(size = 8, hjust = 0, vjust = 0.75, family = "Verdana"),
axis.ticks.x = element_blank(),
axis.ticks.y = element_blank(),
panel.background = element_rect(fill = "#FBEAEB"),
plot.background = element_rect(fill = "#FBEAEB"),
legend.background = element_rect(fill = "#FBEAEB"),
legend.position = "bottom",
legend.direction = "vertical")
header <- ggdraw() +
draw_text("LIFTING LOCKDOWN", x= 0.5, y = 0.8, size = 20, family = "Verdana") +
draw_text("11 days until climbing centrEs and gyms reopen in England (hopefully)",
x = 0.5, y = 0.6, size = 12, family = "Verdana Pro Light") +
theme(panel.background = element_rect(fill = "#FBEAEB"),
plot.background = element_rect(fill = "#FBEAEB"))
final_picto <- picto + plot_annotation(
title = "LIFTING LOCKDOWN",
subtitle = "10 days until climbing centres and gyms reopen in England (hopefully)",
caption = "Data: self collected (based on date London went into Tier 4) | #30DayChartChallenge") &
theme(rect = element_rect(fill = "#FBEAEB"),
panel.background = element_rect(fill = "#FBEAEB", color = "#FBEAEB"),
plot.background = element_rect(fill = "#FBEAEB", color = "#FBEAEB"),
text = element_text(family = "Verdana"),
plot.title = element_text(color = "#2F3C7E", face = "bold", size = 22, hjust = .5),
plot.subtitle = element_text(color = "#2F3C7E", size = 11, hjust = .5),
plot.caption = element_text(color = "#2F3C7E", size = 9, hjust = .5),
plot.margin = margin(t = 15, b = 15, l = 15, r = 15),
)
ggsave(
"day2-pictogram.png", final_picto, width = 6, height=9, dpi = 300, limitsize = FALSE)
|
library("RPostgreSQL", quiet=TRUE)
# Shared database connection used by the graph scripts that source this file.
con <- dbConnect(PostgreSQL(), user="postgres", dbname="desura_data")
# Shared plot styling constants.
axis_color <- rgb(0.7,0.7,0.7)
primary_color <- "#DD4A4A"
default_width <- 1000
default_height <- 600
default_height_half <- 400
# NOTE(review): this variable shadows the name of grDevices::colors();
# calls to the function colors() still resolve, but the shadowing is fragile.
colors <- rainbow(8)
# Compact axis label for a month: abbreviated month name plus 2-digit year,
# e.g. "Jan 15" (the month name follows the current locale).
format_months <- function(months) {
  format(months, "%b %y")
}
# Draw the bottom (side 1) axis labelled with formatted month names.
# When `skip` is TRUE, only every third month is labelled (always including
# the final month) to avoid overplotting; labels are rotated via las = 2.
months_axis <- function(months, skip=TRUE) {
  month_ids <- seq(1, length(months))
  if (skip) {
    # Every 3rd index; unique() prevents labelling the last month twice.
    month_ids <- unique(c(seq(1, length(months), 3), length(months)))
  }
  axis(1,
       col=axis_color,   # file-level styling constant
       las=2,
       at=month_ids,
       labels=format_months(months[month_ids]))
}
# Compute tick positions from `min` to `max` split into `chunks` steps.
#   nearest   - if a number, round ticks down to that multiple (FALSE = off)
#   log_scale - space the ticks evenly in log10 space
# The final guard drops the second-to-last tick when it sits closer to the
# last tick than one step, so the end labels do not overlap.
axis_stops <- function(max, chunks, nearest=FALSE, log_scale=FALSE, min=0) {
  if (log_scale) {
    max <- log10(max)
  }
  step <- max / chunks
  stops <- seq(0, max, step)
  if (log_scale) {
    stops <- 10^stops
    max <- 10^max
    # NOTE(review): this exponentiates the *already exponentiated* max, so
    # `step` becomes astronomically large and the overlap guard below then
    # always fires in log scale. `step <- 10^step` looks like the intent --
    # confirm before changing.
    step <- 10^max
  }
  if (nearest) {
    stops <- floor(stops / nearest) * nearest
  }
  stops <- unique(c(min, stops, max))
  if (do.call(`-`, as.list(rev(tail(stops, n=2)))) < step) {
    # remove second to last item
    # (negative-index arithmetic: -length(stops) + 1 == -(length - 1))
    stops = stops[-length(stops) + 1]
  }
  stops
}
# Draw the right-hand (side 4) axis with comma-formatted integer count
# labels, using 4 evenly spaced stops up to the largest count.
# BUG FIX: the stops were previously computed from the global `res$sum`
# instead of the `counts` argument, so the axis silently depended on a
# variable that happened to exist in the calling script.
count_axis <- function(counts) {
  stops <- axis_stops(max(counts), 4)
  axis(4,
       at=stops,
       labels=format(floor(stops), trim=TRUE, big.mark=",", scientific=FALSE),
       las=2)
}
| /graphs/common.r | no_license | leafo/desura-data | R | false | false | 1,328 | r | library("RPostgreSQL", quiet=TRUE)
con <- dbConnect(PostgreSQL(), user="postgres", dbname="desura_data")
axis_color <- rgb(0.7,0.7,0.7)
primary_color <- "#DD4A4A"
default_width <- 1000
default_height <- 600
default_height_half <- 400
colors <- rainbow(8)
format_months <- function(months) {
strftime(months, "%b %y")
}
months_axis <- function(months, skip=TRUE) {
month_ids <- seq(1, length(months))
if (skip) {
month_ids <- unique(c(seq(1, length(months), 3), length(months)))
}
axis(1,
col=axis_color,
las=2,
at=month_ids,
labels=format_months(months[month_ids]))
}
axis_stops <- function(max, chunks, nearest=FALSE, log_scale=FALSE, min=0) {
if (log_scale) {
max <- log10(max)
}
step <- max / chunks
stops <- seq(0, max, step)
if (log_scale) {
stops <- 10^stops
max <- 10^max
step <- 10^max
}
if (nearest) {
stops <- floor(stops / nearest) * nearest
}
stops <- unique(c(min, stops, max))
if (do.call(`-`, as.list(rev(tail(stops, n=2)))) < step) {
# remove second to last item
stops = stops[-length(stops) + 1]
}
stops
}
count_axis <- function(counts) {
stops <- axis_stops(max(res$sum), 4)
axis(4,
at=stops,
labels=format(floor(stops), trim=TRUE, big.mark=",", scientific=FALSE),
las=2)
}
|
# Exploratory Data Analysis course project, plot 4: reads two days of
# household power data (1-2 Feb 2007, hard-coded skip/nrows) and draws a
# 2x2 panel of time-series plots saved to plot4.png.
#Set working directory
setwd("C:/Users/deschmidt/Desktop/Exploratory Data Analysis")
filePath <- "./data/household_power_consumption.txt"
#Read in column names
# NOTE(review): `columns` is a 1-row data.frame, not a character vector;
# read.table coerces it when used as col.names, but unlist()ing it first
# would be cleaner and more explicit.
columns <- read.table(filePath, sep = ";", na.strings = "?", nrows = 1, stringsAsFactors = FALSE)
#Read in only rows from the desired dates
myData <- read.table(filePath, sep = ";", col.names = columns, na.strings = "?", nrows = 2880, skip = 66637, stringsAsFactors = FALSE)
#View the structure of the data
str(myData)
#Create timestamp using strptime
timestamp <- strptime(paste(myData$Date, myData$Time, sep=" "), "%d/%m/%Y %H:%M:%S")
#Change variables to numeric
globalActivePower <- as.numeric(myData$Global_active_power)
globalReactivePower <- as.numeric(myData$Global_reactive_power)
voltage <- as.numeric(myData$Voltage)
subMetering1 <- as.numeric(myData$Sub_metering_1)
subMetering2 <- as.numeric(myData$Sub_metering_2)
subMetering3 <- as.numeric(myData$Sub_metering_3)
#Create four plots using par function
png("plot4.png", width=480, height=480)
par(mfrow = c(2, 2))
plot(timestamp, globalActivePower, type="l", xlab="", ylab="Global Active Power", cex=0.2)
plot(timestamp, voltage, type="l", xlab="datetime", ylab="Voltage")
plot(timestamp, subMetering1, type="l", ylab="Energy Submetering", xlab="")
lines(timestamp, subMetering2, type="l", col="red")
lines(timestamp, subMetering3, type="l", col="blue")
# NOTE(review): `lty=` is given with no value, so legend() falls back to the
# default line type; `lty=1` was probably intended to match the plotted lines.
legend("topright", c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"), lty=, lwd=2.5, col=c("black", "red", "blue"), bty="o")
plot(timestamp, globalReactivePower, type="l", xlab="datetime", ylab="Global_reactive_power")
dev.off()
dev.off() | /plot4.R | no_license | dschmidt5/ExData_Plotting1 | R | false | false | 1,665 | r | #Set working directory
setwd("C:/Users/deschmidt/Desktop/Exploratory Data Analysis")
filePath <- "./data/household_power_consumption.txt"
#Read in column names
columns <- read.table(filePath, sep = ";", na.strings = "?", nrows = 1, stringsAsFactors = FALSE)
#Read in only rows from the desired dates
myData <- read.table(filePath, sep = ";", col.names = columns, na.strings = "?", nrows = 2880, skip = 66637, stringsAsFactors = FALSE)
#View the structure of the data
str(myData)
#Create timestamp using strptime
timestamp <- strptime(paste(myData$Date, myData$Time, sep=" "), "%d/%m/%Y %H:%M:%S")
#Change variables to numeric
globalActivePower <- as.numeric(myData$Global_active_power)
globalReactivePower <- as.numeric(myData$Global_reactive_power)
voltage <- as.numeric(myData$Voltage)
subMetering1 <- as.numeric(myData$Sub_metering_1)
subMetering2 <- as.numeric(myData$Sub_metering_2)
subMetering3 <- as.numeric(myData$Sub_metering_3)
#Create four plots using par function
png("plot4.png", width=480, height=480)
par(mfrow = c(2, 2))
plot(timestamp, globalActivePower, type="l", xlab="", ylab="Global Active Power", cex=0.2)
plot(timestamp, voltage, type="l", xlab="datetime", ylab="Voltage")
plot(timestamp, subMetering1, type="l", ylab="Energy Submetering", xlab="")
lines(timestamp, subMetering2, type="l", col="red")
lines(timestamp, subMetering3, type="l", col="blue")
legend("topright", c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"), lty=, lwd=2.5, col=c("black", "red", "blue"), bty="o")
plot(timestamp, globalReactivePower, type="l", xlab="datetime", ylab="Global_reactive_power")
dev.off() |
#' @title Weighted axial mean
#' @description The mean of a weighted axial variable
#' @param vec A vector of axis (degrees)
#' @param weights A vector of weights of the same length as vec
#' @return The mean axis (degrees) and the Pythagorean length
#' @export
meanaxial <- function(vec,weights=NULL){
if(is.null(weights)){weights <- rep(1,length(vec))}
vec <- vec*2
vec <- vec*pi/180
n <- sum(weights)
C <- (1/n)*sum(weights*cos(vec))
S <- (1/n)*sum(weights*sin(vec))
m <- atan2(S,C)
m2 <- m/2
m2 <- m2*(180/pi)
R <- sqrt(C^2+S^2)
return(c(m2,R))
} | /R/fn_axialmean.R | no_license | shearwavesplitter/MFASTR | R | false | false | 560 | r | #' @title Weighted axial mean
#' @title Weighted axial mean
#' @description The mean of a weighted axial variable. Axes are undirected,
#'   so angles are doubled before circular averaging and halved afterwards.
#' @param vec A vector of axes (degrees)
#' @param weights A numeric vector of weights of the same length as
#'   \code{vec}; defaults to equal weights
#' @return A numeric vector of length 2: the mean axis (degrees) and the
#'   Pythagorean (resultant) length R in [0, 1]
#' @export
meanaxial <- function(vec, weights = NULL) {
  if (is.null(weights)) {
    weights <- rep(1, length(vec))
  }
  # Guard against silent recycling when the two vectors disagree in length.
  stopifnot(is.numeric(vec), is.numeric(weights),
            length(weights) == length(vec))
  # Double the angles (axial -> circular) and convert to radians.
  theta <- 2 * vec * pi / 180
  n <- sum(weights)
  # Weighted mean resultant vector components.
  C <- sum(weights * cos(theta)) / n
  S <- sum(weights * sin(theta)) / n
  # Mean direction, halved back to the axial scale and returned in degrees.
  m2 <- (atan2(S, C) / 2) * (180 / pi)
  R <- sqrt(C^2 + S^2)
  c(m2, R)
}
# S3 generic for independent component regression; dispatches on the class
# of `x` (formula interface or default matrix interface).
icr <- function (x, ...) UseMethod("icr")
# Formula interface for independent component regression.
#
# Builds the model frame/matrix from `formula` and `data`, strips the
# intercept column (the ICA preprocessing handles centering itself), then
# delegates to icr.default(). Terms, contrasts, NA action and factor levels
# are stored on the result so predict.icr() can rebuild a matching model
# matrix for new data.
icr.formula <- function (formula, data, weights, ...,
                         subset, na.action, contrasts = NULL)
{
  m <- match.call(expand.dots = FALSE)
  if (is.matrix(eval.parent(m$data)))
    m$data <- as.data.frame(data)
  m$... <- m$contrasts <- NULL
  m[[1]] <- as.name("model.frame")
  m <- eval.parent(m)
  Terms <- attr(m, "terms")
  x <- model.matrix(Terms, m, contrasts)
  cons <- attr(x, "contrast")
  xint <- match("(Intercept)", colnames(x), nomatch = 0)
  if (xint > 0)
    x <- x[, -xint, drop = FALSE]
  w <- model.weights(m)
  if (length(w) == 0)
    w <- rep(1, nrow(x))
  y <- model.response(m)
  ## BUG FIX: this call previously passed `thresh = thresh`, but no `thresh`
  ## object exists in this scope, so every call to the formula method failed
  ## with "object 'thresh' not found". Extra options must come in via `...`.
  res <- icr.default(x, y, weights = w, ...)
  res$terms <- Terms
  res$coefnames <- colnames(x)
  res$na.action <- attr(m, "na.action")
  res$contrasts <- cons
  res$xlevels <- .getXlevels(Terms, m)
  class(res) <- c("icr.formula", "icr")
  res
}
# Default (matrix) interface for independent component regression.
#
# Projects `x` onto independent components via caret's "ica" preprocessing
# (backed by the fastICA package) and fits a linear model of `y` on those
# components.
#   x   - numeric predictor matrix or data frame
#   y   - numeric response (factors are rejected)
#   ... - passed through to preProcess(), e.g. n.comp
icr.default <- function(x, y, ...)
{
  ## Fail fast on an invalid response before the (expensive) ICA step;
  ## previously this check ran only after preprocessing had completed.
  if (is.factor(y)) stop("y must be numeric")
  library(fastICA)
  xNames <- colnames(x)
  pp <- preProcess(x, "ica", ...)
  x <- predict(pp, x)
  data <- if (is.data.frame(x)) x else as.data.frame(x)
  data$y <- y
  modelFit <- lm(y ~ ., data = data)
  out <- list(model = modelFit,     # lm fit on the independent components
              ica = pp,             # stored projection for predict.icr
              dim = dim(x),
              n.comp = list(...)$n.comp,
              names = xNames)       # training predictor column names
  class(out) <- "icr"
  out
}
# Print method for icr objects: shows the training data dimensions and, when
# available, the coefficients of the underlying linear model. Returns `x`
# invisibly so the object keeps flowing through a pipeline.
print.icr <- function (x, digits = max(3, getOption("digits") - 3), ...)
{
  cat("Independent Component Regression\n\n")
  cat("Created from", x$dim[1], "samples and", x$dim[2], "variables\n\n")
  coefs <- coef(x$model)
  if (length(coefs) == 0) {
    cat("No coefficients\n")
  } else {
    cat("Coefficients:\n")
    formatted <- format(coefs, digits = digits)
    print.default(formatted, print.gap = 2, quote = FALSE)
  }
  cat("\n")
  invisible(x)
}
# Predict from an icr model.
#
# With no `newdata`, returns the fitted values of the underlying lm. With
# `newdata`, rebuilds a predictor matrix (via the stored terms/contrasts for
# formula fits, or by coercing the input for matrix fits), restricts it to
# the training-time columns, applies the stored ICA projection, and predicts
# from the linear model.
predict.icr <- function(object, newdata, ...)
{
  library(fastICA)
  if (!inherits(object, "icr")) stop("object not of class \"icr\"")
  if (missing(newdata))
  {
    return(fitted(object$model))
  } else {
    if (inherits(object, "icr.formula")) {
      newdata <- as.data.frame(newdata)
      rn <- row.names(newdata)
      # Rebuild the model frame with the training-time terms and levels.
      Terms <- delete.response(object$terms)
      m <- model.frame(Terms, newdata, na.action = na.omit,
                       xlev = object$xlevels)
      if (!is.null(cl <- attr(Terms, "dataClasses")))
        .checkMFClasses(cl, m)
      # NOTE(review): `keep` records which rows survived na.omit but is never
      # used afterwards -- rows dropped here are silently missing from the
      # returned predictions.
      keep <- match(row.names(m), rn)
      x <- model.matrix(Terms, m, contrasts = object$contrasts)
      xint <- match("(Intercept)", colnames(x), nomatch = 0)
      if (xint > 0)
        x <- x[, -xint, drop = FALSE]
    }
    else {
      # Matrix interface: accept a bare vector as a single observation.
      if (is.null(dim(newdata)))
        dim(newdata) <- c(1, length(newdata))
      x <- as.matrix(newdata)
      if (any(is.na(x)))
        stop("missing values in 'x'")
      keep <- 1:nrow(x)
      rn <- rownames(x)
    }
  }
  # Restrict to (and order by) the predictor columns seen at fit time.
  if(!is.null(object$names))
  {
    x <- x[, object$names, drop = FALSE]
  }
  if(!is.data.frame(x)) x <- as.data.frame(x)
  # Project onto the stored independent components, then predict from the lm.
  x <- predict(object$ica, x)
  predict(object$model, x, ...)
}
| /pkg/caret/R/icr.R | no_license | klainfo/caret | R | false | false | 3,456 | r |
icr <- function (x, ...) UseMethod("icr")
icr.formula <- function (formula, data, weights, ...,
subset, na.action, contrasts = NULL)
{
m <- match.call(expand.dots = FALSE)
if (is.matrix(eval.parent(m$data)))
m$data <- as.data.frame(data)
m$... <- m$contrasts <- NULL
m[[1]] <- as.name("model.frame")
m <- eval.parent(m)
Terms <- attr(m, "terms")
x <- model.matrix(Terms, m, contrasts)
cons <- attr(x, "contrast")
xint <- match("(Intercept)", colnames(x), nomatch = 0)
if (xint > 0)
x <- x[, -xint, drop = FALSE]
w <- model.weights(m)
if (length(w) == 0)
w <- rep(1, nrow(x))
y <- model.response(m)
res <- icr.default(x, y, weights = w, thresh = thresh, ...)
res$terms <- Terms
res$coefnames <- colnames(x)
res$na.action <- attr(m, "na.action")
res$contrasts <- cons
res$xlevels <- .getXlevels(Terms, m)
class(res) <- c("icr.formula", "icr")
res
}
icr.default <- function(x, y, ...)
{
library(fastICA)
xNames <- colnames(x)
pp <- preProcess(x, "ica", ...)
x <- predict(pp, x)
if(is.factor(y)) stop("y must be numeric")
data <- if(is.data.frame(x)) x else as.data.frame(x)
data$y <- y
modelFit <- lm(y ~ ., data = data)
out <- list(model = modelFit,
ica = pp,
dim = dim(x),
n.comp = list(...)$n.comp,
names = xNames)
class(out) <- "icr"
out
}
print.icr <- function (x, digits = max(3, getOption("digits") - 3), ...)
{
cat("Independent Component Regression\n\n")
cat("Created from", x$dim[1], "samples and", x$dim[2], "variables\n\n")
if (length(coef(x$model))) {
cat("Coefficients:\n")
print.default(
format(
coef(x$model),
digits = digits),
print.gap = 2,
quote = FALSE)
}
else cat("No coefficients\n")
cat("\n")
invisible(x)
}
predict.icr <- function(object, newdata, ...)
{
library(fastICA)
if (!inherits(object, "icr")) stop("object not of class \"icr\"")
if (missing(newdata))
{
return(fitted(object$model))
} else {
if (inherits(object, "icr.formula")) {
newdata <- as.data.frame(newdata)
rn <- row.names(newdata)
Terms <- delete.response(object$terms)
m <- model.frame(Terms, newdata, na.action = na.omit,
xlev = object$xlevels)
if (!is.null(cl <- attr(Terms, "dataClasses")))
.checkMFClasses(cl, m)
keep <- match(row.names(m), rn)
x <- model.matrix(Terms, m, contrasts = object$contrasts)
xint <- match("(Intercept)", colnames(x), nomatch = 0)
if (xint > 0)
x <- x[, -xint, drop = FALSE]
}
else {
if (is.null(dim(newdata)))
dim(newdata) <- c(1, length(newdata))
x <- as.matrix(newdata)
if (any(is.na(x)))
stop("missing values in 'x'")
keep <- 1:nrow(x)
rn <- rownames(x)
}
}
if(!is.null(object$names))
{
x <- x[, object$names, drop = FALSE]
}
if(!is.data.frame(x)) x <- as.data.frame(x)
x <- predict(object$ica, x)
predict(object$model, x, ...)
}
|
# This file was generated by Rcpp::compileAttributes
# Generator token: 10BE3573-1514-4C36-9D1C-5A225CD40393
# NOTE: auto-generated file -- do not edit by hand; any change here will be
# overwritten the next time Rcpp::compileAttributes() runs.
.isProbRcpp <- function(prob) {
.Call('markovchain_isProb', PACKAGE = 'markovchain', prob)
}
.isGenRcpp <- function(gen) {
.Call('markovchain_isGen', PACKAGE = 'markovchain', gen)
}
.canonicFormRcpp <- function(object) {
.Call('markovchain_canonicForm', PACKAGE = 'markovchain', object)
}
generatorToTransitionMatrix <- function(gen, byrow = TRUE) {
.Call('markovchain_generatorToTransitionMatrix', PACKAGE = 'markovchain', gen, byrow)
}
ctmcFit <- function(data, byrow = TRUE, name = "", confidencelevel = 0.95) {
.Call('markovchain_ctmcFit', PACKAGE = 'markovchain', data, byrow, name, confidencelevel)
}
seq2freqProb <- function(sequence) {
.Call('markovchain_seq2freqProb', PACKAGE = 'markovchain', sequence)
}
seq2matHigh <- function(sequence, order) {
.Call('markovchain_seq2matHigh', PACKAGE = 'markovchain', sequence, order)
}
createSequenceMatrix <- function(stringchar, toRowProbs = FALSE, sanitize = TRUE) {
.Call('markovchain_createSequenceMatrix', PACKAGE = 'markovchain', stringchar, toRowProbs, sanitize)
}
inferHyperparam <- function(transMatr = matrix(), scale = numeric(), data = character()) {
.Call('markovchain_inferHyperparam', PACKAGE = 'markovchain', transMatr, scale, data)
}
markovchainFit <- function(data, method = "mle", byrow = TRUE, nboot = 10L, laplacian = 0, name = "", parallel = FALSE, confidencelevel = 0.95, hyperparam = matrix()) {
.Call('markovchain_markovchainFit', PACKAGE = 'markovchain', data, method, byrow, nboot, laplacian, name, parallel, confidencelevel, hyperparam)
}
.commclassesKernelRcpp <- function(P) {
.Call('markovchain_commclassesKernel', PACKAGE = 'markovchain', P)
}
.communicatingClassesRcpp <- function(object) {
.Call('markovchain_communicatingClasses', PACKAGE = 'markovchain', object)
}
.recurrentClassesRcpp <- function(object) {
.Call('markovchain_recurrentClasses', PACKAGE = 'markovchain', object)
}
.commStatesFinderRcpp <- function(matr) {
.Call('markovchain_commStatesFinder', PACKAGE = 'markovchain', matr)
}
.summaryKernelRcpp <- function(object) {
.Call('markovchain_summaryKernel', PACKAGE = 'markovchain', object)
}
.firstpassageKernelRcpp <- function(P, i, n) {
.Call('markovchain_firstpassageKernel', PACKAGE = 'markovchain', P, i, n)
}
.gcdRcpp <- function(a, b) {
.Call('markovchain_gcd', PACKAGE = 'markovchain', a, b)
}
period <- function(object) {
.Call('markovchain_period', PACKAGE = 'markovchain', object)
}
predictiveDistribution <- function(stringchar, newData, hyperparam = matrix()) {
.Call('markovchain_predictiveDistribution', PACKAGE = 'markovchain', stringchar, newData, hyperparam)
}
priorDistribution <- function(transMatr, hyperparam = matrix()) {
.Call('markovchain_priorDistribution', PACKAGE = 'markovchain', transMatr, hyperparam)
}
.multinomialCIForRowRcpp <- function(x, confidencelevel) {
.Call('markovchain_multinomialCIForRow', PACKAGE = 'markovchain', x, confidencelevel)
}
.multinomialCIRcpp <- function(transMat, seqMat, confidencelevel) {
.Call('markovchain_multinomCI', PACKAGE = 'markovchain', transMat, seqMat, confidencelevel)
}
| /R/RcppExports.R | no_license | cryptomanic/markovchain | R | false | false | 3,263 | r | # This file was generated by Rcpp::compileAttributes
# Generator token: 10BE3573-1514-4C36-9D1C-5A225CD40393
.isProbRcpp <- function(prob) {
.Call('markovchain_isProb', PACKAGE = 'markovchain', prob)
}
.isGenRcpp <- function(gen) {
.Call('markovchain_isGen', PACKAGE = 'markovchain', gen)
}
.canonicFormRcpp <- function(object) {
.Call('markovchain_canonicForm', PACKAGE = 'markovchain', object)
}
generatorToTransitionMatrix <- function(gen, byrow = TRUE) {
.Call('markovchain_generatorToTransitionMatrix', PACKAGE = 'markovchain', gen, byrow)
}
ctmcFit <- function(data, byrow = TRUE, name = "", confidencelevel = 0.95) {
.Call('markovchain_ctmcFit', PACKAGE = 'markovchain', data, byrow, name, confidencelevel)
}
seq2freqProb <- function(sequence) {
.Call('markovchain_seq2freqProb', PACKAGE = 'markovchain', sequence)
}
seq2matHigh <- function(sequence, order) {
.Call('markovchain_seq2matHigh', PACKAGE = 'markovchain', sequence, order)
}
createSequenceMatrix <- function(stringchar, toRowProbs = FALSE, sanitize = TRUE) {
.Call('markovchain_createSequenceMatrix', PACKAGE = 'markovchain', stringchar, toRowProbs, sanitize)
}
inferHyperparam <- function(transMatr = matrix(), scale = numeric(), data = character()) {
.Call('markovchain_inferHyperparam', PACKAGE = 'markovchain', transMatr, scale, data)
}
markovchainFit <- function(data, method = "mle", byrow = TRUE, nboot = 10L, laplacian = 0, name = "", parallel = FALSE, confidencelevel = 0.95, hyperparam = matrix()) {
.Call('markovchain_markovchainFit', PACKAGE = 'markovchain', data, method, byrow, nboot, laplacian, name, parallel, confidencelevel, hyperparam)
}
.commclassesKernelRcpp <- function(P) {
.Call('markovchain_commclassesKernel', PACKAGE = 'markovchain', P)
}
.communicatingClassesRcpp <- function(object) {
.Call('markovchain_communicatingClasses', PACKAGE = 'markovchain', object)
}
.recurrentClassesRcpp <- function(object) {
.Call('markovchain_recurrentClasses', PACKAGE = 'markovchain', object)
}
.commStatesFinderRcpp <- function(matr) {
.Call('markovchain_commStatesFinder', PACKAGE = 'markovchain', matr)
}
.summaryKernelRcpp <- function(object) {
.Call('markovchain_summaryKernel', PACKAGE = 'markovchain', object)
}
.firstpassageKernelRcpp <- function(P, i, n) {
.Call('markovchain_firstpassageKernel', PACKAGE = 'markovchain', P, i, n)
}
.gcdRcpp <- function(a, b) {
.Call('markovchain_gcd', PACKAGE = 'markovchain', a, b)
}
period <- function(object) {
.Call('markovchain_period', PACKAGE = 'markovchain', object)
}
predictiveDistribution <- function(stringchar, newData, hyperparam = matrix()) {
.Call('markovchain_predictiveDistribution', PACKAGE = 'markovchain', stringchar, newData, hyperparam)
}
priorDistribution <- function(transMatr, hyperparam = matrix()) {
.Call('markovchain_priorDistribution', PACKAGE = 'markovchain', transMatr, hyperparam)
}
.multinomialCIForRowRcpp <- function(x, confidencelevel) {
.Call('markovchain_multinomialCIForRow', PACKAGE = 'markovchain', x, confidencelevel)
}
.multinomialCIRcpp <- function(transMat, seqMat, confidencelevel) {
.Call('markovchain_multinomCI', PACKAGE = 'markovchain', transMat, seqMat, confidencelevel)
}
|
#' @importFrom magrittr %>%
#' @export
magrittr::`%>%`
# rdname catm
# messages() with some of the same syntax as cat(): takes a sep argument and
# does not append a newline by default
# message()-based analogue of cat(): joins its arguments with `sep` and,
# unlike message(), does not append a trailing newline unless asked to.
catm <- function(..., sep = " ", appendLF = FALSE) {
  text <- paste(..., sep = sep)
  message(text, appendLF = appendLF)
}
# used in displaying verbose messages for tokens_select and dfm_select
#
# Emits one message line summarising a select operation, e.g.
# "kept 120 features and 2 documents, padded 3 features".
#   selection - "keep" (verb "kept") or anything else (verb "removed")
#   nfeats    - number of features kept/removed
#   ndocs     - number of documents kept/removed (0 suppresses the clause)
#   nfeatspad - number of features padded (0 suppresses the clause)
#   ndocspad  - number of documents padded (0 suppresses the clause)
message_select <- function(selection, nfeats, ndocs, nfeatspad = 0, ndocspad = 0) {
  catm(if (selection == "keep") "kept" else "removed", " ",
       format(nfeats, big.mark = ",", scientific = FALSE),
       " feature", if (nfeats != 1L) "s" else "", sep = "")
  if (ndocs > 0) {
    catm(" and ",
         format(ndocs, big.mark=",", scientific = FALSE),
         " document", if (ndocs != 1L) "s" else "",
         sep = "")
  }
  if ((nfeatspad + ndocspad) > 0) {
    catm(", padded ", sep = "")
  }
  if (nfeatspad > 0) {
    catm(format(nfeatspad, big.mark=",", scientific = FALSE),
         " feature", if (nfeatspad != 1L) "s" else "",
         sep = "")
  }
  if (ndocspad > 0) {
    if (nfeatspad > 0) catm(" and ", sep = "")
    catm(format(ndocspad, big.mark=",", scientific = FALSE),
         " document", if (ndocspad != 1L) "s" else "",
         sep = "")
  }
  # Final empty catm() terminates the message line with a newline.
  catm("", appendLF = TRUE)
}
##
## reassign the slots to an S4 dfm-like object
## necessary when some operation from the Matrix class obliterates them
## Ken B
# Copies every slot from `x_org` onto `x_new`, except the structural Matrix
# slots (Dim, Dimnames, i, p, x, factors) and any caller-supplied
# `exceptions`. Each assignment is wrapped in try() so that slots missing
# from the target class are skipped silently.
reassign_slots <- function(x_new, x_org, exceptions = NULL) {
  snames <- slotNames(class(x_org))
  snames <- setdiff(snames, c("Dim", "Dimnames", "i", "p", "x", "factors", exceptions))
  for (sname in snames) {
    try({
      slot(x_new, sname) <- slot(x_org, sname)
    }, silent = TRUE)
  }
  x_new
}
#' Function extending base::attributes()
#' @param x an object
#' @param overwrite if \code{TRUE}, replace the full attribute set with
#'   \code{value}; otherwise attach only attributes whose names are not
#'   already present on \code{x}
#' @param value new attributes
#' @keywords internal
"attributes<-" <- function(x, overwrite = TRUE, value) {
  if (!overwrite) {
    existing <- base::attributes(x)
    novel <- value[!(names(value) %in% names(existing))]
    value <- c(existing, novel)
  }
  base::attributes(x) <- value
  x
}
#' Function to assign multiple slots to a S4 object
#' @param x an S4 object
#' @param exceptions slots to ignore
#' @param value a list of attributes extracted by attributes()
#' @details Entries absent from the class definition, listed in
#'   \code{exceptions}, or whose value's base type differs from the slot's
#'   declared type are skipped silently.
#' @keywords internal
"slots<-" <- function(x, exceptions = c("Dim", "Dimnames", "i", "p", "x", "factors"), value) {
  slots <- methods::getSlots(class(x)[1])
  for (sname in names(value)) {
    if (!sname %in% names(slots) || sname %in% exceptions) next
    if (!identical(typeof(value[[sname]]), slots[[sname]])) next
    methods::slot(x, sname) <- value[[sname]]
  }
  return(x)
}
#' Utility function to create a object with new set of attributes
#' @param x an underlying R object of a new object
#' @param what type of object to create; currently only "tokens" is supported
#' @param attrs attributes of a new object
#' @param overwrite_attributes overwrite attributes of the input object, if \code{TRUE}
#' @keywords internal
create <- function(x, what, attrs = NULL, overwrite_attributes = FALSE, ...) {
  if (what == 'tokens') {
    class <- c('tokens', 'list')
  } else {
    # BUG FIX: previously an unrecognised `what` left the local `class`
    # unassigned, so structure() below silently captured base::class (a
    # function) as the class attribute; fail loudly instead.
    stop("unsupported object type: ", what)
  }
  x <- structure(x, class = class, ...)
  if (!is.null(attrs)) {
    attributes(x, overwrite_attributes) <- attrs
  }
  return(x)
}
#' Convert various input as pattern to a vector used in tokens_select,
#' tokens_compound and kwic.
#' @inheritParams pattern
#' @inheritParams valuetype
#' @param case_insensitive ignore the case of dictionary values if \code{TRUE}
#' @param concatenator concatenator that join multi-word expression in tokens object
#' @param remove_unigram ignore single-word patterns if \code{TRUE}
#' @return a list of integer vectors of type IDs, with the original patterns
#'   attached as the \code{"pattern"} attribute
#' @seealso regex2id
#' @keywords internal
pattern2id <- function(pattern, types, valuetype, case_insensitive,
                       concatenator = '_', remove_unigram = FALSE) {
  if (is.dfm(pattern))
    stop('dfm cannot be used as pattern')
  if (is.collocations(pattern)) {
    # Collocations are matched as fixed multi-word sequences: split each
    # collocation on whitespace and map the words directly onto type IDs.
    if (nrow(pattern) == 0) return(list())
    pattern <- stri_split_charclass(pattern$collocation, "\\p{Z}")
    pattern_id <- lapply(pattern, function(x) fastmatch::fmatch(x, types))
    # Keep only collocations whose every word exists in `types`.
    pattern_id <- pattern_id[vapply(pattern_id, function(x) all(!is.na(x)), logical(1))]
  } else {
    if (length(pattern) == 0) return(list())
    if (is.dictionary(pattern)) {
      # Flatten dictionary values and split multi-word values on the
      # concatenator before the ID lookup.
      pattern <- unlist(pattern, use.names = FALSE)
      pattern <- split_dictionary_values(pattern, concatenator)
    } else {
      pattern <- as.list(pattern)
    }
    if (remove_unigram)
      pattern <- pattern[lengths(pattern) > 1] # drop single-word pattern
    pattern_id <- regex2id(pattern, types, valuetype, case_insensitive)
  }
  attr(pattern_id, 'pattern') <- stri_c_list(pattern, sep = ' ')
  return(pattern_id)
}
#' Internal function for \code{select_types()} to check if a string is a regular expression
#' @param x a character string to be tested
#' @return \code{TRUE} if \code{x} contains any regex metacharacter
#' @keywords internal
is_regex <- function(x){
  special <- c(".", "(", ")", "^", "{", "}", "+", "$", "*", "?", "[", "]", "\\")
  any(stri_detect_fixed(x, special))
}
#' Internal function for \code{select_types()} to escape regular expressions
#'
#' This function escapes glob patterns before \code{utils:glob2rx()}, therefore * and ?
#' are unescaped.
#' @param x character vector to be escaped
#' @return \code{x} with regex metacharacters backslash-escaped; the glob
#'   wildcards \code{*} and \code{?} are deliberately left untouched
#' @keywords internal
escape_regex <- function(x){
  #stri_replace_all_regex(x, "([.()^\\{\\}+$*\\[\\]\\\\])", "\\\\$1") # escape any
  stri_replace_all_regex(x, "([.()^\\{\\}+$\\[\\]\\\\])", "\\\\$1") # allow glob
}
# function to check dots arguments against a list of permissible arguments
# Warn about ... arguments whose names are not in `permissible_args`.
# Silently returns NULL when `dots` is empty or all names are permitted.
check_dots <- function(dots, permissible_args = NULL) {
    if (length(dots) == 0) return()
    unused <- setdiff(names(dots), permissible_args)
    if (length(unused)) {
        # Singular/plural agreement: "Argument x" vs "Arguments x, y".
        noun <- if (length(unused) > 1) "Arguments " else "Argument "
        warning(noun, paste(unused, collapse = ', '), " not used.",
                noBreaks. = TRUE, call. = FALSE)
    }
}
#' Print friendly object class not defined message
#'
#' Checks valid methods and issues a friendlier error message in case the method is
#' undefined for the supplied object type.
#' @param object_class character describing the object class
#' @param function_name character which is the function name
#' @keywords internal
#' @examples
#' # as.tokens.default <- function(x, concatenator = "", ...) {
#' # stop(friendly_class_undefined_message(class(x), "as.tokens"))
#' # }
friendly_class_undefined_message <- function(object_class, function_name) {
    # NOTE(review): object_class is accepted but not used in the message;
    # kept for signature compatibility with existing callers.
    # All S3 methods registered for the generic, e.g. "as.tokens.tokenizer".
    method_names <- as.character(utils::methods(function_name))
    # Strip the "generic." prefix (fixed-string match) to leave class names.
    valid_object_types <- sub(paste0(function_name, "."), "", method_names,
                              fixed = TRUE)
    valid_object_types <- valid_object_types[valid_object_types != "default"]
    paste0(function_name, "() only works on ",
           paste(valid_object_types, collapse = ", "),
           " objects.")
}
| /R/utils.R | no_license | TalkStats/quanteda | R | false | false | 7,122 | r | #' @importFrom magrittr %>%
#' @export
magrittr::`%>%`
# rdname catm
# messages() with some of the same syntax as cat(): takes a sep argument and
# does not append a newline by default
catm <- function(..., sep = " ", appendLF = FALSE) {
    # cat()-like messaging: join the pieces with `sep` and emit them via
    # message(); unlike message()'s default, no newline is appended unless
    # appendLF = TRUE is requested.
    text <- paste(..., sep = sep)
    message(text, appendLF = appendLF)
}
# used in displaying verbose messages for tokens_select and dfm_select
message_select <- function(selection, nfeats, ndocs, nfeatspad = 0, ndocspad = 0) {
# Emits a single verbose status line, assembled piecewise with catm()
# (which does not append newlines), e.g.
# "removed 12 features and 1 document, padded 3 features".
# Leading verb plus the feature count, with singular/plural agreement.
catm(if (selection == "keep") "kept" else "removed", " ",
format(nfeats, big.mark = ",", scientific = FALSE),
" feature", if (nfeats != 1L) "s" else "", sep = "")
# Document count is only mentioned when documents were affected.
if (ndocs > 0) {
catm(" and ",
format(ndocs, big.mark=",", scientific = FALSE),
" document", if (ndocs != 1L) "s" else "",
sep = "")
}
# Padding clause, present when any features or documents were padded.
if ((nfeatspad + ndocspad) > 0) {
catm(", padded ", sep = "")
}
if (nfeatspad > 0) {
catm(format(nfeatspad, big.mark=",", scientific = FALSE),
" feature", if (nfeatspad != 1L) "s" else "",
sep = "")
}
if (ndocspad > 0) {
if (nfeatspad > 0) catm(" and ", sep = "")
catm(format(ndocspad, big.mark=",", scientific = FALSE),
" document", if (ndocspad != 1L) "s" else "",
sep = "")
}
# Terminate the assembled line with a newline.
catm("", appendLF = TRUE)
}
##
## reassign the slots to an S4 dfm-like object
## necessary when some operation from the Matrix class obliterates them
## Ken B
reassign_slots <- function(x_new, x_org, exceptions = NULL) {
    # Copy S4 slot values from x_org onto x_new, skipping the sparse-Matrix
    # structural slots (plus any caller-supplied exceptions). Each copy is
    # wrapped in try() so a slot missing on x_new is ignored silently.
    skip <- c("Dim", "Dimnames", "i", "p", "x", "factors", exceptions)
    for (nm in setdiff(slotNames(class(x_org)), skip)) {
        try(slot(x_new, nm) <- slot(x_org, nm), silent = TRUE)
    }
    x_new
}
#' Function extending base::attributes()
#' @param x an object
#' @param overwrite if \code{TRUE}, overwrite old attributes
#' @param value new attributes
#' @keywords internal
"attributes<-" <- function(x, overwrite = TRUE, value) {
if (overwrite) {
base::attributes(x) <- value
} else {
base::attributes(x) <- c(base::attributes(x), value[!(names(value) %in% names(base::attributes(x)))])
}
return(x)
}
#' Function to assign multiple slots to a S4 object
#' @param x an S4 object
#' @param exceptions slots to ignore
#' @param value a list of attributes extracted by attributes()
#' @keywords internal
"slots<-" <- function(x, exceptions = c("Dim", "Dimnames", "i", "p", "x", "factors"), value) {
slots <- methods::getSlots(class(x)[1])
for (sname in names(value)) {
if (!sname %in% names(slots) || sname %in% exceptions) next
if (!identical(typeof(value[[sname]]), slots[[sname]])) next
methods::slot(x, sname) <- value[[sname]]
}
return(x)
}
#' Utility function to create a object with new set of attributes
#' @param x an underlying R object of a new object
#' @param attrs attributes of a new object
#' @param overwrite_attributes overwrite attributes of the input object, if \code{TRUE}
#' @keywords internal
create <- function(x, what, attrs = NULL, overwrite_attributes = FALSE, ...) {
    # Build a classed object from x; `what` selects the class vector.
    if (what == 'tokens') {
        class <- c('tokens', 'list')
    } else {
        # Previously an unrecognised `what` left the local `class` unbound,
        # so the symbol resolved to base::class() and that *function* was
        # silently installed as the class attribute. Fail fast instead.
        stop("unknown object type: ", what)
    }
    x <- structure(x, class = class, ...)
    if (!is.null(attrs)) {
        # Delegates to the package's custom attributes<-() replacement
        # (defined in this file), which honours the overwrite flag.
        attributes(x, overwrite_attributes) <- attrs
    }
    return(x)
}
#' Convert various input as pattern to a vector used in tokens_select,
#' tokens_compound and kwic.
#' @inheritParams pattern
#' @inheritParams valuetype
#' @param case_insensitive ignore the case of dictionary values if \code{TRUE}
#' @param concatenator concatenator that join multi-word expression in tokens object
#' @param remove_unigram ignore single-word patterns if \code{TRUE}
#' @seealso regex2id
#' @keywords internal
pattern2id <- function(pattern, types, valuetype, case_insensitive,
concatenator = '_', remove_unigram = FALSE) {
# A dfm carries no usable pattern strings, so reject it outright.
if (is.dfm(pattern))
stop('dfm cannot be used as pattern')
if (is.collocations(pattern)) {
# Collocations branch: split each collocation string on Unicode
# whitespace ("\\p{Z}") and match the words against `types` exactly
# (fastmatch::fmatch -- no regex/glob interpretation here).
if (nrow(pattern) == 0) return(list())
pattern <- stri_split_charclass(pattern$collocation, "\\p{Z}")
pattern_id <- lapply(pattern, function(x) fastmatch::fmatch(x, types))
# Keep only collocations for which every word was found among the types.
pattern_id <- pattern_id[vapply(pattern_id, function(x) all(!is.na(x)), logical(1))]
} else {
if (length(pattern) == 0) return(list())
if (is.dictionary(pattern)) {
# Flatten dictionary values, then split multi-word entries on the
# concatenator before matching.
pattern <- unlist(pattern, use.names = FALSE)
pattern <- split_dictionary_values(pattern, concatenator)
} else {
pattern <- as.list(pattern)
}
if (remove_unigram)
pattern <- pattern[lengths(pattern) > 1] # drop single-word pattern
# regex2id (package-internal helper, see @seealso above) resolves each
# pattern to type indices according to `valuetype`.
pattern_id <- regex2id(pattern, types, valuetype, case_insensitive)
}
# Record the human-readable pattern strings alongside the resolved ids.
attr(pattern_id, 'pattern') <- stri_c_list(pattern, sep = ' ')
return(pattern_id)
}
#' Internal function for \code{select_types()} to check if a string is a regular expression
#' @param x a character string to be tested
#' @keywords internal
is_regex <- function(x){
    # Characters that carry special meaning in regular expressions; the
    # presence of any of them anywhere in x marks it as a (potential) regex.
    meta_chars <- c(".", "(", ")", "^", "{", "}", "+", "$", "*", "?", "[", "]", "\\")
    any(stri_detect_fixed(x, meta_chars))
}
#' Internal function for \code{select_types()} to escape regular expressions
#'
#' This function escapes glob patterns before \code{utils:glob2rx()}, therefore * and ?
#' are unescaped.
#' @param x character vector to be escaped
#' @keywords internal
escape_regex <- function(x){
    # Escape regex metacharacters while deliberately leaving * and ?
    # untouched, so glob wildcards survive for utils::glob2rx().
    # (A stricter variant escaping * and ? as well would be
    #  "([.()^\\{\\}+$*\\[\\]\\\\])".)
    glob_safe_pattern <- "([.()^\\{\\}+$\\[\\]\\\\])"
    stri_replace_all_regex(x, glob_safe_pattern, "\\\\$1")
}
# function to check dots arguments against a list of permissible arguments
# Warn about ... arguments whose names are not in `permissible_args`.
# Silently returns NULL when `dots` is empty or all names are permitted.
check_dots <- function(dots, permissible_args = NULL) {
    if (length(dots) == 0) return()
    unused <- setdiff(names(dots), permissible_args)
    if (length(unused)) {
        # Singular/plural agreement: "Argument x" vs "Arguments x, y".
        noun <- if (length(unused) > 1) "Arguments " else "Argument "
        warning(noun, paste(unused, collapse = ', '), " not used.",
                noBreaks. = TRUE, call. = FALSE)
    }
}
#' Print friendly object class not defined message
#'
#' Checks valid methods and issues a friendlier error message in case the method is
#' undefined for the supplied object type.
#' @param object_class character describing the object class
#' @param function_name character which is the function name
#' @keywords internal
#' @examples
#' # as.tokens.default <- function(x, concatenator = "", ...) {
#' # stop(friendly_class_undefined_message(class(x), "as.tokens"))
#' # }
friendly_class_undefined_message <- function(object_class, function_name) {
    # NOTE(review): object_class is accepted but not used in the message;
    # kept for signature compatibility with existing callers.
    # All S3 methods registered for the generic, e.g. "as.tokens.tokenizer".
    method_names <- as.character(utils::methods(function_name))
    # Strip the "generic." prefix (fixed-string match) to leave class names.
    valid_object_types <- sub(paste0(function_name, "."), "", method_names,
                              fixed = TRUE)
    valid_object_types <- valid_object_types[valid_object_types != "default"]
    paste0(function_name, "() only works on ",
           paste(valid_object_types, collapse = ", "),
           " objects.")
}
|
#' sboost Prediction Function
#'
#' Make predictions for a feature set based on an sboost classifier.
#'
#' @param object \emph{sboost_classifier} S3 object output from sboost.
#' @param features feature set data.frame.
#' @param scores if true, raw scores generated; if false, predictions are generated.
#' @param ... further arguments passed to or from other methods.
#' @return Predictions in the form of a vector, or scores in the form of a vector.
#' The index of the vector aligns the predictions or scores with the rows of
#' the features. Scores represent the sum of all votes for the positive outcome
#' minus the sum of all votes for the negative outcome.
#' @seealso \code{\link{sboost}} documentation.
#' @examples
#' # malware
#' malware_classifier <- sboost(malware[-1], malware[1], iterations = 5, positive = 1)
#' predict(malware_classifier, malware[-1], scores = TRUE)
#' predict(malware_classifier, malware[-1])
#'
#' # mushrooms
#' mushroom_classifier <- sboost(mushrooms[-1], mushrooms[1], iterations = 5, positive = "p")
#' predict(mushroom_classifier, mushrooms[-1], scores = TRUE)
#' predict(mushroom_classifier, mushrooms[-1])
#' @export
predict.sboost_classifier <- function(object, features, scores = FALSE, ...) {
# S3 predict method for sboost_classifier objects (see roxygen block above).
# PREPARE INPUT
# --------------------------------------------------------------------------------
# process_classifier_input()/process_feature_input() are package-internal
# helpers defined elsewhere; presumably they validate/encode the inputs
# for the C++ routine -- confirm in their own source.
processed_classifier <- process_classifier_input(object, features)
processed_features <- process_feature_input(features)
# MAKE PREDICTIONS
# --------------------------------------------------------------------------------
# predict_cpp() yields one numeric score per row of `features` (net votes
# for the positive outcome, per the roxygen @return above).
predictions <- predict_cpp(processed_features, processed_classifier)
if (scores) return(predictions)
# Positive score -> positive label, otherwise negative; zero goes to the
# negative label because the comparison is strict.
predictions <- dplyr::if_else(predictions > 0,
object$outcomes["positive"],
object$outcomes["negative"])
return(unname(predictions))
}
#' Predict responses from a raw sboost stump table.
#'
#' @param object Numeric matrix of decision stumps with named columns: either
#'   the 5-column form c("feature", "split", "mean_behind", "mean_ahead",
#'   "vote") or the 7-column form carrying per-side linear coefficients
#'   ("beta0_*"/"beta1_*") plus "feature", "split" and "vote".
#' @param features Matrix of observations, one per row; the "feature" entries
#'   in `object` index its columns.
#' @param scores Accepted for interface symmetry with
#'   predict.sboost_classifier(); currently unused here (preserved from the
#'   original signature).
#' @param type How weighted stump predictions are combined: "median"
#'   (weighted median) or "mean" (stats::weighted.mean).
#' @param ... Ignored; present so the signature is compatible with the
#'   predict() generic.
#' @return Numeric vector with one prediction per row of `features`.
predict.sboost <- function(object, features, scores = FALSE, type = "median", ...) {
    # Fail fast with a clear message on an unknown combination rule; the
    # original fell through switch() and later errored with a cryptic
    # "could not find function" message.
    type <- match.arg(type, c("median", "mean"))
    # Combiner for per-stump predictions, weighted by each stump's vote.
    combine_stumps <- switch(
        type,
        "median" = function(x, w) {
            # Weighted median: smallest value whose cumulative weight share
            # reaches one half.
            ord <- order(x)
            share <- cumsum(w[ord]) / sum(w)
            x[ord][which(share >= 0.5)[1]]
        },
        "mean" = stats::weighted.mean
    )
    # The stump-table layout decides which per-stump prediction form applies.
    if (ncol(object) == 5) {
        # Constant-valued stumps: one mean on each side of the split.
        predict_observation <- function(obs, stumps) {
            preds <- apply(stumps, 1, function(s) {
                ifelse(obs[s["feature"]] < s["split"],
                       s["mean_behind"],
                       s["mean_ahead"])
            })
            combine_stumps(preds, stumps[, "vote"])
        }
    } else if (ncol(object) == 7) {
        # Linear stumps: a separate linear model on each side of the split.
        predict_observation <- function(obs, stumps) {
            preds <- apply(stumps, 1, function(s) {
                ifelse(obs[s["feature"]] < s["split"],
                       obs[s["feature"]] * s["beta1_behind"] + s["beta0_behind"],
                       obs[s["feature"]] * s["beta1_ahead"] + s["beta0_ahead"])
            })
            combine_stumps(preds, stumps[, "vote"])
        }
    } else {
        # Previously this fell through and failed later with
        # "could not find function \"predict_observation\"".
        stop("unrecognised stump table: expected 5 or 7 columns, got ",
             ncol(object))
    }
    # Predict every observation (row) of the feature matrix.
    apply(features, 1, function(row) predict_observation(row, object))
}
| /R/predict.R | permissive | cran/sboost | R | false | false | 3,230 | r | #` sboost Prediction Function
#'
#' Make predictions for a feature set based on an sboost classifier.
#'
#' @param object \emph{sboost_classifier} S3 object output from sboost.
#' @param features feature set data.frame.
#' @param scores if true, raw scores generated; if false, predictions are generated.
#' @param ... further arguments passed to or from other methods.
#' @return Predictions in the form of a vector, or scores in the form of a vector.
#' The index of the vector aligns the predictions or scores with the rows of
#' the features. Scores represent the sum of all votes for the positive outcome
#' minus the sum of all votes for the negative outcome.
#' @seealso \code{\link{sboost}} documentation.
#' @examples
#' # malware
#' malware_classifier <- sboost(malware[-1], malware[1], iterations = 5, positive = 1)
#' predict(malware_classifier, malware[-1], scores = TRUE)
#' predict(malware_classifier, malware[-1])
#'
#' # mushrooms
#' mushroom_classifier <- sboost(mushrooms[-1], mushrooms[1], iterations = 5, positive = "p")
#' predict(mushroom_classifier, mushrooms[-1], scores = TRUE)
#' predict(mushroom_classifier, mushrooms[-1])
#' @export
predict.sboost_classifier <- function(object, features, scores = FALSE, ...) {
# S3 predict method for sboost_classifier objects (see roxygen block above).
# PREPARE INPUT
# --------------------------------------------------------------------------------
# process_classifier_input()/process_feature_input() are package-internal
# helpers defined elsewhere; presumably they validate/encode the inputs
# for the C++ routine -- confirm in their own source.
processed_classifier <- process_classifier_input(object, features)
processed_features <- process_feature_input(features)
# MAKE PREDICTIONS
# --------------------------------------------------------------------------------
# predict_cpp() yields one numeric score per row of `features` (net votes
# for the positive outcome, per the roxygen @return above).
predictions <- predict_cpp(processed_features, processed_classifier)
if (scores) return(predictions)
# Positive score -> positive label, otherwise negative; zero goes to the
# negative label because the comparison is strict.
predictions <- dplyr::if_else(predictions > 0,
object$outcomes["positive"],
object$outcomes["negative"])
return(unname(predictions))
}
#' Predict responses from a raw sboost stump table.
#'
#' @param object Numeric matrix of decision stumps with named columns: either
#'   the 5-column form c("feature", "split", "mean_behind", "mean_ahead",
#'   "vote") or the 7-column form carrying per-side linear coefficients
#'   ("beta0_*"/"beta1_*") plus "feature", "split" and "vote".
#' @param features Matrix of observations, one per row; the "feature" entries
#'   in `object` index its columns.
#' @param scores Accepted for interface symmetry with
#'   predict.sboost_classifier(); currently unused here (preserved from the
#'   original signature).
#' @param type How weighted stump predictions are combined: "median"
#'   (weighted median) or "mean" (stats::weighted.mean).
#' @param ... Ignored; present so the signature is compatible with the
#'   predict() generic.
#' @return Numeric vector with one prediction per row of `features`.
predict.sboost <- function(object, features, scores = FALSE, type = "median", ...) {
    # Fail fast with a clear message on an unknown combination rule; the
    # original fell through switch() and later errored with a cryptic
    # "could not find function" message.
    type <- match.arg(type, c("median", "mean"))
    # Combiner for per-stump predictions, weighted by each stump's vote.
    combine_stumps <- switch(
        type,
        "median" = function(x, w) {
            # Weighted median: smallest value whose cumulative weight share
            # reaches one half.
            ord <- order(x)
            share <- cumsum(w[ord]) / sum(w)
            x[ord][which(share >= 0.5)[1]]
        },
        "mean" = stats::weighted.mean
    )
    # The stump-table layout decides which per-stump prediction form applies.
    if (ncol(object) == 5) {
        # Constant-valued stumps: one mean on each side of the split.
        predict_observation <- function(obs, stumps) {
            preds <- apply(stumps, 1, function(s) {
                ifelse(obs[s["feature"]] < s["split"],
                       s["mean_behind"],
                       s["mean_ahead"])
            })
            combine_stumps(preds, stumps[, "vote"])
        }
    } else if (ncol(object) == 7) {
        # Linear stumps: a separate linear model on each side of the split.
        predict_observation <- function(obs, stumps) {
            preds <- apply(stumps, 1, function(s) {
                ifelse(obs[s["feature"]] < s["split"],
                       obs[s["feature"]] * s["beta1_behind"] + s["beta0_behind"],
                       obs[s["feature"]] * s["beta1_ahead"] + s["beta0_ahead"])
            })
            combine_stumps(preds, stumps[, "vote"])
        }
    } else {
        # Previously this fell through and failed later with
        # "could not find function \"predict_observation\"".
        stop("unrecognised stump table: expected 5 or 7 columns, got ",
             ncol(object))
    }
    # Predict every observation (row) of the feature matrix.
    apply(features, 1, function(row) predict_observation(row, object))
}
|
# Title : TODO
# Objective : TODO
# Created by: yichin
# Created on: 2020/06/23
yichin <- data.frame(1:5, 1:5)
x <- 2
for(i in 1:5){
x[i] <- x + i
}
for (){
} | /arduino_code/test.R | no_license | yichin-weng/environment_sensoro_system | R | false | false | 166 | r | # Title : TODO
# Objective : TODO
# Created by: yichin
# Created on: 2020/06/23
yichin <- data.frame(1:5, 1:5)
x <- 2
for(i in 1:5){
x[i] <- x + i
}
for (){
} |
geom_tallrect <- function
### ggplot2 geom with xmin and xmax aesthetics that covers the entire
### y range.
(mapping=NULL,
data=NULL,
stat="identity",
position="identity",
...){
# NOTE(review): this relies on the old proto-based ggplot2 internals
# (ggplot2:::GeomRect, $new()) and will not work with modern ggplot2's
# ggproto system -- confirm the targeted ggplot2 version.
require(proto)
require(grid)
GeomTallRect <- proto(ggplot2:::GeomRect,{
objname <- "tallrect"
required_aes <- c("xmin", "xmax")
draw <- draw_groups <- function(.,data,scales,coordinates,
ymin=0,ymax=1,...){
# Vertical extent is expressed in npc units (0..1 of the panel), so the
# rectangles always span the full panel height regardless of data range.
ymin <- unit(ymin,"npc")
ymax <- unit(ymax,"npc")
# Map data coordinates into the panel's native space before drawing.
dtrans <- ggplot2::coord_transform(coordinates, data, scales)
with(dtrans, ggname(.$my_name(), {
rectGrob(xmin, ymin, xmax - xmin, ymax-ymin,
default.units = "native", just = c("left", "bottom"),
gp=gpar(
col=colour, fill=alpha(fill, alpha),
lwd=size * .pt, lty=linetype, lineend="butt"
)
)
}))
}
})
GeomTallRect$new(mapping = mapping, data = data, stat = stat,
position = position, ...)
}
| /R/geom_tallrect.R | no_license | Bhanditz/bams | R | false | false | 1,060 | r | geom_tallrect <- function
### ggplot2 geom with xmin and xmax aesthetics that covers the entire
### y range.
(mapping=NULL,
data=NULL,
stat="identity",
position="identity",
...){
require(proto)
require(grid)
GeomTallRect <- proto(ggplot2:::GeomRect,{
objname <- "tallrect"
required_aes <- c("xmin", "xmax")
draw <- draw_groups <- function(.,data,scales,coordinates,
ymin=0,ymax=1,...){
ymin <- unit(ymin,"npc")
ymax <- unit(ymax,"npc")
dtrans <- ggplot2::coord_transform(coordinates, data, scales)
with(dtrans, ggname(.$my_name(), {
rectGrob(xmin, ymin, xmax - xmin, ymax-ymin,
default.units = "native", just = c("left", "bottom"),
gp=gpar(
col=colour, fill=alpha(fill, alpha),
lwd=size * .pt, lty=linetype, lineend="butt"
)
)
}))
}
})
GeomTallRect$new(mapping = mapping, data = data, stat = stat,
position = position, ...)
}
|
.fittedW <- function(p,q,fit){
library(locfit)
l <- log(q+1)
data=data.frame(cbind(p,l))
w_fit <- predict(fit,data)
return(w_fit)
} | /R/fittedW.R | no_license | lianliu09/QNB | R | false | false | 148 | r | .fittedW <- function(p,q,fit){
library(locfit)
l <- log(q+1)
data=data.frame(cbind(p,l))
w_fit <- predict(fit,data)
return(w_fit)
} |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/streamSetApi.r
\name{streamSet$getPlotAdHoc}
\alias{streamSet$getPlotAdHoc}
\title{Returns values of attributes for the specified streams over the specified time
range suitable for plotting over the number of intervals (typically represents pixels).}
\arguments{
\item{webId}{The ID of a stream. Multiple streams may be specified with multiple instances
of the parameter.}
\item{endTime}{An optional end time. The default is '*'. Note that if endTime is earlier
than startTime, the resulting values will be in time-descending order.}
\item{intervals}{The number of intervals to plot over. Typically, this would be the number
of horizontal pixels in the trend. The default is '24'. For each interval, the data
available is examined and significant values are returned. Each interval can produce up to
5 values if they are unique, the first value in the interval, the last value, the highest
value, the lowest value and at most one exceptional point (bad status or digital state).}
\item{selectedFields}{List of fields to be returned in the response, separated by
semicolons (;). If this parameter is not specified, all available fields will be
returned.}
\item{sortField}{The field or property of the object used to sort the returned collection.
For better performance, by default no sorting is applied. 'Name' is the only supported
field by which to sort.}
\item{sortOrder}{The order that the returned collection is sorted. The default is
'Ascending'}
\item{startTime}{An optional start time. The default is '*-1d'.}
\item{timeZone}{The time zone in which the time string will be interpreted. This parameter
will be ignored if a time zone is specified in the time string. If no time zone is
specified in either places, the PI Web API server time zone will be used.}
\item{webIdType}{Optional parameter. Used to specify the type of WebID. Useful for URL
brevity and other special cases. Default is the value of the configuration item
"WebIDType".}
}
\value{
Plot values of the streams that meet the specified conditions.
}
\description{
Returns values of attributes for the specified streams over the specified time range
suitable for plotting over the number of intervals (typically represents pixels).
}
| /man/streamSet-cash-getPlotAdHoc.Rd | permissive | eddyrene/PI-Web-API-Client-R | R | false | true | 2,368 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/streamSetApi.r
\name{streamSet$getPlotAdHoc}
\alias{streamSet$getPlotAdHoc}
\title{Returns values of attributes for the specified streams over the specified time
range suitable for plotting over the number of intervals (typically represents pixels).}
\arguments{
\item{webId}{The ID of a stream. Multiple streams may be specified with multiple instances
of the parameter.}
\item{endTime}{An optional end time. The default is '*'. Note that if endTime is earlier
than startTime, the resulting values will be in time-descending order.}
\item{intervals}{The number of intervals to plot over. Typically, this would be the number
of horizontal pixels in the trend. The default is '24'. For each interval, the data
available is examined and significant values are returned. Each interval can produce up to
5 values if they are unique, the first value in the interval, the last value, the highest
value, the lowest value and at most one exceptional point (bad status or digital state).}
\item{selectedFields}{List of fields to be returned in the response, separated by
semicolons (;). If this parameter is not specified, all available fields will be
returned.}
\item{sortField}{The field or property of the object used to sort the returned collection.
For better performance, by default no sorting is applied. 'Name' is the only supported
field by which to sort.}
\item{sortOrder}{The order that the returned collection is sorted. The default is
'Ascending'}
\item{startTime}{An optional start time. The default is '*-1d'.}
\item{timeZone}{The time zone in which the time string will be interpreted. This parameter
will be ignored if a time zone is specified in the time string. If no time zone is
specified in either places, the PI Web API server time zone will be used.}
\item{webIdType}{Optional parameter. Used to specify the type of WebID. Useful for URL
brevity and other special cases. Default is the value of the configuration item
"WebIDType".}
}
\value{
Plot values of the streams that meet the specified conditions.
}
\description{
Returns values of attributes for the specified streams over the specified time range
suitable for plotting over the number of intervals (typically represents pixels).
}
|
# one-sample t-test
#http://www.stat.columbia.edu/~martin/W2024/R2.pdf
x = c(0.593, 0.142, 0.329, 0.691, 0.231, 0.793, 0.519, 0.392, 0.418)
t.test(x, alternative="greater", mu=0.3) | /R Codes/Lecture 4. R T test/Lecture 4.1.R. one-sample t-test.R | permissive | hejibo/Psychological-Statistics | R | false | false | 182 | r |
# one-sample t-test
#http://www.stat.columbia.edu/~martin/W2024/R2.pdf
x = c(0.593, 0.142, 0.329, 0.691, 0.231, 0.793, 0.519, 0.392, 0.418)
t.test(x, alternative="greater", mu=0.3) |
#### Individual Terms Analysis Code ####
### Written by Matthew Colón ###
rm(list = ls()) # Clear the workspace
## Libraries. Note: we should probably use packrat for version control
#Use these commands to install RSelenium if needed. Note: you have to install a
#Java development kit to make it run. See:
#https://www.oracle.com/technetwork/java/javase/downloads/jdk10-downloads-4416644.html
#library(devtools) install_version("binman",
#version = "0.1.0", repos = "https://cran.uni-muenster.de/")
#install_version("wdman", version = "0.2.2", repos =
#"https://cran.uni-muenster.de/") install_version("RSelenium", version =
#"1.7.1", repos = "https://cran.uni-muenster.de/")
library(tidyverse)
library(readr)
library(RSelenium)
#library(rvest) # We should consider using this in lieu of RSelenium for the queries
library(stringr) # We use str_split() and and str_replace() in get_number()
library(profvis) # We use pause()
library(zoo)
library(ggplot2)
## Source primary functions
source('primary_functions.R')
#### Individual Cloud Computing Term Hits Scraping ####
#cloud_terms <- list(c("cloud computing"), c("cloud technology"), c("cloud resources"), c("cloud storage"),
# c("software as a service"), c("cloud applications"), c("cloud services"),
# c("cloud services"), c("cloud services"), c("cloud services"), c("cloud services"), c("cloud services"),
# c("cloud services"), c("cloud services"), c("cloud services"))
cloud_filenames <- c("cloud_computing_term.RData", "cloud_technology_term.RData", "cloud_resources_term.RData",
"cloud_storage_term.RData", "software_as_a_service_term.RData", "cloud_applications_term.RData")
#generate_datafiles(test_words = cloud_terms, files = cloud_filenames, nsnip = NULL, first_month = "Jan",
# first_year = 2000)
### RUN THIS LINE IN TERMINAL ONCE DOCKER IS ACTIVATED!!!
#docker run -d -p 4445:4444 selenium/standalone-chrome
cloud_terms <- c("cloud computing","cloud technology", "cloud resources","cloud storage",
"software as a service","cloud applications", "cloud services", "public cloud", "cloud business", "private cloud",
"cloud service", "hybrid cloud", "cloud platform", "cloud infrastructure", "cloud solutions", "cloud providers",
"cloud offering", "cloud revenue", "cloud offerings", "cloud solution", "cloud based", "service cloud",
"cloud customers", "cloud data", "enterprise cloud", "cloud environment")
remDr <- get_awn_session()
### This line of code will run the appropraite queries for each term, and create a data frame with the number
### of hits and proportion of hits for each term as well as the total number of hits for each month
#cloud_term_hits = create_individual_term_df(cloud_terms)
cloud_term_hits = term_hits
create_individual_term_plot(cloud_term_hits, cloud_terms, TRUE)
create_individual_term_plot(cloud_term_hits, cloud_terms, FALSE)
write_csv(cloud_term_hits, "~/Desktop/Research/updated_cloud_term_hits.csv")
remDr$close()
| /individual_terms_analysis.R | no_license | nick-short/AWN-Web-Scraping | R | false | false | 3,050 | r | #### Individual Terms Analysis Code ####
### Written by Matthew Colón ###
rm(list = ls()) # Clear the workspace
## Libraries. Note: we should probably use packrat for version control
#Use these commands to install RSelenium if needed. Note: you have to install a
#Java development kit to make it run. See:
#https://www.oracle.com/technetwork/java/javase/downloads/jdk10-downloads-4416644.html
#library(devtools) install_version("binman",
#version = "0.1.0", repos = "https://cran.uni-muenster.de/")
#install_version("wdman", version = "0.2.2", repos =
#"https://cran.uni-muenster.de/") install_version("RSelenium", version =
#"1.7.1", repos = "https://cran.uni-muenster.de/")
## Scrape monthly hit counts for cloud-computing-related search terms via
## RSelenium, plot the series, and export them to CSV. The scraping helpers
## (get_awn_session(), create_individual_term_df(), create_individual_term_plot())
## are presumably defined in primary_functions.R -- TODO confirm.
library(tidyverse)
library(readr)
library(RSelenium)
#library(rvest) # We should consider using this in lieu of RSelenium for the queries
library(stringr) # We use str_split() and str_replace() in get_number()
library(profvis) # We use pause()
library(zoo)
library(ggplot2)
## Source primary functions
source('primary_functions.R')
#### Individual Cloud Computing Term Hits Scraping ####
#cloud_terms <- list(c("cloud computing"), c("cloud technology"), c("cloud resources"), c("cloud storage"),
# c("software as a service"), c("cloud applications"), c("cloud services"),
# c("cloud services"), c("cloud services"), c("cloud services"), c("cloud services"), c("cloud services"),
# c("cloud services"), c("cloud services"), c("cloud services"))
# .RData files that generate_datafiles() would write, one per term (not used further in this script).
cloud_filenames <- c("cloud_computing_term.RData", "cloud_technology_term.RData", "cloud_resources_term.RData",
"cloud_storage_term.RData", "software_as_a_service_term.RData", "cloud_applications_term.RData")
#generate_datafiles(test_words = cloud_terms, files = cloud_filenames, nsnip = NULL, first_month = "Jan",
# first_year = 2000)
### RUN THIS LINE IN TERMINAL ONCE DOCKER IS ACTIVATED!!!
#docker run -d -p 4445:4444 selenium/standalone-chrome
# Search terms to query; one monthly hit-count series is produced per term.
cloud_terms <- c("cloud computing","cloud technology", "cloud resources","cloud storage",
"software as a service","cloud applications", "cloud services", "public cloud", "cloud business", "private cloud",
"cloud service", "hybrid cloud", "cloud platform", "cloud infrastructure", "cloud solutions", "cloud providers",
"cloud offering", "cloud revenue", "cloud offerings", "cloud solution", "cloud based", "service cloud",
"cloud customers", "cloud data", "enterprise cloud", "cloud environment")
remDr <- get_awn_session() # open the RSelenium session (requires the Docker container started above)
### This line of code will run the appropriate queries for each term, and create a data frame with the number
### of hits and proportion of hits for each term as well as the total number of hits for each month
#cloud_term_hits = create_individual_term_df(cloud_terms)
# NOTE(review): `term_hits` is not defined anywhere in this script -- it must
# already exist in the workspace (e.g. from a previous create_individual_term_df()
# run). Verify before running non-interactively.
cloud_term_hits = term_hits
create_individual_term_plot(cloud_term_hits, cloud_terms, TRUE)
create_individual_term_plot(cloud_term_hits, cloud_terms, FALSE)
write_csv(cloud_term_hits, "~/Desktop/Research/updated_cloud_term_hits.csv")
remDr$close() # shut down the Selenium session
|
% Generated by roxygen2 (4.0.2): do not edit by hand
\name{getBMRFilteredFeatures}
\alias{getBMRFilteredFeatures}
\title{Extract the feature selection results from a benchmark result.}
\usage{
getBMRFilteredFeatures(bmr, task.ids = NULL, learner.ids = NULL,
as.df = FALSE)
}
\arguments{
\item{bmr}{[\code{\link{BenchmarkResult}}]\cr
Benchmark result.}
\item{task.ids}{[\code{character(1)}]\cr
Restrict result to certain tasks.
Default is all.}
\item{learner.ids}{[\code{character(1)}]\cr
Restrict result to certain learners.
Default is all.}
\item{as.df}{[\code{logical(1)}]\cr
Should the result be returned as one data.frame rather than a list of lists of objects?
Default is \code{FALSE}.}
}
\value{
[\code{list} | \code{data.frame}]. See above.
}
\description{
Returns a list of lists of \dQuote{measure.test} data.frames, as returned by
\code{\link{resample}}, or these objects are rbind-ed with extra columns
\dQuote{task.id} and \dQuote{learner.id}.
}
\seealso{
Other benchmark: \code{\link{BenchmarkResult}};
\code{\link{benchmark}};
\code{\link{getBMRAggrPerformances}};
\code{\link{getBMRFeatSelResults}};
\code{\link{getBMRLearnerIds}};
\code{\link{getBMRPerformances}};
\code{\link{getBMRPredictions}};
\code{\link{getBMRTaskIds}};
\code{\link{getBMRTuneResults}}
}
| /man/getBMRFilteredFeatures.Rd | no_license | narayana1208/mlr | R | false | false | 1,278 | rd | % Generated by roxygen2 (4.0.2): do not edit by hand
\name{getBMRFilteredFeatures}
\alias{getBMRFilteredFeatures}
\title{Extract the feature selection results from a benchmark result.}
\usage{
getBMRFilteredFeatures(bmr, task.ids = NULL, learner.ids = NULL,
as.df = FALSE)
}
\arguments{
\item{bmr}{[\code{\link{BenchmarkResult}}]\cr
Benchmark result.}
\item{task.ids}{[\code{character(1)}]\cr
Restrict result to certain tasks.
Default is all.}
\item{learner.ids}{[\code{character(1)}]\cr
Restrict result to certain learners.
Default is all.}
\item{as.df}{[\code{logical(1)}]\cr
Should the result be returned as one data.frame rather than a list of lists of objects?
Default is \code{FALSE}.}
}
\value{
[\code{list} | \code{data.frame}]. See above.
}
\description{
Returns a list of lists of \dQuote{measure.test} data.frames, as returned by
\code{\link{resample}}, or these objects are rbind-ed with extra columns
\dQuote{task.id} and \dQuote{learner.id}.
}
\seealso{
Other benchmark: \code{\link{BenchmarkResult}};
\code{\link{benchmark}};
\code{\link{getBMRAggrPerformances}};
\code{\link{getBMRFeatSelResults}};
\code{\link{getBMRLearnerIds}};
\code{\link{getBMRPerformances}};
\code{\link{getBMRPredictions}};
\code{\link{getBMRTaskIds}};
\code{\link{getBMRTuneResults}}
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/tppDeprecated.R
\name{TPP-deprecated}
\alias{TPP-deprecated}
\alias{tpp2dPlotCCRGoodCurves}
\alias{tpp2dPlotCCRAllCurves}
\alias{tpp2dPlotCCRSingleCurves}
\alias{tpp2dEvalConfigTable}
\alias{tpp2dRemoveZeroSias}
\alias{tpp2dReplaceColNames}
\alias{tpp2dCreateCCRConfigFile}
\title{Deprecated functions in package \sQuote{TPP}}
\usage{
tpp2dPlotCCRGoodCurves(configTable = NULL, data = NULL,
idVar = "gene_name", fcStr = "rel_fc_", verbose = FALSE)
tpp2dPlotCCRAllCurves(configTable = NULL, data = NULL,
idVar = "gene_name", fcStr = "rel_fc_", verbose = FALSE)
tpp2dPlotCCRSingleCurves(configTable = NULL, data = NULL,
idVar = "gene_name", fcStr = "rel_fc_", verbose = FALSE)
tpp2dEvalConfigTable(configTable)
tpp2dRemoveZeroSias(configTable, data.list, intensityStr = "signal_sum_")
tpp2dReplaceColNames(configTable, data.list, intensityStr, fcStr)
tpp2dCreateCCRConfigFile(configTable)
}
\arguments{
\item{configTable}{DEPRECATED}
\item{data}{DEPRECATED}
\item{idVar}{DEPRECATED}
\item{fcStr}{DEPRECATED}
\item{verbose}{DEPRECATED}
\item{data.list}{DEPRECATED}
\item{intensityStr}{DEPRECATED}
}
\value{
No value returned
}
\description{
These functions are provided for compatibility with older versions
of \sQuote{TPP} only, and will be defunct at the next release.
}
\details{
The following functions are deprecated and will be made defunct; use
the replacements indicated below:
\itemize{
\item{tpp2dPlotCCRGoodCurves: \code{\link{tpp2dCreateDRplots}}}
\item{tpp2dPlotCCRSingleCurves: \code{\link{tpp2dCreateDRplots}}}
\item{tpp2dPlotCCRAllCurves: \code{\link{tpp2dCreateDRplots}}}
}
}
| /man/TPP-deprecated.Rd | no_license | SamGG/TPP | R | false | true | 1,689 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/tppDeprecated.R
\name{TPP-deprecated}
\alias{TPP-deprecated}
\alias{tpp2dPlotCCRGoodCurves}
\alias{tpp2dPlotCCRAllCurves}
\alias{tpp2dPlotCCRSingleCurves}
\alias{tpp2dEvalConfigTable}
\alias{tpp2dRemoveZeroSias}
\alias{tpp2dReplaceColNames}
\alias{tpp2dCreateCCRConfigFile}
\title{Deprecated functions in package \sQuote{TPP}}
\usage{
tpp2dPlotCCRGoodCurves(configTable = NULL, data = NULL,
idVar = "gene_name", fcStr = "rel_fc_", verbose = FALSE)
tpp2dPlotCCRAllCurves(configTable = NULL, data = NULL,
idVar = "gene_name", fcStr = "rel_fc_", verbose = FALSE)
tpp2dPlotCCRSingleCurves(configTable = NULL, data = NULL,
idVar = "gene_name", fcStr = "rel_fc_", verbose = FALSE)
tpp2dEvalConfigTable(configTable)
tpp2dRemoveZeroSias(configTable, data.list, intensityStr = "signal_sum_")
tpp2dReplaceColNames(configTable, data.list, intensityStr, fcStr)
tpp2dCreateCCRConfigFile(configTable)
}
\arguments{
\item{configTable}{DEPRECATED}
\item{data}{DEPRECATED}
\item{idVar}{DEPRECATED}
\item{fcStr}{DEPRECATED}
\item{verbose}{DEPRECATED}
\item{data.list}{DEPRECATED}
\item{intensityStr}{DEPRECATED}
}
\value{
No value returned
}
\description{
These functions are provided for compatibility with older versions
of \sQuote{TPP} only, and will be defunct at the next release.
}
\details{
The following functions are deprecated and will be made defunct; use
the replacements indicated below:
\itemize{
\item{tpp2dPlotCCRGoodCurves: \code{\link{tpp2dCreateDRplots}}}
\item{tpp2dPlotCCRSingleCurves: \code{\link{tpp2dCreateDRplots}}}
\item{tpp2dPlotCCRAllCurves: \code{\link{tpp2dCreateDRplots}}}
}
}
|
# Extracted example code for SEERaBomb::msd() (Mortality vs years Since Diagnosis).
# The bulk of the example is wrapped in "Not run" (##D) comments because it needs
# local SEER and human-mortality data files that are not shipped with the package.
library(SEERaBomb)
### Name: msd
### Title: Mortality vs years Since Diagnosis
### Aliases: msd
### Keywords: IO
### ** Examples
## Not run:
##D library(SEERaBomb)
##D load("~/data/SEER/mrgd/cancDef.RData") #loads in canc
##D lu=canc%>%filter(cancer=="lung")
##D lu=lu%>%mutate(status=as.numeric(COD>0))%>%select(yrdx,agedx,sex,surv,status)
##D lu=lu%>%mutate(surv=round((surv+0.5)/12,3))#convert surv to years
##D
##D # library(demography)
##D # d=hmd.mx("USA", "username", "password") #make an account and put your info in here
##D # mrt=d$rate
##D # save(mrt,file="~/data/usMort/mrt.RData")
##D load("~/data/usMort/mrt.RData"); object.size(mrt)# 250kb
##D brks=c(0,0.5,3,6,10,15,20,25)
##D (dlu=msd(lu,mrt,brkst=brks))
## End(Not run)
| /data/genthat_extracted_code/SEERaBomb/examples/msd.Rd.R | no_license | surayaaramli/typeRrh | R | false | false | 752 | r | library(SEERaBomb)
### Name: msd
### Title: Mortality vs years Since Diagnosis
### Aliases: msd
### Keywords: IO
### ** Examples
## Not run:
##D library(SEERaBomb)
##D load("~/data/SEER/mrgd/cancDef.RData") #loads in canc
##D lu=canc%>%filter(cancer=="lung")
##D lu=lu%>%mutate(status=as.numeric(COD>0))%>%select(yrdx,agedx,sex,surv,status)
##D lu=lu%>%mutate(surv=round((surv+0.5)/12,3))#convert surv to years
##D
##D # library(demography)
##D # d=hmd.mx("USA", "username", "password") #make an account and put your info in here
##D # mrt=d$rate
##D # save(mrt,file="~/data/usMort/mrt.RData")
##D load("~/data/usMort/mrt.RData"); object.size(mrt)# 250kb
##D brks=c(0,0.5,3,6,10,15,20,25)
##D (dlu=msd(lu,mrt,brkst=brks))
## End(Not run)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/Sensor_Methods.R
\name{getE_deriv,redoxSensor-method}
\alias{getE_deriv,redoxSensor-method}
\title{Get the derivative of the redox potential (dE/dR) for a redox sensor}
\usage{
\S4method{getE_deriv}{redoxSensor}(object, R = getR(object))
}
\arguments{
\item{object}{A redoxSensor object}
\item{R}{(Optional; defaults to \code{getR(object)}.)
A numeric value (can be an array) of ratio values}
}
\value{
A numeric array of dE/dR values
}
\description{
For a given redox sensor at a certain temperature, returns the
derivative of the redox potential corresponding to a given ratio (R) value
}
| /man/getE_deriv-redoxSensor-method.Rd | permissive | ApfeldLab/SensorOverlord | R | false | true | 661 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/Sensor_Methods.R
\name{getE_deriv,redoxSensor-method}
\alias{getE_deriv,redoxSensor-method}
\title{Get the derivative of the redox potential (dE/dR) for a redox sensor}
\usage{
\S4method{getE_deriv}{redoxSensor}(object, R = getR(object))
}
\arguments{
\item{object}{A redoxSensor object}
\item{R}{(Optional; defaults to \code{getR(object)}.)
A numeric value (can be an array) of ratio values}
}
\value{
A numeric array of dE/dR values
}
\description{
For a given redox sensor at a certain temperature, returns the
derivative of the redox potential corresponding to a given ratio (R) value
}
|
# Extracted example code for generalCorr::silentMtx0() (older kernel-causality
# unanimity score matrix with optional control variables). The mtcars-based
# example is marked "Not run" (##D); the simulated-data example below executes.
library(generalCorr)
### Name: silentMtx0
### Title: Older kernel-causality unanimity score matrix with optional
### control variables
### Aliases: silentMtx0
### Keywords: SD1, SD2, SD3, SD4, causal correlations criteria, generalized
### ** Examples
## Not run:
##D options(np.messages=FALSE)
##D colnames(mtcars[2:ncol(mtcars)])
##D silentMtx0(mtcars[,1:3],ctrl=mtcars[,4:5]) # mpg paired with others
## End(Not run)
options(np.messages=FALSE) # silence progress messages from the np package
set.seed(234) # make the simulated data reproducible
z=runif(10,2,11)# z is independently created
x=sample(1:10)+z/10 #x is somewhat indep and affected by z
y=1+2*x+3*z+rnorm(10) # y depends on both x and z (plus noise)
w=runif(10) # an unrelated control variable
x2=x;x2[4]=NA;y2=y;y2[8]=NA;w2=w;w2[4]=NA # inject some missing values
silentMtx0(mtx=cbind(x2,y2), ctrl=cbind(z,w2)) # causality matrix for x2/y2, controlling for z/w2
| /data/genthat_extracted_code/generalCorr/examples/silentMtx0.Rd.R | no_license | surayaaramli/typeRrh | R | false | false | 702 | r | library(generalCorr)
### Name: silentMtx0
### Title: Older kernel-causality unanimity score matrix with optional
### control variables
### Aliases: silentMtx0
### Keywords: SD1, SD2, SD3, SD4, causal correlations criteria, generalized
### ** Examples
## Not run:
##D options(np.messages=FALSE)
##D colnames(mtcars[2:ncol(mtcars)])
##D silentMtx0(mtcars[,1:3],ctrl=mtcars[,4:5]) # mpg paired with others
## End(Not run)
options(np.messages=FALSE)
set.seed(234)
z=runif(10,2,11)# z is independently created
x=sample(1:10)+z/10 #x is somewhat indep and affected by z
y=1+2*x+3*z+rnorm(10)
w=runif(10)
x2=x;x2[4]=NA;y2=y;y2[8]=NA;w2=w;w2[4]=NA
silentMtx0(mtx=cbind(x2,y2), ctrl=cbind(z,w2))
|
## Exploratory data analysis: contact transcripts joined with distress scores
## pulled from a Postgres database, followed by two quick diagnostic plots.
setwd("/Users/ianwhitestone/Documents/git/khp-analytics/analysis/") # NOTE(review): machine-specific absolute path; hurts portability
library(ggplot2)
source("helper.R") # presumably provides theme_dlin() -- TODO confirm
library(data.table)
library(dtplyr)
library(dplyr)
library(scales)
library(zoo)
library(RColorBrewer)
library(plyr) # NOTE(review): attaching plyr after dplyr masks dplyr verbs (e.g. summarise); confirm the load order is intentional
library(RPostgreSQL)
library(lubridate)
# Returns an axis-break function producing roughly n "pretty" tick positions.
number_ticks = function(n) {function(limits) pretty(limits, n)}
palette = brewer.pal("YlGnBu", n=9) # 9-colour sequential palette (also shadows base palette())
# loads the PostgreSQL driver
drv = dbDriver("PostgreSQL")
# creates a connection to the postgres database; credentials are blanked out
# here and must be filled in before running ("conn" is used for every query below)
conn = dbConnect(drv, dbname = "",
host = "", port = 5432,
user = "")
# Join each transcript to its distress score and contact metadata on contact_id.
query = "
SELECT a.*, b.score, c.queue_id, c.start_time, c.end_time
FROM enhanced_transcripts as a
JOIN distress_scores as b
ON a.contact_id=b.contact_id
JOIN contacts as c
ON a.contact_id=c.contact_id
"
df = dbGetQuery(conn, query) %>% setDT
# Explore DF
head(df, n=5)
dim(df)
str(df)
# Distribution of distress scores.
ggplot(df, aes(x=score)) +
geom_bar() + theme_dlin() +
scale_x_continuous(breaks=number_ticks(8))
# Handle time broken down by distress score.
ggplot(df, aes(x=factor(score), y=handle_time)) +
geom_boxplot() + theme_dlin() +
labs(x='score', y='handle time (min)')
# scale_x_continuous(breaks=number_ticks(8))
| /analysis/eda.R | permissive | ian-whitestone/khp-analytics | R | false | false | 1,243 | r | setwd("/Users/ianwhitestone/Documents/git/khp-analytics/analysis/")
library(ggplot2)
source("helper.R")
library(data.table)
library(dtplyr)
library(dplyr)
library(scales)
library(zoo)
library(RColorBrewer)
library(plyr)
library(RPostgreSQL)
library(lubridate)
number_ticks = function(n) {function(limits) pretty(limits, n)}
palette = brewer.pal("YlGnBu", n=9)
# loads the PostgreSQL driver
drv = dbDriver("PostgreSQL")
# creates a connection to the postgres database
# note that "con" will be used later in each connection to the database
conn = dbConnect(drv, dbname = "",
host = "", port = 5432,
user = "")
query = "
SELECT a.*, b.score, c.queue_id, c.start_time, c.end_time
FROM enhanced_transcripts as a
JOIN distress_scores as b
ON a.contact_id=b.contact_id
JOIN contacts as c
ON a.contact_id=c.contact_id
"
df = dbGetQuery(conn, query) %>% setDT
# Explore DF
head(df, n=5)
dim(df)
str(df)
ggplot(df, aes(x=score)) +
geom_bar() + theme_dlin() +
scale_x_continuous(breaks=number_ticks(8))
ggplot(df, aes(x=factor(score), y=handle_time)) +
geom_boxplot() + theme_dlin() +
labs(x='score', y='handle time (min)')
# scale_x_continuous(breaks=number_ticks(8))
|
# osmose2R ----------------------------------------------------------------
# main function, creates 'osmose' class objects
# Read OSMOSE model outputs from disk and build an object of class "osmose".
#
# Args:
#   path:          Folder containing the OSMOSE output files. If NULL in an
#                  interactive session the user is prompted for a folder
#                  (choose.dir() is Windows-only -- TODO confirm target platforms).
#   version:       OSMOSE version tag; one of "v3r0", "v3r1", "v3r2".
#   species.names: Optional species names forwarded to the version-specific reader.
#   ...:           Further arguments forwarded to the version-specific reader.
#
# Returns: the list produced by the matching osmose2R.v3r*() reader, with
#          class "osmose". Errors if no path is available or the version is unknown.
osmose2R = function(path=NULL, version="v3r2", species.names=NULL, ...) {
  # && (not &): 'if' needs a scalar condition; short-circuit also skips
  # interactive() entirely when a path was supplied.
  if(is.null(path) && interactive()) {
    path = choose.dir(caption="Select OSMOSE outputs folder")
  }
  if(is.null(path)) stop("No path has been provided.")
  output = switch(version,
                  v3r0 = osmose2R.v3r0(path=path, species.names=species.names, ...),
                  v3r1 = osmose2R.v3r1(path=path, species.names=species.names, ...),
                  v3r2 = osmose2R.v3r2(path=path, species.names=species.names, ...),
                  # unnamed last argument = switch() default; only evaluated on no match
                  stop(sprintf("Incorrect osmose version %s", version))
  )
  class(output) = "osmose"
  return(output)
}
# methods for 'osmose' class ----------------------------------------------
# Plot method for 'osmose' objects: dispatches on the requested output type.
#
# Args:
#   x:    An object of class "osmose" (as returned by osmose2R()).
#   type: Which global output to plot: "biomass", "yield", "abundance" or "yieldN".
#   ...:  Further arguments passed on to the underlying plot method.
#
# Returns: invisible NULL; called for its side effect of drawing a plot.
plot.osmose = function(x, type="biomass", ...) {
  switch(type,
         # Pass the data positionally: the plot() generic's first formal is 'x',
         # so a named 'object=' argument would be swallowed by '...' instead.
         biomass   = plot(x$global$biomass, ...),
         yield     = plot(x$global$yield, ...),
         abundance = plot(x$global$abundance, ...),
         yieldN    = plot(x$global$yieldN, ...),
         # error() does not exist in base R; stop() is the correct way to signal this.
         stop("Plot type not defined."))
  return(invisible())
}
# Print method for 'osmose' objects: a short textual summary of the model
# (OSMOSE version, model name, species/simulation counts, species list).
# Produces byte-identical output to the previous cat()-per-piece implementation.
print.osmose <- function(x, ...) {
  info <- x$model
  cat(sprintf("OSMOSE v.%s\n", info$version))
  cat(sprintf("Model %s \n", sQuote(info$model)))
  cat(sprintf("%s species modeled (%s simulations):\n", info$sp, info$simus))
  cat(sprintf("%s.\n", paste(x$species, collapse = ", ")))
}
# Print method for 'summary.osmose' objects: header lines followed by the
# per-species indicator table. Output is byte-identical to the previous version.
print.summary.osmose <- function(x, ...) {
  cat(sprintf("OSMOSE v.%s\n", x$version))
  cat(sprintf("Model %s \n", sQuote(x$model)))
  cat(sprintf("%s species modeled:\n", x$sp))
  cat(sprintf("%s.\n", paste(x$species, collapse = ", ")))
  cat("Main indicators:\n")
  print(x$resumen)
}
# S3 generic: extract a stored variable from a model output object.
# Dispatch is on the class of 'object'; see getVar.osmose for the main method.
getVar <- function(object, var, ...) {
  UseMethod("getVar")
}
# Extract a stored output variable from an 'osmose' object.
#
# Args:
#   object:   An object of class "osmose".
#   var:      Name of the variable to extract (e.g. "biomass").
#   type:     Top-level slot to look in; default "global".
#   expected: If TRUE (default) and the stored value is not a list, average it
#             over its 3rd margin with apply(x, c(1, 2), mean) -- presumably one
#             replicate per simulation in that dimension (TODO confirm).
#   ...:      Unused; kept for compatibility with the generic.
#
# Returns: the raw stored object, or its mean over the first two margins.
getVar.osmose =
function(object, var, type="global", expected=TRUE, ...) {
  out = object[[type]][[var]]
  is_list = inherits(out, "list")  # same test as '"list" %in% class(out)'
  # && (not &): 'if' requires a scalar condition; short-circuiting is also
  # the correct semantics here.
  if(!is_list && isTRUE(expected))
    out = apply(out, c(1,2), mean, na.rm=TRUE)
  return(out)
}
# Summary method for 'osmose' objects. Builds a 'summary.osmose' list from the
# model metadata, the species names, and a per-species table of time-averaged
# biomass and yield (column means of the stored matrices, NA-robust).
summary.osmose <- function(object, ...) {
  mean_by_col <- function(m) apply(m, 2, mean, na.rm = TRUE)
  indicators <- data.frame(biomass = mean_by_col(object$global$biomass),
                           yield = mean_by_col(object$global$yield))
  rownames(indicators) <- object$species
  out <- object$model
  out$species <- object$species
  out$resumen <- indicators
  class(out) <- "summary.osmose"
  return(out)
}
| /R/osmose2R_package.R | no_license | osmose-model/osmose2R | R | false | false | 2,648 | r | # osmose2R ----------------------------------------------------------------
# main function, creates 'osmose' class objects
osmose2R = function(path=NULL, version="v3r2", species.names=NULL, ...) {
if(is.null(path) & interactive()) {
path = choose.dir(caption="Select OSMOSE outputs folder")
}
if(is.null(path)) stop("No path has been provided.")
output = switch(version,
v3r0 = osmose2R.v3r0(path=path, species.names=species.names, ...),
v3r1 = osmose2R.v3r1(path=path, species.names=species.names, ...),
v3r2 = osmose2R.v3r2(path=path, species.names=species.names, ...),
stop(sprintf("Incorrect osmose version %s", version))
)
class(output) = "osmose"
return(output)
}
# methods for 'osmose' class ----------------------------------------------
# Plot method for 'osmose' objects: dispatches on the requested output type.
#
# Args:
#   x:    An object of class "osmose" (as returned by osmose2R()).
#   type: Which global output to plot: "biomass", "yield", "abundance" or "yieldN".
#   ...:  Further arguments passed on to the underlying plot method.
#
# Returns: invisible NULL; called for its side effect of drawing a plot.
plot.osmose = function(x, type="biomass", ...) {
  switch(type,
         # Pass the data positionally: the plot() generic's first formal is 'x',
         # so a named 'object=' argument would be swallowed by '...' instead.
         biomass   = plot(x$global$biomass, ...),
         yield     = plot(x$global$yield, ...),
         abundance = plot(x$global$abundance, ...),
         yieldN    = plot(x$global$yieldN, ...),
         # error() does not exist in base R; stop() is the correct way to signal this.
         stop("Plot type not defined."))
  return(invisible())
}
print.osmose =
function(x, ...) {
cat(paste0("OSMOSE v.", x$model$version,"\n"))
cat("Model", sQuote(x$model$model),"\n")
cat(x$model$sp, " species modeled (",x$model$simus,
" simulations):\n", sep="")
cat(paste(x$species, collapse=", "),".\n", sep="")
}
print.summary.osmose =
function(x, ...) {
cat(paste0("OSMOSE v.", x$version,"\n"))
cat("Model", sQuote(x$model),"\n")
cat(x$sp, "species modeled:\n")
cat(paste(x$species, collapse=", "),".\n", sep="")
cat("Main indicators:\n")
print(x$resumen)
}
getVar =
function(object, var, ...) {
UseMethod("getVar")
}
# Extract a stored output variable from an 'osmose' object.
#
# Args:
#   object:   An object of class "osmose".
#   var:      Name of the variable to extract (e.g. "biomass").
#   type:     Top-level slot to look in; default "global".
#   expected: If TRUE (default) and the stored value is not a list, average it
#             over its 3rd margin with apply(x, c(1, 2), mean) -- presumably one
#             replicate per simulation in that dimension (TODO confirm).
#   ...:      Unused; kept for compatibility with the generic.
#
# Returns: the raw stored object, or its mean over the first two margins.
getVar.osmose =
function(object, var, type="global", expected=TRUE, ...) {
  out = object[[type]][[var]]
  is_list = inherits(out, "list")  # same test as '"list" %in% class(out)'
  # && (not &): 'if' requires a scalar condition; short-circuiting is also
  # the correct semantics here.
  if(!is_list && isTRUE(expected))
    out = apply(out, c(1,2), mean, na.rm=TRUE)
  return(out)
}
summary.osmose =
function(object, ...) {
output = object$model
output$species = object$species
biomass = apply(object$global$biomass, 2, mean, na.rm=TRUE)
yield = apply(object$global$yield, 2, mean, na.rm=TRUE)
resumen = data.frame(biomass=biomass,
yield = yield)
rownames(resumen) = object$species
output$resumen = resumen
class(output) = "summary.osmose"
return(output)
}
|
## Estimate heart rate from a sequence of video frames ("frames2/frameNNNNN.png").
## The mean red-channel intensity of each frame is extracted, smoothed with a
## rolling mean, and the number of intensity peaks is counted.
library(png)
library(pracma)
library(zoo)

n <- length(list.files("frames2/", "frame.*")) # Number of frames on disk
r <- 30.005392                                 # frame rate (frames per second)
ts <- (0:(n-1))/r                              # time stamp of each frame index

# Preallocate the result instead of growing it with c() inside the loop
# (O(n) instead of O(n^2)); seq_len(n) is also safe when n == 0, unlike 1:n.
vect <- numeric(n)
for (i in seq_len(n)) {
  name <- sprintf("frames2/frame%05d.png", i)
  frame <- readPNG(name)        # array: height x width x channels
  vect[i] <- mean(frame[, , 1]) # mean red-channel intensity; green/blue unused
}
print(vect)
plot(vect, type = "l")

# 13-frame centered rolling mean; fill = NA keeps the series length unchanged.
smooth <- rollapply(vect, width = 13, FUN = mean, align = "center", fill = NA)
plot(smooth, type = "l")

# findpeaks() returns one row per peak (a 4-column matrix), or NULL when no peak
# is found, so count peaks with nrow() -- length() would report 4x the peak count.
pks <- findpeaks(smooth)
if (is.null(pks)) 0L else nrow(pks)
| /Heart-Rate-Determination/jon_resting2.R | no_license | StyledSteezy/DSCI211-Data-Science-I | R | false | false | 676 | r | library(png)
library(pracma)
library(zoo)
n <- length(list.files("frames2/", "frame.*")) # Number of frames
r <- 30.005392 # frame rate
ts <- (0:(n-1))/r # time of each index
# Your initialization here
vect = c()
for (i in 1:n) {
name <- sprintf("frames2/frame%05d.png", i)
frame <- readPNG(name)
rs <- frame[,,1] # Matrix of red values
gs <- frame[,,2] # Matrix of green values
bs <- frame[,,3] # Matrix of blue values
vect = c(vect, mean(rs))
# Per frame computation here
}
print(vect)
plot(vect, type="l")
smooth <- rollapply(vect, width=13, FUN=mean, align = "center", fill=NA)
plot(smooth, type = "l")
length(findpeaks(smooth))
|
#matrix are done through matrix(), rbind(), cbind()
?matrix() # open the help page for matrix()
data<- seq(1,20)
matrix(data,5,4) # fills column-wise by default
matrix(data,5,4,byrow = TRUE) # byrow = TRUE fills row-wise instead
#rbind() stacks vectors as rows; mixing character and numeric coerces everything to character
v1 <- c("hi","how","are","you")
v2 <- c(1,2,3,4)
v3 <- c(4,2,4,12)
rbind(v1,v2,v3)
#cbind() binds the same vectors as columns (again coerced to a common type: character)
v1 <- c("hi","how","are","you")
v2 <- c(1,2,3,4)
v3 <- c(4,2,4,12)
A <- cbind(v1,v2,v3)
A[1,2] # "1" -- a character string, because the whole matrix was coerced to character
A[3,1] # "are"
#Named vectors: elements can be addressed by name
vec <- c(1,2,3,4,5)
names(vec)<-c("a","b","c","d","e")
vec
vec['c']
# matrix with row and column names set via rownames()/colnames()
names(vec) <- NULL # drop the names again before reusing vec below
M <- rep(vec,each=3) # each value repeated 3 times: 1 1 1 2 2 2 ... (length 15)
M
Matrix1 <-matrix(M,3,3) # only the first 9 of the 15 values fit in a 3x3 matrix
rownames(Matrix1) <- c("P","Q","R")
Matrix1
colnames(Matrix1) <- c("X","Y","Z")
Matrix1
Matrix1['Q','Z'] # index by dimnames: row "Q", column "Z"
| /matrix.R | no_license | Meet619/R-Basics | R | false | false | 697 | r |
#matrix are done through matrix(), rbind(), cbind()
?matrix()
data<- seq(1,20)
matrix(data,5,4) # by default it is column wise
matrix(data,5,4,byrow = TRUE) # by default it is column wise
#rbind()
v1 <- c("hi","how","are","you")
v2 <- c(1,2,3,4)
v3 <- c(4,2,4,12)
rbind(v1,v2,v3)
#cbind()
v1 <- c("hi","how","are","you")
v2 <- c(1,2,3,4)
v3 <- c(4,2,4,12)
A <- cbind(v1,v2,v3)
A[1,2]
A[3,1]
#Named vectors
vec <- c(1,2,3,4,5)
names(vec)<-c("a","b","c","d","e")
vec
vec['c']
# matrix set rownames and colnames
names(vec) <- NULL
M <- rep(vec,each=3)
M
Matrix1 <-matrix(M,3,3)
rownames(Matrix1) <- c("P","Q","R")
Matrix1
colnames(Matrix1) <- c("X","Y","Z")
Matrix1
Matrix1['Q','Z']
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/runGroups.R
\name{runGroups}
\alias{runGroups}
\title{Runs a comparison of any group of years in the record.}
\usage{
runGroups(eList, windowSide, group1firstYear, group1lastYear, group2firstYear,
group2lastYear, surfaceStart = NA, surfaceEnd = NA, flowBreak = FALSE,
Q1EndDate = NA, QStartDate = NA, QEndDate = NA, wall = FALSE,
oldSurface = FALSE, fractMin = 0.75, sample1EndDate = NA,
sampleStartDate = NA, sampleEndDate = NA, paStart = NA, paLong = NA,
minNumObs = 100, minNumUncen = 50, windowY = 7, windowQ = 2,
windowS = 0.5, edgeAdjust = TRUE, verbose = TRUE)
}
\arguments{
\item{eList}{named list with at least the Daily, Sample, and INFO dataframes}
\item{windowSide}{integer. The width of the flow normalization window on each side of the year being estimated.
A common value is 11, but no default is specified. If stationary flow normalization is to be used, then windowSide = 0 (this means that
flow-normalization period for all years is the same).}
\item{group1firstYear}{decimal year. Starting year of first group.}
\item{group1lastYear}{decimal year. Ending year of first group.}
\item{group2firstYear}{decimal year. Starting year of second group.}
\item{group2lastYear}{decimal year. Ending year of second group.}
\item{surfaceStart}{The Date (or character in YYYY-MM-DD) that is the start of the WRTDS model to be estimated and the first of the
daily outputs to be generated. Default is NA, which means that the surfaceStart is based on the date of the first sample.}
\item{surfaceEnd}{The Date (or character in YYYY-MM-DD) that is the end of the WRTDS model to be estimated and the last of the daily outputs
to be generated. Default is NA, which means that the surfaceEnd is based on the date of the last sample.}
\item{flowBreak}{logical. Is there an abrupt break in the discharge record, default is FALSE.}
\item{Q1EndDate}{The Date (as character in YYYY-MM-DD) which is the last day, just before the flowBreak.}
\item{QStartDate}{The first Date (as character in YYYY-MM-DD) used in the flow normalization method. Default is
NA, which makes the QStartDate become the first Date in eList$Daily.}
\item{QEndDate}{The last Date (as character in YYYY-MM-DD) used in the flow normalization method. Default is NA,
which makes the QEndDate become the last Date in eList$Daily.}
\item{wall}{logical. Whether there is an abrupt break in the concentration versus discharge relationship due to some major change in
pollution control or water management. Default is FALSE.}
\item{oldSurface}{logical specifying whether to use the original surface, or create a new one. Default is FALSE.}
\item{fractMin}{numeric specifying the minimum fraction of the observations required to run the weighted regression, default is 0.75. The
minimum number will be the maximum of minNumObs and fractMin multiplied by total number of observations.}
\item{sample1EndDate}{The Date (as character in YYYY-MM-DD) of the last date just before the wall. Default = NA.
A date must be specified if wall = TRUE.}
\item{sampleStartDate}{The Date (as character in YYYY-MM-DD) of the first sample to be used. Default is NA which sets it
to the first Date in eList$Sample.}
\item{sampleEndDate}{The Date (as character in YYYY-MM-DD) of the last sample to be used.
Default is NA which sets it to the last Date in eList$Sample.}
\item{paStart}{numeric integer specifying the starting month for the period of analysis, 1<=paStart<=12.
Default is NA, which will use the paStart in the eList$INFO data frame. See also \code{\link{setPA}}.}
\item{paLong}{numeric integer specifying the length of the period of analysis, in months, 1<=paLong<=12.
Default is NA, which will use the paLong in the eList$INFO data frame. See also \code{\link{setPA}}.}
\item{minNumObs}{numeric specifying the miniumum number of observations required to run the weighted regression, default is 100}
\item{minNumUncen}{numeric specifying the minimum number of uncensored observations to run the weighted regression, default is 50}
\item{windowY}{numeric specifying the half-window width in the time dimension, in units of years, default is 7}
\item{windowQ}{numeric specifying the half-window width in the discharge dimension, units are natural log units, default is 2}
\item{windowS}{numeric specifying the half-window with in the seasonal dimension, in units of years, default is 0.5}
\item{edgeAdjust}{logical specifying whether to use the modified method for calculating the windows at the edge of the record.
The edgeAdjust method tends to reduce curvature near the start and end of record. Default is TRUE.}
\item{verbose}{logical specifying whether or not to display progress message}
}
\value{
Dataframe with 7 columns and 2 rows. The first row is about trends in concentration (mg/L), the second row is about trends in flux (million kg/year).
The data frame has a number of attributes.
\tabular{ll}{
Column Name \tab Description \cr
Total Change \tab The difference between the results for group2 - group1 (x22 - x11). \cr
CQTC \tab CQTC is the "Concentration v. Q Trend Component." It is the component of total change due to the change in the CQR (Concentration Discharge Relationship). (x20 - x10). \cr
QTC \tab QTC is the "Q Trend Component." It is the component of total change due to the trend in the QD (Discharge Distribution). (x22 - x11 - x20 + x10). \cr
x10 \tab The estimated value based on the CQR computed for the years in group1, integrated over the QD for the entire timespan of the Daily data frame (or the
period QStartDate and to QEndDate if these are specified).\cr
x11 \tab The estimated value based on the CQR for the years in group1, integrated over the QD specified by the user for group1. \cr
x20 \tab The estimated value based on the CQR computed for the years in group2, integrated over the QD for the entire period of record. \cr
x22 \tab The estimated value based on the CQR for the years in group2, integrated over the QD specified by the user for group2. \cr
}
}
\description{
\code{runGroups} provides comparisons of results, in terms of
flow-normalized concentration and flow-normalized flux for any groups of years
of years in the water quality record. Comparison could involve the
use of the "wall" and/or use of "generalized flow-normalization".
These two concepts are described in detail in the vignette:
\code{vignette("Enhancements", package = "EGRET")}.
}
\details{
When using generalized flow-normalization, it is best to have the Daily data frame
extend well beyond the years that are in the Sample data frame. Ideally,
the Daily data frame would start windowSide years before the
start of the Sample data set, if the data exist to provide for that. Generally
that isn't possible for the end of the record because the Sample data
may end very close to the present. To the extent that is possible therefore, it is better to
include more discharge data after the end of the Sample record.
Also note that in the case run in the examples don't do that,
because the data set needs to be appropriate for stationary flow
normalization as well (and package size considerations make it difficult to
include specialized examples).
}
\examples{
eList <- Choptank_eList
\donttest{
#Option 1: Use all years for group flow normalization.
groupOut_1 <- runGroups(eList, windowSide = 0,
group1firstYear = 1980, group1lastYear = 1990,
group2firstYear = 1995, group2lastYear = 2005)
# Option 2: Use sliding window.
# In each case it is a 23 year window (23 = 1 + 2 * 11)
groupOut_2 <- runGroups(eList, windowSide = 11,
group1firstYear = 1980, group1lastYear = 1990,
group2firstYear = 1995, group2lastYear = 2005)
# Option 3: Flow normalization is based on splitting the flow record at 1990-09-30
# But in years before the break it uses all flow data from before the break,
# and years after the break uses all flow data after the break
groupOut_3 <- runGroups(eList, windowSide = 0,
group1firstYear = 1980, group1lastYear = 1990,
group2firstYear = 1995, group2lastYear = 2005,
flowBreak = TRUE,
Q1EndDate = "1990-09-30")
# Option 4: Flow normalization is based on splitting the flow record at 1990-09-30
# but before the break uses a 23 year window of years before the break
# after the break uses a 23 year window of years after the break
groupOut_4 <- runGroups(eList, windowSide = 11,
group1firstYear = 1980, group1lastYear = 1990,
group2firstYear = 1995, group2lastYear = 2005,
flowBreak = TRUE,
Q1EndDate = "1990-09-30")
}
}
| /man/runGroups.Rd | permissive | zac-driscoll/EGRET | R | false | true | 8,927 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/runGroups.R
\name{runGroups}
\alias{runGroups}
\title{Runs a comparison of any group of years in the record.}
\usage{
runGroups(eList, windowSide, group1firstYear, group1lastYear, group2firstYear,
group2lastYear, surfaceStart = NA, surfaceEnd = NA, flowBreak = FALSE,
Q1EndDate = NA, QStartDate = NA, QEndDate = NA, wall = FALSE,
oldSurface = FALSE, fractMin = 0.75, sample1EndDate = NA,
sampleStartDate = NA, sampleEndDate = NA, paStart = NA, paLong = NA,
minNumObs = 100, minNumUncen = 50, windowY = 7, windowQ = 2,
windowS = 0.5, edgeAdjust = TRUE, verbose = TRUE)
}
\arguments{
\item{eList}{named list with at least the Daily, Sample, and INFO dataframes}
\item{windowSide}{integer. The width of the flow normalization window on each side of the year being estimated.
A common value is 11, but no default is specified. If stationary flow normalization is to be used, then windowSide = 0 (this means that
flow-normalization period for all years is the same).}
\item{group1firstYear}{decimal year. Starting year of first group.}
\item{group1lastYear}{decimal year. Ending year of first group.}
\item{group2firstYear}{decimal year. Starting year of second group.}
\item{group2lastYear}{decimal year. Ending year of second group.}
\item{surfaceStart}{The Date (or character in YYYY-MM-DD) that is the start of the WRTDS model to be estimated and the first of the
daily outputs to be generated. Default is NA, which means that the surfaceStart is based on the date of the first sample.}
\item{surfaceEnd}{The Date (or character in YYYY-MM-DD) that is the end of the WRTDS model to be estimated and the last of the daily outputs
to be generated. Default is NA, which means that the surfaceEnd is based on the date of the last sample.}
\item{flowBreak}{logical. Is there an abrupt break in the discharge record, default is FALSE.}
\item{Q1EndDate}{The Date (as character in YYYY-MM-DD) which is the last day, just before the flowBreak.}
\item{QStartDate}{The first Date (as character in YYYY-MM-DD) used in the flow normalization method. Default is
NA, which makes the QStartDate become the first Date in eList$Daily.}
\item{QEndDate}{The last Date (as character in YYYY-MM-DD) used in the flow normalization method. Default is NA,
which makes the QEndDate become the last Date in eList$Daily.}
\item{wall}{logical. Whether there is an abrupt break in the concentration versus discharge relationship due to some major change in
pollution control or water management. Default is FALSE.}
\item{oldSurface}{logical specifying whether to use the original surface, or create a new one. Default is FALSE.}
\item{fractMin}{numeric specifying the minimum fraction of the observations required to run the weighted regression, default is 0.75. The
minimum number will be the maximum of minNumObs and fractMin multiplied by total number of observations.}
\item{sample1EndDate}{The Date (as character in YYYY-MM-DD) of the last date just before the wall. Default = NA.
A date must be specified if wall = TRUE.}
\item{sampleStartDate}{The Date (as character in YYYY-MM-DD) of the first sample to be used. Default is NA which sets it
to the first Date in eList$Sample.}
\item{sampleEndDate}{The Date (as character in YYYY-MM-DD) of the last sample to be used.
Default is NA which sets it to the last Date in eList$Sample.}
\item{paStart}{numeric integer specifying the starting month for the period of analysis, 1<=paStart<=12.
Default is NA, which will use the paStart in the eList$INFO data frame. See also \code{\link{setPA}}.}
\item{paLong}{numeric integer specifying the length of the period of analysis, in months, 1<=paLong<=12.
Default is NA, which will use the paLong in the eList$INFO data frame. See also \code{\link{setPA}}.}
\item{minNumObs}{numeric specifying the miniumum number of observations required to run the weighted regression, default is 100}
\item{minNumUncen}{numeric specifying the minimum number of uncensored observations to run the weighted regression, default is 50}
\item{windowY}{numeric specifying the half-window width in the time dimension, in units of years, default is 7}
\item{windowQ}{numeric specifying the half-window width in the discharge dimension, units are natural log units, default is 2}
\item{windowS}{numeric specifying the half-window with in the seasonal dimension, in units of years, default is 0.5}
\item{edgeAdjust}{logical specifying whether to use the modified method for calculating the windows at the edge of the record.
The edgeAdjust method tends to reduce curvature near the start and end of record. Default is TRUE.}
\item{verbose}{logical specifying whether or not to display progress message}
}
\value{
Dataframe with 7 columns and 2 rows. The first row is about trends in concentration (mg/L), the second row is about trends in flux (million kg/year).
The data frame has a number of attributes.
\tabular{ll}{
Column Name \tab Description \cr
Total Change \tab The difference between the results for group2 - group1 (x22 - x11). \cr
CQTC \tab CQTC is the "Concentration v. Q Trend Component." It is the component of total change due to the change in the CQR (Concentration Discharge Relationship). (x20 - x10). \cr
QTC \tab QTC is the "Q Trend Component." It is the component of total change due to the trend in the QD (Discharge Distribution). (x22 - x11 - x20 + x10). \cr
x10 \tab The estimated value based on the CQR computed for the years in group1, integrated over the QD for the entire timespan of the Daily data frame (or the
period QStartDate and to QEndDate if these are specified).\cr
x11 \tab The estimated value based on the CQR for the years in group1, integrated over the QD specified by the user for group1. \cr
x20 \tab The estimated value based on the CQR computed for the years in group2, integrated over the QD for the entire period of record. \cr
x22 \tab The estimated value based on the CQR for the years in group2, integrated over the QD specified by the user for group2. \cr
}
}
\description{
\code{runGroups} provides comparisons of results, in terms of
flow-normalized concentration and flow-normalized flux, for any groups of years
in the water quality record. Comparison could involve the
use of the "wall" and/or use of "generalized flow-normalization".
These two concepts are described in detail in the vignette:
\code{vignette("Enhancements", package = "EGRET")}.
}
\details{
When using generalized flow-normalization, it is best to have the Daily data frame
extend well beyond the years that are in the Sample data frame. Ideally,
the Daily data frame would start windowSide years before the
start of the Sample data set, if the data exist to provide for that. Generally
that isn't possible for the end of the record because the Sample data
may end very close to the present. To the extent that is possible therefore, it is better to
include more discharge data after the end of the Sample record.
Also note that the cases run in the examples don't do that,
because the data set needs to be appropriate for stationary flow
normalization as well (and package size considerations make it difficult to
include specialized examples).
}
\examples{
eList <- Choptank_eList
\donttest{
#Option 1: Use all years for group flow normalization.
groupOut_1 <- runGroups(eList, windowSide = 0,
group1firstYear = 1980, group1lastYear = 1990,
group2firstYear = 1995, group2lastYear = 2005)
# Option 2: Use sliding window.
# In each case it is a 23 year window (23 = 1 + 2 * 11)
groupOut_2 <- runGroups(eList, windowSide = 11,
group1firstYear = 1980, group1lastYear = 1990,
group2firstYear = 1995, group2lastYear = 2005)
# Option 3: Flow normalization is based on splitting the flow record at 1990-09-30
# But in years before the break it uses all flow data from before the break,
# and years after the break uses all flow data after the break
groupOut_3 <- runGroups(eList, windowSide = 0,
group1firstYear = 1980, group1lastYear = 1990,
group2firstYear = 1995, group2lastYear = 2005,
flowBreak = TRUE,
Q1EndDate = "1990-09-30")
# Option 4: Flow normalization is based on splitting the flow record at 1990-09-30
# but before the break uses a 23 year window of years before the break
# after the break uses a 23 year window of years after the break
groupOut_4 <- runGroups(eList, windowSide = 11,
group1firstYear = 1980, group1lastYear = 1990,
group2firstYear = 1995, group2lastYear = 2005,
flowBreak = TRUE,
Q1EndDate = "1990-09-30")
}
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.