content
large_stringlengths 0
6.46M
| path
large_stringlengths 3
331
| license_type
large_stringclasses 2
values | repo_name
large_stringlengths 5
125
| language
large_stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 4
6.46M
| extension
large_stringclasses 75
values | text
stringlengths 0
6.46M
|
|---|---|---|---|---|---|---|---|---|---|
# solution of exercise 3
# question 1
secondMinSquared <- function (x) {
  # Return the second-smallest value of x^2.
  #
  # x: a numeric vector with at least two elements.
  # Returns: min of the squared values after removing one occurrence of
  #   the smallest squared value (ties count as separate occurrences).
  # check the input is vector or not
  if (!is.vector(x)) {
    stop('input is not a vector!')
  }
  if (!is.numeric(x)) {
    stop('input vector contains non-numeric element(s)')
  }
  # Guard: with fewer than 2 elements there is no "second" minimum
  # (the original code silently returned Inf with a warning here).
  if (length(x) < 2) {
    stop('input vector must contain at least two elements')
  }
  # square the vector, drop one occurrence of the smallest squared
  # value, then take the min of what remains
  x <- x^2
  x <- x[-which.min(x)]
  secondMin <- min(x)
  return(secondMin)
}
# question 2
vecSummary <- function (x){
  # Summarise a numeric vector as c(mean, median, variance, min, max).
  #
  # x: a numeric vector.
  # Returns: an unnamed numeric vector of length 5.
  if (!is.vector(x)) {
    stop('input is not a vector!')
  }
  if (!is.numeric(x)) {
    stop('input vector contains non-numeric element(s)')
  }
  # Apply each summary statistic in turn; as.numeric() + unname() keep
  # the result an unnamed double vector, exactly as c(mean(x), ...) does.
  stats <- list(mean, median, var, min, max)
  unname(vapply(stats, function(f) as.numeric(f(x)), numeric(1)))
}
# question 3
generateSample <- function (n_samples, mean, stdev){
  # Draw n_samples values from N(mean, stdev) and return them shuffled.
  # The extra sample() pass only permutes the draws; it does not change
  # the distribution (kept to preserve the original RNG behaviour).
  draws <- rnorm(n_samples, mean = mean, sd = stdev)
  sample(draws, n_samples)
}
# question 4
# Repeat the experiment 20 times: draw 20 values from N(2, 1.5) and run a
# two-sided one-sample t-test of H0: mu = 1.5 on each sample.
testList <- list()
totalSample <- c()
for (i in seq_len(20)) {
  x <- generateSample(20, 2, 1.5)
  testList[[i]] <- t.test(x, mu=1.5, alternative='two.sided')
  totalSample <- append(totalSample, x)
}
# print(testList)
# The outcome varies per sample: some tests reject mu = 1.5, others do
# not, even though all samples come from the same N(2, 1.5) population.
t.test(totalSample, mu=1.5, alternative='two.sided')
# Pooling all 400 draws, the sample mean settles near the true mean of 2,
# so the pooled test is far more decisive.
|
/Exercise3/solution.r
|
no_license
|
bailin7134/slm_fs2017
|
R
| false
| false
| 1,378
|
r
|
# solution of exercise 3
# question 1
secondMinSquared <- function (x) {
  # Return the second-smallest value of x^2.
  #
  # x: a numeric vector with at least two elements.
  # Returns: min of the squared values after removing one occurrence of
  #   the smallest squared value (ties count as separate occurrences).
  # check the input is vector or not
  if (!is.vector(x)) {
    stop('input is not a vector!')
  }
  if (!is.numeric(x)) {
    stop('input vector contains non-numeric element(s)')
  }
  # Guard: with fewer than 2 elements there is no "second" minimum
  # (the original code silently returned Inf with a warning here).
  if (length(x) < 2) {
    stop('input vector must contain at least two elements')
  }
  # square the vector, drop one occurrence of the smallest squared
  # value, then take the min of what remains
  x <- x^2
  x <- x[-which.min(x)]
  secondMin <- min(x)
  return(secondMin)
}
# question 2
vecSummary <- function (x){
  # Summarise a numeric vector as c(mean, median, variance, min, max).
  #
  # x: a numeric vector.
  # Returns: an unnamed numeric vector of length 5.
  if (!is.vector(x)) {
    stop('input is not a vector!')
  }
  if (!is.numeric(x)) {
    stop('input vector contains non-numeric element(s)')
  }
  # Apply each summary statistic in turn; as.numeric() + unname() keep
  # the result an unnamed double vector, exactly as c(mean(x), ...) does.
  stats <- list(mean, median, var, min, max)
  unname(vapply(stats, function(f) as.numeric(f(x)), numeric(1)))
}
# question 3
generateSample <- function (n_samples, mean, stdev){
  # Draw n_samples values from N(mean, stdev) and return them shuffled.
  # The extra sample() pass only permutes the draws; it does not change
  # the distribution (kept to preserve the original RNG behaviour).
  draws <- rnorm(n_samples, mean = mean, sd = stdev)
  sample(draws, n_samples)
}
# question 4
# Repeat the experiment 20 times: draw 20 values from N(2, 1.5) and run a
# two-sided one-sample t-test of H0: mu = 1.5 on each sample.
testList <- list()
totalSample <- c()
for (i in seq_len(20)) {
  x <- generateSample(20, 2, 1.5)
  testList[[i]] <- t.test(x, mu=1.5, alternative='two.sided')
  totalSample <- append(totalSample, x)
}
# print(testList)
# The outcome varies per sample: some tests reject mu = 1.5, others do
# not, even though all samples come from the same N(2, 1.5) population.
t.test(totalSample, mu=1.5, alternative='two.sided')
# Pooling all 400 draws, the sample mean settles near the true mean of 2,
# so the pooled test is far more decisive.
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/dexr_hl_figure_costs.R
\name{hl_figure_energycosts_requests_giniByStartT}
\alias{hl_figure_energycosts_requests_giniByStartT}
\title{Retrieves requests data from DB and creates figure of gini coefficient of cost per KWh by delivery start time.}
\usage{
hl_figure_energycosts_requests_giniByStartT(
dexpa,
type = "load",
skiplegend = F
)
}
\arguments{
\item{dexpa}{parameter}
\item{type}{either 'gen' or 'load'}
\item{skiplegend}{logical; whether to skip drawing the legend (defaults to \code{FALSE})}
}
\value{
figure file
}
\description{
Retrieves requests data from DB and creates figure of gini coefficient of cost per KWh by delivery start time.
}
\author{
Sascha Holzhauer
}
|
/man/hl_figure_energycosts_requests_giniByStartT.Rd
|
no_license
|
UniK-INES/dexR
|
R
| false
| true
| 675
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/dexr_hl_figure_costs.R
\name{hl_figure_energycosts_requests_giniByStartT}
\alias{hl_figure_energycosts_requests_giniByStartT}
\title{Retrieves requests data from DB and creates figure of gini coefficient of cost per KWh by delivery start time.}
\usage{
hl_figure_energycosts_requests_giniByStartT(
dexpa,
type = "load",
skiplegend = F
)
}
\arguments{
\item{dexpa}{parameter}
\item{type}{either 'gen' or 'load'}
\item{skiplegend}{logical; whether to skip drawing the legend (defaults to \code{FALSE})}
}
\value{
figure file
}
\description{
Retrieves requests data from DB and creates figure of gini coefficient of cost per KWh by delivery start time.
}
\author{
Sascha Holzhauer
}
|
# Take the state name (state), outcome (outcome), and ranking of the hospital for that outcome (num).
# From out-of-care measures.csv file
# Returns a vector with the name of the hospital - ranking by 'num'
#
# Look up the hospital with a given 30-day mortality ranking for one
# illness within one state, using outcome-of-care-measures.csv from the
# working directory.
#
# state   - two-letter state abbreviation, e.g. "TX"
# illness - one of "heart_attack", "heart_failure", "pneumonia"
# num     - rank to look up; "best" is mapped to rank 1
#           (NOTE(review): "worst" is not supported — confirm whether it
#           should be)
# Prints the matching row, then prints (and returns) the hospital name.
rankhospital <- function(state, illness, num = "best") {
  if (num == "best") num <- 1
  ## Read outcome data; all columns as character, converted below.
  outcomef <- read.csv("outcome-of-care-measures.csv", colClasses = "character")
  ## Reduce the data frame to the relevant columns:
  ## 2 = hospital name, 7 = state, 11/17/23 = 30-day death rates.
  outcome <- as.data.frame(cbind(outcomef[, 2], outcomef[, 7], outcomef[, 11], outcomef[, 17], outcomef[, 23]))
  colnames(outcome) <- c("hospital_name", "state", "heart_attack", "heart_failure", "pneumonia")
  ## Convert the rate columns to numeric values ("Not Available" -> NA):
  outcome$heart_attack <- as.numeric(as.character(outcome$heart_attack))
  outcome$heart_failure <- as.numeric(as.character(outcome$heart_failure))
  outcome$pneumonia <- as.numeric(as.character(outcome$pneumonia))
  outcome$state <- as.character(outcome$state)
  state <- as.character(state)
  ## Check that the state code is valid:
  state_list <- unique(outcome$state)
  if (!state %in% state_list) {
    stop("invalid state code!!!")
  }
  ## Validate the illness before doing any sorting work.  The original
  ## code called the non-existent Print() here and then fell through to
  ## use an undefined `outcome_sort`; stop() makes the failure explicit.
  if (!illness %in% c("heart_attack", "heart_failure", "pneumonia")) {
    stop("Try again with a different illness!")
  }
  ## Order the dataset by state - death rate - hospital name (dropping
  ## NA rates) and assign a within-state rank.
  if (illness == "heart_attack") {
    outcome_sort <- outcome[with(outcome, order(state, heart_attack, hospital_name, na.last = NA)), ]
    outcome_sort$rank <- unlist(with(outcome_sort, tapply(heart_attack, state, order)))
  } else if (illness == "heart_failure") {
    outcome_sort <- outcome[with(outcome, order(state, heart_failure, hospital_name, na.last = NA)), ]
    outcome_sort$rank <- unlist(with(outcome_sort, tapply(heart_failure, state, order)))
  } else {
    outcome_sort <- outcome[with(outcome, order(state, pneumonia, hospital_name, na.last = NA)), ]
    outcome_sort$rank <- unlist(with(outcome_sort, tapply(pneumonia, state, order)))
  }
  outcome_sort$state <- as.character(outcome_sort$state)
  ## Return hospital name in that state with the given rank.
  rank_row <- outcome_sort[which(outcome_sort$state == state & outcome_sort$rank == num), ]
  print(rank_row)
  ## 30 day death rate
  print(rank_row[, 1])
}
|
/rankhospital.R
|
no_license
|
CushingDavid/ProgrammingAssignment3
|
R
| false
| false
| 2,770
|
r
|
# Take the state name (state), outcome (outcome), and ranking of the hospital for that outcome (num).
# From out-of-care measures.csv file
# Returns a vector with the name of the hospital - ranking by 'num'
#
# Look up the hospital with a given 30-day mortality ranking for one
# illness within one state, using outcome-of-care-measures.csv from the
# working directory.
#
# state   - two-letter state abbreviation, e.g. "TX"
# illness - one of "heart_attack", "heart_failure", "pneumonia"
# num     - rank to look up; "best" is mapped to rank 1
#           (NOTE(review): "worst" is not supported — confirm whether it
#           should be)
# Prints the matching row, then prints (and returns) the hospital name.
rankhospital <- function(state, illness, num = "best") {
  if (num == "best") num <- 1
  ## Read outcome data; all columns as character, converted below.
  outcomef <- read.csv("outcome-of-care-measures.csv", colClasses = "character")
  ## Reduce the data frame to the relevant columns:
  ## 2 = hospital name, 7 = state, 11/17/23 = 30-day death rates.
  outcome <- as.data.frame(cbind(outcomef[, 2], outcomef[, 7], outcomef[, 11], outcomef[, 17], outcomef[, 23]))
  colnames(outcome) <- c("hospital_name", "state", "heart_attack", "heart_failure", "pneumonia")
  ## Convert the rate columns to numeric values ("Not Available" -> NA):
  outcome$heart_attack <- as.numeric(as.character(outcome$heart_attack))
  outcome$heart_failure <- as.numeric(as.character(outcome$heart_failure))
  outcome$pneumonia <- as.numeric(as.character(outcome$pneumonia))
  outcome$state <- as.character(outcome$state)
  state <- as.character(state)
  ## Check that the state code is valid:
  state_list <- unique(outcome$state)
  if (!state %in% state_list) {
    stop("invalid state code!!!")
  }
  ## Validate the illness before doing any sorting work.  The original
  ## code called the non-existent Print() here and then fell through to
  ## use an undefined `outcome_sort`; stop() makes the failure explicit.
  if (!illness %in% c("heart_attack", "heart_failure", "pneumonia")) {
    stop("Try again with a different illness!")
  }
  ## Order the dataset by state - death rate - hospital name (dropping
  ## NA rates) and assign a within-state rank.
  if (illness == "heart_attack") {
    outcome_sort <- outcome[with(outcome, order(state, heart_attack, hospital_name, na.last = NA)), ]
    outcome_sort$rank <- unlist(with(outcome_sort, tapply(heart_attack, state, order)))
  } else if (illness == "heart_failure") {
    outcome_sort <- outcome[with(outcome, order(state, heart_failure, hospital_name, na.last = NA)), ]
    outcome_sort$rank <- unlist(with(outcome_sort, tapply(heart_failure, state, order)))
  } else {
    outcome_sort <- outcome[with(outcome, order(state, pneumonia, hospital_name, na.last = NA)), ]
    outcome_sort$rank <- unlist(with(outcome_sort, tapply(pneumonia, state, order)))
  }
  outcome_sort$state <- as.character(outcome_sort$state)
  ## Return hospital name in that state with the given rank.
  rank_row <- outcome_sort[which(outcome_sort$state == state & outcome_sort$rank == num), ]
  print(rank_row)
  ## 30 day death rate
  print(rank_row[, 1])
}
|
#' GMLSphericalCS
#'
#' @docType class
#' @importFrom R6 R6Class
#' @export
#' @keywords ISO GML spherical coordinate system
#' @return Object of \code{\link{R6Class}} for modelling a GMLSphericalCS
#' @format \code{\link{R6Class}} object.
#'
#' @section Inherited Methods:
#' \describe{
#'  \item{\code{new(xml, defaults, id)}}{
#'    This method is used to instantiate a GML Abstract CRS
#'  }
#'  \item{\code{addAxis(axis)}}{
#'    Adds an axis, object of class \code{GMLCoordinateSystemAxis}
#'  }
#'  \item{\code{delAxis(axis)}}{
#'    Deletes an axis, object of class \code{GMLCoordinateSystemAxis}
#'  }
#' }
#'
#' @references
#'   ISO 19136:2007 Geographic Information -- Geographic Markup Language.
#'   http://www.iso.org/iso/iso_catalogue/catalogue_tc/catalogue_detail.htm?csnumber=32554
#'
#'   OGC Geography Markup Language. http://www.opengeospatial.org/standards/gml
#'
#' @author Emmanuel Blondel <emmanuel.blondel1@@gmail.com>
#'
GMLSphericalCS <- R6Class("GMLSphericalCS",
   inherit = GMLAbstractCoordinateSystem,
   private = list(
     # XML element name and namespace prefix used when (de)serializing
     # this coordinate system; all behavior comes from the superclass.
     xmlElement = "SphericalCS",
     xmlNamespacePrefix = "GML"
   ),
   public = list()
)
|
/R/GMLShericalCS.R
|
no_license
|
65MO/geometa
|
R
| false
| false
| 1,145
|
r
|
#' GMLSphericalCS
#'
#' @docType class
#' @importFrom R6 R6Class
#' @export
#' @keywords ISO GML spherical coordinate system
#' @return Object of \code{\link{R6Class}} for modelling a GMLSphericalCS
#' @format \code{\link{R6Class}} object.
#'
#' @section Inherited Methods:
#' \describe{
#'  \item{\code{new(xml, defaults, id)}}{
#'    This method is used to instantiate a GML Abstract CRS
#'  }
#'  \item{\code{addAxis(axis)}}{
#'    Adds an axis, object of class \code{GMLCoordinateSystemAxis}
#'  }
#'  \item{\code{delAxis(axis)}}{
#'    Deletes an axis, object of class \code{GMLCoordinateSystemAxis}
#'  }
#' }
#'
#' @references
#'   ISO 19136:2007 Geographic Information -- Geographic Markup Language.
#'   http://www.iso.org/iso/iso_catalogue/catalogue_tc/catalogue_detail.htm?csnumber=32554
#'
#'   OGC Geography Markup Language. http://www.opengeospatial.org/standards/gml
#'
#' @author Emmanuel Blondel <emmanuel.blondel1@@gmail.com>
#'
GMLSphericalCS <- R6Class("GMLSphericalCS",
   inherit = GMLAbstractCoordinateSystem,
   private = list(
     # XML element name and namespace prefix used when (de)serializing
     # this coordinate system; all behavior comes from the superclass.
     xmlElement = "SphericalCS",
     xmlNamespacePrefix = "GML"
   ),
   public = list()
)
|
##' The gradient for the mean function in the GLM framework.
##'
##' Computes the elementwise derivative of the mean with respect to the
##' linear predictor, evaluated at the linear predictor implied by
##' \code{par} for the link named in \code{linkArgs[["type"]]}.
##' @param par "vector" of mean parameters; passed to \code{parLinkFun}.
##' @param linkArgs "list" with at least a \code{type} entry (one of
##'     "identity", "log", "glog", "logit", "glogit"); "glog" and
##'     "glogit" additionally use the boundary entries \code{a} and
##'     \code{b}.
##' @return "matrix" of the same dimension as the linear predictor
##' @references Li 2012
##' @author Feng Li, Department of Statistics, Stockholm University, Sweden.
##' @note Created: Thu Nov 24 11:46:32 CET 2011;
##'     Current: Thu Nov 24 11:46:39 CET 2011.
##' @export
parMeanFunGrad <- function(par, linkArgs)
{
    ## Input x'b -> l(phi) = x'b -> phi
    ## NOTE: We want vectorized output, i.e, X is n-by-p, beta is p-by-1 and
    ## the output is n-by-1. But the appendix is written in scalar form.

    ## The linear predictor eta = x'b
    linpred <- parLinkFun(mu = par, linkArgs = linkArgs)
    ## Hoisted: lower-case the link name once instead of per branch.
    link <- tolower(linkArgs[["type"]])

    ## Gradient for different situations
    if (link == "identity")
    {
        ## d mu / d eta = 1; keep linpred's shape, fill with ones.
        ## (seq_along fixes the original 1:length() form, which grew a
        ## zero-length object to length one.)
        out <- linpred
        out[seq_along(linpred)] <- 1
    }
    else if (link %in% c("log", "glog"))
    {
        if (link == "glog")
        {
            a <- linkArgs$a
            b <- linkArgs$b
            if (is.null(b)) b <- Inf
        }
        else
        {
            a <- 0
            b <- Inf
        }
        out <- exp(linpred)
        ## Let the gradient be zero after the cutoff to stabilize the
        ## computation.  any() replaces the original length() > 0 guard,
        ## which was always TRUE for non-empty input; the assignment
        ## itself is unchanged (FALSE entries are a no-op).
        out.upidx <- ((out + a) >= b)
        if (any(out.upidx))
        {
            out[out.upidx] <- 0
        }
    }
    else if (link %in% c("glogit", "logit"))
    {
        if (link == "logit")
        {
            a <- 0
            b <- 1
        }
        else
        {
            a <- linkArgs$a
            if (length(a) == 0)
            {
                stop("A lower boundary parameter `a` for glogit link is expected.")
            }
            b <- linkArgs$b
            if (length(b) == 0)
            {
                b <- Inf
            }
        }
        exp.linPred <- exp(linpred)
        ## Gradient of the (generalized) logistic mean w.r.t. eta.
        out.lin <- (b-a)*exp.linPred/(1+exp.linPred)^2
        ## out.a <- 1/(1+exp.linPred)
        ## out.b <- 1/(1+1/exp.linPred)
        out <- out.lin
    }
    else
    {
        stop("This link function is not implemented yet!")
    }
    return(out)
}
## parMeanFunGrad <- function(X, beta, link)
## {
## ## Input x'b -> l(phi) = x'b -> phi
## ## NOTE: We want vectorized output, i.e, X is n-by-p, beta is p-by-1 and
## ## the output is n-by-1. But the appendix is written in scaler form.
## if(tolower(link) == "identity")
## {
## out <- X
## }
## else if(tolower(link) == "log")
## {
## linPred <- X %*% beta
## exp.linPred <- array(exp(linPred), dim(X))
## out <- exp.linPred*X
## }
## else if(tolower(link) == "logit")
## {
## linPred <- X %*% beta
## exp.linPred <- array(exp(linPred), dim(X))
## out <- exp.linPred/(1+exp.linPred)^2*X
## }
## else
## {
## stop("This link function is not implemented yet!")
## }
## return(out)
## }
|
/R/parMeanFunGrad.R
|
no_license
|
kl-lab/fformpp
|
R
| false
| false
| 3,103
|
r
|
##' The gradient for the mean function in the GLM framework.
##'
##' Computes the elementwise derivative of the mean with respect to the
##' linear predictor, evaluated at the linear predictor implied by
##' \code{par} for the link named in \code{linkArgs[["type"]]}.
##' @param par "vector" of mean parameters; passed to \code{parLinkFun}.
##' @param linkArgs "list" with at least a \code{type} entry (one of
##'     "identity", "log", "glog", "logit", "glogit"); "glog" and
##'     "glogit" additionally use the boundary entries \code{a} and
##'     \code{b}.
##' @return "matrix" of the same dimension as the linear predictor
##' @references Li 2012
##' @author Feng Li, Department of Statistics, Stockholm University, Sweden.
##' @note Created: Thu Nov 24 11:46:32 CET 2011;
##'     Current: Thu Nov 24 11:46:39 CET 2011.
##' @export
parMeanFunGrad <- function(par, linkArgs)
{
    ## Input x'b -> l(phi) = x'b -> phi
    ## NOTE: We want vectorized output, i.e, X is n-by-p, beta is p-by-1 and
    ## the output is n-by-1. But the appendix is written in scalar form.

    ## The linear predictor eta = x'b
    linpred <- parLinkFun(mu = par, linkArgs = linkArgs)
    ## Hoisted: lower-case the link name once instead of per branch.
    link <- tolower(linkArgs[["type"]])

    ## Gradient for different situations
    if (link == "identity")
    {
        ## d mu / d eta = 1; keep linpred's shape, fill with ones.
        ## (seq_along fixes the original 1:length() form, which grew a
        ## zero-length object to length one.)
        out <- linpred
        out[seq_along(linpred)] <- 1
    }
    else if (link %in% c("log", "glog"))
    {
        if (link == "glog")
        {
            a <- linkArgs$a
            b <- linkArgs$b
            if (is.null(b)) b <- Inf
        }
        else
        {
            a <- 0
            b <- Inf
        }
        out <- exp(linpred)
        ## Let the gradient be zero after the cutoff to stabilize the
        ## computation.  any() replaces the original length() > 0 guard,
        ## which was always TRUE for non-empty input; the assignment
        ## itself is unchanged (FALSE entries are a no-op).
        out.upidx <- ((out + a) >= b)
        if (any(out.upidx))
        {
            out[out.upidx] <- 0
        }
    }
    else if (link %in% c("glogit", "logit"))
    {
        if (link == "logit")
        {
            a <- 0
            b <- 1
        }
        else
        {
            a <- linkArgs$a
            if (length(a) == 0)
            {
                stop("A lower boundary parameter `a` for glogit link is expected.")
            }
            b <- linkArgs$b
            if (length(b) == 0)
            {
                b <- Inf
            }
        }
        exp.linPred <- exp(linpred)
        ## Gradient of the (generalized) logistic mean w.r.t. eta.
        out.lin <- (b-a)*exp.linPred/(1+exp.linPred)^2
        ## out.a <- 1/(1+exp.linPred)
        ## out.b <- 1/(1+1/exp.linPred)
        out <- out.lin
    }
    else
    {
        stop("This link function is not implemented yet!")
    }
    return(out)
}
## parMeanFunGrad <- function(X, beta, link)
## {
## ## Input x'b -> l(phi) = x'b -> phi
## ## NOTE: We want vectorized output, i.e, X is n-by-p, beta is p-by-1 and
## ## the output is n-by-1. But the appendix is written in scaler form.
## if(tolower(link) == "identity")
## {
## out <- X
## }
## else if(tolower(link) == "log")
## {
## linPred <- X %*% beta
## exp.linPred <- array(exp(linPred), dim(X))
## out <- exp.linPred*X
## }
## else if(tolower(link) == "logit")
## {
## linPred <- X %*% beta
## exp.linPred <- array(exp(linPred), dim(X))
## out <- exp.linPred/(1+exp.linPred)^2*X
## }
## else
## {
## stop("This link function is not implemented yet!")
## }
## return(out)
## }
|
#' seminr estimate_cbsem() function
#'
#' The \code{seminr} package provides a natural syntax for researchers to describe
#' structural equation models.
#'
#' @usage
#' estimate_cbsem(data, measurement_model = NULL,
#' structural_model = NULL, item_associations = NULL,
#' model = NULL, lavaan_model = NULL, estimator = "MLR", ...)
#'
#' @param data A \code{dataframe} containing the indicator measurement data.
#'
#' The entire CBSEM model can be specified in one of three ways:
#'
#' The pair of measurement and structural models, along associated items, can optionally be specified as separate model components
#'
#' @param measurement_model An optional \code{measurement_model} object representing the outer/measurement model,
#' as generated by \code{constructs}.
#' Note that only reflective constructs are supported for CBSEM models,
#' though a composite measurement model can be converted into a reflective one
#' using \code{\link{as.reflective}}.
#'
#' @param structural_model An optional \code{smMatrix} object representing the inner/structural model,
#' as generated by \code{relationships}.
#'
#' @param item_associations An item-to-item matrix representing error
#' covariances that are freed for estimation.
#' This matrix is created by \code{associations()}, or defaults to NULL
#' (no inter-item associations).
#'
#' The combination of measurement and structural models and inter-item associations can also be specified as a single \code{specified_model} object
#' Note that any given model components (measurement_model, structural_model, item_associations) will override components in the fully specified model
#'
#' @param model An optional \code{specified_model} object containing both the the outer/measurement and inner/structural models,
#' along with any inter-item associations, as generated by \code{specify_model}.
#'
#' The entire model can also be specified in Lavaan syntax (this overrides any other specifications)
#'
#' @param lavaan_model Optionally, a single character string containing the relevant model specification in \code{lavaan} syntax.
#'
#' Any further optional parameters to alter the estimation method:
#'
#' @param estimator A character string indicating which estimation method to use
#' in Lavaan. It defaults to "MLR" for robust estimation.
#' See the Lavaan documentation for other supported estimators.
#'
#' @param ... Any other parameters to pass to \code{lavaan::sem} during
#' estimation.
#'
#' @return A list of the estimated parameters for the CB-SEM model including:
#' \item{data}{A matrix of the data upon which the model was estimated.}
#' \item{measurement_model}{The SEMinR measurement model specification.}
#' \item{factor_loadings}{The matrix of estimated factor loadings.}
#' \item{associations}{A matrix of model variable associations.}
#' \item{mmMatrix}{A Matrix of the measurement model relations.}
#' \item{smMatrix}{A Matrix of the structural model relations.}
#' \item{constructs}{A vector of the construct names.}
#' \item{construct scores}{A matrix of the estimated construct scores for the CB-SEM model.}
#' \item{item_weights}{A matrix of the estimated CFA item weights.}
#' \item{lavaan_model}{The lavaan model syntax equivalent of the SEMinR model.}
#' \item{lavaan_output}{The raw lavaan output generated after model estimation.}
#'
#' @references Joreskog, K. G. (1973). A general method for estimating a linear structural equation system In: Goldberger AS, Duncan OD, editors. Structural Equation Models in the Social Sciences. New York: Seminar Press.
#'
#' @seealso \code{\link{as.reflective}}
#' \code{\link{relationships}} \code{\link{constructs}}
#' \code{\link{paths}}
#' \code{\link{associations}} \code{\link{item_errors}}
#'
#' @examples
#' mobi <- mobi
#'
#' #seminr syntax for creating measurement model
#' mobi_mm <- constructs(
#' reflective("Image", multi_items("IMAG", 1:5)),
#' reflective("Quality", multi_items("PERQ", 1:7)),
#' reflective("Value", multi_items("PERV", 1:2)),
#' reflective("Satisfaction", multi_items("CUSA", 1:3)),
#' reflective("Complaints", single_item("CUSCO")),
#' reflective("Loyalty", multi_items("CUSL", 1:3))
#' )
#'
#' #seminr syntax for freeing up item-item covariances
#' mobi_am <- associations(
#' item_errors(c("PERQ1", "PERQ2"), "IMAG1")
#' )
#'
#' #seminr syntax for creating structural model
#' mobi_sm <- relationships(
#' paths(from = c("Image", "Quality"), to = c("Value", "Satisfaction")),
#' paths(from = c("Value", "Satisfaction"), to = c("Complaints", "Loyalty")),
#' paths(from = "Complaints", to = "Loyalty")
#' )
#'
#' # Estimate model and get results
#' mobi_cbsem <- estimate_cbsem(mobi, mobi_mm, mobi_sm, mobi_am)
#'
#' # Use or capture the summary object for more results and metrics
#' summary(mobi_cbsem)
#'
#' cbsem_summary <- summary(mobi_cbsem)
#' cbsem_summary$descriptives$correlations$constructs
#'
#' @export
estimate_cbsem <- function(data, measurement_model=NULL, structural_model=NULL, item_associations=NULL, model=NULL, lavaan_model=NULL, estimator="MLR", ...) {
  message("Generating the seminr model for CBSEM")
  # TODO: consider higher order models (see estimate_pls() function for template)
  rawdata <- data
  if (is.null(lavaan_model)) {
    # Extract model specifications (explicit components override `model`)
    specified_model <- extract_models(model, measurement_model, structural_model, item_associations)
    measurement_model <- specified_model$measurement_model
    structural_model <- specified_model$structural_model
    item_associations <- specified_model$item_associations
    # Process measurement model interactions to produce simplified mmMatrix
    post_interaction_object <- process_cbsem_interactions(measurement_model, data, structural_model, item_associations, estimator, ...)
    names(post_interaction_object$data) <- sapply(names(post_interaction_object$data), FUN=lavaanify_name, USE.NAMES = FALSE)
    mmMatrix <- post_interaction_object$mmMatrix
    data <- post_interaction_object$data
    # Rename interaction terms
    structural_model[, "source"] <- sapply(structural_model[, "source"], FUN=lavaanify_name)
    smMatrix <- structural_model
    # TODO: warning if the model is incorrectly specified
    # warnings(measurement_model, data, structural_model)
    # Create LAVAAN syntax
    measurement_syntax <- lavaan_mm_syntax(mmMatrix)
    structural_syntax <- lavaan_sm_syntax(smMatrix)
    association_syntax <- lavaan_item_associations(item_associations)
    # Put all the parts together
    lavaan_model <- paste(measurement_syntax, structural_syntax, association_syntax, sep="\n\n")
  } else {
    # Lavaan syntax supplied directly: derive the seminr components from
    # it.  Parse once instead of twice as the original code did —
    # identical result, half the work.
    parsed_model <- lavaan2seminr(lavaan_model)
    structural_model <- smMatrix <- parsed_model$structural_model
    measurement_model <- parsed_model$measurement_model
    # using process_cbsem_interactions() to produce mmMatrix
    post_interaction_object <- process_cbsem_interactions(measurement_model, data, structural_model, item_associations, estimator, ...)
    mmMatrix <- post_interaction_object$mmMatrix
  }
  # Estimate cbsem in Lavaan
  lavaan_output <- try_or_stop(
    lavaan::sem(
      model=lavaan_model, data=data, std.lv = TRUE, estimator=estimator, ...),
    "estimating CBSEM using Lavaan"
  )
  # Extract lavaan results
  constructs <- all_construct_names(measurement_model) # needed in object for reliability... move up if lavaan_model no longer supported
  lavaan_std <- lavaan::lavInspect(lavaan_output, what="std")
  # Higher-order constructs need first- and second-order loadings combined
  HOFs <- HOCs_in_model(measurement_model, structural_model)
  if (length(HOFs) > 0) {
    loadings <- combine_first_order_second_order_loadings_cbsem(mmMatrix, rawdata, lavaan_std)
  } else {
    loadings <- lavaan_std$lambda
    class(loadings) <- "matrix"
  }
  # Arrange coefficients table of standardized structural paths
  estimates <- lavaan::standardizedSolution(lavaan_output)
  path_df <- estimates[estimates$op == "~",]
  all_antecedents <- all_exogenous(smMatrix)
  all_outcomes <- all_endogenous(smMatrix)
  path_matrix <- df_xtab_matrix(est.std ~ rhs + lhs, path_df,
                                all_antecedents, all_outcomes)
  rownames(path_matrix) <- gsub("_x_", "*", all_antecedents)
  # Compute construct scores and weights (ten Berge method)
  tenB <- estimate_lavaan_ten_berge(lavaan_output)
  # Gather model information
  seminr_model <- list(
    data = data,
    rawdata = rawdata,
    measurement_model = measurement_model,
    factor_loadings = loadings,
    associations = item_associations,
    mmMatrix = mmMatrix,
    smMatrix = smMatrix,
    constructs = constructs,
    construct_scores = tenB$scores,
    item_weights = tenB$weights,
    path_coef = path_matrix,
    lavaan_model = lavaan_model,
    lavaan_output = lavaan_output
  )
  class(seminr_model) <- c("cbsem_model", "seminr_model")
  return(seminr_model)
}
#' seminr estimate_cfa() function
#'
#' Estimates a Confirmatory Factor Analysis (CFA) model
#'
#' @inheritParams estimate_cbsem
#'
#' @return A list of the estimated parameters for the CFA model including:
#'   \item{data}{A matrix of the data upon which the model was estimated.}
#'   \item{measurement_model}{The SEMinR measurement model specification.}
#'   \item{construct scores}{A matrix of the estimated construct scores for the CB-SEM model.}
#'   \item{item_weights}{A matrix of the estimated CFA item weights.}
#'   \item{lavaan_model}{The lavaan model syntax equivalent of the SEMinR model.}
#'   \item{lavaan_output}{The raw lavaan output generated after model estimation.}
#'
#' @usage
#' estimate_cfa(data, measurement_model = NULL, item_associations=NULL,
#'              model = NULL, lavaan_model = NULL, estimator="MLR", ...)
#'
#' @references Jöreskog, K.G. (1969) A general approach to confirmatory maximum likelihood factor analysis. Psychometrika, 34, 183-202.
#'
#' @seealso \code{\link{constructs}} \code{\link{reflective}}
#'   \code{\link{associations}} \code{\link{item_errors}}
#'   \code{\link{as.reflective}}
#'
#' @examples
#' mobi <- mobi
#'
#' #seminr syntax for creating measurement model
#' mobi_mm <- constructs(
#'   reflective("Image",       multi_items("IMAG", 1:5)),
#'   reflective("Expectation", multi_items("CUEX", 1:3)),
#'   reflective("Quality",     multi_items("PERQ", 1:7))
#' )
#'
#' #seminr syntax for freeing up item-item covariances
#' mobi_am <- associations(
#'   item_errors(c("PERQ1", "PERQ2"), "CUEX3"),
#'   item_errors("IMAG1", "CUEX2")
#' )
#'
#' mobi_cfa <- estimate_cfa(mobi, mobi_mm, mobi_am)
#'
#' @export
estimate_cfa <- function(data, measurement_model=NULL, item_associations=NULL,
                         model=NULL, lavaan_model=NULL, estimator="MLR", ...) {
  message("Generating the seminr model for CFA")
  # TODO: consider higher order models (see estimate_pls() function for template)
  # TODO: warning if the model is incorrectly specified
  # warnings(measurement_model, data, structural_model)
  mmMatrix <- NULL
  # Initialize so the result list below is well-defined even when the
  # model is supplied as lavaan syntax: the original code left
  # `constructs` unassigned in that branch, so the list captured the
  # package's constructs() *function* from the enclosing environment.
  constructs <- NULL
  rawdata <- data
  if (is.null(lavaan_model)) {
    # Extract specified models
    specified_model <- extract_models(
      model = model, measurement_model = measurement_model, item_associations = item_associations
    )
    measurement_model <- specified_model$measurement_model
    item_associations <- specified_model$item_associations
    constructs <- all_construct_names(measurement_model)
    # Create LAVAAN syntax
    mmMatrix <- mm2matrix(measurement_model)
    measurement_syntax <- lavaan_mm_syntax(mmMatrix)
    association_syntax <- lavaan_item_associations(item_associations)
    lavaan_model <- paste(measurement_syntax,
                          association_syntax,
                          sep="\n\n")
  }
  # NOTE(review): when `lavaan_model` is supplied, `measurement_model`
  # may still be NULL here; HOCs_in_model(NULL) below presumably copes
  # with that — confirm against the package internals.
  # Estimate cfa in Lavaan
  lavaan_output <- try_or_stop(
    lavaan::cfa(model=lavaan_model, data=data, std.lv = TRUE,
                estimator=estimator, ...),
    "run CFA in Lavaan"
  )
  # Extract Lavaan results
  lavaan_std <- lavaan::lavInspect(lavaan_output, what="std")
  HOFs <- HOCs_in_model(measurement_model)
  if (length(HOFs) > 0) {
    loadings <- combine_first_order_second_order_loadings_cbsem(mmMatrix, rawdata, lavaan_std)
  } else {
    loadings <- lavaan_std$lambda
    class(loadings) <- "matrix"
  }
  # Compute construct scores and weights (ten Berge method)
  tenB <- estimate_lavaan_ten_berge(lavaan_output)
  # Gather model information
  seminr_model <- list(
    data = data,
    measurement_model = measurement_model,
    factor_loadings = loadings,
    constructs = constructs,
    construct_scores = tenB$scores,
    item_weights = tenB$weights,
    lavaan_model = lavaan_model,
    lavaan_output = lavaan_output
  )
  class(seminr_model) <- c("cfa_model", "seminr_model")
  return(seminr_model)
}
|
/R/estimate_cbsem.R
|
no_license
|
sem-in-r/seminr
|
R
| false
| false
| 12,705
|
r
|
#' seminr estimate_cbsem() function
#'
#' The \code{seminr} package provides a natural syntax for researchers to describe
#' structural equation models.
#'
#' @usage
#' estimate_cbsem(data, measurement_model = NULL,
#' structural_model = NULL, item_associations = NULL,
#' model = NULL, lavaan_model = NULL, estimator = "MLR", ...)
#'
#' @param data A \code{dataframe} containing the indicator measurement data.
#'
#' The entire CBSEM model can be specified in one of three ways:
#'
#' The pair of measurement and structural models, along associated items, can optionally be specified as separate model components
#'
#' @param measurement_model An optional \code{measurement_model} object representing the outer/measurement model,
#' as generated by \code{constructs}.
#' Note that only reflective constructs are supported for CBSEM models,
#' though a composite measurement model can be converted into a reflective one
#' using \code{\link{as.reflective}}.
#'
#' @param structural_model An optional \code{smMatrix} object representing the inner/structural model,
#' as generated by \code{relationships}.
#'
#' @param item_associations An item-to-item matrix representing error
#' covariances that are freed for estimation.
#' This matrix is created by \code{associations()}, or defaults to NULL
#' (no inter-item associations).
#'
#' The combination of measurement and structural models and inter-item associations can also be specified as a single \code{specified_model} object
#' Note that any given model components (measurement_model, structural_model, item_associations) will override components in the fully specified model
#'
#' @param model An optional \code{specified_model} object containing both the the outer/measurement and inner/structural models,
#' along with any inter-item associations, as generated by \code{specify_model}.
#'
#' The entire model can also be specified in Lavaan syntax (this overrides any other specifications)
#'
#' @param lavaan_model Optionally, a single character string containing the relevant model specification in \code{lavaan} syntax.
#'
#' Any further optional parameters to alter the estimation method:
#'
#' @param estimator A character string indicating which estimation method to use
#' in Lavaan. It defaults to "MLR" for robust estimation.
#' See the Lavaan documentation for other supported estimators.
#'
#' @param ... Any other parameters to pass to \code{lavaan::sem} during
#' estimation.
#'
#' @return A list of the estimated parameters for the CB-SEM model including:
#' \item{data}{A matrix of the data upon which the model was estimated.}
#' \item{measurement_model}{The SEMinR measurement model specification.}
#' \item{factor_loadings}{The matrix of estimated factor loadings.}
#' \item{associations}{A matrix of model variable associations.}
#' \item{mmMatrix}{A Matrix of the measurement model relations.}
#' \item{smMatrix}{A Matrix of the structural model relations.}
#' \item{constructs}{A vector of the construct names.}
#' \item{construct scores}{A matrix of the estimated construct scores for the CB-SEM model.}
#' \item{item_weights}{A matrix of the estimated CFA item weights.}
#' \item{lavaan_model}{The lavaan model syntax equivalent of the SEMinR model.}
#' \item{lavaan_output}{The raw lavaan output generated after model estimation.}
#'
#' @references Joreskog, K. G. (1973). A general method for estimating a linear structural equation system In: Goldberger AS, Duncan OD, editors. Structural Equation Models in the Social Sciences. New York: Seminar Press.
#'
#' @seealso \code{\link{as.reflective}}
#' \code{\link{relationships}} \code{\link{constructs}}
#' \code{\link{paths}}
#' \code{\link{associations}} \code{\link{item_errors}}
#'
#' @examples
#' mobi <- mobi
#'
#' #seminr syntax for creating measurement model
#' mobi_mm <- constructs(
#' reflective("Image", multi_items("IMAG", 1:5)),
#' reflective("Quality", multi_items("PERQ", 1:7)),
#' reflective("Value", multi_items("PERV", 1:2)),
#' reflective("Satisfaction", multi_items("CUSA", 1:3)),
#' reflective("Complaints", single_item("CUSCO")),
#' reflective("Loyalty", multi_items("CUSL", 1:3))
#' )
#'
#' #seminr syntax for freeing up item-item covariances
#' mobi_am <- associations(
#' item_errors(c("PERQ1", "PERQ2"), "IMAG1")
#' )
#'
#' #seminr syntax for creating structural model
#' mobi_sm <- relationships(
#' paths(from = c("Image", "Quality"), to = c("Value", "Satisfaction")),
#' paths(from = c("Value", "Satisfaction"), to = c("Complaints", "Loyalty")),
#' paths(from = "Complaints", to = "Loyalty")
#' )
#'
#' # Estimate model and get results
#' mobi_cbsem <- estimate_cbsem(mobi, mobi_mm, mobi_sm, mobi_am)
#'
#' # Use or capture the summary object for more results and metrics
#' summary(mobi_cbsem)
#'
#' cbsem_summary <- summary(mobi_cbsem)
#' cbsem_summary$descriptives$correlations$constructs
#'
#' @export
estimate_cbsem <- function(data, measurement_model=NULL, structural_model=NULL, item_associations=NULL, model=NULL, lavaan_model=NULL, estimator="MLR", ...) {
  message("Generating the seminr model for CBSEM")
  # TODO: consider higher order models (see estimate_pls() function for template)
  rawdata <- data
  if (is.null(lavaan_model)) {
    # Extract the measurement/structural/association components from either a
    # fully specified model object or the individual arguments.
    specified_model <- extract_models(model, measurement_model, structural_model, item_associations)
    measurement_model <- specified_model$measurement_model
    structural_model <- specified_model$structural_model
    item_associations <- specified_model$item_associations
    # Process measurement model interactions to produce a simplified mmMatrix
    # and a data set augmented with interaction-term product indicators.
    post_interaction_object <- process_cbsem_interactions(measurement_model, data, structural_model, item_associations, estimator, ...)
    # Sanitize item names so they are valid in lavaan syntax.
    names(post_interaction_object$data) <- sapply(names(post_interaction_object$data), FUN=lavaanify_name, USE.NAMES = FALSE)
    mmMatrix <- post_interaction_object$mmMatrix
    data <- post_interaction_object$data
    # Rename interaction terms in the structural model to match sanitized names.
    structural_model[, "source"] <- sapply(structural_model[, "source"], FUN=lavaanify_name)
    smMatrix <- structural_model
    # TODO: warning if the model is incorrectly specified
    # warnings(measurement_model, data, structural_model)
    # Create lavaan syntax for each component and assemble the full model.
    measurement_syntax <- lavaan_mm_syntax(mmMatrix)
    structural_syntax <- lavaan_sm_syntax(smMatrix)
    association_syntax <- lavaan_item_associations(item_associations)
    lavaan_model <- paste(measurement_syntax, structural_syntax, association_syntax, sep="\n\n")
  } else {
    # User supplied raw lavaan syntax: convert it to seminr components.
    # BUGFIX: previously lavaan2seminr() was invoked twice (once per component),
    # duplicating the parse; convert once and reuse the result.
    seminr_components <- lavaan2seminr(lavaan_model)
    structural_model <- smMatrix <- seminr_components$structural_model
    measurement_model <- seminr_components$measurement_model
    # Using process_cbsem_interactions() to produce the mmMatrix.
    post_interaction_object <- process_cbsem_interactions(measurement_model, data, structural_model, item_associations, estimator, ...)
    mmMatrix <- post_interaction_object$mmMatrix
  }
  # Estimate the CB-SEM in lavaan.
  lavaan_output <- try_or_stop(
    lavaan::sem(
      model=lavaan_model, data=data, std.lv = TRUE, estimator=estimator, ...),
    "estimating CBSEM using Lavaan"
  )
  # Extract lavaan results.
  constructs <- all_construct_names(measurement_model) # needed in object for reliability... move up if lavaan_model no longer supported
  lavaan_std <- lavaan::lavInspect(lavaan_output, what="std")
  # Higher-order constructs require first- and second-order loadings combined.
  HOFs <- HOCs_in_model(measurement_model, structural_model)
  if (length(HOFs) > 0) {
    loadings <- combine_first_order_second_order_loadings_cbsem(mmMatrix, rawdata, lavaan_std)
  } else {
    loadings <- lavaan_std$lambda
    class(loadings) <- "matrix"
  }
  # Arrange standardized path coefficients into an antecedent x outcome matrix.
  estimates <- lavaan::standardizedSolution(lavaan_output)
  path_df <- estimates[estimates$op == "~",]
  all_antecedents <- all_exogenous(smMatrix)
  all_outcomes <- all_endogenous(smMatrix)
  path_matrix <- df_xtab_matrix(est.std ~ rhs + lhs, path_df,
                                all_antecedents, all_outcomes)
  rownames(path_matrix) <- gsub("_x_", "*", all_antecedents)
  # Compute ten Berge construct scores and weights with our own methods.
  tenB <- estimate_lavaan_ten_berge(lavaan_output)
  # Gather model information into the seminr model object.
  seminr_model <- list(
    data = data,
    rawdata = rawdata,
    measurement_model = measurement_model,
    factor_loadings = loadings,
    associations = item_associations,
    mmMatrix = mmMatrix,
    smMatrix = smMatrix,
    constructs = constructs,
    construct_scores = tenB$scores,
    item_weights = tenB$weights,
    path_coef = path_matrix,
    lavaan_model = lavaan_model,
    lavaan_output = lavaan_output
  )
  class(seminr_model) <- c("cbsem_model", "seminr_model")
  return(seminr_model)
}
#' seminr estimate_cfa() function
#'
#' Estimates a Confirmatory Factor Analysis (CFA) model
#'
#' @inheritParams estimate_cbsem
#'
#' @return A list of the estimated parameters for the CFA model including:
#' \item{data}{A matrix of the data upon which the model was estimated.}
#' \item{measurement_model}{The SEMinR measurement model specification.}
#' \item{construct scores}{A matrix of the estimated construct scores for the CB-SEM model.}
#' \item{item_weights}{A matrix of the estimated CFA item weights.}
#' \item{lavaan_model}{The lavaan model syntax equivalent of the SEMinR model.}
#' \item{lavaan_output}{The raw lavaan output generated after model estimation.}
#'
#' @usage
#' estimate_cfa(data, measurement_model = NULL, item_associations=NULL,
#' model = NULL, lavaan_model = NULL, estimator="MLR", ...)
#'
#' @references Jöreskog, K.G. (1969) A general approach to confirmatory maximum likelihood factor analysis. Psychometrika, 34, 183-202.
#'
#' @seealso \code{\link{constructs}} \code{\link{reflective}}
#' \code{\link{associations}} \code{\link{item_errors}}
#' \code{\link{as.reflective}}
#'
#'
#' @examples
#' mobi <- mobi
#'
#' #seminr syntax for creating measurement model
#' mobi_mm <- constructs(
#' reflective("Image", multi_items("IMAG", 1:5)),
#' reflective("Expectation", multi_items("CUEX", 1:3)),
#' reflective("Quality", multi_items("PERQ", 1:7))
#' )
#'
#' #seminr syntax for freeing up item-item covariances
#' mobi_am <- associations(
#' item_errors(c("PERQ1", "PERQ2"), "CUEX3"),
#' item_errors("IMAG1", "CUEX2")
#' )
#'
#' mobi_cfa <- estimate_cfa(mobi, mobi_mm, mobi_am)
#'
#' @export
estimate_cfa <- function(data, measurement_model=NULL, item_associations=NULL,
                         model=NULL, lavaan_model=NULL, estimator="MLR", ...) {
  message("Generating the seminr model for CFA")
  # TODO: consider higher order models (see estimate_pls() function for template)
  # TODO: warning if the model is incorrectly specified
  # warnings(measurement_model, data, structural_model)
  mmMatrix <- NULL
  rawdata <- data
  if (is.null(lavaan_model)) {
    # Extract the measurement model and item associations from either a fully
    # specified model object or the individual arguments.
    specified_model <- extract_models(
      model = model, measurement_model = measurement_model, item_associations = item_associations
    )
    measurement_model <- specified_model$measurement_model
    item_associations <- specified_model$item_associations
    constructs <- all_construct_names(measurement_model)
    # Create lavaan syntax for the measurement model and item error covariances.
    mmMatrix <- mm2matrix(measurement_model)
    measurement_syntax <- lavaan_mm_syntax(mmMatrix)
    association_syntax <- lavaan_item_associations(item_associations)
    lavaan_model <- paste(measurement_syntax,
                          association_syntax,
                          sep="\n\n")
  } else {
    # BUGFIX: when raw lavaan syntax is supplied, `measurement_model` and
    # `constructs` were previously left undefined, so the HOCs_in_model()
    # call and the result-list construction below failed with
    # "object not found" errors. Derive them from the lavaan syntax,
    # mirroring estimate_cbsem().
    measurement_model <- lavaan2seminr(lavaan_model)$measurement_model
    constructs <- all_construct_names(measurement_model)
    mmMatrix <- mm2matrix(measurement_model)
  }
  # Estimate the CFA in lavaan.
  lavaan_output <- try_or_stop(
    lavaan::cfa(model=lavaan_model, data=data, std.lv = TRUE,
                estimator=estimator, ...),
    "run CFA in Lavaan"
  )
  # Extract lavaan results; higher-order constructs need combined loadings.
  lavaan_std <- lavaan::lavInspect(lavaan_output, what="std")
  HOFs <- HOCs_in_model(measurement_model)
  if (length(HOFs) > 0) {
    loadings <- combine_first_order_second_order_loadings_cbsem(mmMatrix, rawdata, lavaan_std)
  } else {
    loadings <- lavaan_std$lambda
    class(loadings) <- "matrix"
  }
  # Compute ten Berge construct scores and weights with our own methods.
  tenB <- estimate_lavaan_ten_berge(lavaan_output)
  # Gather model information into the seminr model object.
  seminr_model <- list(
    data = data,
    measurement_model = measurement_model,
    factor_loadings = loadings,
    constructs = constructs,
    construct_scores = tenB$scores,
    item_weights = tenB$weights,
    lavaan_model = lavaan_model,
    lavaan_output = lavaan_output
  )
  class(seminr_model) <- c("cfa_model", "seminr_model")
  return(seminr_model)
}
|
#'========================================================================================================================================
#' Project: ISWEL
#' Subject: Code to process CEEPA and extract relevant data for target countries
#' Author: Michiel van Dijk
#' Contact: michiel.vandijk@wur.nl
#'========================================================================================================================================
### PACKAGES
if(!require(pacman)) install.packages("pacman")
# Key packages
p_load("tidyverse", "readxl", "stringr", "scales", "RColorBrewer", "rprojroot")
# Spatial packages
#p_load("rgdal", "ggmap", "raster", "rasterVis", "rgeos", "sp", "mapproj", "maptools", "proj4", "gdalUtils")
# Additional packages
p_load("countrycode", "haven")
### DETERMINE ROOT PATH
root <- find_root(is_rstudio_project)
### DATAPATH
dataPath <- "H:\\MyDocuments\\Projects\\Global-to-local-GLOBIOM"
### R SETTINGS
options(scipen=999) # surpress scientific notation
options("stringsAsFactors"=FALSE) # ensures that characterdata that is loaded (e.g. csv) is not turned into factors
options(digits=4)
### FUNCTIONS
# Function to strip attributes and add classes
stripAttributes <- function(df) {
  # Coerce each column with as.vector() to drop haven labels and other
  # attributes while keeping the data frame structure and column names.
  for (i in seq_along(df)) {
    df[[i]] <- as.vector(df[[i]])
  }
  df
}
### LOAD CROP CODES
# Lookup table mapping CEEPA numeric crop codes to crop names; joined onto
# the reshaped survey tables below.
crop_code_list <- read_excel(file.path(dataPath, "Data/ZWE/Raw/Household_surveys/CEEPA/CEEPA_crop_codes.xlsx"))
### DOWNLOAD DATA
# Read the CEEPA survey (Stata file) and keep only the Zimbabwe records.
CEEPA_raw <- read_dta(file.path(dataPath, "Data/ZWE/Raw/Household_surveys/CEEPA/CEEPASurvey.dta")) %>%
  mutate(adm0 = as_factor(adm0)) %>%
  filter(adm0 %in% c("zimbabwe"))
# LOCATION VARIABLES
# One row per household: administrative levels, ISO3 code and household id.
location <- CEEPA_raw %>%
  transmute(country = adm0,
            adm1 = as_factor(adm1),
            adm2 = as_factor(adm2),
            iso3c = countrycode(adm0, "country.name", "iso3c"),
            hhcode) %>%
  stripAttributes() %>%
  dplyr::select(-country)
### Section 4_1
# Check number of plots => max 6 and only 7 in lvs
#names(CEEPA_raw)[grep("p7" , names(CEEPA_raw))]
### INFORMATION AT PLOT, CROP AND SEASON LEVEL
### section 4_1
# Wide-to-long reshape: variable names encode season (chars 1-2), plot (3-4),
# crop number (5-6) and an indicator suffix; an empty suffix is the crop code.
sect4_1 <- dplyr::select(CEEPA_raw, hhcode, s1p1c1:s1p3c6sval) %>%
  gather(variable, value, -hhcode) %>%
  separate(variable, c("season", "plotid", "crop_number", "indicator"), sep = c(2, 4, 6), remove = F) %>%
  mutate(indicator = ifelse(indicator == "", "crop_code", indicator)) %>%
  dplyr::select(-variable) %>%
  spread(indicator, value) %>%
  mutate_at(vars(area:sval), funs(replace(., is.nan(.), NA))) %>%
  filter(!is.na(crop_code)) %>%
  dplyr::select(-crop_number) %>% # replace all nan by NA
  left_join(., crop_code_list)
### YIELD AT HH AND CROP LEVEL
### Section 4_12
# Pair crop codes (pc*) with normal yields (nyieldc*) per household.
sect4_12 <- dplyr::select(CEEPA_raw, hhcode, pc1:pc5, nyieldc1:nyieldc5) %>%
  gather(variable, value, -hhcode) %>%
  separate(variable, c("var", "crop_number"), sep = c(-2), remove = T) %>%
  spread(var, value) %>%
  rename(crop_code = pc, yld = nyieldc) %>%
  left_join(., crop_code_list) %>%
  dplyr::filter(!is.na(crop_name))
### INPUTS AT PLOT AND SEASON LEVEL
### Section 4_13
# Input use (fertiliser, water sources, etc.) per season and plot.
sect4_13 <- dplyr::select(CEEPA_raw, hhcode, s1p1fert:s3p2wat5) %>%
  gather(variable, value, -hhcode) %>%
  separate(variable, c("season", "plotid", "indicator"), sep = c(2, 4), remove = T) %>%
  spread(indicator, value) %>%
  mutate_at(vars(fert:wat5), funs(replace(., is.nan(.), NA))) %>%
  dplyr::filter(season != "s3") # No information for s3
### PESTICIDE AND FERTILIZER COSTS
### Section 4_14 and 4_15
# Seems to be complete missing for Zimbabwe
sect4_14_15 <- dplyr::select(CEEPA_raw, hhcode, costkgfert, costkgpest)
### AGGREGATE AT ADM2 LEVEL
# yld
# Mean yield per adm1/adm2/crop, dropping rows with any missing value.
yld_ag <- left_join(location, sect4_12) %>%
  na.omit() %>%
  group_by(adm1, adm2, crop_name) %>%
  summarize(yld = mean(yld, na.rm = T)) %>%
  ungroup()
saveRDS(yld_ag, file.path(dataPath, "Data/ZWE/Processed/Household_surveys/yld_ag_ZWE.rds"))
# Rows of sect4_13 with at least one non-missing value in columns 4:14.
# NOTE(review): mutate_each()/funs() are deprecated dplyr APIs; consider
# across() if this script is ever revised.
no_inf <- sect4_13 %>%
  filter(rowSums(mutate_each(.[,c(4:14)], funs(is.na(.)))) != length(c(4:14)))
|
/Code/ZWE/Household_surveys/Process_CEEPA_ZWE.R
|
no_license
|
shaohuizhang/Global-to-local-GLOBIOM
|
R
| false
| false
| 3,933
|
r
|
#'========================================================================================================================================
#' Project: ISWEL
#' Subject: Code to process CEEPA and extract relevant data for target countries
#' Author: Michiel van Dijk
#' Contact: michiel.vandijk@wur.nl
#'========================================================================================================================================
### PACKAGES
if(!require(pacman)) install.packages("pacman")
# Key packages
p_load("tidyverse", "readxl", "stringr", "scales", "RColorBrewer", "rprojroot")
# Spatial packages
#p_load("rgdal", "ggmap", "raster", "rasterVis", "rgeos", "sp", "mapproj", "maptools", "proj4", "gdalUtils")
# Additional packages
p_load("countrycode", "haven")
### DETERMINE ROOT PATH
root <- find_root(is_rstudio_project)
### DATAPATH
dataPath <- "H:\\MyDocuments\\Projects\\Global-to-local-GLOBIOM"
### R SETTINGS
options(scipen=999) # surpress scientific notation
options("stringsAsFactors"=FALSE) # ensures that characterdata that is loaded (e.g. csv) is not turned into factors
options(digits=4)
### FUNCTIONS
# Function to strip attributes and add classes
stripAttributes <- function(df) {
  # Coerce each column with as.vector() to drop haven labels and other
  # attributes while keeping the data frame structure and column names.
  for (i in seq_along(df)) {
    df[[i]] <- as.vector(df[[i]])
  }
  df
}
### LOAD CROP CODES
# Lookup table mapping CEEPA numeric crop codes to crop names; joined onto
# the reshaped survey tables below.
crop_code_list <- read_excel(file.path(dataPath, "Data/ZWE/Raw/Household_surveys/CEEPA/CEEPA_crop_codes.xlsx"))
### DOWNLOAD DATA
# Read the CEEPA survey (Stata file) and keep only the Zimbabwe records.
CEEPA_raw <- read_dta(file.path(dataPath, "Data/ZWE/Raw/Household_surveys/CEEPA/CEEPASurvey.dta")) %>%
  mutate(adm0 = as_factor(adm0)) %>%
  filter(adm0 %in% c("zimbabwe"))
# LOCATION VARIABLES
# One row per household: administrative levels, ISO3 code and household id.
location <- CEEPA_raw %>%
  transmute(country = adm0,
            adm1 = as_factor(adm1),
            adm2 = as_factor(adm2),
            iso3c = countrycode(adm0, "country.name", "iso3c"),
            hhcode) %>%
  stripAttributes() %>%
  dplyr::select(-country)
### Section 4_1
# Check number of plots => max 6 and only 7 in lvs
#names(CEEPA_raw)[grep("p7" , names(CEEPA_raw))]
### INFORMATION AT PLOT, CROP AND SEASON LEVEL
### section 4_1
# Wide-to-long reshape: variable names encode season (chars 1-2), plot (3-4),
# crop number (5-6) and an indicator suffix; an empty suffix is the crop code.
sect4_1 <- dplyr::select(CEEPA_raw, hhcode, s1p1c1:s1p3c6sval) %>%
  gather(variable, value, -hhcode) %>%
  separate(variable, c("season", "plotid", "crop_number", "indicator"), sep = c(2, 4, 6), remove = F) %>%
  mutate(indicator = ifelse(indicator == "", "crop_code", indicator)) %>%
  dplyr::select(-variable) %>%
  spread(indicator, value) %>%
  mutate_at(vars(area:sval), funs(replace(., is.nan(.), NA))) %>%
  filter(!is.na(crop_code)) %>%
  dplyr::select(-crop_number) %>% # replace all nan by NA
  left_join(., crop_code_list)
### YIELD AT HH AND CROP LEVEL
### Section 4_12
# Pair crop codes (pc*) with normal yields (nyieldc*) per household.
sect4_12 <- dplyr::select(CEEPA_raw, hhcode, pc1:pc5, nyieldc1:nyieldc5) %>%
  gather(variable, value, -hhcode) %>%
  separate(variable, c("var", "crop_number"), sep = c(-2), remove = T) %>%
  spread(var, value) %>%
  rename(crop_code = pc, yld = nyieldc) %>%
  left_join(., crop_code_list) %>%
  dplyr::filter(!is.na(crop_name))
### INPUTS AT PLOT AND SEASON LEVEL
### Section 4_13
# Input use (fertiliser, water sources, etc.) per season and plot.
sect4_13 <- dplyr::select(CEEPA_raw, hhcode, s1p1fert:s3p2wat5) %>%
  gather(variable, value, -hhcode) %>%
  separate(variable, c("season", "plotid", "indicator"), sep = c(2, 4), remove = T) %>%
  spread(indicator, value) %>%
  mutate_at(vars(fert:wat5), funs(replace(., is.nan(.), NA))) %>%
  dplyr::filter(season != "s3") # No information for s3
### PESTICIDE AND FERTILIZER COSTS
### Section 4_14 and 4_15
# Seems to be complete missing for Zimbabwe
sect4_14_15 <- dplyr::select(CEEPA_raw, hhcode, costkgfert, costkgpest)
### AGGREGATE AT ADM2 LEVEL
# yld
# Mean yield per adm1/adm2/crop, dropping rows with any missing value.
yld_ag <- left_join(location, sect4_12) %>%
  na.omit() %>%
  group_by(adm1, adm2, crop_name) %>%
  summarize(yld = mean(yld, na.rm = T)) %>%
  ungroup()
saveRDS(yld_ag, file.path(dataPath, "Data/ZWE/Processed/Household_surveys/yld_ag_ZWE.rds"))
# Rows of sect4_13 with at least one non-missing value in columns 4:14.
# NOTE(review): mutate_each()/funs() are deprecated dplyr APIs; consider
# across() if this script is ever revised.
no_inf <- sect4_13 %>%
  filter(rowSums(mutate_each(.[,c(4:14)], funs(is.na(.)))) != length(c(4:14)))
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/calcChromArmPloidies.R
\name{calcChromArmPloidies}
\alias{calcChromArmPloidies}
\title{Calculate overall chrom arm copy numbers}
\usage{
calcChromArmPloidies(
purple.cnv.file,
out.file = NULL,
min.rel.cum.segment.size = 0.5,
max.rel.cum.segment.size.diff = 0.1,
chrom.arm.split.method = "hmf",
centromere.positions.path = CENTROMERE_POSITIONS,
one.armed.chroms = ONE_ARMED_CHROMS,
chrom.arm.names = "auto",
verbose = T
)
}
\arguments{
\item{purple.cnv.file}{Path to purple cnv file}
\item{out.file}{Path to output file. If NULL, returns a named vector}
\item{min.rel.cum.segment.size}{If a chrom arm has a CN category that covers >0.5 (i.e 50%; default)
of a chrom arm, this CN is the copy number of the arm}
\item{max.rel.cum.segment.size.diff}{This value (default 0.1) determines which CN
categories are considered to cover equal lengths of the chrom arm. For example, (by default) 2
CN categories covering 0.40 and 0.31 of a chrom arm are considered equally contributing. When
these CN categories have similar cumulative segment size as the one with the highest, if one of
these have the same CN as the genome CN, return the genome CN. Otherwise, simply return the one
with the highest segment support (as is done above).}
\item{chrom.arm.split.method}{Which method to determine the chromosome arm coords? If 'hmf', uses
'method' column from purple cnv file to determine centromere positions (i.e. p/q arm split point).
If 'gap', uses a (processed) gap.txt.gz table from the UCSC genome browser to determine
centromere positions. These 2 methods should in theory be identical, unless the HMF pipeline code
changes.}
\item{chrom.arm.names}{A character vector in the form c('1p','1q','2p','2q', ...). The default
'auto' means that the human chromosome arm names are used. Note that chroms 13, 14, 15, 21, 22
are considered to only have the long (i.e. q) arm.}
\item{verbose}{Show progress messages?}
}
\value{
A named vector of chrom arm copy numbers, or specified writes a table to out.file if
specified
}
\description{
This function first rounds copy numbers (CN) to integers so that CN segments can
be grouped together. Per chrom arm, the coverage of each CN category is calculated (i.e.
cumulative segment size). The chrom arm CN is (roughly) defined as the CN category with the
highest cumulative segment size
}
\examples{
# When multiple CNs have similar segment support as the one with the highest, if one
# of these has the same CN as the genome CN, return the genome CN. Otherwise, simply return
# the one with the highest segment support (as is done above).
}
|
/man/calcChromArmPloidies.Rd
|
no_license
|
UMCUGenetics/hmfGeneAnnotation
|
R
| false
| true
| 2,689
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/calcChromArmPloidies.R
\name{calcChromArmPloidies}
\alias{calcChromArmPloidies}
\title{Calculate overall chrom arm copy numbers}
\usage{
calcChromArmPloidies(
purple.cnv.file,
out.file = NULL,
min.rel.cum.segment.size = 0.5,
max.rel.cum.segment.size.diff = 0.1,
chrom.arm.split.method = "hmf",
centromere.positions.path = CENTROMERE_POSITIONS,
one.armed.chroms = ONE_ARMED_CHROMS,
chrom.arm.names = "auto",
verbose = T
)
}
\arguments{
\item{purple.cnv.file}{Path to purple cnv file}
\item{out.file}{Path to output file. If NULL, returns a named vector}
\item{min.rel.cum.segment.size}{If a chrom arm has a CN category that covers >0.5 (i.e 50%; default)
of a chrom arm, this CN is the copy number of the arm}
\item{max.rel.cum.segment.size.diff}{This value (default 0.1) determines which CN
categories are considered to cover equal lengths of the chrom arm. For example, (by default) 2
CN categories covering 0.40 and 0.31 of a chrom arm are considered equally contributing. When
these CN categories have similar cumulative segment size as the one with the highest, if one of
these have the same CN as the genome CN, return the genome CN. Otherwise, simply return the one
with the highest segment support (as is done above).}
\item{chrom.arm.split.method}{Which method to determine the chromosome arm coords? If 'hmf', uses
'method' column from purple cnv file to determine centromere positions (i.e. p/q arm split point).
If 'gap', uses a (processed) gap.txt.gz table from the UCSC genome browser to determine
centromere positions. These 2 methods should in theory be identical, unless the HMF pipeline code
changes.}
\item{chrom.arm.names}{A character vector in the form c('1p','1q','2p','2q', ...). The default
'auto' means that the human chromosome arm names are used. Note that chroms 13, 14, 15, 21, 22
are considered to only have the long (i.e. q) arm.}
\item{verbose}{Show progress messages?}
}
\value{
A named vector of chrom arm copy numbers, or specified writes a table to out.file if
specified
}
\description{
This function first rounds copy numbers (CN) to integers so that CN segments can
be grouped together. Per chrom arm, the coverage of each CN category is calculated (i.e.
cumulative segment size). The chrom arm CN is (roughly) defined as the CN category with the
highest cumulative segment size
}
\examples{
# When multiple CNs have similar segment support as the one with the highest, if one
# of these has the same CN as the genome CN, return the genome CN. Otherwise, simply return
# the one with the highest segment support (as is done above).
}
|
#' A plot function for nonlinear relationship
#'
#' This function allows you to plot a graph on the nonlinear relationship of
#' two variables: a penalized spline smooth (mgcv GAM) of y on x, drawn on the
#' link scale together with a 95% pointwise confidence ribbon.
#'
#' @param y Outcome variable (unquoted column name in \code{data}).
#' @param x Predictor variable (unquoted column name in \code{data}); Date
#'   columns are converted to numeric for fitting and back to Date for plotting.
#' @param data A data frame containing the columns named by \code{x} and \code{y}.
#' @param k Basis dimension for the spline smooth, passed to \code{mgcv::s()}.
#' @param family Model family passed to \code{mgcv::gam()}; defaults to "binomial".
#' @keywords plot_nonlinear
#' @export
plot_nonlinear <- function(y, x, data, k = 10, family = "binomial"){
  # Capture the unquoted argument names, then extract the columns from `data`.
  name_x <- deparse(substitute(x))
  name_y <- deparse(substitute(y))
  x <- data[[name_x]]
  y <- data[[name_y]]
  class_x <- class(x)
  # mgcv cannot smooth over Date directly; fit on the numeric representation.
  if(class_x %in% c("Date")){x <- as.numeric(x)}
  # Axis labels: Wu::label() metadata when available, else the column name.
  label_x <- Wu::label(x)
  label_y <- Wu::label(y)
  if (label_x == ""){
    label_x <- name_x
  }
  if (label_y == ""){
    label_y <- name_y
  }
  # Penalized spline fit via REML.
  mod <- mgcv::gam(
    y ~ s(x, k = k)
    , method = "REML"
    , family = family
  )
  # Predict on the sorted predictor values (link scale, with standard errors).
  newdata <- data.frame(x = x[order(x)])
  p <- predict(mod, newdata, type = "link", se.fit = TRUE)
  # Convert back to Date so the x axis renders readable dates.
  if(class_x %in% c("Date")){newdata$x <- as.Date(newdata$x, origin = "1970-01-01")}
  # 95% pointwise confidence band on the link scale.
  ci <- data.frame(
    x = newdata$x
    , y = p$fit
    , lower = p$fit - qnorm(.975) * p$se.fit
    , upper = p$fit + qnorm(.975) * p$se.fit
  )
  ggplot(ci, aes(x, y)) +
    geom_point(position = "jitter", color = Wu::Blues(5)) +
    geom_line(color = "grey70", alpha = 0.3) +
    geom_ribbon(
      data = ci
      , aes(ymin = lower, ymax = upper)
      , fill = Wu::Blues(15)
      , alpha = 0.3) +
    labs(x = label_x, y = label_y)
}
|
/R/plot_nonlinear.R
|
no_license
|
ghowoo/Wu
|
R
| false
| false
| 1,465
|
r
|
#' A plot function for nonlinear relationship
#'
#' This function allows you to plot a graph on the nonlinear relationship of
#' two variables: a penalized spline smooth (mgcv GAM) of y on x, drawn on the
#' link scale together with a 95% pointwise confidence ribbon.
#'
#' @param y Outcome variable (unquoted column name in \code{data}).
#' @param x Predictor variable (unquoted column name in \code{data}); Date
#'   columns are converted to numeric for fitting and back to Date for plotting.
#' @param data A data frame containing the columns named by \code{x} and \code{y}.
#' @param k Basis dimension for the spline smooth, passed to \code{mgcv::s()}.
#' @param family Model family passed to \code{mgcv::gam()}; defaults to "binomial".
#' @keywords plot_nonlinear
#' @export
plot_nonlinear <- function(y, x, data, k = 10, family = "binomial"){
  # Capture the unquoted argument names, then extract the columns from `data`.
  name_x <- deparse(substitute(x))
  name_y <- deparse(substitute(y))
  x <- data[[name_x]]
  y <- data[[name_y]]
  class_x <- class(x)
  # mgcv cannot smooth over Date directly; fit on the numeric representation.
  if(class_x %in% c("Date")){x <- as.numeric(x)}
  # Axis labels: Wu::label() metadata when available, else the column name.
  label_x <- Wu::label(x)
  label_y <- Wu::label(y)
  if (label_x == ""){
    label_x <- name_x
  }
  if (label_y == ""){
    label_y <- name_y
  }
  # Penalized spline fit via REML.
  mod <- mgcv::gam(
    y ~ s(x, k = k)
    , method = "REML"
    , family = family
  )
  # Predict on the sorted predictor values (link scale, with standard errors).
  newdata <- data.frame(x = x[order(x)])
  p <- predict(mod, newdata, type = "link", se.fit = TRUE)
  # Convert back to Date so the x axis renders readable dates.
  if(class_x %in% c("Date")){newdata$x <- as.Date(newdata$x, origin = "1970-01-01")}
  # 95% pointwise confidence band on the link scale.
  ci <- data.frame(
    x = newdata$x
    , y = p$fit
    , lower = p$fit - qnorm(.975) * p$se.fit
    , upper = p$fit + qnorm(.975) * p$se.fit
  )
  ggplot(ci, aes(x, y)) +
    geom_point(position = "jitter", color = Wu::Blues(5)) +
    geom_line(color = "grey70", alpha = 0.3) +
    geom_ribbon(
      data = ci
      , aes(ymin = lower, ymax = upper)
      , fill = Wu::Blues(15)
      , alpha = 0.3) +
    labs(x = label_x, y = label_y)
}
|
#
# This is the server logic of a Shiny web application. You can run the
# application by clicking 'Run App' above.
#
# Find out more about building applications with Shiny here:
#
# http://shiny.rstudio.com/
#
library(shiny)
# Define server logic required to draw a histogram
# Server logic: each action button sources a script that runs one imputation
# method. NOTE(review): the sourced scripts appear to create `MSE` in the
# calling environment for renderPrint below -- confirm; renderPrint is not
# reactive to the buttons, so the displayed value may not refresh.
shinyServer(
  function(input, output, session) {
    # Run MICE-based imputation when its button is pressed.
    observeEvent(input$buttonMice, {
      message("running MICE.R")
      source("newFireballDataMice.R")
      # print(MSE)
    })
    # Run random-forest-based imputation.
    observeEvent(input$buttonRF, {
      message("running RandomForest.R")
      source("newFireballDataRF.R")
      #print(MSE)
    })
    # Run XGBoost-based imputation.
    observeEvent(input$buttonXg, {
      message("running XGBoost.R")
      source("xgboost.R")
      #print(MSE)
    })
    # Run SVD-based imputation.
    observeEvent(input$buttonSVD, {
      message("running GRSVD.R")
      source("GRSVD.R")
      #print(MSE)
    })
    # Display the MSE produced by the most recently sourced script.
    output$renderprint <- renderPrint(
      MSE
    )
  }
  # (Alternative server kept for reference: dataset selection/download UI.)
  # function(input, output)
  # {
  #
  # # Reactive value for selected dataset ----
  # datasetInput <- reactive({
  # switch(input$dataset,
  # "Fireball_And_Bolide_Reports" = Fireball_And_Bolide_Reports,
  # "Near_Earth_Comets_Orbital_Elements" = Near_Earth_Comets_Orbital_Elements,
  # "Meteorite_Landings" = Meteorite_Landings,
  # "Global-Landslide-Rainfall" = Global_Landslide_Rainfall)
  # })
  #
  #
  # # Table of selected dataset ----
  # output$table <- renderTable({
  # datasetInput()
  # })
  #
  #
  # # Downloadable csv of selected dataset ----
  # output$downloadData <- downloadHandler(
  # filename = function() {
  # paste(input$dataset, ".csv", sep = "")
  # },
  # content = function(file) {
  # write.csv(datasetInput(), file, row.names = FALSE)
  # }
  # )
  #
  # }
)
|
/WebApp/DataImputation/server.R
|
no_license
|
bassantNabeh/Imputation-Data
|
R
| false
| false
| 2,214
|
r
|
#
# This is the server logic of a Shiny web application. You can run the
# application by clicking 'Run App' above.
#
# Find out more about building applications with Shiny here:
#
# http://shiny.rstudio.com/
#
library(shiny)
# Define server logic required to draw a histogram
# Server logic: each action button sources a script that runs one imputation
# method. NOTE(review): the sourced scripts appear to create `MSE` in the
# calling environment for renderPrint below -- confirm; renderPrint is not
# reactive to the buttons, so the displayed value may not refresh.
shinyServer(
  function(input, output, session) {
    # Run MICE-based imputation when its button is pressed.
    observeEvent(input$buttonMice, {
      message("running MICE.R")
      source("newFireballDataMice.R")
      # print(MSE)
    })
    # Run random-forest-based imputation.
    observeEvent(input$buttonRF, {
      message("running RandomForest.R")
      source("newFireballDataRF.R")
      #print(MSE)
    })
    # Run XGBoost-based imputation.
    observeEvent(input$buttonXg, {
      message("running XGBoost.R")
      source("xgboost.R")
      #print(MSE)
    })
    # Run SVD-based imputation.
    observeEvent(input$buttonSVD, {
      message("running GRSVD.R")
      source("GRSVD.R")
      #print(MSE)
    })
    # Display the MSE produced by the most recently sourced script.
    output$renderprint <- renderPrint(
      MSE
    )
  }
  # (Alternative server kept for reference: dataset selection/download UI.)
  # function(input, output)
  # {
  #
  # # Reactive value for selected dataset ----
  # datasetInput <- reactive({
  # switch(input$dataset,
  # "Fireball_And_Bolide_Reports" = Fireball_And_Bolide_Reports,
  # "Near_Earth_Comets_Orbital_Elements" = Near_Earth_Comets_Orbital_Elements,
  # "Meteorite_Landings" = Meteorite_Landings,
  # "Global-Landslide-Rainfall" = Global_Landslide_Rainfall)
  # })
  #
  #
  # # Table of selected dataset ----
  # output$table <- renderTable({
  # datasetInput()
  # })
  #
  #
  # # Downloadable csv of selected dataset ----
  # output$downloadData <- downloadHandler(
  # filename = function() {
  # paste(input$dataset, ".csv", sep = "")
  # },
  # content = function(file) {
  # write.csv(datasetInput(), file, row.names = FALSE)
  # }
  # )
  #
  # }
)
|
# Radio buttons choosing between raw counts and population-adjusted rates.
input_unit_ui <- function() {
  radioButtons(
    "unit",
    "Select data unit",
    choices = c("Count", "Rate (per 100K)"),
    selected = "Count"
  )
}
# Radio buttons choosing the crime category filter.
input_category_ui <- function() {
  radioButtons(
    "category",
    "Select crime category",
    choices = c("All", "Person", "Property"),
    selected = "All"
  )
}
# Year-range slider spanning the years present in APP_DATA
# (APP_DATA is assumed to be a data set defined elsewhere in the app).
input_range_ui <- function() {
  year <- APP_DATA$year
  sliderInput(
    "range",
    "Select years",
    min = min(as.integer(year)),
    max = max(as.integer(year)),
    value = c(min(as.integer(year)), max(as.integer(year))),
    step = 1,
    sep = ""
  )
}
# Region selector populated from APP_DATA, with an "All" option first.
input_region_ui <- function() {
  selectInput(
    "region",
    "Select region",
    choices = c("All", sort(unique(as.character(APP_DATA$region)))),
    selected = "All"
  )
}
# Community-type selector plus an explanatory hover popover
# (COMMUNITY_TYPES is assumed to be defined elsewhere in the app).
input_community_ui <- function() {
  list(
    selectInput(
      "rural",
      span("Select community type*", id="rural-text"),
      choices = c("All", COMMUNITY_TYPES),
      selected = "All"
    ),
    bsPopover(
      id = "rural-text",
      title = "What is community type?",
      content = "The community type of a county is based on the definition of \"rural\" by the U.S. Census Bureau (Ratcliffe et al. 2016). The original categorization consists of three categories: (1) completely rural, (2) mostly rural, and (3) mostly urban. The UCR Index Offense Explorer has added the fourth category, completely urban, for counties consisting fully of urban areas as defined by the Bureau. Please note that the categorization in this Explorer is based on the latest Census data (2010) and may diverge from the true status of each county for other years.",
      placement = "right",
      trigger = "hover",
      options = list(container = 'body')
    )
  )
}
# County selector populated from APP_DATA, with an "All" option first.
input_county_ui <- function() {
  selectInput(
    "county",
    "Select county",
    choices = c("All", sort(unique(as.character(APP_DATA$county)))),
    selected = "All"
  )
}
|
/app/modules/inputs_ui.R
|
permissive
|
ICJIA/ucr-index-offense-explorer
|
R
| false
| false
| 1,914
|
r
|
# Radio buttons choosing between raw counts and population-adjusted rates.
input_unit_ui <- function() {
  radioButtons(
    "unit",
    "Select data unit",
    choices = c("Count", "Rate (per 100K)"),
    selected = "Count"
  )
}
# Radio buttons choosing the crime category filter.
input_category_ui <- function() {
  radioButtons(
    "category",
    "Select crime category",
    choices = c("All", "Person", "Property"),
    selected = "All"
  )
}
# Year-range slider spanning the years present in APP_DATA
# (APP_DATA is assumed to be a data set defined elsewhere in the app).
input_range_ui <- function() {
  year <- APP_DATA$year
  sliderInput(
    "range",
    "Select years",
    min = min(as.integer(year)),
    max = max(as.integer(year)),
    value = c(min(as.integer(year)), max(as.integer(year))),
    step = 1,
    sep = ""
  )
}
# Region selector populated from APP_DATA, with an "All" option first.
input_region_ui <- function() {
  selectInput(
    "region",
    "Select region",
    choices = c("All", sort(unique(as.character(APP_DATA$region)))),
    selected = "All"
  )
}
# Community-type selector plus an explanatory hover popover
# (COMMUNITY_TYPES is assumed to be defined elsewhere in the app).
input_community_ui <- function() {
  list(
    selectInput(
      "rural",
      span("Select community type*", id="rural-text"),
      choices = c("All", COMMUNITY_TYPES),
      selected = "All"
    ),
    bsPopover(
      id = "rural-text",
      title = "What is community type?",
      content = "The community type of a county is based on the definition of \"rural\" by the U.S. Census Bureau (Ratcliffe et al. 2016). The original categorization consists of three categories: (1) completely rural, (2) mostly rural, and (3) mostly urban. The UCR Index Offense Explorer has added the fourth category, completely urban, for counties consisting fully of urban areas as defined by the Bureau. Please note that the categorization in this Explorer is based on the latest Census data (2010) and may diverge from the true status of each county for other years.",
      placement = "right",
      trigger = "hover",
      options = list(container = 'body')
    )
  )
}
# County selector populated from APP_DATA, with an "All" option first.
input_county_ui <- function() {
  selectInput(
    "county",
    "Select county",
    choices = c("All", sort(unique(as.character(APP_DATA$county)))),
    selected = "All"
  )
}
|
# Compute binary term frequencies: for each term, the number of documents in
# which it occurs at least once, sorted most-frequent first.
#
# Accepts a tm corpus (VCorpus/Corpus) or an existing document-term matrix
# (DocumentTermMatrix/simple_triplet_matrix); returns a data.frame with
# columns Term and Freq.
TermFrequency <-
function(x) {
  if (inherits(x, c("VCorpus", "Corpus"))) {
    # Build a binary-weighted document-term matrix from the corpus.
    dtm <- tm::DocumentTermMatrix(x, control = list(weighting = tm::weightBin))
  } else if (inherits(x, c("DocumentTermMatrix", "simple_triplet_matrix"))) {
    # Re-weight an existing DTM to binary (term present/absent per document).
    dtm <- tm::weightBin( x )
  } else {
    # BUGFIX: unsupported input previously fell through and failed later with
    # a cryptic "object 'dtm' not found" error; fail fast with a clear message.
    stop("`x` must be a tm corpus, DocumentTermMatrix, or simple_triplet_matrix",
         call. = FALSE)
  }
  dtm <- as.matrix(dtm)
  # Document counts per term, most frequent first.
  termCount <- sort(colSums(dtm), decreasing = TRUE)
  termFreq <- data.frame(
    "Term" = names( termCount ),
    "Freq" = termCount
  )
  # seq_len() is safe for zero-row results, unlike 1:nrow().
  rownames(termFreq) <- seq_len(nrow(termFreq))
  return(termFreq)
}
|
/R/TermFrequency.R
|
no_license
|
cran/KDViz
|
R
| false
| false
| 537
|
r
|
# Compute binary term frequencies: for each term, the number of documents in
# which it occurs at least once, sorted most-frequent first.
#
# Accepts a tm corpus (VCorpus/Corpus) or an existing document-term matrix
# (DocumentTermMatrix/simple_triplet_matrix); returns a data.frame with
# columns Term and Freq.
TermFrequency <-
function(x) {
  if (inherits(x, c("VCorpus", "Corpus"))) {
    # Build a binary-weighted document-term matrix from the corpus.
    dtm <- tm::DocumentTermMatrix(x, control = list(weighting = tm::weightBin))
  } else if (inherits(x, c("DocumentTermMatrix", "simple_triplet_matrix"))) {
    # Re-weight an existing DTM to binary (term present/absent per document).
    dtm <- tm::weightBin( x )
  } else {
    # BUGFIX: unsupported input previously fell through and failed later with
    # a cryptic "object 'dtm' not found" error; fail fast with a clear message.
    stop("`x` must be a tm corpus, DocumentTermMatrix, or simple_triplet_matrix",
         call. = FALSE)
  }
  dtm <- as.matrix(dtm)
  # Document counts per term, most frequent first.
  termCount <- sort(colSums(dtm), decreasing = TRUE)
  termFreq <- data.frame(
    "Term" = names( termCount ),
    "Freq" = termCount
  )
  # seq_len() is safe for zero-row results, unlike 1:nrow().
  rownames(termFreq) <- seq_len(nrow(termFreq))
  return(termFreq)
}
|
#!/usr/bin/env Rscript
# ================================================================================
#
# Coursera - Exploratory Data Analysis - Course Project 1
#
# Generate plot4.png
## downloadAndUnpackData()
#
# Fetch the household power consumption archive (unless a local copy already
# exists) and unpack it (unless the data file is already present). Existing
# files are never overwritten; they trigger warnings instead.
#
# Usage:
#   downloadAndUnpackData()
#
downloadAndUnpackData <- function() {
  archive_url <- 'https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip'
  archive_path <- 'household_power_consumption.zip'
  data_path <- 'household_power_consumption.txt'
  # Step 1: obtain the archive.
  if (file.exists(archive_path)) {
    warning('Local copy of data archive found, not downloading')
  } else {
    message('Downloading data from Internet')
    # The 'downloader' package handles HTTPS portably; fall back to the
    # built-in download.file() when it is not installed (may fail over
    # HTTPS on non-Windows platforms).
    if (require(downloader, quietly=TRUE)) {
      download(archive_url, destfile=archive_path)
    } else {
      download.file(archive_url, archive_path, mode='wb', method='auto')
    }
  }
  # Step 2: unpack the archive if the data file is not already present.
  if (file.exists(data_path)) {
    warning('Existing data file found, not unpacking data archive.')
  } else {
    message('Unpacking downloaded data archive.')
    unzip(archive_path)
  }
}
## loadSourceData()
#
# Load the two target days (2007-02-01 and 2007-02-02) of power data into a
# data.frame, adding a 'datetime' column derived from the Date and Time
# columns. A processed copy is cached as an RDS file so later calls skip
# re-parsing the large source text file.
#
# Usage:
#   srcData <- loadSourceData()
#
loadSourceData <- function() {
  cacheDataFile <- 'household_power_consumption.rds'
  if (!file.exists(cacheDataFile)) {
    # First run: parse the full semicolon-separated file ('?' marks NA),
    # keep only the two days of interest, and derive the datetime column.
    raw <- read.csv('household_power_consumption.txt', header=TRUE, sep=';',
                    na.strings='?', stringsAsFactors=FALSE)
    data <- subset(raw, Date == "1/2/2007" | Date == "2/2/2007")
    data$datetime <- strptime(sprintf('%s %s', data$Date, data$Time),
                              format='%d/%m/%Y %T')
    # Cache the processed subset for faster loading next time.
    saveRDS(data, cacheDataFile)
  } else {
    data <- readRDS(cacheDataFile)
  }
  data
}
## create_plot4()
#
# Generate a plot containing four sub-plots.
#
# Usage:
# create_plot4()
#
create_plot4 <- function() {
  # Download and unpack the source data if required.
  downloadAndUnpackData()
  # Load the data (two days of household power measurements).
  data <- loadSourceData()
  # Set plotting output to a 480x480 PNG file.
  png(filename='plot4.png', width=480, height=480)
  # Arrange four sub-plots in a 2x2 grid, filled row by row.
  par(mfrow=c(2,2))
  # Plot 1 (top-left). Line graph of Global Active Power over time.
  plot(data$datetime, data$Global_active_power, type='l', xlab='',
       ylab='Global Active Power')
  # Plot 2 (top-right). Line graph of Voltage over time.
  plot(data$datetime, data$Voltage, type='l', xlab='datetime', ylab='Voltage')
  # Plot 3 (bottom-left). Energy sub metering: three series overlaid.
  plot(data$datetime, data$Sub_metering_1, type='l', xlab='',
       ylab='Energy sub metering')
  lines(data$datetime, data$Sub_metering_2, col='red')
  lines(data$datetime, data$Sub_metering_3, col='blue')
  # bty='n' draws the legend without a bounding box.
  legend('topright', col=c('black', 'red', 'blue'), lty=par('lty'), bty='n',
         legend=c('Sub_metering_1', 'Sub_metering_2', 'Sub_metering_3'))
  # Plot 4 (bottom-right). Line graph of Global reactive power over time.
  plot(data$datetime, data$Global_reactive_power, type='l', xlab='datetime', ylab='Global_reactive_power')
  # Close the PNG device so the file is flushed to disk.
  dev.off()
}
# Run plot4 generation.
create_plot4()
|
/plot4.R
|
no_license
|
hpmcwill/ExData_Plotting1
|
R
| false
| false
| 3,715
|
r
|
#!/usr/bin/env Rscript
# ================================================================================
#
# Coursera - Exploratory Data Analysis - Course Project 1
#
# Generate plot4.png
## downloadAndUnpackData()
#
# Download and unpack the source data.
#
# Note: will not update/overwrite existing copies of the data. Warnings are
# reported if the source data file and/or the unpacked data directory already
# exist.
#
# Usage:
# downloadAndUnpackData()
#
downloadAndUnpackData <- function() {
  file_url <- 'https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip'
  file_name <- 'household_power_consumption.zip'
  data_file_name <- 'household_power_consumption.txt'
  # Download data archive only when no local copy exists; never overwrite.
  if(!file.exists(file_name)) {
    message('Downloading data from Internet')
    # If available use the 'downloader' package to deal with HTTPS sources.
    if(require(downloader, quietly=TRUE)) {
      download(file_url,destfile=file_name)
    }
    # Otherwise use the built-in (has problems with HTTPS on non-Windows platforms)
    else {
      download.file(file_url, file_name, mode='wb', method='auto')
    }
  }
  else {
    warning('Local copy of data archive found, not downloading')
  }
  # Unpack data archive only if the extracted data file is not already present.
  if(!file.exists(data_file_name)) {
    message('Unpacking downloaded data archive.')
    unzip(file_name)
  }
  else {
    warning('Existing data file found, not unpacking data archive.')
  }
}
## loadSourceData()
#
# Load the source data into a single data.frame adding a 'datetime' column
# based on the Date and Time columns in the source data.
#
# Usage:
# srcData <- loadSourceData()
#
loadSourceData <- function() {
  # Read cached data if available; the RDS cache avoids re-parsing the large
  # source text file on every call.
  cacheDataFile <- 'household_power_consumption.rds'
  if(file.exists(cacheDataFile)) {
    data <- readRDS(cacheDataFile)
  }
  else {
    # Read data for 2007-02-01 and 2007-02-02. The file is ';'-separated and
    # uses '?' to mark missing values. Note the whole file is parsed before
    # subsetting to the two days of interest.
    srcData <- read.csv('household_power_consumption.txt', header=TRUE, sep=';',
                        na.strings='?', stringsAsFactors=FALSE)
    data <- subset(srcData, Date == "1/2/2007" | Date == "2/2/2007")
    # Combine Date and Time into a single POSIXlt datetime column.
    data$datetime <- strptime(sprintf('%s %s', data$Date, data$Time), format='%d/%m/%Y %T')
    # Save processed data to a cache file for faster loading.
    saveRDS(data, cacheDataFile)
  }
  data
}
## create_plot4()
#
# Generate a plot containing four sub-plots.
#
# Usage:
# create_plot4()
#
create_plot4 <- function() {
  # Download and unpack the source data if required.
  downloadAndUnpackData()
  # Load the data (two days of household power measurements).
  data <- loadSourceData()
  # Set plotting output to a 480x480 PNG file.
  png(filename='plot4.png', width=480, height=480)
  # Arrange four sub-plots in a 2x2 grid, filled row by row.
  par(mfrow=c(2,2))
  # Plot 1 (top-left). Line graph of Global Active Power data.
  plot(data$datetime, data$Global_active_power, type='l', xlab='',
       ylab='Global Active Power')
  # Plot 2 (top-right). Line graph of Voltage over time.
  plot(data$datetime, data$Voltage, type='l', xlab='datetime', ylab='Voltage')
  # Plot 3 (bottom-left). Graph of Energy sub metering, three series overlaid.
  plot(data$datetime, data$Sub_metering_1, type='l', xlab='',
       ylab='Energy sub metering')
  lines(data$datetime, data$Sub_metering_2, col='red')
  lines(data$datetime, data$Sub_metering_3, col='blue')
  # bty='n' draws the legend without a bounding box.
  legend('topright', col=c('black', 'red', 'blue'), lty=par('lty'), bty='n',
         legend=c('Sub_metering_1', 'Sub_metering_2', 'Sub_metering_3'))
  # Plot 4 (bottom-right). Line graph of Global reactive power.
  plot(data$datetime, data$Global_reactive_power, type='l', xlab='datetime', ylab='Global_reactive_power')
  # Close the PNG device so the file is flushed to disk.
  dev.off()
}
# Run plot4 generation.
create_plot4()
|
% Generated by roxygen2 (4.0.2): do not edit by hand
\name{initialize-ASEset}
\alias{ASEsetFromArrays}
\alias{ASEsetFromCountList}
\alias{initialize-ASEset}
\title{Initialize ASEset}
\usage{
ASEsetFromCountList(rowData, countListUnknown = NULL, countListPlus = NULL,
countListMinus = NULL, colData = NULL, mapBiasExpMean = NULL,
verbose = FALSE, ...)
ASEsetFromArrays(rowData, countsUnknown = NULL, countsPlus = NULL,
countsMinus = NULL, colData = NULL, mapBiasExpMean = NULL,
verbose = FALSE, ...)
}
\arguments{
\item{rowData}{A \code{GenomicRanges object} that contains the variants of
interest}
\item{countListUnknown}{A \code{list} where each entry is a matrix with
allele counts as columns and sample counts as rows}
\item{countListPlus}{A \code{list} where each entry is a matrix with allele
counts as columns and sample counts as rows}
\item{countListMinus}{A \code{list} where each entry is a matrix with allele
counts as columns and sample counts as rows}
\item{colData}{A \code{DataFrame} object containing sample specific data}
\item{mapBiasExpMean}{A 3D \code{array} where the SNPs are in the 1st
dimension, samples in the 2nd dimension and variants in the 3rd dimension.}
\item{verbose}{Makes function more talkative}
\item{...}{arguments passed on to SummarizedExperiment constructor}
\item{countsUnknown}{An array containing the count information}
\item{countsPlus}{An array containing the count information}
\item{countsMinus}{An array containing the count information}
}
\value{
\code{ASEsetFromCountList} returns an \code{ASEset} object.
}
\description{
Functions to construct ASEset objects
}
\details{
The resulting \code{ASEset} object is based on the
\code{SummarizedExperiment}, and will therefore inherit the same accessors
and ranges operations.
If both countListPlus and countListMinus are given they will be used to
calculate countListUnknown, which is the sum of the plus and minus strands.
countListPlus, countListMinus and countListUnknown are
typically the outputs from the \code{\link{getAlleleCounts}} function.
}
\note{
\code{ASEsetFromCountList} requires the same input data as an
SummarizedExperiment, but with minimum one assay for the allele counts.
}
\examples{
#make example alleleCountListPlus
set.seed(42)
countListPlus <- list()
snps <- c('snp1','snp2','snp3','snp4','snp5')
for(snp in snps){
count<-matrix(rep(0,16),ncol=4,dimnames=list(
c('sample1','sample2','sample3','sample4'),
c('A','T','G','C')))
#insert random counts in two of the alleles
for(allele in sample(c('A','T','G','C'),2)){
count[,allele]<-as.integer(rnorm(4,mean=50,sd=10))
}
countListPlus[[snp]] <- count
}
#make example alleleCountListMinus
countListMinus <- list()
snps <- c('snp1','snp2','snp3','snp4','snp5')
for(snp in snps){
count<-matrix(rep(0,16),ncol=4,dimnames=list(
c('sample1','sample2','sample3','sample4'),
c('A','T','G','C')))
#insert random counts in two of the alleles
for(allele in sample(c('A','T','G','C'),2)){
count[,allele]<-as.integer(rnorm(4,mean=50,sd=10))
}
countListMinus[[snp]] <- count
}
#make example rowData
rowData <- GRanges(
seqnames = Rle(c('chr1', 'chr2', 'chr1', 'chr3', 'chr1')),
ranges = IRanges(1:5, width = 1, names = head(letters,5)),
snp = paste('snp',1:5,sep='')
)
#make example colData
colData <- DataFrame(Treatment=c('ChIP', 'Input','Input','ChIP'),
row.names=c('ind1','ind2','ind3','ind4'))
#make ASEset
a <- ASEsetFromCountList(rowData, countListPlus=countListPlus,
countListMinus=countListMinus, colData=colData)
}
\author{
Jesper R. Gadin, Lasse Folkersen
}
\seealso{
\itemize{ \item The
\code{\link[GenomicRanges]{SummarizedExperiment}} for ranges operations. }
}
\keyword{ASEset}
\keyword{ASEsetFromCountList}
|
/man/initialize-ASEset.Rd
|
no_license
|
jimhester/AllelicImbalance
|
R
| false
| false
| 3,724
|
rd
|
% Generated by roxygen2 (4.0.2): do not edit by hand
\name{initialize-ASEset}
\alias{ASEsetFromArrays}
\alias{ASEsetFromCountList}
\alias{initialize-ASEset}
\title{Initialize ASEset}
\usage{
ASEsetFromCountList(rowData, countListUnknown = NULL, countListPlus = NULL,
countListMinus = NULL, colData = NULL, mapBiasExpMean = NULL,
verbose = FALSE, ...)
ASEsetFromArrays(rowData, countsUnknown = NULL, countsPlus = NULL,
countsMinus = NULL, colData = NULL, mapBiasExpMean = NULL,
verbose = FALSE, ...)
}
\arguments{
\item{rowData}{A \code{GenomicRanges object} that contains the variants of
interest}
\item{countListUnknown}{A \code{list} where each entry is a matrix with
allele counts as columns and sample counts as rows}
\item{countListPlus}{A \code{list} where each entry is a matrix with allele
counts as columns and sample counts as rows}
\item{countListMinus}{A \code{list} where each entry is a matrix with allele
counts as columns and sample counts as rows}
\item{colData}{A \code{DataFrame} object containing sample specific data}
\item{mapBiasExpMean}{A 3D \code{array} where the SNPs are in the 1st
dimension, samples in the 2nd dimension and variants in the 3rd dimension.}
\item{verbose}{Makes function more talkative}
\item{...}{arguments passed on to SummarizedExperiment constructor}
\item{countsUnknown}{An array containing the count information}
\item{countsPlus}{An array containing the count information}
\item{countsMinus}{An array containing the count information}
}
\value{
\code{ASEsetFromCountList} returns an \code{ASEset} object.
}
\description{
Functions to construct ASEset objects
}
\details{
The resulting \code{ASEset} object is based on the
\code{SummarizedExperiment}, and will therefore inherit the same accessors
and ranges operations.
If both countListPlus and countListMinus are given they will be used to
calculate countListUnknown, which is the sum of the plus and minus strands.
countListPlus, countListMinus and countListUnknown are
typically the outputs from the \code{\link{getAlleleCounts}} function.
}
\note{
\code{ASEsetFromCountList} requires the same input data as an
SummarizedExperiment, but with minimum one assay for the allele counts.
}
\examples{
#make example alleleCountListPlus
set.seed(42)
countListPlus <- list()
snps <- c('snp1','snp2','snp3','snp4','snp5')
for(snp in snps){
count<-matrix(rep(0,16),ncol=4,dimnames=list(
c('sample1','sample2','sample3','sample4'),
c('A','T','G','C')))
#insert random counts in two of the alleles
for(allele in sample(c('A','T','G','C'),2)){
count[,allele]<-as.integer(rnorm(4,mean=50,sd=10))
}
countListPlus[[snp]] <- count
}
#make example alleleCountListMinus
countListMinus <- list()
snps <- c('snp1','snp2','snp3','snp4','snp5')
for(snp in snps){
count<-matrix(rep(0,16),ncol=4,dimnames=list(
c('sample1','sample2','sample3','sample4'),
c('A','T','G','C')))
#insert random counts in two of the alleles
for(allele in sample(c('A','T','G','C'),2)){
count[,allele]<-as.integer(rnorm(4,mean=50,sd=10))
}
countListMinus[[snp]] <- count
}
#make example rowData
rowData <- GRanges(
seqnames = Rle(c('chr1', 'chr2', 'chr1', 'chr3', 'chr1')),
ranges = IRanges(1:5, width = 1, names = head(letters,5)),
snp = paste('snp',1:5,sep='')
)
#make example colData
colData <- DataFrame(Treatment=c('ChIP', 'Input','Input','ChIP'),
row.names=c('ind1','ind2','ind3','ind4'))
#make ASEset
a <- ASEsetFromCountList(rowData, countListPlus=countListPlus,
countListMinus=countListMinus, colData=colData)
}
\author{
Jesper R. Gadin, Lasse Folkersen
}
\seealso{
\itemize{ \item The
\code{\link[GenomicRanges]{SummarizedExperiment}} for ranges operations. }
}
\keyword{ASEset}
\keyword{ASEsetFromCountList}
|
# TODO: Add comment
#
# Author: furia
###############################################################################
#####
## SynapseEntity "show" method
#####
# S4 "show" method: pretty-print a SynapseEntity's key properties (name, id,
# and — when present — parent id, type, and version info) to the console.
setMethod(
  f = "show",
  signature = signature("SynapseEntity"),
  definition = function(object){
    cat('An object of class "', class(object), '"\n', sep="")
    cat("Synapse Entity Name : ", properties(object)$name, "\n", sep="")
    cat("Synapse Entity Id : ", properties(object)$id, "\n", sep="")
    # Optional properties are only printed when set.
    if (!is.null(properties(object)$parentId))
      cat("Parent Id : ", properties(object)$parentId, "\n", sep="")
    if (!is.null(properties(object)$type))
      cat("Type : ", properties(object)$type, "\n", sep="")
    if (!is.null(properties(object)$versionNumber)) {
      cat("Version Number : ", properties(object)$versionNumber, "\n", sep="")
      cat("Version Label : ", properties(object)$versionLabel, "\n", sep="")
    }
    cat("\nFor complete list of annotations, please use the annotations() function.\n")
  }
)
setMethod(
f = "createEntity",
signature = "SynapseEntity",
definition = function(entity){
createSynapseEntity(entity)
}
)
# Delete the entity on the Synapse server, then attempt to remove the caller's
# variable binding for it (via nonstandard evaluation) and strip server-assigned
# properties from the returned copy.
setMethod(
  f = "deleteEntity",
  signature = "SynapseEntity",
  definition = function(entity){
    # Capture the caller's environment and the name of the argument as the
    # caller spelled it, so the binding can be removed from the caller's scope.
    envir <- parent.frame(2)
    inherits <- FALSE
    name <- deparse(substitute(entity, env=parent.frame()))
    # Server-side deletion by id.
    deleteSynapseEntity(propertyValue(entity,"id"))
    # NOTE(review): grepl() treats 'name' as a regex pattern against ls();
    # a name containing regex metacharacters, or one that is a substring of
    # another variable, may match unexpectedly — confirm exact-match intent.
    if(any(grepl(name,ls(envir=envir))))
      remove(list = name, envir=envir, inherits=inherits)
    # Strip properties that only make sense for a server-resident entity.
    entity <- deleteProperty(entity, "id")
    entity <- deleteProperty(entity, "accessControlList")
    entity <- deleteProperty(entity, "uri")
    entity <- deleteProperty(entity, "annotations")
    entity <- deleteProperty(entity, "etag")
    invisible(entity)
  }
)
setMethod(
f = "updateEntity",
signature = "SynapseEntity",
definition = function(entity)
{
updateSynapseEntity(entity)
}
)
setMethod(
f = "downloadEntity",
signature = "SynapseEntity",
definition = function(entity){
getEntity(entity)
}
)
#####
## as.list function. Coerce SynapseEntity to list by returning annotations
#####
as.list.SynapseEntity <-
function(x, ...){
as.list(annotations(x))
}
#####
## Get annotation names
#####
setMethod(
f = "annotationNames",
signature = "SynapseEntity",
definition = function(object){
annotationNames(annotations(object))
}
)
#####
## Get annotation values
#####
setMethod(
f = "annotationValues",
signature = "SynapseEntity",
definition = function(object){
annotationValues(annotations(object))
}
)
#####
## Set the values for multiple annotations
#####
setMethod(
f = "annotationValues<-",
signature = signature("SynapseEntity","list"),
definition = function(object, value){
annotationValues(annotations(object)) <- value
object
}
)
setMethod(
f = "annotValue<-",
signature = signature("SynapseEntity", "character", "ANY"),
definition = function(object, which, value){
annotValue(object@annotations, which = which) <- value
object
}
)
#####
## return the annotations object
#####
setMethod(
f = "annotations",
signature = "SynapseEntity",
definition = function(object){
object@annotations
}
)
setMethod(
f = "annotations<-",
signature = signature("SynapseEntity", "list"),
definition = function(object, value){
if(any(names(value) == ""))
stop("all elements of the list must be named")
aa <- SynapseAnnotations(properties(object))
for(n in names(value)){
annotValue(aa, n) <- value[[n]]
}
annotations(object) <- aa
object
}
)
#####
## set the annotations object
#####
setMethod(
f = "annotations<-",
signature = signature("SynapseEntity","SynapseAnnotations"),
definition = function(object, value){
object@annotations <- value
object
}
)
#####
## replace annotations with the values
#####
# NOTE(review): this re-registers the ("SynapseEntity", "list") method already
# defined earlier in this file; this later registration wins, so the earlier
# definition (which seeded a SynapseAnnotations from the entity's properties)
# is dead code. Confirm which behavior is intended and delete the other copy.
setMethod(
  f = "annotations<-",
  signature = signature("SynapseEntity", "list"),
  definition = function(object, value){
    # Build a fresh annotations object from the named list and attach it,
    # discarding any annotations the entity previously held.
    a <- new("SynapseAnnotations")
    annotations(a)<-value
    annotations(object) <- a
    object
  }
)
#####
## get an annotation value by name
#####
setMethod(
f = "annotValue",
signature = signature("SynapseEntity", "character"),
definition = function(object, which){
annotValue(annotations(object), which)
}
)
#####
## Delete an annotation
#####
setMethod(
f = "deleteAnnotation",
signature = signature("SynapseEntity", "character"),
definition = function(object, which){
annotations(object) <- deleteAnnotation(annotations(object), which)
object
}
)
#####
## constructor that takes a list entity
#####
setMethod(
f = "SynapseEntity",
signature = signature("list"),
definition = function(entity){
ee <- new("SynapseEntity")
ee@properties <- entity
ee
}
)
#####
## constructor that takes a list entity
#####
setMethod(
f = "SynapseEntity",
signature = signature("missing"),
definition = function(entity){
SynapseEntity(emptyNamedList)
}
)
#####
## constructor that takes a serialized JSON object
#####
setMethod(
  f = "SynapseEntity",
  signature = signature("character"),
  definition = function(entity){
    ## 'entity' is a serialized JSON string. Parse it to a named list and
    ## delegate to the list constructor so the parsed values populate the
    ## properties slot. The previous code assigned the raw JSON text to a
    ## slot of the parsed list (fromJSON returns a plain list, which has no
    ## '@properties' slot), which both errored and discarded the parsed data.
    SynapseEntity(fromJSON(entity))
  }
)
#####
## convert the S4 entity to a list entity
#####
setMethod(
f = ".extractEntityFromSlots",
signature = "SynapseEntity",
definition = function(object){
properties(object)
}
)
#####
## convert the list entity to an S4 entity
#####
setMethod(
f = ".populateSlotsFromEntity",
signature = signature("SynapseEntity", "list"),
definition = function(object, entity){
if(any(names(entity) == "") && length(entity) > 0)
stop("All elements of the entity must be named")
## all entity fields should be stored as properties
for(name in names(entity))
propertyValue(object, name) <- entity[[name]]
object
}
)
#####
## Get the Synapse entity kind
#####
setMethod(
f = "synapseEntityKind",
signature = "SynapseEntity",
definition = function(entity){
entity@synapseEntityKind
}
)
#####
## Set the entity kind
#####
setMethod(
f = "synapseEntityKind<-",
signature = "SynapseEntity",
definition = function(entity, value){
entity@synapseEntityKind <- value
entity
}
)
#####
## Refresh the entities annotations
#####
setMethod(
f = "refreshAnnotations",
signature = "SynapseEntity",
definition = function(entity){
# MF will refactor this code
annotations(entity) <- do.call(class(annotations(entity)), list(entity = getAnnotations(.extractEntityFromSlots(entity))))
entity
}
)
setMethod(
f = "getAnnotations",
signature = "SynapseEntity",
definition = function(entity){
as.list(entity@annotations)
}
)
# S3 names() method: a SynapseEntity exposes exactly two addressable parts,
# its properties and its annotations; the argument itself is not consulted.
names.SynapseEntity <- function(x) {
  c("properties", "annotations")
}
# S4 "[" method: list-style subsetting over the two pseudo-elements reported
# by names() ("properties", "annotations"). Numeric indices are translated to
# names; the result is a named list of the selected slots.
setMethod(
  f = "[",
  signature = "SynapseEntity",
  definition = function(x, i, j, ...){
    # Only a single subscript is allowed: reject any extra '...' args or 'j'.
    if(length(as.character(as.list(substitute(list(...)))[-1L])) > 0L || !missing(j))
      stop("incorrect number of subscripts")
    if(is.numeric(i)){
      # Translate numeric positions to slot names, bounds-checked.
      if(any(i > length(names(x))))
        stop("subscript out of bounds")
      i <- names(x)[i]
    }else if(is.character(i)){
      # Character subscripts must all be known slot names.
      if(!all(i %in% names(x)))
        stop("undefined objects selected")
    }else{
      stop(sprintf("invalid subscript type '%s'", class(i)))
    }
    # Extract each requested slot; unknown names (unreachable after the
    # checks above) would yield NULL.
    retVal <- lapply(i, function(i){
      if(i %in% names(x)){
        retVal <- slot(x, i)
      }else{
        retVal <- NULL
      }
    }
    )
    names(retVal) <- i
    retVal
  }
)
setMethod(
f = "[[",
signature = "SynapseEntity",
definition = function(x, i, j, ...){
if(length(as.character(as.list(substitute(list(...)))[-1L])) > 0L || !missing(j))
stop("incorrect number of subscripts")
if(length(i) > 1)
stop("subscript out of bounds")
x[i][[1]]
}
)
setMethod(
f = "$",
signature = "SynapseEntity",
definition = function(x, name){
x[[name]]
}
)
setReplaceMethod("$",
signature = "SynapseEntity",
definition = function(x, name, value) {
if(!(name %in% names(x)))
stop("invalid element")
slot(x, name) <- value
x
}
)
|
/R/SynapseEntity.R
|
no_license
|
hoffma23/rSynapseClient
|
R
| false
| false
| 8,318
|
r
|
# TODO: Add comment
#
# Author: furia
###############################################################################
#####
## SynapseEntity "show" method
#####
setMethod(
f = "show",
signature = signature("SynapseEntity"),
definition = function(object){
cat('An object of class "', class(object), '"\n', sep="")
cat("Synapse Entity Name : ", properties(object)$name, "\n", sep="")
cat("Synapse Entity Id : ", properties(object)$id, "\n", sep="")
if (!is.null(properties(object)$parentId))
cat("Parent Id : ", properties(object)$parentId, "\n", sep="")
if (!is.null(properties(object)$type))
cat("Type : ", properties(object)$type, "\n", sep="")
if (!is.null(properties(object)$versionNumber)) {
cat("Version Number : ", properties(object)$versionNumber, "\n", sep="")
cat("Version Label : ", properties(object)$versionLabel, "\n", sep="")
}
cat("\nFor complete list of annotations, please use the annotations() function.\n")
}
)
setMethod(
f = "createEntity",
signature = "SynapseEntity",
definition = function(entity){
createSynapseEntity(entity)
}
)
setMethod(
f = "deleteEntity",
signature = "SynapseEntity",
definition = function(entity){
envir <- parent.frame(2)
inherits <- FALSE
name <- deparse(substitute(entity, env=parent.frame()))
deleteSynapseEntity(propertyValue(entity,"id"))
if(any(grepl(name,ls(envir=envir))))
remove(list = name, envir=envir, inherits=inherits)
entity <- deleteProperty(entity, "id")
entity <- deleteProperty(entity, "accessControlList")
entity <- deleteProperty(entity, "uri")
entity <- deleteProperty(entity, "annotations")
entity <- deleteProperty(entity, "etag")
invisible(entity)
}
)
setMethod(
f = "updateEntity",
signature = "SynapseEntity",
definition = function(entity)
{
updateSynapseEntity(entity)
}
)
setMethod(
f = "downloadEntity",
signature = "SynapseEntity",
definition = function(entity){
getEntity(entity)
}
)
#####
## as.list function. Coerce SynapseEntity to list by returning annotations
#####
as.list.SynapseEntity <-
function(x, ...){
as.list(annotations(x))
}
#####
## Get annotation names
#####
setMethod(
f = "annotationNames",
signature = "SynapseEntity",
definition = function(object){
annotationNames(annotations(object))
}
)
#####
## Get annotation values
#####
setMethod(
f = "annotationValues",
signature = "SynapseEntity",
definition = function(object){
annotationValues(annotations(object))
}
)
#####
## Set the values for multiple annotations
#####
setMethod(
f = "annotationValues<-",
signature = signature("SynapseEntity","list"),
definition = function(object, value){
annotationValues(annotations(object)) <- value
object
}
)
setMethod(
f = "annotValue<-",
signature = signature("SynapseEntity", "character", "ANY"),
definition = function(object, which, value){
annotValue(object@annotations, which = which) <- value
object
}
)
#####
## return the annotations object
#####
setMethod(
f = "annotations",
signature = "SynapseEntity",
definition = function(object){
object@annotations
}
)
setMethod(
f = "annotations<-",
signature = signature("SynapseEntity", "list"),
definition = function(object, value){
if(any(names(value) == ""))
stop("all elements of the list must be named")
aa <- SynapseAnnotations(properties(object))
for(n in names(value)){
annotValue(aa, n) <- value[[n]]
}
annotations(object) <- aa
object
}
)
#####
## set the annotations object
#####
setMethod(
f = "annotations<-",
signature = signature("SynapseEntity","SynapseAnnotations"),
definition = function(object, value){
object@annotations <- value
object
}
)
#####
## replace annotations with the values
#####
# NOTE(review): this re-registers the ("SynapseEntity", "list") method already
# defined earlier in this file; this later registration wins, so the earlier
# definition (which seeded a SynapseAnnotations from the entity's properties)
# is dead code. Confirm which behavior is intended and delete the other copy.
setMethod(
  f = "annotations<-",
  signature = signature("SynapseEntity", "list"),
  definition = function(object, value){
    # Build a fresh annotations object from the named list and attach it,
    # discarding any annotations the entity previously held.
    a <- new("SynapseAnnotations")
    annotations(a)<-value
    annotations(object) <- a
    object
  }
)
#####
## get an annotation value by name
#####
setMethod(
f = "annotValue",
signature = signature("SynapseEntity", "character"),
definition = function(object, which){
annotValue(annotations(object), which)
}
)
#####
## Delete an annotation
#####
setMethod(
f = "deleteAnnotation",
signature = signature("SynapseEntity", "character"),
definition = function(object, which){
annotations(object) <- deleteAnnotation(annotations(object), which)
object
}
)
#####
## constructor that takes a list entity
#####
setMethod(
f = "SynapseEntity",
signature = signature("list"),
definition = function(entity){
ee <- new("SynapseEntity")
ee@properties <- entity
ee
}
)
#####
## constructor that takes a list entity
#####
setMethod(
f = "SynapseEntity",
signature = signature("missing"),
definition = function(entity){
SynapseEntity(emptyNamedList)
}
)
#####
## constructor that takes a serialized JSON object
#####
setMethod(
  f = "SynapseEntity",
  signature = signature("character"),
  definition = function(entity){
    ## 'entity' is a serialized JSON string. Parse it to a named list and
    ## delegate to the list constructor so the parsed values populate the
    ## properties slot. The previous code assigned the raw JSON text to a
    ## slot of the parsed list (fromJSON returns a plain list, which has no
    ## '@properties' slot), which both errored and discarded the parsed data.
    SynapseEntity(fromJSON(entity))
  }
)
#####
## convert the S4 entity to a list entity
#####
setMethod(
f = ".extractEntityFromSlots",
signature = "SynapseEntity",
definition = function(object){
properties(object)
}
)
#####
## convert the list entity to an S4 entity
#####
setMethod(
f = ".populateSlotsFromEntity",
signature = signature("SynapseEntity", "list"),
definition = function(object, entity){
if(any(names(entity) == "") && length(entity) > 0)
stop("All elements of the entity must be named")
## all entity fields should be stored as properties
for(name in names(entity))
propertyValue(object, name) <- entity[[name]]
object
}
)
#####
## Get the Synapse entity kind
#####
setMethod(
f = "synapseEntityKind",
signature = "SynapseEntity",
definition = function(entity){
entity@synapseEntityKind
}
)
#####
## Set the entity kind
#####
setMethod(
f = "synapseEntityKind<-",
signature = "SynapseEntity",
definition = function(entity, value){
entity@synapseEntityKind <- value
entity
}
)
#####
## Refresh the entities annotations
#####
setMethod(
f = "refreshAnnotations",
signature = "SynapseEntity",
definition = function(entity){
# MF will refactor this code
annotations(entity) <- do.call(class(annotations(entity)), list(entity = getAnnotations(.extractEntityFromSlots(entity))))
entity
}
)
setMethod(
f = "getAnnotations",
signature = "SynapseEntity",
definition = function(entity){
as.list(entity@annotations)
}
)
# S3 names() method: a SynapseEntity exposes exactly two addressable parts,
# its properties and its annotations; the argument itself is not consulted.
names.SynapseEntity <- function(x) {
  c("properties", "annotations")
}
setMethod(
f = "[",
signature = "SynapseEntity",
definition = function(x, i, j, ...){
if(length(as.character(as.list(substitute(list(...)))[-1L])) > 0L || !missing(j))
stop("incorrect number of subscripts")
if(is.numeric(i)){
if(any(i > length(names(x))))
stop("subscript out of bounds")
i <- names(x)[i]
}else if(is.character(i)){
if(!all(i %in% names(x)))
stop("undefined objects selected")
}else{
stop(sprintf("invalid subscript type '%s'", class(i)))
}
retVal <- lapply(i, function(i){
if(i %in% names(x)){
retVal <- slot(x, i)
}else{
retVal <- NULL
}
}
)
names(retVal) <- i
retVal
}
)
setMethod(
f = "[[",
signature = "SynapseEntity",
definition = function(x, i, j, ...){
if(length(as.character(as.list(substitute(list(...)))[-1L])) > 0L || !missing(j))
stop("incorrect number of subscripts")
if(length(i) > 1)
stop("subscript out of bounds")
x[i][[1]]
}
)
setMethod(
f = "$",
signature = "SynapseEntity",
definition = function(x, name){
x[[name]]
}
)
setReplaceMethod("$",
signature = "SynapseEntity",
definition = function(x, name, value) {
if(!(name %in% names(x)))
stop("invalid element")
slot(x, name) <- value
x
}
)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/optimisers.R
\name{optimisers}
\alias{optimisers}
\alias{nelder_mead}
\alias{powell}
\alias{cg}
\alias{bfgs}
\alias{newton_cg}
\alias{l_bfgs_b}
\alias{tnc}
\alias{cobyla}
\alias{slsqp}
\alias{gradient_descent}
\alias{adadelta}
\alias{adagrad}
\alias{adagrad_da}
\alias{momentum}
\alias{adam}
\alias{ftrl}
\alias{proximal_gradient_descent}
\alias{proximal_adagrad}
\alias{rms_prop}
\title{optimisation methods}
\usage{
nelder_mead()
powell()
cg()
bfgs()
newton_cg()
l_bfgs_b(maxcor = 10, maxls = 20)
tnc(max_cg_it = -1, stepmx = 0, rescale = -1)
cobyla(rhobeg = 1)
slsqp()
gradient_descent(learning_rate = 0.01)
adadelta(learning_rate = 0.001, rho = 1, epsilon = 1e-08)
adagrad(learning_rate = 0.8, initial_accumulator_value = 0.1)
adagrad_da(learning_rate = 0.8, global_step = 1L,
initial_gradient_squared_accumulator_value = 0.1,
l1_regularization_strength = 0, l2_regularization_strength = 0)
momentum(learning_rate = 0.001, momentum = 0.9, use_nesterov = TRUE)
adam(learning_rate = 0.1, beta1 = 0.9, beta2 = 0.999,
epsilon = 1e-08)
ftrl(learning_rate = 1, learning_rate_power = -0.5,
initial_accumulator_value = 0.1, l1_regularization_strength = 0,
l2_regularization_strength = 0)
proximal_gradient_descent(learning_rate = 0.01,
l1_regularization_strength = 0, l2_regularization_strength = 0)
proximal_adagrad(learning_rate = 1, initial_accumulator_value = 0.1,
l1_regularization_strength = 0, l2_regularization_strength = 0)
rms_prop(learning_rate = 0.1, decay = 0.9, momentum = 0,
epsilon = 1e-10)
}
\arguments{
\item{maxcor}{maximum number of 'variable metric corrections' used to define
the approximation to the hessian matrix}
\item{maxls}{maximum number of line search steps per iteration}
\item{max_cg_it}{maximum number of hessian * vector evaluations per iteration}
\item{stepmx}{maximum step for the line search}
\item{rescale}{log10 scaling factor used to trigger rescaling of objective}
\item{rhobeg}{reasonable initial changes to the variables}
\item{learning_rate}{the size of steps (in parameter space) towards the
optimal value}
\item{rho}{the decay rate}
\item{epsilon}{a small constant used to condition gradient updates}
\item{initial_accumulator_value}{initial value of the 'accumulator' used to
tune the algorithm}
\item{global_step}{the current training step number}
\item{initial_gradient_squared_accumulator_value}{initial value of the
accumulators used to tune the algorithm}
\item{l1_regularization_strength}{L1 regularisation coefficient (must be 0 or
greater)}
\item{l2_regularization_strength}{L2 regularisation coefficient (must be 0 or
greater)}
\item{momentum}{the momentum of the algorithm}
\item{use_nesterov}{whether to use Nesterov momentum}
\item{beta1}{exponential decay rate for the 1st moment estimates}
\item{beta2}{exponential decay rate for the 2nd moment estimates}
\item{learning_rate_power}{power on the learning rate, must be 0 or less}
\item{decay}{discounting factor for the gradient}
}
\value{
an \code{optimiser} object that can be passed to \code{\link{opt}}.
}
\description{
Functions to set up optimisers (which find parameters that
maximise the joint density of a model) and change their tuning parameters,
for use in \code{\link{opt}()}. For details of the algorithms and how to
tune them, see the
\href{https://docs.scipy.org/doc/scipy/reference/generated/scipy.optimize.minimize.html}{SciPy
optimiser docs} or the
\href{https://www.tensorflow.org/api_docs/python/tf/contrib/opt}{TensorFlow
optimiser docs}.
}
\details{
The optimisers \code{powell()}, \code{cg()}, \code{newton_cg()},
\code{l_bfgs_b()}, \code{tnc()}, \code{cobyla()}, and \code{slsqp()} are
deprecated. They will be removed in greta 0.4.0, since they will no longer
be available in TensorFlow 2.0, on which that version of greta will depend.
The \code{cobyla()} does not provide information about the number of
iterations nor convergence, so these elements of the output are set to NA
}
\examples{
\dontrun{
# use optimisation to find the mean and sd of some data
x <- rnorm(100, -2, 1.2)
mu <- variable()
sd <- variable(lower = 0)
distribution(x) <- normal(mu, sd)
m <- model(mu, sd)
# configure optimisers & parameters via 'optimiser' argument to opt
opt_res <- opt(m, optimiser = bfgs())
# compare results with the analytic solution
opt_res$par
c(mean(x), sd(x))
}
}
|
/man/optimisers.Rd
|
permissive
|
jeffreypullin/greta
|
R
| false
| true
| 4,455
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/optimisers.R
\name{optimisers}
\alias{optimisers}
\alias{nelder_mead}
\alias{powell}
\alias{cg}
\alias{bfgs}
\alias{newton_cg}
\alias{l_bfgs_b}
\alias{tnc}
\alias{cobyla}
\alias{slsqp}
\alias{gradient_descent}
\alias{adadelta}
\alias{adagrad}
\alias{adagrad_da}
\alias{momentum}
\alias{adam}
\alias{ftrl}
\alias{proximal_gradient_descent}
\alias{proximal_adagrad}
\alias{rms_prop}
\title{optimisation methods}
\usage{
nelder_mead()
powell()
cg()
bfgs()
newton_cg()
l_bfgs_b(maxcor = 10, maxls = 20)
tnc(max_cg_it = -1, stepmx = 0, rescale = -1)
cobyla(rhobeg = 1)
slsqp()
gradient_descent(learning_rate = 0.01)
adadelta(learning_rate = 0.001, rho = 1, epsilon = 1e-08)
adagrad(learning_rate = 0.8, initial_accumulator_value = 0.1)
adagrad_da(learning_rate = 0.8, global_step = 1L,
initial_gradient_squared_accumulator_value = 0.1,
l1_regularization_strength = 0, l2_regularization_strength = 0)
momentum(learning_rate = 0.001, momentum = 0.9, use_nesterov = TRUE)
adam(learning_rate = 0.1, beta1 = 0.9, beta2 = 0.999,
epsilon = 1e-08)
ftrl(learning_rate = 1, learning_rate_power = -0.5,
initial_accumulator_value = 0.1, l1_regularization_strength = 0,
l2_regularization_strength = 0)
proximal_gradient_descent(learning_rate = 0.01,
l1_regularization_strength = 0, l2_regularization_strength = 0)
proximal_adagrad(learning_rate = 1, initial_accumulator_value = 0.1,
l1_regularization_strength = 0, l2_regularization_strength = 0)
rms_prop(learning_rate = 0.1, decay = 0.9, momentum = 0,
epsilon = 1e-10)
}
\arguments{
\item{maxcor}{maximum number of 'variable metric corrections' used to define
the approximation to the hessian matrix}
\item{maxls}{maximum number of line search steps per iteration}
\item{max_cg_it}{maximum number of hessian * vector evaluations per iteration}
\item{stepmx}{maximum step for the line search}
\item{rescale}{log10 scaling factor used to trigger rescaling of objective}
\item{rhobeg}{reasonable initial changes to the variables}
\item{learning_rate}{the size of steps (in parameter space) towards the
optimal value}
\item{rho}{the decay rate}
\item{epsilon}{a small constant used to condition gradient updates}
\item{initial_accumulator_value}{initial value of the 'accumulator' used to
tune the algorithm}
\item{global_step}{the current training step number}
\item{initial_gradient_squared_accumulator_value}{initial value of the
accumulators used to tune the algorithm}
\item{l1_regularization_strength}{L1 regularisation coefficient (must be 0 or
greater)}
\item{l2_regularization_strength}{L2 regularisation coefficient (must be 0 or
greater)}
\item{momentum}{the momentum of the algorithm}
\item{use_nesterov}{whether to use Nesterov momentum}
\item{beta1}{exponential decay rate for the 1st moment estimates}
\item{beta2}{exponential decay rate for the 2nd moment estimates}
\item{learning_rate_power}{power on the learning rate, must be 0 or less}
\item{decay}{discounting factor for the gradient}
}
\value{
an \code{optimiser} object that can be passed to \code{\link{opt}}.
}
\description{
Functions to set up optimisers (which find parameters that
maximise the joint density of a model) and change their tuning parameters,
for use in \code{\link{opt}()}. For details of the algorithms and how to
tune them, see the
\href{https://docs.scipy.org/doc/scipy/reference/generated/scipy.optimize.minimize.html}{SciPy
optimiser docs} or the
\href{https://www.tensorflow.org/api_docs/python/tf/contrib/opt}{TensorFlow
optimiser docs}.
}
\details{
The optimisers \code{powell()}, \code{cg()}, \code{newton_cg()},
\code{l_bfgs_b()}, \code{tnc()}, \code{cobyla()}, and \code{slsqp()} are
deprecated. They will be removed in greta 0.4.0, since they will no longer
be available in TensorFlow 2.0, on which that version of greta will depend.
The \code{cobyla()} does not provide information about the number of
iterations nor convergence, so these elements of the output are set to NA
}
\examples{
\dontrun{
# use optimisation to find the mean and sd of some data
x <- rnorm(100, -2, 1.2)
mu <- variable()
sd <- variable(lower = 0)
distribution(x) <- normal(mu, sd)
m <- model(mu, sd)
# configure optimisers & parameters via 'optimiser' argument to opt
opt_res <- opt(m, optimiser = bfgs())
# compare results with the analytic solution
opt_res$par
c(mean(x), sd(x))
}
}
|
# postdlnre.lnre <- function (model, x, m, N, ...) {
# if (! inherits(model, "lnre")) stop("first argument must be object of class 'lnre'")
# if (!(is.numeric(N) && all(N >= 0))) stop("argument 'N' must be non-negative integer")
# if (!(is.numeric(m) && all(m >= 1))) stop("argument 'm' must be positive integer")
#
# factor <- exp( m * log(N * x) - N * x - Cgamma(m + 1, log=TRUE) ) # = (Nx)^m * exp(-Nx) / m!
# factor * tdlnre(model, x) / EVm(model, m, N) ## ******* was dlnre(), but shouldn't this be tdlnre() instead?? *******
# }
#
# postldlnre.lnre <- function (model, x, m, N, base=10, log.x=FALSE, ...)
# {
# if (! inherits(model, "lnre")) stop("first argument must be object of class 'lnre'")
#
# if (log.x) x <- base ^ x
# log(base) * x * postdlnre(model, x, m, N, ...)
# }
|
/zipfR/R/posterior_lnre.R
|
no_license
|
ingted/R-Examples
|
R
| false
| false
| 804
|
r
|
# postdlnre.lnre <- function (model, x, m, N, ...) {
# if (! inherits(model, "lnre")) stop("first argument must be object of class 'lnre'")
# if (!(is.numeric(N) && all(N >= 0))) stop("argument 'N' must be non-negative integer")
# if (!(is.numeric(m) && all(m >= 1))) stop("argument 'm' must be positive integer")
#
# factor <- exp( m * log(N * x) - N * x - Cgamma(m + 1, log=TRUE) ) # = (Nx)^m * exp(-Nx) / m!
# factor * tdlnre(model, x) / EVm(model, m, N) ## ******* was dlnre(), but shouldn't this be tdlnre() instead?? *******
# }
#
# postldlnre.lnre <- function (model, x, m, N, base=10, log.x=FALSE, ...)
# {
# if (! inherits(model, "lnre")) stop("first argument must be object of class 'lnre'")
#
# if (log.x) x <- base ^ x
# log(base) * x * postdlnre(model, x, m, N, ...)
# }
|
% Generated by roxygen2 (4.0.2): do not edit by hand
\name{shinyalert}
\alias{shinyalert}
\title{shinyalert}
\usage{
shinyalert(id, click.hide = TRUE, auto.close.after = NULL)
}
\arguments{
\item{id}{Specifies the alert id that will be used to access the}
\item{click.hide}{If TRUE then clicking on the alert will hide it. Defaults to TRUE}
\item{auto.close.after}{After this many seconds auto close the alert}
}
\description{
Creates an shinyalert
}
\seealso{
shinyalert
Other ShinySky.elements: \code{\link{actionButton}};
\code{\link{hideshinyalert}}; \code{\link{select2Input}};
\code{\link{showshinyalert}};
\code{\link{textInput.typeahead}}
}
|
/man/shinyalert.Rd
|
permissive
|
edwinacu/ShinySky
|
R
| false
| false
| 659
|
rd
|
% Generated by roxygen2 (4.0.2): do not edit by hand
\name{shinyalert}
\alias{shinyalert}
\title{shinyalert}
\usage{
shinyalert(id, click.hide = TRUE, auto.close.after = NULL)
}
\arguments{
\item{id}{Specifies the alert id that will be used to access the}
\item{click.hide}{If TRUE then clicking on the alert will hide it. Defaults to TRUE}
\item{auto.close.after}{After this many seconds auto close the alert}
}
\description{
Creates an shinyalert
}
\seealso{
shinyalert
Other ShinySky.elements: \code{\link{actionButton}};
\code{\link{hideshinyalert}}; \code{\link{select2Input}};
\code{\link{showshinyalert}};
\code{\link{textInput.typeahead}}
}
|
\name{air}
\alias{air}
\docType{data}
\title{
dataset from an environmental study.
}
\description{
This dataset contains four variables: The concentration of the air pollutant ozone, wind speed, temperature and radiation. All of them are daily measurements for 111 days. Usually the concentration of the air pollutant ozone serves as the response variable while the other three are predictors.
}
\usage{data("air")}
\format{
A data frame with 111 observations on the following 4 variables.
\describe{
\item{\code{ozone}}{a numeric vector in cube root ppb}
\item{\code{radiation}}{a numeric vector in langley}
\item{\code{temperature}}{a numeric vector in degrees F}
\item{\code{wind_speed}}{a numeric vector in mph}
}
}
\examples{
data(air)
y=air$ozone # response
X=as.matrix(air[,3:4]) # single index term ;
Z=air[,2] # partially linear term ;
result <- gplsim(y,X,Z=Z,family = gaussian,k=10)
result$theta
result$coefficients
summary(result)
# Or you can try different spline basis
result <- gplsim(y,X,Z=Z,family = gaussian,bs="tp",k=10)
result$theta
result$coefficients
summary(result)
}
\keyword{datasets}
|
/man/air.Rd
|
no_license
|
zzz1990771/gplsim
|
R
| false
| false
| 1,208
|
rd
|
\name{air}
\alias{air}
\docType{data}
\title{
dataset from an environmental study.
}
\description{
This dataset contains four variables: The concentration of the air pollutant ozone, wind speed, temperature and radiation. All of them are daily measurements for 111 days. Usually the concentration of the air pollutant ozone serves as the response variable while the other three are predictors.
}
\usage{data("air")}
\format{
A data frame with 111 observations on the following 4 variables.
\describe{
\item{\code{ozone}}{a numeric vector in cube root ppb}
\item{\code{radiation}}{a numeric vector in langley}
\item{\code{temperature}}{a numeric vector in degrees F}
\item{\code{wind_speed}}{a numeric vector in mph}
}
}
\examples{
data(air)
y=air$ozone # response
X=as.matrix(air[,3:4]) # single index term ;
Z=air[,2] # partially linear term ;
result <- gplsim(y,X,Z=Z,family = gaussian,k=10)
result$theta
result$coefficients
summary(result)
# Or you can try different spline basis
result <- gplsim(y,X,Z=Z,family = gaussian,bs="tp",k=10)
result$theta
result$coefficients
summary(result)
}
\keyword{datasets}
|
# Install and load packages
package_names <- c("survey","dplyr","foreign","devtools")
lapply(package_names, function(x) if(!x %in% installed.packages()) install.packages(x))
lapply(package_names, require, character.only=T)
install_github("e-mitchell/meps_r_pkg/MEPS")
library(MEPS)
options(survey.lonely.psu="adjust")
# Load FYC file
FYC <- read_sas('C:/MEPS/.FYC..sas7bdat');
year <- .year.
if(year <= 2001) FYC <- FYC %>% mutate(VARPSU = VARPSU.yy., VARSTR=VARSTR.yy.)
if(year <= 1998) FYC <- FYC %>% rename(PERWT.yy.F = WTDPER.yy.)
if(year == 1996) FYC <- FYC %>% mutate(AGE42X = AGE2X, AGE31X = AGE1X)
FYC <- FYC %>%
mutate_at(vars(starts_with("AGE")),funs(replace(., .< 0, NA))) %>%
mutate(AGELAST = coalesce(AGE.yy.X, AGE42X, AGE31X))
FYC$ind = 1
# Age groups
# To compute for all age groups, replace 'agegrps' in the 'svyby' function with 'agegrps_v2X' or 'agegrps_v3X'
FYC <- FYC %>%
mutate(agegrps = cut(AGELAST,
breaks = c(-1, 4.5, 17.5, 44.5, 64.5, Inf),
labels = c("Under 5","5-17","18-44","45-64","65+"))) %>%
mutate(agegrps_v2X = cut(AGELAST,
breaks = c(-1, 17.5 ,64.5, Inf),
labels = c("Under 18","18-64","65+"))) %>%
mutate(agegrps_v3X = cut(AGELAST,
breaks = c(-1, 4.5, 6.5, 12.5, 17.5, 18.5, 24.5, 29.5, 34.5, 44.5, 54.5, 64.5, Inf),
labels = c("Under 5", "5-6", "7-12", "13-17", "18", "19-24", "25-29",
"30-34", "35-44", "45-54", "55-64", "65+")))
# Poverty status
if(year == 1996)
FYC <- FYC %>% rename(POVCAT96 = POVCAT)
FYC <- FYC %>%
mutate(poverty = recode_factor(POVCAT.yy., .default = "Missing", .missing = "Missing",
"1" = "Negative or poor",
"2" = "Near-poor",
"3" = "Low income",
"4" = "Middle income",
"5" = "High income"))
FYCdsgn <- svydesign(
id = ~VARPSU,
strata = ~VARSTR,
weights = ~PERWT.yy.F,
data = FYC,
nest = TRUE)
results <- svyby(~TOTEXP.yy., FUN = svymean, by = ~poverty + agegrps, design = subset(FYCdsgn, TOTEXP.yy. > 0))
print(results)
|
/mepstrends/hc_use/json/code/r/meanEXP__poverty__agegrps__.r
|
permissive
|
HHS-AHRQ/MEPS-summary-tables
|
R
| false
| false
| 2,050
|
r
|
# Install and load packages
package_names <- c("survey","dplyr","foreign","devtools")
lapply(package_names, function(x) if(!x %in% installed.packages()) install.packages(x))
lapply(package_names, require, character.only=T)
install_github("e-mitchell/meps_r_pkg/MEPS")
library(MEPS)
options(survey.lonely.psu="adjust")
# Load FYC file
FYC <- read_sas('C:/MEPS/.FYC..sas7bdat');
year <- .year.
if(year <= 2001) FYC <- FYC %>% mutate(VARPSU = VARPSU.yy., VARSTR=VARSTR.yy.)
if(year <= 1998) FYC <- FYC %>% rename(PERWT.yy.F = WTDPER.yy.)
if(year == 1996) FYC <- FYC %>% mutate(AGE42X = AGE2X, AGE31X = AGE1X)
FYC <- FYC %>%
mutate_at(vars(starts_with("AGE")),funs(replace(., .< 0, NA))) %>%
mutate(AGELAST = coalesce(AGE.yy.X, AGE42X, AGE31X))
FYC$ind = 1
# Age groups
# To compute for all age groups, replace 'agegrps' in the 'svyby' function with 'agegrps_v2X' or 'agegrps_v3X'
FYC <- FYC %>%
mutate(agegrps = cut(AGELAST,
breaks = c(-1, 4.5, 17.5, 44.5, 64.5, Inf),
labels = c("Under 5","5-17","18-44","45-64","65+"))) %>%
mutate(agegrps_v2X = cut(AGELAST,
breaks = c(-1, 17.5 ,64.5, Inf),
labels = c("Under 18","18-64","65+"))) %>%
mutate(agegrps_v3X = cut(AGELAST,
breaks = c(-1, 4.5, 6.5, 12.5, 17.5, 18.5, 24.5, 29.5, 34.5, 44.5, 54.5, 64.5, Inf),
labels = c("Under 5", "5-6", "7-12", "13-17", "18", "19-24", "25-29",
"30-34", "35-44", "45-54", "55-64", "65+")))
# Poverty status
if(year == 1996)
FYC <- FYC %>% rename(POVCAT96 = POVCAT)
FYC <- FYC %>%
mutate(poverty = recode_factor(POVCAT.yy., .default = "Missing", .missing = "Missing",
"1" = "Negative or poor",
"2" = "Near-poor",
"3" = "Low income",
"4" = "Middle income",
"5" = "High income"))
FYCdsgn <- svydesign(
id = ~VARPSU,
strata = ~VARSTR,
weights = ~PERWT.yy.F,
data = FYC,
nest = TRUE)
results <- svyby(~TOTEXP.yy., FUN = svymean, by = ~poverty + agegrps, design = subset(FYCdsgn, TOTEXP.yy. > 0))
print(results)
|
\name{NEWS}
\title{News for Package 'biwavelet'}
\encoding{UTF-8}
\section{Changes in biwavelet version 0.20.15 (2017-03-01)}{
\subsection{fixed}{
\itemize{
\item Fixed return NULL issues with Windows platforms
}
}
}
\section{Changes in biwavelet version 0.20.14 (2017-02-24)}{
\subsection{fixed}{
\itemize{
\item Fixed \code{plot.biwavelet} so that the COI extends all
the way to the bottom of the plot (max of periods)
}
}
}
\section{Changes in biwavelet version 0.20.13 (2016-12-27)}{
\subsection{fixed}{
\itemize{
\item Fixed \code{plot.biwavelet} so that the \code{arrow.cutoff}
argument applies to rsq values for wtc and pwtc objects
}
}
}
\section{Changes in biwavelet version 0.20.12 (2016-12-27)}{
\subsection{fixed}{
\itemize{
\item Fixed \code{plot.biwavelet} so that the \code{arrow.cutoff}
argument applies to rsq values
}
}
}
\section{Changes in biwavelet version 0.20.11 (2016-08-31)}{
\subsection{added}{
\itemize{
\item Build vignettes
}
}
}
\section{Changes in biwavelet version 0.20.10 (2016-08-10)}{
\subsection{fixed}{
\itemize{
\item Function \code{phase.biwavelet} now plots the regions whose significance
exceeds \code{arrow.cutoff}. If the object being plotted does not have
a significance field, regions whose zvalues exceed the \code{arrow.cutoff}
quantile will be plotted.
\item Fixed C++ warning about unsupported dynamically sized arrays
}
}
}
\section{Changes in biwavelet version 0.20.9 (2016-07-12)}{
\subsection{fixed}{
\itemize{
\item Fixed handling of \code{lag1} coefficients in \code{wtc}.
}
}
}
\section{Changes in biwavelet version 0.20.8 (2016-06-25)}{
\subsection{fixed}{
\itemize{
\item Fixed handling of axis preferences in \code{plot.biwavelet}.
}
}
}
\section{Changes in biwavelet version 0.20.7 (2016-06-01)}{
\subsection{fixed}{
\itemize{
\item Fixed handling of time in \code{check.data}.
}
}
}
\section{Changes in biwavelet version 0.20.6 (2016-06-01)}{
\subsection{fixed}{
\itemize{
\item Fixed x-axis in \code{plot.biwavelet}.
}
}
}
\section{Changes in biwavelet version 0.20.3 (2016-05-08)}{
\subsection{fixed}{
\itemize{
\item Fixed displacement of COI, contours and phase arrows in \code{plot.biwavelet} when adding a color bar.
}
}
}
\section{Changes in biwavelet version 0.20.2 (2016-05-06)}{
\subsection{fixed}{
\itemize{
\item Fixed \code{check.datum}; hopefully for the last time.
\item Faster wt bases and row quantile (Rcpp implementations):
\itemize{
\item The \code{param} parameter for all \code{rcpp_wt_bases_*} must be
within interval (0..10).
\item The \code{rcpp_row_quantile} function requires
a matrix as a parameter (use \code{as.matrix()} for vectors).
}
}
}
}
\section{Changes in biwavelet version 0.19.2 (2016-05-03)}{
\subsection{fixed}{
\itemize{
\item Fixed Rcpp implementation of the \code{wt.bases} functions,
i.e., \code{rcpp_wt_bases_*.cpp}.
Replacing \code{int} type with \code{double} type for \code{scale} parameter
which caused small scales to be rendered incorrectly.
}
}
}
\section{Changes in biwavelet version 0.19.1 (2016-04-29)}{
\subsection{fixed}{
\itemize{
\item Fixed interpretation of phase differences in \code{plot.biwavelet} help file
}
}
\subsection{changed}{
\itemize{
\item Added unit tests for approx 78\% of the code.
\item Implemented a parallelized Monte Carlo simulation function
\code{wtc_sig_parallel} which is 2 to 4 times faster on a 4-core CPU
than the original \code{wtc.sig}. The speedup is noticeable on:
\enumerate{
\item large simulations \code{nrads >= 800},
\item multiple simulations,
\item multi-core systems with 4+ cores.
}
However, parallelization involves a significant heat-up phase because all
the workers need to be started and they need to load all the required
packages. This will be addresses in future versions of biwavelet.
\item Added a speed-optimized version of \code{convolve2D}.
\item Replaced standard \code{arima.sim} function with a pair of functions
\code{get_minroots} and \code{ar1_ma0_sim}. These functions are still
implemented in R. We can reimplement them later in C.
\item Reimplemented \code{wt.bases} morlet, paul and dog in C.
\item Removed unused function \code{meshgrid}.
}
}
}
\section{Changes in biwavelet version 0.17.11 (2015-10-09)}{
\subsection{fixed}{
\itemize{
\item close all progress bars after use
\item Function \code{wtc} can now handle non-finite values when computing
the quantiles of the rsq values from the Monte Carlo simulations
}
}
}
\section{Changes in biwavelet version 0.17.10 (2015-04-29)}{
\subsection{fixed}{
\itemize{
\item Added ability to handle custom color palettes in \code{plot.biwavelet}.
Users can now specify any color scheme using the \code{fill.cols} argument.
}
}
}
\section{Changes in biwavelet version 0.17.9 (2015-04-29)}{
\subsection{fixed}{
\itemize{
\item Fixed limited padding issue, which could lead to weird edge effects.
Current padding level is identical to that of Torrence & Compo (1998).
\item Changed the default \code{tol} value from 0.95 to 1 in the \code{plot} function.}
}
}
\section{Changes in biwavelet version 0.17.8 (2015-04-28)}{
\subsection{fixed}{
\itemize{
\item Added semi-transparent COI.}
}
}
\section{Changes in biwavelet version 0.17.7 (2015-04-13)}{
\subsection{fixed}{
\itemize{
\item Fixed \code{check.datum} function so that it does not assume a sampling frequency of 1.}
}
}
\section{Changes in biwavelet version 0.17.6 (2015-04-05)}{
\subsection{fixed}{
\itemize{
\item Added ability to set \code{zlim} in \code{plot.biwavelet}.}
}
}
\section{Changes in biwavelet version 0.17.5 (2014-11-05)}{
\subsection{fixed}{
\itemize{
\item Improved the implementation of \code{phase.plot} to allow for much better looking phase arrows (thanks Huidong Tang).}
}
}
\section{Changes in biwavelet version 0.17.4 (2014-11-04)}{
\subsection{fixed}{
\itemize{
\item Made function \code{wt} faster by avoiding excessive padding (thanks Huidong Tang).}
}
}
\section{Changes in biwavelet version 0.17.3 (2014-04-27)}{
\subsection{fixed}{
\itemize{
\item Made \code{check.datum} tolerate slight inconsistencies in the size of timesteps.}
}
}
\section{Changes in biwavelet version 0.17.2 (2014-04-11)}{
\subsection{fixed}{
\itemize{
\item Added arguments in \code{plot.biwavelet} and \code{phase.plot} to control the length
of the phase arrows and the size of the arrow heads independently.}
}
}
\section{Changes in biwavelet version 0.17.1 (2013-07-15)}{
\subsection{fixed}{
\itemize{
\item Fixed code in \code{check.data} to test for constant step size in the data.}
}
}
\section{Changes in biwavelet version 0.17 (2013-06-05)}{
\subsection{added}{
\itemize{
\item Function \code{pwtc} can be used to perform partial wavelet coherence between two time series
\code{y} and \code{x1} by controlling for (or partialling-out) a third time series \code{x2}.
}
}
}
\section{Changes in biwavelet version 0.16 (2013-05-07)}{
\subsection{added}{
\itemize{
\item Users can now specify the density of the phase arrows using the \code{plot} function.
}
}
\subsection{fixed}{
\itemize{
\item Fixed bug in \code{wt} affecting the significance region (thanks Patrick Kilduff and Flora Cordoleani).
}
}
}
\section{Changes in biwavelet version 0.15 (2013-04-08)}{
\subsection{added}{
\itemize{
\item Users can now specify the color, line width and line type for
the COI, significance contours and phase arrows using the \code{plot} function.
}
}
\subsection{fixed}{
\itemize{
\item Removed misleading examples showing how to compute the 'bias-corrected' wavelet coherence.
There is no bias for the wavelet coherence function, so using the default \code{type} argument in the
\code{plot} function is recommended.
\item Fixed typos in the documentation of plot.biwavelet and xwt (thanks Lei Cheng).
}
}
}
\section{Changes in biwavelet version 0.14 (2013-03-06)}{
\subsection{added}{
\itemize{
\item As of biwavelet version 0.14, the bias-corrected wavelet and
cross-wavelet spectra are automatically computed and plotted by default
using the methods described by Liu et al. (2007) and Veleda et al. (2012).
This correction is needed because the traditional approach for computing the power
spectrum (e.g., Torrence and Compo 1998) leads to an artificial and systematic
reduction in power at lower periods.
}
}
\subsection{fixed}{
\itemize{
\item Plotting function now accepts traditional plotting flags such as xaxt and yaxt
to control x and y tickmarks.
}
}
}
|
/inst/NEWS.Rd
|
no_license
|
mabelsfatalfable/biwavelet
|
R
| false
| false
| 9,325
|
rd
|
\name{NEWS}
\title{News for Package 'biwavelet'}
\encoding{UTF-8}
\section{Changes in biwavelet version 0.20.15 (2017-03-01)}{
\subsection{fixed}{
\itemize{
\item Fixed return NULL issues with Windows platforms
}
}
}
\section{Changes in biwavelet version 0.20.14 (2017-02-24)}{
\subsection{fixed}{
\itemize{
\item Fixed \code{plot.biwavelet} so that the COI extends all
the way to the bottom of the plot (max of periods)
}
}
}
\section{Changes in biwavelet version 0.20.13 (2016-12-27)}{
\subsection{fixed}{
\itemize{
\item Fixed \code{plot.biwavelet} so that the \code{arrow.cutoff}
argument applies to rsq values for wtc and pwtc objects
}
}
}
\section{Changes in biwavelet version 0.20.12 (2016-12-27)}{
\subsection{fixed}{
\itemize{
\item Fixed \code{plot.biwavelet} so that the \code{arrow.cutoff}
argument applies to rsq values
}
}
}
\section{Changes in biwavelet version 0.20.11 (2016-08-31)}{
\subsection{added}{
\itemize{
\item Build vignettes
}
}
}
\section{Changes in biwavelet version 0.20.10 (2016-08-10)}{
\subsection{fixed}{
\itemize{
\item Function \code{phase.biwavelet} now plots the regions whose significance
exceeds \code{arrow.cutoff}. If the object being plotted does not have
a significance field, regions whose zvalues exceed the \code{arrow.cutoff}
quantile will be plotted.
\item Fixed C++ warning about unsupported dynamically sized arrays
}
}
}
\section{Changes in biwavelet version 0.20.9 (2016-07-12)}{
\subsection{fixed}{
\itemize{
\item Fixed handling of \code{lag1} coefficients in \code{wtc}.
}
}
}
\section{Changes in biwavelet version 0.20.8 (2016-06-25)}{
\subsection{fixed}{
\itemize{
\item Fixed handling of axis preferences in \code{plot.biwavelet}.
}
}
}
\section{Changes in biwavelet version 0.20.7 (2016-06-01)}{
\subsection{fixed}{
\itemize{
\item Fixed handling of time in \code{check.data}.
}
}
}
\section{Changes in biwavelet version 0.20.6 (2016-06-01)}{
\subsection{fixed}{
\itemize{
\item Fixed x-axis in \code{plot.biwavelet}.
}
}
}
\section{Changes in biwavelet version 0.20.3 (2016-05-08)}{
\subsection{fixed}{
\itemize{
\item Fixed displacement of COI, contours and phase arrows in \code{plot.biwavelet} when adding a color bar.
}
}
}
\section{Changes in biwavelet version 0.20.2 (2016-05-06)}{
\subsection{fixed}{
\itemize{
\item Fixed \code{check.datum}; hopefully for the last time.
\item Faster wt bases and row quantile (Rcpp implementations):
\itemize{
\item The \code{param} parameter for all \code{rcpp_wt_bases_*} must be
within interval (0..10).
\item The \code{rcpp_row_quantile} function requires
a matrix as a parameter (use \code{as.matrix()} for vectors).
}
}
}
}
\section{Changes in biwavelet version 0.19.2 (2016-05-03)}{
\subsection{fixed}{
\itemize{
\item Fixed Rcpp implementation of the \code{wt.bases} functions,
i.e., \code{rcpp_wt_bases_*.cpp}.
Replacing \code{int} type with \code{double} type for \code{scale} parameter
which caused small scales to be rendered incorrectly.
}
}
}
\section{Changes in biwavelet version 0.19.1 (2016-04-29)}{
\subsection{fixed}{
\itemize{
\item Fixed interpretation of phase differences in \code{plot.biwavelet} help file
}
}
\subsection{changed}{
\itemize{
\item Added unit tests for approx 78\% of the code.
\item Implemented a parallelized Monte Carlo simulation function
\code{wtc_sig_parallel} which is 2 to 4 times faster on a 4-core CPU
than the original \code{wtc.sig}. The speedup is noticeable on:
\enumerate{
\item large simulations \code{nrads >= 800},
\item multiple simulations,
\item multi-core systems with 4+ cores.
}
However, parallelization involves a significant heat-up phase because all
the workers need to be started and they need to load all the required
packages. This will be addresses in future versions of biwavelet.
\item Added a speed-optimized version of \code{convolve2D}.
\item Replaced standard \code{arima.sim} function with a pair of functions
\code{get_minroots} and \code{ar1_ma0_sim}. These functions are still
implemented in R. We can reimplement them later in C.
\item Reimplemented \code{wt.bases} morlet, paul and dog in C.
\item Removed unused function \code{meshgrid}.
}
}
}
\section{Changes in biwavelet version 0.17.11 (2015-10-09)}{
\subsection{fixed}{
\itemize{
\item close all progress bars after use
\item Function \code{wtc} can now handle non-finite values when computing
the quantiles of the rsq values from the Monte Carlo simulations
}
}
}
\section{Changes in biwavelet version 0.17.10 (2015-04-29)}{
\subsection{fixed}{
\itemize{
\item Added ability to handle custom color palettes in \code{plot.biwavelet}.
Users can now specify any color scheme using the \code{fill.cols} argument.
}
}
}
\section{Changes in biwavelet version 0.17.9 (2015-04-29)}{
\subsection{fixed}{
\itemize{
\item Fixed limited padding issue, which could lead to weird edge effects.
Current padding level is identical to that of Torrence & Compo (1998).
\item Changed the default \code{tol} value from 0.95 to 1 in the \code{plot} function.}
}
}
\section{Changes in biwavelet version 0.17.8 (2015-04-28)}{
\subsection{fixed}{
\itemize{
\item Added semi-transparent COI.}
}
}
\section{Changes in biwavelet version 0.17.7 (2015-04-13)}{
\subsection{fixed}{
\itemize{
\item Fixed \code{check.datum} function so that it does not assume a sampling frequency of 1.}
}
}
\section{Changes in biwavelet version 0.17.6 (2015-04-05)}{
\subsection{fixed}{
\itemize{
\item Added ability to set \code{zlim} in \code{plot.biwavelet}.}
}
}
\section{Changes in biwavelet version 0.17.5 (2014-11-05)}{
\subsection{fixed}{
\itemize{
\item Improved the implementation of \code{phase.plot} to allow for much better looking phase arrows (thanks Huidong Tang).}
}
}
\section{Changes in biwavelet version 0.17.4 (2014-11-04)}{
\subsection{fixed}{
\itemize{
\item Made function \code{wt} faster by avoiding excessive padding (thanks Huidong Tang).}
}
}
\section{Changes in biwavelet version 0.17.3 (2014-04-27)}{
\subsection{fixed}{
\itemize{
\item Made \code{check.datum} tolerate slight inconsistencies in the size of timesteps.}
}
}
\section{Changes in biwavelet version 0.17.2 (2014-04-11)}{
\subsection{fixed}{
\itemize{
\item Added arguments in \code{plot.biwavelet} and \code{phase.plot} to control the length
of the phase arrows and the size of the arrow heads independently.}
}
}
\section{Changes in biwavelet version 0.17.1 (2013-07-15)}{
\subsection{fixed}{
\itemize{
\item Fixed code in \code{check.data} to test for constant step size in the data.}
}
}
\section{Changes in biwavelet version 0.17 (2013-06-05)}{
\subsection{added}{
\itemize{
\item Function \code{pwtc} can be used to perform partial wavelet coherence between two time series
\code{y} and \code{x1} by controlling for (or partialling-out) a third time series \code{x2}.
}
}
}
\section{Changes in biwavelet version 0.16 (2013-05-07)}{
\subsection{added}{
\itemize{
\item Users can now specify the density of the phase arrows using the \code{plot} function.
}
}
\subsection{fixed}{
\itemize{
\item Fixed bug in \code{wt} affecting the significance region (thanks Patrick Kilduff and Flora Cordoleani).
}
}
}
\section{Changes in biwavelet version 0.15 (2013-04-08)}{
\subsection{added}{
\itemize{
\item Users can now specify the color, line width and line type for
the COI, significance contours and phase arrows using the \code{plot} function.
}
}
\subsection{fixed}{
\itemize{
\item Removed misleading examples showing how to compute the 'bias-corrected' wavelet coherence.
There is no bias for the wavelet coherence function, so using the default \code{type} argument in the
\code{plot} function is recommended.
\item Fixed typos in the documentation of plot.biwavelet and xwt (thanks Lei Cheng).
}
}
}
\section{Changes in biwavelet version 0.14 (2013-03-06)}{
\subsection{added}{
\itemize{
\item As of biwavelet version 0.14, the bias-corrected wavelet and
cross-wavelet spectra are automatically computed and plotted by default
using the methods described by Liu et al. (2007) and Veleda et al. (2012).
This correction is needed because the traditional approach for computing the power
spectrum (e.g., Torrence and Compo 1998) leads to an artificial and systematic
reduction in power at lower periods.
}
}
\subsection{fixed}{
\itemize{
\item Plotting function now accepts traditional plotting flags such as xaxt and yaxt
to control x and y tickmarks.
}
}
}
|
# Auto-generated REDCap export script for the ASKED-IT study: reads the raw
# data export CSV, attaches human-readable variable labels via Hmisc::label(),
# and adds a parallel "<name>.factor" column with descriptive level labels for
# each coded categorical variable. Intended to be run top-to-bottom as-is.
graphics.off()
#Load Hmisc library
library(Hmisc)
#Read Data
data=read.csv('S:/Shared Projects/Laura/BDC/Projects/Sonalee Ravi/ASKED-IT/Data/ASKEDIT_DATA_2018-12-05_1420.csv')
#Setting Labels
label(data$pid)="Participant ID"
label(data$redcap_event_name)="Event Name"
label(data$age)="Age"
label(data$nih_sex)="Gender"
label(data$diab_durration)="Diabetes Duration"
label(data$nih_race)="Race (Choose the one with which you MOST CLOSELY identify)"
label(data$ethnicity)="Ethnicity"
label(data$zipcode)="Zipcode"
label(data$insulindelivery)="How do you regularly give your insulin?"
label(data$pumpkind)="What kind of pump do you have?"
label(data$basalinsulin)="What is your long acting insulin?"
label(data$cgm)="Do you use a continuous glucose monitor?"
label(data$othercgm)="What kind of continuous glucose monitor do you use?"
label(data$grade)="Grade in School"
label(data$insurance)="What type of insurance do you have?"
label(data$homes)="Do you live in more than one household?"
label(data$income)="What is the total family income, before taxes, of all the adult members in the household where you spend most of your time?"
label(data$mwikadenroll)="Enrollment MWIKAD Score"
label(data$enrolla1c)="Enrollment A1c"
label(data$enrollbgfrequency)="Enrollment BG testing frequency"
label(data$enrollhigh)="Enrollment % high"
label(data$enrollinrange)="Enrollment % in range"
label(data$enrolllow)="Enrollment % low"
label(data$mwikad3month)="3 month MWIKAD"
label(data$a1c3month)="3 month A1C"
label(data$bgtest3month)="3 month BG testing frequency"
label(data$high3month)="3 month % high"
label(data$inrange3month)="3 month % in range"
label(data$low3month)="3 month % low"
label(data$mwikad6month)="6 month MWIKAD"
label(data$a1c6month)="6 month A1c"
label(data$testing6month)="6 month BG testing frequency"
label(data$high6month)="6 month % high"
# NOTE(review): "in rage" below looks like a typo for "in range" (compare the
# 3-month label); the label text is runtime output, so confirm before fixing.
label(data$inrange6month)="6 month % in rage"
label(data$low6month)="6 month % low"
label(data$response_rate)="3 month response rate"
label(data$response_rate_6_months)="6 month response rate"
label(data$totalenroll)="SMODA Enrollment Total Score"
label(data$totalsmod3month)="SMODA 3 month total score"
label(data$totalsmod6month)="SMODA 6 month Total Score"
#Setting Units
#Setting Factors(will create new variable for factors)
# First pass fixes the level order for each coded column; the raw columns are
# kept untouched and the factors are added alongside them.
data$redcap_event_name.factor = factor(data$redcap_event_name,levels=c("enrollment_arm_1","3_months_arm_1","6_months_arm_1","enrollment_arm_2","3_months_arm_2","6_months_arm_2"))
data$nih_sex.factor = factor(data$nih_sex,levels=c("1","2","3"))
data$nih_race.factor = factor(data$nih_race,levels=c("1","2","3","4","5","6","7"))
data$ethnicity.factor = factor(data$ethnicity,levels=c("1","2","3"))
data$insulindelivery.factor = factor(data$insulindelivery,levels=c("0","1","2"))
data$pumpkind.factor = factor(data$pumpkind,levels=c("1","2","3","4","5","6","7"))
data$basalinsulin.factor = factor(data$basalinsulin,levels=c("1","2","3","4"))
# NOTE(review): unlike the other coded columns, cgm is matched against string
# codes here -- presumably the export stores strings for this field; verify.
data$cgm.factor = factor(data$cgm,levels=c("Dexcom","Medtronic","Other","No"))
data$grade.factor = factor(data$grade,levels=c("1","2","3","4","5","6","7","8","9"))
data$insurance.factor = factor(data$insurance,levels=c("1","2","3","4","5","6"))
data$homes.factor = factor(data$homes,levels=c("1","2","3"))
data$income.factor = factor(data$income,levels=c("1","2","3","4","5","6","7","8","9","10"))
# Second pass replaces the codes with descriptive level labels.
levels(data$redcap_event_name.factor)=c("Enrollment (Arm 1: Control)","3 Months (Arm 1: Control)","6 Months (Arm 1: Control)","Enrollment (Arm 2: Texting)","3 Months (Arm 2: Texting)","6 Months (Arm 2: Texting)")
levels(data$nih_sex.factor)=c("Female","Male","Unknown or Not Reported")
levels(data$nih_race.factor)=c("American Indian or Alaska Native","Asian","Black or African-American","Native Hawaiian or Other Pacific Islander","White","More than one race","Unknown or not reported")
levels(data$ethnicity.factor)=c("Hispanic or Latino","Not Hispanic or Latino","Unknown or not reported")
levels(data$insulindelivery.factor)=c("Insulin Pump","Insulin shots with carbohydrate counting","Insulin shots with set doses at meals based on blood sugar")
levels(data$pumpkind.factor)=c("Medtronic Revel, Paradigm or 530G","Medtronic 630G","Medtronic 670G","Animus Ping or Animus Vibe","Omnipod","Tandem T-slim or T-Flex","Other")
levels(data$basalinsulin.factor)=c("Lantus","Levemir","Tresiba","Other")
levels(data$cgm.factor)=c("Yes, Dexcom","Yes, Medtronic","Yes, Other","No")
levels(data$grade.factor)=c("7th","8th","9th","10th","11th","12th","Graduated high school, currently enrolled in college","Graduated high school, not enrolled in college","I got my GED")
levels(data$insurance.factor)=c("Private Insurance","Medicaid","Medicare","CHP+","Tricare","Currently do not have medical insurance")
levels(data$homes.factor)=c("Yes, I spend about equal time in each","Yes, but I mostly spend time at one","No")
# NOTE(review): left disabled -- only 2 labels survive for the 10 income
# levels (the label text appears truncated in the export); restore the full
# 10-label set before enabling this line.
#levels(data$income.factor)=c("$150,000","I dont know")
|
/Sonalee Ravi/ASKEDIT/ASKEDIT_R_2018-12-05_1420.r
|
no_license
|
childhealthbiostatscore/BDC-Code
|
R
| false
| false
| 4,890
|
r
|
# Auto-generated REDCap export script for the ASKED-IT study: reads the raw
# data export CSV, attaches human-readable variable labels via Hmisc::label(),
# and adds a parallel "<name>.factor" column with descriptive level labels for
# each coded categorical variable. Intended to be run top-to-bottom as-is.
graphics.off()
#Load Hmisc library
library(Hmisc)
#Read Data
data=read.csv('S:/Shared Projects/Laura/BDC/Projects/Sonalee Ravi/ASKED-IT/Data/ASKEDIT_DATA_2018-12-05_1420.csv')
#Setting Labels
label(data$pid)="Participant ID"
label(data$redcap_event_name)="Event Name"
label(data$age)="Age"
label(data$nih_sex)="Gender"
label(data$diab_durration)="Diabetes Duration"
label(data$nih_race)="Race (Choose the one with which you MOST CLOSELY identify)"
label(data$ethnicity)="Ethnicity"
label(data$zipcode)="Zipcode"
label(data$insulindelivery)="How do you regularly give your insulin?"
label(data$pumpkind)="What kind of pump do you have?"
label(data$basalinsulin)="What is your long acting insulin?"
label(data$cgm)="Do you use a continuous glucose monitor?"
label(data$othercgm)="What kind of continuous glucose monitor do you use?"
label(data$grade)="Grade in School"
label(data$insurance)="What type of insurance do you have?"
label(data$homes)="Do you live in more than one household?"
label(data$income)="What is the total family income, before taxes, of all the adult members in the household where you spend most of your time?"
label(data$mwikadenroll)="Enrollment MWIKAD Score"
label(data$enrolla1c)="Enrollment A1c"
label(data$enrollbgfrequency)="Enrollment BG testing frequency"
label(data$enrollhigh)="Enrollment % high"
label(data$enrollinrange)="Enrollment % in range"
label(data$enrolllow)="Enrollment % low"
label(data$mwikad3month)="3 month MWIKAD"
label(data$a1c3month)="3 month A1C"
label(data$bgtest3month)="3 month BG testing frequency"
label(data$high3month)="3 month % high"
label(data$inrange3month)="3 month % in range"
label(data$low3month)="3 month % low"
label(data$mwikad6month)="6 month MWIKAD"
label(data$a1c6month)="6 month A1c"
label(data$testing6month)="6 month BG testing frequency"
label(data$high6month)="6 month % high"
# NOTE(review): "in rage" below looks like a typo for "in range" (compare the
# 3-month label); the label text is runtime output, so confirm before fixing.
label(data$inrange6month)="6 month % in rage"
label(data$low6month)="6 month % low"
label(data$response_rate)="3 month response rate"
label(data$response_rate_6_months)="6 month response rate"
label(data$totalenroll)="SMODA Enrollment Total Score"
label(data$totalsmod3month)="SMODA 3 month total score"
label(data$totalsmod6month)="SMODA 6 month Total Score"
#Setting Units
#Setting Factors(will create new variable for factors)
# First pass fixes the level order for each coded column; the raw columns are
# kept untouched and the factors are added alongside them.
data$redcap_event_name.factor = factor(data$redcap_event_name,levels=c("enrollment_arm_1","3_months_arm_1","6_months_arm_1","enrollment_arm_2","3_months_arm_2","6_months_arm_2"))
data$nih_sex.factor = factor(data$nih_sex,levels=c("1","2","3"))
data$nih_race.factor = factor(data$nih_race,levels=c("1","2","3","4","5","6","7"))
data$ethnicity.factor = factor(data$ethnicity,levels=c("1","2","3"))
data$insulindelivery.factor = factor(data$insulindelivery,levels=c("0","1","2"))
data$pumpkind.factor = factor(data$pumpkind,levels=c("1","2","3","4","5","6","7"))
data$basalinsulin.factor = factor(data$basalinsulin,levels=c("1","2","3","4"))
# NOTE(review): unlike the other coded columns, cgm is matched against string
# codes here -- presumably the export stores strings for this field; verify.
data$cgm.factor = factor(data$cgm,levels=c("Dexcom","Medtronic","Other","No"))
data$grade.factor = factor(data$grade,levels=c("1","2","3","4","5","6","7","8","9"))
data$insurance.factor = factor(data$insurance,levels=c("1","2","3","4","5","6"))
data$homes.factor = factor(data$homes,levels=c("1","2","3"))
data$income.factor = factor(data$income,levels=c("1","2","3","4","5","6","7","8","9","10"))
# Second pass replaces the codes with descriptive level labels.
levels(data$redcap_event_name.factor)=c("Enrollment (Arm 1: Control)","3 Months (Arm 1: Control)","6 Months (Arm 1: Control)","Enrollment (Arm 2: Texting)","3 Months (Arm 2: Texting)","6 Months (Arm 2: Texting)")
levels(data$nih_sex.factor)=c("Female","Male","Unknown or Not Reported")
levels(data$nih_race.factor)=c("American Indian or Alaska Native","Asian","Black or African-American","Native Hawaiian or Other Pacific Islander","White","More than one race","Unknown or not reported")
levels(data$ethnicity.factor)=c("Hispanic or Latino","Not Hispanic or Latino","Unknown or not reported")
levels(data$insulindelivery.factor)=c("Insulin Pump","Insulin shots with carbohydrate counting","Insulin shots with set doses at meals based on blood sugar")
levels(data$pumpkind.factor)=c("Medtronic Revel, Paradigm or 530G","Medtronic 630G","Medtronic 670G","Animus Ping or Animus Vibe","Omnipod","Tandem T-slim or T-Flex","Other")
levels(data$basalinsulin.factor)=c("Lantus","Levemir","Tresiba","Other")
levels(data$cgm.factor)=c("Yes, Dexcom","Yes, Medtronic","Yes, Other","No")
levels(data$grade.factor)=c("7th","8th","9th","10th","11th","12th","Graduated high school, currently enrolled in college","Graduated high school, not enrolled in college","I got my GED")
levels(data$insurance.factor)=c("Private Insurance","Medicaid","Medicare","CHP+","Tricare","Currently do not have medical insurance")
levels(data$homes.factor)=c("Yes, I spend about equal time in each","Yes, but I mostly spend time at one","No")
# NOTE(review): left disabled -- only 2 labels survive for the 10 income
# levels (the label text appears truncated in the export); restore the full
# 10-label set before enabling this line.
#levels(data$income.factor)=c("$150,000","I dont know")
|
## http://r.789695.n4.nabble.com/Converting-data-frame-into-multidimensional-array-td3926705.html
|
/pla/R/as.array.R
|
no_license
|
ingted/R-Examples
|
R
| false
| false
| 101
|
r
|
## http://r.789695.n4.nabble.com/Converting-data-frame-into-multidimensional-array-td3926705.html
|
# Fuzzer-generated regression input: invokes the unexported
# dexterMST:::mutate_booklet_score with a crafted argument list (note the
# duplicated "id" entries and the empty item_score/person_id vectors), then
# prints the structure of the result. This reproduces a specific
# libFuzzer/valgrind finding and is not a general-purpose test.
testlist <- list(id = NULL, score = NULL, id = NULL, booklet_id = c(-101058055L, -101058055L, -101058055L, -101058055L, -101058055L, -101058055L, -101058055L, -101058055L, -101058055L, -101058055L, -101058055L, -4193537L, 268435456L, 0L, 353173504L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L), item_score = integer(0), person_id = integer(0))
# do.call passes the list elements to the target as (named) arguments.
result <- do.call(dexterMST:::mutate_booklet_score,testlist)
str(result)
|
/dexterMST/inst/testfiles/mutate_booklet_score/libFuzzer_mutate_booklet_score/mutate_booklet_score_valgrind_files/1612727298-test.R
|
no_license
|
akhikolla/updatedatatype-list1
|
R
| false
| false
| 648
|
r
|
# Fuzzer-generated regression input: invokes the unexported
# dexterMST:::mutate_booklet_score with a crafted argument list (note the
# duplicated "id" entries and the empty item_score/person_id vectors), then
# prints the structure of the result. This reproduces a specific
# libFuzzer/valgrind finding and is not a general-purpose test.
testlist <- list(id = NULL, score = NULL, id = NULL, booklet_id = c(-101058055L, -101058055L, -101058055L, -101058055L, -101058055L, -101058055L, -101058055L, -101058055L, -101058055L, -101058055L, -101058055L, -4193537L, 268435456L, 0L, 353173504L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L), item_score = integer(0), person_id = integer(0))
# do.call passes the list elements to the target as (named) arguments.
result <- do.call(dexterMST:::mutate_booklet_score,testlist)
str(result)
|
# Bridge script: runs a Python preprocessing script (gastricData.py) via
# reticulate so its module-level objects become available in R, then plots
# the tsne3 object (presumably a t-SNE embedding of the mass-spec data --
# confirm against gastricData.py). The SAM analysis is kept commented out.
library(samr)        # SAM: significance analysis of microarrays
library(reticulate)  # run Python code and share objects with R
use_python("/usr/bin/python3")
# Executes gastricData.py and exposes its top-level variables (MassSpec,
# tsne3, ...) as R objects in the current session.
source_python("gastricData.py")
# MSI_reshaped_R <- MSI_reshaped
MassSpec_R <- MassSpec
tsne3_R <- tsne3
plot(tsne3_R)
# set.seed(100)
# samfit <- SAM(MassSpec_R, tsne3_R, resp.type = "Two class unpaired")
# # examine significant gene list
# print(samfit)
# # plot results
# plot(samfit)
|
/Code/gastricData/gastricR.R
|
no_license
|
mostafa20223/GP
|
R
| false
| false
| 375
|
r
|
# Bridge script: runs a Python preprocessing script (gastricData.py) via
# reticulate so its module-level objects become available in R, then plots
# the tsne3 object (presumably a t-SNE embedding of the mass-spec data --
# confirm against gastricData.py). The SAM analysis is kept commented out.
library(samr)        # SAM: significance analysis of microarrays
library(reticulate)  # run Python code and share objects with R
use_python("/usr/bin/python3")
# Executes gastricData.py and exposes its top-level variables (MassSpec,
# tsne3, ...) as R objects in the current session.
source_python("gastricData.py")
# MSI_reshaped_R <- MSI_reshaped
MassSpec_R <- MassSpec
tsne3_R <- tsne3
plot(tsne3_R)
# set.seed(100)
# samfit <- SAM(MassSpec_R, tsne3_R, resp.type = "Two class unpaired")
# # examine significant gene list
# print(samfit)
# # plot results
# plot(samfit)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/barplot_errbar.R
\name{barplot_errbar}
\alias{barplot_errbar}
\title{Plot a barplot graph with error bar on y}
\usage{
barplot_errbar(
...,
errbar.y = NULL,
errbar.y.plus = NULL,
errbar.y.minus = NULL,
y.plus = NULL,
y.minus = NULL,
errbar.tick = 1/50,
errbar.lwd = par("lwd"),
errbar.lty = par("lty"),
errbar.col = par("fg"),
add = FALSE
)
}
\arguments{
\item{...}{Parameters for barplot() such as main= or ylim=}
\item{errbar.y}{The length of error bars for y. Recycled if necessary.}
\item{errbar.y.plus}{The length of positive error bars for y. Recycled if necessary.}
\item{errbar.y.minus}{The length of negative error bars for y. Recycled if necessary.}
\item{y.plus}{The absolute position of the positive error bar for y. Recycled if necessary.}
\item{y.minus}{The absolute position of the negative error bar for y. Recycled if necessary.}
\item{errbar.tick}{Size of small ticks at the end of error bars defined as a proportion of total width or height graph size.}
\item{errbar.lwd}{Error bar line width, see par("lwd")}
\item{errbar.lty}{Error bar line type, see par("lty")}
\item{errbar.col}{Error bar line color, see par("col")}
\item{add}{If true, add the graph to the previous one.}
}
\value{
A numeric vector (or matrix, when beside = TRUE), say mp, giving the coordinates of all the bar midpoints drawn, useful for adding to the graph.\cr
If beside is true, use colMeans(mp) for the midpoints of each group of bars, see example.
}
\description{
To plot data, just use it as a normal barplot but add the errbar.y
values or errbar.y.minus, errbar.y.plus if bars for y axis are
asymmetric. Use y.plus and y.minus to set absolute limits for
error bars. Note that y.plus and y.minus have priority over errbar.y,
errbar.y.minus and errbar.y.plus.
}
\details{
barplot_errbar plot a barplot with error bar on y
}
\examples{
\dontrun{
barplot_errbar(rnorm(10, 10, 3),
xlab="axe x", ylab="axe y", bty="n",
errbar.y.plus=rnorm(10, 1, 0.1), col=rainbow(10),
names.arg=paste("Group",1:10), cex.names=0.6)
y <- rnorm(10, 10, 3)
barplot_errbar(y,
xlab="axe x", ylab="axe y", bty="n",
y.plus=y+2)
}
}
\seealso{
\code{plot_errorbar}
Other plot and barplot functions:
\code{\link{ScalePreviousPlot}()},
\code{\link{plot_add}()},
\code{\link{plot_errbar}()},
\code{\link{show_name}()}
}
\author{
Marc Girondot \email{marc.girondot@gmail.com}
}
\concept{plot and barplot functions}
|
/man/barplot_errbar.Rd
|
no_license
|
cran/HelpersMG
|
R
| false
| true
| 2,528
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/barplot_errbar.R
\name{barplot_errbar}
\alias{barplot_errbar}
\title{Plot a barplot graph with error bar on y}
\usage{
barplot_errbar(
...,
errbar.y = NULL,
errbar.y.plus = NULL,
errbar.y.minus = NULL,
y.plus = NULL,
y.minus = NULL,
errbar.tick = 1/50,
errbar.lwd = par("lwd"),
errbar.lty = par("lty"),
errbar.col = par("fg"),
add = FALSE
)
}
\arguments{
\item{...}{Parameters for barplot() such as main= or ylim=}
\item{errbar.y}{The length of error bars for y. Recycled if necessary.}
\item{errbar.y.plus}{The length of positive error bars for y. Recycled if necessary.}
\item{errbar.y.minus}{The length of negative error bars for y. Recycled if necessary.}
\item{y.plus}{The absolute position of the positive error bar for y. Recycled if necessary.}
\item{y.minus}{The absolute position of the negative error bar for y. Recycled if necessary.}
\item{errbar.tick}{Size of small ticks at the end of error bars defined as a proportion of total width or height graph size.}
\item{errbar.lwd}{Error bar line width, see par("lwd")}
\item{errbar.lty}{Error bar line type, see par("lty")}
\item{errbar.col}{Error bar line color, see par("col")}
\item{add}{If true, add the graph to the previous one.}
}
\value{
A numeric vector (or matrix, when beside = TRUE), say mp, giving the coordinates of all the bar midpoints drawn, useful for adding to the graph.\cr
If beside is true, use colMeans(mp) for the midpoints of each group of bars, see example.
}
\description{
To plot data, just use it as a normal barplot but add the errbar.y
values or errbar.y.minus, errbar.y.plus if bars for y axis are
asymmetric. Use y.plus and y.minus to set absolute limits for
error bars. Note that y.plus and y.minus have priority over errbar.y,
errbar.y.minus and errbar.y.plus.
}
\details{
barplot_errbar plot a barplot with error bar on y
}
\examples{
\dontrun{
barplot_errbar(rnorm(10, 10, 3),
xlab="axe x", ylab="axe y", bty="n",
errbar.y.plus=rnorm(10, 1, 0.1), col=rainbow(10),
names.arg=paste("Group",1:10), cex.names=0.6)
y <- rnorm(10, 10, 3)
barplot_errbar(y,
xlab="axe x", ylab="axe y", bty="n",
y.plus=y+2)
}
}
\seealso{
\code{plot_errorbar}
Other plot and barplot functions:
\code{\link{ScalePreviousPlot}()},
\code{\link{plot_add}()},
\code{\link{plot_errbar}()},
\code{\link{show_name}()}
}
\author{
Marc Girondot \email{marc.girondot@gmail.com}
}
\concept{plot and barplot functions}
|
# Smoke test for the Binder container: prints a greeting, shells out to check
# that the freebayes binary is on PATH, and loads vcfR to confirm the R
# package environment is intact.
print("Hello from Binder!")
system("freebayes --help") #check if freebayes work
library(vcfR)
|
/hello.R
|
no_license
|
Jigyasa3/binder-test
|
R
| false
| false
| 96
|
r
|
# Smoke test for the Binder container: prints a greeting, shells out to check
# that the freebayes binary is on PATH, and loads vcfR to confirm the R
# package environment is intact.
print("Hello from Binder!")
system("freebayes --help") #check if freebayes work
library(vcfR)
|
# Create posts automatically
# Author: Shixiang Wang
# License: MIT

#' Create a new (R)markdown post from a template.
#'
#' Copies template_post.Rmd / template_post.md from `template_path` into
#' `dir`, stamping the YAML `date:` field (and, for plain markdown, the
#' `lastmod:` field) with today's date.
#'
#' @param post_name Post file name without extension (required).
#' @param dir Output directory for the new post.
#' @param type "rmd" for an R Markdown post, "md" for plain markdown.
#' @param template_path Directory containing the template files.
#' @param add_prefix Prepend today's date ("YYYY-MM-DD-") to the file name?
#' @param edit_file Open the new post in the editor after creating it?
#' @param force Overwrite an existing file of the same name?
new_post <- function(post_name = NULL, dir = file.path(getwd(), "content/cn/post"),
                     type = c("rmd", "md"),
                     template_path = getwd(), add_prefix = TRUE, edit_file = TRUE,
                     force = FALSE) {
  if (is.null(post_name)) {
    stop("A post name must be given!")
  }
  type <- match.arg(type)
  # match.arg() already guarantees type is "rmd" or "md", so the previous
  # unreachable "Not supported!" branch is dropped and a switch() suffices.
  template_name <- switch(type, rmd = "template_post.Rmd", md = "template_post.md")
  ext <- switch(type, rmd = ".Rmd", md = ".md")
  input_file <- file.path(template_path, template_name)
  # Optionally prefix the file name with today's date ("YYYY-MM-DD-name").
  base_name <- if (add_prefix) paste0(Sys.Date(), "-", post_name) else post_name
  out_file <- file.path(dir, paste0(base_name, ext))
  if (file.exists(out_file) && !force) {
    stop("File exists, use force=TRUE if you make sure rewrite it.")
  }
  message("Copying contents of ", input_file, " to ", out_file)
  fl_content <- readLines(input_file)
  # Stamp today's date into the YAML front matter; any line containing
  # "date:" is replaced wholesale with a fresh date: field.
  fl_content <- ifelse(grepl("date:", fl_content), paste0("date:", " \"", Sys.Date(), "\""), fl_content)
  if (type == "md") {
    fl_content <- ifelse(grepl("lastmod:", fl_content), paste0("lastmod:", " \"", Sys.Date(), "\""), fl_content)
  }
  writeLines(fl_content, out_file)
  if (edit_file) {
    file.edit(out_file)
  }
  if (type == "rmd") {
    message("Create new Rmarkdown post successfully!")
  } else {
    message("Create new markdown post successfully!")
  }
}
|
/new.R
|
no_license
|
ShixiangWang/home
|
R
| false
| false
| 1,803
|
r
|
# Create posts automatically
# Author: Shixiang Wang
# License: MIT

#' Create a new (R)markdown post from a template.
#'
#' Copies template_post.Rmd / template_post.md from `template_path` into
#' `dir`, stamping the YAML `date:` field (and, for plain markdown, the
#' `lastmod:` field) with today's date.
#'
#' @param post_name Post file name without extension (required).
#' @param dir Output directory for the new post.
#' @param type "rmd" for an R Markdown post, "md" for plain markdown.
#' @param template_path Directory containing the template files.
#' @param add_prefix Prepend today's date ("YYYY-MM-DD-") to the file name?
#' @param edit_file Open the new post in the editor after creating it?
#' @param force Overwrite an existing file of the same name?
new_post <- function(post_name = NULL, dir = file.path(getwd(), "content/cn/post"),
                     type = c("rmd", "md"),
                     template_path = getwd(), add_prefix = TRUE, edit_file = TRUE,
                     force = FALSE) {
  if (is.null(post_name)) {
    stop("A post name must be given!")
  }
  type <- match.arg(type)
  # match.arg() already guarantees type is "rmd" or "md", so the previous
  # unreachable "Not supported!" branch is dropped and a switch() suffices.
  template_name <- switch(type, rmd = "template_post.Rmd", md = "template_post.md")
  ext <- switch(type, rmd = ".Rmd", md = ".md")
  input_file <- file.path(template_path, template_name)
  # Optionally prefix the file name with today's date ("YYYY-MM-DD-name").
  base_name <- if (add_prefix) paste0(Sys.Date(), "-", post_name) else post_name
  out_file <- file.path(dir, paste0(base_name, ext))
  if (file.exists(out_file) && !force) {
    stop("File exists, use force=TRUE if you make sure rewrite it.")
  }
  message("Copying contents of ", input_file, " to ", out_file)
  fl_content <- readLines(input_file)
  # Stamp today's date into the YAML front matter; any line containing
  # "date:" is replaced wholesale with a fresh date: field.
  fl_content <- ifelse(grepl("date:", fl_content), paste0("date:", " \"", Sys.Date(), "\""), fl_content)
  if (type == "md") {
    fl_content <- ifelse(grepl("lastmod:", fl_content), paste0("lastmod:", " \"", Sys.Date(), "\""), fl_content)
  }
  writeLines(fl_content, out_file)
  if (edit_file) {
    file.edit(out_file)
  }
  if (type == "rmd") {
    message("Create new Rmarkdown post successfully!")
  } else {
    message("Create new markdown post successfully!")
  }
}
|
# "rankall" takes as input a state and the outcome name
# "rankall" returns a data fram with hospital name and state with given rank
# according to num and outcome name
# Unimplemented stub: the body holds only the outline comments below, so the
# function currently ignores its arguments and returns NULL.
# NOTE(review): the header comments above this function describe "rankall"
# (state + outcome, ranking hospitals across all states), but the function is
# named rankhospital and takes (outcome, num) -- the comments appear copied
# from the rankall assignment; confirm the intended name and signature
# before implementing.
rankhospital <- function(outcome, num="best") {
#1. read outcome data
#2. check state and outcome are valid
#3. sort, data checks etc.
#4. for each state find the hospital with given rank
#5. return the hospital with given rank and the state as a dataframe
}
|
/rankall.R
|
no_license
|
paolinoroscia/ProgrammingAssignment3
|
R
| false
| false
| 455
|
r
|
# "rankall" takes as input a state and the outcome name
# "rankall" returns a data fram with hospital name and state with given rank
# according to num and outcome name
# Unimplemented stub: the body holds only the outline comments below, so the
# function currently ignores its arguments and returns NULL.
# NOTE(review): the header comments above this function describe "rankall"
# (state + outcome, ranking hospitals across all states), but the function is
# named rankhospital and takes (outcome, num) -- the comments appear copied
# from the rankall assignment; confirm the intended name and signature
# before implementing.
rankhospital <- function(outcome, num="best") {
#1. read outcome data
#2. check state and outcome are valid
#3. sort, data checks etc.
#4. for each state find the hospital with given rank
#5. return the hospital with given rank and the state as a dataframe
}
|
## Challenge Assignment ##
# MechaCar statistical analysis: three deliverables run top-to-bottom.
# Deliverable 1: Linear regression to predict MPG
# Data: MechaCar MPG dataset
library(dplyr)
# NOTE(review): setwd() ties the script to one machine; prefer running from
# the project root with relative paths.
setwd("~/Desktop/DATA_ANALYTICS/challenges/module_15_R_analysis/MechaCar_Statistical_Analysis")
dev1_table <- read.csv(file='MechaCar_mpg.csv',check.names=F,stringsAsFactors = F)
#linear regression: mpg modeled on all measured vehicle attributes
lm(mpg ~ vehicle_length + vehicle_weight + spoiler_angle + ground_clearance + AWD,data= dev1_table)
#lm summary (coefficients, p-values, r-squared)
summary(lm(mpg ~ vehicle_length + vehicle_weight + spoiler_angle + ground_clearance + AWD,data= dev1_table))
# Deliverable 2: Suspension coil summary statistics
dev2_table <- read.csv(file='Suspension_coil.csv',check.names=F,stringsAsFactors = F)
# Fix: Median=(PSI) previously stored the whole PSI column instead of its
# median; use median(PSI) to match the per-lot summary below.
total_summary<- dev2_table %>% summarize(Mean=mean(PSI), Median=median(PSI),Variance=var(PSI),SD=sd(PSI))
lot_summary <- dev2_table %>% group_by(Manufacturing_Lot) %>% summarize(Mean_PSI=mean(PSI), Median_PSI=median(PSI), Var_PSI=var(PSI), Std_Dev_PSI=sd(PSI), Num_Coil=n(), .groups = 'keep')
# Deliverable 3: T test on suspension coils
# One-sample t-tests of PSI against mu = 1500, overall and per manufacturing lot.
t.test(dev2_table$PSI, mu = 1500)
t.test(subset(dev2_table,Manufacturing_Lot=="Lot1")$PSI,mu = 1500)
t.test(subset(dev2_table,Manufacturing_Lot=="Lot2")$PSI,mu = 1500)
t.test(subset(dev2_table,Manufacturing_Lot=="Lot3")$PSI,mu = 1500)
|
/MechaCarChallenge.R
|
no_license
|
cgurbatri/MechaCar_Statistical_Analysis
|
R
| false
| false
| 1,292
|
r
|
## Challenge Assignment ##
# MechaCar statistical analysis: three deliverables run top-to-bottom.
# Deliverable 1: Linear regression to predict MPG
# Data: MechaCar MPG dataset
library(dplyr)
# NOTE(review): setwd() ties the script to one machine; prefer running from
# the project root with relative paths.
setwd("~/Desktop/DATA_ANALYTICS/challenges/module_15_R_analysis/MechaCar_Statistical_Analysis")
dev1_table <- read.csv(file='MechaCar_mpg.csv',check.names=F,stringsAsFactors = F)
#linear regression: mpg modeled on all measured vehicle attributes
lm(mpg ~ vehicle_length + vehicle_weight + spoiler_angle + ground_clearance + AWD,data= dev1_table)
#lm summary (coefficients, p-values, r-squared)
summary(lm(mpg ~ vehicle_length + vehicle_weight + spoiler_angle + ground_clearance + AWD,data= dev1_table))
# Deliverable 2: Suspension coil summary statistics
dev2_table <- read.csv(file='Suspension_coil.csv',check.names=F,stringsAsFactors = F)
# Fix: Median=(PSI) previously stored the whole PSI column instead of its
# median; use median(PSI) to match the per-lot summary below.
total_summary<- dev2_table %>% summarize(Mean=mean(PSI), Median=median(PSI),Variance=var(PSI),SD=sd(PSI))
lot_summary <- dev2_table %>% group_by(Manufacturing_Lot) %>% summarize(Mean_PSI=mean(PSI), Median_PSI=median(PSI), Var_PSI=var(PSI), Std_Dev_PSI=sd(PSI), Num_Coil=n(), .groups = 'keep')
# Deliverable 3: T test on suspension coils
# One-sample t-tests of PSI against mu = 1500, overall and per manufacturing lot.
t.test(dev2_table$PSI, mu = 1500)
t.test(subset(dev2_table,Manufacturing_Lot=="Lot1")$PSI,mu = 1500)
t.test(subset(dev2_table,Manufacturing_Lot=="Lot2")$PSI,mu = 1500)
t.test(subset(dev2_table,Manufacturing_Lot=="Lot3")$PSI,mu = 1500)
|
# Fuzzer-generated regression input for the unexported grattan:::anyOutside,
# called with a = b = 0 and an x vector whose first element is negative, then
# printing the structure of the result.
# NOTE(review): the semantics of anyOutside (presumably: does any element of
# x fall outside [a, b]?) are inferred from its name -- confirm against the
# grattan package sources.
testlist <- list(a = 0L, b = 0L, x = c(-21589L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L))
result <- do.call(grattan:::anyOutside,testlist)
str(result)
|
/grattan/inst/testfiles/anyOutside/libFuzzer_anyOutside/anyOutside_valgrind_files/1610387040-test.R
|
no_license
|
akhikolla/updated-only-Issues
|
R
| false
| false
| 170
|
r
|
# Fuzzer-generated regression input for the unexported grattan:::anyOutside,
# called with a = b = 0 and an x vector whose first element is negative, then
# printing the structure of the result.
# NOTE(review): the semantics of anyOutside (presumably: does any element of
# x fall outside [a, b]?) are inferred from its name -- confirm against the
# grattan package sources.
testlist <- list(a = 0L, b = 0L, x = c(-21589L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L))
result <- do.call(grattan:::anyOutside,testlist)
str(result)
|
############################################################################
## File Name: user_options.R
## File Purpose: Specify inputs for run_file.R
## Author: Jeanette Birnbaum
## Date: 10/14/2014
## Edited on:
## Additional Comments:
#############################################################################

############################################################
# Establish model version if this file is not being called
# by a wrapper
############################################################
# TODO
if (!'using_wrapper'%in%ls()) {
    # Fix: warning message previously read 'Empyting the workspace'.
    warning('Emptying the workspace')
    rm(list=ls())
    model_version <- 'tza_3_chemo1cages30-49'
    setwd('~')
    # Resolve the project root from whichever user's home directory we are in.
    if (grepl('jbirnbau', getwd())) rootdir <- getwd()
    if (grepl('jeanette', getwd())) rootdir <- file.path(getwd(), 'Documents', 'jbirnbau')
    base_path <- file.path(rootdir, 'screentreatGlobal/examples')
}

############################################################
# Simulation features
############################################################

country = 'tza'
nsim = 100
times = c(5,10)
pop_size = 100000
study_year = 2013 # Approx year of incidence/life table data
inc_source = 'globocan'
standard_pop = TRUE

############################################################
# Input data files
############################################################

treat_file = file.path(base_path, model_version, 'input', 'input.csv')
incidence_file = file.path(rootdir, 'screentreatGlobal/data',
                           paste0(country, '_incidence.csv'))
library_file = file.path(rootdir, 'screentreatGlobal/code/screentreat_library.R')
life_table_file = file.path(rootdir, 'screentreatGlobal/data',
                            paste0(country, '_lifetable.csv'))
age_file = file.path(rootdir, 'screentreatGlobal/data',
                     paste0(country, '_age.csv'))
# When using a standard population, the country-specific age file is replaced.
if (standard_pop) age_file = file.path(rootdir, 'screentreatGlobal/data',
                                       'std_age.csv')

############################################################
# Population features
############################################################

# 8/19/16 note: using a function format_age to get single-year
# ages from 5-yr age groups
if ('age_file'%in%ls()) {
    source(library_file)
    ages <- format_age(age_file, minAge=30, maxAge=49)
    pop_chars =
        list(age=ages,
             male=data.frame(male=c(0), prop=c(1)))
} else {
    pop_chars =
        list(age=data.frame(age=c(40), prop=c(1)),
             male=data.frame(male=c(0), prop=c(1)))
}

# Is age in the data age at clinical incidence?
# If not, provide incidence table
age_is_ageclin = FALSE
if (!age_is_ageclin) {
    inc_table = read.csv(incidence_file, header=FALSE,
                         stringsAsFactors=FALSE)
}

# Denominator for reporting results
denom <- 100000

############################################################
# Screening, treatment and cancer mortality
############################################################

# Stage shift
HR_advanced = 0.35/0.85

# Within stage effects
instage_screen_benefit_early = 1
instage_screen_benefit_advanced = 1

# Add lead time? Default is undefined or FALSE
# If true, add mean lead time in years
lead_time = FALSE
if (lead_time) lt_mean = (40/12)

# Treatment HRs and distributions by subgroup-stage
treat_chars = read.csv(treat_file, header=TRUE,
                       stringsAsFactors=FALSE)

# Survival distribution: exponential or weibull?
surv_distr = 'exponential'

# Baseline mortality rates and population proportions by
# subgroup-stages. Subgroup stages specified here must
# match those given in the scrtrt_file
control_notreat = data.frame(stage=c(rep('Early',2),
                                     rep('Advanced',2)),
                             subgroup=rep(c('ER+',
                                            'ER-'),2),
                             mortrate=c(rep(.0446,2),rep(0.21, 2)),
                             prop=c(0.045, 0.105, 0.255, 0.595)
                             # Early ER+, Early ER-, Adv ER+, Adv ER-
                             # Expected ER+/- is 30%/70%
                             # Rough from literature: early/adv is 15%/85%
                             # if missings are missing at random (MAR)
                             )

############################################################
# Other-cause mortality
############################################################

ocd_HR = 1

############################################################
# Run model
############################################################

# Snapshot the run file alongside the inputs for reproducibility, then run.
file.copy(file.path(rootdir, '/screentreatGlobal/code/run_file.R'),
          file.path(base_path, model_version, 'input', 'run_file.R'),
          overwrite=TRUE)
source(file.path(rootdir, '/screentreatGlobal/code/run_file.R'))
|
/examples/tza_3_chemo1cages30-49/input/user_options.R
|
no_license
|
netterie/screentreatGlobal
|
R
| false
| false
| 4,831
|
r
|
############################################################################
## File Name: user_options.R
## File Purpose: Specify inputs for run_file.R
## Author: Jeanette Birnbaum
## Date: 10/14/2014
## Edited on:
## Additional Comments:
#############################################################################

############################################################
# Establish model version if this file is not being called
# by a wrapper
############################################################
# TODO
if (!'using_wrapper'%in%ls()) {
    # Fix: warning message previously read 'Empyting the workspace'.
    warning('Emptying the workspace')
    rm(list=ls())
    model_version <- 'tza_3_chemo1cages30-49'
    setwd('~')
    # Resolve the project root from whichever user's home directory we are in.
    if (grepl('jbirnbau', getwd())) rootdir <- getwd()
    if (grepl('jeanette', getwd())) rootdir <- file.path(getwd(), 'Documents', 'jbirnbau')
    base_path <- file.path(rootdir, 'screentreatGlobal/examples')
}

############################################################
# Simulation features
############################################################

country = 'tza'
nsim = 100
times = c(5,10)
pop_size = 100000
study_year = 2013 # Approx year of incidence/life table data
inc_source = 'globocan'
standard_pop = TRUE

############################################################
# Input data files
############################################################

treat_file = file.path(base_path, model_version, 'input', 'input.csv')
incidence_file = file.path(rootdir, 'screentreatGlobal/data',
                           paste0(country, '_incidence.csv'))
library_file = file.path(rootdir, 'screentreatGlobal/code/screentreat_library.R')
life_table_file = file.path(rootdir, 'screentreatGlobal/data',
                            paste0(country, '_lifetable.csv'))
age_file = file.path(rootdir, 'screentreatGlobal/data',
                     paste0(country, '_age.csv'))
# When using a standard population, the country-specific age file is replaced.
if (standard_pop) age_file = file.path(rootdir, 'screentreatGlobal/data',
                                       'std_age.csv')

############################################################
# Population features
############################################################

# 8/19/16 note: using a function format_age to get single-year
# ages from 5-yr age groups
if ('age_file'%in%ls()) {
    source(library_file)
    ages <- format_age(age_file, minAge=30, maxAge=49)
    pop_chars =
        list(age=ages,
             male=data.frame(male=c(0), prop=c(1)))
} else {
    pop_chars =
        list(age=data.frame(age=c(40), prop=c(1)),
             male=data.frame(male=c(0), prop=c(1)))
}

# Is age in the data age at clinical incidence?
# If not, provide incidence table
age_is_ageclin = FALSE
if (!age_is_ageclin) {
    inc_table = read.csv(incidence_file, header=FALSE,
                         stringsAsFactors=FALSE)
}

# Denominator for reporting results
denom <- 100000

############################################################
# Screening, treatment and cancer mortality
############################################################

# Stage shift
HR_advanced = 0.35/0.85

# Within stage effects
instage_screen_benefit_early = 1
instage_screen_benefit_advanced = 1

# Add lead time? Default is undefined or FALSE
# If true, add mean lead time in years
lead_time = FALSE
if (lead_time) lt_mean = (40/12)

# Treatment HRs and distributions by subgroup-stage
treat_chars = read.csv(treat_file, header=TRUE,
                       stringsAsFactors=FALSE)

# Survival distribution: exponential or weibull?
surv_distr = 'exponential'

# Baseline mortality rates and population proportions by
# subgroup-stages. Subgroup stages specified here must
# match those given in the scrtrt_file
control_notreat = data.frame(stage=c(rep('Early',2),
                                     rep('Advanced',2)),
                             subgroup=rep(c('ER+',
                                            'ER-'),2),
                             mortrate=c(rep(.0446,2),rep(0.21, 2)),
                             prop=c(0.045, 0.105, 0.255, 0.595)
                             # Early ER+, Early ER-, Adv ER+, Adv ER-
                             # Expected ER+/- is 30%/70%
                             # Rough from literature: early/adv is 15%/85%
                             # if missings are missing at random (MAR)
                             )

############################################################
# Other-cause mortality
############################################################

ocd_HR = 1

############################################################
# Run model
############################################################

# Snapshot the run file alongside the inputs for reproducibility, then run.
file.copy(file.path(rootdir, '/screentreatGlobal/code/run_file.R'),
          file.path(base_path, model_version, 'input', 'run_file.R'),
          overwrite=TRUE)
source(file.path(rootdir, '/screentreatGlobal/code/run_file.R'))
|
#' Computes the posterior hazard values for a vector x for the Piecewise Exponential Hazard model (PEH)
#' @param x Vector of times to compute the hazard.
#' @param G1 List of posterior samples from the BayesPiecewiseHazard function:
#'   G1[[1]] is a matrix of split-point vectors (one row per posterior draw),
#'   G1[[2]] the matrix of corresponding hazard heights, and G1[[3]] the
#'   number of interior split points J for each draw.
#' @return Matrix containing the posterior distribution of hazard values h(x),
#'   with one row per posterior draw and one column per entry of x.
#'@export
GetALLHazPiece = function(x,G1){
  # Piecewise-constant hazard for one posterior draw: h(t) = lam[k] on the
  # interval (s[k], s[k+1]]. Entries of x outside every interval are left
  # at their initialised value y = x (quirk preserved from the original
  # implementation); callers should supply x within (s[1], s[J+2]].
  GetHazPEH = function(x,s,lam,J){
    y=x
    for(m in seq_along(x)){
      for(k in 1:(J+1)){
        if((x[m]>s[k]) && (x[m]<=s[k+1])){
          y[m]=lam[k]
        }
      }
    }
    return(y)
  }
  # BUG FIX: the original allocated the output with nrow=nrow(G1), but G1
  # is a list, so nrow(G1) is NULL and matrix() fails with an invalid
  # extent. The number of posterior draws is the row count of the
  # split-point matrix G1[[1]] (the loop below already iterated over it).
  B = nrow(G1[[1]])
  HAZ = matrix(nrow=B, ncol=length(x))
  for(b in seq_len(B)){
    s=G1[[1]][b,]
    lam=(G1[[2]])[b,]
    J = G1[[3]][b]
    HAZ[b,]=GetHazPEH(x,s,lam,J)
  }
  return(HAZ)
}
|
/BayesReversePLLH/R/GetALLHazPiece.R
|
no_license
|
akhikolla/ClusterTests
|
R
| false
| false
| 918
|
r
|
#' Computes the posterior hazard values for a vector x for the Piecewise Exponential Hazard model (PEH)
#' @param x Vector of times to compute the hazard.
#' @param G1 List of posterior samples from the BayesPiecewiseHazard function:
#'   G1[[1]] is a matrix of split-point vectors (one row per posterior draw),
#'   G1[[2]] the matrix of corresponding hazard heights, and G1[[3]] the
#'   number of interior split points J for each draw.
#' @return Matrix containing the posterior distribution of hazard values h(x),
#'   with one row per posterior draw and one column per entry of x.
#'@export
GetALLHazPiece = function(x,G1){
  # Piecewise-constant hazard for one posterior draw: h(t) = lam[k] on the
  # interval (s[k], s[k+1]]. Entries of x outside every interval are left
  # at their initialised value y = x (quirk preserved from the original
  # implementation); callers should supply x within (s[1], s[J+2]].
  GetHazPEH = function(x,s,lam,J){
    y=x
    for(m in seq_along(x)){
      for(k in 1:(J+1)){
        if((x[m]>s[k]) && (x[m]<=s[k+1])){
          y[m]=lam[k]
        }
      }
    }
    return(y)
  }
  # BUG FIX: the original allocated the output with nrow=nrow(G1), but G1
  # is a list, so nrow(G1) is NULL and matrix() fails with an invalid
  # extent. The number of posterior draws is the row count of the
  # split-point matrix G1[[1]] (the loop below already iterated over it).
  B = nrow(G1[[1]])
  HAZ = matrix(nrow=B, ncol=length(x))
  for(b in seq_len(B)){
    s=G1[[1]][b,]
    lam=(G1[[2]])[b,]
    J = G1[[3]][b]
    HAZ[b,]=GetHazPEH(x,s,lam,J)
  }
  return(HAZ)
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data.R
\docType{data}
\name{rc_geoms_2018}
\alias{rc_geoms_2018}
\title{Boundaries for Regional Council 2018}
\format{
A data frame with 17 rows and 3 variables:
\describe{
\item{code}{Code of the RC}
\item{name}{Name of the RC}
\item{geom}{multipolygon of the area boundaries, as lat/long in WGS84}
}
}
\source{
\url{https://datafinder.stats.govt.nz/}
}
\usage{
rc_geoms_2018
}
\description{
A dataset of regional council (RC) boundaries at 1 January 2018 as defined by Stats NZ.
}
\keyword{datasets}
|
/man/rc_geoms_2018.Rd
|
permissive
|
harmonic-analytics/db-geonz
|
R
| false
| true
| 580
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data.R
\docType{data}
\name{rc_geoms_2018}
\alias{rc_geoms_2018}
\title{Boundaries for Regional Council 2018}
\format{
A data frame with 17 rows and 3 variables:
\describe{
\item{code}{Code of the RC}
\item{name}{Name of the RC}
\item{geom}{multipolygon of the area boundaries, as lat/long in WGS84}
}
}
\source{
\url{https://datafinder.stats.govt.nz/}
}
\usage{
rc_geoms_2018
}
\description{
A dataset of regional council (RC) boundaries at 1 January 2018 as defined by Stats NZ.
}
\keyword{datasets}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/mysan.R
\name{mysan}
\alias{mysan}
\title{Sanitizer function for xtable that converts scientific notation to latex style}
\usage{
mysan(x)
}
\arguments{
\item{x}{character or numeric vector}
}
\value{
sanitized character
}
\description{
Sanitizer function for xtable that converts scientific notation to latex style
}
\examples{
p<-10^(-seq(1,10))
format.pval(p) # scientific notation
mysan(p) # latex style
library(xtable)
df <- data.frame(numeric=p,scientific=format.pval(p),p.san=mysan(p))
print(xtable(df)) # doesn't work
print(xtable(df), sanitize.text.function=mysan) # looks nice!
}
\author{
Chris Wallace
}
|
/man/mysan.Rd
|
no_license
|
andy3nieto/random-functions
|
R
| false
| true
| 695
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/mysan.R
\name{mysan}
\alias{mysan}
\title{Sanitizer function for xtable that converts scientific notation to latex style}
\usage{
mysan(x)
}
\arguments{
\item{x}{character or numeric vector}
}
\value{
sanitized character
}
\description{
Sanitizer function for xtable that converts scientific notation to latex style
}
\examples{
p<-10^(-seq(1,10))
format.pval(p) # scientific notation
mysan(p) # latex style
library(xtable)
df <- data.frame(numeric=p,scientific=format.pval(p),p.san=mysan(p))
print(xtable(df)) # doesn't work
print(xtable(df), sanitize.text.function=mysan) # looks nice!
}
\author{
Chris Wallace
}
|
% Generated by roxygen2 (4.0.2): do not edit by hand
\name{sju.groupString}
\alias{sju.groupString}
\title{Group near elements of string vectors}
\usage{
sju.groupString(strings, maxdist = 3, method = "lv", strict = FALSE,
trim.whitespace = TRUE, remove.empty = TRUE, showProgressBar = FALSE)
}
\arguments{
\item{strings}{a character vector with string elements}
\item{maxdist}{the maximum distance between two string elements, which is allowed to treat two
elements as similar or equal.}
\item{method}{Method for distance calculation. The default is \code{"lv"}. See \code{stringdist} package for details.}
\item{strict}{if \code{TRUE}, value matching is more strict. See examples for details.}
\item{trim.whitespace}{if \code{TRUE} (default), leading and trailing white spaces will
be removed from string values.}
\item{remove.empty}{if \code{TRUE} (default), empty string values will be removed from the
character vector \code{strings}.}
\item{showProgressBar}{If \code{TRUE}, the progress bar is displayed when computing the distance matrix.
Default is \code{FALSE}, hence the bar is hidden.}
}
\value{
A character vector where similar string elements (values) are recoded into a new, single value.
}
\description{
This function groups elements of a string vector (character or string variable) according
to the element's distance. The more similar two string elements are, the higher is the
chance to be combined into a group.
}
\examples{
\dontrun{
oldstring <- c("Hello", "Helo", "Hole", "Apple", "Ape", "New", "Old", "System", "Systemic")
newstring <- sju.groupString(oldstring)
sjt.frq(data.frame(oldstring, newstring), removeStringVectors = FALSE, autoGroupStrings = FALSE)
newstring <- sju.groupString(oldstring, strict = TRUE)
sjt.frq(data.frame(oldstring, newstring), removeStringVectors = FALSE, autoGroupStrings = FALSE)}
}
|
/man/sju.groupString.Rd
|
no_license
|
harshagn/devel
|
R
| false
| false
| 1,890
|
rd
|
% Generated by roxygen2 (4.0.2): do not edit by hand
\name{sju.groupString}
\alias{sju.groupString}
\title{Group near elements of string vectors}
\usage{
sju.groupString(strings, maxdist = 3, method = "lv", strict = FALSE,
trim.whitespace = TRUE, remove.empty = TRUE, showProgressBar = FALSE)
}
\arguments{
\item{strings}{a character vector with string elements}
\item{maxdist}{the maximum distance between two string elements, which is allowed to treat two
elements as similar or equal.}
\item{method}{Method for distance calculation. The default is \code{"lv"}. See \code{stringdist} package for details.}
\item{strict}{if \code{TRUE}, value matching is more strict. See examples for details.}
\item{trim.whitespace}{if \code{TRUE} (default), leading and trailing white spaces will
be removed from string values.}
\item{remove.empty}{if \code{TRUE} (default), empty string values will be removed from the
character vector \code{strings}.}
\item{showProgressBar}{If \code{TRUE}, the progress bar is displayed when computing the distance matrix.
Default is \code{FALSE}, hence the bar is hidden.}
}
\value{
A character vector where similar string elements (values) are recoded into a new, single value.
}
\description{
This function groups elements of a string vector (character or string variable) according
to the element's distance. The more similar two string elements are, the higher is the
chance to be combined into a group.
}
\examples{
\dontrun{
oldstring <- c("Hello", "Helo", "Hole", "Apple", "Ape", "New", "Old", "System", "Systemic")
newstring <- sju.groupString(oldstring)
sjt.frq(data.frame(oldstring, newstring), removeStringVectors = FALSE, autoGroupStrings = FALSE)
newstring <- sju.groupString(oldstring, strict = TRUE)
sjt.frq(data.frame(oldstring, newstring), removeStringVectors = FALSE, autoGroupStrings = FALSE)}
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/extract_tree.R
\name{extract_tree}
\alias{extract_tree}
\title{Extract sub result from the result of the wavelet_screening}
\usage{
extract_tree(res, lev_res, thresh)
}
\arguments{
\item{res}{Output of Wavelet_screening.}
\item{lev_res}{the maximum level of resolution needed; has to be less than or equal to the requested level of resolution in the Wavelet_screening.}
\item{thresh}{Minimal value of the Bayes Factor used to define a sub-region; if missing, set to 1.}
}
\value{
A vector corresponding to the subtree for the zoomed analysis.
}
\description{
Function to perform a zoomed analysis of the wavelet screening output
}
\examples{
\dontrun{
#using res from the Wavelet_screening example
sub_analysis <- function(res, lev_res )
{
sub <- extract_tree(res,lev_res=lev_res)
my_pi <- adaptative_EM_Lambda(sub)
out <- adaptative_Lambda (my_pi, sub)
return(out)
}
sub_analysis(res, 6)
}
}
|
/man/extract_tree.Rd
|
no_license
|
william-denault/WaveletScreening
|
R
| false
| true
| 970
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/extract_tree.R
\name{extract_tree}
\alias{extract_tree}
\title{Extract sub result from the result of the wavelet_screening}
\usage{
extract_tree(res, lev_res, thresh)
}
\arguments{
\item{res}{Output of Wavelet_screening.}
\item{lev_res}{the maximum level of resolution needed; has to be less than or equal to the requested level of resolution in the Wavelet_screening.}
\item{thresh}{Minimal value of the Bayes Factor used to define a sub-region; if missing, set to 1.}
}
\value{
A vector corresponding to the subtree for the zoomed analysis.
}
\description{
Function to perform a zoomed analysis of the wavelet screening output
}
\examples{
\dontrun{
#using res from the Wavelet_screening example
sub_analysis <- function(res, lev_res )
{
sub <- extract_tree(res,lev_res=lev_res)
my_pi <- adaptative_EM_Lambda(sub)
out <- adaptative_Lambda (my_pi, sub)
return(out)
}
sub_analysis(res, 6)
}
}
|
# Example script extracted from the MSnbase documentation for
# plotMzDelta(): builds (without drawing) the delta-m/z plot for the
# itraqdata example set, then inspects amino-acid composition of the
# annotated peptides.
library(MSnbase)
### Name: plotMzDelta-methods
### Title: The delta m/z plot
### Aliases: plotMzDelta-methods plotMzDelta,MSnExp-method
### plotMzDelta,mzRramp-method plotMzDelta
### Keywords: methods
### ** Examples
# plot = FALSE returns the plot object instead of rendering it; the
# semantics of subset = 0.5 (presumably a fraction of spectra to use)
# should be confirmed against ?plotMzDelta.
# NOTE(review): itraqdata is assumed to be the example dataset shipped
# with MSnbase -- confirm it is lazy-loaded in this session.
mzdplot <- plotMzDelta(itraqdata,
                       subset = 0.5,
                       reporters = iTRAQ4,
                       verbose = FALSE, plot = FALSE)
## let's retrieve peptide sequence information
## and get a table of amino acids
peps <- as.character(fData(itraqdata)$PeptideSequence)
aas <- unlist(strsplit(peps,""))
## table of aas (single-letter amino-acid frequencies across peptides)
table(aas)
## mzDelta plot -- render the object built above
print(mzdplot)
|
/data/genthat_extracted_code/MSnbase/examples/plotMzDelta-methods.Rd.R
|
no_license
|
surayaaramli/typeRrh
|
R
| false
| false
| 621
|
r
|
# Example script extracted from the MSnbase documentation for
# plotMzDelta() (duplicated copy of the section above -- concatenation
# residue in this file): builds the delta-m/z plot for itraqdata without
# drawing it, then inspects amino-acid composition.
library(MSnbase)
### Name: plotMzDelta-methods
### Title: The delta m/z plot
### Aliases: plotMzDelta-methods plotMzDelta,MSnExp-method
### plotMzDelta,mzRramp-method plotMzDelta
### Keywords: methods
### ** Examples
# plot = FALSE returns the plot object instead of rendering it; the
# semantics of subset = 0.5 (presumably a fraction of spectra to use)
# should be confirmed against ?plotMzDelta.
mzdplot <- plotMzDelta(itraqdata,
                       subset = 0.5,
                       reporters = iTRAQ4,
                       verbose = FALSE, plot = FALSE)
## let's retrieve peptide sequence information
## and get a table of amino acids
peps <- as.character(fData(itraqdata)$PeptideSequence)
aas <- unlist(strsplit(peps,""))
## table of aas (single-letter amino-acid frequencies across peptides)
table(aas)
## mzDelta plot -- render the object built above
print(mzdplot)
|
# kNN Machine learning algorithm 1
# 100daysofMLcode
#
# Exploratory kNN classification of a breast-cancer dataset (column
# names radius_mean/area_mean/... suggest the Wisconsin breast cancer
# data -- confirm). Pipeline: load, normalise, split, tune k, and
# compare min-max normalisation against z-score standardisation.
# loading packages
library(pacman)
p_load(class, tibble, gmodels, dplyr, gganimate, ggplot2, plotly)
# working directory
# NOTE(review): machine-specific absolute path -- will fail elsewhere.
setwd("C:/Users/stanley/Desktop/MISCELLANEOUS R/ml projects/Knn/breast cancer")
# loading dataset (interactive file picker; as.is=T keeps strings as character)
wbcd<-read.csv(file.choose(), as.is=T)
View(wbcd)
# a glimpse of data
glimpse(wbcd)
# removing ID column
wbcd<-wbcd[, -1]
View(wbcd)
# changing target feature(diagnosis) to factor
# (levels B/M become labels Benign/Malignant, codes 1/2)
wbcd$diagnosis<-factor(wbcd$diagnosis,
levels=c("B", "M"),
labels=c("Benign", "Malignant"))
# Class balance of the target variable as percentages (1 decimal place).
# BUG FIX: the original wrote prop.table(table(...)*100) -- multiplying
# *inside* prop.table() is a no-op because prop.table() renormalises
# (prop.table(t * 100) == prop.table(t)), so the output was proportions,
# not the intended percentages. Multiply after normalising instead.
round(prop.table(table(wbcd$diagnosis)) * 100, digits=1)
# taking closer look at three features
# (their raw scales differ by orders of magnitude -- e.g. area_mean vs
# smoothness_mean -- which motivates the normalisation step below)
summary(wbcd[c("radius_mean", "area_mean", "smoothness_mean")])
# transforming and normalizing numerical data
# Min-max normalisation helper: rescales a numeric vector onto [0, 1].
# NOTE: the name shadows base::norm(); kept unchanged because the loop
# below calls norm() by this name.
norm <- function(x) {
  rng <- range(x)
  (x - rng[1]) / (rng[2] - rng[1])
}
# Apply min-max normalisation to every numeric column (columns 2:31;
# column 1 is the diagnosis factor).
wbcd_n<-wbcd
for (j in 2:31)
{
wbcd_n[ ,j]<-norm(wbcd_n[ ,j])
}
View(wbcd_n)
# testing normalization
summary(wbcd_n[c("radius_mean", "area_mean", "smoothness_mean")])
# building two datasets, the training and testing one
# (random 100-row holdout; note this split is overwritten by the fixed
# 469/100 split a few lines below)
div_df <- sample(nrow(wbcd_n), 100, replace = F)
wbcd_train<-wbcd_n[-div_df, -1]
wbcd_test<-wbcd_n[div_df, -1]
# creating factor vectors to store desired
# features(diagnosis) from both test and train
wbcd_train_labels<-wbcd[-div_df, 1]
wbcd_test_labels<-wbcd[div_df, 1]
# maintaining the original form of the data
# NOTE(review): these assignments discard the random split above and use
# the first 469 rows for training, last 100 for testing instead.
wbcd_train <- wbcd_n[1:469, -1]
wbcd_test <- wbcd_n[470:569, -1]
wbcd_train_labels <- wbcd_n[1:469, 1]
wbcd_test_labels <- wbcd_n[470:569, 1]
# kNN implementation from class package
library(class)
# we'll use k=21 since training data has
# 469 observations, square rooting that...
# try to always use k=odd no
library(gmodels)
## before running knn we remove the diagnosis
## or feature we are interested in
# to ensure no NAs in the data we do
sum(is.na(wbcd_test))
sum(is.na(wbcd_train))
# if 0, no NA values exist
## leave one out validation
# NOTE(review): knn_validation(), nn_kfold(), nn_of() and vknn_of() are
# not defined in this file -- presumably sourced from a helper script;
# confirm before running. Also note `as` below shadows base::as().
ddl <- knn_validation(wbcd_train, wbcd_train_labels, 1:30)
ddk <- nn_kfold(wbcd_train, wbcd_train_labels, fold=117, times=10, k_range=30)
wbcd_test_pred<-knn(train=wbcd_train, test=wbcd_test,
cl=wbcd_train_labels, k=21, prob=T)
as <- table(wbcd_test_pred, wbcd_test_labels)
knn_pred <- nn_of(wbcd_train,wbcd_test,wbcd_train_labels,k=5)
vnn_pred<-vknn_of(wbcd_train,wbcd_test,wbcd_train_labels,k=5)
# assessing accuracy of add train using matrix
table(vnn_pred, wbcd_test_labels)
# testing if add train is valid -------------------------------------------
# Self-training experiment: classify each test row in turn and append
# rows predicted as code 1 (Benign) back into the training set.
tr <- wbcd_train
te <- wbcd_test
trl <- wbcd_train_labels
tel <- wbcd_test_labels
pp <- vector()
for (i in 1:nrow(te))
{
# knn() returns a factor; storing it in the vector pp keeps its integer
# code (1 = Benign, 2 = Malignant, per the factor labels set above).
pp[i] <- knn(train=tr, test=te[i,],
cl=trl, k=5, prob = T)
print(paste("label: ",pp[i]))
if (pp[i] == 1)
{
tr <- rbind(tr, te[i,])
trl <- c(trl, tel[i])
}
else
{
}
# 2 - malignant
#tr <- ifelse(pp[i]==2, rbind(tr, te[i,]), tr)
#trl <- ifelse(pp[i]==2, c(trl, tel[i]), trl)
#tr <- rbind(tr, te[i,])
#trl <- c(trl, tel[i])
message(paste("Train instances now at: ", nrow(tr)))
}
table(pp, wbcd_test_labels)
# looping through k to find best k for add train
acc <- vector()
for (i in 1:25)
{
message(paste("TRAINING AT K: ", i))
temp <- vknn_of(wbcd_train,wbcd_test,wbcd_train_labels,k=i)
# acc[i] = number of correct predictions (diagonal of confusion matrix)
acc[i] <- sum(diag(table(temp, wbcd_test_labels)))
rm(temp)
}
plot(acc, type="l", col="blue", lwd=2,
main="Accuracy of K values", xlab="K values", ylab="% Accuracy")
# acc is at k=5
# evaluating model performance
# Report the positions at which predicted labels disagree with the actual
# ones.
#
# Args:
#   actual:    vector of true labels.
#   predicted: vector of predicted labels, same length as `actual`.
# Side effect: emits one message() per mismatching position, in the same
# "Disagreement at : i" format as before.
# Returns (invisibly) the integer indices of the disagreements, so the
# result can also be used programmatically (previously NULL was returned).
agreement <- function(actual, predicted)
{
  # Guard against silent recycling of different-length vectors.
  stopifnot(length(actual) == length(predicted))
  # Vectorised comparison replaces the scalar loop over the unsafe
  # 1:length(actual) pattern (which yields c(1, 0) on empty input);
  # which() also drops NA comparisons instead of erroring inside if().
  mismatches <- which(actual != predicted)
  for (i in mismatches)
  {
    message(paste("Disagreement at :", i))
  }
  invisible(mismatches)
}
# Map the integer factor codes back to label strings before comparing.
vnn_pred <- ifelse(vnn_pred == 1, "Benign", "Malignant")
agreement(actual=wbcd_test_labels, predicted = vnn_pred)
# looking at how much the predicted and actual factor of
# features match up
a<-CrossTable(x = wbcd_test_labels, y = wbcd_test_pred,
prop.chisq=FALSE)
## we are trying the z standardisation over normalisation
wbcd_z<-as.data.frame(scale(wbcd[-1]))
View(wbcd_z)
# confirming that it has standardised
summary(wbcd_z[c("radius_mean", "area_mean", "smoothness_mean")])
## the mean of a z score should always be 0
## performing knn
wbcd_train<-wbcd_z[1:469, ]
wbcd_test<-wbcd_z[470:569, ]
wbcd_train_labels<-wbcd[1:469, 1]
wbcd_test_labels<-wbcd[470:569, 1]
wbcd_test_pred<-knn(wbcd_train, wbcd_test,
cl=wbcd_train_labels, k=4)
# NOTE(review): nn_of(), analyze_k(), knn_fwd() and diagnose_k() below
# are custom helpers not defined in this file -- presumably sourced from
# a companion script; confirm before running.
nntest <- nn_of(train = wbcd_train, test = wbcd_test, train_labs = wbcd_train_labels,
distance = "chebyshev", k=4)
table(nntest, wbcd_test_labels)
a<-CrossTable(x=wbcd_test_labels, y=wbcd_test_pred, prop.chisq = F)
a<-analyze_k(wbcd_train, wbcd_test, wbcd_train_labels, wbcd_test_labels, 50)
# database a holds test accuracy
# database dd holds train validation
# NOTE(review): `dd` is never assigned in this script (only ddl/ddk are);
# confirm where dd$accuracy is supposed to come from.
cmptbl <- as.data.frame(cbind(a$`k values`, a$Percentage_accuracy, dd$accuracy))
colnames(cmptbl) <- c("k", "test_acc", "train_acc")
# Train vs test accuracy across k, rendered interactively and animated.
pp <- ggplot(data=cmptbl)+
geom_line(aes(x=k, y=test_acc), col="maroon", lwd=1)+
geom_line(aes(x=k, y=train_acc), col="blue", lwd=1)+
labs(title="TRAIN vs TEST ACCURACY", x="k values", y="accuracy")
ggplotly(pp)
pp+transition_reveal(k)
## residuals (test accuracy minus train accuracy, per k)
cmptbl$res <- (cmptbl$test_acc - cmptbl$train_acc)
# plotting the residuals
pp_res <- ggplot(data=cmptbl)+
geom_point(aes(x=k, y=res), col="maroon")+
labs(title="TRAIN || TEST RESIDUALS", x="k values", y="residuals")+
geom_hline(aes(yintercept=0))+
geom_text(aes(x=k, y=res, label=k))+
theme_classic()
ggplotly(pp_res)
# forward selection for feature engineering
qq <- knn_fwd(train_data=wbcd_train,
test_data=wbcd_test,
train_labs = wbcd_train_labels,
test_labs = wbcd_test_labels,
k_neighbours = 21)
# Re-run the k sweep / kNN with single feature columns (24, 27) dropped.
a<-analyze_k(wbcd_train[, -c(24)], wbcd_test[, -c(24)], wbcd_train_labels, wbcd_test_labels, 50)
obm<-knn(train=wbcd_train[,-27], test=wbcd_test[,-27],
cl=wbcd_train_labels, k=21, prob=T)
table(obm, wbcd_test_labels)
# Diagnosing k errors in rows instances of train --------------------------
diagnose_k(train=wbcd_train, test=wbcd_test,
train_labs = wbcd_train_labels, test_labs = wbcd_test_labels,
k_value = 5)
# Inspect the labels of the specific train/test rows flagged above.
wbcd_train_labels[c(428,284,67,438,274)]
wbcd_train_labels[c(208,365,447,50,115)]
wbcd_test_labels[13]
wbcd_test_labels[54]
# omitting train points to see if model is improved
om <- c(428,284,67,438,274,208,365,447,50,115)
wtrl <- wbcd_train_labels[-om]
wtrd <- wbcd_train[-om,]
ompred <- knn(wtrd, wbcd_test,cl=wtrl,k=5)
table(ompred,wbcd_test_labels)
diagnose_k(wtrd,wbcd_test,wtrl,wbcd_test_labels,5)
# conclusion, these two points at 13 and 54 are completely defined
# principal components analysis -------------------------------------------
# NOTE(review): PCA is fitted separately on train and test, so the two
# score matrices are in different bases; one usually projects the test
# set with predict(prcomp(train), test) -- confirm intent.
ptrain <- prcomp(wbcd_train)$x
ptest <- prcomp(wbcd_test)$x
plabs <- knn(train=ptrain, test=ptest,
cl=wbcd_train_labels, k=21, prob=T)
|
/breast cancer/breast cancer original knn algo.R
|
no_license
|
stanleyrazor/Practices-on-Classification-training-Knn
|
R
| false
| false
| 7,326
|
r
|
# kNN Machine learning algorithm 1
# 100daysofMLcode
#
# (This section is a duplicated copy of the kNN script earlier in this
# file -- concatenation residue.) Exploratory kNN classification of a
# breast-cancer dataset: load, normalise, split, tune k, and compare
# normalisation against z-score standardisation.
# loading packages
library(pacman)
p_load(class, tibble, gmodels, dplyr, gganimate, ggplot2, plotly)
# working directory
# NOTE(review): machine-specific absolute path -- will fail elsewhere.
setwd("C:/Users/stanley/Desktop/MISCELLANEOUS R/ml projects/Knn/breast cancer")
# loading dataset (interactive file picker; as.is=T keeps strings as character)
wbcd<-read.csv(file.choose(), as.is=T)
View(wbcd)
# a glimpse of data
glimpse(wbcd)
# removing ID column
wbcd<-wbcd[, -1]
View(wbcd)
# changing target feature(diagnosis) to factor
# (levels B/M become labels Benign/Malignant, codes 1/2)
wbcd$diagnosis<-factor(wbcd$diagnosis,
levels=c("B", "M"),
labels=c("Benign", "Malignant"))
# Class balance of the target variable as percentages (1 decimal place).
# BUG FIX: the original wrote prop.table(table(...)*100) -- multiplying
# *inside* prop.table() is a no-op because prop.table() renormalises
# (prop.table(t * 100) == prop.table(t)), so the output was proportions,
# not the intended percentages. Multiply after normalising instead.
round(prop.table(table(wbcd$diagnosis)) * 100, digits=1)
# taking closer look at three features
# (their raw scales differ by orders of magnitude -- e.g. area_mean vs
# smoothness_mean -- which motivates the normalisation step below)
summary(wbcd[c("radius_mean", "area_mean", "smoothness_mean")])
# transforming and normalizing numerical data
# Min-max normalisation helper: rescales a numeric vector onto [0, 1].
# NOTE: the name shadows base::norm(); kept unchanged because the loop
# below calls norm() by this name.
norm <- function(x) {
  rng <- range(x)
  (x - rng[1]) / (rng[2] - rng[1])
}
# Apply min-max normalisation to every numeric column (columns 2:31;
# column 1 is the diagnosis factor).
wbcd_n<-wbcd
for (j in 2:31)
{
wbcd_n[ ,j]<-norm(wbcd_n[ ,j])
}
View(wbcd_n)
# testing normalization
summary(wbcd_n[c("radius_mean", "area_mean", "smoothness_mean")])
# building two datasets, the training and testing one
# (random 100-row holdout; note this split is overwritten by the fixed
# 469/100 split a few lines below)
div_df <- sample(nrow(wbcd_n), 100, replace = F)
wbcd_train<-wbcd_n[-div_df, -1]
wbcd_test<-wbcd_n[div_df, -1]
# creating factor vectors to store desired
# features(diagnosis) from both test and train
wbcd_train_labels<-wbcd[-div_df, 1]
wbcd_test_labels<-wbcd[div_df, 1]
# maintaining the original form of the data
# NOTE(review): these assignments discard the random split above and use
# the first 469 rows for training, last 100 for testing instead.
wbcd_train <- wbcd_n[1:469, -1]
wbcd_test <- wbcd_n[470:569, -1]
wbcd_train_labels <- wbcd_n[1:469, 1]
wbcd_test_labels <- wbcd_n[470:569, 1]
# kNN implementation from class package
library(class)
# we'll use k=21 since training data has
# 469 observations, square rooting that...
# try to always use k=odd no
library(gmodels)
## before running knn we remove the diagnosis
## or feature we are interested in
# to ensure no NAs in the data we do
sum(is.na(wbcd_test))
sum(is.na(wbcd_train))
# if 0, no NA values exist
## leave one out validation
# NOTE(review): knn_validation(), nn_kfold(), nn_of() and vknn_of() are
# not defined in this file -- presumably sourced from a helper script;
# confirm before running. Also note `as` below shadows base::as().
ddl <- knn_validation(wbcd_train, wbcd_train_labels, 1:30)
ddk <- nn_kfold(wbcd_train, wbcd_train_labels, fold=117, times=10, k_range=30)
wbcd_test_pred<-knn(train=wbcd_train, test=wbcd_test,
cl=wbcd_train_labels, k=21, prob=T)
as <- table(wbcd_test_pred, wbcd_test_labels)
knn_pred <- nn_of(wbcd_train,wbcd_test,wbcd_train_labels,k=5)
vnn_pred<-vknn_of(wbcd_train,wbcd_test,wbcd_train_labels,k=5)
# assessing accuracy of add train using matrix
table(vnn_pred, wbcd_test_labels)
# testing if add train is valid -------------------------------------------
# Self-training experiment: classify each test row in turn and append
# rows predicted as code 1 (Benign) back into the training set.
tr <- wbcd_train
te <- wbcd_test
trl <- wbcd_train_labels
tel <- wbcd_test_labels
pp <- vector()
for (i in 1:nrow(te))
{
# knn() returns a factor; storing it in the vector pp keeps its integer
# code (1 = Benign, 2 = Malignant, per the factor labels set above).
pp[i] <- knn(train=tr, test=te[i,],
cl=trl, k=5, prob = T)
print(paste("label: ",pp[i]))
if (pp[i] == 1)
{
tr <- rbind(tr, te[i,])
trl <- c(trl, tel[i])
}
else
{
}
# 2 - malignant
#tr <- ifelse(pp[i]==2, rbind(tr, te[i,]), tr)
#trl <- ifelse(pp[i]==2, c(trl, tel[i]), trl)
#tr <- rbind(tr, te[i,])
#trl <- c(trl, tel[i])
message(paste("Train instances now at: ", nrow(tr)))
}
table(pp, wbcd_test_labels)
# looping through k to find best k for add train
acc <- vector()
for (i in 1:25)
{
message(paste("TRAINING AT K: ", i))
temp <- vknn_of(wbcd_train,wbcd_test,wbcd_train_labels,k=i)
# acc[i] = number of correct predictions (diagonal of confusion matrix)
acc[i] <- sum(diag(table(temp, wbcd_test_labels)))
rm(temp)
}
plot(acc, type="l", col="blue", lwd=2,
main="Accuracy of K values", xlab="K values", ylab="% Accuracy")
# acc is at k=5
# evaluating model performance
# Report the positions at which predicted labels disagree with the actual
# ones.
#
# Args:
#   actual:    vector of true labels.
#   predicted: vector of predicted labels, same length as `actual`.
# Side effect: emits one message() per mismatching position, in the same
# "Disagreement at : i" format as before.
# Returns (invisibly) the integer indices of the disagreements, so the
# result can also be used programmatically (previously NULL was returned).
agreement <- function(actual, predicted)
{
  # Guard against silent recycling of different-length vectors.
  stopifnot(length(actual) == length(predicted))
  # Vectorised comparison replaces the scalar loop over the unsafe
  # 1:length(actual) pattern (which yields c(1, 0) on empty input);
  # which() also drops NA comparisons instead of erroring inside if().
  mismatches <- which(actual != predicted)
  for (i in mismatches)
  {
    message(paste("Disagreement at :", i))
  }
  invisible(mismatches)
}
# Map the integer factor codes back to label strings before comparing.
vnn_pred <- ifelse(vnn_pred == 1, "Benign", "Malignant")
agreement(actual=wbcd_test_labels, predicted = vnn_pred)
# looking at how much the predicted and actual factor of
# features match up
a<-CrossTable(x = wbcd_test_labels, y = wbcd_test_pred,
prop.chisq=FALSE)
## we are trying the z standardisation over normalisation
wbcd_z<-as.data.frame(scale(wbcd[-1]))
View(wbcd_z)
# confirming that it has standardised
summary(wbcd_z[c("radius_mean", "area_mean", "smoothness_mean")])
## the mean of a z score should always be 0
## performing knn
wbcd_train<-wbcd_z[1:469, ]
wbcd_test<-wbcd_z[470:569, ]
wbcd_train_labels<-wbcd[1:469, 1]
wbcd_test_labels<-wbcd[470:569, 1]
wbcd_test_pred<-knn(wbcd_train, wbcd_test,
cl=wbcd_train_labels, k=4)
# NOTE(review): nn_of(), analyze_k() and knn_fwd() below are custom
# helpers not defined in this file -- presumably sourced from a
# companion script; confirm before running.
nntest <- nn_of(train = wbcd_train, test = wbcd_test, train_labs = wbcd_train_labels,
distance = "chebyshev", k=4)
table(nntest, wbcd_test_labels)
a<-CrossTable(x=wbcd_test_labels, y=wbcd_test_pred, prop.chisq = F)
a<-analyze_k(wbcd_train, wbcd_test, wbcd_train_labels, wbcd_test_labels, 50)
# database a holds test accuracy
# database dd holds train validation
# NOTE(review): `dd` is never assigned in this script (only ddl/ddk are);
# confirm where dd$accuracy is supposed to come from.
cmptbl <- as.data.frame(cbind(a$`k values`, a$Percentage_accuracy, dd$accuracy))
colnames(cmptbl) <- c("k", "test_acc", "train_acc")
# Train vs test accuracy across k, rendered interactively and animated.
pp <- ggplot(data=cmptbl)+
geom_line(aes(x=k, y=test_acc), col="maroon", lwd=1)+
geom_line(aes(x=k, y=train_acc), col="blue", lwd=1)+
labs(title="TRAIN vs TEST ACCURACY", x="k values", y="accuracy")
ggplotly(pp)
pp+transition_reveal(k)
## residuals (test accuracy minus train accuracy, per k)
cmptbl$res <- (cmptbl$test_acc - cmptbl$train_acc)
# plotting the residuals
pp_res <- ggplot(data=cmptbl)+
geom_point(aes(x=k, y=res), col="maroon")+
labs(title="TRAIN || TEST RESIDUALS", x="k values", y="residuals")+
geom_hline(aes(yintercept=0))+
geom_text(aes(x=k, y=res, label=k))+
theme_classic()
ggplotly(pp_res)
# forward selection for feature engineering
qq <- knn_fwd(train_data=wbcd_train,
test_data=wbcd_test,
train_labs = wbcd_train_labels,
test_labs = wbcd_test_labels,
k_neighbours = 21)
# Re-run the k sweep / kNN with single feature columns (24, 27) dropped.
a<-analyze_k(wbcd_train[, -c(24)], wbcd_test[, -c(24)], wbcd_train_labels, wbcd_test_labels, 50)
obm<-knn(train=wbcd_train[,-27], test=wbcd_test[,-27],
cl=wbcd_train_labels, k=21, prob=T)
table(obm, wbcd_test_labels)
# Diagnosing k errors in rows instances of train --------------------------
diagnose_k(train=wbcd_train, test=wbcd_test,
train_labs = wbcd_train_labels, test_labs = wbcd_test_labels,
k_value = 5)
wbcd_train_labels[c(428,284,67,438,274)]
wbcd_train_labels[c(208,365,447,50,115)]
wbcd_test_labels[13]
wbcd_test_labels[54]
# omitting train points to see if model is improved
om <- c(428,284,67,438,274,208,365,447,50,115)
wtrl <- wbcd_train_labels[-om]
wtrd <- wbcd_train[-om,]
ompred <- knn(wtrd, wbcd_test,cl=wtrl,k=5)
table(ompred,wbcd_test_labels)
diagnose_k(wtrd,wbcd_test,wtrl,wbcd_test_labels,5)
# conclusion, these two points at 13 and 54 are completely defined
# principal components analysis -------------------------------------------
ptrain <- prcomp(wbcd_train)$x
ptest <- prcomp(wbcd_test)$x
plabs <- knn(train=ptrain, test=ptest,
cl=wbcd_train_labels, k=21, prob=T)
|
testlist <- list(obs_to_nodes_temp = structure(c(3.60908643865453e+224, 1.11742128101669e+83, 1.97870122171185e+143, NaN, 9.37480792170664e+280, 3.07504141416437e-136, 1.01645313737868e-124, 1.00679150745081e-73, 1.27047634996911e-62, 2.13409614918679e-145, 2.01768927817267e+47, NaN, 4.713762584588e-48, 1.7506159598139e-260, 4.13272931804741e+261, 7.50347923086546e-71, NaN, 2.90402077461892e-286, 3.56215199780015e-230, 1.43677827629537e-133, 7.46114516421967e-258, 5.0141334820982e+184, NaN, 29072.6257709098, 1.41003229091807e-86, 1.93643921836204e+180, 1.99326012973524e-62, 1.39815628510302e-16), .Dim = c(4L, 7L)), tree_term_nodes = c(-1.48913015309838e+39, 2.63725453740933e+261, 4.51945682641686e+163, -7.62766352772688e-53, 2.09617024723214e+257, -4.68831427091371e-154, 1.11903477574716e+232, 3.26286409658457e-84, 3.96692931002303e-30, 1.30945164738812e+193, 2.81167291417672e+284, 7.81344866205955e-69, 8.06437782716787e-68, -2.43432862423038e-282, -8.66082579829582e-93, -4.44116795166925e-236, 8.79838796864221e-85, 1.18757307056025e+27, -1.93958723679818e+293, -1.4797859307384e+74, 1.0829478318779e-20, 3.18894353286941e-291, 1.89411227126564e+44, -3.25228381230693e-308, 4.16222053993285e+189, -3.47187673951786e+104, -9.61683738769097e+161, 9.71804279585044e-239, -Inf, 2.17299108064979e+107, 2.26895126666313e+150, -8.40097381850818e+307, 1.13760189546125e+200, -2.04311773710004e-81, -5.96231150102709e+47, -3.45018564135406e-87, 1.87350069967456e-225, 1.78742032507512e+27, 1.5457560660078e-223, 3.9114491893228e-103, 4.75074045149166e+44, 2.03194847659285e-141, -2.91324326516138e+212, 1.20155833324313e+91, -2.30373608285758e-256, 7.8916857411679e-125, -1.33791239970731e+107, -1.0992233037189e-83, -8.48368816965793e-131, -3.27398714521823e+63, -3.38410933449517e-93, -4.03376812105127e+85, NaN, -6.783456706827e+233, NA, 6.88318444663651e+174, -2.75811974752553e-154, 1.66811523321678e-190, -1225456.09007819, -6.72853033935425e+227, -1.29886619450838e-120, 
3.20339905305954e+244, -1.21482693215067e+115, 2.43581014863413e-09, -1.64267229240635e+77, 1.69376167176247e-180, -2.96113780540984e+227, 8.68343785112204e-195, 1.0935372876868e-207, 8.82812941134646e-118, 1.0935372876868e-207, 1.35097132339656e+180, 0))
result <- do.call(bartBMA:::J,testlist)
str(result)
|
/issuestests/bartBMA/inst/testfiles/J/J_output/log_d88529322dcf14546d0075ec66599396b1bdbffd/J-test.R
|
no_license
|
akhikolla/RcppDeepStateTest
|
R
| false
| false
| 2,323
|
r
|
testlist <- list(obs_to_nodes_temp = structure(c(3.60908643865453e+224, 1.11742128101669e+83, 1.97870122171185e+143, NaN, 9.37480792170664e+280, 3.07504141416437e-136, 1.01645313737868e-124, 1.00679150745081e-73, 1.27047634996911e-62, 2.13409614918679e-145, 2.01768927817267e+47, NaN, 4.713762584588e-48, 1.7506159598139e-260, 4.13272931804741e+261, 7.50347923086546e-71, NaN, 2.90402077461892e-286, 3.56215199780015e-230, 1.43677827629537e-133, 7.46114516421967e-258, 5.0141334820982e+184, NaN, 29072.6257709098, 1.41003229091807e-86, 1.93643921836204e+180, 1.99326012973524e-62, 1.39815628510302e-16), .Dim = c(4L, 7L)), tree_term_nodes = c(-1.48913015309838e+39, 2.63725453740933e+261, 4.51945682641686e+163, -7.62766352772688e-53, 2.09617024723214e+257, -4.68831427091371e-154, 1.11903477574716e+232, 3.26286409658457e-84, 3.96692931002303e-30, 1.30945164738812e+193, 2.81167291417672e+284, 7.81344866205955e-69, 8.06437782716787e-68, -2.43432862423038e-282, -8.66082579829582e-93, -4.44116795166925e-236, 8.79838796864221e-85, 1.18757307056025e+27, -1.93958723679818e+293, -1.4797859307384e+74, 1.0829478318779e-20, 3.18894353286941e-291, 1.89411227126564e+44, -3.25228381230693e-308, 4.16222053993285e+189, -3.47187673951786e+104, -9.61683738769097e+161, 9.71804279585044e-239, -Inf, 2.17299108064979e+107, 2.26895126666313e+150, -8.40097381850818e+307, 1.13760189546125e+200, -2.04311773710004e-81, -5.96231150102709e+47, -3.45018564135406e-87, 1.87350069967456e-225, 1.78742032507512e+27, 1.5457560660078e-223, 3.9114491893228e-103, 4.75074045149166e+44, 2.03194847659285e-141, -2.91324326516138e+212, 1.20155833324313e+91, -2.30373608285758e-256, 7.8916857411679e-125, -1.33791239970731e+107, -1.0992233037189e-83, -8.48368816965793e-131, -3.27398714521823e+63, -3.38410933449517e-93, -4.03376812105127e+85, NaN, -6.783456706827e+233, NA, 6.88318444663651e+174, -2.75811974752553e-154, 1.66811523321678e-190, -1225456.09007819, -6.72853033935425e+227, -1.29886619450838e-120, 
3.20339905305954e+244, -1.21482693215067e+115, 2.43581014863413e-09, -1.64267229240635e+77, 1.69376167176247e-180, -2.96113780540984e+227, 8.68343785112204e-195, 1.0935372876868e-207, 8.82812941134646e-118, 1.0935372876868e-207, 1.35097132339656e+180, 0))
result <- do.call(bartBMA:::J,testlist)
str(result)
|
gs <- read.csv("ncis_bystate_bymonth_bytype.csv")
saveRDS(gs, file="/tmp/NYTimesGunSalesData.rds")
newgs <- readRDS("/tmp/NYTimesGunSalesData.rds")
identical(newgs, gs)
object.size(gs)
file.info("/tmp/NYTimesGunSalesData.rds")$size
### old URL has changed
##webgs <- read.csv(paste0("https://raw.githubusercontent.com/NYTimes/"
## "gun-sales/master/data/ncis_bystate_bymonth_bytype.csv"))
### new URL
##webgs <- read.csv(paste0("https://raw.githubusercontent.com/NYTimes/",
## "gun-sales/master/inst/rawdata/ncis_bystate_bymonth_bytype.csv"))
### or our repo
webgs <- read.csv(paste0("https://raw.githubusercontent.com/eddelbuettel/",
"samples-intermediate-r/master/lesson02/",
"ncis_bystate_bymonth_bytype.csv"))
identical(gs, webgs)
|
/lesson02/data.R
|
no_license
|
anhnguyendepocen/samples-intermediate-r
|
R
| false
| false
| 842
|
r
|
gs <- read.csv("ncis_bystate_bymonth_bytype.csv")
saveRDS(gs, file="/tmp/NYTimesGunSalesData.rds")
newgs <- readRDS("/tmp/NYTimesGunSalesData.rds")
identical(newgs, gs)
object.size(gs)
file.info("/tmp/NYTimesGunSalesData.rds")$size
### old URL has changed
##webgs <- read.csv(paste0("https://raw.githubusercontent.com/NYTimes/"
## "gun-sales/master/data/ncis_bystate_bymonth_bytype.csv"))
### new URL
##webgs <- read.csv(paste0("https://raw.githubusercontent.com/NYTimes/",
## "gun-sales/master/inst/rawdata/ncis_bystate_bymonth_bytype.csv"))
### or our repo
webgs <- read.csv(paste0("https://raw.githubusercontent.com/eddelbuettel/",
"samples-intermediate-r/master/lesson02/",
"ncis_bystate_bymonth_bytype.csv"))
identical(gs, webgs)
|
# Example based on Chris Paciorek's tutorial:
# https://github.com/berkeley-scf/tutorial-parallel-basics/blob/master/parallel-basics.html
#
# Note: there is a conflict between openBLAS, the multi-threaded linear
# algebra package, and foreach. It can cause linear algebra operations
# within a foreach loop to hang
# If your system uses openBLAS (note that the SCF computers do),
# then before running R, execute the command:
#
# export OMP_NUM_THREADS=1
#
# This command sets an environment variable that tells BLAS to only
# use a single thread.
###################################################
########## identify number of cores ###############
###################################################
###################################################
################### FOREACH #######################
###################################################
library(foreach)
library(doParallel)
# load in data and looFit() function
source("rf.R")
# set the number of cores to use manually
nCores <- 5
registerDoParallel(nCores)
# do only first 30 for illustration
nSub <- 30
result <- foreach(i = 1:nSub) %dopar% {
cat('Starting ', i, 'th job.\n', sep = '')
output <- looFit(i, Y, X)
cat('Finishing ', i, 'th job.\n', sep = '')
output # this will become part of the out object
}
result_df <- data.frame(results = unlist(result))
# remember to save the results of your analysis if
# you're running it using a shell script!
write.csv(result_df, "results_scf.csv")
|
/week7/scf_example/parallel_scf.R
|
no_license
|
jpdunc23/stat-215a-fall-2020
|
R
| false
| false
| 1,488
|
r
|
# Example based on Chris Paciorek's tutorial:
# https://github.com/berkeley-scf/tutorial-parallel-basics/blob/master/parallel-basics.html
#
# Note: there is a conflict between openBLAS, the multi-threaded linear
# algebra package, and foreach. It can cause linear algebra operations
# within a foreach loop to hang
# If your system uses openBLAS (note that the SCF computers do),
# then before running R, execute the command:
#
# export OMP_NUM_THREADS=1
#
# This command sets an environment variable that tells BLAS to only
# use a single thread.
###################################################
########## identify number of cores ###############
###################################################
###################################################
################### FOREACH #######################
###################################################
library(foreach)
library(doParallel)
# load in data and looFit() function
source("rf.R")
# set the number of cores to use manually
nCores <- 5
registerDoParallel(nCores)
# do only first 30 for illustration
nSub <- 30
result <- foreach(i = 1:nSub) %dopar% {
cat('Starting ', i, 'th job.\n', sep = '')
output <- looFit(i, Y, X)
cat('Finishing ', i, 'th job.\n', sep = '')
output # this will become part of the out object
}
result_df <- data.frame(results = unlist(result))
# remember to save the results of your analysis if
# you're running it using a shell script!
write.csv(result_df, "results_scf.csv")
|
## Caching the Inverse of a Matrix
## The function makeCacheMatrix creates a special matrix object that caches its inverse.
## Build a matrix wrapper that can memoise its inverse.
## Returns a list of accessor closures sharing one environment:
##   set(y)      -- store a new matrix and drop any cached inverse
##   get()       -- return the stored matrix
##   setinv(inv) -- record a computed inverse
##   getinv()    -- return the cached inverse (NULL until set)
makeCacheMatrix <- function(x = matrix()) {
  cached_inverse <- NULL
  list(
    set = function(y) {
      x <<- y
      cached_inverse <<- NULL  # a new matrix invalidates the cache
    },
    get = function() x,
    setinv = function(inverse) cached_inverse <<- inverse,
    getinv = function() cached_inverse
  )
}
## The function cacheSolve computes the inverse of the special matrix returned by makeCacheMatrix.
## If the inverse has already been calculated (and the matrix has not changed),
## then cacheSolve retrieves the inverse from the cache.
## Write a short comment describing this function
## Return the inverse of the special matrix produced by makeCacheMatrix.
## On a cache hit the stored inverse is reused (with a message); on a
## miss the inverse is computed with solve(), written back into the
## wrapper, and returned. Extra arguments in `...` go to solve().
cacheSolve <- function(x, ...) {
  cached <- x$getinv()
  if (is.null(cached)) {
    cached <- solve(x$get(), ...)
    x$setinv(cached)
  } else {
    message("getting cached data")
  }
  cached
}
#Test
mat <- matrix(c(1,2,3,4),2,2)
matinv <- makeCacheMatrix(mat)
cacheSolve(matinv)
|
/cachematrix.R
|
no_license
|
Laurels1/ProgrammingAssignment2
|
R
| false
| false
| 1,104
|
r
|
## Caching the Inverse of a Matrix
## The function makeCacheMatrix creates a special matrix object that caches its inverse.
## Create a "cache matrix": a list of closures around a matrix `x`
## that can also store (memoise) its inverse.
##   set(y)      : replace the matrix and clear the cached inverse
##   get()       : return the current matrix
##   setinv(inv) : store a computed inverse in the cache
##   getinv()    : return the cached inverse, or NULL if not yet set
makeCacheMatrix <- function(x = matrix()) {
        ## cache for the inverse; NULL means "not computed yet"
        invcache <- NULL
        set <- function(y) {
                x <<- y
                ## a new matrix invalidates any previously cached inverse
                invcache <<- NULL
        }
        get <- function() x
        setinv <- function(inverse) invcache <<- inverse
        getinv <- function() invcache
        ## expose the four accessors; they all share this call's environment
        list(set = set,
             get = get,
             setinv = setinv,
             getinv = getinv)
}
## The function cacheSolve computes the inverse of the special matrix returned by makeCacheMatrix.
## If the inverse has already been calculated (and the matrix has not changed),
## then cacheSolve retrieves the inverse from the cache.
## Write a short comment describing this function
## Compute the inverse of the special "cache matrix" built by
## makeCacheMatrix. If an inverse is already cached (and the matrix has
## not changed since), the cached value is returned without recomputing.
## Extra arguments in `...` are passed through to solve().
cacheSolve <- function(x, ...) {
        invcache <- x$getinv()
        if (!is.null(invcache)) {
                ## cache hit: reuse the stored inverse
                message("getting cached data")
                return(invcache)
        }
        ## cache miss: invert the stored matrix and remember the result
        data <- x$get()
        invcache <- solve(data, ...)
        x$setinv(invcache)
        invcache
}
#Test
mat <- matrix(c(1,2,3,4),2,2)
matinv <- makeCacheMatrix(mat)
cacheSolve(matinv)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/teamBowlersVsBatsmenMatch.R
\name{teamBowlersVsBatsmenMatch}
\alias{teamBowlersVsBatsmenMatch}
\title{Team bowlers vs batsmen in a match}
\usage{
teamBowlersVsBatsmenMatch(match,theTeam,opposition, plot=1)
}
\arguments{
\item{match}{The data frame of the match. This can be obtained with the call for e.g
a <- getMatchDetails("England","Pakistan","2006-09-05",dir="../temp")}
\item{theTeam}{The team against which the performance is required}
\item{opposition}{The opposition team}
\item{plot}{plot=1 (static),plot=2(interactive),plot=3(table)}
}
\value{
None or dataframe
If plot=TRUE there is no return. If plot=FALSE then the dataframe is returned
}
\description{
This function computes performance of bowlers of a team against an opposition in a match
}
\note{
Maintainer: Tinniam V Ganesh \email{tvganesh.85@gmail.com}
}
\examples{
\dontrun{
# Get the match between England and Pakistan
a <- getMatchDetails("England","Pakistan","2006-09-05",dir="../temp")
teamBowlersVsBatsmenMatch(a,"Pakistan","England")
teamBowlersVsBatsmenMatch(a,"England","Pakistan")
m <- teamBowlersVsBatsmenMatch(a,"Pakistan","England")
}
}
\references{
\url{https://cricsheet.org/}\cr
\url{https://gigadom.in/}\cr
\url{https://github.com/tvganesh/yorkrData/}
}
\seealso{
\code{\link{teamBatsmenPartnershipAllOppnAllMatches}}\cr
\code{\link{teamBatsmenPartnershipAllOppnAllMatchesPlot}}\cr
\code{\link{teamBatsmenPartnershipOppnAllMatchesChart}}\cr
\code{\link{teamBowlersVsBatsmenAllOppnAllMatchesRept}}\cr
\code{\link{teamBowlersVsBatsmenAllOppnAllMatchesPlot}}\cr
}
\author{
Tinniam V Ganesh
}
|
/man/teamBowlersVsBatsmenMatch.Rd
|
no_license
|
tvganesh/yorkr
|
R
| false
| true
| 1,659
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/teamBowlersVsBatsmenMatch.R
\name{teamBowlersVsBatsmenMatch}
\alias{teamBowlersVsBatsmenMatch}
\title{Team bowlers vs batsmen in a match}
\usage{
teamBowlersVsBatsmenMatch(match,theTeam,opposition, plot=1)
}
\arguments{
\item{match}{The data frame of the match. This can be obtained with the call for e.g
a <- getMatchDetails("England","Pakistan","2006-09-05",dir="../temp")}
\item{theTeam}{The team against which the performance is required}
\item{opposition}{The opposition team}
\item{plot}{plot=1 (static),plot=2(interactive),plot=3(table)}
}
\value{
None or dataframe
If plot=TRUE there is no return. If plot=FALSE then the dataframe is returned
}
\description{
This function computes performance of bowlers of a team against an opposition in a match
}
\note{
Maintainer: Tinniam V Ganesh \email{tvganesh.85@gmail.com}
}
\examples{
\dontrun{
# Get the match between England and Pakistan
a <- getMatchDetails("England","Pakistan","2006-09-05",dir="../temp")
teamBowlersVsBatsmenMatch(a,"Pakistan","England")
teamBowlersVsBatsmenMatch(a,"England","Pakistan")
m <- teamBowlersVsBatsmenMatch(a,"Pakistan","England")
}
}
\references{
\url{https://cricsheet.org/}\cr
\url{https://gigadom.in/}\cr
\url{https://github.com/tvganesh/yorkrData/}
}
\seealso{
\code{\link{teamBatsmenPartnershipAllOppnAllMatches}}\cr
\code{\link{teamBatsmenPartnershipAllOppnAllMatchesPlot}}\cr
\code{\link{teamBatsmenPartnershipOppnAllMatchesChart}}\cr
\code{\link{teamBowlersVsBatsmenAllOppnAllMatchesRept}}\cr
\code{\link{teamBowlersVsBatsmenAllOppnAllMatchesPlot}}\cr
}
\author{
Tinniam V Ganesh
}
|
testlist <- list(latLongs = structure(c(NaN, Inf, -Inf), .Dim = c(3L, 1L)), r = 2.2519576149808e-310)
result <- do.call(MGDrivE::calcCos,testlist)
str(result)
|
/MGDrivE/inst/testfiles/calcCos/libFuzzer_calcCos/calcCos_valgrind_files/1612727334-test.R
|
no_license
|
akhikolla/updatedatatype-list2
|
R
| false
| false
| 163
|
r
|
testlist <- list(latLongs = structure(c(NaN, Inf, -Inf), .Dim = c(3L, 1L)), r = 2.2519576149808e-310)
result <- do.call(MGDrivE::calcCos,testlist)
str(result)
|
# Rattle is Copyright (c) 2006-2014 Togaware Pty Ltd.
#============================================================
# Rattle timestamp: 2014-12-06 21:02:42 x86_64-pc-linux-gnu
# Rattle version 3.3.1 user 'kanashiro'
# Export this log textview to a file using the Export button or the Tools
# menu to save a log of all activity. This facilitates repeatability. Exporting
# to file 'myrf01.R', for example, allows us to the type in the R Console
# the command source('myrf01.R') to repeat the process automatically.
# Generally, we may want to edit the file to suit our needs. We can also directly
# edit this current log textview to record additional information before exporting.
# Saving and loading projects also retains this log.
library(rattle)
# This log generally records the process of building a model. However, with very
# little effort the log can be used to score a new dataset. The logical variable
# 'building' is used to toggle between generating transformations, as when building
# a model, and simply using the transformations, as when scoring a dataset.
building <- TRUE
scoring <- ! building
# The colorspace package is used to generate the colours used in plots, if available.
library(colorspace)
# A pre-defined value is used to reset the random seed so that results are repeatable.
crv$seed <- 42
#============================================================
# Rattle timestamp: 2014-12-06 21:02:46 x86_64-pc-linux-gnu
# Load the data.
crs$dataset <- read.csv("file:///home/kanashiro/Documents/UnB/ML/ML_metrics/data/C++/all-data-C++.csv", na.strings=c(".", "NA", "", "?"), strip.white=TRUE, encoding="UTF-8")
#============================================================
# Rattle timestamp: 2014-12-06 21:02:47 x86_64-pc-linux-gnu
# Note the user selections.
# Build the training/validate/test datasets.
set.seed(crv$seed)
crs$nobs <- nrow(crs$dataset) # 38015 observations
crs$sample <- crs$train <- sample(nrow(crs$dataset), 0.7*crs$nobs) # 26610 observations
crs$validate <- sample(setdiff(seq_len(nrow(crs$dataset)), crs$train), 0.15*crs$nobs) # 5702 observations
crs$test <- setdiff(setdiff(seq_len(nrow(crs$dataset)), crs$train), crs$validate) # 5703 observations
# The following variable selections have been noted.
crs$input <- c("acc", "accm", "amloc", "anpm",
"cbo", "dit", "lcom4", "loc",
"noa", "noc", "nom", "npa",
"npm", "rfc", "sc")
crs$numeric <- c("acc", "accm", "amloc", "anpm",
"cbo", "dit", "lcom4", "loc",
"noa", "noc", "nom", "npa",
"npm", "rfc", "sc")
crs$categoric <- NULL
crs$target <- "good_design"
crs$risk <- NULL
crs$ident <- NULL
crs$ignore <- NULL
crs$weights <- NULL
#============================================================
# Rattle timestamp: 2014-12-06 21:02:53 x86_64-pc-linux-gnu
# Transform variables by rescaling.
# Rescale acc.
crs$dataset[["R10_acc"]] <- crs$dataset[["acc"]]
# Take a log10 transform of the variable - treat -Inf as NA.
if (building)
{
crs$dataset[["R10_acc"]] <- log10(crs$dataset[["acc"]])
crs$dataset[crs$dataset[["R10_acc"]] == -Inf & ! is.na(crs$dataset[["R10_acc"]]), "R10_acc"] <- NA
}
# When scoring transform using the training data parameters.
if (scoring)
{
crs$dataset[["R10_acc"]] <- log10(crs$dataset[["acc"]])
crs$dataset[crs$dataset[["R10_acc"]] == -Inf & ! is.na(crs$dataset[["R10_acc"]]), "R10_acc"] <- NA
}
# Rescale accm.
crs$dataset[["R10_accm"]] <- crs$dataset[["accm"]]
# Take a log10 transform of the variable - treat -Inf as NA.
if (building)
{
crs$dataset[["R10_accm"]] <- log10(crs$dataset[["accm"]])
crs$dataset[crs$dataset[["R10_accm"]] == -Inf & ! is.na(crs$dataset[["R10_accm"]]), "R10_accm"] <- NA
}
# When scoring transform using the training data parameters.
if (scoring)
{
crs$dataset[["R10_accm"]] <- log10(crs$dataset[["accm"]])
crs$dataset[crs$dataset[["R10_accm"]] == -Inf & ! is.na(crs$dataset[["R10_accm"]]), "R10_accm"] <- NA
}
# Rescale amloc.
crs$dataset[["R10_amloc"]] <- crs$dataset[["amloc"]]
# Take a log10 transform of the variable - treat -Inf as NA.
if (building)
{
crs$dataset[["R10_amloc"]] <- log10(crs$dataset[["amloc"]])
crs$dataset[crs$dataset[["R10_amloc"]] == -Inf & ! is.na(crs$dataset[["R10_amloc"]]), "R10_amloc"] <- NA
}
# When scoring transform using the training data parameters.
if (scoring)
{
crs$dataset[["R10_amloc"]] <- log10(crs$dataset[["amloc"]])
crs$dataset[crs$dataset[["R10_amloc"]] == -Inf & ! is.na(crs$dataset[["R10_amloc"]]), "R10_amloc"] <- NA
}
# Rescale anpm.
crs$dataset[["R10_anpm"]] <- crs$dataset[["anpm"]]
# Take a log10 transform of the variable - treat -Inf as NA.
if (building)
{
crs$dataset[["R10_anpm"]] <- log10(crs$dataset[["anpm"]])
crs$dataset[crs$dataset[["R10_anpm"]] == -Inf & ! is.na(crs$dataset[["R10_anpm"]]), "R10_anpm"] <- NA
}
# When scoring transform using the training data parameters.
if (scoring)
{
crs$dataset[["R10_anpm"]] <- log10(crs$dataset[["anpm"]])
crs$dataset[crs$dataset[["R10_anpm"]] == -Inf & ! is.na(crs$dataset[["R10_anpm"]]), "R10_anpm"] <- NA
}
# Rescale cbo.
crs$dataset[["R10_cbo"]] <- crs$dataset[["cbo"]]
# Take a log10 transform of the variable - treat -Inf as NA.
if (building)
{
crs$dataset[["R10_cbo"]] <- log10(crs$dataset[["cbo"]])
crs$dataset[crs$dataset[["R10_cbo"]] == -Inf & ! is.na(crs$dataset[["R10_cbo"]]), "R10_cbo"] <- NA
}
# When scoring transform using the training data parameters.
if (scoring)
{
crs$dataset[["R10_cbo"]] <- log10(crs$dataset[["cbo"]])
crs$dataset[crs$dataset[["R10_cbo"]] == -Inf & ! is.na(crs$dataset[["R10_cbo"]]), "R10_cbo"] <- NA
}
# Rescale dit.
crs$dataset[["R10_dit"]] <- crs$dataset[["dit"]]
# Take a log10 transform of the variable - treat -Inf as NA.
if (building)
{
crs$dataset[["R10_dit"]] <- log10(crs$dataset[["dit"]])
crs$dataset[crs$dataset[["R10_dit"]] == -Inf & ! is.na(crs$dataset[["R10_dit"]]), "R10_dit"] <- NA
}
# When scoring transform using the training data parameters.
if (scoring)
{
crs$dataset[["R10_dit"]] <- log10(crs$dataset[["dit"]])
crs$dataset[crs$dataset[["R10_dit"]] == -Inf & ! is.na(crs$dataset[["R10_dit"]]), "R10_dit"] <- NA
}
# Rescale lcom4.
crs$dataset[["R10_lcom4"]] <- crs$dataset[["lcom4"]]
# Take a log10 transform of the variable - treat -Inf as NA.
if (building)
{
crs$dataset[["R10_lcom4"]] <- log10(crs$dataset[["lcom4"]])
crs$dataset[crs$dataset[["R10_lcom4"]] == -Inf & ! is.na(crs$dataset[["R10_lcom4"]]), "R10_lcom4"] <- NA
}
# When scoring transform using the training data parameters.
if (scoring)
{
crs$dataset[["R10_lcom4"]] <- log10(crs$dataset[["lcom4"]])
crs$dataset[crs$dataset[["R10_lcom4"]] == -Inf & ! is.na(crs$dataset[["R10_lcom4"]]), "R10_lcom4"] <- NA
}
# Rescale loc.
crs$dataset[["R10_loc"]] <- crs$dataset[["loc"]]
# Take a log10 transform of the variable - treat -Inf as NA.
if (building)
{
crs$dataset[["R10_loc"]] <- log10(crs$dataset[["loc"]])
crs$dataset[crs$dataset[["R10_loc"]] == -Inf & ! is.na(crs$dataset[["R10_loc"]]), "R10_loc"] <- NA
}
# When scoring transform using the training data parameters.
if (scoring)
{
crs$dataset[["R10_loc"]] <- log10(crs$dataset[["loc"]])
crs$dataset[crs$dataset[["R10_loc"]] == -Inf & ! is.na(crs$dataset[["R10_loc"]]), "R10_loc"] <- NA
}
# Rescale noa.
crs$dataset[["R10_noa"]] <- crs$dataset[["noa"]]
# Take a log10 transform of the variable - treat -Inf as NA.
if (building)
{
crs$dataset[["R10_noa"]] <- log10(crs$dataset[["noa"]])
crs$dataset[crs$dataset[["R10_noa"]] == -Inf & ! is.na(crs$dataset[["R10_noa"]]), "R10_noa"] <- NA
}
# When scoring transform using the training data parameters.
if (scoring)
{
crs$dataset[["R10_noa"]] <- log10(crs$dataset[["noa"]])
crs$dataset[crs$dataset[["R10_noa"]] == -Inf & ! is.na(crs$dataset[["R10_noa"]]), "R10_noa"] <- NA
}
# Rescale noc.
crs$dataset[["R10_noc"]] <- crs$dataset[["noc"]]
# Take a log10 transform of the variable - treat -Inf as NA.
if (building)
{
crs$dataset[["R10_noc"]] <- log10(crs$dataset[["noc"]])
crs$dataset[crs$dataset[["R10_noc"]] == -Inf & ! is.na(crs$dataset[["R10_noc"]]), "R10_noc"] <- NA
}
# When scoring transform using the training data parameters.
if (scoring)
{
crs$dataset[["R10_noc"]] <- log10(crs$dataset[["noc"]])
crs$dataset[crs$dataset[["R10_noc"]] == -Inf & ! is.na(crs$dataset[["R10_noc"]]), "R10_noc"] <- NA
}
# Rescale nom.
crs$dataset[["R10_nom"]] <- crs$dataset[["nom"]]
# Take a log10 transform of the variable - treat -Inf as NA.
if (building)
{
crs$dataset[["R10_nom"]] <- log10(crs$dataset[["nom"]])
crs$dataset[crs$dataset[["R10_nom"]] == -Inf & ! is.na(crs$dataset[["R10_nom"]]), "R10_nom"] <- NA
}
# When scoring transform using the training data parameters.
if (scoring)
{
crs$dataset[["R10_nom"]] <- log10(crs$dataset[["nom"]])
crs$dataset[crs$dataset[["R10_nom"]] == -Inf & ! is.na(crs$dataset[["R10_nom"]]), "R10_nom"] <- NA
}
# Rescale npa.
crs$dataset[["R10_npa"]] <- crs$dataset[["npa"]]
# Take a log10 transform of the variable - treat -Inf as NA.
if (building)
{
crs$dataset[["R10_npa"]] <- log10(crs$dataset[["npa"]])
crs$dataset[crs$dataset[["R10_npa"]] == -Inf & ! is.na(crs$dataset[["R10_npa"]]), "R10_npa"] <- NA
}
# When scoring transform using the training data parameters.
if (scoring)
{
crs$dataset[["R10_npa"]] <- log10(crs$dataset[["npa"]])
crs$dataset[crs$dataset[["R10_npa"]] == -Inf & ! is.na(crs$dataset[["R10_npa"]]), "R10_npa"] <- NA
}
# Rescale npm.
crs$dataset[["R10_npm"]] <- crs$dataset[["npm"]]
# Take a log10 transform of the variable - treat -Inf as NA.
if (building)
{
crs$dataset[["R10_npm"]] <- log10(crs$dataset[["npm"]])
crs$dataset[crs$dataset[["R10_npm"]] == -Inf & ! is.na(crs$dataset[["R10_npm"]]), "R10_npm"] <- NA
}
# When scoring transform using the training data parameters.
if (scoring)
{
crs$dataset[["R10_npm"]] <- log10(crs$dataset[["npm"]])
crs$dataset[crs$dataset[["R10_npm"]] == -Inf & ! is.na(crs$dataset[["R10_npm"]]), "R10_npm"] <- NA
}
# Rescale rfc.
crs$dataset[["R10_rfc"]] <- crs$dataset[["rfc"]]
# Take a log10 transform of the variable - treat -Inf as NA.
if (building)
{
crs$dataset[["R10_rfc"]] <- log10(crs$dataset[["rfc"]])
crs$dataset[crs$dataset[["R10_rfc"]] == -Inf & ! is.na(crs$dataset[["R10_rfc"]]), "R10_rfc"] <- NA
}
# When scoring transform using the training data parameters.
if (scoring)
{
crs$dataset[["R10_rfc"]] <- log10(crs$dataset[["rfc"]])
crs$dataset[crs$dataset[["R10_rfc"]] == -Inf & ! is.na(crs$dataset[["R10_rfc"]]), "R10_rfc"] <- NA
}
# Rescale sc.
crs$dataset[["R10_sc"]] <- crs$dataset[["sc"]]
# Take a log10 transform of the variable - treat -Inf as NA.
if (building)
{
crs$dataset[["R10_sc"]] <- log10(crs$dataset[["sc"]])
crs$dataset[crs$dataset[["R10_sc"]] == -Inf & ! is.na(crs$dataset[["R10_sc"]]), "R10_sc"] <- NA
}
# When scoring transform using the training data parameters.
if (scoring)
{
crs$dataset[["R10_sc"]] <- log10(crs$dataset[["sc"]])
crs$dataset[crs$dataset[["R10_sc"]] == -Inf & ! is.na(crs$dataset[["R10_sc"]]), "R10_sc"] <- NA
}
#============================================================
# Rattle timestamp: 2014-12-06 21:02:54 x86_64-pc-linux-gnu
# Note the user selections.
# The following variable selections have been noted.
crs$input <- c("R10_acc", "R10_accm", "R10_amloc", "R10_anpm",
"R10_cbo", "R10_dit", "R10_lcom4", "R10_loc",
"R10_noa", "R10_noc", "R10_nom", "R10_npa",
"R10_npm", "R10_rfc", "R10_sc")
crs$numeric <- c("R10_acc", "R10_accm", "R10_amloc", "R10_anpm",
"R10_cbo", "R10_dit", "R10_lcom4", "R10_loc",
"R10_noa", "R10_noc", "R10_nom", "R10_npa",
"R10_npm", "R10_rfc", "R10_sc")
crs$categoric <- NULL
crs$target <- "good_design"
crs$risk <- NULL
crs$ident <- NULL
crs$ignore <- c("acc", "accm", "amloc", "anpm", "cbo", "dit", "lcom4", "loc", "noa", "noc", "nom", "npa", "npm", "rfc", "sc")
crs$weights <- NULL
#============================================================
# Rattle timestamp: 2014-12-06 21:03:17 x86_64-pc-linux-gnu
# Decision Tree
# The 'rpart' package provides the 'rpart' function.
require(rpart, quietly=TRUE)
# Reset the random number seed to obtain the same results each time.
set.seed(crv$seed)
# Build the Decision Tree model.
crs$rpart <- rpart(good_design ~ .,
data=crs$dataset[crs$train, c(crs$input, crs$target)],
method="class",
parms=list(split="information"),
control=rpart.control(cp=0.070000,
usesurrogate=0,
maxsurrogate=0))
# Generate a textual view of the Decision Tree model.
print(crs$rpart)
printcp(crs$rpart)
cat("\n")
# Time taken: 0.39 secs
# List the rules from the tree using a Rattle support function.
asRules(crs$rpart)
#============================================================
# Rattle timestamp: 2014-12-06 21:03:36 x86_64-pc-linux-gnu
# Plot the resulting Decision Tree.
# We use the rpart.plot package.
fancyRpartPlot(crs$rpart, main="Decision Tree all-data-C++.csv $ good_design")
#============================================================
# Rattle timestamp: 2014-12-06 21:03:46 x86_64-pc-linux-gnu
# Save the plot to a file.
# Save the plot on device 2 to a file.
library(cairoDevice)
savePlotToFile("/home/kanashiro/Documents/UnB/ML/ML_metrics/analysis/C++/model/cp=0.7/decision_tree.pdf", 2)
#============================================================
# Rattle timestamp: 2014-12-06 21:03:54 x86_64-pc-linux-gnu
# Evaluate model performance.
# Generate an Error Matrix for the Decision Tree model.
# Obtain the response from the Decision Tree model.
crs$pr <- predict(crs$rpart, newdata=crs$dataset[crs$validate, c(crs$input, crs$target)], type="class")
# Generate the confusion matrix showing counts.
table(crs$dataset[crs$validate, c(crs$input, crs$target)]$good_design, crs$pr,
dnn=c("Actual", "Predicted"))
# Generate the confusion matrix showing proportions.
## Proportional confusion matrix for a binary classifier.
## Each cell of table(actual, cl) is expressed as a fraction of all
## observations (rounded to 2 dp), plus a per-class Error column: the
## off-diagonal count divided by its row total. Assumes a 2x2 table.
pcme <- function(actual, cl)
{
  counts <- table(actual, cl)
  class_errors <- c(counts[1, 2] / sum(counts[1, ]),
                    counts[2, 1] / sum(counts[2, ]))
  result <- cbind(round(counts / length(actual), 2),
                  Error = round(class_errors, 2))
  names(attr(result, "dimnames")) <- c("Actual", "Predicted")
  result
}
pcme(crs$dataset[crs$validate, c(crs$input, crs$target)]$good_design, crs$pr)
# Calculate the overall error percentage.
# Print the overall misclassification rate of a confusion matrix `x`.
# For a 2x2 matrix this is the sum of the off-diagonal counts over the
# grand total; otherwise (single-row matrix) it is one minus the
# proportion falling in the column named after the row's class label.
# Output goes to stdout via cat(), mirroring the Rattle log style.
overall <- function(x) {
  if (nrow(x) == 2) {
    err <- (x[1, 2] + x[2, 1]) / sum(x)
  } else {
    err <- 1 - (x[1, rownames(x)]) / sum(x)
  }
  cat(err)
}
overall(table(crs$pr, crs$dataset[crs$validate, c(crs$input, crs$target)]$good_design,
dnn=c("Predicted", "Actual")))
# Calculate the averaged class error percentage.
# Print the class-averaged error rate of a 2x2 confusion matrix `x`:
# the mean of each row's off-diagonal count divided by its row total.
avgerr <- function(x) {
  off_diag <- c(x[1, 2], x[2, 1])
  row_totals <- apply(x, 1, sum)
  cat(mean(off_diag / row_totals))
}
avgerr(table(crs$pr, crs$dataset[crs$validate, c(crs$input, crs$target)]$good_design,
dnn=c("Predicted", "Actual")))
#============================================================
# Rattle timestamp: 2014-12-06 21:04:12 x86_64-pc-linux-gnu
# Evaluate model performance.
# ROC Curve: requires the ROCR package.
library(ROCR)
# ROC Curve: requires the ggplot2 package.
require(ggplot2, quietly=TRUE)
# Generate an ROC Curve for the rpart model on all-data-C++.csv [validate].
crs$pr <- predict(crs$rpart, newdata=crs$dataset[crs$validate, c(crs$input, crs$target)])[,2]
# Remove observations with missing target.
no.miss <- na.omit(crs$dataset[crs$validate, c(crs$input, crs$target)]$good_design)
miss.list <- attr(no.miss, "na.action")
attributes(no.miss) <- NULL
if (length(miss.list))
{
pred <- prediction(crs$pr[-miss.list], no.miss)
} else
{
pred <- prediction(crs$pr, no.miss)
}
pe <- performance(pred, "tpr", "fpr")
au <- performance(pred, "auc")@y.values[[1]]
pd <- data.frame(fpr=unlist(pe@x.values), tpr=unlist(pe@y.values))
p <- ggplot(pd, aes(x=fpr, y=tpr))
p <- p + geom_line(colour="red")
p <- p + xlab("False Positive Rate") + ylab("True Positive Rate")
p <- p + ggtitle("ROC Curve Decision Tree all-data-C++.csv [validate] good_design")
p <- p + theme(plot.title=element_text(size=10))
p <- p + geom_line(data=data.frame(), aes(x=c(0,1), y=c(0,1)), colour="grey")
p <- p + annotate("text", x=0.50, y=0.00, hjust=0, vjust=0, size=5,
label=paste("AUC =", round(au, 2)))
print(p)
# Calculate the area under the curve for the plot.
# Remove observations with missing target.
no.miss <- na.omit(crs$dataset[crs$validate, c(crs$input, crs$target)]$good_design)
miss.list <- attr(no.miss, "na.action")
attributes(no.miss) <- NULL
if (length(miss.list))
{
pred <- prediction(crs$pr[-miss.list], no.miss)
} else
{
pred <- prediction(crs$pr, no.miss)
}
performance(pred, "auc")
#============================================================
# Rattle timestamp: 2014-12-06 21:04:22 x86_64-pc-linux-gnu
# Save the plot to a file.
# Save the plot on device 2 to a file.
library(cairoDevice)
savePlotToFile("/home/kanashiro/Documents/UnB/ML/ML_metrics/analysis/C++/model/cp=0.7/ROC.pdf", 2)
|
/analysis/C++/model_tree/cp=0.7/metrics.R
|
no_license
|
lucaskanashiro/ML_metrics
|
R
| false
| false
| 17,051
|
r
|
# Rattle is Copyright (c) 2006-2014 Togaware Pty Ltd.
#============================================================
# Rattle timestamp: 2014-12-06 21:02:42 x86_64-pc-linux-gnu
# Rattle version 3.3.1 user 'kanashiro'
# Export this log textview to a file using the Export button or the Tools
# menu to save a log of all activity. This facilitates repeatability. Exporting
# to file 'myrf01.R', for example, allows us to the type in the R Console
# the command source('myrf01.R') to repeat the process automatically.
# Generally, we may want to edit the file to suit our needs. We can also directly
# edit this current log textview to record additional information before exporting.
# Saving and loading projects also retains this log.
library(rattle)
# This log generally records the process of building a model. However, with very
# little effort the log can be used to score a new dataset. The logical variable
# 'building' is used to toggle between generating transformations, as when building
# a model, and simply using the transformations, as when scoring a dataset.
building <- TRUE
scoring <- ! building
# The colorspace package is used to generate the colours used in plots, if available.
library(colorspace)
# A pre-defined value is used to reset the random seed so that results are repeatable.
crv$seed <- 42
#============================================================
# Rattle timestamp: 2014-12-06 21:02:46 x86_64-pc-linux-gnu
# Load the data.
crs$dataset <- read.csv("file:///home/kanashiro/Documents/UnB/ML/ML_metrics/data/C++/all-data-C++.csv", na.strings=c(".", "NA", "", "?"), strip.white=TRUE, encoding="UTF-8")
#============================================================
# Rattle timestamp: 2014-12-06 21:02:47 x86_64-pc-linux-gnu
# Note the user selections.
# Build the training/validate/test datasets.
set.seed(crv$seed)
crs$nobs <- nrow(crs$dataset) # 38015 observations
crs$sample <- crs$train <- sample(nrow(crs$dataset), 0.7*crs$nobs) # 26610 observations
crs$validate <- sample(setdiff(seq_len(nrow(crs$dataset)), crs$train), 0.15*crs$nobs) # 5702 observations
crs$test <- setdiff(setdiff(seq_len(nrow(crs$dataset)), crs$train), crs$validate) # 5703 observations
# The following variable selections have been noted.
crs$input <- c("acc", "accm", "amloc", "anpm",
"cbo", "dit", "lcom4", "loc",
"noa", "noc", "nom", "npa",
"npm", "rfc", "sc")
crs$numeric <- c("acc", "accm", "amloc", "anpm",
"cbo", "dit", "lcom4", "loc",
"noa", "noc", "nom", "npa",
"npm", "rfc", "sc")
crs$categoric <- NULL
crs$target <- "good_design"
crs$risk <- NULL
crs$ident <- NULL
crs$ignore <- NULL
crs$weights <- NULL
#============================================================
# Rattle timestamp: 2014-12-06 21:02:53 x86_64-pc-linux-gnu
# Transform variables by rescaling.
# Rescale each metric with a log10 transform, mapping -Inf (log10 of a
# zero count) to NA. The original Rattle log repeated an identical
# copy-pasted block once per variable; it is factored into a single loop
# here. Behaviour is unchanged: for each raw column <v>, a copy is first
# stored as "R10_<v>", then overwritten with log10(<v>) under either the
# `building` or the `scoring` flag. The two flags are complementary
# (scoring <- !building), but both branches are kept to mirror the
# original Rattle-generated structure.
rescale_vars <- c("acc", "accm", "amloc", "anpm", "cbo", "dit", "lcom4",
                  "loc", "noa", "noc", "nom", "npa", "npm", "rfc", "sc")
for (v in rescale_vars)
{
  r10 <- paste0("R10_", v)

  # Start from a straight copy of the raw column.
  crs$dataset[[r10]] <- crs$dataset[[v]]

  # Take a log10 transform of the variable - treat -Inf as NA.
  if (building)
  {
    crs$dataset[[r10]] <- log10(crs$dataset[[v]])
    crs$dataset[crs$dataset[[r10]] == -Inf & ! is.na(crs$dataset[[r10]]), r10] <- NA
  }

  # When scoring transform using the training data parameters.
  if (scoring)
  {
    crs$dataset[[r10]] <- log10(crs$dataset[[v]])
    crs$dataset[crs$dataset[[r10]] == -Inf & ! is.na(crs$dataset[[r10]]), r10] <- NA
  }
}
#============================================================
# Rattle timestamp: 2014-12-06 21:02:54 x86_64-pc-linux-gnu
# Note the user selections.
# The following variable selections have been noted.
crs$input <- c("R10_acc", "R10_accm", "R10_amloc", "R10_anpm",
"R10_cbo", "R10_dit", "R10_lcom4", "R10_loc",
"R10_noa", "R10_noc", "R10_nom", "R10_npa",
"R10_npm", "R10_rfc", "R10_sc")
crs$numeric <- c("R10_acc", "R10_accm", "R10_amloc", "R10_anpm",
"R10_cbo", "R10_dit", "R10_lcom4", "R10_loc",
"R10_noa", "R10_noc", "R10_nom", "R10_npa",
"R10_npm", "R10_rfc", "R10_sc")
crs$categoric <- NULL
crs$target <- "good_design"
crs$risk <- NULL
crs$ident <- NULL
crs$ignore <- c("acc", "accm", "amloc", "anpm", "cbo", "dit", "lcom4", "loc", "noa", "noc", "nom", "npa", "npm", "rfc", "sc")
crs$weights <- NULL
#============================================================
# Rattle timestamp: 2014-12-06 21:03:17 x86_64-pc-linux-gnu
# Decision Tree
# The 'rpart' package provides the 'rpart' function.
require(rpart, quietly=TRUE)
# Reset the random number seed to obtain the same results each time.
set.seed(crv$seed)
# Build the Decision Tree model.
crs$rpart <- rpart(good_design ~ .,
data=crs$dataset[crs$train, c(crs$input, crs$target)],
method="class",
parms=list(split="information"),
control=rpart.control(cp=0.070000,
usesurrogate=0,
maxsurrogate=0))
# Generate a textual view of the Decision Tree model.
print(crs$rpart)
printcp(crs$rpart)
cat("\n")
# Time taken: 0.39 secs
# List the rules from the tree using a Rattle support function.
asRules(crs$rpart)
#============================================================
# Rattle timestamp: 2014-12-06 21:03:36 x86_64-pc-linux-gnu
# Plot the resulting Decision Tree.
# We use the rpart.plot package.
fancyRpartPlot(crs$rpart, main="Decision Tree all-data-C++.csv $ good_design")
#============================================================
# Rattle timestamp: 2014-12-06 21:03:46 x86_64-pc-linux-gnu
# Save the plot to a file.
# Save the plot on device 2 to a file.
library(cairoDevice)
savePlotToFile("/home/kanashiro/Documents/UnB/ML/ML_metrics/analysis/C++/model/cp=0.7/decision_tree.pdf", 2)
#============================================================
# Rattle timestamp: 2014-12-06 21:03:54 x86_64-pc-linux-gnu
# Evaluate model performance.
# Generate an Error Matrix for the Decision Tree model.
# Obtain the response from the Decision Tree model.
crs$pr <- predict(crs$rpart, newdata=crs$dataset[crs$validate, c(crs$input, crs$target)], type="class")
# Generate the confusion matrix showing counts.
table(crs$dataset[crs$validate, c(crs$input, crs$target)]$good_design, crs$pr,
dnn=c("Actual", "Predicted"))
# Generate the confusion matrix showing proportions.
# Confusion matrix as proportions, with per-class error rates appended.
#
# actual: vector of observed class labels.
# cl:     vector of predicted class labels (same length as `actual`).
#
# Returns a matrix whose first columns hold the confusion-matrix cells as
# proportions of all observations (rounded to 2 dp) and whose final
# "Error" column holds each actual class's off-diagonal error rate.
# Assumes a two-class problem (indexes cells [1,2] and [2,1] directly).
pcme <- function(actual, cl) {
  counts <- table(actual, cl)
  proportions <- round(counts / length(actual), 2)
  class_error <- round(c(counts[1, 2] / sum(counts[1, ]),
                         counts[2, 1] / sum(counts[2, ])), 2)
  tbl <- cbind(proportions, Error = class_error)
  names(attr(tbl, "dimnames")) <- c("Actual", "Predicted")
  tbl
}
pcme(crs$dataset[crs$validate, c(crs$input, crs$target)]$good_design, crs$pr)
# Calculate the overall error percentage.
# Print the overall misclassification rate of a confusion matrix `x`.
# For a 2x2 matrix this is the sum of the off-diagonal counts over the
# grand total; otherwise (single-row matrix) it is one minus the
# proportion falling in the column named after the row's class label.
# Output goes to stdout via cat(), mirroring the Rattle log style.
overall <- function(x) {
  if (nrow(x) == 2) {
    err <- (x[1, 2] + x[2, 1]) / sum(x)
  } else {
    err <- 1 - (x[1, rownames(x)]) / sum(x)
  }
  cat(err)
}
overall(table(crs$pr, crs$dataset[crs$validate, c(crs$input, crs$target)]$good_design,
dnn=c("Predicted", "Actual")))
# Calculate the averaged class error percentage.
# Print the class-averaged error rate of a 2x2 confusion matrix `x`:
# the mean of each row's off-diagonal count divided by its row total.
avgerr <- function(x) {
  off_diag <- c(x[1, 2], x[2, 1])
  row_totals <- apply(x, 1, sum)
  cat(mean(off_diag / row_totals))
}
avgerr(table(crs$pr, crs$dataset[crs$validate, c(crs$input, crs$target)]$good_design,
dnn=c("Predicted", "Actual")))
#============================================================
# Rattle timestamp: 2014-12-06 21:04:12 x86_64-pc-linux-gnu
# Evaluate model performance.
# ROC Curve: requires the ROCR package.
library(ROCR)
# ROC Curve: requires the ggplot2 package.
require(ggplot2, quietly=TRUE)
# Generate an ROC Curve for the rpart model on all-data-C++.csv [validate].
crs$pr <- predict(crs$rpart, newdata=crs$dataset[crs$validate, c(crs$input, crs$target)])[,2]
# Remove observations with missing target.
no.miss <- na.omit(crs$dataset[crs$validate, c(crs$input, crs$target)]$good_design)
miss.list <- attr(no.miss, "na.action")
attributes(no.miss) <- NULL
if (length(miss.list))
{
pred <- prediction(crs$pr[-miss.list], no.miss)
} else
{
pred <- prediction(crs$pr, no.miss)
}
pe <- performance(pred, "tpr", "fpr")
au <- performance(pred, "auc")@y.values[[1]]
pd <- data.frame(fpr=unlist(pe@x.values), tpr=unlist(pe@y.values))
p <- ggplot(pd, aes(x=fpr, y=tpr))
p <- p + geom_line(colour="red")
p <- p + xlab("False Positive Rate") + ylab("True Positive Rate")
p <- p + ggtitle("ROC Curve Decision Tree all-data-C++.csv [validate] good_design")
p <- p + theme(plot.title=element_text(size=10))
p <- p + geom_line(data=data.frame(), aes(x=c(0,1), y=c(0,1)), colour="grey")
p <- p + annotate("text", x=0.50, y=0.00, hjust=0, vjust=0, size=5,
label=paste("AUC =", round(au, 2)))
print(p)
# Calculate the area under the curve for the plot.
# Remove observations with missing target.
no.miss <- na.omit(crs$dataset[crs$validate, c(crs$input, crs$target)]$good_design)
miss.list <- attr(no.miss, "na.action")
attributes(no.miss) <- NULL
if (length(miss.list))
{
pred <- prediction(crs$pr[-miss.list], no.miss)
} else
{
pred <- prediction(crs$pr, no.miss)
}
performance(pred, "auc")
#============================================================
# Rattle timestamp: 2014-12-06 21:04:22 x86_64-pc-linux-gnu
# Save the plot to a file.
# Save the plot on device 2 to a file.
library(cairoDevice)
savePlotToFile("/home/kanashiro/Documents/UnB/ML/ML_metrics/analysis/C++/model/cp=0.7/ROC.pdf", 2)
|
# Driver settings for the heterogeneous-variance power simulation.
library(parallel)
simRep <- 5000 # Number of Monte Carlo replications in one simulation
pvalue.true <- .05 # Nominal type I error level of the test
b.var <- c(0.0049) # Variance(s) of the random covariates b used as random slopes
smooth <- 0 # Measurement error is added to M if smooth == 0; none is added if smooth == 1
cores <- 8 # Number of worker processes for the parallel cluster
r.sim <- b.var # Random-slope variance read by run_one_sample on each worker
# Run one Monte Carlo replicate of the functional mixed-model power
# simulation.
#
# iter: integer replicate index; seeds the RNG as iter + 5000 so each
#       replicate is reproducible and distinct.
#
# Reads the globals r.sim (random-slope variance) and smooth (0 = add
# measurement error to the functional covariate M, 1 = no error), which
# are exported to each cluster worker before parLapply is called.
#
# Returns a list with realTau (the true variance used), pvalues.bonf
# (the exactRLRT p-value), Merror.Var (measurement-error variance
# actually applied), smooth, npc (number of principal components chosen
# by FPCA), and tests2 (the full exactRLRT result object).
run_one_sample <- function(iter){
  # Packages must be attached on each worker process, not the master.
  library(refund)
  library(lme4)
  library(nlme)
  library(arm)
  library(RLRsim)
  library(MASS)
  set.seed(iter+5000)
  D <- 80 # number of grid points on the functional domain
  nSubj <- 200 # number of subjects (curves)
  nRep <- 20 # repeated measurements per subject
  totalN <- nSubj * nRep
  thetaK.true <- 2 # true mean of the functional coefficients theta_K
  timeGrid <- (1:D)/D
  npc.true <- 3 # true number of principal components generating M
  percent <- 0.95 # proportion of variance explained kept by fpca.face
  SNR <- 3 # signal-to-noise ratio (alternative setting: 5)
  sd.epsilon <- 1 # residual sd (alternative setting: 0.5)
  delta.true <- 0.5 # true intercept
  a.mean <- 0 # mean of the true FPCA scores
  gamma.true <- 2 # mean of the subject-level condition effects
  gammaVar.true <- 1 # variance of the subject-level condition effects
  # Random intercepts for the first condition ("hot" — presumably matches
  # temp.1 below; TODO confirm against the study design).
  gammaI.true <- mapply(rnorm, nSubj, gamma.true, rep(sqrt(gammaVar.true), 1))
  gammaI.true <- gammaI.true[rep(1:nrow(gammaI.true), each = nRep), ]
  # Random intercepts for the second condition ("warm" — presumably temp.2).
  gammaI2.true <- mapply(rnorm, nSubj, gamma.true, rep(sqrt(gammaVar.true), 1))
  gammaI2.true <- gammaI2.true[rep(1:nrow(gammaI2.true), each = nRep), ]
  dummyX <- rbinom(n = totalN, size = 1, prob = 0.5) # binary condition indicator
  # Generate the functional covariates from a truncated Karhunen-Loeve
  # expansion with geometrically decaying eigenvalues and Fourier bases.
  lambda.sim <- function(degree) {
    return(0.5^(degree - 1))
  }
  psi.fourier <- function(t, degree) {
    result <- NA
    if(degree == 1){
      result <- sqrt(2) * sinpi(2*t)
    }else if(degree == 2){
      result <- sqrt(2) * cospi(4*t)
    }else if(degree == 3){
      result <- sqrt(2) * sinpi(4*t)
    }
    return(result)
  }
  lambdaVec.true <- mapply(lambda.sim, 1: npc.true)
  psi.true <- matrix(data = mapply(psi.fourier, rep(timeGrid, npc.true), rep(1:npc.true, each=D)),
                     nrow = npc.true,
                     ncol = D,
                     byrow = TRUE)
  ascore.true <- mvrnorm(totalN, rep(a.mean, npc.true), diag(lambdaVec.true))
  Mt.true <- ascore.true %*% psi.true
  error <- rnorm(totalN, mean = 0, sd = sd.epsilon)
  # Subject-specific functional coefficients: variances r.sim, r.sim/2,
  # r.sim/4 across the three components (the heterogeneous setting).
  thetaIK.true <- mvrnorm(nSubj, rep(thetaK.true, npc.true), diag(c(r.sim, r.sim/2, r.sim/4)))
  thetaIK.true <- thetaIK.true[rep(1:nrow(thetaIK.true), each = nRep), ]
  betaM.true <- thetaIK.true * ascore.true
  betaM.true <- rowSums(betaM.true)
  Y <- delta.true + dummyX * gammaI.true + (dummyX - 1) * gammaI2.true + betaM.true + error
  ##########################################################################
  ID <- rep(1:nSubj, each = nRep)
  if(smooth == 0){
    Merror.Var <- sum(lambdaVec.true) / SNR # SNR = sum(lambdaVec.true)/Merror.Var
    Mt.hat <- Mt.true + matrix(rnorm(totalN*D, mean=0, sd = sqrt(Merror.Var)), totalN, D)
  }
  if(smooth == 1){
    Merror.Var <- 0 # no measurement error in the smooth setting
    Mt.hat <- Mt.true
  }
  M <- Mt.hat
  # M <- M - matrix(rep(colMeans(M), each = totalN), totalN, D) # center:column-means are 0
  t <- (1:D)/D
  knots <- 5 # previous setting: 10
  p <- 5 # B-spline degree (previous setting: p <- 7)
  # Estimate the FPCA decomposition of the noisy curves; pve controls how
  # many components are retained.
  results <- fpca.face(M, center = TRUE, argvals = t, knots = knots, pve = percent, p = p, lambda = 0) # pve need to be chosen!
  npc <- results$npc
  score <- results$scores
  ascore <- score[, 1:npc]/sqrt(D)
  # plot(results$efunctions[,2]*sqrt(D))
  # lines(1:80, psi.fourier(timeGrid, 2)) #match very well
  # to compare lambda: results$evalues/(D))
  # to compare estimated M, Mt.hat, Mt.true
  # a<-results$scores %*% t(results$efunctions)
  # plot(M[300,]) #Mt.hat
  # lines(a[300,]+results$mu,col="red") # estimated M
  # lines(Mt.true[300,], col="blue") #true Mt
  ###########################################################################
  # Rotate each subject's block so the random-effect design becomes
  # diagonal, reducing the multi-component variance test to a single
  # pseudo-random-slope z.sim.uni per component.
  dummyX <- cbind(dummyX, -dummyX + 1)
  z.sim.uni = c()
  ID.uni <- c(rbind(matrix(1:(nSubj*npc),
                           nrow = npc,
                           ncol = nSubj),
                    matrix(0, nrow = nRep - npc, ncol = nSubj)))
  for(k in 1:nSubj){
    svd <- svd(ascore[((k-1)*nRep+1):(k*nRep), ] %*% t(ascore[((k-1)*nRep+1):(k*nRep), ])) # SVD on A_i A_i'
    u.tra <- t(svd$v)
    u <- svd$u
    d <- (svd$d)[1:npc]
    # u <- cbind(u, Null(u))
    Y[((k-1)*nRep+1):(k*nRep)] <- u.tra %*% Y[((k-1)*nRep+1):(k*nRep)]
    dummyX[((k-1)*nRep+1):(k*nRep), ] <- u.tra %*% dummyX[((k-1)*nRep+1):(k*nRep), ]
    ascore[((k-1)*nRep+1):(k*nRep), ] <- rbind(u.tra[1:npc, ] %*% ascore[((k-1)*nRep+1):(k*nRep), ],
                                               matrix(0,
                                                      nrow = nRep - npc,
                                                      ncol = npc))
    z.sim.uni <- c(z.sim.uni, sqrt(d), rep(0, nRep - npc))
  }
  ###########################################################################
  designMatrix <- data.frame(rating = Y,
                             temp.1 = dummyX[, 1],
                             temp.2 = dummyX[, 2],
                             ID = as.factor(ID),
                             ID.uni = as.factor(ID.uni),
                             ascore = ascore,
                             z.sim.uni = z.sim.uni)
  # Fit the full and reduced 'lmer' models for the restricted LRT.
  designMatrix.lmm <- designMatrix
  additive0.sim <- paste(1:npc, collapse = " + ascore.")
  additive.sim <- paste(1:npc, collapse = " | ID) + (0 + ascore.")
  # NOTE(review): additive.sim is built but never used below — verify intent.
  model.sim <- as.formula(paste("rating ~ 1 + temp.1 + temp.2 + ascore.",
                                additive0.sim,
                                " + (0 + temp.1 | ID) + (0 + temp.2 | ID) + (0 + z.sim.uni | ID.uni)",
                                sep = ""))
  fullReml <- lmer(model.sim, data = designMatrix.lmm)
  f.slope <- as.formula(paste("rating ~ 1 + temp.1 + temp.2 + ascore.",
                              additive0.sim,
                              " + (0 + z.sim.uni | ID.uni)",
                              sep = ""))
  m.slope <- lmer(f.slope, data = designMatrix.lmm)
  f0 <- as.formula(" . ~ . - (0 + z.sim.uni | ID.uni)")
  m0 <- update(fullReml, f0)
  # Exact restricted likelihood ratio test for the z.sim.uni variance.
  tests2 <- exactRLRT(m.slope, fullReml, m0)
  pvalues.bonf <- tests2$p[1]
  ###################################################################################
  return(list(realTau = r.sim,
              pvalues.bonf = pvalues.bonf,
              Merror.Var = Merror.Var,
              smooth = smooth,
              npc = npc,
              tests2 = tests2))
}
# Set up the worker cluster, run all replicates, and summarise power.
#cores <- detectCores()
cluster <- makeCluster(cluster <- cores)
#clusterSetRNGStream(cluster, 20170822)
# for(nRandCovariate in 1 * 2){ # START out-outer loop
# clusterExport(cluster, c("nRandCovariate")) # casting the coefficient parameter on the random effects' covariance function
# fileName <- paste("power_", b.var, "_grp20-rep20-", nRandCovariate,".RData", sep = "") # Saving file's name
clusterExport(cluster, c("r.sim", "smooth")) # globals read inside run_one_sample on each worker
fileName <- paste("h_f_power_", smooth, "_",b.var,"_seed2_grp200-rep20.RData", sep = "") # output file name
# Run the simulation: one run_one_sample call per replicate, in parallel.
loopIndex <- 1
# resultDoubleList.sim <- list()
#power1.sim <- list()
power2.sim <- list()
# for(r.sim in b.var){ # START outer loop
node_results <- parLapply(cluster, 1:simRep, run_one_sample)
# result1.sim <- lapply(node_results, function(x) {list(realTau = x$realTau,
#                                                  pvalue = x$pvalue)})
#result2.sim <- lapply(node_results, function(x) {list(realTau = x$realTau,
#                                                      pvalues.bonf = x$pvalues.bonf,
#                                                      smooth = x$smooth,
#                                                      npc = x$npc)})
#resultDoubleList.sim[[loopIndex]] <- node_results
#save.image(file=fileName) # Auto Save
#table1.sim <- sapply(result1.sim, function(x) {
#  c(sens = (sum(x$pvalue <= pvalue.true) > 0))})
#Power1 <- mean(table1.sim)
#cat("nRandCovariate: ", nRandCovariate, fill = TRUE)
#cat("Power1: ", Power1, fill = TRUE)
#power1.sim[[loopIndex]] <- list(Power = Power1, realTau = r.sim)
# Empirical power: fraction of replicates whose p-value falls at or
# below the nominal level.
table2.sim <- sapply(node_results, function(x) {
  c(overall.sens = (sum(x$pvalues.bonf <= pvalue.true) > 0))})
Power2 <- mean(table2.sim)
#cat("Power2: ", Power2, fill = TRUE)
power2.sim[[loopIndex]] <- list(Power = Power2, realTau = c(r.sim,r.sim/2,r.sim/4), smooth = smooth)
# loopIndex <- loopIndex + 1
# } # End outer loop
save(power2.sim, file=fileName) # persist the power estimate
# par(mfrow=c(2,1))
# Histogram plots
# hist(sapply(result1.sim, function(x) x$pvalue),
#      main = "Histogram of p-value for lme model",
#      xlab = "p-value")
# hist(sapply(result2.sim, function(x) x$pvalues.bonf),
#      main = "Histogram of p-value for lmer model",
#      xlab = "p-value")
# hist(sapply(resultDoubleList.sim[[1]], function(x) (x$tests1)$statistic[1]),
#      breaks = (0:110)/10,
#      main = "Histogram of test-statistic for lme model",
#      xlab = "Test Statistics")
#
# hist(sapply(resultDoubleList.sim[[1]], function(x) (x$tests2)[1,1]),
#      breaks = (0:100)/10,
#      main = "Histogram of test-statistic for lmer model",
#      xlab = "Test Statistics")
#} # End out-outer loop
stopCluster(cluster)
|
/full simulation/summer/hetero_power/variance0.0049/seed2/heter_power_0.0049_pca_u_seed2_200_20.R
|
no_license
|
wma9/FMRI-project
|
R
| false
| false
| 9,199
|
r
|
# Driver settings for the heterogeneous-variance power simulation.
library(parallel)
simRep <- 5000 # Number of Monte Carlo replications in one simulation
pvalue.true <- .05 # Nominal type I error level of the test
b.var <- c(0.0049) # Variance(s) of the random covariates b used as random slopes
smooth <- 0 # Measurement error is added to M if smooth == 0; none is added if smooth == 1
cores <- 8 # Number of worker processes for the parallel cluster
r.sim <- b.var # Random-slope variance read by run_one_sample on each worker
# Run one Monte Carlo replicate of the functional mixed-model power
# simulation.
#
# iter: integer replicate index; seeds the RNG as iter + 5000 so each
#       replicate is reproducible and distinct.
#
# Reads the globals r.sim (random-slope variance) and smooth (0 = add
# measurement error to the functional covariate M, 1 = no error), which
# are exported to each cluster worker before parLapply is called.
#
# Returns a list with realTau (the true variance used), pvalues.bonf
# (the exactRLRT p-value), Merror.Var (measurement-error variance
# actually applied), smooth, npc (number of principal components chosen
# by FPCA), and tests2 (the full exactRLRT result object).
run_one_sample <- function(iter){
  # Packages must be attached on each worker process, not the master.
  library(refund)
  library(lme4)
  library(nlme)
  library(arm)
  library(RLRsim)
  library(MASS)
  set.seed(iter+5000)
  D <- 80 # number of grid points on the functional domain
  nSubj <- 200 # number of subjects (curves)
  nRep <- 20 # repeated measurements per subject
  totalN <- nSubj * nRep
  thetaK.true <- 2 # true mean of the functional coefficients theta_K
  timeGrid <- (1:D)/D
  npc.true <- 3 # true number of principal components generating M
  percent <- 0.95 # proportion of variance explained kept by fpca.face
  SNR <- 3 # signal-to-noise ratio (alternative setting: 5)
  sd.epsilon <- 1 # residual sd (alternative setting: 0.5)
  delta.true <- 0.5 # true intercept
  a.mean <- 0 # mean of the true FPCA scores
  gamma.true <- 2 # mean of the subject-level condition effects
  gammaVar.true <- 1 # variance of the subject-level condition effects
  # Random intercepts for the first condition ("hot" — presumably matches
  # temp.1 below; TODO confirm against the study design).
  gammaI.true <- mapply(rnorm, nSubj, gamma.true, rep(sqrt(gammaVar.true), 1))
  gammaI.true <- gammaI.true[rep(1:nrow(gammaI.true), each = nRep), ]
  # Random intercepts for the second condition ("warm" — presumably temp.2).
  gammaI2.true <- mapply(rnorm, nSubj, gamma.true, rep(sqrt(gammaVar.true), 1))
  gammaI2.true <- gammaI2.true[rep(1:nrow(gammaI2.true), each = nRep), ]
  dummyX <- rbinom(n = totalN, size = 1, prob = 0.5) # binary condition indicator
  # Generate the functional covariates from a truncated Karhunen-Loeve
  # expansion with geometrically decaying eigenvalues and Fourier bases.
  lambda.sim <- function(degree) {
    return(0.5^(degree - 1))
  }
  psi.fourier <- function(t, degree) {
    result <- NA
    if(degree == 1){
      result <- sqrt(2) * sinpi(2*t)
    }else if(degree == 2){
      result <- sqrt(2) * cospi(4*t)
    }else if(degree == 3){
      result <- sqrt(2) * sinpi(4*t)
    }
    return(result)
  }
  lambdaVec.true <- mapply(lambda.sim, 1: npc.true)
  psi.true <- matrix(data = mapply(psi.fourier, rep(timeGrid, npc.true), rep(1:npc.true, each=D)),
                     nrow = npc.true,
                     ncol = D,
                     byrow = TRUE)
  ascore.true <- mvrnorm(totalN, rep(a.mean, npc.true), diag(lambdaVec.true))
  Mt.true <- ascore.true %*% psi.true
  error <- rnorm(totalN, mean = 0, sd = sd.epsilon)
  # Subject-specific functional coefficients: variances r.sim, r.sim/2,
  # r.sim/4 across the three components (the heterogeneous setting).
  thetaIK.true <- mvrnorm(nSubj, rep(thetaK.true, npc.true), diag(c(r.sim, r.sim/2, r.sim/4)))
  thetaIK.true <- thetaIK.true[rep(1:nrow(thetaIK.true), each = nRep), ]
  betaM.true <- thetaIK.true * ascore.true
  betaM.true <- rowSums(betaM.true)
  Y <- delta.true + dummyX * gammaI.true + (dummyX - 1) * gammaI2.true + betaM.true + error
  ##########################################################################
  ID <- rep(1:nSubj, each = nRep)
  if(smooth == 0){
    Merror.Var <- sum(lambdaVec.true) / SNR # SNR = sum(lambdaVec.true)/Merror.Var
    Mt.hat <- Mt.true + matrix(rnorm(totalN*D, mean=0, sd = sqrt(Merror.Var)), totalN, D)
  }
  if(smooth == 1){
    Merror.Var <- 0 # no measurement error in the smooth setting
    Mt.hat <- Mt.true
  }
  M <- Mt.hat
  # M <- M - matrix(rep(colMeans(M), each = totalN), totalN, D) # center:column-means are 0
  t <- (1:D)/D
  knots <- 5 # previous setting: 10
  p <- 5 # B-spline degree (previous setting: p <- 7)
  # Estimate the FPCA decomposition of the noisy curves; pve controls how
  # many components are retained.
  results <- fpca.face(M, center = TRUE, argvals = t, knots = knots, pve = percent, p = p, lambda = 0) # pve need to be chosen!
  npc <- results$npc
  score <- results$scores
  ascore <- score[, 1:npc]/sqrt(D)
  # plot(results$efunctions[,2]*sqrt(D))
  # lines(1:80, psi.fourier(timeGrid, 2)) #match very well
  # to compare lambda: results$evalues/(D))
  # to compare estimated M, Mt.hat, Mt.true
  # a<-results$scores %*% t(results$efunctions)
  # plot(M[300,]) #Mt.hat
  # lines(a[300,]+results$mu,col="red") # estimated M
  # lines(Mt.true[300,], col="blue") #true Mt
  ###########################################################################
  # Rotate each subject's block so the random-effect design becomes
  # diagonal, reducing the multi-component variance test to a single
  # pseudo-random-slope z.sim.uni per component.
  dummyX <- cbind(dummyX, -dummyX + 1)
  z.sim.uni = c()
  ID.uni <- c(rbind(matrix(1:(nSubj*npc),
                           nrow = npc,
                           ncol = nSubj),
                    matrix(0, nrow = nRep - npc, ncol = nSubj)))
  for(k in 1:nSubj){
    svd <- svd(ascore[((k-1)*nRep+1):(k*nRep), ] %*% t(ascore[((k-1)*nRep+1):(k*nRep), ])) # SVD on A_i A_i'
    u.tra <- t(svd$v)
    u <- svd$u
    d <- (svd$d)[1:npc]
    # u <- cbind(u, Null(u))
    Y[((k-1)*nRep+1):(k*nRep)] <- u.tra %*% Y[((k-1)*nRep+1):(k*nRep)]
    dummyX[((k-1)*nRep+1):(k*nRep), ] <- u.tra %*% dummyX[((k-1)*nRep+1):(k*nRep), ]
    ascore[((k-1)*nRep+1):(k*nRep), ] <- rbind(u.tra[1:npc, ] %*% ascore[((k-1)*nRep+1):(k*nRep), ],
                                               matrix(0,
                                                      nrow = nRep - npc,
                                                      ncol = npc))
    z.sim.uni <- c(z.sim.uni, sqrt(d), rep(0, nRep - npc))
  }
  ###########################################################################
  designMatrix <- data.frame(rating = Y,
                             temp.1 = dummyX[, 1],
                             temp.2 = dummyX[, 2],
                             ID = as.factor(ID),
                             ID.uni = as.factor(ID.uni),
                             ascore = ascore,
                             z.sim.uni = z.sim.uni)
  # Fit the full and reduced 'lmer' models for the restricted LRT.
  designMatrix.lmm <- designMatrix
  additive0.sim <- paste(1:npc, collapse = " + ascore.")
  additive.sim <- paste(1:npc, collapse = " | ID) + (0 + ascore.")
  # NOTE(review): additive.sim is built but never used below — verify intent.
  model.sim <- as.formula(paste("rating ~ 1 + temp.1 + temp.2 + ascore.",
                                additive0.sim,
                                " + (0 + temp.1 | ID) + (0 + temp.2 | ID) + (0 + z.sim.uni | ID.uni)",
                                sep = ""))
  fullReml <- lmer(model.sim, data = designMatrix.lmm)
  f.slope <- as.formula(paste("rating ~ 1 + temp.1 + temp.2 + ascore.",
                              additive0.sim,
                              " + (0 + z.sim.uni | ID.uni)",
                              sep = ""))
  m.slope <- lmer(f.slope, data = designMatrix.lmm)
  f0 <- as.formula(" . ~ . - (0 + z.sim.uni | ID.uni)")
  m0 <- update(fullReml, f0)
  # Exact restricted likelihood ratio test for the z.sim.uni variance.
  tests2 <- exactRLRT(m.slope, fullReml, m0)
  pvalues.bonf <- tests2$p[1]
  ###################################################################################
  return(list(realTau = r.sim,
              pvalues.bonf = pvalues.bonf,
              Merror.Var = Merror.Var,
              smooth = smooth,
              npc = npc,
              tests2 = tests2))
}
# ---------------------------------------------------------------------------
# Driver: run `run_one_sample` simRep times in parallel and estimate the
# power of the bonferroni-adjusted RLRT as the fraction of replicates with
# p-value below `pvalue.true`; results are saved to an .RData file.
# NOTE(review): `cores`, `r.sim`, `smooth`, `b.var`, `simRep`, `pvalue.true`
# and `run_one_sample` must all be defined earlier in this script -- confirm
# before sourcing this section on its own.
# ---------------------------------------------------------------------------
# Setup parallel
#cores <- detectCores()
cluster <- makeCluster(cluster_size <- cores)
#clusterSetRNGStream(cluster, 20170822)
# for(nRandCovariate in 1 * 2){ # START out-outer loop
#   clusterExport(cluster, c("nRandCovariate")) # casting the coefficient parameter on the random effects' covariance function
#   fileName <- paste("power_", b.var, "_grp20-rep20-", nRandCovariate,".RData", sep = "") # Saving file's name
clusterExport(cluster, c("r.sim", "smooth")) # casting the coefficient parameter on the random effects' covariance function
fileName <- paste("h_f_power_", smooth, "_",b.var,"_seed2_grp200-rep20.RData", sep = "") # Saving file's name
# run the simulation
loopIndex <- 1
# resultDoubleList.sim <- list()
#power1.sim <- list()
power2.sim <- list()
# for(r.sim in b.var){ # START outer loop
# One list element per replicate; each worker returns the list built at the
# end of run_one_sample (realTau, pvalues.bonf, Merror.Var, smooth, npc, tests2).
# NOTE(review): 1:simRep assumes simRep >= 1.
node_results <- parLapply(cluster, 1:simRep, run_one_sample)
# result1.sim <- lapply(node_results, function(x) {list(realTau = x$realTau,
#                                                       pvalue = x$pvalue)})
#result2.sim <- lapply(node_results, function(x) {list(realTau = x$realTau,
#                                                      pvalues.bonf = x$pvalues.bonf,
#                                                      smooth = x$smooth,
#                                                      npc = x$npc)})
#resultDoubleList.sim[[loopIndex]] <- node_results
#save.image(file=fileName) # Auto Save
#table1.sim <- sapply(result1.sim, function(x) {
#  c(sens = (sum(x$pvalue <= pvalue.true) > 0))})
#Power1 <- mean(table1.sim)
#cat("nRandCovariate: ", nRandCovariate, fill = TRUE)
#cat("Power1: ", Power1, fill = TRUE)
#power1.sim[[loopIndex]] <- list(Power = Power1, realTau = r.sim)
# TRUE for each replicate whose (bonferroni-adjusted) p-value rejects at the
# pvalue.true threshold; the mean of these indicators is the empirical power.
table2.sim <- sapply(node_results, function(x) {
  c(overall.sens = (sum(x$pvalues.bonf <= pvalue.true) > 0))})
Power2 <- mean(table2.sim)
#cat("Power2: ", Power2, fill = TRUE)
power2.sim[[loopIndex]] <- list(Power = Power2, realTau = c(r.sim,r.sim/2,r.sim/4), smooth = smooth)
# loopIndex <- loopIndex + 1
# } # End outer loop
save(power2.sim, file=fileName) # Auto Save
# par(mfrow=c(2,1))
# Histogram plots
# hist(sapply(result1.sim, function(x) x$pvalue),
#      main = "Histogram of p-value for lme model",
#      xlab = "p-value")
# hist(sapply(result2.sim, function(x) x$pvalues.bonf),
#      main = "Histogram of p-value for lmer model",
#      xlab = "p-value")
# hist(sapply(resultDoubleList.sim[[1]], function(x) (x$tests1)$statistic[1]),
#      breaks = (0:110)/10,
#      main = "Histogram of test-statistic for lme model",
#      xlab = "Test Statistics")
#
# hist(sapply(resultDoubleList.sim[[1]], function(x) (x$tests2)[1,1]),
#      breaks = (0:100)/10,
#      main = "Histogram of test-statistic for lmer model",
#      xlab = "Test Statistics")
#} # End out-outer loop
# Release the worker processes.
stopCluster(cluster)
|
# Print the greeting once for each of the six indices 5 through 10.
for (idx in seq(5, 10)) {
  print("Hello R")
}
# Print the same greeting five more times.
for (idx in seq_len(5)) {
  print("Hello R")
}
|
/forloop.R
|
no_license
|
khyati124/Rstudio
|
R
| false
| false
| 73
|
r
|
# Emit "Hello R" six times (indices 5 to 10).
for (i in 5:10) print("Hello R")
# Emit "Hello R" five times (indices 1 to 5).
for (i in 1:5) print("Hello R")
|
#' Canopy and Stem Interception Model
#'
#' Calculates daily rainfall interception loss by the canopy and by the
#' stems, based on the analytical model of Gash et al. (1995) as reformulated
#' by Valente et al. (1997). The default parameter values represent a
#' Mediterranean Quercus ilex (evergreen oak) ecosystem according to
#' Limousin et al. (2008).
#'
#' @param P.rate (mm/hr) mean precipitation rate.
#' @param Evap (mm) total evaporation (e.g. Penman-Monteith ET).
#' @param P (mm) total precipitation.
#' @param c (-) canopy cover per unit area.
#' @param S (mm) canopy storage capacity.
#' @param St (mm) trunk storage capacity.
#' @param Pt (-) proportion of rain that goes to stemflow.
#' @param dayl (hr) day length, used to convert \code{Evap} into a mean
#'   hourly evaporation rate.
#' @author Elad Dente
#' @return A list with \code{canopy_interception} (mm),
#'   \code{stem_interception} (mm), \code{total_interception} (mm, capped at
#'   \code{P}) and \code{inter.precentage} (percentage of the precipitation
#'   that was intercepted).
#' @references
#' Valente, F., David, J. S., & Gash, J. H. C. (1997). Modelling
#' interception loss for two sparse eucalypt and pine forests in central
#' Portugal using reformulated Rutter and Gash analytical models. Journal of
#' Hydrology, 190(1), 141-162.
#'
#' Limousin, J. M., Rambal, S., Ourcival, J. M., & Joffre, R. (2008).
#' Modelling rainfall interception in a Mediterranean Quercus ilex
#' ecosystem: lesson from a throughfall exclusion experiment. Journal of
#' Hydrology, 357(1), 57-66.
interception<-
  function(P.rate,Evap,P,c=0.59,S=1.7,St=0.64,Pt=0.1,dayl=11){
    # Internal variables:
    #   Ec  (mm/hr) mean evaporation rate during rainfall (Evap/dayl)
    #   Sc  (mm)    canopy storage capacity per unit of canopy cover (S/c)
    #   Stc (mm)    trunk storage capacity per unit of canopy cover (St/c)
    #   Ptc (-)     stemflow proportion per unit of canopy cover (Pt/c)
    Ec= Evap/dayl
    Sc= S/c
    Stc= St/c
    Ptc= Pt/c
    # Canopy interception (Gash analytical form).
    # NOTE(review): log(1 - Ec/P.rate) is undefined when Ec >= P.rate; the
    # model assumes P.rate > Ec -- confirm upstream callers guarantee this.
    P.canopy=-(P.rate/Ec)*Sc*log(1-(Ec/P.rate))
    # Stem interception applies only while rain still reaches the trunks
    # (i.e. the canopy demand does not already exceed the precipitation).
    P.stem=ifelse(P.canopy < P,(P.rate/(P.rate-Ec))*(Stc/Ptc),0)
    # Total interception cannot exceed the precipitation itself.
    tot_inter=ifelse((P.canopy+P.stem)<P,(P.canopy+P.stem),P)
    # Percentage of the precipitation that was intercepted.
    prc.inter=100*tot_inter/P
    return (list(canopy_interception=P.canopy,stem_interception=P.stem,total_interception=tot_inter,inter.precentage=prc.inter))
  }
|
/ET_functions/R/interception.R
|
no_license
|
eladante/Conductance
|
R
| false
| false
| 2,566
|
r
|
#' Canopy and Stem Interception Model
#'
#' Computes daily rainfall interception by the canopy and by the stems using
#' the Gash (1995) analytical model in the reformulation of Valente et
#' al. (1997). Default parameters describe a Mediterranean Quercus ilex
#' (evergreen oak) ecosystem (Limousin et al., 2008).
#'
#' @param P.rate (mm/hr) mean precipitation rate.
#' @param Evap (mm) total evaporation (e.g. Penman-Monteith ET).
#' @param P (mm) total precipitation.
#' @param c (-) canopy cover per unit area.
#' @param S (mm) canopy storage capacity.
#' @param St (mm) trunk storage capacity.
#' @param Pt (-) proportion of rain that goes to stemflow.
#' @param dayl (hr) day length used to convert \code{Evap} to an hourly rate.
#' @author Elad Dente
#' @return A list with \code{canopy_interception} (mm),
#'   \code{stem_interception} (mm), \code{total_interception} (mm, capped at
#'   \code{P}) and \code{inter.precentage} (\% of precipitation intercepted).
#' @references
#' Valente, F., David, J. S., & Gash, J. H. C. (1997). Journal of Hydrology,
#' 190(1), 141-162. Limousin, J. M., et al. (2008). Journal of Hydrology,
#' 357(1), 57-66.
interception <-
  function(P.rate, Evap, P, c = 0.59, S = 1.7, St = 0.64, Pt = 0.1, dayl = 11) {
    # Mean evaporation rate while it is raining (mm/hr).
    evap.rate <- Evap / dayl
    # Storage capacities and stemflow fraction scaled per unit of canopy cover.
    canopy.cap <- S / c
    trunk.cap <- St / c
    stem.frac <- Pt / c
    # Canopy interception (Gash analytical form); requires P.rate > evap.rate.
    canopy.loss <- -(P.rate / evap.rate) * canopy.cap * log(1 - (evap.rate / P.rate))
    # Stems intercept only while the canopy demand has not exhausted the rain.
    stem.loss <- ifelse(canopy.loss < P,
                        (P.rate / (P.rate - evap.rate)) * (trunk.cap / stem.frac),
                        0)
    # Total interception is capped at the precipitation itself.
    total.loss <- ifelse((canopy.loss + stem.loss) < P, canopy.loss + stem.loss, P)
    pct.loss <- 100 * total.loss / P
    list(canopy_interception = canopy.loss,
         stem_interception = stem.loss,
         total_interception = total.loss,
         inter.precentage = pct.loss)
  }
|
# Auto-extracted example script for OutbreakTools::plotggMST().
# All example calls sit inside a "Not run" guard (the ##D lines are
# commented out), so sourcing this file only attaches the package.
library(OutbreakTools)
### Name: plotggMST
### Title: Function to plot a minimum spanning tree of the class 'obkData'
### Aliases: plotggMST
### ** Examples
## Not run:
##D ## load data
##D data(HorseFlu)
##D x <- HorseFlu
##D
##D ## plot minimum spanning tree for individual 42
##D plotggMST(x,individualID=42)
##D
##D ## another example data
##D data(ToyOutbreak)
##D x <- ToyOutbreak
##D
##D ## plot minimum spanning tree for gene1
##D plotggMST(x, locus="gene1")
##D
## End(Not run)
|
/data/genthat_extracted_code/OutbreakTools/examples/plotggMST.Rd.R
|
no_license
|
surayaaramli/typeRrh
|
R
| false
| false
| 499
|
r
|
# Auto-extracted example script for OutbreakTools::plotggMST().
# All example calls sit inside a "Not run" guard (the ##D lines are
# commented out), so sourcing this file only attaches the package.
library(OutbreakTools)
### Name: plotggMST
### Title: Function to plot a minimum spanning tree of the class 'obkData'
### Aliases: plotggMST
### ** Examples
## Not run:
##D ## load data
##D data(HorseFlu)
##D x <- HorseFlu
##D
##D ## plot minimum spanning tree for individual 42
##D plotggMST(x,individualID=42)
##D
##D ## another example data
##D data(ToyOutbreak)
##D x <- ToyOutbreak
##D
##D ## plot minimum spanning tree for gene1
##D plotggMST(x, locus="gene1")
##D
## End(Not run)
|
#' Ngram Collocations
#'
#' Find important ngram (2-3) collocations. Wraps \code{\link[quanteda]{collocations}}
#' to provide stopword, min/max characters, and stemming with a generic \code{plot}
#' function.
#'
#' @param x A vector of character strings.
#' @param n The number of rows to include.
#' @param gram.length The length of ngram to generate (2-3).
#' @param stopwords A vector of stopwords to exclude.
#' @param min.char The minimum number of characters a word must be (including
#' apostrophes) for inclusion.
#' @param max.char The maximum number of characters a word must be (including
#' apostrophes) for inclusion.
#' @param order.by The name of the measure column to order by: \code{"frequency"},
#' \code{"G2"}, \code{"X2"}, \code{"pmi"}, \code{"dice"}.
#' @param stem logical. If \code{TRUE} the \code{\link[SnowballC]{wordStem}}
#' is used with \code{language = "porter"} as the default. Note that stopwords
#' will be stemmed as well.
#' @param language The stem language to use (see \code{\link[SnowballC]{wordStem}}).
#' @param \ldots Other arguments passed to \code{\link[quanteda]{collocations}}.
#' @return Returns a data.frame of terms and frequencies.
#' @importFrom tm stopwords
#' @importFrom data.table := .SD
#' @keywords term word frequency
#' @seealso \code{\link[quanteda]{collocations}}
#' @export
#' @examples
#' x <- presidential_debates_2012[["dialogue"]]
#'
#' ngram_collocations(x)
#' ngram_collocations(x, n = 50)
#' ngram_collocations(x, stopwords = c(tm::stopwords("en"), "american", "governor"))
#' ngram_collocations(x, gram.length = 3)
#' ngram_collocations(x, gram.length = 3, stem = TRUE)
#' ngram_collocations(x, order.by = "dice")
#'
#' plot(ngram_collocations(x))
#' plot(ngram_collocations(x, n = 40))
#' plot(ngram_collocations(x, order.by = "dice"))
#' plot(ngram_collocations(x, gram.length = 3))
ngram_collocations <- function(x, n = 20, gram.length = 2,
    stopwords = tm::stopwords("en"), min.char = 4,
    max.char = Inf, order.by = "frequency", stem = FALSE, language = "porter", ...) {
    ## validate that order.by names one of the supported measure columns
    orders <- c("frequency", "G2", "X2", "pmi", "dice")
    if (!order.by %in% orders) {
        stop(sprintf("`order.by` must be one of: %s", paste(paste0("\"", orders, "\""), collapse=", ")))
    }
    ## gram.length must be a single value
    stopifnot(length(gram.length)==1)
    ## stemming: split into tokens, stem all tokens at once, then reassemble
    ## the original documents using the per-document token counts in `lens`
    if (isTRUE(stem)) {
        x <- stringi::stri_split_regex(x, "[[:space:]]|(?!')(?=[[:punct:]])")
        lens <- sapply(x, length)
        x <- SnowballC::wordStem(unlist(x), language = language)
        if (! is.null(stopwords)) stopwords <- SnowballC::wordStem(stopwords, language = language)
        starts <- c(1, utils::head(cumsum(lens), -1) + 1)
        x <- sapply(Map(function(s, e) {x[s:e]}, starts, c(starts[-1] - 1, length(x))), paste, collapse = " ")
    }
    ## use quanteda to calculate collocations (all measures, up to 200k rows)
    y <- quanteda::collocations(x, size=gram.length, n=200000, method = "all", ...)
    ## drop rows whose measures are all NA; `keeps` is a temporary
    ## data.table column created and removed in place (data.table NSE)
    y[["keeps"]] <- rowSums(!is.na(y)) > 0
    y <- y[which(keeps), ][, keeps := NULL]
    ## change names to be consistent w/ ngram_collocations
    names(y) <- gsub("^count$", "frequency", gsub("(^word)(\\d)", "term\\2", names(y)))
    ## drop empty columns
    keeps <- !sapply(y, function(x) all(x == ""))
    keeps <- names(keeps)[keeps]
    y <- y[, .SD, .SDcols=keeps]
    ## keep only rows whose every term is within [min.char, max.char] chars
    termcols <- grepl("^term\\d+$", colnames(y))
    y[["keeps"]] <- rowSums(sapply(y[, .SD, .SDcols=termcols], function(x){
        (nchar(x) > min.char - 1) & (nchar(x) < max.char + 1)
    })) == sum(termcols)
    y <- y[which(keeps), ][, keeps := NULL]
    ## stopword removal: keep rows where no term column is a stopword
    if (!is.null(stopwords)){
        y[["keeps"]] <- rowSums(sapply(y[, .SD, .SDcols=termcols], function(x){
            !x %in% stopwords
        })) == sum(termcols)
        y <- y[which(keeps), ][, keeps := NULL]
    }
    ## grabbing n rows
    ## NOTE(review): the first n rows are taken BEFORE ordering by `order.by`,
    ## so the result is the top n of quanteda's default ordering, re-sorted --
    ## confirm this is intended rather than ordering first and then taking n.
    if (n > nrow(y)) n <- nrow(y)
    y <- y[seq_len(n), ]
    ## ordering (descending) by frequency or the chosen collocation measure;
    ## the order() expression is built dynamically from the order.by string
    express <- parse(text=paste0("order(-", order.by, ")"))
    y <- y[eval(express)]
    class(y) <- c('ngram_collocations', class(y))
    attributes(y)[["gram.length"]] <- gram.length
    y
}
#' Plots a ngram_collocations Object
#'
#' Plots a ngram_collocations object as a horizontal frequency bar chart
#' beside a heat map of the per-measure scaled collocation statistics.
#'
#' @param x The \code{ngram_collocations} object.
#' @param drop.redundant.yaxis.text logical. If \code{TRUE} the second y axis text/ticks,
#' in the heat plot are dropped.
#' @param plot logical. If \code{TRUE} the output is plotted.
#' @param \ldots ignored.
#' @return Returns (invisibly) a list of the two \pkg{ggplot2} objects that
#' make the combined plot.
#' @method plot ngram_collocations
#' @export
plot.ngram_collocations <- function(x, drop.redundant.yaxis.text = TRUE,
    plot = TRUE, ...){
    ## declare NSE column names as NULL to satisfy R CMD check
    Grams <- Method <- Scaled <- Measure <- NULL
    termcols <- colnames(x)[seq_len(attr(x, "gram.length"))]
    data.table::setDT(x)
    ## paste the term columns into a single hyphenated "Grams" label
    x[, Grams := Reduce(function(...) paste(..., sep = "-"), .SD[, mget(termcols)])]
    ## reverse factor levels so bars read top-down after coord_flip()
    x[["Grams"]] <- factor(x[["Grams"]], levels=rev(x[["Grams"]]))
    ## drop the individual term columns now that Grams carries the label
    x[, eval(parse(text=paste0("c(", paste(paste0("\"", termcols, "\""), collapse=", "), ")"))) := NULL]
    ## horizontal bar chart of raw frequencies
    plot1 <- ggplot2::ggplot(x, ggplot2::aes_string(x='Grams', weight='frequency')) +
        ggplot2::geom_bar() +
        ggplot2::coord_flip() +
        ggplot2::ylab("Count") +
        ggplot2::scale_y_continuous(expand = c(0, 0), limits = c(0, 1.01 * max(x[["frequency"]]))) +
        ggplot2::theme_bw() +
        ggplot2::theme(
            panel.grid.major.y = ggplot2::element_blank(),
            #legend.position="bottom",
            legend.title = ggplot2::element_blank(),
            panel.border = ggplot2::element_blank(),
            axis.line = ggplot2::element_line(color="grey70")
        )
    ## reshape to long form and z-scale each measure separately so the heat
    ## map colours are comparable across measures
    dat_long <- data.table::melt(x, id = c("Grams"),
        variable.name = "Method", value.name = "Measure")[,
        Grams := factor(Grams, levels = levels(x[["Grams"]]))][,
        Method := factor(Method, levels = utils::head(colnames(x), -1))][,
        Scaled := scale(Measure), by = "Method"]
    heat_plot <- ggplot2::ggplot(dat_long,
        ggplot2::aes_string(y = "Grams", x = "Method", fill="Scaled")) +
        ggplot2::geom_tile() +
        ggplot2::scale_fill_gradient2(
            high="red",
            low="blue",
            space = "Lab"
        ) +
        ggplot2::ylab(NULL) +
        ggplot2::xlab("Measure") +
        ggplot2::theme_bw() +
        ggplot2::theme(
            panel.grid = ggplot2::element_blank(),
            axis.title.x = ggplot2::element_text(size=11),
            panel.border = ggplot2::element_rect(color="grey88")
        ) +
        ggplot2::guides(fill = ggplot2::guide_colorbar(barwidth = .5, barheight = 10))
    ## the bar chart already labels the rows, so the heat map's y axis text
    ## is redundant when the two are shown side by side
    if (isTRUE(drop.redundant.yaxis.text)){
        heat_plot <- heat_plot +
            ggplot2::theme(
                axis.text.y = ggplot2::element_blank(),
                axis.ticks.y = ggplot2::element_blank()
            )
    }
    ## arrange side by side; draw only when plot = TRUE
    plotout <- gridExtra::arrangeGrob(plot1, heat_plot, ncol=2)
    if (isTRUE(plot)) gridExtra::grid.arrange(plotout)
    return(invisible(list(bar = plot1, heat = heat_plot)))
}
|
/R/ngram_collocations.R
|
no_license
|
data-steve/termco
|
R
| false
| false
| 7,249
|
r
|
#' Ngram Collocations
#'
#' Find important ngram (2-3) collocations. Wraps \code{\link[quanteda]{collocations}}
#' to provide stopword, min/max characters, and stemming with a generic \code{plot}
#' function.
#'
#' @param x A vector of character strings.
#' @param n The number of rows to include.
#' @param gram.length The length of ngram to generate (2-3).
#' @param stopwords A vector of stopwords to exclude.
#' @param min.char The minimum number of characters a word must be (including
#' apostrophes) for inclusion.
#' @param max.char The maximum number of characters a word must be (including
#' apostrophes) for inclusion.
#' @param order.by The name of the measure column to order by: \code{"frequency"},
#' \code{"G2"}, \code{"X2"}, \code{"pmi"}, \code{"dice"}.
#' @param stem logical. If \code{TRUE} the \code{\link[SnowballC]{wordStem}}
#' is used with \code{language = "porter"} as the default. Note that stopwords
#' will be stemmed as well.
#' @param language The stem language to use (see \code{\link[SnowballC]{wordStem}}).
#' @param \ldots Other arguments passed to \code{\link[quanteda]{collocations}}.
#' @return Returns a data.frame of terms and frequencies.
#' @importFrom tm stopwords
#' @importFrom data.table := .SD
#' @keywords term word frequency
#' @seealso \code{\link[quanteda]{collocations}}
#' @export
#' @examples
#' x <- presidential_debates_2012[["dialogue"]]
#'
#' ngram_collocations(x)
#' ngram_collocations(x, n = 50)
#' ngram_collocations(x, stopwords = c(tm::stopwords("en"), "american", "governor"))
#' ngram_collocations(x, gram.length = 3)
#' ngram_collocations(x, gram.length = 3, stem = TRUE)
#' ngram_collocations(x, order.by = "dice")
#'
#' plot(ngram_collocations(x))
#' plot(ngram_collocations(x, n = 40))
#' plot(ngram_collocations(x, order.by = "dice"))
#' plot(ngram_collocations(x, gram.length = 3))
ngram_collocations <- function(x, n = 20, gram.length = 2,
    stopwords = tm::stopwords("en"), min.char = 4,
    max.char = Inf, order.by = "frequency", stem = FALSE, language = "porter", ...) {
    ## validate that order.by names one of the supported measure columns
    orders <- c("frequency", "G2", "X2", "pmi", "dice")
    if (!order.by %in% orders) {
        stop(sprintf("`order.by` must be one of: %s", paste(paste0("\"", orders, "\""), collapse=", ")))
    }
    ## gram.length must be a single value
    stopifnot(length(gram.length)==1)
    ## stemming: split into tokens, stem all tokens at once, then reassemble
    ## the original documents using the per-document token counts in `lens`
    if (isTRUE(stem)) {
        x <- stringi::stri_split_regex(x, "[[:space:]]|(?!')(?=[[:punct:]])")
        lens <- sapply(x, length)
        x <- SnowballC::wordStem(unlist(x), language = language)
        if (! is.null(stopwords)) stopwords <- SnowballC::wordStem(stopwords, language = language)
        starts <- c(1, utils::head(cumsum(lens), -1) + 1)
        x <- sapply(Map(function(s, e) {x[s:e]}, starts, c(starts[-1] - 1, length(x))), paste, collapse = " ")
    }
    ## use quanteda to calculate collocations (all measures, up to 200k rows)
    y <- quanteda::collocations(x, size=gram.length, n=200000, method = "all", ...)
    ## drop rows whose measures are all NA; `keeps` is a temporary
    ## data.table column created and removed in place (data.table NSE)
    y[["keeps"]] <- rowSums(!is.na(y)) > 0
    y <- y[which(keeps), ][, keeps := NULL]
    ## change names to be consistent w/ ngram_collocations
    names(y) <- gsub("^count$", "frequency", gsub("(^word)(\\d)", "term\\2", names(y)))
    ## drop empty columns
    keeps <- !sapply(y, function(x) all(x == ""))
    keeps <- names(keeps)[keeps]
    y <- y[, .SD, .SDcols=keeps]
    ## keep only rows whose every term is within [min.char, max.char] chars
    termcols <- grepl("^term\\d+$", colnames(y))
    y[["keeps"]] <- rowSums(sapply(y[, .SD, .SDcols=termcols], function(x){
        (nchar(x) > min.char - 1) & (nchar(x) < max.char + 1)
    })) == sum(termcols)
    y <- y[which(keeps), ][, keeps := NULL]
    ## stopword removal: keep rows where no term column is a stopword
    if (!is.null(stopwords)){
        y[["keeps"]] <- rowSums(sapply(y[, .SD, .SDcols=termcols], function(x){
            !x %in% stopwords
        })) == sum(termcols)
        y <- y[which(keeps), ][, keeps := NULL]
    }
    ## grabbing n rows
    ## NOTE(review): the first n rows are taken BEFORE ordering by `order.by`,
    ## so the result is the top n of quanteda's default ordering, re-sorted --
    ## confirm this is intended rather than ordering first and then taking n.
    if (n > nrow(y)) n <- nrow(y)
    y <- y[seq_len(n), ]
    ## ordering (descending) by frequency or the chosen collocation measure;
    ## the order() expression is built dynamically from the order.by string
    express <- parse(text=paste0("order(-", order.by, ")"))
    y <- y[eval(express)]
    class(y) <- c('ngram_collocations', class(y))
    attributes(y)[["gram.length"]] <- gram.length
    y
}
#' Plots a ngram_collocations Object
#'
#' Plots a ngram_collocations object as a horizontal frequency bar chart
#' beside a heat map of the per-measure scaled collocation statistics.
#'
#' @param x The \code{ngram_collocations} object.
#' @param drop.redundant.yaxis.text logical. If \code{TRUE} the second y axis text/ticks,
#' in the heat plot are dropped.
#' @param plot logical. If \code{TRUE} the output is plotted.
#' @param \ldots ignored.
#' @return Returns (invisibly) a list of the two \pkg{ggplot2} objects that
#' make the combined plot.
#' @method plot ngram_collocations
#' @export
plot.ngram_collocations <- function(x, drop.redundant.yaxis.text = TRUE,
    plot = TRUE, ...){
    ## declare NSE column names as NULL to satisfy R CMD check
    Grams <- Method <- Scaled <- Measure <- NULL
    termcols <- colnames(x)[seq_len(attr(x, "gram.length"))]
    data.table::setDT(x)
    ## paste the term columns into a single hyphenated "Grams" label
    x[, Grams := Reduce(function(...) paste(..., sep = "-"), .SD[, mget(termcols)])]
    ## reverse factor levels so bars read top-down after coord_flip()
    x[["Grams"]] <- factor(x[["Grams"]], levels=rev(x[["Grams"]]))
    ## drop the individual term columns now that Grams carries the label
    x[, eval(parse(text=paste0("c(", paste(paste0("\"", termcols, "\""), collapse=", "), ")"))) := NULL]
    ## horizontal bar chart of raw frequencies
    plot1 <- ggplot2::ggplot(x, ggplot2::aes_string(x='Grams', weight='frequency')) +
        ggplot2::geom_bar() +
        ggplot2::coord_flip() +
        ggplot2::ylab("Count") +
        ggplot2::scale_y_continuous(expand = c(0, 0), limits = c(0, 1.01 * max(x[["frequency"]]))) +
        ggplot2::theme_bw() +
        ggplot2::theme(
            panel.grid.major.y = ggplot2::element_blank(),
            #legend.position="bottom",
            legend.title = ggplot2::element_blank(),
            panel.border = ggplot2::element_blank(),
            axis.line = ggplot2::element_line(color="grey70")
        )
    ## reshape to long form and z-scale each measure separately so the heat
    ## map colours are comparable across measures
    dat_long <- data.table::melt(x, id = c("Grams"),
        variable.name = "Method", value.name = "Measure")[,
        Grams := factor(Grams, levels = levels(x[["Grams"]]))][,
        Method := factor(Method, levels = utils::head(colnames(x), -1))][,
        Scaled := scale(Measure), by = "Method"]
    heat_plot <- ggplot2::ggplot(dat_long,
        ggplot2::aes_string(y = "Grams", x = "Method", fill="Scaled")) +
        ggplot2::geom_tile() +
        ggplot2::scale_fill_gradient2(
            high="red",
            low="blue",
            space = "Lab"
        ) +
        ggplot2::ylab(NULL) +
        ggplot2::xlab("Measure") +
        ggplot2::theme_bw() +
        ggplot2::theme(
            panel.grid = ggplot2::element_blank(),
            axis.title.x = ggplot2::element_text(size=11),
            panel.border = ggplot2::element_rect(color="grey88")
        ) +
        ggplot2::guides(fill = ggplot2::guide_colorbar(barwidth = .5, barheight = 10))
    ## the bar chart already labels the rows, so the heat map's y axis text
    ## is redundant when the two are shown side by side
    if (isTRUE(drop.redundant.yaxis.text)){
        heat_plot <- heat_plot +
            ggplot2::theme(
                axis.text.y = ggplot2::element_blank(),
                axis.ticks.y = ggplot2::element_blank()
            )
    }
    ## arrange side by side; draw only when plot = TRUE
    plotout <- gridExtra::arrangeGrob(plot1, heat_plot, ncol=2)
    if (isTRUE(plot)) gridExtra::grid.arrange(plotout)
    return(invisible(list(bar = plot1, heat = heat_plot)))
}
|
# Tag each experimental phenology data frame (budburst, leafout, flowering)
# with an ecosystem label and build a one-row-per-species/ecosystem lookup
# table for each phenophase.
# NOTE(review): assumes `expgdd_bbd`, `expgdd_lod`, `expgdd_ffd` and
# dplyr/magrittr are already loaded by the calling script -- confirm.
#budburst
expgdd_bbd$ecosystem<-"forest"
# site exp01 is classified as grassland; all other sites stay forest
expgdd_bbd$ecosystem[expgdd_bbd$site2=="exp01"]<-"grassland"
# species-by-ecosystem key used to keep one row per combination below
expgdd_bbd$sp.eco<-paste(expgdd_bbd$sp.name,expgdd_bbd$ecosystem)
bbecos<- expgdd_bbd %>% # start with the data frame
  distinct(sp.eco, .keep_all = TRUE) %>% # keep the first row of each species/ecosystem combination
  dplyr::select(sp.name,genus.species,site2,ecosystem)
bbecos<-bbecos[order(bbecos$genus.species),]
# rename the second column (genus.species) to "spnum"
colnames(bbecos)[2]<-"spnum"
dim(bbecos)
#leafout
expgdd_lod$ecosystem<-"forest"
# same grassland classification as for budburst (exp01 only)
expgdd_lod$ecosystem[expgdd_lod$site2=="exp01"]<-"grassland"
expgdd_lod$sp.eco<-paste(expgdd_lod$sp.name,expgdd_lod$ecosystem)
loecos<- expgdd_lod %>% # start with the data frame
  distinct(sp.eco, .keep_all = TRUE) %>% # keep the first row of each species/ecosystem combination
  dplyr::select(sp.name,genus.species,site2,ecosystem)
loecos<-loecos[order(loecos$genus.species),]
# rename the second column (genus.species) to "spnum"
colnames(loecos)[2]<-"spnum"
dim(loecos)
#flowering
expgdd_ffd$ecosystem<-"forest"
# for flowering data, both exp01 and exp12 are grassland sites
expgdd_ffd$ecosystem[expgdd_ffd$site2=="exp01"]<-"grassland"
expgdd_ffd$ecosystem[expgdd_ffd$site2=="exp12"]<-"grassland"
expgdd_ffd$sp.eco<-paste(expgdd_ffd$sp.name,expgdd_ffd$ecosystem)
ffecos<- expgdd_ffd %>% # start with the data frame
  distinct(sp.eco, .keep_all = TRUE) %>% # keep the first row of each species/ecosystem combination
  dplyr::select(sp.name,genus.species,site2,ecosystem)
ffecos<-ffecos[order(ffecos$genus.species),]
# rename the second column (genus.species) to "spnum"
colnames(ffecos)[2]<-"spnum"
|
/Analyses/source/get_ecosystem.R
|
no_license
|
AileneKane/radcliffe
|
R
| false
| false
| 1,360
|
r
|
# Tag each experimental phenology data frame (budburst, leafout, flowering)
# with an ecosystem label and build a one-row-per-species/ecosystem lookup
# table for each phenophase.
# NOTE(review): assumes `expgdd_bbd`, `expgdd_lod`, `expgdd_ffd` and
# dplyr/magrittr are already loaded by the calling script -- confirm.
#budburst
expgdd_bbd$ecosystem<-"forest"
# site exp01 is classified as grassland; all other sites stay forest
expgdd_bbd$ecosystem[expgdd_bbd$site2=="exp01"]<-"grassland"
# species-by-ecosystem key used to keep one row per combination below
expgdd_bbd$sp.eco<-paste(expgdd_bbd$sp.name,expgdd_bbd$ecosystem)
bbecos<- expgdd_bbd %>% # start with the data frame
  distinct(sp.eco, .keep_all = TRUE) %>% # keep the first row of each species/ecosystem combination
  dplyr::select(sp.name,genus.species,site2,ecosystem)
bbecos<-bbecos[order(bbecos$genus.species),]
# rename the second column (genus.species) to "spnum"
colnames(bbecos)[2]<-"spnum"
dim(bbecos)
#leafout
expgdd_lod$ecosystem<-"forest"
# same grassland classification as for budburst (exp01 only)
expgdd_lod$ecosystem[expgdd_lod$site2=="exp01"]<-"grassland"
expgdd_lod$sp.eco<-paste(expgdd_lod$sp.name,expgdd_lod$ecosystem)
loecos<- expgdd_lod %>% # start with the data frame
  distinct(sp.eco, .keep_all = TRUE) %>% # keep the first row of each species/ecosystem combination
  dplyr::select(sp.name,genus.species,site2,ecosystem)
loecos<-loecos[order(loecos$genus.species),]
# rename the second column (genus.species) to "spnum"
colnames(loecos)[2]<-"spnum"
dim(loecos)
#flowering
expgdd_ffd$ecosystem<-"forest"
# for flowering data, both exp01 and exp12 are grassland sites
expgdd_ffd$ecosystem[expgdd_ffd$site2=="exp01"]<-"grassland"
expgdd_ffd$ecosystem[expgdd_ffd$site2=="exp12"]<-"grassland"
expgdd_ffd$sp.eco<-paste(expgdd_ffd$sp.name,expgdd_ffd$ecosystem)
ffecos<- expgdd_ffd %>% # start with the data frame
  distinct(sp.eco, .keep_all = TRUE) %>% # keep the first row of each species/ecosystem combination
  dplyr::select(sp.name,genus.species,site2,ecosystem)
ffecos<-ffecos[order(ffecos$genus.species),]
# rename the second column (genus.species) to "spnum"
colnames(ffecos)[2]<-"spnum"
|
\name{GLognormal}
\alias{GLognormal}
\docType{data}
\title{
%% ~~ data name/kind ... ~~
Class definition to control lognormal distribution
}
\description{
%% ~~ A concise (1-5 lines) description of the dataset. ~~
}
\usage{lnorm<-GLognormal$new(mu=350.25,sigmma=105.075)}
\format{
SetParam(lambda=lambda,zeta=zeta)
Setting lognormal parameters and calculating mu and sigm. lambda: mean of lnorm, zeta: std. of lnorm
Param(): Calculating lambda and mu
Eq(x): Calculating equivalent normal distribution for adjusting point x
}
\details{
%% ~~ If necessary, more details than the __description__ above ~~
}
\source{
%% ~~ reference to a publication or URL from which the data were obtained ~~
}
\references{
%% ~~ possibly secondary sources and usages ~~
}
\examples{
#fundamental usage
lnorm<-GLognormal$new(mu=350.25,sigmma=105.075)
res<-lnorm$Param()
cat("lambda=",res[2],",zeta=",res[1])
# lambda= 5.815558 ,zeta= 0.2935604
#example: giving lambda and eta parameters and returning mu,sigm
lambda<- 5.815558 ;zeta<- 0.2935604
res2<-lnorm$SetParam(lambda=lambda,zeta=zeta)
cat("mu=",res2[1],",sigm=",res2[2])
# mu= 350.2499 ,sigm= 105.075
#Calculating equivalent norm distribution
xval<-0.5
res3<-lnorm$Eq(X=xval)
cat("mu_eq=",res3[1],",sig_eq=",res3[2])
# mu_eq= 3.754353 ,sig_eq= 0.1467802
}
\keyword{datasets}
|
/man/GLognormal.Rd
|
no_license
|
ShinsukeSakai0321/LimitState
|
R
| false
| false
| 1,331
|
rd
|
\name{GLognormal}
\alias{GLognormal}
\docType{data}
\title{
%% ~~ data name/kind ... ~~
Class definition to control lognormal distribution
}
\description{
%% ~~ A concise (1-5 lines) description of the dataset. ~~
}
\usage{lnorm<-GLognormal$new(mu=350.25,sigmma=105.075)}
\format{
SetParam(lambda=lambda,zeta=zeta)
Setting lognormal parameters and calculating mu and sigm. lambda: mean of lnorm, zeta: std. of lnorm
Param(): Calculating lambda and mu
Eq(x): Calculating equivalent normal distribution for adjusting point x
}
\details{
%% ~~ If necessary, more details than the __description__ above ~~
}
\source{
%% ~~ reference to a publication or URL from which the data were obtained ~~
}
\references{
%% ~~ possibly secondary sources and usages ~~
}
\examples{
#fundamental usage
lnorm<-GLognormal$new(mu=350.25,sigmma=105.075)
res<-lnorm$Param()
cat("lambda=",res[2],",zeta=",res[1])
# lambda= 5.815558 ,zeta= 0.2935604
#example: giving lambda and eta parameters and returning mu,sigm
lambda<- 5.815558 ;zeta<- 0.2935604
res2<-lnorm$SetParam(lambda=lambda,zeta=zeta)
cat("mu=",res2[1],",sigm=",res2[2])
# mu= 350.2499 ,sigm= 105.075
#Calculating equivalent norm distribution
xval<-0.5
res3<-lnorm$Eq(X=xval)
cat("mu_eq=",res3[1],",sig_eq=",res3[2])
# mu_eq= 3.754353 ,sig_eq= 0.1467802
}
\keyword{datasets}
|
#' Map your prepared data with SurvMapper2
#'
#' Later version of SurvMapper. Creates surveillance chloropleth maps for data prepared with PrepMap. Note that due to the use of grid for legend and the small inlets for non-visible
#' countries, mapping is not superswift and elements appear one by one to the graph.
#' Currently uses 'Tahoma' font, but needs care with the registration of fonts with extrafont, and not perhaps ideal. Map function best used with get_GEO_data and PrepMap, i.e. geographical data predefined
#' in a certain way.
#'
#' @param data Your spatial data that you want to map, prepared to work with ggplot2, currently only chloropleth available
#' @param fills Your column/variable(s) that your want to map. Preferably a factor in defined order.
#' @param long Your longitude variable, defaults to 'long'
#' @param lat Your latitude variable, defaults to 'lat'
#' @param id Your spatial id variable, defaults to 'id'
#' @param GEO_ID Your spatial id variable (e.g. country code), defaults to 'GEO_ID'
#' @param bground Your variable with 1/0 to classify the grey background (0's included only in lightgrey), defaults to 'isEEA'
#' @param Legend_title Legend title(s). More than one if more than one fills. Use escape new line to split the legend to several rows.
#' @param col_scale Colour scale, use 'green', 'red', 'blue' or 'qualitative'. Note that the last category is always "No data" grey.
#' More than one if more than one fills.
#' @param fill_levels The order to map the levels in fills; only works with one fills variable.
#' @param reverse_colours Reverse the order of the colour scale. Note that the last category/ies are always "No data" and "Not included" grey (the latter can be omitted).
#' @param not_included Label for the background colour category for the legend, defaults to "Not included". Use NULL to omit the "Not included" category from the legend.
#' @param add_points Add point data on the map, defaults to FALSE
#' @param pointdata If adding the point data, give the points.
#' @param pointsize If adding points, give the point size
#' @param pointshape If adding points, give the symbol for points (pch)
#' @param cex_factor Cex factor for many things, changes sizes depending on the end resolution
#' @keywords map
#' @author Tommi Karki
#' @export
#' @examples
#'
#' # load the included dummy data
#' load(system.file("extdata", "dummy_data.rds", package = "SurvMaps"))
#' # Get the EU/EEA and candidate country SpatialPolygonsDataframe, including a variable "isEEA"
#' plg_map <- get_GEO_data(layer = 1, STAT_LEVL = c(0), FIELDS = c("isEEA", "GEO_ID"))
#'
#' # Prepare the data for SurvMapper with PrepMap
#' mymap <- PrepMap(data = dummy_data , geo = plg_map)
#'
#' # The map is correctly aligned only for selected width/height, so you can plot into a predefined device
#' dev.new(width=11.8,height=8, noRStudioGD = TRUE)
#'
#' # Simple chloropleth map
#' SurvMapper2(mymap, fills ="Dummy_status", Legend_title = "Testing this", col_scale = "red")
#'
#' # Simple chloropleth map with other options
#' SurvMapper2(mymap, fills ="Dummy_status", Legend_title = "Testing this", col_scale = "red")
#'
#' # Chloropleth map with some additional options
#' SurvMapper2(mymap, fills ="Dummy_status", Legend_title = "Testing this",
#' fill_levels = c("Dummy4",
#' "Dummy3",
#' "Dummy2",
#' "Dummy1",
#' "No data"),
#' col_scale = "hotcold", reverse_colours = TRUE, not_included = NULL)
#'
#' # Note that you can map at once several columns, but all options are not yet available for this scenario -
#' # e.g. level order is good to be predefined if plotting several columns. And depends on graphical device (e.g. recording)
#' SurvMapper(mymap, fills = c("Dummy_status", "Dummy2"), Legend_title = c("Testing this", "And also this"),
#' col_scale = c("blue", "qualitative"))
SurvMapper2 <- function (data, fills, long = "long", lat = "lat", id = "id",
GEO_ID = "GEO_ID", bground = "isEEA", Legend_title, col_scale,
fill_levels = NULL, reverse_colours = FALSE, not_included = "Not included",
add_points = FALSE, pointdata = NULL, pointsize = NULL, pointshape = "*",
cex_factor = 1)
{
# Guard: a point overlay was requested without supplying point geometries.
if(add_points == TRUE & is.null(pointdata)){
stop("For adding points to the chloropleth map, please include geometries also for the points!")
}
# Register the Tahoma font for the graphics device.
# NOTE(review): windowsFonts()/windowsFont() exist only on Windows graphics
# devices - confirm this function is intended to be Windows-only.
windowsFonts(Tahoma = windowsFont("Tahoma"))
require(EcdcColors)
# for (i in fills) {
# Only the single-fill case is active in this body; the loop over several
# fill columns is commented out (here and at the matching "# }" at the end).
fill <- fills
Leg_title <- Legend_title
colour_scale <- col_scale
# Coerce the mapped variable to a factor; an explicit level order (if
# supplied) controls both colour assignment and legend order.
# NOTE(review): when add_points = FALSE, pointdata is NULL and the
# pointdata[[fill]] assignments silently turn it into a list - harmless
# since it is never used in that case, but worth confirming.
if (is.null(fill_levels)) {
data[[fill]] <- factor(data[[fill]])
pointdata[[fill]] <- factor(pointdata[[fill]])
}
else {
data[[fill]] <- factor(data[[fill]], levels = fill_levels)
pointdata[[fill]] <- factor(pointdata[[fill]], levels = fill_levels)
}
# Build the fill palette: one scale colour per factor level except the
# last level, which always gets the "No data" medium-light grey appended
# after the scale colours.
if (colour_scale == "green") {
map_cols <- SurvColors(col_scale = "green", n = length(levels(data[[fill]])) -
1)
map_cols[length(map_cols) + 1] <- SurvColors("grey",
grey_shade = "mediumlight")
}
else if (colour_scale == "blue") {
map_cols <- SurvColors(col_scale = "blue", n = length(levels(data[[fill]])) -
1)
map_cols[length(map_cols) + 1] <- SurvColors("grey",
grey_shade = "mediumlight")
}
else if (colour_scale == "red") {
map_cols <- SurvColors(col_scale = "red", n = length(levels(data[[fill]])) -
1)
map_cols[length(map_cols) + 1] <- SurvColors("grey",
grey_shade = "mediumlight")
}
else if (colour_scale == "qualitative") {
map_cols <- SurvColors(col_scale = "qualitative",
n = length(levels(data[[fill]]))-1)
map_cols[length(map_cols) + 1] <- SurvColors("grey",
grey_shade = "mediumlight")
}
else if (colour_scale == "hotcold") {
map_cols <- SurvColors(col_scale = "hotcold", n = length(levels(data[[fill]]))-
1)
map_cols[length(map_cols) + 1] <- SurvColors("grey",
grey_shade = "mediumlight")
}
else {
# NOTE(review): map_cols is not defined yet on this branch, so an
# unrecognised col_scale fails with "object 'map_cols' not found" -
# this looks like it was meant to pass through a user-supplied colour
# vector; confirm intended behaviour.
map_cols <- map_cols
map_cols[length(map_cols) + 1] <- SurvColors("grey",
grey_shade = "mediumlight")
}
# Reverse only the scale colours; the trailing "No data" grey stays last.
if (reverse_colours == TRUE) {
map_cols[1:length(map_cols) - 1] <- rev(map_cols[1:length(map_cols) -
1])
}
# Base choropleth: all polygons filled by `fill`, then countries with
# bground == 0 are re-drawn on top in light grey ("not included"
# background). Azimuthal equal-area projection framed on Europe; ggplot's
# own legend is suppressed because the legend is drawn manually with grid.
p1 <- ggplot(data = data, aes_string(x = long, y = lat,
fill = fill)) + geom_map(data = data, map = data,
aes_string(map_id = id), color = SurvColors("grey",
grey_shade = "dark"), size = 0.2) + theme_map() +
coord_map("azequalarea", xlim = c(-24, 44), ylim = c(34,
70), orientation = c(52, 10, 0)) + theme(legend.position = "none") +
geom_map(data = data[data[[bground]] == 0, ], map = data[data[[bground]] ==
0, ], aes_string(map_id = id), fill = SurvColors("grey",
grey_shade = "light"), color = SurvColors("grey",
grey_shade = "dark"), size = 0.2)
if (length(levels(data[[fill]])) < 9) {
p1 <- p1 + scale_fill_manual(values = map_cols[1:length(levels(data[[fill]]))])
}else {
stop("Too many categories for the map, please re-check and rescale!")
}
# This needs to somehow match the levels that exist for point with the levels for the polygons, in this case -
# not completely working yet! Cutoff around 90 for rgb works nice though.
# Point markers are drawn black on light polygon fills and white on dark
# fills, based on the mean RGB of the corresponding colour (threshold 100).
# NOTE(review): the pointsize argument is unused - point size is hard-coded
# to 8 * cex_factor; confirm whether pointsize should be honoured here.
if(add_points == TRUE){
p1 <- p1 + geom_point(data=pointdata, aes_string(x="coords.x1", y="coords.x2", col = fills),size = 8*cex_factor, shape = pointshape)+
scale_color_manual(values = unlist(lapply(map_cols[1:length(levels(data[[fill]]))], function(x) ifelse( mean(col2rgb(x)) > 100, "black", "white"))))
}
# Legend labels: the factor levels plus, unless suppressed via
# not_included = NULL, a final "Not included" entry with its own
# light-grey swatch colour.
if(!is.null(not_included)){
map_cols[length(map_cols) + 1] <- SurvColors("grey",
grey_shade = "light")
nfills <- c(levels(data[[fill]]), not_included)
}else{
nfills <- c(levels(data[[fill]]))
}
# Manual legend layout constants (normalised device coordinates).
xpos <- 0.01
xtextpos <- 0.056
textcex <- 2*cex_factor
lwd <- 1*cex_factor
grid.newpage()
v1 <- viewport(width = 1, height = 1)
print(p1, vp = v1)
# Legend title plus the first two entries (always drawn); each entry is a
# coloured swatch rectangle followed by its label text.
grid.rect(width = 0.04, height = 0.025, x = xpos + 0.002,
y = 0.9-0.03, just = "left", gp = gpar(fill = map_cols[1],
col = SurvColors("grey", grey_shade = "dark"),
lwd = lwd))
grid.rect(width = 0.04, height = 0.025, x = xpos + 0.002,
y = 0.865-0.03, just = "left", gp = gpar(fill = map_cols[2],
col = SurvColors("grey", grey_shade = "dark"),
lwd = lwd))
grid.text(Leg_title, x = xpos + 0.002, y = 0.93-0.03, just = c("left", "bottom"),
vp = v1, gp = gpar(fontsize = 9, fontfamily = "Tahoma",
cex = textcex))
grid.text(paste(nfills[1]), x = xtextpos,
y = 0.9-0.03, just = "left", vp = v1, gp = gpar(fontsize = 9,
fontfamily = "Tahoma", cex = textcex))
grid.text(paste(nfills[2]), x = xtextpos,
y = 0.865-0.03, just = "left", vp = v1, gp = gpar(fontsize = 9,
fontfamily = "Tahoma", cex = textcex))
# Remaining legend entries: one swatch + label per additional category,
# stepping down 0.035 NPC per row, up to 9 entries in total.
if (length(nfills) >= 3) {
grid.rect(width = 0.04, height = 0.025, x = xpos +
0.002, y = 0.83-0.03, just = "left", gp = gpar(fill = map_cols[3],
col = SurvColors("grey", grey_shade = "dark"),
lwd = lwd))
grid.text(paste(nfills[3]), x = xtextpos,
y = 0.83-0.03, just = "left", vp = v1, gp = gpar(fontsize = 9,
fontfamily = "Tahoma", cex = textcex))
}
if (length(nfills) >= 4) {
grid.rect(width = 0.04, height = 0.025, x = xpos +
0.002, y = 0.795-0.03, just = "left", gp = gpar(fill = map_cols[4],
col = SurvColors("grey", grey_shade = "dark"),
lwd = lwd))
grid.text(paste(nfills[4]), x = xtextpos,
y = 0.795-0.03, just = "left", vp = v1, gp = gpar(fontsize = 9,
fontfamily = "Tahoma", cex = textcex))
}
if (length(nfills) >= 5) {
grid.rect(width = 0.04, height = 0.025, x = xpos +
0.002, y = 0.76-0.03, just = "left", gp = gpar(fill = map_cols[5],
col = SurvColors("grey", grey_shade = "dark"),
lwd = lwd))
grid.text(paste(nfills[5]), x = xtextpos,
y = 0.76-0.03, just = "left", vp = v1, gp = gpar(fontsize = 9,
fontfamily = "Tahoma", cex = textcex))
}
if (length(nfills) >= 6) {
grid.rect(width = 0.04, height = 0.025, x = xpos +
0.002, y = 0.725-0.03, just = "left", gp = gpar(fill = map_cols[6],
col = SurvColors("grey", grey_shade = "dark"),
lwd = lwd))
grid.text(paste(nfills[6]), x = xtextpos,
y = 0.725-0.03, just = "left", vp = v1, gp = gpar(fontsize = 9,
fontfamily = "Tahoma", cex = textcex))
}
if (length(nfills) >= 7) {
grid.rect(width = 0.04, height = 0.025, x = xpos +
0.002, y = 0.69-0.03, just = "left", gp = gpar(fill = map_cols[7],
col = SurvColors("grey", grey_shade = "dark"),
lwd = lwd))
grid.text(paste(nfills[7]), x = xtextpos,
y = 0.69-0.03, just = "left", vp = v1, gp = gpar(fontsize = 9,
fontfamily = "Tahoma", cex = textcex))
}
if (length(nfills) >= 8) {
grid.rect(width = 0.04, height = 0.025, x = xpos +
0.002, y = 0.655-0.03, just = "left", gp = gpar(fill = map_cols[8],
col = SurvColors("grey", grey_shade = "dark"),
lwd = lwd))
grid.text(paste(nfills[8]), x = xtextpos,
y = 0.655-0.03, just = "left", vp = v1, gp = gpar(fontsize = 9,
fontfamily = "Tahoma", cex = textcex))
}
if (length(nfills) >= 9) {
grid.rect(width = 0.04, height = 0.025, x = xpos +
0.002, y = 0.620-0.03, just = "left", gp = gpar(fill = map_cols[9],
col = SurvColors("grey", grey_shade = "dark"),
lwd = lwd))
grid.text(paste(nfills[9]), x = xtextpos,
y = 0.620-0.03, just = "left", vp = v1, gp = gpar(fontsize = 9,
fontfamily = "Tahoma", cex = textcex))
}
# Inset swatches for Luxembourg and Malta (too small to be visible on the
# map itself). Each swatch is filled with that country's own category
# colour, with an optional "*" point marker drawn in a contrasting colour.
grid.rect(width = 0.04, height = 0.025, x = xpos + 0.002,
y = 0.55-0.03, just = "left", gp = gpar(fill = map_cols[levels(data[[fill]]) ==
unique(data[[fill]][data[[GEO_ID]] == "LU"])],
col = SurvColors("grey", grey_shade = "dark"),
lwd = lwd))
if(add_points == TRUE){
grid.text("*", x = xtextpos-0.03, y = 0.55-0.0375, just = "left",
vp = v1, gp = gpar(fontsize = 12, fontfamily = "sans",
cex = textcex, col =
ifelse(mean(col2rgb(map_cols[levels(data[[fill]]) ==
unique(data[[fill]][data[[GEO_ID]] == "LU"])])) > 100,
"black", "white")))
}
grid.text("Luxembourg", x = xtextpos, y = 0.55-0.03, just = "left",
vp = v1, gp = gpar(fontsize = 9, fontfamily = "Tahoma",
cex = textcex))
grid.rect(width = 0.04, height = 0.025, x = xpos + 0.002,
y = 0.515-0.03, just = "left", gp = gpar(fill = map_cols[levels(data[[fill]]) ==
unique(data[[fill]][data[[GEO_ID]] == "MT"])],
col = SurvColors("grey", grey_shade = "dark"),
lwd = lwd))
if(add_points == TRUE){
grid.text("*", x = xtextpos-0.03, y = 0.515-0.0375, just = "left",
vp = v1, gp = gpar(fontsize = 12, fontfamily = "sans",
cex = textcex, col =
ifelse(mean(col2rgb(map_cols[levels(data[[fill]]) ==
unique(data[[fill]][data[[GEO_ID]] == "MT"])])) > 100,
"black", "white")))
}
grid.text("Malta", x = xtextpos, y = 0.515-0.03, just = "left",
vp = v1, gp = gpar(fontsize = 9, fontfamily = "Tahoma",
cex = textcex))
# }
}
|
/R/SurvMapper2.R
|
no_license
|
TommiKarki/SurvMaps
|
R
| false
| false
| 16,920
|
r
|
#' Map your prepared data with SurvMapper2
#'
#' Later version of SurvMapper. Creates surveillance chloropleth maps for data prepared with PrepMap. Note that due to the use of grid for legend and the small inlets for non-visible
#' countries, mapping is not superswift and elements appear one by one to the graph.
#' Currently uses 'Tahoma' font, but needs care with the registration of fonts with extrafont, and not perhaps ideal. Map function best used with get_GEO_data and PrepMap, i.e. geographical data predefined
#' in a certain way.
#'
#' @param data Your spatial data that you want to map, prepared to work with ggplot2, currently only chloropleth available
#' @param fills Your column/variable(s) that your want to map. Preferably a factor in defined order.
#' @param long Your longitude variable, defaults to 'long'
#' @param lat Your latitude variable, defaults to 'lat'
#' @param id Your spatial id variable, defaults to 'id'
#' @param GEO_ID Your spatial id variable (e.g. country code), defaults to 'GEO_ID'
#' @param bground Your variable with 1/0 to classify the grey background (0's included only in lightgrey), defaults to 'isEEA'
#' @param Legend_title Legend title(s). More than one if more than one fills. Use escape new line to split the legend to several rows.
#' @param col_scale Colour scale, use 'green', 'red', 'blue' or 'qualitative'. Note that the last category is always "No data" grey.
#' More than one if more than one fills.
#' @param fill_levels The order to map the levels in fills; only works with one fills variable.
#' @param reverse_colours Reverse the order of the colour scale. Note that the last category/ies are always "No data" and "Not included" grey (the latter can be omitted).
#' @param not_included Label for the background colour category for the legend, defaults to "Not included". Use NULL to omit the "Not included" category from the legend.
#' @param add_points Add point data on the map, defaults to FALSE
#' @param pointdata If adding the point data, give the points.
#' @param pointsize If adding points, give the point size
#' @param pointshape If adding points, give the symbol for points (pch)
#' @param cex_factor Cex factor for many things, changes sizes depending on the end resolution
#' @keywords map
#' @author Tommi Karki
#' @export
#' @examples
#'
#' # load the included dummy data
#' load(system.file("extdata", "dummy_data.rds", package = "SurvMaps"))
#' # Get the EU/EEA and candidate country SpatialPolygonsDataframe, including a variable "isEEA"
#' plg_map <- get_GEO_data(layer = 1, STAT_LEVL = c(0), FIELDS = c("isEEA", "GEO_ID"))
#'
#' # Prepare the data for SurvMapper with PrepMap
#' mymap <- PrepMap(data = dummy_data , geo = plg_map)
#'
#' # The map is correctly aligned only for selected width/height, so you can plot into a predefined device
#' dev.new(width=11.8,height=8, noRStudioGD = TRUE)
#'
#' # Simple chloropleth map
#' SurvMapper2(mymap, fills ="Dummy_status", Legend_title = "Testing this", col_scale = "red")
#'
#' # Simple chloropleth map with other options
#' SurvMapper2(mymap, fills ="Dummy_status", Legend_title = "Testing this", col_scale = "red")
#'
#' # Chloropleth map with some additional options
#' SurvMapper2(mymap, fills ="Dummy_status", Legend_title = "Testing this",
#' fill_levels = c("Dummy4",
#' "Dummy3",
#' "Dummy2",
#' "Dummy1",
#' "No data"),
#' col_scale = "hotcold", reverse_colours = TRUE, not_included = NULL)
#'
#' # Note that you can map at once several columns, but all options are not yet available for this scenario -
#' # e.g. level order is good to be predefined if plotting several columns. And depends on graphical device (e.g. recording)
#' SurvMapper(mymap, fills = c("Dummy_status", "Dummy2"), Legend_title = c("Testing this", "And also this"),
#' col_scale = c("blue", "qualitative"))
SurvMapper2 <- function (data, fills, long = "long", lat = "lat", id = "id",
GEO_ID = "GEO_ID", bground = "isEEA", Legend_title, col_scale,
fill_levels = NULL, reverse_colours = FALSE, not_included = "Not included",
add_points = FALSE, pointdata = NULL, pointsize = NULL, pointshape = "*",
cex_factor = 1)
{
# Guard: a point overlay was requested without supplying point geometries.
if(add_points == TRUE & is.null(pointdata)){
stop("For adding points to the chloropleth map, please include geometries also for the points!")
}
# Register the Tahoma font for the graphics device.
# NOTE(review): windowsFonts()/windowsFont() exist only on Windows graphics
# devices - confirm this function is intended to be Windows-only.
windowsFonts(Tahoma = windowsFont("Tahoma"))
require(EcdcColors)
# for (i in fills) {
# Only the single-fill case is active in this body; the loop over several
# fill columns is commented out (here and at the matching "# }" at the end).
fill <- fills
Leg_title <- Legend_title
colour_scale <- col_scale
# Coerce the mapped variable to a factor; an explicit level order (if
# supplied) controls both colour assignment and legend order.
# NOTE(review): when add_points = FALSE, pointdata is NULL and the
# pointdata[[fill]] assignments silently turn it into a list - harmless
# since it is never used in that case, but worth confirming.
if (is.null(fill_levels)) {
data[[fill]] <- factor(data[[fill]])
pointdata[[fill]] <- factor(pointdata[[fill]])
}
else {
data[[fill]] <- factor(data[[fill]], levels = fill_levels)
pointdata[[fill]] <- factor(pointdata[[fill]], levels = fill_levels)
}
# Build the fill palette: one scale colour per factor level except the
# last level, which always gets the "No data" medium-light grey appended
# after the scale colours.
if (colour_scale == "green") {
map_cols <- SurvColors(col_scale = "green", n = length(levels(data[[fill]])) -
1)
map_cols[length(map_cols) + 1] <- SurvColors("grey",
grey_shade = "mediumlight")
}
else if (colour_scale == "blue") {
map_cols <- SurvColors(col_scale = "blue", n = length(levels(data[[fill]])) -
1)
map_cols[length(map_cols) + 1] <- SurvColors("grey",
grey_shade = "mediumlight")
}
else if (colour_scale == "red") {
map_cols <- SurvColors(col_scale = "red", n = length(levels(data[[fill]])) -
1)
map_cols[length(map_cols) + 1] <- SurvColors("grey",
grey_shade = "mediumlight")
}
else if (colour_scale == "qualitative") {
map_cols <- SurvColors(col_scale = "qualitative",
n = length(levels(data[[fill]]))-1)
map_cols[length(map_cols) + 1] <- SurvColors("grey",
grey_shade = "mediumlight")
}
else if (colour_scale == "hotcold") {
map_cols <- SurvColors(col_scale = "hotcold", n = length(levels(data[[fill]]))-
1)
map_cols[length(map_cols) + 1] <- SurvColors("grey",
grey_shade = "mediumlight")
}
else {
# NOTE(review): map_cols is not defined yet on this branch, so an
# unrecognised col_scale fails with "object 'map_cols' not found" -
# this looks like it was meant to pass through a user-supplied colour
# vector; confirm intended behaviour.
map_cols <- map_cols
map_cols[length(map_cols) + 1] <- SurvColors("grey",
grey_shade = "mediumlight")
}
# Reverse only the scale colours; the trailing "No data" grey stays last.
if (reverse_colours == TRUE) {
map_cols[1:length(map_cols) - 1] <- rev(map_cols[1:length(map_cols) -
1])
}
# Base choropleth: all polygons filled by `fill`, then countries with
# bground == 0 are re-drawn on top in light grey ("not included"
# background). Azimuthal equal-area projection framed on Europe; ggplot's
# own legend is suppressed because the legend is drawn manually with grid.
p1 <- ggplot(data = data, aes_string(x = long, y = lat,
fill = fill)) + geom_map(data = data, map = data,
aes_string(map_id = id), color = SurvColors("grey",
grey_shade = "dark"), size = 0.2) + theme_map() +
coord_map("azequalarea", xlim = c(-24, 44), ylim = c(34,
70), orientation = c(52, 10, 0)) + theme(legend.position = "none") +
geom_map(data = data[data[[bground]] == 0, ], map = data[data[[bground]] ==
0, ], aes_string(map_id = id), fill = SurvColors("grey",
grey_shade = "light"), color = SurvColors("grey",
grey_shade = "dark"), size = 0.2)
if (length(levels(data[[fill]])) < 9) {
p1 <- p1 + scale_fill_manual(values = map_cols[1:length(levels(data[[fill]]))])
}else {
stop("Too many categories for the map, please re-check and rescale!")
}
# This needs to somehow match the levels that exist for point with the levels for the polygons, in this case -
# not completely working yet! Cutoff around 90 for rgb works nice though.
# Point markers are drawn black on light polygon fills and white on dark
# fills, based on the mean RGB of the corresponding colour (threshold 100).
# NOTE(review): the pointsize argument is unused - point size is hard-coded
# to 8 * cex_factor; confirm whether pointsize should be honoured here.
if(add_points == TRUE){
p1 <- p1 + geom_point(data=pointdata, aes_string(x="coords.x1", y="coords.x2", col = fills),size = 8*cex_factor, shape = pointshape)+
scale_color_manual(values = unlist(lapply(map_cols[1:length(levels(data[[fill]]))], function(x) ifelse( mean(col2rgb(x)) > 100, "black", "white"))))
}
# Legend labels: the factor levels plus, unless suppressed via
# not_included = NULL, a final "Not included" entry with its own
# light-grey swatch colour.
if(!is.null(not_included)){
map_cols[length(map_cols) + 1] <- SurvColors("grey",
grey_shade = "light")
nfills <- c(levels(data[[fill]]), not_included)
}else{
nfills <- c(levels(data[[fill]]))
}
# Manual legend layout constants (normalised device coordinates).
xpos <- 0.01
xtextpos <- 0.056
textcex <- 2*cex_factor
lwd <- 1*cex_factor
grid.newpage()
v1 <- viewport(width = 1, height = 1)
print(p1, vp = v1)
# Legend title plus the first two entries (always drawn); each entry is a
# coloured swatch rectangle followed by its label text.
grid.rect(width = 0.04, height = 0.025, x = xpos + 0.002,
y = 0.9-0.03, just = "left", gp = gpar(fill = map_cols[1],
col = SurvColors("grey", grey_shade = "dark"),
lwd = lwd))
grid.rect(width = 0.04, height = 0.025, x = xpos + 0.002,
y = 0.865-0.03, just = "left", gp = gpar(fill = map_cols[2],
col = SurvColors("grey", grey_shade = "dark"),
lwd = lwd))
grid.text(Leg_title, x = xpos + 0.002, y = 0.93-0.03, just = c("left", "bottom"),
vp = v1, gp = gpar(fontsize = 9, fontfamily = "Tahoma",
cex = textcex))
grid.text(paste(nfills[1]), x = xtextpos,
y = 0.9-0.03, just = "left", vp = v1, gp = gpar(fontsize = 9,
fontfamily = "Tahoma", cex = textcex))
grid.text(paste(nfills[2]), x = xtextpos,
y = 0.865-0.03, just = "left", vp = v1, gp = gpar(fontsize = 9,
fontfamily = "Tahoma", cex = textcex))
# Remaining legend entries: one swatch + label per additional category,
# stepping down 0.035 NPC per row, up to 9 entries in total.
if (length(nfills) >= 3) {
grid.rect(width = 0.04, height = 0.025, x = xpos +
0.002, y = 0.83-0.03, just = "left", gp = gpar(fill = map_cols[3],
col = SurvColors("grey", grey_shade = "dark"),
lwd = lwd))
grid.text(paste(nfills[3]), x = xtextpos,
y = 0.83-0.03, just = "left", vp = v1, gp = gpar(fontsize = 9,
fontfamily = "Tahoma", cex = textcex))
}
if (length(nfills) >= 4) {
grid.rect(width = 0.04, height = 0.025, x = xpos +
0.002, y = 0.795-0.03, just = "left", gp = gpar(fill = map_cols[4],
col = SurvColors("grey", grey_shade = "dark"),
lwd = lwd))
grid.text(paste(nfills[4]), x = xtextpos,
y = 0.795-0.03, just = "left", vp = v1, gp = gpar(fontsize = 9,
fontfamily = "Tahoma", cex = textcex))
}
if (length(nfills) >= 5) {
grid.rect(width = 0.04, height = 0.025, x = xpos +
0.002, y = 0.76-0.03, just = "left", gp = gpar(fill = map_cols[5],
col = SurvColors("grey", grey_shade = "dark"),
lwd = lwd))
grid.text(paste(nfills[5]), x = xtextpos,
y = 0.76-0.03, just = "left", vp = v1, gp = gpar(fontsize = 9,
fontfamily = "Tahoma", cex = textcex))
}
if (length(nfills) >= 6) {
grid.rect(width = 0.04, height = 0.025, x = xpos +
0.002, y = 0.725-0.03, just = "left", gp = gpar(fill = map_cols[6],
col = SurvColors("grey", grey_shade = "dark"),
lwd = lwd))
grid.text(paste(nfills[6]), x = xtextpos,
y = 0.725-0.03, just = "left", vp = v1, gp = gpar(fontsize = 9,
fontfamily = "Tahoma", cex = textcex))
}
if (length(nfills) >= 7) {
grid.rect(width = 0.04, height = 0.025, x = xpos +
0.002, y = 0.69-0.03, just = "left", gp = gpar(fill = map_cols[7],
col = SurvColors("grey", grey_shade = "dark"),
lwd = lwd))
grid.text(paste(nfills[7]), x = xtextpos,
y = 0.69-0.03, just = "left", vp = v1, gp = gpar(fontsize = 9,
fontfamily = "Tahoma", cex = textcex))
}
if (length(nfills) >= 8) {
grid.rect(width = 0.04, height = 0.025, x = xpos +
0.002, y = 0.655-0.03, just = "left", gp = gpar(fill = map_cols[8],
col = SurvColors("grey", grey_shade = "dark"),
lwd = lwd))
grid.text(paste(nfills[8]), x = xtextpos,
y = 0.655-0.03, just = "left", vp = v1, gp = gpar(fontsize = 9,
fontfamily = "Tahoma", cex = textcex))
}
if (length(nfills) >= 9) {
grid.rect(width = 0.04, height = 0.025, x = xpos +
0.002, y = 0.620-0.03, just = "left", gp = gpar(fill = map_cols[9],
col = SurvColors("grey", grey_shade = "dark"),
lwd = lwd))
grid.text(paste(nfills[9]), x = xtextpos,
y = 0.620-0.03, just = "left", vp = v1, gp = gpar(fontsize = 9,
fontfamily = "Tahoma", cex = textcex))
}
# Inset swatches for Luxembourg and Malta (too small to be visible on the
# map itself). Each swatch is filled with that country's own category
# colour, with an optional "*" point marker drawn in a contrasting colour.
grid.rect(width = 0.04, height = 0.025, x = xpos + 0.002,
y = 0.55-0.03, just = "left", gp = gpar(fill = map_cols[levels(data[[fill]]) ==
unique(data[[fill]][data[[GEO_ID]] == "LU"])],
col = SurvColors("grey", grey_shade = "dark"),
lwd = lwd))
if(add_points == TRUE){
grid.text("*", x = xtextpos-0.03, y = 0.55-0.0375, just = "left",
vp = v1, gp = gpar(fontsize = 12, fontfamily = "sans",
cex = textcex, col =
ifelse(mean(col2rgb(map_cols[levels(data[[fill]]) ==
unique(data[[fill]][data[[GEO_ID]] == "LU"])])) > 100,
"black", "white")))
}
grid.text("Luxembourg", x = xtextpos, y = 0.55-0.03, just = "left",
vp = v1, gp = gpar(fontsize = 9, fontfamily = "Tahoma",
cex = textcex))
grid.rect(width = 0.04, height = 0.025, x = xpos + 0.002,
y = 0.515-0.03, just = "left", gp = gpar(fill = map_cols[levels(data[[fill]]) ==
unique(data[[fill]][data[[GEO_ID]] == "MT"])],
col = SurvColors("grey", grey_shade = "dark"),
lwd = lwd))
if(add_points == TRUE){
grid.text("*", x = xtextpos-0.03, y = 0.515-0.0375, just = "left",
vp = v1, gp = gpar(fontsize = 12, fontfamily = "sans",
cex = textcex, col =
ifelse(mean(col2rgb(map_cols[levels(data[[fill]]) ==
unique(data[[fill]][data[[GEO_ID]] == "MT"])])) > 100,
"black", "white")))
}
grid.text("Malta", x = xtextpos, y = 0.515-0.03, just = "left",
vp = v1, gp = gpar(fontsize = 9, fontfamily = "Tahoma",
cex = textcex))
# }
}
|
# elm() must reproduce the broom::tidy() output of base lm(), both with and
# without design-matrix reduction (reduce = FALSE / TRUE).
# NOTE: the original description said "eglm ... glm", but this test exercises
# elm()/lm(); the description now matches what is actually tested.
test_that("elm returns the same broom output as lm", {
  m1 <- lm(mpg ~ wt, data = mtcars)
  m2 <- eflm::elm(mpg ~ wt, data = mtcars, reduce = FALSE)
  m3 <- eflm::elm(mpg ~ wt, data = mtcars, reduce = TRUE)

  bm1 <- broom::tidy(m1)
  bm2 <- broom::tidy(m2)
  bm3 <- broom::tidy(m3)

  # Term labels and coefficient estimates agree.
  expect_equal(bm2$term, bm1$term)
  expect_equal(bm2$estimate, bm1$estimate)
  expect_equal(bm3$term, bm1$term)
  expect_equal(bm3$estimate, bm1$estimate)

  # Inference columns: standard errors, t-statistics and p-values.
  expect_equal(bm2$std.error, bm1$std.error)
  expect_equal(bm2$statistic, bm1$statistic)
  expect_equal(bm3$std.error, bm1$std.error)
  expect_equal(bm3$statistic, bm1$statistic)
  expect_equal(bm2$p.value, bm1$p.value)
  expect_equal(bm3$p.value, bm1$p.value)

  # Confidence intervals (conf.int = TRUE adds conf.low / conf.high).
  bm1 <- broom::tidy(m1, conf.int = TRUE)
  bm2 <- broom::tidy(m2, conf.int = TRUE)
  bm3 <- broom::tidy(m3, conf.int = TRUE)
  expect_equal(bm2$conf.low, bm1$conf.low)
  expect_equal(bm2$conf.high, bm1$conf.high)
  expect_equal(bm3$conf.low, bm1$conf.low)
  expect_equal(bm3$conf.high, bm1$conf.high)
})
# test_that("broom outputs not explicitly defined are the same as glm", {
# m1 <- lm(mpg ~ wt, data = mtcars)
# m2 <- eflm::elm(mpg ~ wt, data = mtcars, reduce = F)
# m3 <- eflm::elm(mpg ~ wt, data = mtcars, reduce = T)
#
# expect_equal(broom::augment(m2, newdata = mtcars), broom::augment(m1, newdata = mtcars))
# expect_equal(broom::augment(m3, newdata = mtcars), broom::augment(m1, newdata = mtcars))
# })
|
/tests-extra/test-elm-broom.R
|
permissive
|
saxenism/eflm
|
R
| false
| false
| 1,428
|
r
|
# elm() must reproduce the broom::tidy() output of base lm(), both with and
# without design-matrix reduction (reduce = FALSE / TRUE).
# NOTE: the original description said "eglm ... glm", but this test exercises
# elm()/lm(); the description now matches what is actually tested.
test_that("elm returns the same broom output as lm", {
  m1 <- lm(mpg ~ wt, data = mtcars)
  m2 <- eflm::elm(mpg ~ wt, data = mtcars, reduce = FALSE)
  m3 <- eflm::elm(mpg ~ wt, data = mtcars, reduce = TRUE)

  bm1 <- broom::tidy(m1)
  bm2 <- broom::tidy(m2)
  bm3 <- broom::tidy(m3)

  # Term labels and coefficient estimates agree.
  expect_equal(bm2$term, bm1$term)
  expect_equal(bm2$estimate, bm1$estimate)
  expect_equal(bm3$term, bm1$term)
  expect_equal(bm3$estimate, bm1$estimate)

  # Inference columns: standard errors, t-statistics and p-values.
  expect_equal(bm2$std.error, bm1$std.error)
  expect_equal(bm2$statistic, bm1$statistic)
  expect_equal(bm3$std.error, bm1$std.error)
  expect_equal(bm3$statistic, bm1$statistic)
  expect_equal(bm2$p.value, bm1$p.value)
  expect_equal(bm3$p.value, bm1$p.value)

  # Confidence intervals (conf.int = TRUE adds conf.low / conf.high).
  bm1 <- broom::tidy(m1, conf.int = TRUE)
  bm2 <- broom::tidy(m2, conf.int = TRUE)
  bm3 <- broom::tidy(m3, conf.int = TRUE)
  expect_equal(bm2$conf.low, bm1$conf.low)
  expect_equal(bm2$conf.high, bm1$conf.high)
  expect_equal(bm3$conf.low, bm1$conf.low)
  expect_equal(bm3$conf.high, bm1$conf.high)
})
# test_that("broom outputs not explicitly defined are the same as glm", {
# m1 <- lm(mpg ~ wt, data = mtcars)
# m2 <- eflm::elm(mpg ~ wt, data = mtcars, reduce = F)
# m3 <- eflm::elm(mpg ~ wt, data = mtcars, reduce = T)
#
# expect_equal(broom::augment(m2, newdata = mtcars), broom::augment(m1, newdata = mtcars))
# expect_equal(broom::augment(m3, newdata = mtcars), broom::augment(m1, newdata = mtcars))
# })
|
library(dplyr)

# Download and extract the household power consumption data set unless it
# is already present in the working directory.
zip_file <- "household_power_consumption.zip"
if (!file.exists(zip_file)) {
  download.file(
    "https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip",
    destfile = zip_file
  )
  unzip(zipfile = zip_file)
}

# Read the full data set ("?" marks missing values) and keep only the two
# days of interest: 2007-02-01 and 2007-02-02.
col_types <- c("character", "character", "numeric", "numeric",
               "numeric", "numeric", "numeric", "numeric")
pow_cons_raw <- read.csv("household_power_consumption.txt", header = TRUE,
                         sep = ";", colClasses = col_types, na.strings = "?")
pow_cons_raw <- tbl_df(pow_cons_raw)
pow_cons_raw$Date <- as.Date(pow_cons_raw$Date, format = "%d/%m/%Y")
pow_cons <- filter(pow_cons_raw,
                   Date %in% as.Date(c("2007-02-01", "2007-02-02")))

# Combine Date and Time into a single POSIXct datetime column and drop the
# original Date/Time columns (columns 1 and 2).
date_time <- as.POSIXct(strptime(paste(pow_cons$Date, pow_cons$Time),
                                 format = "%Y-%m-%d %H:%M:%S"))
pow_cons <- mutate(pow_cons[, -(1:2)], "datetime" = date_time)

# Use the C locale so the time-axis labels are rendered in English.
Sys.setlocale("LC_TIME", "C")

# Plot 2: global active power over time, written to a 480x480 PNG.
png("plot2.png", width = 480, height = 480)
with(pow_cons, plot(datetime, Global_active_power, type = "l",
                    ylab = "Global Active Power (kilowatts)"))
dev.off()
|
/plot2.R
|
no_license
|
Lemdat/ExplDatWeek1
|
R
| false
| false
| 1,306
|
r
|
library(dplyr)

# Download and extract the household power consumption data set unless it
# is already present in the working directory.
zip_file <- "household_power_consumption.zip"
if (!file.exists(zip_file)) {
  download.file(
    "https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip",
    destfile = zip_file
  )
  unzip(zipfile = zip_file)
}

# Read the full data set ("?" marks missing values) and keep only the two
# days of interest: 2007-02-01 and 2007-02-02.
col_types <- c("character", "character", "numeric", "numeric",
               "numeric", "numeric", "numeric", "numeric")
pow_cons_raw <- read.csv("household_power_consumption.txt", header = TRUE,
                         sep = ";", colClasses = col_types, na.strings = "?")
pow_cons_raw <- tbl_df(pow_cons_raw)
pow_cons_raw$Date <- as.Date(pow_cons_raw$Date, format = "%d/%m/%Y")
pow_cons <- filter(pow_cons_raw,
                   Date %in% as.Date(c("2007-02-01", "2007-02-02")))

# Combine Date and Time into a single POSIXct datetime column and drop the
# original Date/Time columns (columns 1 and 2).
date_time <- as.POSIXct(strptime(paste(pow_cons$Date, pow_cons$Time),
                                 format = "%Y-%m-%d %H:%M:%S"))
pow_cons <- mutate(pow_cons[, -(1:2)], "datetime" = date_time)

# Use the C locale so the time-axis labels are rendered in English.
Sys.setlocale("LC_TIME", "C")

# Plot 2: global active power over time, written to a 480x480 PNG.
png("plot2.png", width = 480, height = 480)
with(pow_cons, plot(datetime, Global_active_power, type = "l",
                    ylab = "Global Active Power (kilowatts)"))
dev.off()
|
#' @importFrom plyr count
## Counts the frequency of each intersection being looked at and sets up data for main bar plot.
## Also orders the data for the bar plot and matrix plot
## Count the frequency of each set intersection and set up the data for the
## main (intersection size) bar plot. Also orders the rows for the bar plot
## and the matrix plot.
##
## data: input data frame with one 0/1 membership column per set
## num_sets: number of sets used
## start_col: index of the first set column (kept for interface
##   compatibility; set columns are located by name instead)
## name_of_sets: names of the set columns in `data`
## nintersections: maximum number of intersections to keep (NA = all)
## mbar_color: bar colour(s) for the main bar plot
## order_mat: column names to order the intersections by
## aggregate: "degree" or "sets" aggregation of intersections
## cut: cutoff passed through to Get_aggregates()
## empty_intersects: non-NULL to also include unobserved intersections
## decrease: logical vector, decreasing order per ordering column
## Returns a data frame with one row per intersection: set indicators,
## freq, bar x-position (`x`) and bar colour (`color`).
Counter <- function(data, num_sets, start_col, name_of_sets, nintersections, mbar_color, order_mat,
                    aggregate, cut, empty_intersects, decrease){
  # Locate the set columns by name (match() is vectorised; this replaces
  # the original element-by-element loop into a list).
  set_cols <- match(name_of_sets, colnames(data))
  # One row per observed membership pattern, with its frequency in `freq`.
  Freqs <- data.frame(count(data[ , set_cols]))
  colnames(Freqs)[1:num_sets] <- name_of_sets
  # Optionally add every unobserved intersection with frequency 0.
  if(!is.null(empty_intersects)){
    empty <- rep(list(c(0, 1)), times = num_sets)
    empty <- data.frame(expand.grid(empty))
    colnames(empty) <- name_of_sets
    empty$freq <- 0
    all <- rbind(Freqs, empty)
    # Observed rows come first, so duplicated() keeps the real frequencies.
    Freqs <- data.frame(all[!duplicated(all[1:num_sets]), ], check.names = FALSE)
  }
  # Remove the universal empty set (no set membership at all).
  Freqs <- Freqs[!(rowSums(Freqs[ , 1:num_sets]) == 0), ]
  # Aggregation by degree
  if(tolower(aggregate) == "degree"){
    # Degree = number of sets participating in each intersection
    # (vectorised; the original computed this row by row in a loop).
    Freqs$degree <- unname(rowSums(Freqs[ , 1:num_sets]))
    # Order by the requested columns in sequence; the last sort applied
    # dominates, matching the original behaviour.
    order_cols <- match(order_mat, colnames(Freqs))
    for(i in seq_along(order_cols)){
      Freqs <- Freqs[order(Freqs[ , order_cols[i]], decreasing = decrease[i]), ]
    }
  }
  # Aggregation by sets
  else if(tolower(aggregate) == "sets"){
    Freqs <- Get_aggregates(Freqs, num_sets, order_mat, cut)
  }
  # Delete the helper column used only to order the data correctly
  # (e.g. `degree`); it is not needed to set up the bars.
  Freqs <- Freqs[ , -(num_sets + 2)]
  # Bar x-positions and colour (vectorised; seq_len() is also safe for the
  # zero-row edge case, where the original 1:nrow(Freqs) yielded c(1, 0)).
  Freqs$x <- seq_len(nrow(Freqs))
  Freqs$color <- mbar_color
  # Keep at most nintersections rows (NA means keep all).
  if(is.na(nintersections)){
    nintersections <- nrow(Freqs)
  }
  Freqs <- Freqs[1:nintersections, ]
  Freqs <- na.omit(Freqs)
  return(Freqs)
}
## Generate main bar plot
## Builds the ggplot bar chart of intersection sizes shown above the matrix
## in an UpSet plot.  Q (intersection queries), ebar (element queries) and
## customQ (custom queries) supply highlight data that is overlaid as extra
## bars (active queries) or jittered triangle points (passive queries).
## Returns the ggplot object (not yet converted to a grob).
## NOTE(review): the `ratios` argument is accepted but never used in this body.
Make_main_bar <- function(Main_bar_data, Q, show_num, ratios, customQ, number_angles,
                          ebar, ylabel, ymax, scale_intersections, text_scale, attribute_plots, mainbar.comma, intersection.size.comma){
  # Negative bottom margin pulls this panel down against the matrix plot;
  # less aggressive when attribute plots occupy extra space below.
  bottom_margin <- (-1)*0.65
  if(is.null(attribute_plots) == FALSE){
    bottom_margin <- (-1)*0.45
  }
  # text_scale is either a scalar or a vector of up to 6 per-label scales;
  # positions 1 (y-axis title), 2 (y ticks) and 6 (bar labels) apply here.
  if(length(text_scale) > 1 && length(text_scale) <= 6){
    y_axis_title_scale <- text_scale[1]
    y_axis_tick_label_scale <- text_scale[2]
    intersection_size_number_scale <- text_scale[6]
  }
  else{
    y_axis_title_scale <- text_scale
    y_axis_tick_label_scale <- text_scale
    intersection_size_number_scale <- text_scale
  }
  # Intersection-query data, ordered by bar position (NULL when absent/empty)
  if(is.null(Q) == F){
    inter_data <- Q
    if(nrow(inter_data) != 0){
      inter_data <- inter_data[order(inter_data$x), ]
    }
    else{inter_data <- NULL}
  }
  else{inter_data <- NULL}
  # Element-query data, ordered by bar position (NULL when absent/empty)
  if(is.null(ebar) == F){
    elem_data <- ebar
    if(nrow(elem_data) != 0){
      elem_data <- elem_data[order(elem_data$x), ]
    }
    else{elem_data <- NULL}
  }
  else{elem_data <- NULL}
  #ten_perc creates appropriate space above highest bar so number doesnt get cut off
  if(is.null(ymax)){
    ten_perc <- ((max(Main_bar_data$freq)) * 0.1)
    ymax <- max(Main_bar_data$freq) + ten_perc
  }
  # Append the transformation name to the default axis label when a
  # non-identity y scale is requested.
  if(ylabel == "Intersection Size" && scale_intersections != "identity"){
    ylabel <- paste("Intersection Size", paste0("( ", scale_intersections, " )"))
  }
  # Pre-transform the frequencies (and ymax) for log scales so the bar
  # heights and label values agree with the axis transformation.
  if(scale_intersections == "log2"){
    Main_bar_data$freq <- round(log2(Main_bar_data$freq), 2)
    ymax <- log2(ymax)
  }
  if(scale_intersections == "log10"){
    Main_bar_data$freq <- round(log10(Main_bar_data$freq), 2)
    ymax <- log10(ymax)
  }
  # Optional thousands-separator formatting of y-axis tick labels
  labels_arg <- waiver()
  if(mainbar.comma) labels_arg <- scales::comma
  # Base bar chart: one bar per intersection at integer x positions,
  # white background, x axis suppressed (the matrix below supplies it).
  Main_bar_plot <- (ggplot(data = Main_bar_data, aes_string(x = "x", y = "freq"))
                    #+ scale_y_continuous(trans = scale_intersections, labels = labels_arg, limits = c(0, ymax))
                    + geom_bar(stat = "identity", width = 0.6,
                               fill = Main_bar_data$color)
                    + scale_x_continuous(limits = c(0,(nrow(Main_bar_data)+1 )), expand = c(0,0),
                                         breaks = NULL)
                    + xlab(NULL) + ylab(ylabel) + labs(title = NULL)
                    + theme(panel.background = element_rect(fill = "white"),
                            plot.margin = unit(c(0.5,0.5,bottom_margin,0.5), "lines"), panel.border = element_blank(),
                            axis.title.y = element_text(vjust = -0.8,
                                                        size = 8.3*y_axis_title_scale),
                            axis.text.y = element_text(vjust=0.3,size=7*y_axis_tick_label_scale))
                    + scale_y_continuous(trans = scale_intersections, labels = labels_arg, limits = c(0, ymax), expand = c(0, 0))
  )
  # Optionally print each bar's size above it, with or without commas
  if((show_num == "yes") || (show_num == "Yes")){
    if(!intersection.size.comma){
      Main_bar_plot <- (Main_bar_plot + geom_text(aes_string(label = "freq"), size = 2.2*intersection_size_number_scale,
                                                  vjust = 0.5, hjust = -0.1,
                                                  angle = number_angles, colour = Main_bar_data$color))
    } else{
      Main_bar_plot <- (Main_bar_plot + geom_text(aes(label = scales::comma(freq)), size = 2.2*intersection_size_number_scale,
                                                  vjust = 0.5, hjust = -0.1,
                                                  angle = number_angles, colour = Main_bar_data$color))
    }
  }
  # Split each query data set on its `act` flag: active (b*) rows become
  # overlay bars, passive (p*) rows become jittered triangle points.
  bInterDat <- NULL
  pInterDat <- NULL
  bCustomDat <- NULL
  pCustomDat <- NULL
  bElemDat <- NULL
  pElemDat <- NULL
  if(is.null(elem_data) == F){
    bElemDat <- elem_data[which(elem_data$act == T), ]
    bElemDat <- bElemDat[order(bElemDat$x), ]
    pElemDat <- elem_data[which(elem_data$act == F), ]
  }
  if(is.null(inter_data) == F){
    bInterDat <- inter_data[which(inter_data$act == T), ]
    bInterDat <- bInterDat[order(bInterDat$x), ]
    pInterDat <- inter_data[which(inter_data$act == F), ]
  }
  if(length(customQ) != 0){
    pCustomDat <- customQ[which(customQ$act == F), ]
    bCustomDat <- customQ[which(customQ$act == T), ]
    bCustomDat <- bCustomDat[order(bCustomDat$x), ]
  }
  # Overlay bars for active queries, drawn on top of the base bars
  # (custom queries use their own freq2/color2 columns).
  if(length(bInterDat) != 0){
    Main_bar_plot <- Main_bar_plot + geom_bar(data = bInterDat,
                                              aes_string(x="x", y = "freq"),
                                              fill = bInterDat$color,
                                              stat = "identity", position = "identity", width = 0.6)
  }
  if(length(bElemDat) != 0){
    Main_bar_plot <- Main_bar_plot + geom_bar(data = bElemDat,
                                              aes_string(x="x", y = "freq"),
                                              fill = bElemDat$color,
                                              stat = "identity", position = "identity", width = 0.6)
  }
  if(length(bCustomDat) != 0){
    Main_bar_plot <- (Main_bar_plot + geom_bar(data = bCustomDat, aes_string(x="x", y = "freq2"),
                                               fill = bCustomDat$color2,
                                               stat = "identity", position ="identity", width = 0.6))
  }
  # Jittered triangle markers (shape 17) for passive queries
  if(length(pCustomDat) != 0){
    Main_bar_plot <- (Main_bar_plot + geom_point(data = pCustomDat, aes_string(x="x", y = "freq2"), colour = pCustomDat$color2,
                                                 size = 2, shape = 17, position = position_jitter(width = 0.2, height = 0.2)))
  }
  if(length(pInterDat) != 0){
    Main_bar_plot <- (Main_bar_plot + geom_point(data = pInterDat, aes_string(x="x", y = "freq"),
                                                 position = position_jitter(width = 0.2, height = 0.2),
                                                 colour = pInterDat$color, size = 2, shape = 17))
  }
  if(length(pElemDat) != 0){
    Main_bar_plot <- (Main_bar_plot + geom_point(data = pElemDat, aes_string(x="x", y = "freq"),
                                                 position = position_jitter(width = 0.2, height = 0.2),
                                                 colour = pElemDat$color, size = 2, shape = 17))
  }
  # Solid axis lines at x = 0 and y = 0
  Main_bar_plot <- (Main_bar_plot
                    + geom_vline(xintercept = 0, color = "gray0")
                    + geom_hline(yintercept = 0, color = "gray0"))
  # Main_bar_plot <- ggplotGrob(Main_bar_plot)
  return(Main_bar_plot)
}
|
/R/MainBar.R
|
no_license
|
mariobecerra/UpSetR2
|
R
| false
| false
| 8,686
|
r
|
#' @importFrom plyr count
## Counts the frequency of each intersection being looked at and sets up data for main bar plot.
## Also orders the data for the bar plot and matrix plot.
##
## Arguments as used below:
##   data           -- data frame with one 0/1 membership column per set
##   num_sets       -- number of sets plotted
##   start_col      -- first set column index (only used to compute end_col, which is itself unused afterwards)
##   name_of_sets   -- names of the set columns in `data`
##   nintersections -- maximum number of intersections kept (NA = keep all)
##   mbar_color     -- bar colour applied to every intersection
##   order_mat      -- column name(s) of Freqs to sort by (e.g. "freq", "degree")
##   aggregate      -- "degree" or "sets": selects the ordering strategy
##   cut            -- forwarded to Get_aggregates (defined elsewhere in the package)
##   empty_intersects -- non-NULL adds all-zero-frequency intersections
##   decrease       -- logical vector, one per order_mat entry, for sort direction
## Returns: data frame Freqs with one row per intersection, its frequency,
## a plotting x position and a colour, ordered for plotting.
Counter <- function(data, num_sets, start_col, name_of_sets, nintersections, mbar_color, order_mat,
                    aggregate, cut, empty_intersects, decrease){
  temp_data <- list()
  Freqs <- data.frame()
  end_col <- as.numeric(((start_col + num_sets) -1))
  #gets indices of columns containing sets used
  for( i in 1:num_sets){
    temp_data[i] <- match(name_of_sets[i], colnames(data))
  }
  # plyr::count tallies each distinct 0/1 membership combination;
  # the resulting last column is named "freq"
  Freqs <- data.frame(count(data[ ,as.integer(temp_data)]))
  colnames(Freqs)[1:num_sets] <- name_of_sets
  #Adds on empty intersections if option is selected
  if(is.null(empty_intersects) == F){
    # expand.grid over c(0,1) per set enumerates every possible intersection
    empty <- rep(list(c(0,1)), times = num_sets)
    empty <- data.frame(expand.grid(empty))
    colnames(empty) <- name_of_sets
    empty$freq <- 0
    all <- rbind(Freqs, empty)
    # observed rows come first, so duplicated() keeps the real frequencies
    Freqs <- data.frame(all[!duplicated(all[1:num_sets]), ], check.names = F)
  }
  #Remove universal empty set
  Freqs <- Freqs[!(rowSums(Freqs[ ,1:num_sets]) == 0), ]
  #Aggregation by degree
  if(tolower(aggregate) == "degree"){
    # degree = number of sets participating in the intersection
    for(i in 1:nrow(Freqs)){
      Freqs$degree[i] <- rowSums(Freqs[ i ,1:num_sets])
    }
    order_cols <- c()
    for(i in 1:length(order_mat)){
      order_cols[i] <- match(order_mat[i], colnames(Freqs))
    }
    # if(length(order_cols)==2 && order_cols[1]>order_cols[2]){decrease <- rev(decrease)}
    # sort by each requested column in turn; the last sort dominates
    for(i in 1:length(order_cols)){
      logic <- decrease[i]
      Freqs <- Freqs[order(Freqs[ , order_cols[i]], decreasing = logic), ]
    }
  }
  #Aggregation by sets
  else if(tolower(aggregate) == "sets")
  {
    Freqs <- Get_aggregates(Freqs, num_sets, order_mat, cut)
  }
  #delete rows used to order data correctly. Not needed to set up bars.
  # Column num_sets + 2 is the ordering column added above (e.g. "degree").
  # NOTE(review): assumes such a column exists, i.e. aggregate was "degree" or "sets".
  delete_row <- (num_sets + 2)
  Freqs <- Freqs[ , -delete_row]
  # Assign plotting positions; Freqs$color reassigns the whole column on
  # every pass (redundant but harmless).
  for( i in 1:nrow(Freqs)){
    Freqs$x[i] <- i
    Freqs$color <- mbar_color
  }
  if(is.na(nintersections)){
    nintersections = nrow(Freqs)
  }
  # Over-indexing past nrow(Freqs) creates NA rows, which na.omit removes,
  # so the result has at most nintersections rows.
  Freqs <- Freqs[1:nintersections, ]
  Freqs <- na.omit(Freqs)
  return(Freqs)
}
## Generate main bar plot
## Builds the ggplot bar chart of intersection sizes shown above the matrix
## in an UpSet plot.  Q (intersection queries), ebar (element queries) and
## customQ (custom queries) supply highlight data that is overlaid as extra
## bars (active queries) or jittered triangle points (passive queries).
## Returns the ggplot object (not yet converted to a grob).
## NOTE(review): the `ratios` argument is accepted but never used in this body.
Make_main_bar <- function(Main_bar_data, Q, show_num, ratios, customQ, number_angles,
                          ebar, ylabel, ymax, scale_intersections, text_scale, attribute_plots, mainbar.comma, intersection.size.comma){
  # Negative bottom margin pulls this panel down against the matrix plot;
  # less aggressive when attribute plots occupy extra space below.
  bottom_margin <- (-1)*0.65
  if(is.null(attribute_plots) == FALSE){
    bottom_margin <- (-1)*0.45
  }
  # text_scale is either a scalar or a vector of up to 6 per-label scales;
  # positions 1 (y-axis title), 2 (y ticks) and 6 (bar labels) apply here.
  if(length(text_scale) > 1 && length(text_scale) <= 6){
    y_axis_title_scale <- text_scale[1]
    y_axis_tick_label_scale <- text_scale[2]
    intersection_size_number_scale <- text_scale[6]
  }
  else{
    y_axis_title_scale <- text_scale
    y_axis_tick_label_scale <- text_scale
    intersection_size_number_scale <- text_scale
  }
  # Intersection-query data, ordered by bar position (NULL when absent/empty)
  if(is.null(Q) == F){
    inter_data <- Q
    if(nrow(inter_data) != 0){
      inter_data <- inter_data[order(inter_data$x), ]
    }
    else{inter_data <- NULL}
  }
  else{inter_data <- NULL}
  # Element-query data, ordered by bar position (NULL when absent/empty)
  if(is.null(ebar) == F){
    elem_data <- ebar
    if(nrow(elem_data) != 0){
      elem_data <- elem_data[order(elem_data$x), ]
    }
    else{elem_data <- NULL}
  }
  else{elem_data <- NULL}
  #ten_perc creates appropriate space above highest bar so number doesnt get cut off
  if(is.null(ymax)){
    ten_perc <- ((max(Main_bar_data$freq)) * 0.1)
    ymax <- max(Main_bar_data$freq) + ten_perc
  }
  # Append the transformation name to the default axis label when a
  # non-identity y scale is requested.
  if(ylabel == "Intersection Size" && scale_intersections != "identity"){
    ylabel <- paste("Intersection Size", paste0("( ", scale_intersections, " )"))
  }
  # Pre-transform the frequencies (and ymax) for log scales so the bar
  # heights and label values agree with the axis transformation.
  if(scale_intersections == "log2"){
    Main_bar_data$freq <- round(log2(Main_bar_data$freq), 2)
    ymax <- log2(ymax)
  }
  if(scale_intersections == "log10"){
    Main_bar_data$freq <- round(log10(Main_bar_data$freq), 2)
    ymax <- log10(ymax)
  }
  # Optional thousands-separator formatting of y-axis tick labels
  labels_arg <- waiver()
  if(mainbar.comma) labels_arg <- scales::comma
  # Base bar chart: one bar per intersection at integer x positions,
  # white background, x axis suppressed (the matrix below supplies it).
  Main_bar_plot <- (ggplot(data = Main_bar_data, aes_string(x = "x", y = "freq"))
                    #+ scale_y_continuous(trans = scale_intersections, labels = labels_arg, limits = c(0, ymax))
                    + geom_bar(stat = "identity", width = 0.6,
                               fill = Main_bar_data$color)
                    + scale_x_continuous(limits = c(0,(nrow(Main_bar_data)+1 )), expand = c(0,0),
                                         breaks = NULL)
                    + xlab(NULL) + ylab(ylabel) + labs(title = NULL)
                    + theme(panel.background = element_rect(fill = "white"),
                            plot.margin = unit(c(0.5,0.5,bottom_margin,0.5), "lines"), panel.border = element_blank(),
                            axis.title.y = element_text(vjust = -0.8,
                                                        size = 8.3*y_axis_title_scale),
                            axis.text.y = element_text(vjust=0.3,size=7*y_axis_tick_label_scale))
                    + scale_y_continuous(trans = scale_intersections, labels = labels_arg, limits = c(0, ymax), expand = c(0, 0))
  )
  # Optionally print each bar's size above it, with or without commas
  if((show_num == "yes") || (show_num == "Yes")){
    if(!intersection.size.comma){
      Main_bar_plot <- (Main_bar_plot + geom_text(aes_string(label = "freq"), size = 2.2*intersection_size_number_scale,
                                                  vjust = 0.5, hjust = -0.1,
                                                  angle = number_angles, colour = Main_bar_data$color))
    } else{
      Main_bar_plot <- (Main_bar_plot + geom_text(aes(label = scales::comma(freq)), size = 2.2*intersection_size_number_scale,
                                                  vjust = 0.5, hjust = -0.1,
                                                  angle = number_angles, colour = Main_bar_data$color))
    }
  }
  # Split each query data set on its `act` flag: active (b*) rows become
  # overlay bars, passive (p*) rows become jittered triangle points.
  bInterDat <- NULL
  pInterDat <- NULL
  bCustomDat <- NULL
  pCustomDat <- NULL
  bElemDat <- NULL
  pElemDat <- NULL
  if(is.null(elem_data) == F){
    bElemDat <- elem_data[which(elem_data$act == T), ]
    bElemDat <- bElemDat[order(bElemDat$x), ]
    pElemDat <- elem_data[which(elem_data$act == F), ]
  }
  if(is.null(inter_data) == F){
    bInterDat <- inter_data[which(inter_data$act == T), ]
    bInterDat <- bInterDat[order(bInterDat$x), ]
    pInterDat <- inter_data[which(inter_data$act == F), ]
  }
  if(length(customQ) != 0){
    pCustomDat <- customQ[which(customQ$act == F), ]
    bCustomDat <- customQ[which(customQ$act == T), ]
    bCustomDat <- bCustomDat[order(bCustomDat$x), ]
  }
  # Overlay bars for active queries, drawn on top of the base bars
  # (custom queries use their own freq2/color2 columns).
  if(length(bInterDat) != 0){
    Main_bar_plot <- Main_bar_plot + geom_bar(data = bInterDat,
                                              aes_string(x="x", y = "freq"),
                                              fill = bInterDat$color,
                                              stat = "identity", position = "identity", width = 0.6)
  }
  if(length(bElemDat) != 0){
    Main_bar_plot <- Main_bar_plot + geom_bar(data = bElemDat,
                                              aes_string(x="x", y = "freq"),
                                              fill = bElemDat$color,
                                              stat = "identity", position = "identity", width = 0.6)
  }
  if(length(bCustomDat) != 0){
    Main_bar_plot <- (Main_bar_plot + geom_bar(data = bCustomDat, aes_string(x="x", y = "freq2"),
                                               fill = bCustomDat$color2,
                                               stat = "identity", position ="identity", width = 0.6))
  }
  # Jittered triangle markers (shape 17) for passive queries
  if(length(pCustomDat) != 0){
    Main_bar_plot <- (Main_bar_plot + geom_point(data = pCustomDat, aes_string(x="x", y = "freq2"), colour = pCustomDat$color2,
                                                 size = 2, shape = 17, position = position_jitter(width = 0.2, height = 0.2)))
  }
  if(length(pInterDat) != 0){
    Main_bar_plot <- (Main_bar_plot + geom_point(data = pInterDat, aes_string(x="x", y = "freq"),
                                                 position = position_jitter(width = 0.2, height = 0.2),
                                                 colour = pInterDat$color, size = 2, shape = 17))
  }
  if(length(pElemDat) != 0){
    Main_bar_plot <- (Main_bar_plot + geom_point(data = pElemDat, aes_string(x="x", y = "freq"),
                                                 position = position_jitter(width = 0.2, height = 0.2),
                                                 colour = pElemDat$color, size = 2, shape = 17))
  }
  # Solid axis lines at x = 0 and y = 0
  Main_bar_plot <- (Main_bar_plot
                    + geom_vline(xintercept = 0, color = "gray0")
                    + geom_hline(yintercept = 0, color = "gray0"))
  # Main_bar_plot <- ggplotGrob(Main_bar_plot)
  return(Main_bar_plot)
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/clean.df.R
\name{clean.df}
\alias{clean.df}
\title{Clean the names of a data frame}
\usage{
clean.df(df, keep.underscore = TRUE, pidmpattern = "bannerid|banner_id")
}
\arguments{
\item{df}{a data frame}
\item{keep.underscore}{a logical value indicating whether underscores be kept or removed}
\item{pidmpattern}{a regular expression to be matched against the names of df. Set to \code{NULL} to turn off pattern matching.}
}
\value{
a data frame of class tbl_df.
}
\description{
Clean up a data frame by changing all column names to lower case, removing spaces and punctuation.
}
\details{
If no column named \code{pidm} is found and pidmpattern is not \code{NULL}, then we look if there's only
}
|
/man/clean.df.Rd
|
no_license
|
crazybilly/muadc
|
R
| false
| true
| 776
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/clean.df.R
\name{clean.df}
\alias{clean.df}
\title{Clean the names of a data frame}
\usage{
clean.df(df, keep.underscore = TRUE, pidmpattern = "bannerid|banner_id")
}
\arguments{
\item{df}{a data frame}
\item{keep.underscore}{a logical value indicating whether underscores be kept or removed}
\item{pidmpattern}{a regular expression to be matched against the names of df. Set to \code{NULL} to turn off pattern matching.}
}
\value{
a data frame of class tbl_df.
}
\description{
Clean up a data frame by changing all column names to lower case, removing spaces and punctuation.
}
\details{
If no column named \code{pidm} is found and pidmpattern is not \code{NULL}, then we look if there's only
}
|
#!/usr/bin/Rscript
## #!/usr/local/bin/Rscript
###
### run.deconv.rl.ls.S.R
###
### 2018.03.29 M.Morii
### R-L method with line search
###
### Command-line driver for image deconvolution by the Richardson-Lucy
### method with line search.  Expects six positional arguments:
###   1) respdir      -- directory holding the response (model) matrix
###   2) datafile     -- file containing the observed data vector
###   3) outdir       -- output directory (created if missing)
###   4) outfile.head -- prefix for output file names
###   5) tol          -- convergence tolerance for the solver
###   6) nstep        -- maximum number of iterations
### Writes the reconstructed 60x60 image, rescaled to the total photon
### count, to <outdir>/<outfile.head>.fits.
###
# Paths to helper libraries supplying printf, LoadModel, LoadData,
# SolveByRLLS, writeFITSim, etc.
mitooldir = "/home/morii/work/github/moriiism/mitool"
srtdir = "/home/morii/work/github/moriiism/srt"
###
source( paste(mitooldir, "script/mirlib/iolib.R", sep="/") )
source( paste(srtdir, "script/rlib/rl.R", sep="/") )
###
###
###
# Parse and echo the positional command-line arguments.
args <- commandArgs(TRUE)
respdir = args[1]
datafile = args[2]
outdir = args[3]
outfile.head = args[4]
tol = as.numeric(args[5])
nstep = as.integer(args[6])
printf("respdir = %s\n", respdir)
printf("datafile = %s\n", datafile)
printf("outdir = %s\n", outdir)
printf("outfile.head = %s\n", outfile.head)
printf("tol = %e\n", tol)
printf("nstep = %d\n", nstep)
dir.create(outdir, recursive = TRUE)
# Load the response matrix and observed data vector; Nph = total counts.
R.mat = LoadModel(respdir)
printf("ncol(R.mat) = %d\n", ncol(R.mat))
printf("nrow(R.mat) = %d\n", nrow(R.mat))
D.vec = LoadData(datafile)
printf("length(D.vec) = %d\n", length(D.vec))
Nph = sum(D.vec)
printf("Nph = %e\n", Nph)
# Image dimensions are fixed at 60x60; start from a flat (uniform,
# unit-normalised) image and run the R-L solver with line search.
ncol = 60
nrow = 60
nx.vec = ncol * nrow
x.vec = rep( 1.0 / nx.vec, nx.vec)
x.vec = SolveByRLLS(x.vec, Nph, D.vec, R.mat, tol, nstep, outdir, outfile.head)
# Rescale the solution to photon counts and write it out as an image file.
outfile = sprintf("%s/%s.fits", outdir, outfile.head)
array = array(Nph * x.vec, dim=c(ncol, nrow))
writeFITSim(array, file=outfile)
|
/script/run.deconv.rl.ls.S.R
|
no_license
|
moriiism/srt
|
R
| false
| false
| 1,342
|
r
|
#!/usr/bin/Rscript
## #!/usr/local/bin/Rscript
###
### run.deconv.rl.ls.S.R
###
### 2018.03.29 M.Morii
### R-L method with line search
###
### Command-line driver for image deconvolution by the Richardson-Lucy
### method with line search.  Expects six positional arguments:
###   1) respdir      -- directory holding the response (model) matrix
###   2) datafile     -- file containing the observed data vector
###   3) outdir       -- output directory (created if missing)
###   4) outfile.head -- prefix for output file names
###   5) tol          -- convergence tolerance for the solver
###   6) nstep        -- maximum number of iterations
### Writes the reconstructed 60x60 image, rescaled to the total photon
### count, to <outdir>/<outfile.head>.fits.
###
# Paths to helper libraries supplying printf, LoadModel, LoadData,
# SolveByRLLS, writeFITSim, etc.
mitooldir = "/home/morii/work/github/moriiism/mitool"
srtdir = "/home/morii/work/github/moriiism/srt"
###
source( paste(mitooldir, "script/mirlib/iolib.R", sep="/") )
source( paste(srtdir, "script/rlib/rl.R", sep="/") )
###
###
###
# Parse and echo the positional command-line arguments.
args <- commandArgs(TRUE)
respdir = args[1]
datafile = args[2]
outdir = args[3]
outfile.head = args[4]
tol = as.numeric(args[5])
nstep = as.integer(args[6])
printf("respdir = %s\n", respdir)
printf("datafile = %s\n", datafile)
printf("outdir = %s\n", outdir)
printf("outfile.head = %s\n", outfile.head)
printf("tol = %e\n", tol)
printf("nstep = %d\n", nstep)
dir.create(outdir, recursive = TRUE)
# Load the response matrix and observed data vector; Nph = total counts.
R.mat = LoadModel(respdir)
printf("ncol(R.mat) = %d\n", ncol(R.mat))
printf("nrow(R.mat) = %d\n", nrow(R.mat))
D.vec = LoadData(datafile)
printf("length(D.vec) = %d\n", length(D.vec))
Nph = sum(D.vec)
printf("Nph = %e\n", Nph)
# Image dimensions are fixed at 60x60; start from a flat (uniform,
# unit-normalised) image and run the R-L solver with line search.
ncol = 60
nrow = 60
nx.vec = ncol * nrow
x.vec = rep( 1.0 / nx.vec, nx.vec)
x.vec = SolveByRLLS(x.vec, Nph, D.vec, R.mat, tol, nstep, outdir, outfile.head)
# Rescale the solution to photon counts and write it out as an image file.
outfile = sprintf("%s/%s.fits", outdir, outfile.head)
array = array(Nph * x.vec, dim=c(ncol, nrow))
writeFITSim(array, file=outfile)
|
#' @title Statistical test for Detrended Fluctuation Analysis.
#'
#' @description This function performs the statistical test for the long-range correlation exponents obtained by the Detrended Fluctuation Analysis method.
#'
#' @details This function includes the following measures: alpha_dfa, se_alpha_dfa, r2_alpha_dfa, min_test, max_test, mean_test, median_test, sd_test, skewness_test, kurtosis_test, Jarquebera_test_pvalue, CI_lower_test, CI_upper_test
#'
#' @param y A vector containing a univariate time series.
#'
#' @param npoints The number of different window sizes that will be used to estimate the Fluctuation function in each zone. See nonlinearTseries package.
#'
#' @param rep An integer value indicating the number of repetitions.
#'
#' @param ts.sim A logical value. If TRUE, the confidence interval for alpha_dfa is obtained from a White Gaussian Noise. If FALSE, the confidence interval for alpha_dfa is obtained from the shuffling of the original series.
#'
#' @param prob A numeric value indicating the quantile of probability to be used in estimating confidence intervals by N(0,1).
#'
#' @return An rbind matrix containing "alpha_dfa", "se_alpha_dfa", "r2_alpha_dfa", "min_test", "max_test", "mean_test", "median_test", "sd_test", "skewness_test", "kurtosis_test", "Jarquebera_test_pvalue", and confidence interval: "CI_lower_test", "CI_upper_test".
#'
#' @examples
#' y=rnorm(1000)
#'dfa.test(y, npoints=15, rep=10,ts.sim="TRUE", prob=.95)
#'
#' @references
#' KRISTOUFEK, L. Rescaled Range Analysis and Detrended Fluctuation Analysis: Finite Sample Properties and Confidence Intervals. AUCO Czech Economic Review, v.4,n.3, p.315-329, 2010.
#'
#' @importFrom fgpt fyshuffle
#' @importFrom nonlinearTseries dfa
#' @importFrom stats coef lm rnorm qnorm sd
#' @importFrom PerformanceAnalytics skewness kurtosis
#' @importFrom tseries jarque.bera.test
#'
#' @export
dfa.test <- function(y, npoints, rep, ts.sim, prob){
  if(!(is.null(y) || is.numeric(y) || is.logical(y))){
    stop("Time series must be numeric")
  }
  # Half-width multiplier for the confidence interval: an N(0,1) quantile,
  # shrunk by 1/rep for small samples (rep <= 30), exactly as before.
  # NOTE(review): if rep <= 0 `error` is left undefined and the function
  # fails when the summary is built; this matches the original behaviour.
  if(rep > 30){
    error <- stats::qnorm(c(prob+((1-prob)/2)), 0, 1)
  }
  if(rep > 0 & rep <= 30){
    error <- stats::qnorm(c(prob+((1-prob)/2)), 0, 1)/rep
  }
  # Fit DFA to one series and return its scaling exponent (slope of the
  # log10 fluctuation function vs. log10 window size regression), the
  # slope's standard error, and the regression R^2.
  fit_dfa <- function(series){
    dfa <- nonlinearTseries::dfa(series,
                                 window.size.range = c(4, round(length(y)/4, 0)),
                                 npoints = npoints,
                                 do.plot = FALSE)
    model <- stats::lm(log10(dfa$fluctuation.function) ~ log10(dfa$window.sizes))
    c(alpha = stats::coef(summary(model))[2, "Estimate"],
      se = stats::coef(summary(model))[2, "Std. Error"],
      r2 = summary(model)$r.squared)
  }
  # Assemble the result matrix: the first three rows describe the observed
  # series; the remaining rows summarise alpha over the surrogate series.
  summarise_alpha <- function(alpha_dfa, se_alpha_dfa, r2_alpha_dfa){
    surr <- alpha_dfa[2:length(alpha_dfa)]
    rbind(alpha_dfa = alpha_dfa[1],
          se_alpha_dfa = se_alpha_dfa[1],
          r2_alpha_dfa = r2_alpha_dfa[1],
          min_test = min(surr),
          max_test = max(surr),
          mean_test = mean(surr),
          median_test = stats::median(surr),
          sd_test = stats::sd(surr),
          skewness_test = PerformanceAnalytics::skewness(surr, method = "moment"),
          kurtosis_test = PerformanceAnalytics::kurtosis(surr, method = "moment"),
          Jarquebera_test_pvalue = tseries::jarque.bera.test(surr)$p.value,
          CI_lower_test = mean(surr) - error*stats::sd(surr),
          CI_upper_test = mean(surr) + error*stats::sd(surr))
  }
  # Build the surrogate series: white Gaussian noise (ts.sim TRUE) or random
  # shuffles of the original series (ts.sim FALSE).  The `==` comparison
  # accepts both logical TRUE/FALSE and the strings "TRUE"/"FALSE", as the
  # original implementation did; any other value is now an explicit error
  # (the original silently returned NULL).
  if(ts.sim == 'TRUE'){
    surrogates <- lapply(seq_len(rep), function(i) stats::rnorm(length(y), mean = 0, sd = 1))
  } else if(ts.sim == 'FALSE'){
    surrogates <- lapply(seq_len(rep), function(i) y[fgpt::fyshuffle(1:length(y))])
  } else {
    stop("ts.sim must be TRUE or FALSE")
  }
  # Fit the observed series first, then each surrogate, in the same order as
  # the original loops so that RNG consumption is unchanged.
  fits <- vapply(c(list(y), surrogates), fit_dfa, c(alpha = 0, se = 0, r2 = 0))
  summarise_alpha(fits["alpha", ], fits["se", ], fits["r2", ])
}
|
/R/DFA_test.r
|
no_license
|
cran/GMZTests
|
R
| false
| false
| 6,149
|
r
|
#' @title Statistical test for Detrended Fluctuation Analysis.
#'
#' @description This function performs the statistical test for the long-range correlation exponents obtained by the Detrended Fluctuation Analysis method.
#'
#' @details This function includes the following measures: alpha_dfa, se_alpha_dfa, r2_alpha_dfa, min_test, max_test, mean_test, median_test, sd_test, skewness_test, kurtosis_test, Jarquebera_test_pvalue, CI_lower_test, CI_upper_test
#'
#' @param y A vector containing a univariate time series.
#'
#' @param npoints The number of different window sizes that will be used to estimate the Fluctuation function in each zone. See nonlinearTseries package.
#'
#' @param rep An integer value indicating the number of repetitions.
#'
#' @param ts.sim A logical value. If TRUE, the confidence interval for alpha_dfa is obtained from a White Gaussian Noise. If FALSE, the confidence interval for alpha_dfa is obtained from the shuffling of the original series.
#'
#' @param prob A numeric value indicating the quantile of probability to be used in estimating confidence intervals by N(0,1).
#'
#' @return An rbind matrix containing "alpha_dfa", "se_alpha_dfa", "r2_alpha_dfa", "min_test", "max_test", "mean_test", "median_test", "sd_test", "skewness_test", "kurtosis_test", "Jarquebera_test_pvalue", and confidence interval: "CI_lower_test", "CI_upper_test".
#'
#' @examples
#' y=rnorm(1000)
#'dfa.test(y, npoints=15, rep=10,ts.sim="TRUE", prob=.95)
#'
#' @references
#' KRISTOUFEK, L. Rescaled Range Analysis and Detrended Fluctuation Analysis: Finite Sample Properties and Confidence Intervals. AUCO Czech Economic Review, v.4,n.3, p.315-329, 2010.
#'
#' @importFrom fgpt fyshuffle
#' @importFrom nonlinearTseries dfa
#' @importFrom stats coef lm rnorm qnorm sd
#' @importFrom PerformanceAnalytics skewness kurtosis
#' @importFrom tseries jarque.bera.test
#'
#' @export
dfa.test <- function(y, npoints, rep, ts.sim, prob){
  if(!(is.null(y) || is.numeric(y) || is.logical(y))){
    stop("Time series must be numeric")
  }
  # Half-width multiplier for the confidence interval: an N(0,1) quantile,
  # shrunk by 1/rep for small samples (rep <= 30), exactly as before.
  # NOTE(review): if rep <= 0 `error` is left undefined and the function
  # fails when the summary is built; this matches the original behaviour.
  if(rep > 30){
    error <- stats::qnorm(c(prob+((1-prob)/2)), 0, 1)
  }
  if(rep > 0 & rep <= 30){
    error <- stats::qnorm(c(prob+((1-prob)/2)), 0, 1)/rep
  }
  # Fit DFA to one series and return its scaling exponent (slope of the
  # log10 fluctuation function vs. log10 window size regression), the
  # slope's standard error, and the regression R^2.
  fit_dfa <- function(series){
    dfa <- nonlinearTseries::dfa(series,
                                 window.size.range = c(4, round(length(y)/4, 0)),
                                 npoints = npoints,
                                 do.plot = FALSE)
    model <- stats::lm(log10(dfa$fluctuation.function) ~ log10(dfa$window.sizes))
    c(alpha = stats::coef(summary(model))[2, "Estimate"],
      se = stats::coef(summary(model))[2, "Std. Error"],
      r2 = summary(model)$r.squared)
  }
  # Assemble the result matrix: the first three rows describe the observed
  # series; the remaining rows summarise alpha over the surrogate series.
  summarise_alpha <- function(alpha_dfa, se_alpha_dfa, r2_alpha_dfa){
    surr <- alpha_dfa[2:length(alpha_dfa)]
    rbind(alpha_dfa = alpha_dfa[1],
          se_alpha_dfa = se_alpha_dfa[1],
          r2_alpha_dfa = r2_alpha_dfa[1],
          min_test = min(surr),
          max_test = max(surr),
          mean_test = mean(surr),
          median_test = stats::median(surr),
          sd_test = stats::sd(surr),
          skewness_test = PerformanceAnalytics::skewness(surr, method = "moment"),
          kurtosis_test = PerformanceAnalytics::kurtosis(surr, method = "moment"),
          Jarquebera_test_pvalue = tseries::jarque.bera.test(surr)$p.value,
          CI_lower_test = mean(surr) - error*stats::sd(surr),
          CI_upper_test = mean(surr) + error*stats::sd(surr))
  }
  # Build the surrogate series: white Gaussian noise (ts.sim TRUE) or random
  # shuffles of the original series (ts.sim FALSE).  The `==` comparison
  # accepts both logical TRUE/FALSE and the strings "TRUE"/"FALSE", as the
  # original implementation did; any other value is now an explicit error
  # (the original silently returned NULL).
  if(ts.sim == 'TRUE'){
    surrogates <- lapply(seq_len(rep), function(i) stats::rnorm(length(y), mean = 0, sd = 1))
  } else if(ts.sim == 'FALSE'){
    surrogates <- lapply(seq_len(rep), function(i) y[fgpt::fyshuffle(1:length(y))])
  } else {
    stop("ts.sim must be TRUE or FALSE")
  }
  # Fit the observed series first, then each surrogate, in the same order as
  # the original loops so that RNG consumption is unchanged.
  fits <- vapply(c(list(y), surrogates), fit_dfa, c(alpha = 0, se = 0, r2 = 0))
  summarise_alpha(fits["alpha", ], fits["se", ], fits["r2", ])
}
|
\name{plotAdens}
\alias{plotAdens}
\title{
Plot Age Density Curves
}
\description{
Plot density of field (e.g. length) by age as waveforms (density curves) or bars.
}
\usage{
plotAdens(dat, xfld="len", yfld="age", type="dens", sd=3,
strSpp="417", bysex=TRUE, stype=c(1,2,6:8),
xlim=NULL, yspan=2, yrel=TRUE, nmin=1,
dcol, png=FALSE, pngres=400, PIN=c(8,8), ...)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
  \item{dat}{\code{data.frame} -- data object created by a call to \code{'gfb_bio.sql'} and processed by \code{'processBio'}.}
\item{xfld}{\code{character} -- field name in \code{'dat'} used to create densities by \code{'yfld'}.}
\item{yfld}{\code{character} -- field name in \code{'dat'} used to group densities of \code{'xfld'}.}
\item{type}{\code{character} -- type of plot: \code{dens}=density waveform; \code{bars}=barplot.}
\item{sd}{\code{numeric} -- standard deviations to spread density across \code{'x'}.}
\item{strSpp}{\code{character} -- species Hart code (e.g., \code{"417"} for Widow Rockfish).}
\item{bysex}{\code{logical} -- if \code{TRUE}, separate data by sex.}
\item{stype}{\code{numeric} -- sample type codes, e.g.,\cr
\code{1} = total catch,\cr
\code{2} = random,\cr
\code{6} = random from randomly assigned set,\cr
\code{7} = random from set after randomly assigned set,\cr
\code{8} = random from set requested by vessel master.}
\item{xlim}{\code{numeric} -- limits of the x-axis.}
\item{yspan}{\code{numeric} -- vertical adjustment for contrast.}
\item{yrel}{\code{logical} -- if \code{TRUE}, convert y-values relative to y-max.}
\item{nmin}{\code{numeric} -- minimum number of points per \code{yfld} (e.g., \code{"len"}).}
\item{dcol}{\code{character} -- colours of the density curves (defaults to a blue colour ramp).}
\item{png}{\code{logical} -- if \code{TRUE}, send the figure to a \code{'.png'} file.}
\item{pngres}{\code{numeric} -- resolution of output figure (pixels per inch).}
\item{PIN}{\code{numeric} -- width and height of output figure (inches).}
\item{\dots}{additional values for the function \code{graphics::lines}.}
}
\details{
Creates a waveform plot or a barplot, depending on value of \code{'type'}.
Can be used to visualise lengths-at-age, but could be adapted to follow cohorts by year.
}
\author{
\href{mailto:rowan.haigh@dfo-mpo.gc.ca}{Rowan Haigh}, Program Head -- Offshore Rockfish\cr
Pacific Biological Station (PBS), Fisheries & Oceans Canada (DFO), Nanaimo BC\cr
\emph{locus opus}: Regional Headquarters (RHQ), Vancouver BC\cr
Last modified \code{Rd: 2023-02-08}
}
\seealso{
In package \pkg{PBStools}:\cr
\code{\link[PBStools]{compAF}},
\code{\link[PBStools]{compLen}},
\code{\link[PBStools]{histTail}},
\code{\link[PBStools]{plotAgeErr}},
\code{\link[PBStools]{processBio}},
\code{\link[PBStools]{quantAges}},
\code{\link[PBStools]{weightBio}}.
}
% Add one or more standard keywords, see file 'KEYWORDS' in the
% R documentation directory (show via RShowDoc("KEYWORDS")):
\keyword{hplot}
% Use only one keyword per line.
% For non-standard keywords, use \concept instead of \keyword:
\concept{M02_Biology}
% Use only one concept per line.
|
/PBStools/man/plotAdens.Rd
|
no_license
|
pbs-software/pbs-tools
|
R
| false
| false
| 3,215
|
rd
|
\name{plotAdens}
\alias{plotAdens}
\title{
Plot Age Density Curves
}
\description{
Plot density of field (e.g. length) by age as waveforms (density curves) or bars.
}
\usage{
plotAdens(dat, xfld="len", yfld="age", type="dens", sd=3,
strSpp="417", bysex=TRUE, stype=c(1,2,6:8),
xlim=NULL, yspan=2, yrel=TRUE, nmin=1,
dcol, png=FALSE, pngres=400, PIN=c(8,8), ...)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
  \item{dat}{\code{data.frame} -- data object created by a call to \code{'gfb_bio.sql'} and processed by \code{'processBio'}.}
\item{xfld}{\code{character} -- field name in \code{'dat'} used to create densities by \code{'yfld'}.}
\item{yfld}{\code{character} -- field name in \code{'dat'} used to group densities of \code{'xfld'}.}
\item{type}{\code{character} -- type of plot: \code{dens}=density waveform; \code{bars}=barplot.}
\item{sd}{\code{numeric} -- standard deviations to spread density across \code{'x'}.}
\item{strSpp}{\code{character} -- species Hart code (e.g., \code{"417"} for Widow Rockfish).}
\item{bysex}{\code{logical} -- if \code{TRUE}, separate data by sex.}
\item{stype}{\code{numeric} -- sample type codes, e.g.,\cr
\code{1} = total catch,\cr
\code{2} = random,\cr
\code{6} = random from randomly assigned set,\cr
\code{7} = random from set after randomly assigned set,\cr
\code{8} = random from set requested by vessel master.}
\item{xlim}{\code{numeric} -- limits of the x-axis.}
\item{yspan}{\code{numeric} -- vertical adjustment for contrast.}
\item{yrel}{\code{logical} -- if \code{TRUE}, convert y-values relative to y-max.}
\item{nmin}{\code{numeric} -- minimum number of points per \code{yfld} (e.g., \code{"len"}).}
\item{dcol}{\code{character} -- colours of the density curves (defaults to a blue colour ramp).}
\item{png}{\code{logical} -- if \code{TRUE}, send the figure to a \code{'.png'} file.}
\item{pngres}{\code{numeric} -- resolution of output figure (pixels per inch).}
\item{PIN}{\code{numeric} -- width and height of output figure (inches).}
\item{\dots}{additional values for the function \code{graphics::lines}.}
}
\details{
Creates a waveform plot or a barplot, depending on value of \code{'type'}.
Can be used to visualise lengths-at-age, but could be adapted to follow cohorts by year.
}
\author{
\href{mailto:rowan.haigh@dfo-mpo.gc.ca}{Rowan Haigh}, Program Head -- Offshore Rockfish\cr
Pacific Biological Station (PBS), Fisheries & Oceans Canada (DFO), Nanaimo BC\cr
\emph{locus opus}: Regional Headquarters (RHQ), Vancouver BC\cr
Last modified \code{Rd: 2023-02-08}
}
\seealso{
In package \pkg{PBStools}:\cr
\code{\link[PBStools]{compAF}},
\code{\link[PBStools]{compLen}},
\code{\link[PBStools]{histTail}},
\code{\link[PBStools]{plotAgeErr}},
\code{\link[PBStools]{processBio}},
\code{\link[PBStools]{quantAges}},
\code{\link[PBStools]{weightBio}}.
}
% Add one or more standard keywords, see file 'KEYWORDS' in the
% R documentation directory (show via RShowDoc("KEYWORDS")):
\keyword{hplot}
% Use only one keyword per line.
% For non-standard keywords, use \concept instead of \keyword:
\concept{M02_Biology}
% Use only one concept per line.
|
#' Newton-Raphson root finder
#'
#' Iteratively approximates a root of \code{f} using Newton's method with a
#' forward-difference estimate of the derivative.
#'
#' @param f Function of one numeric argument whose root is sought.
#' @param tol Convergence tolerance: iteration stops when two successive
#'   iterates differ by less than \code{tol}.
#' @param x0 Numeric starting value.
#' @param N Maximum number of iterations.
#' @return Numeric vector of successive iterates; the last element is the
#'   approximate root (or the final iterate if \code{N} is exhausted).
newton <- function(f, tol = 1e-7, x0 = 1, N = 100) {
  h <- 1e-7  # step size for the forward-difference derivative estimate
  i <- 1
  x1 <- x0
  p <- numeric(N)  # preallocate storage for the iterates
  while (i <= N) {
    df.dx <- (f(x0 + h) - f(x0)) / h
    # Guard: a zero or non-finite slope would make the update NaN/Inf and
    # the convergence test would fail with an uninformative error.
    if (!is.finite(df.dx) || df.dx == 0) {
      stop("Derivative estimate is zero or non-finite; Newton step undefined.",
           call. = FALSE)
    }
    x1 <- x0 - f(x0) / df.dx
    p[i] <- x1
    i <- i + 1
    # Converged once two successive iterates agree within `tol`
    if (abs(x1 - x0) < tol) break
    x0 <- x1
  }
  # seq_len() is safe when no iterations ran (N = 0), unlike 1:(i-1)
  p[seq_len(i - 1)]
}
|
/newton.r
|
no_license
|
basaksilasanli/NumericalAnalysisUsingR
|
R
| false
| false
| 297
|
r
|
# Approximate a root of `f` with Newton's method, estimating the slope
# numerically via a forward difference of width 1e-7. Returns the full
# sequence of iterates produced before convergence (two successive
# iterates within `tol`) or before the iteration budget `N` is spent.
newton <- function(f, tol = 1e-7, x0 = 1, N = 100){
  width <- 1e-7
  iterates <- numeric(N)
  current <- x0
  count <- 0
  repeat {
    if (count >= N) break
    slope <- (f(current + width) - f(current)) / width
    nxt <- current - f(current) / slope
    count <- count + 1
    iterates[count] <- nxt
    if (abs(nxt - current) < tol) break
    current <- nxt
  }
  return(iterates[1:count])
}
|
#' Spectral Angle Mapper
#'
#' Calculates the angle in spectral space between pixels and a set of reference spectra (endmembers) for image classification based on spectral similarity.
#'
#' @param img RasterBrick or RasterStack. Remote sensing imagery (usually hyperspectral)
#' @param em Matrix or data.frame with endmembers. Each row should contain the endmember spectrum of a class, i.e. columns correspond to bands in \code{img}. It is recommended to set the rownames to class names.
#' @param angles Logical. If \code{TRUE} a RasterBrick containing one layer per endmember will be returned containing the spectral angles.
#' @param ... further arguments to be passed to \code{\link[raster]{writeRaster}}
#' @export
#' @details
#' For each pixel the spectral angle mapper calculates the angle between the vector defined by the pixel values and each endmember vector. The result of this is
#' one raster layer for each endmember containing the spectral angle. The smaller the spectral angle the more similar a pixel is to a given endmember class.
#' In a second step one can then go ahead and enforce thresholds of maximum angles or simply classify each pixel to the most similar endmember.
#' @return RasterBrick or RasterLayer
#' If \code{angles = FALSE} a single Layer will be returned in which each pixel is assigned to the closest endmember class (integer pixel values correspond to row order of \code{em}).
#' @examples
#' library(raster)
#' library(ggplot2)
#' ## Load example data-set
#' data(lsat)
#'
#' ## Sample endmember spectra
#' ## First location is water, second is open agricultural vegetation
#' pts <- data.frame(x = c(624720, 627480), y = c(-414690, -411090))
#' endmembers <- extract(lsat, pts)
#' rownames(endmembers) <- c("water", "vegetation")
#'
#' ## Calculate spectral angles
#' lsat_sam <- sam(lsat, endmembers, angles = TRUE)
#' plot(lsat_sam)
#'
#' ## Classify based on minimum angle
#' lsat_sam <- sam(lsat, endmembers, angles = FALSE)
#'
#' \donttest{ggR(lsat_sam, forceCat = TRUE, geom_raster=TRUE) +
#'         scale_fill_manual(values = c("blue", "green"), labels = c("water", "vegetation"))}
sam <- function(img, em, angles = FALSE, ...){
    # Coerce endmembers to a matrix with one row per class, one column per band.
    if(is.vector(em)) {
        em <- matrix(em, nrow = 1, ncol = length(em))
    } else if (is.data.frame(em)) {
        em <- as.matrix(em)
    }
    # Each endmember spectrum must supply exactly one value per image band.
    # (Error message fixed: the argument is named `img`, not `x`.)
    if(ncol(em) != nlayers(img)) stop("The number of columns in em must match the number of bands in img.")
    if(!angles && nrow(em) == 1){
        stop(paste0("With only one class an image classification does not make sense.",
                "\nUse angles=TRUE to calculate the spectral angles for your class without adding a classification on top."),
                call. = FALSE )
    }
    ## Calculate per-pixel spectral angles against every endmember
    out <- calc(img, fun = function(xi, emc=em) {specSimC(x=xi, em=emc)}, ..., forcefun = TRUE)
    # Fall back to generic class names (s1, s2, ...) when none were supplied.
    if(is.null(rownames(em))) rownames(em) <- paste0("s", seq_len(nrow(em)))
    names(out) <- paste0(rownames(em), "_sa")
    ## Classification: assign each pixel to the endmember with minimum angle
    if(!angles) {
        out <- which.min(out)
        names(out) <- "class"
    }
    return(out)
}
|
/RStoolbox/R/sam.R
|
no_license
|
akhikolla/InformationHouse
|
R
| false
| false
| 3,129
|
r
|
#' Spectral Angle Mapper
#'
#' Calculates the angle in spectral space between pixels and a set of reference spectra (endmembers) for image classification based on spectral similarity.
#'
#' @param img RasterBrick or RasterStack. Remote sensing imagery (usually hyperspectral)
#' @param em Matrix or data.frame with endmembers. Each row should contain the endmember spectrum of a class, i.e. columns correspond to bands in \code{img}. It is recommended to set the rownames to class names.
#' @param angles Logical. If \code{TRUE} a RasterBrick containing one layer per endmember will be returned containing the spectral angles.
#' @param ... further arguments to be passed to \code{\link[raster]{writeRaster}}
#' @export
#' @details
#' For each pixel the spectral angle mapper calculates the angle between the vector defined by the pixel values and each endmember vector. The result of this is
#' one raster layer for each endmember containing the spectral angle. The smaller the spectral angle the more similar a pixel is to a given endmember class.
#' In a second step one can then go ahead and enforce thresholds of maximum angles or simply classify each pixel to the most similar endmember.
#' @return RasterBrick or RasterLayer
#' If \code{angles = FALSE} a single Layer will be returned in which each pixel is assigned to the closest endmember class (integer pixel values correspond to row order of \code{em}).
#' @examples
#' library(raster)
#' library(ggplot2)
#' ## Load example data-set
#' data(lsat)
#'
#' ## Sample endmember spectra
#' ## First location is water, second is open agricultural vegetation
#' pts <- data.frame(x = c(624720, 627480), y = c(-414690, -411090))
#' endmembers <- extract(lsat, pts)
#' rownames(endmembers) <- c("water", "vegetation")
#'
#' ## Calculate spectral angles
#' lsat_sam <- sam(lsat, endmembers, angles = TRUE)
#' plot(lsat_sam)
#'
#' ## Classify based on minimum angle
#' lsat_sam <- sam(lsat, endmembers, angles = FALSE)
#'
#' \donttest{ggR(lsat_sam, forceCat = TRUE, geom_raster=TRUE) +
#'         scale_fill_manual(values = c("blue", "green"), labels = c("water", "vegetation"))}
sam <- function(img, em, angles = FALSE, ...){
    # Coerce endmembers to a matrix: one row per class, one column per band.
    if(is.vector(em)) {
        em <- matrix(em, nrow = 1, ncol=length(em))
    } else if (is.data.frame(em)) {
        em <- as.matrix(em)
    }
    # Each endmember spectrum must provide one value per image band.
    # NOTE(review): the message says "bands in x" but the argument is `img`.
    if(ncol(em) != nlayers(img)) stop("The number of columns in em must match the number of bands in x.")
    # A single class with angles=FALSE would classify everything identically.
    if(!angles && nrow(em) == 1){
        stop(paste0("With only one class an image classification does not make sense.",
                "\nUse angles=TRUE to calculate the spectral angles for your class without adding a classification on top."),
                call. = FALSE )
    }
    ## Calculate angles (one output layer per endmember, via specSimC helper)
    out <- calc(img, fun = function(xi, emc=em) {specSimC(x=xi, em=emc)}, ..., forcefun = TRUE)
    # Fall back to generic class names (s1, s2, ...) when none were supplied.
    if(is.null(rownames(em))) rownames(em) <- paste0("s", 1:nrow(em))
    names(out) <- paste0(rownames(em), "_sa")
    ## Select minimum angle: assign each pixel to its closest endmember class
    if(!angles) {
        out <- which.min(out)
        names(out) <- "class"
    }
    return(out)
}
|
#' search column names based on regex
#'
#' @description search column names based on a regex, possibly exclude some elements based on another regex, and include some other elements (not regex but identical). For selection of columns for use in various situations
#'
#' @param DT a character vector. if a data.frame, uses the colnames() to extract a character vector with the colnames
#' @param x a regex to search for in DT
#' @param not a regex that will exclude these from argument x. Note that this uses grepl, and since grepl is not vectorized, if several elements are supplied, they are stringed together with '|' in grepl.
#' @param plus a character vector with specific elements from the string that will be included. the function will throw an error if these do not exist in the vector.
#' @param ignore.case should cases be ignored? Default is TRUE
#' @import data.table
#' @import assertthat
#' @export
#'
#' @examples
#'\dontrun{
#' colc(dupstestdata)
#' }
#' @return This function returns a \code{character vector} that matches the regex search pattern
colc <- function(DT, x=NA, not=NA, plus=NA, ignore.case=TRUE) {
	# If the search pattern is a vector (e.g. the input was colnames),
	# collapse it into a single "or" regex so every element is searched.
	if( length(x) > 1 ) {
		x <- paste(x, collapse='|')
		warning('search string var en vector > 1. collapser den med paste')
	}
	# Accept either a data.frame (search its column names) or a character vector.
	if(is.data.frame(DT)) search_vector <- colnames(DT) else search_vector <- DT
	# Input validation (base stopifnot replaces the assertthat dependency).
	stopifnot(is.character(search_vector))
	if(is.na(x)) {
		x1 <- search_vector
	} else {
		x1 <- grep(x, search_vector, value=TRUE, ignore.case=ignore.case)
	}
	# Drop entries matching the exclusion regex; multiple patterns are
	# collapsed with '|' because grepl takes a single pattern.
	if(any(!is.na(not))) {
		if(length(not) > 1) not <- paste(not, collapse='|')
		x1 <- x1[!grepl(not, x1, ignore.case=ignore.case)]
	}
	# Only sort when a subset was selected; a full selection keeps the
	# original column order so positional subsetting stays stable.
	if( !is.na(x)) x1 <- sort(x1)
	if( any(!is.na(plus))) {
		# 'plus' entries must exist verbatim in the search vector.
		# (Base %in% replaces the non-base %nin% operator.)
		if( any(!(plus %in% search_vector))){
			error_out <- setdiff(plus, search_vector)
			stop('some element(s) in plus are not in the vector: ', error_out)
		}
		x1 <- c(plus,x1)
	}
	# Defensive check: an NA here would indicate an upstream logic error.
	if(any(is.na(x1))) {
		stop('der er en NA i vectoren - fejltjek funktionen')
	}
	x1
}
|
/R/colc.R
|
no_license
|
emilBeBri/dttools
|
R
| false
| false
| 2,625
|
r
|
#' search column names based on regex
#'
#' @description search column names based on a regex, possibly exclude some elements based on another regex, and include some other elements (not regex but identical). For selection of columns for use in various situations
#'
#' @param DT a character vector. if a data.frame, uses the colnames() to extract a character vector with the colnames
#' @param x a regex to search for in DT
#' @param not a regex that will exclude these from argument x. Note that this uses grepl, and since grepl is not vectorized, if several elements are supplied, they are stringed together with '|' in grepl.
#' @param plus a character vector with specific elements from the string that will be included. the function will throw an error if these do not exist in the vector.
#' @param ignore.case should cases be ignored? Default is TRUE
#' @import data.table
#' @import assertthat
#' @export
#'
#' @examples
#'\dontrun{
#' colc(dupstestdata)
#' }
#' @return This function returns a \code{character vector} that matches the regex search pattern
colc <- function(DT, x=NA, not=NA, plus=NA, ignore.case=TRUE) {
	# Example inputs kept for interactive debugging:
	# DT <- c('a','b')
	# x <- 'NA'
	# not <- '6'
	# plus <- c('a','x')
	# ignore.case <- TRUE
	# If x (the search string) is a vector (e.g. the input was colnames),
	# collapse it into an "or" regex so the whole set is searched at once.
	if( length(x) > 1 ) {
		x <- paste(x, collapse='|')
		warning('search string var en vector > 1. collapser den med paste')
	}
	# Is it a data.frame? If so, search its column names instead.
	if(is.data.frame(DT)) search_vector <- colnames(DT) else search_vector <- DT
	# Error check: the search space must be a character vector.
	assertthat::assert_that(is.character(search_vector))
	if(is.na(x)) {
		x1 <- search_vector
	} else {
		x1 <- grep(x, search_vector, value=TRUE, ignore.case=ignore.case)
	}
	# Remove entries matching the exclusion regex; several patterns are
	# collapsed with '|' since grepl accepts only one pattern at a time.
	if(any(!is.na(not))) {
		if(length(not)==1){
			x1 <- x1[which(!grepl(not, x1, ignore.case=ignore.case))]
		} else {
			not <- paste(not, collapse='|')
			x1 <- x1[which(!grepl(not, x1, ignore.case=ignore.case))]
		}
	}
	# Only sort when a subset of columns was selected, not all of them;
	# sorting a full selection would reorder columns when used to subset a DT.
	if( !is.na(x)) x1 <- sort(x1)
	if( any(!is.na(plus))) {
		# errorcheck: every element of 'plus' needs to exist verbatim in the
		# search vector; otherwise fail loudly with the offending elements.
		if( any(plus %nin% search_vector)){
			error_out <- setdiff(plus, search_vector)
			stop('some element(s) in plus are not in the vetor: ', error_out)
		}
		x1 <- c(plus,x1)
	}
	# Defensive check: an NA here indicates an upstream logic error.
	if(any(is.na(x1))) {
		stop('der er en NA i vectoren - fejltjek funktionen')
	}
	x1
}
|
####################################################################
#' ARIMA Forecast
#'
#' This function automates the ARIMA iterations and modeling for
#' time forecasting. For the moment, units can only be days.
#'
#' The ARIMA method is appropriate only for a time series that is
#' stationary (i.e., its mean, variance, and autocorrelation should
#' be approximately constant through time) and it is recommended
#' that there are at least 50 observations in the input data.
#'
#' The model consists of two parts, an autoregressive (AR) part
#' and a moving average (MA) part. The AR part involves regressing
#' the variable on its own lagged (i.e., past) values. The MA part
#' involves modeling the error term as a linear combination of error
#' terms occurring contemporaneously and at various times in the past.
#'
#' One thing to keep in mind when we think about ARIMA models is
#' given by the great power to capture very complex patters of
#' temporal correlation (Cochrane, 1997: 25)
#'
#' @family Forecast
#' @param time POSIX. Vector with date values
#' @param values Numeric. Vector with numerical values
#' @param n_future Integer. How many steps do you wish to forecast?
#' If set to 0 then it will forecast until the end of max date's month;
#' if set to -1, until the end of max date's following month
#' @param ARMA Integer. Maximum AR/MA order to try when iterating for the
#' best combination. Between 5 and 10 days recommended.
#' @param ARMA_min Integer. Minimum AR/MA order to try when iterating for
#' the best combination. Between 5 and 10 days recommended.
#' @param AR Integer. Force AR value if known
#' @param MA Integer. Force MA value if known
#' @param wd_excluded Character vector. Which weekdays are excluded in
#' your training set. If there are, please define know which ones. Example:
#' c('Sunday','Thursday'). If set to 'auto' then it will detect automatically
#' which weekdays have no data and forecast without these days.
#' @param plot Boolean. If you wish to plot your results
#' @param plot_days Integer. How many days back you wish to plot?
#' @param project Character. Name of your forecast project
#' @return List. Containing the trained model, forecast accuracy results,
#' data.frame for forecast (test) and train, and if \code{plot=TRUE}, a plot.
#' @export
forecast_arima <- function(time, values, n_future = 30,
                           ARMA = 8, ARMA_min = 5,
                           AR = NA, MA = NA,
                           wd_excluded = NA,
                           plot = TRUE, plot_days = 90, project = NA) {
  # require(lubridate)
  # require(ggplot2)
  try_require("forecast")
  # ARIMA doesn't use zeroes! Drop observations whose value is exactly 0.
  time <- time[!values == 0]
  values <- values[!values == 0]
  if (length(time) < 50) {
    message("It is recommended that there are at least 50 observations in the input data")
  }
  if (Sys.Date() %in% time) {
    message("It is recommended that you do NOT use today's data for training your data")
  }
  # Special n_future values: forecast to end of next month (-1) or month (0)
  if (n_future == -1) {
    n_future <- ceiling_date(Sys.Date(), "month") + months(1) - Sys.Date()
  }
  if (n_future == 0) {
    n_future <- ceiling_date(Sys.Date(), "month") - Sys.Date()
  }
  # Grid-search which AR and MA orders minimize the model's AIC
  if (is.na(AR) && is.na(MA)) {
    arma <- ARMA_min:ARMA
    aic <- expand.grid(AR = arma, MA = arma, cals = 0)
    message("Iterating for best AR / MA combinations; there are ", nrow(aic), "!")
    # if (length(time) > 1000) { method <- "ML" } else { method <- "CSS" }
    for (i in seq_len(nrow(aic))) {
      Tmodel <- Arima(values, order = c(aic$AR[i], 1, aic$MA[i]), method = "ML")
      aic$cals[i] <- Tmodel$aic
    }
    AR <- aic$AR[which.min(aic$cals)]
    MA <- aic$MA[which.min(aic$cals)]
    # Spaces added so the message doesn't print as "combination:5and6"
    message("Best combination: ", AR, " and ", MA)
  }
  # Fit the final ARIMA(AR, 1, MA) model by maximum likelihood
  model <- Arima(values, order = c(AR, 1, MA), method = "ML")
  train <- data.frame(time, values,
    pred = model$fitted,
    resid = model$residuals
  )
  # Forecast: build the future date axis, optionally skipping weekdays
  future_dates <- seq.Date(max(time) + 1, max(time) %m+% days(n_future), by = 1)
  # any()/identical() keep these conditions scalar even when wd_excluded is
  # a character vector such as c('Sunday','Thursday') (R >= 4.2 errors on
  # length > 1 conditions in if()).
  if (any(!is.na(wd_excluded))) {
    if (identical(wd_excluded, "auto")) {
      # Exclude weekdays that never appear in the training data
      weekdays <- data.frame(table(weekdays(time)))
      weekdays_real <- c(weekdays(seq.Date(Sys.Date(), Sys.Date() + 6, by = 1)))
      wd_excluded <- weekdays_real[!weekdays_real %in% weekdays$Var1]
      message("Automatically excluding ", vector2text(wd_excluded))
    }
    future_dates <- future_dates[!weekdays(future_dates) %in% wd_excluded]
    n_future <- length(future_dates)
  }
  f <- forecast(model, h = n_future)
  test <- data.frame(time = future_dates, pred = f$mean, data.frame(f)[, -1])
  # Output list with all results
  output <- list(
    model = model,
    metrics = forecast::accuracy(model),
    forecast = test,
    train = train
  )
  # Plot results
  if (isTRUE(plot)) {
    # Keep only the most recent plot_days of training data for readability
    if (nrow(train) > plot_days) {
      train <- train[(nrow(train) - plot_days):nrow(train), ]
    }
    plotdata <- data.frame(
      rbind(
        data.frame(date = train$time, values = train$values, type = "Real"),
        data.frame(date = train$time, values = train$pred, type = "Model"),
        data.frame(date = test$time, values = test$pred, type = "Forecast")
      )
    )
    # Shaded rectangle highlighting the forecast horizon
    rects <- data.frame(start = min(future_dates), end = max(future_dates))
    output$plot <- ggplot(plotdata, aes(.data$date)) +
      geom_smooth(aes(y = .data$values), method = "loess", alpha = 0.5) +
      geom_line(aes(y = .data$values, colour = .data$type)) +
      labs(x = "Date", y = "Counter", colour = "") +
      theme_minimal() +
      theme(
        legend.position = "top",
        axis.text.x = element_text(angle = 60, hjust = 1)
      ) +
      scale_x_date(date_breaks = "1 month", date_labels = "%b-%Y") +
      ggtitle("Real & Fitted Model vs Forecast (ARIMA)",
        subtitle = paste(
          "AIC", signif(output$model$aic, 4), "|",
          "MAE", signif(output$metrics[3], 3), "|",
          "RMSE", signif(output$metrics[2], 3), "|",
          "ARIMA:", AR, "- 1 -", MA
        )
      ) +
      scale_color_manual(values = c("orange", "navy", "purple")) +
      geom_rect(
        data = rects, inherit.aes = FALSE,
        aes(
          xmin = .data$start, xmax = .data$end,
          ymin = min(plotdata$values),
          ymax = max(plotdata$values)
        ),
        color = "transparent", fill = "grey", alpha = 0.25
      )
    if (!is.na(project)) {
      output$plot <- output$plot + labs(caption = project)
    }
    plot(output$plot)
  }
  return(output)
}
####################################################################
#' Facebook's Prophet Forecast
#'
#' Prophet is Facebook's procedure for forecasting time series data
#' based on an additive model where non-linear trends are fit with
#' yearly, weekly, and daily seasonality, plus holiday effects. It
#' works best with time series that have strong seasonal effects and
#' several seasons of historical data. Prophet is robust to missing
#' data and shifts in the trend, and typically handles outliers well.
#'
#' Official documentation: \url{https://github.com/facebook/prophet}
#'
#' @family Forecast
#' @param df Data frame. Must contain date/time column and values column,
#' in that order.
#' @param n_future Integer. How many steps do you wish to forecast?
#' @param country Character. Country code for holidays.
#' @param trend.param Numeric. Flexibility of trend component. Default is 0.05,
#' and as this value becomes larger, the trend component will be more flexible.
#' @param logged Boolean. Convert values into logs?
#' @param pout Numeric. Get rid of pout \% of outliers.
#' @param project Character. Name of your forecast project for plot title
#' @return List. Containing the forecast results, the prophet model, and a plot.
#' @export
prophesize <- function(df, n_future = 60, country = "AR",
                       trend.param = 0.05, logged = FALSE, pout = 0.03,
                       project = "Prophet Forecast") {
  try_require("prophet")
  # Keep only the first two columns: date/time and the value to forecast
  df <- data.frame(df[, c(1, 2)])
  metric <- colnames(df)[2]
  colnames(df) <- c("ds", "y") # prophet's required column names
  df <- arrange(df, .data$ds)
  if (logged) df$y <- log(df$y)
  # Outliers: drop the top `pout` share of values by rank.
  # seq_len() (not 1:n) so that when the count rounds to zero nothing is
  # dropped; 1:0 would yield c(1, 0) and silently remove the largest value.
  df <- df[!rank(-df$y) %in% seq_len(round(nrow(df) * pout)), ]
  # Run prophet functions
  m <- prophet(
    yearly.seasonality = TRUE, daily.seasonality = FALSE,
    changepoint.prior.scale = trend.param
  )
  # Add country-specific holiday effects when a country code is supplied
  if (!is.null(country)) {
    m <- add_country_holidays(m, country_name = country)
  }
  m <- fit.prophet(m, df)
  future <- make_future_dataframe(m, periods = n_future)
  forecast <- predict(m, future)
  # Point forecast = trend plus all additive seasonal/holiday terms
  forecast$y <- forecast$trend + forecast$additive_terms
  p <- plot(m, forecast) + theme_lares() +
    labs(
      y = metric, x = "Dates",
      title = project,
      subtitle = paste("Forecast results for the next", n_future, "days")
    ) +
    scale_y_comma()
  # Stack the component plots (trend, seasonality, holidays) vertically
  plots2 <- prophet_plot_components(m, forecast)
  plots2 <- lapply(plots2, function(x) x + theme_lares())
  plot2 <- wrap_plots(plots2, ncol = 1) +
    plot_annotation(title = "Forecast components", theme = theme_lares())
  return(list(result = forecast, model = m, plot = p, components = plot2))
}
#' ####################################################################
#' #' Machine Learning Forecast
#' #'
#' #' This function lets the user create a forecast setting a time series
#' #' and a numerical value.
#' #'
#' #' @family Forecast
#' #' @param time POSIX. Vector with dates or time values
#' #' @param values Numeric. Vector with numerical values
#' #' @param n_future Integer. How many steps do you wish to forecast?
#' #' @param use_last Boolean. Use last observation?
#' #' @param automl Boolean. Use \code{h2o_automl()}
#' #' @param plot_forecast Boolean. If you wish to plot your results
#' #' @param plot_model Boolean. If you wish to plot your model's results
#' #' @param project Character. Name of your forecast project for plot title
#' #' @export
#' forecast_ml <- function(time, values,
#' n_future = 15,
#' use_last = TRUE,
#' automl = FALSE,
#' plot_forecast = TRUE,
#' plot_model = FALSE,
#' project = "Simple Forecast using Machine Learning") {
#'
#' # require(timetk)
#' # require(tidyquant)
#'
#' if (length(time) != length(values)) {
#' stop("The parameters 'time' and 'values' should have the same length")
#' }
#'
#' df <- data.frame(time = time, amount = values)
#' if (use_last == FALSE) {
#' df <- arrange(df, desc(.data$time)) %>% slice(-1)
#' n_future <- n_future + 1
#' }
#'
#' # STEP 1: AUGMENT TIME SERIES SIGNATURE
#' augmented <- tk_augment_timeseries_signature(df)
#' augmented <- mutate(augmented,
#' month.lbl = as.character(.data$month.lbl),
#' wday.lbl = as.character(.data$wday.lbl))
#'
#' # STEP 2: BUILD FUTURE (NEW) DATA
#' idx <- tk_index(augmented)
#' future_idx <- tk_make_future_timeseries(idx, n_future = n_future)
#' new_data_tbl <- tk_get_timeseries_signature(future_idx) %>%
#' mutate(month.lbl = as.character(month.lbl),
#' wday.lbl = as.character(wday.lbl))
#'
#' # STEP 3: MODEL
#' if (!automl) {
#' fit_lm <- lm(amount ~ ., data = select(augmented, -c(time)))
#' pred <- predict(fit_lm, newdata = select(new_data_tbl, -c(index)))
#' predictions_tbl <- tibble(time = future_idx, amount = pred)
#' } else {
#' augmented_h2o <- dplyr::rename(augmented, tag = amount)
#' fit_auto <- h2o_automl(df = augmented_h2o, alarm = FALSE, project = project)
#' pred <- h2o.predict(fit_auto$model, as.h2o(new_data_tbl))
#' predictions_tbl <- tibble(time = future_idx, amount = as.vector(pred))
#' }
#'
#' # STEP 5: COMPARE ACTUAL VS PREDICTIONS
#' rects <- data.frame(start = min(future_idx), end = max(future_idx))
#' message("Predicted range: ", rects$start, " to ", rects$end)
#' forecast <- ggplot(df, aes(x = time, y = amount)) +
#' labs(title = project, y = "Amount", x = NULL,
#' subtitle = "Using simple multivariate regressions on time series with Machine Learning") +
#' # Training data
#' geom_line(color = palette_light()[[1]]) +
#' geom_point(color = palette_light()[[1]]) +
#' geom_smooth(method = 'loess', formula = 'y ~ x', alpha = 0.5) +
#' # Predictions
#' geom_line(aes(y = amount), color = palette_light()[[2]], data = predictions_tbl) +
#' geom_point(aes(y = amount), color = palette_light()[[2]], data = predictions_tbl) +
#' # Actuals
#' geom_line(color = palette_light()[[1]], data = df) +
#' geom_point(color = palette_light()[[1]], data = df) +
#' # Aesthetics
#' scale_x_date(date_breaks = "1 month", date_labels = "%b") +
#' theme_lares() +
#' geom_rect(data = rects, inherit.aes = FALSE,
#' aes(
#' xmin = start, xmax = end, ymin = 0,
#' ymax = max(df$amount) * 1.02),
#' color = "transparent", fill = "orange", alpha = 0.3)
#'
#' if (plot_forecast) {
#' print(forecast)
#' }
#'
#' if (plot_model) {
#' Sys.sleep(1)
#' mplot_full(
#' tag = df$amount,
#' score = predictions_tbl$amount[seq_along(df$amount)],
#' subtitle = project)
#' Sys.sleep(4)
#' }
#'
#' df_final <- rbind(df, predictions_tbl)
#'
#' if (automl) {
#' model <- fit_auto
#' score <- fit_auto$scores$score
#' } else {
#' model <- fit_lm
#' score <- fit_lm$fitted.values
#' }
#'
#' output <- list(data = df_final,
#' model = model,
#' errors = errors(df$amount, score))
#'
#' return(output)
#'
#' }
|
/R/forecasting.R
|
no_license
|
romainfrancois/lares
|
R
| false
| false
| 13,868
|
r
|
####################################################################
#' ARIMA Forecast
#'
#' This function automates the ARIMA iterations and modeling for
#' time forecasting. For the moment, units can only be days.
#'
#' The ARIMA method is appropriate only for a time series that is
#' stationary (i.e., its mean, variance, and autocorrelation should
#' be approximately constant through time) and it is recommended
#' that there are at least 50 observations in the input data.
#'
#' The model consists of two parts, an autoregressive (AR) part
#' and a moving average (MA) part. The AR part involves regressing
#' the variable on its own lagged (i.e., past) values. The MA part
#' involves modeling the error term as a linear combination of error
#' terms occurring contemporaneously and at various times in the past.
#'
#' One thing to keep in mind when we think about ARIMA models is
#' given by the great power to capture very complex patters of
#' temporal correlation (Cochrane, 1997: 25)
#'
#' @family Forecast
#' @param time POSIX. Vector with date values
#' @param values Numeric. Vector with numerical values
#' @param n_future Integer. How many steps do you wish to forecast?
#' If set to 0 then it will forecast until the end of max date's month;
#' if set to -1, until the end of max date's following month
#' @param ARMA Integer. Maximum AR/MA order to try when iterating for the
#' best combination. Between 5 and 10 days recommended.
#' @param ARMA_min Integer. Minimum AR/MA order to try when iterating for
#' the best combination. Between 5 and 10 days recommended.
#' @param AR Integer. Force AR value if known
#' @param MA Integer. Force MA value if known
#' @param wd_excluded Character vector. Which weekdays are excluded in
#' your training set. If there are, please define know which ones. Example:
#' c('Sunday','Thursday'). If set to 'auto' then it will detect automatically
#' which weekdays have no data and forecast without these days.
#' @param plot Boolean. If you wish to plot your results
#' @param plot_days Integer. How many days back you wish to plot?
#' @param project Character. Name of your forecast project
#' @return List. Containing the trained model, forecast accuracy results,
#' data.frame for forecast (test) and train, and if \code{plot=TRUE}, a plot.
#' @export
forecast_arima <- function(time, values, n_future = 30,
                           ARMA = 8, ARMA_min = 5,
                           AR = NA, MA = NA,
                           wd_excluded = NA,
                           plot = TRUE, plot_days = 90, project = NA) {
  # require(lubridate)
  # require(ggplot2)
  try_require("forecast")
  # ARIMA doesn't use zeroes! Drop observations whose value is exactly 0.
  time <- time[!values == 0]
  values <- values[!values == 0]
  if (length(time) < 50) {
    message("It is recommended that there are at least 50 observations in the input data")
  }
  if (Sys.Date() %in% time) {
    message("It is recommended that you do NOT use today's data for training your data")
  }
  # Special n_future values: forecast to end of next month (-1) or this month (0)
  if (n_future == -1) {
    n_future <- ceiling_date(Sys.Date(), "month") + months(1) - Sys.Date()
  }
  if (n_future == 0) {
    n_future <- ceiling_date(Sys.Date(), "month") - Sys.Date()
  }
  # Grid-search which AR and MA values minimize our AIC
  if (is.na(AR) & is.na(MA)) {
    arma <- c(ARMA_min:ARMA)
    aic <- expand.grid(AR = arma, MA = arma, cals = 0)
    message("Iterating for best AR / MA combinations; there are ", nrow(aic), "!")
    # if (length(time) > 1000) { method <- "ML" } else { method <- "CSS" }
    for (i in seq_len(nrow(aic))) {
      Tmodel <- Arima(values, order = c(aic$AR[i], 1, aic$MA[i]), method = "ML")
      aic$cals[i] <- Tmodel$aic
    }
    AR <- aic$AR[which.min(aic$cals)]
    MA <- aic$MA[which.min(aic$cals)]
    # NOTE(review): message() has no separators, prints "combination:5and6"
    message("Best combination:", AR, "and", MA)
    aic_ARIMA <- min(aic$cals)
  }
  # Fit the final ARIMA(AR, 1, MA) model by maximum likelihood
  model <- Arima(values, order = c(AR, 1, MA), method = "ML")
  train <- data.frame(time, values,
    pred = model$fitted,
    resid = model$residuals
  )
  # Forecast: build the future date axis, optionally skipping weekdays
  future_dates <- seq.Date(max(time) + 1, max(time) %m+% days(n_future), by = 1)
  # NOTE(review): if wd_excluded has length > 1 (e.g. c('Sunday','Thursday'))
  # these if() conditions are vectorized and error on R >= 4.2 — verify.
  if (!is.na(wd_excluded)) {
    if (wd_excluded == "auto") {
      # Exclude weekdays that never appear in the training data
      weekdays <- data.frame(table(weekdays(time)))
      weekdays_real <- c(weekdays(seq.Date(Sys.Date(), Sys.Date() + 6, by = 1)))
      wd_excluded <- weekdays_real[!weekdays_real %in% weekdays$Var1]
      message("Automatically excluding ", vector2text(wd_excluded))
    }
    exclude <- vector2text(wd_excluded, quotes = FALSE)
    future_dates <- future_dates[!weekdays(future_dates) %in% wd_excluded]
    n_future <- length(future_dates)
  }
  f <- forecast(model, h = n_future)
  test <- data.frame(time = future_dates, pred = f$mean, data.frame(f)[, -1])
  # Output list with all results
  output <- list(
    model = model,
    metrics = forecast::accuracy(model),
    forecast = test,
    train = train
  )
  # Plot results
  if (plot == TRUE) {
    # Keep only the most recent plot_days of training data for readability
    if (nrow(train) > plot_days) {
      train <- train[(nrow(train) - plot_days):nrow(train), ]
    }
    plotdata <- data.frame(
      rbind(
        data.frame(date = train$time, values = train$values, type = "Real"),
        data.frame(date = train$time, values = train$pred, type = "Model"),
        data.frame(date = test$time, values = test$pred, type = "Forecast")
      )
    )
    # Shaded rectangle highlighting the forecast horizon
    rects <- data.frame(start = min(future_dates), end = max(future_dates))
    output$plot <- ggplot(plotdata, aes(.data$date)) +
      geom_smooth(aes(y = .data$values), method = "loess", alpha = 0.5) +
      geom_line(aes(y = .data$values, colour = .data$type)) +
      labs(x = "Date", y = "Counter", colour = "") +
      theme_minimal() +
      theme(
        legend.position = "top",
        axis.text.x = element_text(angle = 60, hjust = 1)
      ) +
      scale_x_date(date_breaks = "1 month", date_labels = "%b-%Y") +
      ggtitle("Real & Fitted Model vs Forecast (ARIMA)",
        subtitle = paste(
          "AIC", signif(output$model$aic, 4), "|",
          "MAE", signif(output$metrics[3], 3), "|",
          "RMSE", signif(output$metrics[2], 3), "|",
          "ARIMA:", AR, "- 1 -", MA
        )
      ) +
      scale_color_manual(values = c("orange", "navy", "purple")) +
      geom_rect(
        data = rects, inherit.aes = FALSE,
        aes(
          xmin = .data$start, xmax = .data$end,
          ymin = min(plotdata$values),
          ymax = max(plotdata$values)
        ),
        color = "transparent", fill = "grey", alpha = 0.25
      )
    if (!is.na(project)) {
      output$plot <- output$plot + labs(caption = project)
    }
    plot(output$plot)
  }
  return(output)
}
####################################################################
#' Facebook's Prophet Forecast
#'
#' Prophet is Facebook's procedure for forecasting time series data
#' based on an additive model where non-linear trends are fit with
#' yearly, weekly, and daily seasonality, plus holiday effects. It
#' works best with time series that have strong seasonal effects and
#' several seasons of historical data. Prophet is robust to missing
#' data and shifts in the trend, and typically handles outliers well.
#'
#' Official documentation: \url{https://github.com/facebook/prophet}
#'
#' @family Forecast
#' @param df Data frame. Must contain date/time column and values column,
#' in that order.
#' @param n_future Integer. How many steps do you wish to forecast?
#' @param country Character. Country code for holidays.
#' @param trend.param Numeric. Flexibility of trend component. Default is 0.05,
#' and as this value becomes larger, the trend component will be more flexible.
#' @param logged Boolean. Convert values into logs?
#' @param pout Numeric. Get rid of pout \% of outliers.
#' @param project Character. Name of your forecast project for plot title
#' @return List. Containing the forecast results, the prophet model, and a plot.
#' @export
prophesize <- function(df, n_future = 60, country = "AR",
                       trend.param = 0.05, logged = FALSE, pout = 0.03,
                       project = "Prophet Forecast") {
  try_require("prophet")
  # Keep only the first two columns: date/time and the value to forecast
  df <- data.frame(df[, c(1, 2)])
  metric <- colnames(df)[2]
  colnames(df) <- c("ds", "y") # prophet's required column names
  df <- arrange(df, .data$ds)
  if (logged) df$y <- log(df$y)
  # Outliers: drop the top `pout` share of values by rank.
  # NOTE(review): 1:round(...) yields c(1, 0) when the count rounds to 0,
  # which would drop the single largest observation — verify intent.
  df <- df[!rank(-df$y) %in% c(1:round(nrow(df) * pout)), ]
  # Run prophet functions
  m <- prophet(
    yearly.seasonality = TRUE, daily.seasonality = FALSE,
    changepoint.prior.scale = trend.param
  )
  # Add country-specific holiday effects when a country code is supplied
  if (!is.null(country)) {
    m <- add_country_holidays(m, country_name = country)
  }
  m <- fit.prophet(m, df)
  future <- make_future_dataframe(m, periods = n_future)
  forecast <- predict(m, future)
  # Point forecast = trend plus all additive seasonal/holiday terms
  forecast$y <- forecast$trend + forecast$additive_terms
  p <- plot(m, forecast) + theme_lares() +
    labs(
      y = metric, x = "Dates",
      title = project,
      subtitle = paste("Forecast results for the next", n_future, "days")
    ) +
    scale_y_comma()
  # Stack the component plots (trend, seasonality, holidays) vertically
  plots2 <- prophet_plot_components(m, forecast)
  plots2 <- lapply(plots2, function(x) x + theme_lares())
  plot2 <- wrap_plots(plots2, ncol = 1) +
    plot_annotation(title = "Forecast components", theme = theme_lares())
  return(list(result = forecast, model = m, plot = p, components = plot2))
}
#' ####################################################################
#' #' Machine Learning Forecast
#' #'
#' #' This function lets the user create a forecast setting a time series
#' #' and a numerical value.
#' #'
#' #' @family Forecast
#' #' @param time POSIX. Vector with dates or time values
#' #' @param values Numeric. Vector with numerical values
#' #' @param n_future Integer. How many steps do you wish to forecast?
#' #' @param use_last Boolean. Use last observation?
#' #' @param automl Boolean. Use \code{h2o_automl()}
#' #' @param plot_forecast Boolean. If you wish to plot your results
#' #' @param plot_model Boolean. If you wish to plot your model's results
#' #' @param project Character. Name of your forecast project for plot title
#' #' @export
#' forecast_ml <- function(time, values,
#' n_future = 15,
#' use_last = TRUE,
#' automl = FALSE,
#' plot_forecast = TRUE,
#' plot_model = FALSE,
#' project = "Simple Forecast using Machine Learning") {
#'
#' # require(timetk)
#' # require(tidyquant)
#'
#' if (length(time) != length(values)) {
#' stop("The parameters 'time' and 'values' should have the same length")
#' }
#'
#' df <- data.frame(time = time, amount = values)
#' if (use_last == FALSE) {
#' df <- arrange(df, desc(.data$time)) %>% slice(-1)
#' n_future <- n_future + 1
#' }
#'
#' # STEP 1: AUGMENT TIME SERIES SIGNATURE
#' augmented <- tk_augment_timeseries_signature(df)
#' augmented <- mutate(augmented,
#' month.lbl = as.character(.data$month.lbl),
#' wday.lbl = as.character(.data$wday.lbl))
#'
#' # STEP 2: BUILD FUTURE (NEW) DATA
#' idx <- tk_index(augmented)
#' future_idx <- tk_make_future_timeseries(idx, n_future = n_future)
#' new_data_tbl <- tk_get_timeseries_signature(future_idx) %>%
#' mutate(month.lbl = as.character(month.lbl),
#' wday.lbl = as.character(wday.lbl))
#'
#' # STEP 3: MODEL
#' if (!automl) {
#' fit_lm <- lm(amount ~ ., data = select(augmented, -c(time)))
#' pred <- predict(fit_lm, newdata = select(new_data_tbl, -c(index)))
#' predictions_tbl <- tibble(time = future_idx, amount = pred)
#' } else {
#' augmented_h2o <- dplyr::rename(augmented, tag = amount)
#' fit_auto <- h2o_automl(df = augmented_h2o, alarm = FALSE, project = project)
#' pred <- h2o.predict(fit_auto$model, as.h2o(new_data_tbl))
#' predictions_tbl <- tibble(time = future_idx, amount = as.vector(pred))
#' }
#'
#' # STEP 5: COMPARE ACTUAL VS PREDICTIONS
#' rects <- data.frame(start = min(future_idx), end = max(future_idx))
#' message("Predicted range: ", rects$start, " to ", rects$end)
#' forecast <- ggplot(df, aes(x = time, y = amount)) +
#' labs(title = project, y = "Amount", x = NULL,
#' subtitle = "Using simple multivariate regressions on time series with Machine Learning") +
#' # Training data
#' geom_line(color = palette_light()[[1]]) +
#' geom_point(color = palette_light()[[1]]) +
#' geom_smooth(method = 'loess', formula = 'y ~ x', alpha = 0.5) +
#' # Predictions
#' geom_line(aes(y = amount), color = palette_light()[[2]], data = predictions_tbl) +
#' geom_point(aes(y = amount), color = palette_light()[[2]], data = predictions_tbl) +
#' # Actuals
#' geom_line(color = palette_light()[[1]], data = df) +
#' geom_point(color = palette_light()[[1]], data = df) +
#' # Aesthetics
#' scale_x_date(date_breaks = "1 month", date_labels = "%b") +
#' theme_lares() +
#' geom_rect(data = rects, inherit.aes = FALSE,
#' aes(
#' xmin = start, xmax = end, ymin = 0,
#' ymax = max(df$amount) * 1.02),
#' color = "transparent", fill = "orange", alpha = 0.3)
#'
#' if (plot_forecast) {
#' print(forecast)
#' }
#'
#' if (plot_model) {
#' Sys.sleep(1)
#' mplot_full(
#' tag = df$amount,
#' score = predictions_tbl$amount[seq_along(df$amount)],
#' subtitle = project)
#' Sys.sleep(4)
#' }
#'
#' df_final <- rbind(df, predictions_tbl)
#'
#' if (automl) {
#' model <- fit_auto
#' score <- fit_auto$scores$score
#' } else {
#' model <- fit_lm
#' score <- fit_lm$fitted.values
#' }
#'
#' output <- list(data = df_final,
#' model = model,
#' errors = errors(df$amount, score))
#'
#' return(output)
#'
#' }
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/semmsFuncs.R
\docType{data}
\name{NKI70}
\alias{NKI70}
\title{NKI70 data}
\format{
The NKI70 data has 144 observations and 72 predictors.
}
\description{
The NKI70 data is available from the “penalized” package in R (Goeman, 2010)
}
\examples{
\dontrun{
fn <- system.file("extdata", "NKI70_t1.RData", package = "SEMMS", mustWork = TRUE)
dataYXZ <- readInputFile(fn, ycol=1, Zcols=2:73,addIntercept = TRUE)
fittedSEMMS <- fitSEMMS(dataYXZ, mincor=0.8, nn=6, minchange= 1,
distribution="P", verbose=T, rnd=F)
fittedGLM <- runLinearModel(dataYXZ,fittedSEMMS$gam.out$nn, "P")
plotMDS(dataYXZ, fittedSEMMS, fittedGLM, ttl="NKI70")}
}
\references{
Goeman, J J. 2010. "L1 penalized estimation in the Cox proportional hazards model." no. 1. 14
}
\keyword{datasets}
|
/man/NKI70.Rd
|
no_license
|
haimbar/SEMMS
|
R
| false
| true
| 842
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/semmsFuncs.R
\docType{data}
\name{NKI70}
\alias{NKI70}
\title{NKI70 data}
\format{
The NKI70 data has 144 observations and 72 predictors.
}
\description{
The NKI70 data is available from the “penalized” package in R (Goeman, 2010)
}
\examples{
\dontrun{
fn <- system.file("extdata", "NKI70_t1.RData", package = "SEMMS", mustWork = TRUE)
dataYXZ <- readInputFile(fn, ycol=1, Zcols=2:73,addIntercept = TRUE)
fittedSEMMS <- fitSEMMS(dataYXZ, mincor=0.8, nn=6, minchange= 1,
distribution="P", verbose=T, rnd=F)
fittedGLM <- runLinearModel(dataYXZ,fittedSEMMS$gam.out$nn, "P")
plotMDS(dataYXZ, fittedSEMMS, fittedGLM, ttl="NKI70")}
}
\references{
Goeman, J J. 2010. "L1 penalized estimation in the Cox proportional hazards model." no. 1. 14
}
\keyword{datasets}
|
library(tm)
library(twitteR)
library(proxy)
library(wordcloud)
library(fpc)
library(topicmodels)
library(png)
#clusteringFun(30,0.99,0.9,FALSE,FALSE,0)
# Cluster free-text test records read from "trdata.csv" with k-means on a
# TF-IDF document-term matrix, and optionally fit an LDA topic model.
#   numCluster - number of k-means clusters
#   sparsity   - sparsity threshold for removeSparseTerms() when `flag` is TRUE
#   factor     - fraction of documents used as the upper bound on term frequency
#   flag       - if TRUE, drop sparse terms from the TF-IDF matrix before clustering
#   flag_topic - if TRUE, also fit an LDA model with `numTopic` topics
#   numTopic   - number of LDA topics
# Side effects: writes "cleaned_data.csv", a per-run result CSV, a word-cloud
# PNG, and a PCA scatter plot of the clusters.
# Returns list(term-document matrix, cluster assignments, numCluster).
clusteringFun <- function(numCluster,sparsity,factor,flag,flag_topic,numTopic){
# change this file location to suit your machine
file_loc <- "trdata.csv"
# change TRUE to FALSE if you have no column headings in the CSV
trdata <- read.csv(file_loc, header = FALSE, sep="\n",strip.white=TRUE)
trdata$id <- matrix(1:length(trdata[,1]),length(trdata[,1]),1)
colnames(trdata)[1]<-"Text"
require(tm)
# NOTE(review): nrow is hard-coded to 1591 -- presumably the expected size
# of trdata.csv; confirm it matches the actual input before reuse.
trdataFinal <- data.frame(matrix(nrow=1591,ncol=0))
#stem document
#datasetCopy <- dataset
#dataset <- tm_map(dataset, stemDocument)
#dataset <-tm_map(dataset, stemCompletion, dictionary = datasetCopy)
#######################
# Parse each raw record: split on "-" and route each fragment into one of
# version / numInfo / cvlt / organization / content based on its shape.
for(i in 1:length(trdata$Text)){
trdataFinal$version[i]<-""
trdataFinal$numInfo[i]<-""
trdataFinal$cvlt[i]<-""
trdataFinal$organization[i]<-""
trdataFinal$content[i]<-""
textVec <- strsplit(toString(trdata$Text[[i]]), "-")
textContent <- ""
for(j in 1:length(textVec[[1]])){
textVec[[1]][j] <- trim(textVec[[1]][j])
# tmp = fragment with digits and punctuation stripped
tmp <- ripNumPunc(textVec[[1]][j])
if(tolower(tmp)=="v"){
trdataFinal$version[i]<-textVec[[1]][j]
}
else if(tmp == ""){
# purely numeric/punctuation fragment -> numeric info field
trdataFinal$numInfo[i]<-ripPunc(textVec[[1]][j])
}
else if(tolower(tmp) == "cvlt"){
trdataFinal$cvlt[i]<-textVec[[1]][j]
}
else if(tolower(tmp)!="tr"){
# ALL-CAPS fragments are treated as organization names,
# everything else is re-joined into the content text
if(isAllUpper(tmp)){
trdataFinal$organization[i]<-textVec[[1]][j]
}
else{
if(textContent=="")textContent<- tmp
else textContent<- paste(textContent,tmp,sep="-")
}
}
}
# If nothing was classified as content, fall back to the organization text
if(textContent=="" & trdataFinal$organization[i]!=""){
trdataFinal$content[i] <- trdataFinal$organization[i]
trdataFinal$organization[i]<- ""
}
else trdataFinal$content[i]<-textContent
}
write.csv(trdataFinal, file=paste("cleaned_data.csv",sep=","))
# NOTE(review): trdataFinal has no "Text" column (only version/numInfo/
# cvlt/organization/content), so trdataFinal$Text is NULL here -- this
# looks like it should be trdata$Text or trdataFinal$content; verify.
dataset <- Corpus(DataframeSource(as.data.frame(trdataFinal$Text)))
dataset <- tm_map(dataset, tolower)
dataset <- tm_map(dataset, removeNumbers)
dataset <- tm_map(dataset, removeWords, stopwords("english"))
dataset <- tm_map(dataset, removePunctuation)
dataset <- tm_map(dataset, stripWhitespace)
dataset <- tm_map(dataset,PlainTextDocument)
# TF-IDF document-term matrix for clustering; raw-TF term-document matrix
# for word clouds and LDA. Terms must appear in at least 2 documents and
# at most `factor` of all documents.
dtm <- DocumentTermMatrix(dataset,control=list(wordLengths=c(2,Inf), bounds = list(global = c(2,floor(length(trdata[,1])*factor))),
                           weighting = function(x)
                             weightTfIdf(x, normalize = TRUE)))
tdmTF <- TermDocumentMatrix(dataset,control=list(wordLengths=c(2,Inf),bounds = list(global = c(2,floor(length(trdata[,1])*factor))),
                           weighting = function(x)
                             weightTf(x)))
#Remove empty entry
rowTotals <- apply(dtm , 1, sum)
trdata <- trdata[which(rowTotals!=0),]
dataset <- dataset[which(rowTotals!=0)]
dtm <- dtm[rowTotals> 0,]
colTotals <- apply(tdmTF , 2, sum)
tdmTF <- tdmTF[ ,colTotals> 0]
#Remove duplicates
trdata <- trdata[!duplicated(dtm,MARGIN=1),]
dataset <- dataset[!duplicated(dtm,MARGIN=1)]
dtm <- unique(dtm,MARGIN=1)
tdmTF <- unique(tdmTF,MARGIN=2)
findFreqTerms(tdmTF, 100)
#WordCloud
m <- as.matrix(tdmTF)
word.freq <- sort(rowSums(m), decreasing =T)
png("plots/WordCloudPlotAll.png")
wordcloud(words = names(word.freq), freq = word.freq, min.freq = 15, random.order = F)
dev.off()
if(flag)
dtm <- removeSparseTerms(dtm, sparse = sparsity)
# Fixed seed so k-means results are reproducible across runs
set.seed(100)
m2 <- as.matrix(dtm)
rownames(m2) <- 1:nrow(m2)
# Normalize rows to unit Euclidean length before k-means
norm_eucl <- function(m) m/apply(m, MARGIN=1, FUN=function(x) sum(x^2)^.5)
m_norm <- norm_eucl(m2)
cl <- kmeans(m_norm, numCluster,nstart=25)
trdata$cluster <- matrix(cl$cluster,length(cl$cluster),1)
# Print the top-5 terms of each cluster centre
for(i in 1:numCluster){
cat(paste("cluster",i,": ",sep = ""))
s<-sort(cl$centers[i,], decreasing = T)
cat(names(s)[1:5],"\n")
}
print(table(cl$cluster))
write.csv(trdata, file=paste("Result_NumCluster#",numCluster,".csv",sep=""))
# Visualise clusters on the first two principal components
plot(prcomp(m_norm)$x,col=(cl$cluster)+1, main = "K-Means Clustering Results", xlab = "", ylab = "", pch = 20, cex = 2)
if(flag_topic){
lda = LDA(as.DocumentTermMatrix(tdmTF),k=numTopic)
term <- apply(terms(lda,5), MARGIN = 2, paste, collapse = ", ")
term
#for(i in 1:numTopic){
print(topics(lda,1))
#}
}
#Hierarchical clustering
#d <- dist(m, method="cosine")
#hc <- hclust(d, method="average")
#plot(hclust)
#myplclust(hclust, lab = rep())
#cl <- cutree(hc, 50)
#table(cl)
#findFreqTerms(dtm[cl==1], 50)
resultList<- list(m,trdata$cluster,numCluster)
return(resultList)
}
#plotClusterWordCloud(resultList,TRUE,5,0)
# Render word-cloud PNGs for clusters produced by clusteringFun().
#   resultList - list(term-document matrix, cluster assignments, cluster count)
#   batch      - TRUE renders every cluster; otherwise only `clusterId`
#   mfreq      - minimum word frequency shown in the cloud
#   clusterId  - cluster to render when batch is FALSE
plotClusterWordCloud <- function(resultList, batch, mfreq, clusterId){
m <- resultList[[1]]
trCluster <- resultList[[2]]
numCluster <- resultList[[3]]
# Draw and save one cluster's word cloud; empty clusters are skipped.
render_one <- function(id){
members <- which(trCluster == id)
if(length(members) > 0){
freqs <- sort(rowSums(m[, members]), decreasing = TRUE)
png(paste("plots/WordCloudPlot_Cluster#", id, ".png", sep = ""))
wordcloud(words = names(freqs), freq = freqs, min.freq = mfreq, random.order = FALSE)
dev.off()
}
}
if(batch == TRUE){
for(i in 1:numCluster){
render_one(i)
}
}else{
render_one(clusterId)
}
}
# TRUE if x can be parsed as a number, FALSE otherwise.
# suppressWarnings() silences the "NAs introduced by coercion" warning
# that as.numeric() emits for non-numeric strings.
isNum <- function(x){
  return(!is.na(suppressWarnings(as.numeric(x))))
}
isUpper <- function(x) grepl("^[[:upper:]]+", x)
# TRUE if every space-separated word in x is fully uppercase.
# Fixes two defects of the original: the 1:length() loop iterated over
# c(1, 0) when the split was empty (probing a non-existent element), and
# the unanchored uppercase pattern accepted mixed-case words like "Abc".
# An empty string still yields FALSE, matching the original behavior.
isAllUpper <- function(x){
  words <- strsplit(x, " ", fixed = TRUE)[[1]]
  length(words) > 0 && all(grepl("^[[:upper:]]+$", words))
}
# Strip digits and punctuation from x, then trim surrounding whitespace.
ripNumPunc <- function(x){
  stripped <- gsub("[[:punct:]]", "", gsub("\\d+", "", x))
  trim(stripped)
}
# Strip punctuation from x, then trim surrounding whitespace.
# Fixes the original, which applied gsub() to the undefined variable `y`
# instead of the argument `x`, so every call raised
# "object 'y' not found".
ripPunc <- function(x){
y <- gsub("[[:punct:]]","",x)
y<-trim(y)
return(y)
}
trim <- function (x) gsub("^\\s+|\\s+$", "", x)
# Dendrogram plot with rotated, per-leaf coloured labels.
#   hclust  - an hclust object
#   lab     - leaf labels (defaults to the hclust labels)
#   lab.col - one colour per leaf
#   hang    - fraction of the tree height by which labels hang below leaves
myplclust <- function(hclust,lab=hclust$labels, lab.col=rep(1,length(hclust$labels)), hang=0.1, ...){
y<-rep(hclust$height, 2)
x <- as.numeric(hclust$merge)
# Negative entries in $merge reference original observations (leaves);
# keep only those and record the height at which each leaf is merged.
y <- y[which(x<0)]
x <- x[which(x<0)]
x <- abs(x)
# Sort by observation index so heights line up with leaf positions
y <- y[order(x)]
x <- x[order(x)]
# Draw the tree without labels, then place coloured labels manually,
# rotated 90 degrees and dropped `hang` below each leaf.
plot(hclust, labels = FALSE, hang = hang,...)
text(x=x,y=y[hclust$order] - (max(hclust$height) * hang), labels = lab[hclust$order], col=lab.col[hclust$order], srt=90,adj=c(1,0.5), xpd = NA, ...)
}
|
/clusterFun.R
|
no_license
|
yinghaoh/TestDir1
|
R
| false
| false
| 6,832
|
r
|
library(tm)
library(twitteR)
library(proxy)
library(wordcloud)
library(fpc)
library(topicmodels)
library(png)
#clusteringFun(30,0.99,0.9,FALSE,FALSE,0)
clusteringFun <- function(numCluster,sparsity,factor,flag,flag_topic,numTopic){
# change this file location to suit your machine
file_loc <- "trdata.csv"
# change TRUE to FALSE if you have no column headings in the CSV
trdata <- read.csv(file_loc, header = FALSE, sep="\n",strip.white=TRUE)
trdata$id <- matrix(1:length(trdata[,1]),length(trdata[,1]),1)
colnames(trdata)[1]<-"Text"
require(tm)
trdataFinal <- data.frame(matrix(nrow=1591,ncol=0))
#stem document
#datasetCopy <- dataset
#dataset <- tm_map(dataset, stemDocument)
#dataset <-tm_map(dataset, stemCompletion, dictionary = datasetCopy)
#######################
for(i in 1:length(trdata$Text)){
trdataFinal$version[i]<-""
trdataFinal$numInfo[i]<-""
trdataFinal$cvlt[i]<-""
trdataFinal$organization[i]<-""
trdataFinal$content[i]<-""
textVec <- strsplit(toString(trdata$Text[[i]]), "-")
textContent <- ""
for(j in 1:length(textVec[[1]])){
textVec[[1]][j] <- trim(textVec[[1]][j])
tmp <- ripNumPunc(textVec[[1]][j])
if(tolower(tmp)=="v"){
trdataFinal$version[i]<-textVec[[1]][j]
}
else if(tmp == ""){
trdataFinal$numInfo[i]<-ripPunc(textVec[[1]][j])
}
else if(tolower(tmp) == "cvlt"){
trdataFinal$cvlt[i]<-textVec[[1]][j]
}
else if(tolower(tmp)!="tr"){
if(isAllUpper(tmp)){
trdataFinal$organization[i]<-textVec[[1]][j]
}
else{
if(textContent=="")textContent<- tmp
else textContent<- paste(textContent,tmp,sep="-")
}
}
}
if(textContent=="" & trdataFinal$organization[i]!=""){
trdataFinal$content[i] <- trdataFinal$organization[i]
trdataFinal$organization[i]<- ""
}
else trdataFinal$content[i]<-textContent
}
write.csv(trdataFinal, file=paste("cleaned_data.csv",sep=","))
dataset <- Corpus(DataframeSource(as.data.frame(trdataFinal$Text)))
dataset <- tm_map(dataset, tolower)
dataset <- tm_map(dataset, removeNumbers)
dataset <- tm_map(dataset, removeWords, stopwords("english"))
dataset <- tm_map(dataset, removePunctuation)
dataset <- tm_map(dataset, stripWhitespace)
dataset <- tm_map(dataset,PlainTextDocument)
dtm <- DocumentTermMatrix(dataset,control=list(wordLengths=c(2,Inf), bounds = list(global = c(2,floor(length(trdata[,1])*factor))),
weighting = function(x)
weightTfIdf(x, normalize = TRUE)))
tdmTF <- TermDocumentMatrix(dataset,control=list(wordLengths=c(2,Inf),bounds = list(global = c(2,floor(length(trdata[,1])*factor))),
weighting = function(x)
weightTf(x)))
#Remove empty entry
rowTotals <- apply(dtm , 1, sum)
trdata <- trdata[which(rowTotals!=0),]
dataset <- dataset[which(rowTotals!=0)]
dtm <- dtm[rowTotals> 0,]
colTotals <- apply(tdmTF , 2, sum)
tdmTF <- tdmTF[ ,colTotals> 0]
#Remove duplicates
trdata <- trdata[!duplicated(dtm,MARGIN=1),]
dataset <- dataset[!duplicated(dtm,MARGIN=1)]
dtm <- unique(dtm,MARGIN=1)
tdmTF <- unique(tdmTF,MARGIN=2)
findFreqTerms(tdmTF, 100)
#WordCloud
m <- as.matrix(tdmTF)
word.freq <- sort(rowSums(m), decreasing =T)
png("plots/WordCloudPlotAll.png")
wordcloud(words = names(word.freq), freq = word.freq, min.freq = 15, random.order = F)
dev.off()
if(flag)
dtm <- removeSparseTerms(dtm, sparse = sparsity)
set.seed(100)
m2 <- as.matrix(dtm)
rownames(m2) <- 1:nrow(m2)
norm_eucl <- function(m) m/apply(m, MARGIN=1, FUN=function(x) sum(x^2)^.5)
m_norm <- norm_eucl(m2)
cl <- kmeans(m_norm, numCluster,nstart=25)
trdata$cluster <- matrix(cl$cluster,length(cl$cluster),1)
for(i in 1:numCluster){
cat(paste("cluster",i,": ",sep = ""))
s<-sort(cl$centers[i,], decreasing = T)
cat(names(s)[1:5],"\n")
}
print(table(cl$cluster))
write.csv(trdata, file=paste("Result_NumCluster#",numCluster,".csv",sep=""))
plot(prcomp(m_norm)$x,col=(cl$cluster)+1, main = "K-Means Clustering Results", xlab = "", ylab = "", pch = 20, cex = 2)
if(flag_topic){
lda = LDA(as.DocumentTermMatrix(tdmTF),k=numTopic)
term <- apply(terms(lda,5), MARGIN = 2, paste, collapse = ", ")
term
#for(i in 1:numTopic){
print(topics(lda,1))
#}
}
#Hierarchical clustering
#d <- dist(m, method="cosine")
#hc <- hclust(d, method="average")
#plot(hclust)
#myplclust(hclust, lab = rep())
#cl <- cutree(hc, 50)
#table(cl)
#findFreqTerms(dtm[cl==1], 50)
resultList<- list(m,trdata$cluster,numCluster)
return(resultList)
}
#plotClusterWordCloud(resultList,TRUE,5,0)
# Render word-cloud PNGs for clusters produced by clusteringFun().
#   resultList - list(term-document matrix, cluster assignments, cluster count)
#   batch      - TRUE renders every cluster; otherwise only `clusterId`
#   mfreq      - minimum word frequency shown in the cloud
#   clusterId  - cluster to render when batch is FALSE
plotClusterWordCloud <- function(resultList, batch, mfreq, clusterId){
m <- resultList[[1]]
trCluster <- resultList[[2]]
numCluster <- resultList[[3]]
# Draw and save one cluster's word cloud; empty clusters are skipped.
render_one <- function(id){
members <- which(trCluster == id)
if(length(members) > 0){
freqs <- sort(rowSums(m[, members]), decreasing = TRUE)
png(paste("plots/WordCloudPlot_Cluster#", id, ".png", sep = ""))
wordcloud(words = names(freqs), freq = freqs, min.freq = mfreq, random.order = FALSE)
dev.off()
}
}
if(batch == TRUE){
for(i in 1:numCluster){
render_one(i)
}
}else{
render_one(clusterId)
}
}
# TRUE if x can be parsed as a number, FALSE otherwise.
# suppressWarnings() silences the "NAs introduced by coercion" warning
# that as.numeric() emits for non-numeric strings.
isNum <- function(x){
  return(!is.na(suppressWarnings(as.numeric(x))))
}
isUpper <- function(x) grepl("^[[:upper:]]+", x)
# TRUE if every space-separated word in x is fully uppercase.
# Fixes two defects of the original: the 1:length() loop iterated over
# c(1, 0) when the split was empty (probing a non-existent element), and
# the unanchored uppercase pattern accepted mixed-case words like "Abc".
# An empty string still yields FALSE, matching the original behavior.
isAllUpper <- function(x){
  words <- strsplit(x, " ", fixed = TRUE)[[1]]
  length(words) > 0 && all(grepl("^[[:upper:]]+$", words))
}
# Strip digits and punctuation from x, then trim surrounding whitespace.
ripNumPunc <- function(x){
  stripped <- gsub("[[:punct:]]", "", gsub("\\d+", "", x))
  trim(stripped)
}
# Strip punctuation from x, then trim surrounding whitespace.
# Fixes the original, which applied gsub() to the undefined variable `y`
# instead of the argument `x`, so every call raised
# "object 'y' not found".
ripPunc <- function(x){
y <- gsub("[[:punct:]]","",x)
y<-trim(y)
return(y)
}
trim <- function (x) gsub("^\\s+|\\s+$", "", x)
# Dendrogram plot with rotated, per-leaf coloured labels.
#   hclust  - an hclust object
#   lab     - leaf labels (defaults to the hclust labels)
#   lab.col - one colour per leaf
#   hang    - fraction of the tree height by which labels hang below leaves
myplclust <- function(hclust,lab=hclust$labels, lab.col=rep(1,length(hclust$labels)), hang=0.1, ...){
y<-rep(hclust$height, 2)
x <- as.numeric(hclust$merge)
# Negative entries in $merge reference original observations (leaves);
# keep only those and record the height at which each leaf is merged.
y <- y[which(x<0)]
x <- x[which(x<0)]
x <- abs(x)
# Sort by observation index so heights line up with leaf positions
y <- y[order(x)]
x <- x[order(x)]
# Draw the tree without labels, then place coloured labels manually,
# rotated 90 degrees and dropped `hang` below each leaf.
plot(hclust, labels = FALSE, hang = hang,...)
text(x=x,y=y[hclust$order] - (max(hclust$height) * hang), labels = lab[hclust$order], col=lab.col[hclust$order], srt=90,adj=c(1,0.5), xpd = NA, ...)
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/asca_results.R
\name{asca_results}
\alias{asca_results}
\alias{print.asca}
\alias{summary.asca}
\alias{projections}
\alias{projections.asca}
\alias{print.summary.asca}
\alias{loadings.asca}
\alias{scores.asca}
\title{ASCA Result Methods}
\usage{
\method{print}{asca}(x, ...)
\method{summary}{asca}(object, ...)
\method{print}{summary.asca}(x, digits = 2, ...)
\method{loadings}{asca}(object, factor = 1, ...)
\method{scores}{asca}(object, factor = 1, ...)
projections(object, ...)
\method{projections}{asca}(object, factor = 1, ...)
}
\arguments{
\item{x}{\code{asca} object.}
\item{...}{additional arguments to underlying methods.}
\item{object}{\code{asca} object.}
\item{digits}{\code{integer} number of digits for printing.}
\item{factor}{\code{integer/character} for selecting a model factor.}
}
\value{
Returns depend on method used, e.g. \code{projections.sopls} returns projected samples,
\code{scores.sopls} return scores, while print and summary methods return the object invisibly.
}
\description{
Standard result computation and extraction functions for ASCA (\code{\link{asca}}).
}
\details{
Usage of the functions are shown using generics in the examples in \code{\link{asca}}.
Explained variances are available (block-wise and global) through \code{blockexpl} and \code{print.rosaexpl}.
Object printing and summary are available through:
\code{print.asca} and \code{summary.asca}.
Scores and loadings have their own extensions of \code{scores()} and \code{loadings()} through
\code{scores.asca} and \code{loadings.asca}. Special to ASCA is that scores are on a
factor level basis, while back-projected samples have their own function in \code{projections.asca}.
}
\references{
\itemize{
\item Smilde, A., Jansen, J., Hoefsloot, H., Lamers,R., Van Der Greef, J., and Timmerman, M.(2005). ANOVA-Simultaneous Component Analysis (ASCA): A new tool for analyzing designed metabolomics data. Bioinformatics, 21(13), 3043–3048.
\item Liland, K.H., Smilde, A., Marini, F., and Næs,T. (2018). Confidence ellipsoids for ASCA models based on multivariate regression theory. Journal of Chemometrics, 32(e2990), 1–13.
\item Martin, M. and Govaerts, B. (2020). LiMM-PCA: Combining ASCA+ and linear mixed models to analyse high-dimensional designed data. Journal of Chemometrics, 34(6), e3232.
}
}
\seealso{
Overviews of available methods, \code{\link{multiblock}}, and methods organised by main structure: \code{\link{basic}}, \code{\link{unsupervised}}, \code{\link{asca}}, \code{\link{supervised}} and \code{\link{complex}}.
Common functions for plotting are found in \code{\link{asca_plots}}.
}
|
/man/asca_results.Rd
|
no_license
|
minghao2016/multiblock
|
R
| false
| true
| 2,692
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/asca_results.R
\name{asca_results}
\alias{asca_results}
\alias{print.asca}
\alias{summary.asca}
\alias{projections}
\alias{projections.asca}
\alias{print.summary.asca}
\alias{loadings.asca}
\alias{scores.asca}
\title{ASCA Result Methods}
\usage{
\method{print}{asca}(x, ...)
\method{summary}{asca}(object, ...)
\method{print}{summary.asca}(x, digits = 2, ...)
\method{loadings}{asca}(object, factor = 1, ...)
\method{scores}{asca}(object, factor = 1, ...)
projections(object, ...)
\method{projections}{asca}(object, factor = 1, ...)
}
\arguments{
\item{x}{\code{asca} object.}
\item{...}{additional arguments to underlying methods.}
\item{object}{\code{asca} object.}
\item{digits}{\code{integer} number of digits for printing.}
\item{factor}{\code{integer/character} for selecting a model factor.}
}
\value{
Returns depend on method used, e.g. \code{projections.sopls} returns projected samples,
\code{scores.sopls} return scores, while print and summary methods return the object invisibly.
}
\description{
Standard result computation and extraction functions for ASCA (\code{\link{asca}}).
}
\details{
Usage of the functions are shown using generics in the examples in \code{\link{asca}}.
Explained variances are available (block-wise and global) through \code{blockexpl} and \code{print.rosaexpl}.
Object printing and summary are available through:
\code{print.asca} and \code{summary.asca}.
Scores and loadings have their own extensions of \code{scores()} and \code{loadings()} through
\code{scores.asca} and \code{loadings.asca}. Special to ASCA is that scores are on a
factor level basis, while back-projected samples have their own function in \code{projections.asca}.
}
\references{
\itemize{
\item Smilde, A., Jansen, J., Hoefsloot, H., Lamers,R., Van Der Greef, J., and Timmerman, M.(2005). ANOVA-Simultaneous Component Analysis (ASCA): A new tool for analyzing designed metabolomics data. Bioinformatics, 21(13), 3043–3048.
\item Liland, K.H., Smilde, A., Marini, F., and Næs,T. (2018). Confidence ellipsoids for ASCA models based on multivariate regression theory. Journal of Chemometrics, 32(e2990), 1–13.
\item Martin, M. and Govaerts, B. (2020). LiMM-PCA: Combining ASCA+ and linear mixed models to analyse high-dimensional designed data. Journal of Chemometrics, 34(6), e3232.
}
}
\seealso{
Overviews of available methods, \code{\link{multiblock}}, and methods organised by main structure: \code{\link{basic}}, \code{\link{unsupervised}}, \code{\link{asca}}, \code{\link{supervised}} and \code{\link{complex}}.
Common functions for plotting are found in \code{\link{asca_plots}}.
}
|
# Advent-of-Code-2020-day-11 style seat simulation in two parts.
# Seats are "L" (empty) / "O" (occupied); "#" in the raw input is recoded
# to "B" before buildMaze() (defined outside this file) turns the grid
# into a long-format data frame with columns x, y, t.
source("packages.R")
# source("usefulFunctions.R")
options(digits = 20)
day11 <- read_table2("day11.txt", col_names = FALSE)
rawData<-day11$X1
rawData %>% sapply(function(x) str_replace_all(string = x, pattern = "#", replacement = "B")) %>%
  unname() -> rawData
mazeInfo<-buildMaze(rawData)
# Eight neighbour directions: orthogonal and diagonal offsets
directions<-list(c(1,0),c(0,1),c(1,1),c(-1,1),c(1,-1),c(-1,-1),c(-1,0),c(0,-1))
# To improve
# Part A neighbour table: each seat paired with its 8 adjacent cells.
# NOTE(review): `maze` (in this loop) and `limx`/`limy` (in the filter
# below) are used before they are assigned in this script -- maze is only
# defined after this loop and limx/limy only in part B. This works only
# if they linger in the session from a previous run; confirm and reorder.
tableNeighbour1<-data.frame(a=c(0),b=c(0),c=c(0),d=c(0))
for(j in 1:nrow(maze)){
a<-maze$x[j]
b<-maze$y[j]
for (dir in directions){
place<-c(a,b)+dir
tableNeighbour1 %>% add_row(a=a,b=b,c=place[1],d=place[2]) ->tableNeighbour1
}
if(j%%20 == 0){
print(j)
}
}
# Keep only neighbour cells that fall inside the grid bounds
tableNeighbour1 %>% filter(c>0 & d>0 & c<limx+1 & d<limy+1) ->tableNeighbour1
names(tableNeighbour1)<-c("x","y","c","d")
# Part A simulation: an empty seat with 0 occupied neighbours becomes
# occupied; an occupied seat stays occupied only with fewer than 4
# occupied neighbours. Iterate until the grid stops changing.
finish<-FALSE
nbStep<-0
maze<-mazeInfo$Maze %>% filter(t!=".")
maze$nbNeigh<-0
while(!finish & nbStep<1000){
newMaze<-maze
mazeO<-maze %>% filter(t=="O")
if (nrow(mazeO)>0){
newMaze<-newMaze %>% select(-nbNeigh)
# Count occupied neighbours per seat via the precomputed neighbour table
mazeO %>% left_join(tableNeighbour1,by=c("x","y")) %>%
  group_by(c,d) %>% count %>% rename(x=c,y=d) %>% right_join(newMaze,by=c("x","y")) %>%
  rename(nbNeigh=n) %>% mutate(nbNeigh=ifelse(is.na(nbNeigh),0,nbNeigh))->newMaze
}
newMaze %>% mutate(newT=ifelse(((t=="L" & nbNeigh==0) | (t=="O" & nbNeigh<4)),"O","L"))->newMaze
nbStep<-nbStep+1
if(nbStep%%10 == 0){
print(nbStep)
print(sum(maze$t=="O"))
}
if(sum(newMaze$t != newMaze$newT)==0) finish<-TRUE
newMaze %>% mutate(t=newT) ->maze
}
sum(maze$t=="O")
# 2324
drawMaze(maze)
# B
# Part B: neighbours are now the first SEAT visible in each of the 8
# directions (skipping "." floor cells), and the occupied threshold is 5.
maze<-mazeInfo$Maze %>% filter(t!=".")
refMaze<-mazeInfo$Maze
limx<-max(maze$x)
limy<-max(maze$y)
tableNeighbour<-data.frame(a=c(0),b=c(0),c=c(0),d=c(0))
for(j in 1:nrow(maze)){
a<-maze$x[j]
b<-maze$y[j]
for (dir in directions){
t<-"A"
place<-c(a,b)+dir
if(all(place>0) & (place[1]<limx+1) & (place[2]<limy+1)) {t<-(refMaze %>% filter(x==place[1]&y==place[2]))$t}
# Walk along `dir` over floor cells until a seat or the edge is reached
while(t=="." & all(place>0) & (place[1]<limx+1) & (place[2]<limy+1)){
place<-place+dir
if(all(place>0) & (place[1]<limx+1) & (place[2]<limy+1)) {t<-(refMaze %>% filter(x==place[1]&y==place[2]))$t}
}
tableNeighbour %>% add_row(a=a,b=b,c=place[1],d=place[2]) ->tableNeighbour
}
if(j%%20 == 0){
print(j)
}
}
tableNeighbour %>% filter(c>0 & d>0 & c<limx+1 & d<limy+1) ->tableNeighbour
names(tableNeighbour)<-c("x","y","c","d")
# Part B simulation: same fixed-point loop as part A but with the
# line-of-sight neighbour table and a tolerance of up to 4 occupied seats.
finish<-FALSE
nbStep<-0
maze<-mazeInfo$Maze %>% filter(t!=".")
maze$nbNeigh<-0
while(!finish & nbStep<1000){
newMaze<-maze
mazeO<-maze %>% filter(t=="O")
if (nrow(mazeO)>0){
newMaze<-newMaze %>% select(-nbNeigh)
mazeO %>% left_join(tableNeighbour,by=c("x","y")) %>%
  group_by(c,d) %>% count %>% rename(x=c,y=d) %>% right_join(newMaze,by=c("x","y")) %>%
  rename(nbNeigh=n) %>% mutate(nbNeigh=ifelse(is.na(nbNeigh),0,nbNeigh))->newMaze
}
newMaze %>% mutate(newT=ifelse(((t=="L" & nbNeigh==0) | (t=="O" & nbNeigh<5)),"O","L"))->newMaze
nbStep<-nbStep+1
if(nbStep%%10 == 0){
print(nbStep)
print(sum(maze$t=="O"))
}
if(sum(newMaze$t != newMaze$newT)==0) finish<-TRUE
newMaze %>% mutate(t=newT) ->maze
}
sum(maze$t=="O")
# 2068
drawMaze(maze)
|
/Day11.R
|
no_license
|
Evargalo/AdventOfCode2020
|
R
| false
| false
| 3,222
|
r
|
source("packages.R")
# source("usefulFunctions.R")
options(digits = 20)
day11 <- read_table2("day11.txt", col_names = FALSE)
rawData<-day11$X1
rawData %>% sapply(function(x) str_replace_all(string = x, pattern = "#", replacement = "B")) %>%
unname() -> rawData
mazeInfo<-buildMaze(rawData)
directions<-list(c(1,0),c(0,1),c(1,1),c(-1,1),c(1,-1),c(-1,-1),c(-1,0),c(0,-1))
# A mieux
tableNeighbour1<-data.frame(a=c(0),b=c(0),c=c(0),d=c(0))
for(j in 1:nrow(maze)){
a<-maze$x[j]
b<-maze$y[j]
for (dir in directions){
place<-c(a,b)+dir
tableNeighbour1 %>% add_row(a=a,b=b,c=place[1],d=place[2]) ->tableNeighbour1
}
if(j%%20 == 0){
print(j)
}
}
tableNeighbour1 %>% filter(c>0 & d>0 & c<limx+1 & d<limy+1) ->tableNeighbour1
names(tableNeighbour1)<-c("x","y","c","d")
finish<-FALSE
nbStep<-0
maze<-mazeInfo$Maze %>% filter(t!=".")
maze$nbNeigh<-0
while(!finish & nbStep<1000){
newMaze<-maze
mazeO<-maze %>% filter(t=="O")
if (nrow(mazeO)>0){
newMaze<-newMaze %>% select(-nbNeigh)
mazeO %>% left_join(tableNeighbour1,by=c("x","y")) %>%
group_by(c,d) %>% count %>% rename(x=c,y=d) %>% right_join(newMaze,by=c("x","y")) %>%
rename(nbNeigh=n) %>% mutate(nbNeigh=ifelse(is.na(nbNeigh),0,nbNeigh))->newMaze
}
newMaze %>% mutate(newT=ifelse(((t=="L" & nbNeigh==0) | (t=="O" & nbNeigh<4)),"O","L"))->newMaze
nbStep<-nbStep+1
if(nbStep%%10 == 0){
print(nbStep)
print(sum(maze$t=="O"))
}
if(sum(newMaze$t != newMaze$newT)==0) finish<-TRUE
newMaze %>% mutate(t=newT) ->maze
}
sum(maze$t=="O")
# 2324
drawMaze(maze)
# B
maze<-mazeInfo$Maze %>% filter(t!=".")
refMaze<-mazeInfo$Maze
limx<-max(maze$x)
limy<-max(maze$y)
tableNeighbour<-data.frame(a=c(0),b=c(0),c=c(0),d=c(0))
for(j in 1:nrow(maze)){
a<-maze$x[j]
b<-maze$y[j]
for (dir in directions){
t<-"A"
place<-c(a,b)+dir
if(all(place>0) & (place[1]<limx+1) & (place[2]<limy+1)) {t<-(refMaze %>% filter(x==place[1]&y==place[2]))$t}
while(t=="." & all(place>0) & (place[1]<limx+1) & (place[2]<limy+1)){
place<-place+dir
if(all(place>0) & (place[1]<limx+1) & (place[2]<limy+1)) {t<-(refMaze %>% filter(x==place[1]&y==place[2]))$t}
}
tableNeighbour %>% add_row(a=a,b=b,c=place[1],d=place[2]) ->tableNeighbour
}
if(j%%20 == 0){
print(j)
}
}
tableNeighbour %>% filter(c>0 & d>0 & c<limx+1 & d<limy+1) ->tableNeighbour
names(tableNeighbour)<-c("x","y","c","d")
finish<-FALSE
nbStep<-0
maze<-mazeInfo$Maze %>% filter(t!=".")
maze$nbNeigh<-0
while(!finish & nbStep<1000){
newMaze<-maze
mazeO<-maze %>% filter(t=="O")
if (nrow(mazeO)>0){
newMaze<-newMaze %>% select(-nbNeigh)
mazeO %>% left_join(tableNeighbour,by=c("x","y")) %>%
group_by(c,d) %>% count %>% rename(x=c,y=d) %>% right_join(newMaze,by=c("x","y")) %>%
rename(nbNeigh=n) %>% mutate(nbNeigh=ifelse(is.na(nbNeigh),0,nbNeigh))->newMaze
}
newMaze %>% mutate(newT=ifelse(((t=="L" & nbNeigh==0) | (t=="O" & nbNeigh<5)),"O","L"))->newMaze
nbStep<-nbStep+1
if(nbStep%%10 == 0){
print(nbStep)
print(sum(maze$t=="O"))
}
if(sum(newMaze$t != newMaze$newT)==0) finish<-TRUE
newMaze %>% mutate(t=newT) ->maze
}
sum(maze$t=="O")
# 2068
drawMaze(maze)
|
#' read_transducer
#'
#' Read one or more transducer data files in parallel, dispatching each
#' file to the reader matching its extension, and bind the results into
#' a single table.
#'
#' @param x Character vector of file paths.
#' @param n_cores Integer. Number of worker processes to use.
#' @param ... Additional arguments passed on to the individual readers.
#'
#' @return A single table combining the rows of every file.
#' @export
#'
#' @examples
read_transducer <- function(x, n_cores = 1, ...) {
  # validate the file paths before spinning up workers
  .check_files(x)

  workers <- parallel::makePSOCKcluster(n_cores)
  # ensure the cluster is shut down even if a reader errors
  on.exit(parallel::stopCluster(workers))

  pieces <- parallel::parLapply(workers, x, switch_reader, ...)
  rbindlist(pieces)
}
#' switch_reader
#'
#' Dispatch a single file to the reader matching its extension
#' (case-insensitive): rsk -> read_rbr, xle -> read_levelogger,
#' lev/dat/mon -> read_diver, csv -> read_westbay, xlsx -> read_micron.
#'
#' @param z Character. Path to a single data file.
#' @param ... Additional arguments passed on to the selected reader.
#'
#' @return The parsed data returned by the matched reader.
#' @export
#'
#' @examples
switch_reader <- function(z, ...) {

  # get file extension
  f_ext <- tolower(tools::file_ext(z))

  ext <- c('rsk', 'xle', 'lev', 'dat', 'mon', 'csv', 'xlsx')
  ext_ind <- which(ext %in% f_ext)

  # Fail with a clear message instead of switch()'s cryptic
  # "EXPR must be a length 1 vector" error when the extension is
  # missing or unsupported. (The original computed ext_ind but
  # never used it.)
  if (length(ext_ind) != 1) {
    stop("Unsupported file extension: '", f_ext, "' (", z, ")", call. = FALSE)
  }

  # choose the transducer type by the index computed above
  switch(EXPR = ext_ind,
         read_rbr(z, ...),
         read_levelogger(z, ...),
         read_diver(z, ...),
         read_diver(z, ...),
         read_diver(z, ...),
         read_westbay(z, ...),
         read_micron(z, ...))

}
|
/R/read_transducer.R
|
no_license
|
jkennel/transducer
|
R
| false
| false
| 921
|
r
|
#' read_transducer
#'
#' Read one or more transducer data files in parallel, dispatching each
#' file to the reader matching its extension, and bind the results into
#' a single table.
#'
#' @param x Character vector of file paths.
#' @param n_cores Integer. Number of worker processes to use.
#' @param ... Additional arguments passed on to the individual readers.
#'
#' @return A single table combining the rows of every file.
#' @export
#'
#' @examples
read_transducer <- function(x, n_cores = 1, ...) {
  # validate the file paths before spinning up workers
  .check_files(x)

  workers <- parallel::makePSOCKcluster(n_cores)
  # ensure the cluster is shut down even if a reader errors
  on.exit(parallel::stopCluster(workers))

  pieces <- parallel::parLapply(workers, x, switch_reader, ...)
  rbindlist(pieces)
}
#' switch_reader
#'
#' Dispatch a single file to the reader matching its extension
#' (case-insensitive): rsk -> read_rbr, xle -> read_levelogger,
#' lev/dat/mon -> read_diver, csv -> read_westbay, xlsx -> read_micron.
#'
#' @param z Character. Path to a single data file.
#' @param ... Additional arguments passed on to the selected reader.
#'
#' @return The parsed data returned by the matched reader.
#' @export
#'
#' @examples
switch_reader <- function(z, ...) {

  # get file extension
  f_ext <- tolower(tools::file_ext(z))

  ext <- c('rsk', 'xle', 'lev', 'dat', 'mon', 'csv', 'xlsx')
  ext_ind <- which(ext %in% f_ext)

  # Fail with a clear message instead of switch()'s cryptic
  # "EXPR must be a length 1 vector" error when the extension is
  # missing or unsupported. (The original computed ext_ind but
  # never used it.)
  if (length(ext_ind) != 1) {
    stop("Unsupported file extension: '", f_ext, "' (", z, ")", call. = FALSE)
  }

  # choose the transducer type by the index computed above
  switch(EXPR = ext_ind,
         read_rbr(z, ...),
         read_levelogger(z, ...),
         read_diver(z, ...),
         read_diver(z, ...),
         read_diver(z, ...),
         read_westbay(z, ...),
         read_micron(z, ...))

}
|
library(shiny)
library(dplyr)
library(stringr)
library(leaflet)

# Shiny server for a bus-line map (the leaflet_* helper names and the
# map centre suggest SPTrans/São Paulo GTFS data -- confirm).
# NOTE(review): the tables `stops`, `stop_times`, `trips` and the
# leaflet_* helpers must already be loaded elsewhere in the package;
# this file does not define them.

# Trips passing a stop whose name contains "Vargas C/B",
# keeping the stop coordinates for plotting.
x1 <- stops %>%
  filter(str_detect(stop_name, 'Vargas C/B')) %>%
  inner_join(stop_times, 'stop_id') %>%
  select(stop_name, stop_id, trip_id, stop_lon, stop_lat) %>%
  inner_join(trips, 'trip_id')

# Trips that also pass one of three specific stop ids.
x2 <- stops %>%
  filter(stop_id %in% c(340015459, 340015468, 340015325)) %>%
  inner_join(stop_times, 'stop_id') %>%
  select(stop_name, stop_id, trip_id) %>%
  inner_join(trips, 'trip_id') %>%
  select(trip_id, stop_name, stop_id)

# Routes serving both stop sets; label each stop with the route ids
# that pass through it (HTML line breaks for the leaflet popup).
cods <- inner_join(x1, x2, 'trip_id') %>%
  select(-service_id, -trip_id) %>%
  head(10) %>%
  group_by(stop_id.x) %>%
  mutate(label = paste(route_id, collapse = '<br/>')) %>%
  ungroup

# Base map: route shapes plus stop markers, centred on the area of interest.
m0 <- leaflet_desenha_shapes(shape_ids = cods$shape_id) %>%
  addCircles(lng = ~ stop_lon,
             lat = ~ stop_lat,
             popup = ~ label,
             data = distinct(cods, stop_id.x)) %>%
  setView(-46.65431, -23.55924, 17)

shinyServer(function(session, input, output) {
  output$map <- renderLeaflet({
    # Re-render the line positions every 10 seconds
    invalidateLater(10000, session)
    leaflet_mapeia_linhas(cods$route_id, sentido = cods$direction_id, m0) %>%
      setView(-46.65431, -23.55924, 17)
  })
})
|
/inst/shiny/trabalho/server.R
|
no_license
|
jtrecenti/sptrans
|
R
| false
| false
| 1,174
|
r
|
library(shiny)
library(dplyr)
library(stringr)
library(leaflet)
x1 <- stops %>%
filter(str_detect(stop_name, 'Vargas C/B')) %>%
inner_join(stop_times, 'stop_id') %>%
select(stop_name, stop_id, trip_id, stop_lon, stop_lat) %>%
inner_join(trips, 'trip_id')
x2 <- stops %>%
filter(stop_id %in% c(340015459, 340015468, 340015325)) %>%
inner_join(stop_times, 'stop_id') %>%
select(stop_name, stop_id, trip_id) %>%
inner_join(trips, 'trip_id') %>%
select(trip_id, stop_name, stop_id)
cods <- inner_join(x1, x2, 'trip_id') %>%
select(-service_id, -trip_id) %>%
head(10) %>%
group_by(stop_id.x) %>%
mutate(label = paste(route_id, collapse = '<br/>')) %>%
ungroup
m0 <- leaflet_desenha_shapes(shape_ids = cods$shape_id) %>%
addCircles(lng = ~ stop_lon,
lat = ~ stop_lat,
popup = ~ label,
data = distinct(cods, stop_id.x)) %>%
setView(-46.65431, -23.55924, 17)
shinyServer(function(session, input, output) {
output$map <- renderLeaflet({
invalidateLater(10000, session)
leaflet_mapeia_linhas(cods$route_id, sentido = cods$direction_id, m0) %>%
setView(-46.65431, -23.55924, 17)
})
})
|
# Exercise script: simulate human heights as Normal(mean = 69, sd = 10).
# NOTE(review): setwd() to an absolute personal path and rm(list=ls()) are
# script anti-patterns -- they break portability and wipe the session;
# kept unchanged here as this is a learning exercise.
setwd("C:/Users/Hayden/Documents/GitHub/bootcamp-2016-example")
rm(list=ls())
#assuming that height normal distribution mean 69, sd of 10
#1
#need a function called get_heights
#that will use rnorm to get a sample of heights
#storing it in variable "heights"
#make it so that can be used for both 100 and 1000
# Version 1: returns the raw sample (an assignment is the function's last
# expression, so its value is returned invisibly).
get_heights<-function(num){
heights<-rnorm(num, mean=69, sd=10)
}
#calling number from 100 first sample
hist(get_heights(100))
hist(get_heights(1000))
#checking the outputs with hist shows that it has worked yay!
#it works
#2 and #3 (worked for both)
#want to compute average height from heights vector
# Version 2: computes the mean but, like version 1, only returns it
# invisibly via the trailing assignment (superseded by version 3 below,
# which adds an explicit return()).
get_heights<-function(num){
heights<-rnorm(num, mean=69, sd=10);
average=mean(heights);
}
# Final version of get_heights: draw `num` observations from
# Normal(mean = 69, sd = 10) and return their sample mean (a single number).
get_heights <- function(num) {
  sampled <- rnorm(num, mean = 69, sd = 10)
  mean(sampled)
}
# Sanity checks: each call returns a single mean near 69.
get_heights(100)
get_heights(1000)
#both give results around 69
#4
#need to repeat multiple times with for loop,
#so for 1:1000, get_heights()
#save for each value into the position i of a vector mean_heights_100
#can use rep function to generate a vector that is certain value long but a repeated number it multiple times
# 1000 sample means, each from a sample of size 100
# (vector preallocated with rep() before the loop).
mean_heights_100<-c(rep(0, 1000))
for (i in 1:1000){
mean_heights_100[i]<-get_heights(100)
}
print(mean_heights_100)
#checking it, and the values are around 69
#5
#doing the same thing but changing the input of get_heights to 1000
# 1000 sample means, each from a sample of size 1000 -- these should be
# more tightly clustered around 69 than the n=100 means (smaller SE).
mean_heights_1000<-c(rep(0, 1000))
for (i in 1:1000){
mean_heights_1000[i]<-get_heights(1000)
}
print(mean_heights_1000)
#6
#plot histogram of the distribution of the average heights for sample size of 100 and 1000
#the two sets of data should be plotted on same axes
#add a legend
#label the axes
#plot data from 100 samples in red
#plot data from 1000 samples in blue
#will likely need to set the x value to be big enough
#can set x axis size with xlim=c(start,stop)
#can set breaks with breaks=desired break size
# Shared bin edges (width 0.5) so both distributions use identical bins;
# hist(..., plot implied)$counts extracts bin counts for the grouped barplot.
bins2<-seq(65, 73, by=0.5)
counts_m_h_100<-hist(mean_heights_100, breaks=bins2)$counts
counts_m_h_1000<-hist(mean_heights_1000, breaks=bins2)$counts
pdf(file="Average_Height_Histogram.pdf", width=6,height=6)
par(mfrow=c(1,1), mar=c(4, 4, 3, 2))
# NOTE(review): beside=T uses the reassignable shortcut T; prefer TRUE.
# col=c(2,4) is red/blue from the default palette.
barplot(rbind(counts_m_h_100,counts_m_h_1000), names.arg = c(seq(65,72.5, by=0.5)), col=c(2,4), beside=T, xlab="Average height (inches)", ylab="Count")
legend(6,350,c("n=100","n=1000"),col=c(2,4),lwd=4)
dev.off()
#issue, had to reorder where names.arg went, before col instead of after
#issue names.arg not have incorrect number of names
#basing it on the example in class
#thinking logically
#there will be one less bin than break
#therefore need names= one less interval (set by "by=") than the number of breaks
#set it manually
#finished!!!
|
/R-Plotting-Exercise1.R
|
no_license
|
hspeck/bootcamp-2016-example
|
R
| false
| false
| 2,739
|
r
|
setwd("C:/Users/Hayden/Documents/GitHub/bootcamp-2016-example")
rm(list=ls())
#assuming that height normal distribution mean 69, sd of 10
#1
#need a function called get_heights
#that will use rnorm to get a sample of heihgts
#storing it in variable "Heights"
#make it so that can be used for both 100 and 1000
get_heights<-function(num){
heights<-rnorm(num, mean=69, sd=10)
}
#calling number from 100 first sample
hist(get_heights(100))
hist(get_heights(1000))
#checking the outputs with hist shows that it has worked yay!
#it works
#2 and #3 (worked for both)
#want to compute average height from heights vector
get_heights<-function(num){
heights<-rnorm(num, mean=69, sd=10);
average=mean(heights);
}
#3 now return the heights
get_heights<-function(num){
heights<-rnorm(num, mean=69, sd=10);
average=mean(heights);
return(average)
}
get_heights(100)
get_heights(1000)
#both give results around 69
#4
#need to repeat multiple times with for loop,
#so for 1:1000, get_heights()
#save for each value into the position i of a vector mean_heights_100
#can use rep function to generate a vector that is certain value long but a repeated number it multiple times
mean_heights_100<-c(rep(0, 1000))
for (i in 1:1000){
mean_heights_100[i]<-get_heights(100)
}
print(mean_heights_100)
#checking it, and the values are around 69
#5
#doing the same thing but changing the input of get_heights to 1000
mean_heights_1000<-c(rep(0, 1000))
for (i in 1:1000){
mean_heights_1000[i]<-get_heights(1000)
}
print(mean_heights_1000)
#6
#plot histogram of the distribution of the average heights for sample size of 100 and 1000
#the two sets of data should be plotted on same axes
#add a legend
#label the axes
#plot data from 100 samples in red
#plot data from 1000 samples in blue
#will likely need to set the x value to be big enough
#can set x axis size with xlim=c(start,stop)
#can set breaks with breaks=desired break size
bins2<-seq(65, 73, by=0.5)
counts_m_h_100<-hist(mean_heights_100, breaks=bins2)$counts
counts_m_h_1000<-hist(mean_heights_1000, breaks=bins2)$counts
pdf(file="Average_Height_Histogram.pdf", width=6,height=6)
par(mfrow=c(1,1), mar=c(4, 4, 3, 2))
barplot(rbind(counts_m_h_100,counts_m_h_1000), names.arg = c(seq(65,72.5, by=0.5)), col=c(2,4), beside=T, xlab="Average height (inches)", ylab="Count")
legend(6,350,c("n=100","n=1000"),col=c(2,4),lwd=4)
dev.off()
#issue, had to reorder where names.arg went, before col instead of after
#issue names.arg not have incorrect number of names
#basing it on the example in class
#thinking logically
#there will be one less bin than break
#therefore need names= one less interval (set by "by=") than the number of breaks
#set it manually
#finished!!!
|
#!/usr/bin/Rscript
################WORKING DIRECTORY MAGIC##########
##################DO NOT ALTER###################
initial.options <- commandArgs(trailingOnly = FALSE)
file.arg.name <- "--file="
script.name <- sub(file.arg.name, "", initial.options[grep(file.arg.name, initial.options)])
script.basename <- dirname(script.name)
execution_wd=getwd()
setwd(script.basename)
base_script_wd=getwd()
###################END BLOCK#####################
##############COMMON REQUIREMENTS################
##################DO NOT ALTER###################
#Parser file with all the parser types.
source('Rscript/parser.R')
###################END BLOCK#####################
##############SCRIPT PARAMETERS##################
#############MODIFY AS REQUIRED##################
#Path file for setting up the location of your R files.
#you can alternatively set rfold='/path/to/your/files', but its less modular.
source('Rscript/path.R')
#description and epilog for the console help output.
#e.g. description="This scripts does this and that."
#e.g. epilog="use with caution. refer to www.site.com .."
description=''
epilog=''
#External parser function: for usual options.
#e.g. parser=parser_swf(description,epilog)
parser=parser_pred(description,epilog)
#additional argparse entries for this particular script.
#e.g. parser$add_argument('-s','--sum', dest='accumulate', action='store_const', const=sum, default=max,help='sum the integers (default: find the max)')
#files you want to source from the rfold folder for this Rscript
#e.g. c('common.R','histogram.R')
userfiles=c()
###################END BLOCK#####################
###SOURCING, CONTEXT CLEANING, ARG RETRIEVE######
##################DO NOT ALTER###################
#code insertion.
rm(list=setdiff(ls(),c("parser","rfold","userfiles","execution_wd","base_script_wd")))
args=parser$parse_args()
#Verbosity management.
source('Rscript/verbosity.R')
verb<-verbosity_function_maker(args$verbosity)
verb(args,"Parameters to the script")
setwd(rfold)
rfold_wd=getwd()
for (filename in userfiles) {
source(filename)
}
setwd(base_script_wd)
rm(parser,rfold,userfiles)
###################END BLOCK#####################
#####################OUTPUT_MANAGEMENT###########
################MODIFY IF NEEDED####################
source('Rscript/output_management.R')
options_vector=set_output(args$device,args$output,args$ratio,execution_wd)
###################END BLOCK#####################
#############BEGIN YOUR RSCRIPT HERE.############
#here is your working directory :)
setwd(execution_wd)
#You have access to:
#set_output(device='pdf',filename='tmp.pdf',ratio=1)
#use it if you really want to change the output type on your way.
#pause_output(pause=TRUE) for x11
#use it to pause for output.
#args for retrieving your arguments.
library('plyr')
library('ggplot2')
#type stuff here.
# Plot smoothed squared-error curves (error vs. job index) for one or more
# predictors against the true values.
#
# preds:       list of single-column data.frames of predictions
#              (presumably from read.table -- confirm with caller).
# true_values: one-element list holding the true runtime values.
# labelnames:  one label per element of `preds`; becomes the curve legend.
#
# Side effects: print()s a summary and the ggplot object (draws to the
# active device); no return value of interest.
plot_rec_curves <- function(preds,true_values,labelnames){
true_runtime=ldply(true_values,data.frame)
colnames(true_runtime)<-c("value")
preds_dfs=data.frame()
for (i in 1:length(preds)){
# NOTE(review): fragile column-order trick. After ldply(), `d` has one
# column (the predictions, assumed NOT already named "value");
# `d$value = labelnames[i]` APPENDS a second column holding the label,
# and the colnames() call below renames by position:
# (prediction, label, index) -> ("value", "type", "id"). Confirm before edit.
d=ldply(preds[i],data.frame)
d$value=labelnames[i]
d$id=1:nrow(d)
colnames(d)<-c("value","type","id")
# Replace the prediction with its squared error against the true value
# at the same index ( `**` is parsed as `^` in R).
d$value=(true_runtime$value-d$value)**2
#print(typeof(d))
#print(class(d))
#print(summary(d))
# Accumulate long-format rows; rbind-in-a-loop is O(n^2) but the
# inputs here are small.
preds_dfs=rbind(preds_dfs,d)
}
print(summary(preds_dfs))
# One smoothed curve per predictor, coloured by label.
p0 = ggplot(preds_dfs, aes(x = id,y=value)) +
stat_smooth(aes(group = type, colour = type))+
scale_color_brewer(palette="Set3")
print(p0)
# Earlier density-plot experiment, kept commented out by the author.
#m <- ggplot(d, aes(x=value))
#m +
#geom_density(aes(group=type,fill=type),adjust=4, colour="black",alpha=0.2,fill="gray20")+
#coord_trans(y = "sqrt")+
#scale_x_continuous(breaks=seq(from=0,to=86400,by=3600),labels=seq(from=0,to=24,by=1))+
#xlab("Absolute error (hours)")+
#ylab("Density")+
#ggtitle("Kernel density estimation of the absolute error.")+
#annotate("text",x=12500,y=0.000025,label="Random Forest",size=5)+
#annotate("text",x=4500,y=0.0003,label="Baseline",size=5)+
#theme_bw()
}
print(args$pred_filenames)
data=lapply(args$pred_filenames,read.table)
plot_rec_curves(preds=data[-1],true_values=data[1],labelnames=args$pred_filenames[-1])
print(args$pred_filenames)
###################END BLOCK#####################
#############X11 OUTPUT MANAGEMENT###############
#################MODIFY IF NEEDED################
pause_output(options_vector)
###################END BLOCK#####################
|
/prediction_analysis/MSE_vs_t_smoothed.R
|
no_license
|
algo74/predictsim
|
R
| false
| false
| 4,425
|
r
|
#!/usr/bin/Rscript
################WORKING DIRECTORY MAGIC##########
##################DO NOT ALTER###################
initial.options <- commandArgs(trailingOnly = FALSE)
file.arg.name <- "--file="
script.name <- sub(file.arg.name, "", initial.options[grep(file.arg.name, initial.options)])
script.basename <- dirname(script.name)
execution_wd=getwd()
setwd(script.basename)
base_script_wd=getwd()
###################END BLOCK#####################
##############COMMON REQUIREMENTS################
##################DO NOT ALTER###################
#Parser file with all the parser types.
source('Rscript/parser.R')
###################END BLOCK#####################
##############SCRIPT PARAMETERS##################
#############MODIFY AS REQUIRED##################
#Path file for setting up the location of your R files.
#you can alternatively set rfold='/path/to/your/files', but its less modular.
source('Rscript/path.R')
#description and epilog for the console help output.
#e.g. description="This scripts does this and that."
#e.g. epilog="use with caution. refer to www.site.com .."
description=''
epilog=''
#External parser function: for usual options.
#e.g. parser=parser_swf(description,epilog)
parser=parser_pred(description,epilog)
#additional argparse entries for this particular script.
#e.g. parser$add_argument('-s','--sum', dest='accumulate', action='store_const', const=sum, default=max,help='sum the integers (default: find the max)')
#files you want to source from the rfold folder for this Rscript
#e.g. c('common.R','histogram.R')
userfiles=c()
###################END BLOCK#####################
###SOURCING, CONTEXT CLEANING, ARG RETRIEVE######
##################DO NOT ALTER###################
#code insertion.
rm(list=setdiff(ls(),c("parser","rfold","userfiles","execution_wd","base_script_wd")))
args=parser$parse_args()
#Verbosity management.
source('Rscript/verbosity.R')
verb<-verbosity_function_maker(args$verbosity)
verb(args,"Parameters to the script")
setwd(rfold)
rfold_wd=getwd()
for (filename in userfiles) {
source(filename)
}
setwd(base_script_wd)
rm(parser,rfold,userfiles)
###################END BLOCK#####################
#####################OUTPUT_MANAGEMENT###########
################MODIFY IF NEEDED####################
source('Rscript/output_management.R')
options_vector=set_output(args$device,args$output,args$ratio,execution_wd)
###################END BLOCK#####################
#############BEGIN YOUR RSCRIPT HERE.############
#here is your working directory :)
setwd(execution_wd)
#You have access to:
#set_output(device='pdf',filename='tmp.pdf',ratio=1)
#use it if you really want to change the output type on your way.
#pause_output(pause=TRUE) for x11
#use it to pause for output.
#args for retrieving your arguments.
library('plyr')
library('ggplot2')
#type stuff here.
plot_rec_curves <- function(preds,true_values,labelnames){
true_runtime=ldply(true_values,data.frame)
colnames(true_runtime)<-c("value")
preds_dfs=data.frame()
for (i in 1:length(preds)){
d=ldply(preds[i],data.frame)
d$value=labelnames[i]
d$id=1:nrow(d)
colnames(d)<-c("value","type","id")
d$value=(true_runtime$value-d$value)**2
#print(typeof(d))
#print(class(d))
#print(summary(d))
preds_dfs=rbind(preds_dfs,d)
}
print(summary(preds_dfs))
p0 = ggplot(preds_dfs, aes(x = id,y=value)) +
stat_smooth(aes(group = type, colour = type))+
scale_color_brewer(palette="Set3")
print(p0)
#m <- ggplot(d, aes(x=value))
#m +
#geom_density(aes(group=type,fill=type),adjust=4, colour="black",alpha=0.2,fill="gray20")+
#coord_trans(y = "sqrt")+
#scale_x_continuous(breaks=seq(from=0,to=86400,by=3600),labels=seq(from=0,to=24,by=1))+
#xlab("Absolute error (hours)")+
#ylab("Density")+
#ggtitle("Kernel density estimation of the absolute error.")+
#annotate("text",x=12500,y=0.000025,label="Random Forest",size=5)+
#annotate("text",x=4500,y=0.0003,label="Baseline",size=5)+
#theme_bw()
}
print(args$pred_filenames)
data=lapply(args$pred_filenames,read.table)
plot_rec_curves(preds=data[-1],true_values=data[1],labelnames=args$pred_filenames[-1])
print(args$pred_filenames)
###################END BLOCK#####################
#############X11 OUTPUT MANAGEMENT###############
#################MODIFY IF NEEDED################
pause_output(options_vector)
###################END BLOCK#####################
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/signal.R
\name{signal_experimental}
\alias{signal_experimental}
\alias{signal_superseded}
\title{Deprecated functions for signalling experimental and lifecycle stages}
\usage{
signal_experimental(when, what, env = caller_env())
signal_superseded(when, what, with = NULL, env = caller_env())
}
\description{
\ifelse{html}{\href{https://lifecycle.r-lib.org/articles/stages.html#deprecated}{\figure{lifecycle-deprecated.svg}{options: alt='[Deprecated]'}}}{\strong{[Deprecated]}}
Please use \code{\link[=signal_stage]{signal_stage()}} instead
}
\keyword{internal}
|
/man/signal_experimental.Rd
|
permissive
|
isabella232/lifecycle
|
R
| false
| true
| 638
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/signal.R
\name{signal_experimental}
\alias{signal_experimental}
\alias{signal_superseded}
\title{Deprecated functions for signalling experimental and lifecycle stages}
\usage{
signal_experimental(when, what, env = caller_env())
signal_superseded(when, what, with = NULL, env = caller_env())
}
\description{
\ifelse{html}{\href{https://lifecycle.r-lib.org/articles/stages.html#deprecated}{\figure{lifecycle-deprecated.svg}{options: alt='[Deprecated]'}}}{\strong{[Deprecated]}}
Please use \code{\link[=signal_stage]{signal_stage()}} instead
}
\keyword{internal}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/configservice_operations.R
\name{configservice_describe_configuration_aggregators}
\alias{configservice_describe_configuration_aggregators}
\title{Returns the details of one or more configuration aggregators}
\usage{
configservice_describe_configuration_aggregators(
ConfigurationAggregatorNames, NextToken, Limit)
}
\arguments{
\item{ConfigurationAggregatorNames}{The name of the configuration aggregators.}
\item{NextToken}{The \code{nextToken} string returned on a previous page that you use to get
the next page of results in a paginated response.}
\item{Limit}{The maximum number of configuration aggregators returned on each page.
The default is maximum. If you specify 0, AWS Config uses the default.}
}
\description{
Returns the details of one or more configuration aggregators. If the
configuration aggregator is not specified, this action returns the
details for all the configuration aggregators associated with the
account.
}
\section{Request syntax}{
\preformatted{svc$describe_configuration_aggregators(
ConfigurationAggregatorNames = list(
"string"
),
NextToken = "string",
Limit = 123
)
}
}
\keyword{internal}
|
/cran/paws.management/man/configservice_describe_configuration_aggregators.Rd
|
permissive
|
johnnytommy/paws
|
R
| false
| true
| 1,221
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/configservice_operations.R
\name{configservice_describe_configuration_aggregators}
\alias{configservice_describe_configuration_aggregators}
\title{Returns the details of one or more configuration aggregators}
\usage{
configservice_describe_configuration_aggregators(
ConfigurationAggregatorNames, NextToken, Limit)
}
\arguments{
\item{ConfigurationAggregatorNames}{The name of the configuration aggregators.}
\item{NextToken}{The \code{nextToken} string returned on a previous page that you use to get
the next page of results in a paginated response.}
\item{Limit}{The maximum number of configuration aggregators returned on each page.
The default is maximum. If you specify 0, AWS Config uses the default.}
}
\description{
Returns the details of one or more configuration aggregators. If the
configuration aggregator is not specified, this action returns the
details for all the configuration aggregators associated with the
account.
}
\section{Request syntax}{
\preformatted{svc$describe_configuration_aggregators(
ConfigurationAggregatorNames = list(
"string"
),
NextToken = "string",
Limit = 123
)
}
}
\keyword{internal}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/ec2_operations.R
\name{ec2_attach_volume}
\alias{ec2_attach_volume}
\title{Attaches an EBS volume to a running or stopped instance and exposes it
to the instance with the specified device name}
\usage{
ec2_attach_volume(Device, InstanceId, VolumeId, DryRun)
}
\arguments{
\item{Device}{[required] The device name (for example, \verb{/dev/sdh} or \code{xvdh}).}
\item{InstanceId}{[required] The ID of the instance.}
\item{VolumeId}{[required] The ID of the EBS volume. The volume and instance must be within the
same Availability Zone.}
\item{DryRun}{Checks whether you have the required permissions for the action, without
actually making the request, and provides an error response. If you have
the required permissions, the error response is \code{DryRunOperation}.
Otherwise, it is \code{UnauthorizedOperation}.}
}
\description{
Attaches an EBS volume to a running or stopped instance and exposes it
to the instance with the specified device name.
}
\details{
Encrypted EBS volumes must be attached to instances that support Amazon
EBS encryption. For more information, see \href{https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/EBSEncryption.html}{Amazon EBS Encryption}
in the \emph{Amazon Elastic Compute Cloud User Guide}.
After you attach an EBS volume, you must make it available. For more
information, see \href{https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ebs-using-volumes.html}{Making an EBS Volume Available For Use}.
If a volume has an AWS Marketplace product code:
\itemize{
\item The volume can be attached only to a stopped instance.
\item AWS Marketplace product codes are copied from the volume to the
instance.
\item You must be subscribed to the product.
\item The instance type and operating system of the instance must support
the product. For example, you can\'t detach a volume from a Windows
instance and attach it to a Linux instance.
}
For more information, see \href{https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ebs-attaching-volume.html}{Attaching Amazon EBS Volumes}
in the \emph{Amazon Elastic Compute Cloud User Guide}.
}
\section{Request syntax}{
\preformatted{svc$attach_volume(
Device = "string",
InstanceId = "string",
VolumeId = "string",
DryRun = TRUE|FALSE
)
}
}
\examples{
\dontrun{
# This example attaches a volume (`vol-1234567890abcdef0`) to an
# instance (`i-01474ef662b89480`) as `/dev/sdf`.
svc$attach_volume(
Device = "/dev/sdf",
InstanceId = "i-01474ef662b89480",
VolumeId = "vol-1234567890abcdef0"
)
}
}
\keyword{internal}
|
/paws/man/ec2_attach_volume.Rd
|
permissive
|
johnnytommy/paws
|
R
| false
| true
| 2,591
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/ec2_operations.R
\name{ec2_attach_volume}
\alias{ec2_attach_volume}
\title{Attaches an EBS volume to a running or stopped instance and exposes it
to the instance with the specified device name}
\usage{
ec2_attach_volume(Device, InstanceId, VolumeId, DryRun)
}
\arguments{
\item{Device}{[required] The device name (for example, \verb{/dev/sdh} or \code{xvdh}).}
\item{InstanceId}{[required] The ID of the instance.}
\item{VolumeId}{[required] The ID of the EBS volume. The volume and instance must be within the
same Availability Zone.}
\item{DryRun}{Checks whether you have the required permissions for the action, without
actually making the request, and provides an error response. If you have
the required permissions, the error response is \code{DryRunOperation}.
Otherwise, it is \code{UnauthorizedOperation}.}
}
\description{
Attaches an EBS volume to a running or stopped instance and exposes it
to the instance with the specified device name.
}
\details{
Encrypted EBS volumes must be attached to instances that support Amazon
EBS encryption. For more information, see \href{https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/EBSEncryption.html}{Amazon EBS Encryption}
in the \emph{Amazon Elastic Compute Cloud User Guide}.
After you attach an EBS volume, you must make it available. For more
information, see \href{https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ebs-using-volumes.html}{Making an EBS Volume Available For Use}.
If a volume has an AWS Marketplace product code:
\itemize{
\item The volume can be attached only to a stopped instance.
\item AWS Marketplace product codes are copied from the volume to the
instance.
\item You must be subscribed to the product.
\item The instance type and operating system of the instance must support
the product. For example, you can\'t detach a volume from a Windows
instance and attach it to a Linux instance.
}
For more information, see \href{https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ebs-attaching-volume.html}{Attaching Amazon EBS Volumes}
in the \emph{Amazon Elastic Compute Cloud User Guide}.
}
\section{Request syntax}{
\preformatted{svc$attach_volume(
Device = "string",
InstanceId = "string",
VolumeId = "string",
DryRun = TRUE|FALSE
)
}
}
\examples{
\dontrun{
# This example attaches a volume (`vol-1234567890abcdef0`) to an
# instance (`i-01474ef662b89480`) as `/dev/sdf`.
svc$attach_volume(
Device = "/dev/sdf",
InstanceId = "i-01474ef662b89480",
VolumeId = "vol-1234567890abcdef0"
)
}
}
\keyword{internal}
|
context("Filter")
df <- expand.grid(a = 1:10, b = letters[1:10],
KEEP.OUT.ATTRS = FALSE,
stringsAsFactors = FALSE)
tbls <- test_load(df)
test_that("filter results independent of data tbl (simple)", {
skip_if_no_sqlite()
expected <- df[df$a > 6, , drop = FALSE]
compare_tbls(tbls[c("df","sqlite")], function(x) {
filter_(x, ~ a > 6)
}, expected)
})
test_that("filter captures local variables", {
sel <- c("d", "g", "a")
expected <- df[df$b %in% sel, , drop = FALSE]
compare_tbls(tbls, function(x) x %>% filter(b %in% sel), ref = expected)
})
test_that("two filters equivalent to one", {
expected <- filter(df, a > 4 & b == "a")
compare_tbls(tbls, function(x) x %>% filter(a > 4) %>% filter(b == "a"),
ref = expected)
})
test_that("filter fails if inputs incorrect length (#156)", {
expect_error( filter(tbl_df(mtcars), c(F, T)) )
expect_error( filter(group_by(mtcars, am), c(F, T)) )
})
test_that("filter gives useful error message when given incorrect input", {
expect_error( filter(tbl_df(mtcars), x ), "unknown column" )
})
# filter() must reject named arguments -- they are almost always a typo
# for `==` (e.g. filter(df, x = 1) instead of filter(df, x == 1)).
# Fix: description typo "complains in" -> "complains if".
test_that("filter complains if inputs are named", {
  expect_error(filter(mtcars, x = 1), "takes unnamed arguments")
  expect_error(filter(mtcars, x = 1 & y > 2), "takes unnamed arguments")
})
test_that("filter handles passing ...", {
df <- data.frame( x = 1:4 )
f <- function(...){
x1 <- 4
f1 <- function(y) y
filter(df, ..., f1(x1) > x)
}
g <- function(...){
x2 <- 2
f(x > x2, ...)
}
res <- g()
expect_equal( res$x, 3L )
df <- group_by(df,x)
res <- g()
expect_equal( res$x, 3L )
})
test_that( "filter handles simple symbols", {
df <- data.frame( x = 1:4, test = rep(c(T,F), each = 2) )
res <- filter(df, test)
gdf <- group_by(df,x)
res <- filter(gdf, test)
h <- function(data){
test2 <- c(T,T,F,F)
filter(data,test2)
}
expect_equal(h(df), df[1:2,])
f <- function(data, ...){
one <- 1
filter( data, test, x > one, ...)
}
g <- function(data, ...){
four <- 4
f( data, x < four, ...)
}
res <- g(df)
expect_equal(res$x, 2L)
expect_equal(res$test, TRUE)
res <- g(gdf)
expect_equal(res$x, 2L)
expect_equal(res$test, TRUE)
})
# A length-one (scalar) logical predicate such as min(mpg) > 0 must be
# recycled across all rows, for both plain and grouped data frames.
# Fix: description typo "handlers" -> "handles".
test_that("filter handles scalar results", {
  expect_equivalent(filter(mtcars, min(mpg) > 0), mtcars)
  expect_equal(filter(group_by(mtcars, cyl), min(mpg) > 0), group_by(mtcars, cyl))
})
test_that("filter propagates attributes", {
date.start <- ISOdate(2010, 01, 01, 0)
test <- data.frame(Date = ISOdate(2010, 01, 01, 1:10))
test2 <- test %>% filter(Date < ISOdate(2010, 01, 01, 5))
expect_equal(test$Date[1:4], test2$Date)
})
test_that("filter fails on integer indices", {
expect_error(filter(mtcars, 1:2))
expect_error(filter(group_by(mtcars,cyl), 1:2))
})
test_that("filter discards NA", {
temp <- data.frame(
i = 1:5,
x = c(NA, 1L, 1L, 0L, 0L)
)
res <- filter(temp, x == 1)
expect_equal(nrow(res), 2L)
})
test_that("date class remains on filter (#273)",{
x1 <- x2 <- data.frame(
date = seq.Date(as.Date('2013-01-01'), by = "1 days", length.out = 2),
var = c(5, 8)
)
x1.filter <- x1 %>% filter(as.Date(date) > as.Date('2013-01-01'))
x2$date <- x2$date + 1
x2.filter <- x2 %>% filter(as.Date(date) > as.Date('2013-01-01'))
expect_equal(class(x1.filter$date), "Date")
expect_equal(class(x2.filter$date), "Date")
})
test_that("filter handles $ correctly (#278)", {
d1 <- tbl_df(data.frame(
num1 = as.character(sample(1:10, 1000, T)),
var1 = runif(1000),
stringsAsFactors = FALSE))
d2 <- data.frame(num1 = as.character(1:3), stringsAsFactors = FALSE)
res1 <- d1 %>% filter(num1 %in% c("1", "2", "3"))
res2 <- d1 %>% filter(num1 %in% d2$num1)
expect_equal(res1, res2)
})
test_that( "filter returns the input data if no parameters are given", {
expect_equivalent( filter(mtcars), mtcars )
})
test_that( "$ does not end call traversing. #502", {
# Suppose some analysis options are set much earlier in the script
analysis_opts <- list(min_outcome = .25)
# Generate some dummy data
d <- expand.grid(Subject = 1:3, TrialNo = 1:2, Time = 1:3) %>% tbl_df %>%
arrange(Subject, TrialNo, Time) %>%
mutate(Outcome = (1:18 %% c(5, 7, 11)) / 10)
# Do some aggregation
trial_outcomes <- d %>% group_by(Subject, TrialNo) %>%
summarise(MeanOutcome = mean(Outcome))
left <- filter(trial_outcomes, MeanOutcome < analysis_opts$min_outcome)
right <- filter(trial_outcomes, analysis_opts$min_outcome > MeanOutcome)
expect_equal(left,right)
})
test_that( "GroupedDataFrame checks consistency of data (#606)", {
df1 <- data_frame(
g = rep(1:2, each = 5),
x = 1:10
) %>% group_by(g)
attr(df1, "group_sizes") <- c(2, 2)
expect_error(df1 %>% filter(x == 1), "corrupt 'grouped_df'" )
})
test_that( "filter uses the white list (#566)", {
datesDF <- read.csv(stringsAsFactors=FALSE, text="
X
2014-03-13 16:08:19
2014-03-13 16:16:23
2014-03-13 16:28:28
2014-03-13 16:28:54
")
datesDF$X <- as.POSIXlt(datesDF$X)
expect_error(
filter(datesDF, X > as.POSIXlt("2014-03-13")),
"column 'X' has unsupported class|POSIXct, not POSIXlt.*'X'"
)
})
test_that( "filter handles complex vectors (#436)", {
d <- data.frame(x=1:10, y=1:10+2i)
expect_equal(filter(d, x<4)$y, 1:3+2i)
expect_equal(filter(d, Re(y)<4)$y, 1:3+2i)
})
test_that("%in% works as expected (#126)", {
df <- data_frame( a = c("a", "b", "ab"), g = c(1,1,2) )
res <- df %>% filter( a %in% letters )
expect_equal(nrow(res), 2L)
res <- df %>% group_by(g) %>% filter( a %in% letters )
expect_equal(nrow(res), 2L)
})
test_that("row_number does not segfault with example from #781", {
z <- data.frame(a=c(1,2,3))
b <- "a"
res <- z %>% filter(row_number(b) == 2)
expect_equal( nrow(res), 0L )
})
test_that("filter does not alter expression (#971)", {
my_filter <- ~ am == 1;
expect_error( mtcars %>% filter(my_filter) )
expect_equal( my_filter[[2]][[2]], as.name("am") )
})
test_that("hybrid evaluation handles $ correctly (#1134)", {
df <- data_frame( x = 1:10, g = rep(1:5, 2 ) )
res <- df %>% group_by(g) %>% filter( x > min(df$x) )
expect_equal( nrow(res), 9L )
})
test_that("filter correctly handles empty data frames (#782)", {
res <- data_frame() %>% filter(F)
expect_equal( nrow(res), 0L )
expect_equal( length(names(res)), 0L )
})
test_that("filter(.,TRUE,TRUE) works (#1210)", {
df <- data.frame(x=1:5)
res <- filter(df,TRUE,TRUE)
expect_equal(res, df)
})
test_that("filter, slice and arrange preserves attributes (#1064)", {
df <- structure(
data.frame( x = 1:10, g1 = rep(1:2, each = 5), g2 = rep(1:5, 2) ),
meta = "this is important"
)
res <- filter( df, x < 5 ) %>% attr("meta" )
expect_equal( res, "this is important")
res <- filter( df, x < 5, x > 4) %>% attr("meta" )
expect_equal( res, "this is important")
res <- df %>% slice(1:50) %>% attr("meta")
expect_equal( res, "this is important")
res <- df %>% arrange(x) %>% attr("meta")
expect_equal( res, "this is important")
res <- df %>% summarise( n() ) %>% attr("meta")
expect_equal( res, "this is important")
res <- df %>% group_by(g1) %>% summarise( n() ) %>% attr("meta")
expect_equal( res, "this is important")
res <- df %>% group_by(g1,g2) %>% summarise( n() ) %>% attr("meta")
expect_equal( res, "this is important")
})
test_that("filter works with rowwise data (#1099)", {
df <- data_frame(First = c("string1", "string2"), Second = c("Sentence with string1", "something"))
res <- df %>% rowwise() %>% filter(grepl(First, Second, fixed = TRUE))
expect_equal( nrow(res), 1L)
expect_equal( df[1,], res)
})
test_that("grouped filter handles indices (#880)", {
res <- iris %>% group_by(Species) %>% filter( Sepal.Length > 5 )
res2 <- mutate( res, Petal = Petal.Width * Petal.Length)
expect_equal( nrow(res), nrow(res2) )
expect_equal( attr(res, "indices"), attr(res2, "indices") )
})
test_that("filter(FALSE) drops indices", {
out <- mtcars %>%
group_by(cyl) %>%
filter(FALSE) %>%
attr("indices")
expect_equal(out, NULL)
})
test_that("filter handles S4 objects (#1366)", {
env <- environment()
Numbers <- suppressWarnings( setClass("Numbers", slots = c(foo = "numeric"), contains = "integer", where = env) )
on.exit(removeClass("Numbers", where = env))
df <- data.frame( x = Numbers( 1:10, foo = 10 ) )
res <- filter( df, x > 3 )
expect_true( isS4(res$x) )
expect_is( res$x, "Numbers")
expect_equal( res$x@foo, 10)
})
test_that("hybrid lag and default value for string columns work (#1403)", {
res <- mtcars %>%
mutate(xx=LETTERS[gear]) %>%
filter(xx==lag(xx, default='foo'))
xx <- LETTERS[ mtcars$gear ]
ok <- xx == lag( xx, default = "foo" )
expect_equal( xx[ok], res$xx )
res <- mtcars %>%
mutate(xx=LETTERS[gear]) %>%
filter(xx==lead(xx, default='foo'))
xx <- LETTERS[mtcars$gear ]
ok <- xx == lead( xx, default = "foo" )
expect_equal( xx[ok], res$xx )
})
# .data and .env tests now in test-hybrid-traverse.R
test_that("each argument gets implicit parens", {
df <- data_frame(
v1 = c("a", "b", "a", "b"),
v2 = c("b", "a", "a", "b"),
v3 = c("a", "b", "c", "d")
)
tbls <- test_load(df)
one <- tbls %>% lapply(. %>% filter((v1 == "a" | v2 == "a") & v3 == "a"))
two <- tbls %>% lapply(. %>% filter(v1 == "a" | v2 == "a", v3 == "a"))
lapply(seq_along(one), function(i) {
expect_equal(collect(one[[i]]), collect(two[[i]]))
})
})
test_that("filter fails gracefully on raw columns (#1803)", {
df <- data_frame(a = 1:3, b = as.raw(1:3))
expect_error( filter(df, a == 1), "unsupported type" )
expect_error( filter(df, b == 1), "unsupported type" )
})
|
/tests/testthat/test-filter.r
|
no_license
|
ravinpoudel/dplyr
|
R
| false
| false
| 9,728
|
r
|
context("Filter")
df <- expand.grid(a = 1:10, b = letters[1:10],
KEEP.OUT.ATTRS = FALSE,
stringsAsFactors = FALSE)
tbls <- test_load(df)
test_that("filter results independent of data tbl (simple)", {
skip_if_no_sqlite()
expected <- df[df$a > 6, , drop = FALSE]
compare_tbls(tbls[c("df","sqlite")], function(x) {
filter_(x, ~ a > 6)
}, expected)
})
test_that("filter captures local variables", {
sel <- c("d", "g", "a")
expected <- df[df$b %in% sel, , drop = FALSE]
compare_tbls(tbls, function(x) x %>% filter(b %in% sel), ref = expected)
})
test_that("two filters equivalent to one", {
expected <- filter(df, a > 4 & b == "a")
compare_tbls(tbls, function(x) x %>% filter(a > 4) %>% filter(b == "a"),
ref = expected)
})
test_that("filter fails if inputs incorrect length (#156)", {
expect_error( filter(tbl_df(mtcars), c(F, T)) )
expect_error( filter(group_by(mtcars, am), c(F, T)) )
})
test_that("filter gives useful error message when given incorrect input", {
expect_error( filter(tbl_df(mtcars), x ), "unknown column" )
})
test_that("filter complains in inputs are named", {
expect_error(filter(mtcars, x = 1), "takes unnamed arguments")
expect_error(filter(mtcars, x = 1 & y > 2), "takes unnamed arguments")
})
test_that("filter handles passing ...", {
df <- data.frame( x = 1:4 )
f <- function(...){
x1 <- 4
f1 <- function(y) y
filter(df, ..., f1(x1) > x)
}
g <- function(...){
x2 <- 2
f(x > x2, ...)
}
res <- g()
expect_equal( res$x, 3L )
df <- group_by(df,x)
res <- g()
expect_equal( res$x, 3L )
})
test_that( "filter handles simple symbols", {
df <- data.frame( x = 1:4, test = rep(c(T,F), each = 2) )
res <- filter(df, test)
gdf <- group_by(df,x)
res <- filter(gdf, test)
h <- function(data){
test2 <- c(T,T,F,F)
filter(data,test2)
}
expect_equal(h(df), df[1:2,])
f <- function(data, ...){
one <- 1
filter( data, test, x > one, ...)
}
g <- function(data, ...){
four <- 4
f( data, x < four, ...)
}
res <- g(df)
expect_equal(res$x, 2L)
expect_equal(res$test, TRUE)
res <- g(gdf)
expect_equal(res$x, 2L)
expect_equal(res$test, TRUE)
})
test_that("filter handlers scalar results", {
expect_equivalent( filter(mtcars, min(mpg)>0 ), mtcars )
expect_equal( filter(group_by(mtcars,cyl), min(mpg)>0 ), group_by(mtcars,cyl) )
})
test_that("filter propagates attributes", {
date.start <- ISOdate(2010, 01, 01, 0)
test <- data.frame(Date = ISOdate(2010, 01, 01, 1:10))
test2 <- test %>% filter(Date < ISOdate(2010, 01, 01, 5))
expect_equal(test$Date[1:4], test2$Date)
})
test_that("filter fails on integer indices", {
expect_error(filter(mtcars, 1:2))
expect_error(filter(group_by(mtcars,cyl), 1:2))
})
test_that("filter discards NA", {
temp <- data.frame(
i = 1:5,
x = c(NA, 1L, 1L, 0L, 0L)
)
res <- filter(temp, x == 1)
expect_equal(nrow(res), 2L)
})
test_that("date class remains on filter (#273)",{
x1 <- x2 <- data.frame(
date = seq.Date(as.Date('2013-01-01'), by = "1 days", length.out = 2),
var = c(5, 8)
)
x1.filter <- x1 %>% filter(as.Date(date) > as.Date('2013-01-01'))
x2$date <- x2$date + 1
x2.filter <- x2 %>% filter(as.Date(date) > as.Date('2013-01-01'))
expect_equal(class(x1.filter$date), "Date")
expect_equal(class(x2.filter$date), "Date")
})
test_that("filter handles $ correctly (#278)", {
d1 <- tbl_df(data.frame(
num1 = as.character(sample(1:10, 1000, T)),
var1 = runif(1000),
stringsAsFactors = FALSE))
d2 <- data.frame(num1 = as.character(1:3), stringsAsFactors = FALSE)
res1 <- d1 %>% filter(num1 %in% c("1", "2", "3"))
res2 <- d1 %>% filter(num1 %in% d2$num1)
expect_equal(res1, res2)
})
test_that( "filter returns the input data if no parameters are given", {
expect_equivalent( filter(mtcars), mtcars )
})
test_that( "$ does not end call traversing. #502", {
# Suppose some analysis options are set much earlier in the script
analysis_opts <- list(min_outcome = .25)
# Generate some dummy data
d <- expand.grid(Subject = 1:3, TrialNo = 1:2, Time = 1:3) %>% tbl_df %>%
arrange(Subject, TrialNo, Time) %>%
mutate(Outcome = (1:18 %% c(5, 7, 11)) / 10)
# Do some aggregation
trial_outcomes <- d %>% group_by(Subject, TrialNo) %>%
summarise(MeanOutcome = mean(Outcome))
left <- filter(trial_outcomes, MeanOutcome < analysis_opts$min_outcome)
right <- filter(trial_outcomes, analysis_opts$min_outcome > MeanOutcome)
expect_equal(left,right)
})
test_that( "GroupedDataFrame checks consistency of data (#606)", {
df1 <- data_frame(
g = rep(1:2, each = 5),
x = 1:10
) %>% group_by(g)
attr(df1, "group_sizes") <- c(2, 2)
expect_error(df1 %>% filter(x == 1), "corrupt 'grouped_df'" )
})
test_that( "filter uses the white list (#566)", {
datesDF <- read.csv(stringsAsFactors=FALSE, text="
X
2014-03-13 16:08:19
2014-03-13 16:16:23
2014-03-13 16:28:28
2014-03-13 16:28:54
")
datesDF$X <- as.POSIXlt(datesDF$X)
expect_error(
filter(datesDF, X > as.POSIXlt("2014-03-13")),
"column 'X' has unsupported class|POSIXct, not POSIXlt.*'X'"
)
})
test_that( "filter handles complex vectors (#436)", {
d <- data.frame(x=1:10, y=1:10+2i)
expect_equal(filter(d, x<4)$y, 1:3+2i)
expect_equal(filter(d, Re(y)<4)$y, 1:3+2i)
})
test_that("%in% works as expected (#126)", {
df <- data_frame( a = c("a", "b", "ab"), g = c(1,1,2) )
res <- df %>% filter( a %in% letters )
expect_equal(nrow(res), 2L)
res <- df %>% group_by(g) %>% filter( a %in% letters )
expect_equal(nrow(res), 2L)
})
test_that("row_number does not segfault with example from #781", {
z <- data.frame(a=c(1,2,3))
b <- "a"
res <- z %>% filter(row_number(b) == 2)
expect_equal( nrow(res), 0L )
})
test_that("filter does not alter expression (#971)", {
my_filter <- ~ am == 1;
expect_error( mtcars %>% filter(my_filter) )
expect_equal( my_filter[[2]][[2]], as.name("am") )
})
test_that("hybrid evaluation handles $ correctly (#1134)", {
df <- data_frame( x = 1:10, g = rep(1:5, 2 ) )
res <- df %>% group_by(g) %>% filter( x > min(df$x) )
expect_equal( nrow(res), 9L )
})
test_that("filter correctly handles empty data frames (#782)", {
  # A zero-row, zero-column data frame must pass through filter() unchanged.
  # Use the literal FALSE, not the reassignable alias F.
  res <- data_frame() %>% filter(FALSE)
  expect_equal( nrow(res), 0L )
  expect_equal( length(names(res)), 0L )
})
test_that("filter(.,TRUE,TRUE) works (#1210)", {
  # Multiple conditions are ANDed together; two always-TRUE conditions
  # must keep every row and leave the data frame identical.
  df <- data.frame(x=1:5)
  res <- filter(df,TRUE,TRUE)
  expect_equal(res, df)
})
test_that("filter, slice and arrange preserves attributes (#1064)", {
df <- structure(
data.frame( x = 1:10, g1 = rep(1:2, each = 5), g2 = rep(1:5, 2) ),
meta = "this is important"
)
res <- filter( df, x < 5 ) %>% attr("meta" )
expect_equal( res, "this is important")
res <- filter( df, x < 5, x > 4) %>% attr("meta" )
expect_equal( res, "this is important")
res <- df %>% slice(1:50) %>% attr("meta")
expect_equal( res, "this is important")
res <- df %>% arrange(x) %>% attr("meta")
expect_equal( res, "this is important")
res <- df %>% summarise( n() ) %>% attr("meta")
expect_equal( res, "this is important")
res <- df %>% group_by(g1) %>% summarise( n() ) %>% attr("meta")
expect_equal( res, "this is important")
res <- df %>% group_by(g1,g2) %>% summarise( n() ) %>% attr("meta")
expect_equal( res, "this is important")
})
test_that("filter works with rowwise data (#1099)", {
df <- data_frame(First = c("string1", "string2"), Second = c("Sentence with string1", "something"))
res <- df %>% rowwise() %>% filter(grepl(First, Second, fixed = TRUE))
expect_equal( nrow(res), 1L)
expect_equal( df[1,], res)
})
test_that("grouped filter handles indices (#880)", {
res <- iris %>% group_by(Species) %>% filter( Sepal.Length > 5 )
res2 <- mutate( res, Petal = Petal.Width * Petal.Length)
expect_equal( nrow(res), nrow(res2) )
expect_equal( attr(res, "indices"), attr(res2, "indices") )
})
test_that("filter(FALSE) drops indices", {
  # Once every row is filtered away, the grouped "indices" attribute
  # must be absent (NULL), not an empty list.
  filtered <- filter(group_by(mtcars, cyl), FALSE)
  expect_equal(attr(filtered, "indices"), NULL)
})
test_that("filter handles S4 objects (#1366)", {
  # Register a throwaway S4 class in this test's environment and make
  # sure it is removed again even if an expectation fails.
  env <- environment()
  Numbers <- suppressWarnings( setClass("Numbers", slots = c(foo = "numeric"), contains = "integer", where = env) )
  on.exit(removeClass("Numbers", where = env))
  df <- data.frame( x = Numbers( 1:10, foo = 10 ) )
  res <- filter( df, x > 3 )
  # Subsetting must preserve S4-ness, the class, and the extra slot.
  expect_true( isS4(res$x) )
  expect_is( res$x, "Numbers")
  expect_equal( res$x@foo, 10)
})
test_that("hybrid lag and default value for string columns work (#1403)", {
res <- mtcars %>%
mutate(xx=LETTERS[gear]) %>%
filter(xx==lag(xx, default='foo'))
xx <- LETTERS[ mtcars$gear ]
ok <- xx == lag( xx, default = "foo" )
expect_equal( xx[ok], res$xx )
res <- mtcars %>%
mutate(xx=LETTERS[gear]) %>%
filter(xx==lead(xx, default='foo'))
xx <- LETTERS[mtcars$gear ]
ok <- xx == lead( xx, default = "foo" )
expect_equal( xx[ok], res$xx )
})
# .data and .env tests now in test-hybrid-traverse.R
test_that("each argument gets implicit parens", {
df <- data_frame(
v1 = c("a", "b", "a", "b"),
v2 = c("b", "a", "a", "b"),
v3 = c("a", "b", "c", "d")
)
tbls <- test_load(df)
one <- tbls %>% lapply(. %>% filter((v1 == "a" | v2 == "a") & v3 == "a"))
two <- tbls %>% lapply(. %>% filter(v1 == "a" | v2 == "a", v3 == "a"))
lapply(seq_along(one), function(i) {
expect_equal(collect(one[[i]]), collect(two[[i]]))
})
})
test_that("filter fails gracefully on raw columns (#1803)", {
df <- data_frame(a = 1:3, b = as.raw(1:3))
expect_error( filter(df, a == 1), "unsupported type" )
expect_error( filter(df, b == 1), "unsupported type" )
})
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/caching.R
\name{rhdx_cache_delete}
\alias{rhdx_cache_delete}
\title{Delete file from cache}
\usage{
rhdx_cache_delete(file)
}
\arguments{
\item{file}{Character, the file to delete}
}
\description{
Delete file from cache
}
|
/man/rhdx_cache_delete.Rd
|
permissive
|
bmpacifique/rhdx
|
R
| false
| true
| 300
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/caching.R
\name{rhdx_cache_delete}
\alias{rhdx_cache_delete}
\title{Delete file from cache}
\usage{
rhdx_cache_delete(file)
}
\arguments{
\item{file}{Character, the file to delete}
}
\description{
Delete file from cache
}
|
## Purpose: control parameters for modelsum function
## Authors: P Votruba, Jason Sinnwell, Beth Atkinson
## Created: 9/3/2015
#' Control settings for \code{modelsum} function
#'
#' Control test and summary settings for \code{\link{modelsum}} function.
#'
#' @param digits Numeric, denoting the number of digits after the decimal point for beta coefficients and standard errors.
#' @param digits.ratio Numeric, denoting the number of digits after the decimal point for ratios, e.g. OR, RR, HR.
#' @param digits.p Numeric, denoting the number of digits for p-values. See "Details", below.
#' @param format.p Logical, denoting whether to format p-values. See "Details", below.
#' @param show.adjust Logical, denoting whether to show adjustment terms.
#' @param show.intercept Logical, denoting whether to show intercept terms.
#' @param conf.level Numeric, giving the confidence level.
#' @param ordinal.stats,binomial.stats,survival.stats,gaussian.stats,poisson.stats,negbin.stats
#' Character vectors denoting which stats to show for the various model types.
#' @param stat.labels A named list of labels for all the stats used above.
#' @param ... Other arguments (not in use at this time).
#' @return A list with settings to be used within the \code{modelsum} function.
#' @details
#' If \code{format.p} is \code{FALSE}, \code{digits.p} denotes the number of significant digits shown. The
#' p-values will be in exponential notation if necessary. If \code{format.p} is \code{TRUE},
#' \code{digits.p} will determine the number of digits after the decimal point to show. If the p-value
#' is less than the resulting number of places, it will be formatted to show so.
#' @seealso \code{\link{modelsum}}, \code{\link{summary.modelsum}}, \code{\link{modelsum.internal}}
#' @export
modelsum.control <- function(
  digits = 3L, digits.ratio = 3L, digits.p = 3L, format.p = TRUE,
  show.adjust = TRUE, show.intercept = TRUE, conf.level = 0.95,
  ordinal.stats=c("OR","CI.lower.OR","CI.upper.OR", "p.value","Nmiss"),
  binomial.stats=c("OR","CI.lower.OR","CI.upper.OR","p.value", "concordance","Nmiss"),
  gaussian.stats=c("estimate","std.error","p.value","adj.r.squared","Nmiss"),
  poisson.stats=c("RR","CI.lower.RR", "CI.upper.RR","p.value","Nmiss"),
  negbin.stats=c("RR","CI.lower.RR", "CI.upper.RR","p.value","Nmiss"),
  survival.stats=c("HR","CI.lower.HR","CI.upper.HR","p.value","concordance","Nmiss"),
  stat.labels = list(), ...
) {
  ## Tolerate (but warn about) argument names renamed in earlier releases.
  if("nsmall" %in% names(list(...))) .Deprecated(msg = "Using 'nsmall = ' is deprecated. Use 'digits = ' instead.")
  if("nsmall.ratio" %in% names(list(...))) .Deprecated(msg = "Using 'nsmall.ratio = ' is deprecated. Use 'digits.ratio = ' instead.")
  if("digits.test" %in% names(list(...))) .Deprecated(msg = "Using 'digits.test = ' is deprecated. Use 'digits.p = ' instead.")

  # digits and digits.test are OK to be NULL. See ?format
  # Negative digit counts are invalid; warn and fall back to the default.
  if(!is.null(digits) && digits < 0L)
  {
    warning("digits must be >= 0. Set to default.")
    digits <- 3L
  }
  if(!is.null(digits.ratio) && digits.ratio < 0L)
  {
    warning("digits.ratio must be >= 0. Set to default.")
    digits.ratio <- 3L
  }
  if(!is.null(digits.p) && digits.p < 0L)
  {
    warning("digits.p must be >= 0. Set to default.")
    digits.p <- 3L
  }
  if(conf.level <= 0 || conf.level >= 1) {
    warning("conf.level must be between (0,1). Setting to default.\n")
    conf.level <- 0.95
  }

  ##########################
  ## Ordinal stats:
  ##########################
  ordinal.stats.valid <- c(
    "Nmiss", "OR", "CI.lower.OR", "CI.upper.OR", "p.value", # default
    "estimate", "CI.OR", "CI.estimate", "CI.lower.estimate", "CI.upper.estimate", "N", "Nmiss2", "endpoint", "std.error", "statistic",
    "logLik", "AIC", "BIC", "edf", "deviance", "df.residual", "p.value.lrt"
  )
  if(any(ordinal.stats %nin% ordinal.stats.valid)) {
    # FIX: this message previously said "Invalid binomial stats"
    stop("Invalid ordinal stats: ",
         paste(ordinal.stats[ordinal.stats %nin% ordinal.stats.valid],collapse=","), "\n")
  }
  ## let CI.OR decode to CI.lower.OR and CI.upper.OR
  if(any(ordinal.stats == "CI.OR")) {
    ordinal.stats <- unique(c(ordinal.stats[ordinal.stats != "CI.OR"], "CI.lower.OR", "CI.upper.OR"))
  }
  if(any(ordinal.stats == "CI.estimate")) {
    ordinal.stats <- unique(c(ordinal.stats[ordinal.stats != "CI.estimate"], "CI.lower.estimate", "CI.upper.estimate"))
  }

  ##########################
  ## Binomial stats:
  ##########################
  ##Other coefficient columns:
  ##CI.estimate, N, Nmiss2, depvar (show name of dependent variable), estimate, se, zstat
  ##Other model fits: logLik,AIC,BIC
  binomial.stats.valid <- c(
    "Nmiss", "OR", "CI.lower.OR", "CI.upper.OR", "p.value", "concordance", # default
    "estimate", "CI.OR", "CI.estimate", "CI.lower.estimate", "CI.upper.estimate",
    "CI.wald", "CI.lower.wald", "CI.upper.wald", "CI.OR.wald", "CI.lower.OR.wald", "CI.upper.OR.wald",
    "N", "Nmiss2", "endpoint", "std.error", "statistic",
    "logLik", "AIC", "BIC", "null.deviance", "deviance", "df.residual", "df.null", "p.value.lrt"
  )
  if(any(binomial.stats %nin% binomial.stats.valid)) {
    stop("Invalid binomial stats: ",
         paste(binomial.stats[binomial.stats %nin% binomial.stats.valid],collapse=","), "\n")
  }
  ## let CI.OR decode to CI.lower.OR and CI.upper.OR
  if(any(binomial.stats == "CI.OR")) {
    binomial.stats <- unique(c(binomial.stats[binomial.stats != "CI.OR"], "CI.lower.OR", "CI.upper.OR"))
  }
  if(any(binomial.stats == "CI.estimate")) {
    binomial.stats <- unique(c(binomial.stats[binomial.stats != "CI.estimate"], "CI.lower.estimate", "CI.upper.estimate"))
  }
  if(any(binomial.stats == "CI.wald")) {
    binomial.stats <- unique(c(binomial.stats[binomial.stats != "CI.wald"], "CI.lower.wald", "CI.upper.wald"))
  }
  if(any(binomial.stats == "CI.OR.wald")) {
    binomial.stats <- unique(c(binomial.stats[binomial.stats != "CI.OR.wald"], "CI.lower.OR.wald", "CI.upper.OR.wald"))
  }

  ##########################
  ## Gaussian stats:
  ##########################
  ##Other coefficient columns: CI.estimate, N, Nmiss2, t.stat, standard.estimate, endpoint
  ##Other model fits: r.squared, AIC, BIC,logLik
  gaussian.stats.valid <- c(
    "Nmiss", "estimate", "std.error", "p.value", "adj.r.squared", #default
    "CI.estimate", "CI.lower.estimate", "CI.upper.estimate", "N", "Nmiss2", "statistic", "standard.estimate", "endpoint",
    "r.squared", "AIC", "BIC", "logLik", "statistic.F", "p.value.F", "p.value.lrt"
  )
  if(any(gaussian.stats %nin% gaussian.stats.valid)) {
    stop("Invalid gaussian stats: ",
         paste(gaussian.stats[gaussian.stats %nin% gaussian.stats.valid],collapse=","), "\n")
  }
  if(any(gaussian.stats == "CI.estimate")) {
    gaussian.stats <- unique(c(gaussian.stats[gaussian.stats != "CI.estimate"], "CI.lower.estimate", "CI.upper.estimate"))
  }

  ##########################
  ## Poisson stats:
  ##########################
  ##(quasi)/poisson.stats=c("Nmiss","RR","CI.RR", "p.value","concordance"),
  ##Other coeff columns: CI.estimate, CI.RR (ci for relrisk),N,Nmiss2, std.error, estimate, z.stat, endpoint
  ##Other model fits: AIC,BIC,logLik, dispersion
  ## dispersion = deviance/df.residual
  poisson.stats.valid <- c(
    "RR", "CI.lower.RR", "CI.upper.RR", "p.value", "Nmiss", # default
    "CI.RR", "CI.estimate", "CI.lower.estimate", "CI.upper.estimate", "Nmiss2", "std.error", "estimate", "statistic", "endpoint",
    "AIC", "BIC", "logLik", "dispersion", "null.deviance", "deviance", "df.residual", "df.null", "p.value.lrt"
  )
  if(any(poisson.stats %nin% poisson.stats.valid)) {
    stop("Invalid poisson stats: ",
         paste(poisson.stats[poisson.stats %nin% poisson.stats.valid],collapse=","), "\n")
  }
  ## let CI.RR decode to CI.lower.RR and CI.upper.RR
  if(any(poisson.stats == "CI.RR")) {
    poisson.stats <- unique(c(poisson.stats[poisson.stats != "CI.RR"], "CI.lower.RR", "CI.upper.RR"))
  }
  if(any(poisson.stats == "CI.estimate")) {
    # FIX: was `== "CI.estimate"`, which kept only the alias instead of
    # removing it (inconsistent with every other CI.* expansion above).
    poisson.stats <- unique(c(poisson.stats[poisson.stats != "CI.estimate"], "CI.lower.estimate", "CI.upper.estimate"))
  }

  ##########################
  ## Negbin stats:
  ##########################
  negbin.stats.valid <- c(
    "RR", "CI.lower.RR", "CI.upper.RR", "p.value", "Nmiss", # default
    "CI.RR", "CI.estimate", "CI.lower.estimate", "CI.upper.estimate", "Nmiss2", "std.error", "estimate", "statistic", "endpoint",
    "AIC", "BIC", "logLik", "dispersion", "null.deviance", "deviance", "df.residual", "df.null", "theta", "SE.theta", "p.value.lrt"
  )
  if(any(negbin.stats %nin% negbin.stats.valid)) {
    # FIX: this message previously said "Invalid poisson stats"
    stop("Invalid negbin stats: ",
         paste(negbin.stats[negbin.stats %nin% negbin.stats.valid],collapse=","), "\n")
  }
  ## let CI.RR decode to CI.lower.RR and CI.upper.RR
  if(any(negbin.stats == "CI.RR")) {
    negbin.stats <- unique(c(negbin.stats[negbin.stats != "CI.RR"], "CI.lower.RR", "CI.upper.RR"))
  }
  if(any(negbin.stats == "CI.estimate")) {
    # FIX: was `== "CI.estimate"` (same keep-instead-of-remove bug as poisson).
    negbin.stats <- unique(c(negbin.stats[negbin.stats != "CI.estimate"], "CI.lower.estimate", "CI.upper.estimate"))
  }

  ##########################
  ## Survival stats:
  ##########################
  ##surv.stats=c(Nmiss,HR,CI.HR,p.value,concorance)
  ##Other possible coefficient table columns: CI.estimate,N,Nmiss2,estimate,se,endpoint,Nevents,z.stat
  ##Other possible model fits: r.squared, logLik, AIC, BIC
  surv.stats.valid <- c(
    "HR", "CI.lower.HR", "CI.upper.HR", "p.value", "concordance", "Nmiss", # default
    "CI.HR", "CI.estimate", "CI.lower.estimate", "CI.upper.estimate", "N", "Nmiss2", "estimate", "std.error", "endpoint", "Nevents", "statistic",
    "r.squared", "logLik", "AIC", "BIC", "statistic.sc", "p.value.sc", "p.value.log", "p.value.wald", "std.error.concordance", "p.value.lrt"
  )
  if(any(survival.stats %nin% surv.stats.valid)) {
    stop("Invalid survival stats: ",
         paste(survival.stats[survival.stats %nin% surv.stats.valid], collapse=","), "\n")
  }
  ## let CI.HR decode to CI.lower.HR and CI.upper.HR
  if(any(survival.stats == "CI.HR")) {
    survival.stats <- unique(c(survival.stats[survival.stats != "CI.HR"], "CI.lower.HR", "CI.upper.HR"))
  }
  if(any(survival.stats == "CI.estimate")) {
    survival.stats <- unique(c(survival.stats[survival.stats != "CI.estimate"], "CI.lower.estimate", "CI.upper.estimate"))
  }

  ## Return all (validated, expanded) settings as a plain list consumed
  ## by modelsum().
  list(digits=digits, digits.ratio=digits.ratio, digits.p = digits.p, format.p = format.p,
       show.adjust=show.adjust, show.intercept=show.intercept, conf.level=conf.level,
       ordinal.stats=ordinal.stats, binomial.stats=binomial.stats, gaussian.stats=gaussian.stats,
       poisson.stats=poisson.stats, negbin.stats = negbin.stats, survival.stats=survival.stats, stat.labels = stat.labels)
}
|
/R/modelsum.control.R
|
no_license
|
bzkrouse/arsenal
|
R
| false
| false
| 10,734
|
r
|
## Purpose: control parameters for modelsum function
## Authors: P Votruba, Jason Sinnwell, Beth Atkinson
## Created: 9/3/2015
#' Control settings for \code{modelsum} function
#'
#' Control test and summary settings for \code{\link{modelsum}} function.
#'
#' @param digits Numeric, denoting the number of digits after the decimal point for beta coefficients and standard errors.
#' @param digits.ratio Numeric, denoting the number of digits after the decimal point for ratios, e.g. OR, RR, HR.
#' @param digits.p Numeric, denoting the number of digits for p-values. See "Details", below.
#' @param format.p Logical, denoting whether to format p-values. See "Details", below.
#' @param show.adjust Logical, denoting whether to show adjustment terms.
#' @param show.intercept Logical, denoting whether to show intercept terms.
#' @param conf.level Numeric, giving the confidence level.
#' @param ordinal.stats,binomial.stats,survival.stats,gaussian.stats,poisson.stats,negbin.stats
#' Character vectors denoting which stats to show for the various model types.
#' @param stat.labels A named list of labels for all the stats used above.
#' @param ... Other arguments (not in use at this time).
#' @return A list with settings to be used within the \code{modelsum} function.
#' @details
#' If \code{format.p} is \code{FALSE}, \code{digits.p} denotes the number of significant digits shown. The
#' p-values will be in exponential notation if necessary. If \code{format.p} is \code{TRUE},
#' \code{digits.p} will determine the number of digits after the decimal point to show. If the p-value
#' is less than the resulting number of places, it will be formatted to show so.
#' @seealso \code{\link{modelsum}}, \code{\link{summary.modelsum}}, \code{\link{modelsum.internal}}
#' @export
modelsum.control <- function(
digits = 3L, digits.ratio = 3L, digits.p = 3L, format.p = TRUE,
show.adjust = TRUE, show.intercept = TRUE, conf.level = 0.95,
ordinal.stats=c("OR","CI.lower.OR","CI.upper.OR", "p.value","Nmiss"),
binomial.stats=c("OR","CI.lower.OR","CI.upper.OR","p.value", "concordance","Nmiss"),
gaussian.stats=c("estimate","std.error","p.value","adj.r.squared","Nmiss"),
poisson.stats=c("RR","CI.lower.RR", "CI.upper.RR","p.value","Nmiss"),
negbin.stats=c("RR","CI.lower.RR", "CI.upper.RR","p.value","Nmiss"),
survival.stats=c("HR","CI.lower.HR","CI.upper.HR","p.value","concordance","Nmiss"),
stat.labels = list(), ...
) {
if("nsmall" %in% names(list(...))) .Deprecated(msg = "Using 'nsmall = ' is deprecated. Use 'digits = ' instead.")
if("nsmall.ratio" %in% names(list(...))) .Deprecated(msg = "Using 'nsmall.ratio = ' is deprecated. Use 'digits.ratio = ' instead.")
if("digits.test" %in% names(list(...))) .Deprecated(msg = "Using 'digits.test = ' is deprecated. Use 'digits.p = ' instead.")
# digits and digits.test are OK to be NULL. See ?format
if(!is.null(digits) && digits < 0L)
{
warning("digits must be >= 0. Set to default.")
digits <- 3L
}
if(!is.null(digits.ratio) && digits.ratio < 0L)
{
warning("digits.ratio must be >= 0. Set to default.")
digits.ratio <- 3L
}
if(!is.null(digits.p) && digits.p < 0L)
{
warning("digits.p must be >= 0. Set to default.")
digits.p <- 3L
}
if(conf.level <= 0 || conf.level >= 1) {
warning("conf.level must be between (0,1). Setting to default.\n")
conf.level <- 0.95
}
##########################
## Ordinal stats:
##########################
ordinal.stats.valid <- c(
"Nmiss", "OR", "CI.lower.OR", "CI.upper.OR", "p.value", # default
"estimate", "CI.OR", "CI.estimate", "CI.lower.estimate", "CI.upper.estimate", "N", "Nmiss2", "endpoint", "std.error", "statistic",
"logLik", "AIC", "BIC", "edf", "deviance", "df.residual", "p.value.lrt"
)
if(any(ordinal.stats %nin% ordinal.stats.valid)) {
stop("Invalid binomial stats: ",
paste(ordinal.stats[ordinal.stats %nin% ordinal.stats.valid],collapse=","), "\n")
}
## let CI.OR decode to CI.lower.OR and CI.upper.OR
if(any(ordinal.stats == "CI.OR")) {
ordinal.stats <- unique(c(ordinal.stats[ordinal.stats != "CI.OR"], "CI.lower.OR", "CI.upper.OR"))
}
if(any(ordinal.stats == "CI.estimate")) {
ordinal.stats <- unique(c(ordinal.stats[ordinal.stats != "CI.estimate"], "CI.lower.estimate", "CI.upper.estimate"))
}
##########################
## Binomial stats:
##########################
##Other coefficient columns:
##CI.estimate, N, Nmiss2, depvar (show name of dependent variable), estimate, se, zstat
##Other model fits: logLik,AIC,BIC
binomial.stats.valid <- c(
"Nmiss", "OR", "CI.lower.OR", "CI.upper.OR", "p.value", "concordance", # default
"estimate", "CI.OR", "CI.estimate", "CI.lower.estimate", "CI.upper.estimate",
"CI.wald", "CI.lower.wald", "CI.upper.wald", "CI.OR.wald", "CI.lower.OR.wald", "CI.upper.OR.wald",
"N", "Nmiss2", "endpoint", "std.error", "statistic",
"logLik", "AIC", "BIC", "null.deviance", "deviance", "df.residual", "df.null", "p.value.lrt"
)
if(any(binomial.stats %nin% binomial.stats.valid)) {
stop("Invalid binomial stats: ",
paste(binomial.stats[binomial.stats %nin% binomial.stats.valid],collapse=","), "\n")
}
## let CI.OR decode to CI.lower.OR and CI.upper.OR
if(any(binomial.stats == "CI.OR")) {
binomial.stats <- unique(c(binomial.stats[binomial.stats != "CI.OR"], "CI.lower.OR", "CI.upper.OR"))
}
if(any(binomial.stats == "CI.estimate")) {
binomial.stats <- unique(c(binomial.stats[binomial.stats != "CI.estimate"], "CI.lower.estimate", "CI.upper.estimate"))
}
if(any(binomial.stats == "CI.wald")) {
binomial.stats <- unique(c(binomial.stats[binomial.stats != "CI.wald"], "CI.lower.wald", "CI.upper.wald"))
}
if(any(binomial.stats == "CI.OR.wald")) {
binomial.stats <- unique(c(binomial.stats[binomial.stats != "CI.OR.wald"], "CI.lower.OR.wald", "CI.upper.OR.wald"))
}
##########################
## Gaussian stats:
##########################
##Other coefficient columns: CI.estimate, N, Nmiss2, t.stat, standard.estimate, endpoint
##Other model fits: r.squared, AIC, BIC,logLik
gaussian.stats.valid <- c(
"Nmiss", "estimate", "std.error", "p.value", "adj.r.squared", #default
"CI.estimate", "CI.lower.estimate", "CI.upper.estimate", "N", "Nmiss2", "statistic", "standard.estimate", "endpoint",
"r.squared", "AIC", "BIC", "logLik", "statistic.F", "p.value.F", "p.value.lrt"
)
if(any(gaussian.stats %nin% gaussian.stats.valid)) {
stop("Invalid gaussian stats: ",
paste(gaussian.stats[gaussian.stats %nin% gaussian.stats.valid],collapse=","), "\n")
}
if(any(gaussian.stats == "CI.estimate")) {
gaussian.stats <- unique(c(gaussian.stats[gaussian.stats != "CI.estimate"], "CI.lower.estimate", "CI.upper.estimate"))
}
##########################
## Poisson stats:
##########################
##(quasi)/poisson.stats=c("Nmiss","RR","CI.RR", "p.value","concordance"),
##Other coeff columns: CI.estimate, CI.RR (ci for relrisk),N,Nmiss2, std.error, estimate, z.stat, endpoint
##Other model fits: AIC,BIC,logLik, dispersion
## dispersion = deviance/df.residual
poisson.stats.valid <- c(
"RR", "CI.lower.RR", "CI.upper.RR", "p.value", "Nmiss", # default
"CI.RR", "CI.estimate", "CI.lower.estimate", "CI.upper.estimate", "CI.RR", "Nmiss2", "std.error", "estimate", "statistic", "endpoint",
"AIC", "BIC", "logLik", "dispersion", "null.deviance", "deviance", "df.residual", "df.null", "p.value.lrt"
)
if(any(poisson.stats %nin% poisson.stats.valid)) {
stop("Invalid poisson stats: ",
paste(poisson.stats[poisson.stats %nin% poisson.stats.valid],collapse=","), "\n")
}
## let CI.RR decode to CI.lower.RR and CI.upper.RR
if(any(poisson.stats == "CI.RR")) {
poisson.stats <- unique(c(poisson.stats[poisson.stats != "CI.RR"], "CI.lower.RR", "CI.upper.RR"))
}
if(any(poisson.stats == "CI.estimate")) {
poisson.stats <- unique(c(poisson.stats[poisson.stats == "CI.estimate"], "CI.lower.estimate", "CI.upper.estimate"))
}
##########################
## Negbin stats:
##########################
negbin.stats.valid <- c(
"RR", "CI.lower.RR", "CI.upper.RR", "p.value", "Nmiss", # default
"CI.RR", "CI.estimate", "CI.lower.estimate", "CI.upper.estimate", "CI.RR", "Nmiss2", "std.error", "estimate", "statistic", "endpoint",
"AIC", "BIC", "logLik", "dispersion", "null.deviance", "deviance", "df.residual", "df.null", "theta", "SE.theta", "p.value.lrt"
)
if(any(negbin.stats %nin% negbin.stats.valid)) {
stop("Invalid poisson stats: ",
paste(negbin.stats[negbin.stats %nin% negbin.stats.valid],collapse=","), "\n")
}
## let CI.RR decode to CI.lower.RR and CI.upper.RR
if(any(negbin.stats == "CI.RR")) {
negbin.stats <- unique(c(negbin.stats[negbin.stats != "CI.RR"], "CI.lower.RR", "CI.upper.RR"))
}
if(any(negbin.stats == "CI.estimate")) {
negbin.stats <- unique(c(negbin.stats[negbin.stats == "CI.estimate"], "CI.lower.estimate", "CI.upper.estimate"))
}
##########################
## Survival stats:
##########################
##surv.stats=c(Nmiss,HR,CI.HR,p.value,concorance)
##Other possible coefficient table columns: CI.estimate,N,Nmiss2,estimate,se,endpoint,Nevents,z.stat
##Other possible model fits: r.squared, logLik, AIC, BIC
surv.stats.valid <- c(
"HR", "CI.lower.HR", "CI.upper.HR", "p.value", "concordance", "Nmiss", # default
"CI.HR", "CI.estimate", "CI.lower.estimate", "CI.upper.estimate", "N", "Nmiss2", "estimate", "std.error", "endpoint", "Nevents", "statistic",
"r.squared", "logLik", "AIC", "BIC", "statistic.sc", "p.value.sc", "p.value.log", "p.value.wald", "N", "std.error.concordance", "p.value.lrt"
)
if(any(survival.stats %nin% surv.stats.valid)) {
stop("Invalid survival stats: ",
paste(survival.stats[survival.stats %nin% surv.stats.valid], collapse=","), "\n")
}
## let CI.HR decode to CI.lower.HR and CI.upper.HR
if(any(survival.stats == "CI.HR")) {
survival.stats <- unique(c(survival.stats[survival.stats != "CI.HR"], "CI.lower.HR", "CI.upper.HR"))
}
if(any(survival.stats == "CI.estimate")) {
survival.stats <- unique(c(survival.stats[survival.stats != "CI.estimate"], "CI.lower.estimate", "CI.upper.estimate"))
}
list(digits=digits, digits.ratio=digits.ratio, digits.p = digits.p, format.p = format.p,
show.adjust=show.adjust, show.intercept=show.intercept, conf.level=conf.level,
ordinal.stats=ordinal.stats, binomial.stats=binomial.stats, gaussian.stats=gaussian.stats,
poisson.stats=poisson.stats, negbin.stats = negbin.stats, survival.stats=survival.stats, stat.labels = stat.labels)
}
|
# challenge C
###########################
rm(list = ls())
graphics.off()
# Plot mean species richness against simulation generation for four
# community sizes (J = 500, 1000, 2500, 5000), averaged over 25 runs each.
challenge_C <- function()
{
  # Accumulators for the richness time series, one per community size;
  # lengths match the series length recorded for each size.
  sum_size1 <- array(0, 4000)
  sum_size2 <- array(0, 8000)
  sum_size3 <- array(0, 20000)
  sum_size4 <- array(0, 40000)
  # Read all 100 result files. Run numbers cycle through the four sizes,
  # so i %% 4 identifies the size (25 files per size).
  for (i in 1:100)
  {
    # loads the richness vector `rich` into this environment
    load(paste("../Data/result_rda/HPC_results", i, ".rda", sep = ""))
    if (i %% 4 == 1) {
      sum_size1 <- sum_size1 + rich
    } else if (i %% 4 == 2) {
      sum_size2 <- sum_size2 + rich
    } else if (i %% 4 == 3) {
      sum_size3 <- sum_size3 + rich
    } else {
      sum_size4 <- sum_size4 + rich
    }
  }
  # Average over the 25 replicate simulations per size.
  mean_size1 <- sum_size1 / 25
  mean_size2 <- sum_size2 / 25
  mean_size3 <- sum_size3 / 25
  mean_size4 <- sum_size4 / 25
  # FIX: plot type must be "l"; "line" is only accepted after a warning
  # because base graphics truncates it to its first character.
  plot(mean_size4, type = "l", col = "blue", xlab = "generation", ylab = "mean species richness", main = "Mean species richness against simulation generation")
  points(mean_size1, type = "l", col = "yellow")
  points(mean_size2, type = "l", col = "red")
  points(mean_size3, type = "l", col = "green")
  legend("bottomright", inset = .05, c("J = 500", "J = 1000", "J = 2500", "J = 5000"), lwd = c(1,1,1,1), col = c("yellow", "red", "green", "blue"))
  # Zoomed-in view of the first 1500 generations, with estimated
  # equilibration times marked per community size.
  plot(mean_size4, type = "l", col = "blue", xlim = range(0:1500), xlab = "generation", ylab = "mean species richness", main = "Mean species richness against simulation generation")
  points(mean_size1, type = "l", col = "yellow")
  points(mean_size2, type = "l", col = "red")
  points(mean_size3, type = "l", col = "green")
  legend("bottomright", inset = .05, c("J = 500", "J = 1000", "J = 2500", "J = 5000"), lwd = c(1,1,1,1), col = c("yellow", "red", "green", "blue"))
  abline(v = 600, col = "blue")
  abline(v = 400, col = "green")
  abline(v = 260, col = "red")
  abline(v = 180, col = "yellow")
}
challenge_C()
|
/Week9/HPC_exercises/Code/chanllegeC.R
|
no_license
|
tisssu/CMEECourseWork
|
R
| false
| false
| 1,786
|
r
|
# challenge C
###########################
rm(list = ls())
graphics.off()
challenge_C =function()
{
# define vector to store abundance
sum_size1 = array(0,4000)
sum_size2 = array(0,8000)
sum_size3 = array(0,20000)
sum_size4 = array(0,40000)
#read results throughly
for (i in 1:100)
{
load(paste("../Data/result_rda/HPC_results",i,".rda",sep = ""))
#calculate results for different size
if (i %% 4 == 1)
{
sum_size1 = sum_size1 + rich
}
else if (i %% 4 == 2)
{
sum_size2 = sum_size2+rich
}
else if (i %% 4 == 3)
{
sum_size3 = sum_size3 + rich
}
else if (i %% 4 == 0)
{
sum_size4 = sum_size4 + rich
}
}
mean_size1 = sum_size1/25
mean_size2 = sum_size2/25
mean_size3 = sum_size3/25
mean_size4 = sum_size4/25
plot(mean_size4,type = "line",col="blue",xlab = "generation", ylab = "mean species richness", main = "Mean species richness against simulation generation")
points(mean_size1,type = "line",col="yellow")
points(mean_size2,type = "line",col="red")
points(mean_size3,type = "line",col="green")
legend("bottomright", inset = .05, c("J = 500", "J = 1000", "J = 2500", "J = 5000"), lwd = c(1,1,1,1), col = c("yellow", "red", "green", "blue"))
plot(mean_size4,type = "line",col="blue",xlim = range(0:1500),xlab = "generation", ylab = "mean species richness", main = "Mean species richness against simulation generation")
points(mean_size1,type = "line",col="yellow")
points(mean_size2,type = "line",col="red")
points(mean_size3,type = "line",col="green")
legend("bottomright", inset = .05, c("J = 500", "J = 1000", "J = 2500", "J = 5000"), lwd = c(1,1,1,1), col = c("yellow", "red", "green", "blue"))
abline(v = 600, col = "blue")
abline(v = 400, col = "green")
abline(v = 260, col = "red")
abline(v = 180, col = "yellow")
}
challenge_C()
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/io.R
\docType{data}
\name{data_err}
\alias{data_err}
\title{Test}
\description{
Test
}
\keyword{data}
|
/rjacks/jacks/man/data_err.Rd
|
permissive
|
goedel-gang/JACKS
|
R
| false
| true
| 180
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/io.R
\docType{data}
\name{data_err}
\alias{data_err}
\title{Test}
\description{
Test
}
\keyword{data}
|
#require(rCharts)
#options(RCHART_WIDTH=800)
# Shiny server: echoes two text inputs back to the UI and reports how many
# times the action button has been pressed.
shinyServer(
    function(input,output){
        # Mirror the text inputs straight back to their output slots.
        output$text1 <- renderText({input$text1})
        output$text2 <- renderText({input$text2})
        # goButton is an actionButton counter: 0 before the first click,
        # then incremented on every press.
        output$text3 <- renderText({
            if (input$goButton ==0) "You have not pressed the btn"
            else if (input$goButton ==1) "pressed it once"
            else "OK quit pressing it"
            })
#         output$x1<-renderChart({
#             r1<-rPlot(SepalLength~SepalWidth|Species,data=iris,color="Species",type="point")
#             r1$addParams(height=300,dom='x1')
#             return(r1)
#         })
    }
)
|
/server.R
|
no_license
|
bouloude/julien-shiny-test
|
R
| false
| false
| 739
|
r
|
#require(rCharts)
#options(RCHART_WIDTH=800)
shinyServer(
function(input,output){
output$text1 <- renderText({input$text1})
output$text2 <- renderText({input$text2})
output$text3 <- renderText({
if (input$goButton ==0) "You have not pressed the btn"
else if (input$goButton ==1) "pressed it once"
else "OK quit pressing it"
})
# output$x1<-renderChart({
# r1<-rPlot(SepalLength~SepalWidth|Species,data=iris,color="Species",type="point")
# r1$addParams(height=300,dom='x1')
# return(r1)
# })
}
)
|
# Author: Sifan Liu
# Date: Thu Dec 27 10:46:17 2018
# --------------
pkgs <- c('tidyverse','officer')
check <- sapply(pkgs,require,warn.conflicts = TRUE,character.only = TRUE)
if(any(!check)){
pkgs.missing <- pkgs[!check]
install.packages(pkgs.missing)
check <- sapply(pkgs.missing,require,warn.conflicts = TRUE,character.only = TRUE)
}
# read doc ---
# https://cran.r-project.org/web/packages/officer/vignettes/officer_reader.html
# temp <- read_docx("../../Data warehouse_Solution bank/Solution Bank/METRO solutions template.docx")
# content <- docx_summary(temp)
# Read slide template and charts ======================
pptx <- read_pptx("V:/Building Inclusive Cities/Birmingham/Market Assessment/test.pptx")
source('deck_charts.R')
# function to add new slide with graph ----------------
# Appends one slide (title + optional ggplot) to the deck held in the
# global `pptx` object.
# NOTE(review): `pptx <- pptx %>% ...` rebinds a *local* copy only; this
# pattern relies on officer's rpptx object having reference semantics so
# the underlying deck is still mutated — TODO confirm against the installed
# officer version, otherwise every slide added here is silently dropped.
new_slides <- function(title, plot, width = 8, height = 5.5){
  # Add a titled content slide to the deck built so far.
  pptx <- pptx %>%
    add_slide(layout = "Title and Content", master = "Office Theme") %>%
    ph_with_text(type = "title",str = title)
  # `plot` is optional: slides with no chart keep just the title.
  if(!missing(plot)){
    # Only embed the ggplot when this R build can rasterize to PNG.
    if(capabilities(what = "png"))pptx <- ph_with_gg(pptx, value = plot, index = 2, width = width, height = height)}
}
# add slides -----------------------------------------
# Metromonitor
new_slides("Birmingham lags its peers in economic performance", p_peermap, 10)
#tradables
new_slides("Birmingham would have had 38k more jobs in 2016 if it had grown at national rates",p_shiftshare, 11)
new_slides("Industrial decline touches both non-tradable and tradable sectors", p_2by2,11)
new_slides("But tradable industries acount for a lower share of jobs than peers", p_expemp)
new_slides("Tradable indusries disproportionately house 'good jobs'", p_opp)
new_slides("Birmingham relies less on exports than peer metro areas", p_expshare)
new_slides("Business receive less export financing in Birmingham and Jefferson County", p_EXIM)
#innovation
new_slides("Local universities are an incredible research asset", p_RDuni)
new_slides("R&D dominated by health sciences and biomedical sciences", p_UAB_donut)
new_slides("But research spend is not translating into commercialization", p_USPTO)
new_slides("Top patenting activities in Birmingham concentrate in life sciences and advanced manufacturing")
new_slides("A second challenge is that Birmingham has low levels of technological complexity", p_pci)
new_slides("Licensing and start-ups at UAB")
new_slides("Venture capital investment", p_VC)
#dynamism
new_slides("Dynamic firm growth is critical to advancing innovation and job creation",p_BDS)
new_slides("But the share of employment at young firms has been in decline", p_young)
new_slides("And Birmingham’s share of high growth companies has lagged", p_inc)
#capital access
new_slides("Is this a capital access issue?", p_FDIC)
new_slides("But there may be capital gaps by place and by race", p_FDICmap,5)
new_slides("But there may be capital gaps by place and by race", p_FDIC_r,5)
new_slides("Birmingham has not had significant CDFI funding", p_CDFImap,5)
new_slides("Birmingham has not had significant CDFI funding", p_CDFI_r,5)
# table
pptx <- pptx%>%
add_slide(layout = "Title and Content", master = "Office Theme")%>%
ph_with_table(type = "body", value = MSA_inclusion)
# FINAL OUTPUT ================================================
print(pptx, target = "V:/Building Inclusive Cities/Birmingham/Market Assessment/test.pptx")
|
/deck_output.R
|
no_license
|
fansi-sifan/County-Cluster
|
R
| false
| false
| 3,378
|
r
|
# Author: Sifan Liu
# Date: Thu Dec 27 10:46:17 2018
# --------------
pkgs <- c('tidyverse','officer')
check <- sapply(pkgs,require,warn.conflicts = TRUE,character.only = TRUE)
if(any(!check)){
pkgs.missing <- pkgs[!check]
install.packages(pkgs.missing)
check <- sapply(pkgs.missing,require,warn.conflicts = TRUE,character.only = TRUE)
}
# read doc ---
# https://cran.r-project.org/web/packages/officer/vignettes/officer_reader.html
# temp <- read_docx("../../Data warehouse_Solution bank/Solution Bank/METRO solutions template.docx")
# content <- docx_summary(temp)
# Read slide template and charts ======================
pptx <- read_pptx("V:/Building Inclusive Cities/Birmingham/Market Assessment/test.pptx")
source('deck_charts.R')
# function to add new slide with graph ----------------
new_slides <- function(title, plot, width = 8, height = 5.5){
pptx <- pptx %>%
add_slide(layout = "Title and Content", master = "Office Theme") %>%
ph_with_text(type = "title",str = title)
if(!missing(plot)){
if(capabilities(what = "png"))pptx <- ph_with_gg(pptx, value = plot, index = 2, width = width, height = height)}
}
# add slides -----------------------------------------
# Metromonitor
new_slides("Birmingham lags its peers in economic performance", p_peermap, 10)
#tradables
new_slides("Birmingham would have had 38k more jobs in 2016 if it had grown at national rates",p_shiftshare, 11)
new_slides("Industrial decline touches both non-tradable and tradable sectors", p_2by2,11)
new_slides("But tradable industries acount for a lower share of jobs than peers", p_expemp)
new_slides("Tradable indusries disproportionately house 'good jobs'", p_opp)
new_slides("Birmingham relies less on exports than peer metro areas", p_expshare)
new_slides("Business receive less export financing in Birmingham and Jefferson County", p_EXIM)
#innovation
new_slides("Local universities are an incredible research asset", p_RDuni)
new_slides("R&D dominated by health sciences and biomedical sciences", p_UAB_donut)
new_slides("But research spend is not translating into commercialization", p_USPTO)
new_slides("Top patenting activities in Birmingham concentrate in life sciences and advanced manufacturing")
new_slides("A second challenge is that Birmingham has low levels of technological complexity", p_pci)
new_slides("Licensing and start-ups at UAB")
new_slides("Venture capital investment", p_VC)
#dynamism
new_slides("Dynamic firm growth is critical to advancing innovation and job creation",p_BDS)
new_slides("But the share of employment at young firms has been in decline", p_young)
new_slides("And Birmingham’s share of high growth companies has lagged", p_inc)
#capital access
new_slides("Is this a capital access issue?", p_FDIC)
new_slides("But there may be capital gaps by place and by race", p_FDICmap,5)
new_slides("But there may be capital gaps by place and by race", p_FDIC_r,5)
new_slides("Birmingham has not had significant CDFI funding", p_CDFImap,5)
new_slides("Birmingham has not had significant CDFI funding", p_CDFI_r,5)
# table
pptx <- pptx%>%
add_slide(layout = "Title and Content", master = "Office Theme")%>%
ph_with_table(type = "body", value = MSA_inclusion)
# FINAL OUTPUT ================================================
print(pptx, target = "V:/Building Inclusive Cities/Birmingham/Market Assessment/test.pptx")
|
# Plot 2: line chart of Global Active Power over 2007-02-01/02,
# written to plot2.png (480x480 px).
# Use an English locale so date labels are not localized.
Sys.setlocale("LC_ALL","English")
# Load the raw household power data (semicolon-separated text).
power_file <- "./Data/RawData/household_power_consumption.txt"
power_raw <- read.csv(power_file, header = TRUE, sep = ";", as.is = TRUE)
# Parse the Date column so rows can be subset by day.
power_raw$Date <- as.Date(power_raw$Date, "%d/%m/%Y")
# dplyr provides filter()/mutate() used below.
library(dplyr)
# Keep only the two target days in February 2007.
target_days <- as.Date(c("2007-02-01", "2007-02-02"))
power_two_days <- filter(power_raw, Date %in% target_days)
# Coerce the measurement of interest to numeric for plotting.
power_two_days$Global_active_power <- as.numeric(power_two_days$Global_active_power)
# Combine date and time into a single timestamp for the x axis.
power_two_days <- mutate(power_two_days, DateTime = paste(power_two_days$Date, power_two_days$Time, sep = " "))
power_two_days$DateTime <- strptime(power_two_days$DateTime, "%Y-%m-%d%H:%M:%OS")
# Render the line plot straight to a PNG device.
png(filename = "plot2.png", width = 480, height = 480)
plot(power_two_days$DateTime, power_two_days$Global_active_power, type = "l", xlab = " ", ylab = "Global Active Power (kilowatts)")
dev.off()
|
/Plot2.R
|
no_license
|
aktru/ExData_Plotting1
|
R
| false
| false
| 1,349
|
r
|
#Setting locale to English because my default locale isn't english
Sys.setlocale("LC_ALL","English")
#Loading raw data
filePath <- "./Data/RawData/household_power_consumption.txt"
expDataSet <- read.csv(filePath, header = TRUE, sep = ";", as.is = TRUE)
#Converting data (Date) to necessary format for filtering rows
expDataSet$Date <- as.Date(expDataSet$Date, "%d/%m/%Y")
#Loading library for filtering rows
library(dplyr)
#Filtering rows for data with date from 2007-02-01 to 2007-02-02
tidyDataSet <- filter(expDataSet, Date == as.Date("2007-02-01", format = "%Y-%m-%d") | Date == as.Date("2007-02-02", format = "%Y-%m-%d"))
#Converting data to necessary format for plotting graphs
tidyDataSet$Global_active_power <- as.numeric(tidyDataSet$Global_active_power)
#Creating a complex variable DateTime and converting it to time format
tidyDataSet <- mutate(tidyDataSet, DateTime = paste(tidyDataSet$Date, tidyDataSet$Time, sep = " "))
tidyDataSet$DateTime <- strptime(tidyDataSet$DateTime, "%Y-%m-%d%H:%M:%OS")
#Opening graphics device png
png(filename = "plot2.png", width = 480, height = 480)
#Plotting the graph with necessary parameters (type of graph, names of axises)
plot(tidyDataSet$DateTime, tidyDataSet$Global_active_power, type = "l", xlab = " ", ylab = "Global Active Power (kilowatts)")
#Closing graphics device png
dev.off()
|
# Display a banner announcing the given round number.
print_header <- function(round) {
    border <- "#################\n\n"
    cat(border)
    cat("#### Round", round, "####\n\n")
    cat(border)
}
# Print the current state of the board with row/column indices.
#
# board: a matrix of cell markers (originally a 3x3 board). For backward
# compatibility the argument may be omitted, in which case the caller's
# `board` object is used — the original version read a global `board`
# implicitly, which made the function untestable and fragile.
# Generalized: board dimensions are taken from the matrix rather than
# being hard-coded to 3.
print_board <- function(board) {
    if (missing(board)) {
        # Preserve the legacy behavior of reading `board` from the caller.
        board <- get("board", envir = parent.frame())
    }
    cat("Current board:\n")
    cat("~~~~~~~~~~~~~~\n\n")
    cat("   ")
    for (i in seq_len(ncol(board))) {
        cat(i, " ")
    }
    cat("\n")
    for (i in seq_len(nrow(board))) {
        cat(" ", i, " ")
        for (c in board[i, ]) {
            cat(c, " ")
        }
        cat("\n")
    }
    cat("\n~~~~~~~~~~~~~~\n\n")
}
|
/print_functions.R
|
no_license
|
ballaneypranav/brn_r_programming
|
R
| false
| false
| 486
|
r
|
print_header <- function(round) {
cat("#################\n\n")
cat("#### Round", round, "####\n\n")
cat("#################\n\n")
}
print_board <- function() {
# Prints the current state of the board
cat("Current board:\n")
cat("~~~~~~~~~~~~~~\n\n")
cat(" ")
for (i in c(1, 2, 3)) {
cat(i, " ")
}
cat("\n")
for (i in c(1, 2, 3)) {
cat(" ", i, " ")
for (c in board[i, ]) {
cat(c, " ")
}
cat("\n")
}
cat("\n~~~~~~~~~~~~~~\n\n")
}
|
# ABC-style local linear regression adjustment.
#
# tolx    : acceptance tolerance as a fraction of iterations to keep;
#           NULL means keep all rows and use robust (median/MAD) scaling
#           instead of target-centred scaling.
# sstable : matrix of simulated summary statistics (iterations x stats).
# params  : matrix/data frame of parameter draws aligned row-wise
#           with sstable.
# target  : observed summary statistics (required when tolx is given).
#
# Returns a list with the fitted regression coefficients (`coeff`) plus
# the `centering` and `scaling` used, for later use by make_fp_proj().
get_fp_proj = function(tolx=NULL,sstable,params,target=NULL)
{
# Project the data and create a transformed sstable and target
	if(!is.null(tolx))target = as.numeric(target)
	nss = dim(sstable)[2]
	niter = dim(sstable)[1]
	if(!is.null(tolx)){
		# Scale each statistic by the tolx-quantile of its absolute
		# deviation from the target, centring on the target itself.
		scaling = numeric(nss)
		for(j in 1:nss){
			v1 = sstable[,j]
			v1 = abs(v1-target[j])
			v1 = sort(v1)
			scaling[j] = v1[niter*tolx]
		}
		# Guard against zero scale (constant statistic) to avoid Inf.
		scaling = ifelse(scaling == 0,1,scaling)
		centering = target
		sstable2 = sstable
		sstable2 = sweep(sstable2,2,centering)
		sstable2 = sweep(sstable2,2,scaling,"/")
		target2 = (target-centering)/scaling
	}else{
		# No tolerance: robust standardization (median / MAD) per column.
		scaling = apply(sstable,2,mad)
		scaling = ifelse(scaling == 0,1,scaling)
		centering = apply(sstable,2,median)
		sstable2 = sstable
		sstable2 = sweep(sstable2,2,centering)
		sstable2 = sweep(sstable2,2,scaling,"/")
	}
# calculate euclidean distance
	if(!is.null(tolx))dst = sqrt(apply(sweep(sstable2,2,target2)^2,1,sum))
	if(is.null(tolx)){
		# Keep every simulated row.
		wt = rep(T,niter)
		sampsize = niter
	}else{
		# Accept the ntol closest simulations (at least one).
		ntol = ceiling(tolx * niter)
		if(ntol <= 0 )ntol = 1
		if(ntol > niter)ntol = niter
		thresh = sort(dst)[ntol]
		wt = dst <= thresh
		sampsize = sum(wt)
	}
	# Regress the accepted parameter draws on the standardized statistics
	# (columns renamed v1..vN so the formula can be built as text).
	ss.df = data.frame(sstable2[wt,])
	pwt = params[wt,]
	xvar.names <- paste("v",as.character(c(1:nss)),sep="")
	names(ss.df) <- xvar.names
	fmla <- as.formula(paste("pwt ~ ", paste(xvar.names, collapse= "+")))
	fit1 <- lm(fmla,data=ss.df)
	# Replace NA/NaN coefficients (collinear columns) with 0.
	coeff.ok = ifelse(is.finite(fit1$coeff),fit1$coeff,0)
	list(coeff = coeff.ok,centering = centering, scaling = scaling)
}
# Apply a previously fitted projection (from get_fp_proj) to new data.
#
# trans  : list with elements `coeff` (the projection matrix), `centering`
#          and `scaling`, as returned by get_fp_proj().
# target : either a numeric vector of summary statistics, or a matrix with
#          one row per observation.
#
# Returns the projected value(s): a vector for vector input, a matrix for
# matrix input. Stops with an error on dimension mismatch.
make_fp_proj = function(trans,target)
{
	proj <- trans$coeff
	if(is.null(dim(proj)))stop("projection matrix not matrix")
	n_in <- dim(proj)[1]
	if(is.null(dim(target))){
		# Vector input: standardize, prepend the intercept term, project.
		if(length(target) != n_in - 1)stop("vector argument has incompatible length")
		std <- (target - trans$centering) / trans$scaling
		return(as.vector(t(c(1, std)) %*% proj))
	}
	# Matrix input: standardize every column, add an intercept column.
	if(dim(target)[2] + 1 != n_in)stop("matrix argument has incompatible number of columns")
	std <- sweep(target, 2, trans$centering)
	std <- sweep(std, 2, trans$scaling, "/")
	std <- cbind(1, std)
	return(as.matrix(std) %*% proj)
}
|
/fp_proj.R
|
no_license
|
johowardmcc/wildcat_abc
|
R
| false
| false
| 2,163
|
r
|
get_fp_proj = function(tolx=NULL,sstable,params,target=NULL)
{
# Project the data and create a transformed sstable and target
if(!is.null(tolx))target = as.numeric(target)
nss = dim(sstable)[2]
niter = dim(sstable)[1]
if(!is.null(tolx)){
scaling = numeric(nss)
for(j in 1:nss){
v1 = sstable[,j]
v1 = abs(v1-target[j])
v1 = sort(v1)
scaling[j] = v1[niter*tolx]
}
scaling = ifelse(scaling == 0,1,scaling)
centering = target
sstable2 = sstable
sstable2 = sweep(sstable2,2,centering)
sstable2 = sweep(sstable2,2,scaling,"/")
target2 = (target-centering)/scaling
}else{
scaling = apply(sstable,2,mad)
scaling = ifelse(scaling == 0,1,scaling)
centering = apply(sstable,2,median)
sstable2 = sstable
sstable2 = sweep(sstable2,2,centering)
sstable2 = sweep(sstable2,2,scaling,"/")
}
# calculate euclidean distance
if(!is.null(tolx))dst = sqrt(apply(sweep(sstable2,2,target2)^2,1,sum))
if(is.null(tolx)){
wt = rep(T,niter)
sampsize = niter
}else{
ntol = ceiling(tolx * niter)
if(ntol <= 0 )ntol = 1
if(ntol > niter)ntol = niter
thresh = sort(dst)[ntol]
wt = dst <= thresh
sampsize = sum(wt)
}
ss.df = data.frame(sstable2[wt,])
pwt = params[wt,]
xvar.names <- paste("v",as.character(c(1:nss)),sep="")
names(ss.df) <- xvar.names
fmla <- as.formula(paste("pwt ~ ", paste(xvar.names, collapse= "+")))
fit1 <- lm(fmla,data=ss.df)
coeff.ok = ifelse(is.finite(fit1$coeff),fit1$coeff,0)
list(coeff = coeff.ok,centering = centering, scaling = scaling)
}
make_fp_proj = function(trans,target)
{
scaling = trans$scaling
centering = trans$centering
m1 = trans$coeff
if(is.null(dim(m1)))stop("projection matrix not matrix")
if(is.null(dim(target))){
if(length(target) != dim(m1)[1] - 1)stop("vector argument has incompatible length")
target = (target-centering)/scaling
return(as.vector(t(c(1,target))%*%m1))
}
if(dim(target)[2] + 1 != dim(m1)[1])stop("matrix argument has incompatible number of columns")
sstable2 = target
sstable2 = sweep(sstable2,2,centering)
sstable2 = sweep(sstable2,2,scaling,"/")
sstable2=cbind(1,sstable2)
return(as.matrix(sstable2)%*%m1)
}
|
# run_analysis.R — builds a tidy summary of the UCI HAR smartphone dataset.
# Output: MeanData.txt holding the per-subject, per-activity mean of every
# mean/std feature.
# 1. Read data into R
xtest <- read.table("test/X_test.txt")
ytest <- read.table("test/Y_test.txt")
xtrain <- read.table("train/X_train.txt")
ytrain <- read.table("train/Y_train.txt")
subject_test <- read.table("test/subject_test.txt")
subject_train <- read.table("train/subject_train.txt")
# 2. Merge data
# Row layout per set: subject | activity | 561 feature columns.
data <- rbind(cbind(subject_test,ytest,xtest),cbind(subject_train,ytrain,xtrain))
# 3. Extract measurements on mean and standard deviation
# Keep only features whose name mentions "mean" or "std".
feature <- read.table("features.txt",stringsAsFactors = FALSE)[,2]
fIn <- grep("mean|std",feature)
# Offset by 2: columns 1-2 of `data` are subject and activity.
fulldata <- data[,c(1,2,fIn+2)]
colnames(fulldata) <- c("subject","activity",feature[fIn])
# 4. Change activity names
# Replace numeric activity codes with the labels from activity_labels.txt.
activityName <- read.table("activity_labels.txt")
fulldata$activity <- factor(fulldata$activity,labels = activityName[[2]])
# 5. Use descriptive variable names
names(fulldata) <- gsub("\\(\\)","",names(fulldata))
names(fulldata) <- gsub("\\-mean","Mean",names(fulldata))
names(fulldata) <- gsub("\\-std","Std",names(fulldata))
names(fulldata) <- gsub("^t","Time",names(fulldata))
names(fulldata) <- gsub("^f","Frequency",names(fulldata))
# 6. Create table
# Average every kept feature within each (activity, subject) pair.
library(dplyr)
groupdata <- group_by(fulldata,activity,subject)
groupdt <- summarise_each(groupdata,funs(mean))
write.table(groupdt, "MeanData.txt", row.names = FALSE)
|
/run_analysis.R
|
no_license
|
Micky-angela/datasciencecoursera
|
R
| false
| false
| 1,286
|
r
|
# 1. Read data into R
xtest <- read.table("test/X_test.txt")
ytest <- read.table("test/Y_test.txt")
xtrain <- read.table("train/X_train.txt")
ytrain <- read.table("train/Y_train.txt")
subject_test <- read.table("test/subject_test.txt")
subject_train <- read.table("train/subject_train.txt")
# 2. Merge data
data <- rbind(cbind(subject_test,ytest,xtest),cbind(subject_train,ytrain,xtrain))
# 3. Extract measurements on mean and standard deviation
feature <- read.table("features.txt",stringsAsFactors = FALSE)[,2]
fIn <- grep("mean|std",feature)
fulldata <- data[,c(1,2,fIn+2)]
colnames(fulldata) <- c("subject","activity",feature[fIn])
# 4. Change activity names
activityName <- read.table("activity_labels.txt")
fulldata$activity <- factor(fulldata$activity,labels = activityName[[2]])
# 5. Use descriptive variable names
names(fulldata) <- gsub("\\(\\)","",names(fulldata))
names(fulldata) <- gsub("\\-mean","Mean",names(fulldata))
names(fulldata) <- gsub("\\-std","Std",names(fulldata))
names(fulldata) <- gsub("^t","Time",names(fulldata))
names(fulldata) <- gsub("^f","Frequency",names(fulldata))
# 6. Create table
library(dplyr)
groupdata <- group_by(fulldata,activity,subject)
groupdt <- summarise_each(groupdata,funs(mean))
write.table(groupdt, "MeanData.txt", row.names = FALSE)
|
# "Drug" dashboard tab: welcome text, help, a separator rule, the result
# panel, and the shared page footer. All four panels are server-rendered
# placeholders (shiny::uiOutput), populated by matching renderUI calls.
tabItem(
  tabName = "drug", align = "center",
  fluidRow(style = "width:80%;", shiny::uiOutput(outputId = "ui_drug_welcome")),
  fluidRow(style = "width:80%", shiny::uiOutput(outputId = "ui_drug_help")),
  fluidRow(style = "width:80%", shiny::tags$hr(style = "width:80%")),
  fluidRow(style = "width:80%", shiny::uiOutput(outputId = "ui_drug_result")),
  # Load footer ----
  # Shared footer UI sourced relative to the configured working directory.
  source(file.path(config$wd, "ui", "footer.R"), echo = FALSE, verbose = FALSE)$value
)
|
/ui/drug_ui.R
|
permissive
|
COMODr/GSCALite
|
R
| false
| false
| 480
|
r
|
tabItem(
tabName = "drug", align = "center",
fluidRow(style = "width:80%;", shiny::uiOutput(outputId = "ui_drug_welcome")),
fluidRow(style = "width:80%", shiny::uiOutput(outputId = "ui_drug_help")),
fluidRow(style = "width:80%", shiny::tags$hr(style = "width:80%")),
fluidRow(style = "width:80%", shiny::uiOutput(outputId = "ui_drug_result")),
# Load footer ----
source(file.path(config$wd, "ui", "footer.R"), echo = FALSE, verbose = FALSE)$value
)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/hierarchie.R
\name{hier_departement_academie}
\alias{hier_departement_academie}
\title{Renvoie les codes academie a partir des codes de departement}
\usage{
hier_departement_academie(code_departement)
}
\arguments{
\item{code_departement}{Un vecteur de code de départements.}
}
\value{
Un vecteur de code académie.
Jeu de données source : \code{apogee::departement_academie}.\cr
Il est créé à partir de la table "departement_academie" de la base Access "Tables_ref.accdb".
}
\description{
Renvoie les codes académie à partir des codes de département.
}
\examples{
apogee::hier_departement_academie(c("031", "056"))
}
|
/man/hier_departement_academie.Rd
|
permissive
|
ove-ut3/apogee
|
R
| false
| true
| 705
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/hierarchie.R
\name{hier_departement_academie}
\alias{hier_departement_academie}
\title{Renvoie les codes academie a partir des codes de departement}
\usage{
hier_departement_academie(code_departement)
}
\arguments{
\item{code_departement}{Un vecteur de code de départements.}
}
\value{
Un vecteur de code académie.
Jeu de données source : \code{apogee::departement_academie}.\cr
Il est créé à partir de la table "departement_academie" de la base Access "Tables_ref.accdb".
}
\description{
Renvoie les codes académie à partir des codes de département.
}
\examples{
apogee::hier_departement_academie(c("031", "056"))
}
|
#' Fit a ridge regression model
#'
#' @description Fits a ridge regression model via the singular value
#'   decomposition of the design matrix:
#'   beta = V diag(s / (s^2 + lambda)) U' y.
#' @param form a formula
#' @import stats
#' @param lambda a number (the ridge penalty)
#' @param d a data.frame
#' @return A list of ridge regression information (class "ridge_reg"):
#'   coefficients, lambda and the model formula.
#' @examples
#' ridge_fit <- ridge_reg(Sepal.Length ~. , lambda = 1.2, iris)
#' @export
ridge_reg <- function(form, lambda, d) {
  rownames(d) <- NULL
  # Design matrix; model.matrix drops rows with missing predictors.
  X <- model.matrix(form, d)
  # Response column, aligned to the rows model.matrix kept.
  resp <- matrix(d[, as.character(form)[2]], ncol = 1)
  resp <- resp[as.numeric(rownames(X)), , drop = FALSE]
  ## via svd
  decomp <- svd(X)
  shrink <- diag(decomp$d / (decomp$d^2 + lambda))
  beta <- decomp$v %*% shrink %*% t(decomp$u) %*% resp
  rownames(beta) <- colnames(X)
  out <- list(coefficients = beta, lambda = lambda, form = form)
  class(out) <- "ridge_reg"
  out
}
|
/R/ridge-regression.R
|
no_license
|
daiw3/bis557
|
R
| false
| false
| 834
|
r
|
#' Fit a ridge regression model
#'
#' @description This function fits the ridge regression model.
#' @param form a formula
#' @import stats
#' @param lambda a number
#' @param d a data.frame
#' @return An list of rigde regression information
#' @examples
#' ridge_fit <- ridge_reg(Sepal.Length ~. , lambda = 1.2, iris)
#' @export
ridge_reg <- function(form, lambda, d) {
rownames(d) <- NULL
m <- model.matrix(form, d)
y <- matrix(d[, as.character(form)[2]], ncol = 1)
y <- y[as.numeric(rownames(m)),, drop = FALSE]
## via svd
svd_obj <- svd(m)
U <- svd_obj$u
V <- svd_obj$v
svals <- svd_obj$d
D <- diag(svals / (svals^2 + lambda))
beta <- V %*% D %*% t(U) %*% y
rownames(beta) <- colnames(m)
ret <- list(coefficients = beta, lambda = lambda, form = form)
class(ret) <- "ridge_reg"
return(ret)
}
|
#' Reading the NOAA earthquake data file
#'
#' Reads a tab-delimited NOAA earthquake data file into a tibble.
#'
#' @param filename Path of the NOAA earthquake data file (tab-separated).
#' @return tbl_df object (earthquake data)
#' @note Stops with an error message if the file does not exist.
#' @import dplyr
#' @import tibble
#' @importFrom readr read_delim
#' @examples
#' \dontrun{
#' filename<-system.file("data","earthquakes.tsv",package="mycapstone") #xx
#' eq_data_read(filename)
#' }
#'
#' @export
eq_data_read <- function(filename) {
  # Fail fast with a clear message when the path is wrong.
  if(!file.exists(filename))
    stop("file '", filename, "' does not exist")
  # suppressMessages() hides readr's column-specification chatter.
  data <- suppressMessages({
    readr::read_delim(filename, delim='\t',progress = FALSE)
  })
  tibble::as_tibble(data)
}
#' Clean the raw NOAA earthquake data
#'
#' Renames and selects the columns needed for timeline mapping, strips the
#' country prefix (everything up to the colon) out of LOCATION_NAME,
#' converts coordinates and deaths to numeric, builds a single datetime
#' column, and finally title-cases the location names via
#' eq_location_clean().
#' @param mydf raw data frame as returned by eq_data_read()
#' @return a dataframe filtered required for mapping in a timeline the data
#' @importFrom tidyr unite drop_na
#' @importFrom stringi stri_trans_totitle
#' @importFrom dplyr %>%
#' @import lubridate
#' @import dplyr
#' @examples
#'\dontrun{
#' filename<-system.file("data","earthquakes.tsv",package="mycapstone") ##XX
#' eq_clean_data(eq_data_read(filename))
#' }
#'
#' @export
eq_clean_data<-function(mydf){
  clean_data <- mydf%>%
    # Rename the raw NOAA headers to the names the plotting code expects.
    dplyr::rename(COUNTRY=Country,LOCATION_NAME='Location Name',LATITUDE=Latitude, LONGITUDE=Longitude,YEAR=Year, MONTH=Mo, DAY=Dy, HOUR=Hr, EQ_MAG_ML='MMI Int',DEATHS=Deaths)%>%
    dplyr::select(COUNTRY,LOCATION_NAME, LATITUDE, LONGITUDE,YEAR, MONTH, DAY, HOUR, EQ_MAG_ML,DEATHS) %>%
    # Drop everything up to and including the colon (the country prefix).
    dplyr::mutate(LOCATION_NAME=gsub(".*:", "", LOCATION_NAME))%>%
    dplyr::mutate(LATITUDE= as.numeric(LATITUDE)) %>%
    dplyr::mutate(LONGITUDE= as.numeric(LONGITUDE))%>%
    # Fuse year/month/day/hour into one POSIXct datetime column.
    tidyr::unite(datetime, YEAR, MONTH, DAY, HOUR) %>%
    dplyr::mutate(datetime = lubridate::ymd_h(datetime))%>%
    dplyr::mutate(DEATHS=as.numeric(DEATHS))
  eq_location_clean(clean_data)
}
#' Title-case the earthquake's location name
#'
#' @param mydf data frame whose LOCATION_NAME column is written in upper case
#' @return the earthquake data with LOCATION_NAME converted to Title Case,
#'   ready for timeline mapping
#' @importFrom stringi stri_trans_totitle
#'@examples
#'\dontrun{
#' filename<-system.file("data","earthquakes.tsv",package="mycapstone")
#' eq_location_clean(eq_clean_data(eq_data_read(filename)))
#' }
#'
#' @export
eq_location_clean<-function(mydf){
  # NOTE(review): this NULL binding looks like the usual R CMD check
  # "no visible binding" workaround; it has no effect on the result.
  LOCATION_NAME<-NULL
  mydf = mydf%>%
    dplyr::mutate(LOCATION_NAME=stringi::stri_trans_totitle(LOCATION_NAME))
  mydf
}
#' Timeline geom for earthquakes of given countries
#'
#' Uses the GeomTimeline ggproto prototype to plot a timeline with the
#' earthquakes of a given country (or list of countries).
#' @param mapping aesthetic mappings created by aes
#' @param data is the dataframe that contains the Earthquake's data
#' @param na.rm will hepls to remove the NA values from the data frame
#' @param position position adjustment functio
#' @param stat The Layer's statistical transformation
#' @param show.legend layer's legend
#' @param inherit.aes will indicate the default aesthetics overridng
#' @param ... layer's other arguments
#' @return plot of an Earthquakes timeline which contains all Earthquakes of a Given Country or List of Countries between a set of dates
#' @import ggplot2
#' @examples
#' \dontrun{
#' filename<-system.file("data","earthquakes.tsv",package="mycapstone")
#' eq_clean_data(eq_data_read(filename)) %>%
#' dplyr::filter(datetime >= "1990-01-01" & datetime <="2018-01-01" & COUNTRY %in% c("MEXICO","USA", "JORDAN"))%>%
#' ggplot() +
#' geom_timeline(aes(x = datetime, size = EQ_MAG_ML, colour = DEATHS, fill = DEATHS))
#' }
#'
#' @export
geom_timeline <- function(mapping = NULL,
                          data = NULL,
                          na.rm = TRUE,
                          position = "identity",
                          stat = "identity",
                          show.legend = NA,
                          inherit.aes = TRUE, ...) {
  # Thin wrapper: all the drawing logic lives in the GeomTimeline ggproto.
  ggplot2::layer(
    Geom = GeomTimeline,
    mapping = mapping,
    data = data,
    stat = stat,
    position = position,
    show.legend = show.legend,
    inherit.aes = inherit.aes,
    params = list(na.rm = na.rm, ...))
}
#' GeomTimeline: ggproto object implementing the timeline geom.
#'
#' Draws, for each y value (e.g. one row per country), a horizontal
#' reference line spanning the panel, plus one point per earthquake
#' positioned by date on the x axis. Point size/colour/alpha come from
#' the mapped aesthetics (typically magnitude and deaths).
#' Built from the prototype in course material 4.7.1 "Building a New Geom".
GeomTimeline <- ggplot2::ggproto("GeomTimeline", ggplot2::Geom,
                 # Only x (the earthquake date) is required.
                 required_aes = c("x"),
                 # Defaults give hollow blue circles on a single baseline.
                 default_aes = ggplot2::aes(y = 0.1,
                                            shape = 21,
                                            size = 1,
                                            colour = "blue",
                                            alpha = 0.8,
                                            stroke = 1,
                                            fill = NA),
                 draw_key = ggplot2::draw_key_point,
                 draw_panel = function(data, panel_scales, coord) {
                   coords <- coord$transform(data, panel_scales)
                   # One full-width horizontal line per timeline row.
                   Timeline_line_grobs <- grid::polylineGrob(x = grid::unit(rep(c(0, 1),
                                                                 length(coords$y)),
                                                                 "npc"),
                                              y = rep(coords$y, each = 2),
                                              id.length = rep(2,length(coords$y)),
                                              gp = grid::gpar(col = "black", lwd = 0.3, lty = 1))
                   # One point per earthquake along its row.
                   Earthquakes_points_grobs <- grid::pointsGrob(
                     x = coords$x,
                     y = coords$y,
                     pch = coords$shape,
                     gp = grid::gpar(col = alpha(coords$colour, coords$alpha), fill = alpha(coords$fill, coords$alpha),
                                     lwd = coords$stroke * .stroke / 2),
                     fontsize = coords$size * .pt + coords$stroke * .stroke / 2
                   )
                   # BUG FIX: the original referenced the undefined name
                   # `Timeline_line` here, so draw_panel errored at render
                   # time; use the grob actually created above.
                   grid::gTree(children = grid::gList(Timeline_line_grobs, Earthquakes_points_grobs))
                 })
#' Funcion for adding the Eartquakes's Location labels to an Earthquake's timeline
#' @param mapping aesthetic mappings created by aes
#' @param data is the dataframe that contains the Earthquake's data
#' @param na.rm will hepls to remove the NA values from the data frame
#' @param show.legend layer's legend
#' @param stat The Layer's statistical transformation
#' @param position position adjustment functio
#' @param inherit.aes will indicate the default aesthetics overridng
#' @param ... layer's other arguments
#' @return the Earthquake's labels
#' @examples
#' \dontrun{
#' filename<-system.file("data","earthquakes.tsv",package="mycapstone")
#' eq_location_clean(eq_clean_data(eq_data_read(filename))) %>%
#' dplyr::filter(datetime >= "1980-01-01" & datetime <="2018-01-01" & COUNTRY %in% c("MEXICO","USA", "JORDAN"))%>%
#' ggplot() +
#' geom_timeline(aes(x = datetime, y = COUNTRY, size = EQ_MAG_ML, colour = DEATHS, fill = DEATHS)) +
#' geom_timeline_label(aes(x = datetime, y = COUNTRY, label = LOCATION_NAME, number = 3, max_aes = EQ_MAG_ML))
#'}
#'
#' @export
geom_timeline_label <- function(mapping = NULL,
                                data = NULL,
                                na.rm = TRUE,
                                show.legend = NA,
                                stat = "identity",
                                position = "identity",
                                inherit.aes = TRUE, ...) {
  # Thin wrapper around ggplot2::layer() that uses the
  # GeomTimeLineAnnotation ggproto object as the drawing backend.
  ggplot2::layer(
    geom = GeomTimeLineAnnotation,
    mapping = mapping, data = data,
    stat = stat, position = position,
    show.legend = show.legend, inherit.aes = inherit.aes,
    params = list(na.rm = na.rm, ...)
  )
}
# GeomTimeLineAnnotation: ggproto backend for geom_timeline_label().
# For each labelled earthquake, draws a short vertical tick above the
# timeline and the text of the `tags` aesthetic at a 60-degree angle.
GeomTimeLineAnnotation <- ggplot2::ggproto(
  "GeomTimeLineAnnotation", ggplot2::Geom,
  required_aes = c("x", "tags"),
  default_aes = ggplot2::aes(y = 0.5, number = NULL, max_aes = NULL),
  draw_panel = function(data, panel_scales, coord) {
    coords <- coord$transform(data, panel_scales)
    # Tick length shrinks as more distinct timelines share the panel.
    tick <- 0.06 / length(unique(coords$y))
    seg_grob <- grid::segmentsGrob(
      x0 = grid::unit(coords$x, "npc"),
      y0 = grid::unit(coords$y, "npc"),
      x1 = grid::unit(coords$x, "npc"),
      y1 = grid::unit(coords$y + tick, "npc"),
      default.units = "npc",
      arrow = NULL, name = NULL,
      gp = grid::gpar(), vp = NULL
    )
    txt_grob <- grid::textGrob(
      label = coords$tags,
      x = unit(coords$x, "npc"),
      y = unit(coords$y + tick, "npc"),
      rot = 60, just = "left",
      gp = grid::gpar(fontsize = 8)
    )
    grid::gTree(children = grid::gList(seg_grob, txt_grob))
  }
)
#' Earthquakes Data in an Interactive Map.
#'
#' mapped centered with their latitude and longitude "epicenter" which is annotated based on an annot_col which the user can specify.
#' plus specifies "popup_text" by a call to eq_create_label generates the appropriate text..
#'
#' @references \url{http://rstudio.github.io/leaflet/}
#'
#' @param eq_clean The clean earthquake data in a tbl_df object.
#' @param annot_col Column in the tbl_df object to be used for annotation.
#'
#' @return returns an interactive map.
#'
#' @note warning an invalid column name & uses the LOCATION_NAME column as annotation column.
#'
#' @import leaflet
#' @importFrom magrittr %>%
#'
#' @examples
#' \dontrun{
#' filename<-system.file("data","earthquakes.tsv",package="mycapstone")
#' eq_location_clean(eq_clean_data(eq_data_read(filename))) %>%
#' dplyr::filter(COUNTRY == "MEXICO" & lubridate::year(datetime) >= 1980) %>%
#' eq_map(annot_col = "datetime")
#' }
#'
#' @export
eq_map <- function(eq_clean = NULL, annot_col = "datetime") {
  # The map cannot be drawn without a date, coordinates and a magnitude.
  all_columns <- colnames(eq_clean)
  stopifnot(any("datetime" %in% all_columns),
            any("LATITUDE" %in% all_columns),
            any("LONGITUDE" %in% all_columns),
            any("EQ_MAG_ML" %in% all_columns))
  # Fall back to the date column when the requested annotation is absent.
  if (!any(annot_col %in% all_columns)) {
    warning("Invalid Column - DATE Displayed")
    annot_col <- "datetime"
  }
  leaflet::leaflet() %>%
    leaflet::addTiles() %>%
    leaflet::addCircleMarkers(
      data = eq_clean,
      lng = ~ LONGITUDE, lat = ~ LATITUDE,
      radius = ~ EQ_MAG_ML,
      weight = 1, fillOpacity = 0.2,
      popup = ~ paste(get(annot_col))
    )
}
#' Creates pop up text for markers.
#' generates HTML formatted text for popups for map markers.
#'
#' @param eq_clean The clean earthquake data in a tbl_df object.
#' @return returns a character vector containing popup text to be used in a leaflet visualization.
#' @import dplyr
#' @importFrom magrittr %>%
#' @examples
#' \dontrun{
#' filename<-system.file("data","earthquakes.tsv",package="mycapstone")
#' eq_location_clean(eq_clean_data(eq_data_read(filename))) %>%
#' dplyr::filter(COUNTRY == "MEXICO" & lubridate::year(datetime) >= 1980) %>%
#' dplyr::mutate(popup_text = eq_create_label(.)) %>%
#' eq_map(annot_col = "popup_text")
#' }
#'
#' @export
eq_create_label <- function(eq_clean=NULL) {
  # Validate that the columns used to build the popup text are present.
  all_columns <- colnames(eq_clean)
  stopifnot('LOCATION_NAME' %in% all_columns,
            'EQ_MAG_ML' %in% all_columns,
            'DEATHS' %in% all_columns)
  # Build one HTML fragment per row; NA fields are dropped from the popup.
  data2 <- eq_clean %>%
    # FIX: select_() is deprecated; use select() with bare column names.
    dplyr::select(LOCATION_NAME, EQ_MAG_ML, DEATHS) %>%
    dplyr::mutate(new_LOCATION_NAME = ifelse(is.na(LOCATION_NAME), LOCATION_NAME,
                                             paste0("<b>Location:</b> ", LOCATION_NAME, "<br />"))) %>%
    dplyr::mutate(new_EQ_PRIMARY = ifelse(is.na(EQ_MAG_ML), EQ_MAG_ML,
                                          paste0("<b>Magnitude:</b> ", EQ_MAG_ML, "<br />"))) %>%
    dplyr::mutate(new_DEATHS = ifelse(is.na(DEATHS), DEATHS,
                                      paste0("<b>Total Deaths:</b> ", DEATHS))) %>%
    # unite() renders missing pieces as the literal "NA"; strip those out.
    tidyr::unite('popup_values', c('new_LOCATION_NAME', 'new_EQ_PRIMARY', 'new_DEATHS'), sep = '') %>%
    dplyr::mutate(popup_values = stringr::str_replace_all(popup_values, "[,]*NA[,]*", "")) %>%
    dplyr::mutate(popup_values = ifelse(popup_values == "", "All Values are NA", popup_values))
  # FIX: pull() replaces the old collect()/select(.dots = ...) workaround,
  # which only worked by accident (select() renamed the column to `.dots`).
  dplyr::pull(data2, popup_values)
}
|
/R/1capstoneproject2.R
|
no_license
|
jianweilu/mycapstone
|
R
| false
| false
| 14,601
|
r
|
#' Reading the NOAA earthquake data file
#'
#' @param filename The filename of the NOAA earthquake data file
#' @return tbl_df object (earthquake data)
#' @note Stop if the filename does not exist (error message)
#' @import dplyr
#' @import tibble
#' @importFrom readr read_delim
#' @examples
#' \dontrun{
#' filename<-system.file("data","earthquakes.tsv",package="mycapstone") #xx
#' eq_data_read(filename)
#' }
#'
#' @export
eq_data_read <- function(filename) {
  # Fail fast with an informative message when the path is invalid.
  if (!file.exists(filename)) {
    stop("file '", filename, "' does not exist")
  }
  # read_delim() prints column-type messages; silence that noise.
  raw <- suppressMessages(
    readr::read_delim(filename, delim = '\t', progress = FALSE)
  )
  tibble::as_tibble(raw)
}
#' cleans the LOCATION_NAME column by stripping out the country name (including the colon) and
#' converts names to title case (as opposed to all caps)
#' @param mydf that contains location names written in upper case
#' @return a dataframe filtered required for mapping in a timeline the data
#' @importFrom tidyr unite drop_na
#' @importFrom stringi stri_trans_totitle
#' @importFrom magrittr %>%
#' @import lubridate
#' @import dplyr
#' @examples
#'\dontrun{
#' filename<-system.file("data","earthquakes.tsv",package="mycapstone") ##XX
#' eq_clean_data(eq_data_read(filename))
#' }
#'
#' @export
# Clean the raw NOAA tibble: rename the raw headers to the package's
# canonical names, keep only the columns used downstream, coerce the
# coordinate/death columns to numeric, and build a single POSIXct
# `datetime` column from the separate year/month/day/hour fields.
eq_clean_data<-function(mydf){
  clean_data <- mydf%>%
    # Map raw NOAA headers (e.g. "Location Name", "Mo", "Dy") to canonical names.
    dplyr::rename(COUNTRY=Country,LOCATION_NAME='Location Name',LATITUDE=Latitude, LONGITUDE=Longitude,YEAR=Year, MONTH=Mo, DAY=Dy, HOUR=Hr, EQ_MAG_ML='MMI Int',DEATHS=Deaths)%>%
    dplyr::select(COUNTRY,LOCATION_NAME, LATITUDE, LONGITUDE,YEAR, MONTH, DAY, HOUR, EQ_MAG_ML,DEATHS) %>%
    # Strip everything up to and including the country prefix colon.
    dplyr::mutate(LOCATION_NAME=gsub(".*:", "", LOCATION_NAME))%>%
    dplyr::mutate(LATITUDE= as.numeric(LATITUDE)) %>%
    dplyr::mutate(LONGITUDE= as.numeric(LONGITUDE))%>%
    # Combine the date parts into "YYYY_MM_DD_HH", then parse to POSIXct.
    tidyr::unite(datetime, YEAR, MONTH, DAY, HOUR) %>%
    dplyr::mutate(datetime = lubridate::ymd_h(datetime))%>%
    dplyr::mutate(DEATHS=as.numeric(DEATHS))
  # Title-case the location names before returning.
  eq_location_clean(clean_data)
}
#' title case the Earthquake's Location Data-Name
#' @param mydf contains location names written in Uper case
#' @return contains the Eathquake data filtered required for mapping in a timeline the data and the Tittle Case Location
#' @importFrom stringi stri_trans_totitle
#'@examples
#'\dontrun{
#' filename<-system.file("data","earthquakes.tsv",package="mycapstone")
#' eq_location_clean(eq_clean_data(eq_data_read(filename)))
#' }
#'
#' @export
eq_location_clean <- function(mydf) {
  # Dummy binding so R CMD check does not flag the NSE column name below.
  LOCATION_NAME <- NULL
  # Convert "SAN FRANCISCO" style names to "San Francisco".
  dplyr::mutate(mydf,
                LOCATION_NAME = stringi::stri_trans_totitle(LOCATION_NAME))
}
#' use the GeomTimeLine Prototype Function required to Plot a Timeline with the Earthquakes of a given country
#' @param mapping aesthetic mappings created by aes
#' @param data is the dataframe that contains the Earthquake's data
#' @param na.rm will hepls to remove the NA values from the data frame
#' @param position position adjustment functio
#' @param stat The Layer's statistical transformation
#' @param show.legend layer's legend
#' @param inherit.aes will indicate the default aesthetics overridng
#' @param ... layer's other arguments
#' @return plot of an Earthquakes timeline which contains all Earthquakes of a Given Country or List of Countries between a set of dates
#' @import ggplot2
#' @examples
#' \dontrun{
#' filename<-system.file("data","earthquakes.tsv",package="mycapstone")
#' eq_clean_data(eq_data_read(filename)) %>%
#' dplyr::filter(datetime >= "1990-01-01" & datetime <="2018-01-01" & COUNTRY %in% c("MEXICO","USA", "JORDAN"))%>%
#' ggplot() +
#' geom_timeline(aes(x = datetime, size = EQ_MAG_ML, colour = DEATHS, fill = DEATHS))
#' }
#'
#' @export
geom_timeline <- function(mapping = NULL,
                          data = NULL,
                          na.rm = TRUE,
                          position = "identity",
                          stat = "identity",
                          show.legend = NA,
                          inherit.aes = TRUE, ...) {
  # Build a ggplot2 layer backed by the GeomTimeline ggproto object.
  # FIX: ggplot2::layer() takes the ggproto object via the lowercase
  # `geom` argument; the original passed `Geom =`, which layer() does
  # not accept and which raised an "unused argument" error at plot time.
  ggplot2::layer(
    geom = GeomTimeline,
    mapping = mapping,
    data = data,
    stat = stat,
    position = position,
    show.legend = show.legend,
    inherit.aes = inherit.aes,
    params = list(na.rm = na.rm, ...)
  )
}
#'Ploting an Earthquake's Location timeline
#'building a GEOM Function from scratch
#'use a Dataframe compiled using the function eq_clean_data.
#'a prototype function as foundation for geom_timeline function.
#'use the ggplot2's geom_point.
#'use the Earthquakes' dates as X-axis main values
#'The geom_point's size and colour defined by the Earthquake's magnitude
#'The GeomTimeLine was build using the Function Prototype provided in the Course's Material 4.7.1 Building a New Geom
# GeomTimeline: ggproto backend for geom_timeline().
# Draws one horizontal reference line across the panel per distinct y value
# (one line per timeline) plus one point per earthquake; point size, colour,
# fill and alpha come from the mapped aesthetics.
# (Built from the prototype in course material 4.7.1 "Building a New Geom".)
GeomTimeline <- ggplot2::ggproto("GeomTimeline", ggplot2::Geom,
  # The event date is the only mandatory aesthetic.
  required_aes = c("x"),
  # Defaults for the optional aesthetics.
  default_aes = ggplot2::aes(y = 0.1,
                             shape = 21,
                             size = 1,
                             colour = "blue",
                             alpha = 0.8,
                             stroke = 1,
                             fill = NA),
  draw_key = ggplot2::draw_key_point,
  draw_panel = function(data, panel_scales, coord) {
    coords <- coord$transform(data, panel_scales)
    # One full-width horizontal line per timeline.
    # FIX: the formal argument of polylineGrob() is `id.lengths`; the
    # original spelled it `id.length` and relied on partial matching.
    timeline_line_grobs <- grid::polylineGrob(
      x = grid::unit(rep(c(0, 1), length(coords$y)), "npc"),
      y = rep(coords$y, each = 2),
      id.lengths = rep(2, length(coords$y)),
      gp = grid::gpar(col = "black", lwd = 0.3, lty = 1)
    )
    # One point per earthquake.
    earthquake_point_grobs <- grid::pointsGrob(
      x = coords$x,
      y = coords$y,
      pch = coords$shape,
      gp = grid::gpar(
        col = alpha(coords$colour, coords$alpha),
        fill = alpha(coords$fill, coords$alpha),
        lwd = coords$stroke * .stroke / 2
      ),
      fontsize = coords$size * .pt + coords$stroke * .stroke / 2
    )
    # FIX: the original passed the undefined name `Timeline_line` here
    # (the grob is called `Timeline_line_grobs`), so every plot failed
    # with "object 'Timeline_line' not found" at draw time.
    grid::gTree(children = grid::gList(timeline_line_grobs,
                                       earthquake_point_grobs))
  }
)
#' Funcion for adding the Eartquakes's Location labels to an Earthquake's timeline
#' @param mapping aesthetic mappings created by aes
#' @param data is the dataframe that contains the Earthquake's data
#' @param na.rm will hepls to remove the NA values from the data frame
#' @param show.legend layer's legend
#' @param stat The Layer's statistical transformation
#' @param position position adjustment functio
#' @param inherit.aes will indicate the default aesthetics overridng
#' @param ... layer's other arguments
#' @return the Earthquake's labels
#' @examples
#' \dontrun{
#' filename<-system.file("data","earthquakes.tsv",package="mycapstone")
#' eq_location_clean(eq_clean_data(eq_data_read(filename))) %>%
#' dplyr::filter(datetime >= "1980-01-01" & datetime <="2018-01-01" & COUNTRY %in% c("MEXICO","USA", "JORDAN"))%>%
#' ggplot() +
#' geom_timeline(aes(x = datetime, y = COUNTRY, size = EQ_MAG_ML, colour = DEATHS, fill = DEATHS)) +
#' geom_timeline_label(aes(x = datetime, y = COUNTRY, label = LOCATION_NAME, number = 3, max_aes = EQ_MAG_ML))
#'}
#'
#' @export
geom_timeline_label <- function(mapping = NULL,
                                data = NULL,
                                na.rm = TRUE,
                                show.legend = NA,
                                stat = "identity",
                                position = "identity",
                                inherit.aes = TRUE, ...) {
  # Delegate all layer construction to ggplot2::layer(); the drawing
  # itself is implemented by the GeomTimeLineAnnotation ggproto object.
  ggplot2::layer(
    geom = GeomTimeLineAnnotation,
    mapping = mapping,
    data = data,
    stat = stat,
    position = position,
    show.legend = show.legend,
    inherit.aes = inherit.aes,
    params = list(na.rm = na.rm, ...)
  )
}
# GeomTimeLineAnnotation: ggproto backend for geom_timeline_label().
# For each labelled earthquake, draws a short vertical tick above the
# timeline and the text of the `tags` aesthetic at a 60-degree angle.
GeomTimeLineAnnotation <- ggplot2::ggproto(
  "GeomTimeLineAnnotation", ggplot2::Geom,
  required_aes = c("x", "tags"),
  default_aes = ggplot2::aes(y = 0.5, number = NULL, max_aes = NULL),
  draw_panel = function(data, panel_scales, coord) {
    coords <- coord$transform(data, panel_scales)
    # Tick length shrinks as more distinct timelines share the panel.
    tick <- 0.06 / length(unique(coords$y))
    seg_grob <- grid::segmentsGrob(
      x0 = grid::unit(coords$x, "npc"),
      y0 = grid::unit(coords$y, "npc"),
      x1 = grid::unit(coords$x, "npc"),
      y1 = grid::unit(coords$y + tick, "npc"),
      default.units = "npc",
      arrow = NULL, name = NULL,
      gp = grid::gpar(), vp = NULL
    )
    txt_grob <- grid::textGrob(
      label = coords$tags,
      x = unit(coords$x, "npc"),
      y = unit(coords$y + tick, "npc"),
      rot = 60, just = "left",
      gp = grid::gpar(fontsize = 8)
    )
    grid::gTree(children = grid::gList(seg_grob, txt_grob))
  }
)
#' Earthquakes Data in an Interactive Map.
#'
#' mapped centered with their latitude and longitude "epicenter" which is annotated based on an annot_col which the user can specify.
#' plus specifies "popup_text" by a call to eq_create_label generates the appropriate text..
#'
#' @references \url{http://rstudio.github.io/leaflet/}
#'
#' @param eq_clean The clean earthquake data in a tbl_df object.
#' @param annot_col Column in the tbl_df object to be used for annotation.
#'
#' @return returns an interactive map.
#'
#' @note warning an invalid column name & uses the LOCATION_NAME column as annotation column.
#'
#' @import leaflet
#' @importFrom magrittr %>%
#'
#' @examples
#' \dontrun{
#' filename<-system.file("data","earthquakes.tsv",package="mycapstone")
#' eq_location_clean(eq_clean_data(eq_data_read(filename))) %>%
#' dplyr::filter(COUNTRY == "MEXICO" & lubridate::year(datetime) >= 1980) %>%
#' eq_map(annot_col = "datetime")
#' }
#'
#' @export
eq_map <- function(eq_clean = NULL, annot_col = "datetime") {
  # The map cannot be drawn without a date, coordinates and a magnitude.
  all_columns <- colnames(eq_clean)
  stopifnot(any("datetime" %in% all_columns),
            any("LATITUDE" %in% all_columns),
            any("LONGITUDE" %in% all_columns),
            any("EQ_MAG_ML" %in% all_columns))
  # Fall back to the date column when the requested annotation is absent.
  if (!any(annot_col %in% all_columns)) {
    warning("Invalid Column - DATE Displayed")
    annot_col <- "datetime"
  }
  leaflet::leaflet() %>%
    leaflet::addTiles() %>%
    leaflet::addCircleMarkers(
      data = eq_clean,
      lng = ~ LONGITUDE, lat = ~ LATITUDE,
      radius = ~ EQ_MAG_ML,
      weight = 1, fillOpacity = 0.2,
      popup = ~ paste(get(annot_col))
    )
}
#' Creates pop up text for markers.
#' generates HTML formatted text for popups for map markers.
#'
#' @param eq_clean The clean earthquake data in a tbl_df object.
#' @return returns a character vector containing popup text to be used in a leaflet visualization.
#' @import dplyr
#' @importFrom magrittr %>%
#' @examples
#' \dontrun{
#' filename<-system.file("data","earthquakes.tsv",package="mycapstone")
#' eq_location_clean(eq_clean_data(eq_data_read(filename))) %>%
#' dplyr::filter(COUNTRY == "MEXICO" & lubridate::year(datetime) >= 1980) %>%
#' dplyr::mutate(popup_text = eq_create_label(.)) %>%
#' eq_map(annot_col = "popup_text")
#' }
#'
#' @export
eq_create_label <- function(eq_clean=NULL) {
  # Validate that the columns used to build the popup text are present.
  all_columns <- colnames(eq_clean)
  stopifnot('LOCATION_NAME' %in% all_columns,
            'EQ_MAG_ML' %in% all_columns,
            'DEATHS' %in% all_columns)
  # Build one HTML fragment per row; NA fields are dropped from the popup.
  data2 <- eq_clean %>%
    # FIX: select_() is deprecated; use select() with bare column names.
    dplyr::select(LOCATION_NAME, EQ_MAG_ML, DEATHS) %>%
    dplyr::mutate(new_LOCATION_NAME = ifelse(is.na(LOCATION_NAME), LOCATION_NAME,
                                             paste0("<b>Location:</b> ", LOCATION_NAME, "<br />"))) %>%
    dplyr::mutate(new_EQ_PRIMARY = ifelse(is.na(EQ_MAG_ML), EQ_MAG_ML,
                                          paste0("<b>Magnitude:</b> ", EQ_MAG_ML, "<br />"))) %>%
    dplyr::mutate(new_DEATHS = ifelse(is.na(DEATHS), DEATHS,
                                      paste0("<b>Total Deaths:</b> ", DEATHS))) %>%
    # unite() renders missing pieces as the literal "NA"; strip those out.
    tidyr::unite('popup_values', c('new_LOCATION_NAME', 'new_EQ_PRIMARY', 'new_DEATHS'), sep = '') %>%
    dplyr::mutate(popup_values = stringr::str_replace_all(popup_values, "[,]*NA[,]*", "")) %>%
    dplyr::mutate(popup_values = ifelse(popup_values == "", "All Values are NA", popup_values))
  # FIX: pull() replaces the old collect()/select(.dots = ...) workaround,
  # which only worked by accident (select() renamed the column to `.dots`).
  dplyr::pull(data2, popup_values)
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/summary.R
\name{summary.landscape_list}
\alias{summary.landscape_list}
\title{Summary method for landscape lists.}
\usage{
\method{summary}{landscape_list}(L, parms = livestock$parms)
}
\arguments{
\item{L}{A list of landscape objects (may be of class "landscape_list").}
\item{parms}{A list of model parameters; defaults to \code{livestock$parms}.}
}
\value{
A list of statistical output containing the following entries:
\itemize{
\item{\code{$cover}}{Stuff}
\item{\code{$local}}{Stuff}
\item{\code{$clustering}}{Stuff}
\item{\code{$kernel}}{Stuff}
}
}
\description{
Summary method for landscape lists.
}
\examples{
L0 <- init_list(100, runif_range = c(0.6,0.99), width = 50)
summary(L0)
}
|
/man/summary.landscape_list.Rd
|
permissive
|
skefi/socialecological
|
R
| false
| true
| 699
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/summary.R
\name{summary.landscape_list}
\alias{summary.landscape_list}
\title{Summary method for landscape lists.}
\usage{
\method{summary}{landscape_list}(L, parms = livestock$parms)
}
\arguments{
\item{L}{A list of landscape objects (may be of class "landscape_list").}
\item{parms}{A list of model parameters; defaults to \code{livestock$parms}.}
}
\value{
A list of statistical output containing the following entries:
\itemize{
\item{\code{$cover}}{Stuff}
\item{\code{$local}}{Stuff}
\item{\code{$clustering}}{Stuff}
\item{\code{$kernel}}{Stuff}
}
}
\description{
Summary method for landscape lists.
}
\examples{
L0 <- init_list(100, runif_range = c(0.6,0.99), width = 50)
summary(L0)
}
|
## Put comments here that give an overall description of what your
## functions do
## Write a short comment describing this function
## Create a special "matrix" object that can cache its inverse.
## Returns a list of four accessor functions closing over the matrix `x`
## and its cached inverse:
##   set(y)        - store a new matrix and invalidate the cache
##   get()         - return the stored matrix
##   setinverse(i) - cache the inverse
##   getinverse()  - return the cached inverse (NULL until computed)
makeCacheMatrix <- function(x = matrix()) {
  cached_inverse <- NULL
  set <- function(y) {
    x <<- y
    cached_inverse <<- NULL  # new matrix, so any cached inverse is stale
  }
  get <- function() x
  setinverse <- function(inverse) cached_inverse <<- inverse
  getinverse <- function() cached_inverse
  list(set = set,
       get = get,
       setinverse = setinverse,
       getinverse = getinverse)
}
## Write a short comment describing this function
## Compute the inverse of the special "matrix" returned by makeCacheMatrix.
## If the inverse has already been calculated (and the matrix unchanged),
## the cached value is returned instead of being recomputed.
cacheSolve <- function(x, ...) {
  ## Return a matrix that is the inverse of 'x'
  cached <- x$getinverse()
  if (!is.null(cached)) {
    message("getting cached data")
    return(cached)
  }
  source_matrix <- x$get()
  inverse <- solve(source_matrix, ...)
  x$setinverse(inverse)
  inverse
}
|
/cachematrix.R
|
no_license
|
sreenila/ProgrammingAssignment2
|
R
| false
| false
| 837
|
r
|
## Put comments here that give an overall description of what your
## functions do
## Write a short comment describing this function
## Wrap a matrix in an object that memoises its inverse.
## The returned list exposes set/get for the matrix itself and
## setinverse/getinverse for the cached inverse (NULL until computed).
makeCacheMatrix <- function(x = matrix()) {
  inv <- NULL
  list(
    set = function(y) {
      x <<- y
      inv <<- NULL  # invalidate the cache on replacement
    },
    get = function() x,
    setinverse = function(inverse) inv <<- inverse,
    getinverse = function() inv
  )
}
## Write a short comment describing this function
## Inverse of a makeCacheMatrix() object, served from its cache when
## available; otherwise computed with solve() and stored for next time.
cacheSolve <- function(x, ...) {
  ## Return a matrix that is the inverse of 'x'
  if (!is.null(x$getinverse())) {
    message("getting cached data")
    return(x$getinverse())
  }
  inverse <- solve(x$get(), ...)
  x$setinverse(inverse)
  inverse
}
|
# R Script
# Author: Adam W Hansen
# Date Created: Jan 26, 2018
# Date Last Modified: Aug 17, 2018
# Credential-prompt and JDBC driver packages.
library(getPass)
library(RJDBC)
#USERNAME = readline(prompt = "Username: ")
USERNAME <- getPass(msg = "Username: ")
USERNAME  # echoed for confirmation; the password below is never echoed
PASSWORD <- getPass(msg = "Password: ")
# Impala SQL: among proband-like samples (parent-style sample names are
# excluded by the rlike patterns), count distinct samples per gene carrying
# rare, HIGH-impact, quality-passing variants in genes with ExAC pLI >= 1.0.
# The left anti join removes variants also observed in parent-like samples;
# only genes with >= 5 such samples are reported.
query =
"select
count(distinct(sname)) as n_samples, gene_symbol, omim_gene_2016, omim_gene_2018
from
(select freeze2_3.variant_genotype_filtered_qualpass.* from freeze2_3.variant_genotype_filtered_qualpass
left join freeze2_3.filtered_qualpass_maf
on variant_genotype_filtered_qualpass.vid = filtered_qualpass_maf.vid
where impact = 'HIGH'
and f2_3_af < 0.01
and (af < 0.001 or af is null)
and (charge_af <= 0.0001 or charge_af is null)
and (gnomad_af <= 0.0001 or gnomad_ext_af <= 0.0001 or (gnomad_af is null and gnomad_ext_af is null))
and (pubmed = '' or pubmed is null)
and (project not in ('CMG', 'SASCHX', 'WGLTEST', 'WGLM', 'WGLBP', 'WGLCARDIO', 'WGLC', 'WGLHES'))
and (existing_variation = '' or existing_variation is null)
and variant_genotype_filtered_qualpass.vid not like 'GL%'
and (domains not like 'Low_complexity%' or domains is null)
and exac_mu_syn is not null
and (vr >=4 and vr >= 0.25*dp)
and not (sname rlike '[Mm]other' or sname rlike '[Ff]ather' or sname rlike '[\\-_0-9][Mm]$' or sname rlike '[\\-_0-9][Ff]$' or sname rlike '[\\-_0-9][Dd]$' or sname rlike '[\\-_]0?[2-3]' or sname rlike '^[0-9]{4}[A-Z]{2}0[2-3]$' or sname like 'WPW%' or sname like 'HGSC%' or sname like 'NA%')
and exac_pli >= 1.0
) cases
left anti join
(select freeze2_3.variant_genotype_filtered_qualpass.* from freeze2_3.variant_genotype_filtered_qualpass
left join freeze2_3.filtered_qualpass_maf
on variant_genotype_filtered_qualpass.vid = filtered_qualpass_maf.vid
where impact = 'HIGH'
and f2_3_af < 0.01
and (af < 0.001 or af is null)
and (charge_af <= 0.0001 or charge_af is null)
and (gnomad_af <= 0.0001 or gnomad_ext_af <= 0.0001 or (gnomad_af is null and gnomad_ext_af is null))
and (pubmed = '' or pubmed is null)
and (project not in ('CMG', 'SASCHX', 'WGLTEST', 'WGLM', 'WGLBP', 'WGLCARDIO', 'WGLC', 'WGLHES'))
and (existing_variation = '' or existing_variation is null)
and variant_genotype_filtered_qualpass.vid not like 'GL%'
and (domains not like 'Low_complexity%' or domains is null)
and exac_mu_syn is not null
and (vr >=4 and vr >= 0.25*dp)
and (sname rlike '[Mm]other' or sname rlike '[Ff]ather' or sname rlike '[\\-_0-9][Mm]$' or sname rlike '[\\-_0-9][Ff]$' or sname rlike '[\\-_0-9][Dd]$' or sname rlike '[\\-_]0?[2-3]' or sname rlike '^[0-9]{4}[A-Z]{2}0[2-3]$' or sname like 'WPW%' or sname like 'HGSC%' or sname like 'NA%')
and exac_pli >= 1.0
) controls
on cases.vid=controls.vid
group by gene_symbol, omim_gene_2016, omim_gene_2018
having count(distinct(sname)) >= 5
order by omim_gene_2016 nulls first, omim_gene_2018 nulls last, count(distinct(sname)) desc"
# Make the Impala database connection via the Cloudera JDBC driver.
drv <- JDBC(
  driverClass = "com.cloudera.impala.jdbc41.Driver",
  classPath = list.files("/opt/JDBC/jars/ImpalaJDBC41/",
                         pattern = "jar$", full.names = TRUE),
  identifier.quote = "`"
)
impalaConnectionUrl <- paste0(
  "jdbc:impala://XXXX;AuthMech=4;",
  "SSLTrustStore=/opt/cloudera/security/jks/truststore.jks;",
  "SSLTrustStorePwd=XXXX;ssl=1;UID=", USERNAME, "@XXXX;PWD=", PASSWORD
)
conn <- dbConnect(drv, impalaConnectionUrl)
# Execute the query and fetch the full result set.
curResult <- dbGetQuery(conn, query)
# Impala names the count column "EXPR_0"; rename it to "n_samples".
names(curResult)[names(curResult) == "EXPR_0"] <- "n_samples"
# Record the pLI threshold used for this run.
curResult$">=" <- 1
# Write the result to a date-stamped TSV.
output_file <- paste0(as.character(Sys.Date()), "_exac_pli_Default_1_fixed_bgl", ".tsv")
write.table(curResult, file = output_file, sep = "\t", quote = FALSE, row.names = FALSE)
|
/2018/aug/lof/pli/filter/aug_17_pli1_filter_bgl.R
|
no_license
|
BCM-HGSC/HARLEE_analysis
|
R
| false
| false
| 4,053
|
r
|
# R Script
# Author: Adam W Hansen
# Date Created: Jan 26, 2018
# Date Last Modified: Aug 17, 2018
# Credential-prompt and JDBC driver packages.
library(getPass)
library(RJDBC)
#USERNAME = readline(prompt = "Username: ")
USERNAME <- getPass(msg = "Username: ")
USERNAME  # echoed for confirmation; the password below is never echoed
PASSWORD <- getPass(msg = "Password: ")
# Impala SQL: among proband-like samples (parent-style sample names are
# excluded by the rlike patterns), count distinct samples per gene carrying
# rare, HIGH-impact, quality-passing variants in genes with ExAC pLI >= 1.0.
# The left anti join removes variants also observed in parent-like samples;
# only genes with >= 5 such samples are reported.
query =
"select
count(distinct(sname)) as n_samples, gene_symbol, omim_gene_2016, omim_gene_2018
from
(select freeze2_3.variant_genotype_filtered_qualpass.* from freeze2_3.variant_genotype_filtered_qualpass
left join freeze2_3.filtered_qualpass_maf
on variant_genotype_filtered_qualpass.vid = filtered_qualpass_maf.vid
where impact = 'HIGH'
and f2_3_af < 0.01
and (af < 0.001 or af is null)
and (charge_af <= 0.0001 or charge_af is null)
and (gnomad_af <= 0.0001 or gnomad_ext_af <= 0.0001 or (gnomad_af is null and gnomad_ext_af is null))
and (pubmed = '' or pubmed is null)
and (project not in ('CMG', 'SASCHX', 'WGLTEST', 'WGLM', 'WGLBP', 'WGLCARDIO', 'WGLC', 'WGLHES'))
and (existing_variation = '' or existing_variation is null)
and variant_genotype_filtered_qualpass.vid not like 'GL%'
and (domains not like 'Low_complexity%' or domains is null)
and exac_mu_syn is not null
and (vr >=4 and vr >= 0.25*dp)
and not (sname rlike '[Mm]other' or sname rlike '[Ff]ather' or sname rlike '[\\-_0-9][Mm]$' or sname rlike '[\\-_0-9][Ff]$' or sname rlike '[\\-_0-9][Dd]$' or sname rlike '[\\-_]0?[2-3]' or sname rlike '^[0-9]{4}[A-Z]{2}0[2-3]$' or sname like 'WPW%' or sname like 'HGSC%' or sname like 'NA%')
and exac_pli >= 1.0
) cases
left anti join
(select freeze2_3.variant_genotype_filtered_qualpass.* from freeze2_3.variant_genotype_filtered_qualpass
left join freeze2_3.filtered_qualpass_maf
on variant_genotype_filtered_qualpass.vid = filtered_qualpass_maf.vid
where impact = 'HIGH'
and f2_3_af < 0.01
and (af < 0.001 or af is null)
and (charge_af <= 0.0001 or charge_af is null)
and (gnomad_af <= 0.0001 or gnomad_ext_af <= 0.0001 or (gnomad_af is null and gnomad_ext_af is null))
and (pubmed = '' or pubmed is null)
and (project not in ('CMG', 'SASCHX', 'WGLTEST', 'WGLM', 'WGLBP', 'WGLCARDIO', 'WGLC', 'WGLHES'))
and (existing_variation = '' or existing_variation is null)
and variant_genotype_filtered_qualpass.vid not like 'GL%'
and (domains not like 'Low_complexity%' or domains is null)
and exac_mu_syn is not null
and (vr >=4 and vr >= 0.25*dp)
and (sname rlike '[Mm]other' or sname rlike '[Ff]ather' or sname rlike '[\\-_0-9][Mm]$' or sname rlike '[\\-_0-9][Ff]$' or sname rlike '[\\-_0-9][Dd]$' or sname rlike '[\\-_]0?[2-3]' or sname rlike '^[0-9]{4}[A-Z]{2}0[2-3]$' or sname like 'WPW%' or sname like 'HGSC%' or sname like 'NA%')
and exac_pli >= 1.0
) controls
on cases.vid=controls.vid
group by gene_symbol, omim_gene_2016, omim_gene_2018
having count(distinct(sname)) >= 5
order by omim_gene_2016 nulls first, omim_gene_2018 nulls last, count(distinct(sname)) desc"
# Make the Impala database connection via the Cloudera JDBC driver.
drv <- JDBC(
  driverClass = "com.cloudera.impala.jdbc41.Driver",
  classPath = list.files("/opt/JDBC/jars/ImpalaJDBC41/",
                         pattern = "jar$", full.names = TRUE),
  identifier.quote = "`"
)
impalaConnectionUrl <- paste0(
  "jdbc:impala://XXXX;AuthMech=4;",
  "SSLTrustStore=/opt/cloudera/security/jks/truststore.jks;",
  "SSLTrustStorePwd=XXXX;ssl=1;UID=", USERNAME, "@XXXX;PWD=", PASSWORD
)
conn <- dbConnect(drv, impalaConnectionUrl)
# Execute the query and fetch the full result set.
curResult <- dbGetQuery(conn, query)
# Impala names the count column "EXPR_0"; rename it to "n_samples".
names(curResult)[names(curResult) == "EXPR_0"] <- "n_samples"
# Record the pLI threshold used for this run.
curResult$">=" <- 1
# Write the result to a date-stamped TSV.
output_file <- paste0(as.character(Sys.Date()), "_exac_pli_Default_1_fixed_bgl", ".tsv")
write.table(curResult, file = output_file, sep = "\t", quote = FALSE, row.names = FALSE)
|
### CMC: new version with hierarchical IW
# One Gibbs draw of the random-coefficient covariance matrix under the
# hierarchical inverse-Wishart prior.
nextDhIW <- function(a, b, env, lambda) {
  # Expand the population means `a` (length gNIV) to one row per
  # individual, then take each individual's deviation from those means.
  mean_rows <- t(matrix(a, nrow = env$gNIV, ncol = env$gNP))
  deviations <- b - mean_rows
  # Scale matrix: deviation cross-product plus the hierarchical term.
  scale_mat <- t(deviations) %*% deviations + 2 * env$degreesOfFreedom * lambda
  # Updated degrees of freedom: prior df + respondents + variables - 1.
  # riwish() is presumably from MCMCpack -- TODO confirm the import.
  riwish(env$degreesOfFreedom + env$gNP + env$gNIV - 1, scale_mat)
}
|
/R/nextDhIW.R
|
no_license
|
cran/RSGHB
|
R
| false
| false
| 295
|
r
|
### CMC: new version with hierarchical IW
# One Gibbs draw of the random-coefficient covariance matrix under the
# hierarchical inverse-Wishart prior.
nextDhIW <- function(a, b, env, lambda) {
  # Expand the population means `a` (length gNIV) to one row per
  # individual, then take each individual's deviation from those means.
  mean_rows <- t(matrix(a, nrow = env$gNIV, ncol = env$gNP))
  deviations <- b - mean_rows
  # Scale matrix: deviation cross-product plus the hierarchical term.
  scale_mat <- t(deviations) %*% deviations + 2 * env$degreesOfFreedom * lambda
  # Updated degrees of freedom: prior df + respondents + variables - 1.
  # riwish() is presumably from MCMCpack -- TODO confirm the import.
  riwish(env$degreesOfFreedom + env$gNP + env$gNIV - 1, scale_mat)
}
|
# Test-suite setup for check_all().
context("test-check-all.R")
library("tibble")
# Instantiate a Synapse client and attempt to log in; the tests below are
# skipped (via logged_in()) when no login is available.
syn <- attempt_instantiate()
tryCatch(
  attempt_login(syn),
  error = function(e) {
    print(glue::glue("Did not log into Synapse: {e$message}"))
  }
)
# Minimal annotation dictionary shared by all tests in this file.
annots <- tribble(
  ~key, ~value, ~columnType,
  "assay", "rnaSeq", "STRING",
  "fileFormat", "fastq", "STRING",
  "fileFormat", "txt", "STRING",
  "fileFormat", "csv", "STRING",
  "species", "Human", "STRING"
)
Sys.setenv(R_CONFIG_ACTIVE = "testing")
test_that("check_all() returns a list of check conditions or NULLs", {
  skip_if_not(logged_in(syn = syn))
  # One identical dummy file per metadata type.
  file_contents <- data.frame(a = c(TRUE, FALSE), b = c(1, 3))
  data <- tibble::tibble(
    metadataType = c("manifest", "individual", "biospecimen", "assay"),
    name = c("file1", "file2", "file3", "file4"),
    species = "human",
    assay = "rnaSeq",
    file_data = rep(list(file_contents), 4)
  )
  res <- check_all(
    data = data,
    annotations = annots,
    study = "foo",
    syn = syn
  )
  expect_equal(class(res), "list")
  # Every element must be a check condition object or NULL.
  is_valid_result <- function(x) {
    inherits(x, "check_fail") | inherits(x, "check_pass") | inherits(x, "check_warn") | is.null(x) # nolint
  }
  expect_true(all(unlist(purrr::map(res, is_valid_result))))
})
test_that("check_all() returns NULL for checks with missing data", {
  skip_if_not(logged_in(syn = syn))
  # Helper: metadata tibble with the given file names and file contents.
  make_data <- function(file_names, file_data) {
    tibble::tibble(
      metadataType = c("manifest", "individual", "biospecimen", "assay"),
      name = file_names,
      species = "human",
      assay = "rnaSeq",
      file_data = file_data
    )
  }
  df <- data.frame(a = c(TRUE, FALSE), b = c(1, 3))
  data1 <- make_data(c(NA, NA, NA, NA),
                     list(NULL, NULL, NULL, NULL))
  data2 <- make_data(c("file1", NA, NA, NA),
                     list(df, NULL, NULL, NULL))
  data3 <- make_data(c(NA, "file2", NA, "file4"),
                     list(NULL, df, NULL, df))
  data4 <- make_data(c("file1", "file2", "file3", NA),
                     list(df, df, df, NULL))
  results <- lapply(
    list(data1, data2, data3, data4),
    function(d) check_all(data = d, annotations = annots, study = "foo", syn = syn)
  )
  # No data at all: every check is skipped (NULL).
  expect_true(all(purrr::map_lgl(results[[1]], is.null)))
  # Partial data: fewer checks run than the total number of checks defined.
  expect_true(sum(purrr::map_lgl(results[[2]], ~ !is.null(.x))) < length(results[[2]]))
  expect_true(sum(purrr::map_lgl(results[[3]], ~ !is.null(.x))) < length(results[[3]]))
  expect_true(sum(purrr::map_lgl(results[[4]], ~ !is.null(.x))) < length(results[[4]]))
})
test_that("check_all() returns expected conditions", {
  skip_if_not(logged_in(syn = syn))
  # Manifest lists the metadata files plus three individuals/specimens.
  manifest <- data.frame(
    path = c("file1", "file2", "file3", "file4", NA, NA, NA),
    individualID = c(NA, NA, NA, NA, "a", "b", "c"),
    specimenID = c(NA, NA, NA, NA, NA, "1", "3"),
    stringsAsFactors = FALSE
  )
  # Individual metadata deliberately omits individual "c".
  individual <- data.frame(
    individualID = c("a", "b"),
    age = c(27, 32),
    stringsAsFactors = FALSE
  )
  # Biospecimen metadata uses fileFormat values outside the dictionary.
  biospecimen <- data.frame(
    individualID = c("a", "b"),
    specimenID = c("1", "3"),
    fileFormat = c("xlsx", "tex"),
    stringsAsFactors = FALSE
  )
  assay <- data.frame(
    specimenID = c("1", "3"),
    assay = c("rnaSeq", "rnaSeq"),
    stringsAsFactors = FALSE
  )
  data <- tibble::tibble(
    metadataType = c("manifest", "individual", "biospecimen", "assay"),
    name = c("file1", "file2", "file3", "file4"),
    species = "human",
    assay = "rnaSeq",
    file_data = list(manifest, individual, biospecimen, assay)
  )
  res <- check_all(
    data = data,
    annotations = annots,
    study = "foo",
    syn = syn
  )
  # All metadata filenames appear in the manifest.
  expect_true(inherits(res$meta_files_in_manifest, "check_pass"))
  # Individual "c" is in the manifest but missing from individual metadata.
  expect_equal(
    res$individual_ids_indiv_manifest$data$`Missing from individual`[1],
    "c"
  )
  # "xlsx"/"tex" are flagged as invalid fileFormat annotation values.
  expect_equal(res$annotation_values_biosp$data$fileFormat, c("xlsx", "tex"))
})
test_that("check_all() throws error if not exactly 1 metadata type each", {
skip_if_not(logged_in(syn = syn))
# Missing biospecimen
data1 <- tibble::tibble(
metadataType = c(
"manifest",
"individual",
"assay"
)
)
# Duplicate assay
data2 <- tibble::tibble(
metadataType = c(
"manifest",
"individual",
"assay",
"assay"
)
)
expect_error(
check_all(
data = data1,
annotations = annots,
study = "foo",
syn = syn
)
)
expect_error(
check_all(
data = data2,
annotations = annots,
study = "foo",
syn = syn
)
)
})
test_that("check_all runs check_ages_over_90 for human data", {
skip_if_not(logged_in(syn = syn))
data_human <- tibble::tibble(
metadataType = c(
"manifest",
"individual",
"biospecimen",
"assay"
),
name = c("file1", "file2", "file3", "file4"),
species = "human",
assay = "rnaSeq",
file_data = c(
list(data.frame(a = 1)),
list(data.frame(ageDeath = 95)),
list(data.frame(a = 1)),
list(data.frame(a = 1))
)
)
data_animal <- data_human
data_animal$species <- "mouse or other animal model"
data_has_na <- data_human
data_has_na$species <- c(NA, "human", "human", NA)
res1 <- check_all(
data = data_human,
annotations = annots,
study = "foo",
syn = syn
)
res2 <- check_all(
data = data_animal,
annotations = annots,
study = "foo",
syn = syn
)
res3 <- check_all(
data = data_has_na,
annotations = annots,
study = "foo",
syn = syn
)
expect_true(inherits(res1$ages_over_90_indiv, "check_warn"))
expect_null(res2$ages_over_90_indiv)
expect_true(inherits(res3$ages_over_90_indiv, "check_warn"))
})
test_that("check_all runs check_ages_over_90 on biospecimen file", {
skip_if_not(logged_in(syn = syn))
dat <- tibble::tibble(
metadataType = c(
"manifest",
"individual",
"biospecimen",
"assay"
),
name = c("file1", "file2", "file3", "file4"),
species = "human",
assay = "rnaSeq",
file_data = c(
list(data.frame(a = 1)),
list(data.frame(a = 1)),
list(data.frame(samplingAge = 100)),
list(data.frame(a = 1))
)
)
res <- check_all(
data = dat,
annotations = annots,
study = "foo",
syn = syn
)
expect_true(inherits(res$ages_over_90_biosp, "check_warn"))
})
test_that("check_all catches duplicate file paths in manifest", {
skip_if_not(logged_in(syn = syn))
data <- tibble::tibble(
metadataType = c(
"manifest",
"individual",
"biospecimen",
"assay"
),
name = c("file1", "file2", "file3", "file4"),
species = "human",
assay = "rnaSeq",
file_data = c(
list(data.frame(path = c("/file.txt", "/file.txt"))),
list(data.frame(a = 1)),
list(data.frame(a = 1)),
list(data.frame(a = 1))
)
)
res1 <- check_all(
data = data,
annotations = annots,
study = "foo",
syn = syn,
samples_table = get_golem_config("samples_table")
)
expect_true(inherits(res1$duplicate_file_paths, "check_fail"))
})
test_that("check_all() catches missing IDs from existing studies", {
skip_if_not(logged_in(syn = syn))
data <- tibble::tibble(
metadataType = c(
"manifest",
"individual",
"biospecimen",
"assay"
),
name = c("file1", "file2", "file3", "file4"),
species = "human",
assay = "rnaSeq",
file_data = c(
list(data.frame(path = c("/file.txt", "/file.txt"))),
list(data.frame(individualID = "B")),
list(data.frame(individualID = "B", specimenID = "b1")),
list(data.frame(specimenID = "b1"))
)
)
res <- check_all(
data = data,
annotations = annots,
study = "study1",
syn = syn,
samples_table = get_golem_config("samples_table")
)
expect_true(inherits(res$complete_ids_indiv, "check_fail"))
expect_equal(res$complete_ids_indiv$data, "A")
expect_true(inherits(res$complete_ids_biosp, "check_fail"))
expect_equal(res$complete_ids_biosp$data, c("a1", "a2", "b2"))
expect_true(inherits(res$complete_ids_assay, "check_fail"))
expect_equal(res$complete_ids_assay$data, c("a1", "a2"))
})
test_that("check_all() doesn't run check_complete_ids if study isn't in table", { # nolint
skip_if_not(logged_in(syn = syn))
data <- tibble::tibble(
metadataType = c(
"manifest",
"individual",
"biospecimen",
"assay"
),
name = c("file1", "file2", "file3", "file4"),
species = "human",
assay = "rnaSeq",
file_data = c(
list(data.frame(path = c("/file.txt", "/file.txt"))),
list(data.frame(individualID = "B")),
list(data.frame(individualID = "B", specimenID = "b1")),
list(data.frame(specimenID = "b1"))
)
)
res <- check_all(
data = data,
annotations = annots,
study = "not a study in this table",
syn = syn,
samples_table = get_golem_config("samples_table")
)
expect_null(res$complete_ids_indiv)
expect_null(res$complete_ids_biosp)
expect_null(res$complete_ids_assay)
})
test_that("check_all() doesn't run check_complete_ids if study or table not provided", { # nolint
skip_if_not(logged_in(syn = syn))
data <- tibble::tibble(
metadataType = c(
"manifest",
"individual",
"biospecimen",
"assay"
),
name = c("file1", "file2", "file3", "file4"),
species = "human",
assay = "rnaSeq",
file_data = c(
list(data.frame(path = c("/file.txt", "/file.txt"))),
list(data.frame(individualID = "B")),
list(data.frame(individualID = "B", specimenID = "b1")),
list(data.frame(specimenID = "b1"))
)
)
res1 <- check_all(
data = data,
annotations = annots,
syn = syn
)
res2 <- check_all(
data = data,
annotations = annots,
syn = syn,
study = "foo"
)
res3 <- check_all(
data = data,
annotations = annots,
syn = syn,
samples_table = "foo"
)
expect_null(res1$complete_ids_indiv)
expect_null(res1$complete_ids_biosp)
expect_null(res1$complete_ids_assay)
expect_null(res2$complete_ids_indiv)
expect_null(res2$complete_ids_biosp)
expect_null(res2$complete_ids_assay)
expect_null(res3$complete_ids_indiv)
expect_null(res3$complete_ids_biosp)
expect_null(res3$complete_ids_assay)
})
test_that("check_all doesn't run check_cols if missing template col", {
data <- tibble::tibble(
metadataType = c(
"manifest",
"individual",
"biospecimen",
"assay"
),
name = c("file1", "file2", "file3", "file4"),
species = "human",
assay = "rnaSeq",
file_data = c(
list(data.frame(path = c("/file.txt", "/file.txt"))),
list(data.frame(individualID = "B")),
list(data.frame(individualID = "B", specimenID = "b1")),
list(data.frame(specimenID = "b1"))
)
)
res <- check_all(data = data, annotations = annots, syn = syn)
expect_null(res$missing_cols_indiv)
expect_null(res$missing_cols_biosp)
expect_null(res$missing_cols_assay)
expect_null(res$missing_cols_manifest)
})
test_that("check_all runs check_cols if not missing template col", {
skip_if_not(logged_in(syn))
data <- tibble::tibble(
metadataType = c(
"manifest",
"individual",
"biospecimen",
"assay"
),
name = c("file1", "file2", "file3", "file4"),
species = "human",
assay = "rnaSeq",
file_data = c(
list(data.frame(path = c("/file.txt", "/file.txt"))),
list(data.frame(individualID = "B")),
list(data.frame(individualID = "B", specimenID = "b1")),
list(data.frame(specimenID = "b1"))
),
template = c(
"syn20820080",
"syn12973254",
"syn12973252",
"syn12973256"
)
)
res <- check_all(data = data, annotations = annots, syn = syn)
expect_true(!is.null(res$missing_cols_indiv))
expect_true(!is.null(res$missing_cols_biosp))
expect_true(!is.null(res$missing_cols_assay))
expect_true(!is.null(res$missing_cols_manifest))
})
test_that("config works", {
expect_equal(get_golem_config("samples_table"), "syn22089767")
})
|
/tests/testthat/test-check-all.R
|
permissive
|
Sage-Bionetworks/dccvalidator
|
R
| false
| false
| 13,526
|
r
|
# Test setup shared by every test in this file: Synapse client, annotation
# fixture, and the "testing" configuration.
context("test-check-all.R")
library("tibble")
# Synapse client object used by all check_all() calls below.
syn <- attempt_instantiate()
# Try to log in; on failure only print a message — individual tests guard
# themselves with skip_if_not(logged_in(syn = syn)).
tryCatch(
  attempt_login(syn),
  error = function(e) {
    print(glue::glue("Did not log into Synapse: {e$message}"))
  }
)
# Minimal annotation dictionary (key/value/columnType) used to validate
# annotation values in the checks.
annots <- tribble(
  ~key, ~value, ~columnType,
  "assay", "rnaSeq", "STRING",
  "fileFormat", "fastq", "STRING",
  "fileFormat", "txt", "STRING",
  "fileFormat", "csv", "STRING",
  "species", "Human", "STRING"
)
# Activate the "testing" config; see the "config works" test at the bottom.
Sys.setenv(R_CONFIG_ACTIVE = "testing")
test_that("check_all() returns a list of check conditions or NULLs", {
  skip_if_not(logged_in(syn = syn))
  # Minimal valid upload: one file per metadata type, each carrying the same
  # dummy two-column data frame.
  data <- tibble::tibble(
    metadataType = c(
      "manifest",
      "individual",
      "biospecimen",
      "assay"
    ),
    name = c("file1", "file2", "file3", "file4"),
    species = "human",
    assay = "rnaSeq",
    file_data = c(
      list(data.frame(a = c(TRUE, FALSE), b = c(1, 3))),
      list(data.frame(a = c(TRUE, FALSE), b = c(1, 3))),
      list(data.frame(a = c(TRUE, FALSE), b = c(1, 3))),
      list(data.frame(a = c(TRUE, FALSE), b = c(1, 3)))
    )
  )
  res <- check_all(
    data = data,
    annotations = annots,
    study = "foo",
    syn = syn
  )
  # Result must be a bare list whose elements are check conditions
  # (pass/warn/fail) or NULL for checks that could not be run.
  expect_identical(class(res), "list")
  expect_true(all(purrr::map_lgl(
    res,
    function(x) {
      # inherits() with a vector of classes is TRUE if x matches any of them.
      # The condition is scalar, so use `||`, not elementwise `|`.
      inherits(x, c("check_fail", "check_pass", "check_warn")) || is.null(x)
    }
  )))
})
# When a metadata type was not uploaded (NULL file_data), the checks that
# depend on it should be skipped (NULL) rather than run or fail.
test_that("check_all() returns NULL for checks with missing data", {
  skip_if_not(logged_in(syn = syn))
  # data1: nothing uploaded at all
  data1 <- tibble::tibble(
    metadataType = c(
      "manifest",
      "individual",
      "biospecimen",
      "assay"
    ),
    name = c(NA, NA, NA, NA),
    species = "human",
    assay = "rnaSeq",
    file_data = c(
      list(NULL),
      list(NULL),
      list(NULL),
      list(NULL)
    )
  )
  # data2: only the manifest was uploaded
  data2 <- tibble::tibble(
    metadataType = c(
      "manifest",
      "individual",
      "biospecimen",
      "assay"
    ),
    name = c("file1", NA, NA, NA),
    species = "human",
    assay = "rnaSeq",
    file_data = c(
      list(data.frame(a = c(TRUE, FALSE), b = c(1, 3))),
      list(NULL),
      list(NULL),
      list(NULL)
    )
  )
  # data3: individual and assay metadata only (no manifest, no biospecimen)
  data3 <- tibble::tibble(
    metadataType = c(
      "manifest",
      "individual",
      "biospecimen",
      "assay"
    ),
    name = c(NA, "file2", NA, "file4"),
    species = "human",
    assay = "rnaSeq",
    file_data = c(
      list(NULL),
      list(data.frame(a = c(TRUE, FALSE), b = c(1, 3))),
      list(NULL),
      list(data.frame(a = c(TRUE, FALSE), b = c(1, 3)))
    )
  )
  # data4: everything present except the assay metadata
  data4 <- tibble::tibble(
    metadataType = c(
      "manifest",
      "individual",
      "biospecimen",
      "assay"
    ),
    name = c("file1", "file2", "file3", NA),
    species = "human",
    assay = "rnaSeq",
    file_data = c(
      list(data.frame(a = c(TRUE, FALSE), b = c(1, 3))),
      list(data.frame(a = c(TRUE, FALSE), b = c(1, 3))),
      list(data.frame(a = c(TRUE, FALSE), b = c(1, 3))),
      list(NULL)
    )
  )
  res1 <- check_all(
    data = data1,
    annotations = annots,
    study = "foo",
    syn = syn
  )
  res2 <- check_all(
    data = data2,
    annotations = annots,
    study = "foo",
    syn = syn
  )
  res3 <- check_all(
    data = data3,
    annotations = annots,
    study = "foo",
    syn = syn
  )
  res4 <- check_all(
    data = data4,
    annotations = annots,
    study = "foo",
    syn = syn
  )
  # Some checks should be NULL based on which data is missing
  # Since all of these have missing data, the # of checks done
  # should be less than the total # of checks possible
  expect_true(all(purrr::map_lgl(res1, ~ is.null(.x))))
  expect_true(sum(purrr::map_lgl(res2, ~ !is.null(.x))) < length(res2))
  expect_true(sum(purrr::map_lgl(res3, ~ !is.null(.x))) < length(res3))
  expect_true(sum(purrr::map_lgl(res4, ~ !is.null(.x))) < length(res4))
})
# End-to-end fixture exercising pass, fail-with-data, and annotation-value
# results from a single upload.
test_that("check_all() returns expected conditions", {
  skip_if_not(logged_in(syn = syn))
  # The manifest lists the four metadata files plus three individuals.
  # Individual "c" is deliberately absent from the individual metadata, and
  # the biospecimen file carries fileFormat values not in `annots`.
  data <- tibble::tibble(
    metadataType = c(
      "manifest",
      "individual",
      "biospecimen",
      "assay"
    ),
    name = c("file1", "file2", "file3", "file4"),
    species = "human",
    assay = "rnaSeq",
    file_data = c(
      list(data.frame(
        path = c("file1", "file2", "file3", "file4", NA, NA, NA),
        individualID = c(NA, NA, NA, NA, "a", "b", "c"),
        specimenID = c(NA, NA, NA, NA, NA, "1", "3"),
        stringsAsFactors = FALSE
      )),
      list(data.frame(
        individualID = c("a", "b"),
        age = c(27, 32),
        stringsAsFactors = FALSE
      )),
      list(data.frame(
        individualID = c("a", "b"),
        specimenID = c("1", "3"),
        fileFormat = c("xlsx", "tex"),
        stringsAsFactors = FALSE
      )),
      list(data.frame(
        specimenID = c("1", "3"),
        assay = c("rnaSeq", "rnaSeq"),
        stringsAsFactors = FALSE
      ))
    )
  )
  res <- check_all(
    data = data,
    annotations = annots,
    study = "foo",
    syn = syn
  )
  # All metadata filenames in manifest passes
  expect_true(inherits(res$meta_files_in_manifest, "check_pass"))
  # Missing individualID "c" from individual metadata
  expect_equal(
    res$individual_ids_indiv_manifest$data$`Missing from individual`[1],
    "c"
  )
  # Invalid fileFormat annotation values ("xlsx"/"tex" are not in annots)
  expect_equal(res$annotation_values_biosp$data$fileFormat, c("xlsx", "tex"))
})
test_that("check_all() throws error if not exactly 1 metadata type each", {
  skip_if_not(logged_in(syn = syn))
  # Two invalid uploads: one is missing the biospecimen row, the other lists
  # the assay type twice. Both violate the "exactly one of each metadata
  # type" contract and must error.
  bad_uploads <- list(
    missing_biospecimen = tibble::tibble(
      metadataType = c(
        "manifest",
        "individual",
        "assay"
      )
    ),
    duplicated_assay = tibble::tibble(
      metadataType = c(
        "manifest",
        "individual",
        "assay",
        "assay"
      )
    )
  )
  for (upload in bad_uploads) {
    expect_error(
      check_all(
        data = upload,
        annotations = annots,
        study = "foo",
        syn = syn
      )
    )
  }
})
# check_ages_over_90 should only run when the upload contains human data.
test_that("check_all runs check_ages_over_90 for human data", {
  skip_if_not(logged_in(syn = syn))
  data_human <- tibble::tibble(
    metadataType = c(
      "manifest",
      "individual",
      "biospecimen",
      "assay"
    ),
    name = c("file1", "file2", "file3", "file4"),
    species = "human",
    assay = "rnaSeq",
    file_data = c(
      list(data.frame(a = 1)),
      list(data.frame(ageDeath = 95)),  # age > 90 should trigger a warning
      list(data.frame(a = 1)),
      list(data.frame(a = 1))
    )
  )
  # Same upload but flagged as animal data: the age check must not run.
  data_animal <- data_human
  data_animal$species <- "mouse or other animal model"
  # Partially missing species: any "human" row should still trigger the check.
  data_has_na <- data_human
  data_has_na$species <- c(NA, "human", "human", NA)
  res1 <- check_all(
    data = data_human,
    annotations = annots,
    study = "foo",
    syn = syn
  )
  res2 <- check_all(
    data = data_animal,
    annotations = annots,
    study = "foo",
    syn = syn
  )
  res3 <- check_all(
    data = data_has_na,
    annotations = annots,
    study = "foo",
    syn = syn
  )
  expect_true(inherits(res1$ages_over_90_indiv, "check_warn"))
  expect_null(res2$ages_over_90_indiv)
  expect_true(inherits(res3$ages_over_90_indiv, "check_warn"))
})
# The age check also applies to samplingAge in the biospecimen file.
test_that("check_all runs check_ages_over_90 on biospecimen file", {
  skip_if_not(logged_in(syn = syn))
  dat <- tibble::tibble(
    metadataType = c(
      "manifest",
      "individual",
      "biospecimen",
      "assay"
    ),
    name = c("file1", "file2", "file3", "file4"),
    species = "human",
    assay = "rnaSeq",
    file_data = c(
      list(data.frame(a = 1)),
      list(data.frame(a = 1)),
      list(data.frame(samplingAge = 100)),  # sampling age > 90
      list(data.frame(a = 1))
    )
  )
  res <- check_all(
    data = dat,
    annotations = annots,
    study = "foo",
    syn = syn
  )
  expect_true(inherits(res$ages_over_90_biosp, "check_warn"))
})
# A manifest listing the same path twice must be flagged as a failure.
test_that("check_all catches duplicate file paths in manifest", {
  skip_if_not(logged_in(syn = syn))
  data <- tibble::tibble(
    metadataType = c(
      "manifest",
      "individual",
      "biospecimen",
      "assay"
    ),
    name = c("file1", "file2", "file3", "file4"),
    species = "human",
    assay = "rnaSeq",
    file_data = c(
      list(data.frame(path = c("/file.txt", "/file.txt"))),
      list(data.frame(a = 1)),
      list(data.frame(a = 1)),
      list(data.frame(a = 1))
    )
  )
  res1 <- check_all(
    data = data,
    annotations = annots,
    study = "foo",
    syn = syn,
    samples_table = get_golem_config("samples_table")
  )
  expect_true(inherits(res1$duplicate_file_paths, "check_fail"))
})
# IDs already registered for a known study must all reappear in a new upload;
# the ones missing from this upload are reported as check failures.
test_that("check_all() catches missing IDs from existing studies", {
  skip_if_not(logged_in(syn = syn))
  data <- tibble::tibble(
    metadataType = c(
      "manifest",
      "individual",
      "biospecimen",
      "assay"
    ),
    name = c("file1", "file2", "file3", "file4"),
    species = "human",
    assay = "rnaSeq",
    file_data = c(
      list(data.frame(path = c("/file.txt", "/file.txt"))),
      list(data.frame(individualID = "B")),
      list(data.frame(individualID = "B", specimenID = "b1")),
      list(data.frame(specimenID = "b1"))
    )
  )
  res <- check_all(
    data = data,
    annotations = annots,
    study = "study1",
    syn = syn,
    samples_table = get_golem_config("samples_table")
  )
  # NOTE(review): the expected IDs below come from the "study1" rows of the
  # samples table configured for testing — verify against that table.
  expect_true(inherits(res$complete_ids_indiv, "check_fail"))
  expect_equal(res$complete_ids_indiv$data, "A")
  expect_true(inherits(res$complete_ids_biosp, "check_fail"))
  expect_equal(res$complete_ids_biosp$data, c("a1", "a2", "b2"))
  expect_true(inherits(res$complete_ids_assay, "check_fail"))
  expect_equal(res$complete_ids_assay$data, c("a1", "a2"))
})
# An unknown study has no registered IDs, so the completeness checks are
# skipped entirely (NULL).
test_that("check_all() doesn't run check_complete_ids if study isn't in table", { # nolint
  skip_if_not(logged_in(syn = syn))
  data <- tibble::tibble(
    metadataType = c(
      "manifest",
      "individual",
      "biospecimen",
      "assay"
    ),
    name = c("file1", "file2", "file3", "file4"),
    species = "human",
    assay = "rnaSeq",
    file_data = c(
      list(data.frame(path = c("/file.txt", "/file.txt"))),
      list(data.frame(individualID = "B")),
      list(data.frame(individualID = "B", specimenID = "b1")),
      list(data.frame(specimenID = "b1"))
    )
  )
  res <- check_all(
    data = data,
    annotations = annots,
    study = "not a study in this table",
    syn = syn,
    samples_table = get_golem_config("samples_table")
  )
  expect_null(res$complete_ids_indiv)
  expect_null(res$complete_ids_biosp)
  expect_null(res$complete_ids_assay)
})
# The completeness checks need BOTH a study and a samples table; providing
# only one (or neither) skips them.
test_that("check_all() doesn't run check_complete_ids if study or table not provided", { # nolint
  skip_if_not(logged_in(syn = syn))
  data <- tibble::tibble(
    metadataType = c(
      "manifest",
      "individual",
      "biospecimen",
      "assay"
    ),
    name = c("file1", "file2", "file3", "file4"),
    species = "human",
    assay = "rnaSeq",
    file_data = c(
      list(data.frame(path = c("/file.txt", "/file.txt"))),
      list(data.frame(individualID = "B")),
      list(data.frame(individualID = "B", specimenID = "b1")),
      list(data.frame(specimenID = "b1"))
    )
  )
  # res1: neither study nor samples_table; res2: study only; res3: table only
  res1 <- check_all(
    data = data,
    annotations = annots,
    syn = syn
  )
  res2 <- check_all(
    data = data,
    annotations = annots,
    syn = syn,
    study = "foo"
  )
  res3 <- check_all(
    data = data,
    annotations = annots,
    syn = syn,
    samples_table = "foo"
  )
  expect_null(res1$complete_ids_indiv)
  expect_null(res1$complete_ids_biosp)
  expect_null(res1$complete_ids_assay)
  expect_null(res2$complete_ids_indiv)
  expect_null(res2$complete_ids_biosp)
  expect_null(res2$complete_ids_assay)
  expect_null(res3$complete_ids_indiv)
  expect_null(res3$complete_ids_biosp)
  expect_null(res3$complete_ids_assay)
})
# Without a `template` column there is nothing to validate columns against,
# so the four column checks should be skipped (NULL).
# NOTE(review): unlike the other tests this one has no
# skip_if_not(logged_in(...)) guard — presumably fine because no templates
# are fetched from Synapse; confirm.
test_that("check_all doesn't run check_cols if missing template col", {
  data <- tibble::tibble(
    metadataType = c(
      "manifest",
      "individual",
      "biospecimen",
      "assay"
    ),
    name = c("file1", "file2", "file3", "file4"),
    species = "human",
    assay = "rnaSeq",
    file_data = c(
      list(data.frame(path = c("/file.txt", "/file.txt"))),
      list(data.frame(individualID = "B")),
      list(data.frame(individualID = "B", specimenID = "b1")),
      list(data.frame(specimenID = "b1"))
    )
  )
  res <- check_all(data = data, annotations = annots, syn = syn)
  expect_null(res$missing_cols_indiv)
  expect_null(res$missing_cols_biosp)
  expect_null(res$missing_cols_assay)
  expect_null(res$missing_cols_manifest)
})
# With a `template` column present, all four column checks must run.
test_that("check_all runs check_cols if not missing template col", {
  # Use the same named-argument form as every other test in this file
  # (previously `logged_in(syn)`, which relied on positional matching).
  skip_if_not(logged_in(syn = syn))
  # Same fixture as the previous test, plus the Synapse ID of the template
  # each metadata file should be validated against.
  data <- tibble::tibble(
    metadataType = c(
      "manifest",
      "individual",
      "biospecimen",
      "assay"
    ),
    name = c("file1", "file2", "file3", "file4"),
    species = "human",
    assay = "rnaSeq",
    file_data = c(
      list(data.frame(path = c("/file.txt", "/file.txt"))),
      list(data.frame(individualID = "B")),
      list(data.frame(individualID = "B", specimenID = "b1")),
      list(data.frame(specimenID = "b1"))
    ),
    template = c(
      "syn20820080",
      "syn12973254",
      "syn12973252",
      "syn12973256"
    )
  )
  res <- check_all(data = data, annotations = annots, syn = syn)
  expect_true(!is.null(res$missing_cols_indiv))
  expect_true(!is.null(res$missing_cols_biosp))
  expect_true(!is.null(res$missing_cols_assay))
  expect_true(!is.null(res$missing_cols_manifest))
})
# Sanity check: the "testing" config activated at the top of this file points
# at the expected samples table.
test_that("config works", {
  expect_equal(get_golem_config("samples_table"), "syn22089767")
})
|
# Packages used
library(cepespR)
library(tidyverse)
library(abjutils)
# Goals
#' - Aggregate the data by number of inhabitants and of eligible voters;
#' - There are 7 aggregation bands based on the number of eligible voters:
#'
#' 1. Up to 5 thousand voters;
#' 2. 5 to 10 thousand voters;
#' 3. 10 to 20 thousand voters;
#' 4. 20 to 50 thousand voters;
#' 5. 50 to 100 thousand voters;
#' 6. 100 to 200 thousand voters;
#' 7. More than 200 thousand voters;
#'
#' - There are 7 aggregation bands based on the number of inhabitants:
#'
#' 1. Up to 5 thousand inhabitants (23.39% of municipalities, 2.3% of total population);
#' 2. 5 to 10 thousand inhabitants (21.79% of municipalities, 4.49% of total population);
#' 3. 10 to 20 thousand inhabitants (25.15% of municipalities, 10.35% of total population);
#' 4. 20 to 50 thousand inhabitants (18.74% of municipalities, 16.45% of total population);
#' 5. 50 to 100 thousand inhabitants (5.82% of municipalities, 11.67% of total population);
#' 6. 100 to 500 thousand inhabitants (4.40% of municipalities, 25.46% of total population);
#' 7. More than 500 thousand inhabitants (0.62% of municipalities, 29.28% of total population);
# 1. Data -----------------------------------------------------------------
## Download the table with the number of eligible voters per year, state
## and municipality (city-council elections, consolidated results).
vrcm <- get_elections(year = "2000,2004,2008,2012,2016",
                      position = "Vereador",
                      regional_aggregation = "Municipio",
                      political_aggregation = "Consolidado",
                      cached = TRUE)
## Load the previously computed indicators: each "*mun.rds" file becomes an
## object named after the file (minus the ".rds" extension).
files <- list.files(file.path(getwd(), "/data/output"),
                    pattern = "mun.rds")
for (i in files) {
  df <- readRDS(paste0("data/output/", i))
  # (the original re-subset df with a no-op `df[, 1:length(df)]` and wrapped
  # the name in a redundant paste(); both dropped — behavior is unchanged)
  assign(substr(i, 1, nchar(i) - 4), df)
}
rm(df, i, files)
### Build a table with the number of eligible voters per municipality,
### selecting and renaming the relevant columns in a single step.
aptos <- vrcm %>%
  select(
    `Ano da eleição` = "ANO_ELEICAO",
    "UF",
    `Código do município` = "COD_MUN_TSE",
    `Eleitores aptos` = "QTD_APTOS"
  )
### Store the election year as character (to match the indicator tables)
aptos$`Ano da eleição` <- as.character(aptos$`Ano da eleição`)
# 2. Join ------------------------------------------------------
# 2.1. Eligible voters ----------------------------------------------------
## Attach the voter counts to every "*_mun" indicator data frame.
## The original re-ran ls() on every iteration and applied identity
## substr()/subsetting calls; those are dropped — behavior is unchanged
## because the loop creates no new "_mun" objects.
files <- ls(pattern = "_mun")
for (i in seq_along(files)) {
  cat("Lendo", files[i], "\n")
  df <- left_join(get(files[i]), aptos)
  assign(files[i], df)
}
# 3. Create the intervals -------------------------------------------------
options(scipen = 999)
## Breakpoints for the eligible-voter bands
pretty_breaks <- c(0, 5000, 10000, 20000, 50000, 100000, 200000)
## Largest observed number of eligible voters: the upper bound of the last
## band (renamed from `max`, which shadowed base::max()).
max_aptos <- max(frag_leg_mun$`Eleitores aptos`)
## Full break vector and the human-readable band labels (one per interval)
brks <- c(pretty_breaks, max_aptos)
faixas <- c(
  "Até 5 mil eleitores",
  "De 5 a 10 mil eleitores",
  "De 10 a 20 mil eleitores",
  "De 20 a 50 mil eleitores",
  "De 50 a 100 mil eleitores",
  "De 100 a 200 mil eleitores",
  "Acima de 200 mil eleitores"
)
## Classify a numeric vector of voter counts into the labelled bands.
## This replaces the original cut() + nested ifelse() chains, which
## (a) built numeric labels via `labels[1:length(labels)-1]` — a precedence
## footgun that only worked because index 0 is silently dropped — and
## (b) hard-coded the observed maximum (8886195) for the top band, which
## would yield NA for that band on any other data set. NA inputs stay NA.
classifica_aptos <- function(x) {
  as.character(cut(x, breaks = brks, include.lowest = TRUE, labels = faixas))
}
### Legislative fragmentation
frag_leg_mun$`Eleitores aptos` <- classifica_aptos(frag_leg_mun$`Eleitores aptos`)
### Seat distribution
distcad_mun$`Eleitores aptos` <- classifica_aptos(distcad_mun$`Eleitores aptos`)
### Parliamentary renewal
renov_parl_mun$`Eleitores aptos` <- classifica_aptos(renov_parl_mun$`Eleitores aptos`)
### Alienation
alien_mun$`Eleitores aptos` <- classifica_aptos(alien_mun$`Eleitores aptos`)
### Electoral volatility
vol_mun$`Eleitores aptos` <- classifica_aptos(vol_mun$`Eleitores aptos`)
# 4. Standardize the data -------------------------------------------------
# Keep only the columns of interest in each indicator table and harmonize
# the seat-count column name to "Cadeiras disponíveis".
### Legislative fragmentation
frag_leg_mun <- frag_leg_mun %>%
  dplyr::select(`Ano da eleição`,
                UF,
                `Código do município`,
                `Nome do município`,
                Cargo,
                `Vagas`,
                `Votos válidos`,
                `Sigla do partido`,
                `Total de votos conquistados`,
                `Total de cadeiras conquistadas`,
                `Percentual de votos conquistados`,
                `Percentual de cadeiras conquistadas`,
                `Número efetivo de partidos eleitoral`,
                `Número efetivo de partidos legislativo`,
                Fracionalização,
                `Fracionalização máxima`,
                Fragmentação,
                `Desproporcionalidade`,
                `Eleitores aptos`) %>%
  dplyr::rename("Cadeiras disponíveis" = "Vagas")
### Seat distribution
distcad_mun <- distcad_mun %>%
  select(`Ano da eleição`,
         UF,
         `Código do município`,
         `Nome do município`,
         Cargo,
         `Cadeiras oferecidas`,
         `Votos válidos`,
         `Sigla do partido`,
         `Votos do partido`,
         `Quociente eleitoral`,
         `Quociente partidário`,
         `Eleitores aptos`) %>%
  rename("Cadeiras disponíveis" = "Cadeiras oferecidas")
### Parliamentary renewal
renov_parl_mun <- renov_parl_mun %>%
  select(`Ano da eleição`,
         UF,
         `Código do município`,
         `Nome do município`,
         Cargo,
         `Cadeiras disponíveis`,
         Reapresentação,
         Reeleitos,
         Conservação,
         `Renovação bruta`,
         `Renovação líquida`,
         `Eleitores aptos`
  )
### Alienation
alien_mun <- alien_mun %>%
  select(`Ano da eleição`,
         UF,
         `Código do município`,
         `Nome do município`,
         Cargo,
         Turno,
         `Quantidade de eleitores aptos`,
         `Quantidade de abstenções`,
         `Percentual de abstenções`,
         `Quantidade de votos brancos`,
         `Percentual de votos brancos`,
         `Quantidade de votos nulos`,
         `Percentual de votos nulos`,
         `Alienação absoluta`,
         `Alienação percentual`,
         `Eleitores aptos`)
### Electoral volatility
vol_mun <- vol_mun %>%
  select(`Ano da eleição`,
         UF,
         `Código do município`,
         `Nome do município`,
         Cargo,
         `Volatilidade eleitoral`,
         `Volatilidade parlamentar`,
         `Eleitores aptos`)
# 5. Save the files -------------------------------------------------------
## Persist every "*_mun" indicator data frame as a compressed .rds file.
## (The original indexed into a freshly recomputed ls() on every iteration;
## iterating directly over the names is equivalent.)
for (nm in ls(pattern = "_mun")) {
  saveRDS(get(nm), paste0("data/output/", nm, ".rds"),
          compress = TRUE)
}
|
/scripts/06_agregmunicipio.R
|
no_license
|
rebecacarvalho/cepespIndicadores
|
R
| false
| false
| 13,431
|
r
|
# Pacotes utilizados
library(cepespR)
library(tidyverse)
library(abjutils)
# Objetivos
#' - Agregar os dados em funcao do numero de habitantes e de eleitores aptos;
#' - Sao 7 faixas de agregacao em funcao do numero eleitores aptos:
#'
#' 1. Até 5 mil eleitores;
#' 2. De 5 a 10 mil eleitores;
#' 3. De 10 a 20 mil eleitores;
#' 4. De 20 a 50 mil eleitores;
#' 5. De 50 a 100 mil eleitores;
#' 6. De 100 a 200 mil eleitores;
#' 7. Acima de 200 mil eleitores;
#'
#' - Sao 7 faixas de agregacao em funcao do numero de habitantes:
#'
#' 1. Até 5 mil habitantes (23,39% dos municípios e 2,3% da população total);
#' 2. De 5 a 10 mil habitantes (21,79% dos municípios e 4,49% da população total);
#' 3. De 10 a 20 mil habitantes (25,15% dos municípios e 10,35% da população total);
#' 4. De 20 a 50 mil habitantes (18,74% dos municípios e 16,45% da população total);
#' 5. De 50 a 100 mil habitantes (5,82% dos municípios e 11,67% da população total);
#' 6. De 100 a 500 mil habitantes (4,40% dos municípios e 25,46% da população total);
#' 7. Acima de 500 mil habitantes (0,62% dos municípios e 29,28% da população total);
# 1. Data -----------------------------------------------------------------
## Faz o download do banco que contem o numero de eleitores aptos por ano, uf e municipio
vrcm <- get_elections(year = "2000,2004,2008,2012,2016",
position = "Vereador",
regional_aggregation = "Municipio",
political_aggregation = "Consolidado",
cached = TRUE)
## Carrega os indicadores ja calculados
files <- list.files(file.path(getwd(),"/data/output"),
pattern = "mun.rds")
for(i in files){
df <- readRDS(paste0("data/output/",i))
df <- df[,1:length(df)]
assign(paste(substr(i,1,nchar(i)-4)), df)
}
rm(df,i,files)
### Cria um banco com o numero de eleitores aptos por municipio
aptos <- vrcm %>%
select("ANO_ELEICAO",
"UF",
"COD_MUN_TSE",
"QTD_APTOS") %>%
rename("Ano da eleição" = "ANO_ELEICAO",
"Código do município" = "COD_MUN_TSE",
"Eleitores aptos" = "QTD_APTOS")
### Transforma a coluna ano da eleição em caracter
aptos$`Ano da eleição` <- as.character(aptos$`Ano da eleição`)
# 2. Join ------------------------------------------------------
# 2.1. Eleitores aptos ----------------------------------------------------
files <- ls(pattern = "_mun")
for(i in 1:length(ls(pattern = "_mun"))){
cat("Lendo", ls(pattern = "_mun")[i] , "\n")
df <- left_join(get(ls(pattern = "_mun")[i]), aptos)
df <- df[,1:length(df)]
assign(paste(substr(ls(pattern = "_mun")[i],1,nchar(ls(pattern = "_mun")[i]))), df)
}
# 3. Cria os intervalos ---------------------------------------------------
options(scipen = 999)
## Cria os intervalos de eleitores aptos
pretty_breaks <- c(0,5000,10000,20000,50000,100000,200000)
## Cria uma variavel com o maior numero de eleitores aptos
max <- max(frag_leg_mun$`Eleitores aptos`)
## Cria as quebras e as legendas
labels <- c()
brks <- c(pretty_breaks, max)
for(idx in 1:length(brks)){
labels <- c(labels,round(brks[idx + 1], 2))
}
labels <- labels[1:length(labels)-1]
## Cria uma variavel com os valores dos intervalos
### Fragmentacao legislativa
frag_leg_mun$`Eleitores aptos` <- cut(frag_leg_mun$`Eleitores aptos`,
breaks = brks,
include.lowest = TRUE,
labels = labels)
frag_leg_mun <- frag_leg_mun %>%
mutate(`Eleitores aptos` = ifelse(`Eleitores aptos` == 5000,
"Até 5 mil eleitores",
ifelse(`Eleitores aptos` == 10000,
"De 5 a 10 mil eleitores",
ifelse(`Eleitores aptos` == 20000,
"De 10 a 20 mil eleitores",
ifelse(`Eleitores aptos` == 50000,
"De 20 a 50 mil eleitores",
ifelse(`Eleitores aptos` == 100000,
"De 50 a 100 mil eleitores",
ifelse(`Eleitores aptos` == 200000,
"De 100 a 200 mil eleitores",
ifelse(`Eleitores aptos` == 8886195,
"Acima de 200 mil eleitores",
NA))))))))
### Distribuicao de cadeiras
# Same binning + relabelling as for the other municipal tables.
distcad_mun$`Eleitores aptos` <- cut(distcad_mun$`Eleitores aptos`,
                                     breaks = brks,
                                     include.lowest = TRUE,
                                     labels = labels)
distcad_mun <- distcad_mun %>%
  mutate(`Eleitores aptos` = case_when(
    `Eleitores aptos` == 5000    ~ "Até 5 mil eleitores",
    `Eleitores aptos` == 10000   ~ "De 5 a 10 mil eleitores",
    `Eleitores aptos` == 20000   ~ "De 10 a 20 mil eleitores",
    `Eleitores aptos` == 50000   ~ "De 20 a 50 mil eleitores",
    `Eleitores aptos` == 100000  ~ "De 50 a 100 mil eleitores",
    `Eleitores aptos` == 200000  ~ "De 100 a 200 mil eleitores",
    `Eleitores aptos` == 8886195 ~ "Acima de 200 mil eleitores",
    TRUE                         ~ NA_character_
  ))
### Renovacao parlamentar
# Same binning + relabelling as for the other municipal tables.
renov_parl_mun$`Eleitores aptos` <- cut(renov_parl_mun$`Eleitores aptos`,
                                        breaks = brks,
                                        include.lowest = TRUE,
                                        labels = labels)
renov_parl_mun <- renov_parl_mun %>%
  mutate(`Eleitores aptos` = case_when(
    `Eleitores aptos` == 5000    ~ "Até 5 mil eleitores",
    `Eleitores aptos` == 10000   ~ "De 5 a 10 mil eleitores",
    `Eleitores aptos` == 20000   ~ "De 10 a 20 mil eleitores",
    `Eleitores aptos` == 50000   ~ "De 20 a 50 mil eleitores",
    `Eleitores aptos` == 100000  ~ "De 50 a 100 mil eleitores",
    `Eleitores aptos` == 200000  ~ "De 100 a 200 mil eleitores",
    `Eleitores aptos` == 8886195 ~ "Acima de 200 mil eleitores",
    TRUE                         ~ NA_character_
  ))
### Alienacao
# Same binning + relabelling as for the other municipal tables.
alien_mun$`Eleitores aptos` <- cut(alien_mun$`Eleitores aptos`,
                                   breaks = brks,
                                   include.lowest = TRUE,
                                   labels = labels)
alien_mun <- alien_mun %>%
  mutate(`Eleitores aptos` = case_when(
    `Eleitores aptos` == 5000    ~ "Até 5 mil eleitores",
    `Eleitores aptos` == 10000   ~ "De 5 a 10 mil eleitores",
    `Eleitores aptos` == 20000   ~ "De 10 a 20 mil eleitores",
    `Eleitores aptos` == 50000   ~ "De 20 a 50 mil eleitores",
    `Eleitores aptos` == 100000  ~ "De 50 a 100 mil eleitores",
    `Eleitores aptos` == 200000  ~ "De 100 a 200 mil eleitores",
    `Eleitores aptos` == 8886195 ~ "Acima de 200 mil eleitores",
    TRUE                         ~ NA_character_
  ))
### Volatilidade eleitoral
# Same binning + relabelling as for the other municipal tables.
vol_mun$`Eleitores aptos` <- cut(vol_mun$`Eleitores aptos`,
                                 breaks = brks,
                                 include.lowest = TRUE,
                                 labels = labels)
vol_mun <- vol_mun %>%
  mutate(`Eleitores aptos` = case_when(
    `Eleitores aptos` == 5000    ~ "Até 5 mil eleitores",
    `Eleitores aptos` == 10000   ~ "De 5 a 10 mil eleitores",
    `Eleitores aptos` == 20000   ~ "De 10 a 20 mil eleitores",
    `Eleitores aptos` == 50000   ~ "De 20 a 50 mil eleitores",
    `Eleitores aptos` == 100000  ~ "De 50 a 100 mil eleitores",
    `Eleitores aptos` == 200000  ~ "De 100 a 200 mil eleitores",
    `Eleitores aptos` == 8886195 ~ "Acima de 200 mil eleitores",
    TRUE                         ~ NA_character_
  ))
# 4. Padroniza os dados ---------------------------------------------------
### Fragmentacao legislativa
# Keep only the reporting columns.  "Vagas" is renamed inline so that
# "Cadeiras disponíveis" keeps the same column position as before.
frag_leg_mun <- frag_leg_mun %>%
  dplyr::select(`Ano da eleição`,
                UF,
                `Código do município`,
                `Nome do município`,
                Cargo,
                `Cadeiras disponíveis` = Vagas,
                `Votos válidos`,
                `Sigla do partido`,
                `Total de votos conquistados`,
                `Total de cadeiras conquistadas`,
                `Percentual de votos conquistados`,
                `Percentual de cadeiras conquistadas`,
                `Número efetivo de partidos eleitoral`,
                `Número efetivo de partidos legislativo`,
                Fracionalização,
                `Fracionalização máxima`,
                Fragmentação,
                `Desproporcionalidade`,
                `Eleitores aptos`)
### Distribuicao de cadeiras
# "Cadeiras oferecidas" is renamed inline, preserving its column position.
distcad_mun <- distcad_mun %>%
  select(`Ano da eleição`,
         UF,
         `Código do município`,
         `Nome do município`,
         Cargo,
         `Cadeiras disponíveis` = `Cadeiras oferecidas`,
         `Votos válidos`,
         `Sigla do partido`,
         `Votos do partido`,
         `Quociente eleitoral`,
         `Quociente partidário`,
         `Eleitores aptos`)
### Renovacao parlamentar
# Column selection via base subsetting; the vector order defines the layout.
renov_parl_mun <- renov_parl_mun[, c("Ano da eleição",
                                     "UF",
                                     "Código do município",
                                     "Nome do município",
                                     "Cargo",
                                     "Cadeiras disponíveis",
                                     "Reapresentação",
                                     "Reeleitos",
                                     "Conservação",
                                     "Renovação bruta",
                                     "Renovação líquida",
                                     "Eleitores aptos")]
### Alienacao
# Column selection via base subsetting; the vector order defines the layout.
alien_mun <- alien_mun[, c("Ano da eleição",
                           "UF",
                           "Código do município",
                           "Nome do município",
                           "Cargo",
                           "Turno",
                           "Quantidade de eleitores aptos",
                           "Quantidade de abstenções",
                           "Percentual de abstenções",
                           "Quantidade de votos brancos",
                           "Percentual de votos brancos",
                           "Quantidade de votos nulos",
                           "Percentual de votos nulos",
                           "Alienação absoluta",
                           "Alienação percentual",
                           "Eleitores aptos")]
### Volatilidade eleitoral
# Column selection via base subsetting; the vector order defines the layout.
vol_mun <- vol_mun[, c("Ano da eleição",
                       "UF",
                       "Código do município",
                       "Nome do município",
                       "Cargo",
                       "Volatilidade eleitoral",
                       "Volatilidade parlamentar",
                       "Eleitores aptos")]
# 5. Salva os arquivos ----------------------------------------------------
# Persist every "*_mun" data frame as a compressed .rds file.
# (The original re-evaluated ls(pattern = "_mun") three times per iteration
# and used the unsafe 1:length(...) idiom; hoist the listing and iterate
# over the names directly.)
mun_objects <- ls(pattern = "_mun")
for (obj in mun_objects) {
  saveRDS(get(obj), paste0("data/output/", obj, ".rds"),
          compress = TRUE)
}
|
\name{create.commit}
\alias{create.commit}
\title{create a commit}
\usage{
create.commit(owner, repo, content,
ctx = get.github.context())
}
\arguments{
\item{owner}{the repo owner}
\item{repo}{the name of the repo}
  \item{content}{the JSON object describing the content
  of the commit}
\item{ctx}{the github context object}
}
\value{
the commit
}
\description{
create a commit
}
|
/man/create.commit.Rd
|
permissive
|
prateek05/rgithub
|
R
| false
| false
| 393
|
rd
|
\name{create.commit}
\alias{create.commit}
\title{create a commit}
\usage{
create.commit(owner, repo, content,
ctx = get.github.context())
}
\arguments{
\item{owner}{the repo owner}
\item{repo}{the name of the repo}
  \item{content}{the JSON object describing the content
  of the commit}
\item{ctx}{the github context object}
}
\value{
the commit
}
\description{
create a commit
}
|
#' @export
#' @importFrom zip zip
#' @title compress a folder
#' @description compress a folder to a target file. The
#' function returns the complete path to target file.
#' @param folder folder to compress
#' @param target path of the archive to create
#' @return the complete (absolute) path to the created archive.
pack_folder <- function( folder, target ){
  # Resolve the target to an absolute path; all checks below use it.
  target <- absolute_path(target)
  dir_fi <- dirname(target)
  # Fail fast on the common reasons writing the archive would fail.
  if( !file.exists(dir_fi) ){
    stop("directory ", shQuote(dir_fi), " does not exist.", call. = FALSE)
  } else if( file.access(dir_fi) < 0 ){
    stop("can not write to directory ", shQuote(dir_fi), call. = FALSE)
  } else if( file.exists(target) && file.access(target) < 0 ){
    stop(shQuote(target), " already exists and is not writable", call. = FALSE)
  } else if( !file.exists(target) ){
    # Probe writability by creating (then removing) an empty file; warnings
    # are silenced during the probe and the previous setting restored after.
    old_warn <- getOption("warn")
    options(warn = -1)
    x <- tryCatch({cat("", file = target);TRUE}, error = function(e) FALSE, finally = unlink(target, force = TRUE) )
    options(warn = old_warn)
    if( !x )
      stop(shQuote(target), " cannot be written, please check your permissions.", call. = FALSE)
  }
  # list.files() below yields paths relative to the working directory, so
  # temporarily move into `folder`; the wd is restored in `finally`.
  curr_wd <- getwd()
  setwd(folder)
  # On Windows, convert the path to the native encoding.
  if( .Platform$OS.type %in% "windows")
    target <- enc2native(target)
  tryCatch(
    suppressMessages(
      zip::zip(zipfile = target,
               files = list.files(all.files = TRUE, recursive = TRUE)) )
    , error = function(e) {
      stop("Could not write ", shQuote(target), " [", e$message, "]")
    },
    # deprecated = function(e) {
    #   if( !grepl("zip::zipr", e))
    #     warning(e)
    # },
    finally = {
      setwd(curr_wd)
    })
  target
}
#' @export
#' @importFrom zip unzip
#' @title Extract files from a zip file
#' @description Extract files from a zip file to a folder. The
#' function returns the complete path to destination folder.
#' @param file path of the archive to unzip
#' @param folder folder to create
#' @return the absolute path to \code{folder}.
unpack_folder <- function( file, folder ){
  stopifnot(file.exists(file))
  # Extension of the archive (dot included), used to name the temporary copy
  # below.  Fix: the original character class was [a-zA-Z0-0], which matches
  # only the digit 0, so extensions containing 1-9 (e.g. ".mp3") were never
  # captured; [0-9] covers all digits.  With no match, gsub leaves `file`
  # unchanged and the whole path is used as the suffix.
  file_type <- gsub("(.*)(\\.[a-zA-Z0-9]+)$", "\\2", file)
  # force deletion if already existing
  unlink(folder, recursive = TRUE, force = TRUE)
  if( l10n_info()$`UTF-8` ){
    zip::unzip( zipfile = file, exdir = folder )
  } else {
    # unable to unzip a file with accent when on windows;
    # work around by copying the archive to an ASCII temporary path first.
    newfile <- tempfile(fileext = file_type)
    file.copy(from = file, to = newfile)
    zip::unzip( zipfile = newfile, exdir = folder )
    unlink(newfile, force = TRUE)
  }
  absolute_path(folder)
}
absolute_path <- function(x){
  # Resolve `x` to an absolute path with forward slashes.  When the file
  # does not exist yet, a placeholder is created just long enough for
  # normalizePath(mustWork = TRUE) to resolve it, then removed.
  if (length(x) != 1L) {
    stop("'x' must be a single character string")
  }

  expanded <- path.expand(x)

  if (!file.exists(expanded)) {
    if (!dir.exists(dirname(expanded))) {
      stop("directory of ", x, " does not exist.", call. = FALSE)
    }
    # Touch the file so it can be normalised, then clean it up.
    cat("", file = expanded)
    resolved <- normalizePath(expanded, "/", mustWork = TRUE)
    unlink(resolved)
    return(resolved)
  }

  normalizePath(expanded, "/", mustWork = TRUE)
}
|
/R/pack_folder.R
|
no_license
|
AshesITR/officer
|
R
| false
| false
| 2,885
|
r
|
#' @export
#' @importFrom zip zip
#' @title compress a folder
#' @description compress a folder to a target file. The
#' function returns the complete path to target file.
#' @param folder folder to compress
#' @param target path of the archive to create
#' @return the complete (absolute) path to the created archive.
pack_folder <- function( folder, target ){
  # Resolve the target to an absolute path; all checks below use it.
  target <- absolute_path(target)
  dir_fi <- dirname(target)
  # Fail fast on the common reasons writing the archive would fail.
  if( !file.exists(dir_fi) ){
    stop("directory ", shQuote(dir_fi), " does not exist.", call. = FALSE)
  } else if( file.access(dir_fi) < 0 ){
    stop("can not write to directory ", shQuote(dir_fi), call. = FALSE)
  } else if( file.exists(target) && file.access(target) < 0 ){
    stop(shQuote(target), " already exists and is not writable", call. = FALSE)
  } else if( !file.exists(target) ){
    # Probe writability by creating (then removing) an empty file; warnings
    # are silenced during the probe and the previous setting restored after.
    old_warn <- getOption("warn")
    options(warn = -1)
    x <- tryCatch({cat("", file = target);TRUE}, error = function(e) FALSE, finally = unlink(target, force = TRUE) )
    options(warn = old_warn)
    if( !x )
      stop(shQuote(target), " cannot be written, please check your permissions.", call. = FALSE)
  }
  # list.files() below yields paths relative to the working directory, so
  # temporarily move into `folder`; the wd is restored in `finally`.
  curr_wd <- getwd()
  setwd(folder)
  # On Windows, convert the path to the native encoding.
  if( .Platform$OS.type %in% "windows")
    target <- enc2native(target)
  tryCatch(
    suppressMessages(
      zip::zip(zipfile = target,
               files = list.files(all.files = TRUE, recursive = TRUE)) )
    , error = function(e) {
      stop("Could not write ", shQuote(target), " [", e$message, "]")
    },
    # deprecated = function(e) {
    #   if( !grepl("zip::zipr", e))
    #     warning(e)
    # },
    finally = {
      setwd(curr_wd)
    })
  target
}
#' @export
#' @importFrom zip unzip
#' @title Extract files from a zip file
#' @description Extract files from a zip file to a folder. The
#' function returns the complete path to destination folder.
#' @param file path of the archive to unzip
#' @param folder folder to create
#' @return the absolute path to \code{folder}.
unpack_folder <- function( file, folder ){
  stopifnot(file.exists(file))
  # Extension of the archive (dot included), used to name the temporary copy
  # below.  Fix: the original character class was [a-zA-Z0-0], which matches
  # only the digit 0, so extensions containing 1-9 (e.g. ".mp3") were never
  # captured; [0-9] covers all digits.  With no match, gsub leaves `file`
  # unchanged and the whole path is used as the suffix.
  file_type <- gsub("(.*)(\\.[a-zA-Z0-9]+)$", "\\2", file)
  # force deletion if already existing
  unlink(folder, recursive = TRUE, force = TRUE)
  if( l10n_info()$`UTF-8` ){
    zip::unzip( zipfile = file, exdir = folder )
  } else {
    # unable to unzip a file with accent when on windows;
    # work around by copying the archive to an ASCII temporary path first.
    newfile <- tempfile(fileext = file_type)
    file.copy(from = file, to = newfile)
    zip::unzip( zipfile = newfile, exdir = folder )
    unlink(newfile, force = TRUE)
  }
  absolute_path(folder)
}
absolute_path <- function(x){
  # Resolve `x` to an absolute path with forward slashes.  When the file
  # does not exist yet, a placeholder is created just long enough for
  # normalizePath(mustWork = TRUE) to resolve it, then removed.
  if (length(x) != 1L) {
    stop("'x' must be a single character string")
  }

  expanded <- path.expand(x)

  if (!file.exists(expanded)) {
    if (!dir.exists(dirname(expanded))) {
      stop("directory of ", x, " does not exist.", call. = FALSE)
    }
    # Touch the file so it can be normalised, then clean it up.
    cat("", file = expanded)
    resolved <- normalizePath(expanded, "/", mustWork = TRUE)
    unlink(resolved)
    return(resolved)
  }

  normalizePath(expanded, "/", mustWork = TRUE)
}
|
# UI definition for the ICHEM dashboard: a red-themed shinydashboard page
# whose sidebar offers per-region and country-wide index/data views.
ui <- dashboardPage(
  dashboardHeader(title = "ICHEM"),
  dashboardSidebar(
    sidebarMenu(
      br(),
      br(),
      br(),
      # menuItem("MAX & MIN", tabName = "M_MAXMIN"),
      # menuItem("RADAR", tabName = "M_RADAR"),
      hr(),
      "POR REGIÓN", br(),
      menuItem("ÍNDICE", tabName = "M_INDEX_R"),
      menuItem("DATOS", tabName = "M_DATA_R"),
      hr(),
      "TODO CHILE", br(),
      menuItem("ÍNDICE", tabName = "M_INDEX_CHILE"),
      menuItem("DATOS", tabName = "M_DATA_CHILE"),
      hr()
    )
  ),
  dashboardBody(
    # leafletOutput("mymap")
    tabItems(
      # Per-region parallel-coordinates view and data table.
      tabItem(tabName = "M_INDEX_R",
              selectInput("INPUT_R", "Seleccione una región:",
                          choices = unique(DATA_INDEX_RPC$NAME_REG)),
              parcoordsOutput("pc_R", height = 700),
              style = "height: 700px"),
      tabItem(tabName = "M_DATA_R",
              selectInput("INPUT_R2", "Seleccione una región:",
                          choices = unique(DATA_INDEX_RPC$NAME_REG)),
              DTOutput('DATA_INDEX_R')),
      # Country-wide views.
      tabItem(tabName = "M_INDEX_CHILE",
              parcoordsOutput("pc", height = 4000),
              style = "height: 4000px;"),
      tabItem(tabName = "M_DATA_CHILE", DTOutput('DATA_INDEX_CHILE'))
    )
  ),
  skin = "red"
)
|
/myapp/ui.R
|
no_license
|
David-Munoz-B/Shiny_1
|
R
| false
| false
| 1,675
|
r
|
# UI definition for the ICHEM dashboard: a red-themed shinydashboard page
# whose sidebar offers per-region and country-wide index/data views.
ui <- dashboardPage(
  dashboardHeader(title = "ICHEM"),
  dashboardSidebar(
    sidebarMenu(
      br(),
      br(),
      br(),
      # menuItem("MAX & MIN", tabName = "M_MAXMIN"),
      # menuItem("RADAR", tabName = "M_RADAR"),
      hr(),
      "POR REGIÓN", br(),
      menuItem("ÍNDICE", tabName = "M_INDEX_R"),
      menuItem("DATOS", tabName = "M_DATA_R"),
      hr(),
      "TODO CHILE", br(),
      menuItem("ÍNDICE", tabName = "M_INDEX_CHILE"),
      menuItem("DATOS", tabName = "M_DATA_CHILE"),
      hr()
    )
  ),
  dashboardBody(
    # leafletOutput("mymap")
    tabItems(
      # Per-region parallel-coordinates view and data table.
      tabItem(tabName = "M_INDEX_R",
              selectInput("INPUT_R", "Seleccione una región:",
                          choices = unique(DATA_INDEX_RPC$NAME_REG)),
              parcoordsOutput("pc_R", height = 700),
              style = "height: 700px"),
      tabItem(tabName = "M_DATA_R",
              selectInput("INPUT_R2", "Seleccione una región:",
                          choices = unique(DATA_INDEX_RPC$NAME_REG)),
              DTOutput('DATA_INDEX_R')),
      # Country-wide views.
      tabItem(tabName = "M_INDEX_CHILE",
              parcoordsOutput("pc", height = 4000),
              style = "height: 4000px;"),
      tabItem(tabName = "M_DATA_CHILE", DTOutput('DATA_INDEX_CHILE'))
    )
  ),
  skin = "red"
)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.