content
large_stringlengths
0
6.46M
path
large_stringlengths
3
331
license_type
large_stringclasses
2 values
repo_name
large_stringlengths
5
125
language
large_stringclasses
1 value
is_vendor
bool
2 classes
is_generated
bool
2 classes
length_bytes
int64
4
6.46M
extension
large_stringclasses
75 values
text
stringlengths
0
6.46M
# evaluate mixture of MV gaussian densities at point x gaussian_mixture_log_density = function(x, lambda, mu, Sigma){ n_components = length(lambda) log_contribution = rep(NA, n_components) for(k in 1:n_components){ log_contribution[k] = log(lambda[[k]]) + dmvnorm(x, mu[[k]], Sigma[[k]], log=TRUE) } log_density = log(sum(exp(log_contribution))) return(log_density) } calculate_density_grid = function(xval, yval, beta){ values = outer(xval, yval) for(i in 1:length(xval)){ for(j in 1:length(yval)){ values[i, j] = beta * gaussian_mixture_log_density(c(xval[i], yval[j]), lambda, mu, Sigma) } } rownames(values) = xval colnames(values) = yval return(exp(values)) } MH_step = function(x0, loglik, logprior, beta){ proposal = x0 + rnorm(length(x0), 0, sqrt(1)) if(runif(1) < exp(beta * loglik(proposal) + logprior(proposal) - beta * loglik(x0) - logprior(x0))){ return(proposal) } else{ return(x0) } } MH_step_between_chains = function(x0, loglik, temperatures, create_plots = TRUE){ # pick an index j j = sample(1:length(temperatures), 1) # pick an adjacent one jj = ifelse(j == 1, j+1, ifelse(j == length(temperatures), j-1, sample(c(j-1, j+1), 1))) # propose to switch j and jj x1 = x0[[j]] x2 = x0[[jj]] beta1 = 1/temperatures[j] beta2 = 1/temperatures[jj] if(runif(1) < exp((beta1-beta2)*(loglik(x2)-loglik(x1)))){ x0[[j]] = x2 x0[[jj]] = x1 if(create_plots) { df1 = data.frame(do.call("rbind", x0[c(jj, j)]), temperature = temperatures[c(j, jj)]) print(p2 + geom_point(aes(X1, X2), data=df1, size=4, col="red")) df2 = data.frame(do.call("rbind", x0[c(j, jj)]), temperature = temperatures[c(j, jj)]) df3 = data.frame(x = df1$X1, y = df1$X2, xend = df2$X1, yend=df2$X2, temperature = temperatures[c(j, jj)]) print(p2 + geom_point(aes(X1, X2), data=df1, size=4, col="red") + geom_point(aes(X1, X2), data=df2, size=4, col="red") + geom_segment(aes(x, y, xend=xend, yend=yend), data=df3, col="red", linetype="dashed")) print(p2 + geom_point(aes(X1, X2), data=df2, size=4, col="red")) } } return(x0) }
/helpers_parallel_tempering.R
no_license
kasparmartens/crossovers
R
false
false
2,169
r
# evaluate mixture of MV gaussian densities at point x gaussian_mixture_log_density = function(x, lambda, mu, Sigma){ n_components = length(lambda) log_contribution = rep(NA, n_components) for(k in 1:n_components){ log_contribution[k] = log(lambda[[k]]) + dmvnorm(x, mu[[k]], Sigma[[k]], log=TRUE) } log_density = log(sum(exp(log_contribution))) return(log_density) } calculate_density_grid = function(xval, yval, beta){ values = outer(xval, yval) for(i in 1:length(xval)){ for(j in 1:length(yval)){ values[i, j] = beta * gaussian_mixture_log_density(c(xval[i], yval[j]), lambda, mu, Sigma) } } rownames(values) = xval colnames(values) = yval return(exp(values)) } MH_step = function(x0, loglik, logprior, beta){ proposal = x0 + rnorm(length(x0), 0, sqrt(1)) if(runif(1) < exp(beta * loglik(proposal) + logprior(proposal) - beta * loglik(x0) - logprior(x0))){ return(proposal) } else{ return(x0) } } MH_step_between_chains = function(x0, loglik, temperatures, create_plots = TRUE){ # pick an index j j = sample(1:length(temperatures), 1) # pick an adjacent one jj = ifelse(j == 1, j+1, ifelse(j == length(temperatures), j-1, sample(c(j-1, j+1), 1))) # propose to switch j and jj x1 = x0[[j]] x2 = x0[[jj]] beta1 = 1/temperatures[j] beta2 = 1/temperatures[jj] if(runif(1) < exp((beta1-beta2)*(loglik(x2)-loglik(x1)))){ x0[[j]] = x2 x0[[jj]] = x1 if(create_plots) { df1 = data.frame(do.call("rbind", x0[c(jj, j)]), temperature = temperatures[c(j, jj)]) print(p2 + geom_point(aes(X1, X2), data=df1, size=4, col="red")) df2 = data.frame(do.call("rbind", x0[c(j, jj)]), temperature = temperatures[c(j, jj)]) df3 = data.frame(x = df1$X1, y = df1$X2, xend = df2$X1, yend=df2$X2, temperature = temperatures[c(j, jj)]) print(p2 + geom_point(aes(X1, X2), data=df1, size=4, col="red") + geom_point(aes(X1, X2), data=df2, size=4, col="red") + geom_segment(aes(x, y, xend=xend, yend=yend), data=df3, col="red", linetype="dashed")) print(p2 + geom_point(aes(X1, X2), data=df2, size=4, col="red")) } } return(x0) }
library(MASS) data("Boston") library(caret) set.seed(1992) intrain<-createDataPartition(y=Boston$medv , p=0.7,list=FALSE) training <- Boston[ intrain , ] validation <- Boston[-intrain , ] library(party) model.RF <- cforest(medv ~ ., data = training, control = cforest_unbiased(ntree = 50)) model.RF pred.RF <- predict(model.RF, newdata = validation[,-14]) postResample(pred.RF , validation$medv) MAPE <- function(y, yhat) { mean(abs((y - yhat)/y)) } MAPE(validation$medv , pred.RF) RMSPE<- function(y, yhat) { sqrt(mean((y-yhat)/y)^2) } RMSPE(validation$medv , pred.RF)
/Model Ensembling/Random Forest/RF_Boston_cforest.R
no_license
prathmesh31/machine_learning_classwork
R
false
false
611
r
library(MASS) data("Boston") library(caret) set.seed(1992) intrain<-createDataPartition(y=Boston$medv , p=0.7,list=FALSE) training <- Boston[ intrain , ] validation <- Boston[-intrain , ] library(party) model.RF <- cforest(medv ~ ., data = training, control = cforest_unbiased(ntree = 50)) model.RF pred.RF <- predict(model.RF, newdata = validation[,-14]) postResample(pred.RF , validation$medv) MAPE <- function(y, yhat) { mean(abs((y - yhat)/y)) } MAPE(validation$medv , pred.RF) RMSPE<- function(y, yhat) { sqrt(mean((y-yhat)/y)^2) } RMSPE(validation$medv , pred.RF)
### ### Test with q=1 (each feature has one parameter) ### library(sglOptim) # warnings = errors options(warn=2) data(TestData) x <- test.data$x y <- test.data$y grp <- test.data$grp weights <- rep(1/nrow(x), nrow(x)) sampleGrouping <- factor(rep(1, nrow(x))) #Note covariateGrouping <- factor(1:ncol(x)) groupWeights <- c(sqrt(length(levels(sampleGrouping))*table(covariateGrouping))) parameterWeights <- matrix(1, nrow = length(levels(sampleGrouping)), ncol = ncol(x)) d <- 50L lambda.min <- 2 algorithm.config <- sgl.standard.config # To check dimension do #data <- create.sgldata(x, y, weights, sampleGrouping) #args <- prepare.args(data, covariateGrouping, groupWeights, parameterWeights, alpha = 0) #args$block.dim # Fit tests dense # create data data <- create.sgldata(x, y, weights, sampleGrouping) # alpha = 0 lambda <- sgl_lambda_sequence("sgl_test_dense", "sglOptim", data, covariateGrouping, groupWeights, parameterWeights, alpha = 0, d = d, lambda.min, algorithm.config) fit1a <- sgl_fit("sgl_test_dense", "sglOptim", data, covariateGrouping, groupWeights, parameterWeights, alpha = 0, lambda, return = 1:length(lambda), algorithm.config) # alpha = 0.5 lambda <- sgl_lambda_sequence("sgl_test_dense", "sglOptim", data, covariateGrouping, groupWeights, parameterWeights, alpha = 0.5, d = d, lambda.min, algorithm.config) fit1a <- sgl_fit("sgl_test_dense", "sglOptim", data, covariateGrouping, groupWeights, parameterWeights, alpha = 0.5, lambda, return = 1:length(lambda), algorithm.config) # alpha = 1 lambda <- sgl_lambda_sequence("sgl_test_dense", "sglOptim", data, covariateGrouping, groupWeights, parameterWeights, alpha = 1, d = d, lambda.min, algorithm.config) fit1a <- sgl_fit("sgl_test_dense", "sglOptim", data, covariateGrouping, groupWeights, parameterWeights, alpha = 1, lambda, return = 1:length(lambda), algorithm.config) # Predict test res1a <- sgl_predict("sgl_test_dense", "sglOptim", fit1a, data) # Fit tests sparse data <- create.sgldata(x, y, weights, sampleGrouping, 
sparseX = TRUE) fit1b <- sgl_fit("sgl_test_sparse", "sglOptim", data, covariateGrouping, groupWeights, parameterWeights, alpha = 0, lambda, return = 1:length(lambda), algorithm.config) fit1b <- sgl_fit("sgl_test_sparse", "sglOptim", data, covariateGrouping, groupWeights, parameterWeights, alpha = 0.5, lambda, return = 1:length(lambda), algorithm.config) fit1b <- sgl_fit("sgl_test_sparse", "sglOptim", data, covariateGrouping, groupWeights, parameterWeights, alpha = 1, lambda, return = 1:length(lambda), algorithm.config) # Predict test res1b <- sgl_predict("sgl_test_sparse", "sglOptim", fit1b, data) if(max(abs(fit1a$beta[[25]]-fit1b$beta[[25]])) > 1e-5) stop() if(max(abs(res1a$responses$link[[25]]-res1b$responses$link[[25]])) > 1e-5) stop()
/sglOptim/tests/B_fit_test_4.R
no_license
ingted/R-Examples
R
false
false
2,763
r
### ### Test with q=1 (each feature has one parameter) ### library(sglOptim) # warnings = errors options(warn=2) data(TestData) x <- test.data$x y <- test.data$y grp <- test.data$grp weights <- rep(1/nrow(x), nrow(x)) sampleGrouping <- factor(rep(1, nrow(x))) #Note covariateGrouping <- factor(1:ncol(x)) groupWeights <- c(sqrt(length(levels(sampleGrouping))*table(covariateGrouping))) parameterWeights <- matrix(1, nrow = length(levels(sampleGrouping)), ncol = ncol(x)) d <- 50L lambda.min <- 2 algorithm.config <- sgl.standard.config # To check dimension do #data <- create.sgldata(x, y, weights, sampleGrouping) #args <- prepare.args(data, covariateGrouping, groupWeights, parameterWeights, alpha = 0) #args$block.dim # Fit tests dense # create data data <- create.sgldata(x, y, weights, sampleGrouping) # alpha = 0 lambda <- sgl_lambda_sequence("sgl_test_dense", "sglOptim", data, covariateGrouping, groupWeights, parameterWeights, alpha = 0, d = d, lambda.min, algorithm.config) fit1a <- sgl_fit("sgl_test_dense", "sglOptim", data, covariateGrouping, groupWeights, parameterWeights, alpha = 0, lambda, return = 1:length(lambda), algorithm.config) # alpha = 0.5 lambda <- sgl_lambda_sequence("sgl_test_dense", "sglOptim", data, covariateGrouping, groupWeights, parameterWeights, alpha = 0.5, d = d, lambda.min, algorithm.config) fit1a <- sgl_fit("sgl_test_dense", "sglOptim", data, covariateGrouping, groupWeights, parameterWeights, alpha = 0.5, lambda, return = 1:length(lambda), algorithm.config) # alpha = 1 lambda <- sgl_lambda_sequence("sgl_test_dense", "sglOptim", data, covariateGrouping, groupWeights, parameterWeights, alpha = 1, d = d, lambda.min, algorithm.config) fit1a <- sgl_fit("sgl_test_dense", "sglOptim", data, covariateGrouping, groupWeights, parameterWeights, alpha = 1, lambda, return = 1:length(lambda), algorithm.config) # Predict test res1a <- sgl_predict("sgl_test_dense", "sglOptim", fit1a, data) # Fit tests sparse data <- create.sgldata(x, y, weights, sampleGrouping, 
sparseX = TRUE) fit1b <- sgl_fit("sgl_test_sparse", "sglOptim", data, covariateGrouping, groupWeights, parameterWeights, alpha = 0, lambda, return = 1:length(lambda), algorithm.config) fit1b <- sgl_fit("sgl_test_sparse", "sglOptim", data, covariateGrouping, groupWeights, parameterWeights, alpha = 0.5, lambda, return = 1:length(lambda), algorithm.config) fit1b <- sgl_fit("sgl_test_sparse", "sglOptim", data, covariateGrouping, groupWeights, parameterWeights, alpha = 1, lambda, return = 1:length(lambda), algorithm.config) # Predict test res1b <- sgl_predict("sgl_test_sparse", "sglOptim", fit1b, data) if(max(abs(fit1a$beta[[25]]-fit1b$beta[[25]])) > 1e-5) stop() if(max(abs(res1a$responses$link[[25]]-res1b$responses$link[[25]])) > 1e-5) stop()
#' Get list of all cbs thematic entries. #' #' Returns a list of all cbs themes. #' @param ... Use this to add a filter to the query e.g. `get_themes(ID=10)`. #' @param verbose Print extra messages what is happening. #' @param cache Should the result be cached? #' @param select `character` vector with names of wanted properties. default is all #' @param base_url optionally specify a different server. Useful for #' third party data services implementing the same protocol. #' @return A `data.frame` with various properties of SN/CBS themes. #' #' The filter is specified with `<column_name> = <values>` in which `<values>` is a character vector. #' Rows with values that are not part of the character vector are not returned. #' @export #' @examples #' \dontrun{ #' # get list of all themes #' cbs+get_themes() #' #' # get list of all dutch themes from the Catalog "CBS" #' cbs_get_themes(Language="nl", Catalog="CBS") #' } #' @importFrom whisker whisker.render cbs_get_themes <- function(..., select=NULL, verbose = TRUE, cache = FALSE , base_url = getOption("cbsodataR.base_url", BASE_URL)){ url <- whisker.render("{{BASEURL}}/{{CATALOG}}/Themes?$format=json" , list( BASEURL = base_url , CATALOG = CATALOG ) ) url <- paste0(url, get_query(..., select=select)) themes <- resolve_resource(url, "Retrieving themes from ", verbose = verbose, cache = cache) themes } #' Get a the list of tables connected to themes #' @export #' @param ... Use this to add a filter to the query e.g. `get_tables_themes(ID=10)`. #' @param select `character` vector with names of wanted properties. default is all #' @param verbose Print extra messages what is happening. #' @param cache Should the result be cached? #' @param base_url optionally specify a different server. Useful for #' third party data services implementing the same protocal. #' @return A `data.frame` with various properties of SN/CBS themes. 
cbs_get_tables_themes <- function(..., select=NULL, verbose = FALSE, cache = TRUE , base_url = getOption("cbsodataR.base_url", BASE_URL)){ url <- whisker.render("{{BASEURL}}/{{CATALOG}}/Tables_Themes?$format=json" , list( BASEURL = base_url , CATALOG = CATALOG ) ) url <- paste0(url, get_query(..., select=select)) table_themes <- resolve_resource(url, "Retrieving themes from ", cache = cache, verbose = verbose) table_themes } ## testing # library(dplyr) #themes <- get_themes()
/R/cbs_get_themes.R
no_license
edwindj/cbsodataR
R
false
false
2,656
r
#' Get list of all cbs thematic entries. #' #' Returns a list of all cbs themes. #' @param ... Use this to add a filter to the query e.g. `get_themes(ID=10)`. #' @param verbose Print extra messages what is happening. #' @param cache Should the result be cached? #' @param select `character` vector with names of wanted properties. default is all #' @param base_url optionally specify a different server. Useful for #' third party data services implementing the same protocol. #' @return A `data.frame` with various properties of SN/CBS themes. #' #' The filter is specified with `<column_name> = <values>` in which `<values>` is a character vector. #' Rows with values that are not part of the character vector are not returned. #' @export #' @examples #' \dontrun{ #' # get list of all themes #' cbs+get_themes() #' #' # get list of all dutch themes from the Catalog "CBS" #' cbs_get_themes(Language="nl", Catalog="CBS") #' } #' @importFrom whisker whisker.render cbs_get_themes <- function(..., select=NULL, verbose = TRUE, cache = FALSE , base_url = getOption("cbsodataR.base_url", BASE_URL)){ url <- whisker.render("{{BASEURL}}/{{CATALOG}}/Themes?$format=json" , list( BASEURL = base_url , CATALOG = CATALOG ) ) url <- paste0(url, get_query(..., select=select)) themes <- resolve_resource(url, "Retrieving themes from ", verbose = verbose, cache = cache) themes } #' Get a the list of tables connected to themes #' @export #' @param ... Use this to add a filter to the query e.g. `get_tables_themes(ID=10)`. #' @param select `character` vector with names of wanted properties. default is all #' @param verbose Print extra messages what is happening. #' @param cache Should the result be cached? #' @param base_url optionally specify a different server. Useful for #' third party data services implementing the same protocal. #' @return A `data.frame` with various properties of SN/CBS themes. 
cbs_get_tables_themes <- function(..., select=NULL, verbose = FALSE, cache = TRUE , base_url = getOption("cbsodataR.base_url", BASE_URL)){ url <- whisker.render("{{BASEURL}}/{{CATALOG}}/Tables_Themes?$format=json" , list( BASEURL = base_url , CATALOG = CATALOG ) ) url <- paste0(url, get_query(..., select=select)) table_themes <- resolve_resource(url, "Retrieving themes from ", cache = cache, verbose = verbose) table_themes } ## testing # library(dplyr) #themes <- get_themes()
#' Poolwise Logistic Regression with Normal Exposure Subject to Errors #' #' Assumes normal linear model for exposure given covariates, and additive #' normal processing errors and measurement errors acting on the poolwise mean #' exposure. Manuscript fully describing the approach is under review. #' #' #' @param g Numeric vector with pool sizes, i.e. number of members in each pool. #' @param y Numeric vector with poolwise Y values, coded 0 if all members are #' controls and 1 if all members are cases. #' @param xtilde Numeric vector (or list of numeric vectors, if some pools have #' replicates) with Xtilde values. #' @param c Numeric matrix with poolwise \strong{C} values (if any), with one #' row for each pool. Can be a vector if there is only 1 covariate. #' @param errors Character string specifying the errors that X is subject to. #' Choices are \code{"neither"}, \code{"processing"} for processing error #' only, \code{"measurement"} for measurement error only, and \code{"both"}. #' @param nondiff_pe Logical value for whether to assume the processing error #' variance is non-differential, i.e. the same in case pools and control pools. #' @param nondiff_me Logical value for whether to assume the measurement error #' variance is non-differential, i.e. the same in case pools and control pools. #' @param constant_pe Logical value for whether to assume the processing error #' variance is constant with pool size. If \code{FALSE}, assumption is that #' processing error variance increase with pool size such that, for example, the #' processing error affecting a pool 2x as large as another has 2x the variance. #' @param prev Numeric value specifying disease prevalence, allowing #' for valid estimation of the intercept with case-control sampling. Can specify #' \code{samp_y1y0} instead if sampling rates are known. 
#' @param samp_y1y0 Numeric vector of length 2 specifying sampling probabilities #' for cases and controls, allowing for valid estimation of the intercept with #' case-control sampling. Can specify \code{prev} instead if it's easier. #' @param approx_integral Logical value for whether to use the probit #' approximation for the logistic-normal integral, to avoid numerically #' integrating X's out of the likelihood function. #' @param estimate_var Logical value for whether to return variance-covariance #' matrix for parameter estimates. #' @param start_nonvar_var Numeric vector of length 2 specifying starting value #' for non-variance terms and variance terms, respectively. #' @param lower_nonvar_var Numeric vector of length 2 specifying lower bound for #' non-variance terms and variance terms, respectively. #' @param upper_nonvar_var Numeric vector of length 2 specifying upper bound for #' non-variance terms and variance terms, respectively. #' @param jitter_start Numeric value specifying standard deviation for mean-0 #' normal jitters to add to starting values for a second try at maximizing the #' log-likelihood, should the initial call to \code{\link[stats]{nlminb}} result #' in non-convergence. Set to \code{NULL} for no second try. #' @param hcubature_list List of arguments to pass to #' \code{\link[cubature]{hcubature}} for numerical integration. Only used if #' \code{approx_integral = FALSE}. #' @param nlminb_list List of arguments to pass to \code{\link[stats]{nlminb}} #' for log-likelihood maximization. #' @param hessian_list List of arguments to pass to #' \code{\link[numDeriv]{hessian}} for approximating the Hessian matrix. Only #' used if \code{estimate_var = TRUE}. #' @param nlminb_object Object returned from \code{\link[stats]{nlminb}} in a #' prior call. Useful for bypassing log-likelihood maximization if you just want #' to re-estimate the Hessian matrix with different options. 
#' #' #' @return #' List containing: #' \enumerate{ #' \item Numeric vector of parameter estimates. #' \item Variance-covariance matrix (if \code{estimate_var = TRUE}). #' \item Returned \code{\link[stats]{nlminb}} object from maximizing the #' log-likelihood function. #' \item Akaike information criterion (AIC). #' } #' #' #' @references #' Schisterman, E.F., Vexler, A., Mumford, S.L. and Perkins, N.J. (2010) "Hybrid #' pooled-unpooled design for cost-efficient measurement of biomarkers." #' \emph{Stat. Med.} \strong{29}(5): 597--613. #' #' Weinberg, C.R. and Umbach, D.M. (1999) "Using pooled exposure assessment to #' improve efficiency in case-control studies." \emph{Biometrics} \strong{55}: #' 718--726. #' #' Weinberg, C.R. and Umbach, D.M. (2014) "Correction to 'Using pooled exposure #' assessment to improve efficiency in case-control studies' by Clarice R. #' Weinberg and David M. Umbach; 55, 718--726, September 1999." #' \emph{Biometrics} \strong{70}: 1061. #' #' #' @examples #' # Load dataset containing (Y, Xtilde, C) values for pools of size 1, 2, and #' # 3. Xtilde values are affected by processing error. #' data(pdat1) #' #' # Estimate log-OR for X and Y adjusted for C, ignoring processing error #' fit1 <- p_logreg_xerrors( #' g = pdat1$g, #' y = pdat1$allcases, #' xtilde = pdat1$xtilde, #' c = pdat1$c, #' errors = "neither" #' ) #' fit1$theta.hat #' #' # Repeat, but accounting for processing error. Closer to true log-OR of 0.5. 
#' fit2 <- p_logreg_xerrors( #' g = pdat1$g, #' y = pdat1$allcases, #' xtilde = pdat1$xtilde, #' c = pdat1$c, #' errors = "processing" #' ) #' fit2$theta.hat #' #' #' @export p_logreg_xerrors <- function( g, y, xtilde, c = NULL, errors = "processing", nondiff_pe = TRUE, nondiff_me = TRUE, constant_pe = TRUE, prev = NULL, samp_y1y0 = NULL, approx_integral = TRUE, estimate_var = TRUE, start_nonvar_var = c(0.01, 1), lower_nonvar_var = c(-Inf, 1e-4), upper_nonvar_var = c(Inf, Inf), jitter_start = 0.01, hcubature_list = list(tol = 1e-8), nlminb_list = list(control = list(trace = 1, eval.max = 500, iter.max = 500)), hessian_list = list(method.args = list(r = 4)), nlminb_object = NULL ) { # Check that inputs are valid if (! errors %in% c("neither", "processing", "measurement", "both")) { stop("The input 'errors' should be set to 'neither', 'processing', 'measurement', or 'both'.") } if (! is.logical(nondiff_pe)) { stop("The input 'nondiff_pe' should be TRUE if you want to assume non-differential processing error and FALSE otherwise.") } if (! is.logical(nondiff_me)) { stop("The input 'nondiff_me' should be TRUE if you want to assume non-differential measurement error and FALSE otherwise.") } if (! is.logical(constant_pe)) { stop("The input 'constant_pe' should be TRUE if you want to assume that processing error variance is constant with pool size and FALSE otherwise.") } if (! is.null(prev)) { if (prev < 0 | prev > 1) { stop("The input 'prev' is the disease prevalence, and must be between 0 and 1.") } } if (! is.null(samp_y1y0)) { if (! (length(samp_y1y0) == 2 & min(samp_y1y0) > 0 & max(samp_y1y0) < 1)) { stop("The input 'samp_y1y0' is the sampling probabilities for cases and controls, and should be a numeric vector of two probabilities.") } } if (! is.logical(approx_integral)) { stop("The input 'approx_integral' should be TRUE or FALSE.") } if (! is.logical(estimate_var)) { stop("The input 'estimate_var' should be TRUE or FALSE.") } if (! 
(is.numeric(start_nonvar_var) & length(start_nonvar_var) == 2)) { stop("The input 'start_nonvar_var' should be a numeric vector of length 2.") } if (! (is.numeric(lower_nonvar_var) & length(lower_nonvar_var) == 2)) { stop("The input 'lower_nonvar_var' should be a numeric vector of length 2.") } if (! (is.numeric(upper_nonvar_var) & length(upper_nonvar_var) == 2)) { stop("The input 'upper_nonvar_var' should be a numeric vector of length 2.") } if (! is.null(jitter_start) & jitter_start <= 0) { stop("The input 'jitter_start' should be a non-negative value, if specified.") } # Get name of xtilde input x.varname <- deparse(substitute(xtilde)) if (length(grep("$", x.varname, fixed = TRUE)) > 0) { x.varname <- substr(x.varname, start = which(unlist(strsplit(x.varname, "")) == "$") + 1, stop = nchar(x.varname)) } # Get information about covariates C if (is.null(c)) { c.varnames <- NULL n.cvars <- 0 some.cs <- FALSE } else { c.varname <- deparse(substitute(c)) if (! is.matrix(c)) { c <- as.matrix(c) } n.cvars <- ncol(c) some.cs <- TRUE c.varnames <- colnames(c) if (is.null(c.varnames)) { if (n.cvars == 1) { if (length(grep("$", c.varname, fixed = TRUE)) > 0) { c.varname <- substr(c.varname, start = which(unlist(strsplit(c.varname, "")) == "$") + 1, stop = nchar(c.varname)) } c.varnames <- c.varname } else { c.varnames <- paste("c", 1: n.cvars, sep = "") } } } # Get number of betas and alphas n.betas <- 2 + n.cvars n.alphas <- 1 + n.cvars # Create indicator vector I(g > 1) Ig <- ifelse(g > 1, 1, 0) # Calculate offsets according to Weinberg and Umbach formula, incorporating # disease prevalence or sampling probabilities if known n <- length(y) locs.cases <- which(y == 1) n_1 <- sum(g[locs.cases]) n_0 <- sum(g[-locs.cases]) g.vals <- unique(g) qg <- rep(NA, n) if (! 
is.null(prev)) { for (jj in 1: length(g.vals)) { g.jj <- g.vals[jj] locs.g <- which(g == g.jj) n.casepools <- sum(g == g.jj & y == 1) n.controlpools <- sum(g == g.jj & y == 0) qg[locs.g] <- log(n.casepools / n.controlpools) - g.jj * log(prev / (1 - prev)) } } else if (! is.null(samp_y1y0)) { for (jj in 1: length(g.vals)) { g.jj <- g.vals[jj] locs.g <- which(g == g.jj) n.casepools <- sum(g == g.jj & y == 1) n.controlpools <- sum(g == g.jj & y == 0) qg[locs.g] <- log(n.casepools / n.controlpools) - g.jj * log(n_1 / n_0) - g.jj * log(samp_y1y0[2] / samp_y1y0[1]) } } else { for (jj in 1: length(g.vals)) { g.jj <- g.vals[jj] locs.g <- which(g == g.jj) n.casepools <- sum(g == g.jj & y == 1) n.controlpools <- sum(g == g.jj & y == 0) qg[locs.g] <- log(n.casepools / n.controlpools) - g.jj * log(n_1 / n_0) } } # Separate out pools with precisely measured X if (errors == "neither") { which.p <- 1: n } else if (errors == "processing") { which.p <- which(Ig == 0) } else { which.p <- NULL } n.p <- length(which.p) some.p <- n.p > 0 if (some.p) { g.p <- g[which.p] y.p <- y[which.p] x.p <- unlist(xtilde[which.p]) c.p <- c[which.p, , drop = FALSE] qg.p <- qg[which.p] gxc.p <- cbind(g.p, x.p, c.p) gc.p <- gxc.p[, -2, drop = FALSE] } # Separate out pools with replicates class.xtilde <- class(xtilde) if (class.xtilde == "list") { k <- sapply(xtilde, length) which.r <- which(k > 1) n.r <- length(which.r) some.r <- n.r > 0 if (some.r) { k.r <- k[which.r] g.r <- g[which.r] Ig.r <- Ig[which.r] y.r <- y[which.r] c.r <- c[which.r, , drop = FALSE] qg.r <- qg[which.r] gc.r <- cbind(g.r, c.r) xtilde.r <- xtilde[which.r] } } else { k <- rep(1, n) some.r <- FALSE } # Separate out pools with single Xtilde if (errors == "neither") { which.i <- NULL } else if (errors == "processing") { which.i <- which(Ig == 1 & k == 1) } else if (errors %in% c("measurement", "both")) { which.i <- which(k == 1) } n.i <- length(which.i) some.i <- n.i > 0 if (some.i) { g.i <- g[which.i] Ig.i <- Ig[which.i] y.i <- 
y[which.i] c.i <- c[which.i, , drop = FALSE] qg.i <- qg[which.i] gc.i <- cbind(g.i, c.i) xtilde.i <- unlist(xtilde[which.i]) } # Estimate (alpha, sigsq_x.c, sigsq_p, sigsq_m) if pseudo-lik... # Get indices for parameters being estimated and create labels loc.betas <- 1: n.betas beta.labels <- paste("beta", c("0", x.varname, c.varnames), sep = "_") loc.alphas <- (n.betas + 1): (n.betas + n.alphas) alpha.labels <- paste("alpha", c("0", c.varnames), sep = "_") loc.sigsq_x.c <- n.betas + n.alphas + 1 if (errors == "neither") { theta.labels <- c(beta.labels, alpha.labels, "sigsq_x.c") } else if (errors == "processing") { if (! nondiff_pe) { loc.sigsq_p1 <- loc.sigsq_x.c + 1 loc.sigsq_p0 <- loc.sigsq_x.c + 2 theta.labels <- c(beta.labels, alpha.labels, "sigsq_x.c", "sigsq_p1", "sigsq_p0") } else { loc.sigsq_p1 <- loc.sigsq_p0 <- loc.sigsq_x.c + 1 theta.labels <- c(beta.labels, alpha.labels, "sigsq_x.c", "sigsq_p") } } else if (errors == "measurement") { if (! nondiff_me) { loc.sigsq_m1 <- loc.sigsq_x.c + 1 loc.sigsq_m0 <- loc.sigsq_x.c + 2 theta.labels <- c(beta.labels, alpha.labels, "sigsq_x.c", "sigsq_m1", "sigsq_m0") } else { loc.sigsq_m1 <- loc.sigsq_m0 <- loc.sigsq_x.c + 1 theta.labels <- c(beta.labels, alpha.labels, "sigsq_x.c", "sigsq_m") } } else if (errors == "both") { if (! nondiff_pe & ! nondiff_me) { loc.sigsq_p1 <- loc.sigsq_x.c + 1 loc.sigsq_p0 <- loc.sigsq_x.c + 2 loc.sigsq_m1 <- loc.sigsq_x.c + 3 loc.sigsq_m0 <- loc.sigsq_x.c + 4 theta.labels <- c(beta.labels, alpha.labels, "sigsq_x.c", "sigsq_p1", "sigsq_p0", "sigsq_m1", "sigsq_m0") } else if (! nondiff_pe & nondiff_me) { loc.sigsq_p1 <- loc.sigsq_x.c + 1 loc.sigsq_p0 <- loc.sigsq_x.c + 2 loc.sigsq_m1 <- loc.sigsq_m0 <- loc.sigsq_x.c + 3 theta.labels <- c(beta.labels, alpha.labels, "sigsq_x.c", "sigsq_p1", "sigsq_p0", "sigsq_m") } else if (nondiff_pe & ! 
nondiff_me) { loc.sigsq_p1 <- loc.sigsq_p0 <- loc.sigsq_x.c + 1 loc.sigsq_m1 <- loc.sigsq_x.c + 2 loc.sigsq_m0 <- loc.sigsq_x.c + 3 theta.labels <- c(beta.labels, alpha.labels, "sigsq_x.c", "sigsq_p", "sigsq_m1", "sigsq_m0") } else if (nondiff_pe & nondiff_me) { loc.sigsq_p1 <- loc.sigsq_p0 <- loc.sigsq_x.c + 1 loc.sigsq_m1 <- loc.sigsq_m0 <- loc.sigsq_x.c + 2 theta.labels <- c(beta.labels, alpha.labels, "sigsq_x.c", "sigsq_p", "sigsq_m") } } # Log-likelihood function llf <- function(f.theta) { # Extract parameters f.betas <- matrix(f.theta[loc.betas], ncol = 1) f.beta_0 <- f.betas[1] f.beta_x <- f.betas[2] f.beta_c <- matrix(f.betas[-c(1: 2)], ncol = 1) f.alphas <- matrix(f.theta[loc.alphas], ncol = 1) f.alpha_0 <- f.alphas[1] f.alpha_c <- matrix(f.alphas[-1], ncol = 1) f.sigsq_x.c <- f.theta[loc.sigsq_x.c] if (errors == "neither") { f.sigsq_p1 <- f.sigsq_p0 <- f.sigsq_m1 <- f.sigsq_m0 <- 0 } if (errors %in% c("processing", "both")) { f.sigsq_p1 <- f.theta[loc.sigsq_p1] f.sigsq_p0 <- f.theta[loc.sigsq_p0] } else { f.sigsq_p1 <- f.sigsq_p0 <- 0 } if (errors %in% c("measurement", "both")) { f.sigsq_m1 <- f.theta[loc.sigsq_m1] f.sigsq_m0 <- f.theta[loc.sigsq_m0] } else { f.sigsq_m1 <- f.sigsq_m0 <- 0 } if (some.p) { # Likelihood for pools with precisely measured X: # L = f(Y|X,C) f(X|C) # P(Y|X,C) eta <- gxc.p %*% f.betas + qg.p p_y.xc <- (1 + exp(-eta))^(-1) # E(X|C) and V(X|C) mu_x.c <- gc.p %*% f.alphas sigsq_x.c <- g.p * f.sigsq_x.c # Log-likelihood ll.p <- sum(dbinom(x = y.p, size = 1, prob = p_y.xc, log = TRUE) + dnorm(x = x.p, mean = mu_x.c, sd = sqrt(sigsq_x.c), log = TRUE)) } else { ll.p <- 0 } # Set skip.rest flag to FALSE skip.rest <- FALSE if (some.r) { # Likelihood for pools with replicates # L = f(Y, Xtilde|C) # = [\int_X f(Y|X,C) f(X|Xtilde,C) dX] f(Xtilde|C) # = int_X f(Y|X,C) f(Xtilde|X) f(X|C) dX # Create error vectors sigsq_p <- ifelse(y.r, f.sigsq_p1, f.sigsq_p0) * Ig.r sigsq_m <- ifelse(y.r, f.sigsq_m1, f.sigsq_m0) # Calculate E(X|C) and V(X|C) 
mu_x.c <- gc.r %*% f.alphas sigsq_x.c <- g.r * f.sigsq_x.c if (approx_integral) { # Probit approximation for logistic-normal integral ll.vals <- c() for (ii in 1: length(xtilde.r)) { # Values for ith subject k_i <- k.r[ii] g_i <- g.r[ii] y_i <- y.r[ii] c_i <- c.r[ii, ] qg_i <- qg.r[ii] mu_x.c_i <- mu_x.c[ii] sigsq_x.c_i <- sigsq_x.c[ii] xtilde_i <- xtilde.r[[ii]] sigsq_p_i <- sigsq_p[ii] sigsq_m_i <- sigsq_m[ii] # E(X|Xtilde,C) and V(X|Xtilde,C) Mu_xxtilde.c <- matrix(mu_x.c_i, nrow = k_i + 1) Sigma_xxtilde.c_11 <- sigsq_x.c_i Sigma_xxtilde.c_12 <- matrix(sigsq_x.c_i, ncol = k_i) Sigma_xxtilde.c_21 <- t(Sigma_xxtilde.c_12) Sigma_xxtilde.c_22 <- g_i * f.sigsq_x.c + g_i^2 * ifelse(constant_pe, 1, g_i) * sigsq_p_i + diag(x = g_i^2 * sigsq_m_i, ncol = k_i, nrow = k_i) mu_x.xtildec <- Mu_xxtilde.c[1] + Sigma_xxtilde.c_12 %*% solve(Sigma_xxtilde.c_22) %*% (xtilde_i - Mu_xxtilde.c[-1]) sigsq_x.xtildec <- Sigma_xxtilde.c_11 - Sigma_xxtilde.c_12 %*% solve(Sigma_xxtilde.c_22) %*% Sigma_xxtilde.c_21 # Approximation of \int_X f(Y|X,C) f(X|Xtilde,C) dX if (some.cs) { t <- (g_i * f.beta_0 + f.beta_x * mu_x.xtildec + c_i %*% f.beta_c + qg_i) / sqrt(1 + sigsq_x.xtildec * f.beta_x^2 / 1.7^2) } else { t <- (g_i * f.beta_0 + f.beta_x * mu_x.xtildec + qg_i) / sqrt(1 + sigsq_x.xtildec * f.beta_x^2 / 1.7^2) } p <- exp(t) / (1 + exp(t)) part1 <- dbinom(x = y_i, size = 1, prob = p, log = TRUE) # log[f(Xtilde|C)] if (k_i == 2) { mu_xtilde1.xtilde2c <- mu_x.c_i + Sigma_xxtilde.c_22[1, 2] / Sigma_xxtilde.c_22[2, 2] * (xtilde_i[2] - mu_x.c_i) sigsq_xtilde1.xtilde2c <- Sigma_xxtilde.c_22[1, 1] - Sigma_xxtilde.c_22[1, 2]^2 / Sigma_xxtilde.c_22[2, 2] part2 <- sum(dnorm(x = xtilde_i, log = TRUE, mean = c(mu_xtilde1.xtilde2c, mu_x.c_i), sd = sqrt(c(sigsq_xtilde1.xtilde2c, Sigma_xxtilde.c_22[2, 2])))) } else { part2 <- dmvnorm(x = xtilde_i, log = TRUE, mean = Mu_xxtilde.c[-1], sigma = Sigma_xxtilde.c_22) } # Log-likelihood ll.vals[ii] <- part1 + part2 } ll.r <- sum(ll.vals) } else { # Full 
likelihood # Function for integrating out X's int.f_i1 <- function(k_i, g_i, y_i, x_i, gc_i, qg_i, mu_x.c_i, sigsq_x.c_i, xtilde_i, sigsq_p_i, sigsq_m_i) { x_i <- matrix(x_i, nrow = 1) f_yxtildex.c <- apply(x_i, 2, function(z) { # Transformation s_i <- z / (1 - z^2) # P(Y_i|X_i^*,C_i^*) p_y.xc <- (1 + exp(-as.numeric(gc_i %*% f.betas[-2, , drop = FALSE]) - s_i * f.beta_x - qg_i))^(-1) if (g_i == 1) { # f(Y,X,Xtilde|C) = f(Y|X,C) f(Xtilde1|Xtilde2,X) f(Xtilde2|X) f(X|C) dbinom(x = y_i, size = 1, prob = p_y.xc) * prod(dnorm(x = xtilde_i, mean = s_i, sd = sqrt(sigsq_m_i))) * dnorm(x = s_i, mean = mu_x.c_i, sd = sqrt(sigsq_x.c_i)) } else { # E(Xtilde|X) and V(Xtilde|X) Mu_xtilde.x <- rep(s_i, k_i) # Sigma_xtilde.x <- g_i^2 * sigsq_p_i + # diag(x = g_i^2 * sigsq_m_i, ncol = k_i, nrow = k_i) Sigma_xtilde.x <- g_i^2 * ifelse(constant_pe, 1, g_i) * sigsq_p_i + diag(x = g_i^2 * sigsq_m_i, ncol = k_i, nrow = k_i) # f(Y,X,Xtilde|C) = f(Y|X,C) f(Xtilde|X) f(X|C) dbinom(x = y_i, size = 1, prob = p_y.xc) * dmvnorm(x = xtilde_i, mean = Mu_xtilde.x, sigma = Sigma_xtilde.x) * dnorm(x = s_i, mean = mu_x.c_i, sd = sqrt(sigsq_x.c_i)) } }) # Back-transformation out <- matrix(f_yxtildex.c * (1 + x_i^2) / (1 - x_i^2)^2, ncol = ncol(x_i)) return(out) } int.vals <- c() for (ii in 1: length(xtilde.r)) { # Get values for ith participant k_i <- k.r[ii] g_i <- g.r[ii] y_i <- y.r[ii] gc_i <- gc.r[ii, ] qg_i <- qg.r[ii] mu_x.c_i <- mu_x.c[ii] sigsq_x.c_i <- sigsq_x.c[ii] xtilde_i <- xtilde.r[[ii]] sigsq_p_i <- sigsq_p[ii] sigsq_m_i <- sigsq_m[ii] # Try integrating out X with default settings int.ii <- do.call(hcubature, c(list(f = int.f_i1, lowerLimit = -1, upperLimit = 1, vectorInterface = TRUE, k_i = k_i, g_i = g_i, y_i = y_i, gc_i = gc_i, qg_i = qg_i, mu_x.c_i = mu_x.c_i, sigsq_x.c_i = sigsq_x.c_i, xtilde_i = xtilde_i, sigsq_p_i = sigsq_p_i, sigsq_m_i = sigsq_m_i), hcubature_list)) # If integral 0, find region with density if (is.na(int.ii$integral) | int.ii$integral == 0) { limits <- seq(-1 
+ 1e-5, 1 - 1e-5, 1e-5) fs <- int.f_i1(x_i = limits, k_i = k_i, g_i = g_i, y_i = y_i, gc_i = gc_i, qg_i = qg_i, mu_x.c_i = mu_x.c_i, sigsq_x.c_i = sigsq_x.c_i, xtilde_i = xtilde_i, sigsq_p_i = sigsq_p_i, sigsq_m_i = sigsq_m_i) limits <- limits[fs > 0] if (length(limits) > 0) { limits <- c(max(-1, min(limits) - 1e-5), min(1, max(limits) + 1e-5)) int.ii <- do.call(hcubature, c(list(f = int.f_i1, lowerLimit = limits[1], upperLimit = limits[2], vectorInterface = TRUE, k_i = k_i, g_i = g_i, y_i = y_i, gc_i = gc_i, qg_i = qg_i, mu_x.c_i = mu_x.c_i, sigsq_x.c_i = sigsq_x.c_i, xtilde_i = xtilde_i, sigsq_p_i = sigsq_p_i, sigsq_m_i = sigsq_m_i), hcubature_list)) } } int.vals[ii] <- int.ii$integral # If integral 0, set skip.rest to TRUE to skip further LL calculations if (is.na(int.ii$integral) | int.ii$integral == 0) { print(paste("Integral is ", int.ii$integral, " for ii = ", ii, sep = "")) print(f.theta) skip.rest <- TRUE break } } ll.r <- sum(log(int.vals)) } } else { ll.r <- 0 } if (some.i & ! 
skip.rest) { # Likelihood for pools with single Xtilde: # L = f(Y,Xtilde|C) = [\int_X f(Y|X,C) f(X|Xtilde,C) dX] f(Xtilde|C) # = \int_X f(Y|X,C) f(Xtilde|X) f(X|C) dX # Create error vectors sigsq_p <- ifelse(y.i, f.sigsq_p1, f.sigsq_p0) * Ig.i sigsq_m <- ifelse(y.i, f.sigsq_m1, f.sigsq_m0) # Calculate E(X|C) and V(X|C) mu_x.c <- gc.i %*% f.alphas sigsq_x.c <- g.i * f.sigsq_x.c if (approx_integral) { # Probit approximation for logistic-normal integral # E(X,Xtilde|C) and V(X,Xtilde|C) Mu_xxtilde.c_1 <- mu_x.c Mu_xxtilde.c_2 <- mu_x.c Sigma_xxtilde.c_11 <- sigsq_x.c Sigma_xxtilde.c_12 <- sigsq_x.c if (constant_pe) { Sigma_xxtilde.c_22 <- g.i * f.sigsq_x.c + g.i^2 * sigsq_p + g.i^2 * sigsq_m } else { Sigma_xxtilde.c_22 <- g.i * f.sigsq_x.c + g.i^2 * g.i * sigsq_p + g.i^2 * sigsq_m } # E(X|Xtilde,C) and V(X|Xtilde,C) mu_x.xtildec <- Mu_xxtilde.c_1 + Sigma_xxtilde.c_12 / Sigma_xxtilde.c_22 * (xtilde.i - Mu_xxtilde.c_2) sigsq_x.xtildec <- Sigma_xxtilde.c_11 - Sigma_xxtilde.c_12^2 / Sigma_xxtilde.c_22 # Approximation of \int_x f(Y|X,C) f(X|Xtilde,C) dx if (some.cs) { t <- as.numeric(g.i * f.beta_0 + f.beta_x * mu_x.xtildec + c.i %*% f.beta_c + qg.i) / sqrt(1 + sigsq_x.xtildec * f.beta_x^2 / 1.7^2) } else { t <- (g.i * f.beta_0 + f.beta_x * mu_x.xtildec + qg.i) / sqrt(1 + sigsq_x.xtildec * f.beta_x^2 / 1.7^2) } p <- exp(t) / (1 + exp(t)) part1 <- dbinom(x = y.i, size = 1, prob = p, log = TRUE) # log[f(Xtilde|C)] part2 <- dnorm(x = xtilde.i, log = TRUE, mean = Mu_xxtilde.c_2, sd = sqrt(Sigma_xxtilde.c_22)) # Log-likelihood ll.vals <- part1 + part2 ll.i <- sum(ll.vals) } else { # Full likelihood # Function for integrating out X int.f_i2 <- function(g_i, y_i, x_i, gc_i, qg_i, mu_x.c_i, sigsq_x.c_i, xtilde_i, sigsq_p_i, sigsq_m_i) { # Transformation s_i <- x_i / (1 - x_i^2) # P(Y|X,C) p_y.xc <- (1 + exp(as.numeric(-gc_i %*% f.betas[-2, , drop = FALSE]) - s_i * f.beta_x - qg_i))^(-1) # E(Xtilde|X) and V(Xtilde|X) mu_xtilde.x <- s_i sigsq_xtilde.x <- g_i^2 * sigsq_p_i + g_i^2 * 
sigsq_m_i # f(Y,X,Xtilde|C) = f(Y|X,C) f(Xtilde|X) f(X|C) f_yx.xtildec <- dbinom(x = y_i, size = 1, prob = p_y.xc) * dnorm(x = xtilde_i, mean = mu_xtilde.x, sd = sqrt(sigsq_xtilde.x)) * dnorm(x = s_i, mean = mu_x.c_i, sd = sqrt(sigsq_x.c_i)) # Back-transformation out <- f_yx.xtildec * (1 + x_i^2) / (1 - x_i^2)^2 return(out) } int.vals <- c() for (ii in 1: length(xtilde.i)) { # Get values for ith participant g_i <- g.i[ii] y_i <- y.i[ii] gc_i <- gc.i[ii, ] qg_i <- qg.i[ii] mu_x.c_i <- mu_x.c[ii] sigsq_x.c_i <- sigsq_x.c[ii] xtilde_i <- xtilde.i[ii] sigsq_p_i <- sigsq_p[ii] sigsq_m_i <- sigsq_m[ii] # Try integrating out X_i with default settings int.ii <- do.call(hcubature, c(list(f = int.f_i2, lowerLimit = -1, upperLimit = 1, vectorInterface = TRUE, g_i = g_i, y_i = y_i, gc_i = gc_i, qg_i = qg_i, mu_x.c_i = mu_x.c_i, sigsq_x.c_i = sigsq_x.c_i, xtilde_i = xtilde_i, sigsq_p_i = sigsq_p_i, sigsq_m_i = sigsq_m_i), hcubature_list)) # If integral 0, find region with density if (is.na(int.ii$integral) | int.ii$integral == 0) { limits <- seq(-1 + 1e-5, 1 - 1e-5, 1e-5) fs <- int.f_i2(x_i = limits, g_i = g_i, y_i = y_i, gc_i = gc_i, qg_i = qg_i, mu_x.c_i = mu_x.c_i, sigsq_x.c_i = sigsq_x.c_i, xtilde_i = xtilde_i, sigsq_p_i = sigsq_p_i, sigsq_m_i = sigsq_m_i) limits <- limits[fs > 0] if (length(limits) > 0) { limits <- c(max(-1, min(limits) - 1e-5), min(1, max(limits) + 1e-5)) int.ii <- do.call(hcubature, c(list(f = int.f_i2, lowerLimit = -1, upperLimit = 1, vectorInterface = TRUE, g_i = g_i, y_i = y_i, gc_i = gc_i, qg_i = qg_i, mu_x.c_i = mu_x.c_i, sigsq_x.c_i = sigsq_x.c_i, xtilde_i = xtilde_i, sigsq_p_i = sigsq_p_i, sigsq_m_i = sigsq_m_i), hcubature_list)) } } int.vals[ii] <- int.ii$integral # If integral 0, set skip.rest to TRUE to skip further LL calculations if (is.na(int.ii$integral) | int.ii$integral == 0) { print(paste("Integral is ", int.ii$integral, " for ii = ", ii, sep = "")) print(f.theta) skip.rest <- TRUE break } } ll.i <- sum(log(int.vals)) } } else { ll.i <- 
0 } # Return negative log-likelihood ll <- ll.p + ll.r + ll.i return(-ll) } # Starting values if (is.null(nlminb_list$start)) { if (errors == "neither") { nlminb_list$start <- c(rep(start_nonvar_var[1], n.betas + n.alphas), start_nonvar_var[2]) } else if (errors == "processing") { nlminb_list$start <- c(rep(start_nonvar_var[1], n.betas + n.alphas), rep(start_nonvar_var[2], loc.sigsq_p0 - loc.sigsq_x.c + 1)) } else if (errors %in% c("measurement", "both")) { nlminb_list$start <- c(rep(start_nonvar_var[1], n.betas + n.alphas), rep(start_nonvar_var[2], loc.sigsq_m0 - loc.sigsq_x.c + 1)) } } names(nlminb_list$start) <- theta.labels # Lower bounds if (is.null(nlminb_list$lower)) { if (errors == "neither") { nlminb_list$lower <- c(rep(lower_nonvar_var[1], n.betas + n.alphas), lower_nonvar_var[2]) } else if (errors == "processing") { nlminb_list$lower <- c(rep(lower_nonvar_var[1], n.betas + n.alphas), rep(lower_nonvar_var[2], loc.sigsq_p0 - loc.sigsq_x.c + 1)) } else if (errors %in% c("measurement", "both")) { nlminb_list$lower <- c(rep(lower_nonvar_var[1], n.betas + n.alphas), rep(lower_nonvar_var[2], loc.sigsq_m0 - loc.sigsq_x.c + 1)) } } # Upper bounds if (is.null(nlminb_list$upper)) { if (errors == "neither") { nlminb_list$upper <- c(rep(upper_nonvar_var[1], n.betas + n.alphas), upper_nonvar_var[2]) } else if (errors == "processing") { nlminb_list$upper <- c(rep(upper_nonvar_var[1], n.betas + n.alphas), rep(upper_nonvar_var[2], loc.sigsq_p0 - loc.sigsq_x.c + 1)) } else if (errors %in% c("measurement", "both")) { nlminb_list$upper <- c(rep(upper_nonvar_var[1], n.betas + n.alphas), rep(upper_nonvar_var[2], loc.sigsq_m0 - loc.sigsq_x.c + 1)) } } if (is.null(nlminb_object)) { # Obtain ML estimates ml.max <- do.call(nlminb, c(list(objective = llf), nlminb_list)) # If non-convergence, try with jittered starting values if requested if (ml.max$convergence == 1) { if (! 
is.null(jitter_start)) { message("Trying jittered starting values...") nlminb_list$start <- nlminb_list$start + rnorm(n = length(nlminb_list$start), sd = jitter_start) ml.max2 <- do.call(nlminb, c(list(objective = llf), nlminb_list)) if (ml.max2$objective < ml.max$objective) ml.max <- ml.max2 } if (ml.max$convergence == 1) { message("Object returned by 'nlminb' function indicates non-convergence. You may want to try different starting values.") } } } else { ml.max <- nlminb_object } # Create list to return theta.hat <- ml.max$par names(theta.hat) <- theta.labels ret.list <- list(theta.hat = theta.hat) # If requested, add variance-covariance matrix to ret.list if (estimate_var) { # Estimate Hessian hessian.mat <- do.call(numDeriv::hessian, c(list(func = llf, x = theta.hat), hessian_list)) # Estimate variance-covariance matrix theta.variance <- try(solve(hessian.mat), silent = TRUE) if (class(theta.variance)[1] == "try-error" | sum(is.na(hessian.mat)) > 0) { print(hessian.mat) message("The estimated Hessian matrix (printed here) is singular, so variance-covariance matrix could not be obtained. You could try tweaking 'start_nonvar_var' or 'hessian_list' (e.g. increase 'r')") ret.list$theta.var <- NULL } else { colnames(theta.variance) <- rownames(theta.variance) <- theta.labels ret.list$theta.var <- theta.variance if (sum(diag(theta.variance) <= 0) > 0) { print(theta.variance) message("The estimated variance-covariance matrix (printed here) has some non-positive diagonal elements, so it may not be reliable. You could try tweaking 'start_nonvar_var' or 'hessian_list' (e.g. increase 'r')") } } } # Add nlminb object and AIC to ret.list ret.list$nlminb.object <- ml.max ret.list$aic <- 2 * (length(theta.hat) + ml.max$objective) # Return ret.list return(ret.list) }
/R/p_logreg_xerrors.R
no_license
vandomed/pooling
R
false
false
35,189
r
#' Poolwise Logistic Regression with Normal Exposure Subject to Errors #' #' Assumes normal linear model for exposure given covariates, and additive #' normal processing errors and measurement errors acting on the poolwise mean #' exposure. Manuscript fully describing the approach is under review. #' #' #' @param g Numeric vector with pool sizes, i.e. number of members in each pool. #' @param y Numeric vector with poolwise Y values, coded 0 if all members are #' controls and 1 if all members are cases. #' @param xtilde Numeric vector (or list of numeric vectors, if some pools have #' replicates) with Xtilde values. #' @param c Numeric matrix with poolwise \strong{C} values (if any), with one #' row for each pool. Can be a vector if there is only 1 covariate. #' @param errors Character string specifying the errors that X is subject to. #' Choices are \code{"neither"}, \code{"processing"} for processing error #' only, \code{"measurement"} for measurement error only, and \code{"both"}. #' @param nondiff_pe Logical value for whether to assume the processing error #' variance is non-differential, i.e. the same in case pools and control pools. #' @param nondiff_me Logical value for whether to assume the measurement error #' variance is non-differential, i.e. the same in case pools and control pools. #' @param constant_pe Logical value for whether to assume the processing error #' variance is constant with pool size. If \code{FALSE}, assumption is that #' processing error variance increase with pool size such that, for example, the #' processing error affecting a pool 2x as large as another has 2x the variance. #' @param prev Numeric value specifying disease prevalence, allowing #' for valid estimation of the intercept with case-control sampling. Can specify #' \code{samp_y1y0} instead if sampling rates are known. 
#' @param samp_y1y0 Numeric vector of length 2 specifying sampling probabilities #' for cases and controls, allowing for valid estimation of the intercept with #' case-control sampling. Can specify \code{prev} instead if it's easier. #' @param approx_integral Logical value for whether to use the probit #' approximation for the logistic-normal integral, to avoid numerically #' integrating X's out of the likelihood function. #' @param estimate_var Logical value for whether to return variance-covariance #' matrix for parameter estimates. #' @param start_nonvar_var Numeric vector of length 2 specifying starting value #' for non-variance terms and variance terms, respectively. #' @param lower_nonvar_var Numeric vector of length 2 specifying lower bound for #' non-variance terms and variance terms, respectively. #' @param upper_nonvar_var Numeric vector of length 2 specifying upper bound for #' non-variance terms and variance terms, respectively. #' @param jitter_start Numeric value specifying standard deviation for mean-0 #' normal jitters to add to starting values for a second try at maximizing the #' log-likelihood, should the initial call to \code{\link[stats]{nlminb}} result #' in non-convergence. Set to \code{NULL} for no second try. #' @param hcubature_list List of arguments to pass to #' \code{\link[cubature]{hcubature}} for numerical integration. Only used if #' \code{approx_integral = FALSE}. #' @param nlminb_list List of arguments to pass to \code{\link[stats]{nlminb}} #' for log-likelihood maximization. #' @param hessian_list List of arguments to pass to #' \code{\link[numDeriv]{hessian}} for approximating the Hessian matrix. Only #' used if \code{estimate_var = TRUE}. #' @param nlminb_object Object returned from \code{\link[stats]{nlminb}} in a #' prior call. Useful for bypassing log-likelihood maximization if you just want #' to re-estimate the Hessian matrix with different options. 
#' #' #' @return #' List containing: #' \enumerate{ #' \item Numeric vector of parameter estimates. #' \item Variance-covariance matrix (if \code{estimate_var = TRUE}). #' \item Returned \code{\link[stats]{nlminb}} object from maximizing the #' log-likelihood function. #' \item Akaike information criterion (AIC). #' } #' #' #' @references #' Schisterman, E.F., Vexler, A., Mumford, S.L. and Perkins, N.J. (2010) "Hybrid #' pooled-unpooled design for cost-efficient measurement of biomarkers." #' \emph{Stat. Med.} \strong{29}(5): 597--613. #' #' Weinberg, C.R. and Umbach, D.M. (1999) "Using pooled exposure assessment to #' improve efficiency in case-control studies." \emph{Biometrics} \strong{55}: #' 718--726. #' #' Weinberg, C.R. and Umbach, D.M. (2014) "Correction to 'Using pooled exposure #' assessment to improve efficiency in case-control studies' by Clarice R. #' Weinberg and David M. Umbach; 55, 718--726, September 1999." #' \emph{Biometrics} \strong{70}: 1061. #' #' #' @examples #' # Load dataset containing (Y, Xtilde, C) values for pools of size 1, 2, and #' # 3. Xtilde values are affected by processing error. #' data(pdat1) #' #' # Estimate log-OR for X and Y adjusted for C, ignoring processing error #' fit1 <- p_logreg_xerrors( #' g = pdat1$g, #' y = pdat1$allcases, #' xtilde = pdat1$xtilde, #' c = pdat1$c, #' errors = "neither" #' ) #' fit1$theta.hat #' #' # Repeat, but accounting for processing error. Closer to true log-OR of 0.5. 
#' fit2 <- p_logreg_xerrors( #' g = pdat1$g, #' y = pdat1$allcases, #' xtilde = pdat1$xtilde, #' c = pdat1$c, #' errors = "processing" #' ) #' fit2$theta.hat #' #' #' @export p_logreg_xerrors <- function( g, y, xtilde, c = NULL, errors = "processing", nondiff_pe = TRUE, nondiff_me = TRUE, constant_pe = TRUE, prev = NULL, samp_y1y0 = NULL, approx_integral = TRUE, estimate_var = TRUE, start_nonvar_var = c(0.01, 1), lower_nonvar_var = c(-Inf, 1e-4), upper_nonvar_var = c(Inf, Inf), jitter_start = 0.01, hcubature_list = list(tol = 1e-8), nlminb_list = list(control = list(trace = 1, eval.max = 500, iter.max = 500)), hessian_list = list(method.args = list(r = 4)), nlminb_object = NULL ) { # Check that inputs are valid if (! errors %in% c("neither", "processing", "measurement", "both")) { stop("The input 'errors' should be set to 'neither', 'processing', 'measurement', or 'both'.") } if (! is.logical(nondiff_pe)) { stop("The input 'nondiff_pe' should be TRUE if you want to assume non-differential processing error and FALSE otherwise.") } if (! is.logical(nondiff_me)) { stop("The input 'nondiff_me' should be TRUE if you want to assume non-differential measurement error and FALSE otherwise.") } if (! is.logical(constant_pe)) { stop("The input 'constant_pe' should be TRUE if you want to assume that processing error variance is constant with pool size and FALSE otherwise.") } if (! is.null(prev)) { if (prev < 0 | prev > 1) { stop("The input 'prev' is the disease prevalence, and must be between 0 and 1.") } } if (! is.null(samp_y1y0)) { if (! (length(samp_y1y0) == 2 & min(samp_y1y0) > 0 & max(samp_y1y0) < 1)) { stop("The input 'samp_y1y0' is the sampling probabilities for cases and controls, and should be a numeric vector of two probabilities.") } } if (! is.logical(approx_integral)) { stop("The input 'approx_integral' should be TRUE or FALSE.") } if (! is.logical(estimate_var)) { stop("The input 'estimate_var' should be TRUE or FALSE.") } if (! 
(is.numeric(start_nonvar_var) & length(start_nonvar_var) == 2)) { stop("The input 'start_nonvar_var' should be a numeric vector of length 2.") } if (! (is.numeric(lower_nonvar_var) & length(lower_nonvar_var) == 2)) { stop("The input 'lower_nonvar_var' should be a numeric vector of length 2.") } if (! (is.numeric(upper_nonvar_var) & length(upper_nonvar_var) == 2)) { stop("The input 'upper_nonvar_var' should be a numeric vector of length 2.") } if (! is.null(jitter_start) & jitter_start <= 0) { stop("The input 'jitter_start' should be a non-negative value, if specified.") } # Get name of xtilde input x.varname <- deparse(substitute(xtilde)) if (length(grep("$", x.varname, fixed = TRUE)) > 0) { x.varname <- substr(x.varname, start = which(unlist(strsplit(x.varname, "")) == "$") + 1, stop = nchar(x.varname)) } # Get information about covariates C if (is.null(c)) { c.varnames <- NULL n.cvars <- 0 some.cs <- FALSE } else { c.varname <- deparse(substitute(c)) if (! is.matrix(c)) { c <- as.matrix(c) } n.cvars <- ncol(c) some.cs <- TRUE c.varnames <- colnames(c) if (is.null(c.varnames)) { if (n.cvars == 1) { if (length(grep("$", c.varname, fixed = TRUE)) > 0) { c.varname <- substr(c.varname, start = which(unlist(strsplit(c.varname, "")) == "$") + 1, stop = nchar(c.varname)) } c.varnames <- c.varname } else { c.varnames <- paste("c", 1: n.cvars, sep = "") } } } # Get number of betas and alphas n.betas <- 2 + n.cvars n.alphas <- 1 + n.cvars # Create indicator vector I(g > 1) Ig <- ifelse(g > 1, 1, 0) # Calculate offsets according to Weinberg and Umbach formula, incorporating # disease prevalence or sampling probabilities if known n <- length(y) locs.cases <- which(y == 1) n_1 <- sum(g[locs.cases]) n_0 <- sum(g[-locs.cases]) g.vals <- unique(g) qg <- rep(NA, n) if (! 
is.null(prev)) { for (jj in 1: length(g.vals)) { g.jj <- g.vals[jj] locs.g <- which(g == g.jj) n.casepools <- sum(g == g.jj & y == 1) n.controlpools <- sum(g == g.jj & y == 0) qg[locs.g] <- log(n.casepools / n.controlpools) - g.jj * log(prev / (1 - prev)) } } else if (! is.null(samp_y1y0)) { for (jj in 1: length(g.vals)) { g.jj <- g.vals[jj] locs.g <- which(g == g.jj) n.casepools <- sum(g == g.jj & y == 1) n.controlpools <- sum(g == g.jj & y == 0) qg[locs.g] <- log(n.casepools / n.controlpools) - g.jj * log(n_1 / n_0) - g.jj * log(samp_y1y0[2] / samp_y1y0[1]) } } else { for (jj in 1: length(g.vals)) { g.jj <- g.vals[jj] locs.g <- which(g == g.jj) n.casepools <- sum(g == g.jj & y == 1) n.controlpools <- sum(g == g.jj & y == 0) qg[locs.g] <- log(n.casepools / n.controlpools) - g.jj * log(n_1 / n_0) } } # Separate out pools with precisely measured X if (errors == "neither") { which.p <- 1: n } else if (errors == "processing") { which.p <- which(Ig == 0) } else { which.p <- NULL } n.p <- length(which.p) some.p <- n.p > 0 if (some.p) { g.p <- g[which.p] y.p <- y[which.p] x.p <- unlist(xtilde[which.p]) c.p <- c[which.p, , drop = FALSE] qg.p <- qg[which.p] gxc.p <- cbind(g.p, x.p, c.p) gc.p <- gxc.p[, -2, drop = FALSE] } # Separate out pools with replicates class.xtilde <- class(xtilde) if (class.xtilde == "list") { k <- sapply(xtilde, length) which.r <- which(k > 1) n.r <- length(which.r) some.r <- n.r > 0 if (some.r) { k.r <- k[which.r] g.r <- g[which.r] Ig.r <- Ig[which.r] y.r <- y[which.r] c.r <- c[which.r, , drop = FALSE] qg.r <- qg[which.r] gc.r <- cbind(g.r, c.r) xtilde.r <- xtilde[which.r] } } else { k <- rep(1, n) some.r <- FALSE } # Separate out pools with single Xtilde if (errors == "neither") { which.i <- NULL } else if (errors == "processing") { which.i <- which(Ig == 1 & k == 1) } else if (errors %in% c("measurement", "both")) { which.i <- which(k == 1) } n.i <- length(which.i) some.i <- n.i > 0 if (some.i) { g.i <- g[which.i] Ig.i <- Ig[which.i] y.i <- 
y[which.i] c.i <- c[which.i, , drop = FALSE] qg.i <- qg[which.i] gc.i <- cbind(g.i, c.i) xtilde.i <- unlist(xtilde[which.i]) } # Estimate (alpha, sigsq_x.c, sigsq_p, sigsq_m) if pseudo-lik... # Get indices for parameters being estimated and create labels loc.betas <- 1: n.betas beta.labels <- paste("beta", c("0", x.varname, c.varnames), sep = "_") loc.alphas <- (n.betas + 1): (n.betas + n.alphas) alpha.labels <- paste("alpha", c("0", c.varnames), sep = "_") loc.sigsq_x.c <- n.betas + n.alphas + 1 if (errors == "neither") { theta.labels <- c(beta.labels, alpha.labels, "sigsq_x.c") } else if (errors == "processing") { if (! nondiff_pe) { loc.sigsq_p1 <- loc.sigsq_x.c + 1 loc.sigsq_p0 <- loc.sigsq_x.c + 2 theta.labels <- c(beta.labels, alpha.labels, "sigsq_x.c", "sigsq_p1", "sigsq_p0") } else { loc.sigsq_p1 <- loc.sigsq_p0 <- loc.sigsq_x.c + 1 theta.labels <- c(beta.labels, alpha.labels, "sigsq_x.c", "sigsq_p") } } else if (errors == "measurement") { if (! nondiff_me) { loc.sigsq_m1 <- loc.sigsq_x.c + 1 loc.sigsq_m0 <- loc.sigsq_x.c + 2 theta.labels <- c(beta.labels, alpha.labels, "sigsq_x.c", "sigsq_m1", "sigsq_m0") } else { loc.sigsq_m1 <- loc.sigsq_m0 <- loc.sigsq_x.c + 1 theta.labels <- c(beta.labels, alpha.labels, "sigsq_x.c", "sigsq_m") } } else if (errors == "both") { if (! nondiff_pe & ! nondiff_me) { loc.sigsq_p1 <- loc.sigsq_x.c + 1 loc.sigsq_p0 <- loc.sigsq_x.c + 2 loc.sigsq_m1 <- loc.sigsq_x.c + 3 loc.sigsq_m0 <- loc.sigsq_x.c + 4 theta.labels <- c(beta.labels, alpha.labels, "sigsq_x.c", "sigsq_p1", "sigsq_p0", "sigsq_m1", "sigsq_m0") } else if (! nondiff_pe & nondiff_me) { loc.sigsq_p1 <- loc.sigsq_x.c + 1 loc.sigsq_p0 <- loc.sigsq_x.c + 2 loc.sigsq_m1 <- loc.sigsq_m0 <- loc.sigsq_x.c + 3 theta.labels <- c(beta.labels, alpha.labels, "sigsq_x.c", "sigsq_p1", "sigsq_p0", "sigsq_m") } else if (nondiff_pe & ! 
nondiff_me) { loc.sigsq_p1 <- loc.sigsq_p0 <- loc.sigsq_x.c + 1 loc.sigsq_m1 <- loc.sigsq_x.c + 2 loc.sigsq_m0 <- loc.sigsq_x.c + 3 theta.labels <- c(beta.labels, alpha.labels, "sigsq_x.c", "sigsq_p", "sigsq_m1", "sigsq_m0") } else if (nondiff_pe & nondiff_me) { loc.sigsq_p1 <- loc.sigsq_p0 <- loc.sigsq_x.c + 1 loc.sigsq_m1 <- loc.sigsq_m0 <- loc.sigsq_x.c + 2 theta.labels <- c(beta.labels, alpha.labels, "sigsq_x.c", "sigsq_p", "sigsq_m") } } # Log-likelihood function llf <- function(f.theta) { # Extract parameters f.betas <- matrix(f.theta[loc.betas], ncol = 1) f.beta_0 <- f.betas[1] f.beta_x <- f.betas[2] f.beta_c <- matrix(f.betas[-c(1: 2)], ncol = 1) f.alphas <- matrix(f.theta[loc.alphas], ncol = 1) f.alpha_0 <- f.alphas[1] f.alpha_c <- matrix(f.alphas[-1], ncol = 1) f.sigsq_x.c <- f.theta[loc.sigsq_x.c] if (errors == "neither") { f.sigsq_p1 <- f.sigsq_p0 <- f.sigsq_m1 <- f.sigsq_m0 <- 0 } if (errors %in% c("processing", "both")) { f.sigsq_p1 <- f.theta[loc.sigsq_p1] f.sigsq_p0 <- f.theta[loc.sigsq_p0] } else { f.sigsq_p1 <- f.sigsq_p0 <- 0 } if (errors %in% c("measurement", "both")) { f.sigsq_m1 <- f.theta[loc.sigsq_m1] f.sigsq_m0 <- f.theta[loc.sigsq_m0] } else { f.sigsq_m1 <- f.sigsq_m0 <- 0 } if (some.p) { # Likelihood for pools with precisely measured X: # L = f(Y|X,C) f(X|C) # P(Y|X,C) eta <- gxc.p %*% f.betas + qg.p p_y.xc <- (1 + exp(-eta))^(-1) # E(X|C) and V(X|C) mu_x.c <- gc.p %*% f.alphas sigsq_x.c <- g.p * f.sigsq_x.c # Log-likelihood ll.p <- sum(dbinom(x = y.p, size = 1, prob = p_y.xc, log = TRUE) + dnorm(x = x.p, mean = mu_x.c, sd = sqrt(sigsq_x.c), log = TRUE)) } else { ll.p <- 0 } # Set skip.rest flag to FALSE skip.rest <- FALSE if (some.r) { # Likelihood for pools with replicates # L = f(Y, Xtilde|C) # = [\int_X f(Y|X,C) f(X|Xtilde,C) dX] f(Xtilde|C) # = int_X f(Y|X,C) f(Xtilde|X) f(X|C) dX # Create error vectors sigsq_p <- ifelse(y.r, f.sigsq_p1, f.sigsq_p0) * Ig.r sigsq_m <- ifelse(y.r, f.sigsq_m1, f.sigsq_m0) # Calculate E(X|C) and V(X|C) 
mu_x.c <- gc.r %*% f.alphas sigsq_x.c <- g.r * f.sigsq_x.c if (approx_integral) { # Probit approximation for logistic-normal integral ll.vals <- c() for (ii in 1: length(xtilde.r)) { # Values for ith subject k_i <- k.r[ii] g_i <- g.r[ii] y_i <- y.r[ii] c_i <- c.r[ii, ] qg_i <- qg.r[ii] mu_x.c_i <- mu_x.c[ii] sigsq_x.c_i <- sigsq_x.c[ii] xtilde_i <- xtilde.r[[ii]] sigsq_p_i <- sigsq_p[ii] sigsq_m_i <- sigsq_m[ii] # E(X|Xtilde,C) and V(X|Xtilde,C) Mu_xxtilde.c <- matrix(mu_x.c_i, nrow = k_i + 1) Sigma_xxtilde.c_11 <- sigsq_x.c_i Sigma_xxtilde.c_12 <- matrix(sigsq_x.c_i, ncol = k_i) Sigma_xxtilde.c_21 <- t(Sigma_xxtilde.c_12) Sigma_xxtilde.c_22 <- g_i * f.sigsq_x.c + g_i^2 * ifelse(constant_pe, 1, g_i) * sigsq_p_i + diag(x = g_i^2 * sigsq_m_i, ncol = k_i, nrow = k_i) mu_x.xtildec <- Mu_xxtilde.c[1] + Sigma_xxtilde.c_12 %*% solve(Sigma_xxtilde.c_22) %*% (xtilde_i - Mu_xxtilde.c[-1]) sigsq_x.xtildec <- Sigma_xxtilde.c_11 - Sigma_xxtilde.c_12 %*% solve(Sigma_xxtilde.c_22) %*% Sigma_xxtilde.c_21 # Approximation of \int_X f(Y|X,C) f(X|Xtilde,C) dX if (some.cs) { t <- (g_i * f.beta_0 + f.beta_x * mu_x.xtildec + c_i %*% f.beta_c + qg_i) / sqrt(1 + sigsq_x.xtildec * f.beta_x^2 / 1.7^2) } else { t <- (g_i * f.beta_0 + f.beta_x * mu_x.xtildec + qg_i) / sqrt(1 + sigsq_x.xtildec * f.beta_x^2 / 1.7^2) } p <- exp(t) / (1 + exp(t)) part1 <- dbinom(x = y_i, size = 1, prob = p, log = TRUE) # log[f(Xtilde|C)] if (k_i == 2) { mu_xtilde1.xtilde2c <- mu_x.c_i + Sigma_xxtilde.c_22[1, 2] / Sigma_xxtilde.c_22[2, 2] * (xtilde_i[2] - mu_x.c_i) sigsq_xtilde1.xtilde2c <- Sigma_xxtilde.c_22[1, 1] - Sigma_xxtilde.c_22[1, 2]^2 / Sigma_xxtilde.c_22[2, 2] part2 <- sum(dnorm(x = xtilde_i, log = TRUE, mean = c(mu_xtilde1.xtilde2c, mu_x.c_i), sd = sqrt(c(sigsq_xtilde1.xtilde2c, Sigma_xxtilde.c_22[2, 2])))) } else { part2 <- dmvnorm(x = xtilde_i, log = TRUE, mean = Mu_xxtilde.c[-1], sigma = Sigma_xxtilde.c_22) } # Log-likelihood ll.vals[ii] <- part1 + part2 } ll.r <- sum(ll.vals) } else { # Full 
likelihood # Function for integrating out X's int.f_i1 <- function(k_i, g_i, y_i, x_i, gc_i, qg_i, mu_x.c_i, sigsq_x.c_i, xtilde_i, sigsq_p_i, sigsq_m_i) { x_i <- matrix(x_i, nrow = 1) f_yxtildex.c <- apply(x_i, 2, function(z) { # Transformation s_i <- z / (1 - z^2) # P(Y_i|X_i^*,C_i^*) p_y.xc <- (1 + exp(-as.numeric(gc_i %*% f.betas[-2, , drop = FALSE]) - s_i * f.beta_x - qg_i))^(-1) if (g_i == 1) { # f(Y,X,Xtilde|C) = f(Y|X,C) f(Xtilde1|Xtilde2,X) f(Xtilde2|X) f(X|C) dbinom(x = y_i, size = 1, prob = p_y.xc) * prod(dnorm(x = xtilde_i, mean = s_i, sd = sqrt(sigsq_m_i))) * dnorm(x = s_i, mean = mu_x.c_i, sd = sqrt(sigsq_x.c_i)) } else { # E(Xtilde|X) and V(Xtilde|X) Mu_xtilde.x <- rep(s_i, k_i) # Sigma_xtilde.x <- g_i^2 * sigsq_p_i + # diag(x = g_i^2 * sigsq_m_i, ncol = k_i, nrow = k_i) Sigma_xtilde.x <- g_i^2 * ifelse(constant_pe, 1, g_i) * sigsq_p_i + diag(x = g_i^2 * sigsq_m_i, ncol = k_i, nrow = k_i) # f(Y,X,Xtilde|C) = f(Y|X,C) f(Xtilde|X) f(X|C) dbinom(x = y_i, size = 1, prob = p_y.xc) * dmvnorm(x = xtilde_i, mean = Mu_xtilde.x, sigma = Sigma_xtilde.x) * dnorm(x = s_i, mean = mu_x.c_i, sd = sqrt(sigsq_x.c_i)) } }) # Back-transformation out <- matrix(f_yxtildex.c * (1 + x_i^2) / (1 - x_i^2)^2, ncol = ncol(x_i)) return(out) } int.vals <- c() for (ii in 1: length(xtilde.r)) { # Get values for ith participant k_i <- k.r[ii] g_i <- g.r[ii] y_i <- y.r[ii] gc_i <- gc.r[ii, ] qg_i <- qg.r[ii] mu_x.c_i <- mu_x.c[ii] sigsq_x.c_i <- sigsq_x.c[ii] xtilde_i <- xtilde.r[[ii]] sigsq_p_i <- sigsq_p[ii] sigsq_m_i <- sigsq_m[ii] # Try integrating out X with default settings int.ii <- do.call(hcubature, c(list(f = int.f_i1, lowerLimit = -1, upperLimit = 1, vectorInterface = TRUE, k_i = k_i, g_i = g_i, y_i = y_i, gc_i = gc_i, qg_i = qg_i, mu_x.c_i = mu_x.c_i, sigsq_x.c_i = sigsq_x.c_i, xtilde_i = xtilde_i, sigsq_p_i = sigsq_p_i, sigsq_m_i = sigsq_m_i), hcubature_list)) # If integral 0, find region with density if (is.na(int.ii$integral) | int.ii$integral == 0) { limits <- seq(-1 
+ 1e-5, 1 - 1e-5, 1e-5) fs <- int.f_i1(x_i = limits, k_i = k_i, g_i = g_i, y_i = y_i, gc_i = gc_i, qg_i = qg_i, mu_x.c_i = mu_x.c_i, sigsq_x.c_i = sigsq_x.c_i, xtilde_i = xtilde_i, sigsq_p_i = sigsq_p_i, sigsq_m_i = sigsq_m_i) limits <- limits[fs > 0] if (length(limits) > 0) { limits <- c(max(-1, min(limits) - 1e-5), min(1, max(limits) + 1e-5)) int.ii <- do.call(hcubature, c(list(f = int.f_i1, lowerLimit = limits[1], upperLimit = limits[2], vectorInterface = TRUE, k_i = k_i, g_i = g_i, y_i = y_i, gc_i = gc_i, qg_i = qg_i, mu_x.c_i = mu_x.c_i, sigsq_x.c_i = sigsq_x.c_i, xtilde_i = xtilde_i, sigsq_p_i = sigsq_p_i, sigsq_m_i = sigsq_m_i), hcubature_list)) } } int.vals[ii] <- int.ii$integral # If integral 0, set skip.rest to TRUE to skip further LL calculations if (is.na(int.ii$integral) | int.ii$integral == 0) { print(paste("Integral is ", int.ii$integral, " for ii = ", ii, sep = "")) print(f.theta) skip.rest <- TRUE break } } ll.r <- sum(log(int.vals)) } } else { ll.r <- 0 } if (some.i & ! 
skip.rest) { # Likelihood for pools with single Xtilde: # L = f(Y,Xtilde|C) = [\int_X f(Y|X,C) f(X|Xtilde,C) dX] f(Xtilde|C) # = \int_X f(Y|X,C) f(Xtilde|X) f(X|C) dX # Create error vectors sigsq_p <- ifelse(y.i, f.sigsq_p1, f.sigsq_p0) * Ig.i sigsq_m <- ifelse(y.i, f.sigsq_m1, f.sigsq_m0) # Calculate E(X|C) and V(X|C) mu_x.c <- gc.i %*% f.alphas sigsq_x.c <- g.i * f.sigsq_x.c if (approx_integral) { # Probit approximation for logistic-normal integral # E(X,Xtilde|C) and V(X,Xtilde|C) Mu_xxtilde.c_1 <- mu_x.c Mu_xxtilde.c_2 <- mu_x.c Sigma_xxtilde.c_11 <- sigsq_x.c Sigma_xxtilde.c_12 <- sigsq_x.c if (constant_pe) { Sigma_xxtilde.c_22 <- g.i * f.sigsq_x.c + g.i^2 * sigsq_p + g.i^2 * sigsq_m } else { Sigma_xxtilde.c_22 <- g.i * f.sigsq_x.c + g.i^2 * g.i * sigsq_p + g.i^2 * sigsq_m } # E(X|Xtilde,C) and V(X|Xtilde,C) mu_x.xtildec <- Mu_xxtilde.c_1 + Sigma_xxtilde.c_12 / Sigma_xxtilde.c_22 * (xtilde.i - Mu_xxtilde.c_2) sigsq_x.xtildec <- Sigma_xxtilde.c_11 - Sigma_xxtilde.c_12^2 / Sigma_xxtilde.c_22 # Approximation of \int_x f(Y|X,C) f(X|Xtilde,C) dx if (some.cs) { t <- as.numeric(g.i * f.beta_0 + f.beta_x * mu_x.xtildec + c.i %*% f.beta_c + qg.i) / sqrt(1 + sigsq_x.xtildec * f.beta_x^2 / 1.7^2) } else { t <- (g.i * f.beta_0 + f.beta_x * mu_x.xtildec + qg.i) / sqrt(1 + sigsq_x.xtildec * f.beta_x^2 / 1.7^2) } p <- exp(t) / (1 + exp(t)) part1 <- dbinom(x = y.i, size = 1, prob = p, log = TRUE) # log[f(Xtilde|C)] part2 <- dnorm(x = xtilde.i, log = TRUE, mean = Mu_xxtilde.c_2, sd = sqrt(Sigma_xxtilde.c_22)) # Log-likelihood ll.vals <- part1 + part2 ll.i <- sum(ll.vals) } else { # Full likelihood # Function for integrating out X int.f_i2 <- function(g_i, y_i, x_i, gc_i, qg_i, mu_x.c_i, sigsq_x.c_i, xtilde_i, sigsq_p_i, sigsq_m_i) { # Transformation s_i <- x_i / (1 - x_i^2) # P(Y|X,C) p_y.xc <- (1 + exp(as.numeric(-gc_i %*% f.betas[-2, , drop = FALSE]) - s_i * f.beta_x - qg_i))^(-1) # E(Xtilde|X) and V(Xtilde|X) mu_xtilde.x <- s_i sigsq_xtilde.x <- g_i^2 * sigsq_p_i + g_i^2 * 
sigsq_m_i # f(Y,X,Xtilde|C) = f(Y|X,C) f(Xtilde|X) f(X|C) f_yx.xtildec <- dbinom(x = y_i, size = 1, prob = p_y.xc) * dnorm(x = xtilde_i, mean = mu_xtilde.x, sd = sqrt(sigsq_xtilde.x)) * dnorm(x = s_i, mean = mu_x.c_i, sd = sqrt(sigsq_x.c_i)) # Back-transformation out <- f_yx.xtildec * (1 + x_i^2) / (1 - x_i^2)^2 return(out) } int.vals <- c() for (ii in 1: length(xtilde.i)) { # Get values for ith participant g_i <- g.i[ii] y_i <- y.i[ii] gc_i <- gc.i[ii, ] qg_i <- qg.i[ii] mu_x.c_i <- mu_x.c[ii] sigsq_x.c_i <- sigsq_x.c[ii] xtilde_i <- xtilde.i[ii] sigsq_p_i <- sigsq_p[ii] sigsq_m_i <- sigsq_m[ii] # Try integrating out X_i with default settings int.ii <- do.call(hcubature, c(list(f = int.f_i2, lowerLimit = -1, upperLimit = 1, vectorInterface = TRUE, g_i = g_i, y_i = y_i, gc_i = gc_i, qg_i = qg_i, mu_x.c_i = mu_x.c_i, sigsq_x.c_i = sigsq_x.c_i, xtilde_i = xtilde_i, sigsq_p_i = sigsq_p_i, sigsq_m_i = sigsq_m_i), hcubature_list)) # If integral 0, find region with density if (is.na(int.ii$integral) | int.ii$integral == 0) { limits <- seq(-1 + 1e-5, 1 - 1e-5, 1e-5) fs <- int.f_i2(x_i = limits, g_i = g_i, y_i = y_i, gc_i = gc_i, qg_i = qg_i, mu_x.c_i = mu_x.c_i, sigsq_x.c_i = sigsq_x.c_i, xtilde_i = xtilde_i, sigsq_p_i = sigsq_p_i, sigsq_m_i = sigsq_m_i) limits <- limits[fs > 0] if (length(limits) > 0) { limits <- c(max(-1, min(limits) - 1e-5), min(1, max(limits) + 1e-5)) int.ii <- do.call(hcubature, c(list(f = int.f_i2, lowerLimit = -1, upperLimit = 1, vectorInterface = TRUE, g_i = g_i, y_i = y_i, gc_i = gc_i, qg_i = qg_i, mu_x.c_i = mu_x.c_i, sigsq_x.c_i = sigsq_x.c_i, xtilde_i = xtilde_i, sigsq_p_i = sigsq_p_i, sigsq_m_i = sigsq_m_i), hcubature_list)) } } int.vals[ii] <- int.ii$integral # If integral 0, set skip.rest to TRUE to skip further LL calculations if (is.na(int.ii$integral) | int.ii$integral == 0) { print(paste("Integral is ", int.ii$integral, " for ii = ", ii, sep = "")) print(f.theta) skip.rest <- TRUE break } } ll.i <- sum(log(int.vals)) } } else { ll.i <- 
0 } # Return negative log-likelihood ll <- ll.p + ll.r + ll.i return(-ll) } # Starting values if (is.null(nlminb_list$start)) { if (errors == "neither") { nlminb_list$start <- c(rep(start_nonvar_var[1], n.betas + n.alphas), start_nonvar_var[2]) } else if (errors == "processing") { nlminb_list$start <- c(rep(start_nonvar_var[1], n.betas + n.alphas), rep(start_nonvar_var[2], loc.sigsq_p0 - loc.sigsq_x.c + 1)) } else if (errors %in% c("measurement", "both")) { nlminb_list$start <- c(rep(start_nonvar_var[1], n.betas + n.alphas), rep(start_nonvar_var[2], loc.sigsq_m0 - loc.sigsq_x.c + 1)) } } names(nlminb_list$start) <- theta.labels # Lower bounds if (is.null(nlminb_list$lower)) { if (errors == "neither") { nlminb_list$lower <- c(rep(lower_nonvar_var[1], n.betas + n.alphas), lower_nonvar_var[2]) } else if (errors == "processing") { nlminb_list$lower <- c(rep(lower_nonvar_var[1], n.betas + n.alphas), rep(lower_nonvar_var[2], loc.sigsq_p0 - loc.sigsq_x.c + 1)) } else if (errors %in% c("measurement", "both")) { nlminb_list$lower <- c(rep(lower_nonvar_var[1], n.betas + n.alphas), rep(lower_nonvar_var[2], loc.sigsq_m0 - loc.sigsq_x.c + 1)) } } # Upper bounds if (is.null(nlminb_list$upper)) { if (errors == "neither") { nlminb_list$upper <- c(rep(upper_nonvar_var[1], n.betas + n.alphas), upper_nonvar_var[2]) } else if (errors == "processing") { nlminb_list$upper <- c(rep(upper_nonvar_var[1], n.betas + n.alphas), rep(upper_nonvar_var[2], loc.sigsq_p0 - loc.sigsq_x.c + 1)) } else if (errors %in% c("measurement", "both")) { nlminb_list$upper <- c(rep(upper_nonvar_var[1], n.betas + n.alphas), rep(upper_nonvar_var[2], loc.sigsq_m0 - loc.sigsq_x.c + 1)) } } if (is.null(nlminb_object)) { # Obtain ML estimates ml.max <- do.call(nlminb, c(list(objective = llf), nlminb_list)) # If non-convergence, try with jittered starting values if requested if (ml.max$convergence == 1) { if (! 
is.null(jitter_start)) { message("Trying jittered starting values...") nlminb_list$start <- nlminb_list$start + rnorm(n = length(nlminb_list$start), sd = jitter_start) ml.max2 <- do.call(nlminb, c(list(objective = llf), nlminb_list)) if (ml.max2$objective < ml.max$objective) ml.max <- ml.max2 } if (ml.max$convergence == 1) { message("Object returned by 'nlminb' function indicates non-convergence. You may want to try different starting values.") } } } else { ml.max <- nlminb_object } # Create list to return theta.hat <- ml.max$par names(theta.hat) <- theta.labels ret.list <- list(theta.hat = theta.hat) # If requested, add variance-covariance matrix to ret.list if (estimate_var) { # Estimate Hessian hessian.mat <- do.call(numDeriv::hessian, c(list(func = llf, x = theta.hat), hessian_list)) # Estimate variance-covariance matrix theta.variance <- try(solve(hessian.mat), silent = TRUE) if (class(theta.variance)[1] == "try-error" | sum(is.na(hessian.mat)) > 0) { print(hessian.mat) message("The estimated Hessian matrix (printed here) is singular, so variance-covariance matrix could not be obtained. You could try tweaking 'start_nonvar_var' or 'hessian_list' (e.g. increase 'r')") ret.list$theta.var <- NULL } else { colnames(theta.variance) <- rownames(theta.variance) <- theta.labels ret.list$theta.var <- theta.variance if (sum(diag(theta.variance) <= 0) > 0) { print(theta.variance) message("The estimated variance-covariance matrix (printed here) has some non-positive diagonal elements, so it may not be reliable. You could try tweaking 'start_nonvar_var' or 'hessian_list' (e.g. increase 'r')") } } } # Add nlminb object and AIC to ret.list ret.list$nlminb.object <- ml.max ret.list$aic <- 2 * (length(theta.hat) + ml.max$objective) # Return ret.list return(ret.list) }
library(ggplot2) response <- read.csv(file.choose(), na.strings='') response <- na.omit(response) list<- c("Opera", "Romantic", "Shopping", "Spiders", "Life.struggles", "Age", "Gender", "Left...right.handed", "Only.child","Village...town", "House...block.of.flats") summary(response[list]) #analyses + viz------------------------------------------ #1 m1 <- lm(Age~Opera,response) summary(m1) temp <- aggregate(Opera ~ Age, response, mean) ggplot(temp, aes(x =Age, y = Opera)) + geom_point() + geom_smooth(method = "lm") # There was a significant (p < 0.05) relationship between age and the enjoyment # of opera. The relationship between age and the enjoyment of opera is positive. # As age goes on, the level of how they enjoy opera goes up. So older people tend # to enjoy opera more than younger people. #2 m2 <- aov(Spiders~Gender, data = response) summary(m2) ggplot(response, aes(x = Gender, y = Spiders)) + geom_boxplot(aes(fill = Gender)) # There was a statistically significant relationship between gender and the fears # of spider(p<0.05). Female had a significantly higher fears of spider compared to # male, which really surprised me. #3 life1 <- response[response$Left...right.handed == "left handed","Life.struggles"] life2 <- response[response$Left...right.handed == "right handed","Life.struggles"] t.test(life1,life2,var.equal = T) ggplot(response, aes(x = Left...right.handed, y = Life.struggles)) + geom_boxplot(aes(fill = Left...right.handed)) + xlab("Left or right handed") + ylab("Life struggle")+ labs(fill = "Left or right handed") # The result shows that there was no significant difference of life struggles between # two groups of people(p>0.05). In contrast, the distribution of life struggle level # was really similar in tow groups. 
#4 m4 <- glm(Gender ~ Shopping + Romantic, data = response, family = "binomial") summary(m4) predict <- predict(m4, type = 'response') table(response$Gender, predict > 0.5) ggplot(response, aes(Shopping, Romantic)) + geom_jitter(aes(color = Gender)) # FALSE TRUE # FALSE 335 67 # TRUE 123 149 # Overall, this model performed relatively well. Using the # interest in shopping and romantic movies, the model was able # to predict 484 out of 674 genders in the dataset correctly. # The accuracy is about 70%, and it seems to have higher accuracy # in female than male. #5 t <- xtabs(~Only.child+Village...town, data = response) chisq.test(t) ggplot(response, aes(Village...town, fill = House...block.of.flats)) + geom_bar() + facet_grid(~Only.child) + xlab("City or village") + labs(fill = "House or Block of flats") + ggtitle("Only child vs City or Village") # Being an only child is not significantly related to spending most # of childhood time in city or village. However, the bar chart tells # that people spending most of childhood time in city tend to live # most of childhood in a block of flats more than house or bungalow, # and people spending most of childhood time in village tend to live # most of childhood in house or bungalow more than a block of flats, # which makes sense.
/Project.R
no_license
cassielo/Exploring-Young-People
R
false
false
3,153
r
library(ggplot2) response <- read.csv(file.choose(), na.strings='') response <- na.omit(response) list<- c("Opera", "Romantic", "Shopping", "Spiders", "Life.struggles", "Age", "Gender", "Left...right.handed", "Only.child","Village...town", "House...block.of.flats") summary(response[list]) #analyses + viz------------------------------------------ #1 m1 <- lm(Age~Opera,response) summary(m1) temp <- aggregate(Opera ~ Age, response, mean) ggplot(temp, aes(x =Age, y = Opera)) + geom_point() + geom_smooth(method = "lm") # There was a significant (p < 0.05) relationship between age and the enjoyment # of opera. The relationship between age and the enjoyment of opera is positive. # As age goes on, the level of how they enjoy opera goes up. So older people tend # to enjoy opera more than younger people. #2 m2 <- aov(Spiders~Gender, data = response) summary(m2) ggplot(response, aes(x = Gender, y = Spiders)) + geom_boxplot(aes(fill = Gender)) # There was a statistically significant relationship between gender and the fears # of spider(p<0.05). Female had a significantly higher fears of spider compared to # male, which really surprised me. #3 life1 <- response[response$Left...right.handed == "left handed","Life.struggles"] life2 <- response[response$Left...right.handed == "right handed","Life.struggles"] t.test(life1,life2,var.equal = T) ggplot(response, aes(x = Left...right.handed, y = Life.struggles)) + geom_boxplot(aes(fill = Left...right.handed)) + xlab("Left or right handed") + ylab("Life struggle")+ labs(fill = "Left or right handed") # The result shows that there was no significant difference of life struggles between # two groups of people(p>0.05). In contrast, the distribution of life struggle level # was really similar in tow groups. 
#4 m4 <- glm(Gender ~ Shopping + Romantic, data = response, family = "binomial") summary(m4) predict <- predict(m4, type = 'response') table(response$Gender, predict > 0.5) ggplot(response, aes(Shopping, Romantic)) + geom_jitter(aes(color = Gender)) # FALSE TRUE # FALSE 335 67 # TRUE 123 149 # Overall, this model performed relatively well. Using the # interest in shopping and romantic movies, the model was able # to predict 484 out of 674 genders in the dataset correctly. # The accuracy is about 70%, and it seems to have higher accuracy # in female than male. #5 t <- xtabs(~Only.child+Village...town, data = response) chisq.test(t) ggplot(response, aes(Village...town, fill = House...block.of.flats)) + geom_bar() + facet_grid(~Only.child) + xlab("City or village") + labs(fill = "House or Block of flats") + ggtitle("Only child vs City or Village") # Being an only child is not significantly related to spending most # of childhood time in city or village. However, the bar chart tells # that people spending most of childhood time in city tend to live # most of childhood in a block of flats more than house or bungalow, # and people spending most of childhood time in village tend to live # most of childhood in house or bungalow more than a block of flats, # which makes sense.
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/rotate.R \name{click_rotate} \alias{click_rotate} \alias{click_rotate.default} \alias{click_rotate.dendrogram} \title{Interactively rotate a tree object} \usage{ click_rotate(x, ...) \method{click_rotate}{dendrogram}(x, plot = TRUE, plot_after = plot, horiz = FALSE, continue = FALSE, ...) } \arguments{ \item{x}{a tree object (either a \code{dendrogram} or \code{hclust})} \item{...}{parameters passed to the plot} \item{plot}{(logical) should the dendrogram first be plotted.} \item{plot_after}{(logical) should the dendrogram be plotted after the rotation?} \item{horiz}{logical. Should the plot be normal or horizontal?} \item{continue}{logical. If TRUE, allows the user to keep clicking the plot until a click is made on the labels.} } \value{ A rotated tree object } \description{ Lets te user click a plot of dendrogram and rotates the tree based on the location of the click. Code for mouse selection of (sub-)cluster to be rotated } \examples{ # create the dend: dend <- USArrests \%>\% dist \%>\% hclust("ave") \%>\% as.dendrogram \%>\% color_labels \dontrun{ # play with the rotation once dend <- click_rotate(dend) dend <- click_rotate(dend, horiz = TRUE) # keep playing with the rotation: while(TRUE) dend <- click_rotate(dend) # the same as dend <- click_rotate(dend, continue = TRUE) } } \author{ Andrej-Nikolai Spiess, Tal Galili } \seealso{ \code{\link{rotate.dendrogram}} }
/man/click_rotate.Rd
no_license
CathG/dendextend
R
false
true
1,482
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/rotate.R \name{click_rotate} \alias{click_rotate} \alias{click_rotate.default} \alias{click_rotate.dendrogram} \title{Interactively rotate a tree object} \usage{ click_rotate(x, ...) \method{click_rotate}{dendrogram}(x, plot = TRUE, plot_after = plot, horiz = FALSE, continue = FALSE, ...) } \arguments{ \item{x}{a tree object (either a \code{dendrogram} or \code{hclust})} \item{...}{parameters passed to the plot} \item{plot}{(logical) should the dendrogram first be plotted.} \item{plot_after}{(logical) should the dendrogram be plotted after the rotation?} \item{horiz}{logical. Should the plot be normal or horizontal?} \item{continue}{logical. If TRUE, allows the user to keep clicking the plot until a click is made on the labels.} } \value{ A rotated tree object } \description{ Lets te user click a plot of dendrogram and rotates the tree based on the location of the click. Code for mouse selection of (sub-)cluster to be rotated } \examples{ # create the dend: dend <- USArrests \%>\% dist \%>\% hclust("ave") \%>\% as.dendrogram \%>\% color_labels \dontrun{ # play with the rotation once dend <- click_rotate(dend) dend <- click_rotate(dend, horiz = TRUE) # keep playing with the rotation: while(TRUE) dend <- click_rotate(dend) # the same as dend <- click_rotate(dend, continue = TRUE) } } \author{ Andrej-Nikolai Spiess, Tal Galili } \seealso{ \code{\link{rotate.dendrogram}} }
\name{wmlung5070} \alias{wmlung5070} \docType{data} \title{ Lung cancer mortality data for white males, 1950-69 and 1970-94 } \description{ Counts and rates of lung cancer mortality data among white men by state, aggregated for 1950-1969 and 1970-1994 } \usage{wmlung5070} \format{ A data frame with 51 observations, 1 for each state + DC, on the following 5 variables. \describe{ \item{RATEWM_50}{a numeric vector, state age-adjusted rates during 1950-69} \item{COUNTWM_50}{a numeric vector, the number of lung cancer deaths during 1950-69} \item{RATEWM_70}{a numeric vector, state age-adjusted rates during 1970-94} \item{COUNTWM_70}{a numeric vector, the number of lung cancer deaths during 1970-94} \item{PERCENT}{a numeric vector of the percent change in rate from 1950-69 to 1970-94} } } \details{ The rates on this file are directly age adjusted to the US 1970 standard population and are expressed as the number of deaths per 100,000 person-years. The row names are the 2 character postal codes for the states. Note that the data currently available on the NCI web site are from a later data submission and so may differ slightly (in first decimal place) from the rates provided here due to corrections to the dataset after its first publication. The name of each row is the state abbreviation - 2 characters. This dataset is used by the \var{micromapSEER} examples using the border group of "USStatesDF". } \source{ Surveillance Research Program, National Cancer Institute SEER*Stat software (\url{http://www.seer.cancer.gov/seerstat}), November 2007 data submission, released April 2008. Data originally provided to NCI by the National Center for Health Statistics. } \author{Linda W. Pickle and Jim Pearson of StatNet Consulting, LLC, Gaithersburg, MD} \references{ Devesa SS, Grauman DJ, Blot WJ, Pennello GA, Hoover RN, Fraumeni, JF Jr. Atlas of cancer mortality in the United States: 1950-94, NIH Publication 99-4564, Bethesda, MD: National Cancer Institute } \keyword{datasets}
/man/wmlung5070.Rd
no_license
Suppaman/micromapST
R
false
false
2,133
rd
\name{wmlung5070} \alias{wmlung5070} \docType{data} \title{ Lung cancer mortality data for white males, 1950-69 and 1970-94 } \description{ Counts and rates of lung cancer mortality data among white men by state, aggregated for 1950-1969 and 1970-1994 } \usage{wmlung5070} \format{ A data frame with 51 observations, 1 for each state + DC, on the following 5 variables. \describe{ \item{RATEWM_50}{a numeric vector, state age-adjusted rates during 1950-69} \item{COUNTWM_50}{a numeric vector, the number of lung cancer deaths during 1950-69} \item{RATEWM_70}{a numeric vector, state age-adjusted rates during 1970-94} \item{COUNTWM_70}{a numeric vector, the number of lung cancer deaths during 1970-94} \item{PERCENT}{a numeric vector of the percent change in rate from 1950-69 to 1970-94} } } \details{ The rates on this file are directly age adjusted to the US 1970 standard population and are expressed as the number of deaths per 100,000 person-years. The row names are the 2 character postal codes for the states. Note that the data currently available on the NCI web site are from a later data submission and so may differ slightly (in first decimal place) from the rates provided here due to corrections to the dataset after its first publication. The name of each row is the state abbreviation - 2 characters. This dataset is used by the \var{micromapSEER} examples using the border group of "USStatesDF". } \source{ Surveillance Research Program, National Cancer Institute SEER*Stat software (\url{http://www.seer.cancer.gov/seerstat}), November 2007 data submission, released April 2008. Data originally provided to NCI by the National Center for Health Statistics. } \author{Linda W. Pickle and Jim Pearson of StatNet Consulting, LLC, Gaithersburg, MD} \references{ Devesa SS, Grauman DJ, Blot WJ, Pennello GA, Hoover RN, Fraumeni, JF Jr. Atlas of cancer mortality in the United States: 1950-94, NIH Publication 99-4564, Bethesda, MD: National Cancer Institute } \keyword{datasets}
## TESTING FOR getPermlink METHOD ##### unitTestGetPermlink <- function(){ ## TRY PASSING A CHARACTER VECTOR OF A REPOSITORY getPermlink("brian-bot/rGithubClient") getPermlink("brian-bot/rGithubClient", ref="tag", refName="rGithubClient-0.8") getPermlink("brian-bot/rGithubClient", ref="branch", refName="dev") getPermlink("brian-bot/rGithubClient", ref="commit", refName="9382e7191073c1a5dc554ec8b6658d07d405b89e") checkException(getPermlink("brian-bot/rGithubClient", type="raw"), silent=T) ## TRY PASSING A CHARACTER VECTOR OF A REPOSITORY - NOW WITH A FILE PATH getPermlink("brian-bot/rGithubClient", "DESCRIPTION") getPermlink("brian-bot/rGithubClient", "DESCRIPTION", ref="tag", refName="rGithubClient-0.8") getPermlink("brian-bot/rGithubClient", "DESCRIPTION", ref="branch", refName="dev") getPermlink("brian-bot/rGithubClient", "DESCRIPTION", ref="commit", refName="9382e7191073c1a5dc554ec8b6658d07d405b89e") getPermlink("brian-bot/rGithubClient", "DESCRIPTION", type="raw") getPermlink("brian-bot/rGithubClient", "DESCRIPTION", ref="tag", refName="rGithubClient-0.8", type="raw") getPermlink("brian-bot/rGithubClient", "DESCRIPTION", ref="branch", refName="dev", type="raw") getPermlink("brian-bot/rGithubClient", "DESCRIPTION", ref="commit", refName="9382e7191073c1a5dc554ec8b6658d07d405b89e", type="raw") ## NOW TRY BY PASSING A githubRepo OBJECT myRepo <- getRepo("brian-bot/rGithubClient", ref="tag", refName="rGithubClient-0.8") getPermlink(myRepo) getPermlink(myRepo, "DESCRIPTION") getPermlink(myRepo, "DESCRIPTION", type="raw") }
/inst/unitTests/test_getPermlink.R
no_license
brian-bot/rGithubClient
R
false
false
1,596
r
## TESTING FOR getPermlink METHOD ##### unitTestGetPermlink <- function(){ ## TRY PASSING A CHARACTER VECTOR OF A REPOSITORY getPermlink("brian-bot/rGithubClient") getPermlink("brian-bot/rGithubClient", ref="tag", refName="rGithubClient-0.8") getPermlink("brian-bot/rGithubClient", ref="branch", refName="dev") getPermlink("brian-bot/rGithubClient", ref="commit", refName="9382e7191073c1a5dc554ec8b6658d07d405b89e") checkException(getPermlink("brian-bot/rGithubClient", type="raw"), silent=T) ## TRY PASSING A CHARACTER VECTOR OF A REPOSITORY - NOW WITH A FILE PATH getPermlink("brian-bot/rGithubClient", "DESCRIPTION") getPermlink("brian-bot/rGithubClient", "DESCRIPTION", ref="tag", refName="rGithubClient-0.8") getPermlink("brian-bot/rGithubClient", "DESCRIPTION", ref="branch", refName="dev") getPermlink("brian-bot/rGithubClient", "DESCRIPTION", ref="commit", refName="9382e7191073c1a5dc554ec8b6658d07d405b89e") getPermlink("brian-bot/rGithubClient", "DESCRIPTION", type="raw") getPermlink("brian-bot/rGithubClient", "DESCRIPTION", ref="tag", refName="rGithubClient-0.8", type="raw") getPermlink("brian-bot/rGithubClient", "DESCRIPTION", ref="branch", refName="dev", type="raw") getPermlink("brian-bot/rGithubClient", "DESCRIPTION", ref="commit", refName="9382e7191073c1a5dc554ec8b6658d07d405b89e", type="raw") ## NOW TRY BY PASSING A githubRepo OBJECT myRepo <- getRepo("brian-bot/rGithubClient", ref="tag", refName="rGithubClient-0.8") getPermlink(myRepo) getPermlink(myRepo, "DESCRIPTION") getPermlink(myRepo, "DESCRIPTION", type="raw") }
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/http.R \name{appveyorHTTP} \alias{appveyorHTTP} \title{Appveyor API HTTP Requests} \usage{ appveyorHTTP(verb = "GET", path = "", query = list(), body = "", base = "https://ci.appveyor.com/api", token = Sys.getenv("APPVEYOR_TOKEN"), ...) } \arguments{ \item{verb}{A character string containing an HTTP verb, defaulting to \dQuote{GET}.} \item{path}{A character string with the API endpoint (should begin with a slash).} \item{query}{A list specifying any query string arguments to pass to the API.} \item{body}{A character string of request body data.} \item{base}{A character string specifying the base URL for the API.} \item{token}{A character string containing a Travis-CI API token. If missing, defaults to value stored in environment variable \dQuote{APPVEYOR_TOKEN}.} \item{...}{Additional arguments passed to an HTTP request function, such as \code{\link[httr]{GET}}.} } \value{ A list. } \description{ This is the workhorse function for executing API requests for Appveyor. } \details{ This is mostly an internal function for executing API requests. In almost all cases, users do not need to access this directly. }
/man/appveyorHTTP.Rd
no_license
muschellij2/appveyor
R
false
true
1,213
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/http.R \name{appveyorHTTP} \alias{appveyorHTTP} \title{Appveyor API HTTP Requests} \usage{ appveyorHTTP(verb = "GET", path = "", query = list(), body = "", base = "https://ci.appveyor.com/api", token = Sys.getenv("APPVEYOR_TOKEN"), ...) } \arguments{ \item{verb}{A character string containing an HTTP verb, defaulting to \dQuote{GET}.} \item{path}{A character string with the API endpoint (should begin with a slash).} \item{query}{A list specifying any query string arguments to pass to the API.} \item{body}{A character string of request body data.} \item{base}{A character string specifying the base URL for the API.} \item{token}{A character string containing a Travis-CI API token. If missing, defaults to value stored in environment variable \dQuote{APPVEYOR_TOKEN}.} \item{...}{Additional arguments passed to an HTTP request function, such as \code{\link[httr]{GET}}.} } \value{ A list. } \description{ This is the workhorse function for executing API requests for Appveyor. } \details{ This is mostly an internal function for executing API requests. In almost all cases, users do not need to access this directly. }
require(shiny) folder_address = 'C://Users//guoyi//Documents//Code//gemini//linkMRN' runApp(folder_address, launch.browser=TRUE)
/linkMRN/run.R
no_license
yishan-guo/gemini
R
false
false
128
r
require(shiny) folder_address = 'C://Users//guoyi//Documents//Code//gemini//linkMRN' runApp(folder_address, launch.browser=TRUE)
library(car) ;library(dae) ;library(nlme) ;library(effects) library(ggplot2) ;library(psych) ;library(interplot) library(plyr) ;library(devtools);library(ez) library(Rmisc) ; library(car) library(wesanderson) ;library(dae) library(lmerTest) library(multcompView) rm(list=ls()) name1 <- "/Volumes/dycog/Aurelie/DATA/MEG/PAT_EXPE22/documents/4R/" name2 <- "NewAVBroad_Age_CnD_Alpha_MinEvoked_80Slct_separate_time_separate_freq.txt" pat <- read.table(paste0(name1,name2),header=T) #pat <- pat[pat$MOD == "Auditory",] #pat$MOD <- factor(pat$MOD) #pat$CHAN <- factor(pat$CHAN) model.pat <- lme4::lmer(POW ~ (GROUP+CUE_ORIG+CHAN+FREQ+TIME)^6 + (1|SUB), data =pat) model_anova <- Anova(model.pat,type=2,test.statistic=c("F")) print(model_anova) pat_yc <- pat[pat$GROUP == "young",] pat_oc <- pat[pat$GROUP == "old",] model.pat_yc <- lme4::lmer(POW ~ (CUE_ORIG+MOD+HEMI)^3 + (1|SUB), data =pat_yc) model.pat_oc <- lme4::lmer(POW ~ (CUE_ORIG+MOD+HEMI)^3 + (1|SUB), data =pat_oc) model_anova_yc <- Anova(model.pat_yc,type=2,test.statistic=c("F")) model_anova_oc <- Anova(model.pat_oc,type=2,test.statistic=c("F")) print(model_anova_yc) print(model_anova_oc) pat_oc_audL <- pat[pat$GROUP == "young" & pat$MOD == "aud",] pat_oc_audR <- pat[pat$GROUP == "young" & pat$MOD == "vis",] model.pat_oc_audL <- lme4::lmer(POW ~ (CUE_ORIG+HEMI)^2 + (1|SUB), data =pat_oc_audL) model.pat_oc_audR <- lme4::lmer(POW ~ (CUE_ORIG+HEMI)^2 + (1|SUB), data =pat_oc_audR) model_anova_oc_audL <- Anova(model.pat_oc_audL,type=2,test.statistic=c("F")) model_anova_oc_audR <- Anova(model.pat_oc_audR,type=2,test.statistic=c("F")) print(model_anova_oc_audL) print(model_anova_oc_audR) lsmeans::cld(lsmeans::lsmeans(model.pat_oc_audL, pairwise~CUE_ORIG|HEMI),details= TRUE) lsmeans::cld(lsmeans::lsmeans(model.pat_oc_audR, pairwise~CUE_ORIG|HEMI),details= TRUE) tgc <- summarySE(pat, measurevar="POW", groupvars=c("CUE_ORIG","CHAN")) pd <- position_dodge(0.1) # move them .05 to the left and right ggplot(tgc, aes(x=CHAN, y=POW, 
color=CUE_ORIG,group=CUE_ORIG)) + geom_errorbar(aes(ymin=POW-se, ymax=POW+se), width=.1, position=pd,colour="black") + geom_line(position=pd) + geom_point(position=pd, size=3, shape=21,fill="white") + ylim(-0.1,0.1) #aud_broad41_L aud_broad41_R #aud_broad42_L aud_broad42_R #aud_broad22_L aud_broad22_R pat_yc <- pat[pat$GROUP =="young",] pat_yc$CHAN <- factor(pat_yc$CHAN) pat_yc$GROUP <- factor(pat_yc$GROUP) tgc <- summarySE(pat_yc, measurevar="POW", groupvars=c("CUE_ORIG","HEMI","MOD")) interaction.ABC.plot(POW, x.factor=HEMI, groups.factor=CUE_ORIG, trace.factor=MOD, data=pat_yc, c,ggplotFunc=list(labs(x="Target Side",y="Relative power Change"), ggtitle(""), ylim(-0.25,0.25), geom_errorbar(data=tgc,aes(ymax=POW+se, ymin=POW-se),width=0.2)))
/rstudio/AgeingEffcet_alpha_no_index.R
no_license
elshafeh/own
R
false
false
3,092
r
library(car) ;library(dae) ;library(nlme) ;library(effects) library(ggplot2) ;library(psych) ;library(interplot) library(plyr) ;library(devtools);library(ez) library(Rmisc) ; library(car) library(wesanderson) ;library(dae) library(lmerTest) library(multcompView) rm(list=ls()) name1 <- "/Volumes/dycog/Aurelie/DATA/MEG/PAT_EXPE22/documents/4R/" name2 <- "NewAVBroad_Age_CnD_Alpha_MinEvoked_80Slct_separate_time_separate_freq.txt" pat <- read.table(paste0(name1,name2),header=T) #pat <- pat[pat$MOD == "Auditory",] #pat$MOD <- factor(pat$MOD) #pat$CHAN <- factor(pat$CHAN) model.pat <- lme4::lmer(POW ~ (GROUP+CUE_ORIG+CHAN+FREQ+TIME)^6 + (1|SUB), data =pat) model_anova <- Anova(model.pat,type=2,test.statistic=c("F")) print(model_anova) pat_yc <- pat[pat$GROUP == "young",] pat_oc <- pat[pat$GROUP == "old",] model.pat_yc <- lme4::lmer(POW ~ (CUE_ORIG+MOD+HEMI)^3 + (1|SUB), data =pat_yc) model.pat_oc <- lme4::lmer(POW ~ (CUE_ORIG+MOD+HEMI)^3 + (1|SUB), data =pat_oc) model_anova_yc <- Anova(model.pat_yc,type=2,test.statistic=c("F")) model_anova_oc <- Anova(model.pat_oc,type=2,test.statistic=c("F")) print(model_anova_yc) print(model_anova_oc) pat_oc_audL <- pat[pat$GROUP == "young" & pat$MOD == "aud",] pat_oc_audR <- pat[pat$GROUP == "young" & pat$MOD == "vis",] model.pat_oc_audL <- lme4::lmer(POW ~ (CUE_ORIG+HEMI)^2 + (1|SUB), data =pat_oc_audL) model.pat_oc_audR <- lme4::lmer(POW ~ (CUE_ORIG+HEMI)^2 + (1|SUB), data =pat_oc_audR) model_anova_oc_audL <- Anova(model.pat_oc_audL,type=2,test.statistic=c("F")) model_anova_oc_audR <- Anova(model.pat_oc_audR,type=2,test.statistic=c("F")) print(model_anova_oc_audL) print(model_anova_oc_audR) lsmeans::cld(lsmeans::lsmeans(model.pat_oc_audL, pairwise~CUE_ORIG|HEMI),details= TRUE) lsmeans::cld(lsmeans::lsmeans(model.pat_oc_audR, pairwise~CUE_ORIG|HEMI),details= TRUE) tgc <- summarySE(pat, measurevar="POW", groupvars=c("CUE_ORIG","CHAN")) pd <- position_dodge(0.1) # move them .05 to the left and right ggplot(tgc, aes(x=CHAN, y=POW, 
color=CUE_ORIG,group=CUE_ORIG)) + geom_errorbar(aes(ymin=POW-se, ymax=POW+se), width=.1, position=pd,colour="black") + geom_line(position=pd) + geom_point(position=pd, size=3, shape=21,fill="white") + ylim(-0.1,0.1) #aud_broad41_L aud_broad41_R #aud_broad42_L aud_broad42_R #aud_broad22_L aud_broad22_R pat_yc <- pat[pat$GROUP =="young",] pat_yc$CHAN <- factor(pat_yc$CHAN) pat_yc$GROUP <- factor(pat_yc$GROUP) tgc <- summarySE(pat_yc, measurevar="POW", groupvars=c("CUE_ORIG","HEMI","MOD")) interaction.ABC.plot(POW, x.factor=HEMI, groups.factor=CUE_ORIG, trace.factor=MOD, data=pat_yc, c,ggplotFunc=list(labs(x="Target Side",y="Relative power Change"), ggtitle(""), ylim(-0.25,0.25), geom_errorbar(data=tgc,aes(ymax=POW+se, ymin=POW-se),width=0.2)))
app <- ShinyDriver$new("../../", seed = 100, shinyOptions = list(display.mode = "normal")) app$snapshotInit("mytest") app$snapshot() app$setInputs(date = "2020-03-13") app$setInputs(date2 = "2020-01-12") app$setInputs(dateRange = c("2019-12-01", "2020-01-10")) app$setInputs(dateRange = c("2019-12-01", "2020-04-24")) app$setInputs(dateRange2 = c("2020-01-02", "2020-01-11")) app$setInputs(dateRange2 = c("2020-01-02", "2020-01-17")) app$snapshot()
/shinycoreci-apps-master/shinycoreci-apps-master/037-date-and-date-range/tests/shinytests/mytest.R
no_license
RohanYashraj/R-Tutorials-Code
R
false
false
450
r
app <- ShinyDriver$new("../../", seed = 100, shinyOptions = list(display.mode = "normal")) app$snapshotInit("mytest") app$snapshot() app$setInputs(date = "2020-03-13") app$setInputs(date2 = "2020-01-12") app$setInputs(dateRange = c("2019-12-01", "2020-01-10")) app$setInputs(dateRange = c("2019-12-01", "2020-04-24")) app$setInputs(dateRange2 = c("2020-01-02", "2020-01-11")) app$setInputs(dateRange2 = c("2020-01-02", "2020-01-17")) app$snapshot()
\name{readLandmarksToArray} \alias{readLandmarksToArray} \title{Reads landmark file(s) into an array} \description{ Reads landmarks from one or more files into an array. The files can be input as a vector or matrix and will be grouped into the returned array according to their input grouping. In every case, the first dimension corresponds to the number of landmarks and the second dimension corresponds to the number of landmark dimensions (2 for 2D landmarks, 3 for 3D landmarks, etc.). } \usage{ readLandmarksToArray(file, na.omit = FALSE, ...) } \arguments{ \item{file}{a vector or matrix of file paths to be read. Each file should contain a single landmark matrix.} \item{na.omit}{whether landmarks with NA values in any file should be omitted.} \item{...}{further arguments to be passed to \code{read.table()}.} } \details{ This function will read landmark matrices from one or more files and use the row names in each matrix to match up corresponding landmarks into a single array, filling in missing landmarks with \code{NA}. The landmark files are read by \code{read.file()} and should thus conform to all requirements of \code{read.file()}. Arguments for \code{read.file()} can be passed through \code{readLandmarksToList()} (e.g. \code{header}, \code{row.names}, etc.). If the landmark matrices do not have row names, this function assumes that landmarks in the same rows correspond and the number of rows in each landmark file should be the same. Since stereo camera setups involve at least two camera views and usually more than one orientation per view, \code{readLandmarksToArray()} can be used to create a single array with multiple orientations and views of the same or overlapping landmark sets. If a vector of file paths is input, \code{readLandmarksToArray()} returns a three-dimensional array in which the first two dimensions are the landmark matrices and the last dimension is the index in the input file vector. 
If a matrix of file paths is input, \code{readLandmarksToArray()} returns a four-dimensional array in which the first two dimensions are the landmark matrices and the last two dimensions are the indices in the input file matrix (see "Examples"). This function is called by \code{\link{readCheckerboardsToArray}} to read in checkerboard corners with additional arguments available through \code{\link{readCheckerboardsToArray}} for manipulating the corner point order. } \value{a landmark array of three or four dimensions.} \author{Aaron Olsen} \seealso{ \code{\link{readCheckerboardsToArray}}, \code{\link{readLandmarksToList}}, \code{\link{readLandmarksToMatrix}} } \examples{ ## READING IN LANDMARKS WITH ROW NAMES ## ## GET FILE DIRECTORY FOR PACKAGE FILES fdir <- paste0(path.package("StereoMorph"), "/extdata/") ## SET FILES TO LOAD file <- paste0(fdir, "lm_3d_even_a", 1:3, ".txt") ## LOAD FILES INTO AN ARRAY lm.array <- readLandmarksToArray(file=file, header=FALSE, row.names=1) ## VIEW THE FIRST FIVE LANDMARKS lm.array[1:5, , ] ## LOAD FILES INTO AN ARRAY OMITTING ALL NA lm.array <- readLandmarksToArray(file=file, header=FALSE, row.names=1, na.omit=TRUE) ## VIEW ARRAY ## NOTE THERE ARE ONLY THREE LANDMARKS SHARED AMONG FILES THAT ARE NOT NA lm.array ## READING SINGLE VS. VECTOR VS. MATRIX FILE INPUTS ## ## SET FILE PATHS file <- matrix(c(paste0(fdir, "rcta_a", 1:3, "_v1.txt"), paste0(fdir, "rcta_a", 1:3, "_v2.txt")), ncol=2) ## READ A SINGLE FILE PATH ## TREATED AS A VECTOR OF LENGTH ONE readLandmarksToArray(file=file[1, 1]) ## READ A FILE PATH VECTOR readLandmarksToArray(file=file[, 1]) ## READ A FILE PATH MATRIX readLandmarksToArray(file=file) } \keyword{ landmarks } \keyword{ read functions }
/man/readLandmarksToArray.Rd
no_license
hiweller/Eartheaters
R
false
false
3,677
rd
\name{readLandmarksToArray} \alias{readLandmarksToArray} \title{Reads landmark file(s) into an array} \description{ Reads landmarks from one or more files into an array. The files can be input as a vector or matrix and will be grouped into the returned array according to their input grouping. In every case, the first dimension corresponds to the number of landmarks and the second dimension corresponds to the number of landmark dimensions (2 for 2D landmarks, 3 for 3D landmarks, etc.). } \usage{ readLandmarksToArray(file, na.omit = FALSE, ...) } \arguments{ \item{file}{a vector or matrix of file paths to be read. Each file should contain a single landmark matrix.} \item{na.omit}{whether landmarks with NA values in any file should be omitted.} \item{...}{further arguments to be passed to \code{read.table()}.} } \details{ This function will read landmark matrices from one or more files and use the row names in each matrix to match up corresponding landmarks into a single array, filling in missing landmarks with \code{NA}. The landmark files are read by \code{read.file()} and should thus conform to all requirements of \code{read.file()}. Arguments for \code{read.file()} can be passed through \code{readLandmarksToList()} (e.g. \code{header}, \code{row.names}, etc.). If the landmark matrices do not have row names, this function assumes that landmarks in the same rows correspond and the number of rows in each landmark file should be the same. Since stereo camera setups involve at least two camera views and usually more than one orientation per view, \code{readLandmarksToArray()} can be used to create a single array with multiple orientations and views of the same or overlapping landmark sets. If a vector of file paths is input, \code{readLandmarksToArray()} returns a three-dimensional array in which the first two dimensions are the landmark matrices and the last dimension is the index in the input file vector. 
If a matrix of file paths is input, \code{readLandmarksToArray()} returns a four-dimensional array in which the first two dimensions are the landmark matrices and the last two dimensions are the indices in the input file matrix (see "Examples"). This function is called by \code{\link{readCheckerboardsToArray}} to read in checkerboard corners with additional arguments available through \code{\link{readCheckerboardsToArray}} for manipulating the corner point order. } \value{a landmark array of three or four dimensions.} \author{Aaron Olsen} \seealso{ \code{\link{readCheckerboardsToArray}}, \code{\link{readLandmarksToList}}, \code{\link{readLandmarksToMatrix}} } \examples{ ## READING IN LANDMARKS WITH ROW NAMES ## ## GET FILE DIRECTORY FOR PACKAGE FILES fdir <- paste0(path.package("StereoMorph"), "/extdata/") ## SET FILES TO LOAD file <- paste0(fdir, "lm_3d_even_a", 1:3, ".txt") ## LOAD FILES INTO AN ARRAY lm.array <- readLandmarksToArray(file=file, header=FALSE, row.names=1) ## VIEW THE FIRST FIVE LANDMARKS lm.array[1:5, , ] ## LOAD FILES INTO AN ARRAY OMITTING ALL NA lm.array <- readLandmarksToArray(file=file, header=FALSE, row.names=1, na.omit=TRUE) ## VIEW ARRAY ## NOTE THERE ARE ONLY THREE LANDMARKS SHARED AMONG FILES THAT ARE NOT NA lm.array ## READING SINGLE VS. VECTOR VS. MATRIX FILE INPUTS ## ## SET FILE PATHS file <- matrix(c(paste0(fdir, "rcta_a", 1:3, "_v1.txt"), paste0(fdir, "rcta_a", 1:3, "_v2.txt")), ncol=2) ## READ A SINGLE FILE PATH ## TREATED AS A VECTOR OF LENGTH ONE readLandmarksToArray(file=file[1, 1]) ## READ A FILE PATH VECTOR readLandmarksToArray(file=file[, 1]) ## READ A FILE PATH MATRIX readLandmarksToArray(file=file) } \keyword{ landmarks } \keyword{ read functions }
library(shiny) library(rhandsontable) library(ggplot2) library(plotly) library(RColorBrewer) library(broom) library(forecast) library(lubridate) library(zoo) library(dplyr) library(plyr) pdf(NULL) ui <- shinyUI(fluidPage( titlePanel("easy analytics"), tabsetPanel( tabPanel("Upload File", titlePanel("Uploading Files"), sidebarLayout( sidebarPanel( fileInput('file1', 'Choose CSV File', accept=c('text/csv', 'text/comma-separated-values,text/plain', '.csv')), downloadButton('downloadtemplate', 'Download template'), # added interface for uploading / downloading data from # http://shiny.rstudio.com/gallery/file-upload.html tags$br(), checkboxInput('header', 'Header', TRUE), radioButtons('sep', 'Separator', c(Comma=',', Semicolon=';', Tab='\t'), ','), radioButtons('quote', 'Quote', c(None='', 'Double Quote'='"', 'Single Quote'="'"), '"') ), mainPanel( textOutput(outputId = "inst", container = div), tableOutput('contents') ) ) ), tabPanel("Time Series", pageWithSidebar( headerPanel('Time Series'), sidebarPanel( # "Empty inputs" - they will be updated after the data is uploaded selectInput('xcol', 'Date Variable', ""), selectInput('ycol', 'Y Variable', "", selected = ""), selectInput('zcol', "Color Variable", "", selected = ""), numericInput(inputId = "date_breaks", label = "Date breaks", value = 250) ), mainPanel( textOutput(outputId = "instTS", container = div), plotlyOutput('MyPlot') ) ) ), tabPanel("3d scatter plot", pageWithSidebar( headerPanel('3d scatter'), sidebarPanel( # "Empty inputs" - they will be updated after the data is uploaded selectInput('xcol2', 'Day or Hour Variable', ""), selectInput('ycol2', 'Hour or Day Variable', "", selected = ""), selectInput('zcol2', "kW Variable", "", selected = ""), selectInput('qcol2', "color Variable", "", selected = "") ), mainPanel( textOutput(outputId = "inst3d", container = div), plotlyOutput('MyPlot2') ) ) ), tabPanel("Heat Map plot", pageWithSidebar( headerPanel('Heat Map'), sidebarPanel( # "Empty inputs" - they will be 
updated after the data is uploaded selectInput('xcol3', 'Date Variable', ""), selectInput('ycol3', 'Hour Variable', "", selected = ""), selectInput('zcol3', "Color Variable", "", selected = "") ), mainPanel( textOutput(outputId = "heat map", container = div), plotlyOutput('MyPlot3') ) ) ), tabPanel("Reg. analysis", pageWithSidebar( headerPanel('OAT regression'), sidebarPanel( # "Empty inputs" - they will be updated after the data is uploaded selectInput('xcol4', 'OAT Variable', ""), selectInput('ycol4', 'kW Variable', "", selected = ""), selectInput('zcol4', 'Date Variable', "", selected = ""), numericInput(inputId = "n", label = "number of scheduled periods", value = 1), numericInput(inputId = "p", label = "number of segments", value = 4), numericInput(inputId = "m", label = "interval in minutes", value = 15), actionButton(inputId = "clicks", label = "Click to run model"), tableOutput("statsable") ), mainPanel( textOutput(outputId = "instoat", container = div), rHandsontableOutput('hot'), plotlyOutput('MyPlot4'), plotlyOutput('MyPlot5') ) ) ), tabPanel("Normalize-it", pageWithSidebar( headerPanel('Let the Good Times Role'), sidebarPanel( numericInput(inputId = "station", label = "Station ID", value = 690150), htmlOutput("mySite"), numericInput(inputId = "psave", label = "estimated % Savings", value = .1), numericInput(inputId = "dur", label = "number of M&V measurements", value = 8640), actionButton(inputId = "clicks2", label = "Click to run model"), tableOutput("statsable2") ), mainPanel( textOutput(outputId = "instnorm", container = div), rHandsontableOutput('hot2'), plotlyOutput('MyPlot6'), plotlyOutput('MyPlot7') ) ) ) ) ))
/ui.R
no_license
jlonergan35/regression
R
false
false
6,225
r
library(shiny) library(rhandsontable) library(ggplot2) library(plotly) library(RColorBrewer) library(broom) library(forecast) library(lubridate) library(zoo) library(dplyr) library(plyr) pdf(NULL) ui <- shinyUI(fluidPage( titlePanel("easy analytics"), tabsetPanel( tabPanel("Upload File", titlePanel("Uploading Files"), sidebarLayout( sidebarPanel( fileInput('file1', 'Choose CSV File', accept=c('text/csv', 'text/comma-separated-values,text/plain', '.csv')), downloadButton('downloadtemplate', 'Download template'), # added interface for uploading / downloading data from # http://shiny.rstudio.com/gallery/file-upload.html tags$br(), checkboxInput('header', 'Header', TRUE), radioButtons('sep', 'Separator', c(Comma=',', Semicolon=';', Tab='\t'), ','), radioButtons('quote', 'Quote', c(None='', 'Double Quote'='"', 'Single Quote'="'"), '"') ), mainPanel( textOutput(outputId = "inst", container = div), tableOutput('contents') ) ) ), tabPanel("Time Series", pageWithSidebar( headerPanel('Time Series'), sidebarPanel( # "Empty inputs" - they will be updated after the data is uploaded selectInput('xcol', 'Date Variable', ""), selectInput('ycol', 'Y Variable', "", selected = ""), selectInput('zcol', "Color Variable", "", selected = ""), numericInput(inputId = "date_breaks", label = "Date breaks", value = 250) ), mainPanel( textOutput(outputId = "instTS", container = div), plotlyOutput('MyPlot') ) ) ), tabPanel("3d scatter plot", pageWithSidebar( headerPanel('3d scatter'), sidebarPanel( # "Empty inputs" - they will be updated after the data is uploaded selectInput('xcol2', 'Day or Hour Variable', ""), selectInput('ycol2', 'Hour or Day Variable', "", selected = ""), selectInput('zcol2', "kW Variable", "", selected = ""), selectInput('qcol2', "color Variable", "", selected = "") ), mainPanel( textOutput(outputId = "inst3d", container = div), plotlyOutput('MyPlot2') ) ) ), tabPanel("Heat Map plot", pageWithSidebar( headerPanel('Heat Map'), sidebarPanel( # "Empty inputs" - they will be 
updated after the data is uploaded selectInput('xcol3', 'Date Variable', ""), selectInput('ycol3', 'Hour Variable', "", selected = ""), selectInput('zcol3', "Color Variable", "", selected = "") ), mainPanel( textOutput(outputId = "heat map", container = div), plotlyOutput('MyPlot3') ) ) ), tabPanel("Reg. analysis", pageWithSidebar( headerPanel('OAT regression'), sidebarPanel( # "Empty inputs" - they will be updated after the data is uploaded selectInput('xcol4', 'OAT Variable', ""), selectInput('ycol4', 'kW Variable', "", selected = ""), selectInput('zcol4', 'Date Variable', "", selected = ""), numericInput(inputId = "n", label = "number of scheduled periods", value = 1), numericInput(inputId = "p", label = "number of segments", value = 4), numericInput(inputId = "m", label = "interval in minutes", value = 15), actionButton(inputId = "clicks", label = "Click to run model"), tableOutput("statsable") ), mainPanel( textOutput(outputId = "instoat", container = div), rHandsontableOutput('hot'), plotlyOutput('MyPlot4'), plotlyOutput('MyPlot5') ) ) ), tabPanel("Normalize-it", pageWithSidebar( headerPanel('Let the Good Times Role'), sidebarPanel( numericInput(inputId = "station", label = "Station ID", value = 690150), htmlOutput("mySite"), numericInput(inputId = "psave", label = "estimated % Savings", value = .1), numericInput(inputId = "dur", label = "number of M&V measurements", value = 8640), actionButton(inputId = "clicks2", label = "Click to run model"), tableOutput("statsable2") ), mainPanel( textOutput(outputId = "instnorm", container = div), rHandsontableOutput('hot2'), plotlyOutput('MyPlot6'), plotlyOutput('MyPlot7') ) ) ) ) ))
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/InsuranceParameters.R \name{costs.scaleAlpha} \alias{costs.scaleAlpha} \title{Helper function to modify alpha costs of an insurance contract individually} \usage{ costs.scaleAlpha(scale) } \arguments{ \item{scale}{The scale for alpha / Zillmer cost} } \description{ Returns a function that modifies alpha (and Zillmer) costs by the given scale, but otherwise uses the full costs defined by the Costs parameter. } \details{ This function can be set as adjustCosts or adjustMinCosts hook parameters for a tariff or contract and can be used to apply cost adjustments on a per-contract basis. }
/man/costs.scaleAlpha.Rd
no_license
kainhofer/r-life-insurance-contracts
R
false
true
670
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/InsuranceParameters.R \name{costs.scaleAlpha} \alias{costs.scaleAlpha} \title{Helper function to modify alpha costs of an insurance contract individually} \usage{ costs.scaleAlpha(scale) } \arguments{ \item{scale}{The scale for alpha / Zillmer cost} } \description{ Returns a function that modifies alpha (and Zillmer) costs by the given scale, but otherwise uses the full costs defined by the Costs parameter. } \details{ This function can be set as adjustCosts or adjustMinCosts hook parameters for a tariff or contract and can be used to apply cost adjustments on a per-contract basis. }
# ---- shared internals for the discrete quantile functions -------------------

# Largest CDF value that must be tabulated to answer every request in `p`.
# Probabilities outside (0, 1) are handled separately by the lookup step.
.max_prob_needed <- function(p, lower.tail) {
  inner <- p[p < 1 & p > 0]
  if (length(inner) > 0) {
    if (lower.tail) max(inner) else 1 - min(inner)
  } else {
    0
  }
}

# Accumulate the CDF term by term from log f(0) and the log pmf ratio
# log(f(k)/f(k-1)) until it covers `target`.
.build_cdf <- function(lp0, log_ratio, target) {
  pmf <- exp(lp0)
  cdf <- pmf
  k <- 1
  while (cdf[k] < target) {
    pmf <- exp(log(pmf) + log_ratio(k))
    cdf <- c(cdf, cdf[k] + pmf)
    k <- k + 1
  }
  cdf
}

# Translate each requested probability into the smallest count whose CDF
# value reaches it, using a bisection-style search over the tabulated CDF.
# Out-of-range probabilities yield NaN (with a warning); p == 1 yields Inf.
.quantile_from_cdf <- function(p, cdf, lower.tail) {
  out <- vector(mode = "numeric", length = length(p))
  for (j in seq_along(p)) {
    pj <- if (lower.tail) p[j] else 1 - p[j]
    if (pj > 1 || pj < 0) {
      warning(paste0("p[[", j, "]] must be a probability"))
      out[j] <- NaN
    } else if (pj == 1) {
      out[j] <- Inf
    } else {
      lo <- 1
      hi <- length(cdf)
      while (cdf[lo] < pj) {
        mid <- floor((lo + hi) / 2)
        if (cdf[mid] <= pj && cdf[mid + 1] >= pj) {
          lo <- mid + 1
        } else if (cdf[mid] <= pj) {
          lo <- mid
        } else {
          hi <- mid
        }
      }
      out[j] <- lo - 1
    }
  }
  out
}

#' @rdname ctp
#' @importFrom hypergeo complex_gamma
#' @export
#'
#' @examples
#' # Examples for the function qctp
#' qctp(0.5,1,2,3)
#' qctp(c(.8,.9),1,2,3)
#'
qctp <- function(p, a, b, gamma, lower.tail = TRUE) {
  # Quantile function of the Complex Triparametric Pearson distribution.
  if (mode(c(p, a, b, gamma)) != "numeric")
    stop("non-numeric argument to mathematical function")
  if ((gamma <= 2 * a) || (gamma <= 0))
    stop("gamma must be greater than max(0,2a)")
  # log f(0) of the CTP pmf (complex-argument log-gamma normalization).
  lp0 <- 2 * Re(complex_gamma(gamma - a + b * 1i, log = TRUE)) -
    lgamma(gamma) - lgamma(gamma - 2 * a)
  cdf <- .build_cdf(
    lp0,
    function(k) log((a + k - 1)^2 + b^2) - log(gamma + k - 1) - log(k),
    .max_prob_needed(p, lower.tail)
  )
  .quantile_from_cdf(p, cdf, lower.tail)
}

#' @rdname cbp
#' @importFrom hypergeo complex_gamma
#' @export
#'
#' @examples
#' # Examples for the function qcbp
#' qcbp(0.5,2,3)
#' qcbp(c(.8,.9),2,3)
#'
qcbp <- function(p, b, gamma, lower.tail = TRUE) {
  # Quantile function of the Complex Biparametric Pearson distribution
  # (the CTP with a = 0).
  if (mode(c(p, b, gamma)) != "numeric")
    stop("non-numeric argument to mathematical function")
  if (gamma <= 0)
    stop("gamma must be greater than 0")
  # log f(0) of the CBP pmf.
  lp0 <- 2 * Re(complex_gamma(gamma + b * 1i, log = TRUE)) -
    lgamma(gamma) - lgamma(gamma)
  cdf <- .build_cdf(
    lp0,
    function(k) log((k - 1)^2 + b^2) - log(gamma + k - 1) - log(k),
    .max_prob_needed(p, lower.tail)
  )
  .quantile_from_cdf(p, cdf, lower.tail)
}

#' @rdname ebw
#' @importFrom hypergeo complex_gamma
#' @export
#'
#'
#' @examples
#' # Examples for the function qebw
#' qebw(0.5,-2.1,gamma=0.1)
#' qebw(c(.8,.9),-2.1,gamma=0.1)
#' qebw(0.5,2,rho=5)
#' qebw(c(.8,.9),2,rho=5)
#'
qebw <- function(p, alpha, gamma, rho, lower.tail = TRUE) {
  # Quantile function of the Extended Biparametric Waring distribution.
  # Exactly one of `gamma` / `rho` must be supplied; when alpha > 0 the
  # ('alpha', 'rho') parametrization is the conventional one.
  if (!missing(gamma) & !missing(rho))
    stop("Specify only 'gamma' or 'rho'")
  if (missing(gamma) & missing(rho))
    stop("Specify 'gamma' or 'rho'")
  if (!((missing(rho) && (mode(c(p, alpha, gamma)) == "numeric")) |
        (missing(gamma) && (mode(c(p, alpha, rho)) == "numeric"))))
    stop("non-numeric argument to mathematical function")
  if (!missing(gamma)) {
    if (alpha > 0) {
      warning("The usual parametrization when alpha>0 is ('alpha','rho')")
      if (gamma <= 2 * alpha)
        stop("gamma must be greater than 2a")
    } else {
      if (gamma <= 0)
        stop("gamma must be positive")
    }
  }
  if (!missing(rho)) {
    if (alpha < 0)
      stop("In the parametrization ('alpha','rho'), 'alpha' must be positive")
    if (rho <= 0)
      stop("rho must be positive")
  }
  # Work internally in the (alpha, gamma) parametrization.
  if (alpha > 0 && !missing(rho)) {
    gamma_eff <- 2 * alpha + rho
  } else {
    gamma_eff <- gamma
  }
  # log f(0) of the EBW pmf (real log-gamma normalization).
  lp0 <- 2 * lgamma(gamma_eff - alpha) -
    lgamma(gamma_eff - 2 * alpha) - lgamma(gamma_eff)
  cdf <- .build_cdf(
    lp0,
    function(k) log((alpha + k - 1)^2) - log(gamma_eff + k - 1) - log(k),
    .max_prob_needed(p, lower.tail)
  )
  .quantile_from_cdf(p, cdf, lower.tail)
}
/R/qctp.R
no_license
ujaen-statistics/cpd
R
false
false
5,852
r
#' @rdname ctp #' @importFrom hypergeo complex_gamma #' @export #' #' @examples #' # Examples for the function qctp #' qctp(0.5,1,2,3) #' qctp(c(.8,.9),1,2,3) #' qctp <- function(p, a, b, gamma, lower.tail = TRUE ){ if ( mode(c(p,a,b,gamma)) != "numeric") stop( "non-numeric argument to mathematical function") if( (gamma <= 2 * a) || (gamma <= 0) ) stop("gamma must be greater than max(0,2a)") icomplex <- sqrt(as.complex(-1)) auxP=p[p<1 & p>0] if (length(auxP)>0) if (lower.tail){ maxP <- max(auxP) }else{ maxP <- 1-min(auxP) } else maxP=0 n=length(p) result<-vector(mode="numeric",length=n) lf0 <- 2 * Re(complex_gamma(gamma - a + b * icomplex, log = TRUE)) - lgamma(gamma) - lgamma(gamma - 2 * a) pmfAux<-exp(lf0) i=1 Fd <-c(pmfAux) #Generating distribution function while( Fd[[i]] < maxP ){ pmfAux <- exp(log(pmfAux)+log(((a+i-1)^2+b^2))-log((gamma+i-1))-log(i)) Fd <- c( Fd, Fd[[i]] + pmfAux ) i <- i + 1 } #Searching values for (i in 1:n){ pMin=1 pMax=length(Fd) if (! lower.tail){ p[i]<-1-p[i] } if (p[i]>1 || p[i]<0){ warning( paste ("p[[", i, "]] must be a probability", sep = "")) result[[i]]=NaN }else if (p[i]==1){ result[[i]]=Inf }else{ while (Fd[pMin] < p[i]){ mitad = floor ((pMin + pMax)/2) if (Fd[mitad] <= p[i] && Fd[mitad+1] >= p[i]) pMin = mitad + 1 else if (Fd[mitad] <= p[i]) pMin = mitad else pMax = mitad } result[[i]]=pMin-1 } } return (result) } #' @rdname cbp #' @importFrom hypergeo complex_gamma #' @export #' #' @examples #' # Examples for the function qcbp #' qcbp(0.5,2,3) #' qcbp(c(.8,.9),2,3) #' qcbp <- function(p, b, gamma, lower.tail = TRUE) { if ( mode(c(p,b,gamma)) != "numeric") stop( "non-numeric argument to mathematical function") if( gamma <= 0 ) stop("gamma must be greater than 0") icomplex <- sqrt(as.complex(-1)) auxP=p[p<1 & p>0] if (length(auxP)>0) if (lower.tail){ maxP <- max(auxP) }else{ maxP <- 1-min(auxP) } else maxP=0 n=length(p) result<-vector(mode="numeric",length=n) lf0 <- 2 * Re(complex_gamma(gamma + b * icomplex, log = TRUE)) - 
lgamma(gamma) - lgamma(gamma ) pmfAux<-exp(lf0) i=1 Fd <-c(pmfAux) #Generating distribution function while( Fd[[i]] < maxP ){ pmfAux <- exp(log(pmfAux)+log(((i-1)^2+b^2))-log((gamma+i-1))-log(i)) Fd <- c( Fd, Fd[[i]] + pmfAux ) i <- i + 1 } #Searching values for (i in 1:n){ pMin=1 pMax=length(Fd) if (! lower.tail){ p[i]<-1-p[i] } if (p[i]>1 || p[i]<0){ warning( paste ("p[[", i, "]] must be a probability", sep = "")) result[[i]]=NaN }else if (p[i]==1){ result[[i]]=Inf }else{ while (Fd[pMin] < p[i]){ mitad = floor ((pMin + pMax)/2) if (Fd[mitad] <= p[i] && Fd[mitad+1] >= p[i]) pMin = mitad + 1 else if (Fd[mitad] <= p[i]) pMin = mitad else pMax = mitad } result[[i]]=pMin-1 } } return (result) } #' @rdname ebw #' @importFrom hypergeo complex_gamma #' @export #' #' #' @examples #' # Examples for the function qebw #' qebw(0.5,-2.1,gamma=0.1) #' qebw(c(.8,.9),-2.1,gamma=0.1) #' qebw(0.5,2,rho=5) #' qebw(c(.8,.9),2,rho=5) #' qebw <- function(p,alpha,gamma,rho, lower.tail = TRUE ) { if (!missing(gamma) & !missing(rho)) stop("Specify only 'gamma' or 'rho'") if (missing(gamma) & missing(rho)) stop("Specify 'gamma' or 'rho'") if ( !((missing(rho) && (mode(c(p,alpha,gamma)) == "numeric")) | (missing(gamma) && (mode(c(p,alpha,rho)) == "numeric")))) stop( "non-numeric argument to mathematical function" ) if (!missing(gamma)){ if (alpha>0){ warning("The usual parametrization when alpha>0 is ('alpha','rho')") if (gamma <= 2*alpha) stop("gamma must be greater than 2a") } else{ if (gamma <= 0) stop("gamma must be positive") } } if (!missing(rho)){ if (alpha<0) stop("In the parametrization ('alpha','rho'), 'alpha' must be positive") if (rho<=0) stop (("rho must be positive") ) } if (alpha>0 && !missing(rho)){ auxgamma=2*alpha+rho }else{ auxgamma=gamma } auxP=p[p<1 & p>0] if (length(auxP)>0) if (lower.tail){ maxP <- max(auxP) }else{ maxP <- 1-min(auxP) } else maxP=0 n=length(p) result<-vector(mode="numeric",length=n) lf0 <- 2 * 
lgamma(auxgamma-alpha)-lgamma(auxgamma-2*alpha)-lgamma(auxgamma) pmfAux<-exp(lf0) i=1 Fd <-c(pmfAux) #Generating distribution function while( Fd[[i]] < maxP ){ pmfAux <- exp(log(pmfAux)+log((alpha+i-1)^2)-log(auxgamma+i-1)-log(i)) #pmfAux <- pmfAux * (alpha+i-1)^2 / ((auxgamma+i-1) *i) Fd <- c( Fd, Fd[[i]] + pmfAux ) i <- i + 1 } #Searching values for (i in 1:n){ pMin=1 pMax=length(Fd) if (! lower.tail){ p[i]<-1-p[i] } if (p[i]>1 || p[i]<0){ warning( paste ("p[[", i, "]] must be a probability", sep = "")) result[[i]]=NaN }else if (p[i]==1){ result[[i]]=Inf }else{ while (Fd[pMin] < p[i]){ mitad = floor ((pMin + pMax)/2) if (Fd[mitad] <= p[i] && Fd[mitad+1] >= p[i]) pMin = mitad + 1 else if (Fd[mitad] <= p[i]) pMin = mitad else pMax = mitad } result[[i]]=pMin-1 } } return (result) }
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/interaction_score.R \name{load_interaction_score_output} \alias{load_interaction_score_output} \title{Loads output of python script for interaction score calculation} \usage{ load_interaction_score_output(loading_path) } \arguments{ \item{loading_path}{Directory to load from} } \value{ A named list (elements `group1` and `group2`). Each element contains an iGraph object containing the interaction score as edge attribute. } \description{ (INTERNAL) Loads data generated by \code{\link{calculate_interaction_score}}. Output files are graphs in gml format for both groups. }
/man/load_interaction_score_output.Rd
permissive
molnet-org/molnet
R
false
true
654
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/interaction_score.R \name{load_interaction_score_output} \alias{load_interaction_score_output} \title{Loads output of python script for interaction score calculation} \usage{ load_interaction_score_output(loading_path) } \arguments{ \item{loading_path}{Directory to load from} } \value{ A named list (elements `group1` and `group2`). Each element contains an iGraph object containing the interaction score as edge attribute. } \description{ (INTERNAL) Loads data generated by \code{\link{calculate_interaction_score}}. Output files are graphs in gml format for both groups. }
% Generated by roxygen2 (4.1.1): do not edit by hand % Please edit documentation in R/4_NetworkModels.R \name{ClosedJacksonNetwork} \alias{ClosedJacksonNetwork} \title{Obtains the main characteristics of a Closed Jackson Network model} \usage{ ClosedJacksonNetwork(mu = c(5, 5, 10, 15), s = c(2, 2, 1, 1), p = matrix(c(0.25, 0.15, 0.2, 0.4, 0.15, 0.35, 0.2, 0.3, 0.5, 0.25, 0.15, 0.1, 0.4, 0.3, 0.25, 0.05), 4, byrow = TRUE), n = 3) } \arguments{ \item{mu}{Vector of mean service rates} \item{s}{Vector of servers at each node} \item{p}{Routing matrix, where \ifelse{latex}{\eqn{p_{ij}}}{\out{<i>p<sub>ij</sub></i>}} is the routing probability from node i to node j} \item{n}{Number of customers in the network} } \value{ Returns the next information of a Closed Jackson Network model: \item{rho}{Traffic intensity: \eqn{\rho}} \item{l}{Number of customers in the system: \eqn{L}} \item{lq}{Number of customers in the queue: \ifelse{latex}{\eqn{L_{q}}}{\out{<i>L<sub>q</sub></i>}}} \item{w}{Waiting time in the system: \eqn{W}} \item{wq}{Waiting time in the queue: \ifelse{latex}{\eqn{W_{q}}}{\out{<i>W<sub>q</sub></i>}}} \item{eff}{System efficiency: \ifelse{latex}{\eqn{Eff = W/(W-W_q)}}{\out{<i>Eff = W/(W-W<sub>q</sub></i>)}}} } \description{ Obtains the main characteristics of a Closed Jackson Network model } \examples{ #An system have 4 workstations interconnected. #For the control of the system there is three #tasks in continuous execution in some of the #workstations. Once the task ends, this creates #a copy of itself and sends it tu execute in #some of the other three, following the next #probabilities table # Origin-destiny 1 2 3 4 # 1 0.25 0.15 0.20 0.40 # 2 0.15 0.35 0.20 0.30 # 3 0.50 0.25 0.15 0.10 # 4 0.40 0.30 0.25 0.05 #The servers 1 and 2 have two processors and #each of one have a process time with exponential #distribution and capacitiy of 5 tasks for #minute. #The servers 3 and 4 have a single processor #and they can serve 10 and 15 task for minute #respectively. 
ClosedJacksonNetwork(mu=c(5,5,10,15), s=c(2,2,1,1), p=matrix(c(0.25, 0.15, 0.20, 0.40, 0.15, 0.35, 0.20, 0.30, 0.50, 0.25, 0.15, 0.10, 0.40, 0.30, 0.25, 0.05), 4, byrow = TRUE), n = 3) } \seealso{ Other AnaliticalModels: \code{\link{M_M_1_INF_H}}; \code{\link{M_M_1_K}}; \code{\link{M_M_1}}; \code{\link{M_M_INF}}; \code{\link{M_M_S_INF_H_Y}}; \code{\link{M_M_S_INF_H}}; \code{\link{M_M_S_K}}; \code{\link{M_M_S}}; \code{\link{OpenJacksonNetwork}} }
/man/ClosedJacksonNetwork.Rd
no_license
ghobbs9495/arqas
R
false
false
2,790
rd
% Generated by roxygen2 (4.1.1): do not edit by hand % Please edit documentation in R/4_NetworkModels.R \name{ClosedJacksonNetwork} \alias{ClosedJacksonNetwork} \title{Obtains the main characteristics of a Closed Jackson Network model} \usage{ ClosedJacksonNetwork(mu = c(5, 5, 10, 15), s = c(2, 2, 1, 1), p = matrix(c(0.25, 0.15, 0.2, 0.4, 0.15, 0.35, 0.2, 0.3, 0.5, 0.25, 0.15, 0.1, 0.4, 0.3, 0.25, 0.05), 4, byrow = TRUE), n = 3) } \arguments{ \item{mu}{Vector of mean service rates} \item{s}{Vector of servers at each node} \item{p}{Routing matrix, where \ifelse{latex}{\eqn{p_{ij}}}{\out{<i>p<sub>ij</sub></i>}} is the routing probability from node i to node j} \item{n}{Number of customers in the network} } \value{ Returns the next information of a Closed Jackson Network model: \item{rho}{Traffic intensity: \eqn{\rho}} \item{l}{Number of customers in the system: \eqn{L}} \item{lq}{Number of customers in the queue: \ifelse{latex}{\eqn{L_{q}}}{\out{<i>L<sub>q</sub></i>}}} \item{w}{Waiting time in the system: \eqn{W}} \item{wq}{Waiting time in the queue: \ifelse{latex}{\eqn{W_{q}}}{\out{<i>W<sub>q</sub></i>}}} \item{eff}{System efficiency: \ifelse{latex}{\eqn{Eff = W/(W-W_q)}}{\out{<i>Eff = W/(W-W<sub>q</sub></i>)}}} } \description{ Obtains the main characteristics of a Closed Jackson Network model } \examples{ #An system have 4 workstations interconnected. #For the control of the system there is three #tasks in continuous execution in some of the #workstations. Once the task ends, this creates #a copy of itself and sends it tu execute in #some of the other three, following the next #probabilities table # Origin-destiny 1 2 3 4 # 1 0.25 0.15 0.20 0.40 # 2 0.15 0.35 0.20 0.30 # 3 0.50 0.25 0.15 0.10 # 4 0.40 0.30 0.25 0.05 #The servers 1 and 2 have two processors and #each of one have a process time with exponential #distribution and capacitiy of 5 tasks for #minute. #The servers 3 and 4 have a single processor #and they can serve 10 and 15 task for minute #respectively. 
ClosedJacksonNetwork(mu=c(5,5,10,15), s=c(2,2,1,1), p=matrix(c(0.25, 0.15, 0.20, 0.40, 0.15, 0.35, 0.20, 0.30, 0.50, 0.25, 0.15, 0.10, 0.40, 0.30, 0.25, 0.05), 4, byrow = TRUE), n = 3) } \seealso{ Other AnaliticalModels: \code{\link{M_M_1_INF_H}}; \code{\link{M_M_1_K}}; \code{\link{M_M_1}}; \code{\link{M_M_INF}}; \code{\link{M_M_S_INF_H_Y}}; \code{\link{M_M_S_INF_H}}; \code{\link{M_M_S_K}}; \code{\link{M_M_S}}; \code{\link{OpenJacksonNetwork}} }
library(JGR)

### Name: jgr.removeMenu
### Title: remove JGR Console menus
### Aliases: jgr.removeMenu jgr.removeMenuItem
### Keywords: programming

### ** Examples

# Build up a demo menu with items, a separator and a submenu, then exercise
# the removal functions on it.
menu_name <- "added menu"
jgr.addMenu(menu_name)
jgr.addMenuItem(menu_name, "print 1", "print(1)")
jgr.insertMenuItem(menu_name, "print 1 as if entered in console", "print(1)", 1, FALSE)
jgr.addMenuSeparator(menu_name)
jgr.addSubMenu(menu_name, "sub menu",
               c("a", "b", "c"),
               c("print('a')", "print('b')", "print('c')"))

# Remove the first item from the demo menu, then remove the last menu in the
# console's menu list (the one just added).
jgr.removeMenuItem(menu_name, 1)
jgr.removeMenu(length(jgr.getMenuNames()))
/data/genthat_extracted_code/JGR/examples/jgr.removeMenu.Rd.R
no_license
surayaaramli/typeRrh
R
false
false
546
r
library(JGR) ### Name: jgr.removeMenu ### Title: remove JGR Console menus ### Aliases: jgr.removeMenu jgr.removeMenuItem ### Keywords: programming ### ** Examples jgr.addMenu("added menu") jgr.addMenuItem("added menu", "print 1","print(1)") jgr.insertMenuItem("added menu","print 1 as if entered in console","print(1)",1,FALSE) jgr.addMenuSeparator("added menu") jgr.addSubMenu("added menu","sub menu",c("a","b","c"),c("print('a')","print('b')","print('c')")) jgr.removeMenuItem("added menu",1) jgr.removeMenu(length(jgr.getMenuNames()))
#Reading all the Test files subject_test <- read.table("./test/subject_test.txt") X_test <- read.table("./test/X_test.txt") y_test <- read.table("./test/y_test.txt") #Reading all the Train files subject_train <- read.table("./train/subject_train.txt") X_train <- read.table("./train/X_train.txt") y_train <- read.table("./train/y_train.txt") #Reading feature.txt file and assigning the Column V2 value of feature.txt to the Column names of X_test and X_train files features <- read.table("features.txt") colnames(X_test) <- features$V2 colnames(X_train) <- features$V2 #Reading activity_labels.txt & assigning names to the activity_names data frame activity_names <- read.table("activity_labels.txt") colnames(activity_names) <- c("V1","Activity_Names") #Assigning names to the y_test and y_train data frames colnames(y_test) <- c("Activity_Labels") colnames(y_train) <- c("Activity_Labels") #Assigning names to the subject_test and subject_train data frames colnames(subject_test) <- c("Subjects") colnames(subject_train) <- c("Subjects") #Column binding the X_train(7352x561) and y_train(7352x1) tables train_merged_data <- cbind(X_train,y_train) #Column binding the train_merged_data(7352x562) and subject_train(7352x1) tables train_merged_data <- cbind(train_merged_data,subject_train) #Subselect variables that either have "mean" or "std" names in the features data frame in train_merged_data data frame train_merged_data_subset_mean_std <- train_merged_data[,grepl("-mean()", features$V2) | grepl("-std()", features$V2)] #Repeating the same process that was done in train data set to test data set test_merged_data <- cbind(X_test,y_test) #Column binding test_merged_data <- cbind(test_merged_data,subject_test)#Column binding test_merged_data_subset_mean_std <- test_merged_data[,grepl("-mean()", features$V2) | grepl("-std()", features$V2)]#Subselecting "mean" and "std" #Row Binding both the train and test data sets test_train_merged_data <- 
rbind(train_merged_data_subset_mean_std,test_merged_data_subset_mean_std) #Merging test_train_merged_data data set with activity_names set by activity lablels. Since the lables are already binded in the orginal set, sorting that happens after merging will not affect the position of data test_train_merged_data <- merge(test_train_merged_data,activity_names, by.x ="Activity_Labels", by.y = "V1") library(reshape2) library(dplyr) #melting the data set based on measured variables melted_data <- melt(test_train_merged_data, id =c("Subjects","Activity_Names"), measure.vars = filter(features, grepl("-mean()", features$V2) | grepl("-std()", features$V2))$V2) #reording the final data set based on subjects and activity names tidy_data <- dcast(melted_data,Subjects + Activity_Names ~ variable, mean ) write.table(tidy_data, "tidy.txt", row.name=FALSE)
/run_analysis.R
no_license
varoon10688/Cleaning_Data_Course_Project
R
false
false
2,816
r
#Reading all the Test files subject_test <- read.table("./test/subject_test.txt") X_test <- read.table("./test/X_test.txt") y_test <- read.table("./test/y_test.txt") #Reading all the Train files subject_train <- read.table("./train/subject_train.txt") X_train <- read.table("./train/X_train.txt") y_train <- read.table("./train/y_train.txt") #Reading feature.txt file and assigning the Column V2 value of feature.txt to the Column names of X_test and X_train files features <- read.table("features.txt") colnames(X_test) <- features$V2 colnames(X_train) <- features$V2 #Reading activity_labels.txt & assigning names to the activity_names data frame activity_names <- read.table("activity_labels.txt") colnames(activity_names) <- c("V1","Activity_Names") #Assigning names to the y_test and y_train data frames colnames(y_test) <- c("Activity_Labels") colnames(y_train) <- c("Activity_Labels") #Assigning names to the subject_test and subject_train data frames colnames(subject_test) <- c("Subjects") colnames(subject_train) <- c("Subjects") #Column binding the X_train(7352x561) and y_train(7352x1) tables train_merged_data <- cbind(X_train,y_train) #Column binding the train_merged_data(7352x562) and subject_train(7352x1) tables train_merged_data <- cbind(train_merged_data,subject_train) #Subselect variables that either have "mean" or "std" names in the features data frame in train_merged_data data frame train_merged_data_subset_mean_std <- train_merged_data[,grepl("-mean()", features$V2) | grepl("-std()", features$V2)] #Repeating the same process that was done in train data set to test data set test_merged_data <- cbind(X_test,y_test) #Column binding test_merged_data <- cbind(test_merged_data,subject_test)#Column binding test_merged_data_subset_mean_std <- test_merged_data[,grepl("-mean()", features$V2) | grepl("-std()", features$V2)]#Subselecting "mean" and "std" #Row Binding both the train and test data sets test_train_merged_data <- 
rbind(train_merged_data_subset_mean_std,test_merged_data_subset_mean_std) #Merging test_train_merged_data data set with activity_names set by activity lablels. Since the lables are already binded in the orginal set, sorting that happens after merging will not affect the position of data test_train_merged_data <- merge(test_train_merged_data,activity_names, by.x ="Activity_Labels", by.y = "V1") library(reshape2) library(dplyr) #melting the data set based on measured variables melted_data <- melt(test_train_merged_data, id =c("Subjects","Activity_Names"), measure.vars = filter(features, grepl("-mean()", features$V2) | grepl("-std()", features$V2))$V2) #reording the final data set based on subjects and activity names tidy_data <- dcast(melted_data,Subjects + Activity_Names ~ variable, mean ) write.table(tidy_data, "tidy.txt", row.name=FALSE)
library(biggr) test_that("resource_ec2 return the correct class", { expect_equal(class(resource_ec2())[[1]], "boto3.resources.factory.ec2.ServiceResource") })
/tests/testthat/test-resource_ec2.R
no_license
fdrennan/biggr
R
false
false
162
r
library(biggr) test_that("resource_ec2 return the correct class", { expect_equal(class(resource_ec2())[[1]], "boto3.resources.factory.ec2.ServiceResource") })
#bigmac_eg_slr bigmac <- read.csv("Data.csv") lm.fit <- lm(NetHourlyWage ~ BigMacPrice, data = bigmac) summary(lm.fit) x <- bigmac[,2] y <- bigmac[,3] xbar <- mean(x) ybar <- mean(y) ###Computing Estimates #Computing Slope a <- sum((x-xbar)*(y-ybar)) b <- sum((x-xbar)^2) slope = a/b #Computing Intercept intercept <- ybar-(slope*xbar) ###Computing Std.Error yhat <- predict(lm.fit) sse <- sum((y-yhat)^2) #sum of sq.errors or sum of sq. residuals mse <- sse/25 stderror <- sqrt(mse) stderror_var_x <- stderror/(sqrt(sum((x-xbar)^2))) stderror_intercept <- stderror_var_x * sqrt(mean(x^2)) ###Computing t-Value #tValue <- ESTIMATE/STDERROR tval_intercept <- intercept/stderror_intercept tval_var_x <- slope/stderror_var_x tval_intercept; tval_var_x pval_intercept <- pt(q = tval_intercept, n=27, df=1) pval_var_x <- pt(q = tval_var_x, df=25) pval_intercept;pval_var_x qt(p = .102, df = 25) pt(q = 1.697051, df = 25, lower.tail = F)*2 pt(q = -1.697051, df = 25, lower.tail = T)+(1-pt(q = 1.697051, df = 25, lower.tail = T)) pt(q = 5.144, df = 25, lower.tail = F)+(1-pt(q = 5.144, df = 25, lower.tail = T)) pt(q = 5.144, df = 25, lower.tail = F)*2 #Computing fitted values - yhat yhat <- as.numeric(lm.fit$fitted.values) coef(lm.fit)[1]+coef(lm.fit)[2]*0.1449411 #computing rSquared ssx <- sum((x-xbar)^2) ssy <- sum((y-ybar)^2) ssxy <- sum((x-xbar)*(y-ybar)) rSquared = cov((x-xbar),(y-ybar))^2/(var(x)*var(y)) summary(lm.fit)$r.squared rSquared = sum((x-xbar)*(y-ybar))^2/(ssx*ssy) summary(lm.fit)$r.squared ss_tot <- sum((y-ybar)^2) ss_reg <- sum((yhat-ybar)^2) ss_res <- sum((y-yhat)^2) rSquared = 1- (ss_res/ss_tot)
/20170107_Batch25_CSE7202c_Lab01_SimpleLinearRegression_BigMac.R
no_license
nursnaaz/simple-Linear-Regression
R
false
false
1,641
r
#bigmac_eg_slr bigmac <- read.csv("Data.csv") lm.fit <- lm(NetHourlyWage ~ BigMacPrice, data = bigmac) summary(lm.fit) x <- bigmac[,2] y <- bigmac[,3] xbar <- mean(x) ybar <- mean(y) ###Computing Estimates #Computing Slope a <- sum((x-xbar)*(y-ybar)) b <- sum((x-xbar)^2) slope = a/b #Computing Intercept intercept <- ybar-(slope*xbar) ###Computing Std.Error yhat <- predict(lm.fit) sse <- sum((y-yhat)^2) #sum of sq.errors or sum of sq. residuals mse <- sse/25 stderror <- sqrt(mse) stderror_var_x <- stderror/(sqrt(sum((x-xbar)^2))) stderror_intercept <- stderror_var_x * sqrt(mean(x^2)) ###Computing t-Value #tValue <- ESTIMATE/STDERROR tval_intercept <- intercept/stderror_intercept tval_var_x <- slope/stderror_var_x tval_intercept; tval_var_x pval_intercept <- pt(q = tval_intercept, n=27, df=1) pval_var_x <- pt(q = tval_var_x, df=25) pval_intercept;pval_var_x qt(p = .102, df = 25) pt(q = 1.697051, df = 25, lower.tail = F)*2 pt(q = -1.697051, df = 25, lower.tail = T)+(1-pt(q = 1.697051, df = 25, lower.tail = T)) pt(q = 5.144, df = 25, lower.tail = F)+(1-pt(q = 5.144, df = 25, lower.tail = T)) pt(q = 5.144, df = 25, lower.tail = F)*2 #Computing fitted values - yhat yhat <- as.numeric(lm.fit$fitted.values) coef(lm.fit)[1]+coef(lm.fit)[2]*0.1449411 #computing rSquared ssx <- sum((x-xbar)^2) ssy <- sum((y-ybar)^2) ssxy <- sum((x-xbar)*(y-ybar)) rSquared = cov((x-xbar),(y-ybar))^2/(var(x)*var(y)) summary(lm.fit)$r.squared rSquared = sum((x-xbar)*(y-ybar))^2/(ssx*ssy) summary(lm.fit)$r.squared ss_tot <- sum((y-ybar)^2) ss_reg <- sum((yhat-ybar)^2) ss_res <- sum((y-yhat)^2) rSquared = 1- (ss_res/ss_tot)
#Read the dataset data <- read.table("household_power_consumption.txt",sep = ";",header = TRUE) #Filter by desired dates data$Date <- as.Date(data$Date,format="%d/%m/%Y") data <- subset(data,Date == "2007-02-01" | Date == "2007-02-02") #Remove "?" on Global_active_power field data <- subset(data,Global_active_power != "?") #Convert to numeric Global_active_power field data$Global_active_power <- as.numeric(as.character(data$Global_active_power)) #Create final histogram png(file ="plot1.png",width = 480,height = 480) hist(data$Global_active_power,col = "red",xlab = "Global Active Power (kilowatts)",ylab = "Frequency",main = "Global Active Power") dev.off()
/plot1.R
no_license
jjdahdah/ExData_Plotting1
R
false
false
668
r
#Read the dataset data <- read.table("household_power_consumption.txt",sep = ";",header = TRUE) #Filter by desired dates data$Date <- as.Date(data$Date,format="%d/%m/%Y") data <- subset(data,Date == "2007-02-01" | Date == "2007-02-02") #Remove "?" on Global_active_power field data <- subset(data,Global_active_power != "?") #Convert to numeric Global_active_power field data$Global_active_power <- as.numeric(as.character(data$Global_active_power)) #Create final histogram png(file ="plot1.png",width = 480,height = 480) hist(data$Global_active_power,col = "red",xlab = "Global Active Power (kilowatts)",ylab = "Frequency",main = "Global Active Power") dev.off()
library(FlexParamCurve) ### Name: modpar ### Title: Estimate Values to be Used for Fixed FlexParamCurve Parameters ### Aliases: modpar ### ** Examples # estimate fixed parameters use data object posneg.data modpar(posneg.data$age, posneg.data$mass, pn.options = "myoptions") # estimate fixed parameters use data object posneg.data (only first # 4 group levels for example's sake) and specify a fixed hatching # mass for curve optimization using \code{SSposnegRichards} modpar(posneg.data$age, posneg.data$mass, pn.options = "myoptions") subdata <- subset(posneg.data,posneg.data$id == as.character(36) | posneg.data$id == as.character(9) | posneg.data$id == as.character(32) | posneg.data$id == as.character(43)) richardsR22.lis <- nlsList(mass ~ SSposnegRichards(age, Asym = Asym, K = K, Infl = Infl, RAsym = RAsym, Rk = Rk, Ri = Ri, modno = 22, pn.options = "myoptions"), data = subdata) # force an 8 parameter estimate on logistic data modpar(logist.data$age,logist.data$mass,force8par=TRUE, pn.options = "myoptions") # force an 4 parameter model on logistic data modpar(logist.data$age,logist.data$mass,force4par=TRUE, pn.options = "myoptions") # troubleshoot the fit of a model modpar(posneg.data$age,posneg.data$mass,verbose=TRUE, pn.options = "myoptions") # fit a two component model - enter your own data in place of "mydata" # this details an approach but is not run for want of appropriate data # if x of intersection unknown ## Not run: ##D ##D ##D modpar(mydata$x,mydata$y,twocomponent.x=TRUE, pn.options = "myoptions") ##D ##D ##D # if x of intersection = 75 ##D ##D ##D modpar(mydata$x,mydata$y,twocomponent.x=75, pn.options = "myoptions") ##D ##D ##D richardsR1.nls <- nls(y~ SSposnegRichards(x, Asym = Asym, K = K, ##D ##D ##D Infl = Infl, M = M, RAsym = RAsym, Rk = Rk, Ri = Ri, RM = RM, ##D ##D ##D modno = 1, pn.options = "myoptions") ##D ##D ##D , data = mydata) ## End(Not run)
/data/genthat_extracted_code/FlexParamCurve/examples/modpar.Rd.R
no_license
surayaaramli/typeRrh
R
false
false
2,147
r
library(FlexParamCurve) ### Name: modpar ### Title: Estimate Values to be Used for Fixed FlexParamCurve Parameters ### Aliases: modpar ### ** Examples # estimate fixed parameters use data object posneg.data modpar(posneg.data$age, posneg.data$mass, pn.options = "myoptions") # estimate fixed parameters use data object posneg.data (only first # 4 group levels for example's sake) and specify a fixed hatching # mass for curve optimization using \code{SSposnegRichards} modpar(posneg.data$age, posneg.data$mass, pn.options = "myoptions") subdata <- subset(posneg.data,posneg.data$id == as.character(36) | posneg.data$id == as.character(9) | posneg.data$id == as.character(32) | posneg.data$id == as.character(43)) richardsR22.lis <- nlsList(mass ~ SSposnegRichards(age, Asym = Asym, K = K, Infl = Infl, RAsym = RAsym, Rk = Rk, Ri = Ri, modno = 22, pn.options = "myoptions"), data = subdata) # force an 8 parameter estimate on logistic data modpar(logist.data$age,logist.data$mass,force8par=TRUE, pn.options = "myoptions") # force an 4 parameter model on logistic data modpar(logist.data$age,logist.data$mass,force4par=TRUE, pn.options = "myoptions") # troubleshoot the fit of a model modpar(posneg.data$age,posneg.data$mass,verbose=TRUE, pn.options = "myoptions") # fit a two component model - enter your own data in place of "mydata" # this details an approach but is not run for want of appropriate data # if x of intersection unknown ## Not run: ##D ##D ##D modpar(mydata$x,mydata$y,twocomponent.x=TRUE, pn.options = "myoptions") ##D ##D ##D # if x of intersection = 75 ##D ##D ##D modpar(mydata$x,mydata$y,twocomponent.x=75, pn.options = "myoptions") ##D ##D ##D richardsR1.nls <- nls(y~ SSposnegRichards(x, Asym = Asym, K = K, ##D ##D ##D Infl = Infl, M = M, RAsym = RAsym, Rk = Rk, Ri = Ri, RM = RM, ##D ##D ##D modno = 1, pn.options = "myoptions") ##D ##D ##D , data = mydata) ## End(Not run)
library(ape) testtree <- read.tree("10409_0.txt") unrooted_tr <- unroot(testtree) write.tree(unrooted_tr, file="10409_0_unrooted.txt")
/codeml_files/newick_trees_processed/10409_0/rinput.R
no_license
DaniBoo/cyanobacteria_project
R
false
false
137
r
library(ape) testtree <- read.tree("10409_0.txt") unrooted_tr <- unroot(testtree) write.tree(unrooted_tr, file="10409_0_unrooted.txt")
context("iotsitewise") svc <- paws::iotsitewise() test_that("describe_default_encryption_configuration", { expect_error(svc$describe_default_encryption_configuration(), NA) }) test_that("describe_logging_options", { expect_error(svc$describe_logging_options(), NA) }) test_that("list_access_policies", { expect_error(svc$list_access_policies(), NA) }) test_that("list_asset_models", { expect_error(svc$list_asset_models(), NA) }) test_that("list_assets", { expect_error(svc$list_assets(), NA) }) test_that("list_gateways", { expect_error(svc$list_gateways(), NA) }) test_that("list_portals", { expect_error(svc$list_portals(), NA) })
/paws/tests/testthat/test_iotsitewise.R
permissive
fdrennan/paws
R
false
false
656
r
context("iotsitewise") svc <- paws::iotsitewise() test_that("describe_default_encryption_configuration", { expect_error(svc$describe_default_encryption_configuration(), NA) }) test_that("describe_logging_options", { expect_error(svc$describe_logging_options(), NA) }) test_that("list_access_policies", { expect_error(svc$list_access_policies(), NA) }) test_that("list_asset_models", { expect_error(svc$list_asset_models(), NA) }) test_that("list_assets", { expect_error(svc$list_assets(), NA) }) test_that("list_gateways", { expect_error(svc$list_gateways(), NA) }) test_that("list_portals", { expect_error(svc$list_portals(), NA) })
#' @include ISCon.R NULL # PUBLIC ----------------------------------------------------------------------- #' @importFrom jsonlite fromJSON ISCon$set( which = "public", name = "listParticipantGroups", value = function() { private$.assertAllStudyConnection() participantGroupApi <- paste0( self$config$labkey.url.base, "/participant-group", "/Studies", "/browseParticipantGroups.api?", "distinctCatgories=false&", "type=participantGroup&", "includeUnassigned=false&", "includeParticipantIds=false" ) # execute via Rlabkey's standard GET function response <- Rlabkey:::labkey.get(participantGroupApi) # parse JSON response via jsonlite's fromJSON parsing function parsed <- fromJSON(response, simplifyDataFrame = FALSE) # construct a data.table for each group groupsList <- lapply(parsed$groups, function(group) { data.table( group_id = group$id, group_name = group$label, created = as.Date(group$created), subjects = length(group$category$participantIds), studies = length(unique(gsub("SUB\\d+.", "", group$category$participantIds))) ) }) # merge the list to data.table participantGroups <- rbindlist(groupsList) if (nrow(participantGroups) == 0) { warning( "No participant groups found for the current user", immediate. = TRUE ) } else { # set order by id setorder(participantGroups, group_id) setkey(participantGroups, group_id) } participantGroups } ) # Retrieve a dataset by participant group ISCon$set( which = "public", name = "getParticipantData", value = function(group, dataType, original_view = FALSE, reload = FALSE, colFilter = NULL, transformMethod = "none", ...) { private$.assertAllStudyConnection() groupName <- private$.checkParticipantGroup(group) colFilter <- rbind( colFilter, makeFilter( c(paste0("ParticipantId/", groupName), "EQUAL", groupName) ) ) return(self$getDataset( dataType, original_view = original_view, reload = reload, colFilter = colFilter, transformMethod = transformMethod, ... 
)) } ) ISCon$set( which = "public", name = "listParticipantGEMatrices", value = function(group, verbose = FALSE) { private$.assertAllStudyConnection() groupName <- private$.checkParticipantGroup(group) participantIds <- private$.getParticipantIdsFromGroup(groupName) matrices <- self$listGEMatrices(verbose = verbose, participantIds = participantIds) return(matrices) } ) ISCon$set( which = "public", name = "getParticipantGEMatrix", value = function(group, outputType = "summary", annotation = "latest", reload = FALSE) { private$.assertAllStudyConnection() groupName <- private$.checkParticipantGroup(group) ids <- private$.getParticipantIdsFromGroup(groupName) matNames <- self$listParticipantGEMatrices(groupName)$name message(paste0(length(matNames), " matrices found for ", groupName)) eset <- self$getGEMatrix(matNames, outputType = outputType, annotation = annotation, reload = reload ) return(eset[, eset$participant_id %in% ids]) } ) # PRIVATE ---------------------------------------------------------------------- # Check if all study connection ISCon$set( which = "private", name = ".assertAllStudyConnection", value = function() { if (!identical(self$config$labkey.url.path, "/Studies/")) { stop( "This method only works with connection to all studies. ", 'Create a connection to all studies by `con <- CreateConnection("")`' ) } } ) ISCon$set( which = "private", name = ".checkParticipantGroup", value = function(group) { # Must rerun this to ensure valid groups are only for that user and are not # changed within cache. validGroups <- self$listParticipantGroups() if (is.numeric(group)) { col <- "group_id" groupName <- validGroups$group_name[validGroups$group_id == group] } else if (is.character(group)) { col <- "group_name" groupName <- group } else { stop( "`group` should be a number or string. ", "Try again with valid `group`.", "\n Call `listParticipantGroups()` to see the available groups." 
) } if (!(group %in% validGroups[[col]])) { stop( "'", group, "' is not in the set of `", col, "` created by the current user", " on ", self$config$labkey.url.base, "\n Call `listParticipantGroups()` to see the available groups." ) } return(groupName) } ) # Return a vector of participantIDs for a group ISCon$set( which = "private", name = ".getParticipantIdsFromGroup", value = function(group) { private$.assertAllStudyConnection() # ---- Get groups and associated participantIDs ---- participantGroupApi <- paste0( self$config$labkey.url.base, "/participant-group", "/Studies", "/browseParticipantGroups.api?", "distinctCatgories=false&", "type=participantGroup&", "includeUnassigned=false&", "includeParticipantIds=false" ) # execute via Rlabkey's standard GET function response <- Rlabkey:::labkey.get(participantGroupApi) # parse JSON response via jsonlite's fromJSON parsing function parsed <- jsonlite::fromJSON(response, simplifyDataFrame = FALSE) # Transform parsed json into a data.table with a row for each group # and a column containing a vector of relevant subjectids groupsList <- lapply(parsed$groups, function(group) { data.table( group_id = group$id, group_name = group$label, subjects = list(group$category$participantIds) ) }) validGroups <- rbindlist(groupsList) # ---- Get groupName ---- if (is.numeric(group)) { col <- "group_id" groupName <- validGroups$group_name[validGroups$group_id == group] } else if (is.character(group)) { col <- "group_name" groupName <- group } else { stop( "`group` should be a number or string. ", "Try again with valid `group`.", "\n Call `listParticipantGroups()` to see the available groups." ) } if (!(group %in% validGroups[[col]])) { stop( "'", group, "' is not in the set of `", col, "` created by the current user", " on ", self$config$labkey.url.base, "\n Call `listParticipantGroups()` to see the available groups." 
) } # ---- Return participantIds ---- return(validGroups[group_name == groupName, subjects][[1]]) } ) # HELPER -----------------------------------------------------------------------
/R/ISCon-participantGroup.R
no_license
RGLab/ImmuneSpaceR
R
false
false
6,815
r
#' @include ISCon.R NULL # PUBLIC ----------------------------------------------------------------------- #' @importFrom jsonlite fromJSON ISCon$set( which = "public", name = "listParticipantGroups", value = function() { private$.assertAllStudyConnection() participantGroupApi <- paste0( self$config$labkey.url.base, "/participant-group", "/Studies", "/browseParticipantGroups.api?", "distinctCatgories=false&", "type=participantGroup&", "includeUnassigned=false&", "includeParticipantIds=false" ) # execute via Rlabkey's standard GET function response <- Rlabkey:::labkey.get(participantGroupApi) # parse JSON response via jsonlite's fromJSON parsing function parsed <- fromJSON(response, simplifyDataFrame = FALSE) # construct a data.table for each group groupsList <- lapply(parsed$groups, function(group) { data.table( group_id = group$id, group_name = group$label, created = as.Date(group$created), subjects = length(group$category$participantIds), studies = length(unique(gsub("SUB\\d+.", "", group$category$participantIds))) ) }) # merge the list to data.table participantGroups <- rbindlist(groupsList) if (nrow(participantGroups) == 0) { warning( "No participant groups found for the current user", immediate. = TRUE ) } else { # set order by id setorder(participantGroups, group_id) setkey(participantGroups, group_id) } participantGroups } ) # Retrieve a dataset by participant group ISCon$set( which = "public", name = "getParticipantData", value = function(group, dataType, original_view = FALSE, reload = FALSE, colFilter = NULL, transformMethod = "none", ...) { private$.assertAllStudyConnection() groupName <- private$.checkParticipantGroup(group) colFilter <- rbind( colFilter, makeFilter( c(paste0("ParticipantId/", groupName), "EQUAL", groupName) ) ) return(self$getDataset( dataType, original_view = original_view, reload = reload, colFilter = colFilter, transformMethod = transformMethod, ... 
)) } ) ISCon$set( which = "public", name = "listParticipantGEMatrices", value = function(group, verbose = FALSE) { private$.assertAllStudyConnection() groupName <- private$.checkParticipantGroup(group) participantIds <- private$.getParticipantIdsFromGroup(groupName) matrices <- self$listGEMatrices(verbose = verbose, participantIds = participantIds) return(matrices) } ) ISCon$set( which = "public", name = "getParticipantGEMatrix", value = function(group, outputType = "summary", annotation = "latest", reload = FALSE) { private$.assertAllStudyConnection() groupName <- private$.checkParticipantGroup(group) ids <- private$.getParticipantIdsFromGroup(groupName) matNames <- self$listParticipantGEMatrices(groupName)$name message(paste0(length(matNames), " matrices found for ", groupName)) eset <- self$getGEMatrix(matNames, outputType = outputType, annotation = annotation, reload = reload ) return(eset[, eset$participant_id %in% ids]) } ) # PRIVATE ---------------------------------------------------------------------- # Check if all study connection ISCon$set( which = "private", name = ".assertAllStudyConnection", value = function() { if (!identical(self$config$labkey.url.path, "/Studies/")) { stop( "This method only works with connection to all studies. ", 'Create a connection to all studies by `con <- CreateConnection("")`' ) } } ) ISCon$set( which = "private", name = ".checkParticipantGroup", value = function(group) { # Must rerun this to ensure valid groups are only for that user and are not # changed within cache. validGroups <- self$listParticipantGroups() if (is.numeric(group)) { col <- "group_id" groupName <- validGroups$group_name[validGroups$group_id == group] } else if (is.character(group)) { col <- "group_name" groupName <- group } else { stop( "`group` should be a number or string. ", "Try again with valid `group`.", "\n Call `listParticipantGroups()` to see the available groups." 
) } if (!(group %in% validGroups[[col]])) { stop( "'", group, "' is not in the set of `", col, "` created by the current user", " on ", self$config$labkey.url.base, "\n Call `listParticipantGroups()` to see the available groups." ) } return(groupName) } ) # Return a vector of participantIDs for a group ISCon$set( which = "private", name = ".getParticipantIdsFromGroup", value = function(group) { private$.assertAllStudyConnection() # ---- Get groups and associated participantIDs ---- participantGroupApi <- paste0( self$config$labkey.url.base, "/participant-group", "/Studies", "/browseParticipantGroups.api?", "distinctCatgories=false&", "type=participantGroup&", "includeUnassigned=false&", "includeParticipantIds=false" ) # execute via Rlabkey's standard GET function response <- Rlabkey:::labkey.get(participantGroupApi) # parse JSON response via jsonlite's fromJSON parsing function parsed <- jsonlite::fromJSON(response, simplifyDataFrame = FALSE) # Transform parsed json into a data.table with a row for each group # and a column containing a vector of relevant subjectids groupsList <- lapply(parsed$groups, function(group) { data.table( group_id = group$id, group_name = group$label, subjects = list(group$category$participantIds) ) }) validGroups <- rbindlist(groupsList) # ---- Get groupName ---- if (is.numeric(group)) { col <- "group_id" groupName <- validGroups$group_name[validGroups$group_id == group] } else if (is.character(group)) { col <- "group_name" groupName <- group } else { stop( "`group` should be a number or string. ", "Try again with valid `group`.", "\n Call `listParticipantGroups()` to see the available groups." ) } if (!(group %in% validGroups[[col]])) { stop( "'", group, "' is not in the set of `", col, "` created by the current user", " on ", self$config$labkey.url.base, "\n Call `listParticipantGroups()` to see the available groups." 
) } # ---- Return participantIds ---- return(validGroups[group_name == groupName, subjects][[1]]) } ) # HELPER -----------------------------------------------------------------------
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/user-drop.R \name{drop_by_rank} \alias{drop_by_rank} \title{Reduce clusters to specific rank} \usage{ drop_by_rank(phylota, rnk = "species", keep_higher = FALSE, n = 10, choose_by = c("pambgs", "age", "nncltds"), greatest = c(FALSE, FALSE, TRUE)) } \arguments{ \item{phylota}{Phylota object} \item{rnk}{Taxonomic rank} \item{keep_higher}{Keep higher taxonomic ranks?} \item{n}{Number of sequences per taxon} \item{choose_by}{Vector of selection functions} \item{greatest}{Greatest of lowest for each choose_by function} } \value{ phylota } \description{ Identifies higher level taxa for each sequence in clusters for given rank. Selects representative sequences for each unique taxon using the choose_by functions. By default, the function will choose the top ten sequences by first sorting by those with fewest number of ambiguous sequences, then by youngest, then by sequence length. } \examples{ data("dragonflies") # For faster computations, let's only work with the 5 clusters. dragonflies <- drop_clstrs(phylota = dragonflies, cid = dragonflies@cids[10:15]) # We can use drop_by_rank() to reduce to 10 sequences per genus for each cluster (reduced_1 <- drop_by_rank(phylota = dragonflies, rnk = 'genus', n = 10, choose_by = c('pambgs', 'age', 'nncltds'), greatest = c(FALSE, FALSE, TRUE))) # We can specify what aspects of the sequences we would like to select per genus # By default we select the sequences with fewest ambiguous nucleotides (e.g. # we avoid Ns), the youngest age and then longest sequence. # We can reverse the 'greatest' to get the opposite. (reduced_2 <- drop_by_rank(phylota = dragonflies, rnk = 'genus', n = 10, choose_by = c('pambgs', 'age', 'nncltds'), greatest = c(TRUE, TRUE, FALSE))) # Leading to smaller sequnces ... 
r1_sqlngth <- mean(get_sq_slot(phylota = reduced_1, sid = reduced_1@sids, slt_nm = 'nncltds')) r2_sqlngth <- mean(get_sq_slot(phylota = reduced_2, sid = reduced_2@sids, slt_nm = 'nncltds')) (r1_sqlngth > r2_sqlngth) # ... with more ambigous characters .... r1_pambgs <- mean(get_sq_slot(phylota = reduced_1, sid = reduced_1@sids, slt_nm = 'pambgs')) r2_pambgs <- mean(get_sq_slot(phylota = reduced_2, sid = reduced_2@sids, slt_nm = 'pambgs')) (r1_pambgs < r2_pambgs) # .... and older ages (measured in days since being added to GenBank). r1_age <- mean(get_sq_slot(phylota = reduced_1, sid = reduced_1@sids, slt_nm = 'age')) r2_age <- mean(get_sq_slot(phylota = reduced_2, sid = reduced_2@sids, slt_nm = 'age')) (r1_age < r2_age) # Or... we can simply reduce the clusters to just one sequence per genus (dragonflies <- drop_by_rank(phylota = dragonflies, rnk = 'genus', n = 1)) } \seealso{ Other tools-public: \code{\link{calc_mad}}, \code{\link{calc_wrdfrq}}, \code{\link{drop_clstrs}}, \code{\link{drop_sqs}}, \code{\link{get_clstr_slot}}, \code{\link{get_nsqs}}, \code{\link{get_ntaxa}}, \code{\link{get_sq_slot}}, \code{\link{get_stage_times}}, \code{\link{get_tx_slot}}, \code{\link{get_txids}}, \code{\link{is_txid_in_clstr}}, \code{\link{is_txid_in_sq}}, \code{\link{list_clstrrec_slots}}, \code{\link{list_ncbi_ranks}}, \code{\link{list_seqrec_slots}}, \code{\link{list_taxrec_slots}}, \code{\link{plot_phylota_pa}}, \code{\link{plot_phylota_treemap}}, \code{\link{read_phylota}}, \code{\link{write_sqs}} } \concept{tools-public}
/man/drop_by_rank.Rd
permissive
haithamsghaier/phylotaR
R
false
true
3,697
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/user-drop.R \name{drop_by_rank} \alias{drop_by_rank} \title{Reduce clusters to specific rank} \usage{ drop_by_rank(phylota, rnk = "species", keep_higher = FALSE, n = 10, choose_by = c("pambgs", "age", "nncltds"), greatest = c(FALSE, FALSE, TRUE)) } \arguments{ \item{phylota}{Phylota object} \item{rnk}{Taxonomic rank} \item{keep_higher}{Keep higher taxonomic ranks?} \item{n}{Number of sequences per taxon} \item{choose_by}{Vector of selection functions} \item{greatest}{Greatest of lowest for each choose_by function} } \value{ phylota } \description{ Identifies higher level taxa for each sequence in clusters for given rank. Selects representative sequences for each unique taxon using the choose_by functions. By default, the function will choose the top ten sequences by first sorting by those with fewest number of ambiguous sequences, then by youngest, then by sequence length. } \examples{ data("dragonflies") # For faster computations, let's only work with the 5 clusters. dragonflies <- drop_clstrs(phylota = dragonflies, cid = dragonflies@cids[10:15]) # We can use drop_by_rank() to reduce to 10 sequences per genus for each cluster (reduced_1 <- drop_by_rank(phylota = dragonflies, rnk = 'genus', n = 10, choose_by = c('pambgs', 'age', 'nncltds'), greatest = c(FALSE, FALSE, TRUE))) # We can specify what aspects of the sequences we would like to select per genus # By default we select the sequences with fewest ambiguous nucleotides (e.g. # we avoid Ns), the youngest age and then longest sequence. # We can reverse the 'greatest' to get the opposite. (reduced_2 <- drop_by_rank(phylota = dragonflies, rnk = 'genus', n = 10, choose_by = c('pambgs', 'age', 'nncltds'), greatest = c(TRUE, TRUE, FALSE))) # Leading to smaller sequnces ... 
r1_sqlngth <- mean(get_sq_slot(phylota = reduced_1, sid = reduced_1@sids, slt_nm = 'nncltds')) r2_sqlngth <- mean(get_sq_slot(phylota = reduced_2, sid = reduced_2@sids, slt_nm = 'nncltds')) (r1_sqlngth > r2_sqlngth) # ... with more ambigous characters .... r1_pambgs <- mean(get_sq_slot(phylota = reduced_1, sid = reduced_1@sids, slt_nm = 'pambgs')) r2_pambgs <- mean(get_sq_slot(phylota = reduced_2, sid = reduced_2@sids, slt_nm = 'pambgs')) (r1_pambgs < r2_pambgs) # .... and older ages (measured in days since being added to GenBank). r1_age <- mean(get_sq_slot(phylota = reduced_1, sid = reduced_1@sids, slt_nm = 'age')) r2_age <- mean(get_sq_slot(phylota = reduced_2, sid = reduced_2@sids, slt_nm = 'age')) (r1_age < r2_age) # Or... we can simply reduce the clusters to just one sequence per genus (dragonflies <- drop_by_rank(phylota = dragonflies, rnk = 'genus', n = 1)) } \seealso{ Other tools-public: \code{\link{calc_mad}}, \code{\link{calc_wrdfrq}}, \code{\link{drop_clstrs}}, \code{\link{drop_sqs}}, \code{\link{get_clstr_slot}}, \code{\link{get_nsqs}}, \code{\link{get_ntaxa}}, \code{\link{get_sq_slot}}, \code{\link{get_stage_times}}, \code{\link{get_tx_slot}}, \code{\link{get_txids}}, \code{\link{is_txid_in_clstr}}, \code{\link{is_txid_in_sq}}, \code{\link{list_clstrrec_slots}}, \code{\link{list_ncbi_ranks}}, \code{\link{list_seqrec_slots}}, \code{\link{list_taxrec_slots}}, \code{\link{plot_phylota_pa}}, \code{\link{plot_phylota_treemap}}, \code{\link{read_phylota}}, \code{\link{write_sqs}} } \concept{tools-public}
setwd('~/Desktop/SLEEP/SleepExercise/') dat <- read.csv('test3.csv') dat <- dat[2,] nrow(dat) ncol(dat) selections <- read.csv('test4.csv') col2<-colnames(dat) col3<-grep("^S[0-9]+_AAT[0-9]+$",selections$which) selections_excludeAATs <- selections[col3,] col4<-subset(selections_excludeAATs, select=("which")) col2<-col2[!is.element(col2,col4$which)] print (col2) col5<-col2[grep("^S[0-9]+_AAT[0-9]+$",col2)] print(col5) datAAT <- subset(dat, select=(col5)) col2<-colnames(dat) col3<-grep("^S[0-9]+_PARA$",selections$which) selections_PARAs <- selections[col3,] nrow(daAAT) ncol(datAAT) nrow(datneu) ncol(datneu) ncol(selections_PARAs) colAATs <- col5 i<- NULL write("<item run_story_negative_options>", file ="output.txt",append=TRUE) counter <- 1 for (i in 1:nrow(selections_PARAs)) { usage1 <- selections_PARAs[i,1] usage4<- sub("anger","", usage1) usage2 <- (selections_PARAs[i,2]) usage3 <- sub("_PARA","", usage2) print(usage3) grepfind <- paste(usage3,sep="","_AAT[0-9]+$") print(grepfind) #find all X value for S[0-9]_ATTX colAATs<-col5[grep(grepfind,col5)] datAATs <- subset(datAAT, select=(colAATs)) for(j in 1:ncol(datAATs)) { write(paste("/",counter,"= \"", datAATs[1,j],"\"",sep=""),file="output.txt", append=TRUE) counter <- counter + 1 } } write("</item>", file ="output.txt",append=TRUE) for (i in 1:ncol(datAAT)) { print(colnames(datAAT)[i]) dat2<- (datAAT[1,i]) dat2<- levels(dat2) option <- NULL for (j in 1:4) { statement<- (dat2[j]) if(nchar(statement)>3) { option <- statement } } print(option) write(colnames(datAAT)[i], file = "output.txt", append=TRUE) write(option, file = "output.txt", append=TRUE) } i <- NULL columns_neu <- grep("^neu_select_[0-9]+",colnames(dat)) datneu <- dat[,columns_neu] for (i in 1:ncol(datneu)) { print(colnames(datneu)[i]) dat2<- (datneu[1,i]) dat2<- levels(dat2) option <- NULL for (j in 1:4) { statement<- (dat2[j]) if(nchar(statement)>3) { option <- statement } } print(option) write(colnames(datneu)[i], file = "output.txt", append=TRUE) 
write(option, file = "output.txt", append=TRUE) } colnames(datAAT)[1] dat[1,1] dat2<- (dat[1,1]) attributes(dat2) levels(dat2)[3] columns_AAT <- grep("^S[0-9]+_AAT[0-9]+$",col2) print(columns_AAT) datAAT <- dat[,columns_AAT]
/archives/autobio_inputparsing/test.R
no_license
nanditamangal/Physiopsychology_stimuli
R
false
false
2,392
r
setwd('~/Desktop/SLEEP/SleepExercise/') dat <- read.csv('test3.csv') dat <- dat[2,] nrow(dat) ncol(dat) selections <- read.csv('test4.csv') col2<-colnames(dat) col3<-grep("^S[0-9]+_AAT[0-9]+$",selections$which) selections_excludeAATs <- selections[col3,] col4<-subset(selections_excludeAATs, select=("which")) col2<-col2[!is.element(col2,col4$which)] print (col2) col5<-col2[grep("^S[0-9]+_AAT[0-9]+$",col2)] print(col5) datAAT <- subset(dat, select=(col5)) col2<-colnames(dat) col3<-grep("^S[0-9]+_PARA$",selections$which) selections_PARAs <- selections[col3,] nrow(daAAT) ncol(datAAT) nrow(datneu) ncol(datneu) ncol(selections_PARAs) colAATs <- col5 i<- NULL write("<item run_story_negative_options>", file ="output.txt",append=TRUE) counter <- 1 for (i in 1:nrow(selections_PARAs)) { usage1 <- selections_PARAs[i,1] usage4<- sub("anger","", usage1) usage2 <- (selections_PARAs[i,2]) usage3 <- sub("_PARA","", usage2) print(usage3) grepfind <- paste(usage3,sep="","_AAT[0-9]+$") print(grepfind) #find all X value for S[0-9]_ATTX colAATs<-col5[grep(grepfind,col5)] datAATs <- subset(datAAT, select=(colAATs)) for(j in 1:ncol(datAATs)) { write(paste("/",counter,"= \"", datAATs[1,j],"\"",sep=""),file="output.txt", append=TRUE) counter <- counter + 1 } } write("</item>", file ="output.txt",append=TRUE) for (i in 1:ncol(datAAT)) { print(colnames(datAAT)[i]) dat2<- (datAAT[1,i]) dat2<- levels(dat2) option <- NULL for (j in 1:4) { statement<- (dat2[j]) if(nchar(statement)>3) { option <- statement } } print(option) write(colnames(datAAT)[i], file = "output.txt", append=TRUE) write(option, file = "output.txt", append=TRUE) } i <- NULL columns_neu <- grep("^neu_select_[0-9]+",colnames(dat)) datneu <- dat[,columns_neu] for (i in 1:ncol(datneu)) { print(colnames(datneu)[i]) dat2<- (datneu[1,i]) dat2<- levels(dat2) option <- NULL for (j in 1:4) { statement<- (dat2[j]) if(nchar(statement)>3) { option <- statement } } print(option) write(colnames(datneu)[i], file = "output.txt", append=TRUE) 
write(option, file = "output.txt", append=TRUE) } colnames(datAAT)[1] dat[1,1] dat2<- (dat[1,1]) attributes(dat2) levels(dat2)[3] columns_AAT <- grep("^S[0-9]+_AAT[0-9]+$",col2) print(columns_AAT) datAAT <- dat[,columns_AAT]
############################################################################## ## ## R package dynsurv by Xiaojing Wang, Jun Yan, and Ming-Hui Chen ## Copyright (C) 2011 ## ## This file is part of the R package dynsurv. ## ## The R package dynsurv is free software: you can redistribute it and/or ## modify it under the terms of the GNU General Public License as published ## by the Free Software Foundation, either version 3 of the License, or ## (at your option) any later version. ## ## The R package dynsurv is distributed in the hope that it will be useful, ## but WITHOUT ANY WARRANTY; without even the implied warranty of ## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ## GNU General Public License for more details. ## ## You should have received a copy of the GNU General Public License ## along with the R package dynsurv. If not, see <http://www.gnu.org/licenses/>. ## ############################################################################## ############################################################################## # Extract the coefficient from "bayesCox" object ############################################################################## coef.bayesCox <- function(object, ...) 
{ # Monte Carlo samples ms <- as.matrix(read.table(file=object$out)) dimnames(ms) <- NULL ms <- ms[seq(object$gibbs$burn + 1, nrow(ms), by=object$gibbs$thin), ] iter <- nrow(ms) # Dimension of baseline grid <- object$grid K <- length(grid) cK <- ifelse(object$model == "TimeIndep", 1, K) nBeta <- length(object$cov.names) betaMat <- as.matrix(ms[, seq(K + 1, K + nBeta * cK)]) f <- function(x) { c(quantile(x, probs=0.025, names=FALSE), mean(x), quantile(x, probs=0.975, names=FALSE)) } betaMatQT <- t(apply(betaMat, 2, f)) if (object$model == "TimeIndep") betaMatQT <- betaMatQT[rep(1:nBeta, each=K), ] # Insert one more value at time zero betaMatQT <- betaMatQT[rep(seq(1, nBeta * K), rep(c(2, rep(1, K - 1)), nBeta)), ] res <- data.frame(betaMatQT, rep(c(0, grid), nBeta), rep(object$cov.names, each=K + 1), rep(object$model, nBeta * (K + 1))) colnames(res) <- c("Low", "Mid", "High", "Time", "Cov", "Model") # Make sure the Cov retains the original order res$Cov <- factor(res$Cov, levels=as.character(unique(res$Cov))) res } ############################################################################## # Extract the coefficient data from "tvTran" object ############################################################################## coef.tvTran <- function(object, ...) 
{ K <- object$K nBeta <- object$nBeta rsMat <- object$rsEst[, seq(1, nBeta * K)] betaMat <- cbind(apply(rsMat, 2, quantile, probs=0.025, na.rm=TRUE, names=FALSE), object$pEst[seq(1, nBeta * K)], apply(rsMat, 2, quantile, probs=0.975, na.rm=TRUE, names=FALSE)) # betaMat[betaMat < -bound | betaMat > bound] <- NA # Insert one more value at time zero betaMat <- betaMat[rep(seq(1, nBeta * K), rep(c(2, rep(1, K - 1)), nBeta)), ] res <- data.frame(betaMat, rep(c(0, object$eTime), nBeta), rep(object$cov.names, each=K + 1), rep("tvTran", nBeta * (K + 1))) colnames(res) <- c("Low", "Mid", "High", "Time", "Cov", "Model") # Make sure the Cov retains the original orde res$Cov <- factor(res$Cov, levels=as.character(unique(res$Cov))) res } ############################################################################## # Extract the coefficient from "splineCox" object ############################################################################## coef.splineCox <- function(object, ...) { fit <- object$coxph.fit basis <- object$bsp.basis K <- 101 x <- seq(basis$Boundary.knots[1], basis$Boundary.knots[2], length=K) bspMat <- do.call("bs", c(list(x=x), basis)) curInd <- 1 res <- data.frame() for (j in seq(1, object$nBeta)) { if (!object$is.tv[j]) { yMid <- rep(fit$coef[curInd], K) ySE <- sqrt(fit$var[curInd, curInd]) curInd <- curInd + 1 } else { sq <- seq(curInd, curInd + basis$df - 1) yMid <- c(bspMat %*% fit$coef[sq]) yVar <- diag(bspMat %*% fit$var[sq, sq] %*% t(bspMat)) yVar[which(yVar < 0)] <- 0 ySE <- sqrt(yVar) curInd <- curInd + basis$df } yLow <- yMid - 1.96 * ySE yHigh <- yMid + 1.96 * ySE res <- rbind(res, data.frame(Low=yLow, Mid=yMid, High=yHigh, Time=x, Cov=object$cov.names[j], Model="Spline")) } # Make sure the Cov retains the original orde res$Cov <- factor(res$Cov, levels=as.character(unique(res$Cov))) res }
/dynsurv/R/coef.R
no_license
ingted/R-Examples
R
false
false
5,035
r
############################################################################## ## ## R package dynsurv by Xiaojing Wang, Jun Yan, and Ming-Hui Chen ## Copyright (C) 2011 ## ## This file is part of the R package dynsurv. ## ## The R package dynsurv is free software: you can redistribute it and/or ## modify it under the terms of the GNU General Public License as published ## by the Free Software Foundation, either version 3 of the License, or ## (at your option) any later version. ## ## The R package dynsurv is distributed in the hope that it will be useful, ## but WITHOUT ANY WARRANTY; without even the implied warranty of ## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ## GNU General Public License for more details. ## ## You should have received a copy of the GNU General Public License ## along with the R package dynsurv. If not, see <http://www.gnu.org/licenses/>. ## ############################################################################## ############################################################################## # Extract the coefficient from "bayesCox" object ############################################################################## coef.bayesCox <- function(object, ...) 
{ # Monte Carlo samples ms <- as.matrix(read.table(file=object$out)) dimnames(ms) <- NULL ms <- ms[seq(object$gibbs$burn + 1, nrow(ms), by=object$gibbs$thin), ] iter <- nrow(ms) # Dimension of baseline grid <- object$grid K <- length(grid) cK <- ifelse(object$model == "TimeIndep", 1, K) nBeta <- length(object$cov.names) betaMat <- as.matrix(ms[, seq(K + 1, K + nBeta * cK)]) f <- function(x) { c(quantile(x, probs=0.025, names=FALSE), mean(x), quantile(x, probs=0.975, names=FALSE)) } betaMatQT <- t(apply(betaMat, 2, f)) if (object$model == "TimeIndep") betaMatQT <- betaMatQT[rep(1:nBeta, each=K), ] # Insert one more value at time zero betaMatQT <- betaMatQT[rep(seq(1, nBeta * K), rep(c(2, rep(1, K - 1)), nBeta)), ] res <- data.frame(betaMatQT, rep(c(0, grid), nBeta), rep(object$cov.names, each=K + 1), rep(object$model, nBeta * (K + 1))) colnames(res) <- c("Low", "Mid", "High", "Time", "Cov", "Model") # Make sure the Cov retains the original order res$Cov <- factor(res$Cov, levels=as.character(unique(res$Cov))) res } ############################################################################## # Extract the coefficient data from "tvTran" object ############################################################################## coef.tvTran <- function(object, ...) 
{ K <- object$K nBeta <- object$nBeta rsMat <- object$rsEst[, seq(1, nBeta * K)] betaMat <- cbind(apply(rsMat, 2, quantile, probs=0.025, na.rm=TRUE, names=FALSE), object$pEst[seq(1, nBeta * K)], apply(rsMat, 2, quantile, probs=0.975, na.rm=TRUE, names=FALSE)) # betaMat[betaMat < -bound | betaMat > bound] <- NA # Insert one more value at time zero betaMat <- betaMat[rep(seq(1, nBeta * K), rep(c(2, rep(1, K - 1)), nBeta)), ] res <- data.frame(betaMat, rep(c(0, object$eTime), nBeta), rep(object$cov.names, each=K + 1), rep("tvTran", nBeta * (K + 1))) colnames(res) <- c("Low", "Mid", "High", "Time", "Cov", "Model") # Make sure the Cov retains the original orde res$Cov <- factor(res$Cov, levels=as.character(unique(res$Cov))) res } ############################################################################## # Extract the coefficient from "splineCox" object ############################################################################## coef.splineCox <- function(object, ...) { fit <- object$coxph.fit basis <- object$bsp.basis K <- 101 x <- seq(basis$Boundary.knots[1], basis$Boundary.knots[2], length=K) bspMat <- do.call("bs", c(list(x=x), basis)) curInd <- 1 res <- data.frame() for (j in seq(1, object$nBeta)) { if (!object$is.tv[j]) { yMid <- rep(fit$coef[curInd], K) ySE <- sqrt(fit$var[curInd, curInd]) curInd <- curInd + 1 } else { sq <- seq(curInd, curInd + basis$df - 1) yMid <- c(bspMat %*% fit$coef[sq]) yVar <- diag(bspMat %*% fit$var[sq, sq] %*% t(bspMat)) yVar[which(yVar < 0)] <- 0 ySE <- sqrt(yVar) curInd <- curInd + basis$df } yLow <- yMid - 1.96 * ySE yHigh <- yMid + 1.96 * ySE res <- rbind(res, data.frame(Low=yLow, Mid=yMid, High=yHigh, Time=x, Cov=object$cov.names[j], Model="Spline")) } # Make sure the Cov retains the original orde res$Cov <- factor(res$Cov, levels=as.character(unique(res$Cov))) res }
### R code from vignette source 'Anx7.Rnw' ### Encoding: ISO8859-1 ################################################### ### code chunk number 1: Anx7.Rnw:135-139 ################################################### owidth <- getOption("width") # largeur des sorties options(width=60, continue="+ ","warn"=-1 ) .PngNo <- 0 nom.fich = "./Figures/annexe_simul-bitmap-" ################################################### ### code chunk number 2: bfig (eval = FALSE) ################################################### ## .PngNo <- .PngNo + 1; file = paste(nom.fich, .PngNo, sep="") ## pdf(file=paste(file,".pdf",sep=""), width = 7, height = 7, pointsize = 12, bg = "white") ################################################### ### code chunk number 3: bfigps (eval = FALSE) ################################################### ## postscript(file=paste(file,".ps",sep=""), width = 7, height = 7, pointsize = 12, bg = "white",horizontal= FALSE,paper="special") ################################################### ### code chunk number 4: bfig1 (eval = FALSE) ################################################### ## .PngNo <- .PngNo + 1; file = paste(nom.fich, .PngNo, sep="") ## pdf(file=paste(file,".pdf",sep=""), width = 5, height = 2, pointsize = 10, bg = "white") ################################################### ### code chunk number 5: bfigps1 (eval = FALSE) ################################################### ## postscript(file=paste(file,".ps",sep=""), width = 5, height =2, pointsize = 10, bg = "white",horizontal= FALSE,paper="special") ################################################### ### code chunk number 6: bfig2 (eval = FALSE) ################################################### ## .PngNo <- .PngNo + 1; file = paste(nom.fich, .PngNo, sep="") ## pdf(file=paste(file,".pdf",sep=""), width = 3.9, height = 3.1, pointsize = 10, bg = "white") ################################################### ### code chunk number 7: bfigps2 (eval = FALSE) 
################################################### ## postscript(file=paste(file,".ps",sep=""), width = 3.9, height = 3.1, pointsize = 10, bg = "white",horizontal= FALSE,paper="special") ################################################### ### code chunk number 8: bfig3 (eval = FALSE) ################################################### ## .PngNo <- .PngNo + 1; file = paste(nom.fich, .PngNo, sep="") ## pdf(file=paste(file,".pdf",sep=""), width = 5.92, height = 6.74, pointsize = 10, bg = "white") ################################################### ### code chunk number 9: bfigps3 (eval = FALSE) ################################################### ## postscript(file=paste(file,".ps",sep=""), width = 5.92, height = 6.74, pointsize = 10, bg = "white",horizontal= FALSE,paper="special") ################################################### ### code chunk number 10: bfig4 (eval = FALSE) ################################################### ## .PngNo <- .PngNo + 1; file = paste(nom.fich, .PngNo, sep="") ## pdf(file=paste(file,".pdf",sep=""), width = 6, height = 6, pointsize = 10, bg = "white") ################################################### ### code chunk number 11: bfigps4 (eval = FALSE) ################################################### ## postscript(file=paste(file,".ps",sep=""), width = 6, height = 6, pointsize = 10, bg = "white",horizontal= FALSE,paper="special") ################################################### ### code chunk number 12: zfig2 (eval = FALSE) ################################################### ## dev.null <- dev.off() ################################################### ### code chunk number 13: zfiginclude (eval = FALSE) ################################################### ## cat("\\includegraphics[width=0.9\\textwidth]{", file, "}\n\n", sep="") ################################################### ### code chunk number 14: simusar.ini ################################################### # graine set.seed(2761) innov1 = rnorm(290, sd = 4.18) y = 
arima.sim(list(order = c(12, 0, 1), ma = -.7, ar = c(rep(0, 11), .9)), innov = innov1, n.start = 50, n = 240) + 50 y.ts = ts(y, frequency = 12, start = c(1920, 1)) ytr = cbind(y.ts, nottem) colnames(ytr) = c("serie simulee", "temperature") ################################################### ### code chunk number 15: autop ################################################### require("polynom") autop = polynomial(c(1, -1/1.4))*polynomial(c(1, -1))*polynomial(c(1, -1/1.9)) ################################################### ### code chunk number 16: autop2 ################################################### autop1 = polynomial(c(1, -1/1.4))*polynomial(c(1, -1/1.9)) asim8b = arima.sim(n = 60, list(ar = -autop1[-1], order = c(2, 1, 0))) ################################################### ### code chunk number 17: construct3 ################################################### require(dse) AR = array(autop1, c(length(autop1), 1, 1)) MA = array(1, c(1, 1, 1)) mod2 = ARMA(A = AR, B = MA) asim8c = simulate(mod2, sampleT = 60, sd = 1.5)
/inst/doc/Anx7.R
no_license
learning-freak/caschrono
R
false
false
5,152
r
### R code from vignette source 'Anx7.Rnw' ### Encoding: ISO8859-1 ################################################### ### code chunk number 1: Anx7.Rnw:135-139 ################################################### owidth <- getOption("width") # largeur des sorties options(width=60, continue="+ ","warn"=-1 ) .PngNo <- 0 nom.fich = "./Figures/annexe_simul-bitmap-" ################################################### ### code chunk number 2: bfig (eval = FALSE) ################################################### ## .PngNo <- .PngNo + 1; file = paste(nom.fich, .PngNo, sep="") ## pdf(file=paste(file,".pdf",sep=""), width = 7, height = 7, pointsize = 12, bg = "white") ################################################### ### code chunk number 3: bfigps (eval = FALSE) ################################################### ## postscript(file=paste(file,".ps",sep=""), width = 7, height = 7, pointsize = 12, bg = "white",horizontal= FALSE,paper="special") ################################################### ### code chunk number 4: bfig1 (eval = FALSE) ################################################### ## .PngNo <- .PngNo + 1; file = paste(nom.fich, .PngNo, sep="") ## pdf(file=paste(file,".pdf",sep=""), width = 5, height = 2, pointsize = 10, bg = "white") ################################################### ### code chunk number 5: bfigps1 (eval = FALSE) ################################################### ## postscript(file=paste(file,".ps",sep=""), width = 5, height =2, pointsize = 10, bg = "white",horizontal= FALSE,paper="special") ################################################### ### code chunk number 6: bfig2 (eval = FALSE) ################################################### ## .PngNo <- .PngNo + 1; file = paste(nom.fich, .PngNo, sep="") ## pdf(file=paste(file,".pdf",sep=""), width = 3.9, height = 3.1, pointsize = 10, bg = "white") ################################################### ### code chunk number 7: bfigps2 (eval = FALSE) 
################################################### ## postscript(file=paste(file,".ps",sep=""), width = 3.9, height = 3.1, pointsize = 10, bg = "white",horizontal= FALSE,paper="special") ################################################### ### code chunk number 8: bfig3 (eval = FALSE) ################################################### ## .PngNo <- .PngNo + 1; file = paste(nom.fich, .PngNo, sep="") ## pdf(file=paste(file,".pdf",sep=""), width = 5.92, height = 6.74, pointsize = 10, bg = "white") ################################################### ### code chunk number 9: bfigps3 (eval = FALSE) ################################################### ## postscript(file=paste(file,".ps",sep=""), width = 5.92, height = 6.74, pointsize = 10, bg = "white",horizontal= FALSE,paper="special") ################################################### ### code chunk number 10: bfig4 (eval = FALSE) ################################################### ## .PngNo <- .PngNo + 1; file = paste(nom.fich, .PngNo, sep="") ## pdf(file=paste(file,".pdf",sep=""), width = 6, height = 6, pointsize = 10, bg = "white") ################################################### ### code chunk number 11: bfigps4 (eval = FALSE) ################################################### ## postscript(file=paste(file,".ps",sep=""), width = 6, height = 6, pointsize = 10, bg = "white",horizontal= FALSE,paper="special") ################################################### ### code chunk number 12: zfig2 (eval = FALSE) ################################################### ## dev.null <- dev.off() ################################################### ### code chunk number 13: zfiginclude (eval = FALSE) ################################################### ## cat("\\includegraphics[width=0.9\\textwidth]{", file, "}\n\n", sep="") ################################################### ### code chunk number 14: simusar.ini ################################################### # graine set.seed(2761) innov1 = rnorm(290, sd = 4.18) y = 
arima.sim(list(order = c(12, 0, 1), ma = -.7, ar = c(rep(0, 11), .9)), innov = innov1, n.start = 50, n = 240) + 50 y.ts = ts(y, frequency = 12, start = c(1920, 1)) ytr = cbind(y.ts, nottem) colnames(ytr) = c("serie simulee", "temperature") ################################################### ### code chunk number 15: autop ################################################### require("polynom") autop = polynomial(c(1, -1/1.4))*polynomial(c(1, -1))*polynomial(c(1, -1/1.9)) ################################################### ### code chunk number 16: autop2 ################################################### autop1 = polynomial(c(1, -1/1.4))*polynomial(c(1, -1/1.9)) asim8b = arima.sim(n = 60, list(ar = -autop1[-1], order = c(2, 1, 0))) ################################################### ### code chunk number 17: construct3 ################################################### require(dse) AR = array(autop1, c(length(autop1), 1, 1)) MA = array(1, c(1, 1, 1)) mod2 = ARMA(A = AR, B = MA) asim8c = simulate(mod2, sampleT = 60, sd = 1.5)
rm(list=ls()) require(jsonlite) require(leaflet) require(stringr) grupos <- fromJSON("http://cuecaller.son0p.net/api/grupos/apariciones") ## Para ver la estructura del objeto str(grupos) ## Para ver los nombres del contenido del objeto, que puede ser columnas u otros objetos names(grupos) # Corresponden con los nombres y el índice ## Para acceder a los elementos de un objeto se puede usar $masnombredecolumna grupos[1:2,c(20,12)] grupos[1:2,c("nombre","facebook")] # Para ver la longitud se usa length length(grupos[1:2,c("nombre","facebook")]) length(grupos[1:20,c("nombre","facebook")]) length(grupos[1:20,c("nombre")]) grupos$`nombre con espacios y tíldes` #Para poder acceder a columnas con se usan las tildes invertidas ## Si no pongo nada en el campo de col o row se trae todo lo que hay apariciones <- grupos[,c("nombre","apariciones")] str(apariciones) ## lapply permite aplicar una función a una lista apariciones$apariciones[unlist(lapply(apariciones$apariciones, is.null))] <- "null" apariciones_ev <- unlist(apariciones$apariciones) ######################## ## Generar mapa a partir de los archivos de geojson en el repositorio github.com/son0p/mapasGrupos ######################## masacre <- fromJSON("https://raw.githubusercontent.com/son0p/mapasGrupos/master/masacre.geojson") ## http://rstudio.github.io/leaflet/json.html m <- leaflet() %>% addProviderTiles("Stamen.Toner") %>% addGeoJSON(toJSON(masacre)) m # Print the map htmlwidgets::saveWidget(m, "./mapa.html")
/scripts/mapa.R
no_license
son0p/cueCaller
R
false
false
1,513
r
rm(list=ls()) require(jsonlite) require(leaflet) require(stringr) grupos <- fromJSON("http://cuecaller.son0p.net/api/grupos/apariciones") ## Para ver la estructura del objeto str(grupos) ## Para ver los nombres del contenido del objeto, que puede ser columnas u otros objetos names(grupos) # Corresponden con los nombres y el índice ## Para acceder a los elementos de un objeto se puede usar $masnombredecolumna grupos[1:2,c(20,12)] grupos[1:2,c("nombre","facebook")] # Para ver la longitud se usa length length(grupos[1:2,c("nombre","facebook")]) length(grupos[1:20,c("nombre","facebook")]) length(grupos[1:20,c("nombre")]) grupos$`nombre con espacios y tíldes` #Para poder acceder a columnas con se usan las tildes invertidas ## Si no pongo nada en el campo de col o row se trae todo lo que hay apariciones <- grupos[,c("nombre","apariciones")] str(apariciones) ## lapply permite aplicar una función a una lista apariciones$apariciones[unlist(lapply(apariciones$apariciones, is.null))] <- "null" apariciones_ev <- unlist(apariciones$apariciones) ######################## ## Generar mapa a partir de los archivos de geojson en el repositorio github.com/son0p/mapasGrupos ######################## masacre <- fromJSON("https://raw.githubusercontent.com/son0p/mapasGrupos/master/masacre.geojson") ## http://rstudio.github.io/leaflet/json.html m <- leaflet() %>% addProviderTiles("Stamen.Toner") %>% addGeoJSON(toJSON(masacre)) m # Print the map htmlwidgets::saveWidget(m, "./mapa.html")
# Assign a value to the variables my_apples and my_oranges my_apples <- 5 my_oranges <- 6 # Add these two variables together and print the result print(my_apples + my_oranges) # Create the variable my_fruit my_fruit <- (my_apples + my_oranges)
/Subject_Statistics/basics5.R
permissive
mahnooranjum/R_Programming
R
false
false
248
r
# Assign a value to the variables my_apples and my_oranges my_apples <- 5 my_oranges <- 6 # Add these two variables together and print the result print(my_apples + my_oranges) # Create the variable my_fruit my_fruit <- (my_apples + my_oranges)
## Coursera ## Exploratory Data Analysis ## Course Project 2 - plot4.R ## This code requires the following package: install.packages("ggplot2") library(ggplot2) ## set working directory setwd("~/Data Science/Assignments/ExplDataAnalysis") ## creating a data directory if (!file.exists("data")) { dir.create("data") } ## download a file and place in "data" directory if (!file.exists("data")) { fileUrl <- "https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2FNEI_data.zip" zipfile="data/projdata.zip" download.file(fileUrl, destfile=zipfile) unzip(zipfile, exdir="data") } ## read data data <- readRDS("./data/summarySCC_PM25.rds", refhook = NULL) SCC <- readRDS("./data/Source_Classification_Code.rds") dfdata <- data.frame(data) ## Question 4 plot ## Extract "Coal" from SCC coal1 <- SCC[grepl("Coal",SCC$EI.Sector),] coal2 <- SCC[grepl("Coal",SCC$SCC.Level.Three),] ## combine and resolve unique rows coaltot <- rbind(coal1,coal2) scc <- coaltot$SCC coal <- unique(scc) ## Extract unique rows from dfdata P4 <- dfdata[dfdata$SCC %in% coal, ] unique(P4$year) p4data <- aggregate(Emissions~year,data=P4,FUN=sum) ## plot using ggplot ggplot(p4data, aes(x = year, y = Emissions)) + geom_point(alpha=1, size=4) + geom_line(stat="identity") + ggtitle("Total PM2.5 Coal Combustion Emissions in the US") ## Create plot4.png dev.copy(png, file = "plot4.png") dev.off() ## end plot4.R
/plot4.R
no_license
ealmand/ExplDataAnalysis_Proj2
R
false
false
1,556
r
## Coursera ## Exploratory Data Analysis ## Course Project 2 - plot4.R ## This code requires the following package: install.packages("ggplot2") library(ggplot2) ## set working directory setwd("~/Data Science/Assignments/ExplDataAnalysis") ## creating a data directory if (!file.exists("data")) { dir.create("data") } ## download a file and place in "data" directory if (!file.exists("data")) { fileUrl <- "https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2FNEI_data.zip" zipfile="data/projdata.zip" download.file(fileUrl, destfile=zipfile) unzip(zipfile, exdir="data") } ## read data data <- readRDS("./data/summarySCC_PM25.rds", refhook = NULL) SCC <- readRDS("./data/Source_Classification_Code.rds") dfdata <- data.frame(data) ## Question 4 plot ## Extract "Coal" from SCC coal1 <- SCC[grepl("Coal",SCC$EI.Sector),] coal2 <- SCC[grepl("Coal",SCC$SCC.Level.Three),] ## combine and resolve unique rows coaltot <- rbind(coal1,coal2) scc <- coaltot$SCC coal <- unique(scc) ## Extract unique rows from dfdata P4 <- dfdata[dfdata$SCC %in% coal, ] unique(P4$year) p4data <- aggregate(Emissions~year,data=P4,FUN=sum) ## plot using ggplot ggplot(p4data, aes(x = year, y = Emissions)) + geom_point(alpha=1, size=4) + geom_line(stat="identity") + ggtitle("Total PM2.5 Coal Combustion Emissions in the US") ## Create plot4.png dev.copy(png, file = "plot4.png") dev.off() ## end plot4.R
% Generated by roxygen2 (4.1.0): do not edit by hand % Please edit documentation in R/session-info.r \name{session_info} \alias{session_info} \title{Print session information} \usage{ session_info() } \description{ This is \code{\link{sessionInfo}()} re-written from scratch to both exclude data that's rarely useful (e.g., the full collate string or base packages loaded) and include stuff you'd like to know (e.g., where a package was installed from). }
/man/session_info.Rd
no_license
wush978/devtools
R
false
false
457
rd
% Generated by roxygen2 (4.1.0): do not edit by hand % Please edit documentation in R/session-info.r \name{session_info} \alias{session_info} \title{Print session information} \usage{ session_info() } \description{ This is \code{\link{sessionInfo}()} re-written from scratch to both exclude data that's rarely useful (e.g., the full collate string or base packages loaded) and include stuff you'd like to know (e.g., where a package was installed from). }
testlist <- list(G = numeric(0), Rn = numeric(0), atmp = numeric(0), ra = numeric(0), relh = c(1.32498004722073e+213, 6.95644429344475e-320, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), rs = numeric(0), temp = 5.29947702289773e-169) result <- do.call(meteor:::ET0_PenmanMonteith,testlist) str(result)
/meteor/inst/testfiles/ET0_PenmanMonteith/libFuzzer_ET0_PenmanMonteith/ET0_PenmanMonteith_valgrind_files/1612736585-test.R
no_license
akhikolla/updatedatatype-list3
R
false
false
476
r
testlist <- list(G = numeric(0), Rn = numeric(0), atmp = numeric(0), ra = numeric(0), relh = c(1.32498004722073e+213, 6.95644429344475e-320, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), rs = numeric(0), temp = 5.29947702289773e-169) result <- do.call(meteor:::ET0_PenmanMonteith,testlist) str(result)
#date and time date() Sys.time() #import library and set up url library(XML) yahooUrl="http://finance.yahoo.com/q/hp?s=AAPL" table=readHTMLTable(yahooUrl) class(table) #parse doc doc=htmlParse(yahooUrl) tableNodes=getNodeSet(doc, "//table") #read HTML table into an R object for easy reading table=readHTMLTable(tableNodes[[15]], as.data.frame=TRUE, trim=TRUE, stringsAsFactors=FALSE, header=TRUE, colClasses = c("character", rep("FormattedNumber",5)), skip.rows=c(1:34, 46:67)) str(table) class(table) print(table[c("Date", "Close")], right=FALSE, row.names=TRUE) #import twitter library library(twitteR) #set up twitter api reqURL = "https://api.twitter.com/oauth/request_token" accessURL = "https://api.twitter.com/oauth/access_token" authURL = "https://api.twitter.com/oauth/authorize" consumerKey = "r0zFOLDE87mEg09ROfMGQEAGj" consumerSecret = "5HyDXKW4emrL20flD4fiMrzk7HHffvTACfNfmeaGr4ekhkcrkD" #twit creditials authoriziation twitCred <- OAuthFactory$new(consumerKey=consumerKey, consumerSecret=consumerSecret, requestURL=reqURL, accessURL=accessURL, authURL=authURL) #Do I have to run this everytime i open the program? twitCred$handshake() #Register twitCred registerTwitterOAuth(twitCred) #search Twitter and run what is found. 
Dates are not working, mainly until will break the code ST=searchTwitter("iPhone Apple", n=100, lang='en', since=NULL, until=NULL, locale=NULL, geocode=NULL, sinceID=NULL, retryOnRateLimit=120) ST #install and grab tm and wordcload packages install.packages("tm") library(tm) install.packages("wordcloud") library(wordcloud) #search twitter for what I believe is a vector of words #Phlly 39.9522, -75.164 #Miami geocode='25.7739, -80.194, 10mi' date() Sys.time() r_stats<- searchTwitter("alibaba", n=100, lang="en", since='2014-11-11') r_stats2<-sapply(r_stats,function(x) x$getText()) #standard run for wordcloud graphic WSC<-Corpus(VectorSource(r_stats2)) WSC<-tm_map(WSC,removePunctuation) WSC<-tm_map(WSC,removeNumbers) WSC<-tm_map(WSC,removeWords,stopwords("en")) #run wordcloud. This works wordcloud(WSC) #carolin r_stats<- searchTwitter("panthers nfl football", n=100, lang="en") r_stats2<-sapply(r_stats,function(x) x$getText()) #standard run for wordcloud graphic WSC<-Corpus(VectorSource(r_stats2)) WSC<-tm_map(WSC,removePunctuation) WSC<-tm_map(WSC,removeNumbers) WSC<-tm_map(WSC,removeWords,stopwords("en")) #run wordcloud. 
This works wordcloud(WSC) #the same process as above but it does not work r_stats<- searchTwitter("philly eagles football", n=100, lang="en") r_stats2<-sapply(r_stats,function(x) x$getText()) r_stats WSC<-Corpus(VectorSource(r_stats2)) WSC<-tm_map(WSC,removePunctuation) WSC<-tm_map(WSC,removeNumbers) WSC<-tm_map(WSC,removeWords,stopwords("en")) wordcloud(WSC) r_stats<- searchTwitter("apple iphone 6 launch", n=500, lang="en", since='2014-09-09') r_stats2<-sapply(r_stats,function(x) x$getText()) WSC<-Corpus(VectorSource(r_stats2)) WSC<-tm_map(WSC,removePunctuation) #WSC<-tm_map(WSC,removeNumbers) WSC<-tm_map(WSC,removeWords,stopwords("en")) wordcloud(WSC) r_stats<- searchTwitter("apple iphone 6 launch", n=500, lang="en", since='2014-09-19') r_stats2<-sapply(r_stats,function(x) x$getText()) WSC<-Corpus(VectorSource(r_stats2)) WSC<-tm_map(WSC,removePunctuation) #WSC<-tm_map(WSC,removeNumbers) WSC<-tm_map(WSC,removeWords,stopwords("en")) wordcloud(WSC)
/TwitterWordCloud.R
no_license
Harrison1/R
R
false
false
3,375
r
#date and time date() Sys.time() #import library and set up url library(XML) yahooUrl="http://finance.yahoo.com/q/hp?s=AAPL" table=readHTMLTable(yahooUrl) class(table) #parse doc doc=htmlParse(yahooUrl) tableNodes=getNodeSet(doc, "//table") #read HTML table into an R object for easy reading table=readHTMLTable(tableNodes[[15]], as.data.frame=TRUE, trim=TRUE, stringsAsFactors=FALSE, header=TRUE, colClasses = c("character", rep("FormattedNumber",5)), skip.rows=c(1:34, 46:67)) str(table) class(table) print(table[c("Date", "Close")], right=FALSE, row.names=TRUE) #import twitter library library(twitteR) #set up twitter api reqURL = "https://api.twitter.com/oauth/request_token" accessURL = "https://api.twitter.com/oauth/access_token" authURL = "https://api.twitter.com/oauth/authorize" consumerKey = "r0zFOLDE87mEg09ROfMGQEAGj" consumerSecret = "5HyDXKW4emrL20flD4fiMrzk7HHffvTACfNfmeaGr4ekhkcrkD" #twit creditials authoriziation twitCred <- OAuthFactory$new(consumerKey=consumerKey, consumerSecret=consumerSecret, requestURL=reqURL, accessURL=accessURL, authURL=authURL) #Do I have to run this everytime i open the program? twitCred$handshake() #Register twitCred registerTwitterOAuth(twitCred) #search Twitter and run what is found. 
Dates are not working, mainly until will break the code ST=searchTwitter("iPhone Apple", n=100, lang='en', since=NULL, until=NULL, locale=NULL, geocode=NULL, sinceID=NULL, retryOnRateLimit=120) ST #install and grab tm and wordcload packages install.packages("tm") library(tm) install.packages("wordcloud") library(wordcloud) #search twitter for what I believe is a vector of words #Phlly 39.9522, -75.164 #Miami geocode='25.7739, -80.194, 10mi' date() Sys.time() r_stats<- searchTwitter("alibaba", n=100, lang="en", since='2014-11-11') r_stats2<-sapply(r_stats,function(x) x$getText()) #standard run for wordcloud graphic WSC<-Corpus(VectorSource(r_stats2)) WSC<-tm_map(WSC,removePunctuation) WSC<-tm_map(WSC,removeNumbers) WSC<-tm_map(WSC,removeWords,stopwords("en")) #run wordcloud. This works wordcloud(WSC) #carolin r_stats<- searchTwitter("panthers nfl football", n=100, lang="en") r_stats2<-sapply(r_stats,function(x) x$getText()) #standard run for wordcloud graphic WSC<-Corpus(VectorSource(r_stats2)) WSC<-tm_map(WSC,removePunctuation) WSC<-tm_map(WSC,removeNumbers) WSC<-tm_map(WSC,removeWords,stopwords("en")) #run wordcloud. 
This works wordcloud(WSC) #the same process as above but it does not work r_stats<- searchTwitter("philly eagles football", n=100, lang="en") r_stats2<-sapply(r_stats,function(x) x$getText()) r_stats WSC<-Corpus(VectorSource(r_stats2)) WSC<-tm_map(WSC,removePunctuation) WSC<-tm_map(WSC,removeNumbers) WSC<-tm_map(WSC,removeWords,stopwords("en")) wordcloud(WSC) r_stats<- searchTwitter("apple iphone 6 launch", n=500, lang="en", since='2014-09-09') r_stats2<-sapply(r_stats,function(x) x$getText()) WSC<-Corpus(VectorSource(r_stats2)) WSC<-tm_map(WSC,removePunctuation) #WSC<-tm_map(WSC,removeNumbers) WSC<-tm_map(WSC,removeWords,stopwords("en")) wordcloud(WSC) r_stats<- searchTwitter("apple iphone 6 launch", n=500, lang="en", since='2014-09-19') r_stats2<-sapply(r_stats,function(x) x$getText()) WSC<-Corpus(VectorSource(r_stats2)) WSC<-tm_map(WSC,removePunctuation) #WSC<-tm_map(WSC,removeNumbers) WSC<-tm_map(WSC,removeWords,stopwords("en")) wordcloud(WSC)
#' Convert input to a character vector #' #' By default, `to_character()` is a wrapper for [base::as.character()]. #' For labelled vector, to_character allows to specify if value, labels or labels prefixed #' with values should be used for conversion. #' #' @param x Object to coerce to a character vector. #' @param ... Other arguments passed down to method. #' @param explicit_tagged_na should tagged NA be kept? #' @export to_character <- function(x, ...) { UseMethod("to_character") } #' @export to_character.default <- function(x, ...) { vl <- var_label(x) x <- as.character(x) var_label(x) <- vl x } #' @rdname to_character #' @export to_character.double <- function(x, explicit_tagged_na = FALSE, ...) { res <- as.character(x) if (explicit_tagged_na) res[is_tagged_na(x)] <- format_tagged_na(x[is_tagged_na(x)]) var_label(res) <- var_label(x) names(res) <- names(x) res } #' @rdname to_character #' @param levels What should be used for the factor levels: the labels, the values or labels prefixed with values? #' @param nolabel_to_na Should values with no label be converted to `NA`? #' @param user_na_to_na user defined missing values into NA? #' @details #' If some values doesn't have a label, automatic labels will be created, except if #' `nolabel_to_na` is `TRUE`. #' @examples #' v <- labelled(c(1,2,2,2,3,9,1,3,2,NA), c(yes = 1, no = 3, "don't know" = 9)) #' to_character(v) #' to_character(v, nolabel_to_na = TRUE) #' to_character(v, "v") #' to_character(v, "p") #' @export to_character.haven_labelled <- function(x, levels = c("labels", "values", "prefixed"), nolabel_to_na = FALSE, user_na_to_na = FALSE, explicit_tagged_na = FALSE, ...) { vl <- var_label(x) levels <- match.arg(levels) x <- as.character(to_factor( x, levels = levels, nolabel_to_na = nolabel_to_na, user_na_to_na = user_na_to_na, explicit_tagged_na = explicit_tagged_na )) var_label(x) <- vl x }
/R/to_character.R
no_license
henrydoth/labelled
R
false
false
1,936
r
#' Convert input to a character vector #' #' By default, `to_character()` is a wrapper for [base::as.character()]. #' For labelled vector, to_character allows to specify if value, labels or labels prefixed #' with values should be used for conversion. #' #' @param x Object to coerce to a character vector. #' @param ... Other arguments passed down to method. #' @param explicit_tagged_na should tagged NA be kept? #' @export to_character <- function(x, ...) { UseMethod("to_character") } #' @export to_character.default <- function(x, ...) { vl <- var_label(x) x <- as.character(x) var_label(x) <- vl x } #' @rdname to_character #' @export to_character.double <- function(x, explicit_tagged_na = FALSE, ...) { res <- as.character(x) if (explicit_tagged_na) res[is_tagged_na(x)] <- format_tagged_na(x[is_tagged_na(x)]) var_label(res) <- var_label(x) names(res) <- names(x) res } #' @rdname to_character #' @param levels What should be used for the factor levels: the labels, the values or labels prefixed with values? #' @param nolabel_to_na Should values with no label be converted to `NA`? #' @param user_na_to_na user defined missing values into NA? #' @details #' If some values doesn't have a label, automatic labels will be created, except if #' `nolabel_to_na` is `TRUE`. #' @examples #' v <- labelled(c(1,2,2,2,3,9,1,3,2,NA), c(yes = 1, no = 3, "don't know" = 9)) #' to_character(v) #' to_character(v, nolabel_to_na = TRUE) #' to_character(v, "v") #' to_character(v, "p") #' @export to_character.haven_labelled <- function(x, levels = c("labels", "values", "prefixed"), nolabel_to_na = FALSE, user_na_to_na = FALSE, explicit_tagged_na = FALSE, ...) { vl <- var_label(x) levels <- match.arg(levels) x <- as.character(to_factor( x, levels = levels, nolabel_to_na = nolabel_to_na, user_na_to_na = user_na_to_na, explicit_tagged_na = explicit_tagged_na )) var_label(x) <- vl x }
library(scales) source('definitions.R') source('helpers_misc.R') #' Plots Website Traffic over time #' #' @param experiment_data list of data-frames from load_data #' @param only_first_time_visits only count the first time the user appears in the dataset (i.e. first time to website) #' @param is_weekly if TRUE, groups/cohorts data by week, if FALSE then groups by month #' @param filter_year_end_beginning_weeks if TRUE (and if is_weekly is TRUE) then it excludes weeks 0 (first #' week number of the year) and 53 (last week number of the year) because both are only partial weeks, and #' will make the traffic appear to drop during those weeks. #' @param top_n_paths if specified, the graph color the lines by path, and count by the top (i.e. highest #' traffic) paths, grouping the remaining paths into an 'Other' category. plot__website_traffic <- function(experiment_data, only_first_time_visits = FALSE, is_weekly = TRUE, filter_year_end_beginning_weeks = TRUE, top_n_paths = NULL) { if(is_weekly) { cohort_format <- '%W' cohort_name <- "Week" } else { cohort_format <- '%m' cohort_name <- "Month" } caption <- "" if(only_first_time_visits) { title <- paste("First-Time User Visits Per", cohort_name) subtitle <- paste0("Represents the number of new users to the website for a given ", tolower(cohort_name),".") y_label <- paste0("First-Time User Visits (per ", cohort_name, ")") } else { title <- paste("Unique User Visits Per", cohort_name) subtitle <- paste0("Users are only counted once in the given ", tolower(cohort_name), ", but the same user may be represented in multiple ", tolower(cohort_name), ".") y_label <- paste0("Unique User Visits (per ", cohort_name, ")") } num_users_data <- website_traffic__to_cohort_num_users(experiment_data, cohort_format = cohort_format, top_n_paths = top_n_paths, only_first_time_visits = only_first_time_visits) if(is_weekly && filter_year_end_beginning_weeks) { num_users_data <- num_users_data %>% filter(!str_detect(string=cohort, pattern='-00') 
& !str_detect(string=cohort, pattern='-53')) caption <- "\nPartial weeks at the end and beginning of the year are excluded." } if(is.null(top_n_paths)) { plot_object <- num_users_data %>% ggplot(aes(x=cohort, y=num_users, group = 1)) } else { plot_object <- num_users_data %>% rename(Path=path) %>% ggplot(aes(x=cohort, y=num_users, group=Path, color=Path)) } plot_object + geom_line() + geom_point() + expand_limits(y = 0) + geom_text(aes(label = prettify_numerics(num_users)), check_overlap=TRUE, vjust = -0.5, size=rel(global__text_size)) + scale_y_continuous(labels = comma_format()) + theme_light(base_size=global__theme_base_size) + theme(axis.text.x = element_text(angle = 30, hjust = 1)) + labs(title = title, subtitle = subtitle, x = cohort_name, y = y_label, caption = caption) } #' Plots the bayesian posterior control/variant/prior distributions of a particular experiment/metric. #' #' @param experiment_data list of data-frames from load_data #' @param experiment #' @param metric #' @param confidence_level the confidence level passed to `experiments__get_summary()` which generates #' `experiment_data` #' @param show_prior_distribution plot__bayesian_posterior <- function(experiments_summary, experiment, metric, confidence_level, show_prior_distribution=TRUE) { local_experiment <- experiments_summary %>% filter(experiment_id == experiment & metric_id == metric) prior_alpha <- local_experiment$prior_alpha prior_beta <- local_experiment$prior_beta control_alpha <- local_experiment$control_alpha control_beta <- local_experiment$control_beta variant_alpha <- local_experiment$variant_alpha variant_beta <- local_experiment$variant_beta bayesian_prob_variant_gt_control <- local_experiment$bayesian_prob_variant_gt_control control_name <- local_experiment$control_name variant_name <- local_experiment$variant_name alpha_vector <- c(control_alpha, variant_alpha, prior_alpha) beta_vector <- c(control_beta, variant_beta, prior_beta) if(show_prior_distribution) { x_min <- 
min(qbeta(0.001, alpha_vector, beta_vector)) x_max <- max(qbeta(0.999, alpha_vector, beta_vector)) } else { x_min <- min(qbeta(0.001, alpha_vector[1:2], beta_vector[1:2])) x_max <- max(qbeta(0.999, alpha_vector[1:2], beta_vector[1:2])) } x_axis_spread <- x_max - x_min # depending on the where we want to graph and how spread out the values are, we will want to get more/less # granualar with our plot distro_names <- c("Control", "Variant", "Prior") # don't change order, controlled from above distros <- data.frame(alpha = alpha_vector, beta = beta_vector, group = distro_names) %>% group_by(alpha, beta, group) %>% do(tibble(x = seq(x_min, x_max, x_axis_spread / 1000))) %>% ungroup() %>% mutate(y = dbeta(x, alpha, beta), Parameters = factor(paste0(group, ": alpha= ", comma_format()(alpha), ", beta= ", comma_format()(beta)))) x_axis_break_steps <- 0.05 if(x_axis_spread <= 0.02) { x_axis_break_steps <- 0.001 } else if(x_axis_spread <= 0.05) { x_axis_break_steps <- 0.005 } else if(x_axis_spread <= 0.15) { x_axis_break_steps <- 0.01 } else if(x_axis_spread <= 0.5) { x_axis_break_steps <- 0.02 } #custom_colors <- rev(hue_pal()(3)) custom_colors <- global__colors_bayesian if(!show_prior_distribution) { distros <- distros %>% filter(!str_detect(Parameters, "Prior")) custom_colors <- custom_colors %>% remove_val(global__colors_prior) } confidence_difference <- 1 - confidence_level two_sided_difference <- confidence_difference / 2 control_cred_low <- qbeta(two_sided_difference, control_alpha, control_beta) control_cred_high <- qbeta(1 - two_sided_difference, control_alpha, control_beta) variant_cred_low <- qbeta(two_sided_difference, variant_alpha, variant_beta) variant_cred_high <- qbeta(1 - two_sided_difference, variant_alpha, variant_beta) # a_cr_simulation <- rbeta(1e6, control_alpha, control_beta) # b_cr_simulation <- rbeta(1e6, variant_alpha, variant_beta) # percent_of_time_b_wins <- mean(b_cr_simulation > a_cr_simulation) max_distros_20th <- max(distros$y) / 20 # re-level 
(re-order) the levels of Params so the order is Control->Variant->Prior param_levels <- levels(distros$Parameters) distros <- distros %>% mutate(Parameters = factor(distros$Parameters, levels = c(param_levels[3], param_levels[1], param_levels[2]))) plot_object <- ggplot(data=distros, aes(x, y, color = Parameters)) + geom_line() + geom_area(aes(fill=Parameters, group=Parameters), alpha=0.3, position = 'identity') + geom_errorbarh(aes(xmin = control_cred_low, xmax = control_cred_high, y = max_distros_20th * -1), height = max_distros_20th * 0.75, color = global__colors_control, alpha=0.3) + geom_errorbarh(aes(xmin = variant_cred_low, xmax = variant_cred_high, y = max_distros_20th * -2), height = max_distros_20th * 0.75, color = global__colors_variant, alpha=0.3) + scale_x_continuous(breaks = seq(0, 1, x_axis_break_steps), labels = percent_format()) + theme_light(base_size=global__theme_base_size) + theme(axis.text.x = element_text(angle = 30, hjust = 1), legend.text=element_text(size=rel(0.9)), plot.subtitle=element_text(size=rel(0.9))) + coord_cartesian(xlim=c(x_min, x_max)) + scale_fill_manual(values=custom_colors) + scale_color_manual(values=custom_colors) + labs(#title='Posterior Probability Distributions of Control & Variant', subtitle=paste0(paste0('The probability the Variant is better is ', percent(bayesian_prob_variant_gt_control), ".\n"), #paste0('\nExperiment: "', experiment, '"'), #paste0('\nMetric: "', metric, '"'), paste0('\nControl Name: "', control_name, '"'), paste0('\nVariant Name: "', variant_name, '"')), x="Conversion Rates", y="Density of beta", caption=paste("\nThe lines under each distribution show the corresponding", percent(confidence_level), "confidence interval.")) return (plot_object) } #' Plots the control vs variant conversion rates for all metrics #' #' @param experiment_data list of data-frames from load_data #' @param experiment plot__conversion_rates <- function(experiments_summary, experiment) { get_type <- function(type) { if(type 
== 'p_value') { return (type) } return (str_split(type, '_', simplify=TRUE)[2]) } modified_summary <- experiments_summary %>% filter(experiment_id == experiment) %>% select(metric_id, control_successes, control_trials, control_conversion_rate, variant_successes, variant_trials, variant_conversion_rate, p_value) %>% gather(type, value, -metric_id) %>% mutate(variation=ifelse(type != 'p_value' & str_detect(type, 'control'), 'Control', 'Variant')) %>% mutate(actual_type=map_chr(type, ~ get_type(.))) %>% mutate(actual_type=ifelse(actual_type == 'conversion', 'conversion_rate', actual_type)) %>% select(-type) %>% spread(actual_type, value) %>% group_by(metric_id) %>% mutate(p_value=min(p_value, na.rm=TRUE), highest_conversion_rate = max(conversion_rate)) %>% ungroup() modified_summary %>% ggplot(aes(x=metric_id, y=conversion_rate, group=variation, fill=variation)) + #geom_col(position='dodge') + geom_bar(stat="identity", position=position_dodge(width=0.8), width=0.75) + scale_y_continuous(labels=percent_format()) + coord_cartesian(ylim=c(0, max(modified_summary$conversion_rate) + 0.05)) + geom_text(aes(y=highest_conversion_rate, label= percent_format()(conversion_rate)), position=position_dodge(width=.8), check_overlap=TRUE, vjust=-4, size=rel(global__text_size + 0.5)) + geom_text(aes(label= comma_format()(successes)), position=position_dodge(width=.8), check_overlap=TRUE, vjust=-1.75, size=rel(global__text_size)) + geom_text(aes(label= comma_format()(trials)), position=position_dodge(width=.8), check_overlap=TRUE, vjust=-0.5, size=rel(global__text_size)) + #facet_wrap(~ track_field_name, ncol=1, scales='free_x') + scale_fill_manual(values=c(global__colors_control, global__colors_variant)) + theme_light(base_size=global__theme_base_size) + theme(axis.text.x=element_text(angle=35, hjust=1)) + labs(caption="", x="Metric", y="Conversion Rate", fill="Variation") } #' Plots the control vs variant conversion rates based on the bayesian methodology, for all metrics #' #' 
#' @param experiments_summary summary data-frame of experiment results
#' @param experiment the experiment_id to plot
plot__conversion_rates_bayesian <- function(experiments_summary, experiment) {
  # Maps a gathered column name (e.g. 'control_alpha') to its measure type
  # (e.g. 'alpha'); the probability column passes through unchanged.
  get_type <- function(type) {
    if(type == 'bayesian_prob_variant_gt_control') {
      return (type)
    }
    return (str_split(type, '_', simplify=TRUE)[2])
  }
  # Reshape wide control_*/variant_* posterior columns into one row per
  # metric/variation with alpha, beta and conversion_rate.
  modified_summary <- experiments_summary %>%
    filter(experiment_id == experiment) %>%
    select(metric_id,
           control_alpha, control_beta, bayesian_control_cr,
           variant_alpha, variant_beta, bayesian_variant_cr,
           bayesian_prob_variant_gt_control) %>%
    gather(type, value, -metric_id) %>%
    mutate(variation=ifelse(type != 'bayesian_prob_variant_gt_control' & str_detect(type, 'control'), 'Control', 'Variant')) %>%
    mutate(actual_type=map_chr(type, ~ get_type(.))) %>%
    mutate(actual_type=ifelse(actual_type == 'control' | actual_type == 'variant', 'conversion_rate', actual_type)) %>%
    select(-type) %>%
    spread(actual_type, value) %>%
    group_by(metric_id) %>%
    # the probability lands on only one reshaped row per metric; min(na.rm)
    # copies it onto both variations.
    mutate(bayesian_prob_variant_gt_control=min(bayesian_prob_variant_gt_control, na.rm=TRUE),
           highest_conversion_rate = max(conversion_rate)) %>%
    ungroup()
  # Dodged bar chart labelled with the rate and the beta-posterior parameters.
  modified_summary %>%
    ggplot(aes(x=metric_id, y=conversion_rate, group=variation, fill=variation)) +
    #geom_col(position='dodge') +
    geom_bar(stat="identity", position=position_dodge(width=0.8), width=0.75) +
    scale_y_continuous(labels=percent_format()) +
    coord_cartesian(ylim=c(0, max(modified_summary$conversion_rate) + 0.05)) +
    geom_text(aes(y=highest_conversion_rate, label= percent_format()(conversion_rate)),
              position=position_dodge(width=.8), check_overlap=TRUE, vjust=-4,
              size=rel(global__text_size + 0.5)) +
    geom_text(aes(label=paste('a:', comma_format()(alpha))),
              position=position_dodge(width=.8), check_overlap=TRUE, vjust=-1.75,
              size=rel(global__text_size)) +
    geom_text(aes(label=paste('b:', comma_format()(beta))),
              position=position_dodge(width=.8), check_overlap=TRUE, vjust=-0.5,
              size=rel(global__text_size)) +
    #facet_wrap(~ track_field_name, ncol=1, scales='free_x') +
    scale_fill_manual(values=c(global__colors_control, global__colors_variant)) +
    theme_light(base_size=global__theme_base_size) +
    theme(axis.text.x=element_text(angle=35, hjust=1)) +
    labs(caption="", x="Metric", y="Conversion Rate", fill="Variation")
}
#' Plots the percent change from the Control to the Variant along with p-values
#'
#' @param experiments_summary summary data-frame of experiment results
#' @param experiment the experiment_id to plot
plot__percent_change_frequentist <- function(experiments_summary, experiment, p_value_threshold=0.05) {
  current_experiments <- experiments_summary %>%
    filter(experiment_id == experiment)
  # y-axis limits snapped outward to the nearest 5%
  nearest_x <- 0.05
  min_perc_change <- min(0, ceiling_nearest_x(min(current_experiments$percent_change_from_control), nearest_x))
  max_perc_change <- ceiling_nearest_x(max(current_experiments$percent_change_from_control), nearest_x)
  # Categorise each metric by statistical significance and direction of change.
  current_experiments <- current_experiments %>%
    mutate(p_value_sig_category=case_when(
      p_value > p_value_threshold ~ 'No',
      p_value <= p_value_threshold & percent_change_from_control > 0 ~ 'Yes - Increase',
      p_value <= p_value_threshold & percent_change_from_control < 0 ~ 'Yes - Decrease',
      TRUE ~ 'Unknown'
    ))
  plot_object <- current_experiments %>%
    ggplot(aes(x=metric_id, y=percent_change_from_control, fill=p_value_sig_category)) +
    geom_hline(yintercept=0, color='#EB5424', size=0.5, alpha=0.5) +
    geom_col(alpha=0.85)
  # if there are any experiments where the percent change is greater than 0, add text
  # if i don't do the check, i get an error
  t <- current_experiments %>% filter(percent_change_from_control >= 0)
  if(nrow(t) > 0) {
    plot_object <- plot_object +
      geom_text(data=t, aes(label=paste(percent(percent_change_from_control), 'Change')),
                vjust=-2, size=rel(global__text_size), check_overlap=TRUE) +
      geom_text(data=t, aes(label=paste(round(p_value, 3), 'p-value')),
                vjust=-0.5, size=rel(global__text_size), check_overlap=TRUE)
  }
  # if there are any experiments where the percent change is less than 0, add text
  # if i don't do the check, i get an error
  t <- current_experiments %>% filter(percent_change_from_control < 0)
  if(nrow(t) > 0) {
    plot_object <- plot_object +
      geom_text(data=t, aes(label=paste(percent(percent_change_from_control), 'Change')),
                vjust=2.7, size=rel(global__text_size), check_overlap=TRUE) +
      geom_text(data=t, aes(label=paste(round(p_value, 3), 'p-value')),
                vjust=1.2, size=rel(global__text_size), check_overlap=TRUE)
  }
  # Only keep colors for the categories actually present, keyed by name.
  bad_good_colors <- c('#B4B7B9', '#DF585C', '#37B57F')
  names(bad_good_colors) <- c("No", "Yes - Decrease", "Yes - Increase")
  fill_colors <- bad_good_colors[sort(unique(current_experiments$p_value_sig_category))]
  plot_object +
    coord_cartesian(ylim=c(min_perc_change - 0.02, max_perc_change + 0.02)) +
    scale_y_continuous(labels=percent_format(), breaks=seq(min_perc_change, max_perc_change, nearest_x)) +
    scale_fill_manual(values=fill_colors) +
    theme_light(base_size=global__theme_base_size) +
    theme(axis.text.x=element_text(angle=35, hjust=1)) +
    labs(caption=paste("\nThe p-value threshold for statistical significance is", p_value_threshold),
         x="Metric",
         y="Percent Change from Control to Variant",
         fill="Statistically Significant")
}
#' Plots the percent change from the Control to the Variant along with the probability that the Variant is
#' better than the control.
#'
#' @param experiments_summary summary data-frame of experiment results
#' @param experiment the experiment_id to plot
plot__percent_change_bayesian <- function(experiments_summary, experiment) {
  # Recompute the percent change from the bayesian estimates rather than the
  # frequentist column carried in the summary.
  current_experiments <- experiments_summary %>%
    filter(experiment_id == experiment) %>%
    select(-percent_change_from_control) %>%
    mutate(percent_change_from_control = bayesian_cr_difference / bayesian_control_cr)
  nearest_x <- 0.05
  min_perc_change <- ceiling_nearest_x(min(current_experiments$percent_change_from_control), nearest_x)
  max_perc_change <- ceiling_nearest_x(max(current_experiments$percent_change_from_control), nearest_x)
  plot_object <- current_experiments %>%
    ggplot(aes(x=metric_id, y=percent_change_from_control, fill=bayesian_prob_variant_gt_control)) +
    geom_col(alpha=0.85)
  # if there are any experiments where the percent change is greater than 0, add text
  # if i don't do the check, i get an error
  t <- current_experiments %>% filter(percent_change_from_control >= 0)
  if(nrow(t) > 0) {
    plot_object <- plot_object +
      geom_text(data=t, aes(label=paste(percent(percent_change_from_control), 'Change')),
                vjust=-2, size=rel(global__text_size), check_overlap=TRUE) +
      geom_text(data=t, aes(label=paste(percent(bayesian_prob_variant_gt_control), 'Probability')),
                vjust=-0.5, size=rel(global__text_size), check_overlap=TRUE)
  }
  # if there are any experiments where the percent change is less than 0, add text
  # if i don't do the check, i get an error
  t <- current_experiments %>% filter(percent_change_from_control < 0)
  if(nrow(t) > 0) {
    plot_object <- plot_object +
      geom_text(data=t, aes(label=paste(percent(percent_change_from_control), 'Change')),
                vjust=2.7, size=rel(global__text_size), check_overlap=TRUE) +
      geom_text(data=t, aes(label=paste(percent(bayesian_prob_variant_gt_control), 'Probability')),
                vjust=1.2, size=rel(global__text_size), check_overlap=TRUE)
  }
  plot_object +
    coord_cartesian(ylim=c(min_perc_change - 0.02, max_perc_change + 0.02)) +
    scale_y_continuous(labels=percent_format(), breaks=seq(min_perc_change, max_perc_change, nearest_x)) +
    #scale_fill_manual(values=c(global__colors_bad, global__colors_good)) +
    scale_fill_gradient2(low = global__colors_bad, mid = "gray", high = global__colors_good,
                         midpoint = 0.5, limits=c(0,1)) +
    theme_light(base_size=global__theme_base_size) +
    theme(axis.text.x=element_text(angle=35, hjust=1)) +
    labs(#caption="\nThe decimal value next to the bar represents the Probability that the Variant is Better.",
         x="Metric",
         y="Percent Change from Control to Variant",
         fill="Probability Variant is Better")
}
#' Plots the percent change (with confidence intervals) from the Control to the Variant along with p-values
#'
#' @param experiments_summary summary data-frame of experiment results
#' @param experiment the experiment_id to plot
plot__percent_change_conf_frequentist <- function(experiments_summary, experiment, p_value_threshold=0.05) {
  current_experiments <- experiments_summary %>%
    filter(experiment_id == experiment)
  # Express the CI bounds as a percent of the control conversion rate.
  current_experiments <- current_experiments %>%
    select(metric_id, control_conversion_rate, p_value, percent_change_from_control, contains('frequentist')) %>%
    mutate(percent_change_conf_low=frequentist_conf_low / control_conversion_rate,
           percent_change_conf_high=frequentist_conf_high / control_conversion_rate,
           p_value_sig = p_value <= p_value_threshold) %>%
    select(metric_id, contains('percent_change'), p_value, p_value_sig)
  # Categorise each metric by statistical significance and direction of change.
  current_experiments <- current_experiments %>%
    mutate(p_value_sig_category=case_when(
      p_value > p_value_threshold ~ 'No',
      p_value <= p_value_threshold & percent_change_from_control > 0 ~ 'Yes - Increase',
      p_value <= p_value_threshold & percent_change_from_control < 0 ~ 'Yes - Decrease',
      TRUE ~ 'Unknown'
    ))
  y_expand <- 0.10
  min_y <- ceiling_nearest_x(min(current_experiments$percent_change_conf_low), y_expand)
  max_y <- ceiling_nearest_x(max(current_experiments$percent_change_conf_high), y_expand)
  # Only keep colors for the categories actually present, keyed by name.
  bad_good_colors <- c('#B4B7B9', '#DF585C', '#37B57F')
  names(bad_good_colors) <- c("No", "Yes - Decrease", "Yes - Increase")
  fill_colors <- bad_good_colors[sort(unique(current_experiments$p_value_sig_category))]
  current_experiments %>%
    ggplot(aes(x=metric_id, y=percent_change_from_control, color=p_value_sig_category)) +
    geom_point(size=2) +
    geom_errorbar(aes(ymin=percent_change_conf_low, ymax=percent_change_conf_high), size=0.8) +
    geom_text(aes(label=percent(percent_change_from_control)),
              hjust=1.2, size=rel(global__text_size), check_overlap=TRUE, color='black') +
    geom_text(aes(y=max_y + y_expand, label=paste('p-value:', round(p_value, 3))),
              vjust=1, size=rel(global__text_size), check_overlap=TRUE, color='black') +
    geom_hline(yintercept=0, color='#EB5424', size=1.2, alpha=0.5) +
    coord_cartesian(ylim=c(min_y - y_expand, max_y + y_expand)) +
    scale_color_manual(values=fill_colors) +
    scale_y_continuous(labels=percent_format(), breaks=seq(min_y - y_expand, max_y + y_expand, y_expand)) +
    theme_light(base_size=global__theme_base_size) +
    theme(axis.text.x=element_text(angle=35, hjust=1)) +
    labs(caption=paste(paste("\nThe p-value threshold for statistical significance is", p_value_threshold),
                       "\nThe error bars show the", percent(1 - p_value_threshold), "confidence interval."),
         x="Metric",
         y="Percent Change from Control to Variant",
         color="Statistically Significant")
}
#' Plots the percent change (with confidence intervals) from the Control to the Variant along with the
#' probability that the Variant is better than the control.
#'
#' @param experiments_summary summary data-frame of experiment results
#' @param experiment the experiment_id to plot
plot__percent_change_conf_bayesian <- function(experiments_summary, experiment) {
  current_experiments <- experiments_summary %>%
    filter(experiment_id == experiment)
  # Express the bayesian difference and its credible bounds as a percent of
  # the bayesian control conversion rate.
  current_experiments <- current_experiments %>%
    select(metric_id, contains('bayesian')) %>%
    mutate(percent_change_from_control=bayesian_cr_difference / bayesian_control_cr,
           percent_change_conf_low=bayesian_conf_low / bayesian_control_cr,
           percent_change_conf_high=bayesian_conf_high / bayesian_control_cr) %>%
    select(metric_id, contains('percent_change'), bayesian_prob_variant_gt_control)
  y_expand <- 0.10
  min_y <- ceiling_nearest_x(min(current_experiments$percent_change_conf_low), y_expand)
  max_y <- ceiling_nearest_x(max(current_experiments$percent_change_conf_high), y_expand)
  current_experiments %>%
    ggplot(aes(x=metric_id, y=percent_change_from_control, color=bayesian_prob_variant_gt_control)) +
    geom_point(size=2) +
    geom_errorbar(aes(ymin=percent_change_conf_low, ymax=percent_change_conf_high), size=0.8) +
    geom_text(aes(label=percent(percent_change_from_control)),
              hjust=1.2, size=rel(global__text_size), check_overlap=TRUE, color='black') +
    geom_text(aes(y=max_y + y_expand, label=paste(percent(bayesian_prob_variant_gt_control), "Probability")),
              vjust=1, size=rel(global__text_size), check_overlap=TRUE, color='black') +
    geom_hline(yintercept=0, color='#EB5424', size=1.2, alpha=0.5) +
    coord_cartesian(ylim=c(min_y - y_expand, max_y + y_expand)) +
    scale_color_gradient2(low = global__colors_bad, mid = "gray", high = global__colors_good,
                          midpoint = 0.5, limits=c(0,1)) +
    #scale_color_manual(values=c(global__colors_bad, global__colors_good)) +
    scale_y_continuous(labels=percent_format(), breaks=seq(min_y - y_expand, max_y + y_expand, y_expand)) +
    theme_light(base_size=global__theme_base_size) +
    theme(axis.text.x=element_text(angle=35, hjust=1)) +
    labs(caption=paste("\nThe error bars show the", percent(global__confidence_level),
                       "confidence interval."),
         x="Metric",
         y="Percent Change from Control to Variant",
         color="Probability Variant is Better")
}
#' Plots the daily p-value of a single experiment/metric over time.
#'
#' @param experiments_daily_summary daily summary data-frame
#' @param experiment the experiment_id to plot
#' @param metric the metric_id to plot
#' @param p_value_threshold significance threshold drawn as a horizontal line
plot__daily_p_value <- function(experiments_daily_summary, experiment, metric, p_value_threshold=0.05) {
  current_daily_summary <- experiments_daily_summary %>%
    filter(experiment_id == experiment, metric_id == metric)
  # error-bar height scaled to 1/25th of the observed p-value range
  error_bar_height <- (max(current_daily_summary$p_value, na.rm=TRUE) - min(current_daily_summary$p_value, na.rm=TRUE)) / 25
  median_p_value <- median(current_daily_summary$p_value, na.rm = TRUE)
  # Days with no conversion data yet (still inside the attribution window).
  missing_dates <- current_daily_summary %>%
    filter(is.na(control_conversion_rate)) %>%
    select(day_expired_attribution) %>%
    mutate(y_axis_location = p_value_threshold + error_bar_height) #median_p_value)
  # Label only the first missing day so the annotation is not repeated.
  missing_dates <- missing_dates %>%
    mutate(message = ifelse(as.character(missing_dates$day_expired_attribution) == as.character(min(missing_dates$day_expired_attribution)),
                            'Lag from\nAttribution\nWindow',
                            NA))
  current_daily_summary %>%
    mutate(day_expired_attribution = as.Date(day_expired_attribution)) %>%
    ggplot(aes(x=day_expired_attribution, y=p_value)) +
    geom_line(na.rm = TRUE) +
    geom_point(na.rm = TRUE) +
    geom_text(aes(label=round(p_value, 3)), vjust=-0.5, check_overlap = TRUE, na.rm = TRUE,
              size=rel(global__text_size)) +
    geom_hline(yintercept = p_value_threshold, color ='red', alpha=0.5, size=1.5) +
    geom_errorbarh(data=missing_dates,
                   aes(y = y_axis_location,
                       xmin = min(day_expired_attribution),
                       xmax = max(day_expired_attribution)),
                   color='#828282', height=rel(error_bar_height), size=rel(0.45)) +
    geom_text(data=missing_dates, aes(y=y_axis_location, label=message),
              vjust=-0.5, hjust='left',#0.325,
              check_overlap = TRUE, na.rm=TRUE, size=rel(global__text_size)) +
    scale_x_date(date_breaks = '1 days') +
    #scale_y_continuous(breaks = seq(0, 1, 0.05)) +
    expand_limits(y=0) +
    labs(title='P-value over time',
         y='P-Value',
         x='Day of Experiment (and days after)') +
    theme_light(base_size=global__theme_base_size) +
    theme(axis.text.x = element_text(angle = 30, hjust = 1))
}
#' Plots the daily frequentist percent change (with its confidence ribbon)
#' for a single experiment/metric.
plot__daily_percent_change_frequentist <- function(experiments_daily_summary, experiment, metric, p_value_threshold=0.05) {
  current_daily_summary <- experiments_daily_summary %>%
    filter(experiment_id == experiment, metric_id == metric) %>%
    mutate(day_expired_attribution = as.Date(day_expired_attribution)) %>%
    mutate(perc_change = frequentist_cr_difference / control_conversion_rate,
           perc_change_conf_low = frequentist_conf_low / control_conversion_rate,
           perc_change_conf_high = frequentist_conf_high / control_conversion_rate)
  # error-bar height scaled to 1/25th of the observed CI range
  error_bar_height <- (max(current_daily_summary$perc_change_conf_high, na.rm=TRUE) - min(current_daily_summary$perc_change_conf_low, na.rm=TRUE)) / 25
  median_percent_change <- median(current_daily_summary$perc_change, na.rm = TRUE)
  # Days with no conversion data yet (still inside the attribution window).
  missing_dates <- current_daily_summary %>%
    filter(is.na(control_conversion_rate)) %>%
    select(day_expired_attribution) %>%
    mutate(y_axis_location = error_bar_height) #median_percent_change)
  missing_dates <- missing_dates %>%
    mutate(message = ifelse(as.character(missing_dates$day_expired_attribution) == as.character(min(missing_dates$day_expired_attribution)),
                            'Lag from\nAttribution\nWindow',
                            NA))
  # plot_object <- current_daily_summary %>%
  #     ggplot(aes(x=day_expired_attribution, y=perc_change)) +
  #     geom_line(na.rm = TRUE) +
  #     #coord_cartesian(ylim=c(-0.10, 0.3)) +
  #     scale_y_continuous(labels = percent_format()) +
  #     scale_x_date(date_breaks = '1 days') +
  #     geom_ribbon(aes(ymin = perc_change_conf_low, ymax = perc_change_conf_high), fill = 'green', alpha=0.15)
  # if(any(current_daily_summary$p_value > p_value_threshold, na.rm=TRUE)) {
  #     plot_object <- plot_object +
  #         geom_ribbon(aes(ymin = ifelse(p_value > p_value_threshold, perc_change_conf_low, NA),
  #                         ymax = ifelse(p_value > p_value_threshold, perc_change_conf_high, NA)),
  #                     fill = 'red', alpha=0.45)
  # }
  # if(any(current_daily_summary$p_value <= p_value_threshold, na.rm=TRUE)) {
  #     plot_object <- plot_object +
  #         geom_ribbon(aes(ymin = ifelse(p_value <=
#                         p_value_threshold, perc_change_conf_low, NA),
#                         ymax = ifelse(p_value <= p_value_threshold, perc_change_conf_high, NA)),
#                     fill = 'green', alpha=0.2)
# }
  # Widen the x-axis breaks as the experiment accumulates more days.
  date_breaks_width <- '1 day'
  if(nrow(current_daily_summary) >= 90) {
    date_breaks_width <- '14 day'
  } else if(nrow(current_daily_summary) >= 60) {
    date_breaks_width <- '5 day'
  } else if(nrow(current_daily_summary) >= 30) {
    date_breaks_width <- '2 day'
  }
  plot_object <- current_daily_summary %>%
    ggplot(aes(x=day_expired_attribution, y=perc_change)) +
    #coord_cartesian(ylim=c(-0.10, 0.3)) +
    scale_y_continuous(labels = percent_format()) +
    scale_x_date(labels = date_format('%Y-%m-%d'), breaks=date_breaks_width) +
    geom_ribbon(aes(ymin = perc_change_conf_low, ymax = perc_change_conf_high), fill = '#7A7A7A', alpha=0.35)
  # shade the significant & negative stretches red
  if(any(current_daily_summary$p_value <= p_value_threshold & current_daily_summary$perc_change < 0, na.rm=TRUE)) {
    plot_object <- plot_object +
      geom_ribbon(aes(ymin = ifelse(p_value <= p_value_threshold & perc_change < 0, perc_change_conf_low, NA),
                      ymax = ifelse(p_value <= p_value_threshold & perc_change < 0, perc_change_conf_high, NA)),
                  fill = 'red', alpha=0.35)
  }
  # shade the significant & positive stretches green
  if(any(current_daily_summary$p_value <= p_value_threshold & current_daily_summary$perc_change > 0, na.rm=TRUE)) {
    plot_object <- plot_object +
      geom_ribbon(aes(ymin = ifelse(p_value <= p_value_threshold & perc_change > 0, perc_change_conf_low, NA),
                      ymax = ifelse(p_value <= p_value_threshold & perc_change > 0, perc_change_conf_high, NA)),
                  fill = 'green', alpha=0.35)
  }
  plot_object +
    geom_hline(yintercept = 0, color='red', alpha=0.5, size=1.5) +
    geom_line(na.rm = TRUE) +
    geom_text(aes(label=percent(perc_change)), vjust=-1, check_overlap = TRUE, na.rm=TRUE,
              size=rel(global__text_size)) +
    geom_errorbarh(data=missing_dates,
                   aes(y = y_axis_location,
                       xmin = min(day_expired_attribution),
                       xmax = max(day_expired_attribution)),
                   color='#828282', height=rel(error_bar_height), size=rel(0.45)) +
    geom_text(data=missing_dates, aes(y=y_axis_location, label=message),
              vjust=-0.5, hjust='left',#0.325,
              check_overlap = TRUE, na.rm=TRUE, size=rel(global__text_size)) +
    labs(#title='Difference in Conversion Rate of `B` - `A`, with Frequentist Confidence Interval - \nWith Attribution Window',
         caption=paste("\n", percent(global__confidence_level), "confidence interval"),
         y='Lift (i.e. Percent change from Control to Variant)',
         x='Day of Experiment') +
    theme_light(base_size=global__theme_base_size) +
    theme(axis.text.x = element_text(angle = 30, hjust = 1))
}
#' Plots the daily probability that the Variant beats the Control for one experiment/metric.
plot__daily_prob_variant_gt_control <- function(experiments_daily_summary, experiment, metric) {
  current_daily_summary <- experiments_daily_summary %>%
    filter(experiment_id == experiment, metric_id == metric)
  error_bar_height <- 0.04 # we can hard code this value because the y-axis is always between 0-1
  median_probability <- median(current_daily_summary$bayesian_prob_variant_gt_control, na.rm = TRUE)
  # Days with no conversion data yet (still inside the attribution window).
  missing_dates <- current_daily_summary %>%
    filter(is.na(control_conversion_rate)) %>%
    select(day_expired_attribution) %>%
    mutate(y_axis_location = 0.5 + error_bar_height) #median_probability)
  missing_dates <- missing_dates %>%
    mutate(message = ifelse(as.character(missing_dates$day_expired_attribution) == as.character(min(missing_dates$day_expired_attribution)),
                            'Lag from\nAttribution\nWindow',
                            NA))
  current_daily_summary %>%
    mutate(day_expired_attribution = as.Date(day_expired_attribution)) %>%
    ggplot(aes(x=day_expired_attribution, y=bayesian_prob_variant_gt_control)) +
    geom_line(na.rm=TRUE) +
    geom_point(na.rm=TRUE) +
    geom_text(aes(label=percent(bayesian_prob_variant_gt_control)), vjust=-0.5,
              check_overlap = TRUE, na.rm=TRUE, size=rel(global__text_size)) +
    geom_hline(yintercept = 0.5, color ='red', alpha=0.5, size=1.5) +
    geom_errorbarh(data=missing_dates,
                   aes(y = y_axis_location,
                       xmin = min(day_expired_attribution),
                       xmax = max(day_expired_attribution)),
                   color='#828282', height=rel(error_bar_height), size=rel(0.45)) +
    geom_text(data=missing_dates, aes(y=y_axis_location, label=message),
              vjust=-0.5, hjust='left',#0.325,
              check_overlap = TRUE, na.rm=TRUE, size=rel(global__text_size)) +
    scale_x_date(date_breaks = '1 days') +
    #scale_y_continuous(breaks = seq(0, 1, 0.05)) +
    expand_limits(y=c(0, 1)) +
    labs(title='Probability Variant is Better',
         y='Probability Variant is Better',
         x='Day of Experiment (and days after)') +
    theme_light(base_size=global__theme_base_size) +
    theme(axis.text.x = element_text(angle = 30, hjust = 1))
}
#' Plots the daily bayesian percent change (with its credible ribbon) for a
#' single experiment/metric.
plot__daily_percent_change_bayesian <- function(experiments_daily_summary, experiment, metric) {
  current_daily_summary <- experiments_daily_summary %>%
    filter(experiment_id == experiment, metric_id == metric) %>%
    mutate(day_expired_attribution = as.Date(day_expired_attribution)) %>%
    mutate(perc_change = bayesian_percent_change,
           perc_change_conf_low = bayesian_conf_low / bayesian_control_cr,
           perc_change_conf_high = bayesian_conf_high / bayesian_control_cr,
           # it is "statistically significant" if the confidence interval is completely above or below 0
           is_stat_sig=(perc_change_conf_low < 0 & perc_change_conf_high < 0) | (perc_change_conf_low > 0 & perc_change_conf_high > 0))
  # error-bar height scaled to 1/25th of the observed CI range
  error_bar_height <- (max(current_daily_summary$perc_change_conf_high, na.rm=TRUE) - min(current_daily_summary$perc_change_conf_low, na.rm=TRUE)) / 25
  median_percent_change <- median(current_daily_summary$perc_change, na.rm = TRUE)
  # Days with no conversion data yet (still inside the attribution window).
  missing_dates <- current_daily_summary %>%
    filter(is.na(control_conversion_rate)) %>%
    select(day_expired_attribution) %>%
    mutate(y_axis_location = error_bar_height) #median_percent_change)
  missing_dates <- missing_dates %>%
    mutate(message = ifelse(as.character(missing_dates$day_expired_attribution) == as.character(min(missing_dates$day_expired_attribution)),
                            'Lag from\nAttribution\nWindow',
                            NA))
  # Widen the x-axis breaks as the experiment accumulates more days.
  date_breaks_width <- '1 day'
  if(nrow(current_daily_summary) >= 90) {
    date_breaks_width <- '14 day'
  } else if(nrow(current_daily_summary) >= 60) {
    date_breaks_width <- '5 day'
  } else if(nrow(current_daily_summary) >= 30) {
    date_breaks_width <- '2 day'
  }
  plot_object <- current_daily_summary %>%
    ggplot(aes(x=day_expired_attribution, y=perc_change)) +
    #coord_cartesian(ylim=c(-0.10, 0.3)) +
    scale_y_continuous(labels = percent_format()) +
    scale_x_date(labels = date_format('%Y-%m-%d'), breaks=date_breaks_width) +
    geom_ribbon(aes(ymin = perc_change_conf_low, ymax = perc_change_conf_high), fill = '#7A7A7A', alpha=0.35)
  # shade the significant & negative stretches red
  if(any(current_daily_summary$is_stat_sig & current_daily_summary$perc_change < 0, na.rm=TRUE)) {
    plot_object <- plot_object +
      geom_ribbon(aes(ymin = ifelse(current_daily_summary$is_stat_sig & perc_change < 0, perc_change_conf_low, NA),
                      ymax = ifelse(current_daily_summary$is_stat_sig & perc_change < 0, perc_change_conf_high, NA)),
                  fill = 'red', alpha=0.35)
  }
  # shade the significant & positive stretches green
  if(any(current_daily_summary$is_stat_sig & current_daily_summary$perc_change > 0, na.rm=TRUE)) {
    plot_object <- plot_object +
      geom_ribbon(aes(ymin = ifelse(current_daily_summary$is_stat_sig & perc_change > 0, perc_change_conf_low, NA),
                      ymax = ifelse(current_daily_summary$is_stat_sig & perc_change > 0, perc_change_conf_high, NA)),
                  fill = 'green', alpha=0.35)
  }
  plot_object +
    geom_hline(yintercept = 0, color='red', alpha=0.5, size=1.5) +
    geom_line(na.rm=TRUE) +
    geom_text(aes(label=percent(perc_change)), vjust=-1, check_overlap = TRUE, na.rm=TRUE,
              size=rel(global__text_size)) +
    geom_errorbarh(data=missing_dates,
                   aes(y = y_axis_location,
                       xmin = min(day_expired_attribution),
                       xmax = max(day_expired_attribution)),
                   color='#828282', height=rel(error_bar_height), size=rel(0.45)) +
    geom_text(data=missing_dates, aes(y=y_axis_location, label=message),
              vjust=-0.5, hjust='left',#0.325,
              check_overlap = TRUE, na.rm=TRUE, size=rel(global__text_size)) +
    labs(#title='Difference in Conversion Rate of `B` - `A`, with Frequentist Confidence Interval - \nWith Attribution Window',
         caption=paste("\n", percent(global__confidence_level), "confidence interval"),
         y='Lift (i.e. Percent change from Control to Variant)',
         x='Day of Experiment') +
    theme_light(base_size=global__theme_base_size) +
    theme(axis.text.x = element_text(angle = 30, hjust = 1))
}
#' returns conversion rate data over time for the cohorts defined in cohorted_snapshots;
#' The user defines several snapshots (i.e. number of days after each person's first visit) to view the
#' overall conversion rates for each cohort
#' @param cohorted_snapshots the data returned by get__cohorted_conversions_snapshot
plot__conversion_rates_snapshot_absolute <- function(cohorted_snapshots, cohort_label) {
  cohorted_snapshots %>%
    ggplot(aes(x=cohort, y=conversion_rate, group=snapshot_label, color=snapshot_label)) +
    geom_line(na.rm = TRUE) +
    geom_point(na.rm = TRUE) +
    geom_text(aes(label=percent(conversion_rate)), vjust=-0.5, check_overlap = TRUE, na.rm = TRUE,
              size=rel(global__text_size)) +
    expand_limits(y=0) +
    scale_y_continuous(labels = percent_format()) +
    labs(#title='P-value over time',
         y="Conversion Rate",
         x=cohort_label,
         color="Snapshot",
         caption="\nAll users for a given cohort must have had at least N days since their first website visit,\nwhere N is the number of days for the corresponding snapshot.") +
    theme_light(base_size=global__theme_base_size) +
    theme(axis.text.x = element_text(angle = 30, hjust = 1))
}
# like plot__conversion_rates_snapshot_absolute, but gives the percent of total conversions
# Generate new graph but instead of absolute conversion rate
# calculate the % of conversions (have to actually have another setting, Max days Number of Days allowed to Convert)
#' @param cohorted_snapshots the data returned by get__cohorted_conversions_snapshot
plot__conversion_rates_snapshot_percent <- function(cohorted_snapshots, snapshot_max_days, cohort_label) {
  cohorted_snapshots %>%
    ggplot(aes(x=cohort, y=conversion_rate_percent_of_all, group=snapshot_label, color=snapshot_label)) +
    geom_line(na.rm = TRUE) +
    geom_point(na.rm = TRUE) +
geom_text(aes(label=percent(conversion_rate_percent_of_all)), vjust=-0.5, check_overlap = TRUE, na.rm = TRUE,
              size=rel(global__text_size)) +
    expand_limits(y=0) +
    scale_y_continuous(labels = percent_format()) +
    labs(#title='P-value over time',
         y="Percent of Total Conversions",
         x=cohort_label,
         color="Snapshot",
         caption=paste("\nAll users within a given cohort must have had at least", snapshot_max_days,
                       "days since their first website visit to be included.")) +
    theme_light(base_size=global__theme_base_size) +
    theme(axis.text.x = element_text(angle = 30, hjust = 1))
}
#' Plots the overall historical conversion rate per metric.
#'
#' @param historical_crs the object returned from `get_historical_conversion_rates()`
plot__conversion_rates_historical <- function(historical_crs) {
  historical_crs %>%
    ggplot(aes(x=metric_id, y=historical_conversion_rate, fill=metric_id)) +
    geom_col() +
    scale_y_continuous(labels = percent_format()) +
    labs(#title='P-value over time',
         y='Historical Conversion Rate',
         x='Metric') +
    geom_text(aes(label=percent(historical_conversion_rate)), vjust=-0.5, size=rel(global__text_size)) +
    geom_text(aes(label=paste("Median # Days From\nFirst-Visit to Conversion:",
                              round(median_days_from_first_visit_to_conversion, 1))),
              vjust=1.5, size=rel(global__text_size)) +
    scale_fill_manual(values=global__metric_colors) +
    theme_light(base_size=global__theme_base_size) +
    theme(axis.text.x = element_text(angle = 30, hjust = 1), legend.position='none')
}
#' gives historical conversion rates, but considering the *median* attribution rates for each metric over
#' all experiments
#'
#' @param historical_crs the object returned from `get_historical_conversion_rates()`
plot__conversion_rates_attribution <- function(historical_crs) {
  historical_crs %>%
    ggplot(aes(x=metric_id, y=conversion_rate_within_window, fill=metric_id)) +
    # faded background bar = overall historical rate; colored bar = rate
    # realized within the attribution window
    geom_col(aes(y=historical_conversion_rate), fill='black', alpha=0.2) +
    geom_col() +
    geom_text(aes(label=paste(percent(conversion_rate_within_window), "(within Attribution)")), vjust=-0.5,
              size=rel(global__text_size)) +
    geom_text(aes(label=paste(percent(percent_cr_window_realized), "of total")), vjust=1.5,
              size=rel(global__text_size)) +
    geom_text(aes(y=historical_conversion_rate,
                  label=paste(percent(historical_conversion_rate), "(Historical)")),
              vjust=-0.5, size=rel(global__text_size)) +
    scale_y_continuous(labels = percent_format()) +
    labs(#title='P-value over time',
         y='Conversion Rates within Attribution Window',
         x='Metric') +
    scale_fill_manual(values=global__metric_colors) +
    theme_light(base_size=global__theme_base_size) +
    theme(axis.text.x = element_text(angle = 30, hjust = 1), legend.position='none')
}
/shiny-app/r_scripts/helpers_plots.R
no_license
Rajesh16702/shiny-ab-testing
R
false
false
48,199
r
library(scales)
source('definitions.R')
source('helpers_misc.R')
#' Plots Website Traffic over time
#'
#' @param experiment_data list of data-frames from load_data
#' @param only_first_time_visits only count the first time the user appears in the dataset (i.e. first time to website)
#' @param is_weekly if TRUE, groups/cohorts data by week, if FALSE then groups by month
#' @param filter_year_end_beginning_weeks if TRUE (and if is_weekly is TRUE) then it excludes weeks 0 (first
#' week number of the year) and 53 (last week number of the year) because both are only partial weeks, and
#' will make the traffic appear to drop during those weeks.
#' @param top_n_paths if specified, the graph color the lines by path, and count by the top (i.e. highest
#' traffic) paths, grouping the remaining paths into an 'Other' category.
plot__website_traffic <- function(experiment_data,
                                  only_first_time_visits = FALSE,
                                  is_weekly = TRUE,
                                  filter_year_end_beginning_weeks = TRUE,
                                  top_n_paths = NULL) {
  # '%W' / '%m' are strftime codes (week-of-year / month) used to build cohorts.
  if(is_weekly) {
    cohort_format <- '%W'
    cohort_name <- "Week"
  } else {
    cohort_format <- '%m'
    cohort_name <- "Month"
  }
  caption <- ""
  if(only_first_time_visits) {
    title <- paste("First-Time User Visits Per", cohort_name)
    subtitle <- paste0("Represents the number of new users to the website for a given ", tolower(cohort_name),".")
    y_label <- paste0("First-Time User Visits (per ", cohort_name, ")")
  } else {
    title <- paste("Unique User Visits Per", cohort_name)
    subtitle <- paste0("Users are only counted once in the given ", tolower(cohort_name),
                       ", but the same user may be represented in multiple ", tolower(cohort_name), ".")
    y_label <- paste0("Unique User Visits (per ", cohort_name, ")")
  }
  # NOTE(review): website_traffic__to_cohort_num_users is defined elsewhere
  # (helpers_misc.R presumably) -- verify its contract against that file.
  num_users_data <- website_traffic__to_cohort_num_users(experiment_data,
                                                         cohort_format = cohort_format,
                                                         top_n_paths = top_n_paths,
                                                         only_first_time_visits = only_first_time_visits)
  if(is_weekly && filter_year_end_beginning_weeks) {
    num_users_data <- num_users_data %>%
      filter(!str_detect(string=cohort, pattern='-00') & !str_detect(string=cohort, pattern='-53'))
    caption <- "\nPartial weeks at the end and beginning of the year are excluded."
  }
  # One overall line, or one line per (top-n) path.
  if(is.null(top_n_paths)) {
    plot_object <- num_users_data %>%
      ggplot(aes(x=cohort, y=num_users, group = 1))
  } else {
    plot_object <- num_users_data %>%
      rename(Path=path) %>%
      ggplot(aes(x=cohort, y=num_users, group=Path, color=Path))
  }
  plot_object +
    geom_line() +
    geom_point() +
    expand_limits(y = 0) +
    geom_text(aes(label = prettify_numerics(num_users)), check_overlap=TRUE, vjust = -0.5,
              size=rel(global__text_size)) +
    scale_y_continuous(labels = comma_format()) +
    theme_light(base_size=global__theme_base_size) +
    theme(axis.text.x = element_text(angle = 30, hjust = 1)) +
    labs(title = title,
         subtitle = subtitle,
         x = cohort_name,
         y = y_label,
         caption = caption)
}
#' Plots the bayesian posterior control/variant/prior distributions of a particular experiment/metric.
#'
#' @param experiments_summary summary data-frame of experiment results
#' @param experiment the experiment_id to plot
#' @param metric the metric_id to plot
#' @param confidence_level the confidence level passed to `experiments__get_summary()` which generates
#' `experiment_data`
#' @param show_prior_distribution if TRUE, the prior beta distribution is drawn as well
plot__bayesian_posterior <- function(experiments_summary, experiment, metric, confidence_level,
                                     show_prior_distribution=TRUE) {
  local_experiment <- experiments_summary %>%
    filter(experiment_id == experiment & metric_id == metric)
  prior_alpha <- local_experiment$prior_alpha
  prior_beta <- local_experiment$prior_beta
  control_alpha <- local_experiment$control_alpha
  control_beta <- local_experiment$control_beta
  variant_alpha <- local_experiment$variant_alpha
  variant_beta <- local_experiment$variant_beta
  bayesian_prob_variant_gt_control <- local_experiment$bayesian_prob_variant_gt_control
  control_name <- local_experiment$control_name
  variant_name <- local_experiment$variant_name
  # order is Control, Variant, Prior -- relied upon below and further down
  alpha_vector <- c(control_alpha, variant_alpha, prior_alpha)
  beta_vector <- c(control_beta, variant_beta, prior_beta)
  # x-range wide enough to cover the 0.1%-99.9% quantiles of the plotted betas
  # (prior included only when it will actually be drawn).
  if(show_prior_distribution) {
    x_min <- min(qbeta(0.001, alpha_vector, beta_vector))
    x_max <- max(qbeta(0.999, alpha_vector, beta_vector))
  } else {
    x_min <- min(qbeta(0.001, alpha_vector[1:2], beta_vector[1:2]))
    x_max <- max(qbeta(0.999, alpha_vector[1:2], beta_vector[1:2]))
  }
  x_axis_spread <- x_max - x_min
  # depending on the where we want to graph and how spread out the values are, we will want to get more/less
  # granular with our plot
  distro_names <- c("Control", "Variant", "Prior") # don't change order, controlled from above
  # Evaluate each beta density on a 1000-point grid over [x_min, x_max].
  distros <- data.frame(alpha = alpha_vector, beta = beta_vector, group = distro_names) %>%
    group_by(alpha, beta, group) %>%
    do(tibble(x = seq(x_min, x_max, x_axis_spread / 1000))) %>%
    ungroup() %>%
    mutate(y = dbeta(x, alpha, beta),
           Parameters = factor(paste0(group, ": alpha= ", comma_format()(alpha),
                                      ", beta= ", comma_format()(beta))))
  # Finer x-axis breaks for tighter distributions.
  x_axis_break_steps <- 0.05
  if(x_axis_spread <= 0.02) {
    x_axis_break_steps <- 0.001
  } else if(x_axis_spread <= 0.05) {
    x_axis_break_steps <- 0.005
  } else if(x_axis_spread <= 0.15) {
    x_axis_break_steps <- 0.01
  } else if(x_axis_spread <= 0.5) {
    x_axis_break_steps <- 0.02
  }
  #custom_colors <- rev(hue_pal()(3))
  custom_colors <- global__colors_bayesian
  if(!show_prior_distribution) {
    distros <- distros %>% filter(!str_detect(Parameters, "Prior"))
    custom_colors <- custom_colors %>% remove_val(global__colors_prior)
  }
  # two-sided credible-interval bounds for control and variant
  confidence_difference <- 1 - confidence_level
  two_sided_difference <- confidence_difference / 2
  control_cred_low <- qbeta(two_sided_difference, control_alpha, control_beta)
  control_cred_high <- qbeta(1 - two_sided_difference, control_alpha, control_beta)
  variant_cred_low <- qbeta(two_sided_difference, variant_alpha, variant_beta)
  variant_cred_high <- qbeta(1 - two_sided_difference, variant_alpha, variant_beta)
  # a_cr_simulation <- rbeta(1e6, control_alpha, control_beta)
  # b_cr_simulation <- rbeta(1e6, variant_alpha, variant_beta)
  # percent_of_time_b_wins <- mean(b_cr_simulation > a_cr_simulation)
  # used to position/size the credible-interval error bars below the x-axis
  max_distros_20th <- max(distros$y) / 20
  # re-level
(re-order) the levels of Params so the order is Control->Variant->Prior param_levels <- levels(distros$Parameters) distros <- distros %>% mutate(Parameters = factor(distros$Parameters, levels = c(param_levels[3], param_levels[1], param_levels[2]))) plot_object <- ggplot(data=distros, aes(x, y, color = Parameters)) + geom_line() + geom_area(aes(fill=Parameters, group=Parameters), alpha=0.3, position = 'identity') + geom_errorbarh(aes(xmin = control_cred_low, xmax = control_cred_high, y = max_distros_20th * -1), height = max_distros_20th * 0.75, color = global__colors_control, alpha=0.3) + geom_errorbarh(aes(xmin = variant_cred_low, xmax = variant_cred_high, y = max_distros_20th * -2), height = max_distros_20th * 0.75, color = global__colors_variant, alpha=0.3) + scale_x_continuous(breaks = seq(0, 1, x_axis_break_steps), labels = percent_format()) + theme_light(base_size=global__theme_base_size) + theme(axis.text.x = element_text(angle = 30, hjust = 1), legend.text=element_text(size=rel(0.9)), plot.subtitle=element_text(size=rel(0.9))) + coord_cartesian(xlim=c(x_min, x_max)) + scale_fill_manual(values=custom_colors) + scale_color_manual(values=custom_colors) + labs(#title='Posterior Probability Distributions of Control & Variant', subtitle=paste0(paste0('The probability the Variant is better is ', percent(bayesian_prob_variant_gt_control), ".\n"), #paste0('\nExperiment: "', experiment, '"'), #paste0('\nMetric: "', metric, '"'), paste0('\nControl Name: "', control_name, '"'), paste0('\nVariant Name: "', variant_name, '"')), x="Conversion Rates", y="Density of beta", caption=paste("\nThe lines under each distribution show the corresponding", percent(confidence_level), "confidence interval.")) return (plot_object) } #' Plots the control vs variant conversion rates for all metrics #' #' @param experiment_data list of data-frames from load_data #' @param experiment plot__conversion_rates <- function(experiments_summary, experiment) { get_type <- function(type) { if(type 
== 'p_value') { return (type) } return (str_split(type, '_', simplify=TRUE)[2]) } modified_summary <- experiments_summary %>% filter(experiment_id == experiment) %>% select(metric_id, control_successes, control_trials, control_conversion_rate, variant_successes, variant_trials, variant_conversion_rate, p_value) %>% gather(type, value, -metric_id) %>% mutate(variation=ifelse(type != 'p_value' & str_detect(type, 'control'), 'Control', 'Variant')) %>% mutate(actual_type=map_chr(type, ~ get_type(.))) %>% mutate(actual_type=ifelse(actual_type == 'conversion', 'conversion_rate', actual_type)) %>% select(-type) %>% spread(actual_type, value) %>% group_by(metric_id) %>% mutate(p_value=min(p_value, na.rm=TRUE), highest_conversion_rate = max(conversion_rate)) %>% ungroup() modified_summary %>% ggplot(aes(x=metric_id, y=conversion_rate, group=variation, fill=variation)) + #geom_col(position='dodge') + geom_bar(stat="identity", position=position_dodge(width=0.8), width=0.75) + scale_y_continuous(labels=percent_format()) + coord_cartesian(ylim=c(0, max(modified_summary$conversion_rate) + 0.05)) + geom_text(aes(y=highest_conversion_rate, label= percent_format()(conversion_rate)), position=position_dodge(width=.8), check_overlap=TRUE, vjust=-4, size=rel(global__text_size + 0.5)) + geom_text(aes(label= comma_format()(successes)), position=position_dodge(width=.8), check_overlap=TRUE, vjust=-1.75, size=rel(global__text_size)) + geom_text(aes(label= comma_format()(trials)), position=position_dodge(width=.8), check_overlap=TRUE, vjust=-0.5, size=rel(global__text_size)) + #facet_wrap(~ track_field_name, ncol=1, scales='free_x') + scale_fill_manual(values=c(global__colors_control, global__colors_variant)) + theme_light(base_size=global__theme_base_size) + theme(axis.text.x=element_text(angle=35, hjust=1)) + labs(caption="", x="Metric", y="Conversion Rate", fill="Variation") } #' Plots the control vs variant conversion rates based on the bayesian methodology, for all metrics #' #' 
#' @param experiment_data list of data-frames from load_data
#' @param experiment
plot__conversion_rates_bayesian <- function(experiments_summary, experiment) {
  # maps a wide column name (e.g. 'control_alpha' or 'bayesian_control_cr') to its measure;
  # the probability column passes through unchanged
  get_type <- function(type) {
    if(type == 'bayesian_prob_variant_gt_control') {
      return (type)
    }
    return (str_split(type, '_', simplify=TRUE)[2])
  }
  # reshape wide bayesian columns into one row per (metric, variation)
  modified_summary <- experiments_summary %>%
    filter(experiment_id == experiment) %>%
    select(metric_id, control_alpha, control_beta, bayesian_control_cr,
           variant_alpha, variant_beta, bayesian_variant_cr,
           bayesian_prob_variant_gt_control) %>%
    gather(type, value, -metric_id) %>%
    mutate(variation=ifelse(type != 'bayesian_prob_variant_gt_control' &
                              str_detect(type, 'control'),
                            'Control', 'Variant')) %>%
    mutate(actual_type=map_chr(type, ~ get_type(.))) %>%
    # 'bayesian_control_cr'/'bayesian_variant_cr' split to 'control'/'variant'; both are
    # the conversion-rate measure
    mutate(actual_type=ifelse(actual_type == 'control' | actual_type == 'variant',
                              'conversion_rate', actual_type)) %>%
    select(-type) %>%
    spread(actual_type, value) %>%
    group_by(metric_id) %>%
    # min(..., na.rm) fills the probability across both variation rows of a metric
    mutate(bayesian_prob_variant_gt_control=min(bayesian_prob_variant_gt_control, na.rm=TRUE),
           highest_conversion_rate = max(conversion_rate)) %>%
    ungroup()
  modified_summary %>%
    ggplot(aes(x=metric_id, y=conversion_rate, group=variation, fill=variation)) +
    #geom_col(position='dodge') +
    geom_bar(stat="identity", position=position_dodge(width=0.8), width=0.75) +
    scale_y_continuous(labels=percent_format()) +
    coord_cartesian(ylim=c(0, max(modified_summary$conversion_rate) + 0.05)) +
    geom_text(aes(y=highest_conversion_rate, label= percent_format()(conversion_rate)),
              position=position_dodge(width=.8), check_overlap=TRUE, vjust=-4,
              size=rel(global__text_size + 0.5)) +
    geom_text(aes(label=paste('a:', comma_format()(alpha))),
              position=position_dodge(width=.8), check_overlap=TRUE, vjust=-1.75,
              size=rel(global__text_size)) +
    geom_text(aes(label=paste('b:', comma_format()(beta))),
              position=position_dodge(width=.8), check_overlap=TRUE, vjust=-0.5,
              size=rel(global__text_size)) +
    #facet_wrap(~ track_field_name, ncol=1, scales='free_x') +
    scale_fill_manual(values=c(global__colors_control, global__colors_variant)) +
    theme_light(base_size=global__theme_base_size) +
    theme(axis.text.x=element_text(angle=35, hjust=1)) +
    labs(caption="",
         x="Metric",
         y="Conversion Rate",
         fill="Variation")
}

#' Plots the percent change from the Control to the Variant along with p-values
#'
#' @param experiment_data list of data-frames from load_data
#' @param experiment
plot__percent_change_frequentist <- function(experiments_summary, experiment,
                                             p_value_threshold=0.05) {
  current_experiments <- experiments_summary %>% filter(experiment_id == experiment)
  # y-axis limits rounded to the nearest 5%
  nearest_x <- 0.05
  min_perc_change <- min(0, ceiling_nearest_x(min(current_experiments$percent_change_from_control),
                                              nearest_x))
  max_perc_change <- ceiling_nearest_x(max(current_experiments$percent_change_from_control),
                                       nearest_x)
  # categorize each metric for the fill color legend
  current_experiments <- current_experiments %>%
    mutate(p_value_sig_category=case_when(
      p_value > p_value_threshold ~ 'No',
      p_value <= p_value_threshold & percent_change_from_control > 0 ~ 'Yes - Increase',
      p_value <= p_value_threshold & percent_change_from_control < 0 ~ 'Yes - Decrease',
      TRUE ~ 'Unknown'
    ))
  plot_object <- current_experiments %>%
    ggplot(aes(x=metric_id, y=percent_change_from_control, fill=p_value_sig_category)) +
    geom_hline(yintercept=0, color='#EB5424', size=0.5, alpha=0.5) +
    geom_col(alpha=0.85)
  # if there are any experiments where the percent change is greater than 0, add text;
  # if i don't do the check, i get an error
  t <- current_experiments %>% filter(percent_change_from_control >= 0)
  if(nrow(t) > 0) {
    plot_object <- plot_object +
      geom_text(data=t, aes(label=paste(percent(percent_change_from_control), 'Change')),
                vjust=-2, size=rel(global__text_size), check_overlap=TRUE) +
      geom_text(data=t, aes(label=paste(round(p_value, 3), 'p-value')),
                vjust=-0.5, size=rel(global__text_size), check_overlap=TRUE)
  }
  # if there are any experiments where the percent change is less than 0, add text;
  # if i don't do the check, i get an error
  t <- current_experiments %>% filter(percent_change_from_control < 0)
  if(nrow(t) > 0) {
    plot_object <- plot_object +
      geom_text(data=t, aes(label=paste(percent(percent_change_from_control), 'Change')),
                vjust=2.7, size=rel(global__text_size), check_overlap=TRUE) +
      geom_text(data=t, aes(label=paste(round(p_value, 3), 'p-value')),
                vjust=1.2, size=rel(global__text_size), check_overlap=TRUE)
  }
  # subset the named palette to only the categories actually present
  bad_good_colors <- c('#B4B7B9', '#DF585C', '#37B57F')
  names(bad_good_colors) <- c("No", "Yes - Decrease", "Yes - Increase")
  fill_colors <- bad_good_colors[sort(unique(current_experiments$p_value_sig_category))]
  plot_object +
    coord_cartesian(ylim=c(min_perc_change - 0.02, max_perc_change + 0.02)) +
    scale_y_continuous(labels=percent_format(),
                       breaks=seq(min_perc_change, max_perc_change, nearest_x)) +
    scale_fill_manual(values=fill_colors) +
    theme_light(base_size=global__theme_base_size) +
    theme(axis.text.x=element_text(angle=35, hjust=1)) +
    labs(caption=paste("\nThe p-value threshold for statistical significance is",
                       p_value_threshold),
         x="Metric",
         y="Percent Change from Control to Variant",
         fill="Statistically Significant")
}

#' Plots the percent change from the Control to the Variant along with the
#' probability that the Variant is better than the control.
#'
#' @param experiment_data list of data-frames from load_data
#' @param experiment
plot__percent_change_bayesian <- function(experiments_summary, experiment) {
  # recompute the percent change from the bayesian conversion-rate estimates
  current_experiments <- experiments_summary %>%
    filter(experiment_id == experiment) %>%
    select(-percent_change_from_control) %>%
    mutate(percent_change_from_control = bayesian_cr_difference / bayesian_control_cr)
  # y-axis limits rounded to the nearest 5%
  # NOTE(review): unlike plot__percent_change_frequentist, this does not clamp
  # min_perc_change with min(0, ...) — confirm whether that asymmetry is intended
  nearest_x <- 0.05
  min_perc_change <- ceiling_nearest_x(min(current_experiments$percent_change_from_control),
                                       nearest_x)
  max_perc_change <- ceiling_nearest_x(max(current_experiments$percent_change_from_control),
                                       nearest_x)
  plot_object <- current_experiments %>%
    ggplot(aes(x=metric_id, y=percent_change_from_control,
               fill=bayesian_prob_variant_gt_control)) +
    geom_col(alpha=0.85)
  # if there are any experiments where the percent change is greater than 0, add text;
  # if i don't do the check, i get an error
  t <- current_experiments %>% filter(percent_change_from_control >= 0)
  if(nrow(t) > 0) {
    plot_object <- plot_object +
      geom_text(data=t, aes(label=paste(percent(percent_change_from_control), 'Change')),
                vjust=-2, size=rel(global__text_size), check_overlap=TRUE) +
      geom_text(data=t, aes(label=paste(percent(bayesian_prob_variant_gt_control),
                                        'Probability')),
                vjust=-0.5, size=rel(global__text_size), check_overlap=TRUE)
  }
  # if there are any experiments where the percent change is less than 0, add text;
  # if i don't do the check, i get an error
  t <- current_experiments %>% filter(percent_change_from_control < 0)
  if(nrow(t) > 0) {
    plot_object <- plot_object +
      geom_text(data=t, aes(label=paste(percent(percent_change_from_control), 'Change')),
                vjust=2.7, size=rel(global__text_size), check_overlap=TRUE) +
      geom_text(data=t, aes(label=paste(percent(bayesian_prob_variant_gt_control),
                                        'Probability')),
                vjust=1.2, size=rel(global__text_size), check_overlap=TRUE)
  }
  plot_object +
    coord_cartesian(ylim=c(min_perc_change - 0.02, max_perc_change + 0.02)) +
    scale_y_continuous(labels=percent_format(),
                       breaks=seq(min_perc_change, max_perc_change, nearest_x)) +
    #scale_fill_manual(values=c(global__colors_bad, global__colors_good)) +
    scale_fill_gradient2(low = global__colors_bad, mid = "gray", high = global__colors_good,
                         midpoint = 0.5, limits=c(0,1)) +
    theme_light(base_size=global__theme_base_size) +
    theme(axis.text.x=element_text(angle=35, hjust=1)) +
    labs(#caption="\nThe decimal value next to the bar represents the Probability that the Variant is Better.",
         x="Metric",
         y="Percent Change from Control to Variant",
         fill="Probability Variant is Better")
}

#' Plots the percent change (with confidence intervals) from the Control to the Variant along with p-values
#'
#' @param experiment_data list of data-frames from load_data
#' @param experiment
plot__percent_change_conf_frequentist <- function(experiments_summary, experiment,
                                                  p_value_threshold=0.05) {
  current_experiments <- experiments_summary %>% filter(experiment_id == experiment)
  # convert the absolute confidence bounds into percent-change-from-control terms
  current_experiments <- current_experiments %>%
    select(metric_id, control_conversion_rate, p_value, percent_change_from_control,
           contains('frequentist')) %>%
    mutate(percent_change_conf_low=frequentist_conf_low / control_conversion_rate,
           percent_change_conf_high=frequentist_conf_high / control_conversion_rate,
           p_value_sig = p_value <= p_value_threshold) %>%
    select(metric_id, contains('percent_change'), p_value, p_value_sig)
  # categorize each metric for the color legend
  current_experiments <- current_experiments %>%
    mutate(p_value_sig_category=case_when(
      p_value > p_value_threshold ~ 'No',
      p_value <= p_value_threshold & percent_change_from_control > 0 ~ 'Yes - Increase',
      p_value <= p_value_threshold & percent_change_from_control < 0 ~ 'Yes - Decrease',
      TRUE ~ 'Unknown'
    ))
  # y-axis limits rounded to the nearest 10%
  y_expand <- 0.10
  min_y <- ceiling_nearest_x(min(current_experiments$percent_change_conf_low), y_expand)
  max_y <- ceiling_nearest_x(max(current_experiments$percent_change_conf_high), y_expand)
  # subset the named palette to only the categories actually present
  bad_good_colors <- c('#B4B7B9', '#DF585C', '#37B57F')
  names(bad_good_colors) <- c("No", "Yes - Decrease", "Yes - Increase")
  fill_colors <- bad_good_colors[sort(unique(current_experiments$p_value_sig_category))]
  current_experiments %>%
    ggplot(aes(x=metric_id, y=percent_change_from_control, color=p_value_sig_category)) +
    geom_point(size=2) +
    geom_errorbar(aes(ymin=percent_change_conf_low, ymax=percent_change_conf_high), size=0.8) +
    geom_text(aes(label=percent(percent_change_from_control)),
              hjust=1.2, size=rel(global__text_size), check_overlap=TRUE, color='black') +
    geom_text(aes(y=max_y + y_expand, label=paste('p-value:', round(p_value, 3))),
              vjust=1, size=rel(global__text_size), check_overlap=TRUE, color='black') +
    geom_hline(yintercept=0, color='#EB5424', size=1.2, alpha=0.5) +
    coord_cartesian(ylim=c(min_y - y_expand, max_y + y_expand)) +
    scale_color_manual(values=fill_colors) +
    scale_y_continuous(labels=percent_format(),
                       breaks=seq(min_y - y_expand, max_y + y_expand, y_expand)) +
    theme_light(base_size=global__theme_base_size) +
    theme(axis.text.x=element_text(angle=35, hjust=1)) +
    labs(caption=paste(paste("\nThe p-value threshold for statistical significance is",
                             p_value_threshold),
                       "\nThe error bars show the", percent(1 - p_value_threshold),
                       "confidence interval."),
         x="Metric",
         y="Percent Change from Control to Variant",
         color="Statistically Significant")
}

#' Plots the percent change (with confidence intervals) from the Control to the Variant along with the
#' probability that the Variant is better than the control.
#'
#' @param experiment_data list of data-frames from load_data
#' @param experiment
plot__percent_change_conf_bayesian <- function(experiments_summary, experiment) {
  current_experiments <- experiments_summary %>% filter(experiment_id == experiment)
  # convert the absolute bayesian credible bounds into percent-change-from-control terms
  current_experiments <- current_experiments %>%
    select(metric_id, contains('bayesian')) %>%
    mutate(percent_change_from_control=bayesian_cr_difference / bayesian_control_cr,
           percent_change_conf_low=bayesian_conf_low / bayesian_control_cr,
           percent_change_conf_high=bayesian_conf_high / bayesian_control_cr) %>%
    select(metric_id, contains('percent_change'), bayesian_prob_variant_gt_control)
  # y-axis limits rounded to the nearest 10%
  y_expand <- 0.10
  min_y <- ceiling_nearest_x(min(current_experiments$percent_change_conf_low), y_expand)
  max_y <- ceiling_nearest_x(max(current_experiments$percent_change_conf_high), y_expand)
  current_experiments %>%
    ggplot(aes(x=metric_id, y=percent_change_from_control,
               color=bayesian_prob_variant_gt_control)) +
    geom_point(size=2) +
    geom_errorbar(aes(ymin=percent_change_conf_low, ymax=percent_change_conf_high), size=0.8) +
    geom_text(aes(label=percent(percent_change_from_control)),
              hjust=1.2, size=rel(global__text_size), check_overlap=TRUE, color='black') +
    geom_text(aes(y=max_y + y_expand,
                  label=paste(percent(bayesian_prob_variant_gt_control), "Probability")),
              vjust=1, size=rel(global__text_size), check_overlap=TRUE, color='black') +
    geom_hline(yintercept=0, color='#EB5424', size=1.2, alpha=0.5) +
    coord_cartesian(ylim=c(min_y - y_expand, max_y + y_expand)) +
    scale_color_gradient2(low = global__colors_bad, mid = "gray", high = global__colors_good,
                          midpoint = 0.5, limits=c(0,1)) +
    #scale_color_manual(values=c(global__colors_bad, global__colors_good)) +
    scale_y_continuous(labels=percent_format(),
                       breaks=seq(min_y - y_expand, max_y + y_expand, y_expand)) +
    theme_light(base_size=global__theme_base_size) +
    theme(axis.text.x=element_text(angle=35, hjust=1)) +
    labs(caption=paste("\nThe error bars show the", percent(global__confidence_level),
                       "confidence interval."),
         x="Metric",
         y="Percent Change from Control to Variant",
         color="Probability Variant is Better")
}

#' Plots the daily p-value of an experiment/metric over the life of the experiment.
plot__daily_p_value <- function(experiments_daily_summary, experiment, metric,
                                p_value_threshold=0.05) {
  current_daily_summary <- experiments_daily_summary %>%
    filter(experiment_id == experiment, metric_id == metric)
  error_bar_height <- (max(current_daily_summary$p_value, na.rm=TRUE) -
                       min(current_daily_summary$p_value, na.rm=TRUE)) / 25
  median_p_value <- median(current_daily_summary$p_value, na.rm = TRUE)
  # days with no conversion-rate data yet (the tail of the attribution window)
  missing_dates <- current_daily_summary %>%
    filter(is.na(control_conversion_rate)) %>%
    select(day_expired_attribution) %>%
    mutate(y_axis_location = p_value_threshold + error_bar_height) #median_p_value)
  # only label the first missing day so the annotation appears once
  missing_dates <- missing_dates %>%
    mutate(message = ifelse(as.character(missing_dates$day_expired_attribution) ==
                              as.character(min(missing_dates$day_expired_attribution)),
                            'Lag from\nAttribution\nWindow', NA))
  current_daily_summary %>%
    mutate(day_expired_attribution = as.Date(day_expired_attribution)) %>%
    ggplot(aes(x=day_expired_attribution, y=p_value)) +
    geom_line(na.rm = TRUE) +
    geom_point(na.rm = TRUE) +
    geom_text(aes(label=round(p_value, 3)),
              vjust=-0.5, check_overlap = TRUE, na.rm = TRUE, size=rel(global__text_size)) +
    geom_hline(yintercept = p_value_threshold, color ='red', alpha=0.5, size=1.5) +
    geom_errorbarh(data=missing_dates,
                   aes(y = y_axis_location,
                       xmin = min(day_expired_attribution),
                       xmax = max(day_expired_attribution)),
                   color='#828282', height=rel(error_bar_height), size=rel(0.45)) +
    geom_text(data=missing_dates, aes(y=y_axis_location, label=message),
              vjust=-0.5, hjust='left', #0.325,
              check_overlap = TRUE, na.rm=TRUE, size=rel(global__text_size)) +
    scale_x_date(date_breaks = '1 days') +
    #scale_y_continuous(breaks = seq(0, 1, 0.05)) +
    expand_limits(y=0) +
    labs(title='P-value over time',
         y='P-Value',
         x='Day of Experiment (and days after)') +
    theme_light(base_size=global__theme_base_size) +
    theme(axis.text.x = element_text(angle = 30, hjust = 1))
}

#' Plots the daily percent change (frequentist) with its confidence-interval ribbon.
plot__daily_percent_change_frequentist <- function(experiments_daily_summary, experiment, metric,
                                                   p_value_threshold=0.05) {
  # percent-change and bounds relative to the control conversion rate
  current_daily_summary <- experiments_daily_summary %>%
    filter(experiment_id == experiment, metric_id == metric) %>%
    mutate(day_expired_attribution = as.Date(day_expired_attribution)) %>%
    mutate(perc_change = frequentist_cr_difference / control_conversion_rate,
           perc_change_conf_low = frequentist_conf_low / control_conversion_rate,
           perc_change_conf_high = frequentist_conf_high / control_conversion_rate)
  error_bar_height <- (max(current_daily_summary$perc_change_conf_high, na.rm=TRUE) -
                       min(current_daily_summary$perc_change_conf_low, na.rm=TRUE)) / 25
  median_percent_change <- median(current_daily_summary$perc_change, na.rm = TRUE)
  # days with no conversion-rate data yet (the tail of the attribution window)
  missing_dates <- current_daily_summary %>%
    filter(is.na(control_conversion_rate)) %>%
    select(day_expired_attribution) %>%
    mutate(y_axis_location = error_bar_height) #median_percent_change)
  # only label the first missing day so the annotation appears once
  missing_dates <- missing_dates %>%
    mutate(message = ifelse(as.character(missing_dates$day_expired_attribution) ==
                              as.character(min(missing_dates$day_expired_attribution)),
                            'Lag from\nAttribution\nWindow', NA))
  # fewer x-axis breaks for longer-running experiments
  date_breaks_width <- '1 day'
  if(nrow(current_daily_summary) >= 90) {
    date_breaks_width <- '14 day'
  } else if(nrow(current_daily_summary) >= 60) {
    date_breaks_width <- '5 day'
  } else if(nrow(current_daily_summary) >= 30) {
    date_breaks_width <- '2 day'
  }
  plot_object <- current_daily_summary %>%
    ggplot(aes(x=day_expired_attribution, y=perc_change)) +
    #coord_cartesian(ylim=c(-0.10, 0.3)) +
    scale_y_continuous(labels = percent_format()) +
    scale_x_date(labels = date_format('%Y-%m-%d'), breaks=date_breaks_width) +
    geom_ribbon(aes(ymin = perc_change_conf_low, ymax = perc_change_conf_high),
                fill = '#7A7A7A', alpha=0.35)
  # overlay red/green ribbons where the change is statistically significant;
  # the any() checks avoid ggplot errors when a segment has no qualifying rows
  if(any(current_daily_summary$p_value <= p_value_threshold &
         current_daily_summary$perc_change < 0, na.rm=TRUE)) {
    plot_object <- plot_object +
      geom_ribbon(aes(ymin = ifelse(p_value <= p_value_threshold & perc_change < 0,
                                    perc_change_conf_low, NA),
                      ymax = ifelse(p_value <= p_value_threshold & perc_change < 0,
                                    perc_change_conf_high, NA)),
                  fill = 'red', alpha=0.35)
  }
  if(any(current_daily_summary$p_value <= p_value_threshold &
         current_daily_summary$perc_change > 0, na.rm=TRUE)) {
    plot_object <- plot_object +
      geom_ribbon(aes(ymin = ifelse(p_value <= p_value_threshold & perc_change > 0,
                                    perc_change_conf_low, NA),
                      ymax = ifelse(p_value <= p_value_threshold & perc_change > 0,
                                    perc_change_conf_high, NA)),
                  fill = 'green', alpha=0.35)
  }
  plot_object +
    geom_hline(yintercept = 0, color='red', alpha=0.5, size=1.5) +
    geom_line(na.rm = TRUE) +
    geom_text(aes(label=percent(perc_change)),
              vjust=-1, check_overlap = TRUE, na.rm=TRUE, size=rel(global__text_size)) +
    geom_errorbarh(data=missing_dates,
                   aes(y = y_axis_location,
                       xmin = min(day_expired_attribution),
                       xmax = max(day_expired_attribution)),
                   color='#828282', height=rel(error_bar_height), size=rel(0.45)) +
    geom_text(data=missing_dates, aes(y=y_axis_location, label=message),
              vjust=-0.5, hjust='left', #0.325,
              check_overlap = TRUE, na.rm=TRUE, size=rel(global__text_size)) +
    labs(#title='Difference in Conversion Rate of `B` - `A`, with Frequentist Confidence Interval - \nWith Attribution Window',
         caption=paste("\n", percent(global__confidence_level), "confidence interval"),
         y='Lift (i.e. Percent change from Control to Variant)',
         x='Day of Experiment') +
    theme_light(base_size=global__theme_base_size) +
    theme(axis.text.x = element_text(angle = 30, hjust = 1))
}

#' Plots the daily probability that the Variant is better than the Control.
plot__daily_prob_variant_gt_control <- function(experiments_daily_summary, experiment, metric) {
  current_daily_summary <- experiments_daily_summary %>%
    filter(experiment_id == experiment, metric_id == metric)
  error_bar_height <- 0.04  # we can hard code this value because the y-axis is always between 0-1
  median_probability <- median(current_daily_summary$bayesian_prob_variant_gt_control,
                               na.rm = TRUE)
  # days with no conversion-rate data yet (the tail of the attribution window)
  missing_dates <- current_daily_summary %>%
    filter(is.na(control_conversion_rate)) %>%
    select(day_expired_attribution) %>%
    mutate(y_axis_location = 0.5 + error_bar_height) #median_probability)
  # only label the first missing day so the annotation appears once
  missing_dates <- missing_dates %>%
    mutate(message = ifelse(as.character(missing_dates$day_expired_attribution) ==
                              as.character(min(missing_dates$day_expired_attribution)),
                            'Lag from\nAttribution\nWindow', NA))
  current_daily_summary %>%
    mutate(day_expired_attribution = as.Date(day_expired_attribution)) %>%
    ggplot(aes(x=day_expired_attribution, y=bayesian_prob_variant_gt_control)) +
    geom_line(na.rm=TRUE) +
    geom_point(na.rm=TRUE) +
    geom_text(aes(label=percent(bayesian_prob_variant_gt_control)),
              vjust=-0.5, check_overlap = TRUE, na.rm=TRUE, size=rel(global__text_size)) +
    geom_hline(yintercept = 0.5, color ='red', alpha=0.5, size=1.5) +
    geom_errorbarh(data=missing_dates,
                   aes(y = y_axis_location,
                       xmin = min(day_expired_attribution),
                       xmax = max(day_expired_attribution)),
                   color='#828282', height=rel(error_bar_height), size=rel(0.45)) +
    geom_text(data=missing_dates, aes(y=y_axis_location, label=message),
              vjust=-0.5, hjust='left', #0.325,
              check_overlap = TRUE, na.rm=TRUE, size=rel(global__text_size)) +
    scale_x_date(date_breaks = '1 days') +
    #scale_y_continuous(breaks = seq(0, 1, 0.05)) +
    expand_limits(y=c(0, 1)) +
    labs(title='Probability Variant is Better',
         y='Probability Variant is Better',
         x='Day of Experiment (and days after)') +
    theme_light(base_size=global__theme_base_size) +
    theme(axis.text.x = element_text(angle = 30, hjust = 1))
}

#' Plots the daily percent change (bayesian) with its credible-interval ribbon.
plot__daily_percent_change_bayesian <- function(experiments_daily_summary, experiment, metric) {
  current_daily_summary <- experiments_daily_summary %>%
    filter(experiment_id == experiment, metric_id == metric) %>%
    mutate(day_expired_attribution = as.Date(day_expired_attribution)) %>%
    mutate(perc_change = bayesian_percent_change,
           perc_change_conf_low = bayesian_conf_low / bayesian_control_cr,
           perc_change_conf_high = bayesian_conf_high / bayesian_control_cr,
           # it is "statistically significant" if the confidence interval is completely
           # above or below 0
           is_stat_sig=(perc_change_conf_low < 0 & perc_change_conf_high < 0) |
                       (perc_change_conf_low > 0 & perc_change_conf_high > 0))
  error_bar_height <- (max(current_daily_summary$perc_change_conf_high, na.rm=TRUE) -
                       min(current_daily_summary$perc_change_conf_low, na.rm=TRUE)) / 25
  median_percent_change <- median(current_daily_summary$perc_change, na.rm = TRUE)
  # days with no conversion-rate data yet (the tail of the attribution window)
  missing_dates <- current_daily_summary %>%
    filter(is.na(control_conversion_rate)) %>%
    select(day_expired_attribution) %>%
    mutate(y_axis_location = error_bar_height) #median_percent_change)
  # only label the first missing day so the annotation appears once
  missing_dates <- missing_dates %>%
    mutate(message = ifelse(as.character(missing_dates$day_expired_attribution) ==
                              as.character(min(missing_dates$day_expired_attribution)),
                            'Lag from\nAttribution\nWindow', NA))
  # fewer x-axis breaks for longer-running experiments
  date_breaks_width <- '1 day'
  if(nrow(current_daily_summary) >= 90) {
    date_breaks_width <- '14 day'
  } else if(nrow(current_daily_summary) >= 60) {
    date_breaks_width <- '5 day'
  } else if(nrow(current_daily_summary) >= 30) {
    date_breaks_width <- '2 day'
  }
  plot_object <- current_daily_summary %>%
    ggplot(aes(x=day_expired_attribution, y=perc_change)) +
    #coord_cartesian(ylim=c(-0.10, 0.3)) +
    scale_y_continuous(labels = percent_format()) +
    scale_x_date(labels = date_format('%Y-%m-%d'), breaks=date_breaks_width) +
    geom_ribbon(aes(ymin = perc_change_conf_low, ymax = perc_change_conf_high),
                fill = '#7A7A7A', alpha=0.35)
  # overlay red/green ribbons where the interval excludes 0;
  # the any() checks avoid ggplot errors when a segment has no qualifying rows
  if(any(current_daily_summary$is_stat_sig & current_daily_summary$perc_change < 0,
         na.rm=TRUE)) {
    plot_object <- plot_object +
      geom_ribbon(aes(ymin = ifelse(current_daily_summary$is_stat_sig & perc_change < 0,
                                    perc_change_conf_low, NA),
                      ymax = ifelse(current_daily_summary$is_stat_sig & perc_change < 0,
                                    perc_change_conf_high, NA)),
                  fill = 'red', alpha=0.35)
  }
  if(any(current_daily_summary$is_stat_sig & current_daily_summary$perc_change > 0,
         na.rm=TRUE)) {
    plot_object <- plot_object +
      geom_ribbon(aes(ymin = ifelse(current_daily_summary$is_stat_sig & perc_change > 0,
                                    perc_change_conf_low, NA),
                      ymax = ifelse(current_daily_summary$is_stat_sig & perc_change > 0,
                                    perc_change_conf_high, NA)),
                  fill = 'green', alpha=0.35)
  }
  plot_object +
    geom_hline(yintercept = 0, color='red', alpha=0.5, size=1.5) +
    geom_line(na.rm=TRUE) +
    geom_text(aes(label=percent(perc_change)),
              vjust=-1, check_overlap = TRUE, na.rm=TRUE, size=rel(global__text_size)) +
    geom_errorbarh(data=missing_dates,
                   aes(y = y_axis_location,
                       xmin = min(day_expired_attribution),
                       xmax = max(day_expired_attribution)),
                   color='#828282', height=rel(error_bar_height), size=rel(0.45)) +
    geom_text(data=missing_dates, aes(y=y_axis_location, label=message),
              vjust=-0.5, hjust='left', #0.325,
              check_overlap = TRUE, na.rm=TRUE, size=rel(global__text_size)) +
    labs(#title='Difference in Conversion Rate of `B` - `A`, with Frequentist Confidence Interval - \nWith Attribution Window',
         caption=paste("\n", percent(global__confidence_level), "confidence interval"),
         y='Lift (i.e. Percent change from Control to Variant)',
         x='Day of Experiment') +
    theme_light(base_size=global__theme_base_size) +
    theme(axis.text.x = element_text(angle = 30, hjust = 1))
}

#' returns conversion rate data over time for the cohorts defined in cohorted_snapshots;
#' The user defines several snapshots (i.e. number of days after each person's first visit) to view the
#' overall conversion rates for each cohort
#' @param cohorted_snapshots the data returned by get__cohorted_conversions_snapshot
plot__conversion_rates_snapshot_absolute <- function(cohorted_snapshots, cohort_label) {
  cohorted_snapshots %>%
    ggplot(aes(x=cohort, y=conversion_rate, group=snapshot_label, color=snapshot_label)) +
    geom_line(na.rm = TRUE) +
    geom_point(na.rm = TRUE) +
    geom_text(aes(label=percent(conversion_rate)),
              vjust=-0.5, check_overlap = TRUE, na.rm = TRUE, size=rel(global__text_size)) +
    expand_limits(y=0) +
    scale_y_continuous(labels = percent_format()) +
    labs(#title='P-value over time',
         y="Conversion Rate",
         x=cohort_label,
         color="Snapshot",
         caption="\nAll users for a given cohort must have had at least N days since their first website visit,\nwhere N is the number of days for the corresponding snapshot.") +
    theme_light(base_size=global__theme_base_size) +
    theme(axis.text.x = element_text(angle = 30, hjust = 1))
}

# like plot__conversion_rates_snapshot_absolute, but gives the percent of total conversions
# Generate new graph but instead of absolute conversion rate
# calculate the % of conversions (have to actually have another setting, Max days Number of Days allowed to Convert)
#' @param cohorted_snapshots the data returned by get__cohorted_conversions_snapshot
plot__conversion_rates_snapshot_percent <- function(cohorted_snapshots, snapshot_max_days,
                                                    cohort_label) {
  cohorted_snapshots %>%
    ggplot(aes(x=cohort, y=conversion_rate_percent_of_all,
               group=snapshot_label, color=snapshot_label)) +
    geom_line(na.rm = TRUE) +
    geom_point(na.rm = TRUE) +
    geom_text(aes(label=percent(conversion_rate_percent_of_all)),
              vjust=-0.5, check_overlap = TRUE, na.rm = TRUE, size=rel(global__text_size)) +
    expand_limits(y=0) +
    scale_y_continuous(labels = percent_format()) +
    labs(#title='P-value over time',
         y="Percent of Total Conversions",
         x=cohort_label,
         color="Snapshot",
         caption=paste("\nAll users within a given cohort must have had at least",
                       snapshot_max_days,
                       "days since their first website visit to be included.")) +
    theme_light(base_size=global__theme_base_size) +
    theme(axis.text.x = element_text(angle = 30, hjust = 1))
}

#' @param historical_crs the object returned from `get_historical_conversion_rates()`
plot__conversion_rates_historical <- function(historical_crs) {
  historical_crs %>%
    ggplot(aes(x=metric_id, y=historical_conversion_rate, fill=metric_id)) +
    geom_col() +
    scale_y_continuous(labels = percent_format()) +
    labs(#title='P-value over time',
         y='Historical Conversion Rate',
         x='Metric') +
    geom_text(aes(label=percent(historical_conversion_rate)),
              vjust=-0.5, size=rel(global__text_size)) +
    geom_text(aes(label=paste("Median # Days From\nFirst-Visit to Conversion:",
                              round(median_days_from_first_visit_to_conversion, 1))),
              vjust=1.5, size=rel(global__text_size)) +
    scale_fill_manual(values=global__metric_colors) +
    theme_light(base_size=global__theme_base_size) +
    theme(axis.text.x = element_text(angle = 30, hjust = 1),
          legend.position='none')
}

#' gives historical conversion rates, but considering the *median* attribution rates for each metric over
#' all experiments
#'
#' @param historical_crs the object returned from `get_historical_conversion_rates()`
plot__conversion_rates_attribution <- function(historical_crs) {
  historical_crs %>%
    ggplot(aes(x=metric_id, y=conversion_rate_within_window, fill=metric_id)) +
    # faint background bar shows the full historical rate for comparison
    geom_col(aes(y=historical_conversion_rate), fill='black', alpha=0.2) +
    geom_col() +
    geom_text(aes(label=paste(percent(conversion_rate_within_window), "(within Attribution)")),
              vjust=-0.5, size=rel(global__text_size)) +
    geom_text(aes(label=paste(percent(percent_cr_window_realized), "of total")),
              vjust=1.5, size=rel(global__text_size)) +
    geom_text(aes(y=historical_conversion_rate,
                  label=paste(percent(historical_conversion_rate), "(Historical)")),
              vjust=-0.5, size=rel(global__text_size)) +
    scale_y_continuous(labels = percent_format()) +
    labs(#title='P-value over time',
         y='Conversion Rates within Attribution Window',
         x='Metric') +
    scale_fill_manual(values=global__metric_colors) +
    theme_light(base_size=global__theme_base_size) +
    theme(axis.text.x = element_text(angle = 30, hjust = 1),
          legend.position='none')
}
context("Testing reading data") library(notame) test_that("Column conversion works", { set.seed(38) df <- data.frame(Injection = 1:100, Group = letters[1:4], Time = 1:2, Sample_ID = sample(letters, size = 100, replace = TRUE), few_numbers = c(1.3, 2.5)) converted <- best_classes(df) converted_classes <- unname(sapply(converted, class)) expected_classes <- c("numeric", "factor", "factor", "character", "numeric") expect_equal(converted_classes, expected_classes) }) test_that("Pheno data checking works", { df <- data.frame(Group = letters[1:2]) expect_error(check_pheno_data(df), '"Injection_order" not found') df <- data.frame(Injection_order = c(1:5, 3:9)) expect_error(check_pheno_data(df), "Injection_order is not unique") # Check QC generator df <- data.frame(Injection_order = seq_len(10), Sample_ID = c(letters[1:5], letters[1:5]) ) expect_warning(expect_error(check_pheno_data(df), "Sample_ID is not unique"), "QC not found" ) df <- data.frame(Injection_order = seq_len(5), Sample_ID = c(letters[1:5]) ) expect_warning(check_pheno_data(df), "QC not found") df <- data.frame(Injection_order = seq_len(10), Sample_ID = c(letters[1:5], rep("QC", 5)), QC = as.factor(rep(c("Sample", "QC"), each = 5))) checked <- check_pheno_data(df) expected <- data.frame(Sample_ID = c(letters[1:5], paste0("QC_", 1:5)), Injection_order = seq_len(10), stringsAsFactors = FALSE, QC = as.factor(rep(c("Sample", "QC"), each = 5))) rownames(expected) <- expected$Sample_ID expect_equal(checked, expected) }) test_that("Feature data checking works", { df <- data.frame(Feature_ID = 1:5) expect_error(check_feature_data(df), "Numbers are not allowed as feature IDs") df <- data.frame(Feature_ID = c(letters[1:5], letters[3:5])) expect_error(check_feature_data(df), "Feature_ID values are not unique") df <- data.frame(Feature_ID = letters[1:9]) df$Feature_ID[6] <- NA expect_error(check_feature_data(df), "Missing values in Feature IDs") }) test_that("Easy example data is read correctly", { # Pheno data pd <- 
data.frame(Sample_ID = paste0("TEST_", seq_len(12)), Injection_order = seq_len(12), Group = factor(rep(LETTERS[1:2], times = c(5,7))), QC = as.factor("Sample"), easy_Datafile = paste0("190102SR_RP_pos_0", 10:21), stringsAsFactors = FALSE) rownames(pd) <- pd$Sample_ID # Feature data fd <- data.frame(Feature_ID = "", Split = "easy", Alignment = as.numeric(seq_len(10)), Mass = 50 * seq_len(10), RetentionTime = 0.5 *seq_len(10), "MS_MS_Spectrum" = c("(123.45; 678)", rep(NA, 9)), stringsAsFactors = FALSE) fd <- name_features(fd) rownames(fd) <- fd$Feature_ID # Assay data ad <- matrix(0, nrow = 10, ncol = 12) for (i in seq_len(10)) { ad[i, ] <- 50000 -(i-1)*3000 + 1000 * (0:11) } dimnames(ad) <- list(rownames(fd), rownames(pd)) # Read the file read <- read_from_excel(system.file("extdata", "easy_data.xlsx", package = "notame"), sheet = 1, corner_row = 4, corner_column = "D", name = "easy", id_prefix = "TEST_") # Test that the parts are read as expected expect_equal(read$exprs, ad) expect_equal(read$pheno_data, pd) expect_equal(read$feature_data, fd) }) test_that("Data is split correctly", { # Pheno data pd <- data.frame(Sample_ID = paste0("TEST_", seq_len(12)), Injection_order = seq_len(12), Group = rep(LETTERS[1:2], times = c(5,7)), QC = "Sample", Datafile = paste0("190102SR_RP_pos_0", 10:21), stringsAsFactors = FALSE) rownames(pd) <- pd$Sample_ID qc_idx <- c(1, 6, 9, 12) pd$Group[qc_idx] <- "QC" pd$QC[qc_idx] <- "QC" pd$QC <- factor(pd$QC) # Feature data fd <- data.frame(Feature_ID = "", Split = rep(c("RP_pos", "Hilic_pos", "Hilic_neg", "RP_neg"), each = 4), Alignment = as.numeric(seq_len(16)), Mass = 50 * seq_len(16), RetentionTime = 0.5 *seq_len(16), Column = rep(c("RP", "Hilic", "RP"), times = c(4, 8, 4)), Mode = rep(c("pos", "neg"), each = 8), "MS_MS_Spectrum" = c("(123.45; 678)", rep(NA, 15)), stringsAsFactors = FALSE) fd <- name_features(fd) rownames(fd) <- fd$Feature_ID # Assay data ad <- matrix(0, nrow = 16, ncol = 12) for (i in seq_len(16)) { ad[i, ] <- 50000 
-(i-1)*3000 + 1000 * (0:11) } row_zeros <- c(1, 3, 3, 4, 5, 8, 8, 10, rep(11, 5)) col_zeros <- c(3, 7, 12, 1, 5, 10, 12, 1, 1, 3, 5, 8, 12) for (j in seq_along(row_zeros)) { ad[row_zeros[j], col_zeros[j]] <- 0 } dimnames(ad) <- list(rownames(fd), rownames(pd)) # Read the file read <- read_from_excel(system.file("extdata", "split_data.xlsx", package = "notame"), sheet = 1, corner_row = 4, corner_column = "F", split_by = c("Column", "Mode"), id_prefix = "TEST_") # Test that the parts are read as expected expect_equal(read$exprs, ad) expect_equal(read$pheno_data, pd) expect_equal(read$feature_data, fd) }) test_that("Splitting data works as expected", { split_by <- c("Ion mode", "gswregh") # Wrong column name expect_error(read_from_excel(system.file("extdata", "sample_data_whole.xlsx", package = "notame"), corner_row = 4, corner_column = "X", split_by = split_by) ) }) test_that("Creating dummy injection order works as expected", { names <- list("HILIC_neg", "HILIC_pos", "RP_neg", "RP_pos") modes <- list() for (name in names) { file <- system.file("extdata", paste0(name, "_sample.xlsx"), package = "notame") mode <- read_from_excel(file, name = name) modes[name] <- construct_metabosets(mode$exprs, mode$pheno_data, mode$feature_data) } # Modify data modes$HILIC_neg$Injection_order <- modes$HILIC_neg$Injection_order + 1 inj_ord_rn <- modes$RP_neg$Injection_order + 2 modes$RP_neg$Injection_order <- inj_ord_rn inj_ord_rp <- modes$RP_pos$Injection_order[5:221] + 5 modes$RP_pos$Injection_order[5:221] <- inj_ord_rp sampleNames(modes$HILIC_neg)[2] <- "ID_666" sampleNames(modes$RP_pos)[22] <- "ID_999" expect_warning(merged <- merge_metabosets(modes), regexp = "Sample IDs are not identical|Unequal amount of samples" ) # Dummy injection expect_equal(merged$Injection_order, -seq_along(merged$Sample_ID)) # Original IOs expect_equal(sort(as.numeric(na.omit(merged$HILIC_neg_Injection_order))), modes$HILIC_neg$Injection_order ) 
expect_equal(sort(as.numeric(na.omit(merged$HILIC_pos_Injection_order))), modes$HILIC_pos$Injection_order ) expect_equal(sort(as.numeric(na.omit(merged$RP_neg_Injection_order))), modes$RP_neg$Injection_order ) expect_equal(sort(as.numeric(na.omit(merged$RP_pos_Injection_order))), modes$RP_pos$Injection_order ) })
/tests/testthat/test_data_reading.R
permissive
antonvsdata/notame
R
false
false
7,801
r
context("Testing reading data") library(notame) test_that("Column conversion works", { set.seed(38) df <- data.frame(Injection = 1:100, Group = letters[1:4], Time = 1:2, Sample_ID = sample(letters, size = 100, replace = TRUE), few_numbers = c(1.3, 2.5)) converted <- best_classes(df) converted_classes <- unname(sapply(converted, class)) expected_classes <- c("numeric", "factor", "factor", "character", "numeric") expect_equal(converted_classes, expected_classes) }) test_that("Pheno data checking works", { df <- data.frame(Group = letters[1:2]) expect_error(check_pheno_data(df), '"Injection_order" not found') df <- data.frame(Injection_order = c(1:5, 3:9)) expect_error(check_pheno_data(df), "Injection_order is not unique") # Check QC generator df <- data.frame(Injection_order = seq_len(10), Sample_ID = c(letters[1:5], letters[1:5]) ) expect_warning(expect_error(check_pheno_data(df), "Sample_ID is not unique"), "QC not found" ) df <- data.frame(Injection_order = seq_len(5), Sample_ID = c(letters[1:5]) ) expect_warning(check_pheno_data(df), "QC not found") df <- data.frame(Injection_order = seq_len(10), Sample_ID = c(letters[1:5], rep("QC", 5)), QC = as.factor(rep(c("Sample", "QC"), each = 5))) checked <- check_pheno_data(df) expected <- data.frame(Sample_ID = c(letters[1:5], paste0("QC_", 1:5)), Injection_order = seq_len(10), stringsAsFactors = FALSE, QC = as.factor(rep(c("Sample", "QC"), each = 5))) rownames(expected) <- expected$Sample_ID expect_equal(checked, expected) }) test_that("Feature data checking works", { df <- data.frame(Feature_ID = 1:5) expect_error(check_feature_data(df), "Numbers are not allowed as feature IDs") df <- data.frame(Feature_ID = c(letters[1:5], letters[3:5])) expect_error(check_feature_data(df), "Feature_ID values are not unique") df <- data.frame(Feature_ID = letters[1:9]) df$Feature_ID[6] <- NA expect_error(check_feature_data(df), "Missing values in Feature IDs") }) test_that("Easy example data is read correctly", { # Pheno data pd <- 
data.frame(Sample_ID = paste0("TEST_", seq_len(12)), Injection_order = seq_len(12), Group = factor(rep(LETTERS[1:2], times = c(5,7))), QC = as.factor("Sample"), easy_Datafile = paste0("190102SR_RP_pos_0", 10:21), stringsAsFactors = FALSE) rownames(pd) <- pd$Sample_ID # Feature data fd <- data.frame(Feature_ID = "", Split = "easy", Alignment = as.numeric(seq_len(10)), Mass = 50 * seq_len(10), RetentionTime = 0.5 *seq_len(10), "MS_MS_Spectrum" = c("(123.45; 678)", rep(NA, 9)), stringsAsFactors = FALSE) fd <- name_features(fd) rownames(fd) <- fd$Feature_ID # Assay data ad <- matrix(0, nrow = 10, ncol = 12) for (i in seq_len(10)) { ad[i, ] <- 50000 -(i-1)*3000 + 1000 * (0:11) } dimnames(ad) <- list(rownames(fd), rownames(pd)) # Read the file read <- read_from_excel(system.file("extdata", "easy_data.xlsx", package = "notame"), sheet = 1, corner_row = 4, corner_column = "D", name = "easy", id_prefix = "TEST_") # Test that the parts are read as expected expect_equal(read$exprs, ad) expect_equal(read$pheno_data, pd) expect_equal(read$feature_data, fd) }) test_that("Data is split correctly", { # Pheno data pd <- data.frame(Sample_ID = paste0("TEST_", seq_len(12)), Injection_order = seq_len(12), Group = rep(LETTERS[1:2], times = c(5,7)), QC = "Sample", Datafile = paste0("190102SR_RP_pos_0", 10:21), stringsAsFactors = FALSE) rownames(pd) <- pd$Sample_ID qc_idx <- c(1, 6, 9, 12) pd$Group[qc_idx] <- "QC" pd$QC[qc_idx] <- "QC" pd$QC <- factor(pd$QC) # Feature data fd <- data.frame(Feature_ID = "", Split = rep(c("RP_pos", "Hilic_pos", "Hilic_neg", "RP_neg"), each = 4), Alignment = as.numeric(seq_len(16)), Mass = 50 * seq_len(16), RetentionTime = 0.5 *seq_len(16), Column = rep(c("RP", "Hilic", "RP"), times = c(4, 8, 4)), Mode = rep(c("pos", "neg"), each = 8), "MS_MS_Spectrum" = c("(123.45; 678)", rep(NA, 15)), stringsAsFactors = FALSE) fd <- name_features(fd) rownames(fd) <- fd$Feature_ID # Assay data ad <- matrix(0, nrow = 16, ncol = 12) for (i in seq_len(16)) { ad[i, ] <- 50000 
-(i-1)*3000 + 1000 * (0:11) } row_zeros <- c(1, 3, 3, 4, 5, 8, 8, 10, rep(11, 5)) col_zeros <- c(3, 7, 12, 1, 5, 10, 12, 1, 1, 3, 5, 8, 12) for (j in seq_along(row_zeros)) { ad[row_zeros[j], col_zeros[j]] <- 0 } dimnames(ad) <- list(rownames(fd), rownames(pd)) # Read the file read <- read_from_excel(system.file("extdata", "split_data.xlsx", package = "notame"), sheet = 1, corner_row = 4, corner_column = "F", split_by = c("Column", "Mode"), id_prefix = "TEST_") # Test that the parts are read as expected expect_equal(read$exprs, ad) expect_equal(read$pheno_data, pd) expect_equal(read$feature_data, fd) }) test_that("Splitting data works as expected", { split_by <- c("Ion mode", "gswregh") # Wrong column name expect_error(read_from_excel(system.file("extdata", "sample_data_whole.xlsx", package = "notame"), corner_row = 4, corner_column = "X", split_by = split_by) ) }) test_that("Creating dummy injection order works as expected", { names <- list("HILIC_neg", "HILIC_pos", "RP_neg", "RP_pos") modes <- list() for (name in names) { file <- system.file("extdata", paste0(name, "_sample.xlsx"), package = "notame") mode <- read_from_excel(file, name = name) modes[name] <- construct_metabosets(mode$exprs, mode$pheno_data, mode$feature_data) } # Modify data modes$HILIC_neg$Injection_order <- modes$HILIC_neg$Injection_order + 1 inj_ord_rn <- modes$RP_neg$Injection_order + 2 modes$RP_neg$Injection_order <- inj_ord_rn inj_ord_rp <- modes$RP_pos$Injection_order[5:221] + 5 modes$RP_pos$Injection_order[5:221] <- inj_ord_rp sampleNames(modes$HILIC_neg)[2] <- "ID_666" sampleNames(modes$RP_pos)[22] <- "ID_999" expect_warning(merged <- merge_metabosets(modes), regexp = "Sample IDs are not identical|Unequal amount of samples" ) # Dummy injection expect_equal(merged$Injection_order, -seq_along(merged$Sample_ID)) # Original IOs expect_equal(sort(as.numeric(na.omit(merged$HILIC_neg_Injection_order))), modes$HILIC_neg$Injection_order ) 
expect_equal(sort(as.numeric(na.omit(merged$HILIC_pos_Injection_order))), modes$HILIC_pos$Injection_order ) expect_equal(sort(as.numeric(na.omit(merged$RP_neg_Injection_order))), modes$RP_neg$Injection_order ) expect_equal(sort(as.numeric(na.omit(merged$RP_pos_Injection_order))), modes$RP_pos$Injection_order ) })
#' Estimate distances between oscillators #' #' Estimates the distances between the oscillators of a \code{\link{ODEnetwork}} #' from an equilibrium state. #' #' @param odenet [\code{ODEnetwork}]\cr #' List of class \code{\link{ODEnetwork}}. #' @param equilibrium [\code{numeric(n)}]\cr #' The desired equilibrium positions of the oscillators. #' @param distGround [\code{character(1)}] or [\code{character(n)}]\cr #' \code{"combined"} estimates one value for all distances of the oscillators to the ground. #' Optimisation starts from \code{median(equilibrium)}.\cr #' \code{"individual"} estimates individual distance values for every oscillator. #' Optimisation starts from \code{equilibrium}.\cr #' \code{"fixed"} no estimation of the distances to the ground. #' Set to diagonal of distances matrix in \code{\link{ODEnetwork}}.\cr #' \code{character(n)} specifies groups of oscillators which distances to the ground are #' estimated by the same value. Optimisation starts from \code{median(equilibrium)} of the #' specified groups.\cr #' Default is \code{"combined"} #' @param optim.control [\code{list()}]\cr #' A list of control parameters for optim. #' See \code{\link{optim}}. #' @return an extended list of class \code{\link{ODEnetwork}}.\cr #' Matrix of distances is added or overwritten. 
#' @export #' @examples #' masses <- c(1, 1) #' dampers <- diag(c(1, 1)) #' springs <- diag(c(1, 1)) #' springs[1, 2] <- 1 #' equilibrium <- c(1/3, 5/3) #' odenet <- ODEnetwork(masses, dampers, springs) #' estimateDistances(odenet, equilibrium)$distances #' estimateDistances(odenet, equilibrium, distGround="individual")$distances estimateDistances <- function(odenet, equilibrium , distGround=c("combined", "individual", "fixed", c("A", "B", "123", "A")) , optim.control=list()) { UseMethod("estimateDistances") } #' @method estimateDistances ODEnetwork #' @export estimateDistances.ODEnetwork <- function(odenet, equilibrium, distGround="combined" , optim.control=list()) { # number of oscillators cN <- length(odenet$masses) # Equilibrium assertNumeric(equilibrium, any.missing=FALSE, len=cN) assertVector(equilibrium, strict=TRUE) # distances to the ground assert( checkCharacter(distGround, any.missing=FALSE, len=1L) , checkCharacter(distGround, any.missing=FALSE, len=cN) ) # check arguments of distGround if (cN > 1 && length(distGround) == 1) { assertChoice(distGround, c("combined", "individual", "fixed")) } # delete names names(equilibrium) <- NULL # exception for one mass if (cN == 1 && distGround != "fixed") { odenet <- updateOscillators(odenet, ParamVec=c(r.1=equilibrium)) return(odenet) } # create parameter vector cParams <- numeric() # check distance estimation to the ground if (length(distGround) == 1) { if (distGround == "combined") { # one parameter for all distances to the ground cParams <- c(r.glob = stats::median(equilibrium)) } else if (distGround == "individual") { # one parameter for each distance cParams <- c(equilibrium) names(cParams) <- paste("r.glob", 1:cN, sep=".") } } else { # character vector indicates the groups for the parameter estimation for (grp in unique(distGround)) { cParams <- c(cParams, stats::median(equilibrium[distGround == grp])) names(cParams)[length(cParams)] <- paste("r.glob", paste(which(distGround == grp), collapse = "."), sep = 
".") } } # add distances between oscillators with respect to springs and dampers to parameter vector locat.spring <- which(odenet$springs != 0, arr.ind=TRUE) ## Ohne Diagnoale, Eintraege doppelt nicht noetig locat.ok <- apply(locat.spring, 1, function(x) x[1] < x[2]) if (sum(locat.ok) == 0) { # exit, if no free parameters available message("All parameters are fixed.") return(odenet) } ## matrix() neotig, falls nur eine Verbindung locat.spring <- matrix(locat.spring[locat.ok, ], ncol=2) if (is.null(nrow(locat.spring))) locat.spring <- t(locat.spring) for (i in 1:nrow(locat.spring)) { cParams <- c(cParams, odenet$distances[locat.spring[i,1], locat.spring[i,2]]) names(cParams)[length(cParams)] <- paste(c("r", locat.spring[i ,]), collapse = ".") } # calculate target vector mK <- odenet$springs diag(mK) <- -rowSums(mK) mK <- -mK bTarget <- -mK %*% equilibrium # Cacluate regularisation target for distances # dista <- diag(equilibrium) dista <- odenet$distances if (nrow(dista) == 30) { dista <- rep(345, 30) dista[c(12, 20)] <- 220 dista <- diag(dista) } for (i in 1:nrow(locat.spring)) { row <- locat.spring[i,1] col <- locat.spring[i,2] dista[row, col] <- diff(c(dista[row,row], dista[col, col])) } pTarget <- dista[locat.spring] names(pTarget) <- paste("r.", apply(locat.spring, 1, paste, collapse="."), sep="") pTarget <- c(cParams[grep("glob", names(cParams))], pTarget) if (nrow(dista) == 30) { pTarget[c(1, 2)] <- c(345, 220) } # print(pTarget) # define cost function distCost <- function(cParameters, pTarget) { # cParameters <- splitGlobalParams(cParameters) odenet <- updateOscillators(odenet, ParamVec=splitGlobalParams(cParameters)) # get distances and convert to correct form mR <- odenet$distances diag(mR) <- -diag(mR) mR[lower.tri(mR)] <- -mR[lower.tri(mR)] # calculate vector b with b_i = sum(k_ij*r_ij, j=1..n) b <- diag(odenet$springs %*% t(mR)) ## Gewichte definieren # target.zero <- pTarget == 0 # gewi <- rep(1, length(pTarget)) # if (sum(target.zero) > 0) 
gewi[target.zero] <- 1 # if (sum(!target.zero) > 0) gewi[!target.zero] <- 1/pTarget[!target.zero] # gewi <- rep(1, length(cParameters)) # return(sum((b-bTarget)^2) + sum(gewi %*% (cParameters - pTarget)^2)) # print("cParams:\n") # print(cParams) # print("cParameters:\n") # print(cParameters) # print("pTarget:\n") # print(pTarget) delta.b <- sum((b-bTarget)^2) # print(sprintf("Resid b: %.2f", delta.b)) # print(sprintf("Resid Param: %.2f", sum((cParameters-pTarget)^2))) return(delta.b + sum((cParameters-pTarget)^2) * exp(-10*delta.b) ) # return SSE # return(sum((b-bTarget)^2)) # return residuals # return(bTarget-b) } # split the parameter vector with respect to estimate (grouped) global distances splitGlobalParams <- function(cParameters) { if (sum(grepl("r\\.glob", names(cParameters))) > 0) { # estimate different groups of global distances # extract the values globVal <- cParameters[grep("r\\.glob", names(cParameters))] cParameters <- cParameters[-grep("r\\.glob", names(cParameters))] # one global distance, or different ones if (length(globVal) == 1) { lstMassGrps <- list(1:length(odenet$masses)) } else { lstMassGrps <- gsub("r\\.glob\\.", "", names(globVal)) lstMassGrps <- strsplit(lstMassGrps, ".", fixed = TRUE) } # multiply values to the correct r.i's for (i in length(lstMassGrps):1) { cParameters <- c(rep(globVal[i], length(lstMassGrps[[i]])), cParameters) names(cParameters)[1:length(lstMassGrps[[i]])] <- paste("r", lstMassGrps[[i]], sep = ".") } } return(cParameters) } # optimise parameters # print(paste("Params:", length(cParams))) # print(paste("Resid:", length(distCost(cParams, pTarget)))) firstFit <- stats::optim(cParams, distCost, pTarget=pTarget, method="BFGS", control=optim.control) ## Check, ob neuer Lauf nennenswert besseres Ergebnis bringt checkFit <- stats::optim(firstFit$par, distCost, pTarget=pTarget, method="BFGS", control=optim.control) if (checkFit$value/firstFit$value < 0.999) warning("Optimization by estimateDistances() seems to be 
unsuccessful!") # # Throw warnings # if (firstFit$convergence != 0) { # warningf(paste("No successful completition. Code:", firstFit$convergence)) # } # if (firstFit$value > 1e-7 * distCost(cParams)) { # if (firstFit$value > 10 * sqrt(.Machine$double.eps) * distCost(cParams)) { # warningf(paste("The SSE of the distances is large:", firstFit$value)) # } # update the optimal values to the odenet odenet <- updateOscillators(odenet, ParamVec=splitGlobalParams(checkFit$par)) return(odenet) }
/R/estimateDistances.R
no_license
cran/ODEnetwork
R
false
false
8,720
r
#' Estimate distances between oscillators #' #' Estimates the distances between the oscillators of a \code{\link{ODEnetwork}} #' from an equilibrium state. #' #' @param odenet [\code{ODEnetwork}]\cr #' List of class \code{\link{ODEnetwork}}. #' @param equilibrium [\code{numeric(n)}]\cr #' The desired equilibrium positions of the oscillators. #' @param distGround [\code{character(1)}] or [\code{character(n)}]\cr #' \code{"combined"} estimates one value for all distances of the oscillators to the ground. #' Optimisation starts from \code{median(equilibrium)}.\cr #' \code{"individual"} estimates individual distance values for every oscillator. #' Optimisation starts from \code{equilibrium}.\cr #' \code{"fixed"} no estimation of the distances to the ground. #' Set to diagonal of distances matrix in \code{\link{ODEnetwork}}.\cr #' \code{character(n)} specifies groups of oscillators which distances to the ground are #' estimated by the same value. Optimisation starts from \code{median(equilibrium)} of the #' specified groups.\cr #' Default is \code{"combined"} #' @param optim.control [\code{list()}]\cr #' A list of control parameters for optim. #' See \code{\link{optim}}. #' @return an extended list of class \code{\link{ODEnetwork}}.\cr #' Matrix of distances is added or overwritten. 
#' @export #' @examples #' masses <- c(1, 1) #' dampers <- diag(c(1, 1)) #' springs <- diag(c(1, 1)) #' springs[1, 2] <- 1 #' equilibrium <- c(1/3, 5/3) #' odenet <- ODEnetwork(masses, dampers, springs) #' estimateDistances(odenet, equilibrium)$distances #' estimateDistances(odenet, equilibrium, distGround="individual")$distances estimateDistances <- function(odenet, equilibrium , distGround=c("combined", "individual", "fixed", c("A", "B", "123", "A")) , optim.control=list()) { UseMethod("estimateDistances") } #' @method estimateDistances ODEnetwork #' @export estimateDistances.ODEnetwork <- function(odenet, equilibrium, distGround="combined" , optim.control=list()) { # number of oscillators cN <- length(odenet$masses) # Equilibrium assertNumeric(equilibrium, any.missing=FALSE, len=cN) assertVector(equilibrium, strict=TRUE) # distances to the ground assert( checkCharacter(distGround, any.missing=FALSE, len=1L) , checkCharacter(distGround, any.missing=FALSE, len=cN) ) # check arguments of distGround if (cN > 1 && length(distGround) == 1) { assertChoice(distGround, c("combined", "individual", "fixed")) } # delete names names(equilibrium) <- NULL # exception for one mass if (cN == 1 && distGround != "fixed") { odenet <- updateOscillators(odenet, ParamVec=c(r.1=equilibrium)) return(odenet) } # create parameter vector cParams <- numeric() # check distance estimation to the ground if (length(distGround) == 1) { if (distGround == "combined") { # one parameter for all distances to the ground cParams <- c(r.glob = stats::median(equilibrium)) } else if (distGround == "individual") { # one parameter for each distance cParams <- c(equilibrium) names(cParams) <- paste("r.glob", 1:cN, sep=".") } } else { # character vector indicates the groups for the parameter estimation for (grp in unique(distGround)) { cParams <- c(cParams, stats::median(equilibrium[distGround == grp])) names(cParams)[length(cParams)] <- paste("r.glob", paste(which(distGround == grp), collapse = "."), sep = 
".") } } # add distances between oscillators with respect to springs and dampers to parameter vector locat.spring <- which(odenet$springs != 0, arr.ind=TRUE) ## Ohne Diagnoale, Eintraege doppelt nicht noetig locat.ok <- apply(locat.spring, 1, function(x) x[1] < x[2]) if (sum(locat.ok) == 0) { # exit, if no free parameters available message("All parameters are fixed.") return(odenet) } ## matrix() neotig, falls nur eine Verbindung locat.spring <- matrix(locat.spring[locat.ok, ], ncol=2) if (is.null(nrow(locat.spring))) locat.spring <- t(locat.spring) for (i in 1:nrow(locat.spring)) { cParams <- c(cParams, odenet$distances[locat.spring[i,1], locat.spring[i,2]]) names(cParams)[length(cParams)] <- paste(c("r", locat.spring[i ,]), collapse = ".") } # calculate target vector mK <- odenet$springs diag(mK) <- -rowSums(mK) mK <- -mK bTarget <- -mK %*% equilibrium # Cacluate regularisation target for distances # dista <- diag(equilibrium) dista <- odenet$distances if (nrow(dista) == 30) { dista <- rep(345, 30) dista[c(12, 20)] <- 220 dista <- diag(dista) } for (i in 1:nrow(locat.spring)) { row <- locat.spring[i,1] col <- locat.spring[i,2] dista[row, col] <- diff(c(dista[row,row], dista[col, col])) } pTarget <- dista[locat.spring] names(pTarget) <- paste("r.", apply(locat.spring, 1, paste, collapse="."), sep="") pTarget <- c(cParams[grep("glob", names(cParams))], pTarget) if (nrow(dista) == 30) { pTarget[c(1, 2)] <- c(345, 220) } # print(pTarget) # define cost function distCost <- function(cParameters, pTarget) { # cParameters <- splitGlobalParams(cParameters) odenet <- updateOscillators(odenet, ParamVec=splitGlobalParams(cParameters)) # get distances and convert to correct form mR <- odenet$distances diag(mR) <- -diag(mR) mR[lower.tri(mR)] <- -mR[lower.tri(mR)] # calculate vector b with b_i = sum(k_ij*r_ij, j=1..n) b <- diag(odenet$springs %*% t(mR)) ## Gewichte definieren # target.zero <- pTarget == 0 # gewi <- rep(1, length(pTarget)) # if (sum(target.zero) > 0) 
gewi[target.zero] <- 1 # if (sum(!target.zero) > 0) gewi[!target.zero] <- 1/pTarget[!target.zero] # gewi <- rep(1, length(cParameters)) # return(sum((b-bTarget)^2) + sum(gewi %*% (cParameters - pTarget)^2)) # print("cParams:\n") # print(cParams) # print("cParameters:\n") # print(cParameters) # print("pTarget:\n") # print(pTarget) delta.b <- sum((b-bTarget)^2) # print(sprintf("Resid b: %.2f", delta.b)) # print(sprintf("Resid Param: %.2f", sum((cParameters-pTarget)^2))) return(delta.b + sum((cParameters-pTarget)^2) * exp(-10*delta.b) ) # return SSE # return(sum((b-bTarget)^2)) # return residuals # return(bTarget-b) } # split the parameter vector with respect to estimate (grouped) global distances splitGlobalParams <- function(cParameters) { if (sum(grepl("r\\.glob", names(cParameters))) > 0) { # estimate different groups of global distances # extract the values globVal <- cParameters[grep("r\\.glob", names(cParameters))] cParameters <- cParameters[-grep("r\\.glob", names(cParameters))] # one global distance, or different ones if (length(globVal) == 1) { lstMassGrps <- list(1:length(odenet$masses)) } else { lstMassGrps <- gsub("r\\.glob\\.", "", names(globVal)) lstMassGrps <- strsplit(lstMassGrps, ".", fixed = TRUE) } # multiply values to the correct r.i's for (i in length(lstMassGrps):1) { cParameters <- c(rep(globVal[i], length(lstMassGrps[[i]])), cParameters) names(cParameters)[1:length(lstMassGrps[[i]])] <- paste("r", lstMassGrps[[i]], sep = ".") } } return(cParameters) } # optimise parameters # print(paste("Params:", length(cParams))) # print(paste("Resid:", length(distCost(cParams, pTarget)))) firstFit <- stats::optim(cParams, distCost, pTarget=pTarget, method="BFGS", control=optim.control) ## Check, ob neuer Lauf nennenswert besseres Ergebnis bringt checkFit <- stats::optim(firstFit$par, distCost, pTarget=pTarget, method="BFGS", control=optim.control) if (checkFit$value/firstFit$value < 0.999) warning("Optimization by estimateDistances() seems to be 
unsuccessful!") # # Throw warnings # if (firstFit$convergence != 0) { # warningf(paste("No successful completition. Code:", firstFit$convergence)) # } # if (firstFit$value > 1e-7 * distCost(cParams)) { # if (firstFit$value > 10 * sqrt(.Machine$double.eps) * distCost(cParams)) { # warningf(paste("The SSE of the distances is large:", firstFit$value)) # } # update the optimal values to the odenet odenet <- updateOscillators(odenet, ParamVec=splitGlobalParams(checkFit$par)) return(odenet) }
# Evaluate: https://github.com/RJEGR/Cancer_sete_T_assembly/blob/main/functions.R

# Parse the backtick-delimited top-BLAST-hit column of a Trinotate report into
# a tidy table with one row per individual hit.
#
# x   : Trinotate report (data frame) with columns gene_id, transcript_id,
#       prot_id and sprot_Top_<hit>_hit.
# hit : BLAST program suffix used to build the column name ("BLASTP"/"BLASTX").
#
# Returns a tibble: gene, transcript, protein, uniprot accession, alignment,
# %identity, e-value, name, lineage, domain, genus. Also prints a message with
# the number of annotations parsed.
split_blast <- function (x, hit = "BLASTP") {
  # Upgraded function from trinotateR package:
  require(tidyverse)
  hit <- paste("sprot_Top_", hit, "_hit", sep = "")
  which_vars <- c(hit, "gene_id", "transcript_id", "prot_id")
  # keep only the annotation + id columns, dropping rows with no hit
  y <- x %>% select_at(vars(all_of(which_vars))) %>% drop_na(any_of(hit))
  z <- y %>% pull(hit)
  # multiple hits per transcript are backtick-separated
  z <- strsplit(z, "`")
  n <- sapply(z, length)  # hits per input row; used to replicate the id columns
  # within a hit, fields are caret-separated
  z <- strsplit(unlist(z), "\\^")
  # fields 1 and 2 normally repeat the same accession; warn if they differ
  if (any(sapply(z, "[", 1) != sapply(z, "[", 2))) print("WARNING: check different values in columns 1 and 2")
  # field 6 holds the protein name; strip the UniProt naming prefixes/suffixes
  NAME <- gsub("^RecName: Full=", "", sapply(z, "[", 6))
  NAME <- gsub("SubName: Full=", "", NAME)
  NAME <- gsub(";$", "", NAME)
  NAME <- gsub(" \\{[^}]+}", "", NAME)
  gene <- rep(y$gene_id, n)
  transcript <- rep(y$transcript_id, n)
  protein <- rep(gsub(".*\\|", "", y$prot_id), n)
  uniprot <- sapply(z, "[", 1)
  align <- sapply(z, "[", 3)
  identity <- as.numeric(gsub("%ID", "", sapply(z, "[", 4)))
  evalue <- as.numeric(gsub("E:", "", sapply(z, "[", 5)))
  # field 7 is the semicolon-separated taxonomic lineage:
  # domain = first rank, genus = last rank
  domain <- gsub("; .*", "", sapply(z, "[", 7))
  lineage <- sapply(z, "[", 7)
  genus <- gsub(".*; ", "", sapply(z, "[", 7))
  x1 <- data.frame(gene, transcript, protein , uniprot, align, identity, evalue, name = NAME, lineage, domain, genus, stringsAsFactors = FALSE)
  message(nrow(x1), " ", hit, " annotations")
  as_tibble(x1)
}

# Parse the gene_ontology_<hit> column (GO terms per transcript) into a tidy
# table with one row per GO assignment.
#
# x   : Trinotate report (data frame).
# hit : BLAST program suffix used to build the column name.
#
# Returns a tibble: gene, transcript, protein, go (GO id), ontology
# (BP/MF/CC), name (GO term description).
split_gene_ontology <- function(x, hit = "BLASTP") {
  # Upgraded function from trinotateR package:
  require(tidyverse)
  gene_ontology_hit <- paste("gene_ontology_", hit, sep = "")
  which_vars <- c(gene_ontology_hit, "gene_id", "transcript_id", "prot_id")
  y <- x %>% select_at(vars(all_of(which_vars))) %>% drop_na(any_of(gene_ontology_hit))
  z <- y %>% pull(gene_ontology_hit)
  # one backtick-separated entry per GO term; caret-separated fields within
  z <- strsplit(z, "`")
  n <- sapply(z, length)
  z <- strsplit(unlist(z), "\\^")
  x1 <- data.frame(gene = rep(y$gene_id, n), transcript = rep(y$transcript_id, n), protein = rep(gsub(".*\\|", "", y$prot_id), n), go = sapply(z, "[", 1), ontology = sapply(z, "[", 2), name = sapply(z, "[", 3), stringsAsFactors = FALSE)
  message(nrow(x1), " ", gene_ontology_hit, " annotations")
  as_tibble(x1)
}

# Parse the Pfam annotation column into a tidy table with one row per domain.
#
# x   : Trinotate report (data frame).
# hit : name of the Pfam column (default "Pfam").
#
# Returns a tibble: gene, transcript, protein, pfam (accession without
# version suffix), symbol, name, align, evalue.
split_pfam <- function(x, hit = "Pfam"){
  # Upgraded function from trinotateR package:
  require(tidyverse)
  which_vars <- c(hit, "gene_id", "transcript_id", "prot_id")
  y <- x %>% select_at(vars(all_of(which_vars))) %>% drop_na(any_of(hit))
  z <- y %>% pull(hit)
  z <- strsplit(z, "`")
  n <- sapply(z, length)
  # split annotation into 5 columns
  z <- strsplit( unlist(z), "\\^" )
  gene <- rep(y$gene_id, n)
  transcript <- rep(y$transcript_id, n)
  protein <- rep(gsub(".*\\|", "", y$prot_id), n)
  # drop the ".NN" version suffix from the Pfam accession
  pfam <- gsub("\\.[0-9]*", "", sapply(z, "[", 1))
  x1 <- data.frame(gene, transcript, protein, pfam, symbol = sapply(z, "[", 2), name = sapply(z, "[", 3), align = sapply(z, "[", 4), evalue = as.numeric(gsub("E:", "", sapply(z, "[", 5) )), stringsAsFactors = FALSE)
  message(nrow(x1), " ", hit, " annotations")
  as_tibble(x1)
}

# Parse the KEGG annotation column into a tidy table with one row per id.
# NOTE(review): unlike the others, this uses data.table syntax, so x must be a
# data.table; the unnamed first column selected by .() is auto-named V1.
#
# x   : Trinotate report (data.table).
# hit : name of the KEGG column (default "Kegg").
#
# Returns a data.table: gene, transcript, protein, Kegg (first caret field).
split_kegg <- function (x, hit = "Kegg") {
  library(data.table)
  y <- x[!is.na(get(hit)), .(get(hit), gene_id, transcript_id, prot_id)]
  z <- strsplit(y$V1, "`")
  n <- sapply(z, length)
  z <- strsplit(unlist(z), "\\^")
  x1 <- data.frame(gene = rep(y$gene_id, n), transcript = rep(y$transcript_id, n), protein = rep(gsub(".*\\|", "", y$prot_id), n), Kegg = sapply(z, "[", 1), stringsAsFactors = FALSE)
  message(nrow(x1), " ", hit, " annotations")
  data.table(x1)
}
/FUNCTIONS.R
no_license
RJEGR/Small-RNASeq-data-analysis
R
false
false
3,772
r
# Evaluate: https://github.com/RJEGR/Cancer_sete_T_assembly/blob/main/functions.R

# Parse the backtick-delimited top-BLAST-hit column of a Trinotate report into
# a tidy table with one row per individual hit.
#
# x   : Trinotate report (data frame) with columns gene_id, transcript_id,
#       prot_id and sprot_Top_<hit>_hit.
# hit : BLAST program suffix used to build the column name ("BLASTP"/"BLASTX").
#
# Returns a tibble: gene, transcript, protein, uniprot accession, alignment,
# %identity, e-value, name, lineage, domain, genus. Also prints a message with
# the number of annotations parsed.
split_blast <- function (x, hit = "BLASTP") {
  # Upgraded function from trinotateR package:
  require(tidyverse)
  hit <- paste("sprot_Top_", hit, "_hit", sep = "")
  which_vars <- c(hit, "gene_id", "transcript_id", "prot_id")
  # keep only the annotation + id columns, dropping rows with no hit
  y <- x %>% select_at(vars(all_of(which_vars))) %>% drop_na(any_of(hit))
  z <- y %>% pull(hit)
  # multiple hits per transcript are backtick-separated
  z <- strsplit(z, "`")
  n <- sapply(z, length)  # hits per input row; used to replicate the id columns
  # within a hit, fields are caret-separated
  z <- strsplit(unlist(z), "\\^")
  # fields 1 and 2 normally repeat the same accession; warn if they differ
  if (any(sapply(z, "[", 1) != sapply(z, "[", 2))) print("WARNING: check different values in columns 1 and 2")
  # field 6 holds the protein name; strip the UniProt naming prefixes/suffixes
  NAME <- gsub("^RecName: Full=", "", sapply(z, "[", 6))
  NAME <- gsub("SubName: Full=", "", NAME)
  NAME <- gsub(";$", "", NAME)
  NAME <- gsub(" \\{[^}]+}", "", NAME)
  gene <- rep(y$gene_id, n)
  transcript <- rep(y$transcript_id, n)
  protein <- rep(gsub(".*\\|", "", y$prot_id), n)
  uniprot <- sapply(z, "[", 1)
  align <- sapply(z, "[", 3)
  identity <- as.numeric(gsub("%ID", "", sapply(z, "[", 4)))
  evalue <- as.numeric(gsub("E:", "", sapply(z, "[", 5)))
  # field 7 is the semicolon-separated taxonomic lineage:
  # domain = first rank, genus = last rank
  domain <- gsub("; .*", "", sapply(z, "[", 7))
  lineage <- sapply(z, "[", 7)
  genus <- gsub(".*; ", "", sapply(z, "[", 7))
  x1 <- data.frame(gene, transcript, protein , uniprot, align, identity, evalue, name = NAME, lineage, domain, genus, stringsAsFactors = FALSE)
  message(nrow(x1), " ", hit, " annotations")
  as_tibble(x1)
}

# Parse the gene_ontology_<hit> column (GO terms per transcript) into a tidy
# table with one row per GO assignment.
#
# x   : Trinotate report (data frame).
# hit : BLAST program suffix used to build the column name.
#
# Returns a tibble: gene, transcript, protein, go (GO id), ontology
# (BP/MF/CC), name (GO term description).
split_gene_ontology <- function(x, hit = "BLASTP") {
  # Upgraded function from trinotateR package:
  require(tidyverse)
  gene_ontology_hit <- paste("gene_ontology_", hit, sep = "")
  which_vars <- c(gene_ontology_hit, "gene_id", "transcript_id", "prot_id")
  y <- x %>% select_at(vars(all_of(which_vars))) %>% drop_na(any_of(gene_ontology_hit))
  z <- y %>% pull(gene_ontology_hit)
  # one backtick-separated entry per GO term; caret-separated fields within
  z <- strsplit(z, "`")
  n <- sapply(z, length)
  z <- strsplit(unlist(z), "\\^")
  x1 <- data.frame(gene = rep(y$gene_id, n), transcript = rep(y$transcript_id, n), protein = rep(gsub(".*\\|", "", y$prot_id), n), go = sapply(z, "[", 1), ontology = sapply(z, "[", 2), name = sapply(z, "[", 3), stringsAsFactors = FALSE)
  message(nrow(x1), " ", gene_ontology_hit, " annotations")
  as_tibble(x1)
}

# Parse the Pfam annotation column into a tidy table with one row per domain.
#
# x   : Trinotate report (data frame).
# hit : name of the Pfam column (default "Pfam").
#
# Returns a tibble: gene, transcript, protein, pfam (accession without
# version suffix), symbol, name, align, evalue.
split_pfam <- function(x, hit = "Pfam"){
  # Upgraded function from trinotateR package:
  require(tidyverse)
  which_vars <- c(hit, "gene_id", "transcript_id", "prot_id")
  y <- x %>% select_at(vars(all_of(which_vars))) %>% drop_na(any_of(hit))
  z <- y %>% pull(hit)
  z <- strsplit(z, "`")
  n <- sapply(z, length)
  # split annotation into 5 columns
  z <- strsplit( unlist(z), "\\^" )
  gene <- rep(y$gene_id, n)
  transcript <- rep(y$transcript_id, n)
  protein <- rep(gsub(".*\\|", "", y$prot_id), n)
  # drop the ".NN" version suffix from the Pfam accession
  pfam <- gsub("\\.[0-9]*", "", sapply(z, "[", 1))
  x1 <- data.frame(gene, transcript, protein, pfam, symbol = sapply(z, "[", 2), name = sapply(z, "[", 3), align = sapply(z, "[", 4), evalue = as.numeric(gsub("E:", "", sapply(z, "[", 5) )), stringsAsFactors = FALSE)
  message(nrow(x1), " ", hit, " annotations")
  as_tibble(x1)
}

# Parse the KEGG annotation column into a tidy table with one row per id.
# NOTE(review): unlike the others, this uses data.table syntax, so x must be a
# data.table; the unnamed first column selected by .() is auto-named V1.
#
# x   : Trinotate report (data.table).
# hit : name of the KEGG column (default "Kegg").
#
# Returns a data.table: gene, transcript, protein, Kegg (first caret field).
split_kegg <- function (x, hit = "Kegg") {
  library(data.table)
  y <- x[!is.na(get(hit)), .(get(hit), gene_id, transcript_id, prot_id)]
  z <- strsplit(y$V1, "`")
  n <- sapply(z, length)
  z <- strsplit(unlist(z), "\\^")
  x1 <- data.frame(gene = rep(y$gene_id, n), transcript = rep(y$transcript_id, n), protein = rep(gsub(".*\\|", "", y$prot_id), n), Kegg = sapply(z, "[", 1), stringsAsFactors = FALSE)
  message(nrow(x1), " ", hit, " annotations")
  data.table(x1)
}
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/Simulate.R \name{SimGeno} \alias{SimGeno} \title{Simulate Genotypes} \usage{ SimGeno( Pedigree, nSnp = 400, ParMis = 0.4, MAF = 0.3, CallRate = 0.99, SnpError = 5e-04, ErrorFM = "version2.0", ReturnStats = FALSE, OutFile = NA, Inherit = "autosomal", InheritFile = NA, quiet = FALSE ) } \arguments{ \item{Pedigree}{dataframe, pedigree with the first three columns being id - dam - sire. Column names are ignored, as are additional columns, with the exception of a 'Sex' column when Inherit is not 'autosomal'.} \item{nSnp}{number of SNPs to simulate.} \item{ParMis}{single number or vector length two with proportion of parents with fully missing genotype. Ignored if CallRate is a named vector.} \item{MAF}{minimum minor allele frequency, and allele frequencies will be sampled uniformly between this minimum and 0.5, OR a vector with minor allele frequency at each locus. In both cases, this is the MAF among pedigree founders, the MAF in the sample will deviate due to drift.} \item{CallRate}{either a single number for the mean call rate (genotyping success), OR a vector with the call rate at each SNP, OR a named vector with the call rate for each individual. In the third case, ParMis is ignored, and individuals in the pedigree (as id or parent) not included in this vector are presumed non-genotyped.} \item{SnpError}{mean per-locus genotyping error rate across SNPs, and a beta-distribution will be used to simulate the number of missing cases per SNP, OR a vector with the genotyping error for each SNP.} \item{ErrorFM}{function taking the error rate (scalar) as argument and returning a 3x3 matrix with probabilities that actual genotype i (rows) is observed as genotype j (columns). Inbuilt ones are as used in sequoia 'version2.0', 'version1.3', or 'version1.1'. 
See details.} \item{ReturnStats}{in addition to the genotype matrix, return the input parameters and mean & quantiles of MAF, error rate and call rates.} \item{OutFile}{file name for simulated genotypes. If NA (default), return results within R.} \item{Inherit}{inheritance pattern, scalar or vector of length nSnp, Defaults to 'autosomal'. An excel file included in the package has inheritance patterns for the X and Y chromosome and mtDNA, and allows custom inheritance patterns. Note that these are experimental, and NOT currently supported by the pedigree reconstruction with \code{\link{sequoia}} !} \item{InheritFile}{file name of file with inheritance patterns, with extension csv, txt, xls or xlsx (the latter two require library \pkg{openxlsx}).} \item{quiet}{suppress messages.} } \value{ If \code{ReturnStats=FALSE} (the default), a matrix with genotype data in sequoia's input format, encoded as 0/1/2/-9. If \code{ReturnStats=TRUE}, a named list with three elements: list 'ParamsIN', matrix 'SGeno', and list 'StatsOUT': \item{AF}{Frequency in 'observed' genotypes of '1' allele} \item{AF.act}{Allele frequency in 'actual' (without genotyping errors & missingness)} \item{SnpError}{Error rate per SNP (actual /= observed AND observed /= missing)} \item{SnpCallRate}{Non-missing per SNP} \item{IndivError}{Error rate per individual} \item{IndivCallRate}{Non-missing per individual} } \description{ Simulate SNP genotype data from a pedigree, with optional missingness and errors. } \details{ Please ensure the pedigree is a valid pedigree, for example by first running \code{\link{PedPolish}}. For founders, i.e. individuals with no known parents, genotypes are drawn according to the provided MAF and assuming Hardy-Weinberg equilibrium. Offspring genotypes are generated following Mendelian inheritance, assuming all loci are completely independent. 
Individuals with one known parent are allowed: at each locus, one allele is inherited from the known parent, and the other drawn from the genepool according to the provided MAF. Genotyping errors are generated following a user-definable 3x3 matrix with probabilities that actual genotype \eqn{i} (rows) is observed as genotype \eqn{j} (columns). This is specified as \code{ErrorFM}, which is a function of \code{SnpError}. By default (\code{ErrorFM} = "version2.0"), \code{SnpError} is interpreted as a locus-level error rate (rather than allele-level), and equals the probability that a homozygote is observed as heterozygote, and the probability that a heterozygote is observed as either homozygote (i.e., the probability that it is observed as AA = probability that observed as aa = \code{SnpError}/2). The probability that one homozygote is observed as the other is (\code{SnpError}/2\eqn{)^2}. Note that this differs from versions up to 1.1.1, where a proportion of \code{SnpError}*3/2 of genotypes were replaced with random genotypes. This corresponds to \code{ErrorFM} = "Version111". Error rates differ between SNPs, but the same error pattern is used across all SNPs, even when inheritance patterns vary. When two or more different error patterns are required, SimGeno should be run on the different SNP subsets separately, and results combined. Variation in call rates is assumed to follow a highly skewed (beta) distribution, with many samples having call rates close to 1, and a narrowing tail of lower call rates. The first shape parameter defaults to 1 (but see \code{\link{MkGenoErrors}}), and the second shape parameter is defined via the mean as \code{CallRate}. For 99.9\% of SNPs to have a call rate of 0.8 (0.9; 0.95) or higher, use a mean call rate of 0.969 (0.985; 0.993). Variation in call rate between samples can be specified by providing a named vector to \code{CallRate}, which supersedes PropLQ in versions up to 1.1.1. 
Otherwise, variation in call rate and error rate between samples occurs only as side-effect of the random nature of which individuals are hit by per-SNP errors and drop-outs. Finer control is possible by first generating an error-free genotype matrix, and then calling \code{\link{MkGenoErrors}} directly on subsets of the matrix. } \section{Disclaimer}{ This simulation is highly simplistic and assumes that all SNPs segregate completely independently, that the SNPs are in Hardy-Weinberg equilibrium in the pedigree founders. It assumes that genotyping errors are not due to heritable mutations of the SNPs, and that missingness is random and not e.g. due to heritable mutations of SNP flanking regions. Results based on this simulated data will provide an minimum estimate of the number of SNPs required, and an optimistic estimate of pedigree reconstruction performance. } \examples{ GenoM <- SimGeno(Pedigree = Ped_HSg5, nSnp = 100, ParMis = c(0.2, 0.7)) \dontrun{ # Alternative genotyping error model EFM <- function(E) { # Whalen, Gorjanc & Hickey 2018 matrix(c(1-E*3/4, E/4, E/4, E/4, 1/2-E/4, 1/2-E/4, E/4, E/4, E/4, 1-E*3/4), 3,3, byrow=TRUE) } EFM(0.01) GenoM <- SimGeno(Pedigree = Ped_HSg5, nSnp = 100, ParMis = 0.2, SnpError = 5e-3, ErrorFM = EFM) # combination of high & low quality SNPs Geno.HQ <- SimGeno(Ped_HSg5, nSnp=50, MAF=0.3, CallRate=runif(50, 0.7, 1)) Geno.LQ <- SimGeno(Ped_HSg5, nSnp=20, MAF=0.1, CallRate=runif(20, 0.1, 5)) Geno.HQLQ <- merge(Geno.HQ, Geno.LQ, by="row.names") } } \seealso{ The wrapper \code{\link{EstConf}} for repeated simulation and pedigree reconstruction; \code{\link{MkGenoErrors}} for fine control over the distribution of genotyping errors in simulated data. } \author{ Jisca Huisman, \email{jisca.huisman@gmail.com} }
/man/SimGeno.Rd
no_license
cran/sequoia
R
false
true
7,815
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/Simulate.R \name{SimGeno} \alias{SimGeno} \title{Simulate Genotypes} \usage{ SimGeno( Pedigree, nSnp = 400, ParMis = 0.4, MAF = 0.3, CallRate = 0.99, SnpError = 5e-04, ErrorFM = "version2.0", ReturnStats = FALSE, OutFile = NA, Inherit = "autosomal", InheritFile = NA, quiet = FALSE ) } \arguments{ \item{Pedigree}{dataframe, pedigree with the first three columns being id - dam - sire. Column names are ignored, as are additional columns, with the exception of a 'Sex' column when Inherit is not 'autosomal'.} \item{nSnp}{number of SNPs to simulate.} \item{ParMis}{single number or vector length two with proportion of parents with fully missing genotype. Ignored if CallRate is a named vector.} \item{MAF}{minimum minor allele frequency, and allele frequencies will be sampled uniformly between this minimum and 0.5, OR a vector with minor allele frequency at each locus. In both cases, this is the MAF among pedigree founders, the MAF in the sample will deviate due to drift.} \item{CallRate}{either a single number for the mean call rate (genotyping success), OR a vector with the call rate at each SNP, OR a named vector with the call rate for each individual. In the third case, ParMis is ignored, and individuals in the pedigree (as id or parent) not included in this vector are presumed non-genotyped.} \item{SnpError}{mean per-locus genotyping error rate across SNPs, and a beta-distribution will be used to simulate the number of missing cases per SNP, OR a vector with the genotyping error for each SNP.} \item{ErrorFM}{function taking the error rate (scalar) as argument and returning a 3x3 matrix with probabilities that actual genotype i (rows) is observed as genotype j (columns). Inbuilt ones are as used in sequoia 'version2.0', 'version1.3', or 'version1.1'. 
See details.} \item{ReturnStats}{in addition to the genotype matrix, return the input parameters and mean & quantiles of MAF, error rate and call rates.} \item{OutFile}{file name for simulated genotypes. If NA (default), return results within R.} \item{Inherit}{inheritance pattern, scalar or vector of length nSnp, Defaults to 'autosomal'. An excel file included in the package has inheritance patterns for the X and Y chromosome and mtDNA, and allows custom inheritance patterns. Note that these are experimental, and NOT currently supported by the pedigree reconstruction with \code{\link{sequoia}} !} \item{InheritFile}{file name of file with inheritance patterns, with extension csv, txt, xls or xlsx (the latter two require library \pkg{openxlsx}).} \item{quiet}{suppress messages.} } \value{ If \code{ReturnStats=FALSE} (the default), a matrix with genotype data in sequoia's input format, encoded as 0/1/2/-9. If \code{ReturnStats=TRUE}, a named list with three elements: list 'ParamsIN', matrix 'SGeno', and list 'StatsOUT': \item{AF}{Frequency in 'observed' genotypes of '1' allele} \item{AF.act}{Allele frequency in 'actual' (without genotyping errors & missingness)} \item{SnpError}{Error rate per SNP (actual /= observed AND observed /= missing)} \item{SnpCallRate}{Non-missing per SNP} \item{IndivError}{Error rate per individual} \item{IndivCallRate}{Non-missing per individual} } \description{ Simulate SNP genotype data from a pedigree, with optional missingness and errors. } \details{ Please ensure the pedigree is a valid pedigree, for example by first running \code{\link{PedPolish}}. For founders, i.e. individuals with no known parents, genotypes are drawn according to the provided MAF and assuming Hardy-Weinberg equilibrium. Offspring genotypes are generated following Mendelian inheritance, assuming all loci are completely independent. 
Individuals with one known parent are allowed: at each locus, one allele is inherited from the known parent, and the other drawn from the genepool according to the provided MAF. Genotyping errors are generated following a user-definable 3x3 matrix with probabilities that actual genotype \eqn{i} (rows) is observed as genotype \eqn{j} (columns). This is specified as \code{ErrorFM}, which is a function of \code{SnpError}. By default (\code{ErrorFM} = "version2.0"), \code{SnpError} is interpreted as a locus-level error rate (rather than allele-level), and equals the probability that a homozygote is observed as heterozygote, and the probability that a heterozygote is observed as either homozygote (i.e., the probability that it is observed as AA = probability that observed as aa = \code{SnpError}/2). The probability that one homozygote is observed as the other is (\code{SnpError}/2\eqn{)^2}. Note that this differs from versions up to 1.1.1, where a proportion of \code{SnpError}*3/2 of genotypes were replaced with random genotypes. This corresponds to \code{ErrorFM} = "Version111". Error rates differ between SNPs, but the same error pattern is used across all SNPs, even when inheritance patterns vary. When two or more different error patterns are required, SimGeno should be run on the different SNP subsets separately, and results combined. Variation in call rates is assumed to follow a highly skewed (beta) distribution, with many samples having call rates close to 1, and a narrowing tail of lower call rates. The first shape parameter defaults to 1 (but see \code{\link{MkGenoErrors}}), and the second shape parameter is defined via the mean as \code{CallRate}. For 99.9\% of SNPs to have a call rate of 0.8 (0.9; 0.95) or higher, use a mean call rate of 0.969 (0.985; 0.993). Variation in call rate between samples can be specified by providing a named vector to \code{CallRate}, which supersedes PropLQ in versions up to 1.1.1. 
Otherwise, variation in call rate and error rate between samples occurs only as side-effect of the random nature of which individuals are hit by per-SNP errors and drop-outs. Finer control is possible by first generating an error-free genotype matrix, and then calling \code{\link{MkGenoErrors}} directly on subsets of the matrix. } \section{Disclaimer}{ This simulation is highly simplistic and assumes that all SNPs segregate completely independently, that the SNPs are in Hardy-Weinberg equilibrium in the pedigree founders. It assumes that genotyping errors are not due to heritable mutations of the SNPs, and that missingness is random and not e.g. due to heritable mutations of SNP flanking regions. Results based on this simulated data will provide an minimum estimate of the number of SNPs required, and an optimistic estimate of pedigree reconstruction performance. } \examples{ GenoM <- SimGeno(Pedigree = Ped_HSg5, nSnp = 100, ParMis = c(0.2, 0.7)) \dontrun{ # Alternative genotyping error model EFM <- function(E) { # Whalen, Gorjanc & Hickey 2018 matrix(c(1-E*3/4, E/4, E/4, E/4, 1/2-E/4, 1/2-E/4, E/4, E/4, E/4, 1-E*3/4), 3,3, byrow=TRUE) } EFM(0.01) GenoM <- SimGeno(Pedigree = Ped_HSg5, nSnp = 100, ParMis = 0.2, SnpError = 5e-3, ErrorFM = EFM) # combination of high & low quality SNPs Geno.HQ <- SimGeno(Ped_HSg5, nSnp=50, MAF=0.3, CallRate=runif(50, 0.7, 1)) Geno.LQ <- SimGeno(Ped_HSg5, nSnp=20, MAF=0.1, CallRate=runif(20, 0.1, 5)) Geno.HQLQ <- merge(Geno.HQ, Geno.LQ, by="row.names") } } \seealso{ The wrapper \code{\link{EstConf}} for repeated simulation and pedigree reconstruction; \code{\link{MkGenoErrors}} for fine control over the distribution of genotyping errors in simulated data. } \author{ Jisca Huisman, \email{jisca.huisman@gmail.com} }
# Generic: evaluate a (possibly nonlinear) function of estimated variance
# components and approximate its standard error via the delta method.
# Dispatches on the class of `object`.
vpredict <- function(object, transform) {
  UseMethod("vpredict")
}

# Delta-method estimate and SE for a function of variance components from a
# fitted mmer model.
#
# object    : mmer fit; summary(object)$varcomp[,1] supplies the component
#             estimates, and object$sigmaSE their sampling (co)variances.
# transform : formula in terms of V1, V2, ... (components in varcomp order),
#             e.g. h2 ~ V1 / (V1 + V4); a two-sided formula's LHS names the
#             returned row.
#
# Returns a one-row data frame (classes "vpredict.mmer", "data.frame") with
# columns Estimate and SE.
vpredict.mmer <- function (object, transform){
  # variance-component estimates, renamed V1..Vn so the formula can refer to them
  pframe <- as.list(summary(object)$varcomp[,1])
  names(pframe) <- paste("V", seq(1, length(pframe)), sep = "")
  ## deriv creates a derivative for a simple expression
  # i.e. dx2x <- deriv(~ x^2, "x") ; dx2x
  ## eval evaluates a derivative expression for certain values specified in the deriv() expression
  ## 1) get derivatives of the expression provided,
  ## i.e. V1/(V1+V4) with respect to each of the variables existing, i.e. v1, v2, v3, v4
  ## 2) Evaluate the expression (derivatives) for values provided for each variable
  ## i.e. with the actual values of the variance components
  tvalue <- eval(deriv(transform[[length(transform)]], names(pframe)), pframe)
  X <- as.vector(attr(tvalue, "gradient")) # just make it a single vector of derivatives
  tname <- if (length(transform) == 3) transform[[2]] else ""
  n <- length(pframe) ## number of parameters available, i.e. V1,V2,V3,V4
  # index pairs (i, j) enumerating the upper triangle (incl. diagonal) of the
  # n x n covariance matrix, in the same order Vmat is extracted below
  i <- rep(1:n, 1:n) ## repeat each parameter by its own
  j <- sequence(1:n) ## makes a sequence from 1 to the number provided, i.e. if sequence(1:2) = 1 1 2, because it makes the sequence for 1:1 and then 1:2
  k <- 1 + (i > j) # all where i <= j get a 1, all i > j get a 2
  Vmat <- object$sigmaSE
  toext <- upper.tri(Vmat)
  diag(toext) <- TRUE
  Vmat <- Vmat[which(toext,arr.ind = TRUE)] ## extract the upper triangular
  se <- sqrt(abs(sum(Vmat * X[i] * X[j] * k))) ## only X[i] and X[j] that match with the Vi's indicated are != than zero
  ### Vmat are the second derivatives
  ### X are the derivatives of the expression with respect to each parameter of interest
  ## X[i] * X[j] * k multiplies once the var(var.i) and var(var.j) and twice the covar(covar.ij)
  ## then takes the sqrt(abs( sum[c(var(i),covar(i,j),var(j)] ))) , that's the SE
  ## Vmat * X[i] * X[j] * k --> 2nd derivatives * derivatives of i.e. h2 with respect to each term
  ## d''(x) * d'(x) * d'
  ## those var(vc.i) and covar(covar.ij) from the variance comp. come from the inverse of the second derivatives (Fisher's)
  # toreturn2 <- data.frame(row.names = tname, Estimate = tvalue, SE = se)
  # class(toreturn2) <- "vpredict.mmer"
  toreturn2 <- data.frame(Estimate = tvalue, SE = se)
  rownames(toreturn2 ) <- tname # seemed not to be evaluated correctly before in all cases
  class(toreturn2) <- c("vpredict.mmer","data.frame") # allows data.frame inheritance
  # attr(toreturn2, "class")<-c("vpredict.mmer", "data.frame")
  return(toreturn2)
}
/R/vpredict.R
no_license
covaruber/sommer
R
false
false
2,583
r
# Generic: evaluate a (possibly nonlinear) function of estimated variance
# components and approximate its standard error via the delta method.
# Dispatches on the class of `object`.
vpredict <- function(object, transform) {
  UseMethod("vpredict")
}

# Delta-method estimate and SE for a function of variance components from a
# fitted mmer model.
#
# object    : mmer fit; summary(object)$varcomp[,1] supplies the component
#             estimates, and object$sigmaSE their sampling (co)variances.
# transform : formula in terms of V1, V2, ... (components in varcomp order),
#             e.g. h2 ~ V1 / (V1 + V4); a two-sided formula's LHS names the
#             returned row.
#
# Returns a one-row data frame (classes "vpredict.mmer", "data.frame") with
# columns Estimate and SE.
vpredict.mmer <- function (object, transform){
  # variance-component estimates, renamed V1..Vn so the formula can refer to them
  pframe <- as.list(summary(object)$varcomp[,1])
  names(pframe) <- paste("V", seq(1, length(pframe)), sep = "")
  ## deriv creates a derivative for a simple expression
  # i.e. dx2x <- deriv(~ x^2, "x") ; dx2x
  ## eval evaluates a derivative expression for certain values specified in the deriv() expression
  ## 1) get derivatives of the expression provided,
  ## i.e. V1/(V1+V4) with respect to each of the variables existing, i.e. v1, v2, v3, v4
  ## 2) Evaluate the expression (derivatives) for values provided for each variable
  ## i.e. with the actual values of the variance components
  tvalue <- eval(deriv(transform[[length(transform)]], names(pframe)), pframe)
  X <- as.vector(attr(tvalue, "gradient")) # just make it a single vector of derivatives
  tname <- if (length(transform) == 3) transform[[2]] else ""
  n <- length(pframe) ## number of parameters available, i.e. V1,V2,V3,V4
  # index pairs (i, j) enumerating the upper triangle (incl. diagonal) of the
  # n x n covariance matrix, in the same order Vmat is extracted below
  i <- rep(1:n, 1:n) ## repeat each parameter by its own
  j <- sequence(1:n) ## makes a sequence from 1 to the number provided, i.e. if sequence(1:2) = 1 1 2, because it makes the sequence for 1:1 and then 1:2
  k <- 1 + (i > j) # all where i <= j get a 1, all i > j get a 2
  Vmat <- object$sigmaSE
  toext <- upper.tri(Vmat)
  diag(toext) <- TRUE
  Vmat <- Vmat[which(toext,arr.ind = TRUE)] ## extract the upper triangular
  se <- sqrt(abs(sum(Vmat * X[i] * X[j] * k))) ## only X[i] and X[j] that match with the Vi's indicated are != than zero
  ### Vmat are the second derivatives
  ### X are the derivatives of the expression with respect to each parameter of interest
  ## X[i] * X[j] * k multiplies once the var(var.i) and var(var.j) and twice the covar(covar.ij)
  ## then takes the sqrt(abs( sum[c(var(i),covar(i,j),var(j)] ))) , that's the SE
  ## Vmat * X[i] * X[j] * k --> 2nd derivatives * derivatives of i.e. h2 with respect to each term
  ## d''(x) * d'(x) * d'
  ## those var(vc.i) and covar(covar.ij) from the variance comp. come from the inverse of the second derivatives (Fisher's)
  # toreturn2 <- data.frame(row.names = tname, Estimate = tvalue, SE = se)
  # class(toreturn2) <- "vpredict.mmer"
  toreturn2 <- data.frame(Estimate = tvalue, SE = se)
  rownames(toreturn2 ) <- tname # seemed not to be evaluated correctly before in all cases
  class(toreturn2) <- c("vpredict.mmer","data.frame") # allows data.frame inheritance
  # attr(toreturn2, "class")<-c("vpredict.mmer", "data.frame")
  return(toreturn2)
}
# Exploratory plots of the Global Terrorism Database (GTD).
# Expects `gtd` (the GTD data frame) and `weap.positions` (ordered weapon-type
# labels) to exist in the calling environment — TODO confirm where they are
# defined (likely a companion data-loading script).

# load packages
library(ggplot2)
library(maps)

# plot casualties per year
gg.months <- ggplot(gtd, aes(iyear, nkill))
gg.months + geom_bar(stat="identity") +
  xlab("Year") +
  ylab("Number of Casualties") +
  ggtitle("Number of Terrorist Attack Casualties per Year (1970-2015)") +
  scale_x_continuous(breaks=1970:2015) +
  theme_gray()

# plot attack success by attack type
gg.success.att <- ggplot(gtd, aes(attacktype1_txt, nkill))
gg.success.att + geom_bar(stat="identity") +
  ylab("Number of Successful Attacks") +
  # fixed: title was missing its closing parenthesis
  ggtitle("Number of Casualties per Attack Type (1970-2015)") +
  xlab("Attack Type")

# plot attack success by weapon type
gg.success.weap <- ggplot(gtd, aes(weaptype1_txt, nkill))
gg.success.weap + geom_bar(stat="identity") +
  ylab("Number of Successful Attacks") +
  # fixed: parenthesised the year range for consistency with the other titles
  ggtitle("Number of Casualties per Weapon Type (1970-2015)") +
  xlab("Weapon Type") +
  scale_x_discrete(limits=weap.positions)

# plot attacks by attack type in the 48 contiguous united states
us <- map_data("state")
q <- ggplot() +
  geom_polygon(data=us, aes(x=long, y=lat, group=group), color="darkgray", fill="white") +
  geom_point(data=gtd, aes(x=longitude, y=latitude, color=attacktype1_txt), size=2, alpha=0.5) +
  coord_map() +
  xlim(-125, -65) +
  ylim(25, 50) +
  xlab("Longitude") +
  ylab("Latitude") +
  theme_bw() +
  theme(legend.position="bottom", legend.title=element_blank()) +
  ggtitle("Terrorist Attacks in the United States, by Attack Type (1970-2015)")
q
/plots.R
no_license
mrstepanovic/GlobalTerrorismAnalysis
R
false
false
1,450
r
# Exploratory plots of the Global Terrorism Database (GTD).
# Expects `gtd` (the GTD data frame) and `weap.positions` (ordered weapon-type
# labels) to exist in the calling environment — TODO confirm where they are
# defined (likely a companion data-loading script).

# load packages
library(ggplot2)
library(maps)

# plot casualties per year
gg.months <- ggplot(gtd, aes(iyear, nkill))
gg.months + geom_bar(stat="identity") +
  xlab("Year") +
  ylab("Number of Casualties") +
  ggtitle("Number of Terrorist Attack Casualties per Year (1970-2015)") +
  scale_x_continuous(breaks=1970:2015) +
  theme_gray()

# plot attack success by attack type
gg.success.att <- ggplot(gtd, aes(attacktype1_txt, nkill))
gg.success.att + geom_bar(stat="identity") +
  ylab("Number of Successful Attacks") +
  # fixed: title was missing its closing parenthesis
  ggtitle("Number of Casualties per Attack Type (1970-2015)") +
  xlab("Attack Type")

# plot attack success by weapon type
gg.success.weap <- ggplot(gtd, aes(weaptype1_txt, nkill))
gg.success.weap + geom_bar(stat="identity") +
  ylab("Number of Successful Attacks") +
  # fixed: parenthesised the year range for consistency with the other titles
  ggtitle("Number of Casualties per Weapon Type (1970-2015)") +
  xlab("Weapon Type") +
  scale_x_discrete(limits=weap.positions)

# plot attacks by attack type in the 48 contiguous united states
us <- map_data("state")
q <- ggplot() +
  geom_polygon(data=us, aes(x=long, y=lat, group=group), color="darkgray", fill="white") +
  geom_point(data=gtd, aes(x=longitude, y=latitude, color=attacktype1_txt), size=2, alpha=0.5) +
  coord_map() +
  xlim(-125, -65) +
  ylim(25, 50) +
  xlab("Longitude") +
  ylab("Latitude") +
  theme_bw() +
  theme(legend.position="bottom", legend.title=element_blank()) +
  ggtitle("Terrorist Attacks in the United States, by Attack Type (1970-2015)")
q
\name{stratasize} \alias{stratasize} \title{Sample Size Determination for Stratified Sampling} \description{ The function \code{stratasize} determines the total size of stratified samples depending on the type of allocation and the specified precision. } \usage{ stratasize(e, Nh, Sh, level = 0.95, type = 'prop') } \arguments{ \item{e}{positive number specifying sampling precision.} \item{Nh}{vector of population sizes in each stratum.} \item{Sh}{vector of standard deviation in each stratum.} \item{level}{coverage probability for confidence intervals. Default is \code{level=0.95}.} \item{type}{type of allocation. Default is \code{type='prop'} for proportional, alternative is \code{type='opt'} for optimal.} } \value{ The function \code{stratasize} returns a value, which is a list consisting of the components \item{call}{is a list of call components: \code{e} specified precision, \code{Nh} population sizes of every stratum, \code{Sh} standard deviation of every stratum, \code{method} type of allocation, \code{level} coverage probability for confidence intervals.} \item{n}{determined total sample size.} } \references{ Kauermann, Goeran/Kuechenhoff, Helmut (2011): Stichproben. Methoden und praktische Umsetzung mit R. Springer. } \author{Shuai Shao} \seealso{\code{\link{stratasamp}}, \code{\link{stratamean}}} \examples{ #random proportional stratified sample stratasize(e=0.1, Nh=c(100000,300000,600000), Sh=c(1,2,3)) #random optimal stratified sample stratasize(e=0.1, Nh=c(100000,300000,600000), Sh=c(1,2,3), type="opt") }
/man/stratasize.Rd
no_license
cran/samplingbook
R
false
false
1,604
rd
\name{stratasize} \alias{stratasize} \title{Sample Size Determination for Stratified Sampling} \description{ The function \code{stratasize} determines the total size of stratified samples depending on the type of allocation and the specified precision. } \usage{ stratasize(e, Nh, Sh, level = 0.95, type = 'prop') } \arguments{ \item{e}{positive number specifying sampling precision.} \item{Nh}{vector of population sizes in each stratum.} \item{Sh}{vector of standard deviation in each stratum.} \item{level}{coverage probability for confidence intervals. Default is \code{level=0.95}.} \item{type}{type of allocation. Default is \code{type='prop'} for proportional, alternative is \code{type='opt'} for optimal.} } \value{ The function \code{stratasize} returns a value, which is a list consisting of the components \item{call}{is a list of call components: \code{e} specified precision, \code{Nh} population sizes of every stratum, \code{Sh} standard deviation of every stratum, \code{method} type of allocation, \code{level} coverage probability for confidence intervals.} \item{n}{determined total sample size.} } \references{ Kauermann, Goeran/Kuechenhoff, Helmut (2011): Stichproben. Methoden und praktische Umsetzung mit R. Springer. } \author{Shuai Shao} \seealso{\code{\link{stratasamp}}, \code{\link{stratamean}}} \examples{ #random proportional stratified sample stratasize(e=0.1, Nh=c(100000,300000,600000), Sh=c(1,2,3)) #random optimal stratified sample stratasize(e=0.1, Nh=c(100000,300000,600000), Sh=c(1,2,3), type="opt") }
# ------
# Shiny dashboard UI for the Philadelphia Bike Count app.
# Expects `bike_philly` (data frame with an UPDATED date column) to be in
# scope before the UI is built — presumably loaded in global.R; verify.
library(shiny)
library(shinydashboard)
library(leaflet)
library(shinythemes)
library(shinytoastr)
library(plotly)
#theme = shinythemes::shinytheme("slate")

dashboardPage(
  skin = 'purple',
  dashboardHeader(title = "Philadelphia Bike Count", titleWidth = 250),
  # sidebar: navigation between the map, graphs, about page, and source link
  dashboardSidebar(
    sidebarMenu(
      menuItem("Map of Philadelphia", tabName = "map", icon = icon("map")),
      menuItem("Graphs & Metrics", tabName = "graphs", icon = icon("signal", lib = "glyphicon")),
      menuItem("About", tabName = "about", icon = icon("question-circle")),
      menuItem("Source Code", href = "https://github.com/KristenZhao/Philadelphia-Bike-Count", icon = icon("github-alt"))
    )
  ),
  dashboardBody(
    tabItems(
      # map tab: leaflet map plus summary boxes, filtered by date range "date1"
      tabItem(tabName = "map",
        fluidRow(
          column(width = 8,
            box(width = NULL,
              leafletOutput("bike_count_map", height = 500))
          ),
          column(width = 3,
            box(width = NULL,
              # date range defaults to the full span of the data
              dateRangeInput("date1", "Select dates to visualize.",
                start = min(bike_philly$UPDATED),
                end = max(bike_philly$UPDATED),
                min = min(bike_philly$UPDATED),
                max = max(bike_philly$UPDATED))
            ),
            box(width = NULL, h3('Bike Counts'), h4(textOutput("total_count"))),
            box(width = NULL, h3("Most Popular Biking Area"), h4(textOutput("popular_area"))),
            box(width = NULL, h3("Most Popular Biking Municipality"), h4(textOutput("muni")))
          )
        )
      ),
      # graphs tab: plotly chart with date-range and travel-direction filters
      tabItem(tabName = "graphs",
        fluidRow(
          column(width = 12,
            box(width = NULL,
              plotlyOutput("count_by_muni")))
          # column(width = 6,
          #   box(width = NULL,
          #     plotOutput("count_by_muni_per_dir")))
        ),
        fluidRow(
          column(width = 6,
            box(width = NULL,
              dateRangeInput("date2", "Select dates to visualize.",
                start = min(bike_philly$UPDATED),
                end = max(bike_philly$UPDATED),
                min = min(bike_philly$UPDATED),
                max = max(bike_philly$UPDATED))
            )
          ),
          column(width = 6,
            box(width = NULL,
              # "CNTDIR" is the input ID used by the server to filter by
              # count direction; it can be any other name
              selectInput("CNTDIR", "Which direction are you looking at?",
                choices = c('all','both','east','north','south','west'))
            )
          )
        )),
      # about tab: static markdown page
      tabItem(tabName = "about",
        fluidRow(
          column(width = 12,
            box(width = NULL,
              includeMarkdown("about.md")))
        )
      )
    )
  )
)
/ui.R
no_license
KristenZhao/Philadelphia-Bike-Count
R
false
false
3,268
r
# ------
# shinydashboard UI for the Philadelphia Bike Count app.
# Layout: a map tab (leaflet map + summary boxes), a graphs tab
# (plotly chart + date-range and direction inputs) and an about tab.
# NOTE(review): `bike_philly` must be defined in global.R / server scope
# before this UI is evaluated -- confirm against the rest of the app.
library(shiny)
library(shinydashboard)
library(leaflet)
library(shinythemes)
library(shinytoastr)
library(plotly)

# theme = shinythemes::shinytheme("slate")

dashboardPage(
  skin = "purple",
  dashboardHeader(title = "Philadelphia Bike Count", titleWidth = 250),
  dashboardSidebar(
    sidebarMenu(
      menuItem("Map of Philadelphia", tabName = "map", icon = icon("map")),
      menuItem("Graphs & Metrics", tabName = "graphs",
               icon = icon("signal", lib = "glyphicon")),
      menuItem("About", tabName = "about", icon = icon("question-circle")),
      menuItem("Source Code",
               href = "https://github.com/KristenZhao/Philadelphia-Bike-Count",
               icon = icon("github-alt"))
    )
  ),
  dashboardBody(
    tabItems(
      tabItem(
        tabName = "map",
        fluidRow(
          column(width = 8,
                 box(width = NULL,
                     leafletOutput("bike_count_map", height = 500))),
          column(width = 3,
                 box(width = NULL,
                     dateRangeInput("date1", "Select dates to visualize.",
                                    start = min(bike_philly$UPDATED),
                                    end = max(bike_philly$UPDATED),
                                    min = min(bike_philly$UPDATED),
                                    max = max(bike_philly$UPDATED))),
                 box(width = NULL, h3("Bike Counts"),
                     h4(textOutput("total_count"))),
                 box(width = NULL, h3("Most Popular Biking Area"),
                     h4(textOutput("popular_area"))),
                 box(width = NULL, h3("Most Popular Biking Municipality"),
                     h4(textOutput("muni"))))
        )
      ),
      tabItem(
        tabName = "graphs",
        fluidRow(
          column(width = 12,
                 box(width = NULL, plotlyOutput("count_by_muni")))
          # column(width = 6,
          #        box(width = NULL,
          #            plotOutput("count_by_muni_per_dir")))
        ),
        fluidRow(
          column(width = 6,
                 box(width = NULL,
                     dateRangeInput("date2", "Select dates to visualize.",
                                    start = min(bike_philly$UPDATED),
                                    end = max(bike_philly$UPDATED),
                                    min = min(bike_philly$UPDATED),
                                    max = max(bike_philly$UPDATED)))),
          column(width = 6,
                 box(width = NULL,
                     # BUG FIX: this note previously continued onto an
                     # uncommented line ("can be any other names"), which made
                     # the file unparseable. 'CNTDIR' is an input ID used to
                     # keep the app organized; it can be any other name.
                     selectInput("CNTDIR",
                                 "Which direction are you looking at?",
                                 choices = c("all", "both", "east", "north",
                                             "south", "west"))))
        )
      ),
      tabItem(
        tabName = "about",
        fluidRow(
          column(width = 12,
                 box(width = NULL, includeMarkdown("about.md")))
        )
      )
    )
  )
)
# Growth-rate analysis for the mt2 temperature experiment.
# Reads colony-diameter time series from mt2master.csv, fits spline growth
# curves per isolate x temperature (and per replicate), collects the fitted
# growth rates into `grtable`, then compares them with linear models, ANOVA
# (Tukey HSD) and boxplots.
# NOTE(review): relies on attach() putting columns (hpi_corr,
# col_diam_mm_corr, temp_C, isolate, replicate) on the search path -- a
# known footgun; confirm the CSV actually has these columns.
mtemp <- read.csv("mt2master.csv")
attach(mtemp)

#attempt with glm
glm.out = glm(col_diam_mm ~ dpi, family=binomial(link="logit"), data=mtemp)

#using grofit
library(grofit)
# Overall spline fit on all data pooled, as a sanity check.
growth <- gcFitSpline(hpi_corr,col_diam_mm_corr)
print(summary(growth))
plot(growth)

# Fit one spline per isolate x temperature and stack the first summary
# component (the spline growth rate) into grtable.
grtable<-NULL
temp<-unique(temp_C)
is<-levels(isolate)
for (i in seq_along(is)){
  for (t in 1:6){  # assumes exactly 6 temperature levels -- TODO confirm
    growth <- gcFitSpline(mtemp$hpi_corr[mtemp$isolate==is[[i]]&mtemp$temp_C==temp[[t]]],
                          mtemp$col_diam_mm_corr[mtemp$isolate==is[[i]]&mtemp$temp_C==temp[[t]]])
    print(summary(growth)[1])
    plot(growth)
    grtable_i=data.frame(cbind(is[[i]], temp[[t]], summary(growth)[1]))
    grtable=rbind(grtable, grtable_i)
  }
}
print(grtable)
class(grtable)
colnames(grtable)=c("isolate", "temp", "grate")
print(grtable)

# Linear model and one-way ANOVA of growth rate on temperature.
lmgr=lm(grtable$grate~grtable$temp+grtable$isolate)
summary(lmgr)
aov=aov(grtable$grate~as.factor(grtable$temp))
summary(aov)
TukeyHSD(aov)
box=boxplot(grtable$grate~as.factor(grtable$temp))
boxis=boxplot(grtable$grate~grtable$temp+grtable$isolate)

#test if formula is working
growth=gcFitSpline(mtemp$dpi[mtemp$isolate=="Dm13"&mtemp$temp_C==15],
                   mtemp$col_diam_mm[mtemp$isolate=="Dm13"&mtemp$temp_C==15])
print(summary(growth))
plot(growth)

#calculate growth rate for each rep
mtemp <- read.csv("mt2master.csv")
attach(mtemp)
library(grofit)
growth <- gcFitSpline(hpi,col_diam_mm_corr)
print(summary(growth))
plot(growth)

# Same stacking as above, but per isolate x temperature x replicate.
grtable<-NULL
temp<-unique(temp_C)
is<-levels(isolate)
rep<-unique(replicate)
for (i in seq_along(is)){
  for (t in 1:2){    # only the first 2 temperatures here -- TODO confirm intent
    for (r in 1:3){  # assumes 3 replicates -- TODO confirm
      growth <- gcFitSpline(mtemp$hpi_corr[mtemp$isolate==is[[i]]&mtemp$temp_C==temp[[t]]&mtemp$replicate==rep[[r]]],
                            mtemp$col_diam_mm_corr[mtemp$isolate==is[[i]]&mtemp$temp_C==temp[[t]]&mtemp$replicate==rep[[r]]])
      print(summary(growth)[1])
      plot(growth)
      grtable_i=data.frame(cbind(is[[i]],temp[[t]],rep[[r]],summary(growth)[1]))
      grtable=rbind(grtable, grtable_i)
    }
  }
}
print(grtable)
class(grtable)
colnames(grtable)=c("isolate", "temp", "rep", "grate")
print(grtable)

#lm
lmgr=lm(grtable$grate~grtable$temp+grtable$isolate)
summary(lmgr)
aovt=aov(grtable$grate~as.factor(grtable$temp))
summary(aovt)
TukeyHSD(aovt)
aovi=aov(grtable$grate~as.factor(grtable$isolate))
summary(aovi)
TukeyHSD(aovi)
box=boxplot(grtable$grate~as.factor(grtable$temp))
boxis=boxplot(grtable$grate~grtable$temp+grtable$isolate)
boxt=boxplot(grtable$grate~grtable$isolate)

#boxplot grate~isolate each temp
tempgr=unique(temp)
for (t in 1:6){
  boxt=boxplot(grtable$grate[grtable$temp==tempgr[[t]]]~grtable$isolate[grtable$temp==tempgr[[t]]])
}

#grate~isolate each temp
tempgr=unique(temp)
for (t in 1:6){
  lmgrt=lm(grtable$grate[grtable$temp==tempgr[[t]]]~grtable$isolate[grtable$temp==tempgr[[t]]])
  print(summary(lmgrt))
  plot(lmgrt)
  boxplot(lmgrt)
}
/growthratemt2.R
no_license
vivianaortizl/Macrophomina_temp_R
R
false
false
2,770
r
# Growth-rate analysis for the mt2 temperature experiment.
# Reads colony-diameter time series from mt2master.csv, fits spline growth
# curves per isolate x temperature (and per replicate), collects the fitted
# growth rates into `grtable`, then compares them with linear models, ANOVA
# (Tukey HSD) and boxplots.
# NOTE(review): relies on attach() putting columns (hpi_corr,
# col_diam_mm_corr, temp_C, isolate, replicate) on the search path -- a
# known footgun; confirm the CSV actually has these columns.
mtemp <- read.csv("mt2master.csv")
attach(mtemp)

#attempt with glm
glm.out = glm(col_diam_mm ~ dpi, family=binomial(link="logit"), data=mtemp)

#using grofit
library(grofit)
# Overall spline fit on all data pooled, as a sanity check.
growth <- gcFitSpline(hpi_corr,col_diam_mm_corr)
print(summary(growth))
plot(growth)

# Fit one spline per isolate x temperature and stack the first summary
# component (the spline growth rate) into grtable.
grtable<-NULL
temp<-unique(temp_C)
is<-levels(isolate)
for (i in seq_along(is)){
  for (t in 1:6){  # assumes exactly 6 temperature levels -- TODO confirm
    growth <- gcFitSpline(mtemp$hpi_corr[mtemp$isolate==is[[i]]&mtemp$temp_C==temp[[t]]],
                          mtemp$col_diam_mm_corr[mtemp$isolate==is[[i]]&mtemp$temp_C==temp[[t]]])
    print(summary(growth)[1])
    plot(growth)
    grtable_i=data.frame(cbind(is[[i]], temp[[t]], summary(growth)[1]))
    grtable=rbind(grtable, grtable_i)
  }
}
print(grtable)
class(grtable)
colnames(grtable)=c("isolate", "temp", "grate")
print(grtable)

# Linear model and one-way ANOVA of growth rate on temperature.
lmgr=lm(grtable$grate~grtable$temp+grtable$isolate)
summary(lmgr)
aov=aov(grtable$grate~as.factor(grtable$temp))
summary(aov)
TukeyHSD(aov)
box=boxplot(grtable$grate~as.factor(grtable$temp))
boxis=boxplot(grtable$grate~grtable$temp+grtable$isolate)

#test if formula is working
growth=gcFitSpline(mtemp$dpi[mtemp$isolate=="Dm13"&mtemp$temp_C==15],
                   mtemp$col_diam_mm[mtemp$isolate=="Dm13"&mtemp$temp_C==15])
print(summary(growth))
plot(growth)

#calculate growth rate for each rep
mtemp <- read.csv("mt2master.csv")
attach(mtemp)
library(grofit)
growth <- gcFitSpline(hpi,col_diam_mm_corr)
print(summary(growth))
plot(growth)

# Same stacking as above, but per isolate x temperature x replicate.
grtable<-NULL
temp<-unique(temp_C)
is<-levels(isolate)
rep<-unique(replicate)
for (i in seq_along(is)){
  for (t in 1:2){    # only the first 2 temperatures here -- TODO confirm intent
    for (r in 1:3){  # assumes 3 replicates -- TODO confirm
      growth <- gcFitSpline(mtemp$hpi_corr[mtemp$isolate==is[[i]]&mtemp$temp_C==temp[[t]]&mtemp$replicate==rep[[r]]],
                            mtemp$col_diam_mm_corr[mtemp$isolate==is[[i]]&mtemp$temp_C==temp[[t]]&mtemp$replicate==rep[[r]]])
      print(summary(growth)[1])
      plot(growth)
      grtable_i=data.frame(cbind(is[[i]],temp[[t]],rep[[r]],summary(growth)[1]))
      grtable=rbind(grtable, grtable_i)
    }
  }
}
print(grtable)
class(grtable)
colnames(grtable)=c("isolate", "temp", "rep", "grate")
print(grtable)

#lm
lmgr=lm(grtable$grate~grtable$temp+grtable$isolate)
summary(lmgr)
aovt=aov(grtable$grate~as.factor(grtable$temp))
summary(aovt)
TukeyHSD(aovt)
aovi=aov(grtable$grate~as.factor(grtable$isolate))
summary(aovi)
TukeyHSD(aovi)
box=boxplot(grtable$grate~as.factor(grtable$temp))
boxis=boxplot(grtable$grate~grtable$temp+grtable$isolate)
boxt=boxplot(grtable$grate~grtable$isolate)

#boxplot grate~isolate each temp
tempgr=unique(temp)
for (t in 1:6){
  boxt=boxplot(grtable$grate[grtable$temp==tempgr[[t]]]~grtable$isolate[grtable$temp==tempgr[[t]]])
}

#grate~isolate each temp
tempgr=unique(temp)
for (t in 1:6){
  lmgrt=lm(grtable$grate[grtable$temp==tempgr[[t]]]~grtable$isolate[grtable$temp==tempgr[[t]]])
  print(summary(lmgrt))
  plot(lmgrt)
  boxplot(lmgrt)
}
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/stanModels-data.R \docType{data} \name{stan.models} \alias{stan.models} \title{stan.models} \format{An object of class \code{"data.frame"}} \usage{ stan.models } \description{ data.frame containing the structure of the github repository https://github.com/stan-dev/example-models that contains examples to run STAN models in R from the book by Gelman and Hill 'Data Analysis Using Regression Analysis and Multilevel/Hierarchical Models'. } \examples{ data(stan.models) stan.models } \keyword{datasets}
/man/stan.models.Rd
no_license
orami01/d3Tree
R
false
true
581
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/stanModels-data.R \docType{data} \name{stan.models} \alias{stan.models} \title{stan.models} \format{An object of class \code{"data.frame"}} \usage{ stan.models } \description{ data.frame containing the structure of the github repository https://github.com/stan-dev/example-models that contains examples to run STAN models in R from the book by Gelman and Hill 'Data Analysis Using Regression Analysis and Multilevel/Hierarchical Models'. } \examples{ data(stan.models) stan.models } \keyword{datasets}
# Fit the ring-width STAN model for the GILLBROOK site, reusing the
# observation-error SD (sig_d_obs) estimated by a previous HMC run,
# and save the posterior draws to the site's output directory.
library(rstan)
# source('config_HMC')

dvers="v0.1"  # data version tag used in the input file name
mvers="v0.1"  # model version tag used in the output file name

site = "GILLBROOK"
fname_data = paste0('tree_data_GILLBROOK_STAN_', dvers)
fname_model = "ring_model_t_pdbh_sigd_STAN"

# Load posterior draws from the earlier no-covariance HMC fit.
# NOTE(review): this .Rdata file is assumed to define the matrix `out`
# with bracketed parameter names as column names -- confirm.
load(paste0('output/ring_model_t_pdbh_HMC_NOCOVAR_v3.0.Rdata'))
# Strip the "[index]" suffix from column names to match by parameter name.
col_names = sapply(strsplit(colnames(out), '\\['), function(x) x[[1]])
# Posterior mean of the observation-error SD, passed to the new fit as data.
sig_d_obs = mean(out[,which(col_names=="sig_d_obs")])

dat = readRDS(paste0('data/dump/', fname_data, '.RDS'))
dat$sig_d_obs = sig_d_obs

#######################################################################################################################################
# full model but with zero covariance; not efficient
#######################################################################################################################################
compiled <- stan_model(file = paste0('models/stan/ring_model_t_pdbh_sigd_STAN.stan'))

fit <- sampling(compiled,
                data = dat,
                iter = 5000,
                chains = 1,
                verbose=TRUE)
rm(compiled)  # free the compiled model object; only the draws are needed

post=rstan::extract(fit)
rm(fit)

save(post, file = paste0('sites/', site, '/output/', fname_model, '_', site, '_', mvers, '.Rdata'))
/r/run_rw_model_GILLBROOK_STAN.R
no_license
andydawson/npp-stat-model
R
false
false
1,166
r
# Fit the ring-width STAN model for the GILLBROOK site, reusing the
# observation-error SD (sig_d_obs) estimated by a previous HMC run,
# and save the posterior draws to the site's output directory.
library(rstan)
# source('config_HMC')

dvers="v0.1"  # data version tag used in the input file name
mvers="v0.1"  # model version tag used in the output file name

site = "GILLBROOK"
fname_data = paste0('tree_data_GILLBROOK_STAN_', dvers)
fname_model = "ring_model_t_pdbh_sigd_STAN"

# Load posterior draws from the earlier no-covariance HMC fit.
# NOTE(review): this .Rdata file is assumed to define the matrix `out`
# with bracketed parameter names as column names -- confirm.
load(paste0('output/ring_model_t_pdbh_HMC_NOCOVAR_v3.0.Rdata'))
# Strip the "[index]" suffix from column names to match by parameter name.
col_names = sapply(strsplit(colnames(out), '\\['), function(x) x[[1]])
# Posterior mean of the observation-error SD, passed to the new fit as data.
sig_d_obs = mean(out[,which(col_names=="sig_d_obs")])

dat = readRDS(paste0('data/dump/', fname_data, '.RDS'))
dat$sig_d_obs = sig_d_obs

#######################################################################################################################################
# full model but with zero covariance; not efficient
#######################################################################################################################################
compiled <- stan_model(file = paste0('models/stan/ring_model_t_pdbh_sigd_STAN.stan'))

fit <- sampling(compiled,
                data = dat,
                iter = 5000,
                chains = 1,
                verbose=TRUE)
rm(compiled)  # free the compiled model object; only the draws are needed

post=rstan::extract(fit)
rm(fit)

save(post, file = paste0('sites/', site, '/output/', fname_model, '_', site, '_', mvers, '.Rdata'))
\name{simulate.binary.dated.tree} \alias{simulate.binary.dated.tree} \title{Simulate a binary dated tree using a coalescent model and given a demographic history} \usage{ simulate.binary.dated.tree(births, deaths, nonDemeDynamics, t0, x0, sampleTimes, sampleStates, migrations=NA, parms=NA, fgyResolution = 2000, integrationMethod = 'rk4') } \arguments{ \item{births}{ A vector or matrix of strings. These are evaluated as equations for the number of births within and between demes. Must have rownames and colnames corresponding to the names of demes.} \item{deaths}{ A vector of strings. These are evaluated as equations for the rate that lineages in each deme are terminated. Must have rownames corresponding to the names of demes.} \item{nonDemeDynamics}{ A vector of strings. These are evaluated as equations for the rate of change of state variables that do not correspond to demes. Vector must have names of state variables. } \item{t0}{ The time of origin of the process. Should be before root of genealogy. } \item{x0}{ A vector of initial conditions for the demographic process (the state of the system at time t0). Should include the name and value of all variables mentioned in births and nonDemeDynamics. } \item{sampleTimes}{A vector of times for each sample. The names attribute will match tip.label of the generated tree. } \item{sampleStates}{A matrix of states for each sample. There is one row for each sample, and row names should match the names of sampleTimes. Each column gives the probability that the sample is in the deme 1..m at the time of sampling. } \item{migrations}{ A vector or matrix of strings. These are evaluated as equations for the number of migrations between demes. Must have rownames and colnames corresponding to the names of demes. 
Should be omitted if there is only one deme.} \item{parms}{A list of parameters that will be accessible to differential equations specified in births, migrations and nonDemeDynamics.} \item{fgyResolution}{Determines timestep of ODEs (larger values gives smaller time step)} \item{integrationMethod}{Passed to lsoda. Higher precision may be obtained with methods such as \emph{adams} at some computational cost. } } \value{ A binaryDatedTree, extends ape::phylo and includes heights of all nodes, time before most recent sample when node occurs. } \references{ E. M. Volz, Complex population dynamics and the coalescent under neutrality, Genetics, January, 2012 } \author{ Erik M Volz }
/rcolgem/pkg/man/simulate.binary.dated.tree.Rd
permissive
ArtPoon/kamphir
R
false
false
2,473
rd
\name{simulate.binary.dated.tree} \alias{simulate.binary.dated.tree} \title{Simulate a binary dated tree using a coalescent model and given a demographic history} \usage{ simulate.binary.dated.tree(births, deaths, nonDemeDynamics, t0, x0, sampleTimes, sampleStates, migrations=NA, parms=NA, fgyResolution = 2000, integrationMethod = 'rk4') } \arguments{ \item{births}{ A vector or matrix of strings. These are evaluated as equations for the number of births within and between demes. Must have rownames and colnames corresponding to the names of demes.} \item{deaths}{ A vector of strings. These are evaluated as equations for the rate that lineages in each deme are terminated. Must have rownames corresponding to the names of demes.} \item{nonDemeDynamics}{ A vector of strings. These are evaluated as equations for the rate of change of state variables that do not correspond to demes. Vector must have names of state variables. } \item{t0}{ The time of origin of the process. Should be before root of genealogy. } \item{x0}{ A vector of initial conditions for the demographic process (the state of the system at time t0). Should include the name and value of all variables mentioned in births and nonDemeDynamics. } \item{sampleTimes}{A vector of times for each sample. The names attribute will match tip.label of the generated tree. } \item{sampleStates}{A matrix of states for each sample. There is one row for each sample, and row names should match the names of sampleTimes. Each column gives the probability that the sample is in the deme 1..m at the time of sampling. } \item{migrations}{ A vector or matrix of strings. These are evaluated as equations for the number of migrations between demes. Must have rownames and colnames corresponding to the names of demes. 
Should be omitted if there is only one deme.} \item{parms}{A list of parameters that will be accessible to differential equations specified in births, migrations and nonDemeDynamics.} \item{fgyResolution}{Determines timestep of ODEs (larger values gives smaller time step)} \item{integrationMethod}{Passed to lsoda. Higher precision may be obtained with methods such as \emph{adams} at some computational cost. } } \value{ A binaryDatedTree, extends ape::phylo and includes heights of all nodes, time before most recent sample when node occurs. } \references{ E. M. Volz, Complex population dynamics and the coalescent under neutrality, Genetics, January, 2012 } \author{ Erik M Volz }
#' Variable Clustering with Multiple Latent Components Clustering algorithm #' #' Package varclust performs clustering of variables, according to a #' probabilistic model, which assumes that each cluster lies in a low #' dimensional subspace. Segmentation of variables, number of clusters and their #' dimensions are selected based on the appropriate implementation of the #' Bayesian Information Criterion. #' #' The best candidate models are identified by the specific implementation of #' K-means algorithm, in which cluster centers are represented by some number of #' orthogonal factors(principal components of the variables within a cluster) #' and similarity between a given variable and a cluster center depends on #' residuals from a linear model fit. Based on the Bayesian Information #' Criterion (BIC), sums of squares of residuals are appropriately scaled, which #' allows to avoid an over-excessive attraction by clusters with larger #' dimensions. To reduce the chance that the local minimum of modified BIC #' (mBIC) is obtained instead of the global one, for every fixed number of #' clusters in a given range K-means algorithm is run large number of times, #' with different random initializations of cluster centers. #' #' The main function of package \pkg{varclust} is \code{\link{mlcc.bic}} which #' allows clustering variables in a data with unknown number of clusters. #' Variable partition is computed with k-means based algorithm. Number of #' clusters and their dimensions are estimated using mBIC and PESEL #' respectively. If the number of clusters is known one might use function #' \code{\link{mlcc.reps}}, which takes number of clusters as a parameter. For #' \code{\link{mlcc.reps}} one might specify as well some initial segmentation #' for k-means algorithm. This can be useful if user has some a priori knowledge #' about clustering. #' #' We provide also two functions to simulate datasets with described structure. 
#' The function \code{\link{data.simulation}} generates the data so that the
#' subspaces are independent and \code{\link{data.simulation.factors}} generates
#' the data where some factors are shared between the subspaces.
#'
#' We also provide functions that measure the quality of clustering.
#' \code{\link{misclassification}} computes misclassification rate between two
#' partitions. This performance measure is extensively used in image
#' segmentation. The other measure is implemented as the
#' \code{\link{integration}} function.
#'
#' @docType package
#' @name varclust
#' @details Version: 0.9.4
#' @importFrom RcppEigen fastLmPure
#' @importFrom doParallel registerDoParallel
#' @importFrom parallel makeCluster
#' @importFrom parallel stopCluster
#' @importFrom parallel detectCores
#' @importFrom pesel pesel
#' @importFrom graphics axis plot plot.default points
#' @importFrom stats cov dnorm pnorm prcomp rnorm runif var
#' @importFrom utils str
#' @import doRNG
#' @import foreach
#' @author Piotr Sobczyk, Stanislaw Wilczynski, Julie Josse, Malgorzata Bogdan
#'
#' Maintainer: Piotr Sobczyk \email{pj.sobczyk@@gmail.com}
#'
#' @examples
#' \donttest{
#' sim.data <- data.simulation(n = 50, SNR = 1, K = 3, numb.vars = 50, max.dim = 3)
#' mlcc.bic(sim.data$X, numb.clusters = 1:5, numb.runs = 20, numb.cores = 1, verbose = TRUE)
#' mlcc.reps(sim.data$X, numb.clusters = 3, numb.runs = 20, numb.cores = 1)}
NULL
/R/varclust.R
no_license
cran/varclust
R
false
false
3,408
r
#' Variable Clustering with Multiple Latent Components Clustering algorithm #' #' Package varclust performs clustering of variables, according to a #' probabilistic model, which assumes that each cluster lies in a low #' dimensional subspace. Segmentation of variables, number of clusters and their #' dimensions are selected based on the appropriate implementation of the #' Bayesian Information Criterion. #' #' The best candidate models are identified by the specific implementation of #' K-means algorithm, in which cluster centers are represented by some number of #' orthogonal factors(principal components of the variables within a cluster) #' and similarity between a given variable and a cluster center depends on #' residuals from a linear model fit. Based on the Bayesian Information #' Criterion (BIC), sums of squares of residuals are appropriately scaled, which #' allows to avoid an over-excessive attraction by clusters with larger #' dimensions. To reduce the chance that the local minimum of modified BIC #' (mBIC) is obtained instead of the global one, for every fixed number of #' clusters in a given range K-means algorithm is run large number of times, #' with different random initializations of cluster centers. #' #' The main function of package \pkg{varclust} is \code{\link{mlcc.bic}} which #' allows clustering variables in a data with unknown number of clusters. #' Variable partition is computed with k-means based algorithm. Number of #' clusters and their dimensions are estimated using mBIC and PESEL #' respectively. If the number of clusters is known one might use function #' \code{\link{mlcc.reps}}, which takes number of clusters as a parameter. For #' \code{\link{mlcc.reps}} one might specify as well some initial segmentation #' for k-means algorithm. This can be useful if user has some a priori knowledge #' about clustering. #' #' We provide also two functions to simulate datasets with described structure. 
#' The function \code{\link{data.simulation}} generates the data so that the
#' subspaces are independent and \code{\link{data.simulation.factors}} generates
#' the data where some factors are shared between the subspaces.
#'
#' We also provide functions that measure the quality of clustering.
#' \code{\link{misclassification}} computes misclassification rate between two
#' partitions. This performance measure is extensively used in image
#' segmentation. The other measure is implemented as the
#' \code{\link{integration}} function.
#'
#' @docType package
#' @name varclust
#' @details Version: 0.9.4
#' @importFrom RcppEigen fastLmPure
#' @importFrom doParallel registerDoParallel
#' @importFrom parallel makeCluster
#' @importFrom parallel stopCluster
#' @importFrom parallel detectCores
#' @importFrom pesel pesel
#' @importFrom graphics axis plot plot.default points
#' @importFrom stats cov dnorm pnorm prcomp rnorm runif var
#' @importFrom utils str
#' @import doRNG
#' @import foreach
#' @author Piotr Sobczyk, Stanislaw Wilczynski, Julie Josse, Malgorzata Bogdan
#'
#' Maintainer: Piotr Sobczyk \email{pj.sobczyk@@gmail.com}
#'
#' @examples
#' \donttest{
#' sim.data <- data.simulation(n = 50, SNR = 1, K = 3, numb.vars = 50, max.dim = 3)
#' mlcc.bic(sim.data$X, numb.clusters = 1:5, numb.runs = 20, numb.cores = 1, verbose = TRUE)
#' mlcc.reps(sim.data$X, numb.clusters = 3, numb.runs = 20, numb.cores = 1)}
NULL
#' To plot the popularity of names in times
#'
#' Filters the \code{prenoms} dataset for the given name and sex, sums the
#' yearly counts and renders an interactive dygraph of the time series.
#'
#' @param Name name from input (character scalar)
#' @param Sex sex from input; must be \code{"F"} or \code{"M"}
#' @import prenoms assertthat dygraphs dplyr
#' @return the plot (a dygraph htmlwidget)
#' @export
#' @examples
#' draw_a_name("Vincent", "M")
#'
draw_a_name <- function(Name, Sex) {
  assert_that(is.character(Name))
  # %in% is clearer and scalar-safe compared to the previous
  # chained `Sex == "F" | Sex == "M"` comparison.
  assert_that(Sex %in% c("F", "M"))
  prenoms::prenoms %>%
    filter(name == Name, sex == Sex) %>%
    group_by(year) %>%
    summarize(total = sum(n)) %>%
    dygraph()
}
/R/draw_a_name.R
no_license
Lavin9/Package
R
false
false
473
r
#' To plot the popularity of names in times
#'
#' Filters the \code{prenoms} dataset for the given name and sex, sums the
#' yearly counts and renders an interactive dygraph of the time series.
#'
#' @param Name name from input (character scalar)
#' @param Sex sex from input; must be \code{"F"} or \code{"M"}
#' @import prenoms assertthat dygraphs dplyr
#' @return the plot (a dygraph htmlwidget)
#' @export
#' @examples
#' draw_a_name("Vincent", "M")
#'
draw_a_name <- function(Name, Sex) {
  assert_that(is.character(Name))
  # %in% is clearer and scalar-safe compared to the previous
  # chained `Sex == "F" | Sex == "M"` comparison.
  assert_that(Sex %in% c("F", "M"))
  prenoms::prenoms %>%
    filter(name == Name, sex == Sex) %>%
    group_by(year) %>%
    summarize(total = sum(n)) %>%
    dygraph()
}
# Pie charts of topic frequencies for the strongly connected components of a
# co-publication network read from test.csv, with frequencies from freqs.csv.
# NOTE(review): graph.edgelist / decompose.graph / degree / vcount come from
# igraph, which is not loaded here -- confirm library(igraph) is loaded
# upstream. Also, E(...)$grade and E(...)$spec are referenced but never set
# on this graph -- confirm they come from an earlier step.

par(mar=c(0.5, 0.5, 0.5, 0.5))
par(oma=c(0.5, 0.5, 0.5, 0.5))

pchart_data = read.csv(file="freqs.csv",head=TRUE,sep=",")
head(pchart_data)

lines <- scan('test.csv',what="character",sep="\n",skip=1)
lines=gsub("[ ]+$","",gsub("[ ]+"," ",lines)) # remove trailing and multiple spaces.
adjlist=strsplit(lines,",") # split each line into a vector of IDs
# First column of the edge list: repeat the line's first ID once per neighbour.
col1=unlist(lapply(adjlist,function(x) rep(x[1],length(x)-1)))
# Second column: all remaining IDs on the line, stacked vertically.
col2=unlist(lapply(adjlist,"[",-1))
el=cbind(col1,col2) # creates the edgelist by combining column 1 and 2.
bsk.network<-graph.edgelist( el )

# We can also color the connecting edges differently depending on the 'grade':
E(bsk.network)$color<-ifelse(E(bsk.network)$grade<=50, "red", "grey")
# or depending on the different specialization ('spec'):
E(bsk.network)$color<-ifelse(E(bsk.network)$spec=='X', "red",
                             ifelse(E(bsk.network)$spec=='Y', "blue", "grey"))

# Vertex size proportional to degree, so heavily connected nodes get
# proportionally bigger dots.
# BUG FIX: the following note was previously an uncommented line, which made
# the script unparseable: "Getting the right scale gets some playing around
# with the parameters of the scale function (from the 'base' package)".
V(bsk.network)$size<-degree(bsk.network)

graphs <- decompose.graph(bsk.network)

# Select connected components whose size satisfies `operator` vs `gsize`.
operator <- ">"
gsize <- "6"
if (operator == "=") {
  strong_connected <- which(sapply(graphs, vcount) == strtoi(gsize, base = 0L) )
}
if (operator == ">") {
  strong_connected <- which(sapply(graphs, vcount) >= strtoi(gsize, base = 0L) )
}
if (operator == "<") {
  strong_connected <- which(sapply(graphs, vcount) <= strtoi(gsize, base = 0L) )
}
strong_connected

#Layout: grid with 5 plots per row, one row per 5 selected components.
matrix_layout = matrix(c(1,2,3,4,5),1, 5,byrow=TRUE)
max <- 5
for(i in seq(1:ceiling(length(strong_connected)/5)) ) {
  seq_i <- seq(max+1,max+5)
  print(seq_i)
  matrix_layout <- rbind(matrix_layout, seq_i)
  max <- max+5
  print(max)
}
df<-layout(matrix_layout, respect=TRUE)
layout.show(df)

# One pie chart per selected component: sum the frequency row restricted to
# the component's vertex names, one slice per row of pchart_data.
for(g in strong_connected ) {
  cnt <- 0.0
  slices <- seq_len(nrow(pchart_data))   # was seq(1:dim(...)[1]); same values
  for(l in seq_len(nrow(pchart_data))) {
    frame=pchart_data[l,V(graphs[[g]])$name]
    cnt = sum(frame[1,]) #bv
    slices[l] <- cnt
  }
  #print(paste(names(frame), collapse = '\n'))
  degr = names(sort(degree(graphs[[g]])))
  lbls <- c("bacterial vaginosis", "yeast infection", "preterm birth","other")
  # Drop empty slices (and their labels) before plotting.
  tslices <- slices[slices != 0]
  tlbls <- lbls[ slices != 0 ]
  print(tslices)
  print(tlbls)
  pie(tslices, labels = tlbls, cex.main=1.3,cex=1.2,main=tail(degr,1),radius=0.5,col=rainbow(length(lbls)))
}
/pchart_test.R
no_license
izhbannikov/CoPubNet
R
false
false
2,823
r
# Pie charts of topic frequencies for the strongly connected components of a
# co-publication network read from test.csv, with frequencies from freqs.csv.
# NOTE(review): graph.edgelist / decompose.graph / degree / vcount come from
# igraph, which is not loaded here -- confirm library(igraph) is loaded
# upstream. Also, E(...)$grade and E(...)$spec are referenced but never set
# on this graph -- confirm they come from an earlier step.

par(mar=c(0.5, 0.5, 0.5, 0.5))
par(oma=c(0.5, 0.5, 0.5, 0.5))

pchart_data = read.csv(file="freqs.csv",head=TRUE,sep=",")
head(pchart_data)

lines <- scan('test.csv',what="character",sep="\n",skip=1)
lines=gsub("[ ]+$","",gsub("[ ]+"," ",lines)) # remove trailing and multiple spaces.
adjlist=strsplit(lines,",") # split each line into a vector of IDs
# First column of the edge list: repeat the line's first ID once per neighbour.
col1=unlist(lapply(adjlist,function(x) rep(x[1],length(x)-1)))
# Second column: all remaining IDs on the line, stacked vertically.
col2=unlist(lapply(adjlist,"[",-1))
el=cbind(col1,col2) # creates the edgelist by combining column 1 and 2.
bsk.network<-graph.edgelist( el )

# We can also color the connecting edges differently depending on the 'grade':
E(bsk.network)$color<-ifelse(E(bsk.network)$grade<=50, "red", "grey")
# or depending on the different specialization ('spec'):
E(bsk.network)$color<-ifelse(E(bsk.network)$spec=='X', "red",
                             ifelse(E(bsk.network)$spec=='Y', "blue", "grey"))

# Vertex size proportional to degree, so heavily connected nodes get
# proportionally bigger dots.
# BUG FIX: the following note was previously an uncommented line, which made
# the script unparseable: "Getting the right scale gets some playing around
# with the parameters of the scale function (from the 'base' package)".
V(bsk.network)$size<-degree(bsk.network)

graphs <- decompose.graph(bsk.network)

# Select connected components whose size satisfies `operator` vs `gsize`.
operator <- ">"
gsize <- "6"
if (operator == "=") {
  strong_connected <- which(sapply(graphs, vcount) == strtoi(gsize, base = 0L) )
}
if (operator == ">") {
  strong_connected <- which(sapply(graphs, vcount) >= strtoi(gsize, base = 0L) )
}
if (operator == "<") {
  strong_connected <- which(sapply(graphs, vcount) <= strtoi(gsize, base = 0L) )
}
strong_connected

#Layout: grid with 5 plots per row, one row per 5 selected components.
matrix_layout = matrix(c(1,2,3,4,5),1, 5,byrow=TRUE)
max <- 5
for(i in seq(1:ceiling(length(strong_connected)/5)) ) {
  seq_i <- seq(max+1,max+5)
  print(seq_i)
  matrix_layout <- rbind(matrix_layout, seq_i)
  max <- max+5
  print(max)
}
df<-layout(matrix_layout, respect=TRUE)
layout.show(df)

# One pie chart per selected component: sum the frequency row restricted to
# the component's vertex names, one slice per row of pchart_data.
for(g in strong_connected ) {
  cnt <- 0.0
  slices <- seq_len(nrow(pchart_data))   # was seq(1:dim(...)[1]); same values
  for(l in seq_len(nrow(pchart_data))) {
    frame=pchart_data[l,V(graphs[[g]])$name]
    cnt = sum(frame[1,]) #bv
    slices[l] <- cnt
  }
  #print(paste(names(frame), collapse = '\n'))
  degr = names(sort(degree(graphs[[g]])))
  lbls <- c("bacterial vaginosis", "yeast infection", "preterm birth","other")
  # Drop empty slices (and their labels) before plotting.
  tslices <- slices[slices != 0]
  tlbls <- lbls[ slices != 0 ]
  print(tslices)
  print(tlbls)
  pie(tslices, labels = tlbls, cex.main=1.3,cex=1.2,main=tail(degr,1),radius=0.5,col=rainbow(length(lbls)))
}
## functions for Chromatograms class

## Validity function for a Chromatograms object:
## - every cell of the matrix must hold a Chromatogram object,
## - phenoData must have one row per matrix column,
## - column names must match phenoData row names.
## Returns TRUE if valid, otherwise a character vector of messages.
.validChromatograms <- function(x) {
    msg <- character()
    ## All elements have to be of type Chromatogram
    if (length(x)) {
        res <- vapply(x, FUN = is, FUN.VALUE = logical(1L), "Chromatogram")
        if (!all(res))
            msg <- c(msg, paste0("All elements have to be of type ",
                                 "'Chromatogram'."))
        ## Shall we also ensure that fromFile in each column is the same?
    }
    if (nrow(x@phenoData) != ncol(x))
        msg <- c(msg, paste0("nrow of phenoData has to match ncol ",
                             "of the Chromatograms object"))
    ## Check colnames .Data with rownames phenoData.
    if (any(colnames(x) != rownames(x@phenoData)))
        msg <- c(msg, paste0("colnames of object has to match rownames of",
                             " phenoData"))
    if (length(msg)) msg
    else TRUE
}

#' @description \code{Chromatograms}: create an instance of class
#'     \code{Chromatograms}.
#'
#' @param data A \code{list} of \code{\link{Chromatogram}} objects.
#'
#' @param phenoData either a \code{data.frame}, \code{AnnotatedDataFrame} or
#'     \code{NAnnotatedDataFrame} describing the phenotypical information of
#'     the samples.
#'
#' @param ... Additional parameters to be passed to the
#'     \code{\link[base]{matrix}} constructor, such as \code{nrow},
#'     \code{ncol} and \code{byrow}.
#'
#' @rdname Chromatograms-class
Chromatograms <- function(data, phenoData, ...) {
    ## No data: return an empty object.
    if (missing(data))
        return(new("Chromatograms"))
    datmat <- matrix(data, ...)
    ## Default phenoData: one (empty) row per matrix column.
    if (missing(phenoData))
        phenoData <- annotatedDataFrameFrom(datmat, byrow = FALSE)
    if (ncol(datmat) != nrow(phenoData))
        stop("Dimensions of the data matrix and the phenoData do not match")
    ## If colnames of datmat are NULL, use the rownames of phenoData
    if (is.null(colnames(datmat)))
        colnames(datmat) <- rownames(phenoData)
    ## Convert phenoData: data.frame -> AnnotatedDataFrame ->
    ## NAnnotatedDataFrame (the slot's expected class).
    if (is(phenoData, "data.frame"))
        phenoData <- AnnotatedDataFrame(phenoData)
    if (is(phenoData, "AnnotatedDataFrame"))
        phenoData <- as(phenoData, "NAnnotatedDataFrame")
    res <- new("Chromatograms", .Data = datmat, phenoData = phenoData)
    ## validObject errors if invalid; otherwise return the object.
    if (validObject(res))
        res
}

#' @description Plot the data from a list of Chromatogram objects (all
#'     representing the same MS data slice across multiple files) into the
#'     same plot.
#'
#' @note We are using the matplot here, since that is much faster than lapply
#'     on the individual chromatogram objects.
#'
#' @author Johannes Rainer
#'
#' @noRd
.plotChromatogramList <- function(x, col = "#00000060", lty = 1, type = "l",
                                  xlab = "retention time",
                                  ylab = "intensity",
                                  main = NULL, ...) {
    if (!is.list(x) & !all(vapply(x, FUN = is, FUN.VALUE = logical(1L),
                                  "Chromatogram")))
        stop("'x' has to be a list of Chromatogram objects")
    ## Check col, lty and type parameters: recycle the first element so each
    ## chromatogram has its own entry.
    if (length(col) != length(x))
        col <- rep(col[1], length(x))
    if (length(lty) != length(x))
        lty <- rep(lty[1], length(x))
    if (length(type) != length(x))
        type <- rep(type, length(x))
    ## Default title: the mz range across all chromatograms.
    if (is.null(main)) {
        suppressWarnings(
            mzr <- range(lapply(x, mz), na.rm = TRUE, finite = TRUE)
        )
        main <- paste0(format(mzr, digits = 7), collapse = " - ")
    }
    ## Number of measurements we've got per chromatogram. This can be different
    ## between samples, from none (if not a single measurement in the rt/mz)
    ## to the number of data points that were actually measured.
    lens <- lengths(x)
    maxLens <- max(lens)
    ## Pad shorter chromatograms with NA so everything fits one matrix.
    ints <- rts <- matrix(NA_real_, nrow = maxLens, ncol = length(x))
    for (i in seq(along = x)) {
        if (lens[i]) {
            rows <- seq_len(lens[i])
            rts[rows, i] <- rtime(x[[i]])
            ints[rows, i] <- intensity(x[[i]])
        }
    }
    ## Identify columns/samples that have only NAs in the intensity matrix.
    ## Such columns represent samples for which no valid intensity was measured
    ## in the respective mz slice (these would still have valid retention time
    ## values), or samples that don't have a single scan in the respective rt
    ## range.
    keep <- colSums(!is.na(ints)) > 0
    ## Finally plot the data.
    if (any(keep)) {
        matplot(x = rts[, keep, drop = FALSE], y = ints[, keep, drop = FALSE],
                type = type[keep], lty = lty[keep], col = col[keep],
                xlab = xlab, ylab = ylab, main = main, ...)
    } else {
        ## Nothing to plot: show an empty frame with a red notice.
        warning("Chromatograms empty")
        plot(3, 3, pch = NA, xlab = xlab, ylab = ylab, main = main)
        text(3, 3, labels = "Empty Chromatograms", col = "red")
    }
}
/R/functions-Chromatograms.R
no_license
arnesmits/MSnbase
R
false
false
4,862
r
## functions for Chromatograms class

## Validity function for Chromatograms: every element must be a Chromatogram,
## and phenoData rows must line up (in number and name) with the columns of
## the underlying matrix.
.validChromatograms <- function(x) {
    msg <- character()
    ## All elements have to be of type Chromatogram
    if (length(x)) {
        res <- vapply(x, FUN = is, FUN.VALUE = logical(1L), "Chromatogram")
        if (!all(res))
            msg <- c(msg, paste0("All elements have to be of type ",
                                 "'Chromatogram'."))
        ## Shall we also ensure that fromFile in each column is the same?
    }
    if (nrow(x@phenoData) != ncol(x))
        msg <- c(msg, paste0("nrow of phenoData has to match ncol ",
                             "of the Chromatograms object"))
    ## Check colnames .Data with rownames phenoData.
    if (any(colnames(x) != rownames(x@phenoData)))
        msg <- c(msg, paste0("colnames of object has to match rownames of",
                             " phenoData"))
    if (length(msg)) msg
    else TRUE
}

#' @description \code{Chromatograms}: create an instance of class
#'     \code{Chromatograms}.
#'
#' @param data A \code{list} of \code{\link{Chromatogram}} objects.
#'
#' @param phenoData either a \code{data.frame}, \code{AnnotatedDataFrame} or
#'     \code{NAnnotatedDataFrame} describing the phenotypical information of the
#'     samples.
#'
#' @param ... Additional parameters to be passed to the
#'     \code{\link[base]{matrix}} constructor, such as \code{nrow}, \code{ncol}
#'     and \code{byrow}.
#'
#' @rdname Chromatograms-class
Chromatograms <- function(data, phenoData, ...) {
    if (missing(data))
        return(new("Chromatograms"))
    datmat <- matrix(data, ...)
    if (missing(phenoData))
        phenoData <- annotatedDataFrameFrom(datmat, byrow = FALSE)
    if (ncol(datmat) != nrow(phenoData))
        stop("Dimensions of the data matrix and the phenoData do not match")
    ## If colnames of datmat are NULL, use the rownames of phenoData
    if (is.null(colnames(datmat)))
        colnames(datmat) <- rownames(phenoData)
    ## Convert phenoData to the internal NAnnotatedDataFrame representation.
    if (is(phenoData, "data.frame"))
        phenoData <- AnnotatedDataFrame(phenoData)
    if (is(phenoData, "AnnotatedDataFrame"))
        phenoData <- as(phenoData, "NAnnotatedDataFrame")
    res <- new("Chromatograms", .Data = datmat, phenoData = phenoData)
    if (validObject(res))
        res
}

#' @description Plot the data from a list of Chromatogram objects (all
#'     representing the same MS data slice across multiple files) into the
#'     same plot.
#'
#' @note We are using the matplot here, since that is much faster than lapply
#'     on the individual chromatogram objects.
#'
#' @author Johannes Rainer
#'
#' @noRd
.plotChromatogramList <- function(x, col = "#00000060", lty = 1, type = "l",
                                  xlab = "retention time", ylab = "intensity",
                                  main = NULL, ...) {
    ## FIX: original used `&` so the error was only raised when x was neither a
    ## list NOR all-Chromatogram; `||` rejects either violation (short-circuits
    ## before vapply touches a non-list).
    if (!is.list(x) || !all(vapply(x, FUN = is, FUN.VALUE = logical(1L),
                                   "Chromatogram")))
        stop("'x' has to be a list of Chromatogram objects")
    ## Check col, lty and type parameters; recycle the first element when the
    ## length does not match the number of chromatograms.
    if (length(col) != length(x))
        col <- rep(col[1], length(x))
    if (length(lty) != length(x))
        lty <- rep(lty[1], length(x))
    ## FIX: was rep(type, length(x)), which yields the wrong length whenever
    ## length(type) > 1; recycle type[1] consistently with col and lty.
    if (length(type) != length(x))
        type <- rep(type[1], length(x))
    if (is.null(main)) {
        suppressWarnings(
            mzr <- range(lapply(x, mz), na.rm = TRUE, finite = TRUE)
        )
        main <- paste0(format(mzr, digits = 7), collapse = " - ")
    }
    ## Number of measurements we've got per chromatogram. This can be different
    ## between samples, from none (if not a single measurement in the rt/mz)
    ## to the number of data points that were actually measured.
    lens <- lengths(x)
    maxLens <- max(lens)
    ints <- rts <- matrix(NA_real_, nrow = maxLens, ncol = length(x))
    for (i in seq_along(x)) {
        if (lens[i]) {
            rows <- seq_len(lens[i])
            rts[rows, i] <- rtime(x[[i]])
            ints[rows, i] <- intensity(x[[i]])
        }
    }
    ## Identify columns/samples that have only NAs in the intensity matrix.
    ## Such columns represent samples for which no valid intensity was measured
    ## in the respective mz slice (these would still have valid retention time
    ## values), or samples that don't have a single scan in the respective rt
    ## range.
    keep <- colSums(!is.na(ints)) > 0
    ## Finally plot the data.
    if (any(keep)) {
        matplot(x = rts[, keep, drop = FALSE], y = ints[, keep, drop = FALSE],
                type = type[keep], lty = lty[keep], col = col[keep],
                xlab = xlab, ylab = ylab, main = main, ...)
    } else {
        warning("Chromatograms empty")
        plot(3, 3, pch = NA, xlab = xlab, ylab = ylab, main = main)
        text(3, 3, labels = "Empty Chromatograms", col = "red")
    }
}
#Nombre: Estefany Paredes ############################ #PRIMERA FORMA ############################ #Factorial_loop Factorial_loop<-function(x){ if(x<0){ stop("Factorials can only be computed when x is equal to, or greater than, zero") } if(x == 0){ return(1) } else{ y <- 1 for(i in 1:x){ y <- y * ((1:x)[i]) } return(y) } } ############################ #sEGUNDA FORMA ############################ #Factorial_reduce Factorial_reduce <- function(x){ if(x < 0){ stop("Factorials can only be computed when x is equal to, or greater than, zero") } # ensure purrr package is installed if (!require('purrr', quietly = TRUE)) { stop('Please install the purrr package') } if(x == 0){ return(1) } else{ reduce(as.numeric(1:x), `*`) %>% return() } } ############################ #TERCERA FORMA ############################ #Factorial_func Factorial_func <- function(x){ if(x < 0){ stop("Factorials can only be computed when x is equal to, or greater than, zero") } if (x == 0){ return (1) } else{ return (x * Factorial_func(x-1)) } } ############################ #CUARTA FORMA ############################ #Factorial_mem memoization <- function(){ values <- 1 Factorial_mem <- function(x){ if(x < 0){ stop("Factorials can only be computed when x is equal to, or greater than, zero") } if (x == 0 | x == 1){ return(1) } if (length(values) < x){ values <<- `length<-`(values, x) } if (!is.na(values[x])){ return(values[x]) } #calculate new values values[x] <<- x * factorial(x-1) values[x] } Factorial_mem } Factorial_mem <- memoization() # benchmarking these four functions library(microbenchmark) microbenchmark( Factorial_loop(5), Factorial_reduce(5), Factorial_func(5), Factorial_mem(5) ) microbenchmark( Factorial_loop(15), Factorial_reduce(15), Factorial_func(15), Factorial_mem(15) ) microbenchmark( Factorial_loop(50), Factorial_reduce(50), Factorial_func(50), Factorial_mem(50) )
/factorial_code.R
no_license
emparedes3/Functional-and-Object-Oriented-Programming
R
false
false
2,263
r
#Nombre: Estefany Paredes ############################ #PRIMERA FORMA ############################ #Factorial_loop Factorial_loop<-function(x){ if(x<0){ stop("Factorials can only be computed when x is equal to, or greater than, zero") } if(x == 0){ return(1) } else{ y <- 1 for(i in 1:x){ y <- y * ((1:x)[i]) } return(y) } } ############################ #sEGUNDA FORMA ############################ #Factorial_reduce Factorial_reduce <- function(x){ if(x < 0){ stop("Factorials can only be computed when x is equal to, or greater than, zero") } # ensure purrr package is installed if (!require('purrr', quietly = TRUE)) { stop('Please install the purrr package') } if(x == 0){ return(1) } else{ reduce(as.numeric(1:x), `*`) %>% return() } } ############################ #TERCERA FORMA ############################ #Factorial_func Factorial_func <- function(x){ if(x < 0){ stop("Factorials can only be computed when x is equal to, or greater than, zero") } if (x == 0){ return (1) } else{ return (x * Factorial_func(x-1)) } } ############################ #CUARTA FORMA ############################ #Factorial_mem memoization <- function(){ values <- 1 Factorial_mem <- function(x){ if(x < 0){ stop("Factorials can only be computed when x is equal to, or greater than, zero") } if (x == 0 | x == 1){ return(1) } if (length(values) < x){ values <<- `length<-`(values, x) } if (!is.na(values[x])){ return(values[x]) } #calculate new values values[x] <<- x * factorial(x-1) values[x] } Factorial_mem } Factorial_mem <- memoization() # benchmarking these four functions library(microbenchmark) microbenchmark( Factorial_loop(5), Factorial_reduce(5), Factorial_func(5), Factorial_mem(5) ) microbenchmark( Factorial_loop(15), Factorial_reduce(15), Factorial_func(15), Factorial_mem(15) ) microbenchmark( Factorial_loop(50), Factorial_reduce(50), Factorial_func(50), Factorial_mem(50) )
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/emails.R
\docType{data}
\name{emails}
\alias{emails}
\title{Hillary Rodham Clinton emails}
\format{A data frame with 29444 rows and 9 variables:
\describe{
  \item{docID}{Primary key}
  \item{docDate}{Date when document was sent or received}
  \item{to}{Who the email was sent to}
  \item{from}{Who the email was received from}
  \item{originalTo}{To whom the email was originally sent}
  \item{originalFrom}{From whom the email originally came}
  \item{subject}{Subject of the email}
  \item{interesting}{Rating, relevancy of email}
  \item{not_interesting}{Rating, irrelevancy of email}
}}
\source{
\url{http://graphics.wsj.com/hillary-clinton-email-documents/api/search.php?subject=&to=&from=&start=&end=&sort=docDate&order=desc&docid=&limit=27159&offset=0}
}
\usage{
emails
}
\description{
A dataset containing 29444 emails from/to Hillary Rodham Clinton sent/received
between 2009-08-14 and 2014-08-13.
}
\keyword{datasets}
/man/emails.Rd
no_license
Mithileysh/rodham
R
false
true
1,017
rd
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/emails.R
\docType{data}
\name{emails}
\alias{emails}
\title{Hillary Rodham Clinton emails}
\format{A data frame with 29444 rows and 9 variables:
\describe{
  \item{docID}{Primary key}
  \item{docDate}{Date when document was sent or received}
  \item{to}{Who the email was sent to}
  \item{from}{Who the email was received from}
  \item{originalTo}{To whom the email was originally sent}
  \item{originalFrom}{From whom the email originally came}
  \item{subject}{Subject of the email}
  \item{interesting}{Rating, relevancy of email}
  \item{not_interesting}{Rating, irrelevancy of email}
}}
\source{
\url{http://graphics.wsj.com/hillary-clinton-email-documents/api/search.php?subject=&to=&from=&start=&end=&sort=docDate&order=desc&docid=&limit=27159&offset=0}
}
\usage{
emails
}
\description{
A dataset containing 29444 emails from/to Hillary Rodham Clinton sent/received
between 2009-08-14 and 2014-08-13.
}
\keyword{datasets}
library(testthat)

# Target function used for stubbing throughout this file; always returns 5.
simpf <- function(a = 1, b, d, ...) return(5)

not_expected_error <- 'Function is called with arguments different from expected!'

test_that('Returns the specified value if called with the exact arguments specified', {
  simpf_stub <- stub(simpf)
  simpf_stub$withExactArgs(a = 1, b = 2, d = 3, c = 4)$returns(10)
  stubbed <- simpf_stub$f

  # Exact match (in any argument order) returns the configured value;
  # anything else falls through to the default NULL.
  expect_equal(stubbed(1, 2, 3, c = 4), 10)
  expect_null(stubbed(2, 2, 3))
  expect_equal(stubbed(1, 2, 3, c = 4), 10)
  expect_equal(stubbed(1, 2, c = 4, 3), 10)
})

test_that('Throws error with specified message if called with the exact arguments specified', {
  simpf_stub <- stub(simpf)
  expected_msg <- 'error is good'
  simpf_stub$withExactArgs(a = 1, b = 2, d = 3, c = 4)$throws(expected_msg)
  stubbed <- simpf_stub$f

  expect_error(stubbed(1, 2, 3, c = 4), expected_msg)
  expect_null(stubbed(2, 2, 3))
  expect_error(stubbed(1, 2, 3, c = 4), expected_msg)
  expect_error(stubbed(1, c = 4, 2, 3), expected_msg)
})

test_that('It does the right thing even when there are multiple expectations - withExactArgs.return/throw and default return', {
  simpf_stub <- stub(simpf)
  simpf_stub$withExactArgs(a = 1, b = 2, d = 3, c = 4)$returns(10)
  simpf_stub$withExactArgs(a = 2, b = 2, d = 3, c = 4)$throws('err')
  simpf_stub$returns('a')
  stubbed <- simpf_stub$f

  # Exact-arg expectations win over the default return.
  expect_equal(stubbed(1, 2, 3, c = 4), 10)
  expect_equal(stubbed(3, 2, 3, c = 4), 'a')
  expect_error(stubbed(2, 2, 3, c = 4), 'err')
  expect_equal(stubbed(3, 2, 3, c = 4), 'a')
  expect_error(stubbed(2, c = 4, 2, 3), 'err')
  expect_equal(stubbed(c = 4, 1, 2, 3), 10)
})

test_that('It does the right thing even when there are multiple expectations - withExactArgs.return/throw and default throw', {
  simpf_stub <- stub(simpf)
  simpf_stub$withExactArgs(a = 1, b = 2, d = 3, c = 4)$returns(10)
  simpf_stub$withExactArgs(a = 2, b = 2, d = 3, c = 4)$throws('err')
  simpf_stub$throws('pqrs')
  stubbed <- simpf_stub$f

  # Exact-arg expectations win over the default throw.
  expect_equal(stubbed(1, 2, 3, c = 4), 10)
  expect_error(stubbed(3, 2, 3, c = 4), 'pqrs')
  expect_error(stubbed(2, 2, 3, c = 4), 'err')
  expect_error(stubbed(c = 4, 3, 2, 3), 'pqrs')
  expect_equal(stubbed(1, 2, 3, c = 4), 10)
  expect_error(stubbed(2, 2, 3, c = 4), 'err')
})
/stubthat/tests/testthat/test-with-exact-args.r
no_license
ingted/R-Examples
R
false
false
2,317
r
library(testthat)

# Target function used for stubbing throughout this file; always returns 5.
simpf <- function(a = 1, b, d, ...) return(5)

not_expected_error <- 'Function is called with arguments different from expected!'

test_that('Returns the specified value if called with the exact arguments specified', {
  simpf_stub <- stub(simpf)
  simpf_stub$withExactArgs(a = 1, b = 2, d = 3, c = 4)$returns(10)
  stubbed <- simpf_stub$f

  # Exact match (in any argument order) returns the configured value;
  # anything else falls through to the default NULL.
  expect_equal(stubbed(1, 2, 3, c = 4), 10)
  expect_null(stubbed(2, 2, 3))
  expect_equal(stubbed(1, 2, 3, c = 4), 10)
  expect_equal(stubbed(1, 2, c = 4, 3), 10)
})

test_that('Throws error with specified message if called with the exact arguments specified', {
  simpf_stub <- stub(simpf)
  expected_msg <- 'error is good'
  simpf_stub$withExactArgs(a = 1, b = 2, d = 3, c = 4)$throws(expected_msg)
  stubbed <- simpf_stub$f

  expect_error(stubbed(1, 2, 3, c = 4), expected_msg)
  expect_null(stubbed(2, 2, 3))
  expect_error(stubbed(1, 2, 3, c = 4), expected_msg)
  expect_error(stubbed(1, c = 4, 2, 3), expected_msg)
})

test_that('It does the right thing even when there are multiple expectations - withExactArgs.return/throw and default return', {
  simpf_stub <- stub(simpf)
  simpf_stub$withExactArgs(a = 1, b = 2, d = 3, c = 4)$returns(10)
  simpf_stub$withExactArgs(a = 2, b = 2, d = 3, c = 4)$throws('err')
  simpf_stub$returns('a')
  stubbed <- simpf_stub$f

  # Exact-arg expectations win over the default return.
  expect_equal(stubbed(1, 2, 3, c = 4), 10)
  expect_equal(stubbed(3, 2, 3, c = 4), 'a')
  expect_error(stubbed(2, 2, 3, c = 4), 'err')
  expect_equal(stubbed(3, 2, 3, c = 4), 'a')
  expect_error(stubbed(2, c = 4, 2, 3), 'err')
  expect_equal(stubbed(c = 4, 1, 2, 3), 10)
})

test_that('It does the right thing even when there are multiple expectations - withExactArgs.return/throw and default throw', {
  simpf_stub <- stub(simpf)
  simpf_stub$withExactArgs(a = 1, b = 2, d = 3, c = 4)$returns(10)
  simpf_stub$withExactArgs(a = 2, b = 2, d = 3, c = 4)$throws('err')
  simpf_stub$throws('pqrs')
  stubbed <- simpf_stub$f

  # Exact-arg expectations win over the default throw.
  expect_equal(stubbed(1, 2, 3, c = 4), 10)
  expect_error(stubbed(3, 2, 3, c = 4), 'pqrs')
  expect_error(stubbed(2, 2, 3, c = 4), 'err')
  expect_error(stubbed(c = 4, 3, 2, 3), 'pqrs')
  expect_equal(stubbed(1, 2, 3, c = 4), 10)
  expect_error(stubbed(2, 2, 3, c = 4), 'err')
})
rm(list = ls())  # NOTE(review): wiping the workspace in a script is discouraged; kept to preserve the script's behaviour

library(gtalibrary)
library(stringr)
library(tidyverse)

gta_setwd()

source('0 report production/GTA 25/help files/Producer console.R')

## Chapter setup: creates/wipes the data and figure folders for this panel.
directories <- gta25_setup(internal.name = "Sectoral chapters panel 1",
                           in.dev = FALSE,
                           author = NULL,
                           wipe.data = TRUE,
                           wipe.figs = TRUE)
data.path <- directories$data.path

run.calc <- TRUE           # set to FALSE to only reload the cached .Rdata files
trade.data.year <- "base"

# Figure 1 data prep ------------------------------------------------------

# Chart 1: Line graph showing share of total sectoral trade
# affected by discriminatory (red) and liberalising (green)
# measures. X-axis should divided into pre-populist era (2009-2016)
# and populist era (2017-2019), by shading the populist era plot
# background. Add two dots (green and red) in 2019 showing the
# share of global trade affected by discriminatory and liberalising measures.

# Calculate trade affected by sectors - harmful ---------------------------
if (run.calc) {
  sct.cov.harmful <- data.frame()
  for (sct in sectors) {
    codes <- gta_cpc_code_expand(codes = sct)
    gta_trade_coverage(gta.evaluation = c("Red", "Amber"),
                       cpc.sectors = codes,
                       keep.cpc = TRUE,
                       implementation.period = c(NA, cutoff),
                       trade.data = trade.data.year)
    # Columns 4:14 of the coverage estimates hold the 2009-2019 shares.
    sct.cov.harmful <- rbind(sct.cov.harmful,
                             data.frame(sector = sct,
                                        year = 2009:2019,
                                        coverages = as.numeric(trade.coverage.estimates[1, 4:14])))
    rm(trade.coverage.estimates)
  }
  save(sct.cov.harmful, file = paste0(data.path, "Sector coverages harmful.Rdata"))
}
load(paste0(data.path, "Sector coverages harmful.Rdata"))

# Calculate trade affected by sectors - liberalising ----------------------
if (run.calc) {
  sct.cov.liberalising <- data.frame()
  for (sct in sectors) {
    codes <- gta_cpc_code_expand(codes = sct)
    gta_trade_coverage(gta.evaluation = c("Green"),
                       cpc.sectors = codes,
                       keep.cpc = TRUE,
                       implementation.period = c(NA, cutoff),
                       trade.data = trade.data.year)
    sct.cov.liberalising <- rbind(sct.cov.liberalising,
                                  data.frame(sector = sct,
                                             year = 2009:2019,
                                             coverages = as.numeric(trade.coverage.estimates[1, 4:14])))
    rm(trade.coverage.estimates)
  }
  save(sct.cov.liberalising, file = paste0(data.path, "Sector coverages liberalising.Rdata"))
}
load(paste0(data.path, "Sector coverages liberalising.Rdata"))

# Calculate global trade affected - harmful -------------------------------
if (run.calc) {
  gta_trade_coverage(gta.evaluation = c("Red", "Amber"),
                     coverage.period = c(2019, 2019),
                     implementation.period = c(NA, cutoff),
                     trade.data = trade.data.year)
  glo.cov.harmful <- trade.coverage.estimates[, ncol(trade.coverage.estimates)]
  save(glo.cov.harmful, file = paste0(data.path, "Global coverage harmful 2019.Rdata"))
}
load(paste0(data.path, "Global coverage harmful 2019.Rdata"))

# Calculate global trade affected - liberalising --------------------------
if (run.calc) {
  gta_trade_coverage(gta.evaluation = c("Green"),
                     coverage.period = c(2019, 2019),
                     implementation.period = c(NA, cutoff),
                     trade.data = trade.data.year)
  glo.cov.liberalising <- trade.coverage.estimates[, ncol(trade.coverage.estimates)]
  save(glo.cov.liberalising, file = paste0(data.path, "Global coverage liberalising 2019.Rdata"))
}
load(paste0(data.path, "Global coverage liberalising 2019.Rdata"))

# Figure 2 data prep ------------------------------------------------------

# Chart 2: Line graph showing share of total sectoral trade affected
# by (1) all discriminatory measures, (2) export incentives
# (3) All except export incentives, (4) subsidies and (5) tariff
# incentives. Add shading as before for populist, pre-populist era.
# (1) is calculated in fig1 already

# Trade affected by intervention types ------------------------------------
if (run.calc) {
  chapters <- list(c("P"),
                   c(as.character(unique(gtalibrary::int.mast.types$mast.chapter.id[!gtalibrary::int.mast.types$mast.chapter.id %in% c("P")]))),
                   c("L"),
                   c("TARIFF"))
  type.names <- c("P", "All except P", "L", "TARIFF")

  sct.cov.types.harmful <- data.frame()
  for (sct in sectors) {
    for (mst in seq_along(chapters)) {
      codes <- gta_cpc_code_expand(codes = sct)
      gta_trade_coverage(gta.evaluation = c("Red", "Amber"),
                         mast.chapters = chapters[[mst]],
                         keep.mast = TRUE,
                         group.mast = TRUE,
                         cpc.sectors = codes,
                         keep.cpc = TRUE,
                         implementation.period = c(NA, cutoff),
                         trade.data = trade.data.year)
      sct.cov.types.harmful <- rbind(sct.cov.types.harmful,
                                     data.frame(sector = sct,
                                                year = 2009:2019,
                                                coverages = as.numeric(trade.coverage.estimates[1, 4:14]),
                                                type = type.names[mst]))
      rm(trade.coverage.estimates)
    }
  }
  save(sct.cov.types.harmful, file = paste0(data.path, "Sector coverages types harmful.Rdata"))
}
load(paste0(data.path, "Sector coverages types harmful.Rdata"))

# Figure 3 data prep ------------------------------------------------------

# Chart 3: Line graph showing share of total sectoral trade
# affected by (1) all liberalising measures, (2) subsidy reductions
# and (3) tariff reductions. Add shading for populist, pre-populist era.
# (1) is calculated in fig1 already
if (run.calc) {
  chapters <- list(c("L"), c("TARIFF"))
  type.names <- c("L", "TARIFF")

  sct.cov.types.liberalising <- data.frame()
  for (sct in sectors) {
    for (mst in seq_along(chapters)) {
      codes <- gta_cpc_code_expand(codes = sct)
      gta_trade_coverage(gta.evaluation = c("Green"),
                         mast.chapters = chapters[[mst]],
                         keep.mast = TRUE,
                         group.mast = TRUE,
                         cpc.sectors = codes,
                         keep.cpc = TRUE,
                         implementation.period = c(NA, cutoff),
                         trade.data = trade.data.year)
      sct.cov.types.liberalising <- rbind(sct.cov.types.liberalising,
                                          data.frame(sector = sct,
                                                     year = 2009:2019,
                                                     coverages = as.numeric(trade.coverage.estimates[1, 4:14]),
                                                     type = type.names[mst]))
      rm(trade.coverage.estimates)
    }
  }
  save(sct.cov.types.liberalising, file = paste0(data.path, "Sector coverages types liberalising.Rdata"))
}
load(paste0(data.path, "Sector coverages types liberalising.Rdata"))

# Figure 4 data prep ------------------------------------------------------

# Chart 4: Stacked bar chart (maybe there's a better way of visualising it?) showing
# share of sectoral trade affected by harmful interventions hitting 1, 2, 3-5, 6-10 and 11+
# trading partners. Add populist and pre-populist era shading.
if (run.calc) {
  sct.cov.hit.brkts <- data.frame()
  for (sct in sectors) {
    codes <- gta_cpc_code_expand(codes = sct)
    gta_trade_coverage(gta.evaluation = c("Red", "Amber"),
                       cpc.sectors = codes,
                       keep.cpc = TRUE,
                       hit.brackets = c(1, 1, 2, 2, 3, 5, 6, 10, 11, 9999999),
                       implementation.period = c(NA, cutoff),
                       trade.data = trade.data.year)
    names(trade.coverage.estimates) <- c("importer", "exporter", "hits", 2009:2019)
    temp <- trade.coverage.estimates[, c("hits", 2009:2019)]
    temp <- pivot_longer(data = temp,
                         cols = c(2:ncol(temp)),
                         names_to = "year",
                         values_to = "coverages")
    sct.cov.hit.brkts <- rbind(sct.cov.hit.brkts,
                               data.frame(sector = sct,
                                          year = temp$year,
                                          coverages = temp$coverages,
                                          hits = temp$hits))
    rm(trade.coverage.estimates, temp)
  }
  # Single-value brackets are labelled "1 - 1"/"2 - 2"; relabel for plotting.
  sct.cov.hit.brkts$hits <- as.character(sct.cov.hit.brkts$hits)
  sct.cov.hit.brkts$hits[sct.cov.hit.brkts$hits == "1 - 1"] <- "1"
  sct.cov.hit.brkts$hits[sct.cov.hit.brkts$hits == "2 - 2"] <- "2"
  save(sct.cov.hit.brkts, file = paste0(data.path, "Sector coverages hit brackets.Rdata"))
}
load(paste0(data.path, "Sector coverages hit brackets.Rdata"))
/code/Sectoral chapters panel 1/code/0 Data prep.R
no_license
global-trade-alert/gta-25
R
false
false
9,300
r
rm(list = ls())  # NOTE(review): wiping the workspace in a script is discouraged; kept to preserve the script's behaviour

library(gtalibrary)
library(stringr)
library(tidyverse)

gta_setwd()

source('0 report production/GTA 25/help files/Producer console.R')

## Chapter setup: creates/wipes the data and figure folders for this panel.
directories <- gta25_setup(internal.name = "Sectoral chapters panel 1",
                           in.dev = FALSE,
                           author = NULL,
                           wipe.data = TRUE,
                           wipe.figs = TRUE)
data.path <- directories$data.path

run.calc <- TRUE           # set to FALSE to only reload the cached .Rdata files
trade.data.year <- "base"

# Figure 1 data prep ------------------------------------------------------

# Chart 1: Line graph showing share of total sectoral trade
# affected by discriminatory (red) and liberalising (green)
# measures. X-axis should divided into pre-populist era (2009-2016)
# and populist era (2017-2019), by shading the populist era plot
# background. Add two dots (green and red) in 2019 showing the
# share of global trade affected by discriminatory and liberalising measures.

# Calculate trade affected by sectors - harmful ---------------------------
if (run.calc) {
  sct.cov.harmful <- data.frame()
  for (sct in sectors) {
    codes <- gta_cpc_code_expand(codes = sct)
    gta_trade_coverage(gta.evaluation = c("Red", "Amber"),
                       cpc.sectors = codes,
                       keep.cpc = TRUE,
                       implementation.period = c(NA, cutoff),
                       trade.data = trade.data.year)
    # Columns 4:14 of the coverage estimates hold the 2009-2019 shares.
    sct.cov.harmful <- rbind(sct.cov.harmful,
                             data.frame(sector = sct,
                                        year = 2009:2019,
                                        coverages = as.numeric(trade.coverage.estimates[1, 4:14])))
    rm(trade.coverage.estimates)
  }
  save(sct.cov.harmful, file = paste0(data.path, "Sector coverages harmful.Rdata"))
}
load(paste0(data.path, "Sector coverages harmful.Rdata"))

# Calculate trade affected by sectors - liberalising ----------------------
if (run.calc) {
  sct.cov.liberalising <- data.frame()
  for (sct in sectors) {
    codes <- gta_cpc_code_expand(codes = sct)
    gta_trade_coverage(gta.evaluation = c("Green"),
                       cpc.sectors = codes,
                       keep.cpc = TRUE,
                       implementation.period = c(NA, cutoff),
                       trade.data = trade.data.year)
    sct.cov.liberalising <- rbind(sct.cov.liberalising,
                                  data.frame(sector = sct,
                                             year = 2009:2019,
                                             coverages = as.numeric(trade.coverage.estimates[1, 4:14])))
    rm(trade.coverage.estimates)
  }
  save(sct.cov.liberalising, file = paste0(data.path, "Sector coverages liberalising.Rdata"))
}
load(paste0(data.path, "Sector coverages liberalising.Rdata"))

# Calculate global trade affected - harmful -------------------------------
if (run.calc) {
  gta_trade_coverage(gta.evaluation = c("Red", "Amber"),
                     coverage.period = c(2019, 2019),
                     implementation.period = c(NA, cutoff),
                     trade.data = trade.data.year)
  glo.cov.harmful <- trade.coverage.estimates[, ncol(trade.coverage.estimates)]
  save(glo.cov.harmful, file = paste0(data.path, "Global coverage harmful 2019.Rdata"))
}
load(paste0(data.path, "Global coverage harmful 2019.Rdata"))

# Calculate global trade affected - liberalising --------------------------
if (run.calc) {
  gta_trade_coverage(gta.evaluation = c("Green"),
                     coverage.period = c(2019, 2019),
                     implementation.period = c(NA, cutoff),
                     trade.data = trade.data.year)
  glo.cov.liberalising <- trade.coverage.estimates[, ncol(trade.coverage.estimates)]
  save(glo.cov.liberalising, file = paste0(data.path, "Global coverage liberalising 2019.Rdata"))
}
load(paste0(data.path, "Global coverage liberalising 2019.Rdata"))

# Figure 2 data prep ------------------------------------------------------

# Chart 2: Line graph showing share of total sectoral trade affected
# by (1) all discriminatory measures, (2) export incentives
# (3) All except export incentives, (4) subsidies and (5) tariff
# incentives. Add shading as before for populist, pre-populist era.
# (1) is calculated in fig1 already

# Trade affected by intervention types ------------------------------------
if (run.calc) {
  chapters <- list(c("P"),
                   c(as.character(unique(gtalibrary::int.mast.types$mast.chapter.id[!gtalibrary::int.mast.types$mast.chapter.id %in% c("P")]))),
                   c("L"),
                   c("TARIFF"))
  type.names <- c("P", "All except P", "L", "TARIFF")

  sct.cov.types.harmful <- data.frame()
  for (sct in sectors) {
    for (mst in seq_along(chapters)) {
      codes <- gta_cpc_code_expand(codes = sct)
      gta_trade_coverage(gta.evaluation = c("Red", "Amber"),
                         mast.chapters = chapters[[mst]],
                         keep.mast = TRUE,
                         group.mast = TRUE,
                         cpc.sectors = codes,
                         keep.cpc = TRUE,
                         implementation.period = c(NA, cutoff),
                         trade.data = trade.data.year)
      sct.cov.types.harmful <- rbind(sct.cov.types.harmful,
                                     data.frame(sector = sct,
                                                year = 2009:2019,
                                                coverages = as.numeric(trade.coverage.estimates[1, 4:14]),
                                                type = type.names[mst]))
      rm(trade.coverage.estimates)
    }
  }
  save(sct.cov.types.harmful, file = paste0(data.path, "Sector coverages types harmful.Rdata"))
}
load(paste0(data.path, "Sector coverages types harmful.Rdata"))

# Figure 3 data prep ------------------------------------------------------

# Chart 3: Line graph showing share of total sectoral trade
# affected by (1) all liberalising measures, (2) subsidy reductions
# and (3) tariff reductions. Add shading for populist, pre-populist era.
# (1) is calculated in fig1 already
if (run.calc) {
  chapters <- list(c("L"), c("TARIFF"))
  type.names <- c("L", "TARIFF")

  sct.cov.types.liberalising <- data.frame()
  for (sct in sectors) {
    for (mst in seq_along(chapters)) {
      codes <- gta_cpc_code_expand(codes = sct)
      gta_trade_coverage(gta.evaluation = c("Green"),
                         mast.chapters = chapters[[mst]],
                         keep.mast = TRUE,
                         group.mast = TRUE,
                         cpc.sectors = codes,
                         keep.cpc = TRUE,
                         implementation.period = c(NA, cutoff),
                         trade.data = trade.data.year)
      sct.cov.types.liberalising <- rbind(sct.cov.types.liberalising,
                                          data.frame(sector = sct,
                                                     year = 2009:2019,
                                                     coverages = as.numeric(trade.coverage.estimates[1, 4:14]),
                                                     type = type.names[mst]))
      rm(trade.coverage.estimates)
    }
  }
  save(sct.cov.types.liberalising, file = paste0(data.path, "Sector coverages types liberalising.Rdata"))
}
load(paste0(data.path, "Sector coverages types liberalising.Rdata"))

# Figure 4 data prep ------------------------------------------------------

# Chart 4: Stacked bar chart (maybe there's a better way of visualising it?) showing
# share of sectoral trade affected by harmful interventions hitting 1, 2, 3-5, 6-10 and 11+
# trading partners. Add populist and pre-populist era shading.
if (run.calc) {
  sct.cov.hit.brkts <- data.frame()
  for (sct in sectors) {
    codes <- gta_cpc_code_expand(codes = sct)
    gta_trade_coverage(gta.evaluation = c("Red", "Amber"),
                       cpc.sectors = codes,
                       keep.cpc = TRUE,
                       hit.brackets = c(1, 1, 2, 2, 3, 5, 6, 10, 11, 9999999),
                       implementation.period = c(NA, cutoff),
                       trade.data = trade.data.year)
    names(trade.coverage.estimates) <- c("importer", "exporter", "hits", 2009:2019)
    temp <- trade.coverage.estimates[, c("hits", 2009:2019)]
    temp <- pivot_longer(data = temp,
                         cols = c(2:ncol(temp)),
                         names_to = "year",
                         values_to = "coverages")
    sct.cov.hit.brkts <- rbind(sct.cov.hit.brkts,
                               data.frame(sector = sct,
                                          year = temp$year,
                                          coverages = temp$coverages,
                                          hits = temp$hits))
    rm(trade.coverage.estimates, temp)
  }
  # Single-value brackets are labelled "1 - 1"/"2 - 2"; relabel for plotting.
  sct.cov.hit.brkts$hits <- as.character(sct.cov.hit.brkts$hits)
  sct.cov.hit.brkts$hits[sct.cov.hit.brkts$hits == "1 - 1"] <- "1"
  sct.cov.hit.brkts$hits[sct.cov.hit.brkts$hits == "2 - 2"] <- "2"
  save(sct.cov.hit.brkts, file = paste0(data.path, "Sector coverages hit brackets.Rdata"))
}
load(paste0(data.path, "Sector coverages hit brackets.Rdata"))
#' @import stats
NULL
###################################################################################
#' Regression-based DID estimator with Panel Data
#' Regression-based Difference-in-Differences Estimator for the ATT, with Panel Data
#'
#' @param y1 An \eqn{n} x \eqn{1} vector of outcomes from the post-treatment period.
#' @param y0 An \eqn{n} x \eqn{1} vector of outcomes from the pre-treatment period.
#' @param D An \eqn{n} x \eqn{1} vector of Group indicators (=1 if observation is treated in the post-treatment, =0 otherwise).
#' @param covariates An \eqn{n} x \eqn{k} matrix of covariates to be used in the regression estimation
#' @param i.weights An \eqn{n} x \eqn{1} vector of weights to be used. If NULL, then every observation has the same weights.
#' @param boot Logical argument to whether bootstrap should be used for inference. Default is FALSE.
#' @param boot.type Type of bootstrap to be performed (not relevant if boot = FALSE). Options are "weighted" and "multiplier".
#'  If boot==TRUE, default is "weighted".
#' @param nboot Number of bootstrap repetitions (not relevant if boot = FALSE). Default is 999 if boot = TRUE.
#'
#' @return A list containing the following components:
#'  \item{ATT}{The Reg DID point estimate}
#'  \item{se}{The Reg DID standard error}
#'  \item{uci}{Estimate of the upper boundary of a 95\% CI for the ATT}
#'  \item{lci}{Estimate of the lower boundary of a 95\% CI for the ATT}
#'  \item{boots}{All Bootstrap draws of the ATT, in case bootstrap was used to conduct inference. Default is NULL}
#'
#' @export
reg_did_panel <- function(y1, y0, D, covariates, i.weights = NULL,
                          boot = FALSE, boot.type = "weighted", nboot = NULL) {
  #-----------------------------------------------------------------------------
  # D as vector
  D <- as.vector(D)
  # Sample size
  n <- length(D)
  # generate deltaY (outcome change, post minus pre)
  deltaY <- as.vector(y1 - y0)
  # Add constant to covariate vector
  # int.cov <- as.matrix(cbind(1, covariates))
  int.cov <- covariates
  # Weights: default to uniform; negative weights are not allowed.
  if (is.null(i.weights)) {
    i.weights <- as.vector(rep(1, n))
  } else if (min(i.weights) < 0) stop("i.weights must be non-negative")
  #-----------------------------------------------------------------------------
  # Compute the outcome regression for the control group using OLS.
  reg.coeff <- stats::coef(stats::lm(deltaY ~ -1 + int.cov,
                                     subset = D == 0,
                                     weights = i.weights))
  out.delta <- as.vector(tcrossprod(reg.coeff, int.cov))
  #-----------------------------------------------------------------------------
  # Compute the OR-DID estimator.
  # Both components are averaged over the TREATED population (hence D in both
  # weights): ATT = E[deltaY | D=1] - E[fitted control deltaY | D=1].
  w.treat <- i.weights * D
  w.cont <- i.weights * D

  reg.att.treat <- w.treat * deltaY
  reg.att.cont <- w.cont * out.delta

  eta.treat <- mean(reg.att.treat) / mean(w.treat)
  eta.cont <- mean(reg.att.cont) / mean(w.cont)

  reg.att <- eta.treat - eta.cont
  #-----------------------------------------------------------------------------
  # Get the influence function to compute standard error
  #-----------------------------------------------------------------------------
  # Asymptotic linear representation of OLS parameters (controls only)
  weights.ols <- i.weights * (1 - D)
  wols.x <- weights.ols * int.cov
  wols.eX <- weights.ols * (deltaY - out.delta) * int.cov
  XpX.inv <- solve(crossprod(wols.x, int.cov) / n)
  asy.lin.rep.ols <- wols.eX %*% XpX.inv
  #-----------------------------------------------------------------------------
  # Influence function of the "treat" component (leading term only)
  inf.treat <- (reg.att.treat - w.treat * eta.treat) / mean(w.treat)
  #-----------------------------------------------------------------------------
  # Influence function of the control component.
  # Leading term: no estimation effect.
  inf.cont.1 <- (reg.att.cont - w.cont * eta.cont)
  # Estimation effect from beta hat (OLS using only controls).
  # Derivative matrix (k x 1 vector)
  M1 <- base::colMeans(w.cont * int.cov)
  # Influence function related to the estimation effect of the betas
  inf.cont.2 <- asy.lin.rep.ols %*% M1
  # Influence function for the control component
  inf.control <- (inf.cont.1 + inf.cont.2) / mean(w.cont)
  #-----------------------------------------------------------------------------
  # Influence function of the estimator (all pieces together)
  reg.att.inf.func <- (inf.treat - inf.control)
  #-----------------------------------------------------------------------------
  if (!boot) {
    # Analytic standard error and symmetric 95% CI
    se.reg.att <- stats::sd(reg.att.inf.func) / sqrt(n)
    uci <- reg.att + 1.96 * se.reg.att
    lci <- reg.att - 1.96 * se.reg.att
    # NULL so the bootstrap-draws slot exists in the return value either way
    reg.boot <- NULL
  } else {
    if (is.null(nboot)) nboot <- 999
    if (boot.type == "multiplier") {
      # Multiplier bootstrap
      reg.boot <- mboot.did(reg.att.inf.func, nboot)
      # Bootstrap std error based on the IQR (robust to outlying draws)
      se.reg.att <- stats::IQR(reg.boot) / (stats::qnorm(0.75) - stats::qnorm(0.25))
      # Symmetric critical value
      cv <- stats::quantile(abs(reg.boot / se.reg.att), probs = 0.95)
      uci <- reg.att + cv * se.reg.att
      lci <- reg.att - cv * se.reg.att
    } else {
      # Weighted bootstrap
      reg.boot <- unlist(lapply(1:nboot, wboot.reg.panel,
                                n = n, deltaY = deltaY, D = D,
                                int.cov = int.cov, i.weights = i.weights))
      # Bootstrap std error based on the IQR of centred draws
      se.reg.att <- stats::IQR((reg.boot - reg.att)) / (stats::qnorm(0.75) - stats::qnorm(0.25))
      # Symmetric critical value
      cv <- stats::quantile(abs((reg.boot - reg.att) / se.reg.att), probs = 0.95)
      uci <- reg.att + cv * se.reg.att
      lci <- reg.att - cv * se.reg.att
    }
  }

  return(list(ATT = reg.att,
              se = se.reg.att,
              uci = uci,
              lci = lci,
              boots = reg.boot,
              inf.func = reg.att.inf.func))
}
/R/reg_did_panel.R
no_license
shizelong1985/DRDID
R
false
false
6,527
r
#' @import stats
NULL
###################################################################################
#' Regression-based DID estimator with Panel Data
#'
#' Regression-based Difference-in-Differences Estimator for the ATT, with Panel Data
#'
#' @param y1 An \eqn{n} x \eqn{1} vector of outcomes from the post-treatment period.
#' @param y0 An \eqn{n} x \eqn{1} vector of outcomes from the pre-treatment period.
#' @param D An \eqn{n} x \eqn{1} vector of Group indicators (=1 if observation is treated in the post-treatment, =0 otherwise).
#' @param covariates An \eqn{n} x \eqn{k} matrix of covariates to be used in the regression estimation
#' @param i.weights An \eqn{n} x \eqn{1} vector of weights to be used. If NULL, then every observation has the same weights.
#' @param boot Logical argument to whether bootstrap should be used for inference. Default is FALSE.
#' @param boot.type Type of bootstrap to be performed (not relevant if boot = FALSE). Options are "weighted" and "multiplier".
#' If boot = TRUE, default is "weighted".
#' @param nboot Number of bootstrap repetitions (not relevant if boot = FALSE). Default is 999 if boot = TRUE.
#'
#' @return A list containing the following components:
#' \item{ATT}{The Reg DID point estimate}
#' \item{se}{The Reg DID standard error}
#' \item{uci}{Estimate of the upper boundary of a 95\% CI for the ATT}
#' \item{lci}{Estimate of the lower boundary of a 95\% CI for the ATT}
#' \item{boots}{All Bootstrap draws of the ATT, in case bootstrap was used to conduct inference.
#' Default is NULL}
#'
#' @export
reg_did_panel <- function(y1, y0, D, covariates, i.weights = NULL,
                          boot = FALSE, boot.type = "weighted", nboot = NULL) {
  #-----------------------------------------------------------------------------
  # Treatment-group indicator as a plain vector
  D <- as.vector(D)
  # Sample size
  n <- length(D)
  # Outcome evolution from the pre- to the post-treatment period
  deltaY <- as.vector(y1 - y0)
  # Covariate matrix used in the outcome regression.
  # NOTE(review): no intercept is appended here (the cbind(1, .) line is
  # deliberately disabled); callers must include a constant column in
  # `covariates` if one is desired.
  # int.cov <- as.matrix(cbind(1, covariates))
  int.cov <- covariates
  # Sampling weights: default to uniform; negative weights are invalid
  if (is.null(i.weights)) {
    i.weights <- as.vector(rep(1, n))
  } else if (min(i.weights) < 0) {
    stop("i.weights must be non-negative")
  }
  #-----------------------------------------------------------------------------
  # Outcome regression for the control group using weighted OLS
  reg.coeff <- stats::coef(stats::lm(deltaY ~ -1 + int.cov,
                                     subset = D == 0,
                                     weights = i.weights))
  out.delta <- as.vector(tcrossprod(reg.coeff, int.cov))
  #-----------------------------------------------------------------------------
  # OR-DID estimator. Both components are weighted by the treated group:
  # ATT = E[deltaY | D = 1] - E[m(X) | D = 1]
  w.treat <- i.weights * D
  w.cont <- i.weights * D
  reg.att.treat <- w.treat * deltaY
  reg.att.cont <- w.cont * out.delta
  eta.treat <- mean(reg.att.treat) / mean(w.treat)
  eta.cont <- mean(reg.att.cont) / mean(w.cont)
  reg.att <- eta.treat - eta.cont
  #-----------------------------------------------------------------------------
  # Influence function used to compute standard errors
  # Asymptotic linear representation of the OLS parameters (controls only)
  weights.ols <- i.weights * (1 - D)
  wols.x <- weights.ols * int.cov
  wols.eX <- weights.ols * (deltaY - out.delta) * int.cov
  XpX.inv <- solve(crossprod(wols.x, int.cov) / n)
  asy.lin.rep.ols <- wols.eX %*% XpX.inv
  # Influence function of the "treat" component (leading term)
  inf.treat <- (reg.att.treat - w.treat * eta.treat) / mean(w.treat)
  # Influence function of the control component:
  # leading term (no estimation effect)
  inf.cont.1 <- (reg.att.cont - w.cont * eta.cont)
  # Estimation effect from beta hat (OLS using only controls);
  # derivative matrix (k x 1 vector)
  M1 <- base::colMeans(w.cont * int.cov)
  inf.cont.2 <- asy.lin.rep.ols %*% M1
  inf.control <- (inf.cont.1 + inf.cont.2) / mean(w.cont)
  #-----------------------------------------------------------------------------
  # Influence function of the regression DID estimator (all pieces together)
  reg.att.inf.func <- (inf.treat - inf.control)
  #-----------------------------------------------------------------------------
  if (!isTRUE(boot)) {
    # Analytic standard error and 95% confidence interval
    se.reg.att <- stats::sd(reg.att.inf.func) / sqrt(n)
    # Estimate of upper boundary of 95% CI
    uci <- reg.att + 1.96 * se.reg.att
    # Estimate of lower boundary of 95% CI
    lci <- reg.att - 1.96 * se.reg.att
    # Null placeholder so the bootstrap-draws slot is always present
    reg.boot <- NULL
  } else {
    if (is.null(nboot)) nboot <- 999
    if (boot.type == "multiplier") {
      # Multiplier bootstrap
      reg.boot <- mboot.did(reg.att.inf.func, nboot)
      # Bootstrap standard error based on the IQR
      se.reg.att <- stats::IQR(reg.boot) /
        (stats::qnorm(0.75) - stats::qnorm(0.25))
      # Symmetric critical values
      cv <- stats::quantile(abs(reg.boot / se.reg.att), probs = 0.95)
      uci <- reg.att + cv * se.reg.att
      lci <- reg.att - cv * se.reg.att
    } else {
      # Weighted bootstrap
      reg.boot <- unlist(lapply(seq_len(nboot), wboot.reg.panel,
                                n = n, deltaY = deltaY, D = D,
                                int.cov = int.cov, i.weights = i.weights))
      # Bootstrap standard error based on the IQR
      se.reg.att <- stats::IQR(reg.boot - reg.att) /
        (stats::qnorm(0.75) - stats::qnorm(0.25))
      # Symmetric critical values
      cv <- stats::quantile(abs((reg.boot - reg.att) / se.reg.att),
                            probs = 0.95)
      uci <- reg.att + cv * se.reg.att
      lci <- reg.att - cv * se.reg.att
    }
  }
  return(list(ATT = reg.att, se = se.reg.att, uci = uci, lci = lci,
              boots = reg.boot, inf.func = reg.att.inf.func))
}
#source: tlssunpic64.s #source: tlspic.s #as: --64 -Av9 -K PIC #ld: -shared -melf64_sparc #readelf: -WSsrl #target: sparc*-*-* There are [0-9]+ section headers, starting at offset 0x[0-9a-f]+: Section Headers: +\[Nr\] Name +Type +Address +Off +Size +ES Flg Lk Inf Al +\[[ 0-9]+\] +NULL +0+ 0+ 0+ 0+ +0 +0 +0 +\[[ 0-9]+\] .hash +.* +\[[ 0-9]+\] .dynsym +.* +\[[ 0-9]+\] .dynstr +.* +\[[ 0-9]+\] .rela.dyn +.* +\[[ 0-9]+\] .rela.plt +.* +\[[ 0-9]+\] .text +PROGBITS +0+1000 0+1000 0+1000 0+ +AX +0 +0 4096 +\[[ 0-9]+\] .tdata +PROGBITS +0+102000 0+2000 0+60 0+ WAT +0 +0 +4 +\[[ 0-9]+\] .tbss +NOBITS +0+102060 0+2060 0+20 0+ WAT +0 +0 +4 +\[[ 0-9]+\] .dynamic +DYNAMIC +0+102060 0+2060 0+130 10 +WA +3 +0 +8 +\[[ 0-9]+\] .got +PROGBITS +0+102190 0+2190 0+98 08 +WA +0 +0 +8 +\[[ 0-9]+\] .plt +.* +\[[ 0-9]+\] .symtab +.* +\[[ 0-9]+\] .strtab +.* +\[[ 0-9]+\] .shstrtab +.* #... Elf file type is DYN \(Shared object file\) Entry point 0x[0-9a-f]+ There are [0-9]+ program headers, starting at offset [0-9]+ Program Headers: +Type +Offset +VirtAddr +PhysAddr +FileSiz +MemSiz +Flg Align +LOAD +0x0+ 0x0+ 0x0+ 0x0+2000 0x0+2000 R E 0x100000 +LOAD +0x0+2000 0x0+102000 0x0+102000 0x0+3a0 0x0+3a0 RWE 0x100000 +DYNAMIC +0x0+2060 0x0+102060 0x0+102060 0x0+130 0x0+130 RW +0x8 +TLS +0x0+2000 0x0+102000 0x0+102000 0x0+60 0x0+80 R +0x4 #... 
Relocation section '.rela.dyn' at offset 0x[0-9a-f]+ contains 14 entries: +Offset +Info +Type +Symbol's Value +Symbol's Name \+ Addend [0-9a-f ]+R_SPARC_TLS_DTPMOD64 +0 [0-9a-f ]+R_SPARC_TLS_TPOFF64 +24 [0-9a-f ]+R_SPARC_TLS_TPOFF64 +30 [0-9a-f ]+R_SPARC_TLS_DTPMOD64 +0 [0-9a-f ]+R_SPARC_TLS_DTPMOD64 +0 [0-9a-f ]+R_SPARC_TLS_TPOFF64 +64 [0-9a-f ]+R_SPARC_TLS_TPOFF64 +50 [0-9a-f ]+R_SPARC_TLS_TPOFF64 +70 [0-9a-f ]+R_SPARC_TLS_DTPMOD64 +0 [0-9a-f ]+R_SPARC_TLS_TPOFF64 +44 [0-9a-f ]+R_SPARC_TLS_TPOFF64 +0+10 +sg5 \+ 0 [0-9a-f ]+R_SPARC_TLS_DTPMOD64 +0+ +sg1 \+ 0 [0-9a-f ]+R_SPARC_TLS_DTPOFF64 +0+ +sg1 \+ 0 [0-9a-f ]+R_SPARC_TLS_TPOFF64 +0+4 +sg2 \+ 0 Relocation section '.rela.plt' at offset 0x[0-9a-f]+ contains 1 entry: +Offset +Info +Type +Symbol's Value +Symbol's Name \+ Addend [0-9a-f ]+R_SPARC_JMP_SLOT +0+ +__tls_get_addr \+ 0 Symbol table '\.dynsym' contains [0-9]+ entries: +Num: +Value +Size +Type +Bind +Vis +Ndx +Name .* NOTYPE +LOCAL +DEFAULT +UND * .* SECTION +LOCAL +DEFAULT +6.* .* SECTION +LOCAL +DEFAULT +10.* .* TLS +GLOBAL +DEFAULT +7 sg8 .* TLS +GLOBAL +DEFAULT +7 sg3 .* TLS +GLOBAL +DEFAULT +7 sg4 .* TLS +GLOBAL +DEFAULT +7 sg5 .* NOTYPE +GLOBAL +DEFAULT +UND __tls_get_addr .* TLS +GLOBAL +DEFAULT +7 sg1 .* FUNC +GLOBAL +DEFAULT +6 fn1 .* TLS +GLOBAL +DEFAULT +7 sg2 .* TLS +GLOBAL +DEFAULT +7 sg6 .* TLS +GLOBAL +DEFAULT +7 sg7 Symbol table '\.symtab' contains [0-9]+ entries: +Num: +Value +Size +Type +Bind +Vis +Ndx +Name .* NOTYPE +LOCAL +DEFAULT +UND * .* SECTION +LOCAL +DEFAULT +1.* .* SECTION +LOCAL +DEFAULT +2.* .* SECTION +LOCAL +DEFAULT +3.* .* SECTION +LOCAL +DEFAULT +4.* .* SECTION +LOCAL +DEFAULT +5.* .* SECTION +LOCAL +DEFAULT +6.* .* SECTION +LOCAL +DEFAULT +7.* .* SECTION +LOCAL +DEFAULT +8.* .* SECTION +LOCAL +DEFAULT +9.* .* SECTION +LOCAL +DEFAULT +10.* .* SECTION +LOCAL +DEFAULT +11.* .* FILE +LOCAL +DEFAULT +ABS .* .* TLS +LOCAL +DEFAULT +7 sl1 .* TLS +LOCAL +DEFAULT +7 sl2 .* TLS +LOCAL +DEFAULT +7 sl3 .* TLS +LOCAL +DEFAULT +7 sl4 .* 
TLS +LOCAL +DEFAULT +7 sl5 .* TLS +LOCAL +DEFAULT +7 sl6 .* TLS +LOCAL +DEFAULT +7 sl7 .* TLS +LOCAL +DEFAULT +7 sl8 .* FILE +LOCAL +DEFAULT +ABS .* TLS +LOCAL +DEFAULT +8 sH1 .* OBJECT +LOCAL +DEFAULT +ABS _DYNAMIC .* TLS +LOCAL +DEFAULT +7 sh3 .* TLS +LOCAL +DEFAULT +8 sH2 .* TLS +LOCAL +DEFAULT +8 sH7 .* OBJECT +LOCAL +DEFAULT +ABS _PROCEDURE_LINKAGE_TABLE_ .* TLS +LOCAL +DEFAULT +7 sh7 .* TLS +LOCAL +DEFAULT +7 sh8 .* TLS +LOCAL +DEFAULT +8 sH4 .* TLS +LOCAL +DEFAULT +7 sh4 .* TLS +LOCAL +DEFAULT +8 sH3 .* TLS +LOCAL +DEFAULT +7 sh5 .* TLS +LOCAL +DEFAULT +8 sH5 .* TLS +LOCAL +DEFAULT +8 sH6 .* TLS +LOCAL +DEFAULT +8 sH8 .* TLS +LOCAL +DEFAULT +7 sh1 .* OBJECT +LOCAL +DEFAULT +ABS _GLOBAL_OFFSET_TABLE_ .* TLS +LOCAL +DEFAULT +7 sh2 .* TLS +LOCAL +DEFAULT +7 sh6 .* TLS +GLOBAL +DEFAULT +7 sg8 .* TLS +GLOBAL +DEFAULT +7 sg3 .* TLS +GLOBAL +DEFAULT +7 sg4 .* TLS +GLOBAL +DEFAULT +7 sg5 .* NOTYPE +GLOBAL +DEFAULT +UND __tls_get_addr .* TLS +GLOBAL +DEFAULT +7 sg1 .* FUNC +GLOBAL +DEFAULT +6 fn1 .* TLS +GLOBAL +DEFAULT +7 sg2 .* TLS +GLOBAL +DEFAULT +7 sg6 .* TLS +GLOBAL +DEFAULT +7 sg7
/external/binutils-2.38/ld/testsuite/ld-sparc/tlssunpic64.rd
permissive
zhmu/ananas
R
false
false
4,468
rd
#source: tlssunpic64.s #source: tlspic.s #as: --64 -Av9 -K PIC #ld: -shared -melf64_sparc #readelf: -WSsrl #target: sparc*-*-* There are [0-9]+ section headers, starting at offset 0x[0-9a-f]+: Section Headers: +\[Nr\] Name +Type +Address +Off +Size +ES Flg Lk Inf Al +\[[ 0-9]+\] +NULL +0+ 0+ 0+ 0+ +0 +0 +0 +\[[ 0-9]+\] .hash +.* +\[[ 0-9]+\] .dynsym +.* +\[[ 0-9]+\] .dynstr +.* +\[[ 0-9]+\] .rela.dyn +.* +\[[ 0-9]+\] .rela.plt +.* +\[[ 0-9]+\] .text +PROGBITS +0+1000 0+1000 0+1000 0+ +AX +0 +0 4096 +\[[ 0-9]+\] .tdata +PROGBITS +0+102000 0+2000 0+60 0+ WAT +0 +0 +4 +\[[ 0-9]+\] .tbss +NOBITS +0+102060 0+2060 0+20 0+ WAT +0 +0 +4 +\[[ 0-9]+\] .dynamic +DYNAMIC +0+102060 0+2060 0+130 10 +WA +3 +0 +8 +\[[ 0-9]+\] .got +PROGBITS +0+102190 0+2190 0+98 08 +WA +0 +0 +8 +\[[ 0-9]+\] .plt +.* +\[[ 0-9]+\] .symtab +.* +\[[ 0-9]+\] .strtab +.* +\[[ 0-9]+\] .shstrtab +.* #... Elf file type is DYN \(Shared object file\) Entry point 0x[0-9a-f]+ There are [0-9]+ program headers, starting at offset [0-9]+ Program Headers: +Type +Offset +VirtAddr +PhysAddr +FileSiz +MemSiz +Flg Align +LOAD +0x0+ 0x0+ 0x0+ 0x0+2000 0x0+2000 R E 0x100000 +LOAD +0x0+2000 0x0+102000 0x0+102000 0x0+3a0 0x0+3a0 RWE 0x100000 +DYNAMIC +0x0+2060 0x0+102060 0x0+102060 0x0+130 0x0+130 RW +0x8 +TLS +0x0+2000 0x0+102000 0x0+102000 0x0+60 0x0+80 R +0x4 #... 
Relocation section '.rela.dyn' at offset 0x[0-9a-f]+ contains 14 entries: +Offset +Info +Type +Symbol's Value +Symbol's Name \+ Addend [0-9a-f ]+R_SPARC_TLS_DTPMOD64 +0 [0-9a-f ]+R_SPARC_TLS_TPOFF64 +24 [0-9a-f ]+R_SPARC_TLS_TPOFF64 +30 [0-9a-f ]+R_SPARC_TLS_DTPMOD64 +0 [0-9a-f ]+R_SPARC_TLS_DTPMOD64 +0 [0-9a-f ]+R_SPARC_TLS_TPOFF64 +64 [0-9a-f ]+R_SPARC_TLS_TPOFF64 +50 [0-9a-f ]+R_SPARC_TLS_TPOFF64 +70 [0-9a-f ]+R_SPARC_TLS_DTPMOD64 +0 [0-9a-f ]+R_SPARC_TLS_TPOFF64 +44 [0-9a-f ]+R_SPARC_TLS_TPOFF64 +0+10 +sg5 \+ 0 [0-9a-f ]+R_SPARC_TLS_DTPMOD64 +0+ +sg1 \+ 0 [0-9a-f ]+R_SPARC_TLS_DTPOFF64 +0+ +sg1 \+ 0 [0-9a-f ]+R_SPARC_TLS_TPOFF64 +0+4 +sg2 \+ 0 Relocation section '.rela.plt' at offset 0x[0-9a-f]+ contains 1 entry: +Offset +Info +Type +Symbol's Value +Symbol's Name \+ Addend [0-9a-f ]+R_SPARC_JMP_SLOT +0+ +__tls_get_addr \+ 0 Symbol table '\.dynsym' contains [0-9]+ entries: +Num: +Value +Size +Type +Bind +Vis +Ndx +Name .* NOTYPE +LOCAL +DEFAULT +UND * .* SECTION +LOCAL +DEFAULT +6.* .* SECTION +LOCAL +DEFAULT +10.* .* TLS +GLOBAL +DEFAULT +7 sg8 .* TLS +GLOBAL +DEFAULT +7 sg3 .* TLS +GLOBAL +DEFAULT +7 sg4 .* TLS +GLOBAL +DEFAULT +7 sg5 .* NOTYPE +GLOBAL +DEFAULT +UND __tls_get_addr .* TLS +GLOBAL +DEFAULT +7 sg1 .* FUNC +GLOBAL +DEFAULT +6 fn1 .* TLS +GLOBAL +DEFAULT +7 sg2 .* TLS +GLOBAL +DEFAULT +7 sg6 .* TLS +GLOBAL +DEFAULT +7 sg7 Symbol table '\.symtab' contains [0-9]+ entries: +Num: +Value +Size +Type +Bind +Vis +Ndx +Name .* NOTYPE +LOCAL +DEFAULT +UND * .* SECTION +LOCAL +DEFAULT +1.* .* SECTION +LOCAL +DEFAULT +2.* .* SECTION +LOCAL +DEFAULT +3.* .* SECTION +LOCAL +DEFAULT +4.* .* SECTION +LOCAL +DEFAULT +5.* .* SECTION +LOCAL +DEFAULT +6.* .* SECTION +LOCAL +DEFAULT +7.* .* SECTION +LOCAL +DEFAULT +8.* .* SECTION +LOCAL +DEFAULT +9.* .* SECTION +LOCAL +DEFAULT +10.* .* SECTION +LOCAL +DEFAULT +11.* .* FILE +LOCAL +DEFAULT +ABS .* .* TLS +LOCAL +DEFAULT +7 sl1 .* TLS +LOCAL +DEFAULT +7 sl2 .* TLS +LOCAL +DEFAULT +7 sl3 .* TLS +LOCAL +DEFAULT +7 sl4 .* 
TLS +LOCAL +DEFAULT +7 sl5 .* TLS +LOCAL +DEFAULT +7 sl6 .* TLS +LOCAL +DEFAULT +7 sl7 .* TLS +LOCAL +DEFAULT +7 sl8 .* FILE +LOCAL +DEFAULT +ABS .* TLS +LOCAL +DEFAULT +8 sH1 .* OBJECT +LOCAL +DEFAULT +ABS _DYNAMIC .* TLS +LOCAL +DEFAULT +7 sh3 .* TLS +LOCAL +DEFAULT +8 sH2 .* TLS +LOCAL +DEFAULT +8 sH7 .* OBJECT +LOCAL +DEFAULT +ABS _PROCEDURE_LINKAGE_TABLE_ .* TLS +LOCAL +DEFAULT +7 sh7 .* TLS +LOCAL +DEFAULT +7 sh8 .* TLS +LOCAL +DEFAULT +8 sH4 .* TLS +LOCAL +DEFAULT +7 sh4 .* TLS +LOCAL +DEFAULT +8 sH3 .* TLS +LOCAL +DEFAULT +7 sh5 .* TLS +LOCAL +DEFAULT +8 sH5 .* TLS +LOCAL +DEFAULT +8 sH6 .* TLS +LOCAL +DEFAULT +8 sH8 .* TLS +LOCAL +DEFAULT +7 sh1 .* OBJECT +LOCAL +DEFAULT +ABS _GLOBAL_OFFSET_TABLE_ .* TLS +LOCAL +DEFAULT +7 sh2 .* TLS +LOCAL +DEFAULT +7 sh6 .* TLS +GLOBAL +DEFAULT +7 sg8 .* TLS +GLOBAL +DEFAULT +7 sg3 .* TLS +GLOBAL +DEFAULT +7 sg4 .* TLS +GLOBAL +DEFAULT +7 sg5 .* NOTYPE +GLOBAL +DEFAULT +UND __tls_get_addr .* TLS +GLOBAL +DEFAULT +7 sg1 .* FUNC +GLOBAL +DEFAULT +6 fn1 .* TLS +GLOBAL +DEFAULT +7 sg2 .* TLS +GLOBAL +DEFAULT +7 sg6 .* TLS +GLOBAL +DEFAULT +7 sg7
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/files.R
\name{files}
\alias{files}
\title{Lists files in a given workspace}
\usage{
files(workspace)
}
\arguments{
\item{workspace}{: the workspace in which the files will be located.}

\item{login_token}{: generated with the login function.}
}
\value{
: dataframe containing the file names in the given directory
}
\description{
Lists files in a given workspace
}
/man/files.Rd
no_license
lubospernis/FNA_package
R
false
true
446
rd
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/files.R
\name{files}
\alias{files}
\title{Lists files in a given workspace}
\usage{
files(workspace)
}
\arguments{
\item{workspace}{: the workspace in which the files will be located.}

\item{login_token}{: generated with the login function.}
}
\value{
: dataframe containing the file names in the given directory
}
\description{
Lists files in a given workspace
}
#' Percentage Change Transformation
#'
#' The \code{pct()} function returns a transformation of the provided time
#' series using a Percentage Change transformation. \code{pct.rev()} reverses
#' the transformation.
#'
#' The Percentage Change transformation is given approximately by
#' \deqn{ log(x[2:n] / x[1:(n-1)] ) = log( x[2:n] ) - log( x[1:(n-1)] ) } where
#' \code{n=length(x)}.
#'
#' @aliases pct pct.rev
#' @param x A numeric vector or univariate time series of class \code{ts}.
#' @param p A numeric vector or univariate time series of percentage changes.
#' Possibly returned by \code{pct()}.
#' @param xi Initial value/observation of \code{x} (\code{x[1]}). First known
#' non-transformed value used to recursively obtain the original series.
#' @param addinit If \code{TRUE}, \code{xi} is included in the return.
#' @return A vector of length \code{length(x)-1} containing the transformed
#' values.
#' @author Rebecca Pontes Salles
#' @family transformation methods
#' @references R.H. Shumway and D.S. Stoffer, 2010, Time Series Analysis and
#' Its Applications: With R Examples. 3rd ed. 2011 edition ed. New York,
#' Springer.
#' @keywords percentage change transform time series
#' @examples
#'
#' data(NN5.A)
#' ts <- na.omit(NN5.A[,10])
#' length(ts)
#'
#' p <- pct(ts)
#' length(p)
#'
#' p_rev <- pct.rev(p, attributes(p)$xi)
#'
#' all(round(p_rev,4)==round(ts,4))
#'
#' @export pct
pct <- function(x) {
  lag <- 1
  n <- length(x)
  # Log-differences: log(x[t]) - log(x[t - lag])
  xt <- x[(1 + lag):n]
  xt_1 <- x[1:(n - lag)]
  pc <- log(xt) - log(xt_1)
  # Keep the first/last `lag` observations so the transform can be reversed
  attr(pc, "xi") <- utils::head(x, lag)
  attr(pc, "xf") <- utils::tail(x, lag)
  return(pc)
}

#' @rdname pct
#' @export pct.rev
pct.rev <- function(p, xi, addinit = TRUE) {
  # The recursion x[t] = exp(log(x[t-1]) + p[t]) unrolls to the closed form
  # x[t] = xi * exp(cumsum(p)[t]). Besides being vectorized, this fixes the
  # length(p) == 1 case, where the previous `for(i in 2:length(p))` loop
  # iterated over c(2, 1) and produced NA garbage.
  xt <- as.vector(xi) * exp(cumsum(as.vector(p)))
  if (addinit) {
    return(c(xi, xt))
  }
  xt
}

#PCT.rev <- function(p,x0){
#  xt <- x0*(1+p[1])
#  for(i in 2:length(p)) xt <- c(xt, (1+p[i])*xt[length(xt)] )
#  xt
#}
/R/PCT.r
no_license
cran/TSPred
R
false
false
2,041
r
#' Percentage Change Transformation
#'
#' The \code{pct()} function returns a transformation of the provided time
#' series using a Percentage Change transformation. \code{pct.rev()} reverses
#' the transformation.
#'
#' The Percentage Change transformation is given approximately by
#' \deqn{ log(x[2:n] / x[1:(n-1)] ) = log( x[2:n] ) - log( x[1:(n-1)] ) } where
#' \code{n=length(x)}.
#'
#' @aliases pct pct.rev
#' @param x A numeric vector or univariate time series of class \code{ts}.
#' @param p A numeric vector or univariate time series of percentage changes.
#' Possibly returned by \code{pct()}.
#' @param xi Initial value/observation of \code{x} (\code{x[1]}). First known
#' non-transformed value used to recursively obtain the original series.
#' @param addinit If \code{TRUE}, \code{xi} is included in the return.
#' @return A vector of length \code{length(x)-1} containing the transformed
#' values.
#' @author Rebecca Pontes Salles
#' @family transformation methods
#' @references R.H. Shumway and D.S. Stoffer, 2010, Time Series Analysis and
#' Its Applications: With R Examples. 3rd ed. 2011 edition ed. New York,
#' Springer.
#' @keywords percentage change transform time series
#' @examples
#'
#' data(NN5.A)
#' ts <- na.omit(NN5.A[,10])
#' length(ts)
#'
#' p <- pct(ts)
#' length(p)
#'
#' p_rev <- pct.rev(p, attributes(p)$xi)
#'
#' all(round(p_rev,4)==round(ts,4))
#'
#' @export pct
pct <- function(x) {
  lag <- 1
  n <- length(x)
  # Log-differences: log(x[t]) - log(x[t - lag])
  xt <- x[(1 + lag):n]
  xt_1 <- x[1:(n - lag)]
  pc <- log(xt) - log(xt_1)
  # Keep the first/last `lag` observations so the transform can be reversed
  attr(pc, "xi") <- utils::head(x, lag)
  attr(pc, "xf") <- utils::tail(x, lag)
  return(pc)
}

#' @rdname pct
#' @export pct.rev
pct.rev <- function(p, xi, addinit = TRUE) {
  # The recursion x[t] = exp(log(x[t-1]) + p[t]) unrolls to the closed form
  # x[t] = xi * exp(cumsum(p)[t]). Besides being vectorized, this fixes the
  # length(p) == 1 case, where the previous `for(i in 2:length(p))` loop
  # iterated over c(2, 1) and produced NA garbage.
  xt <- as.vector(xi) * exp(cumsum(as.vector(p)))
  if (addinit) {
    return(c(xi, xt))
  }
  xt
}

#PCT.rev <- function(p,x0){
#  xt <- x0*(1+p[1])
#  for(i in 2:length(p)) xt <- c(xt, (1+p[i])*xt[length(xt)] )
#  xt
#}
field <- structure(function # Capture a field
### Capture a field with a pattern of the form
### list("field.name", between.pattern,
### field.name=list(...)) -- see examples.
(field.name,
### Field name, used as a pattern and as a capture group (output
### column) name.
 between.pattern,
### Pattern to match after field.name but before the field value.
 ...
### Pattern(s) for matching field value.
){
  ## Guard clause: the field name must be a single character string,
  ## since it doubles as both the literal pattern and the capture-group
  ## (output column) name.
  name.is.valid <- is.character(field.name) && length(field.name) == 1
  if (!name.is.valid) {
    stop("first argument of field must be character string (field name)")
  }
  ## Field name, separator, then the value wrapped in a named group.
  list(
    field.name,
    between.pattern,
    group(field.name, ...))
### Pattern list which can be used in capture_first_vec,
### capture_first_df, or capture_all_str.
}, ex=function(){

  ## Two ways to create the same pattern.
  str(list("Alignment ", Alignment="[0-9]+"))
  ## To avoid typing Alignment twice use:
  str(nc::field("Alignment", " ", "[0-9]+"))

  ## An example with lots of different fields.
  info.txt.gz <- system.file(
    "extdata", "SweeD_Info.txt.gz", package="nc")
  info.vec <- readLines(info.txt.gz)
  info.vec[24:40]
  ## For each Alignment there are many fields which have a similar
  ## pattern, and occur in the same order. One way to capture these
  ## fields is by coding a pattern that says to look for all of those
  ## fields in that order. Each field is coded using this helper
  ## function.
  g <- function(name, fun=identity, suffix=list()){
    list(
      "\t+",
      nc::field(name, ":\t+", ".*"),
      fun, suffix, "\n+")
  }
  nc::capture_all_str(
    info.vec,
    nc::field("Alignment", " ", "[0-9]+"),
    "\n+",
    g("Chromosome"),
    g("Sequences", as.integer),
    g("Sites", as.integer),
    g("Discarded sites", as.integer),
    g("Processing", as.integer, " seconds"),
    g("Position", as.integer),
    g("Likelihood", as.numeric),
    g("Alpha", as.numeric))

  ## Another example where field is useful.
  trackDb.txt.gz <- system.file(
    "extdata", "trackDb.txt.gz", package="nc")
  trackDb.vec <- readLines(trackDb.txt.gz)
  cat(trackDb.vec[101:115], sep="\n")
  int.pattern <- list("[0-9]+", as.integer)
  cell.sample.type <- list(
    cellType="[^ ]*?",
    "_",
    sampleName=list(
      "McGill",
      sampleID=int.pattern),
    dataType="Coverage|Peaks")
  ## Each block in the trackDb file begins with track, followed by a
  ## space, followed by the track name. That pattern is coded below,
  ## using field:
  track.pattern <- nc::field(
    "track", " ", cell.sample.type, "|", "[^\n]+")
  nc::capture_all_str(trackDb.vec, track.pattern)
  ## Each line in a block has the same structure (field name, space,
  ## field value). Below we use the field function to extract the
  ## color line, along with columns for each of the three channels
  ## (red, green, blue).
  any.lines.pattern <- "(?:\n[^\n]+)*"
  nc::capture_all_str(
    trackDb.vec,
    track.pattern,
    any.lines.pattern,
    "\\s+",
    nc::field(
      "color", " ",
      red=int.pattern, ",",
      green=int.pattern, ",",
      blue=int.pattern))
})
/R/field.R
no_license
tdhock/nc
R
false
false
3,069
r
field <- structure(function # Capture a field
### Capture a field with a pattern of the form
### list("field.name", between.pattern,
### field.name=list(...)) -- see examples.
(field.name,
### Field name, used as a pattern and as a capture group (output
### column) name.
 between.pattern,
### Pattern to match after field.name but before the field value.
 ...
### Pattern(s) for matching field value.
){
  ## Guard clause: the field name must be a single character string,
  ## since it doubles as both the literal pattern and the capture-group
  ## (output column) name.
  name.is.valid <- is.character(field.name) && length(field.name) == 1
  if (!name.is.valid) {
    stop("first argument of field must be character string (field name)")
  }
  ## Field name, separator, then the value wrapped in a named group.
  list(
    field.name,
    between.pattern,
    group(field.name, ...))
### Pattern list which can be used in capture_first_vec,
### capture_first_df, or capture_all_str.
}, ex=function(){

  ## Two ways to create the same pattern.
  str(list("Alignment ", Alignment="[0-9]+"))
  ## To avoid typing Alignment twice use:
  str(nc::field("Alignment", " ", "[0-9]+"))

  ## An example with lots of different fields.
  info.txt.gz <- system.file(
    "extdata", "SweeD_Info.txt.gz", package="nc")
  info.vec <- readLines(info.txt.gz)
  info.vec[24:40]
  ## For each Alignment there are many fields which have a similar
  ## pattern, and occur in the same order. One way to capture these
  ## fields is by coding a pattern that says to look for all of those
  ## fields in that order. Each field is coded using this helper
  ## function.
  g <- function(name, fun=identity, suffix=list()){
    list(
      "\t+",
      nc::field(name, ":\t+", ".*"),
      fun, suffix, "\n+")
  }
  nc::capture_all_str(
    info.vec,
    nc::field("Alignment", " ", "[0-9]+"),
    "\n+",
    g("Chromosome"),
    g("Sequences", as.integer),
    g("Sites", as.integer),
    g("Discarded sites", as.integer),
    g("Processing", as.integer, " seconds"),
    g("Position", as.integer),
    g("Likelihood", as.numeric),
    g("Alpha", as.numeric))

  ## Another example where field is useful.
  trackDb.txt.gz <- system.file(
    "extdata", "trackDb.txt.gz", package="nc")
  trackDb.vec <- readLines(trackDb.txt.gz)
  cat(trackDb.vec[101:115], sep="\n")
  int.pattern <- list("[0-9]+", as.integer)
  cell.sample.type <- list(
    cellType="[^ ]*?",
    "_",
    sampleName=list(
      "McGill",
      sampleID=int.pattern),
    dataType="Coverage|Peaks")
  ## Each block in the trackDb file begins with track, followed by a
  ## space, followed by the track name. That pattern is coded below,
  ## using field:
  track.pattern <- nc::field(
    "track", " ", cell.sample.type, "|", "[^\n]+")
  nc::capture_all_str(trackDb.vec, track.pattern)
  ## Each line in a block has the same structure (field name, space,
  ## field value). Below we use the field function to extract the
  ## color line, along with columns for each of the three channels
  ## (red, green, blue).
  any.lines.pattern <- "(?:\n[^\n]+)*"
  nc::capture_all_str(
    trackDb.vec,
    track.pattern,
    any.lines.pattern,
    "\\s+",
    nc::field(
      "color", " ",
      red=int.pattern, ",",
      green=int.pattern, ",",
      blue=int.pattern))
})
#' ---
#' title: "Taller R-Ladies - procesando el dataset de mujeres programadoras"
#' author: "Ana Laura Diedrichs"
#' date: "29 de noviembre de 2018"
#' ---

#' First step: loading the data
#' ---
data(cars)

#' cars
#' The following line loads the dplyr package (quietly)
suppressMessages(library(dplyr))

#' Filtering speeds greater than 20
cars %>% filter(speed > 20)

#' ---
#' Plot
#' ---
plot(cars)

#' ---
#' Model
#' ---
# Regress stopping distance on speed (dist ~ speed). This matches the
# fitted line drawn on the plot below; the original printed
# lm(speed ~ dist, cars), which disagreed with the abline() call.
lm(dist ~ speed, cars)

plot(cars)
abline(lm(dist ~ speed, cars))
/2018-11-29/ejemplo.R
no_license
rladies/meetup-presentations_mendoza
R
false
false
494
r
#' ---
#' title: "Taller R-Ladies - procesando el dataset de mujeres programadoras"
#' author: "Ana Laura Diedrichs"
#' date: "29 de noviembre de 2018"
#' ---

#' First step: loading the data
#' ---
data(cars)

#' cars
#' The following line loads the dplyr package (quietly)
suppressMessages(library(dplyr))

#' Filtering speeds greater than 20
cars %>% filter(speed > 20)

#' ---
#' Plot
#' ---
plot(cars)

#' ---
#' Model
#' ---
# Regress stopping distance on speed (dist ~ speed). This matches the
# fitted line drawn on the plot below; the original printed
# lm(speed ~ dist, cars), which disagreed with the abline() call.
lm(dist ~ speed, cars)

plot(cars)
abline(lm(dist ~ speed, cars))
# Read, validate, and (optionally) recode the covariate data set.
#
# `covData` may be a data.frame/matrix or a character path to a CSV file.
# The number of rows must match N (number of individuals in the main data).
# When binaryToNumeric = TRUE, two-level factor/character columns are
# recoded to numeric (1/2, via as.factor level order) so they can be used
# as continuous predictors.
covDataRead <- function(covData, N, binaryToNumeric = FALSE) {
  if (!is.null(covData)) {
    # A character argument is interpreted as a path to a CSV file
    if (is.character(covData)) {
      covData <- read.csv(covData, header = TRUE, sep = ",", strip.white = TRUE)
    }
    try(covData <- as.data.frame(covData))
    if (nrow(covData) != N) {
      stop("Number of individuals in 'data' and 'covData' differs!")
    }
    if (is.null(colnames(covData))) {
      stop("Check names of covariates in covData!")
    }
    if (binaryToNumeric) {
      for (k in seq_len(ncol(covData))) {
        if (inherits(covData[, k], c("factor", "ordered", "character")) &&
            length(unique(covData[, k])) == 2) {
          covData[, k] <- as.numeric(as.factor(covData[, k]))
        }
      }
    }
  }
  covData
}

# Get default values for predType.
#
# Defaults: "c" (continuous) for numeric/integer columns and "f" (fixed
# effects factor) for character/factor columns. A user-supplied predType
# is validated against the allowed codes "c", "f", "r".
# Returns NULL when covData is NULL.
predTypeDefault <- function(covData, predType = NULL) {
  if (is.null(covData)) {
    return(NULL)
  }
  # Default: continuous / fixed-effects covariates inferred from column class
  if (missing(predType) || is.null(predType)) {
    cov.class <- sapply(covData, class)
    predType <- ifelse(cov.class %in% c("character", "factor"), "f",
      ifelse(cov.class %in% c("integer", "numeric", "matrix"), "c", "")
    )
  }
  # Validate once; the original looped over indices but the check never
  # used the loop variable, so a single all() test is equivalent.
  if (!all(predType %in% c("f", "c", "r"))) {
    stop(
      "Check definition of predType: should be a vector of the same length\n ",
      "as there are columns in covData. Possible values are:\n",
      " 'c' (continuous variable),",
      "\n 'f' (fixed effects factor; only traitMPT), or",
      "\n 'r' (random effects factor; only traitMPT)."
    )
  }
  predType
}

# Mean-center all continuous ("c") covariates; other columns are left
# untouched. (Centering does not matter much for correlational analyses.)
covDataCenter <- function(covData, predType) {
  if (!is.null(covData)) {
    for (i in seq_len(ncol(covData))) {
      if (predType[i] == "c") {
        # Center continuous variables around their mean
        scaled <- scale(covData[, i], center = TRUE, scale = FALSE)
        # Only overwrite when centering actually changed the values
        if (any(scaled != covData[, i])) {
          covData[, i] <- c(scaled)
        }
      }
    }
  }
  covData
}
/R/covDataRead.R
no_license
mariusbarth/TreeBUGS
R
false
false
2,183
r
# Read, validate, and (optionally) recode the covariate data set.
#
# `covData` may be a data.frame/matrix or a character path to a CSV file.
# The number of rows must match N (number of individuals in the main data).
# When binaryToNumeric = TRUE, two-level factor/character columns are
# recoded to numeric (1/2, via as.factor level order) so they can be used
# as continuous predictors.
covDataRead <- function(covData, N, binaryToNumeric = FALSE) {
  if (!is.null(covData)) {
    # A character argument is interpreted as a path to a CSV file
    if (is.character(covData)) {
      covData <- read.csv(covData, header = TRUE, sep = ",", strip.white = TRUE)
    }
    try(covData <- as.data.frame(covData))
    if (nrow(covData) != N) {
      stop("Number of individuals in 'data' and 'covData' differs!")
    }
    if (is.null(colnames(covData))) {
      stop("Check names of covariates in covData!")
    }
    if (binaryToNumeric) {
      for (k in seq_len(ncol(covData))) {
        if (inherits(covData[, k], c("factor", "ordered", "character")) &&
            length(unique(covData[, k])) == 2) {
          covData[, k] <- as.numeric(as.factor(covData[, k]))
        }
      }
    }
  }
  covData
}

# Get default values for predType.
#
# Defaults: "c" (continuous) for numeric/integer columns and "f" (fixed
# effects factor) for character/factor columns. A user-supplied predType
# is validated against the allowed codes "c", "f", "r".
# Returns NULL when covData is NULL.
predTypeDefault <- function(covData, predType = NULL) {
  if (is.null(covData)) {
    return(NULL)
  }
  # Default: continuous / fixed-effects covariates inferred from column class
  if (missing(predType) || is.null(predType)) {
    cov.class <- sapply(covData, class)
    predType <- ifelse(cov.class %in% c("character", "factor"), "f",
      ifelse(cov.class %in% c("integer", "numeric", "matrix"), "c", "")
    )
  }
  # Validate once; the original looped over indices but the check never
  # used the loop variable, so a single all() test is equivalent.
  if (!all(predType %in% c("f", "c", "r"))) {
    stop(
      "Check definition of predType: should be a vector of the same length\n ",
      "as there are columns in covData. Possible values are:\n",
      " 'c' (continuous variable),",
      "\n 'f' (fixed effects factor; only traitMPT), or",
      "\n 'r' (random effects factor; only traitMPT)."
    )
  }
  predType
}

# Mean-center all continuous ("c") covariates; other columns are left
# untouched. (Centering does not matter much for correlational analyses.)
covDataCenter <- function(covData, predType) {
  if (!is.null(covData)) {
    for (i in seq_len(ncol(covData))) {
      if (predType[i] == "c") {
        # Center continuous variables around their mean
        scaled <- scale(covData[, i], center = TRUE, scale = FALSE)
        # Only overwrite when centering actually changed the values
        if (any(scaled != covData[, i])) {
          covData[, i] <- c(scaled)
        }
      }
    }
  }
  covData
}
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/create_fishery_subset.R \name{create_fishery_data} \alias{create_fishery_data} \title{Create fishery data from an Altantis scenario} \usage{ create_fishery_data(dat, time, species, boxes) } \arguments{ \item{dat}{A \code{data.frame} of numbers at age containing the following columns: \itemize{ \item{species} \item{agecl} \item{polygon} \item{layer} \item{time} \item{atoutput} } The \code{data.frame} is generated from either \code{\link{create_survey}} or \code{\link{create_fishery_data}}.} \item{time}{The timing of the survey (a vector indicating specific time steps, which are typically associated with years) i.e., seq(365,10*3650,365) would be an annual survey for 10 years} \item{species}{The species to sample in the survey (a vector)} \item{boxes}{A vector of box numbers} \item{effic}{Efficiency for each species: a matrix with nrow=length(species). Columns: species: the species name. Matches names in species efficiency:} \item{selex}{Selectivity at age. A dataframe defining selectivity at age for each species. Columns are: species: the species name. Matches names in species agecl: the age class that selectivity represents selex: the proportion selected relative to fully selected age classes (between 0 and 1)} } \value{ A \code{matrix} in the same format as the \code{dat} summed over layers, with \code{NA} in the layer column. } \description{ Create fishery data for an Atlantis scenario, where observation error will be added and certain boxes and species will be sampled. todo: add more information here. } \details{ The function works for a vector of defined species and only returns information in boxes that are sampled. } \author{ Poseidon }
/man/create_fishery_data.Rd
no_license
hmorzaria/atlantisom
R
false
true
1,785
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/create_fishery_subset.R \name{create_fishery_data} \alias{create_fishery_data} \title{Create fishery data from an Altantis scenario} \usage{ create_fishery_data(dat, time, species, boxes) } \arguments{ \item{dat}{A \code{data.frame} of numbers at age containing the following columns: \itemize{ \item{species} \item{agecl} \item{polygon} \item{layer} \item{time} \item{atoutput} } The \code{data.frame} is generated from either \code{\link{create_survey}} or \code{\link{create_fishery_data}}.} \item{time}{The timing of the survey (a vector indicating specific time steps, which are typically associated with years) i.e., seq(365,10*3650,365) would be an annual survey for 10 years} \item{species}{The species to sample in the survey (a vector)} \item{boxes}{A vector of box numbers} \item{effic}{Efficiency for each species: a matrix with nrow=length(species). Columns: species: the species name. Matches names in species efficiency:} \item{selex}{Selectivity at age. A dataframe defining selectivity at age for each species. Columns are: species: the species name. Matches names in species agecl: the age class that selectivity represents selex: the proportion selected relative to fully selected age classes (between 0 and 1)} } \value{ A \code{matrix} in the same format as the \code{dat} summed over layers, with \code{NA} in the layer column. } \description{ Create fishery data for an Atlantis scenario, where observation error will be added and certain boxes and species will be sampled. todo: add more information here. } \details{ The function works for a vector of defined species and only returns information in boxes that are sampled. } \author{ Poseidon }
numberOfPeople <- 5 numberOfTest <- 3 ar <- sample(1:100, numberOfPeople * numberOfTest, replace = TRUE) mat <- matrix(ar,numberOfPeople, numberOfTest) names <- c("a", "b", "c", "d", "e") tests <- c("A", "B", "C") dimnames(mat) <- list(names, tests) mat
/Q024.R
no_license
haradakunihiko/investigation_of_r
R
false
false
254
r
numberOfPeople <- 5 numberOfTest <- 3 ar <- sample(1:100, numberOfPeople * numberOfTest, replace = TRUE) mat <- matrix(ar,numberOfPeople, numberOfTest) names <- c("a", "b", "c", "d", "e") tests <- c("A", "B", "C") dimnames(mat) <- list(names, tests) mat
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/AllGenerics.R \name{nonSplicedReads} \alias{nonSplicedReads} \alias{nonSplicedReads<-} \alias{nonSplicedReads,FraseRDataSet-method} \alias{nonSplicedReads<-,FraseRDataSet-method} \title{Getter/setter for the non spliced reads object within the FraseRDataSet object} \usage{ nonSplicedReads(object) nonSplicedReads(object) <- value \S4method{nonSplicedReads}{FraseRDataSet}(object) \S4method{nonSplicedReads}{FraseRDataSet}(object) <- value } \arguments{ \item{object}{A FraseRDataSet object.} \item{value}{A RangedSummarizedExperiment object containing the counts for the non spliced reads overlapping splice sites in the fds.} } \value{ RangedSummarizedExperiment (getter) or FraseRDataSet (setter) } \description{ Getter/setter for the non spliced reads object within the FraseRDataSet object }
/man/nonSplicedReads.Rd
permissive
mumichae/FraseR
R
false
true
882
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/AllGenerics.R \name{nonSplicedReads} \alias{nonSplicedReads} \alias{nonSplicedReads<-} \alias{nonSplicedReads,FraseRDataSet-method} \alias{nonSplicedReads<-,FraseRDataSet-method} \title{Getter/setter for the non spliced reads object within the FraseRDataSet object} \usage{ nonSplicedReads(object) nonSplicedReads(object) <- value \S4method{nonSplicedReads}{FraseRDataSet}(object) \S4method{nonSplicedReads}{FraseRDataSet}(object) <- value } \arguments{ \item{object}{A FraseRDataSet object.} \item{value}{A RangedSummarizedExperiment object containing the counts for the non spliced reads overlapping splice sites in the fds.} } \value{ RangedSummarizedExperiment (getter) or FraseRDataSet (setter) } \description{ Getter/setter for the non spliced reads object within the FraseRDataSet object }
library(testthat) context("Tests for roll()") test_that("roll works as expected", { fair_die <- device(sides = 1:6, prob = rep(1/6, 6)) set.seed(123) fair_50rolls <- roll(fair_die, times = 50) expect_length(fair_50rolls),50)) expect_equal(fair50$sides, c(1,2)) expect_equal(fair50$prob, c(0.5,0.5)) expect_equal(fair50$total, 50) })
/roller/test/testthat/test-roll.R
no_license
yhed10/stat133-fall-2018
R
false
false
353
r
library(testthat) context("Tests for roll()") test_that("roll works as expected", { fair_die <- device(sides = 1:6, prob = rep(1/6, 6)) set.seed(123) fair_50rolls <- roll(fair_die, times = 50) expect_length(fair_50rolls),50)) expect_equal(fair50$sides, c(1,2)) expect_equal(fair50$prob, c(0.5,0.5)) expect_equal(fair50$total, 50) })
library(shiny) html <- "<a href='/14_linearlog_with_vnames_clean2/'> Click this link for analysis</a>" dir_loc <- "/media/baremetal/shared/alok/shiny-apps/" shinyServer(function(input, output) { output$contents <- renderTable({ uds <- input$udataset if((!is.null(uds)) && uds != 'None'){ file.copy(paste(dir_loc, uds, sep=''), "/media/baremetal/shared/alok/shiny-apps/datafile.csv", overwrite=T) } # input$file1 will be NULL initially. After the user selects and uploads a # file, it will be a data frame with 'name', 'size', 'type', and 'datapath' # columns. The 'datapath' column will contain the local filenames where the # data can be found. inFile <- input$file1 if (is.null(inFile)) return(NULL) df <- read.csv(inFile$datapath, header=input$header, sep=input$sep, quote=input$quote) file_loc <- paste(dir_loc, inFile$name, sep='') dnames <<- c(dnames, inFile$name) print("file1 present") write.csv(df, "/media/baremetal/shared/alok/shiny-apps/datafile.csv", row.names=F) file.copy("/media/baremetal/shared/alok/shiny-apps/datafile.csv", file_loc,overwrite=T) head(df,20) }) output$link <- renderText({ if(is.null(input$file1)) return(NULL) else return(HTML(html)) }) output$dui_si <- renderUI({ selectInput("udataset", "Choose previously uploaded datase", choices = dnames) }) })
/09_upload_save_list_rendergui/server.R
no_license
aloknayak29/shiny-server
R
false
false
1,415
r
library(shiny) html <- "<a href='/14_linearlog_with_vnames_clean2/'> Click this link for analysis</a>" dir_loc <- "/media/baremetal/shared/alok/shiny-apps/" shinyServer(function(input, output) { output$contents <- renderTable({ uds <- input$udataset if((!is.null(uds)) && uds != 'None'){ file.copy(paste(dir_loc, uds, sep=''), "/media/baremetal/shared/alok/shiny-apps/datafile.csv", overwrite=T) } # input$file1 will be NULL initially. After the user selects and uploads a # file, it will be a data frame with 'name', 'size', 'type', and 'datapath' # columns. The 'datapath' column will contain the local filenames where the # data can be found. inFile <- input$file1 if (is.null(inFile)) return(NULL) df <- read.csv(inFile$datapath, header=input$header, sep=input$sep, quote=input$quote) file_loc <- paste(dir_loc, inFile$name, sep='') dnames <<- c(dnames, inFile$name) print("file1 present") write.csv(df, "/media/baremetal/shared/alok/shiny-apps/datafile.csv", row.names=F) file.copy("/media/baremetal/shared/alok/shiny-apps/datafile.csv", file_loc,overwrite=T) head(df,20) }) output$link <- renderText({ if(is.null(input$file1)) return(NULL) else return(HTML(html)) }) output$dui_si <- renderUI({ selectInput("udataset", "Choose previously uploaded datase", choices = dnames) }) })
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/get.veg.module.R \name{get_veg_module} \alias{get_veg_module} \title{Load/extract + match species module} \usage{ get_veg_module( input_veg, outfolder, start_date, end_date, dbparms, new_site, host, machine_host, overwrite ) } \arguments{ \item{input_veg}{list, this is a sublist of settings$run$inputs that has info about source, id, metadata of the requested IC file} \item{outfolder}{path to where the processed files will be written} \item{start_date}{date in "YYYY-MM-DD" format, in case of source==FIA it's the settings$run$start.date, otherwise start_date of the IC file in DB} \item{end_date}{date in "YYYY-MM-DD" format, in case of source==FIA it's the settings$run$end.date, otherwise end_date of the IC file in DB} \item{dbparms}{list, settings$database info reqired for opening a connection to DB} \item{new_site}{data frame, id/lat/lon/name info about the site} \item{host}{list, host info as in settings$host, host$name forced to be "localhost" upstream} \item{machine_host}{local machine hostname, e.g. "pecan2.bu.edu"} \item{overwrite}{logical flag for convert.input} } \description{ Load/extract + match species module } \author{ Istem Fer }
/modules/data.land/man/get_veg_module.Rd
permissive
ashiklom/pecan
R
false
true
1,263
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/get.veg.module.R \name{get_veg_module} \alias{get_veg_module} \title{Load/extract + match species module} \usage{ get_veg_module( input_veg, outfolder, start_date, end_date, dbparms, new_site, host, machine_host, overwrite ) } \arguments{ \item{input_veg}{list, this is a sublist of settings$run$inputs that has info about source, id, metadata of the requested IC file} \item{outfolder}{path to where the processed files will be written} \item{start_date}{date in "YYYY-MM-DD" format, in case of source==FIA it's the settings$run$start.date, otherwise start_date of the IC file in DB} \item{end_date}{date in "YYYY-MM-DD" format, in case of source==FIA it's the settings$run$end.date, otherwise end_date of the IC file in DB} \item{dbparms}{list, settings$database info reqired for opening a connection to DB} \item{new_site}{data frame, id/lat/lon/name info about the site} \item{host}{list, host info as in settings$host, host$name forced to be "localhost" upstream} \item{machine_host}{local machine hostname, e.g. "pecan2.bu.edu"} \item{overwrite}{logical flag for convert.input} } \description{ Load/extract + match species module } \author{ Istem Fer }
# See the readme.md file for more details about how this script works #------------------------------------------------------------------------- ### 0. Download the data, unzip the folders my.url <- "https://d396qusza40orc.cloudfront.net/getdata%2Fprojectfiles%2FUCI%20HAR%20Dataset.zip" download.file(url=my.url, destfile="getdata-projectfiles-UCI HAR Dataset.zip") unzip("getdata-projectfiles-UCI HAR Dataset.zip") #?unzip # {utils} Extract of List Zip Archives ``` #------------------------------------------------------------------------- ### 1. Merge the training and the test sets to create one data set # Read the meta data activity_labels <- read.table("./UCI HAR Dataset/activity_labels.txt") features <- read.table("./UCI HAR Dataset/features.txt") features <- features[ , 2] # Read the training data X_train <- read.table("./UCI HAR Dataset/train/X_train.txt") y_train <- read.table("./UCI HAR Dataset/train/y_train.txt") subject_train <- read.table("./UCI HAR Dataset/train/subject_train.txt") # Read the testing data X_test <- read.table("./UCI HAR Dataset/test/X_test.txt", header = FALSE) y_test <- read.table("./UCI HAR Dataset/test/y_test.txt", header = FALSE) subject_test <- read.table("./UCI HAR Dataset/test/subject_test.txt") # Name the X training and test data names(X_train) <- features names(X_test) <- features # Combine the training data [subject, y, X] train <- cbind(subject_train, y_train, X_train) #### Combine the test data [subject, y, X] test <- cbind(subject_test, y_test, X_test) #### Combine the training and test data complete <- rbind(train, test) #### Remove the composite parts of the training and test data rm(subject_train, y_train, X_train) rm(subject_test, y_test, X_test) rm(train, test) #------------------------------------------------------------------------- ### 2. 
Extract the mean and standard deviation for each measurement # Create Indices for mean variables index.mean <- grep("-mean()", features, fixed = TRUE) # Create Indices for std variables index.std <- grep("-std()", features) # Create a single index vector for the mean and std variables index.mean.std <- sort(c(index.mean, index.std)) # Add 2 to the index vector, to allow for the subject and y variables index.df.subset <- c(1, 2, index.mean.std + 2) # Subset the complete dataframe columns, using the index vector df.subset <- complete[ , index.df.subset] # Remove the composite index vectors, and the complete data set rm(index.mean, index.std) rm(index.mean.std, index.df.subset) rm(complete) ### 3. Use descriptive activity names to name the activities in the data set names(df.subset)[1] <- "Subject" names(df.subset)[2] <- "Activity.Code" # Create a new variable containing the activity label df.subset$Activity <- activity_labels[df.subset$Activity.Code, 2] # Keep the subject number, activity name, and measurements df.subset <- df.subset[, c(1, 69, 3:68)] #### #------------------------------------------------------------------------- ### 4. 
Appropriately label the data set with descriptive activity names # Create a vector of descriptive activity names names <- names(df.subset) # Remove the brackets from each variable name names <- gsub(pattern = "[()]", replacement = "", x = names) # Replace the dash with a dot names <- gsub(pattern = "-", replacement = ".", x = names) # Acceleration Signal - Body names <- gsub(pattern = "tBodyAcc", replacement = "time.domain.body.acceleration.signals", x = names) # Acceleration Signal - Gravity names <- gsub(pattern = "tGravityAcc", replacement = "time.domain.gravity.acceleration.signals", x = names) # Gyroscope Signal names <- gsub(pattern = "tBodyGyro", replacement = "time.domain.body.gyroscope.signals", x = names) # Acceleration Signal - Body names <- gsub(pattern = "fBodyAcc", replacement = "frequency.domain.body.acceleration.signals", x = names) names <- gsub(pattern = "fBodyBodyAcc", replacement = "frequency.domain.body.acceleration.signals", x = names) # Gyroscope Signal names <- gsub(pattern = "fBodyGyro", replacement = "frequency.domain.body.gyroscope.signals", x = names) names <- gsub(pattern = "fBodyBodyGyro", replacement = "frequency.domain.body.gyroscope.signals", x = names) # Jerk names <- gsub(pattern = "signalsJerk", replacement = "Jerk.signals", x = names) # Magnitude names <- gsub(pattern = "Mag", replacement = ".magnitude", x = names) # Assign the names to the dataframe names(df.subset) <- names #------------------------------------------------------------------------- ### 5. Create a second, independent tidy data set with the average of each variable for each activity and each subject # Assign the names to the dataframe df.tidydata <- aggregate(df.subset[, 3:68], by = list("Subject" = df.subset$Subject, "Activity" = df.subset$Activity), FUN = "mean") # Export the data to a .txt file in the working directory write.table(df.tidydata, file = "./UCI HAR Dataset/Tidy Data Export.txt", row.names = FALSE)
/run_analysis.R
no_license
psychonomics/Getting-and-Cleaning-Data-Project
R
false
false
5,262
r
# See the readme.md file for more details about how this script works #------------------------------------------------------------------------- ### 0. Download the data, unzip the folders my.url <- "https://d396qusza40orc.cloudfront.net/getdata%2Fprojectfiles%2FUCI%20HAR%20Dataset.zip" download.file(url=my.url, destfile="getdata-projectfiles-UCI HAR Dataset.zip") unzip("getdata-projectfiles-UCI HAR Dataset.zip") #?unzip # {utils} Extract of List Zip Archives ``` #------------------------------------------------------------------------- ### 1. Merge the training and the test sets to create one data set # Read the meta data activity_labels <- read.table("./UCI HAR Dataset/activity_labels.txt") features <- read.table("./UCI HAR Dataset/features.txt") features <- features[ , 2] # Read the training data X_train <- read.table("./UCI HAR Dataset/train/X_train.txt") y_train <- read.table("./UCI HAR Dataset/train/y_train.txt") subject_train <- read.table("./UCI HAR Dataset/train/subject_train.txt") # Read the testing data X_test <- read.table("./UCI HAR Dataset/test/X_test.txt", header = FALSE) y_test <- read.table("./UCI HAR Dataset/test/y_test.txt", header = FALSE) subject_test <- read.table("./UCI HAR Dataset/test/subject_test.txt") # Name the X training and test data names(X_train) <- features names(X_test) <- features # Combine the training data [subject, y, X] train <- cbind(subject_train, y_train, X_train) #### Combine the test data [subject, y, X] test <- cbind(subject_test, y_test, X_test) #### Combine the training and test data complete <- rbind(train, test) #### Remove the composite parts of the training and test data rm(subject_train, y_train, X_train) rm(subject_test, y_test, X_test) rm(train, test) #------------------------------------------------------------------------- ### 2. 
Extract the mean and standard deviation for each measurement # Create Indices for mean variables index.mean <- grep("-mean()", features, fixed = TRUE) # Create Indices for std variables index.std <- grep("-std()", features) # Create a single index vector for the mean and std variables index.mean.std <- sort(c(index.mean, index.std)) # Add 2 to the index vector, to allow for the subject and y variables index.df.subset <- c(1, 2, index.mean.std + 2) # Subset the complete dataframe columns, using the index vector df.subset <- complete[ , index.df.subset] # Remove the composite index vectors, and the complete data set rm(index.mean, index.std) rm(index.mean.std, index.df.subset) rm(complete) ### 3. Use descriptive activity names to name the activities in the data set names(df.subset)[1] <- "Subject" names(df.subset)[2] <- "Activity.Code" # Create a new variable containing the activity label df.subset$Activity <- activity_labels[df.subset$Activity.Code, 2] # Keep the subject number, activity name, and measurements df.subset <- df.subset[, c(1, 69, 3:68)] #### #------------------------------------------------------------------------- ### 4. 
Appropriately label the data set with descriptive activity names # Create a vector of descriptive activity names names <- names(df.subset) # Remove the brackets from each variable name names <- gsub(pattern = "[()]", replacement = "", x = names) # Replace the dash with a dot names <- gsub(pattern = "-", replacement = ".", x = names) # Acceleration Signal - Body names <- gsub(pattern = "tBodyAcc", replacement = "time.domain.body.acceleration.signals", x = names) # Acceleration Signal - Gravity names <- gsub(pattern = "tGravityAcc", replacement = "time.domain.gravity.acceleration.signals", x = names) # Gyroscope Signal names <- gsub(pattern = "tBodyGyro", replacement = "time.domain.body.gyroscope.signals", x = names) # Acceleration Signal - Body names <- gsub(pattern = "fBodyAcc", replacement = "frequency.domain.body.acceleration.signals", x = names) names <- gsub(pattern = "fBodyBodyAcc", replacement = "frequency.domain.body.acceleration.signals", x = names) # Gyroscope Signal names <- gsub(pattern = "fBodyGyro", replacement = "frequency.domain.body.gyroscope.signals", x = names) names <- gsub(pattern = "fBodyBodyGyro", replacement = "frequency.domain.body.gyroscope.signals", x = names) # Jerk names <- gsub(pattern = "signalsJerk", replacement = "Jerk.signals", x = names) # Magnitude names <- gsub(pattern = "Mag", replacement = ".magnitude", x = names) # Assign the names to the dataframe names(df.subset) <- names #------------------------------------------------------------------------- ### 5. Create a second, independent tidy data set with the average of each variable for each activity and each subject # Assign the names to the dataframe df.tidydata <- aggregate(df.subset[, 3:68], by = list("Subject" = df.subset$Subject, "Activity" = df.subset$Activity), FUN = "mean") # Export the data to a .txt file in the working directory write.table(df.tidydata, file = "./UCI HAR Dataset/Tidy Data Export.txt", row.names = FALSE)
# R function to read descriptor x, a, and xa file # Sept 22, 2011 moggces@gmail.com readXAfile<-function(base, newV=FALSE, sep=""){ if (newV == FALSE) { xfile <- paste(base, '.x', sep="") ds<-read.table(xfile, fill=TRUE, row.names=NULL, header=FALSE,comment.char="",quote="", sep=sep) Ncpd<-as.numeric(as.character(ds[1, "V1"])) Ndes<-as.numeric(as.character(ds[1, "V2"])) dsheader<-as.vector(unlist(ds[2, 1:Ndes])) name <- as.vector(unlist(ds[3:(3+Ncpd-1), 2])) coff <- NULL dsdata<-as.numeric(as.matrix(ds[3:(3+Ncpd-1), -c(1,2)])) dim(dsdata) <- c(length(name), length(dsheader)) colnames(dsdata)<-dsheader rownames(dsdata)<-name if (nrow(ds) == Ncpd + 4) # this x file has coefficients { coff <- ds[ c(nrow(ds)-1, nrow(ds)), 1:Ndes] names(coff) = dsheader } afile <- paste(base, '.a', sep="") if (file.exists(afile)) { dsa<-read.table(afile,row.names=NULL,col.names=c("name","activity"), header=F,comment.char="",quote="", sep=sep) list(x=as.data.frame(dsdata), desName=dsheader, a=dsa$activity,cpdName=as.vector(dsa$name), coff=coff) } else { list(x=as.data.frame(dsdata), desName=dsheader, cpdName=name) } } else { xafile <- paste(base, '.xa', sep="") ds<-read.table(xafile, fill=TRUE, row.names=NULL, header=FALSE,comment.char="",quote="", sep=sep) Ncpd<-as.numeric(as.character(ds[1, "V1"])) Ndes<-as.numeric(as.character(ds[1, "V2"])) dsheader<-as.vector(unlist(ds[2, 1:Ndes])) name <- as.vector(unlist(ds[3:(3+Ncpd-1), 2])) dsdata<-as.numeric(as.matrix(ds[3:(3+Ncpd-1), -c(1,2)])) dim(dsdata) <- c(length(name), length(dsheader)) colnames(dsdata)<-dsheader rownames(dsdata)<-name activity <- as.character(unlist(ds[3:(3+Ncpd-1), 3])) if (nrow(ds) == Ncpd + 4) # this xa file has coefficients { coff <- ds[ c(nrow(ds)-1, nrow(ds)), 1:Ndes] names(coff) = dsheader } list(x=as.data.frame(dsdata), desName=dsheader, a=activity,cpdName=name, coff=coff) } }
/source/customized.R
no_license
moggces/ActivityProfilingGUI
R
false
false
1,980
r
# R function to read descriptor x, a, and xa file # Sept 22, 2011 moggces@gmail.com readXAfile<-function(base, newV=FALSE, sep=""){ if (newV == FALSE) { xfile <- paste(base, '.x', sep="") ds<-read.table(xfile, fill=TRUE, row.names=NULL, header=FALSE,comment.char="",quote="", sep=sep) Ncpd<-as.numeric(as.character(ds[1, "V1"])) Ndes<-as.numeric(as.character(ds[1, "V2"])) dsheader<-as.vector(unlist(ds[2, 1:Ndes])) name <- as.vector(unlist(ds[3:(3+Ncpd-1), 2])) coff <- NULL dsdata<-as.numeric(as.matrix(ds[3:(3+Ncpd-1), -c(1,2)])) dim(dsdata) <- c(length(name), length(dsheader)) colnames(dsdata)<-dsheader rownames(dsdata)<-name if (nrow(ds) == Ncpd + 4) # this x file has coefficients { coff <- ds[ c(nrow(ds)-1, nrow(ds)), 1:Ndes] names(coff) = dsheader } afile <- paste(base, '.a', sep="") if (file.exists(afile)) { dsa<-read.table(afile,row.names=NULL,col.names=c("name","activity"), header=F,comment.char="",quote="", sep=sep) list(x=as.data.frame(dsdata), desName=dsheader, a=dsa$activity,cpdName=as.vector(dsa$name), coff=coff) } else { list(x=as.data.frame(dsdata), desName=dsheader, cpdName=name) } } else { xafile <- paste(base, '.xa', sep="") ds<-read.table(xafile, fill=TRUE, row.names=NULL, header=FALSE,comment.char="",quote="", sep=sep) Ncpd<-as.numeric(as.character(ds[1, "V1"])) Ndes<-as.numeric(as.character(ds[1, "V2"])) dsheader<-as.vector(unlist(ds[2, 1:Ndes])) name <- as.vector(unlist(ds[3:(3+Ncpd-1), 2])) dsdata<-as.numeric(as.matrix(ds[3:(3+Ncpd-1), -c(1,2)])) dim(dsdata) <- c(length(name), length(dsheader)) colnames(dsdata)<-dsheader rownames(dsdata)<-name activity <- as.character(unlist(ds[3:(3+Ncpd-1), 3])) if (nrow(ds) == Ncpd + 4) # this xa file has coefficients { coff <- ds[ c(nrow(ds)-1, nrow(ds)), 1:Ndes] names(coff) = dsheader } list(x=as.data.frame(dsdata), desName=dsheader, a=activity,cpdName=name, coff=coff) } }
## optional arg KERNEL_FILE to estimate kernel from instead of full image args <- commandArgs(TRUE) str(args) if (!(length(args) %in% c(3, 4))) { cat("\nUsage: Rscript denoise-image <FILE> <LAMBDA> <TYPE=grayscale|color> [<KERNEL_FILE>=<FILE>] \n\n") q() } METHOD <- Sys.getenv("METHOD", "direct") # "iterative" PATCH <- if (METHOD == "direct") 150 else 300 suppressMessages(library(rip.recover)) INFILE <- args[1] LAMBDA <- as.numeric(args[2]) TYPE <- args[3] KERNEL_FILE <- if (length(args) == 4) args[4] else NULL ETA <- 0.005 ZAP.DIGITS <- 2 GAMMA <- 2.2 NITER.RL <- 25 rho.iid <- list(along = 0, across = 0) # IID parameters rho.ar <- list(along = 0.3, across = 0.6) # AR parameters y <- rip.import(INFILE, type = TYPE) / 255 y[] <- y^(1/GAMMA) yy <- if (is.null(KERNEL_FILE)) y else (rip.import(KERNEL_FILE, type = TYPE) / 255)^(1/GAMMA) k <- symmetric.blur(rip.desaturate(yy), c(5, 5), g.method = "autoreg", eta.sq = ETA^2, corr.grad = TRUE, trim = (METHOD == "direct"), zap.digits = ZAP.DIGITS) OUTDIR <- sprintf("autoreg-kernel-%g", ETA) if (!dir.exists(OUTDIR)) dir.create(OUTDIR) OUTDIR <- file.path(OUTDIR, sprintf("%s-%g", METHOD, LAMBDA)) if (!dir.exists(OUTDIR)) dir.create(OUTDIR) fout <- function(s) file.path(OUTDIR, sprintf("%s-%s-%s.jpg", INFILE, TYPE, s)) fexists <- function(label) { message(fout(label)) file.exists(fout(label)) } fexport <- function(x, label = deparse(substitute(x))) { rip.export(round(255 * x^GAMMA), file = fout(label)) } ## fexport(y, label = "000-original") if (!fexists("rl")) { rl <- rip.deconvlucy(y, k, niter = NITER.RL) fexport(rl) } if (!fexists("giid")) { giid <- rip.denoise(y, alpha = 2, lambda = LAMBDA, rho = rho.iid, method = METHOD, k = k, patch = PATCH, overlap = 20, verbose = TRUE) fexport(giid) } if (!fexists("gar")) { gar <- rip.denoise(y, alpha = 2, lambda = LAMBDA, rho = rho.ar, method = METHOD, k = k, patch = PATCH, overlap = 20, verbose = TRUE) fexport(gar) } if (!fexists("sar")) { sar <- rip.denoise(y, alpha = 0.8, lambda = 
LAMBDA, rho = rho.ar, method = METHOD, k = k, patch = PATCH, overlap = 20, verbose = TRUE) fexport(sar) } if (!fexists("siid")) { siid <- rip.denoise(y, alpha = 0.8, lambda = LAMBDA, rho = rho.iid, method = METHOD, k = k, patch = PATCH, overlap = 20, verbose = TRUE) fexport(siid) }
/denoising-real/real-blurred/denoise-image.R
no_license
deepayan/rip.testing
R
false
false
2,549
r
## optional arg KERNEL_FILE to estimate kernel from instead of full image args <- commandArgs(TRUE) str(args) if (!(length(args) %in% c(3, 4))) { cat("\nUsage: Rscript denoise-image <FILE> <LAMBDA> <TYPE=grayscale|color> [<KERNEL_FILE>=<FILE>] \n\n") q() } METHOD <- Sys.getenv("METHOD", "direct") # "iterative" PATCH <- if (METHOD == "direct") 150 else 300 suppressMessages(library(rip.recover)) INFILE <- args[1] LAMBDA <- as.numeric(args[2]) TYPE <- args[3] KERNEL_FILE <- if (length(args) == 4) args[4] else NULL ETA <- 0.005 ZAP.DIGITS <- 2 GAMMA <- 2.2 NITER.RL <- 25 rho.iid <- list(along = 0, across = 0) # IID parameters rho.ar <- list(along = 0.3, across = 0.6) # AR parameters y <- rip.import(INFILE, type = TYPE) / 255 y[] <- y^(1/GAMMA) yy <- if (is.null(KERNEL_FILE)) y else (rip.import(KERNEL_FILE, type = TYPE) / 255)^(1/GAMMA) k <- symmetric.blur(rip.desaturate(yy), c(5, 5), g.method = "autoreg", eta.sq = ETA^2, corr.grad = TRUE, trim = (METHOD == "direct"), zap.digits = ZAP.DIGITS) OUTDIR <- sprintf("autoreg-kernel-%g", ETA) if (!dir.exists(OUTDIR)) dir.create(OUTDIR) OUTDIR <- file.path(OUTDIR, sprintf("%s-%g", METHOD, LAMBDA)) if (!dir.exists(OUTDIR)) dir.create(OUTDIR) fout <- function(s) file.path(OUTDIR, sprintf("%s-%s-%s.jpg", INFILE, TYPE, s)) fexists <- function(label) { message(fout(label)) file.exists(fout(label)) } fexport <- function(x, label = deparse(substitute(x))) { rip.export(round(255 * x^GAMMA), file = fout(label)) } ## fexport(y, label = "000-original") if (!fexists("rl")) { rl <- rip.deconvlucy(y, k, niter = NITER.RL) fexport(rl) } if (!fexists("giid")) { giid <- rip.denoise(y, alpha = 2, lambda = LAMBDA, rho = rho.iid, method = METHOD, k = k, patch = PATCH, overlap = 20, verbose = TRUE) fexport(giid) } if (!fexists("gar")) { gar <- rip.denoise(y, alpha = 2, lambda = LAMBDA, rho = rho.ar, method = METHOD, k = k, patch = PATCH, overlap = 20, verbose = TRUE) fexport(gar) } if (!fexists("sar")) { sar <- rip.denoise(y, alpha = 0.8, lambda = 
LAMBDA, rho = rho.ar, method = METHOD, k = k, patch = PATCH, overlap = 20, verbose = TRUE) fexport(sar) } if (!fexists("siid")) { siid <- rip.denoise(y, alpha = 0.8, lambda = LAMBDA, rho = rho.iid, method = METHOD, k = k, patch = PATCH, overlap = 20, verbose = TRUE) fexport(siid) }
########################################################################### ###### Script for summarising MySeq reads into per well composition ##### ########################################################################### ### Clear the workspace rm(list=ls()) ### load the libraries you need library(reshape2) library(ggplot2) library(akima) library(plyr) library(dplyr) library(ggplot2bdc) library(ggExtra) library(grid) library(gridExtra) library(RColorBrewer) library(scales) library(lme4) ######################################################################################## ################### stacked bar charts of well composition ########################### ######################################################################################## ### read in the data my.reads<-read.csv(file=paste("DATA/OPM2/0.97_similarity/metaBEAT-processed.tsv",sep=""), sep="\t", stringsAsFactors=FALSE, header=TRUE) ### read in the sample by plate data my.plates<-read.csv(file="DATA/OPM2/OPM2_PlateDATA.csv", sep=",", stringsAsFactors=FALSE, header=TRUE) ### trim the plate data to the necessary columns my.plates<-my.plates[,c(1,3,4)] ### greedy regex to split the sample string by the underscore and leave us with a column for nest and a column for indentifier my.plates<-cbind(my.plates, do.call(rbind, strsplit(as.character(my.plates$sample), "_|_.*_"))) ### trim the plate data to the necessary columns (i.e. 
drop the identifier column) my.plates<-my.plates[,c(1:3,5)] ### name the columns colnames(my.plates)<-c("sample","plate","plate.numeric","nest") ### set a minimum occurance for an assignment to be trusted occurance=1 ### subset the data frame to drop all assignments occuring fewer than the frequency specified above my.reads.subs<-subset(my.reads, subset = rowSums(my.reads[2:ncol(my.reads)] > 0) >= occurance) ### transpose the read data my.reads.trans<-recast(my.reads.subs, variable~OTU_ID) colnames(my.reads.trans)[1]<-"sample" my.reads.trans$sample<-as.character(my.reads.trans$sample) ### use match to add the plate data to the read data my.reads.trans$plate<-my.plates$plate[match(my.reads.trans$sample,my.plates$sample)] my.reads.trans$plate.numeric<-my.plates$plate.numeric[match(my.reads.trans$sample,my.plates$sample)] ### use match to add the nest data to the read data my.reads.trans$nest<-my.plates$nest[match(my.reads.trans$sample,my.plates$sample)] my.reads.trans$nest.numeric<-my.plates$nest.numeric[match(my.reads.trans$sample,my.plates$sample)] ### Pull out the aissignments for further manual examination ###my.assignments<-colnames(my.reads.trans) ###write.csv(my.assignments, file="DATA/assignments_out.csv") ### Pull in the assigned colours as a .csv ###Taxa.col<-read.csv("DATA/assignments_in.csv", stringsAsFactors=FALSE) my.reads.trans$type<-ifelse(my.reads.trans$nest=="neg1","Negative", ifelse(my.reads.trans$nest=="neg2","Negative", ifelse(my.reads.trans$nest=="neg1R","Negative", ifelse(my.reads.trans$nest=="neg2R","Negative", ifelse(my.reads.trans$nest=="DNApositive","DNApositive", ifelse(my.reads.trans$nest=="DNApositiveR","DNApositive", ifelse(my.reads.trans$nest=="PCRpositiveR","PCRpositive", ifelse(my.reads.trans$nest=="PCRpositive","PCRpositive","Sample")))))))) ### make sample and plate factors for faceting and ordering my.reads.trans$sample<-as.factor(my.reads.trans$sample) my.reads.trans$plate<-as.factor(my.reads.trans$plate) ### total all the reads 
my.reads.trans$total<-rowSums(my.reads.trans[c(2:5)]) ### calculate the percentage of reads in each well that are OPM my.reads.trans$Percent.Thau<-(my.reads.trans$Thaumetopoea_processionea/my.reads.trans$total)*100 ### set a minimum read coverage for accepting an assignment cutoff<-5 ### replace all the species that are less than the cutoff of the total reads with zero my.reads.trans[,2:5][my.reads.trans[,2:5] < cutoff] <- 0 ### subset the data frame to drop all coloumns containing only zeros my.reads.trans.nozero<-cbind(my.reads.trans[,c(1, 6:12)], subset(my.reads.trans[,c(2:5)], select = colSums(my.reads.trans[,c(2:5)]) !=0)) ### remove the total variable my.reads.trans.drop<-my.reads.trans.nozero[,c(1:6,8:12)] ### to remove the positive and negative samples for OTU counting in a barplot, I need to iteratively subset them out using the nest column. ### I've not found a more elegant way of doing this yet my.reads.trans.samps.only<-my.reads.trans.drop[my.reads.trans$nest != "Negative", ] my.reads.trans.samps.only<-my.reads.trans.samps.only[my.reads.trans.samps.only$nest != "DNApositive", ] my.reads.trans.samps.only<-my.reads.trans.samps.only[my.reads.trans.samps.only$nest != "PCRpositive", ] ### order the type for ordering the samples in the plot my.reads.trans.drop$type <- factor(my.reads.trans.drop$type, levels=c("Sample","DNApositive","PCRpositive","Negative")) ### reorder my.reads.melt by both percentage Thau and plate #my.reads.melt<-my.reads.melt[order(my.reads.melt$Plate.numeric,-my.reads.melt$Percent.Thau),] my.reads.trans.drop<-my.reads.trans.drop[order(-my.reads.trans.drop$Percent.Thau,my.reads.trans.drop$Carcelia_iliaca,my.reads.trans.drop$type),] my.reads.trans.drop <- my.reads.trans.drop[order(my.reads.trans.drop$type, -xtfrm(my.reads.trans.drop$Percent.Thau), my.reads.trans.drop$Carcelia_iliaca), ] ### make a panel factor after setting the decreasing OPM order my.reads.trans.drop$panel<-as.factor(c(rep(1,times=384), rep(2,times=384), 
rep(3,times=383))) ### melt the data into long format my.reads.melt<-melt(my.reads.trans.drop, id.vars=c("sample","plate", "plate.numeric","Percent.Thau","nest","type","panel")) colnames(my.reads.melt)<-c("Sample","Plate","Plate.numeric","Percent.Thau","Nest","Type","Panel","Species","Reads") ### create an ID variable for the order my.reads.melt$level.order<-seq(1, length(my.reads.melt$Percent.Thau),1) ### see what species remain after filtering levels(my.reads.melt$Species) ### order the species for plotting the legend my.reads.melt$Species <- factor(my.reads.melt$Species, levels=c("Thaumetopoea_processionea", "Carcelia_iliaca", "Astatotilapia_calliptera", "Triops", "unassigned")) ### order the species for plotting the legend my.colours <- c("#FFFF33", "#377EB8", "#E41A1C", "#99A367", "#000000") ### order the samples from highest OPM % to lowest and in increasing plate number my.reads.melt$Sample <- reorder(my.reads.melt$Sample, my.reads.melt$level.order) ### order the plates from lowest to highest #my.reads.melt$Plate <- reorder(my.reads.melt$Plate, my.reads.melt$Plate.numeric) ### create a nice colour scale using colourRampPalette and RColorBrewer - eventually vivid.colours2<-brewer.pal(length(unique(my.reads.melt$Species)),"Set1") ### count the columns greater than zero and write to a new data frame hit.hist<-data.frame(OTUs = rowSums(my.reads.trans.samps.only[c(2,8:11)] != 0), Type=my.reads.trans.samps.only$type) ######################################################################################## ################### make a plot of % composition by PCR plate ###################################### ######################################################################################## svg(file="Diagrams/well_composition_reads_by_plate_OPM2_0.97.svg", width=10, height=8) ### set up the ggplot well.composition<-ggplot(data=my.reads.melt, aes(x=Sample, y=Reads, fill=Species)) + ### make it a stacked barplot and set the bars to be the same height 
geom_bar(position="fill", stat="identity") + ### wrap the plot by plate facet_wrap(~Panel, scales="free_x", nrow=3, ncol=1) + ### give it a percentage scale scale_y_continuous(labels = percent_format()) + ### set the colours # scale_fill_manual(name="Species", # values = jet.colors3(length(unique(my.reads.melt$Species)))) + scale_fill_manual(name="Species", labels=c("Thaumetopoea processionea", "Carcelia iliaca", "Astatotilapia calliptera", "Triops cancriformis", "unassigned"), values = my.colours) + ### add a sensible y axis label labs(y = "% of reads per well", x="PCR wells") + ### rotate the x-axis labels and resize the text for the svg theme(axis.text.x = element_text(size = rel(0.2), angle =90), axis.ticks.x=element_blank(), axis.text.y = element_text(size = rel(1.1), colour="black"), axis.title.y = element_text(size = rel(1), vjust=2), axis.title.x = element_text(size = rel(1), vjust=-1.3), legend.text = element_text(size = rel(1), face="italic"), legend.title = element_text(size = rel(1)), strip.text.x = element_blank(), strip.background=element_blank(), legend.position = "bottom", legend.background = element_blank(), panel.background = element_blank(), panel.grid.major = element_blank(), panel.grid.minor = element_blank(), axis.line = element_blank(), panel.border = element_rect(colour = "black", fill=NA, size=0.1), plot.margin=unit(c(0.1, 0.1, 1, 1), "lines")) ### Make a ggplot object of our OTU counts c<- ggplot(hit.hist, aes(factor(reorder(OTUs, -OTUs)))) + geom_bar(alpha=0.5, fill="#3399ff") + labs(y = "Frequency", x="OTUs per well") + coord_flip() + ### rotate the x-axis labels and resize the text for the svg theme(axis.text = element_text(size = rel(1.1), colour="black"), axis.title.y = element_text(size = rel(1), vjust=2), axis.title.x = element_text(size = rel(1), vjust=-1), panel.grid.minor = element_blank(), axis.line = element_blank(), plot.margin=unit(c(0.1, 0.1, 1, 1), "lines")) grid.arrange(well.composition, c, heights=c(3/4, 1/4), ncol=1) 
dev.off() ######################################################################################## # work out percentage parasitism with carcelia # ######################################################################################## ### divide the number of well containing Carcelia by all well that are not +ve or -ve to get percentage percent.cacelia<-(colSums(my.reads.trans[3] > 0)/1151)*100 ######################################################################################## # Test for difference in OTU number across MyFi and MyTaq # ######################################################################################## ### read in the read stats for the enzyme comparison Reads<-read.csv("DATA/OPM2/reads_stats_tag_comparison.csv") Reads$ratio<-Reads$cluster_above_thres/Reads$queries ### add a variable for enzyme Reads$enzyme<-ifelse(grepl("R", Reads$sample), "MyTaq", "MyFi") ### remove the R suffix for MyTaq Reads$sample<-gsub("R","", Reads$sample) ### drop everything other than the grouping variable and the clusters retained Reads.subs<-subset(Reads, select=c("sample", "ratio", "enzyme")) ggplot(Reads.subs, aes(x= enzyme, y=ratio)) + geom_boxplot() t.test(Reads.subs$ratio~Reads.subs$enzyme)
/well_composition_OPM2.R
no_license
James-Kitson/NMB_stats
R
false
false
11,810
r
###########################################################################
###### Script for summarising MySeq reads into per well composition  #####
###########################################################################

### Clear the workspace
rm(list=ls())

### load the libraries you need
library(reshape2)
library(ggplot2)
library(akima)
library(plyr)
library(dplyr)
library(ggplot2bdc)
library(ggExtra)
library(grid)
library(gridExtra)
library(RColorBrewer)
library(scales)
library(lme4)

########################################################################################
###################  stacked bar charts of well composition  ##########################
########################################################################################

### read in the data
my.reads<-read.csv(file=paste("DATA/OPM2/0.97_similarity/metaBEAT-processed.tsv",sep=""),
                   sep="\t", stringsAsFactors=FALSE, header=TRUE)

### read in the sample by plate data
my.plates<-read.csv(file="DATA/OPM2/OPM2_PlateDATA.csv", sep=",",
                    stringsAsFactors=FALSE, header=TRUE)

### trim the plate data to the necessary columns
my.plates<-my.plates[,c(1,3,4)]

### greedy regex to split the sample string by the underscore and leave us with
### a column for nest and a column for identifier
my.plates<-cbind(my.plates, do.call(rbind, strsplit(as.character(my.plates$sample), "_|_.*_")))

### trim the plate data to the necessary columns (i.e. drop the identifier column)
my.plates<-my.plates[,c(1:3,5)]

### name the columns
colnames(my.plates)<-c("sample","plate","plate.numeric","nest")

### set a minimum occurance for an assignment to be trusted
occurance<-1

### subset the data frame to drop all assignments occuring fewer than the frequency specified above
my.reads.subs<-subset(my.reads, subset = rowSums(my.reads[2:ncol(my.reads)] > 0) >= occurance)

### transpose the read data so rows are samples and columns are OTU assignments
my.reads.trans<-recast(my.reads.subs, variable~OTU_ID)
colnames(my.reads.trans)[1]<-"sample"
my.reads.trans$sample<-as.character(my.reads.trans$sample)

### use match to add the plate data to the read data
my.reads.trans$plate<-my.plates$plate[match(my.reads.trans$sample,my.plates$sample)]
my.reads.trans$plate.numeric<-my.plates$plate.numeric[match(my.reads.trans$sample,my.plates$sample)]

### use match to add the nest data to the read data
my.reads.trans$nest<-my.plates$nest[match(my.reads.trans$sample,my.plates$sample)]
### NOTE(review): my.plates has no "nest.numeric" column after the renaming above,
### so the next line is a no-op (assigning NULL) — confirm whether nest.numeric
### was supposed to be carried through from the raw plate file.
my.reads.trans$nest.numeric<-my.plates$nest.numeric[match(my.reads.trans$sample,my.plates$sample)]

### Pull out the assignments for further manual examination
###my.assignments<-colnames(my.reads.trans)
###write.csv(my.assignments, file="DATA/assignments_out.csv")

### Pull in the assigned colours as a .csv
###Taxa.col<-read.csv("DATA/assignments_in.csv", stringsAsFactors=FALSE)

### classify each well as Negative / DNApositive / PCRpositive / Sample from its nest label
my.reads.trans$type<-ifelse(my.reads.trans$nest=="neg1","Negative",
                     ifelse(my.reads.trans$nest=="neg2","Negative",
                     ifelse(my.reads.trans$nest=="neg1R","Negative",
                     ifelse(my.reads.trans$nest=="neg2R","Negative",
                     ifelse(my.reads.trans$nest=="DNApositive","DNApositive",
                     ifelse(my.reads.trans$nest=="DNApositiveR","DNApositive",
                     ifelse(my.reads.trans$nest=="PCRpositiveR","PCRpositive",
                     ifelse(my.reads.trans$nest=="PCRpositive","PCRpositive","Sample"))))))))

### make sample and plate factors for faceting and ordering
my.reads.trans$sample<-as.factor(my.reads.trans$sample)
my.reads.trans$plate<-as.factor(my.reads.trans$plate)

### total all the reads
my.reads.trans$total<-rowSums(my.reads.trans[c(2:5)])

### calculate the percentage of reads in each well that are OPM
my.reads.trans$Percent.Thau<-(my.reads.trans$Thaumetopoea_processionea/my.reads.trans$total)*100

### set a minimum read coverage for accepting an assignment
cutoff<-5

### replace all the species that are less than the cutoff of the total reads with zero
my.reads.trans[,2:5][my.reads.trans[,2:5] < cutoff] <- 0

### subset the data frame to drop all columns containing only zeros
my.reads.trans.nozero<-cbind(my.reads.trans[,c(1, 6:12)],
                             subset(my.reads.trans[,c(2:5)],
                                    select = colSums(my.reads.trans[,c(2:5)]) !=0))

### remove the total variable
my.reads.trans.drop<-my.reads.trans.nozero[,c(1:6,8:12)]

### remove the positive and negative samples for OTU counting in a barplot.
### FIX(review): the original compared the `nest` column against "Negative",
### "DNApositive" and "PCRpositive", but those labels live in the `type` column
### (nest holds values like "neg1", "neg2", "DNApositiveR"), so the negatives and
### the "...R" controls were never removed. Filter on `type` instead.
my.reads.trans.samps.only<-my.reads.trans.drop[my.reads.trans.drop$type != "Negative", ]
my.reads.trans.samps.only<-my.reads.trans.samps.only[my.reads.trans.samps.only$type != "DNApositive", ]
my.reads.trans.samps.only<-my.reads.trans.samps.only[my.reads.trans.samps.only$type != "PCRpositive", ]

### order the type for ordering the samples in the plot
my.reads.trans.drop$type <- factor(my.reads.trans.drop$type,
                                   levels=c("Sample","DNApositive","PCRpositive","Negative"))

### reorder my.reads.melt by both percentage Thau and plate
#my.reads.melt<-my.reads.melt[order(my.reads.melt$Plate.numeric,-my.reads.melt$Percent.Thau),]
### NOTE(review): the first sort below is immediately overwritten by the second except
### for tie-breaking on rows equal in all three keys (order() is stable); kept as-is.
my.reads.trans.drop<-my.reads.trans.drop[order(-my.reads.trans.drop$Percent.Thau,
                                               my.reads.trans.drop$Carcelia_iliaca,
                                               my.reads.trans.drop$type),]
my.reads.trans.drop <- my.reads.trans.drop[order(my.reads.trans.drop$type,
                                                 -xtfrm(my.reads.trans.drop$Percent.Thau),
                                                 my.reads.trans.drop$Carcelia_iliaca), ]

### make a panel factor after setting the decreasing OPM order
### NOTE(review): assumes exactly 384 + 384 + 383 = 1151 rows — confirm against the input data
my.reads.trans.drop$panel<-as.factor(c(rep(1,times=384),
                                       rep(2,times=384),
                                       rep(3,times=383)))

### melt the data into long format
my.reads.melt<-melt(my.reads.trans.drop, id.vars=c("sample","plate",
                                                   "plate.numeric","Percent.Thau","nest","type","panel"))
colnames(my.reads.melt)<-c("Sample","Plate","Plate.numeric","Percent.Thau","Nest","Type","Panel","Species","Reads")

### create an ID variable for the order
my.reads.melt$level.order<-seq(1, length(my.reads.melt$Percent.Thau),1)

### see what species remain after filtering
levels(my.reads.melt$Species)

### order the species for plotting the legend
my.reads.melt$Species <- factor(my.reads.melt$Species,
                                levels=c("Thaumetopoea_processionea",
                                         "Carcelia_iliaca",
                                         "Astatotilapia_calliptera",
                                         "Triops",
                                         "unassigned"))

### colours matched to the species legend order above
my.colours <- c("#FFFF33", "#377EB8", "#E41A1C", "#99A367", "#000000")

### order the samples from highest OPM % to lowest and in increasing plate number
my.reads.melt$Sample <- reorder(my.reads.melt$Sample, my.reads.melt$level.order)

### order the plates from lowest to highest
#my.reads.melt$Plate <- reorder(my.reads.melt$Plate, my.reads.melt$Plate.numeric)

### create a nice colour scale using colourRampPalette and RColorBrewer - eventually
vivid.colours2<-brewer.pal(length(unique(my.reads.melt$Species)),"Set1")

### count the columns greater than zero and write to a new data frame
hit.hist<-data.frame(OTUs = rowSums(my.reads.trans.samps.only[c(2,8:11)] != 0),
                     Type=my.reads.trans.samps.only$type)

########################################################################################
###################  make a plot of % composition by PCR plate  #######################
########################################################################################

svg(file="Diagrams/well_composition_reads_by_plate_OPM2_0.97.svg", width=10, height=8)

### set up the ggplot
well.composition<-ggplot(data=my.reads.melt, aes(x=Sample, y=Reads, fill=Species)) +
  ### make it a stacked barplot and set the bars to be the same height
  geom_bar(position="fill", stat="identity") +
  ### wrap the plot by plate
  facet_wrap(~Panel, scales="free_x", nrow=3, ncol=1) +
  ### give it a percentage scale
  scale_y_continuous(labels = percent_format()) +
  ### set the colours
  # scale_fill_manual(name="Species",
  #                   values = jet.colors3(length(unique(my.reads.melt$Species)))) +
  scale_fill_manual(name="Species",
                    labels=c("Thaumetopoea processionea",
                             "Carcelia iliaca",
                             "Astatotilapia calliptera",
                             "Triops cancriformis",
                             "unassigned"),
                    values = my.colours) +
  ### add a sensible y axis label
  labs(y = "% of reads per well", x="PCR wells") +
  ### rotate the x-axis labels and resize the text for the svg
  theme(axis.text.x = element_text(size = rel(0.2), angle =90),
        axis.ticks.x=element_blank(),
        axis.text.y = element_text(size = rel(1.1), colour="black"),
        axis.title.y = element_text(size = rel(1), vjust=2),
        axis.title.x = element_text(size = rel(1), vjust=-1.3),
        legend.text = element_text(size = rel(1), face="italic"),
        legend.title = element_text(size = rel(1)),
        strip.text.x = element_blank(),
        strip.background=element_blank(),
        legend.position = "bottom",
        legend.background = element_blank(),
        panel.background = element_blank(),
        panel.grid.major = element_blank(),
        panel.grid.minor = element_blank(),
        axis.line = element_blank(),
        panel.border = element_rect(colour = "black", fill=NA, size=0.1),
        plot.margin=unit(c(0.1, 0.1, 1, 1), "lines"))

### Make a ggplot object of our OTU counts
c<- ggplot(hit.hist, aes(factor(reorder(OTUs, -OTUs)))) +
  geom_bar(alpha=0.5, fill="#3399ff") +
  labs(y = "Frequency", x="OTUs per well") +
  coord_flip() +
  ### rotate the x-axis labels and resize the text for the svg
  theme(axis.text = element_text(size = rel(1.1), colour="black"),
        axis.title.y = element_text(size = rel(1), vjust=2),
        axis.title.x = element_text(size = rel(1), vjust=-1),
        panel.grid.minor = element_blank(),
        axis.line = element_blank(),
        plot.margin=unit(c(0.1, 0.1, 1, 1), "lines"))

grid.arrange(well.composition, c, heights=c(3/4, 1/4), ncol=1)
dev.off()

########################################################################################
#             work out percentage parasitism with carcelia                             #
########################################################################################

### divide the number of wells containing Carcelia by all wells that are not +ve or -ve
### to get a percentage (1151 = number of sample wells — TODO confirm against the data)
percent.cacelia<-(colSums(my.reads.trans[3] > 0)/1151)*100

########################################################################################
#             Test for difference in OTU number across MyFi and MyTaq                  #
########################################################################################

### read in the read stats for the enzyme comparison
Reads<-read.csv("DATA/OPM2/reads_stats_tag_comparison.csv")
Reads$ratio<-Reads$cluster_above_thres/Reads$queries

### add a variable for enzyme ("R"-suffixed samples were run with MyTaq)
Reads$enzyme<-ifelse(grepl("R", Reads$sample), "MyTaq", "MyFi")

### remove the R suffix for MyTaq
Reads$sample<-gsub("R","", Reads$sample)

### drop everything other than the grouping variable and the clusters retained
Reads.subs<-subset(Reads, select=c("sample", "ratio", "enzyme"))

ggplot(Reads.subs, aes(x= enzyme, y=ratio)) + geom_boxplot()

t.test(Reads.subs$ratio~Reads.subs$enzyme)
# ##Merging all the data from Tschabi etal 2008
# Sudan<-read.csv("TchabiSudanSavana.csv", header = TRUE,stringsAsFactors = FALSE)
# NGuinea<-read.csv("TchabiNortherGuinea.csv", header = TRUE,stringsAsFactors = FALSE)
# SGuinea<-read.csv("TchabiSouthernGuinea.csv", header = TRUE,stringsAsFactors = FALSE)
# TchabiAllData<-Reduce(function(...)merge(...,by="AMF_species",all=TRUE),
#                       list(Sudan,NGuinea,SGuinea))
#
# #Changing the NA for 0 in the abundance data
# TchabiAllData[,-1][(is.na(as.matrix(TchabiAllData[,-1])))]<-0
# #Replacing empty blanks in the names by an underscore
# TchabiAllData[,1]<-sub(" ","_",TchabiAllData[,1])
#
# #Checking the names according to the taxonomic reference (AMF_Taxonomy)
# # TchabiAllData[!TchabiAllData[,1]%in%AMF_Taxonomy[,1],1]
#
# #Correcting typos
# which(TchabiAllDataCopy$AMF_species=="Scutellospora_callospora")
# which(TchabiAllDataCopy$AMF_species=="Glomus_hyderabadense")
# which(TchabiAllDataCopy$AMF_species=="Glomus_brohulti")
# which(TchabiAllDataCopy$AMF_species=="Glomus_pachycauilis")
#
# TchabiAllDataCopy[38,1]<-"Glomus_hyderabadensis"
# TchabiAllDataCopy[c(20,25),1]<-"Glomus_brohultii"
# TchabiAllDataCopy[46,1]<-"Sclerocystis_pachycaulis"
# TchabiAllDataCopy[59,1]<-"Scutellospora_calospora"
# #Checking the remaining names that do not match correct taxonomy, deleting entries as "sp"
#TchabiAllDataCopy[!TchabiAllDataCopy[,1]%in%AMF_Taxonomy[,1],1]
#
# TchabiAllDataCopy<-TchabiAllDataCopy[-c(13,51,64,65),]
# write.csv(TchabiAllData,"TchabiAllData_etal2008.csv")#This is the dataframe that would need to be read to re-do the analysis

################################TCHABI DATA ANALYSIS#######################################

library(ggplot2)
library(tidyr)
library(dplyr)

TchabiAllDataCopy<-read.csv("TchabiAllData_etal2008.csv",header = TRUE,stringsAsFactors = FALSE)

#Correcting names according to AMF_Taxonomy using secondTrial
TchabiAllDataCopy<-secondTrial(AMF_Taxonomy,TchabiAllDataCopy)

# #Checking the remaining names that do not match correct taxonomy, deleting entries as "sp"
TchabiAllDataCopy[!TchabiAllDataCopy[,1]%in%AMF_Taxonomy[,1],1]
# # TchabiAllDataCopy<-TchabiAllDataCopy[-c(13,51,64,65),]

# Rename the merge suffixes (.x / .y) to the region each block of columns came from.
# FIX(review): the original patterns ".x$"/".y$" left the dot unescaped, so they
# matched *any* character followed by a trailing x/y; escape the dot so only the
# literal merge suffixes are replaced.
names(TchabiAllDataCopy)[2:9]<-sub("\\.x$","_Sudan",names(TchabiAllDataCopy)[2:9])
names(TchabiAllDataCopy)[10:17]<- sub("\\.y$","_NGuinea",names(TchabiAllDataCopy)[10:17])
names(TchabiAllDataCopy)[18:25]<-c("NaturalForest1_SGuinea","NaturalForest2_SGuinea","NaturalForest3_SGuinea",
                                   "Yam_SGuinea","MixedCropping_SGuinea","Peanuts_SGuinea","Cotton_SGuinea",
                                   "Fallow_SGuinea")

#Changing the format of the dataframe for the ggplot of spore sizes
#(24 = number of site columns stacked — TODO confirm if sites change)
TchabiData<-cbind(stack(TchabiAllDataCopy[,-1]),
                  rep(TchabiAllDataCopy[,1],24))
names(TchabiData)<-c("abundance","site","good.names")
head(TchabiData)
rm(TchabiAllDataCopy)

#Adding the spore trait information
TchabiData<-left_join(TchabiData,AMF_All_Copy[,c(1,13)])

# #Checking which names have not spore data assign in AMF_All_Copy
# AMF_All_Copy[which(!is.na(match(AMF_All_Copy[,1],TchabiData[,3]))),c(1,13)]

#Organizing the column factor so it follows the pattern less to more disturbance
TchabiData$site<-factor(TchabiData$site,
                        levels=c(
                          levels(TchabiData$site)[grep("NaturalForest",levels(TchabiData$site))],
                          levels(TchabiData$site)[grep("Mixed",levels(TchabiData$site))],
                          levels(TchabiData$site)[grep("Yam",levels(TchabiData$site))],
                          levels(TchabiData$site)[grep("Peanuts",levels(TchabiData$site))],
                          levels(TchabiData$site)[grep("Fallow",levels(TchabiData$site))],
                          levels(TchabiData$site)[grep("Cotton",levels(TchabiData$site))]))

#Creating the column Site_Type (strip the replicate digit, keep land use + region)
TchabiData$Site_Type<-TchabiData$site
TchabiData$Site_Type<-sub("\\d_Sudan","Sudan",TchabiData$Site_Type)
TchabiData$Site_Type<-sub("\\d_NGuinea","NGuinea",TchabiData$Site_Type)
TchabiData$Site_Type<-sub("\\d_SGuinea","SGuinea",TchabiData$Site_Type)
TchabiData$Site_Type<- factor(TchabiData$Site_Type,
                              levels=c("NaturalForestSudan","NaturalForestNGuinea","NaturalForestSGuinea",
                                       "MixedCropping_Sudan","MixedCropping_NGuinea","MixedCropping_SGuinea",
                                       "Yam_Sudan","Yam_NGuinea","Yam_SGuinea",
                                       "Peanuts_Sudan","Peanuts_NGuinea","Peanuts_SGuinea",
                                       "Fallow_Sudan","Fallow_NGuinea","Fallow_SGuinea",
                                       "Cotton_Sudan","Cotton_NGuinea","Cotton_SGuinea"
                              ))

#Doing ggplot to have a view of all sites
ggplot(filter(TchabiData,abundance!=0),
       aes(x=site,y=SporeArea,size=abundance,col=good.names))+
  geom_point(alpha=0.5)+scale_y_log10()+
  theme(legend.position = "none")+
  theme(axis.text.x = element_text(size = 5,angle=45))+
  ggtitle("Tchabi2008All")

#Doing ggplot, this time grouping by the type of site
#Creating the graphic
#NOTE(review): sp_frequency counts cells where abundance == 1 exactly; if abundance
#can exceed 1 this may have been meant as abundance > 0 — confirm with the data coding.
TchabiData %>%
  group_by(Site_Type,good.names) %>%
  summarise(sp_frequency=sum(abundance==1)/length(abundance),SporeArea=mean(SporeArea)) %>%
  filter(sp_frequency>0) %>%
  ggplot(aes(x=Site_Type,y=SporeArea,size=sp_frequency,col=good.names))+
  geom_point(alpha=0.5)+
  scale_y_log10()+
  theme(legend.position = "none")+
  theme(axis.text.x = element_text(size = 7,angle=45))+
  ggtitle("Tchabietal2008Frequency")

#removing the file from the workspace
rm(TchabiData)
/Scripts/Tschabi_etal_2008.R
no_license
aguilart/SporeSize
R
false
false
5,510
r
# ##Merging all the data from Tschabi etal 2008
# Sudan<-read.csv("TchabiSudanSavana.csv", header = TRUE,stringsAsFactors = FALSE)
# NGuinea<-read.csv("TchabiNortherGuinea.csv", header = TRUE,stringsAsFactors = FALSE)
# SGuinea<-read.csv("TchabiSouthernGuinea.csv", header = TRUE,stringsAsFactors = FALSE)
# TchabiAllData<-Reduce(function(...)merge(...,by="AMF_species",all=TRUE),
#                       list(Sudan,NGuinea,SGuinea))
#
# #Changing the NA for 0 in the abundance data
# TchabiAllData[,-1][(is.na(as.matrix(TchabiAllData[,-1])))]<-0
# #Replacing empty blanks in the names by an underscore
# TchabiAllData[,1]<-sub(" ","_",TchabiAllData[,1])
#
# #Checking the names according to the taxonomic reference (AMF_Taxonomy)
# # TchabiAllData[!TchabiAllData[,1]%in%AMF_Taxonomy[,1],1]
#
# #Correcting typos
# which(TchabiAllDataCopy$AMF_species=="Scutellospora_callospora")
# which(TchabiAllDataCopy$AMF_species=="Glomus_hyderabadense")
# which(TchabiAllDataCopy$AMF_species=="Glomus_brohulti")
# which(TchabiAllDataCopy$AMF_species=="Glomus_pachycauilis")
#
# TchabiAllDataCopy[38,1]<-"Glomus_hyderabadensis"
# TchabiAllDataCopy[c(20,25),1]<-"Glomus_brohultii"
# TchabiAllDataCopy[46,1]<-"Sclerocystis_pachycaulis"
# TchabiAllDataCopy[59,1]<-"Scutellospora_calospora"
# #Checking the remaining names that do not match correct taxonomy, deleting entries as "sp"
#TchabiAllDataCopy[!TchabiAllDataCopy[,1]%in%AMF_Taxonomy[,1],1]
#
# TchabiAllDataCopy<-TchabiAllDataCopy[-c(13,51,64,65),]
# write.csv(TchabiAllData,"TchabiAllData_etal2008.csv")#This is the dataframe that would need to be read to re-do the analysis

################################TCHABI DATA ANALYSIS#######################################

library(ggplot2)
library(tidyr)
library(dplyr)

TchabiAllDataCopy<-read.csv("TchabiAllData_etal2008.csv",header = TRUE,stringsAsFactors = FALSE)

#Correcting names according to AMF_Taxonomy using secondTrial
TchabiAllDataCopy<-secondTrial(AMF_Taxonomy,TchabiAllDataCopy)

# #Checking the remaining names that do not match correct taxonomy, deleting entries as "sp"
TchabiAllDataCopy[!TchabiAllDataCopy[,1]%in%AMF_Taxonomy[,1],1]
# # TchabiAllDataCopy<-TchabiAllDataCopy[-c(13,51,64,65),]

# Rename the merge suffixes (.x / .y) to the region each block of columns came from.
# FIX(review): the original patterns ".x$"/".y$" left the dot unescaped, so they
# matched *any* character followed by a trailing x/y; escape the dot so only the
# literal merge suffixes are replaced.
names(TchabiAllDataCopy)[2:9]<-sub("\\.x$","_Sudan",names(TchabiAllDataCopy)[2:9])
names(TchabiAllDataCopy)[10:17]<- sub("\\.y$","_NGuinea",names(TchabiAllDataCopy)[10:17])
names(TchabiAllDataCopy)[18:25]<-c("NaturalForest1_SGuinea","NaturalForest2_SGuinea","NaturalForest3_SGuinea",
                                   "Yam_SGuinea","MixedCropping_SGuinea","Peanuts_SGuinea","Cotton_SGuinea",
                                   "Fallow_SGuinea")

#Changing the format of the dataframe for the ggplot of spore sizes
#(24 = number of site columns stacked — TODO confirm if sites change)
TchabiData<-cbind(stack(TchabiAllDataCopy[,-1]),
                  rep(TchabiAllDataCopy[,1],24))
names(TchabiData)<-c("abundance","site","good.names")
head(TchabiData)
rm(TchabiAllDataCopy)

#Adding the spore trait information
TchabiData<-left_join(TchabiData,AMF_All_Copy[,c(1,13)])

# #Checking which names have not spore data assign in AMF_All_Copy
# AMF_All_Copy[which(!is.na(match(AMF_All_Copy[,1],TchabiData[,3]))),c(1,13)]

#Organizing the column factor so it follows the pattern less to more disturbance
TchabiData$site<-factor(TchabiData$site,
                        levels=c(
                          levels(TchabiData$site)[grep("NaturalForest",levels(TchabiData$site))],
                          levels(TchabiData$site)[grep("Mixed",levels(TchabiData$site))],
                          levels(TchabiData$site)[grep("Yam",levels(TchabiData$site))],
                          levels(TchabiData$site)[grep("Peanuts",levels(TchabiData$site))],
                          levels(TchabiData$site)[grep("Fallow",levels(TchabiData$site))],
                          levels(TchabiData$site)[grep("Cotton",levels(TchabiData$site))]))

#Creating the column Site_Type (strip the replicate digit, keep land use + region)
TchabiData$Site_Type<-TchabiData$site
TchabiData$Site_Type<-sub("\\d_Sudan","Sudan",TchabiData$Site_Type)
TchabiData$Site_Type<-sub("\\d_NGuinea","NGuinea",TchabiData$Site_Type)
TchabiData$Site_Type<-sub("\\d_SGuinea","SGuinea",TchabiData$Site_Type)
TchabiData$Site_Type<- factor(TchabiData$Site_Type,
                              levels=c("NaturalForestSudan","NaturalForestNGuinea","NaturalForestSGuinea",
                                       "MixedCropping_Sudan","MixedCropping_NGuinea","MixedCropping_SGuinea",
                                       "Yam_Sudan","Yam_NGuinea","Yam_SGuinea",
                                       "Peanuts_Sudan","Peanuts_NGuinea","Peanuts_SGuinea",
                                       "Fallow_Sudan","Fallow_NGuinea","Fallow_SGuinea",
                                       "Cotton_Sudan","Cotton_NGuinea","Cotton_SGuinea"
                              ))

#Doing ggplot to have a view of all sites
ggplot(filter(TchabiData,abundance!=0),
       aes(x=site,y=SporeArea,size=abundance,col=good.names))+
  geom_point(alpha=0.5)+scale_y_log10()+
  theme(legend.position = "none")+
  theme(axis.text.x = element_text(size = 5,angle=45))+
  ggtitle("Tchabi2008All")

#Doing ggplot, this time grouping by the type of site
#Creating the graphic
#NOTE(review): sp_frequency counts cells where abundance == 1 exactly; if abundance
#can exceed 1 this may have been meant as abundance > 0 — confirm with the data coding.
TchabiData %>%
  group_by(Site_Type,good.names) %>%
  summarise(sp_frequency=sum(abundance==1)/length(abundance),SporeArea=mean(SporeArea)) %>%
  filter(sp_frequency>0) %>%
  ggplot(aes(x=Site_Type,y=SporeArea,size=sp_frequency,col=good.names))+
  geom_point(alpha=0.5)+
  scale_y_log10()+
  theme(legend.position = "none")+
  theme(axis.text.x = element_text(size = 7,angle=45))+
  ggtitle("Tchabietal2008Frequency")

#removing the file from the workspace
rm(TchabiData)
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/gaussian-elimination.R
\name{echelon}
\alias{echelon}
\title{Echelon Form of a Matrix}
\usage{
echelon(A, B, reduced = TRUE, ...)
}
\arguments{
\item{A}{coefficient matrix}

\item{B}{right-hand side vector or matrix. If \code{B} is a matrix, the result gives solutions for each column as the right-hand side of the equations with coefficients in \code{A}.}

\item{reduced}{logical; should reduced row echelon form be returned? If \code{FALSE} a non-reduced row echelon form will be returned}

\item{...}{other arguments passed to \code{gaussianElimination}}
}
\value{
the reduced echelon form of \code{A}.
}
\description{
Returns the (reduced) row-echelon form of the matrix \code{A}, using \code{\link{gaussianElimination}}.
}
\details{
When the matrix \code{A} is square and non-singular, the reduced row-echelon result will be the identity matrix, while the row-echelon form will be an upper triangular matrix. Otherwise, the result will have some all-zero rows, and the rank of the matrix is the number of not all-zero rows.
}
\examples{
A <- matrix(c(2, 1, -1, -3, -1, 2, -2, 1, 2), 3, 3, byrow=TRUE)
b <- c(8, -11, -3)
echelon(A, b, verbose=TRUE, fractions=TRUE) # reduced row-echelon form
echelon(A, b, reduced=FALSE, verbose=TRUE, fractions=TRUE) # row-echelon form
A <- matrix(c(1,2,3,4,5,6,7,8,10), 3, 3) # a nonsingular matrix A
echelon(A, reduced=FALSE) # the row-echelon form of A
echelon(A) # the reduced row-echelon form of A
b <- 1:3
echelon(A, b) # solving the matrix equation Ax = b
echelon(A, diag(3)) # inverting A
B <- matrix(1:9, 3, 3) # a singular matrix B
echelon(B)
echelon(B, reduced=FALSE)
echelon(B, b)
echelon(B, diag(3))
}
\author{
John Fox
}
/man/echelon.Rd
no_license
iMarcello/matlib
R
false
true
1,784
rd
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/gaussian-elimination.R
\name{echelon}
\alias{echelon}
\title{Echelon Form of a Matrix}
\usage{
echelon(A, B, reduced = TRUE, ...)
}
\arguments{
\item{A}{coefficient matrix}

\item{B}{right-hand side vector or matrix. If \code{B} is a matrix, the result gives solutions for each column as the right-hand side of the equations with coefficients in \code{A}.}

\item{reduced}{logical; should reduced row echelon form be returned? If \code{FALSE} a non-reduced row echelon form will be returned}

\item{...}{other arguments passed to \code{gaussianElimination}}
}
\value{
the reduced echelon form of \code{A}.
}
\description{
Returns the (reduced) row-echelon form of the matrix \code{A}, using \code{\link{gaussianElimination}}.
}
\details{
When the matrix \code{A} is square and non-singular, the reduced row-echelon result will be the identity matrix, while the row-echelon form will be an upper triangular matrix. Otherwise, the result will have some all-zero rows, and the rank of the matrix is the number of not all-zero rows.
}
\examples{
A <- matrix(c(2, 1, -1, -3, -1, 2, -2, 1, 2), 3, 3, byrow=TRUE)
b <- c(8, -11, -3)
echelon(A, b, verbose=TRUE, fractions=TRUE) # reduced row-echelon form
echelon(A, b, reduced=FALSE, verbose=TRUE, fractions=TRUE) # row-echelon form
A <- matrix(c(1,2,3,4,5,6,7,8,10), 3, 3) # a nonsingular matrix A
echelon(A, reduced=FALSE) # the row-echelon form of A
echelon(A) # the reduced row-echelon form of A
b <- 1:3
echelon(A, b) # solving the matrix equation Ax = b
echelon(A, diag(3)) # inverting A
B <- matrix(1:9, 3, 3) # a singular matrix B
echelon(B)
echelon(B, reduced=FALSE)
echelon(B, b)
echelon(B, diag(3))
}
\author{
John Fox
}
### Preppin' Data 2021 Week 15 — join menu prices onto orders, apply the
### Monday half-price rule, then produce two summary tables.

library(readr)
library(dplyr)
library(tidyr)
library(readxl)
library(splitstackshape)

# Use English day names so strftime(..., '%A') yields "Monday" etc.
Sys.setlocale("LC_ALL","English")

xlsx <- "F:/Data/Menu and Orders.xlsx"

# Reshape the menu into one row per dish with its ID and price.
menu <- read_excel(xlsx, sheet = 'MENU') %>%
  pivot_longer(cols=c('Pizza','Pasta', 'House Plates'),
               names_to='Type', values_to = 'Name', values_drop_na = TRUE) %>%
  mutate('ID' = as.character(if_else(`Type`=='Pizza', `Pizza ID`,
                                     if_else(`Type`=='Pasta', `Pasta ID`, `House Plates ID`))),
         'Price' = if_else(`Type`=='Pizza', `Pizza Price`,
                           if_else(`Type`=='Pasta', `Pasta Price`, `House Plates Prices`))) %>%
  select('ID', 'Price')

# Split each order string on '-' into one row per ordered item.
orders <- read_excel(xlsx, sheet = 'Order') %>%
  cSplit(., 'Order', '-', type.convert = FALSE) %>%
  pivot_longer(cols=starts_with('Order_'),
               names_to = c(".value",NA),
               names_pattern = '(Order)_(.*)',
               values_drop_na = TRUE)

# Attach the menu price to every ordered item and halve it on Mondays.
df <- merge(menu, orders, by.x='ID', by.y='Order') %>%
  mutate('Weekday' = strftime(`Order Date`,'%A'),
         'Price' = if_else(`Weekday`=='Monday',0.5,1)*`Price`)

# Output A: takings per weekday.
finalA <- df %>%
  group_by(`Weekday`) %>%
  summarise('Price' = sum(`Price`), .groups ='drop')

# Output B: the customer(s) who ordered the most items.
finalB <- df %>%
  group_by(`Customer Name`) %>%
  summarise('Count Items' = n(), .groups ='drop') %>%
  filter(`Count Items` == max(`Count Items`))

View(finalA)
View(finalB)
/2021/2021W15/preppindataW15.R
no_license
ArseneXie/Preppindata
R
false
false
1,263
r
library(readr) library(dplyr) library(tidyr) library(readxl) library(splitstackshape) Sys.setlocale("LC_ALL","English") xlsx <- "F:/Data/Menu and Orders.xlsx" df <- read_excel(xlsx, sheet = 'MENU') %>% pivot_longer(cols=c('Pizza','Pasta', 'House Plates'), names_to='Type', values_to = 'Name', values_drop_na = TRUE) %>% mutate('ID' = as.character(if_else(`Type`=='Pizza', `Pizza ID`, if_else(`Type`=='Pasta', `Pasta ID`, `House Plates ID`))), 'Price' = if_else(`Type`=='Pizza', `Pizza Price`, if_else(`Type`=='Pasta', `Pasta Price`, `House Plates Prices`))) %>% select('ID', 'Price') %>% merge(.,read_excel(xlsx, sheet = 'Order') %>% cSplit(., 'Order', '-', type.convert = FALSE) %>% pivot_longer(cols=starts_with('Order_'), names_to = c(".value",NA), names_pattern = '(Order)_(.*)', values_drop_na = TRUE), by.x='ID', by.y='Order') %>% mutate('Weekday' = strftime(`Order Date`,'%A'), 'Price' = if_else(`Weekday`=='Monday',0.5,1)*`Price`) finalA <- df %>% group_by(`Weekday`) %>% summarise('Price' = sum(`Price`), .groups ='drop') finalB <- df %>% group_by(`Customer Name`) %>% summarise('Count Items' = n(), .groups ='drop') %>% filter(`Count Items` == max(`Count Items`)) View(finalA) View(finalB)
habitat.map = function ( ip=NULL, p ) {
  # Render one map per (year, variable) run listed in p$runs.
  # 'p' is the ecomod parameter list; 'ip' selects rows of p$runs
  # (defaults to all runs).
  # dta contains the uninterpolated raw point data
  if (exists( "init.files", p)) loadfilelist( p$init.files )
  if (exists( "libs", p)) loadlibraries( p$libs )
  if (is.null(ip)) ip = 1:p$nruns
  outdir = file.path( p$project.outdir.root, p$spatial.domain, p$season, p$modtype, "maps" )
  dir.create(path=outdir, recursive=TRUE, showWarnings=FALSE)

  # Fixed colour-scale range per variable, taken from the raw point data.
  pdat = habitat.db( DS=p$project.name, p=p )
  dr = list()
  for ( ww in p$varstomodel ) {
    dr[[ww]] = quantile( pdat[,ww], probs=c(0.025, 0.975), na.rm=TRUE ) # use 95%CI
  }
  rm (pdat); gc()

  for ( iip in ip ) {
    yr = p$runs[iip,"yrs"]
    ww = p$runs[iip,"vars"]
    hd = habitat.db( DS="baseline", p=p )
    hi = habitat.interpolate( p=p, yr=yr, vname=ww )
    if (is.null(hi)) next()
    hd[,ww] = hi

    # Work on a per-iteration copy of the clipping range. The original code
    # did `dr = log10(dr)`, which errors on a plain list and would also have
    # clobbered the stored ranges for every subsequent iteration.
    vrange = dr[[ww]]
    annot = paste( capsword(p$project.name), " : ", capsword(ww), " (", yr, ")", sep="")
    if ( ww %in% c("mr", "smr") ) {
      # metabolic rate variables are mapped on a log10 scale
      hd[,ww] = log10( hd[,ww] )
      vrange = log10( vrange )
      # BUG FIX: this "(log10)" annotation used to be overwritten
      # unconditionally below, so it never appeared on the map.
      annot = paste( capsword(p$project.name), " (log10) : ", capsword(ww), " (", yr, ")", sep="")
    }

    hd = hd[ filter.region.polygon( hd, region=c("4vwx", "5yz" ), planar=TRUE, proj.type=p$internal.projection ) , ]

    # Clamp interpolated values into the colour-scale range before mapping.
    datarange = seq( vrange[1], vrange[2], length.out=100 )
    il = which( hd[,ww] < vrange[1] )
    if ( length(il) > 0 ) hd[il,ww] = vrange[1]
    iu = which( hd[,ww] > vrange[2] )
    if ( length(iu) > 0 ) hd[iu,ww] = vrange[2]

    colourmap = color.code( "blue.black", datarange )
    outfn = paste( "maps", ww, yr, sep=".")
    map( xyz= hd[, c("plon", "plat", ww)], cfa.regions=FALSE, depthcontours=TRUE,
         pts=NULL, annot=annot, fn=outfn, loc=outdir, at=datarange,
         col.regions=colourmap, corners=p$corners )
  }
  return("Completed mapping")
}
/habitat/src/_Rfunctions/habitat.map.r
no_license
AMCOOK/ecomod
R
false
false
1,941
r
habitat.map = function ( ip=NULL, p ) { # dta contains the uninterpolated raw point data if (exists( "init.files", p)) loadfilelist( p$init.files ) if (exists( "libs", p)) loadlibraries( p$libs ) if (is.null(ip)) ip = 1:p$nruns outdir = file.path( p$project.outdir.root, p$spatial.domain, p$season, p$modtype, "maps" ) dir.create(path=outdir, recursive=T, showWarnings=F) pdat = habitat.db( DS=p$project.name, p=p ) dr = list() for ( ww in p$varstomodel ) { dr[[ww]] = quantile( pdat[,ww], probs=c(0.025, 0.975), na.rm=TRUE ) # use 95%CI } rm (pdat); gc() for ( iip in ip ) { yr = p$runs[iip,"yrs"] ww = p$runs[iip,"vars"] hd = habitat.db( DS="baseline", p=p ) hi = habitat.interpolate( p=p, yr=yr, vname=ww ) if (is.null(hi)) next() hd[,ww] = hi if ( ww %in% c("mr", "smr") ) { hd[,ww] = log10( hd[,ww] ) dr = log10(dr) annot = paste( capsword(p$project.name), " (log10) : ", capsword(ww), " (", yr, ")", sep="") } hd = hd[ filter.region.polygon( hd, region=c("4vwx", "5yz" ), planar=T, proj.type=p$internal.projection ) , ] annot = paste( capsword(p$project.name), " : ", capsword(ww), " (", yr, ")", sep="") datarange = seq( dr[[ww]][1], dr[[ww]][2], length.out=100 ) il = which( hd[,ww] < dr[[ww]][1] ) if ( length(il) > 0 ) hd[il,ww] = dr[[ww]][1] iu = which( hd[,ww] > dr[[ww]][2] ) if ( length(iu) > 0 ) hd[iu,ww] = dr[[ww]][2] colourmap = color.code( "blue.black", datarange ) outfn = paste( "maps", ww, yr, sep=".") map( xyz= hd[, c("plon", "plat", ww)], cfa.regions=F, depthcontours=T, pts=NULL, annot=annot, fn=outfn, loc=outdir, at=datarange, col.regions=colourmap, corners=p$corners ) } return("Completed mapping") }
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/tissot_map.R
\name{tissot_map}
\alias{tissot_map}
\alias{tissot_abline}
\alias{tissot_get_proj}
\title{Get last plot projection}
\usage{
tissot_map(..., add = TRUE)

tissot_abline(lambda, phi = NULL, ..., proj.in = NULL)

tissot_get_proj()
}
\arguments{
\item{...}{graphical parameters for [lines()] if 'add = TRUE', or for [plot()] if 'add = FALSE'}

\item{add}{logical, default 'TRUE' add to existing plot or create new}

\item{lambda}{longitude at which to draw a vertical line}

\item{phi}{latitude at which to draw a horizontal line}

\item{proj.in}{projection for expert use}
}
\value{
'tissot_map()' returns the internal world map data (projected if one is current) as a matrix

'tissot_abline()' called for its side effect of drawing on the plot

'tissot_get_proj()' returns the value of the current projection, or NULL
}
\description{
'tissot_map()' will add the [world] coastline to any map.
}
\details{
'tissot_get_proj()' When the indicatrix is plotted it registers its projection. This string can be obtained with this getter function.

'tissot_abline()' will draw a vertical and a horizontal line at a given longitude and latitude (where they intersect is the actual lon,lat location)
}
/man/tissot_map.Rd
no_license
hypertidy/tissot
R
false
true
1,272
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/tissot_map.R \name{tissot_map} \alias{tissot_map} \alias{tissot_abline} \alias{tissot_get_proj} \title{Get last plot projection} \usage{ tissot_map(..., add = TRUE) tissot_abline(lambda, phi = NULL, ..., proj.in = NULL) tissot_get_proj() } \arguments{ \item{...}{graphical parameters for [lines()] if 'add = TRUE', or for [plot()] if 'add = FALSE'} \item{add}{logical, default 'TRUE' add to existing plot or create new} \item{lambda}{longitude at which to draw a vertical line} \item{phi}{latitude at which to draw a horizontal line} \item{proj.in}{projection for expert use} } \value{ 'tissot_map()' returns the internal world map data (projected if one is current) as a matrix 'tissot_abline()' called for its side effect of drawing on the plot 'tissot_get_proj()' returns the value of the current projection, or NULL } \description{ 'tissot_map()' will add the [world] coastline to any map. } \details{ 'tissot_get_proj()' When the indicatrix is plotted it registers its projection. This string can be obtained with this getter function. 'tissot_abline()' will draw a vertical and horizontal line at a give longitude latitude (where they intersect is the actual lon,lat location) }
context("saving and loading checklists")

test_that("checklist can be saved and reloaded", {
  chk <- cl_create()
  # Saving a fresh checklist should be silent (regexp = NA asserts no warning).
  expect_warning(cl_save(chk, "tmpcl.rds"), regexp = NA)
  rm(chk)
  # Reloading the file should also raise no warnings.
  expect_warning(cl_load("tmpcl.rds"), regexp = NA)
  file.remove("tmpcl.rds")
})

test_that("file extensions are handled properly", {
  chk <- cl_create()
  # Unsupported extensions are rejected outright.
  expect_error(cl_save(chk, "cl.html"))
  # A missing extension is tolerated, but announced via a message.
  expect_message(cl_save(chk, "tmpcl"))
  file.remove("tmpcl.rds")
  # The canonical .rds extension saves silently.
  expect_warning(cl_save(chk, "tmpcl.rds"), regexp = NA)
  file.remove("tmpcl.rds")
})
/tests/testthat/test_cl_save_and_load.R
permissive
geoffwlamb/checklist
R
false
false
524
r
context("saving and loading checklists") test_that("checklist can be saved and reloaded", { cl <- cl_create() expect_warning(cl_save(cl, "tmpcl.rds"), regexp = NA) rm(cl) expect_warning(cl_load("tmpcl.rds"), regexp = NA) file.remove("tmpcl.rds") }) test_that("file extensions are handled properly", { cl <- cl_create() expect_error(cl_save(cl, "cl.html")) expect_message(cl_save(cl, "tmpcl")) file.remove("tmpcl.rds") expect_warning(cl_save(cl, "tmpcl.rds"), regexp = NA) file.remove("tmpcl.rds") })
#' @include internal.R ConservationProblem-proto.R
NULL

#' Evaluate connectivity of solution
#'
#' Calculate the connectivity held within a solution to a conservation
#' planning [problem()].
#' This summary statistic evaluates the connectivity of a solution using
#' pair-wise connectivity values between combinations of planning units.
#' It is specifically designed for symmetric connectivity data.
#'
#' @inheritParams add_connectivity_penalties
#' @inheritParams eval_cost_summary
#'
#' @details
#' This summary statistic is comparable to the `Connectivity_In` metric
#' reported by the
#' [*Marxan* software](https://marxansolutions.org) (Ball *et al.* 2009).
#' It is calculated using the same equations used to penalize solutions
#' with connectivity data (i.e., [add_connectivity_penalties()]).
#' Specifically, it is calculated as the sum of the pair-wise connectivity
#' values in the argument to `data`, weighted by the value of the planning
#' units in the solution.
#'
#' @inheritSection eval_cost_summary Solution format
#' @inheritSection add_connectivity_penalties Data format
#'
#' @return
#' [tibble::tibble()] object describing the connectivity of the
#' solution.
#' It contains the following columns:
#'
#' \describe{
#'
#' \item{summary}{`character` description of the summary statistic.
#'   The statistic associated with the `"overall"` value
#'   in this column is calculated using the entire solution
#'   (including all management zones if there are multiple zones).
#'   If multiple management zones are present, then summary statistics
#'   are also provided for each zone separately
#'   (indicated using zone names).}
#'
#' \item{connectivity}{`numeric` connectivity value.
#'   Greater values correspond to solutions associated with greater
#'   connectivity.
#'   Thus conservation planning exercises typically prefer solutions
#'   with greater values.}
#'
#' }
#'
#' @references
#' Ball IR, Possingham HP, and Watts M (2009) *Marxan and relatives:
#' Software for spatial conservation prioritisation* in Spatial conservation
#' prioritisation: Quantitative methods and computational tools. Eds Moilanen
#' A, Wilson KA, and Possingham HP. Oxford University Press, Oxford, UK.
#'
#' @seealso
#' See [summaries] for an overview of all functions for summarizing solutions.
#' Also, see [add_connectivity_penalties()] to penalize solutions with low
#' connectivity.
#'
#' @family summaries
#'
#' @examples
#' \dontrun{
#' # set seed for reproducibility
#' set.seed(500)
#'
#' # load data
#' data(sim_pu_raster, sim_pu_sf, sim_features,
#'      sim_pu_zones_sf, sim_features_zones)
#'
#' # build minimal conservation problem with raster data
#' p1 <- problem(sim_pu_raster, sim_features) %>%
#'       add_min_set_objective() %>%
#'       add_relative_targets(0.1) %>%
#'       add_binary_decisions() %>%
#'       add_default_solver(verbose = FALSE)
#'
#' # solve the problem
#' s1 <- solve(p1)
#'
#' # print solution
#' print(s1)
#'
#' # plot solution
#' plot(s1, main = "solution", axes = FALSE, box = FALSE)
#'
#' # simulate a connectivity matrix to describe the relative strength
#' # of connectivity between different planning units
#' # for brevity, we will use cost data here so that pairs
#' # of adjacent planning units with higher cost values will have a
#' # higher connectivity value
#' # (but see ?connectivity_matrix for more information)
#' cm1 <- connectivity_matrix(sim_pu_raster, sim_pu_raster)
#'
#' # calculate connectivity associated with the solution
#' r1 <- eval_connectivity_summary(p1, s1, data = cm1)
#' print(r1)
#'
#'
#' # build multi-zone conservation problem with polygon (sf) data
#' p2 <- problem(sim_pu_zones_sf, sim_features_zones,
#'               cost_column = c("cost_1", "cost_2", "cost_3")) %>%
#'       add_min_set_objective() %>%
#'       add_relative_targets(matrix(runif(15, 0.1, 0.2), nrow = 5,
#'                                   ncol = 3)) %>%
#'       add_binary_decisions() %>%
#'       add_default_solver(verbose = FALSE)
#'
#' # solve the problem
#' s2 <- solve(p2)
#'
#' # print first six rows of the attribute table
#' print(head(s2))
#'
#' # create new column representing the zone id that each planning unit
#' # was allocated to in the solution
#' s2$solution <- category_vector(
#'   s2[, c("solution_1_zone_1", "solution_1_zone_2", "solution_1_zone_3")])
#' s2$solution <- factor(s2$solution)
#'
#' # plot solution
#' plot(s2[, "solution"])
#'
#' # simulate connectivity matrix
#' # here, we will add a new column to sim_pu_zones_sf with
#' # randomly simulated values and create a connectivity matrix
#' # based on the average simulated values of adjacent planning units
#' sim_pu_zones_sf$con <- runif(nrow(sim_pu_zones_sf))
#' cm2 <- connectivity_matrix(sim_pu_zones_sf, "con")
#'
#' # calculate connectivity associated with the solution
#' r2 <- eval_connectivity_summary(
#'   p2, s2[, c("solution_1_zone_1", "solution_1_zone_2", "solution_1_zone_3")],
#'   data = cm2)
#' print(r2)
#'
#' }
#' @name eval_connectivity_summary
#'
#' @exportMethod eval_connectivity_summary
#'
#' @aliases eval_connectivity_summary,ConservationProblem,ANY,ANY,Matrix-method eval_connectivity_summary,ConservationProblem,ANY,ANY,matrix-method eval_connectivity_summary,ConservationProblem,ANY,ANY,dgCMatrix-method eval_connectivity_summary,ConservationProblem,ANY,ANY,data.frame-method eval_connectivity_summary,ConservationProblem,ANY,ANY,array-method
NULL

#' @export
methods::setGeneric("eval_connectivity_summary",
  signature = methods::signature("x", "solution", "zones", "data"),
  function(x, solution, zones = diag(number_of_zones(x)), data)
  standardGeneric("eval_connectivity_summary"))

#' @name eval_connectivity_summary
#' @usage \S4method{eval_connectivity_summary}{ConservationProblem,ANY,ANY,matrix}(x, solution, zones, data)
#' @rdname eval_connectivity_summary
methods::setMethod("eval_connectivity_summary",
  methods::signature("ConservationProblem", "ANY", "ANY", "matrix"),
  function(x, solution, zones, data) {
    # delegate to the dgCMatrix method after coercing to a sparse matrix
    eval_connectivity_summary(x, solution, zones,
                              methods::as(data, "dgCMatrix"))
})

#' @name eval_connectivity_summary
#' @usage \S4method{eval_connectivity_summary}{ConservationProblem,ANY,ANY,Matrix}(x, solution, zones, data)
#' @rdname eval_connectivity_summary
methods::setMethod("eval_connectivity_summary",
  methods::signature("ConservationProblem", "ANY", "ANY", "Matrix"),
  function(x, solution, zones, data) {
    # delegate to the dgCMatrix method after coercing to a sparse matrix
    eval_connectivity_summary(x, solution, zones,
                              methods::as(data, "dgCMatrix"))
})

#' @name eval_connectivity_summary
#' @usage \S4method{eval_connectivity_summary}{ConservationProblem,ANY,ANY,data.frame}(x, solution, zones, data)
#' @rdname eval_connectivity_summary
methods::setMethod("eval_connectivity_summary",
  methods::signature("ConservationProblem", "ANY", "ANY", "data.frame"),
  function(x, solution, zones, data) {
    # convert Marxan-style boundary data to a matrix, then delegate
    eval_connectivity_summary(
      x, solution, zones, marxan_boundary_data_to_matrix(x, data))
})

#' @name eval_connectivity_summary
#' @usage \S4method{eval_connectivity_summary}{ConservationProblem,ANY,ANY,dgCMatrix}(x, solution, zones, data)
#' @rdname eval_connectivity_summary
methods::setMethod("eval_connectivity_summary",
  methods::signature("ConservationProblem", "ANY", "ANY", "dgCMatrix"),
  function(x, solution, zones, data) {
    # assert valid arguments
    assertthat::assert_that(
      inherits(x, "ConservationProblem"),
      inherits(zones, c("matrix", "Matrix")),
      nrow(zones) == ncol(zones),
      is.numeric(as.vector(zones)),
      all(is.finite(as.vector(zones))),
      is.numeric(data@x),
      ncol(data) == nrow(data),
      max(zones) <= 1,
      min(zones) >= -1,
      number_of_total_units(x) == ncol(data),
      number_of_zones(x) == ncol(zones),
      all(is.finite(data@x)))
    # coerce zones to matrix
    zones <- as.matrix(zones)
    # restrict the connectivity data to cells that are planning units
    indices <- x$planning_unit_indices()
    data <- data[indices, indices, drop = FALSE]
    # check for symmetry (this function assumes symmetric connectivity data;
    # note: runtime message typo fixed, "it it" -> "it is")
    if (!Matrix::isSymmetric(data)) {
      warning(
        paste0(
          "argument to data does not contain symmetric connectivity values, ",
          "it is recommended to use eval_asym_connectivity_summary()"
        ),
        call. = FALSE, immediate. = TRUE
      )
    }
    # convert zones & dgCMatrix data to list of sparse matrices,
    # scaling the data by the inter-zone weight for each zone pair
    m <- list()
    for (z1 in seq_len(ncol(zones))) {
      m[[z1]] <- list()
      for (z2 in seq_len(nrow(zones))) {
        m[[z1]][[z2]] <- data * zones[z1, z2]
      }
    }
    # calculate connectivity
    internal_eval_connectivity_summary(
      x, planning_unit_solution_status(x, solution), m, data)
})

#' @name eval_connectivity_summary
#' @usage \S4method{eval_connectivity_summary}{ConservationProblem,ANY,ANY,array}(x, solution, zones, data) 
#' @rdname eval_connectivity_summary
methods::setMethod("eval_connectivity_summary",
  methods::signature("ConservationProblem", "ANY", "ANY", "array"),
  function(x, solution, zones, data) {
    # assert valid arguments
    # (the 4-dimensional array already encodes zone weights, so zones
    # must be NULL here)
    assertthat::assert_that(inherits(x, "ConservationProblem"),
      is.null(zones),
      is.array(data),
      length(dim(data)) == 4,
      dim(data)[1] == number_of_total_units(x),
      dim(data)[2] == number_of_total_units(x),
      dim(data)[3] == number_of_zones(x),
      dim(data)[4] == number_of_zones(x),
      all(is.finite(data)))
    # generate indices for units that are planning units
    indices <- x$planning_unit_indices()
    # convert array to list of list of sparseMatrix objects
    m <- list()
    for (z1 in seq_len(dim(data)[3])) {
      m[[z1]] <- list()
      for (z2 in seq_len(dim(data)[4])) {
        m[[z1]][[z2]] <- methods::as(data[indices, indices, z1, z2],
                                     "dgCMatrix")
      }
    }
    # calculate connectivity
    internal_eval_connectivity_summary(
      x, planning_unit_solution_status(x, solution), m, NULL)
})

# Shared worker: compute overall (and per-zone) connectivity from the
# zone-scaled sparse matrices and the solution status matrix.
internal_eval_connectivity_summary <- function(
  x, solution, zone_scaled_data, data) {
  # assert valid arguments
  assertthat::assert_that(
    inherits(x, "ConservationProblem"),
    is.matrix(solution),
    is.list(zone_scaled_data),
    inherits(data, c("dgCMatrix", "NULL")))
  # manually coerce NA values in solution to 0
  solution[!is.finite(solution)] <- 0
  # calculate overall connectivity
  v <- rcpp_connectivity(zone_scaled_data, solution)
  # main calculations
  if (number_of_zones(x) == 1) {
    ## store result for single zone
    out <- tibble::tibble(summary = "overall", connectivity = v)
  } else {
    ## calculate connectivity for each zone separately
    zv <- vapply(seq_len(ncol(solution)), FUN.VALUE = numeric(1),
                 function(z) {
      ## prepare data for the z'th zone
      if (is.null(data)) {
        zd <- methods::as(zone_scaled_data[[z]][[z]], "dgCMatrix")
      } else {
        zd <- data
      }
      ## calculate connectivity
      rcpp_connectivity(list(list(zd)), solution[, z, drop = FALSE])
    })
    ## store results for multiple zones
    out <- tibble::tibble(
      summary = c("overall", zone_names(x)),
      connectivity = c(v, zv))
  }
  # return result
  out
}
/R/eval_connectivity_summary.R
no_license
bbest/prioritizr
R
false
false
11,069
r
#' @include internal.R ConservationProblem-proto.R NULL #' Evaluate connectivity of solution #' #' Calculate the connectivity held within a solution to a conservation #' planning [problem()]. #' This summary statistic evaluates the connectivity of a solution using #' pair-wise connectivity values between combinations of planning units. #' It is specifically designed for symmetric connectivity data. #' #' @inheritParams add_connectivity_penalties #' @inheritParams eval_cost_summary #' #' @details #' This summary statistic is comparable to the `Connectivity_In` metric #' reported by the #' [*Marxan* software](https://marxansolutions.org) (Ball *et al.* 2009). #' It is calculated using the same equations used to penalize solutions #' with connectivity data (i.e., [add_connectivity_penalties()]). #' Specifically, it is calculated as the sum of the pair-wise connectivity #' values in the argument to `data`, weighted by the value of the planning #' units in the solution. #' #' @inheritSection eval_cost_summary Solution format #' @inheritSection add_connectivity_penalties Data format #' #' @return #' [tibble::tibble()] object describing the connectivity of the #' solution. #' It contains the following columns: #' #' \describe{ #' #' \item{summary}{`character` description of the summary statistic. #' The statistic associated with the `"overall"` value #' in this column is calculated using the entire solution #' (including all management zones if there are multiple zones). #' If multiple management zones are present, then summary statistics #' are also provided for each zone separately #' (indicated using zone names).} #' #' \item{connectivity}{`numeric` connectivity value. #' Greater values correspond to solutions associated with greater #' connectivity. 
#' Thus conservation planning exercises typically prefer solutions #' with greater values.} #' #' } #' #' @references #' Ball IR, Possingham HP, and Watts M (2009) *Marxan and relatives: #' Software for spatial conservation prioritisation* in Spatial conservation #' prioritisation: Quantitative methods and computational tools. Eds Moilanen #' A, Wilson KA, and Possingham HP. Oxford University Press, Oxford, UK. #' #' @seealso #' See [summaries] for an overview of all functions for summarizing solutions. #' Also, see [add_connectivity_penalties()] to penalize solutions with low #' connectivity. #' #' @family summaries #' #' @examples #' \dontrun{ #' # set seed for reproducibility #' set.seed(500) #' #' # load data #' data(sim_pu_raster, sim_pu_sf, sim_features, #' sim_pu_zones_sf, sim_features_zones) #' #' # build minimal conservation problem with raster data #' p1 <- problem(sim_pu_raster, sim_features) %>% #' add_min_set_objective() %>% #' add_relative_targets(0.1) %>% #' add_binary_decisions() %>% #' add_default_solver(verbose = FALSE) #' #' # solve the problem #' s1 <- solve(p1) #' #' # print solution #' print(s1) #' #' # plot solution #' plot(s1, main = "solution", axes = FALSE, box = FALSE) #' #' # simulate a connectivity matrix to describe the relative strength #' # of connectivity between different planning units #' # for brevity, we will use cost data here so that pairs #' # of adjacent planning units with higher cost values will have a #' # higher connectivity value #' # (but see ?connectivity_matrix for more information) #' cm1 <- connectivity_matrix(sim_pu_raster, sim_pu_raster) #' #' # calculate connectivity associated with the solution #' r1 <- eval_connectivity_summary(p1, s1, data = cm1) #' print(r1) #' #' #' # build multi-zone conservation problem with polygon (sf) data #' p2 <- problem(sim_pu_zones_sf, sim_features_zones, #' cost_column = c("cost_1", "cost_2", "cost_3")) %>% #' add_min_set_objective() %>% #' add_relative_targets(matrix(runif(15, 
0.1, 0.2), nrow = 5, #' ncol = 3)) %>% #' add_binary_decisions() %>% #' add_default_solver(verbose = FALSE) #' #' # solve the problem #' s2 <- solve(p2) #' #' # print first six rows of the attribute table #' print(head(s2)) #' #' # create new column representing the zone id that each planning unit #' # was allocated to in the solution #' s2$solution <- category_vector( #' s2[, c("solution_1_zone_1", "solution_1_zone_2", "solution_1_zone_3")]) #' s2$solution <- factor(s2$solution) #' #' # plot solution #' plot(s2[, "solution"]) #' #' # simulate connectivity matrix #' # here, we will add a new column to sim_pu_zones_sf with #' # randomly simulated values and create a connectivity matrix #' # based on the average simulated values of adjacent planning units #' sim_pu_zones_sf$con <- runif(nrow(sim_pu_zones_sf)) #' cm2 <- connectivity_matrix(sim_pu_zones_sf, "con") #' #' # calculate connectivity associated with the solution #' r2 <- eval_connectivity_summary( #' p2, s2[, c("solution_1_zone_1", "solution_1_zone_2", "solution_1_zone_3")], #' data = cm2) #' print(r2) #' #' } #' @name eval_connectivity_summary #' #' @exportMethod eval_connectivity_summary #' #' @aliases eval_connectivity_summary,ConservationProblem,ANY,ANY,Matrix-method eval_connectivity_summary,ConservationProblem,ANY,ANY,matrix-method eval_connectivity_summary,ConservationProblem,ANY,ANY,dgCMatrix-method eval_connectivity_summary,ConservationProblem,ANY,ANY,data.frame-method eval_connectivity_summary,ConservationProblem,ANY,ANY,array-method NULL #' @export methods::setGeneric("eval_connectivity_summary", signature = methods::signature("x", "solution", "zones", "data"), function(x, solution, zones = diag(number_of_zones(x)), data) standardGeneric("eval_connectivity_summary")) #' @name eval_connectivity_summary #' @usage \S4method{eval_connectivity_summary}{ConservationProblem,ANY,ANY,matrix}(x, solution, zones, data) #' @rdname eval_connectivity_summary methods::setMethod("eval_connectivity_summary", 
methods::signature("ConservationProblem", "ANY", "ANY", "matrix"), function(x, solution, zones, data) { eval_connectivity_summary(x, solution, zones, methods::as(data, "dgCMatrix")) }) #' @name eval_connectivity_summary #' @usage \S4method{eval_connectivity_summary}{ConservationProblem,ANY,ANY,Matrix}(x, solution, zones, data) #' @rdname eval_connectivity_summary methods::setMethod("eval_connectivity_summary", methods::signature("ConservationProblem", "ANY", "ANY", "Matrix"), function(x, solution, zones, data) { eval_connectivity_summary(x, solution, zones, methods::as(data, "dgCMatrix")) }) #' @name eval_connectivity_summary #' @usage \S4method{eval_connectivity_summary}{ConservationProblem,ANY,ANY,data.frame}(x, solution, zones, data) #' @rdname eval_connectivity_summary methods::setMethod("eval_connectivity_summary", methods::signature("ConservationProblem", "ANY", "ANY", "data.frame"), function(x, solution, zones, data) { eval_connectivity_summary( x, solution, zones, marxan_boundary_data_to_matrix(x, data)) }) #' @name eval_connectivity_summary #' @usage \S4method{eval_connectivity_summary}{ConservationProblem,ANY,ANY,dgCMatrix}(x, solution, zones, data) #' @rdname eval_connectivity_summary methods::setMethod("eval_connectivity_summary", methods::signature("ConservationProblem", "ANY", "ANY", "dgCMatrix"), function(x, solution, zones, data) { # assert valid arguments assertthat::assert_that( inherits(x, "ConservationProblem"), inherits(zones, c("matrix", "Matrix")), nrow(zones) == ncol(zones), is.numeric(as.vector(zones)), all(is.finite(as.vector(zones))), is.numeric(data@x), ncol(data) == nrow(data), max(zones) <= 1, min(zones) >= -1, number_of_total_units(x) == ncol(data), number_of_zones(x) == ncol(zones), all(is.finite(data@x))) # coerce zones to matrix zones <- as.matrix(zones) indices <- x$planning_unit_indices() data <- data[indices, indices, drop = FALSE] # check for symmetry if (!Matrix::isSymmetric(data)) { warning( paste0( "argument to data does not 
contain symmetric connectivity values, ", "it it recommended to use eval_asym_connectivity_summary()" ), call. = FALSE, immediate. = TRUE ) } # convert zones & dgCMatrix data to list of sparse matrices m <- list() for (z1 in seq_len(ncol(zones))) { m[[z1]] <- list() for (z2 in seq_len(nrow(zones))) { m[[z1]][[z2]] <- data * zones[z1, z2] } } # calculate connectivity internal_eval_connectivity_summary( x, planning_unit_solution_status(x, solution), m, data) }) #' @name eval_connectivity_summary #' @usage \S4method{eval_connectivity_summary}{ConservationProblem,ANY,ANY,array}(x, solution, zones, data) #' @rdname eval_connectivity_summary methods::setMethod("eval_connectivity_summary", methods::signature("ConservationProblem", "ANY", "ANY", "array"), function(x, solution, zones, data) { # assert valid arguments assertthat::assert_that(inherits(x, "ConservationProblem"), is.null(zones), is.array(data), length(dim(data)) == 4, dim(data)[1] == number_of_total_units(x), dim(data)[2] == number_of_total_units(x), dim(data)[3] == number_of_zones(x), dim(data)[4] == number_of_zones(x), all(is.finite(data))) # generate indices for units that are planning units indices <- x$planning_unit_indices() # convert array to list of list of sparseMatrix objects m <- list() for (z1 in seq_len(dim(data)[3])) { m[[z1]] <- list() for (z2 in seq_len(dim(data)[4])) { m[[z1]][[z2]] <- methods::as(data[indices, indices, z1, z2], "dgCMatrix") } } # calculate connectivity internal_eval_connectivity_summary( x, planning_unit_solution_status(x, solution), m, NULL) }) internal_eval_connectivity_summary <- function( x, solution, zone_scaled_data, data) { # assert valid arguments assertthat::assert_that( inherits(x, "ConservationProblem"), is.matrix(solution), is.list(zone_scaled_data), inherits(data, c("dgCMatrix", "NULL"))) # manually coerce NA values in solution to 0 solution[!is.finite(solution)] <- 0 # calculate overall connectivity v <- rcpp_connectivity(zone_scaled_data, solution) # main 
calculations if (number_of_zones(x) == 1) { ## store result for single zone out <- tibble::tibble(summary = "overall", connectivity = v) } else { ## calculate connectivity for each zone separately zv <- vapply(seq_len(ncol(solution)), FUN.VALUE = numeric(1), function(z) { ## prepare data the z'th zone if (is.null(data)) { zd <- methods::as(zone_scaled_data[[z]][[z]], "dgCMatrix") } else { zd <- data } ## calculate connectivity rcpp_connectivity(list(list(zd)), solution[, z, drop = FALSE]) }) ## store results for multiple zones out <- tibble::tibble( summary = c("overall", zone_names(x)), connectivity = c(v, zv)) } # return result out }
######################################## #### Libraries Needed ####### ######################################## #install.packages("RCurl") #install.packages("rjson") #install.packages("streamR") #install.packages("RColorBrewer") #install.packages("wordcloud") #install.packages("NLP") #install.packages("tm") #install.packages("ggplot2") #install.packages("sp") #install.packages("maps") #install.packages("maptools") #install.packages("rworldmap") ### stopped installing here #install.packages("Rstem") library(bitops) library(RCurl) library(rjson) library(streamR) library(RColorBrewer) library(wordcloud) library(NLP) library(tm) library(ggplot2) library(sp) library(maps) library(maptools) library(rworldmap) library(grid) library(stringr) library(plyr) # stopped loading libraries here. library(Rstem) gpclibPermit() #################################################### ######## Loading and creating subsets of data ###### #################################################### load("tweets_all_sent_mapped_r.Rdata") tweetsUS <- tweets_all_sent_mapped subset_tw <- function(filename){ filename$text <- sapply(filename$text,function(row) iconv(row, "latin1", "ASCII", sub="")) HC <- subset (filename, grepl(pattern = "Clinton | clinton | Hillary | hillary | Hillaryclinton | hillaryclinton | Hillary Clinton | hillary clinton" , filename$text, ignore.case = TRUE)) BS <- subset (filename, grepl(pattern = "Berniesanders | berniesanders | Bernie Sanders | bernie sanders | Bernie | bernie | Sensanders | sensanders" , filename$text, ignore.case = TRUE)) TC <- subset (filename, grepl(pattern = "Cruz | cruz | Ted | ted | Tedcruz | tedcruz | Ted Cruz | ted cruz" , filename$text, ignore.case = TRUE)) DT <- subset (filename, grepl(pattern = "Donaldtrump | donaldtrump | Donald Trump | donald trump | Trump | trump | Donald | donald | Trumpf | trumpf" , filename$text, ignore.case = TRUE)) MR <- subset (filename, grepl(pattern = "Marcorubio | marcorubio | Marco Rubio | marco rubio" , filename$text, 
ignore.case = TRUE)) # also by party dem <- rbind(HC, BS) rep <- rbind(TC, DT) save (HC, file= 'HC.Rdata') save (BS, file= 'BS.Rdata') save (TC, file= 'TC.Rdata') save (DT, file= 'DT.Rdata') save (MR, file= 'MR.Rdata') save(dem, file='dem.Rdata') save(rep, file='rep.Rdata') } subset_tw(tweetsUS) load("HC.Rdata") load("BS.Rdata") load("TC.Rdata") load("DT.Rdata") load("dem.Rdata") load("rep.Rdata") testHC <- head(HC, 1000) testBS <- head(BS, 1000) testTC <- head(TC, 1000) testDT <- head(DT, 1000) testdem <- head(dem, 1000) testrep <- head(rep, 1000) ########################################### ####### Creating Word Cloud ######## ########################################### # creating a tweetcorpus, just insert the db filename into function, will receive TweetCorpus as a result # don't forget to set this to a var name #ex. bs_tc <- tweet_corp(BS) tc <- function(filename){ filename$text <- sapply(filename$text,function(row) iconv(row, "latin1", "ASCII", sub="")) TweetCorpus<-paste(unlist(filename$text), collapse =" ") TweetCorpus <- Corpus(VectorSource(TweetCorpus)) TweetCorpus <- tm_map(TweetCorpus, removePunctuation) TweetCorpus <- tm_map(TweetCorpus, removeWords, stopwords("english")) #TweetCorpus <- tm_map(TweetCorpus, stemDocument) TweetCorpus <- tm_map(TweetCorpus, content_transformer(tolower),lazy=TRUE) TweetCorpus <- tm_map(TweetCorpus, PlainTextDocument) TweetCorpus <- tm_map(TweetCorpus, removeWords, c("https", "https...", "via", "use", "just", "think", "say", "that", "its", "like", "this", "will", "the", "lol", "now", "one", "still", "whi", "amp", "let", "ill", "come", "shit", "and", "realli", "your", "you", "fuck", "last", "for", "much", "see", "got", "can", "get" )) return(TweetCorpus) } wc_t <- function(filename){ return(wordcloud(tweet_corp(filename), min.freq = 900, max.words = 500, random.order = FALSE, colors = brewer.pal(4, "Dark2"))) } ########################################### ##### Building Map ####### ########################################### 
##### UNABLE TO GET THIS FUNCTION WORKING.......................................................................................................... #### problem installing grid package..... map.data <- map_data("state") map_tweets <- function(filename){ only_coords <- filename[complete.cases(filename) ,] us_coords <- only_coords[only_coords$country_code == 'US',] points <- data.frame(x = as.numeric(as.character(us_coords$place_lon)), y = as.numeric(as.character(us_coords$place_lat))) points <- points[points$y > 25, ] ggplot(map.data) + geom_map(aes(map_id = region), map = map.data, fill = "#fdf9f9", color = "#9d9595", size = 0.25) + expand_limits(x = map.data$long, y = map.data$lat) + theme(axis.line = element_blank(), axis.text = element_blank(), axis.ticks = element_blank(), axis.title = element_blank(), panel.background = element_blank(), panel.border = element_blank(), panel.grid.major = element_blank(), plot.background = element_blank(), plot.margin = unit(0 * c(-1.5, -1.5, -1.5, -1.5), "lines")) + geom_point(data = points, aes(x = x, y = y), size = 1, alpha = 1/5, color = "#CC6666") } ########################################### ####### Building a Social Network ######### ########################################### Force_clinton <- read.csv("force_clinton.csv", header = TRUE, sep = ",", quote = "\"", dec = ".", fill = TRUE, comment.char = "") Force_bernie <- read.csv("force_sanders.csv", header = TRUE, sep = ",", quote = "\"", dec = ".", fill = TRUE, comment.char = "") Force_trump <- read.csv("force_trump.csv", header = TRUE, sep = ",", quote = "\"", dec = ".", fill = TRUE, comment.char = "") Force_cruz <- read.csv("force_cruz.csv", header = TRUE, sep = ",", quote = "\"", dec = ".", fill = TRUE, comment.char = "") output$simple_Clinton <- renderSimpleNetwork({ src <- Force_clinton$source target <- Force_clinton$target networkData <- data.frame(src, target) simpleNetwork(networkData) }) output$simple_Bernie <- renderSimpleNetwork({ src <- Force_bernie$source 
target <- Force_bernie$target networkData <- data.frame(src, target) simpleNetwork(networkData) }) output$simple_Cruz <- renderSimpleNetwork({ src <- Force_cruz$source target <- Force_cruz$target networkData <- data.frame(src, target) simpleNetwork(networkData) }) output$simple_Trump <- renderSimpleNetwork({ src <- Force_trump$source target <- Force_trump$target networkData <- data.frame(src, target) simpleNetwork(networkData) }) ########################################### ####### More Mapping ######### ########################################### world <- map_data("world") US_states <- map_data("state") map_gen_w <-function(filename){ filtered_file <- filename[complete.cases(filename) ,] ggplot(world) + geom_map(aes(map_id = region), map = world, fill = "grey90", color = "grey50", size = 0.25) + expand_limits(x = as.numeric(as.character(world$long)), y = as.numeric(as.character(world$lat))) + scale_x_continuous("Longitude") + scale_y_continuous("Latitude") + theme_minimal() + geom_point(data = filtered_file, aes(x = as.numeric(as.character(lon)), y = as.numeric(as.character(lat))), size = 1, alpha = 1/5, color = "blue") } map_gen_s <-function(filename){ filtered_file <- filename[complete.cases(filename) ,] filtered_file <- filtered_file[filtered_file$country_code=='US',] ggplot(US_states) + geom_map(aes(map_id = region), map = US_states, fill = "grey90", color = "grey50", size = 0.25) + expand_limits(x = as.numeric(as.character(US_states$long)), y = as.numeric(as.character(US_states$lat))) + scale_x_continuous("Longitude") + scale_y_continuous("Latitude") + theme_minimal() + geom_point(data = filtered_file, aes(x = as.numeric(as.character(place_lon)), y = as.numeric(as.character(place_lat))), size = 1, alpha = 1/5, color = "blue") } # Counting by State # The single argument to this function, pointsDF, is a data.frame in which: # - column 1 contains the longitude in degrees (negative in the US) # - column 2 contains the latitude in degrees latlong2state <- 
function(pointsDF) { # Prepare SpatialPolygons object with one SpatialPolygon # per state (plus DC, minus HI & AK) states <- map('state', fill=TRUE, col="transparent", plot=FALSE) IDs <- sapply(strsplit(states$names, ":"), function(x) x[1]) states_sp <- map2SpatialPolygons(states, IDs=IDs, proj4string=CRS("+proj=longlat +datum=WGS84")) # Convert pointsDF to a SpatialPoints object pointsSP <- SpatialPoints(pointsDF, proj4string=CRS("+proj=longlat +datum=WGS84")) # Use 'over' to get _indices_ of the Polygons object containing each point indices <- over(pointsSP, states_sp) # Return the state names of the Polygons object containing each point stateNames <- sapply(states_sp@polygons, function(x) x@ID) stateNames[indices] } as.numeric.factor <- function(x) {as.numeric(levels(x))[x]} state_mp_cnt <- function(filename){ filtered_file <- filename[complete.cases(filename) ,] filtered_file <- filtered_file[filtered_file$country_code=='US',] geo_pts <- c("place_lon", "place_lat") df_pt <- filtered_file[geo_pts] df_pt$place_lon <- as.numeric.factor(df_pt$place_lon) df_pt$state <- latlong2state(df_pt) filtered_df <- df_pt[!(is.na(df_pt$state)),] count(filtered_df, "state") state_df <- count(filtered_df, "state") print("two") mapUSA <- map('state', fill = TRUE, plot = FALSE) nms <- sapply(strsplit(mapUSA$names, ':'), function(x)x[1]) USApolygons <- map2SpatialPolygons(mapUSA, IDs = nms, CRS('+proj=longlat')) idx <- match(unique(nms), state_df$state) dat2 <- data.frame(value = state_df$freq[idx], state = unique(nms)) row.names(dat2) <- unique(nms) USAsp <- SpatialPolygonsDataFrame(USApolygons, data = dat2) print("three") spplot(USAsp['value'], col.regions= rainbow(100, start = 3/6, end = 4/6 )) } ############################################## ########## Sentiment Analysis ################ ############################################## # key terms lexicon lexicon <- read.csv("lexicon_ps.csv", stringsAsFactors=F) econ.words <- lexicon$word[lexicon$polarity=="economy"] imm.words <- 
lexicon$word[lexicon$polarity=="immigration"] health.words <- lexicon$word[lexicon$polarity=="health_care"] military.words <- lexicon$word[lexicon$polarity=="military"] gun.words <- lexicon$word[lexicon$polarity=="gun_control"] china.words <- lexicon$word[lexicon$polarity=="china"] trade.words <- lexicon$word[lexicon$polarity=="trade"] race.words <- lexicon$word[lexicon$polarity=="race"] climate.words <- lexicon$word[lexicon$polarity=="climate_change"] religion.words <- lexicon$word[lexicon$polarity=="religion"] tc_countT <- function(filename, fname, person){ econ <- sum(str_count(filename, econ.words)) imm <- sum(str_count(filename, imm.words)) health <- sum(str_count(filename, health.words)) military <- sum(str_count(filename, military.words)) gun <- sum(str_count(filename, gun.words)) china <- sum(str_count(filename, china.words)) trade <- sum(str_count(filename, trade.words)) race <- sum(str_count(filename, race.words)) climate <- sum(str_count(filename, climate.words)) religion <- sum(str_count(filename, religion.words)) fn_df = data.frame(econ, imm, health, military, gun, china, trade, race, climate, religion) write.csv(fn_df, file = fname) return(cnvrt_df(fn_df, person)) } cnvrt_df <- function(filename, nameC){ filename$X <- NULL filename <-t(filename) filename <- data.frame(filename) names(filename)[1]<-paste("num") filename$term <- rownames(filename) filename$name <- nameC filename$rate <- filename$num / sum(filename$num) return(filename) } term_plots <- function(data, title, color){ ggplot(data=data, aes(x=term, y=rate, fill=name)) + geom_bar(stat="identity", position=position_dodge()) + scale_fill_manual(values=color) + ggtitle(title) } #positivity key terms lexicon lexicon <- read.csv("lexicon.csv", stringsAsFactors=F) positive.words <- lexicon$word[lexicon$polarity=="positive"] negative.words <- lexicon$word[lexicon$polarity=="negative"] sentiment <- function(filename, name_csv){ tc <- tweet_corp(filename) pos_count <- sum(str_count(tc, 
positive.words)) neg_count <- sum(str_count(tc, negative.words)) polarity_df = data.frame(pos_count, neg_count) write.csv(polarity_df, file = name_csv) } plot_polarity <- function(filename){ ggplot(data=filename, aes(x=polarity, y=rate)) + geom_bar(stat="identity", position=position_dodge()) + scale_fill_brewer() + ggtitle("Polarity of Candidate") } ########################### Calling functions ################################## # wordclouds wc_t(testHC) wc_t(testBS) wc_t(testTC) wc_t(testDT) wc_t(testdem) wc_t(testrep) # generating social network ## TBD # building general map map_tweets(testHC) map_tweets(testBS) map_tweets(testTC) map_tweets(testDT) map_tweets(testdem) map_tweets(testrep) # generating maps # world map not that useful for us.... map_gen_w(testHC) map_gen_s(testHC) map_gen_s(testBS) map_gen_s(testTC) map_gen_s(testDT) map_gen_s(testdem) map_gen_s(testrep) #problem with this function state_mp_cnt(testHC) # sentiment analysis # for terms final_countHC <- tc_countT(tc(testHC) , "HC_topics.csv", "Hillary Clinton") final_countBS <- tc_countT(tc(testBS) , "BS_topics.csv", "Bernie Sanders") final_countTC <- tc_countT(tc(testTC) , "TC_topics.csv", "Ted Cruz") final_countDT <- tc_countT(tc(testDT) , "DT_topics.csv", "Donald Trump") final_count_dem <- tc_countT(tc(testdem) , "dem_topics.csv", "Democrats") final_count_rep <- tc_countT(tc(testrep) , "rep_topics.csv", "Republican") final_count_dems <- rbind(final_countHC, final_countBS) final_count_reps <- rbind(final_countTC, final_countDT) final_count_parties <- rbind(final_count_dem, final_count_rep) #dems color1 <- c("#99CCFF", "#003399") #reps color2 <- c("#FF9999", "#FF6666") #both parties color3 <- c("#6699FF", "#FF6666") term_plots(final_count_dems, "Rate of Topics per Democratic Candidate", color1) term_plots(final_count_reps, "Rate of Topics per Republican Candidate", color2) term_plots(final_count_parties, "Rate of Topics per Political Party", color3) ###### fix this function part.... 
# for neg and pos sentiment(HC, "HC_polarity.csv") ######## ## before this we have to create rates by hand on csv file........ HC_p<- read.csv("HC_polarity_e.csv") plot_polarity(HC_p)
/final_script.R
no_license
fuserlimon/Elections-Sentiment-Topic-Analyses-
R
false
false
17,021
r
######################################## #### Libraries Needed ####### ######################################## #install.packages("RCurl") #install.packages("rjson") #install.packages("streamR") #install.packages("RColorBrewer") #install.packages("wordcloud") #install.packages("NLP") #install.packages("tm") #install.packages("ggplot2") #install.packages("sp") #install.packages("maps") #install.packages("maptools") #install.packages("rworldmap") ### stopped installing here #install.packages("Rstem") library(bitops) library(RCurl) library(rjson) library(streamR) library(RColorBrewer) library(wordcloud) library(NLP) library(tm) library(ggplot2) library(sp) library(maps) library(maptools) library(rworldmap) library(grid) library(stringr) library(plyr) # stopped loading libraries here. library(Rstem) gpclibPermit() #################################################### ######## Loading and creating subsets of data ###### #################################################### load("tweets_all_sent_mapped_r.Rdata") tweetsUS <- tweets_all_sent_mapped subset_tw <- function(filename){ filename$text <- sapply(filename$text,function(row) iconv(row, "latin1", "ASCII", sub="")) HC <- subset (filename, grepl(pattern = "Clinton | clinton | Hillary | hillary | Hillaryclinton | hillaryclinton | Hillary Clinton | hillary clinton" , filename$text, ignore.case = TRUE)) BS <- subset (filename, grepl(pattern = "Berniesanders | berniesanders | Bernie Sanders | bernie sanders | Bernie | bernie | Sensanders | sensanders" , filename$text, ignore.case = TRUE)) TC <- subset (filename, grepl(pattern = "Cruz | cruz | Ted | ted | Tedcruz | tedcruz | Ted Cruz | ted cruz" , filename$text, ignore.case = TRUE)) DT <- subset (filename, grepl(pattern = "Donaldtrump | donaldtrump | Donald Trump | donald trump | Trump | trump | Donald | donald | Trumpf | trumpf" , filename$text, ignore.case = TRUE)) MR <- subset (filename, grepl(pattern = "Marcorubio | marcorubio | Marco Rubio | marco rubio" , filename$text, 
ignore.case = TRUE)) # also by party dem <- rbind(HC, BS) rep <- rbind(TC, DT) save (HC, file= 'HC.Rdata') save (BS, file= 'BS.Rdata') save (TC, file= 'TC.Rdata') save (DT, file= 'DT.Rdata') save (MR, file= 'MR.Rdata') save(dem, file='dem.Rdata') save(rep, file='rep.Rdata') } subset_tw(tweetsUS) load("HC.Rdata") load("BS.Rdata") load("TC.Rdata") load("DT.Rdata") load("dem.Rdata") load("rep.Rdata") testHC <- head(HC, 1000) testBS <- head(BS, 1000) testTC <- head(TC, 1000) testDT <- head(DT, 1000) testdem <- head(dem, 1000) testrep <- head(rep, 1000) ########################################### ####### Creating Word Cloud ######## ########################################### # creating a tweetcorpus, just insert the db filename into function, will receive TweetCorpus as a result # don't forget to set this to a var name #ex. bs_tc <- tweet_corp(BS) tc <- function(filename){ filename$text <- sapply(filename$text,function(row) iconv(row, "latin1", "ASCII", sub="")) TweetCorpus<-paste(unlist(filename$text), collapse =" ") TweetCorpus <- Corpus(VectorSource(TweetCorpus)) TweetCorpus <- tm_map(TweetCorpus, removePunctuation) TweetCorpus <- tm_map(TweetCorpus, removeWords, stopwords("english")) #TweetCorpus <- tm_map(TweetCorpus, stemDocument) TweetCorpus <- tm_map(TweetCorpus, content_transformer(tolower),lazy=TRUE) TweetCorpus <- tm_map(TweetCorpus, PlainTextDocument) TweetCorpus <- tm_map(TweetCorpus, removeWords, c("https", "https...", "via", "use", "just", "think", "say", "that", "its", "like", "this", "will", "the", "lol", "now", "one", "still", "whi", "amp", "let", "ill", "come", "shit", "and", "realli", "your", "you", "fuck", "last", "for", "much", "see", "got", "can", "get" )) return(TweetCorpus) } wc_t <- function(filename){ return(wordcloud(tweet_corp(filename), min.freq = 900, max.words = 500, random.order = FALSE, colors = brewer.pal(4, "Dark2"))) } ########################################### ##### Building Map ####### ########################################### 
##### UNABLE TO GET THIS FUNCTION WORKING.......................................................................................................... #### problem installing grid package..... map.data <- map_data("state") map_tweets <- function(filename){ only_coords <- filename[complete.cases(filename) ,] us_coords <- only_coords[only_coords$country_code == 'US',] points <- data.frame(x = as.numeric(as.character(us_coords$place_lon)), y = as.numeric(as.character(us_coords$place_lat))) points <- points[points$y > 25, ] ggplot(map.data) + geom_map(aes(map_id = region), map = map.data, fill = "#fdf9f9", color = "#9d9595", size = 0.25) + expand_limits(x = map.data$long, y = map.data$lat) + theme(axis.line = element_blank(), axis.text = element_blank(), axis.ticks = element_blank(), axis.title = element_blank(), panel.background = element_blank(), panel.border = element_blank(), panel.grid.major = element_blank(), plot.background = element_blank(), plot.margin = unit(0 * c(-1.5, -1.5, -1.5, -1.5), "lines")) + geom_point(data = points, aes(x = x, y = y), size = 1, alpha = 1/5, color = "#CC6666") } ########################################### ####### Building a Social Network ######### ########################################### Force_clinton <- read.csv("force_clinton.csv", header = TRUE, sep = ",", quote = "\"", dec = ".", fill = TRUE, comment.char = "") Force_bernie <- read.csv("force_sanders.csv", header = TRUE, sep = ",", quote = "\"", dec = ".", fill = TRUE, comment.char = "") Force_trump <- read.csv("force_trump.csv", header = TRUE, sep = ",", quote = "\"", dec = ".", fill = TRUE, comment.char = "") Force_cruz <- read.csv("force_cruz.csv", header = TRUE, sep = ",", quote = "\"", dec = ".", fill = TRUE, comment.char = "") output$simple_Clinton <- renderSimpleNetwork({ src <- Force_clinton$source target <- Force_clinton$target networkData <- data.frame(src, target) simpleNetwork(networkData) }) output$simple_Bernie <- renderSimpleNetwork({ src <- Force_bernie$source 
target <- Force_bernie$target networkData <- data.frame(src, target) simpleNetwork(networkData) }) output$simple_Cruz <- renderSimpleNetwork({ src <- Force_cruz$source target <- Force_cruz$target networkData <- data.frame(src, target) simpleNetwork(networkData) }) output$simple_Trump <- renderSimpleNetwork({ src <- Force_trump$source target <- Force_trump$target networkData <- data.frame(src, target) simpleNetwork(networkData) }) ########################################### ####### More Mapping ######### ########################################### world <- map_data("world") US_states <- map_data("state") map_gen_w <-function(filename){ filtered_file <- filename[complete.cases(filename) ,] ggplot(world) + geom_map(aes(map_id = region), map = world, fill = "grey90", color = "grey50", size = 0.25) + expand_limits(x = as.numeric(as.character(world$long)), y = as.numeric(as.character(world$lat))) + scale_x_continuous("Longitude") + scale_y_continuous("Latitude") + theme_minimal() + geom_point(data = filtered_file, aes(x = as.numeric(as.character(lon)), y = as.numeric(as.character(lat))), size = 1, alpha = 1/5, color = "blue") } map_gen_s <-function(filename){ filtered_file <- filename[complete.cases(filename) ,] filtered_file <- filtered_file[filtered_file$country_code=='US',] ggplot(US_states) + geom_map(aes(map_id = region), map = US_states, fill = "grey90", color = "grey50", size = 0.25) + expand_limits(x = as.numeric(as.character(US_states$long)), y = as.numeric(as.character(US_states$lat))) + scale_x_continuous("Longitude") + scale_y_continuous("Latitude") + theme_minimal() + geom_point(data = filtered_file, aes(x = as.numeric(as.character(place_lon)), y = as.numeric(as.character(place_lat))), size = 1, alpha = 1/5, color = "blue") } # Counting by State # The single argument to this function, pointsDF, is a data.frame in which: # - column 1 contains the longitude in degrees (negative in the US) # - column 2 contains the latitude in degrees latlong2state <- 
function(pointsDF) { # Prepare SpatialPolygons object with one SpatialPolygon # per state (plus DC, minus HI & AK) states <- map('state', fill=TRUE, col="transparent", plot=FALSE) IDs <- sapply(strsplit(states$names, ":"), function(x) x[1]) states_sp <- map2SpatialPolygons(states, IDs=IDs, proj4string=CRS("+proj=longlat +datum=WGS84")) # Convert pointsDF to a SpatialPoints object pointsSP <- SpatialPoints(pointsDF, proj4string=CRS("+proj=longlat +datum=WGS84")) # Use 'over' to get _indices_ of the Polygons object containing each point indices <- over(pointsSP, states_sp) # Return the state names of the Polygons object containing each point stateNames <- sapply(states_sp@polygons, function(x) x@ID) stateNames[indices] } as.numeric.factor <- function(x) {as.numeric(levels(x))[x]} state_mp_cnt <- function(filename){ filtered_file <- filename[complete.cases(filename) ,] filtered_file <- filtered_file[filtered_file$country_code=='US',] geo_pts <- c("place_lon", "place_lat") df_pt <- filtered_file[geo_pts] df_pt$place_lon <- as.numeric.factor(df_pt$place_lon) df_pt$state <- latlong2state(df_pt) filtered_df <- df_pt[!(is.na(df_pt$state)),] count(filtered_df, "state") state_df <- count(filtered_df, "state") print("two") mapUSA <- map('state', fill = TRUE, plot = FALSE) nms <- sapply(strsplit(mapUSA$names, ':'), function(x)x[1]) USApolygons <- map2SpatialPolygons(mapUSA, IDs = nms, CRS('+proj=longlat')) idx <- match(unique(nms), state_df$state) dat2 <- data.frame(value = state_df$freq[idx], state = unique(nms)) row.names(dat2) <- unique(nms) USAsp <- SpatialPolygonsDataFrame(USApolygons, data = dat2) print("three") spplot(USAsp['value'], col.regions= rainbow(100, start = 3/6, end = 4/6 )) } ############################################## ########## Sentiment Analysis ################ ############################################## # key terms lexicon lexicon <- read.csv("lexicon_ps.csv", stringsAsFactors=F) econ.words <- lexicon$word[lexicon$polarity=="economy"] imm.words <- 
lexicon$word[lexicon$polarity=="immigration"] health.words <- lexicon$word[lexicon$polarity=="health_care"] military.words <- lexicon$word[lexicon$polarity=="military"] gun.words <- lexicon$word[lexicon$polarity=="gun_control"] china.words <- lexicon$word[lexicon$polarity=="china"] trade.words <- lexicon$word[lexicon$polarity=="trade"] race.words <- lexicon$word[lexicon$polarity=="race"] climate.words <- lexicon$word[lexicon$polarity=="climate_change"] religion.words <- lexicon$word[lexicon$polarity=="religion"] tc_countT <- function(filename, fname, person){ econ <- sum(str_count(filename, econ.words)) imm <- sum(str_count(filename, imm.words)) health <- sum(str_count(filename, health.words)) military <- sum(str_count(filename, military.words)) gun <- sum(str_count(filename, gun.words)) china <- sum(str_count(filename, china.words)) trade <- sum(str_count(filename, trade.words)) race <- sum(str_count(filename, race.words)) climate <- sum(str_count(filename, climate.words)) religion <- sum(str_count(filename, religion.words)) fn_df = data.frame(econ, imm, health, military, gun, china, trade, race, climate, religion) write.csv(fn_df, file = fname) return(cnvrt_df(fn_df, person)) } cnvrt_df <- function(filename, nameC){ filename$X <- NULL filename <-t(filename) filename <- data.frame(filename) names(filename)[1]<-paste("num") filename$term <- rownames(filename) filename$name <- nameC filename$rate <- filename$num / sum(filename$num) return(filename) } term_plots <- function(data, title, color){ ggplot(data=data, aes(x=term, y=rate, fill=name)) + geom_bar(stat="identity", position=position_dodge()) + scale_fill_manual(values=color) + ggtitle(title) } #positivity key terms lexicon lexicon <- read.csv("lexicon.csv", stringsAsFactors=F) positive.words <- lexicon$word[lexicon$polarity=="positive"] negative.words <- lexicon$word[lexicon$polarity=="negative"] sentiment <- function(filename, name_csv){ tc <- tweet_corp(filename) pos_count <- sum(str_count(tc, 
positive.words)) neg_count <- sum(str_count(tc, negative.words)) polarity_df = data.frame(pos_count, neg_count) write.csv(polarity_df, file = name_csv) } plot_polarity <- function(filename){ ggplot(data=filename, aes(x=polarity, y=rate)) + geom_bar(stat="identity", position=position_dodge()) + scale_fill_brewer() + ggtitle("Polarity of Candidate") } ########################### Calling functions ################################## # wordclouds wc_t(testHC) wc_t(testBS) wc_t(testTC) wc_t(testDT) wc_t(testdem) wc_t(testrep) # generating social network ## TBD # building general map map_tweets(testHC) map_tweets(testBS) map_tweets(testTC) map_tweets(testDT) map_tweets(testdem) map_tweets(testrep) # generating maps # world map not that useful for us.... map_gen_w(testHC) map_gen_s(testHC) map_gen_s(testBS) map_gen_s(testTC) map_gen_s(testDT) map_gen_s(testdem) map_gen_s(testrep) #problem with this function state_mp_cnt(testHC) # sentiment analysis # for terms final_countHC <- tc_countT(tc(testHC) , "HC_topics.csv", "Hillary Clinton") final_countBS <- tc_countT(tc(testBS) , "BS_topics.csv", "Bernie Sanders") final_countTC <- tc_countT(tc(testTC) , "TC_topics.csv", "Ted Cruz") final_countDT <- tc_countT(tc(testDT) , "DT_topics.csv", "Donald Trump") final_count_dem <- tc_countT(tc(testdem) , "dem_topics.csv", "Democrats") final_count_rep <- tc_countT(tc(testrep) , "rep_topics.csv", "Republican") final_count_dems <- rbind(final_countHC, final_countBS) final_count_reps <- rbind(final_countTC, final_countDT) final_count_parties <- rbind(final_count_dem, final_count_rep) #dems color1 <- c("#99CCFF", "#003399") #reps color2 <- c("#FF9999", "#FF6666") #both parties color3 <- c("#6699FF", "#FF6666") term_plots(final_count_dems, "Rate of Topics per Democratic Candidate", color1) term_plots(final_count_reps, "Rate of Topics per Republican Candidate", color2) term_plots(final_count_parties, "Rate of Topics per Political Party", color3) ###### fix this function part.... 
# for neg and pos sentiment(HC, "HC_polarity.csv") ######## ## before this we have to create rates by hand on csv file........ HC_p<- read.csv("HC_polarity_e.csv") plot_polarity(HC_p)
testlist <- list(Rext = numeric(0), Rs = numeric(0), Z = numeric(0), alpha = numeric(0), atmp = c(1.1988711311556e-153, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), relh = c(NA, -3.2670875268094e+126, 3.46013224973186e-111, NA, -Inf, NaN, -2.68464342817217e+45, 4.44398299790022e+96, 4.98717333739482e-156, 4.22817184106748e-307, -4.59220199702648e-303, 0), temp = c(1.06099789548264e-311, 1.60455557047237e+82, -4.51367941637774e-141, -56857994149.4251, 4.60714968529506e+22, 4.94594336083901e-277, 6.98556032546697e-100, -1.15874942296725e-140, 1.66802644272667e-153, 2.197831277967e+109, 2.39828050885494e-124, -4.39547783740517e+225, 2.23714316185293e+183, 2.72877695990519e+48, -2.99453656397724e+232, 2732080554.93861, -1.31213880308799e-29, -4.00505252245271e-246, -5.38280100691954e-169, -4.51575581905204e+118, -3.18836769669394e+228), u = numeric(0)) result <- do.call(meteor:::E_Penman,testlist) str(result)
/meteor/inst/testfiles/E_Penman/AFL_E_Penman/E_Penman_valgrind_files/1615918752-test.R
no_license
akhikolla/updatedatatype-list3
R
false
false
1,233
r
testlist <- list(Rext = numeric(0), Rs = numeric(0), Z = numeric(0), alpha = numeric(0), atmp = c(1.1988711311556e-153, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), relh = c(NA, -3.2670875268094e+126, 3.46013224973186e-111, NA, -Inf, NaN, -2.68464342817217e+45, 4.44398299790022e+96, 4.98717333739482e-156, 4.22817184106748e-307, -4.59220199702648e-303, 0), temp = c(1.06099789548264e-311, 1.60455557047237e+82, -4.51367941637774e-141, -56857994149.4251, 4.60714968529506e+22, 4.94594336083901e-277, 6.98556032546697e-100, -1.15874942296725e-140, 1.66802644272667e-153, 2.197831277967e+109, 2.39828050885494e-124, -4.39547783740517e+225, 2.23714316185293e+183, 2.72877695990519e+48, -2.99453656397724e+232, 2732080554.93861, -1.31213880308799e-29, -4.00505252245271e-246, -5.38280100691954e-169, -4.51575581905204e+118, -3.18836769669394e+228), u = numeric(0)) result <- do.call(meteor:::E_Penman,testlist) str(result)
# Define server logic required to summarize and view the # selected dataset # Return the requested dataset observe( { updatefunctionarray() } ) updatefunctionarray <- function() { g=scidb("users") fd1=iqdf(g) username=input$userlabel st=fd1$arrays[fd1$usernames==username] arrs<-(strsplit(st, ";")) arrs<-unlist(arrs) updateSelectInput(session,"dataset",choices=c(arrs)) } datasetInput <- reactive({ arrayName=input$dataset switch(input$dataset, selectedfunction=scidb(arrayName)) }) datasetFunc <- reactive( { if(input$functions=="join"){ dims1=input$check_func s_im=paste(" ",dims1) s_imm=paste(s_im,collapse = ",") s3<-c(input$check_func,input$functions) s4<-paste(s3,collapse = "_") ##store(join(left_array,right_array),result_array) s_join=("") s_join=paste(s_join,"store(join(",s_imm,"),",s4,")") iquery(s_join) } if(!is.null(input$checkbox)){ s1<-c(input$checkbox,input$functions) s2<-paste(s1,collapse = "_") if(input$functions=="cumulative"){ s_cum="" s_cum=paste(s_cum,"store(cumulate(", input$dataset, ",sum(",input$checkbox,")),",s2,")") iquery(s_cum) } else if(input$functions=="aggregate_sum"){ s_aggs="" s_aggs=paste(s_aggs,"store(aggregate(",input$dataset,",sum(",input$checkbox,")),",s2,")") iquery(s_aggs) } else if(input$functions=="aggregate_avg"){ s_agga="" s_agga=paste(s_agga,"store(aggregate(",input$dataset,",avg(",input$checkbox,")),",s2,")") iquery(s_agga) } else if(input$functions=="aggregate_prod"){ s_aggp="" s_aggp=paste(s_aggp,"store(aggregate(",input$dataset,",prod(",input$checkbox,")),",s2,")") iquery(s_aggp) } else if(input$functions=="bernoulli"){ updateTextInput(session, "func_param",label="Enter probability") s_bern="" s_bern=paste(s_bern,"store(bernoulli(",input$dataset,",",input$func_param,"),",s2,")") iquery(s_bern) } # else # if(input$functions=="selection2"){ # s_sel="" # s_sel=paste(s_sel,input$checkbox,"is selected",input$checkbox) #} else if(input$functions=="filter"){ updateTextInput(session, "func_param",label="Enter filter constraints 
(<,>,=)") s_fil="" s_fil=paste(s_fil,"store(filter(",input$dataset,",",input$checkbox,input$func_param,"),",s2,")") iquery(s_fil) } } switch(input$functions, "count" = count(scidb(input$dataset)), "cumulative" = head(scidb(s2),n=input$obs), "aggregate_avg" = head(scidb(s2),n=input$obs), "aggregate_prod" = head(scidb(s2),n=input$obs), "aggregate_sum" = head(scidb(s2),n=input$obs), "bernoulli" = head(scidb(s2),n=inp,ut$obs), #"selection2" = print(s_sel), "filter" = head(scidb(s2),n=input$obs), "join" = head(scidb(s4),n=input$obs) ) } ) observeEvent(input$Save, { str<-c(input$checkbox,input$functions) str1<-paste(str,collapse = "_") check_Save(str1) }) check_Save <- function(str1){ a=scidb("users") fd=iqdf(a) username=input$userlabel st=fd$arrays[fd$usernames==username] st=paste(st,str1,sep=';') fd$arrays[fd$usernames==username]<-st iquery("remove(users)") usernames=fd$usernames passwords=fd$passwords arrays=fd$arrays temp<-data.frame(usernames,passwords,arrays) x<-as.scidb(temp,name="users") updateVisualization() load_user_arrays(username) updatefunctionarray() } #observeEvent(input$functions=="join",{ #arrayName=input$dataset #array_data=scidb(arrayName) #attrs=scidb_attributes(array_data) #updateCheckboxGroupInput(session, "checkbox1",choices = c(attrs)) #}) observe( { arrayName=input$dataset array_data=scidb(arrayName) attrs=scidb_attributes(array_data) updateCheckboxGroupInput(session, "checkbox",choices = c(attrs)) } ) #observeEvent(input$functions=="join",{ observe( ###THIS WORKS! 
{ if(input$functions=="join") { f=scidb("users") fd2=iqdf(f) username=input$userlabel st=fd2$arrays[fd2$usernames==username] arrs1<-(strsplit(st, ";")) arrs1<-unlist(arrs1) updateCheckboxGroupInput(session,"check_func",choices=c(arrs1)) } } ) output$value3 <- renderPrint( { dims=input$checkbox print(dims) } ) output$value4 <- renderPrint( { g=scidb("users") fd1=iqdf(g) username=input$userlabel st=fd1$arrays[fd1$usernames==username] nme<-(strsplit(st, ";")[[1]][2]) print(nme) } ) # Generate a summary of the dataset # output$Count <- renderPrint({ # dataset <- datasetInput() # count(dataset) # }) output$Functions <- renderPrint({ datasetFunc() }) # Show the first "n" observations output$view <- renderTable({ head(scidb(input$dataset), n = input$obs) }) output$nText <- renderText({ ntext() })
/server/tab3.R
no_license
purvakolhatkar/sciDB-
R
false
false
5,075
r
# Define server logic required to summarize and view the # selected dataset # Return the requested dataset observe( { updatefunctionarray() } ) updatefunctionarray <- function() { g=scidb("users") fd1=iqdf(g) username=input$userlabel st=fd1$arrays[fd1$usernames==username] arrs<-(strsplit(st, ";")) arrs<-unlist(arrs) updateSelectInput(session,"dataset",choices=c(arrs)) } datasetInput <- reactive({ arrayName=input$dataset switch(input$dataset, selectedfunction=scidb(arrayName)) }) datasetFunc <- reactive( { if(input$functions=="join"){ dims1=input$check_func s_im=paste(" ",dims1) s_imm=paste(s_im,collapse = ",") s3<-c(input$check_func,input$functions) s4<-paste(s3,collapse = "_") ##store(join(left_array,right_array),result_array) s_join=("") s_join=paste(s_join,"store(join(",s_imm,"),",s4,")") iquery(s_join) } if(!is.null(input$checkbox)){ s1<-c(input$checkbox,input$functions) s2<-paste(s1,collapse = "_") if(input$functions=="cumulative"){ s_cum="" s_cum=paste(s_cum,"store(cumulate(", input$dataset, ",sum(",input$checkbox,")),",s2,")") iquery(s_cum) } else if(input$functions=="aggregate_sum"){ s_aggs="" s_aggs=paste(s_aggs,"store(aggregate(",input$dataset,",sum(",input$checkbox,")),",s2,")") iquery(s_aggs) } else if(input$functions=="aggregate_avg"){ s_agga="" s_agga=paste(s_agga,"store(aggregate(",input$dataset,",avg(",input$checkbox,")),",s2,")") iquery(s_agga) } else if(input$functions=="aggregate_prod"){ s_aggp="" s_aggp=paste(s_aggp,"store(aggregate(",input$dataset,",prod(",input$checkbox,")),",s2,")") iquery(s_aggp) } else if(input$functions=="bernoulli"){ updateTextInput(session, "func_param",label="Enter probability") s_bern="" s_bern=paste(s_bern,"store(bernoulli(",input$dataset,",",input$func_param,"),",s2,")") iquery(s_bern) } # else # if(input$functions=="selection2"){ # s_sel="" # s_sel=paste(s_sel,input$checkbox,"is selected",input$checkbox) #} else if(input$functions=="filter"){ updateTextInput(session, "func_param",label="Enter filter constraints 
(<,>,=)") s_fil="" s_fil=paste(s_fil,"store(filter(",input$dataset,",",input$checkbox,input$func_param,"),",s2,")") iquery(s_fil) } } switch(input$functions, "count" = count(scidb(input$dataset)), "cumulative" = head(scidb(s2),n=input$obs), "aggregate_avg" = head(scidb(s2),n=input$obs), "aggregate_prod" = head(scidb(s2),n=input$obs), "aggregate_sum" = head(scidb(s2),n=input$obs), "bernoulli" = head(scidb(s2),n=inp,ut$obs), #"selection2" = print(s_sel), "filter" = head(scidb(s2),n=input$obs), "join" = head(scidb(s4),n=input$obs) ) } ) observeEvent(input$Save, { str<-c(input$checkbox,input$functions) str1<-paste(str,collapse = "_") check_Save(str1) }) check_Save <- function(str1){ a=scidb("users") fd=iqdf(a) username=input$userlabel st=fd$arrays[fd$usernames==username] st=paste(st,str1,sep=';') fd$arrays[fd$usernames==username]<-st iquery("remove(users)") usernames=fd$usernames passwords=fd$passwords arrays=fd$arrays temp<-data.frame(usernames,passwords,arrays) x<-as.scidb(temp,name="users") updateVisualization() load_user_arrays(username) updatefunctionarray() } #observeEvent(input$functions=="join",{ #arrayName=input$dataset #array_data=scidb(arrayName) #attrs=scidb_attributes(array_data) #updateCheckboxGroupInput(session, "checkbox1",choices = c(attrs)) #}) observe( { arrayName=input$dataset array_data=scidb(arrayName) attrs=scidb_attributes(array_data) updateCheckboxGroupInput(session, "checkbox",choices = c(attrs)) } ) #observeEvent(input$functions=="join",{ observe( ###THIS WORKS! 
{ if(input$functions=="join") { f=scidb("users") fd2=iqdf(f) username=input$userlabel st=fd2$arrays[fd2$usernames==username] arrs1<-(strsplit(st, ";")) arrs1<-unlist(arrs1) updateCheckboxGroupInput(session,"check_func",choices=c(arrs1)) } } ) output$value3 <- renderPrint( { dims=input$checkbox print(dims) } ) output$value4 <- renderPrint( { g=scidb("users") fd1=iqdf(g) username=input$userlabel st=fd1$arrays[fd1$usernames==username] nme<-(strsplit(st, ";")[[1]][2]) print(nme) } ) # Generate a summary of the dataset # output$Count <- renderPrint({ # dataset <- datasetInput() # count(dataset) # }) output$Functions <- renderPrint({ datasetFunc() }) # Show the first "n" observations output$view <- renderTable({ head(scidb(input$dataset), n = input$obs) }) output$nText <- renderText({ ntext() })
# VCF to plink with data imputation using Random Forest #' @name vcf2plink #' @title VCF to plink with filters and data imputation #' @description For full details of the function, please use #' \pkg{radiator} \code{\link[radiator]{genomic_converter}}. This function is a shorcut #' to output only plink tped/tmap files. #' @inheritParams genomic_converter #' @inheritParams tidy_genomic_data #' @inheritParams write_genepop #' @inheritParams write_genind #' @inheritParams write_genlight #' @inheritParams write_structure #' @inheritParams write_plink #' @inheritParams write_vcf #' @inheritParams write_gtypes #' @inheritParams write_hierfstat #' @inheritParams radiator_imputations_module #' @export #' @rdname vcf2plink #' @import dplyr #' @import stringi #' @importFrom data.table fread #' @references Danecek P, Auton A, Abecasis G et al. (2011) #' The variant call format and VCFtools. #' Bioinformatics, 27, 2156-2158. #' @references Purcell S, Neale B, Todd-Brown K, Thomas L, Ferreira MAR, #' Bender D, et al. #' PLINK: a tool set for whole-genome association and population-based linkage #' analyses. #' American Journal of Human Genetics. 2007; 81: 559–575. 
doi:10.1086/519795 #' @seealso \code{\link[radiator]{genomic_converter}} #' @author Thierry Gosselin \email{thierrygosselin@@icloud.com} vcf2plink <- function( data, output, filename = NULL, blacklist.id = NULL, blacklist.genotype = NULL, whitelist.markers = NULL, monomorphic.out = TRUE, snp.ld = NULL, common.markers = TRUE, maf.thresholds = NULL, maf.pop.num.threshold = 1, maf.approach = "SNP", maf.operator = "OR", max.marker = NULL, strata = NULL, pop.levels = NULL, pop.labels = NULL, pop.select = NULL, imputation.method = NULL, hierarchical.levels = "populations", num.tree = 50, pred.mean.matching = 0, random.seed = NULL, verbose = FALSE, parallel.core = detectCores() - 1 ) { res <- genomic_converter( data, output = "plink", filename = filename, blacklist.id = blacklist.id, blacklist.genotype = blacklist.genotype, whitelist.markers = whitelist.markers, monomorphic.out = monomorphic.out, snp.ld = snp.ld, common.markers = common.markers, maf.thresholds = maf.thresholds, maf.pop.num.threshold = maf.pop.num.threshold, maf.approach = maf.approach, maf.operator = maf.operator, max.marker = max.marker, strata = strata, pop.levels = pop.levels, pop.labels = pop.labels, pop.select = pop.select, imputation.method = imputation.method, hierarchical.levels = hierarchical.levels, num.tree = num.tree, pred.mean.matching = pred.mean.matching, random.seed = random.seed, verbose = verbose, parallel.core = parallel.core ) return(res) }
/R/vcf2plink.R
no_license
chollenbeck/radiator
R
false
false
2,795
r
# VCF to plink with data imputation using Random Forest #' @name vcf2plink #' @title VCF to plink with filters and data imputation #' @description For full details of the function, please use #' \pkg{radiator} \code{\link[radiator]{genomic_converter}}. This function is a shorcut #' to output only plink tped/tmap files. #' @inheritParams genomic_converter #' @inheritParams tidy_genomic_data #' @inheritParams write_genepop #' @inheritParams write_genind #' @inheritParams write_genlight #' @inheritParams write_structure #' @inheritParams write_plink #' @inheritParams write_vcf #' @inheritParams write_gtypes #' @inheritParams write_hierfstat #' @inheritParams radiator_imputations_module #' @export #' @rdname vcf2plink #' @import dplyr #' @import stringi #' @importFrom data.table fread #' @references Danecek P, Auton A, Abecasis G et al. (2011) #' The variant call format and VCFtools. #' Bioinformatics, 27, 2156-2158. #' @references Purcell S, Neale B, Todd-Brown K, Thomas L, Ferreira MAR, #' Bender D, et al. #' PLINK: a tool set for whole-genome association and population-based linkage #' analyses. #' American Journal of Human Genetics. 2007; 81: 559–575. 
doi:10.1086/519795 #' @seealso \code{\link[radiator]{genomic_converter}} #' @author Thierry Gosselin \email{thierrygosselin@@icloud.com} vcf2plink <- function( data, output, filename = NULL, blacklist.id = NULL, blacklist.genotype = NULL, whitelist.markers = NULL, monomorphic.out = TRUE, snp.ld = NULL, common.markers = TRUE, maf.thresholds = NULL, maf.pop.num.threshold = 1, maf.approach = "SNP", maf.operator = "OR", max.marker = NULL, strata = NULL, pop.levels = NULL, pop.labels = NULL, pop.select = NULL, imputation.method = NULL, hierarchical.levels = "populations", num.tree = 50, pred.mean.matching = 0, random.seed = NULL, verbose = FALSE, parallel.core = detectCores() - 1 ) { res <- genomic_converter( data, output = "plink", filename = filename, blacklist.id = blacklist.id, blacklist.genotype = blacklist.genotype, whitelist.markers = whitelist.markers, monomorphic.out = monomorphic.out, snp.ld = snp.ld, common.markers = common.markers, maf.thresholds = maf.thresholds, maf.pop.num.threshold = maf.pop.num.threshold, maf.approach = maf.approach, maf.operator = maf.operator, max.marker = max.marker, strata = strata, pop.levels = pop.levels, pop.labels = pop.labels, pop.select = pop.select, imputation.method = imputation.method, hierarchical.levels = hierarchical.levels, num.tree = num.tree, pred.mean.matching = pred.mean.matching, random.seed = random.seed, verbose = verbose, parallel.core = parallel.core ) return(res) }
rm(list = ls()) gc(); options(stringsAsFactors = F) # Load up Movies universe TitlesUniverse <- read.xlsx("D:/Personal data/Data Science projects/IMDB Data/Output Data/TitlesUniverse.xlsx") TitlesUniverse <- TitlesUniverse[!is.na(TitlesUniverse$averageRating),] # Source scripts source("ScrapeRT_IMDBdata.R") #load up twitter auth load("MyTwitter_Oauth") # download.file(url="http://curl.haxx.se/ca/cacert.pem",destfile="cacert.pem") # Get Tweet Data getTweets <- function(search.string, no.of.tweets) { tweets <- searchTwitter(search.string, n=no.of.tweets, lang="en") if(length(tweets) > 0){ tweets.df <- twListToDF(tweets) # filter duplicates tweets.df %>% group_by(text,favorited,favoriteCount,replyToSN, screenName,retweetCount,isRetweet,retweeted) %>% dplyr::summarise(id = min(as.numeric(id))) -> tweets.df # Remove special characters from tweets tweets.df2 <- gsub(" ?(f|ht)tp(s?)://(.*)[.][a-z]+","",tweets.df$text) tweets.df2 <- gsub("RT @[a-z,A-Z]*: ","",tweets.df2) tweets.df2 <- gsub("#[a-z,A-Z]*","",tweets.df2) tweets.df2 <- gsub("@[a-z,A-Z]*","",tweets.df2) tweetData <- data.frame(TweetID = as.character(tweets.df$id), TweetText = as.character(tweets.df2), FavoriteCount = as.character(tweets.df$favoriteCount), RetweetCount = as.character(tweets.df$retweetCount)) }else{tweetData <- data.frame(TweetID = as.character(NA), TweetText = as.character(NA), FavoriteCount = as.character(NA), RetweetCount = as.character(NA))} return(tweetData) } # # Get FB page Data # getPageData <- function() # Get sentiment scores scoreSentiment <- function(ID,tweetdf){ library("syuzhet") word.df <- as.vector(tweetdf$TweetText) word.df <- iconv(word.df, to = "ASCII", sub = "") emotion.df <- get_sentiment(word.df) tweetdf$EmotionScore <- emotion.df tweetdf <- data.table(tweetdf) positiveSentiment <- tweetdf[EmotionScore > 0, sum(EmotionScore* sum(c((as.numeric(FavoriteCount)*1/3) ,(as.numeric(RetweetCount)*2/3))))] negativeSentiment <- tweetdf[EmotionScore < 0, sum(EmotionScore* 
sum(c((as.numeric(FavoriteCount)*1/3) ,(as.numeric(RetweetCount)*2/3))))] emotion.df2 <- data.frame(TitleID = ID, positiveSentiment = positiveSentiment, negativeSentiment = negativeSentiment) return(emotion.df2) } sampleDataRun <- TitlesUniverse#[sample(1:nrow(TitlesUniverse), 5),] library(plyr) library(dplyr) tweetsAndSentimentData <- data.frame() AlltweetsData <- data.frame() errorScrapingRecords <- data.frame() moviesData <- data.frame() for(i in 1:nrow(sampleDataRun)){ print(i) searchString <- paste0("#",gsub(" ","+",sampleDataRun$title[i])) titleID <- sampleDataRun$titleId[i] Movietweets <- getTweets(searchString,1000) Movietweets$TitleID <- titleID AlltweetsData <- rbind.fill(AlltweetsData, Movietweets) if(length(Movietweets) > 0){ SentimentData <- scoreSentiment(titleID,Movietweets) tweetsAndSentimentData <- rbind.fill(tweetsAndSentimentData, SentimentData) } titleid <- sampleDataRun$titleId[i] title <- sampleDataRun$title[i] movieData <- try(parseIMDB_RTData(titleid, title),silent = T) if(class(movieData) == 'try-error'){ errorRecord <- data.frame(TitleID = titleid, Title = title, ErrorLog = as.character(attributes(movieData)$condition)) errorScrapingRecords <- rbind.fill(errorScrapingRecords,errorRecord) }else{ moviesData <- rbind.fill(moviesData,movieData) } } write.xlsx(tweetsAndSentimentData, "D:/Personal data/Data Science projects/IMDB Data/Output Data/TweetSentiments.xlsx") write.csv(AlltweetsData, "D:/Personal data/Data Science projects/IMDB Data/Output Data/AlltweetsData.csv") write.xlsx(moviesData, "D:/Personal data/Data Science projects/IMDB Data/Output Data/sampleMoviesData.xlsx")
/IMDB Data/Push to got/Scripts/SocialImpactAndSentiment.R
no_license
nemesis3192/Data-Science-Projects
R
false
false
4,367
r
rm(list = ls()) gc(); options(stringsAsFactors = F) # Load up Movies universe TitlesUniverse <- read.xlsx("D:/Personal data/Data Science projects/IMDB Data/Output Data/TitlesUniverse.xlsx") TitlesUniverse <- TitlesUniverse[!is.na(TitlesUniverse$averageRating),] # Source scripts source("ScrapeRT_IMDBdata.R") #load up twitter auth load("MyTwitter_Oauth") # download.file(url="http://curl.haxx.se/ca/cacert.pem",destfile="cacert.pem") # Get Tweet Data getTweets <- function(search.string, no.of.tweets) { tweets <- searchTwitter(search.string, n=no.of.tweets, lang="en") if(length(tweets) > 0){ tweets.df <- twListToDF(tweets) # filter duplicates tweets.df %>% group_by(text,favorited,favoriteCount,replyToSN, screenName,retweetCount,isRetweet,retweeted) %>% dplyr::summarise(id = min(as.numeric(id))) -> tweets.df # Remove special characters from tweets tweets.df2 <- gsub(" ?(f|ht)tp(s?)://(.*)[.][a-z]+","",tweets.df$text) tweets.df2 <- gsub("RT @[a-z,A-Z]*: ","",tweets.df2) tweets.df2 <- gsub("#[a-z,A-Z]*","",tweets.df2) tweets.df2 <- gsub("@[a-z,A-Z]*","",tweets.df2) tweetData <- data.frame(TweetID = as.character(tweets.df$id), TweetText = as.character(tweets.df2), FavoriteCount = as.character(tweets.df$favoriteCount), RetweetCount = as.character(tweets.df$retweetCount)) }else{tweetData <- data.frame(TweetID = as.character(NA), TweetText = as.character(NA), FavoriteCount = as.character(NA), RetweetCount = as.character(NA))} return(tweetData) } # # Get FB page Data # getPageData <- function() # Get sentiment scores scoreSentiment <- function(ID,tweetdf){ library("syuzhet") word.df <- as.vector(tweetdf$TweetText) word.df <- iconv(word.df, to = "ASCII", sub = "") emotion.df <- get_sentiment(word.df) tweetdf$EmotionScore <- emotion.df tweetdf <- data.table(tweetdf) positiveSentiment <- tweetdf[EmotionScore > 0, sum(EmotionScore* sum(c((as.numeric(FavoriteCount)*1/3) ,(as.numeric(RetweetCount)*2/3))))] negativeSentiment <- tweetdf[EmotionScore < 0, sum(EmotionScore* 
sum(c((as.numeric(FavoriteCount)*1/3) ,(as.numeric(RetweetCount)*2/3))))] emotion.df2 <- data.frame(TitleID = ID, positiveSentiment = positiveSentiment, negativeSentiment = negativeSentiment) return(emotion.df2) } sampleDataRun <- TitlesUniverse#[sample(1:nrow(TitlesUniverse), 5),] library(plyr) library(dplyr) tweetsAndSentimentData <- data.frame() AlltweetsData <- data.frame() errorScrapingRecords <- data.frame() moviesData <- data.frame() for(i in 1:nrow(sampleDataRun)){ print(i) searchString <- paste0("#",gsub(" ","+",sampleDataRun$title[i])) titleID <- sampleDataRun$titleId[i] Movietweets <- getTweets(searchString,1000) Movietweets$TitleID <- titleID AlltweetsData <- rbind.fill(AlltweetsData, Movietweets) if(length(Movietweets) > 0){ SentimentData <- scoreSentiment(titleID,Movietweets) tweetsAndSentimentData <- rbind.fill(tweetsAndSentimentData, SentimentData) } titleid <- sampleDataRun$titleId[i] title <- sampleDataRun$title[i] movieData <- try(parseIMDB_RTData(titleid, title),silent = T) if(class(movieData) == 'try-error'){ errorRecord <- data.frame(TitleID = titleid, Title = title, ErrorLog = as.character(attributes(movieData)$condition)) errorScrapingRecords <- rbind.fill(errorScrapingRecords,errorRecord) }else{ moviesData <- rbind.fill(moviesData,movieData) } } write.xlsx(tweetsAndSentimentData, "D:/Personal data/Data Science projects/IMDB Data/Output Data/TweetSentiments.xlsx") write.csv(AlltweetsData, "D:/Personal data/Data Science projects/IMDB Data/Output Data/AlltweetsData.csv") write.xlsx(moviesData, "D:/Personal data/Data Science projects/IMDB Data/Output Data/sampleMoviesData.xlsx")
# # This is a Shiny web application. You can run the application by clicking # the 'Run App' button above. # # Find out more about building applications with Shiny here: # # http://shiny.rstudio.com/ # library(Hmisc) library(SASxport) library(shiny) library(DT) #library("purrr") ui <- navbarPage("wranglR", tabPanel("README", h2("WELCOME TO wranglR release 1", align="center"), p("Objective: To create an web application that will expedite the Data Wrangling Process"), strong("XPT to CSV CONVERTER"), p("If you have XPT files on your local machine that you would like to convert use this tool"), p("NOTE: When specifying output directory, enter full file path for the converted csv (ex. /home/dir1/dir2/....)"), strong("DATA SELECTION"), p("Choose a csv file that you have on your local machine and select the columns that you want subset the observations that you want."), strong("DATA MERGING"), p("DATA MERGING"), p("Choose multiple csv files and merge them together by SEQN variable"), p("NOTE: STRONGLY RECOMMENDED THAT YOU MERGE DATA FILES BY YEAR"), p("DATA STACKING"), p("Choose a table type that has been found across mulitple years (ex. 
Demo_H,Demo_G,..etc"), p("Data Merging section will allow you to merge multiple tables based on a common index"), strong("NOTE: INDEX MUST BE FOUND THROUGHOUT ALL TABLES"), p("Download the data Here"), downloadButton('downloadNHANES', 'Download NHANES'), p("Dataset documentation"), strong("DISCLAIMER: For this small, sanitized dataset, dataframe columns are consistent across all years"), HTML("<ul> <li><a href='https://wwwn.cdc.gov/Nchs/Nhanes/2013-2014/DEMO_H.htm'>Demographics</a></li> <li><a href='https://wwwn.cdc.gov/Nchs/Nhanes/2013-2014/HEPC_H.htm'>Hepatitis C: RNA (HCV-RNA) and Hepatitis C Genotype </a></li> <li><a href='https://wwwn.cdc.gov/Nchs/Nhanes/2013-2014/TRIGLY_H.htm'>Cholesterol - LDL & Triglycerides </a></li> <li><a href='https://wwwn.cdc.gov/Nchs/Nhanes/2013-2014/HDL_H.htm'>Cholesterol - HDL</a></li> <li><a href='https://wwwn.cdc.gov/Nchs/Nhanes/2013-2014/HIV_H.htm'>HIV Antibody Test</a></li> <li><a href='https://wwwn.cdc.gov/Nchs/Nhanes/2013-2014/BMX_H.htm'>Body Measures</a></li> </ul>") ) , tabPanel("SAS XPT to CSV Converter", fluidRow( sidebarLayout( sidebarPanel( fileInput("xptfile", "Choose XPT File"), #selectInput("col", "Select a column", character(0)) textInput("directoryName", "Please Specify output directory"), textInput("new_dataName", "Name your file", width = "50%"), actionButton('convert','Convert XPT to CSV') ), mainPanel( DT::dataTableOutput("mytable32") # textOutput("selected") ) ) ) ) #end tab panel , tabPanel("Data Selection", fluidRow( sidebarLayout( sidebarPanel( fileInput("file", "Choose CSV File"), #selectInput("col", "Select a column", character(0)) checkboxGroupInput("columns","Select Columns",character(0)), actionButton("updateButton","Update View"), textInput("dataName", "Name your file", width = "50%"), downloadButton('downloadData', 'Download') ), mainPanel( DT::dataTableOutput("mytable1"), textOutput("selected") ) ) ) ) , tabPanel("Data merging", fluidRow( sidebarLayout( sidebarPanel( fileInput("csvs", "Choose CSV 
File",multiple=TRUE), textInput("dataName2", "Name your file", width = "50%"), downloadButton('downloadData2', 'Download') ), mainPanel( DT::dataTableOutput("table2") ) ) ) ), tabPanel("Data Stacking", fluidRow( sidebarLayout( sidebarPanel( fileInput("csvs2", "Choose CSV File",multiple=FALSE), fileInput("csvs3", "Choose CSV File",multiple=FALSE), textInput("dataName_stack", "Name your file", width = "50%"), downloadButton('downloadData3', 'Download') ), mainPanel( DT::dataTableOutput("table3") ) ) ) ) ) server <- function(input, output, session) { #### Sub Setting Data #### ###get and load data data <- reactive({ inFile <- input$file if (is.null(inFile)) return(NULL) read.csv(inFile$datapath) }) #display output output$mytable1 <- DT::renderDataTable({ df <- as.data.frame(data()) DT::datatable(df, options = list(lengthMenu = c(5, 30, 50), pageLength = 5)) }) ### DATA SELECTION #display and update column selection observeEvent(data(), { removeUI( selector = "div:has(> #rows)") ## clear up any previous slides updateCheckboxGroupInput(session, "columns", choices = names(data())) df <- data.frame(data()) insertUI( print("inside the slide"), #print(nrow(df)), #print(max(nrow(df))), selector = "#updateButton", where = "beforeBegin", ui = sliderInput(inputId = "rows" , label = "Observations", min = 1, max = nrow(df), value = c(1,nrow(df)), width = "200%") ) #end UI_Button }) #update the table observeEvent(input$updateButton, { df <- data.frame(data()) df <- df[min(input$rows):max(input$rows),] print(max(input$rows)) print(min(input$rows)) df <- subset(df,select = input$columns) # column subsetting takes place here output$mytable1 <- DT::renderDataTable({ DT::datatable(df, options = list(lengthMenu = c(5, 30, 50), pageLength = 5)) }) }) ###DATA SELECTION INPUT FOR Downloaded Data datasetInput <- reactive({ df <- subset(data.frame(data()), select = input$columns) ##column selection df <- df[min(input$rows):max(input$rows),] #row selection }) ##DATA SELECTION DOWNLOAD 
output$downloadData <- downloadHandler( filename = function() { paste(input$dataName, ".csv", sep = "") }, content = function(file) { write.csv(datasetInput(), file, row.names = FALSE) } ) #Merging mycsvs<-reactive({ Reduce(function(x,y) merge(x, y, by = "seqn", all.x = TRUE, all.y = TRUE),lapply(input$csvs$datapath, read.csv)) }) merge_data <- reactive({ df <- mycsvs() }) output$table2 <- renderDataTable({ DT::datatable(merge_data(), options = list(lengthMenu = c(5, 30, 50), pageLength = 5)) }) output$downloadData2 <- downloadHandler( filename = function() { paste(input$dataName2, ".csv", sep = "") }, content = function(file) { write.csv(merge_data(), file, row.names = FALSE) } ) output$downloadNHANES <- downloadHandler( filename = function() { "files.zip" }, content = function(file) { zip(zipfile=file, files="./NHANES_CLEAN") } ) ###XPT To CSV Converter check_dir <-function(directory){ flag <-0 result <- tryCatch({ setwd(directory) print('about to process my data') return(flag) }, warning = function(war) { # warning handler picks up where error was generated print('warning') flag <- -1 return(flag) }, error = function(err) { showNotification(paste("ERROR, DID NOT SELECT PROPER DIRECTORY "), duration = 5,type = 'error') Sys.sleep(5) flag <- -1 return(flag) }, finally = { print('Exiting Try Catch') }) # END tryCatch } XpttoCsv.func <- function(a,b,c=NULL){ xpt.name <- paste(a) #get the name of the xpt file csv.name <- paste(c,b,".csv",sep="") #create path and file name of new csv dat <- sasxport.get(xpt.name) # conversion write.csv(dat[,-1],file=csv.name,row.names = FALSE) } observeEvent(input$convert, { if (is.null(input$xptfile)) print('select a darn file') xpt <-input$xptfile print('data is loaded') xpt.name <- xpt$name dir.output <- input$directoryName new_csv_name <- input$new_dataName val <- check_dir(dir.output) #function to check directory print(val) if(val == 0){ #convert only if we have a valid directory XpttoCsv.func(xpt.name,new_csv_name,dir.output) } 
}) ###data stacking stufff # Row Merging row.merge.csvs <- reactive({ row1 <- lapply(input$csvs2$datapath, read.csv) row1 <- as.data.frame(row1)[,-1] # Removing Index created by CSV Read in row2 <- lapply(input$csvs3$datapath, read.csv) row2 <- as.data.frame(row2)[,-1] # Removing Index created by CSV Read in merge(row1, row2, by =intersect(names(row1),names(row2)) ,all=TRUE) }) stack_rows <- reactive({ df <- row.merge.csvs() }) output$table3 <- renderDataTable({ DT::datatable(stack_rows(), options = list(lengthMenu = c(5, 30, 50), pageLength = 5)) }) output$downloadData3 <- downloadHandler( filename = function() { paste(input$dataName_stack, ".csv", sep = "") }, content = function(file) { write.csv(stack_rows(), file, row.names = FALSE) } ) } shinyApp(ui, server)
/Spikes/wranglR_v.0.3/app.R
no_license
ApolinarO/SDR
R
false
false
11,176
r
# # This is a Shiny web application. You can run the application by clicking # the 'Run App' button above. # # Find out more about building applications with Shiny here: # # http://shiny.rstudio.com/ # library(Hmisc) library(SASxport) library(shiny) library(DT) #library("purrr") ui <- navbarPage("wranglR", tabPanel("README", h2("WELCOME TO wranglR release 1", align="center"), p("Objective: To create an web application that will expedite the Data Wrangling Process"), strong("XPT to CSV CONVERTER"), p("If you have XPT files on your local machine that you would like to convert use this tool"), p("NOTE: When specifying output directory, enter full file path for the converted csv (ex. /home/dir1/dir2/....)"), strong("DATA SELECTION"), p("Choose a csv file that you have on your local machine and select the columns that you want subset the observations that you want."), strong("DATA MERGING"), p("DATA MERGING"), p("Choose multiple csv files and merge them together by SEQN variable"), p("NOTE: STRONGLY RECOMMENDED THAT YOU MERGE DATA FILES BY YEAR"), p("DATA STACKING"), p("Choose a table type that has been found across mulitple years (ex. 
Demo_H,Demo_G,..etc"), p("Data Merging section will allow you to merge multiple tables based on a common index"), strong("NOTE: INDEX MUST BE FOUND THROUGHOUT ALL TABLES"), p("Download the data Here"), downloadButton('downloadNHANES', 'Download NHANES'), p("Dataset documentation"), strong("DISCLAIMER: For this small, sanitized dataset, dataframe columns are consistent across all years"), HTML("<ul> <li><a href='https://wwwn.cdc.gov/Nchs/Nhanes/2013-2014/DEMO_H.htm'>Demographics</a></li> <li><a href='https://wwwn.cdc.gov/Nchs/Nhanes/2013-2014/HEPC_H.htm'>Hepatitis C: RNA (HCV-RNA) and Hepatitis C Genotype </a></li> <li><a href='https://wwwn.cdc.gov/Nchs/Nhanes/2013-2014/TRIGLY_H.htm'>Cholesterol - LDL & Triglycerides </a></li> <li><a href='https://wwwn.cdc.gov/Nchs/Nhanes/2013-2014/HDL_H.htm'>Cholesterol - HDL</a></li> <li><a href='https://wwwn.cdc.gov/Nchs/Nhanes/2013-2014/HIV_H.htm'>HIV Antibody Test</a></li> <li><a href='https://wwwn.cdc.gov/Nchs/Nhanes/2013-2014/BMX_H.htm'>Body Measures</a></li> </ul>") ) , tabPanel("SAS XPT to CSV Converter", fluidRow( sidebarLayout( sidebarPanel( fileInput("xptfile", "Choose XPT File"), #selectInput("col", "Select a column", character(0)) textInput("directoryName", "Please Specify output directory"), textInput("new_dataName", "Name your file", width = "50%"), actionButton('convert','Convert XPT to CSV') ), mainPanel( DT::dataTableOutput("mytable32") # textOutput("selected") ) ) ) ) #end tab panel , tabPanel("Data Selection", fluidRow( sidebarLayout( sidebarPanel( fileInput("file", "Choose CSV File"), #selectInput("col", "Select a column", character(0)) checkboxGroupInput("columns","Select Columns",character(0)), actionButton("updateButton","Update View"), textInput("dataName", "Name your file", width = "50%"), downloadButton('downloadData', 'Download') ), mainPanel( DT::dataTableOutput("mytable1"), textOutput("selected") ) ) ) ) , tabPanel("Data merging", fluidRow( sidebarLayout( sidebarPanel( fileInput("csvs", "Choose CSV 
File",multiple=TRUE), textInput("dataName2", "Name your file", width = "50%"), downloadButton('downloadData2', 'Download') ), mainPanel( DT::dataTableOutput("table2") ) ) ) ), tabPanel("Data Stacking", fluidRow( sidebarLayout( sidebarPanel( fileInput("csvs2", "Choose CSV File",multiple=FALSE), fileInput("csvs3", "Choose CSV File",multiple=FALSE), textInput("dataName_stack", "Name your file", width = "50%"), downloadButton('downloadData3', 'Download') ), mainPanel( DT::dataTableOutput("table3") ) ) ) ) ) server <- function(input, output, session) { #### Sub Setting Data #### ###get and load data data <- reactive({ inFile <- input$file if (is.null(inFile)) return(NULL) read.csv(inFile$datapath) }) #display output output$mytable1 <- DT::renderDataTable({ df <- as.data.frame(data()) DT::datatable(df, options = list(lengthMenu = c(5, 30, 50), pageLength = 5)) }) ### DATA SELECTION #display and update column selection observeEvent(data(), { removeUI( selector = "div:has(> #rows)") ## clear up any previous slides updateCheckboxGroupInput(session, "columns", choices = names(data())) df <- data.frame(data()) insertUI( print("inside the slide"), #print(nrow(df)), #print(max(nrow(df))), selector = "#updateButton", where = "beforeBegin", ui = sliderInput(inputId = "rows" , label = "Observations", min = 1, max = nrow(df), value = c(1,nrow(df)), width = "200%") ) #end UI_Button }) #update the table observeEvent(input$updateButton, { df <- data.frame(data()) df <- df[min(input$rows):max(input$rows),] print(max(input$rows)) print(min(input$rows)) df <- subset(df,select = input$columns) # column subsetting takes place here output$mytable1 <- DT::renderDataTable({ DT::datatable(df, options = list(lengthMenu = c(5, 30, 50), pageLength = 5)) }) }) ###DATA SELECTION INPUT FOR Downloaded Data datasetInput <- reactive({ df <- subset(data.frame(data()), select = input$columns) ##column selection df <- df[min(input$rows):max(input$rows),] #row selection }) ##DATA SELECTION DOWNLOAD 
output$downloadData <- downloadHandler( filename = function() { paste(input$dataName, ".csv", sep = "") }, content = function(file) { write.csv(datasetInput(), file, row.names = FALSE) } ) #Merging mycsvs<-reactive({ Reduce(function(x,y) merge(x, y, by = "seqn", all.x = TRUE, all.y = TRUE),lapply(input$csvs$datapath, read.csv)) }) merge_data <- reactive({ df <- mycsvs() }) output$table2 <- renderDataTable({ DT::datatable(merge_data(), options = list(lengthMenu = c(5, 30, 50), pageLength = 5)) }) output$downloadData2 <- downloadHandler( filename = function() { paste(input$dataName2, ".csv", sep = "") }, content = function(file) { write.csv(merge_data(), file, row.names = FALSE) } ) output$downloadNHANES <- downloadHandler( filename = function() { "files.zip" }, content = function(file) { zip(zipfile=file, files="./NHANES_CLEAN") } ) ###XPT To CSV Converter check_dir <-function(directory){ flag <-0 result <- tryCatch({ setwd(directory) print('about to process my data') return(flag) }, warning = function(war) { # warning handler picks up where error was generated print('warning') flag <- -1 return(flag) }, error = function(err) { showNotification(paste("ERROR, DID NOT SELECT PROPER DIRECTORY "), duration = 5,type = 'error') Sys.sleep(5) flag <- -1 return(flag) }, finally = { print('Exiting Try Catch') }) # END tryCatch } XpttoCsv.func <- function(a,b,c=NULL){ xpt.name <- paste(a) #get the name of the xpt file csv.name <- paste(c,b,".csv",sep="") #create path and file name of new csv dat <- sasxport.get(xpt.name) # conversion write.csv(dat[,-1],file=csv.name,row.names = FALSE) } observeEvent(input$convert, { if (is.null(input$xptfile)) print('select a darn file') xpt <-input$xptfile print('data is loaded') xpt.name <- xpt$name dir.output <- input$directoryName new_csv_name <- input$new_dataName val <- check_dir(dir.output) #function to check directory print(val) if(val == 0){ #convert only if we have a valid directory XpttoCsv.func(xpt.name,new_csv_name,dir.output) } 
}) ###data stacking stufff # Row Merging row.merge.csvs <- reactive({ row1 <- lapply(input$csvs2$datapath, read.csv) row1 <- as.data.frame(row1)[,-1] # Removing Index created by CSV Read in row2 <- lapply(input$csvs3$datapath, read.csv) row2 <- as.data.frame(row2)[,-1] # Removing Index created by CSV Read in merge(row1, row2, by =intersect(names(row1),names(row2)) ,all=TRUE) }) stack_rows <- reactive({ df <- row.merge.csvs() }) output$table3 <- renderDataTable({ DT::datatable(stack_rows(), options = list(lengthMenu = c(5, 30, 50), pageLength = 5)) }) output$downloadData3 <- downloadHandler( filename = function() { paste(input$dataName_stack, ".csv", sep = "") }, content = function(file) { write.csv(stack_rows(), file, row.names = FALSE) } ) } shinyApp(ui, server)
aa54b9e9466112fbfc616c0626a3033e s499_d6_s.qdimacs 9487 11269
/code/dcnf-ankit-optimized/Results/QBFLIB-2018/E1/Database/Mneimneh-Sakallah/s499/s499_d6_s/s499_d6_s.R
no_license
arey0pushpa/dcnf-autarky
R
false
false
61
r
aa54b9e9466112fbfc616c0626a3033e s499_d6_s.qdimacs 9487 11269
source("R/dbInterface.R") getOrders <- function(name, endDate = Sys.time(), mode = "operation", adjusted = TRUE) { timeFrame <- NULL if(mode == "simulation") { timeFrame <- unlist(strsplit(name, "[.]"))[2] if(is.na(timeFrame)) timeFrame <- "1D" } pos <- getPositions(unlist(strsplit(name, "[.]"))[1], timeFrame, as.Date(endDate), mode = mode) symbol <- base::get(name) if(length(pos) == 0) return(NULL) posit <- NULL i <- 0 for(reg in pos) { if(!is.na(reg$end)) ed <- reg$end else if(!is.null(endDate)) ed <- endDate else ed <- time(tail(symbol, n=1)) period <- sprintf("%s::%s", reg$start, ed) if(nrow(symbol[period]) == 0) next firstReg <- head(symbol[period], n=1) lastReg <- tail(symbol[period], n=1) idx <- unique(c(index(firstReg), index(lastReg))) xNew <- xts(rep(NA, length(idx)), order.by = idx) xNew[time(lastReg)] <- ifelse(is.na(reg$closeVal) == FALSE, reg$closeVal, as.double(Cl(lastReg))) xNew[time(firstReg)] <- reg$openVal xNew <- rbind(xNew, Cl(symbol[index(symbol) > index(lastReg) & as.Date(index(symbol)) <= as.Date(endDate)])) if(adjusted && nrow(xNew) > 1) xNew <- adjustOperations(unlist(strsplit(name, "[.]"))[1], xNew) xt <- xts(rep(NA, length(unique(index(symbol[period])))), order.by = unique(index(symbol[period]))) xt[idx] <- xNew[idx] xNew <- xt if(as.double(xNew[time(firstReg)]) < as.double(xNew[time(lastReg)])) col <- ifelse(mode == "operation", 3, 4) else col <- ifelse(mode == "operation", 2, 7) i <- i + 1 objName <- sprintf("%s.p%d", name, i) position <- xts(na.approx(as.double(xNew)), order.by = index(xNew)) assign(objName, position, .GlobalEnv) posit <- c(posit, sprintf("addTA(%s, lwd=2, on=1, col=%d)", objName, col)) } return(posit) }
/R/orders.R
no_license
mbijon/TraderBot
R
false
false
1,877
r
# Loads the database-access helpers used below (getPositions, adjustOperations).
source("R/dbInterface.R")

# Build quantmod addTA() annotation strings for every recorded position of a
# symbol, so positions can be overlaid on a price chart.
#
# Args:
#   name:     symbol name, optionally suffixed with a time frame ("SYMB.1D" —
#             the suffix is only consulted in "simulation" mode).
#   endDate:  upper bound of the charted period (defaults to now).
#   mode:     "operation" or "simulation"; forwarded to getPositions() and used
#             to choose the plot colours.
#   adjusted: when TRUE and the series has more than one row, apply
#             adjustOperations() to the built series.
# Returns:
#   a character vector of "addTA(...)" expressions (one per position), or NULL
#   when the symbol has no positions.
# Side effect: assigns one xts series per position into .GlobalEnv so the
# returned addTA() expressions can resolve them by name when evaluated.
getOrders <- function(name, endDate = Sys.time(), mode = "operation", adjusted = TRUE) {
  timeFrame <- NULL
  if(mode == "simulation") {
    # "SYMB.1D" -> time frame "1D"; default to "1D" when no suffix is present.
    timeFrame <- unlist(strsplit(name, "[.]"))[2]
    if(is.na(timeFrame)) timeFrame <- "1D"
  }
  pos <- getPositions(unlist(strsplit(name, "[.]"))[1], timeFrame, as.Date(endDate), mode = mode)
  symbol <- base::get(name)
  if(length(pos) == 0) return(NULL)
  posit <- NULL
  i <- 0
  for(reg in pos) {
    # Position end: explicit end date, else the requested endDate, else the
    # timestamp of the last available bar.
    if(!is.na(reg$end)) ed <- reg$end else if(!is.null(endDate)) ed <- endDate else ed <- time(tail(symbol, n=1))
    period <- sprintf("%s::%s", reg$start, ed)
    if(nrow(symbol[period]) == 0) next
    firstReg <- head(symbol[period], n=1)
    lastReg <- tail(symbol[period], n=1)
    # Series holding only the open and close anchor points of the position.
    idx <- unique(c(index(firstReg), index(lastReg)))
    xNew <- xts(rep(NA, length(idx)), order.by = idx)
    # Close at the recorded close value when available, else the bar's close.
    xNew[time(lastReg)] <- ifelse(is.na(reg$closeVal) == FALSE, reg$closeVal, as.double(Cl(lastReg)))
    xNew[time(firstReg)] <- reg$openVal
    # Extend with the close prices after the position up to endDate.
    xNew <- rbind(xNew, Cl(symbol[index(symbol) > index(lastReg) & as.Date(index(symbol)) <= as.Date(endDate)]))
    if(adjusted && nrow(xNew) > 1) xNew <- adjustOperations(unlist(strsplit(name, "[.]"))[1], xNew)
    # Re-align onto the full set of period timestamps; gaps stay NA and are
    # interpolated below by na.approx().
    xt <- xts(rep(NA, length(unique(index(symbol[period])))), order.by = unique(index(symbol[period])))
    xt[idx] <- xNew[idx]
    xNew <- xt
    # Colour codes: winning position -> 3/4, losing -> 2/7 (operation/simulation).
    if(as.double(xNew[time(firstReg)]) < as.double(xNew[time(lastReg)])) col <- ifelse(mode == "operation", 3, 4) else col <- ifelse(mode == "operation", 2, 7)
    i <- i + 1
    objName <- sprintf("%s.p%d", name, i)
    position <- xts(na.approx(as.double(xNew)), order.by = index(xNew))
    # Exported to the global environment so the addTA() strings below can find it.
    assign(objName, position, .GlobalEnv)
    posit <- c(posit, sprintf("addTA(%s, lwd=2, on=1, col=%d)", objName, col))
  }
  return(posit)
}
#' Shared kernel-density computation (internal)
#'
#' Replicates the core of stats::density(): Silverman-style bandwidth,
#' binning, FFT convolution with a Gaussian kernel, and interpolation onto a
#' 512-point grid. Extracted so the two exported wrappers below no longer
#' duplicate the whole pipeline.
#'
#' @param x data to approximate the density for
#' @param N length of x
#' @param minV minimum value of x
#' @param maxV maximum value of x
#' @return list with grid points `x` and density values `y`
#' @keywords internal
.fitDensityXY <- function(x, N, minV, maxV) {
    # 0.9 * sd(x) * N^(-1/5): bw.nrd0-style bandwidth without the IQR term.
    bw <- 0.9 * .Call(stats:::C_cov, x, NULL, 5, FALSE)^0.5 * N^(-0.2)
    c1 <- 3*bw
    from <- minV - c1
    to <- maxV + c1
    c1 <- c1+bw
    lo <- from - c1
    up <- to + c1
    # Bin the observations with uniform weights 1/N.
    y <- .Call(stats:::C_BinDist, x, rep_len(1/N, N), lo, up, 512)
    # Gaussian kernel evaluated on a mirrored grid, then convolved via FFT.
    kords <- seq.int(0, 2*(up-lo), length.out = 1024)
    kords[(514):(1024)] <- -kords[512:2]
    kords <- dnorm(kords, sd = bw)
    kords <- fft( fft(y)* Conj(fft(kords)), inverse=TRUE)
    kords <- pmax.int(0, Re(kords)[1L:512]/512)  # clamp FFT noise below zero
    xords <- seq.int(lo, up, length.out = 512)
    xout <- seq.int(from, to, length.out = 512)
    yout <- .Call(stats:::C_Approx, xords, kords, xout, 1, NA, NA, 0)
    list(x = xout, y = yout)
}

#' @title Helper function for density function approximation
#'
#' @description Combines density and approxfun.
#' WARNING: All data consistency checks have been removed!
#'
#' @param x data to approximate fun for
#' @param N length of x
#' @param minV minimum value of x
#' @param maxV maximum value of x
#'
#' @return Function approximating the initial data
#'
#' @export
#'
#' @examples
#' data <- rnorm(100)
#' minV <- min(data)
#' maxV <- max(data)
#' n <- length(data)
#' fitDensityFunGetX(data, n, minV, maxV)
fitDensityFunGetX <- function(x, N, minV, maxV) {
    d <- .fitDensityXY(x, N, minV, maxV)
    # Closure interpolating the precomputed density grid at arbitrary points.
    function(v) stats:::.approxfun(d$x, d$y, v, 1, NA, NA, 0)
}

#' @title Evaluate the approximated density at given points
#'
#' @description Same pipeline as fitDensityFunGetX(), but evaluates the
#' density directly at `v` instead of returning a closure.
#'
#' @param x data to approximate fun for
#' @param N length of x
#' @param minV minimum value of x
#' @param maxV maximum value of x
#' @param v points at which to evaluate the density
#'
#' @return Numeric vector of density values at `v`
fitDensityFunGetXVals <- function(x, N, minV, maxV,v ) {
    d <- .fitDensityXY(x, N, minV, maxV)
    stats:::.approxfun(d$x, d$y, v, 1, NA, NA, 0)
}
/R/approxDensityFun.R
no_license
mknoll/imageanalysisBrain
R
false
false
2,035
r
#' @title Helper function for density function approximation
#'
#' @description Combines density and approxfun.
#' WARNING: All data consistency checks have been removed!
#'
#' @param x data to approximate fun for
#' @param N length of x
#' @param minV minimum value of x
#' @param maxV maximum value of x
#'
#' @return Function approximating the initial data
#'
#' @export
#'
#' @examples
#' data <- rnorm(100)
#' minV <- min(data)
#' maxV <- max(data)
#' n <- length(data)
#' fitDensityFunGetX(data, n, minV, maxV)
fitDensityFunGetX <- function(x, N, minV, maxV) {
    # 0.9 * sd(x) * N^(-1/5): bw.nrd0-style bandwidth without the IQR term.
    bw <- 0.9 * .Call(stats:::C_cov, x, NULL, 5, FALSE)^0.5 * N^(-0.2)
    # Pad the evaluation range by 3*bw (and the binning range by 4*bw).
    c1 <- 3*bw
    from <- minV - c1
    to <- maxV + c1
    c1 <- c1+bw
    lo <- from - c1
    up <- to + c1
    # Bin the observations with uniform weights 1/N into 512 bins.
    y <- .Call(stats:::C_BinDist, x, rep_len(1/N, N), lo, up, 512)
    # Gaussian kernel on a mirrored grid, convolved with the bins via FFT
    # (this mirrors the internals of stats::density()).
    kords <- seq.int(0, 2*(up-lo), length.out = 1024)
    kords[(514):(1024)] <- -kords[512:2]
    kords <- dnorm(kords, sd = bw)
    kords <- fft( fft(y)* Conj(fft(kords)), inverse=TRUE)
    # Clamp tiny negative values introduced by FFT round-off.
    kords <- pmax.int(0, Re(kords)[1L:512]/512)
    xords <- seq.int(lo, up, length.out = 512)
    # NOTE: `x` and `y` are reused here for the output grid, shadowing the inputs.
    x <- seq.int(from, to, length.out = 512)
    y <- .Call(stats:::C_Approx, xords, kords, x, 1, NA, NA, 0)
    # Return a closure that interpolates the precomputed density grid.
    function(v) stats:::.approxfun(x,y,v,1,NA,NA,0)
}

#' @title Evaluate the approximated density at given points
#' @description Same pipeline as fitDensityFunGetX() (duplicated code), but
#' evaluates the density directly at `v` instead of returning a closure.
#' @param v points at which to evaluate the density
fitDensityFunGetXVals <- function(x, N, minV, maxV,v ) {
    # See fitDensityFunGetX() above for a step-by-step explanation; the body
    # is identical except for the final direct evaluation at `v`.
    bw <- 0.9 * .Call(stats:::C_cov, x, NULL, 5, FALSE)^0.5 * N^(-0.2)
    c1 <- 3*bw
    from <- minV - c1
    to <- maxV + c1
    c1 <- c1+bw
    lo <- from - c1
    up <- to + c1
    y <- .Call(stats:::C_BinDist, x, rep_len(1/N, N), lo, up, 512)
    kords <- seq.int(0, 2*(up-lo), length.out = 1024)
    kords[(514):(1024)] <- -kords[512:2]
    kords <- dnorm(kords, sd = bw)
    kords <- fft( fft(y)* Conj(fft(kords)), inverse=TRUE)
    kords <- pmax.int(0, Re(kords)[1L:512]/512)
    xords <- seq.int(lo, up, length.out = 512)
    x <- seq.int(from, to, length.out = 512)
    y <- .Call(stats:::C_Approx, xords, kords, x, 1, NA, NA, 0)
    stats:::.approxfun(x,y,v,1,NA,NA,0)
}
# Synthetic example data for the ggplot2 tests: a sorted predictor, a noisy
# linear response, and a four-level grouping variable (25 rows per group).
# Uses `<-` for assignment and rep(..., each =) per tidyverse style.
var_x <- sort(rnorm(100))
var_y <- var_x * 2 + rnorm(100)
var_z <- rep(c("A", "B", "C", "D"), each = 25)
df <- data.frame(x = var_x, y = var_y, z = var_z)
/ggplot2-tests/create_vars.R
permissive
rvladimiro/QuickCode
R
false
false
184
r
# Example variables for the ggplot2 tests.
# var_x: 100 sorted standard-normal draws (the predictor).
# var_y: a linear function of var_x plus standard-normal noise.
# var_z: four groups of 25 labels each.
var_x = sort(rnorm(100))
var_y = 2 * var_x + rnorm(100)
var_z = rep(c("A", "B", "C", "D"), times = c(25, 25, 25, 25))
df = data.frame(x = var_x, y = var_y, z = var_z)
# Exploratory Data Analysis "plot2": line chart of Global Active Power over
# time for 2007-02-01 and 2007-02-02, written to plot2.png.
# '?' marks missing values in the source file.
f = read.table('household_power_consumption.txt', sep = ';', header = TRUE, na.strings = '?', stringsAsFactors = FALSE)
# Parse the Date column and keep only the two target days (inclusive).
f$Date = as.Date(f$Date, format = "%d/%m/%Y")
f = subset(f, Date >= as.Date("2007-02-01", format = "%Y-%m-%d"))
f = subset(f, Date <= as.Date("2007-02-02", format = "%Y-%m-%d"))
# Combine Date and Time into one string column, then parse it back into a
# POSIXct timestamp used as the plot's x axis.
f = within(f, {timestamp=format(as.POSIXct(paste(Date, Time)), "%d/%m/%Y %H:%M:%S") })
f$Dtimestamp = as.POSIXct(strptime(f$timestamp, "%d/%m/%Y %H:%M:%S"))
# Render the line chart to a 480x480 PNG.
png('plot2.png', width = 480, height = 480)
plot(f$Global_active_power ~ f$Dtimestamp, type = "l", xlab = '', ylab='Global Active Power (kilowatts)')
dev.off()
/EDA - ex1/plot2.R
no_license
Hubbson/Coursera_EDA
R
false
false
618
r
# Plot 2: Global Active Power (kilowatts) across 2007-02-01 and 2007-02-02,
# saved as plot2.png. Missing values are encoded as '?' in the raw file.
f <- read.table('household_power_consumption.txt', sep = ';', header = TRUE,
                na.strings = '?', stringsAsFactors = FALSE)
f$Date <- as.Date(f$Date, format = "%d/%m/%Y")
# Restrict to the two days of interest (both bounds inclusive).
f <- subset(f, Date >= as.Date("2007-02-01", format = "%Y-%m-%d") &
               Date <= as.Date("2007-02-02", format = "%Y-%m-%d"))
# Build a combined date-time string column, then parse it into POSIXct.
f <- within(f, {timestamp=format(as.POSIXct(paste(Date, Time)), "%d/%m/%Y %H:%M:%S") })
f$Dtimestamp <- as.POSIXct(strptime(f$timestamp, "%d/%m/%Y %H:%M:%S"))
# Draw the time series to a 480x480 PNG device.
png('plot2.png', width = 480, height = 480)
plot(f$Global_active_power ~ f$Dtimestamp, type = "l", xlab = '',
     ylab='Global Active Power (kilowatts)')
dev.off()
#' GRE Example data
#'
#' Example data set from Cohen's book, with verbal GRE scores recorded at two
#' time points per subject (presumably repeated measures — confirm against the
#' source text).
#'
#' @format A data frame with 5 rows and 3 variables:
#' \describe{
#'   \item{id}{identifier for each row/subject}
#'   \item{verbalGRE_1}{verbal GRE score, first measurement}
#'   \item{verbalGRE_2}{verbal GRE score, second measurement}
#' }
"GRE"
/R/data_GRE.R
no_license
TysonStanley/educ6600
R
false
false
236
r
#' GRE Example data
#'
#' Example data set from Cohen's book. The two score columns appear to be the
#' same measure taken twice (verify against the original source).
#'
#' @format A data frame with 5 rows and 3 variables:
#' \describe{
#'   \item{id}{row identifier}
#'   \item{verbalGRE_1}{verbal GRE score (measurement 1)}
#'   \item{verbalGRE_2}{verbal GRE score (measurement 2)}
#' }
"GRE"
# Animate an HEBV transmission network over the outbreak's event times.
# Vertex colours encode status: 0 = susceptible, 2 = infectious, 4 = recovered.

# NOTE(review): installing a package at the top of an analysis script
# re-installs on every run; consider installing once interactively.
install.packages("statnet")
library(statnet)

# set working directory
# setwd("<setme>")

# Infection intervals per individual.
epidat <- read.csv("epidemiology_data.csv")

# Distinct event times (onsets and recoveries), sorted, padded with 12 hours
# before the first and after the last event.
event.times <- c(epidat$infection_begin, epidat$infection_end)
event.times <- unique(event.times)
event.times <- event.times[order(event.times)]
event.times <- c(-12, event.times, event.times[length(event.times)] + 12)
# write.csv(event.times, "eventtimes.csv")

cont <- read.csv("contact_data.csv")
cont.inf <- cont[which(cont$infection == TRUE), ]

# inf.status[j, i]: status of individual j at event time i; once an individual
# becomes infectious (2) or recovers (4), the status persists forward in time.
inf.status <- array(0, dim = c(nrow(epidat), length(event.times)))
for (i in seq_along(event.times)) {
  for (j in seq_len(nrow(epidat))) {
    if (epidat$infection_begin[j] == event.times[i]) {
      inf.status[j, i:length(event.times)] <- 2
    } else if (epidat$infection_end[j] == event.times[i]) {
      inf.status[j, i:length(event.times)] <- 4
    }
  }
}
# Everyone has recovered by the final (padded) time point.
inf.status[, length(event.times)] <- 4

# Reorder rows to match the vertex order of the network read below
# (equivalent to the original row-by-row rbind()).
inf.status <- inf.status[c(1, 10:19, 2, 20:29, 3, 30:38, 4:9), ]
# write.csv(inf.status, "infstatus.csv")

el <- read.csv("undirHEBV.csv", header = FALSE)  # read a .csv file (edge list)
el[, 1] <- as.character(el[, 1])
el[, 2] <- as.character(el[, 2])
n <- network(el, matrix.type = "edgelist", directed = FALSE)

# Fixed layout so vertices do not move between animation frames.
gcoord <- gplot.layout.kamadakawai(n, layout.par = NULL)

# Initial frame: everyone drawn in the base colour.
gplot(n, gmode = "graph", coord = gcoord, vertex.col = 1, cex = 2, edge.col = 1, displaylabels = FALSE)
text(mean(gcoord[, 1]), max(gcoord[, 2]) + .5, "HEBV Transmission Network", cex = 2, font = 4)
text(mean(gcoord[, 1]), min(gcoord[, 2]) - .5, paste("t =", round(event.times[1], digits = 2), "hours from outbreak", sep = " "), cex = 1.5)
legend(x = -4, y = -2, legend = c("Susceptible", "Infectious", "Recovered"), fill = c(0, 2, 4), border = 1, cex = 1)

#library(animation)
#ani.options(convert=shQuote("C:/Program Files/ImageMagick-6.7.9-Q16/convert.exe")) # tell R where to look for ImageMagick's convert.exe
#saveGIF(
# One frame per event time; vertex colours come from the matching inf.status column.
for (i in seq_along(event.times)) {
  gplot(n, gmode = "graph", coord = gcoord, vertex.col = inf.status[, i], cex = 2, edge.col = 1, displaylabels = FALSE)
  text(mean(gcoord[, 1]), max(gcoord[, 2]) + .5, "HEBV Transmission Network", cex = 2, font = 4)
  text(mean(gcoord[, 1]), min(gcoord[, 2]) - .5, paste("t =", round(event.times[i], digits = 2), "hours from outbreak", sep = " "), cex = 1.5)
  legend(x = -4, y = -2, legend = c("Susceptible", "Infectious", "Recovered"), fill = c(0, 2, 4), border = 1, cex = 1)
}
#, movie.name = "animation.gif", img.name = "Rplot", convert = "convert", outdir="c:/",cmd.fun = system, clean = TRUE)
/Social Network Analysis Teaching/Problem Set 3 - Determining Transmission Mode/graphing/network.r
no_license
collinmmccabe/portfolio
R
false
false
2,597
r
# Animate an HEBV transmission network over the outbreak's event times.
# Vertex colours encode status: 0 = susceptible, 2 = infectious, 4 = recovered.
install.packages("statnet")
library(statnet)

# set working directory
# setwd("<setme>")

# Infection intervals per individual.
epidat <- read.csv("epidemiology_data.csv")

# Distinct event times (onsets and recoveries), sorted, padded with 12 hours
# before the first and after the last event.
event.times <- c(epidat$infection_begin, epidat$infection_end)
event.times <- unique(event.times)
event.times <- event.times[order(event.times)]
event.times <- c(-12, event.times, event.times[length(event.times)] + 12)
# write.csv(event.times, "eventtimes.csv")

cont <- read.csv("contact_data.csv")
cont.inf <- cont[which(cont$infection == TRUE),]

# inf.status[j, i]: status of individual j at event time i; once an individual
# becomes infectious (2) or recovers (4), the status persists forward in time.
inf.status <- array(0, dim=c(nrow(epidat), length(event.times)))
for (i in 1:length(event.times)) {
  for (j in 1:nrow(epidat)) {
    if (epidat$infection_begin[j] == event.times[i]) {
      inf.status[j,i:length(event.times)] = 2
    } else {
      if (epidat$infection_end[j] == event.times[i]) {
        inf.status[j,i:length(event.times)] = 4
      }
    }
  }
}
# Everyone has recovered by the final (padded) time point.
inf.status[,length(event.times)] = 4
# Reorder rows to match the vertex order of the network read below.
inf.status <- rbind(inf.status[1,1:length(event.times)],inf.status[10:19,1:length(event.times)],inf.status[2,1:length(event.times)],inf.status[20:29,1:length(event.times)],inf.status[3,1:length(event.times)],inf.status[30:38,1:length(event.times)],inf.status[4:9,1:length(event.times)])
# write.csv(inf.status, "infstatus.csv")

el=read.csv("undirHEBV.csv",header=FALSE) # read a .csv file
el[,1]=as.character(el[,1])
el[,2]=as.character(el[,2])
n = network(el,matrix.type="edgelist",directed=FALSE)

# Fixed layout so vertices do not move between animation frames.
gcoord <- gplot.layout.kamadakawai(n, layout.par=NULL)

# Initial frame: everyone drawn in the base colour.
gplot(n, gmode="graph", coord=gcoord, vertex.col=1, cex=2, edge.col=1, displaylabels=FALSE)
text(mean(gcoord[,1]), max(gcoord[,2])+.5, "HEBV Transmission Network", cex=2, font=4)
text(mean(gcoord[,1]), min(gcoord[,2])-.5, paste("t =",round(event.times[1], digits=2),"hours from outbreak", sep=" "), cex=1.5)
legend(x=-4, y=-2, legend=c("Susceptible", "Infectious", "Recovered"), fill=c(0,2,4), border=1, cex=1)

#library(animation)
#ani.options(convert=shQuote("C:/Program Files/ImageMagick-6.7.9-Q16/convert.exe")) # tell R where to look for ImageMagick's convert.exe
#saveGIF(
# One frame per event time; vertex colours come from the matching inf.status column.
for (i in 1:length(event.times)){
  gplot(n, gmode="graph", coord=gcoord, vertex.col=inf.status[,i], cex=2, edge.col=1, displaylabels=FALSE)
  text(mean(gcoord[,1]), max(gcoord[,2])+.5, "HEBV Transmission Network", cex=2, font=4)
  text(mean(gcoord[,1]), min(gcoord[,2])-.5, paste("t =",round(event.times[i], digits=2),"hours from outbreak", sep=" "), cex=1.5)
  legend(x=-4, y=-2, legend=c("Susceptible", "Infectious", "Recovered"), fill=c(0,2,4), border=1, cex=1)
}
#, movie.name = "animation.gif", img.name = "Rplot", convert = "convert", outdir="c:/",cmd.fun = system, clean = TRUE)
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/sfcr_plots.R \name{.return_loops} \alias{.return_loops} \title{Find cyclical nodes} \usage{ .return_loops(m) } \arguments{ \item{m}{adjacency matrix} } \description{ Find cyclical nodes } \note{ See \emph{Networks: An Introduction} by M.E.J. Newman (2010), pp. 136-139, for a reference on this algorithm. } \keyword{internal}
/man/dot-return_loops.Rd
no_license
cran/sfcr
R
false
true
415
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/sfcr_plots.R \name{.return_loops} \alias{.return_loops} \title{Find cyclical nodes} \usage{ .return_loops(m) } \arguments{ \item{m}{adjacency matrix} } \description{ Find cyclical nodes } \note{ See \emph{Networks: An Introduction} by M.E.J. Newman (2010), pp. 136-139, for a reference on this algorithm. } \keyword{internal}
\name{get_peak_list} \alias{get_peak_list} \title{ Get a list of peak regions } \description{ Get a list of peak regions } \usage{ get_peak_list(mark, sample_id = chipseq_hooks$sample_id(mark)) } \arguments{ \item{mark}{mark type} \item{sample_id}{a vector of sample ids} } \details{ It works after \code{\link{chipseq_hooks}} is set. } \value{ A list of \code{\link[GenomicRanges]{GRanges}} objects. } \author{ Zuguang Gu <z.gu@dkfz.de> } \examples{ # There is no example NULL }
/man/get_peak_list.rd
no_license
AvinashGupta/epic
R
false
false
487
rd
\name{get_peak_list} \alias{get_peak_list} \title{ Get a list of peak regions } \description{ Get a list of peak regions } \usage{ get_peak_list(mark, sample_id = chipseq_hooks$sample_id(mark)) } \arguments{ \item{mark}{mark type} \item{sample_id}{a vector of sample ids} } \details{ It works after \code{\link{chipseq_hooks}} is set. } \value{ A list of \code{\link[GenomicRanges]{GRanges}} objects. } \author{ Zuguang Gu <z.gu@dkfz.de> } \examples{ # There is no example NULL }
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/app.R \name{plot_clustering} \alias{plot_clustering} \title{Graphic representation of the evaluation measures.} \usage{ plot_clustering(df, metric) } \arguments{ \item{df}{data matrix or data frame with the result of running the clustering algorithm.} \item{metric}{a string with the name of the metric selected for evaluation.} } \value{ Generates an image with the distribution of the clusters by metric. } \description{ Graphical representation of the evaluation measures grouped by cluster. } \details{ In some cases, reviewing or filtering the data is necessary before selecting it, and graphical representations make this task much easier. With this method we can filter the data by metric and inspect it graphically. } \examples{ result = clustering( df = cluster::agriculture, min = 4, max = 5, algorithm='gmm', metrics=c("Precision") ) plot_clustering(result,c("Precision")) }
/man/plot_clustering.Rd
no_license
cran/Clustering
R
false
true
1,107
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/app.R \name{plot_clustering} \alias{plot_clustering} \title{Graphic representation of the evaluation measures.} \usage{ plot_clustering(df, metric) } \arguments{ \item{df}{data matrix or data frame with the result of running the clustering algorithm.} \item{metric}{a string with the name of the metric selected for evaluation.} } \value{ Generates an image with the distribution of the clusters by metric. } \description{ Graphical representation of the evaluation measures grouped by cluster. } \details{ In some cases, reviewing or filtering the data is necessary before selecting it, and graphical representations make this task much easier. With this method we can filter the data by metric and inspect it graphically. } \examples{ result = clustering( df = cluster::agriculture, min = 4, max = 5, algorithm='gmm', metrics=c("Precision") ) plot_clustering(result,c("Precision")) }