content
large_stringlengths
0
6.46M
path
large_stringlengths
3
331
license_type
large_stringclasses
2 values
repo_name
large_stringlengths
5
125
language
large_stringclasses
1 value
is_vendor
bool
2 classes
is_generated
bool
2 classes
length_bytes
int64
4
6.46M
extension
large_stringclasses
75 values
text
stringlengths
0
6.46M
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/plot_models.R \name{plot_models} \alias{plot_models} \alias{sjp.lmm} \alias{sjp.glmm} \title{Forest plot of multiple regression models} \usage{ plot_models(..., exponentiate, std.est = NULL, rm.terms = NULL, title = NULL, m.labels = NULL, legend.title = "Dependent Variables", legend.pval.title = "p-level", axis.labels = NULL, axis.title = NULL, axis.lim = NULL, wrap.title = 50, wrap.labels = 25, wrap.legend.title = 20, grid.breaks = NULL, dot.size = 3, spacing = 0.4, colors = "Set1", show.values = FALSE, show.legend = TRUE, show.intercept = FALSE, show.p = TRUE, p.shape = FALSE, ci.lvl = 0.95, vline.color = NULL, digits = 2, grid = FALSE) sjp.lmm(...) sjp.glmm(...) } \arguments{ \item{...}{One or more regression models, including glm's or mixed models. May also be a \code{list} with fitted models. See 'Examples'.} \item{exponentiate}{Logical, if \code{TRUE} and models inherit from generalized linear models, estimates will be exponentiated (e.g., log-odds will be displayed as odds ratios). By default, \code{exponentiate} will automatically be set to \code{FALSE} or \code{TRUE}, depending on the class of \code{fit}.} \item{std.est}{For linear models, choose whether standardized coefficients should be used for plotting. Default is no standardization. \describe{ \item{\code{NULL}}{(default) no standardization, returns original estimates.} \item{\code{"std"}}{standardized beta values.} \item{\code{"std2"}}{standardized beta values, however, standardization is done by rescaling estimates by dividing them by two sd (see \code{\link[sjstats]{std_beta}}).} }} \item{rm.terms}{Character vector with names that indicate which terms should be removed from the plot. Counterpart to \code{terms}. \code{rm.terms = "t_name"} would remove the term \emph{t_name}. Default is \code{NULL}, i.e. all terms are used. 
Note that this argument does not apply to \code{type = "eff"}, \code{type = "pred"} or \code{type = "int"}.} \item{title}{Character vector, used as plot title. By default, \code{\link[sjlabelled]{get_dv_labels}} is called to retrieve the label of the dependent variable, which will be used as title. Use \code{title = ""} to remove title.} \item{m.labels}{Character vector, used to indicate the different models in the plot's legend. If not specified, the labels of the dependent variables for each model are used.} \item{legend.title}{Character vector, used as title for the plot legend. Note that only some plot types have legends (e.g. \code{type = "pred"} or when grouping estimates with \code{group.estimates}).} \item{legend.pval.title}{Character vector, used as title of the plot legend that indicates the p-values. Default is \code{"p-level"}. Only applies if \code{p.shape = TRUE}.} \item{axis.labels}{Character vector with labels for the model terms, used as axis labels. By default, \code{\link[sjlabelled]{get_term_labels}} is called to retrieve the labels of the coefficients, which will be used as axis labels. Use \code{axis.labels = ""} or \code{auto.label = FALSE} to use the bare term names as labels instead.} \item{axis.title}{Character vector of length one or two (depending on the plot function and type), used as title(s) for the x and y axis. If not specified, a default labelling is chosen. \strong{Note:} Some plot types may not support this argument sufficiently. In such cases, use the returned ggplot-object and add axis titles manually with \code{\link[ggplot2]{labs}}. Use \code{axis.title = ""} to remove axis titles.} \item{axis.lim}{Numeric vector of length 2, defining the range of the plot axis. Depending on plot-type, may affect either x- or y-axis. 
For \emph{Marginal Effects} plots, \code{axis.lim} may also be a list of two vectors of length 2, defining axis limits for both the x and y axis.} \item{wrap.title}{Numeric, determines how many chars of the plot title are displayed in one line and when a line break is inserted.} \item{wrap.labels}{Numeric, determines how many chars of the value, variable or axis labels are displayed in one line and when a line break is inserted.} \item{wrap.legend.title}{Numeric, determines how many chars of the legend's title are displayed in one line and when a line break is inserted.} \item{grid.breaks}{Numeric; sets the distance between breaks for the axis, i.e. at every \code{grid.breaks}'th position a major grid is plotted.} \item{dot.size}{Numeric, size of the dots that indicate the point estimates.} \item{spacing}{Numeric, spacing between the dots and error bars of the plotted fitted models. Default is 0.4.} \item{colors}{May be a character vector of color values in hex-format, valid color value names (see \code{demo("colors")}) or a name of a \href{http://colorbrewer2.org}{color brewer} palette. Following options are valid for the \code{colors} argument: \itemize{ \item If not specified, a default color brewer palette will be used, which is suitable for the plot style. \item If \code{"gs"}, a greyscale will be used. \item If \code{"bw"}, and plot-type is a line-plot, the plot is black/white and uses different line types to distinguish groups (see \href{../doc/blackwhitefigures.html}{this package-vignette}). \item If \code{colors} is any valid color brewer palette name, the related palette will be used. Use \code{\link[RColorBrewer]{display.brewer.all}} to view all available palette names. \item Else specify own color values or names as vector (e.g. \code{colors = "#00ff00"}). 
}} \item{show.values}{Logical, whether values should be plotted or not.} \item{show.legend}{Logical, if \code{TRUE}, and depending on plot type and function, a legend is added to the plot.} \item{show.intercept}{Logical, if \code{TRUE}, the intercept of the fitted model is also plotted. Default is \code{FALSE}. If \code{exponentiate = TRUE}, please note that due to exponential transformation of estimates, the intercept in some cases is non-finite and the plot cannot be created.} \item{show.p}{Logical, adds asterisks that indicate the significance level of estimates to the value labels.} \item{p.shape}{Logical, if \code{TRUE}, significance levels are distinguished by different point shapes and a related legend is plotted. Default is \code{FALSE}.} \item{ci.lvl}{Numeric, the level of the confidence intervals (error bars). Use \code{ci.lvl = NA} to remove error bars. For \code{stanreg}-models, \code{ci.lvl} defines the (outer) probability for the \code{\link[sjstats]{hdi}} (High Density Interval) that is plotted. By default, \code{stanreg}-models are printed with two intervals: the "inner" interval, which defaults to the 50\%-HDI; and the "outer" interval, which defaults to the 89\%-HDI. \code{ci.lvl} affects only the outer interval in such cases. See \code{prob.inner} and \code{prob.outer} under the \code{...}-argument for more details.} \item{vline.color}{Color of the vertical "zero effect" line. Default color is inherited from the current theme.} \item{digits}{Numeric, amount of digits after decimal point when rounding estimates or values.} \item{grid}{Logical, if \code{TRUE}, multiple plots are plotted as grid layout.} } \value{ A ggplot-object. } \description{ Plot and compare regression coefficients with confidence intervals of multiple regression models in one plot. 
} \examples{ data(efc) # fit three models fit1 <- lm(barthtot ~ c160age + c12hour + c161sex + c172code, data = efc) fit2 <- lm(neg_c_7 ~ c160age + c12hour + c161sex + c172code, data = efc) fit3 <- lm(tot_sc_e ~ c160age + c12hour + c161sex + c172code, data = efc) # plot multiple models plot_models(fit1, fit2, fit3, grid = TRUE) # plot multiple models with legend labels and # point shapes instead of value labels plot_models( fit1, fit2, fit3, axis.labels = c( "Carer's Age", "Hours of Care", "Carer's Sex", "Educational Status" ), m.labels = c("Barthel Index", "Negative Impact", "Services used"), show.values = FALSE, show.p = FALSE, p.shape = TRUE ) # plot multiple models from nested lists argument all.models <- list() all.models[[1]] <- fit1 all.models[[2]] <- fit2 all.models[[3]] <- fit3 plot_models(all.models) # plot multiple models with different predictors (stepwise inclusion), # standardized estimates fit1 <- lm(mpg ~ wt + cyl + disp + gear, data = mtcars) fit2 <- update(fit1, . ~ . + hp) fit3 <- update(fit2, . ~ . + am) plot_models(fit1, fit2, fit3, std.est = "std2") }
/man/plot_models.Rd
no_license
derele/sjPlot
R
false
true
8,474
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/plot_models.R \name{plot_models} \alias{plot_models} \alias{sjp.lmm} \alias{sjp.glmm} \title{Forest plot of multiple regression models} \usage{ plot_models(..., exponentiate, std.est = NULL, rm.terms = NULL, title = NULL, m.labels = NULL, legend.title = "Dependent Variables", legend.pval.title = "p-level", axis.labels = NULL, axis.title = NULL, axis.lim = NULL, wrap.title = 50, wrap.labels = 25, wrap.legend.title = 20, grid.breaks = NULL, dot.size = 3, spacing = 0.4, colors = "Set1", show.values = FALSE, show.legend = TRUE, show.intercept = FALSE, show.p = TRUE, p.shape = FALSE, ci.lvl = 0.95, vline.color = NULL, digits = 2, grid = FALSE) sjp.lmm(...) sjp.glmm(...) } \arguments{ \item{...}{One or more regression models, including glm's or mixed models. May also be a \code{list} with fitted models. See 'Examples'.} \item{exponentiate}{Logical, if \code{TRUE} and models inherit from generalized linear models, estimates will be exponentiated (e.g., log-odds will be displayed as odds ratios). By default, \code{exponentiate} will automatically be set to \code{FALSE} or \code{TRUE}, depending on the class of \code{fit}.} \item{std.est}{For linear models, choose whether standardized coefficients should be used for plotting. Default is no standardization. \describe{ \item{\code{NULL}}{(default) no standardization, returns original estimates.} \item{\code{"std"}}{standardized beta values.} \item{\code{"std2"}}{standardized beta values, however, standardization is done by rescaling estimates by dividing them by two sd (see \code{\link[sjstats]{std_beta}}).} }} \item{rm.terms}{Character vector with names that indicate which terms should be removed from the plot. Counterpart to \code{terms}. \code{rm.terms = "t_name"} would remove the term \emph{t_name}. Default is \code{NULL}, i.e. all terms are used. 
Note that this argument does not apply to \code{type = "eff"}, \code{type = "pred"} or \code{type = "int"}.} \item{title}{Character vector, used as plot title. By default, \code{\link[sjlabelled]{get_dv_labels}} is called to retrieve the label of the dependent variable, which will be used as title. Use \code{title = ""} to remove title.} \item{m.labels}{Character vector, used to indicate the different models in the plot's legend. If not specified, the labels of the dependent variables for each model are used.} \item{legend.title}{Character vector, used as title for the plot legend. Note that only some plot types have legends (e.g. \code{type = "pred"} or when grouping estimates with \code{group.estimates}).} \item{legend.pval.title}{Character vector, used as title of the plot legend that indicates the p-values. Default is \code{"p-level"}. Only applies if \code{p.shape = TRUE}.} \item{axis.labels}{Character vector with labels for the model terms, used as axis labels. By default, \code{\link[sjlabelled]{get_term_labels}} is called to retrieve the labels of the coefficients, which will be used as axis labels. Use \code{axis.labels = ""} or \code{auto.label = FALSE} to use the bare term names as labels instead.} \item{axis.title}{Character vector of length one or two (depending on the plot function and type), used as title(s) for the x and y axis. If not specified, a default labelling is chosen. \strong{Note:} Some plot types may not support this argument sufficiently. In such cases, use the returned ggplot-object and add axis titles manually with \code{\link[ggplot2]{labs}}. Use \code{axis.title = ""} to remove axis titles.} \item{axis.lim}{Numeric vector of length 2, defining the range of the plot axis. Depending on plot-type, may affect either x- or y-axis. 
For \emph{Marginal Effects} plots, \code{axis.lim} may also be a list of two vectors of length 2, defining axis limits for both the x and y axis.} \item{wrap.title}{Numeric, determines how many chars of the plot title are displayed in one line and when a line break is inserted.} \item{wrap.labels}{Numeric, determines how many chars of the value, variable or axis labels are displayed in one line and when a line break is inserted.} \item{wrap.legend.title}{Numeric, determines how many chars of the legend's title are displayed in one line and when a line break is inserted.} \item{grid.breaks}{Numeric; sets the distance between breaks for the axis, i.e. at every \code{grid.breaks}'th position a major grid is plotted.} \item{dot.size}{Numeric, size of the dots that indicate the point estimates.} \item{spacing}{Numeric, spacing between the dots and error bars of the plotted fitted models. Default is 0.4.} \item{colors}{May be a character vector of color values in hex-format, valid color value names (see \code{demo("colors")}) or a name of a \href{http://colorbrewer2.org}{color brewer} palette. Following options are valid for the \code{colors} argument: \itemize{ \item If not specified, a default color brewer palette will be used, which is suitable for the plot style. \item If \code{"gs"}, a greyscale will be used. \item If \code{"bw"}, and plot-type is a line-plot, the plot is black/white and uses different line types to distinguish groups (see \href{../doc/blackwhitefigures.html}{this package-vignette}). \item If \code{colors} is any valid color brewer palette name, the related palette will be used. Use \code{\link[RColorBrewer]{display.brewer.all}} to view all available palette names. \item Else specify own color values or names as vector (e.g. \code{colors = "#00ff00"}). 
}} \item{show.values}{Logical, whether values should be plotted or not.} \item{show.legend}{Logical, if \code{TRUE}, and depending on plot type and function, a legend is added to the plot.} \item{show.intercept}{Logical, if \code{TRUE}, the intercept of the fitted model is also plotted. Default is \code{FALSE}. If \code{exponentiate = TRUE}, please note that due to exponential transformation of estimates, the intercept in some cases is non-finite and the plot cannot be created.} \item{show.p}{Logical, adds asterisks that indicate the significance level of estimates to the value labels.} \item{p.shape}{Logical, if \code{TRUE}, significance levels are distinguished by different point shapes and a related legend is plotted. Default is \code{FALSE}.} \item{ci.lvl}{Numeric, the level of the confidence intervals (error bars). Use \code{ci.lvl = NA} to remove error bars. For \code{stanreg}-models, \code{ci.lvl} defines the (outer) probability for the \code{\link[sjstats]{hdi}} (High Density Interval) that is plotted. By default, \code{stanreg}-models are printed with two intervals: the "inner" interval, which defaults to the 50\%-HDI; and the "outer" interval, which defaults to the 89\%-HDI. \code{ci.lvl} affects only the outer interval in such cases. See \code{prob.inner} and \code{prob.outer} under the \code{...}-argument for more details.} \item{vline.color}{Color of the vertical "zero effect" line. Default color is inherited from the current theme.} \item{digits}{Numeric, amount of digits after decimal point when rounding estimates or values.} \item{grid}{Logical, if \code{TRUE}, multiple plots are plotted as grid layout.} } \value{ A ggplot-object. } \description{ Plot and compare regression coefficients with confidence intervals of multiple regression models in one plot. 
} \examples{ data(efc) # fit three models fit1 <- lm(barthtot ~ c160age + c12hour + c161sex + c172code, data = efc) fit2 <- lm(neg_c_7 ~ c160age + c12hour + c161sex + c172code, data = efc) fit3 <- lm(tot_sc_e ~ c160age + c12hour + c161sex + c172code, data = efc) # plot multiple models plot_models(fit1, fit2, fit3, grid = TRUE) # plot multiple models with legend labels and # point shapes instead of value labels plot_models( fit1, fit2, fit3, axis.labels = c( "Carer's Age", "Hours of Care", "Carer's Sex", "Educational Status" ), m.labels = c("Barthel Index", "Negative Impact", "Services used"), show.values = FALSE, show.p = FALSE, p.shape = TRUE ) # plot multiple models from nested lists argument all.models <- list() all.models[[1]] <- fit1 all.models[[2]] <- fit2 all.models[[3]] <- fit3 plot_models(all.models) # plot multiple models with different predictors (stepwise inclusion), # standardized estimates fit1 <- lm(mpg ~ wt + cyl + disp + gear, data = mtcars) fit2 <- update(fit1, . ~ . + hp) fit3 <- update(fit2, . ~ . + am) plot_models(fit1, fit2, fit3, std.est = "std2") }
##' @name InventoryGrowthFusion ##' @title InventoryGrowthFusion ##' @description this code fuses forest inventory data with tree growth data (tree ring or dendrometer band) ##' for the same plots. Code is a rewrite of Clark et al 2007 Ecol Appl into JAGS ##' ##' @param dbh.only.data list of data inputs from trees with only dbh measurements (no increment cores) ##' @param posterior.estimates posterior estimates of all model parameters from the first stage to be used as informative priors for 2nd stage ##' @param random = whether or not to include random effects ##' @param n.chunk number of MCMC steps to evaluate at a time. Will only return LAST. If restarting, second number in vector is chunk to start from ##' @param n.burn number of steps to automatically discard as burn-in ##' @param save.state whether or not to include inferred DBH in output (can be large). Enter numeric value to save.state periodically (in terms of n.chunk) ##' @param restart final mcmc.list from previous execution. NULL for new run. TRUE to save final state for new run. 
##' @note Requires JAGS ##' @return an mcmc.list object ##' @export InventoryGrowthFusion_stage2 <- function(data, posterior.estimates = NULL, cov.data=NULL, time_data = NULL, informative.time = TRUE, informative.site = TRUE, informative.plot =TRUE, n.iter=5000, n.chunk = n.iter, n.burn = min(n.chunk, 2000), random = NULL, fixed = NULL,time_varying=NULL, burnin_plot = FALSE, output.folder= "/home/rstudio/pecan/IGF_PIPO_AZ_mcmc/", save.jags = "IGF.ragged.txt", model.name = "model",z0 = NULL, save.state=TRUE, restart = NULL, breakearly = TRUE) { library(rjags) print(paste("start of MCMC", Sys.time())) # baseline variables to monitor burnin.variables <- c("tau_add", "tau_dbh", "tau_inc", "mu") # process variability, dbh and tree-ring observation error, intercept out.variables <- c("deviance", "tau_add", "tau_dbh", "tau_inc", "mu") # if(save.state) out.variables <- c(out.variables,"x") if(!exists("model")) model = 0 ## restart if(length(n.chunk)>1){ k_restart = n.chunk[2] n.chunk = n.chunk[1] } else { k_restart = 1 } max.chunks <- ceiling(n.iter/n.chunk) if(max.chunks < k_restart){ PEcAn.utils::logger.warn("MCMC already complete",max.chunks,k_restart) return(NULL) } avail.chunks <- k_restart:ceiling(n.iter/n.chunk) check.dup.data <- function(data,loc){ if(any(duplicated(names(data)))){print("duplicated variable at",loc,names(data))} } # start text object that will be manipulated (to build different linear models, swap in/out covariates) TreeDataFusionMV <- " model{ ### Loop over all individuals for(i in 1:ni){ #### Data Model: DBH for(t in 1:nt){ z[i,t] ~ dnorm(x[i,t],tau_dbh) } #### Data Model: growth for(t in 2:nt){ inc[i,t] <- x[i,t]-x[i,t-1] y[i,t] ~ dnorm(inc[i,t],tau_inc) } #### Process Model for(t in 2:nt){ Dnew[i,t] <- x[i,t-1] + mu ##PROCESS x[i,t]~dnorm(Dnew[i,t],tau_add) } ## initial condition x[i,1] ~ dnorm(x_ic,tau_ic) } ## end loop over individuals ## RANDOM_EFFECTS #### Priors tau_dbh ~ dnorm(a_dbh,r_dbh) tau_inc ~ dnorm(a_inc,r_inc) tau_add ~ 
dnorm(a_add,r_add) mu ~ dnorm(0.5,0.5) ## FIXED EFFECTS BETAS ## ENDOGENOUS BETAS ## TIME VARYING BETAS ## RANDOM EFFECT TAUS }" ######################################################################## # # get summary of posterior estimates from previous model # ######################################################################## # these will be used to specify informative priors below: Pformula <- NULL ######################################################################## ### ### RANDOM EFFECTS ### ######################################################################## if (!is.null(random)) { Rpriors <- NULL Reffects <- NULL ## parse random effects r_vars <- gsub(" ","",unlist(strsplit(random,"+",fixed=TRUE))) ## split on +, remove whitespace for(i in seq_along(r_vars)){ ## special case: individidual if(r_vars[i] == "i"){ r_var <- "i" counter <- "" index <- "i" nr <- nrow(cov.data) } else if(r_vars[i] == "t"){ r_var <- "t" counter <- "" index <- "t" nr <- ncol(cov.data) } else { index <- counter <- nr <- NA r_var <- gsub("(","",gsub(")","",r_vars[i],fixed = TRUE),fixed="TRUE") r_var <- strsplit(r_var,"|",fixed=TRUE)[[1]] fix <- r_var[1] ## check for nested effects r_var <- strsplit(gsub("\\",":",r_var[2],fixed=TRUE),":",fixed = TRUE)[[1]] for(j in seq_along(length(r_var))){ if(j>1)print("WARNING: not actually nesting random effects at this time") ## HACK: to get started, not actually nesting ## parse j_var <- strsplit(r_var[j],"[",fixed = TRUE)[[1]] index[j] <- gsub("]","",j_var[2],fixed=TRUE) counter[j] <- j_var[1] r_var[j] <- j_var[1] ## add variable to data if(!(r_var[j] %in% names(data))){ data[[length(data)+1]] <- as.numeric(as.factor(as.character(cov.data[,r_var[j]]))) ## multiple conversions to eliminate gaps names(data)[length(data)] <- r_var[j] } check.dup.data(data,"r_var") nr[j] <- max(as.numeric(data[[r_var[j]]])) } index <- paste0("[",index,"]") } ## create formula Pformula <- paste(Pformula, paste0("+ alpha_", r_var,"[",counter,index,"]")) ## 
create random effect for(j in seq_along(nr)){ Reffects <- paste(Reffects, paste0("for(k in 1:",nr[j],"){\n"), paste0(" alpha_",r_var[j],"[k] ~ dnorm(0,tau_",r_var[j],")\n}\n")) } ## create priors if(informative.plot == FALSE){ Rpriors <- paste(Rpriors,paste0("tau_",r_var," ~ dgamma(1,0.1)\n",collapse = " ")) }else{ Rpriors <- paste(Rpriors,paste0("tau_",r_var," ~ dnorm(",posterior.summary[posterior.summary$parameter %in% "tau_PLOT", ]$means ,",", (1/posterior.summary[posterior.summary$parameter %in% "tau_PLOT", ]$vars) ,")\n",collapse = " ")) } ## track burnin.variables <- c(burnin.variables, paste0("tau_", r_var)) out.variables <- c(out.variables, paste0("tau_", r_var), paste0("alpha_",r_var)) } ## Substitute into code TreeDataFusionMV <- sub(pattern = "## RANDOM EFFECT TAUS", Rpriors, TreeDataFusionMV) TreeDataFusionMV <- gsub(pattern = "## RANDOM_EFFECTS", Reffects, TreeDataFusionMV) } ### END RANDOM EFFECTS ######################################################################## ### ### FIXED EFFECTS ### ######################################################################## if(FALSE){ ## DEV TESTING FOR X, polynomial X, and X interactions fixed <- "X + X^3 + X*bob + bob + dia + X*Tmin[t]" ## faux model, just for testing jags code } ## Design matrix if (is.null(fixed)) { Xf <- NULL } else { ## check for covariate data (note: will falsely fail if only effect is X) if (is.null(cov.data)) { print("formula provided but covariate data is absent:", fixed) } else { cov.data <- as.data.frame(cov.data) } ## check if there's a tilda in the formula if (length(grep("~", fixed)) == 0) { fixed <- paste("~", fixed) } ### BEGIN adding in tree size (endogenous variable X) ## First deal with endogenous terms (X and X*cov interactions) fixedX <- sub("~","",fixed, fixed=TRUE) lm.terms <- gsub("[[:space:]]", "", strsplit(fixedX,split = "+",fixed=TRUE)[[1]]) ## split on + and remove whitespace X.terms <- strsplit(lm.terms,split = c("^"),fixed = TRUE) X.terms <- 
sapply(X.terms,function(str){unlist(strsplit(str,,split="*",fixed=TRUE))}) X.terms <- which(sapply(X.terms,function(x){any(toupper(x) == "X")})) if(length(X.terms) > 0){ ## rebuild fixed without X.terms fixed <- paste("~",paste(lm.terms[-X.terms],collapse = " + ")) ## isolate terms with X X.terms <- lm.terms[X.terms] Xpriors <- NULL for(i in seq_along(X.terms)){ myBeta <- NULL Xformula <- NULL if(length(grep("*",X.terms[i],fixed = TRUE)) == 1){ ## INTERACTION myIndex <- "[i]" covX <- strsplit(X.terms[i],"*",fixed=TRUE)[[1]] covX <- covX[-which(toupper(covX)=="X")] ## remove X from terms ##is covariate fixed or time varying? tvar <- length(grep("[t]",covX,fixed=TRUE)) > 0 if(tvar){ covX <- sub("[t]","",covX,fixed = TRUE) if(!(covX %in% names(data))){ ## add cov variables to data object data[[covX]] <- time_data[[covX]] } check.dup.data(data,"covX") myIndex <- "[i,t]" } else { ## variable is fixed if(covX %in% colnames(cov.data)){ ## covariate present if(!(covX %in% names(data))){ ## add cov variables to data object data[[covX]] <- cov.data[,covX] } check.dup.data(data,"covX2") } else { ## covariate absent print("covariate absent from covariate data:", covX) } } ## end fixed or time varying myBeta <- paste0("betaX_",covX) Xformula <- paste0(myBeta,"*x[i,t-1]*",covX,myIndex) } else if(length(grep("^",X.terms[i],fixed=TRUE))==1){ ## POLYNOMIAL powX <- strsplit(X.terms[i],"^",fixed=TRUE)[[1]] powX <- powX[-which(toupper(powX)=="X")] ## remove X from terms myBeta <- paste0("betaX",powX) Xformula <- paste0(myBeta,"*x[i,t-1]^",powX) } else { ## JUST X myBeta <- "betaX" Xformula <- paste0(myBeta,"*x[i,t-1]") } ## add variables to Pformula Pformula <- paste(Pformula,"+",Xformula) ## add priors, make them infomative if informative.site ==TRUE if(informative.site == TRUE){ Xpriors <- paste0(Xpriors," ",myBeta, "~dnorm(", posterior.summary[posterior.summary$parameter %in% myBeta,]$means ,",",1/(posterior.summary[posterior.summary$parameter %in% myBeta,]$var ),")", 
collapse="\n") }else{ Xpriors <- paste(Xpriors," ",myBeta,"~dnorm(0,0.001)\n") } ## add to out.variables out.variables <- c(out.variables, myBeta) } ## END LOOP OVER X TERMS ## create priors TreeDataFusionMV <- sub(pattern = "## ENDOGENOUS BETAS", Xpriors, TreeDataFusionMV) } ## end processing of X terms ## build design matrix from formula Xf <- with(cov.data, model.matrix(formula(fixed))) Xf.cols <- colnames(Xf) Xf.cols <- sub(":","_",Xf.cols) ## for interaction terms, switch separator colnames(Xf) <- Xf.cols Xf.cols <- Xf.cols[Xf.cols != "(Intercept)"] Xf <- as.matrix(Xf[, Xf.cols]) colnames(Xf) <- Xf.cols ##Center the covariate data Xf.center <- apply(Xf, 2, mean, na.rm = TRUE) Xf <- t(t(Xf) - Xf.center) } ## end fixed effects parsing ## build formula in JAGS syntax if (!is.null(Xf)) { Xf.names <- gsub(" ", "_", colnames(Xf)) ## JAGS doesn't like spaces in variable names ## append to process model formula Pformula <- paste(Pformula, paste0("+ beta", Xf.names, "*Xf[rep[i],", seq_along(Xf.names), "]", collapse = " ")) ## create 'rep' variable if not defined if(is.null(data$rep)){ data$rep <- seq_len(nrow(Xf)) } ## create priors if(informative.site == TRUE){ # make informative priors, if informative flag is true Xf.priors <- paste0(" beta", Xf.names[1], "~dnorm(", posterior.summary[posterior.summary$parameter %in% paste0("beta", Xf.names[1]),]$means ,",",1/(posterior.summary[posterior.summary$parameter %in% paste0("beta", Xf.names[1]),]$var ),")", "\n", " beta", Xf.names[2], "~dnorm(", posterior.summary[posterior.summary$parameter %in% paste0("beta", Xf.names[2]),]$means ,",",1/(posterior.summary[posterior.summary$parameter %in% paste0("beta", Xf.names[2]),]$var ),")", collapse="\n") }else{ Xf.priors <- paste0(" beta", Xf.names, "~dnorm(0,0.001)", collapse = "\n") } TreeDataFusionMV <- sub(pattern = "## FIXED EFFECTS BETAS", Xf.priors, TreeDataFusionMV) ## update variables for JAGS to track data[["Xf"]] <- Xf out.variables <- c(out.variables, paste0("beta", 
Xf.names)) } check.dup.data(data,"Xf") ######################################################################## ### ### TIME-VARYING ### ######################################################################## if(FALSE){ # always false...just for development ## DEVEL TESTING FOR TIME VARYING #time_varying <- "TminJuly + PrecipDec + TminJuly*PrecipDec" time_varying <- "tmax_Jun + ppt_Dec + tmax_Jun*ppt_Dec" time_data <- list(TminJuly = matrix(0,4,4),PrecipDec = matrix(1,4,4)) } if(!is.null(time_varying)){ if (is.null(time_data)) { print("time_varying formula provided but time_data is absent:", time_varying) } Xt.priors <- "" ## parse equation into variable names t_vars <- gsub(" ","",unlist(strsplit(time_varying,"+",fixed=TRUE))) ## split on +, remove whitespace ## check for interaction terms it_vars <- t_vars[grep(pattern = "*",x=t_vars,fixed = TRUE)] if(length(it_vars) > 0){ t_vars <- t_vars[!(t_vars %in% it_vars)] } ## INTERACTIONS WITH TIME-VARYING VARS ## TODO: deal with interactions with catagorical variables ## need to create new data matrices on the fly for(i in seq_along(it_vars)){ ##is covariate fixed or time varying? 
covX <- strsplit(it_vars[i],"*",fixed=TRUE)[[1]] tvar <- length(grep("[t]",covX[1],fixed=TRUE)) > 0 tvar[2] <- length(grep("[t]",covX[2],fixed=TRUE)) > 0 myBeta <- "beta" for(j in 1:2){ if(j == 2) myBeta <- paste0(myBeta,"_") if(tvar[j]){ covX[j] <- sub("[t]","",covX[j],fixed = TRUE) if(!(covX[j] %in% names(data))){ ## add cov variables to data object data[[covX[j]]] <- time_data[[covX[j]]] } myBeta <- paste0(myBeta,covX[j]) covX[j] <- paste0(covX[j],"[i,t]") } else { ## variable is fixed if(!(covX[j] %in% names(data))){ ## add cov variables to data object data[[covX[j]]] <- cov.data[,covX[j]] } myBeta <- paste0(myBeta,covX[j]) covX[j] <- paste0(covX[j],"[i]") } ## end fixed or time varying } ## end building beta ## append to process model formula Pformula <- paste(Pformula, paste0(" + ",myBeta,"*",covX[1],"*",covX[2])) ## priors if(informative.time == TRUE){ # make informative priors, if informative flag is true Xt.priors <- paste0(Xt.priors," ", myBeta, "~dnorm(", posterior.summary[posterior.summary$parameter %in% myBeta,]$means,",",(1/posterior.summary[posterior.summary$parameter %in% myBeta,]$var) ,")", collapse="\n") }else{ Xt.priors <- paste0(Xt.priors, " ",myBeta,"~dnorm(0,0.001)\n") } #Xt.priors <- paste0(Xt.priors, # " ",myBeta,"~dnorm(0,0.001)\n") # Xt.priors <- paste0(Xt.priors," ", myBeta, "~dnorm(", posterior.summary[posterior.summary$parameter %in% myBeta,]$means ,",",1/(posterior.summary[posterior.summary$parameter %in% myBeta,]$var ),")", collapse="\n") ## add to list of varibles JAGS is tracking out.variables <- c(out.variables, myBeta) } ## end time-varying interaction terms ## loop over variables for(j in seq_along(t_vars)){ tvar <- t_vars[j] if(!(tvar %in% names(data))){ ## add cov variables to data object data[[tvar]] <- time_data[[tvar]] } check.dup.data(data,"tvar") ## append to process model formula Pformula <- paste(Pformula, paste0("+ beta", tvar, "*",tvar,"[i,t]")) ## add to list of varibles JAGS is tracking out.variables <- 
c(out.variables, paste0("beta", tvar)) } ## build prior if(informative.time == TRUE){ # make informative priors, if informative flag is true # Xt.priors <- paste0(Xt.priors, # paste0(" beta", t_vars, "~dunif(", paste(quantile(rnorm(1000,posterior.summary[posterior.summary$parameter %in% paste0("beta", t_vars),]$means,(posterior.summary[posterior.summary$parameter %in% paste0("beta", t_vars),]$var)), 0)) ,",",paste(quantile(rnorm(1000,posterior.summary[posterior.summary$parameter %in% myBeta,]$mean,(posterior.summary[posterior.summary$parameter %in% myBeta,]$var)), 1)) ,")", collapse="\n")) # Xt.priors <- paste0(Xt.priors, paste0(" beta", t_vars[1], "~dnorm(", posterior.summary[posterior.summary$parameter %in% paste0("beta", t_vars[1]),]$means, ",",(1/posterior.summary[posterior.summary$parameter %in% paste0("beta", t_vars[1]),]$var),")", "\n", " beta", t_vars[2], "~dnorm(",posterior.summary[posterior.summary$parameter %in% paste0("beta", t_vars[2]),]$means, ",",(1/posterior.summary[posterior.summary$parameter %in% paste0("beta", t_vars[2]),]$var),")",collapse="\n")) }else{ Xt.priors <- paste0(Xt.priors, " beta", t_vars,"~dnorm(0,0.001)\n") } TreeDataFusionMV <- sub(pattern = "## TIME VARYING BETAS", Xt.priors, TreeDataFusionMV) } ## END time varying covariates ## insert process model into JAGS template if (!is.null(Pformula)) { TreeDataFusionMV <- sub(pattern = "##PROCESS", Pformula, TreeDataFusionMV) } ## save script if(!is.null(save.jags)){ cat(TreeDataFusionMV,file=save.jags) } ## state variable initial condition if(is.null(z0)){ z0 <- t(apply(data$y, 1, function(y) { -rev(cumsum(rev(y))) })) + data$z[, ncol(data$z)] } ## JAGS initial conditions init <- list() source("pecan/modules/data.land/R/mcmc.list2initIGF.R") # use the new specific mcmc.list2initIGF.R if(is.mcmc.list(restart)){ init <- mcmc.list2initIGF(restart) nchain <- length(init) } else { nchain <- 3 for (i in seq_len(nchain)) { y.samp <- sample(data$y, length(data$y), replace = TRUE) z0ragged <- z0 # 
lines for z0ragged come from mikes "fix" to help with tree w/no cores for(j in 1:data$ni){ # 1: number of individuals # this creates z0ragged, where we only have z0 for trees w/ cores during the time period of the cores & # we only have z0 for trees w/out cores after the time where we have DBH measurements. # this could help with model fitting if(data$startyr[j]>1){ z0ragged[j,1:(data$startyr[j]-1)] <- NA } if(data$endyr[j]<data$nt){ z0ragged[j,(data$endyr[j]+1):data$nt] <- NA } } init[[i]] <- list(x = z0ragged, tau_add = runif(1, 1, 5) / var(diff(y.samp), na.rm = TRUE), tau_dbh = posterior.summary[posterior.summary$parameter %in% "tau_dbh",]$means, tau_inc = posterior.summary[posterior.summary$parameter %in% "tau_inc",]$means, #tau_ind = posterior.summary[posterior.summary$parameter %in% "tau_inc",]$means, #tau_yr = 100, #betaX2 = 0, ind = rep(0, data$ni), year = rep(0, data$nt)) } } #----------------------------------------------------------------------- # make sure we also use the values for informative normal priors on taus: #----------------------------------------------------------------------- data$a_add <- posterior.summary[posterior.summary$parameter %in% "tau_add",]$means data$r_add <- 1/posterior.summary[posterior.summary$parameter %in% "tau_add",]$vars data$a_inc <- posterior.summary[posterior.summary$parameter %in% "tau_inc",]$means data$r_inc <- 1/posterior.summary[posterior.summary$parameter %in% "tau_inc",]$vars data$a_dbh <- posterior.summary[posterior.summary$parameter %in% "tau_dbh",]$means data$r_dbh <- 1/posterior.summary[posterior.summary$parameter %in% "tau_dbh",]$vars means <- apply(as.matrix(posterior.ests), 2, mean) vars <- apply(as.matrix(posterior.ests), 2, var) SD <- apply(as.matrix(posterior.ests), 2, sd) # generate data frame with a summary of the posterior estimates posterior.summary <- data.frame(means = apply(as.matrix(posterior.ests), 2, mean), vars = apply(as.matrix(posterior.ests), 2, var), SD = apply(as.matrix(posterior.ests), 
2, sd)) posterior.summary$parameter <- rownames(posterior.summary) reduced <- posterior.ests[[1]] posterior.cov.matrix <- cov(posterior.ests[[1]]) # should use all the chains.... posterior.mean.matrix <- colMeans(posterior.ests[[1]]) # get only the params of interest: posterior.mean.matrix <- posterior.mean.matrix [1:11] posterior.cov.matrix <- posterior.cov.matrix [1:11, 1:11] posterior.mean.matrix <- posterior.mean.matrix[!names(posterior.mean.matrix) %in% c("mu", "deviance")] posterior.cov.matrix <-posterior.cov.matrix [!rownames(posterior.cov.matrix) %in% c("mu", "deviance"),!colnames(posterior.cov.matrix) %in% c("mu", "deviance")] posterior.prec.matrix <- 1/posterior.cov.matrix data$posterior.param.means = c(posterior.mean.matrix) data$sigma.posterior = posterior.cov.matrix data$P <- length(colnames(data$sigma.posterior)) # test model: print("COMPILE JAGS MODEL") #j.model <- jags.model(file = textConnection(TreeDataFusionMV), data = data, inits = init, n.chains = 3) j.model <- jags.model(file = "stage2_tunc_infom_nvm.txt", data = data, inits = init, n.chains = 3) if(n.burn > 0){ print("BURN IN") jags.out <- coda.samples(model = j.model, variable.names = burnin.variables, n.iter = n.burn) if (burnin_plot) { plot(jags.out) } } print("RUN MCMC") load.module("dic") for(k in avail.chunks){ ## determine whether to sample states if(as.logical(save.state) & k%%as.numeric(save.state) == 0){ vnames <- c("x",out.variables) ## save x periodically (this actually always saves x, from my experience) } else { vnames <- out.variables } ## sample chunk jags.out <- coda.samples(model = j.model, variable.names = vnames, n.iter = n.chunk) ## save chunk ofile <- paste0(output.folder, model.name ,".",k,".RData") print(ofile) save(jags.out,file=ofile) ## update restart if(!is.null(restart) & ((is.logical(restart) && restart) || is.mcmc.list(restart))){ ofile <- paste0(output.folder,"IGF",".",model.name,".","RESTART.RData") jags.final <- coda.samples(model = j.model, variable.names = 
c("x",out.variables), n.iter = 1) k_restart = k + 1 ## finished k, so would restart at k+1 save(jags.final,k_restart,file=ofile) } if(breakearly == TRUE){ ## check for convergence and break from loop early D <- as.mcmc.list(lapply(jags.out,function(x){x[,'deviance']})) gbr <- coda::gelman.diag(D)$psrf[1,1] trend <- mean(sapply(D,function(x){coef(lm(x~seq_len(n.chunk)))[2]})) if(gbr < 1.005 & abs(trend) < 0.5) break } } print(paste("end of MCMC", Sys.time())) return(jags.out) } # InventoryGrowthFusion
/modules/data.land/R/Inventory_Growth_Fusion_stage_2_mvn.R
permissive
Kah5/pecan
R
false
false
24,331
r
##' @name InventoryGrowthFusion ##' @title InventoryGrowthFusion ##' @description this code fuses forest inventory data with tree growth data (tree ring or dendrometer band) ##' for the same plots. Code is a rewrite of Clark et al 2007 Ecol Appl into JAGS ##' ##' @param dbh.only.data list of data inputs from trees with only dbh measurements (no increment cores) ##' @param posterior.estimates posterior estimates of all model parameters from the first stage to be used as informative priors for 2nd stage ##' @param random = whether or not to include random effects ##' @param n.chunk number of MCMC steps to evaluate at a time. Will only return LAST. If restarting, second number in vector is chunk to start from ##' @param n.burn number of steps to automatically discard as burn-in ##' @param save.state whether or not to include inferred DBH in output (can be large). Enter numeric value to save.state periodically (in terms of n.chunk) ##' @param restart final mcmc.list from previous execution. NULL for new run. TRUE to save final state for new run. 
##' @note Requires JAGS ##' @return an mcmc.list object ##' @export InventoryGrowthFusion_stage2 <- function(data, posterior.estimates = NULL, cov.data=NULL, time_data = NULL, informative.time = TRUE, informative.site = TRUE, informative.plot =TRUE, n.iter=5000, n.chunk = n.iter, n.burn = min(n.chunk, 2000), random = NULL, fixed = NULL,time_varying=NULL, burnin_plot = FALSE, output.folder= "/home/rstudio/pecan/IGF_PIPO_AZ_mcmc/", save.jags = "IGF.ragged.txt", model.name = "model",z0 = NULL, save.state=TRUE, restart = NULL, breakearly = TRUE) { library(rjags) print(paste("start of MCMC", Sys.time())) # baseline variables to monitor burnin.variables <- c("tau_add", "tau_dbh", "tau_inc", "mu") # process variability, dbh and tree-ring observation error, intercept out.variables <- c("deviance", "tau_add", "tau_dbh", "tau_inc", "mu") # if(save.state) out.variables <- c(out.variables,"x") if(!exists("model")) model = 0 ## restart if(length(n.chunk)>1){ k_restart = n.chunk[2] n.chunk = n.chunk[1] } else { k_restart = 1 } max.chunks <- ceiling(n.iter/n.chunk) if(max.chunks < k_restart){ PEcAn.utils::logger.warn("MCMC already complete",max.chunks,k_restart) return(NULL) } avail.chunks <- k_restart:ceiling(n.iter/n.chunk) check.dup.data <- function(data,loc){ if(any(duplicated(names(data)))){print("duplicated variable at",loc,names(data))} } # start text object that will be manipulated (to build different linear models, swap in/out covariates) TreeDataFusionMV <- " model{ ### Loop over all individuals for(i in 1:ni){ #### Data Model: DBH for(t in 1:nt){ z[i,t] ~ dnorm(x[i,t],tau_dbh) } #### Data Model: growth for(t in 2:nt){ inc[i,t] <- x[i,t]-x[i,t-1] y[i,t] ~ dnorm(inc[i,t],tau_inc) } #### Process Model for(t in 2:nt){ Dnew[i,t] <- x[i,t-1] + mu ##PROCESS x[i,t]~dnorm(Dnew[i,t],tau_add) } ## initial condition x[i,1] ~ dnorm(x_ic,tau_ic) } ## end loop over individuals ## RANDOM_EFFECTS #### Priors tau_dbh ~ dnorm(a_dbh,r_dbh) tau_inc ~ dnorm(a_inc,r_inc) tau_add ~ 
dnorm(a_add,r_add) mu ~ dnorm(0.5,0.5) ## FIXED EFFECTS BETAS ## ENDOGENOUS BETAS ## TIME VARYING BETAS ## RANDOM EFFECT TAUS }" ######################################################################## # # get summary of posterior estimates from previous model # ######################################################################## # these will be used to specify informative priors below: Pformula <- NULL ######################################################################## ### ### RANDOM EFFECTS ### ######################################################################## if (!is.null(random)) { Rpriors <- NULL Reffects <- NULL ## parse random effects r_vars <- gsub(" ","",unlist(strsplit(random,"+",fixed=TRUE))) ## split on +, remove whitespace for(i in seq_along(r_vars)){ ## special case: individidual if(r_vars[i] == "i"){ r_var <- "i" counter <- "" index <- "i" nr <- nrow(cov.data) } else if(r_vars[i] == "t"){ r_var <- "t" counter <- "" index <- "t" nr <- ncol(cov.data) } else { index <- counter <- nr <- NA r_var <- gsub("(","",gsub(")","",r_vars[i],fixed = TRUE),fixed="TRUE") r_var <- strsplit(r_var,"|",fixed=TRUE)[[1]] fix <- r_var[1] ## check for nested effects r_var <- strsplit(gsub("\\",":",r_var[2],fixed=TRUE),":",fixed = TRUE)[[1]] for(j in seq_along(length(r_var))){ if(j>1)print("WARNING: not actually nesting random effects at this time") ## HACK: to get started, not actually nesting ## parse j_var <- strsplit(r_var[j],"[",fixed = TRUE)[[1]] index[j] <- gsub("]","",j_var[2],fixed=TRUE) counter[j] <- j_var[1] r_var[j] <- j_var[1] ## add variable to data if(!(r_var[j] %in% names(data))){ data[[length(data)+1]] <- as.numeric(as.factor(as.character(cov.data[,r_var[j]]))) ## multiple conversions to eliminate gaps names(data)[length(data)] <- r_var[j] } check.dup.data(data,"r_var") nr[j] <- max(as.numeric(data[[r_var[j]]])) } index <- paste0("[",index,"]") } ## create formula Pformula <- paste(Pformula, paste0("+ alpha_", r_var,"[",counter,index,"]")) ## 
create random effect for(j in seq_along(nr)){ Reffects <- paste(Reffects, paste0("for(k in 1:",nr[j],"){\n"), paste0(" alpha_",r_var[j],"[k] ~ dnorm(0,tau_",r_var[j],")\n}\n")) } ## create priors if(informative.plot == FALSE){ Rpriors <- paste(Rpriors,paste0("tau_",r_var," ~ dgamma(1,0.1)\n",collapse = " ")) }else{ Rpriors <- paste(Rpriors,paste0("tau_",r_var," ~ dnorm(",posterior.summary[posterior.summary$parameter %in% "tau_PLOT", ]$means ,",", (1/posterior.summary[posterior.summary$parameter %in% "tau_PLOT", ]$vars) ,")\n",collapse = " ")) } ## track burnin.variables <- c(burnin.variables, paste0("tau_", r_var)) out.variables <- c(out.variables, paste0("tau_", r_var), paste0("alpha_",r_var)) } ## Substitute into code TreeDataFusionMV <- sub(pattern = "## RANDOM EFFECT TAUS", Rpriors, TreeDataFusionMV) TreeDataFusionMV <- gsub(pattern = "## RANDOM_EFFECTS", Reffects, TreeDataFusionMV) } ### END RANDOM EFFECTS ######################################################################## ### ### FIXED EFFECTS ### ######################################################################## if(FALSE){ ## DEV TESTING FOR X, polynomial X, and X interactions fixed <- "X + X^3 + X*bob + bob + dia + X*Tmin[t]" ## faux model, just for testing jags code } ## Design matrix if (is.null(fixed)) { Xf <- NULL } else { ## check for covariate data (note: will falsely fail if only effect is X) if (is.null(cov.data)) { print("formula provided but covariate data is absent:", fixed) } else { cov.data <- as.data.frame(cov.data) } ## check if there's a tilda in the formula if (length(grep("~", fixed)) == 0) { fixed <- paste("~", fixed) } ### BEGIN adding in tree size (endogenous variable X) ## First deal with endogenous terms (X and X*cov interactions) fixedX <- sub("~","",fixed, fixed=TRUE) lm.terms <- gsub("[[:space:]]", "", strsplit(fixedX,split = "+",fixed=TRUE)[[1]]) ## split on + and remove whitespace X.terms <- strsplit(lm.terms,split = c("^"),fixed = TRUE) X.terms <- 
sapply(X.terms,function(str){unlist(strsplit(str,,split="*",fixed=TRUE))}) X.terms <- which(sapply(X.terms,function(x){any(toupper(x) == "X")})) if(length(X.terms) > 0){ ## rebuild fixed without X.terms fixed <- paste("~",paste(lm.terms[-X.terms],collapse = " + ")) ## isolate terms with X X.terms <- lm.terms[X.terms] Xpriors <- NULL for(i in seq_along(X.terms)){ myBeta <- NULL Xformula <- NULL if(length(grep("*",X.terms[i],fixed = TRUE)) == 1){ ## INTERACTION myIndex <- "[i]" covX <- strsplit(X.terms[i],"*",fixed=TRUE)[[1]] covX <- covX[-which(toupper(covX)=="X")] ## remove X from terms ##is covariate fixed or time varying? tvar <- length(grep("[t]",covX,fixed=TRUE)) > 0 if(tvar){ covX <- sub("[t]","",covX,fixed = TRUE) if(!(covX %in% names(data))){ ## add cov variables to data object data[[covX]] <- time_data[[covX]] } check.dup.data(data,"covX") myIndex <- "[i,t]" } else { ## variable is fixed if(covX %in% colnames(cov.data)){ ## covariate present if(!(covX %in% names(data))){ ## add cov variables to data object data[[covX]] <- cov.data[,covX] } check.dup.data(data,"covX2") } else { ## covariate absent print("covariate absent from covariate data:", covX) } } ## end fixed or time varying myBeta <- paste0("betaX_",covX) Xformula <- paste0(myBeta,"*x[i,t-1]*",covX,myIndex) } else if(length(grep("^",X.terms[i],fixed=TRUE))==1){ ## POLYNOMIAL powX <- strsplit(X.terms[i],"^",fixed=TRUE)[[1]] powX <- powX[-which(toupper(powX)=="X")] ## remove X from terms myBeta <- paste0("betaX",powX) Xformula <- paste0(myBeta,"*x[i,t-1]^",powX) } else { ## JUST X myBeta <- "betaX" Xformula <- paste0(myBeta,"*x[i,t-1]") } ## add variables to Pformula Pformula <- paste(Pformula,"+",Xformula) ## add priors, make them infomative if informative.site ==TRUE if(informative.site == TRUE){ Xpriors <- paste0(Xpriors," ",myBeta, "~dnorm(", posterior.summary[posterior.summary$parameter %in% myBeta,]$means ,",",1/(posterior.summary[posterior.summary$parameter %in% myBeta,]$var ),")", 
collapse="\n") }else{ Xpriors <- paste(Xpriors," ",myBeta,"~dnorm(0,0.001)\n") } ## add to out.variables out.variables <- c(out.variables, myBeta) } ## END LOOP OVER X TERMS ## create priors TreeDataFusionMV <- sub(pattern = "## ENDOGENOUS BETAS", Xpriors, TreeDataFusionMV) } ## end processing of X terms ## build design matrix from formula Xf <- with(cov.data, model.matrix(formula(fixed))) Xf.cols <- colnames(Xf) Xf.cols <- sub(":","_",Xf.cols) ## for interaction terms, switch separator colnames(Xf) <- Xf.cols Xf.cols <- Xf.cols[Xf.cols != "(Intercept)"] Xf <- as.matrix(Xf[, Xf.cols]) colnames(Xf) <- Xf.cols ##Center the covariate data Xf.center <- apply(Xf, 2, mean, na.rm = TRUE) Xf <- t(t(Xf) - Xf.center) } ## end fixed effects parsing ## build formula in JAGS syntax if (!is.null(Xf)) { Xf.names <- gsub(" ", "_", colnames(Xf)) ## JAGS doesn't like spaces in variable names ## append to process model formula Pformula <- paste(Pformula, paste0("+ beta", Xf.names, "*Xf[rep[i],", seq_along(Xf.names), "]", collapse = " ")) ## create 'rep' variable if not defined if(is.null(data$rep)){ data$rep <- seq_len(nrow(Xf)) } ## create priors if(informative.site == TRUE){ # make informative priors, if informative flag is true Xf.priors <- paste0(" beta", Xf.names[1], "~dnorm(", posterior.summary[posterior.summary$parameter %in% paste0("beta", Xf.names[1]),]$means ,",",1/(posterior.summary[posterior.summary$parameter %in% paste0("beta", Xf.names[1]),]$var ),")", "\n", " beta", Xf.names[2], "~dnorm(", posterior.summary[posterior.summary$parameter %in% paste0("beta", Xf.names[2]),]$means ,",",1/(posterior.summary[posterior.summary$parameter %in% paste0("beta", Xf.names[2]),]$var ),")", collapse="\n") }else{ Xf.priors <- paste0(" beta", Xf.names, "~dnorm(0,0.001)", collapse = "\n") } TreeDataFusionMV <- sub(pattern = "## FIXED EFFECTS BETAS", Xf.priors, TreeDataFusionMV) ## update variables for JAGS to track data[["Xf"]] <- Xf out.variables <- c(out.variables, paste0("beta", 
Xf.names)) } check.dup.data(data,"Xf") ######################################################################## ### ### TIME-VARYING ### ######################################################################## if(FALSE){ # always false...just for development ## DEVEL TESTING FOR TIME VARYING #time_varying <- "TminJuly + PrecipDec + TminJuly*PrecipDec" time_varying <- "tmax_Jun + ppt_Dec + tmax_Jun*ppt_Dec" time_data <- list(TminJuly = matrix(0,4,4),PrecipDec = matrix(1,4,4)) } if(!is.null(time_varying)){ if (is.null(time_data)) { print("time_varying formula provided but time_data is absent:", time_varying) } Xt.priors <- "" ## parse equation into variable names t_vars <- gsub(" ","",unlist(strsplit(time_varying,"+",fixed=TRUE))) ## split on +, remove whitespace ## check for interaction terms it_vars <- t_vars[grep(pattern = "*",x=t_vars,fixed = TRUE)] if(length(it_vars) > 0){ t_vars <- t_vars[!(t_vars %in% it_vars)] } ## INTERACTIONS WITH TIME-VARYING VARS ## TODO: deal with interactions with catagorical variables ## need to create new data matrices on the fly for(i in seq_along(it_vars)){ ##is covariate fixed or time varying? 
covX <- strsplit(it_vars[i],"*",fixed=TRUE)[[1]] tvar <- length(grep("[t]",covX[1],fixed=TRUE)) > 0 tvar[2] <- length(grep("[t]",covX[2],fixed=TRUE)) > 0 myBeta <- "beta" for(j in 1:2){ if(j == 2) myBeta <- paste0(myBeta,"_") if(tvar[j]){ covX[j] <- sub("[t]","",covX[j],fixed = TRUE) if(!(covX[j] %in% names(data))){ ## add cov variables to data object data[[covX[j]]] <- time_data[[covX[j]]] } myBeta <- paste0(myBeta,covX[j]) covX[j] <- paste0(covX[j],"[i,t]") } else { ## variable is fixed if(!(covX[j] %in% names(data))){ ## add cov variables to data object data[[covX[j]]] <- cov.data[,covX[j]] } myBeta <- paste0(myBeta,covX[j]) covX[j] <- paste0(covX[j],"[i]") } ## end fixed or time varying } ## end building beta ## append to process model formula Pformula <- paste(Pformula, paste0(" + ",myBeta,"*",covX[1],"*",covX[2])) ## priors if(informative.time == TRUE){ # make informative priors, if informative flag is true Xt.priors <- paste0(Xt.priors," ", myBeta, "~dnorm(", posterior.summary[posterior.summary$parameter %in% myBeta,]$means,",",(1/posterior.summary[posterior.summary$parameter %in% myBeta,]$var) ,")", collapse="\n") }else{ Xt.priors <- paste0(Xt.priors, " ",myBeta,"~dnorm(0,0.001)\n") } #Xt.priors <- paste0(Xt.priors, # " ",myBeta,"~dnorm(0,0.001)\n") # Xt.priors <- paste0(Xt.priors," ", myBeta, "~dnorm(", posterior.summary[posterior.summary$parameter %in% myBeta,]$means ,",",1/(posterior.summary[posterior.summary$parameter %in% myBeta,]$var ),")", collapse="\n") ## add to list of varibles JAGS is tracking out.variables <- c(out.variables, myBeta) } ## end time-varying interaction terms ## loop over variables for(j in seq_along(t_vars)){ tvar <- t_vars[j] if(!(tvar %in% names(data))){ ## add cov variables to data object data[[tvar]] <- time_data[[tvar]] } check.dup.data(data,"tvar") ## append to process model formula Pformula <- paste(Pformula, paste0("+ beta", tvar, "*",tvar,"[i,t]")) ## add to list of varibles JAGS is tracking out.variables <- 
c(out.variables, paste0("beta", tvar)) } ## build prior if(informative.time == TRUE){ # make informative priors, if informative flag is true # Xt.priors <- paste0(Xt.priors, # paste0(" beta", t_vars, "~dunif(", paste(quantile(rnorm(1000,posterior.summary[posterior.summary$parameter %in% paste0("beta", t_vars),]$means,(posterior.summary[posterior.summary$parameter %in% paste0("beta", t_vars),]$var)), 0)) ,",",paste(quantile(rnorm(1000,posterior.summary[posterior.summary$parameter %in% myBeta,]$mean,(posterior.summary[posterior.summary$parameter %in% myBeta,]$var)), 1)) ,")", collapse="\n")) # Xt.priors <- paste0(Xt.priors, paste0(" beta", t_vars[1], "~dnorm(", posterior.summary[posterior.summary$parameter %in% paste0("beta", t_vars[1]),]$means, ",",(1/posterior.summary[posterior.summary$parameter %in% paste0("beta", t_vars[1]),]$var),")", "\n", " beta", t_vars[2], "~dnorm(",posterior.summary[posterior.summary$parameter %in% paste0("beta", t_vars[2]),]$means, ",",(1/posterior.summary[posterior.summary$parameter %in% paste0("beta", t_vars[2]),]$var),")",collapse="\n")) }else{ Xt.priors <- paste0(Xt.priors, " beta", t_vars,"~dnorm(0,0.001)\n") } TreeDataFusionMV <- sub(pattern = "## TIME VARYING BETAS", Xt.priors, TreeDataFusionMV) } ## END time varying covariates ## insert process model into JAGS template if (!is.null(Pformula)) { TreeDataFusionMV <- sub(pattern = "##PROCESS", Pformula, TreeDataFusionMV) } ## save script if(!is.null(save.jags)){ cat(TreeDataFusionMV,file=save.jags) } ## state variable initial condition if(is.null(z0)){ z0 <- t(apply(data$y, 1, function(y) { -rev(cumsum(rev(y))) })) + data$z[, ncol(data$z)] } ## JAGS initial conditions init <- list() source("pecan/modules/data.land/R/mcmc.list2initIGF.R") # use the new specific mcmc.list2initIGF.R if(is.mcmc.list(restart)){ init <- mcmc.list2initIGF(restart) nchain <- length(init) } else { nchain <- 3 for (i in seq_len(nchain)) { y.samp <- sample(data$y, length(data$y), replace = TRUE) z0ragged <- z0 # 
lines for z0ragged come from mikes "fix" to help with tree w/no cores for(j in 1:data$ni){ # 1: number of individuals # this creates z0ragged, where we only have z0 for trees w/ cores during the time period of the cores & # we only have z0 for trees w/out cores after the time where we have DBH measurements. # this could help with model fitting if(data$startyr[j]>1){ z0ragged[j,1:(data$startyr[j]-1)] <- NA } if(data$endyr[j]<data$nt){ z0ragged[j,(data$endyr[j]+1):data$nt] <- NA } } init[[i]] <- list(x = z0ragged, tau_add = runif(1, 1, 5) / var(diff(y.samp), na.rm = TRUE), tau_dbh = posterior.summary[posterior.summary$parameter %in% "tau_dbh",]$means, tau_inc = posterior.summary[posterior.summary$parameter %in% "tau_inc",]$means, #tau_ind = posterior.summary[posterior.summary$parameter %in% "tau_inc",]$means, #tau_yr = 100, #betaX2 = 0, ind = rep(0, data$ni), year = rep(0, data$nt)) } } #----------------------------------------------------------------------- # make sure we also use the values for informative normal priors on taus: #----------------------------------------------------------------------- data$a_add <- posterior.summary[posterior.summary$parameter %in% "tau_add",]$means data$r_add <- 1/posterior.summary[posterior.summary$parameter %in% "tau_add",]$vars data$a_inc <- posterior.summary[posterior.summary$parameter %in% "tau_inc",]$means data$r_inc <- 1/posterior.summary[posterior.summary$parameter %in% "tau_inc",]$vars data$a_dbh <- posterior.summary[posterior.summary$parameter %in% "tau_dbh",]$means data$r_dbh <- 1/posterior.summary[posterior.summary$parameter %in% "tau_dbh",]$vars means <- apply(as.matrix(posterior.ests), 2, mean) vars <- apply(as.matrix(posterior.ests), 2, var) SD <- apply(as.matrix(posterior.ests), 2, sd) # generate data frame with a summary of the posterior estimates posterior.summary <- data.frame(means = apply(as.matrix(posterior.ests), 2, mean), vars = apply(as.matrix(posterior.ests), 2, var), SD = apply(as.matrix(posterior.ests), 
2, sd)) posterior.summary$parameter <- rownames(posterior.summary) reduced <- posterior.ests[[1]] posterior.cov.matrix <- cov(posterior.ests[[1]]) # should use all the chains.... posterior.mean.matrix <- colMeans(posterior.ests[[1]]) # get only the params of interest: posterior.mean.matrix <- posterior.mean.matrix [1:11] posterior.cov.matrix <- posterior.cov.matrix [1:11, 1:11] posterior.mean.matrix <- posterior.mean.matrix[!names(posterior.mean.matrix) %in% c("mu", "deviance")] posterior.cov.matrix <-posterior.cov.matrix [!rownames(posterior.cov.matrix) %in% c("mu", "deviance"),!colnames(posterior.cov.matrix) %in% c("mu", "deviance")] posterior.prec.matrix <- 1/posterior.cov.matrix data$posterior.param.means = c(posterior.mean.matrix) data$sigma.posterior = posterior.cov.matrix data$P <- length(colnames(data$sigma.posterior)) # test model: print("COMPILE JAGS MODEL") #j.model <- jags.model(file = textConnection(TreeDataFusionMV), data = data, inits = init, n.chains = 3) j.model <- jags.model(file = "stage2_tunc_infom_nvm.txt", data = data, inits = init, n.chains = 3) if(n.burn > 0){ print("BURN IN") jags.out <- coda.samples(model = j.model, variable.names = burnin.variables, n.iter = n.burn) if (burnin_plot) { plot(jags.out) } } print("RUN MCMC") load.module("dic") for(k in avail.chunks){ ## determine whether to sample states if(as.logical(save.state) & k%%as.numeric(save.state) == 0){ vnames <- c("x",out.variables) ## save x periodically (this actually always saves x, from my experience) } else { vnames <- out.variables } ## sample chunk jags.out <- coda.samples(model = j.model, variable.names = vnames, n.iter = n.chunk) ## save chunk ofile <- paste0(output.folder, model.name ,".",k,".RData") print(ofile) save(jags.out,file=ofile) ## update restart if(!is.null(restart) & ((is.logical(restart) && restart) || is.mcmc.list(restart))){ ofile <- paste0(output.folder,"IGF",".",model.name,".","RESTART.RData") jags.final <- coda.samples(model = j.model, variable.names = 
c("x",out.variables), n.iter = 1) k_restart = k + 1 ## finished k, so would restart at k+1 save(jags.final,k_restart,file=ofile) } if(breakearly == TRUE){ ## check for convergence and break from loop early D <- as.mcmc.list(lapply(jags.out,function(x){x[,'deviance']})) gbr <- coda::gelman.diag(D)$psrf[1,1] trend <- mean(sapply(D,function(x){coef(lm(x~seq_len(n.chunk)))[2]})) if(gbr < 1.005 & abs(trend) < 0.5) break } } print(paste("end of MCMC", Sys.time())) return(jags.out) } # InventoryGrowthFusion
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/RcppExports.R \name{gibbs_sampler} \alias{gibbs_sampler} \title{Obtain draws from gibbs sampler} \usage{ gibbs_sampler( overall_sum_trees_mu, overall_sum_trees_tau, overall_sum_mat_mu, overall_sum_mat_tau, y, BIC_weights, num_iter, burnin, num_obs, a_mu, a_tau, sigma, mu_mu_mu, mu_mu_tau, nu, lambda, resids_mu, resids_tau, z, test_data, test_pihat, z_test, include_pi2, num_propscores, num_test_obs ) } \description{ Obtain draws from gibbs sampler }
/man/gibbs_sampler.Rd
no_license
EoghanONeill/bcfbma
R
false
true
581
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/RcppExports.R \name{gibbs_sampler} \alias{gibbs_sampler} \title{Obtain draws from gibbs sampler} \usage{ gibbs_sampler( overall_sum_trees_mu, overall_sum_trees_tau, overall_sum_mat_mu, overall_sum_mat_tau, y, BIC_weights, num_iter, burnin, num_obs, a_mu, a_tau, sigma, mu_mu_mu, mu_mu_tau, nu, lambda, resids_mu, resids_tau, z, test_data, test_pihat, z_test, include_pi2, num_propscores, num_test_obs ) } \description{ Obtain draws from gibbs sampler }
\name{SiPeriodicArmaModel-class} \Rdversion{1.1} \docType{class} \alias{SiPeriodicArmaModel-class} \title{Class SiPeriodicArmaModel} \description{Class SiPeriodicArmaModel.} \section{Objects from the Class}{ Objects can be created by calls of the form \code{new("SiPeriodicArmaModel", ...)}. %% ~~ describe objects here ~~ } \section{Slots}{ \describe{ \item{\code{iorder}:}{Object of class \code{"numeric"} ~~ } \item{\code{siorder}:}{Object of class \code{"numeric"} ~~ } \item{\code{pcmodel}:}{Object of class \code{"PeriodicArmaModel"} ~~ } } } \section{Extends}{ Class \code{"\linkS4class{VirtualPeriodicFilterModel}"}, directly. Class \code{"\linkS4class{PeriodicIntegratedArmaSpec}"}, directly. } \section{Methods}{ No methods defined with class "SiPeriodicArmaModel" in the signature. } %\references{ %%% ~~put references to the literature/web site here~~ %} %\author{ %%% ~~who you are~~ %} %\note{ %%% ~~further notes~~ %} % %%% ~Make other sections like Warning with \section{Warning }{....} ~ % %\seealso{ %%% ~~objects to See Also as \code{\link{~~fun~~}}, ~~~ %%% ~~or \code{\linkS4class{CLASSNAME}} for links to other classes ~~~ %} %\examples{ %showClass("SiPeriodicArmaModel") %} \keyword{classes}
/man/SiPeriodicArmaModel-class.Rd
no_license
GeoBosh/pcts
R
false
false
1,238
rd
\name{SiPeriodicArmaModel-class} \Rdversion{1.1} \docType{class} \alias{SiPeriodicArmaModel-class} \title{Class SiPeriodicArmaModel} \description{Class SiPeriodicArmaModel.} \section{Objects from the Class}{ Objects can be created by calls of the form \code{new("SiPeriodicArmaModel", ...)}. %% ~~ describe objects here ~~ } \section{Slots}{ \describe{ \item{\code{iorder}:}{Object of class \code{"numeric"} ~~ } \item{\code{siorder}:}{Object of class \code{"numeric"} ~~ } \item{\code{pcmodel}:}{Object of class \code{"PeriodicArmaModel"} ~~ } } } \section{Extends}{ Class \code{"\linkS4class{VirtualPeriodicFilterModel}"}, directly. Class \code{"\linkS4class{PeriodicIntegratedArmaSpec}"}, directly. } \section{Methods}{ No methods defined with class "SiPeriodicArmaModel" in the signature. } %\references{ %%% ~~put references to the literature/web site here~~ %} %\author{ %%% ~~who you are~~ %} %\note{ %%% ~~further notes~~ %} % %%% ~Make other sections like Warning with \section{Warning }{....} ~ % %\seealso{ %%% ~~objects to See Also as \code{\link{~~fun~~}}, ~~~ %%% ~~or \code{\linkS4class{CLASSNAME}} for links to other classes ~~~ %} %\examples{ %showClass("SiPeriodicArmaModel") %} \keyword{classes}
rm(list=ls()) library(XML) library(bitops) library(RCurl) library(httr) orgURL <- 'http://skygene.blogspot.tw/' startPage <- as.numeric(as.Date('2005/12/01',format='%Y/%m/%d')) endPage <- as.numeric(as.Date('2006/12/01',format='%Y/%m/%d')) #endPage <- as.numeric(as.Date(cut(Sys.Date(),"month"))) alldata <- data.frame() for(i in startPage:endPage) { if(as.Date(cut(as.Date(i,origin="1970-01-01"),"month"))>as.Date(cut(as.Date(i-1,origin="1970-01-01"),"month"))) { blogURL <- paste(orgURL, gsub("-","_",as.character(as.Date(i,origin="1970-01-01"))),'_archive.html',sep='') urlExists <- url.exists(blogURL) if(urlExists) { html <- getURL(blogURL, ssl.verifypeer = FALSE) xml <- htmlParse(html, encoding ='utf-8') if(length(xpathSApply(xml, "//h3[@class='post-title entry-title']/a//text()", xmlValue))!=0) { title <- xpathSApply(xml, "//h3[@class='post-title entry-title']/a//text()", xmlValue) author <- xpathSApply(xml, "//span[@class='fn']", xmlValue) path <- xpathSApply(xml, "//h3[@class='post-title entry-title']/a//@href") date.month <- format(as.Date(i,origin="1970-01-01"),format="%Y-%m") html <- getURL(path, ssl.verifypeer = FALSE) xml <- htmlParse(html, encoding ='utf-8') date <- xpathSApply(xml, "//h2[@class='date-header']/span", xmlValue) #response <- xpathSApply(xml, "//span[@id='u_0_2']", xmlValue) response <- ifelse(length(xpathSApply(xml, "//span[@id='u_0_2']", xmlValue))==0,as.character("0"),xpathSApply(xml, "//span[@id='u_0_2']", xmlValue)) tempdata <- data.frame(title, author, path, date, date.month, response) alldata <- rbind(alldata, tempdata) } } } } allDate <- levels(alldata$date.month) res = hist(as.numeric(alldata$date.month), nclass=length(allDate), axes=F, labels=T) axis(1, at=1:length(allDate), labels=allDate) axis(2, at=1:max(res$counts), labels=1:max(res$counts))
/practice02_parsingblog.R
no_license
Drvinc/R-beginner
R
false
false
1,948
r
rm(list=ls()) library(XML) library(bitops) library(RCurl) library(httr) orgURL <- 'http://skygene.blogspot.tw/' startPage <- as.numeric(as.Date('2005/12/01',format='%Y/%m/%d')) endPage <- as.numeric(as.Date('2006/12/01',format='%Y/%m/%d')) #endPage <- as.numeric(as.Date(cut(Sys.Date(),"month"))) alldata <- data.frame() for(i in startPage:endPage) { if(as.Date(cut(as.Date(i,origin="1970-01-01"),"month"))>as.Date(cut(as.Date(i-1,origin="1970-01-01"),"month"))) { blogURL <- paste(orgURL, gsub("-","_",as.character(as.Date(i,origin="1970-01-01"))),'_archive.html',sep='') urlExists <- url.exists(blogURL) if(urlExists) { html <- getURL(blogURL, ssl.verifypeer = FALSE) xml <- htmlParse(html, encoding ='utf-8') if(length(xpathSApply(xml, "//h3[@class='post-title entry-title']/a//text()", xmlValue))!=0) { title <- xpathSApply(xml, "//h3[@class='post-title entry-title']/a//text()", xmlValue) author <- xpathSApply(xml, "//span[@class='fn']", xmlValue) path <- xpathSApply(xml, "//h3[@class='post-title entry-title']/a//@href") date.month <- format(as.Date(i,origin="1970-01-01"),format="%Y-%m") html <- getURL(path, ssl.verifypeer = FALSE) xml <- htmlParse(html, encoding ='utf-8') date <- xpathSApply(xml, "//h2[@class='date-header']/span", xmlValue) #response <- xpathSApply(xml, "//span[@id='u_0_2']", xmlValue) response <- ifelse(length(xpathSApply(xml, "//span[@id='u_0_2']", xmlValue))==0,as.character("0"),xpathSApply(xml, "//span[@id='u_0_2']", xmlValue)) tempdata <- data.frame(title, author, path, date, date.month, response) alldata <- rbind(alldata, tempdata) } } } } allDate <- levels(alldata$date.month) res = hist(as.numeric(alldata$date.month), nclass=length(allDate), axes=F, labels=T) axis(1, at=1:length(allDate), labels=allDate) axis(2, at=1:max(res$counts), labels=1:max(res$counts))
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/compute_genotype_matrix.R \name{gprep} \alias{gprep} \title{Prepare genotype data for all statistical analyses (initial step)} \usage{ gprep(Glist = NULL, task = "prepare", study = NULL, fnRAW = NULL, fnLD = NULL, bedfiles = NULL, bimfiles = NULL, famfiles = NULL, ids = NULL, rsids = NULL, overwrite = FALSE, msize = 100, ncores = 1) } \arguments{ \item{Glist}{only provided if task="summary" or task="sparseld"} \item{task}{character specifying which task to perform ("prepare" is default, "summary", or "sparseld")} \item{study}{name of the study} \item{fnRAW}{path and filename of the binary file .raw or .bed used for storing genotypes on the disk} \item{fnLD}{path and filename of the binary files .ld for storing sparse ld matrix on the disk} \item{bedfiles}{vector of names for the PLINK bed-files} \item{bimfiles}{vector of names for the PLINK bim-files} \item{famfiles}{vector of names for the PLINK fam-files} \item{ids}{vector of individuals used in the study} \item{rsids}{vector of marker rsids used in the study} \item{overwrite}{logical if TRUE overwrite binary genotype file} \item{msize}{number of markers used in computation of sparseld} \item{ncores}{number of cores used to process the genotypes} } \value{ Returns a list structure (Glist) with information about genotypes } \description{ All functions in qgg rely on a simple data infrastructure that takes five main input sources; phenotype data (y), covariate data (X), genotype data (G or Glist), a genomic relationship matrix (GRM or GRMlist) and genetic marker sets (sets). The genotypes are stored in a matrix (n x m (individuals x markers)) in memory (G) or in a binary file on disk (Glist). It is only for small data sets that the genotype matrix (G) can be stored in memory. For large data sets the genotype matrix has to be stored in a binary file on disk (Glist). 
Glist is as a list structure that contains information about the genotypes in the binary file. The gprep function prepares the Glist, and is required for downstream analyses of large-scale genetic data. Typically, the Glist is prepared once, and saved as an *.Rdata-file. The gprep function reads genotype information from binary PLINK files, and creates the Glist object that contains general information about the genotypes such as reference alleles, allele frequencies and missing genotypes, and construct a binary file on the disk that contains the genotypes as allele counts of the alternative allele (memory usage = (n x m)/4 bytes). The gprep function can also be used to prepare sparse ld matrices. The r2 metric used is the pairwise correlation between markers (allele count alternative allele) in a specified region of the genome. The marker genotype is allele count of the alternative allele which is assumed to be centered and scaled. The Glist structure is used as input parameter for a number of qgg core functions including: 1) construction of genomic relationship matrices (grm), 2) construction of sparse ld matrices, 3) estimating genomic parameters (greml), 4) single marker association analyses (lma or mlma), 5) gene set enrichment analyses (gsea), and 6) genomic prediction from genotypes and phenotypes (gsolve) or genotypes and summary statistics (gscore). 
} \examples{ \dontrun{ # Download 1000G Plink files url <- "https://data.broadinstitute.org/alkesgroup/LDSCORE/1000G_Phase3_plinkfiles.tgz" download.file(url=url,dest="./1000G_Phase3_plinkfiles.tgz") cmd <- "tar -xvzf 1000G_Phase3_plinkfiles.tgz" system(cmd) # Prepare Glist bedfiles <- paste("./1000G_EUR_Phase3_plink/1000G.EUR.QC.",1:22,".bed",sep="") bimfiles <- paste("./1000G_EUR_Phase3_plink/1000G.EUR.QC.",1:22,".bim",sep="") famfiles <- paste("./1000G_EUR_Phase3_plink/1000G.EUR.QC.",1:22,".fam",sep="") fnRAW <- "./1000G.raw" Glist <- gprep(study="1000G", fnRAW=fnRAW, bedfiles=bedfiles, bimfiles=bimfiles, famfiles=famfiles, overwrite=TRUE) Glist <- gprep(Glist=Glist, task="summary") fnLD <- paste("./1000G_EUR_Phase3_plink/1000G.EUR.QC.",1:22,".ld",sep="") Glist <- gprep( task="sparseld", Glist=Glist, fnLD=fnLD, msize=200, ncores=4) } } \author{ Peter Soerensen }
/man/gprep.Rd
no_license
nunonog/qgg
R
false
true
4,228
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/compute_genotype_matrix.R \name{gprep} \alias{gprep} \title{Prepare genotype data for all statistical analyses (initial step)} \usage{ gprep(Glist = NULL, task = "prepare", study = NULL, fnRAW = NULL, fnLD = NULL, bedfiles = NULL, bimfiles = NULL, famfiles = NULL, ids = NULL, rsids = NULL, overwrite = FALSE, msize = 100, ncores = 1) } \arguments{ \item{Glist}{only provided if task="summary" or task="sparseld"} \item{task}{character specifying which task to perform ("prepare" is default, "summary", or "sparseld")} \item{study}{name of the study} \item{fnRAW}{path and filename of the binary file .raw or .bed used for storing genotypes on the disk} \item{fnLD}{path and filename of the binary files .ld for storing sparse ld matrix on the disk} \item{bedfiles}{vector of names for the PLINK bed-files} \item{bimfiles}{vector of names for the PLINK bim-files} \item{famfiles}{vector of names for the PLINK fam-files} \item{ids}{vector of individuals used in the study} \item{rsids}{vector of marker rsids used in the study} \item{overwrite}{logical if TRUE overwrite binary genotype file} \item{msize}{number of markers used in computation of sparseld} \item{ncores}{number of cores used to process the genotypes} } \value{ Returns a list structure (Glist) with information about genotypes } \description{ All functions in qgg rely on a simple data infrastructure that takes five main input sources; phenotype data (y), covariate data (X), genotype data (G or Glist), a genomic relationship matrix (GRM or GRMlist) and genetic marker sets (sets). The genotypes are stored in a matrix (n x m (individuals x markers)) in memory (G) or in a binary file on disk (Glist). It is only for small data sets that the genotype matrix (G) can be stored in memory. For large data sets the genotype matrix has to be stored in a binary file on disk (Glist). 
Glist is as a list structure that contains information about the genotypes in the binary file. The gprep function prepares the Glist, and is required for downstream analyses of large-scale genetic data. Typically, the Glist is prepared once, and saved as an *.Rdata-file. The gprep function reads genotype information from binary PLINK files, and creates the Glist object that contains general information about the genotypes such as reference alleles, allele frequencies and missing genotypes, and construct a binary file on the disk that contains the genotypes as allele counts of the alternative allele (memory usage = (n x m)/4 bytes). The gprep function can also be used to prepare sparse ld matrices. The r2 metric used is the pairwise correlation between markers (allele count alternative allele) in a specified region of the genome. The marker genotype is allele count of the alternative allele which is assumed to be centered and scaled. The Glist structure is used as input parameter for a number of qgg core functions including: 1) construction of genomic relationship matrices (grm), 2) construction of sparse ld matrices, 3) estimating genomic parameters (greml), 4) single marker association analyses (lma or mlma), 5) gene set enrichment analyses (gsea), and 6) genomic prediction from genotypes and phenotypes (gsolve) or genotypes and summary statistics (gscore). 
} \examples{ \dontrun{ # Download 1000G Plink files url <- "https://data.broadinstitute.org/alkesgroup/LDSCORE/1000G_Phase3_plinkfiles.tgz" download.file(url=url,dest="./1000G_Phase3_plinkfiles.tgz") cmd <- "tar -xvzf 1000G_Phase3_plinkfiles.tgz" system(cmd) # Prepare Glist bedfiles <- paste("./1000G_EUR_Phase3_plink/1000G.EUR.QC.",1:22,".bed",sep="") bimfiles <- paste("./1000G_EUR_Phase3_plink/1000G.EUR.QC.",1:22,".bim",sep="") famfiles <- paste("./1000G_EUR_Phase3_plink/1000G.EUR.QC.",1:22,".fam",sep="") fnRAW <- "./1000G.raw" Glist <- gprep(study="1000G", fnRAW=fnRAW, bedfiles=bedfiles, bimfiles=bimfiles, famfiles=famfiles, overwrite=TRUE) Glist <- gprep(Glist=Glist, task="summary") fnLD <- paste("./1000G_EUR_Phase3_plink/1000G.EUR.QC.",1:22,".ld",sep="") Glist <- gprep( task="sparseld", Glist=Glist, fnLD=fnLD, msize=200, ncores=4) } } \author{ Peter Soerensen }
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/vent.R \name{validate_vent} \alias{validate_vent} \title{vent validator} \usage{ validate_vent(dat) } \arguments{ \item{dat}{object} } \description{ validate an object of class vent }
/man/validate_vent.Rd
permissive
c1au6i0/rvent
R
false
true
262
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/vent.R \name{validate_vent} \alias{validate_vent} \title{vent validator} \usage{ validate_vent(dat) } \arguments{ \item{dat}{object} } \description{ validate an object of class vent }
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/buildPredictor_sparseGenetic.R \name{buildPredictor_sparseGenetic} \alias{buildPredictor_sparseGenetic} \title{Performs feature selection using multiple resamplings of the data} \usage{ buildPredictor_sparseGenetic( phenoDF, cnv_GR, predClass, group_GRList, outDir = tempdir(), numSplits = 3L, featScoreMax = 10L, filter_WtSum = 100L, enrichLabels = TRUE, enrichPthresh = 0.07, numPermsEnrich = 2500L, minEnr = -1, numCores = 1L, FS_numCores = NULL, ... ) } \arguments{ \item{phenoDF}{(data.frame) sample metadat. patient ID,STATUS} \item{cnv_GR}{(GRanges) genetic events. Must contain "ID" column mapping the event to a patient. ID must correspond to the ID column in phenoDF} \item{predClass}{(char) patient class to predict} \item{group_GRList}{(list) List of GRangesList indicating grouping rules for CNVs. For example, in a pathway-based design, each key value would be a pathway name, and the value would be a RangesList containing coordinates of the member genes} \item{outDir}{(char) path to dir where results should be stored. Results for resampling i are under \code{<outDir>/part<i>}, while predictor evaluation results are directly in \code{outDir}.} \item{numSplits}{(integer) number of data resamplings to use} \item{featScoreMax}{(integer) max score for features in feature selection} \item{filter_WtSum}{(numeric between 5-100) Limit to top-ranked networks such that cumulative weight is less than this parameter. e.g. 
If filter_WtSum=20, first order networks by decreasing weight; then keep those whose cumulative weight <= 20.} \item{enrichLabels}{(logical) if TRUE, applies label enrichment to train networks} \item{enrichPthresh}{(numeric between 0 and 1) networks with label enrichment p-value below this threshold pass enrichment} \item{numPermsEnrich}{(integer) number of permutations for label enrichment} \item{minEnr}{(integer -1 to 1) minEnr param in enrichLabelsNets()} \item{numCores}{(integer) num cores for parallel processing} \item{FS_numCores}{(integer) num cores for running GM. If NULL, is set to max(1,numCores-1). Set to a lower value if the default setting gives out-of-memory error. This may happen if networks are denser than expected} \item{...}{params for runFeatureSelection()} } \value{ (list) Predictor results 1) phenoDF (data.frame): subset of phenoDF provided as input, but limited to patients that have at least one event in the universe of possibilities e.g. if using pathway-level features, then this table excludes patients with zero CNVs in pathways 2) netmat (data.frame): Count of genetic events by patients (rows) in pathways (columns). Used as input to the feature selection algorithm 3) pathwayScores (list): pathway scores for each of the data splits. Each value in the list is a data.frame containing pathway names and scores. 4) enrichedNets (list): This entry is only found if enrichLabels is set to TRUE. It contains the vector of features that passed label enrichment in each split of the data. 5 - 9) Output of RR_featureTally: 5) cumulativeFeatScores: pathway name, cumulative score over N-way data resampling. 
6) performance_denAllNets: positive,negative calls at each cutoff: network score cutoff (score); num networks at cutoff (numPathways) ; total +, ground truth (pred_tot); + calls (pred_ol); + calls as pct of total (pred_pct); total -, ground truth (other_tot) ; - calls (other_ol) ; - calls as pct of total (other_pct) ; ratio of pred_pct and other_pct (rr) ; min. pred_pct in all resamplings (pred_pct_min) ; max pred_pct in all resamplings (pred_pct_max) ; min other_pct in all resamplings (other_pct_min); max other_pct in all resamplings (other_pct_max) 7) performance_denEnrichedNets: positive, negative calls at each cutoff label enrichment option: format same as performance_denAllNets. However, the denominator here is limited to patients present in networks that pass label enrichment 8) resamplingPerformance: breakdown of performance for each of the resamplings, at each of the cutoffs. This is a list of length 2, one for allNets and one for enrichedNets. The value is a matrix with (resamp * 7) columns and S rows, one row per score. The columns contain the following information per resampling: 1) pred_total: total num patients of predClass 2) pred_OL: num of pred_total with a CNV in the selected net 3) pred_OL_pct: 2) divided by 1) (percent) 4) other_total: total num patients of other class(non-predClass) 5) other_OL: num of other_total with CNV in selected net 6) other_OL_pct: 5) divided by 4) (percent) 7) relEnr: 6) divided by 3). } \description{ Performs feature selection using multiple resamplings of the data } \details{ This function is used for feature selection of patient networks, using multiple resamplings of input data. It is intended for use in the scenario where patient networks are sparse and binary. This function should be called after defining all patient networks. 
It performs the following steps: For i = 1..numSplits randomly split patients into training and test (optional) filter training networks to exclude random-like networks compile features into database for cross-validation score networks out of 10 end using test samples from all resamplings, measure predictor performance. In short, this function performs all steps involved in building and evaluating the predictor. } \examples{ suppressMessages(require(GenomicRanges)) suppressMessages(require(BiocFileCache)) # read CNV data phenoFile <- system.file("extdata","AGP1_CNV.txt",package="netDx") pheno <- read.delim(phenoFile,sep="\t",header=TRUE,as.is=TRUE) colnames(pheno)[1] <- "ID" pheno <- pheno[!duplicated(pheno$ID),] # create GRanges with patient CNVs cnv_GR <- GRanges(pheno$seqnames,IRanges(pheno$start,pheno$end), ID=pheno$ID,LOCUS_NAMES=pheno$Gene_symbols) # get gene coordinates geneURL <- paste("https://download.baderlab.org/netDx/", "supporting_data/refGene.hg18.bed",sep="") cache <- rappdirs::user_cache_dir(appname = "netDx") bfc <- BiocFileCache::BiocFileCache(cache,ask=FALSE) geneFile <- bfcrpath(bfc,geneURL) genes <- read.delim(geneFile,sep="\t",header=FALSE,as.is=TRUE) genes <- genes[which(genes[,4]!=""),] gene_GR <- GRanges(genes[,1],IRanges(genes[,2],genes[,3]), name=genes[,4]) # create GRangesList of pathways pathFile <- fetchPathwayDefinitions("February",2018,verbose=TRUE) pathwayList <- readPathways(pathFile) path_GRList <- mapNamedRangesToSets(gene_GR,pathwayList) #### uncomment to run - takes 5 min #out <- buildPredictor_sparseGenetic(pheno, cnv_GR, "case", # path_GRList,outDir, # numSplits=3L, featScoreMax=3L, # enrichLabels=TRUE,numPermsEnrich=20L, # numCores=1L) #summary(out) #head(out$cumulativeFeatScores) }
/man/buildPredictor_sparseGenetic.Rd
permissive
BaderLab/netDx
R
false
true
6,956
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/buildPredictor_sparseGenetic.R \name{buildPredictor_sparseGenetic} \alias{buildPredictor_sparseGenetic} \title{Performs feature selection using multiple resamplings of the data} \usage{ buildPredictor_sparseGenetic( phenoDF, cnv_GR, predClass, group_GRList, outDir = tempdir(), numSplits = 3L, featScoreMax = 10L, filter_WtSum = 100L, enrichLabels = TRUE, enrichPthresh = 0.07, numPermsEnrich = 2500L, minEnr = -1, numCores = 1L, FS_numCores = NULL, ... ) } \arguments{ \item{phenoDF}{(data.frame) sample metadat. patient ID,STATUS} \item{cnv_GR}{(GRanges) genetic events. Must contain "ID" column mapping the event to a patient. ID must correspond to the ID column in phenoDF} \item{predClass}{(char) patient class to predict} \item{group_GRList}{(list) List of GRangesList indicating grouping rules for CNVs. For example, in a pathway-based design, each key value would be a pathway name, and the value would be a RangesList containing coordinates of the member genes} \item{outDir}{(char) path to dir where results should be stored. Results for resampling i are under \code{<outDir>/part<i>}, while predictor evaluation results are directly in \code{outDir}.} \item{numSplits}{(integer) number of data resamplings to use} \item{featScoreMax}{(integer) max score for features in feature selection} \item{filter_WtSum}{(numeric between 5-100) Limit to top-ranked networks such that cumulative weight is less than this parameter. e.g. 
If filter_WtSum=20, first order networks by decreasing weight; then keep those whose cumulative weight <= 20.} \item{enrichLabels}{(logical) if TRUE, applies label enrichment to train networks} \item{enrichPthresh}{(numeric between 0 and 1) networks with label enrichment p-value below this threshold pass enrichment} \item{numPermsEnrich}{(integer) number of permutations for label enrichment} \item{minEnr}{(integer -1 to 1) minEnr param in enrichLabelsNets()} \item{numCores}{(integer) num cores for parallel processing} \item{FS_numCores}{(integer) num cores for running GM. If NULL, is set to max(1,numCores-1). Set to a lower value if the default setting gives out-of-memory error. This may happen if networks are denser than expected} \item{...}{params for runFeatureSelection()} } \value{ (list) Predictor results 1) phenoDF (data.frame): subset of phenoDF provided as input, but limited to patients that have at least one event in the universe of possibilities e.g. if using pathway-level features, then this table excludes patients with zero CNVs in pathways 2) netmat (data.frame): Count of genetic events by patients (rows) in pathways (columns). Used as input to the feature selection algorithm 3) pathwayScores (list): pathway scores for each of the data splits. Each value in the list is a data.frame containing pathway names and scores. 4) enrichedNets (list): This entry is only found if enrichLabels is set to TRUE. It contains the vector of features that passed label enrichment in each split of the data. 5 - 9) Output of RR_featureTally: 5) cumulativeFeatScores: pathway name, cumulative score over N-way data resampling. 
6) performance_denAllNets: positive,negative calls at each cutoff: network score cutoff (score); num networks at cutoff (numPathways) ; total +, ground truth (pred_tot); + calls (pred_ol); + calls as pct of total (pred_pct); total -, ground truth (other_tot) ; - calls (other_ol) ; - calls as pct of total (other_pct) ; ratio of pred_pct and other_pct (rr) ; min. pred_pct in all resamplings (pred_pct_min) ; max pred_pct in all resamplings (pred_pct_max) ; min other_pct in all resamplings (other_pct_min); max other_pct in all resamplings (other_pct_max) 7) performance_denEnrichedNets: positive, negative calls at each cutoff label enrichment option: format same as performance_denAllNets. However, the denominator here is limited to patients present in networks that pass label enrichment 8) resamplingPerformance: breakdown of performance for each of the resamplings, at each of the cutoffs. This is a list of length 2, one for allNets and one for enrichedNets. The value is a matrix with (resamp * 7) columns and S rows, one row per score. The columns contain the following information per resampling: 1) pred_total: total num patients of predClass 2) pred_OL: num of pred_total with a CNV in the selected net 3) pred_OL_pct: 2) divided by 1) (percent) 4) other_total: total num patients of other class(non-predClass) 5) other_OL: num of other_total with CNV in selected net 6) other_OL_pct: 5) divided by 4) (percent) 7) relEnr: 6) divided by 3). } \description{ Performs feature selection using multiple resamplings of the data } \details{ This function is used for feature selection of patient networks, using multiple resamplings of input data. It is intended for use in the scenario where patient networks are sparse and binary. This function should be called after defining all patient networks. 
It performs the following steps: For i = 1..numSplits randomly split patients into training and test (optional) filter training networks to exclude random-like networks compile features into database for cross-validation score networks out of 10 end using test samples from all resamplings, measure predictor performance. In short, this function performs all steps involved in building and evaluating the predictor. } \examples{ suppressMessages(require(GenomicRanges)) suppressMessages(require(BiocFileCache)) # read CNV data phenoFile <- system.file("extdata","AGP1_CNV.txt",package="netDx") pheno <- read.delim(phenoFile,sep="\t",header=TRUE,as.is=TRUE) colnames(pheno)[1] <- "ID" pheno <- pheno[!duplicated(pheno$ID),] # create GRanges with patient CNVs cnv_GR <- GRanges(pheno$seqnames,IRanges(pheno$start,pheno$end), ID=pheno$ID,LOCUS_NAMES=pheno$Gene_symbols) # get gene coordinates geneURL <- paste("https://download.baderlab.org/netDx/", "supporting_data/refGene.hg18.bed",sep="") cache <- rappdirs::user_cache_dir(appname = "netDx") bfc <- BiocFileCache::BiocFileCache(cache,ask=FALSE) geneFile <- bfcrpath(bfc,geneURL) genes <- read.delim(geneFile,sep="\t",header=FALSE,as.is=TRUE) genes <- genes[which(genes[,4]!=""),] gene_GR <- GRanges(genes[,1],IRanges(genes[,2],genes[,3]), name=genes[,4]) # create GRangesList of pathways pathFile <- fetchPathwayDefinitions("February",2018,verbose=TRUE) pathwayList <- readPathways(pathFile) path_GRList <- mapNamedRangesToSets(gene_GR,pathwayList) #### uncomment to run - takes 5 min #out <- buildPredictor_sparseGenetic(pheno, cnv_GR, "case", # path_GRList,outDir, # numSplits=3L, featScoreMax=3L, # enrichLabels=TRUE,numPermsEnrich=20L, # numCores=1L) #summary(out) #head(out$cumulativeFeatScores) }
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/bwb_rain_data_high.R \name{readYearlyRainHeightsFromOneCsvFile} \alias{readYearlyRainHeightsFromOneCsvFile} \title{Read Yearly Rain Heights From One Csv File} \usage{ readYearlyRainHeightsFromOneCsvFile(csv, sep = ",", dateformat = "\%d/\%m/\%Y") } \arguments{ \item{csv}{full path to csv file} \item{sep}{column separator. default: comma ","} \item{dateformat}{date format, default: "\%d/\%m/\%Y"} } \value{ data frame with columns... } \description{ reads a CSV file containing daily rain heights as provided by BWB (Mario Grunwald). Example filename: "Niederschlaege_1994__BERICHT.csv". The files are expected to contain a header of three rows (first row: DATUM/station names, second row: variable name ["Regenhoehe" or "Regendauer"], third row: unit ["in mm" or "in mm"]) }
/man/readYearlyRainHeightsFromOneCsvFile.Rd
permissive
KWB-R/kwb.read
R
false
true
862
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/bwb_rain_data_high.R \name{readYearlyRainHeightsFromOneCsvFile} \alias{readYearlyRainHeightsFromOneCsvFile} \title{Read Yearly Rain Heights From One Csv File} \usage{ readYearlyRainHeightsFromOneCsvFile(csv, sep = ",", dateformat = "\%d/\%m/\%Y") } \arguments{ \item{csv}{full path to csv file} \item{sep}{column separator. default: comma ","} \item{dateformat}{date format, default: "\%d/\%m/\%Y"} } \value{ data frame with columns... } \description{ reads a CSV file containing daily rain heights as provided by BWB (Mario Grunwald). Example filename: "Niederschlaege_1994__BERICHT.csv". The files are expected to contain a header of three rows (first row: DATUM/station names, second row: variable name ["Regenhoehe" or "Regendauer"], third row: unit ["in mm" or "in mm"]) }
\name{BootstrapSmacof} \alias{BootstrapSmacof} %- Also NEED an '\alias' for EACH other topic documented here. \title{ Bootstrap on the distance matrices used for MDS with Smacof } \description{ Obtains bootstrap replicates of a distance matrix using random samples or permutations of a distance matrix. The objective is to estimate the sampling variability of the results of the Smacof algorithm. } \usage{ BootstrapSmacof(D, W=NULL, Model=c("Identity", "Ratio", "Interval", "Ordinal"), dimsol=2, maxiter=100, maxerror=0.000001, StandardizeDisparities=TRUE, ShowIter=TRUE, nB=200, ProcrustesRot=TRUE, method=c("Sampling", "Permutation")) } %- maybe also 'usage' for other objects documented here. \arguments{ \item{D}{ A distance matrix } \item{W}{ A diagonal matrix containing weights for the rows of D } \item{Model}{ Measurement level of the distances } \item{dimsol}{ Dimension of the solution } \item{maxiter}{ Maximum number of iterations for the smacof algorithm } \item{maxerror}{ Tolerance for the smacof algorithm } \item{StandardizeDisparities}{ Should the disparities be standardized in the smacof algorithm? } \item{ShowIter}{ Should the information on each iteration be printed on the screen? } \item{nB}{ Number of Bootstrap replications } \item{ProcrustesRot}{ Should each replication be rotated to match the initial solution? } \item{method}{ The replications are obtained by "Sampling" or "Permutation" of the residuals. } } \details{ The function calculates bootstrap confidence intervals for coordinates and different stress measures using a distance matrix as a basis. The function uses random sampling or permutations of the residuals to obtain the bootstrap replications. The procedure preserves the length of the points in the multidimensional space, perturbing only the angles among the vectors. It is done so to preserve the property of positiveness of the diagonal elements of the scalar product matrices. 
The procedure may result in a scalar product that does not have a Euclidean configuration and then has some negative eigenvalues; to avoid this problem the negative eigenvalues are removed to approximate the perturbed matrix by the closest with the required properties. It is well known that the eigenvectors of a matrix are unique except for reflections, that is, if we change the sign of each component of the eigenvector we have the same solution. If that happens, an unwanted increase in the variability due to this artifact may invalidate the results. To avoid this we can calculate the scalar product of each eigenvector of the initial matrix with the corresponding eigenvector of the bootstrap replicate and change the signs of the latter if the result is negative. Another artifact of the procedure may arise when the dimension of the solution is higher than 1 because the eigenvectors of a replicate may generate the same subspace although they are not in the same directions, i.e., the subspace is referred to a different system. That also may produce an unwanted increase of the variability that invalidates the results. To avoid this, every replicate may be rotated to match as much as possible the subspace generated by the eigenvectors of the initial matrix. This is done by Procrustes Analysis, taking the rotated matrix as solution. The solution to this problem is also a solution to the reflection problem, so only this problem is considered. } \value{ Returns an object of class "PCoABootstrap" with the information for each bootstrap replication. 
\item{Info}{Information about the procedure} \item{InitialDistance}{Initial distance} \item{RawStress}{A vector containing the raw stress for all the bootstrap replicates} \item{stress1}{A vector containing the value of the stress1 formula for all the bootstrap replicates} \item{stress2}{A vector containing the value of the stress2 formula for all the bootstrap replicates} \item{sstress1}{A vector containing the value of the sstress1 formula for all the bootstrap replicates} \item{sstress2}{A vector containing the value of the sstress2 formula for all the bootstrap replicates} \item{Coordinates}{A list with a component for each object. A component contains the coordinates of an object for all the bootstrap replicates (in columns)} \item{NReplicates}{Number of bootstrap replicates} } \references{ Efron, B.; Tibshirani, RJ. (1993). An introduction to the bootstrap. New York: Chapman and Hall. 436p. Ringrose, T. J. (1992). Bootstrapping and correspondence analysis in archaeology. Journal of Archaeological Science, 19(6), 615-629. MILAN, L., & WHITTAKER, J. (1995). Application of the parametric bootstrap to models that incorporate a singular value decomposition. Applied statistics, 44(1), 31-49. Jacoby, W. G., & Armstrong, D. A. (2014). Bootstrap Confidence Regions for Multidimensional Scaling Solutions. American Journal of Political Science, 58(1), 264-278. } \author{ Jose L. Vicente-Villardon } \seealso{ \code{\link{BootstrapScalar}} } \examples{ \donttest{data(spiders) D=BinaryProximities(spiders, coefficient="Jaccard", transformation="sqrt(1-S)") DB=BootstrapDistance(D$Proximities)} }
/man/BootstrapSmacof.Rd
no_license
villardon/MultBiplotR
R
false
false
5,199
rd
\name{BootstrapSmacof} \alias{BootstrapSmacof} %- Also NEED an '\alias' for EACH other topic documented here. \title{ Bootstrap on the distance matrices used for MDS with Smacof } \description{ Obtains bootstrap replicates of a distance matrix using random samples or permutations of a distance matrix. The objective is to estimate the sampling variability of the results of the Smacof algorithm. } \usage{ BootstrapSmacof(D, W=NULL, Model=c("Identity", "Ratio", "Interval", "Ordinal"), dimsol=2, maxiter=100, maxerror=0.000001, StandardizeDisparities=TRUE, ShowIter=TRUE, nB=200, ProcrustesRot=TRUE, method=c("Sampling", "Permutation")) } %- maybe also 'usage' for other objects documented here. \arguments{ \item{D}{ A distance matrix } \item{W}{ A diagonal matrix containing weights for the rows of D } \item{Model}{ Measurement level of the distances } \item{dimsol}{ Dimension of the solution } \item{maxiter}{ Maximum number of iterations for the smacof algorithm } \item{maxerror}{ Tolerance for the smacof algorithm } \item{StandardizeDisparities}{ Should the disparities be standardized in the smacof algorithm? } \item{ShowIter}{ Should the information on each iteration be printed on the screen? } \item{nB}{ Number of Bootstrap replications } \item{ProcrustesRot}{ Should each replication be rotated to match the initial solution? } \item{method}{ The replications are obtained by "Sampling" or "Permutation" of the residuals. } } \details{ The function calculates bootstrap confidence intervals for coordinates and different stress measures using a distance matrix as a basis. The function uses random sampling or permutations of the residuals to obtain the bootstrap replications. The procedure preserves the length of the points in the multidimensional space, perturbing only the angles among the vectors. It is done so to preserve the property of positiveness of the diagonal elements of the scalar product matrices. 
The procedure may result in a scalar product that does not have a Euclidean configuration and then has some negative eigenvalues; to avoid this problem the negative eigenvalues are removed to approximate the perturbed matrix by the closest with the required properties. It is well known that the eigenvectors of a matrix are unique except for reflections, that is, if we change the sign of each component of the eigenvector we have the same solution. If that happens, an unwanted increase in the variability due to this artifact may invalidate the results. To avoid this we can calculate the scalar product of each eigenvector of the initial matrix with the corresponding eigenvector of the bootstrap replicate and change the signs of the latter if the result is negative. Another artifact of the procedure may arise when the dimension of the solution is higher than 1 because the eigenvectors of a replicate may generate the same subspace although they are not in the same directions, i.e., the subspace is referred to a different system. That also may produce an unwanted increase of the variability that invalidates the results. To avoid this, every replicate may be rotated to match as much as possible the subspace generated by the eigenvectors of the initial matrix. This is done by Procrustes Analysis, taking the rotated matrix as solution. The solution to this problem is also a solution to the reflection problem, so only this problem is considered. } \value{ Returns an object of class "PCoABootstrap" with the information for each bootstrap replication. 
\item{Info}{Information about the procedure}
\item{InitialDistance}{Initial distance}
\item{RawStress}{A vector containing the raw stress for all the bootstrap replicates}
\item{stress1}{A vector containing the value of the stress1 formula for all the bootstrap replicates}
\item{stress2}{A vector containing the value of the stress2 formula for all the bootstrap replicates}
\item{sstress1}{A vector containing the value of the sstress1 formula for all the bootstrap replicates}
\item{sstress2}{A vector containing the value of the sstress2 formula for all the bootstrap replicates}
\item{Coordinates}{A list with a component for each object. A component contains the coordinates of an object for all the bootstrap replicates (in columns)}
\item{NReplicates}{Number of bootstrap replicates}
}
\references{
Efron, B.; Tibshirani, RJ. (1993). An introduction to the bootstrap. New York: Chapman and Hall. 436p.

Ringrose, T. J. (1992). Bootstrapping and correspondence analysis in archaeology. Journal of Archaeological Science, 19(6), 615-629.

Milan, L., & Whittaker, J. (1995). Application of the parametric bootstrap to models that incorporate a singular value decomposition. Applied statistics, 44(1), 31-49.

Jacoby, W. G., & Armstrong, D. A. (2014). Bootstrap Confidence Regions for Multidimensional Scaling Solutions. American Journal of Political Science, 58(1), 264-278.
}
\author{ Jose L. Vicente-Villardon }
\seealso{ \code{\link{BootstrapScalar}} }
\examples{
\donttest{data(spiders)
D=BinaryProximities(spiders, coefficient="Jaccard", transformation="sqrt(1-S)")
DB=BootstrapSmacof(D$Proximities)}
}
# Test runner: executes the testthat suite for the drawProteins package.
# Run by R CMD check / devtools::test(); discovers tests under tests/testthat/.
library(testthat)
library(drawProteins)

test_check("drawProteins")
/tests/testthat.R
permissive
brennanpincardiff/drawProteins
R
false
false
68
r
# Test runner: executes the testthat suite for the drawProteins package.
# Run by R CMD check / devtools::test(); discovers tests under tests/testthat/.
library(testthat)
library(drawProteins)

test_check("drawProteins")
# This file is generated by make.paws. Please do not edit here. #' @importFrom paws.common new_handlers new_service set_config merge_config NULL #' Amazon EC2 Container Registry #' #' @description #' Amazon Elastic Container Registry #' #' Amazon Elastic Container Registry (Amazon ECR) is a managed container #' image registry service. Customers can use the familiar Docker CLI, or #' their preferred client, to push, pull, and manage images. Amazon ECR #' provides a secure, scalable, and reliable registry for your Docker or #' Open Container Initiative (OCI) images. Amazon ECR supports private #' repositories with resource-based permissions using IAM so that specific #' users or Amazon EC2 instances can access repositories and images. #' #' Amazon ECR has service endpoints in each supported Region. For more #' information, see [Amazon ECR #' endpoints](https://docs.aws.amazon.com/general/latest/gr/ecr.html) in #' the *Amazon Web Services General Reference*. #' #' @param #' config #' Optional configuration of credentials, endpoint, and/or region. #' \itemize{ #' \item{\strong{credentials}:} {\itemize{ #' \item{\strong{creds}:} {\itemize{ #' \item{\strong{access_key_id}:} {AWS access key ID} #' \item{\strong{secret_access_key}:} {AWS secret access key} #' \item{\strong{session_token}:} {AWS temporary session token} #' }} #' \item{\strong{profile}:} {The name of a profile to use. If not given, then the default profile is used.} #' \item{\strong{anonymous}:} {Set anonymous credentials.} #' \item{\strong{endpoint}:} {The complete URL to use for the constructed client.} #' \item{\strong{region}:} {The AWS Region used in instantiating the client.} #' }} #' \item{\strong{close_connection}:} {Immediately close all HTTP connections.} #' \item{\strong{timeout}:} {The time in seconds till a timeout exception is thrown when attempting to make a connection. 
The default is 60 seconds.} #' \item{\strong{s3_force_path_style}:} {Set this to `true` to force the request to use path-style addressing, i.e. `http://s3.amazonaws.com/BUCKET/KEY`.} #' \item{\strong{sts_regional_endpoint}:} {Set sts regional endpoint resolver to regional or legacy \url{https://docs.aws.amazon.com/sdkref/latest/guide/feature-sts-regionalized-endpoints.html}} #' } #' @param #' credentials #' Optional credentials shorthand for the config parameter #' \itemize{ #' \item{\strong{creds}:} {\itemize{ #' \item{\strong{access_key_id}:} {AWS access key ID} #' \item{\strong{secret_access_key}:} {AWS secret access key} #' \item{\strong{session_token}:} {AWS temporary session token} #' }} #' \item{\strong{profile}:} {The name of a profile to use. If not given, then the default profile is used.} #' \item{\strong{anonymous}:} {Set anonymous credentials.} #' } #' @param #' endpoint #' Optional shorthand for complete URL to use for the constructed client. #' @param #' region #' Optional shorthand for AWS Region used in instantiating the client. #' #' @section Service syntax: #' ``` #' svc <- ecr( #' config = list( #' credentials = list( #' creds = list( #' access_key_id = "string", #' secret_access_key = "string", #' session_token = "string" #' ), #' profile = "string", #' anonymous = "logical" #' ), #' endpoint = "string", #' region = "string", #' close_connection = "logical", #' timeout = "numeric", #' s3_force_path_style = "logical", #' sts_regional_endpoint = "string" #' ), #' credentials = list( #' creds = list( #' access_key_id = "string", #' secret_access_key = "string", #' session_token = "string" #' ), #' profile = "string", #' anonymous = "logical" #' ), #' endpoint = "string", #' region = "string" #' ) #' ``` #' #' @examples #' \dontrun{ #' svc <- ecr() #' # This example deletes images with the tags precise and trusty in a #' # repository called ubuntu in the default registry for an account. 
#' svc$batch_delete_image( #' imageIds = list( #' list( #' imageTag = "precise" #' ) #' ), #' repositoryName = "ubuntu" #' ) #' } #' #' @section Operations: #' \tabular{ll}{ #' \link[=ecr_batch_check_layer_availability]{batch_check_layer_availability} \tab Checks the availability of one or more image layers in a repository\cr #' \link[=ecr_batch_delete_image]{batch_delete_image} \tab Deletes a list of specified images within a repository\cr #' \link[=ecr_batch_get_image]{batch_get_image} \tab Gets detailed information for an image\cr #' \link[=ecr_batch_get_repository_scanning_configuration]{batch_get_repository_scanning_configuration} \tab Gets the scanning configuration for one or more repositories\cr #' \link[=ecr_complete_layer_upload]{complete_layer_upload} \tab Informs Amazon ECR that the image layer upload has completed for a specified registry, repository name, and upload ID\cr #' \link[=ecr_create_pull_through_cache_rule]{create_pull_through_cache_rule} \tab Creates a pull through cache rule\cr #' \link[=ecr_create_repository]{create_repository} \tab Creates a repository\cr #' \link[=ecr_delete_lifecycle_policy]{delete_lifecycle_policy} \tab Deletes the lifecycle policy associated with the specified repository\cr #' \link[=ecr_delete_pull_through_cache_rule]{delete_pull_through_cache_rule} \tab Deletes a pull through cache rule\cr #' \link[=ecr_delete_registry_policy]{delete_registry_policy} \tab Deletes the registry permissions policy\cr #' \link[=ecr_delete_repository]{delete_repository} \tab Deletes a repository\cr #' \link[=ecr_delete_repository_policy]{delete_repository_policy} \tab Deletes the repository policy associated with the specified repository\cr #' \link[=ecr_describe_image_replication_status]{describe_image_replication_status} \tab Returns the replication status for a specified image\cr #' \link[=ecr_describe_images]{describe_images} \tab Returns metadata about the images in a repository\cr #' 
\link[=ecr_describe_image_scan_findings]{describe_image_scan_findings} \tab Returns the scan findings for the specified image\cr #' \link[=ecr_describe_pull_through_cache_rules]{describe_pull_through_cache_rules} \tab Returns the pull through cache rules for a registry\cr #' \link[=ecr_describe_registry]{describe_registry} \tab Describes the settings for a registry\cr #' \link[=ecr_describe_repositories]{describe_repositories} \tab Describes image repositories in a registry\cr #' \link[=ecr_get_authorization_token]{get_authorization_token} \tab Retrieves an authorization token\cr #' \link[=ecr_get_download_url_for_layer]{get_download_url_for_layer} \tab Retrieves the pre-signed Amazon S3 download URL corresponding to an image layer\cr #' \link[=ecr_get_lifecycle_policy]{get_lifecycle_policy} \tab Retrieves the lifecycle policy for the specified repository\cr #' \link[=ecr_get_lifecycle_policy_preview]{get_lifecycle_policy_preview} \tab Retrieves the results of the lifecycle policy preview request for the specified repository\cr #' \link[=ecr_get_registry_policy]{get_registry_policy} \tab Retrieves the permissions policy for a registry\cr #' \link[=ecr_get_registry_scanning_configuration]{get_registry_scanning_configuration} \tab Retrieves the scanning configuration for a registry\cr #' \link[=ecr_get_repository_policy]{get_repository_policy} \tab Retrieves the repository policy for the specified repository\cr #' \link[=ecr_initiate_layer_upload]{initiate_layer_upload} \tab Notifies Amazon ECR that you intend to upload an image layer\cr #' \link[=ecr_list_images]{list_images} \tab Lists all the image IDs for the specified repository\cr #' \link[=ecr_list_tags_for_resource]{list_tags_for_resource} \tab List the tags for an Amazon ECR resource\cr #' \link[=ecr_put_image]{put_image} \tab Creates or updates the image manifest and tags associated with an image\cr #' \link[=ecr_put_image_scanning_configuration]{put_image_scanning_configuration} \tab The 
PutImageScanningConfiguration API is being deprecated, in favor of specifying the image scanning configuration at the registry level\cr #' \link[=ecr_put_image_tag_mutability]{put_image_tag_mutability} \tab Updates the image tag mutability settings for the specified repository\cr #' \link[=ecr_put_lifecycle_policy]{put_lifecycle_policy} \tab Creates or updates the lifecycle policy for the specified repository\cr #' \link[=ecr_put_registry_policy]{put_registry_policy} \tab Creates or updates the permissions policy for your registry\cr #' \link[=ecr_put_registry_scanning_configuration]{put_registry_scanning_configuration} \tab Creates or updates the scanning configuration for your private registry\cr #' \link[=ecr_put_replication_configuration]{put_replication_configuration} \tab Creates or updates the replication configuration for a registry\cr #' \link[=ecr_set_repository_policy]{set_repository_policy} \tab Applies a repository policy to the specified repository to control access permissions\cr #' \link[=ecr_start_image_scan]{start_image_scan} \tab Starts an image vulnerability scan\cr #' \link[=ecr_start_lifecycle_policy_preview]{start_lifecycle_policy_preview} \tab Starts a preview of a lifecycle policy for the specified repository\cr #' \link[=ecr_tag_resource]{tag_resource} \tab Adds specified tags to a resource with the specified ARN\cr #' \link[=ecr_untag_resource]{untag_resource} \tab Deletes specified tags from a resource\cr #' \link[=ecr_upload_layer_part]{upload_layer_part} \tab Uploads an image layer part to Amazon ECR #' } #' #' @return #' A client for the service. You can call the service's operations using #' syntax like `svc$operation(...)`, where `svc` is the name you've assigned #' to the client. The available operations are listed in the #' Operations section. 
#'
#' @rdname ecr
#' @export
ecr <- function(config = list(), credentials = list(), endpoint = NULL, region = NULL) {
  # Fold the shorthand arguments (credentials/endpoint/region) into `config`;
  # explicit entries in `config` take part in the merge performed by
  # paws.common::merge_config.
  config <- merge_config(
    config,
    list(
      credentials = credentials,
      endpoint = endpoint,
      region = region
    )
  )
  # Attach the resolved configuration to the generated operation set and
  # return it as the service client.
  svc <- .ecr$operations
  svc <- set_config(svc, config)
  return(svc)
}

# Private API objects: metadata, handlers, interfaces, etc.

# NOTE(review): `.ecr$operations` is empty in this view; in the full paws
# package the generated operation functions are attached in sibling
# generated sources — verify before relying on it here.
.ecr <- list()

.ecr$operations <- list()

# Service metadata: per-partition endpoint templates ("{region}" is
# substituted at request time), protocol versions, and the JSON-RPC
# target prefix sent in request headers.
.ecr$metadata <- list(
  service_name = "ecr",
  endpoints = list("*" = list(endpoint = "api.ecr.{region}.amazonaws.com", global = FALSE), "cn-*" = list(endpoint = "api.ecr.{region}.amazonaws.com.cn", global = FALSE), "us-iso-*" = list(endpoint = "api.ecr.{region}.c2s.ic.gov", global = FALSE), "us-isob-*" = list(endpoint = "api.ecr.{region}.sc2s.sgov.gov", global = FALSE)),
  service_id = "ECR",
  api_version = "2015-09-21",
  signing_name = "ecr",
  json_version = "1.1",
  target_prefix = "AmazonEC2ContainerRegistry_V20150921"
)

# Construct the low-level service object: JSON-RPC protocol handlers with
# SigV4 request signing.
.ecr$service <- function(config = list()) {
  handlers <- new_handlers("jsonrpc", "v4")
  new_service(.ecr$metadata, handlers, config)
}
/cran/paws.compute/R/ecr_service.R
permissive
paws-r/paws
R
false
false
10,853
r
# This file is generated by make.paws. Please do not edit here. #' @importFrom paws.common new_handlers new_service set_config merge_config NULL #' Amazon EC2 Container Registry #' #' @description #' Amazon Elastic Container Registry #' #' Amazon Elastic Container Registry (Amazon ECR) is a managed container #' image registry service. Customers can use the familiar Docker CLI, or #' their preferred client, to push, pull, and manage images. Amazon ECR #' provides a secure, scalable, and reliable registry for your Docker or #' Open Container Initiative (OCI) images. Amazon ECR supports private #' repositories with resource-based permissions using IAM so that specific #' users or Amazon EC2 instances can access repositories and images. #' #' Amazon ECR has service endpoints in each supported Region. For more #' information, see [Amazon ECR #' endpoints](https://docs.aws.amazon.com/general/latest/gr/ecr.html) in #' the *Amazon Web Services General Reference*. #' #' @param #' config #' Optional configuration of credentials, endpoint, and/or region. #' \itemize{ #' \item{\strong{credentials}:} {\itemize{ #' \item{\strong{creds}:} {\itemize{ #' \item{\strong{access_key_id}:} {AWS access key ID} #' \item{\strong{secret_access_key}:} {AWS secret access key} #' \item{\strong{session_token}:} {AWS temporary session token} #' }} #' \item{\strong{profile}:} {The name of a profile to use. If not given, then the default profile is used.} #' \item{\strong{anonymous}:} {Set anonymous credentials.} #' \item{\strong{endpoint}:} {The complete URL to use for the constructed client.} #' \item{\strong{region}:} {The AWS Region used in instantiating the client.} #' }} #' \item{\strong{close_connection}:} {Immediately close all HTTP connections.} #' \item{\strong{timeout}:} {The time in seconds till a timeout exception is thrown when attempting to make a connection. 
The default is 60 seconds.} #' \item{\strong{s3_force_path_style}:} {Set this to `true` to force the request to use path-style addressing, i.e. `http://s3.amazonaws.com/BUCKET/KEY`.} #' \item{\strong{sts_regional_endpoint}:} {Set sts regional endpoint resolver to regional or legacy \url{https://docs.aws.amazon.com/sdkref/latest/guide/feature-sts-regionalized-endpoints.html}} #' } #' @param #' credentials #' Optional credentials shorthand for the config parameter #' \itemize{ #' \item{\strong{creds}:} {\itemize{ #' \item{\strong{access_key_id}:} {AWS access key ID} #' \item{\strong{secret_access_key}:} {AWS secret access key} #' \item{\strong{session_token}:} {AWS temporary session token} #' }} #' \item{\strong{profile}:} {The name of a profile to use. If not given, then the default profile is used.} #' \item{\strong{anonymous}:} {Set anonymous credentials.} #' } #' @param #' endpoint #' Optional shorthand for complete URL to use for the constructed client. #' @param #' region #' Optional shorthand for AWS Region used in instantiating the client. #' #' @section Service syntax: #' ``` #' svc <- ecr( #' config = list( #' credentials = list( #' creds = list( #' access_key_id = "string", #' secret_access_key = "string", #' session_token = "string" #' ), #' profile = "string", #' anonymous = "logical" #' ), #' endpoint = "string", #' region = "string", #' close_connection = "logical", #' timeout = "numeric", #' s3_force_path_style = "logical", #' sts_regional_endpoint = "string" #' ), #' credentials = list( #' creds = list( #' access_key_id = "string", #' secret_access_key = "string", #' session_token = "string" #' ), #' profile = "string", #' anonymous = "logical" #' ), #' endpoint = "string", #' region = "string" #' ) #' ``` #' #' @examples #' \dontrun{ #' svc <- ecr() #' # This example deletes images with the tags precise and trusty in a #' # repository called ubuntu in the default registry for an account. 
#' svc$batch_delete_image( #' imageIds = list( #' list( #' imageTag = "precise" #' ) #' ), #' repositoryName = "ubuntu" #' ) #' } #' #' @section Operations: #' \tabular{ll}{ #' \link[=ecr_batch_check_layer_availability]{batch_check_layer_availability} \tab Checks the availability of one or more image layers in a repository\cr #' \link[=ecr_batch_delete_image]{batch_delete_image} \tab Deletes a list of specified images within a repository\cr #' \link[=ecr_batch_get_image]{batch_get_image} \tab Gets detailed information for an image\cr #' \link[=ecr_batch_get_repository_scanning_configuration]{batch_get_repository_scanning_configuration} \tab Gets the scanning configuration for one or more repositories\cr #' \link[=ecr_complete_layer_upload]{complete_layer_upload} \tab Informs Amazon ECR that the image layer upload has completed for a specified registry, repository name, and upload ID\cr #' \link[=ecr_create_pull_through_cache_rule]{create_pull_through_cache_rule} \tab Creates a pull through cache rule\cr #' \link[=ecr_create_repository]{create_repository} \tab Creates a repository\cr #' \link[=ecr_delete_lifecycle_policy]{delete_lifecycle_policy} \tab Deletes the lifecycle policy associated with the specified repository\cr #' \link[=ecr_delete_pull_through_cache_rule]{delete_pull_through_cache_rule} \tab Deletes a pull through cache rule\cr #' \link[=ecr_delete_registry_policy]{delete_registry_policy} \tab Deletes the registry permissions policy\cr #' \link[=ecr_delete_repository]{delete_repository} \tab Deletes a repository\cr #' \link[=ecr_delete_repository_policy]{delete_repository_policy} \tab Deletes the repository policy associated with the specified repository\cr #' \link[=ecr_describe_image_replication_status]{describe_image_replication_status} \tab Returns the replication status for a specified image\cr #' \link[=ecr_describe_images]{describe_images} \tab Returns metadata about the images in a repository\cr #' 
\link[=ecr_describe_image_scan_findings]{describe_image_scan_findings} \tab Returns the scan findings for the specified image\cr #' \link[=ecr_describe_pull_through_cache_rules]{describe_pull_through_cache_rules} \tab Returns the pull through cache rules for a registry\cr #' \link[=ecr_describe_registry]{describe_registry} \tab Describes the settings for a registry\cr #' \link[=ecr_describe_repositories]{describe_repositories} \tab Describes image repositories in a registry\cr #' \link[=ecr_get_authorization_token]{get_authorization_token} \tab Retrieves an authorization token\cr #' \link[=ecr_get_download_url_for_layer]{get_download_url_for_layer} \tab Retrieves the pre-signed Amazon S3 download URL corresponding to an image layer\cr #' \link[=ecr_get_lifecycle_policy]{get_lifecycle_policy} \tab Retrieves the lifecycle policy for the specified repository\cr #' \link[=ecr_get_lifecycle_policy_preview]{get_lifecycle_policy_preview} \tab Retrieves the results of the lifecycle policy preview request for the specified repository\cr #' \link[=ecr_get_registry_policy]{get_registry_policy} \tab Retrieves the permissions policy for a registry\cr #' \link[=ecr_get_registry_scanning_configuration]{get_registry_scanning_configuration} \tab Retrieves the scanning configuration for a registry\cr #' \link[=ecr_get_repository_policy]{get_repository_policy} \tab Retrieves the repository policy for the specified repository\cr #' \link[=ecr_initiate_layer_upload]{initiate_layer_upload} \tab Notifies Amazon ECR that you intend to upload an image layer\cr #' \link[=ecr_list_images]{list_images} \tab Lists all the image IDs for the specified repository\cr #' \link[=ecr_list_tags_for_resource]{list_tags_for_resource} \tab List the tags for an Amazon ECR resource\cr #' \link[=ecr_put_image]{put_image} \tab Creates or updates the image manifest and tags associated with an image\cr #' \link[=ecr_put_image_scanning_configuration]{put_image_scanning_configuration} \tab The 
PutImageScanningConfiguration API is being deprecated, in favor of specifying the image scanning configuration at the registry level\cr #' \link[=ecr_put_image_tag_mutability]{put_image_tag_mutability} \tab Updates the image tag mutability settings for the specified repository\cr #' \link[=ecr_put_lifecycle_policy]{put_lifecycle_policy} \tab Creates or updates the lifecycle policy for the specified repository\cr #' \link[=ecr_put_registry_policy]{put_registry_policy} \tab Creates or updates the permissions policy for your registry\cr #' \link[=ecr_put_registry_scanning_configuration]{put_registry_scanning_configuration} \tab Creates or updates the scanning configuration for your private registry\cr #' \link[=ecr_put_replication_configuration]{put_replication_configuration} \tab Creates or updates the replication configuration for a registry\cr #' \link[=ecr_set_repository_policy]{set_repository_policy} \tab Applies a repository policy to the specified repository to control access permissions\cr #' \link[=ecr_start_image_scan]{start_image_scan} \tab Starts an image vulnerability scan\cr #' \link[=ecr_start_lifecycle_policy_preview]{start_lifecycle_policy_preview} \tab Starts a preview of a lifecycle policy for the specified repository\cr #' \link[=ecr_tag_resource]{tag_resource} \tab Adds specified tags to a resource with the specified ARN\cr #' \link[=ecr_untag_resource]{untag_resource} \tab Deletes specified tags from a resource\cr #' \link[=ecr_upload_layer_part]{upload_layer_part} \tab Uploads an image layer part to Amazon ECR #' } #' #' @return #' A client for the service. You can call the service's operations using #' syntax like `svc$operation(...)`, where `svc` is the name you've assigned #' to the client. The available operations are listed in the #' Operations section. 
#'
#' @rdname ecr
#' @export
ecr <- function(config = list(), credentials = list(), endpoint = NULL, region = NULL) {
  # Fold the shorthand arguments (credentials/endpoint/region) into `config`;
  # explicit entries in `config` take part in the merge performed by
  # paws.common::merge_config.
  config <- merge_config(
    config,
    list(
      credentials = credentials,
      endpoint = endpoint,
      region = region
    )
  )
  # Attach the resolved configuration to the generated operation set and
  # return it as the service client.
  svc <- .ecr$operations
  svc <- set_config(svc, config)
  return(svc)
}

# Private API objects: metadata, handlers, interfaces, etc.

# NOTE(review): `.ecr$operations` is empty in this view; in the full paws
# package the generated operation functions are attached in sibling
# generated sources — verify before relying on it here.
.ecr <- list()

.ecr$operations <- list()

# Service metadata: per-partition endpoint templates ("{region}" is
# substituted at request time), protocol versions, and the JSON-RPC
# target prefix sent in request headers.
.ecr$metadata <- list(
  service_name = "ecr",
  endpoints = list("*" = list(endpoint = "api.ecr.{region}.amazonaws.com", global = FALSE), "cn-*" = list(endpoint = "api.ecr.{region}.amazonaws.com.cn", global = FALSE), "us-iso-*" = list(endpoint = "api.ecr.{region}.c2s.ic.gov", global = FALSE), "us-isob-*" = list(endpoint = "api.ecr.{region}.sc2s.sgov.gov", global = FALSE)),
  service_id = "ECR",
  api_version = "2015-09-21",
  signing_name = "ecr",
  json_version = "1.1",
  target_prefix = "AmazonEC2ContainerRegistry_V20150921"
)

# Construct the low-level service object: JSON-RPC protocol handlers with
# SigV4 request signing.
.ecr$service <- function(config = list()) {
  handlers <- new_handlers("jsonrpc", "v4")
  new_service(.ecr$metadata, handlers, config)
}
# Auto-extracted example code for taxa's internal `data_used` method.
library(taxa)

### Name: data_used
### Title: Get values of data used in expressions
### Aliases: data_used
### Keywords: internal

### ** Examples

# Get values for variable names used in expressions.
# NOTE(review): `ex_taxmap`, `n_legs`, `dangerous`, `taxon_names`, and
# `invalid_expression` are expected to come from taxa's bundled example
# data — verify they exist in the attached package environment.
ex_taxmap$data_used(n_legs + dangerous == invalid_expression)
ex_taxmap$data_used(length(unique(taxon_names)))
/data/genthat_extracted_code/taxa/examples/data_used.Rd.R
no_license
surayaaramli/typeRrh
R
false
false
318
r
# Auto-extracted example code for taxa's internal `data_used` method.
library(taxa)

### Name: data_used
### Title: Get values of data used in expressions
### Aliases: data_used
### Keywords: internal

### ** Examples

# Get values for variable names used in expressions.
# NOTE(review): `ex_taxmap`, `n_legs`, `dangerous`, `taxon_names`, and
# `invalid_expression` are expected to come from taxa's bundled example
# data — verify they exist in the attached package environment.
ex_taxmap$data_used(n_legs + dangerous == invalid_expression)
ex_taxmap$data_used(length(unique(taxon_names)))
## -------------------------------------------------------------------------
## Second-order log-cumulant estimators for the alpha and gamma parameters,
## given a sample and the number of looks L.
## -------------------------------------------------------------------------

# Second log-cumulant equation; a root in `alfa` gives the alpha estimate.
f2 <- function(alfa, est.2, L) {
  est.2 - psigamma(L, deriv = 1) - psigamma(-alfa, deriv = 1)
}

# First log-cumulant equation; a root in `gama` gives the gamma estimate.
f1 <- function(gama, est.1, L, alfa) {
  est.1 + log(L) - log(gama) - digamma(L) + digamma(-alfa)
}

# Log-cumulant estimator of alpha from sample `muestra` with `L` looks.
# A root of f2 is searched in [-20, -1e-3]; when the bracket shows no sign
# change, the fallback value 0.1 is returned.
logcumAlfa <- function(muestra, L) {
  kappa1 <- mean(log(muestra))
  kappa2 <- mean((log(muestra) - kappa1)^2)
  lower <- -20
  upper <- -10^(-3)
  estimate <- if (f2(lower, kappa2, L) * f2(upper, kappa2, L) < 0) {
    uniroot(f2, c(lower, upper), tol = 0.0001, kappa2, L)$root
  } else {
    0.1
  }
  c(estimate)
}

# Log-cumulant estimator of gamma; estimates alpha internally via
# logcumAlfa. A root of f1 is bracketed in [1/gamaini, 10*gamaini]; when
# there is no sign change, the fallback value 0.1 is returned.
# NOTE(review): for gamaini below ~0.316 the bracket is inverted and
# uniroot would error — behavior preserved from the original.
logcumGama <- function(muestra, L, gamaini) {
  kappa1 <- mean(log(muestra))
  alpha.hat <- logcumAlfa(muestra, L)
  bracket.lo <- 1 / gamaini
  bracket.hi <- 10 * gamaini
  estimate <- if (f1(bracket.lo, kappa1, L, alpha.hat) * f1(bracket.hi, kappa1, L, alpha.hat) < 0) {
    uniroot(f1, c(bracket.lo, bracket.hi), tol = 0.0001, kappa1, L, alpha.hat)$root
  } else {
    0.1
  }
  c(estimate)
}
/Code/Rutinas/LogCumulantAlfayGamma.R
no_license
vicky1505-wlt/KernelEstimationGit
R
false
false
1,055
r
## -------------------------------------------------------------------------
## Second-order log-cumulant estimators for the alpha and gamma parameters,
## given a sample and the number of looks L.
## -------------------------------------------------------------------------

# Second log-cumulant equation; a root in `alfa` gives the alpha estimate.
f2 <- function(alfa, est.2, L) {
  est.2 - psigamma(L, deriv = 1) - psigamma(-alfa, deriv = 1)
}

# First log-cumulant equation; a root in `gama` gives the gamma estimate.
f1 <- function(gama, est.1, L, alfa) {
  est.1 + log(L) - log(gama) - digamma(L) + digamma(-alfa)
}

# Log-cumulant estimator of alpha from sample `muestra` with `L` looks.
# A root of f2 is searched in [-20, -1e-3]; when the bracket shows no sign
# change, the fallback value 0.1 is returned.
logcumAlfa <- function(muestra, L) {
  kappa1 <- mean(log(muestra))
  kappa2 <- mean((log(muestra) - kappa1)^2)
  lower <- -20
  upper <- -10^(-3)
  estimate <- if (f2(lower, kappa2, L) * f2(upper, kappa2, L) < 0) {
    uniroot(f2, c(lower, upper), tol = 0.0001, kappa2, L)$root
  } else {
    0.1
  }
  c(estimate)
}

# Log-cumulant estimator of gamma; estimates alpha internally via
# logcumAlfa. A root of f1 is bracketed in [1/gamaini, 10*gamaini]; when
# there is no sign change, the fallback value 0.1 is returned.
# NOTE(review): for gamaini below ~0.316 the bracket is inverted and
# uniroot would error — behavior preserved from the original.
logcumGama <- function(muestra, L, gamaini) {
  kappa1 <- mean(log(muestra))
  alpha.hat <- logcumAlfa(muestra, L)
  bracket.lo <- 1 / gamaini
  bracket.hi <- 10 * gamaini
  estimate <- if (f1(bracket.lo, kappa1, L, alpha.hat) * f1(bracket.hi, kappa1, L, alpha.hat) < 0) {
    uniroot(f1, c(bracket.lo, bracket.hi), tol = 0.0001, kappa1, L, alpha.hat)$root
  } else {
    0.1
  }
  c(estimate)
}
# stichContigs_flu.R
# ---------------------------------------------------------------------------
# Stitch reference-ordered contigs into one consensus contig per influenza
# genome segment, separately against an H1N1 and an H3N2 reference set, then
# call the genotype as whichever reference recovers the larger fraction of
# its genome, and write the call plus the eight stitched segments to disk.
#
# Command-line usage (as --name=value pairs):
#   --arg1=<tab-separated contig table aligned to the H1N1 reference>
#   --arg2=<tab-separated contig table aligned to the H3N2 reference>
#   --arg3=<output directory>
# Each contig table is expected to have five columns: id, start, end, seq, seg.

#library(Biostrings)
library(data.table)
library(seqinr)

# Parse "--name=value" command-line options into a named list of strings.
getArgs <- function() {
  myargs.list <- strsplit(grep("=",gsub("--","",commandArgs()),value=TRUE),"=")
  myargs <- lapply(myargs.list,function(x) x[2] )
  names(myargs) <- lapply(myargs.list,function(x) x[1])
  return (myargs)
}

myArgs <- getArgs()
print(myArgs)
# Pull the three expected options into plain variables; the later code
# assumes all three were supplied (print() below fails otherwise).
if ('arg1' %in% names(myArgs)) arg1 <- as.character( myArgs[[ 'arg1' ]])
if ('arg2' %in% names(myArgs)) arg2 <- as.character( myArgs[[ 'arg2' ]])
if ('arg3' %in% names(myArgs)) arg3 <- as.character( myArgs[[ 'arg3' ]])
print(arg1)
print(arg2)
print(arg3)

# Stitched contig length per segment (8 segments x 2 candidate genotypes)
# and the overall fraction of each genome recovered.
contig.lengths<-matrix(0, nrow=8, ncol=2)
colnames(contig.lengths)<-c("H1N1", "H3N2")
genome.recov<-matrix(0, nrow=1, ncol=2)
colnames(genome.recov)<-c("H1N1", "H3N2")
# Approximate total genome lengths (bp) used as recovery denominators.
H1N1.length <- 13200
H3N2.length <- 13500

# ---- H1N1 branch -----------------------------------------------------------
arg4<-"H1N1"
if (arg4=="H1N1"){
  ordered.contigs<-fread(arg1, sep="\t", select=c(1,2,3,4, 5))
  setnames(ordered.contigs, old=c("id","start", "end", "seq", "seg" ))
  ordered.contigs.df<-as.data.frame(ordered.contigs)
  updated.contigs<-ordered.contigs.df
  ### H1N1 reference segment identifiers (GenBank), segments 1..8.
  H1N1.seg1<-"gi|758967842|ref|NC_026438.1|"
  H1N1.seg2<-"gi|758899361|ref|NC_026435.1|"
  H1N1.seg3<-"gi|758967835|ref|NC_026437.1|"
  H1N1.seg4<-"gi|758899355|ref|NC_026433.1|"
  H1N1.seg5<-"gi|758899363|ref|NC_026436.1|"
  H1N1.seg6<-"gi|758899359|ref|NC_026434.1|"
  H1N1.seg7<-"gi|758899349|ref|NC_026431.1|"
  H1N1.seg8<-"gi|758899352|ref|NC_026432.1|"
  H1N1.segs<-list(H1N1.seg1, H1N1.seg2, H1N1.seg3, H1N1.seg4, H1N1.seg5, H1N1.seg6, H1N1.seg7, H1N1.seg8)
  # Split the contig table by reference segment (column 5 holds the seg id).
  seg1<-updated.contigs[which(updated.contigs[,5]==H1N1.seg1),]
  seg2<-updated.contigs[which(updated.contigs[,5]==H1N1.seg2),]
  seg3<-updated.contigs[which(updated.contigs[,5]==H1N1.seg3),]
  seg4<-updated.contigs[which(updated.contigs[,5]==H1N1.seg4),]
  seg5<-updated.contigs[which(updated.contigs[,5]==H1N1.seg5),]
  seg6<-updated.contigs[which(updated.contigs[,5]==H1N1.seg6),]
  seg7<-updated.contigs[which(updated.contigs[,5]==H1N1.seg7),]
  seg8<-updated.contigs[which(updated.contigs[,5]==H1N1.seg8),]
  # Order each segment's contigs by start, then end coordinate.
  seg1.ord<-seg1[order(seg1[,"start"], seg1[,"end"]),]
  seg2.ord<-seg2[order(seg2[,"start"], seg2[,"end"]),]
  seg3.ord<-seg3[order(seg3[,"start"], seg3[,"end"]),]
  seg4.ord<-seg4[order(seg4[,"start"], seg4[,"end"]),]
  seg5.ord<-seg5[order(seg5[,"start"], seg5[,"end"]),]
  seg6.ord<-seg6[order(seg6[,"start"], seg6[,"end"]),]
  seg7.ord<-seg7[order(seg7[,"start"], seg7[,"end"]),]
  seg8.ord<-seg8[order(seg8[,"start"], seg8[,"end"]),]
  segments<-list(seg1.ord, seg2.ord, seg3.ord, seg4.ord, seg5.ord, seg6.ord, seg7.ord, seg8.ord )
  # Stitch contigs per segment: walk adjacent contig pairs; when they overlap
  # and the next contig extends further right, splice the two sequences and
  # carry the merged record forward.
  for (j in 1:8 ){
    # NOTE(review): rep() has no argument 'n' (its arguments are times /
    # length.out / each), so 'overlap' here is just the scalar 0 and grows
    # by assignment below — confirm 'times=' was intended.
    overlap<-rep(0, n=nrow(segments[[j]])-1)
    if (nrow(segments[[j]])==1) {
      # Single contig: nothing to stitch.
      segments[[j]]<-segments[[j]]
    } else if (nrow(segments[[j]])==0) {
      # No contig for this segment: insert an all-zero placeholder row so the
      # downstream length arithmetic still works, tagged with the segment id.
      nocontig<-as.data.frame(matrix(0, ncol=5, nrow=1))
      colnames(nocontig)<-c("id", "start", "end", "seq", "seg")
      segments[[j]]<-nocontig
      segments[[j]]["seg"]<-H1N1.segs[[j]]
    } else {
      for (i in 1:(nrow(segments[[j]])-1)) {
        print(i)
        print(j)
        print(segments[[j]])
        # Positive overlap means contig i's end passes contig i+1's start.
        overlap[i]<-segments[[j]][i, "end"]-segments[[j]][i+1,"start"]
        if ( overlap[i]>0 & (segments[[j]][i+1,"end"] > segments[[j]][i,"end"]) ) {
          message("overlap")
          print(overlap[i])
          # Merge contig i+1 into contig i: concatenate ids, keep the left
          # start, extend the end coordinate, and splice sequences after
          # trimming contig i's overlapping suffix.
          # NOTE(review): the trim removes overlap[i]+1 characters — confirm
          # the extra "-1" does not drop one base too many.
          segments[[j]][i,"id"]<-paste(segments[[j]][i,"id"], segments[[j]][i+1,"id"], sep="")
          segments[[j]][i,"start"]<-segments[[j]][i, "start"]
          segments[[j]][i,"end"]<-segments[[j]][i+1, "end"]
          segments[[j]][i, "seq"]<-paste(substr(segments[[j]][i,"seq"], start=1, stop=(nchar(segments[[j]][i,"seq"])-overlap[i] -1) ), segments[[j]][i+1,"seq"], sep="")
          # Copy the merged record forward so the next iteration compares
          # against the stitched contig.
          segments[[j]][i+1,]<-segments[[j]][i,]
          ## Dead code: earlier matrix-based 'overlap' variant, kept as found.
          ## overlap[i,]<-segments[[j]][i, "end"]-segments[[j]][i+1,"start"]
          ## if ( overlap[j,]>0 & (segments[[j]][i+1,"end"] > segments[[j]][i,"end"]) ) {
          ##   message("overlap")
          ##   print(overlap[j,])
          ##   segments[[j]][i,"id"]<-paste(segments[[j]][i,"id"], segments[[j]][i+1,"id"], sep="")
          ##   segments[[j]][i,"start"]<-segments[[j]][i, "start"]
          ##   segments[[j]][i,"end"]<-segments[[j]][i+1, "end"]
          ##   segments[[j]][i, "seq"]<-paste(substr(segments[[j]][i,"seq"], start=1,
          ##     stop=(nchar(segments[[j]][i,"seq"])-overlap[j,] -1) ), segments[[j]][i+1,"seq"], sep="")
          ##   segments[[j]][i+1,]<-segments[[j]][i,]
        } else {
          # No usable overlap: carry contig i forward unchanged so the final
          # row always holds the longest stitched contig so far.
          segments[[j]][i+1,]<-segments[[j]][i,]
        }
      }
    }
    segments[[j]]<-unique(segments[[j]])
    H1N1.segments<-segments
  }
  # Keep only the final (fully stitched) row per segment and record its span.
  for (j in 1:8 ){
    final.row<-nrow(segments[[j]])
    H1N1.segments[[j]]<-H1N1.segments[[j]][final.row,]
    contig.lengths[j,"H1N1"]<- as.numeric(segments[[j]][final.row, "end"]-segments[[j]][final.row, "start"]+1)
  }
  print(arg4)
  print(contig.lengths)
  print(sum(contig.lengths[,"H1N1"])/H1N1.length)
  genome.recov[1,"H1N1"]<-sum(contig.lengths[,"H1N1"])/H1N1.length
}

# ---- H3N2 branch (mirrors the H1N1 branch, using arg2 and H3N2 refs) -------
arg4<-"H3N2"
if (arg4=="H3N2"){
  ordered.contigs<-fread(arg2, sep="\t", select=c(1,2,3,4, 5))
  setnames(ordered.contigs, old=c("id","start", "end", "seq", "seg" ))
  ordered.contigs.df<-as.data.frame(ordered.contigs)
  # NOTE(review): this matrix is immediately shadowed by the rep() call inside
  # the loop below, so it appears unused — confirm before relying on it.
  overlap<-matrix(0, nrow=nrow(ordered.contigs.df)-1, ncol=1)
  updated.contigs<-ordered.contigs.df
  ### H3N2 reference segment identifiers (GenBank), segments 1..8.
  H3N2.seg1<-"gi|73919059|ref|NC_007373.1|"
  H3N2.seg2<-"gi|73919148|ref|NC_007372.1|"
  H3N2.seg3<-"gi|73919133|ref|NC_007371.1|"
  H3N2.seg4<-"gi|73919206|ref|NC_007366.1|"
  H3N2.seg5<-"gi|73919146|ref|NC_007369.1|"
  H3N2.seg6<-"gi|73919135|ref|NC_007368.1|"
  H3N2.seg7<-"gi|73919151|ref|NC_007367.1|"
  H3N2.seg8<-"gi|73919211|ref|NC_007370.1|"
  H3N2.segs<-list(H3N2.seg1, H3N2.seg2, H3N2.seg3, H3N2.seg4, H3N2.seg5, H3N2.seg6, H3N2.seg7, H3N2.seg8)
  # Split the contig table by reference segment (column 5 holds the seg id).
  seg1<-updated.contigs[which(updated.contigs[,5]==H3N2.seg1),]
  seg2<-updated.contigs[which(updated.contigs[,5]==H3N2.seg2),]
  seg3<-updated.contigs[which(updated.contigs[,5]==H3N2.seg3),]
  seg4<-updated.contigs[which(updated.contigs[,5]==H3N2.seg4),]
  seg5<-updated.contigs[which(updated.contigs[,5]==H3N2.seg5),]
  seg6<-updated.contigs[which(updated.contigs[,5]==H3N2.seg6),]
  seg7<-updated.contigs[which(updated.contigs[,5]==H3N2.seg7),]
  seg8<-updated.contigs[which(updated.contigs[,5]==H3N2.seg8),]
  # Order each segment's contigs by start, then end coordinate.
  seg1.ord<-seg1[order(seg1[,"start"], seg1[,"end"]),]
  seg2.ord<-seg2[order(seg2[,"start"], seg2[,"end"]),]
  seg3.ord<-seg3[order(seg3[,"start"], seg3[,"end"]),]
  seg4.ord<-seg4[order(seg4[,"start"], seg4[,"end"]),]
  seg5.ord<-seg5[order(seg5[,"start"], seg5[,"end"]),]
  seg6.ord<-seg6[order(seg6[,"start"], seg6[,"end"]),]
  seg7.ord<-seg7[order(seg7[,"start"], seg7[,"end"]),]
  seg8.ord<-seg8[order(seg8[,"start"], seg8[,"end"]),]
  segments<-list(seg1.ord, seg2.ord, seg3.ord, seg4.ord, seg5.ord, seg6.ord, seg7.ord, seg8.ord )
  # Stitch contigs per segment (same algorithm as the H1N1 branch).
  for (j in 1:8 ){
    # NOTE(review): same rep() 'n=' quirk as in the H1N1 branch.
    overlap<-rep(0, n=nrow(segments[[j]])-1)
    if (nrow(segments[[j]])==1) {
      # Single contig: nothing to stitch.
      segments[[j]]<-segments[[j]]
    } else if (nrow(segments[[j]])==0) {
      # No contig for this segment: insert an all-zero placeholder row.
      nocontig<-as.data.frame(matrix(0, ncol=5, nrow=1))
      colnames(nocontig)<-c("id", "start", "end", "seq", "seg")
      segments[[j]]<-nocontig
      segments[[j]]["seg"]<-H3N2.segs[[j]]
    } else {
      for (i in 1:(nrow(segments[[j]])-1)) {
        print(i)
        overlap[i]<-segments[[j]][i, "end"]-segments[[j]][i+1,"start"]
        if ( overlap[i]>0 & (segments[[j]][i+1,"end"] > segments[[j]][i,"end"]) ) {
          message("overlap")
          print(overlap[i])
          # Merge contig i+1 into contig i (ids concatenated, end extended,
          # sequences spliced after trimming the overlapping suffix).
          segments[[j]][i,"id"]<-paste(segments[[j]][i,"id"], segments[[j]][i+1,"id"], sep="")
          segments[[j]][i,"start"]<-segments[[j]][i, "start"]
          segments[[j]][i,"end"]<-segments[[j]][i+1, "end"]
          segments[[j]][i, "seq"]<-paste(substr(segments[[j]][i,"seq"], start=1, stop=(nchar(segments[[j]][i,"seq"])-overlap[i] -1) ), segments[[j]][i+1,"seq"], sep="")
          # Carry the merged record forward for the next comparison.
          segments[[j]][i+1,]<-segments[[j]][i,]
          ## Dead code: earlier matrix-based 'overlap' variant, kept as found.
          ## overlap[j,]<-segments[[j]][i, "end"]-segments[[j]][i+1,"start"]
          ## if ( overlap[j,]>0 & (segments[[j]][i+1,"end"] > segments[[j]][i,"end"]) ) {
          ##   message("overlap")
          ##   print(overlap[j,])
          ##   segments[[j]][i,"id"]<-paste(segments[[j]][i,"id"], segments[[j]][i+1,"id"], sep="")
          ##   segments[[j]][i,"start"]<-segments[[j]][i, "start"]
          ##   segments[[j]][i,"end"]<-segments[[j]][i+1, "end"]
          ##   segments[[j]][i, "seq"]<-paste(substr(segments[[j]][i,"seq"], start=1,
          ##     stop=(nchar(segments[[j]][i,"seq"])-overlap[j,] -1) ), segments[[j]][i+1,"seq"], sep="")
          ##   segments[[j]][i+1,]<-segments[[j]][i,]
        } else {
          # No usable overlap: carry contig i forward unchanged.
          segments[[j]][i+1,]<-segments[[j]][i,]
        }
      }
    }
    segments[[j]]<-unique(segments[[j]])
    H3N2.segments<-segments
  }
  # Keep only the final (fully stitched) row per segment and record its span.
  for (j in 1:8 ){
    final.row<-nrow(segments[[j]])
    H3N2.segments[[j]]<-H3N2.segments[[j]][final.row,]
    contig.lengths[j,"H3N2"]<- as.numeric(segments[[j]][final.row, "end"]-segments[[j]][final.row, "start"]+1)
  }
  print(arg4)
  print(contig.lengths)
  print(sum(contig.lengths[,"H3N2"])/H3N2.length)
  genome.recov[1,"H3N2"]<-sum(contig.lengths[,"H3N2"])/H3N2.length
}

# ---- Genotype call ---------------------------------------------------------
# Whichever reference recovered the larger genome fraction wins: write the
# call to <arg3>/genotype.tab and the eight stitched segments to FASTA files.
if (genome.recov[,"H1N1"] > genome.recov[,"H3N2"]) {
  message("H1N1 genotype")
  print(genome.recov)
  print(contig.lengths)
  write.table(x="H1N1", file=paste(arg3, "/genotype.tab", sep=""), quote=F, row.names=F, col.names=F)
  for (j in 1:8){
    write.fasta(sequences=H1N1.segments[[j]]["seq"], names=H1N1.segments[[j]]["seg"], file.out=paste(arg3,"/H1N1segment", j, ".fasta", sep=""), as.string = TRUE)
  }
} else {
  message("H3N2 genotype")
  print(genome.recov)
  print(contig.lengths)
  write.table(x="H3N2", file=paste(arg3, "/genotype.tab", sep=""), quote=F, row.names=F, col.names=F)
  for (j in 1:8){
    write.fasta(sequences=H3N2.segments[[j]]["seq"], names=H3N2.segments[[j]]["seg"], file.out=paste(arg3,"/H3N2segment", j, ".fasta", sep=""), as.string = TRUE)
  }
}
# Persist the whole workspace for post-hoc inspection.
save.image("ordered_contigs.RData")
/scripts/stichContigs_flu.R
no_license
John-Bioinfo/viral_denovo_pipeline
R
false
false
10,091
r
# stichContigs_flu.R
# ---------------------------------------------------------------------------
# Stitch reference-ordered contigs into one consensus contig per influenza
# genome segment, separately against an H1N1 and an H3N2 reference set, then
# call the genotype as whichever reference recovers the larger fraction of
# its genome, and write the call plus the eight stitched segments to disk.
#
# Command-line usage (as --name=value pairs):
#   --arg1=<tab-separated contig table aligned to the H1N1 reference>
#   --arg2=<tab-separated contig table aligned to the H3N2 reference>
#   --arg3=<output directory>
# Each contig table is expected to have five columns: id, start, end, seq, seg.

#library(Biostrings)
library(data.table)
library(seqinr)

# Parse "--name=value" command-line options into a named list of strings.
getArgs <- function() {
  myargs.list <- strsplit(grep("=",gsub("--","",commandArgs()),value=TRUE),"=")
  myargs <- lapply(myargs.list,function(x) x[2] )
  names(myargs) <- lapply(myargs.list,function(x) x[1])
  return (myargs)
}

myArgs <- getArgs()
print(myArgs)
# Pull the three expected options into plain variables; the later code
# assumes all three were supplied (print() below fails otherwise).
if ('arg1' %in% names(myArgs)) arg1 <- as.character( myArgs[[ 'arg1' ]])
if ('arg2' %in% names(myArgs)) arg2 <- as.character( myArgs[[ 'arg2' ]])
if ('arg3' %in% names(myArgs)) arg3 <- as.character( myArgs[[ 'arg3' ]])
print(arg1)
print(arg2)
print(arg3)

# Stitched contig length per segment (8 segments x 2 candidate genotypes)
# and the overall fraction of each genome recovered.
contig.lengths<-matrix(0, nrow=8, ncol=2)
colnames(contig.lengths)<-c("H1N1", "H3N2")
genome.recov<-matrix(0, nrow=1, ncol=2)
colnames(genome.recov)<-c("H1N1", "H3N2")
# Approximate total genome lengths (bp) used as recovery denominators.
H1N1.length <- 13200
H3N2.length <- 13500

# ---- H1N1 branch -----------------------------------------------------------
arg4<-"H1N1"
if (arg4=="H1N1"){
  ordered.contigs<-fread(arg1, sep="\t", select=c(1,2,3,4, 5))
  setnames(ordered.contigs, old=c("id","start", "end", "seq", "seg" ))
  ordered.contigs.df<-as.data.frame(ordered.contigs)
  updated.contigs<-ordered.contigs.df
  ### H1N1 reference segment identifiers (GenBank), segments 1..8.
  H1N1.seg1<-"gi|758967842|ref|NC_026438.1|"
  H1N1.seg2<-"gi|758899361|ref|NC_026435.1|"
  H1N1.seg3<-"gi|758967835|ref|NC_026437.1|"
  H1N1.seg4<-"gi|758899355|ref|NC_026433.1|"
  H1N1.seg5<-"gi|758899363|ref|NC_026436.1|"
  H1N1.seg6<-"gi|758899359|ref|NC_026434.1|"
  H1N1.seg7<-"gi|758899349|ref|NC_026431.1|"
  H1N1.seg8<-"gi|758899352|ref|NC_026432.1|"
  H1N1.segs<-list(H1N1.seg1, H1N1.seg2, H1N1.seg3, H1N1.seg4, H1N1.seg5, H1N1.seg6, H1N1.seg7, H1N1.seg8)
  # Split the contig table by reference segment (column 5 holds the seg id).
  seg1<-updated.contigs[which(updated.contigs[,5]==H1N1.seg1),]
  seg2<-updated.contigs[which(updated.contigs[,5]==H1N1.seg2),]
  seg3<-updated.contigs[which(updated.contigs[,5]==H1N1.seg3),]
  seg4<-updated.contigs[which(updated.contigs[,5]==H1N1.seg4),]
  seg5<-updated.contigs[which(updated.contigs[,5]==H1N1.seg5),]
  seg6<-updated.contigs[which(updated.contigs[,5]==H1N1.seg6),]
  seg7<-updated.contigs[which(updated.contigs[,5]==H1N1.seg7),]
  seg8<-updated.contigs[which(updated.contigs[,5]==H1N1.seg8),]
  # Order each segment's contigs by start, then end coordinate.
  seg1.ord<-seg1[order(seg1[,"start"], seg1[,"end"]),]
  seg2.ord<-seg2[order(seg2[,"start"], seg2[,"end"]),]
  seg3.ord<-seg3[order(seg3[,"start"], seg3[,"end"]),]
  seg4.ord<-seg4[order(seg4[,"start"], seg4[,"end"]),]
  seg5.ord<-seg5[order(seg5[,"start"], seg5[,"end"]),]
  seg6.ord<-seg6[order(seg6[,"start"], seg6[,"end"]),]
  seg7.ord<-seg7[order(seg7[,"start"], seg7[,"end"]),]
  seg8.ord<-seg8[order(seg8[,"start"], seg8[,"end"]),]
  segments<-list(seg1.ord, seg2.ord, seg3.ord, seg4.ord, seg5.ord, seg6.ord, seg7.ord, seg8.ord )
  # Stitch contigs per segment: walk adjacent contig pairs; when they overlap
  # and the next contig extends further right, splice the two sequences and
  # carry the merged record forward.
  for (j in 1:8 ){
    # NOTE(review): rep() has no argument 'n' (its arguments are times /
    # length.out / each), so 'overlap' here is just the scalar 0 and grows
    # by assignment below — confirm 'times=' was intended.
    overlap<-rep(0, n=nrow(segments[[j]])-1)
    if (nrow(segments[[j]])==1) {
      # Single contig: nothing to stitch.
      segments[[j]]<-segments[[j]]
    } else if (nrow(segments[[j]])==0) {
      # No contig for this segment: insert an all-zero placeholder row so the
      # downstream length arithmetic still works, tagged with the segment id.
      nocontig<-as.data.frame(matrix(0, ncol=5, nrow=1))
      colnames(nocontig)<-c("id", "start", "end", "seq", "seg")
      segments[[j]]<-nocontig
      segments[[j]]["seg"]<-H1N1.segs[[j]]
    } else {
      for (i in 1:(nrow(segments[[j]])-1)) {
        print(i)
        print(j)
        print(segments[[j]])
        # Positive overlap means contig i's end passes contig i+1's start.
        overlap[i]<-segments[[j]][i, "end"]-segments[[j]][i+1,"start"]
        if ( overlap[i]>0 & (segments[[j]][i+1,"end"] > segments[[j]][i,"end"]) ) {
          message("overlap")
          print(overlap[i])
          # Merge contig i+1 into contig i: concatenate ids, keep the left
          # start, extend the end coordinate, and splice sequences after
          # trimming contig i's overlapping suffix.
          # NOTE(review): the trim removes overlap[i]+1 characters — confirm
          # the extra "-1" does not drop one base too many.
          segments[[j]][i,"id"]<-paste(segments[[j]][i,"id"], segments[[j]][i+1,"id"], sep="")
          segments[[j]][i,"start"]<-segments[[j]][i, "start"]
          segments[[j]][i,"end"]<-segments[[j]][i+1, "end"]
          segments[[j]][i, "seq"]<-paste(substr(segments[[j]][i,"seq"], start=1, stop=(nchar(segments[[j]][i,"seq"])-overlap[i] -1) ), segments[[j]][i+1,"seq"], sep="")
          # Copy the merged record forward so the next iteration compares
          # against the stitched contig.
          segments[[j]][i+1,]<-segments[[j]][i,]
          ## Dead code: earlier matrix-based 'overlap' variant, kept as found.
          ## overlap[i,]<-segments[[j]][i, "end"]-segments[[j]][i+1,"start"]
          ## if ( overlap[j,]>0 & (segments[[j]][i+1,"end"] > segments[[j]][i,"end"]) ) {
          ##   message("overlap")
          ##   print(overlap[j,])
          ##   segments[[j]][i,"id"]<-paste(segments[[j]][i,"id"], segments[[j]][i+1,"id"], sep="")
          ##   segments[[j]][i,"start"]<-segments[[j]][i, "start"]
          ##   segments[[j]][i,"end"]<-segments[[j]][i+1, "end"]
          ##   segments[[j]][i, "seq"]<-paste(substr(segments[[j]][i,"seq"], start=1,
          ##     stop=(nchar(segments[[j]][i,"seq"])-overlap[j,] -1) ), segments[[j]][i+1,"seq"], sep="")
          ##   segments[[j]][i+1,]<-segments[[j]][i,]
        } else {
          # No usable overlap: carry contig i forward unchanged so the final
          # row always holds the longest stitched contig so far.
          segments[[j]][i+1,]<-segments[[j]][i,]
        }
      }
    }
    segments[[j]]<-unique(segments[[j]])
    H1N1.segments<-segments
  }
  # Keep only the final (fully stitched) row per segment and record its span.
  for (j in 1:8 ){
    final.row<-nrow(segments[[j]])
    H1N1.segments[[j]]<-H1N1.segments[[j]][final.row,]
    contig.lengths[j,"H1N1"]<- as.numeric(segments[[j]][final.row, "end"]-segments[[j]][final.row, "start"]+1)
  }
  print(arg4)
  print(contig.lengths)
  print(sum(contig.lengths[,"H1N1"])/H1N1.length)
  genome.recov[1,"H1N1"]<-sum(contig.lengths[,"H1N1"])/H1N1.length
}

# ---- H3N2 branch (mirrors the H1N1 branch, using arg2 and H3N2 refs) -------
arg4<-"H3N2"
if (arg4=="H3N2"){
  ordered.contigs<-fread(arg2, sep="\t", select=c(1,2,3,4, 5))
  setnames(ordered.contigs, old=c("id","start", "end", "seq", "seg" ))
  ordered.contigs.df<-as.data.frame(ordered.contigs)
  # NOTE(review): this matrix is immediately shadowed by the rep() call inside
  # the loop below, so it appears unused — confirm before relying on it.
  overlap<-matrix(0, nrow=nrow(ordered.contigs.df)-1, ncol=1)
  updated.contigs<-ordered.contigs.df
  ### H3N2 reference segment identifiers (GenBank), segments 1..8.
  H3N2.seg1<-"gi|73919059|ref|NC_007373.1|"
  H3N2.seg2<-"gi|73919148|ref|NC_007372.1|"
  H3N2.seg3<-"gi|73919133|ref|NC_007371.1|"
  H3N2.seg4<-"gi|73919206|ref|NC_007366.1|"
  H3N2.seg5<-"gi|73919146|ref|NC_007369.1|"
  H3N2.seg6<-"gi|73919135|ref|NC_007368.1|"
  H3N2.seg7<-"gi|73919151|ref|NC_007367.1|"
  H3N2.seg8<-"gi|73919211|ref|NC_007370.1|"
  H3N2.segs<-list(H3N2.seg1, H3N2.seg2, H3N2.seg3, H3N2.seg4, H3N2.seg5, H3N2.seg6, H3N2.seg7, H3N2.seg8)
  # Split the contig table by reference segment (column 5 holds the seg id).
  seg1<-updated.contigs[which(updated.contigs[,5]==H3N2.seg1),]
  seg2<-updated.contigs[which(updated.contigs[,5]==H3N2.seg2),]
  seg3<-updated.contigs[which(updated.contigs[,5]==H3N2.seg3),]
  seg4<-updated.contigs[which(updated.contigs[,5]==H3N2.seg4),]
  seg5<-updated.contigs[which(updated.contigs[,5]==H3N2.seg5),]
  seg6<-updated.contigs[which(updated.contigs[,5]==H3N2.seg6),]
  seg7<-updated.contigs[which(updated.contigs[,5]==H3N2.seg7),]
  seg8<-updated.contigs[which(updated.contigs[,5]==H3N2.seg8),]
  # Order each segment's contigs by start, then end coordinate.
  seg1.ord<-seg1[order(seg1[,"start"], seg1[,"end"]),]
  seg2.ord<-seg2[order(seg2[,"start"], seg2[,"end"]),]
  seg3.ord<-seg3[order(seg3[,"start"], seg3[,"end"]),]
  seg4.ord<-seg4[order(seg4[,"start"], seg4[,"end"]),]
  seg5.ord<-seg5[order(seg5[,"start"], seg5[,"end"]),]
  seg6.ord<-seg6[order(seg6[,"start"], seg6[,"end"]),]
  seg7.ord<-seg7[order(seg7[,"start"], seg7[,"end"]),]
  seg8.ord<-seg8[order(seg8[,"start"], seg8[,"end"]),]
  segments<-list(seg1.ord, seg2.ord, seg3.ord, seg4.ord, seg5.ord, seg6.ord, seg7.ord, seg8.ord )
  # Stitch contigs per segment (same algorithm as the H1N1 branch).
  for (j in 1:8 ){
    # NOTE(review): same rep() 'n=' quirk as in the H1N1 branch.
    overlap<-rep(0, n=nrow(segments[[j]])-1)
    if (nrow(segments[[j]])==1) {
      # Single contig: nothing to stitch.
      segments[[j]]<-segments[[j]]
    } else if (nrow(segments[[j]])==0) {
      # No contig for this segment: insert an all-zero placeholder row.
      nocontig<-as.data.frame(matrix(0, ncol=5, nrow=1))
      colnames(nocontig)<-c("id", "start", "end", "seq", "seg")
      segments[[j]]<-nocontig
      segments[[j]]["seg"]<-H3N2.segs[[j]]
    } else {
      for (i in 1:(nrow(segments[[j]])-1)) {
        print(i)
        overlap[i]<-segments[[j]][i, "end"]-segments[[j]][i+1,"start"]
        if ( overlap[i]>0 & (segments[[j]][i+1,"end"] > segments[[j]][i,"end"]) ) {
          message("overlap")
          print(overlap[i])
          # Merge contig i+1 into contig i (ids concatenated, end extended,
          # sequences spliced after trimming the overlapping suffix).
          segments[[j]][i,"id"]<-paste(segments[[j]][i,"id"], segments[[j]][i+1,"id"], sep="")
          segments[[j]][i,"start"]<-segments[[j]][i, "start"]
          segments[[j]][i,"end"]<-segments[[j]][i+1, "end"]
          segments[[j]][i, "seq"]<-paste(substr(segments[[j]][i,"seq"], start=1, stop=(nchar(segments[[j]][i,"seq"])-overlap[i] -1) ), segments[[j]][i+1,"seq"], sep="")
          # Carry the merged record forward for the next comparison.
          segments[[j]][i+1,]<-segments[[j]][i,]
          ## Dead code: earlier matrix-based 'overlap' variant, kept as found.
          ## overlap[j,]<-segments[[j]][i, "end"]-segments[[j]][i+1,"start"]
          ## if ( overlap[j,]>0 & (segments[[j]][i+1,"end"] > segments[[j]][i,"end"]) ) {
          ##   message("overlap")
          ##   print(overlap[j,])
          ##   segments[[j]][i,"id"]<-paste(segments[[j]][i,"id"], segments[[j]][i+1,"id"], sep="")
          ##   segments[[j]][i,"start"]<-segments[[j]][i, "start"]
          ##   segments[[j]][i,"end"]<-segments[[j]][i+1, "end"]
          ##   segments[[j]][i, "seq"]<-paste(substr(segments[[j]][i,"seq"], start=1,
          ##     stop=(nchar(segments[[j]][i,"seq"])-overlap[j,] -1) ), segments[[j]][i+1,"seq"], sep="")
          ##   segments[[j]][i+1,]<-segments[[j]][i,]
        } else {
          # No usable overlap: carry contig i forward unchanged.
          segments[[j]][i+1,]<-segments[[j]][i,]
        }
      }
    }
    segments[[j]]<-unique(segments[[j]])
    H3N2.segments<-segments
  }
  # Keep only the final (fully stitched) row per segment and record its span.
  for (j in 1:8 ){
    final.row<-nrow(segments[[j]])
    H3N2.segments[[j]]<-H3N2.segments[[j]][final.row,]
    contig.lengths[j,"H3N2"]<- as.numeric(segments[[j]][final.row, "end"]-segments[[j]][final.row, "start"]+1)
  }
  print(arg4)
  print(contig.lengths)
  print(sum(contig.lengths[,"H3N2"])/H3N2.length)
  genome.recov[1,"H3N2"]<-sum(contig.lengths[,"H3N2"])/H3N2.length
}

# ---- Genotype call ---------------------------------------------------------
# Whichever reference recovered the larger genome fraction wins: write the
# call to <arg3>/genotype.tab and the eight stitched segments to FASTA files.
if (genome.recov[,"H1N1"] > genome.recov[,"H3N2"]) {
  message("H1N1 genotype")
  print(genome.recov)
  print(contig.lengths)
  write.table(x="H1N1", file=paste(arg3, "/genotype.tab", sep=""), quote=F, row.names=F, col.names=F)
  for (j in 1:8){
    write.fasta(sequences=H1N1.segments[[j]]["seq"], names=H1N1.segments[[j]]["seg"], file.out=paste(arg3,"/H1N1segment", j, ".fasta", sep=""), as.string = TRUE)
  }
} else {
  message("H3N2 genotype")
  print(genome.recov)
  print(contig.lengths)
  write.table(x="H3N2", file=paste(arg3, "/genotype.tab", sep=""), quote=F, row.names=F, col.names=F)
  for (j in 1:8){
    write.fasta(sequences=H3N2.segments[[j]]["seq"], names=H3N2.segments[[j]]["seg"], file.out=paste(arg3,"/H3N2segment", j, ".fasta", sep=""), as.string = TRUE)
  }
}
# Persist the whole workspace for post-hoc inspection.
save.image("ordered_contigs.RData")
\name{data.ecpe} \alias{data.ecpe} \docType{data} \title{ Dataset ECPE } \description{ ECPE dataset from the Templin and Hoffman (2013) tutorial of specifying cognitive diagnostic models in Mplus. } \usage{ data(data.ecpe) } \format{ The format of the data is a list containing the dichotomous item response data \code{data} (2922 persons at 28 items) and the Q-matrix \code{q.matrix} (28 items and 3 skills): \code{List of 2} \cr \code{ $ data :'data.frame':} \cr \code{ ..$ id : int [1:2922] 1 2 3 4 5 6 7 8 9 10 ...} \cr \code{ ..$ E1 : int [1:2922] 1 1 1 1 1 1 1 0 1 1 ...} \cr \code{ ..$ E2 : int [1:2922] 1 1 1 1 1 1 1 1 1 1 ...} \cr \code{ ..$ E3 : int [1:2922] 1 1 1 1 1 1 1 1 1 1 ...} \cr \code{ ..$ E4 : int [1:2922] 0 1 1 1 1 1 1 1 1 1 ...} \cr \code{ [...] } \cr \code{ ..$ E27: int [1:2922] 1 1 1 1 1 1 1 0 1 1 ...} \cr \code{ ..$ E28: int [1:2922] 1 1 1 1 1 1 1 1 1 1 ...} \cr \code{ $ q.matrix:'data.frame':} \cr \code{ ..$ skill1: int [1:28] 1 0 1 0 0 0 1 0 0 1 ...} \cr \code{ ..$ skill2: int [1:28] 1 1 0 0 0 0 0 1 0 0 ...} \cr \code{ ..$ skill3: int [1:28] 0 0 1 1 1 1 1 0 1 0 ...} \cr The skills are \code{skill1}: Morphosyntactic rules \code{skill2}: Cohesive rules \code{skill3}: Lexical rules. } %\details{ %% ~~ If necessary, more details than the __description__ above ~~ %} \source{ The dataset is used in Templin and Hoffman (2013) and Templin and Bradshaw (2014). The dataset was downloaded from \url{http://psych.unl.edu/jtemplin/teaching/dcm/dcm12ncme/}. } \references{ Templin, J., & Bradshaw, L. (2014). Hierarchical diagnostic classification models: A family of models for estimating and testing attribute hierarchies. \emph{Psychometrika}, \bold{79}, 317-339. Templin, J., & Hoffman, L. (2013). Obtaining diagnostic classification model estimates using Mplus. \emph{Educational Measurement: Issues and Practice}, \bold{32}, 37-50. 
} \section{Example Index}{ \code{\link{personfit.appropriateness}} (Example 1), \code{\link{sim.din}} (Example 3), \code{\link{sim.gdina}} (Examples 2, 3) } \examples{ \dontrun{ data(data.ecpe) #*** Model 1: LCDM model mod1 <- CDM::gdina( data.ecpe$data[,-1], q.matrix= data.ecpe$q.matrix , link="logit") summary(mod1) #*** Model 2: DINA model mod2 <- CDM::gdina( data.ecpe$data[,-1], q.matrix= data.ecpe$q.matrix , rule="DINA") summary(mod2) # Model comparison using likelihood ratio test anova(mod1,mod2) ## Model loglike Deviance Npars AIC BIC Chisq df p ## 2 Model 2 -42841.61 85683.23 63 85809.23 86185.97 206.0359 18 0 ## 1 Model 1 -42738.60 85477.19 81 85639.19 86123.57 NA NA NA #*** Model 3: Hierarchical LCDM (HLCDM) | Templin and Bradshaw (2014) # Testing a linear hierarchy hier <- "skill3 > skill2 > skill1" skill.names <- colnames( data.ecpe$q.matrix ) # define skill space with hierarchy skillspace <- CDM::skillspace.hierarchy( hier , skill.names= skill.names ) skillspace$skillspace.reduced ## skill1 skill2 skill3 ## A000 0 0 0 ## A001 0 0 1 ## A011 0 1 1 ## A111 1 1 1 zeroprob.skillclasses <- skillspace$zeroprob.skillclasses # define user-defined parameters in LCDM: hierarchical LCDM (HLCDM) Mj.user <- mod1$Mj # select items which require two attributes items <- which( rowSums( data.ecpe$q.matrix ) > 1 ) # modify design matrix for item parameters for (ii in items){ m1 <- Mj.user[[ii]] Mj.user[[ii]][[1]] <- (m1[[1]])[,-2] Mj.user[[ii]][[2]] <- (m1[[2]])[-2] } # estimate model # note that avoid.zeroprobs is set to TRUE to avoid algorithmic instabilities mod3 <- CDM::gdina( data.ecpe$data[,-1] , q.matrix= data.ecpe$q.matrix , link="logit" , zeroprob.skillclasses=zeroprob.skillclasses , Mj=Mj.user , avoid.zeroprobs=TRUE ) summary(mod3) } } \keyword{datasets}
/man/data.ecpe.Rd
no_license
parksejin/CDM
R
false
false
4,011
rd
\name{data.ecpe} \alias{data.ecpe} \docType{data} \title{ Dataset ECPE } \description{ ECPE dataset from the Templin and Hoffman (2013) tutorial of specifying cognitive diagnostic models in Mplus. } \usage{ data(data.ecpe) } \format{ The format of the data is a list containing the dichotomous item response data \code{data} (2922 persons at 28 items) and the Q-matrix \code{q.matrix} (28 items and 3 skills): \code{List of 2} \cr \code{ $ data :'data.frame':} \cr \code{ ..$ id : int [1:2922] 1 2 3 4 5 6 7 8 9 10 ...} \cr \code{ ..$ E1 : int [1:2922] 1 1 1 1 1 1 1 0 1 1 ...} \cr \code{ ..$ E2 : int [1:2922] 1 1 1 1 1 1 1 1 1 1 ...} \cr \code{ ..$ E3 : int [1:2922] 1 1 1 1 1 1 1 1 1 1 ...} \cr \code{ ..$ E4 : int [1:2922] 0 1 1 1 1 1 1 1 1 1 ...} \cr \code{ [...] } \cr \code{ ..$ E27: int [1:2922] 1 1 1 1 1 1 1 0 1 1 ...} \cr \code{ ..$ E28: int [1:2922] 1 1 1 1 1 1 1 1 1 1 ...} \cr \code{ $ q.matrix:'data.frame':} \cr \code{ ..$ skill1: int [1:28] 1 0 1 0 0 0 1 0 0 1 ...} \cr \code{ ..$ skill2: int [1:28] 1 1 0 0 0 0 0 1 0 0 ...} \cr \code{ ..$ skill3: int [1:28] 0 0 1 1 1 1 1 0 1 0 ...} \cr The skills are \code{skill1}: Morphosyntactic rules \code{skill2}: Cohesive rules \code{skill3}: Lexical rules. } %\details{ %% ~~ If necessary, more details than the __description__ above ~~ %} \source{ The dataset is used in Templin and Hoffman (2013) and Templin and Bradshaw (2014). The dataset was downloaded from \url{http://psych.unl.edu/jtemplin/teaching/dcm/dcm12ncme/}. } \references{ Templin, J., & Bradshaw, L. (2014). Hierarchical diagnostic classification models: A family of models for estimating and testing attribute hierarchies. \emph{Psychometrika}, \bold{79}, 317-339. Templin, J., & Hoffman, L. (2013). Obtaining diagnostic classification model estimates using Mplus. \emph{Educational Measurement: Issues and Practice}, \bold{32}, 37-50. 
} \section{Example Index}{ \code{\link{personfit.appropriateness}} (Example 1), \code{\link{sim.din}} (Example 3), \code{\link{sim.gdina}} (Examples 2, 3) } \examples{ \dontrun{ data(data.ecpe) #*** Model 1: LCDM model mod1 <- CDM::gdina( data.ecpe$data[,-1], q.matrix= data.ecpe$q.matrix , link="logit") summary(mod1) #*** Model 2: DINA model mod2 <- CDM::gdina( data.ecpe$data[,-1], q.matrix= data.ecpe$q.matrix , rule="DINA") summary(mod2) # Model comparison using likelihood ratio test anova(mod1,mod2) ## Model loglike Deviance Npars AIC BIC Chisq df p ## 2 Model 2 -42841.61 85683.23 63 85809.23 86185.97 206.0359 18 0 ## 1 Model 1 -42738.60 85477.19 81 85639.19 86123.57 NA NA NA #*** Model 3: Hierarchical LCDM (HLCDM) | Templin and Bradshaw (2014) # Testing a linear hierarchy hier <- "skill3 > skill2 > skill1" skill.names <- colnames( data.ecpe$q.matrix ) # define skill space with hierarchy skillspace <- CDM::skillspace.hierarchy( hier , skill.names= skill.names ) skillspace$skillspace.reduced ## skill1 skill2 skill3 ## A000 0 0 0 ## A001 0 0 1 ## A011 0 1 1 ## A111 1 1 1 zeroprob.skillclasses <- skillspace$zeroprob.skillclasses # define user-defined parameters in LCDM: hierarchical LCDM (HLCDM) Mj.user <- mod1$Mj # select items which require two attributes items <- which( rowSums( data.ecpe$q.matrix ) > 1 ) # modify design matrix for item parameters for (ii in items){ m1 <- Mj.user[[ii]] Mj.user[[ii]][[1]] <- (m1[[1]])[,-2] Mj.user[[ii]][[2]] <- (m1[[2]])[-2] } # estimate model # note that avoid.zeroprobs is set to TRUE to avoid algorithmic instabilities mod3 <- CDM::gdina( data.ecpe$data[,-1] , q.matrix= data.ecpe$q.matrix , link="logit" , zeroprob.skillclasses=zeroprob.skillclasses , Mj=Mj.user , avoid.zeroprobs=TRUE ) summary(mod3) } } \keyword{datasets}
#Case Study #Below packages need to be installed (if not available, a one time activity) # install.packages(pkgs="plyr") # install.packages(pkgs="dplyr") # install.packages(pkgs="stringr") # install.packages(pkgs="tidyr") library(plyr) library(dplyr) library(stringr) library(tidyr) ########################################## #######Checkpoint 1 Start################# ########################################## #Clear the Environment - To avoid any testing issues. rm(list = ls()) # Set working directory - physical location to read and write files from setwd("C:/pgdds/Course 1/Project") # Check if working directory is set getwd() # Read companies and rounds2 files into data frames #quote = "" ignores all quotes - this is required in this dataset, since # row number 114850 has quotes and we have to ignore quotes in both the # files for comparison - /ORGANIZATION/ZWAYO-"ON-DEMAND-VALET-PARKING" companies <- read.table( file = "companies.txt", sep = "\t", header = TRUE, fill = TRUE, comment.char = "", stringsAsFactors = FALSE, quote = "" ) rounds2 <- read.csv(file = "rounds2.csv", stringsAsFactors = FALSE, quote = "") # Check structure of the 2 dataframes str(companies) str(rounds2) #Case Study questions #1.1- Data cleaning #How many unique companies are present in rounds2? # Answer - 66368 # tolower is used to make the permalink case # insensitive (as in the files provided, the casse does not match) rounds2$company_permalink <- tolower(rounds2$company_permalink) companies$permalink <- tolower(companies$permalink) length(unique(rounds2$company_permalink)) #1.2 How many unique companies are present in companies? # Answer - 66368 length(unique(companies$permalink)) #1.3 - In the companies data frame, which column can be used as the unique key for each company? Write the name of the column. companies$permalink #1.4 - Are there any companies in the rounds2 file which are not present in companies? 
# Answer yes or no: Y/N # (Answer is NO) # We can achieve this through merging the 2 files (Left outer - Taking all rows # from Rounds2) and then checking for "NA" # values in the columns populated # from Companies dataframe. #all.x ensures all rows of rounds2 are present even if there is no matching #permalink in companies - like a left outer join of sql #by.x and by.y are used to match the columns (since the names are different, # this is required) master_frame <- merge(rounds2, companies, by.x = "company_permalink", by.y = "permalink", all.x = TRUE) #The below command will identify all the "NA" - i.e., rows not present #in companies file that are present in Rounds2 - Answer to question 4 in 1.1 # Answer is NO which(is.na(master_frame$name) == "TRUE") #1.5 Merge the two data frames so that all variables (columns) in the companies #frame are added to the rounds2 data frame. Name the merged frame master_frame. #How many observations are present in master_frame? #Data is merged as part of Step 4 above. 
# Total observations in master_frame are the same as rounds2 (because we
# did a left outer join).
nrow(master_frame)

# Distinct companies
length(unique(master_frame$company_permalink))

##########################################
#########Checkpoint 1 End#################
##########################################

##########################################
#######Checkpoint 2 Start#################
##########################################

# 2.1 #1 Average funding amount per round type.
# FIX(review): na.rm was passed as the string "TRUE"; it only worked
# because if("TRUE") coerces inside mean(). Use the logical TRUE.
master_frame_rollup_raised_amt <- setNames(
  aggregate(
    master_frame$raised_amount_usd,
    by = list(master_frame$funding_round_type),
    FUN = mean,
    na.rm = TRUE
  ),
  c("Funding_Round_Type", "Raised_Amount_USD")
)

# Keep only the 4 investment types asked about in checkpoint 2
# (venture, angel, seed, private_equity) for Table 2.1.
# %in% replaces the original chain of '|' equality tests.
master_frame_funding_type <- subset(
  master_frame_rollup_raised_amt,
  Funding_Round_Type %in% c("venture", "seed", "angel", "private_equity")
)

# 2.2 Based on the average investment amount calculated above, which
# investment type is most suitable for Spark Funds (5-15M USD window)?
filter(master_frame_funding_type,
       Raised_Amount_USD >= 5000000 & Raised_Amount_USD <= 15000000)
# ANSWER - "VENTURE", as it is the only one between 5 and 15 M USD.
##########################################
#######Checkpoint 2 End###################
##########################################

##########################################
########Checkpoint 3 Start################
##########################################

# 3.1 Top nine countries which have received the highest total funding
# (across ALL sectors for the chosen investment type).

# Chosen_type holds the chosen funding type's row (type + avg amount).
# The code stays reusable for other investment types - just replace
# "venture" below.
Chosen_type <- subset(
  master_frame_funding_type,
  Funding_Round_Type == "venture"
)

# Subset master_frame on the chosen funding type.
# FIX(review): the original used as.character(Chosen_type[1]) -
# as.character() applied to a one-column data frame. It happened to work
# for a single row but is fragile; index the cell explicitly instead.
venture_records <- subset(
  master_frame,
  funding_round_type == Chosen_type$Funding_Round_Type[1]
)

# Aggregate the venture funding amounts by country.
venture_records_by_country <- setNames(
  aggregate(
    venture_records$raised_amount_usd,
    by = list(venture_records$country_code),
    FUN = sum,
    na.rm = TRUE
  ),
  c("Country_Code", "Raised_Amount_USD")
)

# Drop records whose country code is blank.
venture_records_by_country_non_blanks <-
  subset(venture_records_by_country, Country_Code != "")

# arrange() from plyr sorts the data frame by descending funding amount.
venture_records_by_country_non_blanks_desc_amt <-
  arrange(venture_records_by_country_non_blanks, desc(Raised_Amount_USD))

# top9: the 9 highest-funded countries (first goal of the analysis).
top9 <- head(venture_records_by_country_non_blanks_desc_amt, n = 9)

# Next: identify the top 3 English-speaking countries (second goal).
# Reference list of countries where English is an official language:
# http://www.emmir.org/fileadmin/user_upload/admission/Countries_where_English_is_an_official_language.pdf
# We can see that
# usa, united kingdom and india are the top 3 English-speaking countries.
# English-official status per the reference PDF:
# USA - y, CHN - n, GBR - y, IND - y, CAN - y, FRA - n, ISR - n,
# DEU - n, JPN - n

# Lookup table of English-speaking status per country code.
# FIX(review): the original vector marked FRA (6th element) as "y",
# contradicting the comment list above - France is not English-official.
# Corrected to "n"; the top-3 result (USA/GBR/IND) is unaffected because
# FRA's venture total is below all three anyway.
country_name <- c("USA", "CHN", "GBR", "IND", "CAN", "FRA", "ISR", "DEU", "JPN")
eng_countries <- c("y", "n", "y", "y", "y", "n", "n", "n", "n")
eng_speaking <- data.frame(country_name, eng_countries)

# Tag the top9 countries (highest venture funding) with their
# English-speaking flag from eng_speaking.
merged_countries <- merge(top9, eng_speaking,
                          by.x = "Country_Code", by.y = "country_name")

# Keep the English-speaking countries only.
eng_speaking_countries <-
  subset(merged_countries, merged_countries$eng_countries == "y")

# Sort by descending venture funding and take the top 3.
eng_speaking_countries_sorted <-
  head(arrange(eng_speaking_countries,
               desc(eng_speaking_countries$Raised_Amount_USD)), n = 3)

# Output of eng_speaking_countries_sorted:
#   Country_Code Raised_Amount_USD eng_countries
# 1          USA      422510842796             y
# 2          GBR       20245627416             y
# 3          IND       14391858718             y

##########################################
#######Checkpoint 3 End###################
##########################################

##########################################
########Checkpoint 4 Start################
##########################################

# 4.1 Extract the primary sector of each category list from the
# category_list column: keep the first entry of the pipe-separated list.
# The pipe '|' is a regex special character, so it must be escaped.
# primary_category is added as a new column on master_frame.
master_frame$primary_category <-
  str_split_fixed(master_frame$category_list, "\\|", 3)[, 1]

# 4.2
# Use the mapping file 'mapping.csv' to map each primary sector to one
# of the eight main sectors ('Others' also counts as a main sector).

# Step 1 - load the mapping file into a data frame.
# check.names = FALSE is required because the csv headers contain
# special characters (comma, ampersand, slash, ...) that R's default
# name-mangling would otherwise rewrite.
mapping <- read.csv(
  file = "mapping.csv",
  stringsAsFactors = FALSE,
  sep = ",",
  check.names = FALSE
)

# Data-quality fix: in the provided mapping.csv the letters "na" were
# replaced by "0" in category_list (e.g. "A0lytics" should be
# "Analytics"), EXCEPT that "Enterprise 2.0" legitimately contains a 0.
# https://learn.upgrad.com/v/course/113/question/57073
# The order of the three replacements below matters:
# 1) restore "na" everywhere a "0" appears ...
mapping$category_list <- str_replace_all(mapping$category_list, "0", "na")
# 2) ... then undo the collateral damage to "Enterprise 2.0"
#    (".na" back to ".0")
mapping$category_list <- str_replace_all(mapping$category_list, "\\.na", ".0")
# 3) categories that now start with "na" get sentence case ("Na...")
mapping$category_list <- str_replace_all(mapping$category_list, "^na", "Na")

# Collapse the wide 0/1 sector columns into a single sector_names
# column: for each row, take the name of whichever of columns 2:10
# holds "1".
# Alternative using tidyr::gather:
# mapping_new <- gather(data = mapping, key = sector, value = value, "Automotive & Sports":"Social, Finance, Analytics, Advertising")
# mapping_new <- subset(mapping_new,mapping_new$value == "1")
#   (or equivalently: mapping_new <- mapping_new[!(mapping_new$value == 0),])
# mapping_new[,3] <- NULL
#   (or equivalently: mapping_new <- mapping_new[, -3])
mapping$sector_names <- names(mapping)[-1][apply(mapping[2:10], 1, function(x) which(x == "1"))]
mapping[, 2:10] <- NULL # wide sector columns are no longer required

# Lower-case both join keys before merging master_frame with mapping.
master_frame$primary_category <- tolower(master_frame$primary_category)
mapping$category_list <- tolower(mapping$category_list)

# Merge
# with master_frame on primary_category to add the sector column.
master_frame2 <- merge(
  master_frame,
  mapping,
  by.x = "primary_category",
  by.y = "category_list",
  all.x = TRUE
)

##########################################
#######Checkpoint 4 End###################
##########################################

##########################################
########Checkpoint 5 Start################
##########################################

# Data gathered so far:
#  - master_frame2: companies with their main sector mapped
#  - eng_speaking_countries_sorted: top 3 English-speaking countries
#    (USA / GBR / IND)
#  - funding type: venture
#
# Checkpoint 5 goal: find the most heavily invested main sectors in
# each of the three countries (for funding type FT, rounds of 5-15M
# USD). Build three data frames D1, D2, D3 - one per country - each
# containing:
#  - all master_frame columns plus primary sector and main sector
#  - the count of investments per main sector (separate column)
#  - the total amount invested per main sector (separate column)

# FT holds the chosen funding type - the single type whose average
# round size falls inside the 5-15M USD window ("venture").
ft_in_range <- which(
  master_frame_funding_type$Raised_Amount_USD >= 5000000 &
    master_frame_funding_type$Raised_Amount_USD <= 15000000
)
FT <- master_frame_funding_type[ft_in_range, ][1, 1]

# Filter master_frame2 (which carries the sector info) down to FT-type
# rounds of 5-15M USD each (Spark Funds only considers rounds in that
# range); the result is stored as master_frame3.
master_frame3 <- filter(master_frame2,
                        funding_round_type == FT &
                          raised_amount_usd >= 5000000 &
                          raised_amount_usd <= 15000000)

# Build D1, D2, D3 dynamically: one data frame per top-3
# English-speaking country (rows taken from eng_speaking_countries_sorted).
# Done as a loop so the analysis scales (e.g. to a top-5) without code
# changes. seq_len() replaces 1:nrow(), which misbehaves on 0 rows.
for (i in seq_len(nrow(eng_speaking_countries_sorted))) {
  nam <- paste("D", i, sep = "")
  df <- data.frame(
    subset(
      master_frame3,
      master_frame3$country_code == eng_speaking_countries_sorted[i, 1]
    ),
    stringsAsFactors = FALSE
  )
  assign(nam, df)
}

# Still to do:
#  - count of investments per main sector (separate column)
#  - total amount invested per main sector (separate column)

# Some categories in master_frame have no mapping in mapping.csv; label
# their sector as "Blanks". (Currently only D1 has one such record:
# category biotechnology/semiconductor, company HealthTell.)
# FIX(review): the original wrote "Blanks" into column 1 - which is
# primary_category after the merge - instead of sector_names, so the
# unmapped rows silently dropped out of every per-sector aggregate.
# Assign to sector_names, as the comment above always intended.
D1$sector_names[is.na(D1$sector_names)] <- "Blanks"
D2$sector_names[is.na(D2$sector_names)] <- "Blanks"
D3$sector_names[is.na(D3$sector_names)] <- "Blanks"

# Next, build per-sector aggregate frames for each country and merge
# them back onto D1, D2, D3.
# (continued) Later we merge these aggregates back onto D1, D2, D3.

# Helpers shared by all three countries (the original repeated this
# logic verbatim for D1, D2 and D3):
#   sector_rollup(d) - total raised amount (USD) per main sector
#   sector_counts(d) - number of investments per main sector
# FIX(review): na.rm was passed as the string "TRUE"; use logical TRUE.
sector_rollup <- function(d) {
  setNames(
    aggregate(d$raised_amount_usd,
              by = list(d$sector_names),
              FUN = sum,
              na.rm = TRUE),
    c("sector_names", "Aggregate_USD")
  )
}

sector_counts <- function(d) {
  setNames(data.frame(table(d$sector_names)),
           c("sector_names", "count_of_inv"))
}

D1_group_by_sector <- sector_rollup(D1)
D1_count_by_sector <- sector_counts(D1)
# Merge the aggregates into D1 on sector_names so every row carries its
# sector's totals.
D1 <- merge(D1, D1_group_by_sector, by = "sector_names", all.x = TRUE)
D1 <- merge(D1, D1_count_by_sector, by = "sector_names", all.x = TRUE)

# Similarly for D2 and D3.
D2_group_by_sector <- sector_rollup(D2)
D2_count_by_sector <- sector_counts(D2)
D2 <- merge(D2, D2_group_by_sector, by = "sector_names", all.x = TRUE)
D2 <- merge(D2, D2_count_by_sector, by = "sector_names", all.x = TRUE)

D3_group_by_sector <- sector_rollup(D3)
D3_count_by_sector <- sector_counts(D3)
D3 <- merge(D3, D3_group_by_sector, by = "sector_names", all.x = TRUE)
D3 <- merge(D3, D3_count_by_sector, by = "sector_names", all.x = TRUE)

# D1, D2, D3 are the three data frames for the top-3 English-speaking
# countries for FT ("venture") type.
# They contain all the columns of master_frame
#  + the primary category
#  + the main sector
#  + the per-sector investment count (separate column)
#  + the per-sector invested amount (separate column)

# Checkpoint 5 Answers -----------------------

# 5.1 Total number of investments (count)
sum(D1_count_by_sector$count_of_inv)
sum(D2_count_by_sector$count_of_inv)
sum(D3_count_by_sector$count_of_inv)

# 5.2 Total amount of investment (USD)
sum(D1_group_by_sector$Aggregate_USD)
sum(D2_group_by_sector$Aggregate_USD)
sum(D3_group_by_sector$Aggregate_USD)

# Sort each country's sector counts once, descending by investment
# count. FIX(review): the original re-ran arrange() three times per
# country (once per rank); hoist the sort so it runs once.
D1_by_count_desc <- arrange(D1_count_by_sector, desc(count_of_inv))
D2_by_count_desc <- arrange(D2_count_by_sector, desc(count_of_inv))
D3_by_count_desc <- arrange(D3_count_by_sector, desc(count_of_inv))

# 5.3 Top sector name (investment-count-wise)
# 5.6 Number of investments in top sector
top_D1_sector_nbr <- D1_by_count_desc[1, ]
top_D2_sector_nbr <- D2_by_count_desc[1, ]
top_D3_sector_nbr <- D3_by_count_desc[1, ]

# 5.4 Second sector name (investment-count-wise)
# 5.7 Number of investments in second sector
second_D1_sector_nbr <- D1_by_count_desc[2, ]
second_D2_sector_nbr <- D2_by_count_desc[2, ]
second_D3_sector_nbr <- D3_by_count_desc[2, ]

# 5.5 Third sector name (investment-count-wise)
# 5.8 Number of investments in third sector
third_D1_sector_nbr <- D1_by_count_desc[3, ]
third_D2_sector_nbr <- D2_by_count_desc[3, ]
third_D3_sector_nbr <- D3_by_count_desc[3, ]

# 5.9 For the top sector (count-wise), which company received the
# highest investment?
# Subset D1 to keep only top-sector rows.
D1_subset_top_sector <- filter(D1, sector_names == top_D1_sector_nbr[1, 1])

# Total raised per company within D1's top sector.
D1_funding_by_company <- setNames(
  aggregate(D1_subset_top_sector$raised_amount_usd,
            by = list(D1_subset_top_sector$company_permalink),
            FUN = sum, na.rm = TRUE),
  c("Permalink", "Total_inv")
)

# Sort descending by total investment, then keep EVERY company tied at
# the maximum - there may be more than one, so we cannot just sort and
# take row 1.
D1_top_funded_amount <- arrange(D1_funding_by_company, desc(Total_inv))
D1_top_companies <- filter(D1_funding_by_company,
                           Total_inv == D1_top_funded_amount[1, 2])

# FIX(review): the original compared company_permalink with '==' against
# D1_top_companies$Permalink; when more than one company ties, that
# vector has length > 1 and '==' silently recycles. '%in%' implements
# set membership correctly.
D1_top_company_name <-
  filter(D1, company_permalink %in% D1_top_companies$Permalink)[1, "name"]

# Similarly for D2 (and D3 below).
# Subset D2 to keep only top-sector rows.
D2_subset_top_sector <- filter(D2, sector_names == top_D2_sector_nbr[1, 1])

# Total raised per company within D2's top sector.
D2_funding_by_company <- setNames(
  aggregate(D2_subset_top_sector$raised_amount_usd,
            by = list(D2_subset_top_sector$company_permalink),
            FUN = sum, na.rm = TRUE),
  c("Permalink", "Total_inv")
)

D2_top_funded_amount <- arrange(D2_funding_by_company, desc(Total_inv))
# Keep every company tied at the maximum (there may be more than one).
D2_top_companies <- filter(D2_funding_by_company,
                           Total_inv == D2_top_funded_amount[1, 2])
# '%in%' instead of '==' so ties do not trigger recycling.
D2_top_company_name <-
  filter(D2, company_permalink %in% D2_top_companies$Permalink)[1, "name"]

# And D3...
# Subset D3 to keep only top-sector rows.
D3_subset_top_sector <- filter(D3, sector_names == top_D3_sector_nbr[1, 1])

# Total raised per company within D3's top sector.
D3_funding_by_company <- setNames(
  aggregate(D3_subset_top_sector$raised_amount_usd,
            by = list(D3_subset_top_sector$company_permalink),
            FUN = sum, na.rm = TRUE),
  c("Permalink", "Total_inv")
)

D3_top_funded_amount <- arrange(D3_funding_by_company, desc(Total_inv))
# Keep every company tied at the maximum (there may be more than one,
# so we cannot just sort descending and pick row 1).
D3_top_companies <- filter(D3_funding_by_company,
                           Total_inv == D3_top_funded_amount[1, 2])
# FIX(review): '%in%' instead of '==' - with more than one tied company
# the '==' comparison against a length > 1 vector silently recycles.
D3_top_company_name <-
  filter(D3, company_permalink %in% D3_top_companies$Permalink)[1, "name"]

# 5.10 For the second-best sector (count-wise), which company received
# the highest investment?
# Subset D1 to keep only second-sector rows.
D1_subset_second_sector <-
  subset(D1, D1$sector_names == second_D1_sector_nbr[1, 1])

# Total raised per company within D1's second sector.
D1_funding_by_company <- setNames(
  aggregate(D1_subset_second_sector$raised_amount_usd,
            by = list(D1_subset_second_sector$company_permalink),
            FUN = sum, na.rm = TRUE),
  c("Permalink", "Total_inv")
)

D1_second_funded_amount <- arrange(D1_funding_by_company, desc(Total_inv))
# Keep every company tied at the maximum (there may be more than one).
D1_second_companies <- filter(D1_funding_by_company,
                              Total_inv == D1_second_funded_amount[1, 2])
# '%in%' instead of '==' so ties do not trigger recycling.
D1_second_company_name <-
  filter(D1, company_permalink %in% D1_second_companies$Permalink)[1, "name"]

# Subset D2 to keep only second-sector rows.
D2_subset_second_sector <-
  subset(D2, D2$sector_names == second_D2_sector_nbr[1, 1])

# Total raised per company within D2's second sector.
D2_funding_by_company <- setNames(
  aggregate(D2_subset_second_sector$raised_amount_usd,
            by = list(D2_subset_second_sector$company_permalink),
            FUN = sum, na.rm = TRUE),
  c("Permalink", "Total_inv")
)

D2_second_funded_amount <- arrange(D2_funding_by_company, desc(Total_inv))
# Keep every company tied at the maximum (there may be more than one,
# so we cannot just sort descending and pick row 1).
D2_second_companies <- filter(D2_funding_by_company,
                              Total_inv == D2_second_funded_amount[1, 2])
# FIX(review): '%in%' instead of '==' - with more than one tied company
# the '==' comparison against a length > 1 vector silently recycles.
D2_second_company_name <-
  filter(D2, company_permalink %in% D2_second_companies$Permalink)[1, "name"]

# Subset D3 to keep only second-sector rows.
D3_subset_second_sector <-
  subset(D3, D3$sector_names == second_D3_sector_nbr[1, 1])

# Total raised per company within D3's second sector.
D3_funding_by_company <- setNames(
  aggregate(D3_subset_second_sector$raised_amount_usd,
            by = list(D3_subset_second_sector$company_permalink),
            FUN = sum, na.rm = TRUE),
  c("Permalink", "Total_inv")
)

D3_second_funded_amount <- arrange(D3_funding_by_company, desc(Total_inv))
# Keep every company tied at the maximum (there may be more than one).
D3_second_companies <- filter(D3_funding_by_company,
                              Total_inv == D3_second_funded_amount[1, 2])
# '%in%' instead of '==' so ties do not trigger recycling.
D3_second_company_name <-
  filter(D3, company_permalink %in% D3_second_companies$Permalink)[1, "name"]

##########################################
#######Checkpoint 5 End###################
##########################################

##########################################
########Checkpoint 6 Start################
##########################################
# This checkpoint is on Tableau
##########################################
#######Checkpoint 6 End###################
##########################################

# -----***Write Unit Tests***---------
# These tests were written by analysing the companies and rounds2 data
# in Excel and hard-coding the expected constants. The constants are
# compared with the output of the R code above: any later change
# (performance, formatting, ...) that alters the results flips the
# output from "Code looks fine" to the ISSUE banner.
#
# Chosen type is venture (as per the xls analysis).
# Expected venture totals (5-15M rounds, per country):
#   IND total   2976543602
#   GBR total   5436843539
#   USA total 108531347515

# FIX(review): scalar conditions inside 'if' should use the
# short-circuit '&&' rather than the vectorised '&'; since R 4.3,
# '&&' also errors on length > 1 inputs, catching accidental vectors.
if (sum(D1$raised_amount_usd, na.rm = TRUE) == 108531347515 &&
    sum(D2$raised_amount_usd, na.rm = TRUE) == 5436843539 &&
    sum(D3$raised_amount_usd, na.rm = TRUE) == 2976543602 &&
    FT == "venture" &&
    eng_speaking_countries_sorted[1, 1] == "USA" &&
    eng_speaking_countries_sorted[2, 1] == "GBR" &&
    eng_speaking_countries_sorted[3, 1] == "IND") {
  cat('Code looks fine')
} else {
  cat('!!!****ISSUE****!!!')
}
/Case_study_scripts.R
no_license
MonicaAlluri/Case_Study_R_Tableau
R
false
false
23,903
r
# =============================================================================
# Spark Funds investment analysis (Checkpoints 1-5)
#
# Reads the CrunchBase extracts (companies.txt, rounds2.csv, mapping.csv),
# merges rounds with company details, picks the funding type whose average
# round is in the 5-15M USD sweet spot, finds the top English-speaking
# countries for that type, and ranks sectors / companies per country.
#
# One-time setup if the packages are missing:
#   install.packages(c("plyr", "dplyr", "stringr", "tidyr"))
# =============================================================================
library(plyr)
library(dplyr)
library(stringr)
library(tidyr)

##########################################
####### Checkpoint 1 Start ###############
##########################################

# NOTE(review): setwd() makes the script machine-specific; kept so the
# relative file reads below continue to work, but prefer running from the
# project directory.  (The original's rm(list = ls()) was dropped: wiping
# the caller's environment is a side effect a script should not have.)
setwd("C:/pgdds/Course 1/Project")
getwd()

# quote = "" disables quote processing in BOTH reads: one permalink contains
# embedded double quotes (/ORGANIZATION/ZWAYO-"ON-DEMAND-VALET-PARKING") and
# the two files must be parsed identically for the join keys to match.
companies <- read.table(
  file = "companies.txt",
  sep = "\t",
  header = TRUE,
  fill = TRUE,
  comment.char = "",
  stringsAsFactors = FALSE,
  quote = ""
)
rounds2 <- read.csv(file = "rounds2.csv", stringsAsFactors = FALSE, quote = "")

# Check structure of the two data frames.
str(companies)
str(rounds2)

# Permalink letter case differs between the files, so normalise both join keys.
rounds2$company_permalink <- tolower(rounds2$company_permalink)
companies$permalink <- tolower(companies$permalink)

# 1.1 / 1.2 Unique companies in rounds2 and in companies (both 66368).
length(unique(rounds2$company_permalink))
length(unique(companies$permalink))

# 1.3 The unique key in the companies frame is the permalink column.

# 1.4 / 1.5 Left-outer merge: keep every rounds2 row and attach company
# details; rounds2 companies missing from companies would surface as NA
# in the company-side columns.
master_frame <- merge(rounds2, companies,
                      by.x = "company_permalink", by.y = "permalink",
                      all.x = TRUE)

# 1.4 Any rounds2 company absent from companies?  Answer: FALSE, i.e. no.
# (FIX: the original compared is.na() against the string "TRUE"; anyNA()
# is the direct test.)
anyNA(master_frame$name)

# 1.5 master_frame has as many rows as rounds2 (left outer join), and the
# same number of distinct companies.
nrow(master_frame)
length(unique(master_frame$company_permalink))

##########################################
######### Checkpoint 1 End ###############
##########################################

##########################################
####### Checkpoint 2 Start ###############
##########################################

# 2.1 Average raised amount per funding round type.
# (FIX: na.rm is the logical TRUE, not the string "TRUE".)
master_frame_rollup_raised_amt <- setNames(
  aggregate(
    master_frame$raised_amount_usd,
    by = list(master_frame$funding_round_type),
    FUN = mean,
    na.rm = TRUE
  ),
  c("Funding_Round_Type", "Raised_Amount_USD")
)

# Keep only the four types Spark Funds is considering.
master_frame_funding_type <- subset(
  master_frame_rollup_raised_amt,
  Funding_Round_Type %in% c("venture", "seed", "angel", "private_equity")
)

# 2.2 The suitable type is the one whose average round is within 5-15M USD.
# Answer: "venture" (the only one in range).  FT drives the rest of the
# analysis and stays re-usable for other types/ranges.
FT <- subset(
  master_frame_funding_type,
  Raised_Amount_USD >= 5000000 & Raised_Amount_USD <= 15000000
)[1, "Funding_Round_Type"]
FT

##########################################
####### Checkpoint 2 End #################
##########################################

##########################################
######## Checkpoint 3 Start ##############
##########################################

# 3.1 Top nine countries by total funding for the chosen type (FT).
venture_records <- subset(master_frame, funding_round_type == FT)

venture_records_by_country <- setNames(
  aggregate(
    venture_records$raised_amount_usd,
    by = list(venture_records$country_code),
    FUN = sum,
    na.rm = TRUE
  ),
  c("Country_Code", "Raised_Amount_USD")
)

# Drop rows with a blank country code, sort descending, keep the top 9.
venture_records_by_country <- subset(venture_records_by_country,
                                     Country_Code != "")
top9 <- head(arrange(venture_records_by_country, desc(Raised_Amount_USD)),
             n = 9)

# English-speaking flag per country, per
# http://www.emmir.org/fileadmin/user_upload/admission/Countries_where_English_is_an_official_language.pdf
# (FIX: the original vector marked FRA as "y", contradicting its own comment
# table; French is not on that list.  The top-3 result is unaffected.)
eng_speaking <- data.frame(
  country_name  = c("USA", "CHN", "GBR", "IND", "CAN", "FRA", "ISR", "DEU", "JPN"),
  eng_countries = c("y",   "n",   "y",   "y",   "y",   "n",   "n",   "n",   "n"),
  stringsAsFactors = FALSE
)

# Attach the flag to the top-9 countries and keep the English-speaking ones.
merged_countries <- merge(top9, eng_speaking,
                          by.x = "Country_Code", by.y = "country_name")
eng_speaking_countries <- subset(merged_countries, eng_countries == "y")

# Top 3 English-speaking countries by total funding:
#   USA 422510842796, GBR 20245627416, IND 14391858718
eng_speaking_countries_sorted <- head(
  arrange(eng_speaking_countries, desc(Raised_Amount_USD)), n = 3)

##########################################
####### Checkpoint 3 End #################
##########################################

##########################################
######## Checkpoint 4 Start ##############
##########################################

# 4.1 Primary sector = first entry of the pipe-separated category_list
# (the pipe is a regex metacharacter and must be escaped).
master_frame$primary_category <-
  str_split_fixed(master_frame$category_list, "\\|", 3)[, 1]

# 4.2 Map each primary sector to one of the eight main sectors.
# check.names = FALSE preserves the sector column headers, which contain
# commas, ampersands and slashes.
mapping <- read.csv(
  file = "mapping.csv",
  stringsAsFactors = FALSE,
  sep = ",",
  check.names = FALSE
)

# mapping.csv has the letters "na" garbled into "0" in category_list
# (e.g. "A0lytics" for "Analytics"); undo that, restore the literal ".0"
# of "Enterprise 2.0", and sentence-case a leading "na".
mapping$category_list <- str_replace_all(mapping$category_list, "0", "na")
mapping$category_list <- str_replace_all(mapping$category_list, "\\.na", ".0")
mapping$category_list <- str_replace_all(mapping$category_list, "^na", "Na")

# Collapse the wide 0/1 sector columns into one sector_names column: for
# each row, take the name of the column holding a 1, then drop the wide
# columns.  (gather()/pivot_longer() would work equally well.)
mapping$sector_names <-
  names(mapping)[-1][apply(mapping[2:10], 1, function(x) which(x == "1"))]
mapping[, 2:10] <- NULL

# Case-insensitive join key on both sides.
master_frame$primary_category <- tolower(master_frame$primary_category)
mapping$category_list <- tolower(mapping$category_list)

# master_frame2 = master_frame + main sector for each primary category.
master_frame2 <- merge(
  master_frame,
  mapping,
  by.x = "primary_category",
  by.y = "category_list",
  all.x = TRUE
)

##########################################
####### Checkpoint 4 End #################
##########################################

##########################################
######## Checkpoint 5 Start ##############
##########################################

# Keep only FT-type rounds within the 5-15M USD range.
master_frame3 <- filter(
  master_frame2,
  funding_round_type == FT &
    raised_amount_usd >= 5000000 &
    raised_amount_usd <= 15000000
)

# Build D1..Dn, one data frame per top English-speaking country; written as
# a loop so the analysis scales unchanged to more countries.
for (i in seq_len(nrow(eng_speaking_countries_sorted))) {
  df <- subset(
    master_frame3,
    country_code == eng_speaking_countries_sorted[i, "Country_Code"]
  )
  # Categories with no entry in mapping.csv have NA sector_names; bucket
  # them under "Blanks".  (FIX: the original assigned "Blanks" to column 1,
  # the primary category, so those rows silently dropped out of the sector
  # aggregation below.)
  df$sector_names[is.na(df$sector_names)] <- "Blanks"
  assign(paste0("D", i), df)
}

# Add per-sector investment totals and counts to each country frame, and
# keep the per-sector summaries for the checkpoint answers.
# (De-duplicates the original's three copy-pasted D1/D2/D3 stanzas.)
for (i in seq_len(nrow(eng_speaking_countries_sorted))) {
  d <- get(paste0("D", i))
  grp <- setNames(
    aggregate(d$raised_amount_usd, by = list(d$sector_names),
              FUN = sum, na.rm = TRUE),
    c("sector_names", "Aggregate_USD")
  )
  cnt <- setNames(as.data.frame(table(d$sector_names)),
                  c("sector_names", "count_of_inv"))
  d <- merge(d, grp, by = "sector_names", all.x = TRUE)
  d <- merge(d, cnt, by = "sector_names", all.x = TRUE)
  assign(paste0("D", i, "_group_by_sector"), grp)
  assign(paste0("D", i, "_count_by_sector"), cnt)
  assign(paste0("D", i), d)
}

# Checkpoint 5 Answers -----------------------

# 5.1 Total number of investments (count) per country.
sum(D1_count_by_sector$count_of_inv)
sum(D2_count_by_sector$count_of_inv)
sum(D3_count_by_sector$count_of_inv)

# 5.2 Total amount of investment (USD) per country.
sum(D1_group_by_sector$Aggregate_USD)
sum(D2_group_by_sector$Aggregate_USD)
sum(D3_group_by_sector$Aggregate_USD)

# Sector at a given rank (by number of investments) for one country.
ranked_sector <- function(counts, rank) {
  arrange(counts, desc(count_of_inv))[rank, ]
}

# 5.3-5.8 Top / second / third sectors and their investment counts.
top_D1_sector_nbr    <- ranked_sector(D1_count_by_sector, 1)
top_D2_sector_nbr    <- ranked_sector(D2_count_by_sector, 1)
top_D3_sector_nbr    <- ranked_sector(D3_count_by_sector, 1)
second_D1_sector_nbr <- ranked_sector(D1_count_by_sector, 2)
second_D2_sector_nbr <- ranked_sector(D2_count_by_sector, 2)
second_D3_sector_nbr <- ranked_sector(D3_count_by_sector, 2)
third_D1_sector_nbr  <- ranked_sector(D1_count_by_sector, 3)
third_D2_sector_nbr  <- ranked_sector(D2_count_by_sector, 3)
third_D3_sector_nbr  <- ranked_sector(D3_count_by_sector, 3)

# Name of the company with the highest total investment within one sector of
# a country frame.  Ties on the maximum are possible, so filter on the max
# total rather than blindly taking the first sorted row.
top_company_for_sector <- function(d, sector) {
  rows <- subset(d, sector_names == sector)
  by_company <- setNames(
    aggregate(rows$raised_amount_usd, by = list(rows$company_permalink),
              FUN = sum, na.rm = TRUE),
    c("Permalink", "Total_inv")
  )
  best <- subset(by_company, Total_inv == max(Total_inv))
  d[d$company_permalink == best$Permalink[1], "name"][1]
}

# 5.9 Highest-funded company in each country's top sector.
D1_top_company_name <- top_company_for_sector(D1, top_D1_sector_nbr[1, 1])
D2_top_company_name <- top_company_for_sector(D2, top_D2_sector_nbr[1, 1])
D3_top_company_name <- top_company_for_sector(D3, top_D3_sector_nbr[1, 1])

# 5.10 Highest-funded company in each country's second sector.
D1_second_company_name <- top_company_for_sector(D1, second_D1_sector_nbr[1, 1])
D2_second_company_name <- top_company_for_sector(D2, second_D2_sector_nbr[1, 1])
D3_second_company_name <- top_company_for_sector(D3, second_D3_sector_nbr[1, 1])

##########################################
####### Checkpoint 5 End #################
##########################################

##########################################
######## Checkpoint 6 Start ##############
##########################################
# This checkpoint is on Tableau.
##########################################
####### Checkpoint 6 End #################
##########################################

# ----- *** Unit tests *** -----
# Constants below were derived independently in Excel; if any future code
# change above (performance, formatting, ...) alters the results, this check
# flags it.  Venture totals for 5-15M rounds: USA 108531347515,
# GBR 5436843539, IND 2976543602.
if (sum(D1$raised_amount_usd, na.rm = TRUE) == 108531347515 &&
    sum(D2$raised_amount_usd, na.rm = TRUE) == 5436843539 &&
    sum(D3$raised_amount_usd, na.rm = TRUE) == 2976543602 &&
    FT == "venture" &&
    eng_speaking_countries_sorted[1, 1] == "USA" &&
    eng_speaking_countries_sorted[2, 1] == "GBR" &&
    eng_speaking_countries_sorted[3, 1] == "IND") {
  cat('Code looks fine')
} else {
  cat('!!!****ISSUE****!!!')
}
# Minimal H2O deep-learning example on the iris dataset.
library(h2o)
# Start (or connect to) a local H2O cluster using all available cores.
h2o.init(nthreads = -1)
# Import the iris data (with header row) straight from GitHub into H2O.
datasets <- "https://raw.githubusercontent.com/DarrenCook/h2o/bk/datasets/"
data <- h2o.importFile(paste0(datasets,"iris_wheader.csv"))
# Response column; every remaining column is a predictor.
y <- "class"
x <- setdiff(names(data), y)
# 80/20 train/test split.
parts <- h2o.splitFrame(data, 0.8)
train <- parts[[1]]
test <- parts[[2]]
# Fit a deep-learning classifier and score the held-out rows.
m <- h2o.deeplearning(x, y, train)
p <- h2o.predict(m, test)
/code/iris_deeplearning.R
permissive
DarrenCook/h2o
R
false
false
352
r
# Minimal H2O deep-learning example on the iris dataset.
library(h2o)
# Start (or connect to) a local H2O cluster using all available cores.
h2o.init(nthreads = -1)
# Import the iris data (with header row) straight from GitHub into H2O.
datasets <- "https://raw.githubusercontent.com/DarrenCook/h2o/bk/datasets/"
data <- h2o.importFile(paste0(datasets,"iris_wheader.csv"))
# Response column; every remaining column is a predictor.
y <- "class"
x <- setdiff(names(data), y)
# 80/20 train/test split.
parts <- h2o.splitFrame(data, 0.8)
train <- parts[[1]]
test <- parts[[2]]
# Fit a deep-learning classifier and score the held-out rows.
m <- h2o.deeplearning(x, y, train)
p <- h2o.predict(m, test)
#-------------------------------------------------------------------------------
# Name: 0_COAdb_creator.r
# Purpose: Create an empty, new COA databases
# Author: Christopher Tracey
# Created: 2019-02-14
# Updated: 2019-02-20
#
# Updates:
# * 2019-02-20 - minor cleanup and documentation
# To Do List/Future ideas:
#
#-------------------------------------------------------------------------------
# NOTE(review): despite the header above, the code below builds an NHA site
# report PDF for a selected site -- the header looks stale (copied template);
# confirm against the repository history.

# Remnant of an ArcGIS script-tool wrapper, kept commented out:
#tool_exec <- function(in_params, out_params)
#
#{
#
# check and load required libraries
if (!requireNamespace("here", quietly = TRUE)) install.packages("here")
require(here)
if (!requireNamespace("arcgisbinding", quietly = TRUE)) install.packages("arcgisbinding")
require(arcgisbinding)
if (!requireNamespace("RSQLite", quietly = TRUE)) install.packages("RSQLite")
require(RSQLite)
if (!requireNamespace("knitr", quietly = TRUE)) install.packages("knitr")
require(knitr)
library(xtable)

# Validate the ArcGIS license; required before any other arcgisbinding call.
arc.check_product()

## Network Paths and such
biotics_gdb <- "W:/Heritage/Heritage_Data/Biotics_datasets.gdb"

# open the NHA feature class and select and NHA
nha <- arc.open(here("NHA_newTemplate.gdb","NHA_Core"))
### nha <- arc.open("COA.pgh-GIS0.sde/PNHP.DBO.NHA_Core") # closer to opening over SDE
selected_nha <- arc.select(nha, where_clause="SITE_NAME='Town Hill Barren'")
nha_siteName <- selected_nha$SITE_NAME
# File-safe name: strip spaces from the site name for use in output filenames.
nha_filename <- gsub(" ", "", nha_siteName, fixed=TRUE)

## Build the Species Table #########################
# open the related species table and get the rows that match the NHA join id from above
nha_relatedSpecies <- arc.open(here("NHA_newTemplate.gdb","NHA_SpeciesTable"))
selected_nha_relatedSpecies <- arc.select(nha_relatedSpecies) # , where_clause=paste("\"NHD_JOIN_ID\"","=",sQuote(selected_nha$NHA_JOIN_ID),sep=" ")
selected_nha_relatedSpecies <- selected_nha_relatedSpecies[which(selected_nha_relatedSpecies$NHA_JOIN_ID==selected_nha$NHA_JOIN_ID),]
#! consider integrating with the previous line the select statement
SD_speciesTable <- selected_nha_relatedSpecies[c("EO_ID","ELCODE","SNAME","SCOMNAME","ELEMENT_TYPE","G_RANK","S_RANK","S_PROTECTI","PBSSTATUS","LAST_OBS_D","BASIC_EO_R")] # subset to columns that are needed.
eoid_list <- paste(toString(SD_speciesTable$EO_ID), collapse = ",") # make a list of EOIDs to get data from later
# NOTE(review): ELCODE_list is built but never used below -- presumably for a
# later query; confirm before removing.
ELCODE_list <- paste(toString(sQuote(unique(SD_speciesTable$ELCODE))), collapse = ",") # make a list of EOIDs to get data from later

## Get the EO data from Biotics data ####################
#
ptreps <- arc.open(paste(biotics_gdb,"eo_ptreps",sep="/"))
ptreps_selected <- arc.select(ptreps, fields=c("EO_ID", "SNAME", "EO_DATA", "GEN_DESC","MGMT_COM","GENERL_COM"), where_clause=paste("EO_ID IN (", eoid_list, ")",sep="") )
SD_eodata <- ptreps_selected
#SD_eodata <- merge(ptreps_selected,SD_speciesTable[c("ELCODE","SNAME")],all.x=TRUE)

## Write the output document for the site ###############
setwd(here("output"))
# knit2pdf errors for some reason...just knit then call directly
knit(here("NHA_SiteTemplate.rnw"), output=paste(nha_filename, ".tex",sep=""))
call <- paste0("pdflatex -interaction=nonstopmode ", nha_filename , ".tex")
# call <- paste0("pdflatex -halt-on-error -interaction=nonstopmode ",model_run_name , ".tex") # this stops execution if there is an error. Not really necessary
system(call)
system(call) # 2nd run to apply citation numbers

# delete .txt, .log etc if pdf is created successfully.
fn_ext <- c(".log",".aux",".out",".tex")
if (file.exists(paste(nha_filename, ".pdf",sep=""))){
  #setInternet2(TRUE)
  #download.file(fileURL ,destfile,method="auto")
  for(i in 1:NROW(fn_ext)){
    fn <- paste(nha_filename, fn_ext[i],sep="")
    if (file.exists(fn)){
      file.remove(fn)
    }
  }
}
/Old/scripts/NHA_SiteTemplate.r
no_license
PNHP/NHA_newTemplate
R
false
false
3,714
r
#-------------------------------------------------------------------------------
# Name: 0_COAdb_creator.r
# Purpose: Create an empty, new COA databases
# Author: Christopher Tracey
# Created: 2019-02-14
# Updated: 2019-02-20
#
# Updates:
# * 2019-02-20 - minor cleanup and documentation
# To Do List/Future ideas:
#
#-------------------------------------------------------------------------------
# NOTE(review): despite the header above, the code below builds an NHA site
# report PDF for a selected site -- the header looks stale (copied template);
# confirm against the repository history.

# Remnant of an ArcGIS script-tool wrapper, kept commented out:
#tool_exec <- function(in_params, out_params)
#
#{
#
# check and load required libraries
if (!requireNamespace("here", quietly = TRUE)) install.packages("here")
require(here)
if (!requireNamespace("arcgisbinding", quietly = TRUE)) install.packages("arcgisbinding")
require(arcgisbinding)
if (!requireNamespace("RSQLite", quietly = TRUE)) install.packages("RSQLite")
require(RSQLite)
if (!requireNamespace("knitr", quietly = TRUE)) install.packages("knitr")
require(knitr)
library(xtable)

# Validate the ArcGIS license; required before any other arcgisbinding call.
arc.check_product()

## Network Paths and such
biotics_gdb <- "W:/Heritage/Heritage_Data/Biotics_datasets.gdb"

# open the NHA feature class and select and NHA
nha <- arc.open(here("NHA_newTemplate.gdb","NHA_Core"))
### nha <- arc.open("COA.pgh-GIS0.sde/PNHP.DBO.NHA_Core") # closer to opening over SDE
selected_nha <- arc.select(nha, where_clause="SITE_NAME='Town Hill Barren'")
nha_siteName <- selected_nha$SITE_NAME
# File-safe name: strip spaces from the site name for use in output filenames.
nha_filename <- gsub(" ", "", nha_siteName, fixed=TRUE)

## Build the Species Table #########################
# open the related species table and get the rows that match the NHA join id from above
nha_relatedSpecies <- arc.open(here("NHA_newTemplate.gdb","NHA_SpeciesTable"))
selected_nha_relatedSpecies <- arc.select(nha_relatedSpecies) # , where_clause=paste("\"NHD_JOIN_ID\"","=",sQuote(selected_nha$NHA_JOIN_ID),sep=" ")
selected_nha_relatedSpecies <- selected_nha_relatedSpecies[which(selected_nha_relatedSpecies$NHA_JOIN_ID==selected_nha$NHA_JOIN_ID),]
#! consider integrating with the previous line the select statement
SD_speciesTable <- selected_nha_relatedSpecies[c("EO_ID","ELCODE","SNAME","SCOMNAME","ELEMENT_TYPE","G_RANK","S_RANK","S_PROTECTI","PBSSTATUS","LAST_OBS_D","BASIC_EO_R")] # subset to columns that are needed.
eoid_list <- paste(toString(SD_speciesTable$EO_ID), collapse = ",") # make a list of EOIDs to get data from later
# NOTE(review): ELCODE_list is built but never used below -- presumably for a
# later query; confirm before removing.
ELCODE_list <- paste(toString(sQuote(unique(SD_speciesTable$ELCODE))), collapse = ",") # make a list of EOIDs to get data from later

## Get the EO data from Biotics data ####################
#
ptreps <- arc.open(paste(biotics_gdb,"eo_ptreps",sep="/"))
ptreps_selected <- arc.select(ptreps, fields=c("EO_ID", "SNAME", "EO_DATA", "GEN_DESC","MGMT_COM","GENERL_COM"), where_clause=paste("EO_ID IN (", eoid_list, ")",sep="") )
SD_eodata <- ptreps_selected
#SD_eodata <- merge(ptreps_selected,SD_speciesTable[c("ELCODE","SNAME")],all.x=TRUE)

## Write the output document for the site ###############
setwd(here("output"))
# knit2pdf errors for some reason...just knit then call directly
knit(here("NHA_SiteTemplate.rnw"), output=paste(nha_filename, ".tex",sep=""))
call <- paste0("pdflatex -interaction=nonstopmode ", nha_filename , ".tex")
# call <- paste0("pdflatex -halt-on-error -interaction=nonstopmode ",model_run_name , ".tex") # this stops execution if there is an error. Not really necessary
system(call)
system(call) # 2nd run to apply citation numbers

# delete .txt, .log etc if pdf is created successfully.
fn_ext <- c(".log",".aux",".out",".tex")
if (file.exists(paste(nha_filename, ".pdf",sep=""))){
  #setInternet2(TRUE)
  #download.file(fileURL ,destfile,method="auto")
  for(i in 1:NROW(fn_ext)){
    fn <- paste(nha_filename, fn_ext[i],sep="")
    if (file.exists(fn)){
      file.remove(fn)
    }
  }
}
# run_analysis.R -- Getting & Cleaning Data course project.
# Merges the UCI HAR train/test sets, keeps the mean/std measurements,
# attaches descriptive activity names, and writes per-subject/per-activity
# averages to a tidy output file.
library(plyr)

#---------------------------
# Prep: Files and directories
#---------------------------
# FIX: the original literal "UCI\ HAR\ Dataset" contained "\ ", an invalid
# escape sequence that R refuses to parse; the directory name is plain
# spaces.  file.path() builds portable paths.
uci_data_dir <- "UCI HAR Dataset"

txt_x_train         <- file.path(uci_data_dir, "train", "X_train.txt")
txt_x_test          <- file.path(uci_data_dir, "test", "X_test.txt")
txt_y_train         <- file.path(uci_data_dir, "train", "y_train.txt")
txt_y_test          <- file.path(uci_data_dir, "test", "y_test.txt")
txt_subject_train   <- file.path(uci_data_dir, "train", "subject_train.txt")
txt_subject_test    <- file.path(uci_data_dir, "test", "subject_test.txt")
txt_features        <- file.path(uci_data_dir, "features.txt")
txt_activity_labels <- file.path(uci_data_dir, "activity_labels.txt")

#---------------------------------------------------------------------
# Step 1 Merges the training and the test sets to create one data set.
#---------------------------------------------------------------------
# Measurements (X), activity codes (y) and subject ids, each train + test.
df_x <- rbind(read.table(txt_x_train), read.table(txt_x_test))
df_y <- rbind(read.table(txt_y_train), read.table(txt_y_test))
df_subject_data <- rbind(read.table(txt_subject_train),
                         read.table(txt_subject_test))

#-----------------------------------------------------------------------------------------------
# Step 2 Extracts only the measurements on the mean and standard deviation for each measurement.
#-----------------------------------------------------------------------------------------------
df_features <- read.table(txt_features)

# Only features literally named "-mean()" or "-std()" (column 2 holds the
# feature names); the "()" must be escaped in the regex.
df_mean_std_features <- grep("-(mean|std)\\(\\)", df_features[, 2])
df_x <- df_x[, df_mean_std_features]
names(df_x) <- df_features[df_mean_std_features, 2]

#-------------------------------------------------------------------------------
# Step 3 Uses descriptive activity names to name the activities in the data set.
#-------------------------------------------------------------------------------
# Activity codes are 1..6 and index directly into the labels table.
df_activities <- read.table(txt_activity_labels)
df_y[, 1] <- df_activities[df_y[, 1], 2]
names(df_y) <- "activity"

#--------------------------------------------------------------------------
# Step 4 Appropriately labels the data set with descriptive variable names.
#--------------------------------------------------------------------------
names(df_subject_data) <- "subject"

# Bind everything into the full data set: 66 measurements + activity + subject.
df_total <- cbind(df_x, df_y, df_subject_data)

#-------------------------------------------------------------------------------------------------------------------------
# Step 5 Creates a second, independent tidy data set with the average of each variable for each activity and each subject.
#-------------------------------------------------------------------------------------------------------------------------
# ncol(df_total) == 68: columns 1:66 are measurements, then activity, subject.
df_avg <- ddply(df_total, .(subject, activity),
                function(x) colMeans(x[, 1:66]))

# FIX: spell row.names in full -- the original "row.name" only worked via
# partial argument matching.
write.table(df_avg, "activity-rec-avg.txt", row.names = FALSE)
/run_analysis.R
no_license
mikethwolff/Coursera-Getting-and-Cleaning-Data-Project
R
false
false
3,172
r
# run_analysis.R -- Getting & Cleaning Data course project.
# Merges the UCI HAR train/test sets, keeps the mean/std measurements,
# attaches descriptive activity names, and writes per-subject/per-activity
# averages to a tidy output file.
library(plyr)

#---------------------------
# Prep: Files and directories
#---------------------------
# FIX: the original literal "UCI\ HAR\ Dataset" contained "\ ", an invalid
# escape sequence that R refuses to parse; the directory name is plain
# spaces.  file.path() builds portable paths.
uci_data_dir <- "UCI HAR Dataset"

txt_x_train         <- file.path(uci_data_dir, "train", "X_train.txt")
txt_x_test          <- file.path(uci_data_dir, "test", "X_test.txt")
txt_y_train         <- file.path(uci_data_dir, "train", "y_train.txt")
txt_y_test          <- file.path(uci_data_dir, "test", "y_test.txt")
txt_subject_train   <- file.path(uci_data_dir, "train", "subject_train.txt")
txt_subject_test    <- file.path(uci_data_dir, "test", "subject_test.txt")
txt_features        <- file.path(uci_data_dir, "features.txt")
txt_activity_labels <- file.path(uci_data_dir, "activity_labels.txt")

#---------------------------------------------------------------------
# Step 1 Merges the training and the test sets to create one data set.
#---------------------------------------------------------------------
# Measurements (X), activity codes (y) and subject ids, each train + test.
df_x <- rbind(read.table(txt_x_train), read.table(txt_x_test))
df_y <- rbind(read.table(txt_y_train), read.table(txt_y_test))
df_subject_data <- rbind(read.table(txt_subject_train),
                         read.table(txt_subject_test))

#-----------------------------------------------------------------------------------------------
# Step 2 Extracts only the measurements on the mean and standard deviation for each measurement.
#-----------------------------------------------------------------------------------------------
df_features <- read.table(txt_features)

# Only features literally named "-mean()" or "-std()" (column 2 holds the
# feature names); the "()" must be escaped in the regex.
df_mean_std_features <- grep("-(mean|std)\\(\\)", df_features[, 2])
df_x <- df_x[, df_mean_std_features]
names(df_x) <- df_features[df_mean_std_features, 2]

#-------------------------------------------------------------------------------
# Step 3 Uses descriptive activity names to name the activities in the data set.
#-------------------------------------------------------------------------------
# Activity codes are 1..6 and index directly into the labels table.
df_activities <- read.table(txt_activity_labels)
df_y[, 1] <- df_activities[df_y[, 1], 2]
names(df_y) <- "activity"

#--------------------------------------------------------------------------
# Step 4 Appropriately labels the data set with descriptive variable names.
#--------------------------------------------------------------------------
names(df_subject_data) <- "subject"

# Bind everything into the full data set: 66 measurements + activity + subject.
df_total <- cbind(df_x, df_y, df_subject_data)

#-------------------------------------------------------------------------------------------------------------------------
# Step 5 Creates a second, independent tidy data set with the average of each variable for each activity and each subject.
#-------------------------------------------------------------------------------------------------------------------------
# ncol(df_total) == 68: columns 1:66 are measurements, then activity, subject.
df_avg <- ddply(df_total, .(subject, activity),
                function(x) colMeans(x[, 1:66]))

# FIX: spell row.names in full -- the original "row.name" only worked via
# partial argument matching.
write.table(df_avg, "activity-rec-avg.txt", row.names = FALSE)
library(scholar) ### Name: get_coauthors ### Title: Gets the network of coauthors of a scholar ### Aliases: get_coauthors ### ** Examples ## Not run: ##D ##D library(scholar) ##D coauthor_network <- get_coauthors('amYIKXQAAAAJ&hl') ##D plot_coauthors(coauthor_network) ## End(Not run)
/data/genthat_extracted_code/scholar/examples/get_coauthors.Rd.R
no_license
surayaaramli/typeRrh
R
false
false
297
r
# Auto-extracted example for scholar::get_coauthors (from get_coauthors.Rd).
# The example body is wrapped in "Not run" because it requires network
# access to Google Scholar.
library(scholar)

### Name: get_coauthors
### Title: Gets the network of coauthors of a scholar
### Aliases: get_coauthors

### ** Examples

## Not run:
##D
##D library(scholar)
##D coauthor_network <- get_coauthors('amYIKXQAAAAJ&hl')
##D plot_coauthors(coauthor_network)
## End(Not run)
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/PsFunctions.R \name{truncateIptw} \alias{truncateIptw} \title{Truncate IPTW values} \usage{ truncateIptw(population, maxWeight = 10) } \arguments{ \item{population}{A data frame with at least the two columns described in the details} \item{maxWeight}{The maximum allowed IPTW.} } \value{ Returns a tibble with the same columns as the input. } \description{ Set the inverse probability of treatment weights (IPTW) to the user-specified threshold if it exceeds said threshold. } \details{ The data frame should have the following two columns: \itemize{ \item treatment (integer): Column indicating whether the person is in the target (1) or comparator (0) group. \item iptw (numeric): Propensity score. } } \examples{ rowId <- 1:2000 treatment <- rep(0:1, each = 1000) iptw <- 1 / c(runif(1000, min = 0, max = 1), runif(1000, min = 0, max = 1)) data <- data.frame(rowId = rowId, treatment = treatment, iptw = iptw) result <- truncateIptw(data) }
/man/truncateIptw.Rd
permissive
OHDSI/CohortMethod
R
false
true
1,025
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/PsFunctions.R \name{truncateIptw} \alias{truncateIptw} \title{Truncate IPTW values} \usage{ truncateIptw(population, maxWeight = 10) } \arguments{ \item{population}{A data frame with at least the two columns described in the details} \item{maxWeight}{The maximum allowed IPTW.} } \value{ Returns a tibble with the same columns as the input. } \description{ Set the inverse probability of treatment weights (IPTW) to the user-specified threshold if it exceeds said threshold. } \details{ The data frame should have the following two columns: \itemize{ \item treatment (integer): Column indicating whether the person is in the target (1) or comparator (0) group. \item iptw (numeric): Propensity score. } } \examples{ rowId <- 1:2000 treatment <- rep(0:1, each = 1000) iptw <- 1 / c(runif(1000, min = 0, max = 1), runif(1000, min = 0, max = 1)) data <- data.frame(rowId = rowId, treatment = treatment, iptw = iptw) result <- truncateIptw(data) }
x1<-rnorm(1000 mean = 5 sd = 1) x2-rnorm(1000 mean = 2 d = 1) x3-rnorm(1000 mean = 0 sd = 1) x<- c(x1, x2, x3) col1<- c(1, 0.5, 0.4) col2<- c(0, 0.7, 0.56) col3<- c(0, 0, 0.72) matrix <- cbind(col1, col2, col3) matrix matrix[1,] k1 <- matrix[1,] * x k2 <- matrix[2,] * x k3 <- matrix[3,] * x m <- data.frame(ghj = k1, jbj = k2, vb = k3) m y1 = matrix[1,] * x1 y2 = matrix[1,] * x2 y3 = matrix[1,] * x3 y4 = matrix[2,] * x1 y5 = matrix[2,] * x2 y6 = matrix[2,] * x3 y7 = matrix[3,] * x1 y8 = matrix[3,] * x2 y9 = matrix[3,] * x3 y = cbind(y1, y2, y3, y4, y5, y6, y7, y8, y9) y
/h.R
no_license
sergivna/Data-analysis
R
false
false
582
r
## h.R -- toy example: scale three normal samples by rows of a 3x3
## weight matrix.

# BUG FIX: the original did not parse -- rnorm() arguments were not
# comma-separated ("rnorm(1000 mean = 5 sd = 1)"), the assignment arrow
# was mistyped as "-" for x2/x3, and "d = 1" was a typo for "sd = 1".
x1 <- rnorm(1000, mean = 5, sd = 1)
x2 <- rnorm(1000, mean = 2, sd = 1)
x3 <- rnorm(1000, mean = 0, sd = 1)
x <- c(x1, x2, x3)

# Weight matrix built column-wise.
# NOTE(review): the name `matrix` shadows base::matrix() -- kept for
# compatibility with the original script.
col1 <- c(1, 0.5, 0.4)
col2 <- c(0, 0.7, 0.56)
col3 <- c(0, 0, 0.72)
matrix <- cbind(col1, col2, col3)
matrix
matrix[1, ]

# Each weight row (length 3) is silently recycled along x (length 3000).
k1 <- matrix[1, ] * x
k2 <- matrix[2, ] * x
k3 <- matrix[3, ] * x
m <- data.frame(ghj = k1, jbj = k2, vb = k3)
m

# Row-by-sample products: each weight row recycled over each sample vector.
y1 <- matrix[1, ] * x1
y2 <- matrix[1, ] * x2
y3 <- matrix[1, ] * x3
y4 <- matrix[2, ] * x1
y5 <- matrix[2, ] * x2
y6 <- matrix[2, ] * x3
y7 <- matrix[3, ] * x1
y8 <- matrix[3, ] * x2
y9 <- matrix[3, ] * x3
y <- cbind(y1, y2, y3, y4, y5, y6, y7, y8, y9)
y
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/ui_helpers.R \name{add_ui_sidebar_basic} \alias{add_ui_sidebar_basic} \title{Add UI Elements to the Sidebar (Basic Tab)} \usage{ add_ui_sidebar_basic(elementlist = NULL, append = FALSE, tabname = "Basic") } \arguments{ \item{elementlist}{list of UI elements to add to the sidebar tab} \item{append}{whether to append the \code{elementlist} to currently registered elements or replace the currently registered elements.} \item{tabname}{change the label on the UI tab (default = "Basic")} } \description{ This function registers UI elements to the primary (front-most) tab on the dashboard sidebar. The default name of the tab is \strong{Basic} but can be renamed using the tabname argument. This tab will be active on the sidebar when the user first opens the shiny application. } \section{Shiny Usage}{ Call this function after creating elements in \code{ui_sidebar.R} to register them to the application framework and show them on the Basic tab in the dashboard sidebar } \examples{ require(shiny) s1 <- selectInput("sample1", "A Select", c("A", "B", "C")) s2 <- radioButtons("sample2", NULL, c("A", "B", "C")) add_ui_sidebar_basic(list(s1, s2), append = FALSE) } \seealso{ \link[periscope]{add_ui_sidebar_advanced} \link[periscope]{add_ui_body} }
/man/add_ui_sidebar_basic.Rd
no_license
kar-agg-gen/periscope
R
false
true
1,339
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/ui_helpers.R \name{add_ui_sidebar_basic} \alias{add_ui_sidebar_basic} \title{Add UI Elements to the Sidebar (Basic Tab)} \usage{ add_ui_sidebar_basic(elementlist = NULL, append = FALSE, tabname = "Basic") } \arguments{ \item{elementlist}{list of UI elements to add to the sidebar tab} \item{append}{whether to append the \code{elementlist} to currently registered elements or replace the currently registered elements.} \item{tabname}{change the label on the UI tab (default = "Basic")} } \description{ This function registers UI elements to the primary (front-most) tab on the dashboard sidebar. The default name of the tab is \strong{Basic} but can be renamed using the tabname argument. This tab will be active on the sidebar when the user first opens the shiny application. } \section{Shiny Usage}{ Call this function after creating elements in \code{ui_sidebar.R} to register them to the application framework and show them on the Basic tab in the dashboard sidebar } \examples{ require(shiny) s1 <- selectInput("sample1", "A Select", c("A", "B", "C")) s2 <- radioButtons("sample2", NULL, c("A", "B", "C")) add_ui_sidebar_basic(list(s1, s2), append = FALSE) } \seealso{ \link[periscope]{add_ui_sidebar_advanced} \link[periscope]{add_ui_body} }
library(psych) library(MASS) library(plyr) library(dplyr) library(reshape2) playersPerSecond <- as.data.frame(colnames(labelFile)[2:ncol(labelFile)]) colnames(playersPerSecond)[1] <- "TIME" playersPerSecond$COUNT <- NA #labelFile$userID <- as.factor(labe) # for (i in 2:ncol(labelFile)){ # #print(paste0("Column no. " + i)) # # count = 0 # # # for (j in 1:nrow(labelFile)){ # #print(paste0("Row no. " + j + " for col no. " + i)) # # if (labelFile[j,i] != "0"){ # print("Adding user presence") # # count = count + 1 # playersPerSecond$COUNT[i-1] <- count # # } # } # # } #j =1 # userPool <- data.frame() # #nonZero <- data.frame() # #nonZero$USER <- NA # userPool$USER <- NA for (i in 2:ncol(labelFile)){ print(paste0("At i :",i)) tempKeep <- c("userID", i) nonZero <- labelFile nonZero <- subset(nonZero, nonZero[,i] != 0) nonZero$userID <- as.factor(nonZero$userID) playersPerSecond$COUNT[i - 1] <- nlevels(nonZero$userID) # counter = 0 # # for (j in 1:nrow(labelFile)){ # # if (labelFile[j,i] != "0"){ # # nonZero <- rbind(nonZero, labelFile[j,]) # # nonZero$userID <- as.factor(nonZero$userID) # # playersPerSecond$COUNT[i - 1] <- nlevels(nonZero$userID) # print("Added") # } # } } playersPerSecond$TIME <- as.POSIXct(playersPerSecond$TIME, format = "%Y-%m-%d %H:%M:%OS") playersPerSecond <- na.omit(playersPerSecond) playersPerSecond <- subset(playersPerSecond, playersPerSecond$TIME<endDateTime)
/getPlayerCountPerSecond.R
no_license
adityaponnada/accelerometerPredictionVisualizer
R
false
false
1,581
r
## getPlayerCountPerSecond.R
## For every time-stamped column of `labelFile` (column 1 = userID,
## columns 2..n = one column per second), count how many distinct users
## have a non-zero label for that second, then keep only times before
## `endDateTime`.
## Assumes `labelFile` and `endDateTime` exist in the calling environment
## -- TODO confirm where they are created.

library(psych)
library(MASS)
library(plyr)
library(dplyr)
library(reshape2)

# One row per time column; COUNT filled in by the loop below.
playersPerSecond <- as.data.frame(colnames(labelFile)[2:ncol(labelFile)])
colnames(playersPerSecond)[1] <- "TIME"
playersPerSecond$COUNT <- NA

# CLEANUP: removed the unused `tempKeep` variable, the redundant
# whole-table copy of `labelFile` on every iteration, and the large
# blocks of dead commented-out code from the original.
for (i in 2:ncol(labelFile)) {
  print(paste0("At i :", i))
  # Rows where this second's label is non-zero, i.e. the user was present.
  nonZero <- subset(labelFile, labelFile[, i] != 0)
  nonZero$userID <- as.factor(nonZero$userID)
  # Number of distinct users present during this second.
  playersPerSecond$COUNT[i - 1] <- nlevels(nonZero$userID)
}

playersPerSecond$TIME <- as.POSIXct(playersPerSecond$TIME,
                                    format = "%Y-%m-%d %H:%M:%OS")
playersPerSecond <- na.omit(playersPerSecond)
playersPerSecond <- subset(playersPerSecond,
                           playersPerSecond$TIME < endDateTime)
#' \pkg{dp.opendata} - Query DB Opendata APIs #' #' @keywords package DB #' @name dp.opendata-package #' @docType package #' @import httr tidyverse htmltools lubridate data.table keyring jsonlite NULL #' Pipe re-export #' #' Re-export pipe to allow use without dependencies #' #' @importFrom magrittr %>% #' @name %>% #' @rdname pipe #' @export #' @param lhs,rhs A visualisation and a function to apply to it NULL
/R/dp.opendata-package.R
no_license
markheckmann/db.opendata
R
false
false
419
r
# Package-level documentation and magrittr pipe re-export (roxygen2 blocks
# attached to NULL, the standard pattern for package docs).

#' \pkg{dp.opendata} - Query DB Opendata APIs
#'
#' @keywords package DB
#' @name dp.opendata-package
#' @docType package
#' @import httr tidyverse htmltools lubridate data.table keyring jsonlite
NULL

#' Pipe re-export
#'
#' Re-export pipe to allow use without dependencies
#'
#' @importFrom magrittr %>%
#' @name %>%
#' @rdname pipe
#' @export
#' @param lhs,rhs A value and a function to apply to it
NULL
.loadInputFile <- function(){ df <- fLoadTXTIntoDataframe("tmp/input.txt") str(df) REddyProc:::fCheckColNum(df, c("NEE","Tair","rH")) }
/develop/debug_onlineTool.R
no_license
zhzhyang/REddyProc
R
false
false
142
r
# Debug helper for the REddyProc online tool: load the staged input file
# and run the package's internal column check on it.
.loadInputFile <- function(){
  # fLoadTXTIntoDataframe is the REddyProc text loader; "tmp/input.txt" is
  # presumably the path the online tool stages uploads to -- TODO confirm.
  df <- fLoadTXTIntoDataframe("tmp/input.txt")
  # Print the structure for interactive inspection.
  str(df)
  # Internal (':::') check on the NEE/Tair/rH columns; its result is the
  # function's return value.
  REddyProc:::fCheckColNum(df, c("NEE","Tair","rH"))
}
/project/Deta Leage_2014_09_16/part2/part2.R
no_license
ShuDiamonds/R
R
false
false
773
r
# plotting rm(list = ls()) MyDF <- read.csv("../data/EcolArchives-E089-51-D1.csv") require(ggplot2) pdf("../results/PP_Regress.pdf") p <- ggplot(MyDF, aes(x = log(Prey.mass), y = log(Predator.mass), colour = Predator.lifestage)) + geom_point(shape = 3) + labs(x = "Prey mass in grams", y = "Predator mass in grams") + geom_smooth(method='lm', fullrange = TRUE) + facet_grid(Type.of.feeding.interaction ~. , scales = "free") + theme(legend.position = "bottom") + guides(col = guide_legend(nrow=1)) print(p) graphics.off() # calculation FeedingType <- unique(MyDF$Type.of.feeding.interaction) DF <- data.frame(FeedingType = NULL, LifeStage = NULL, Slope = NULL, Intercept = NULL, R2 = NULL, f.value = NULL, P.value = NULL, stringsAsFactors=FALSE) for(a in 1:length(FeedingType)){ Subset1 <- subset(MyDF, Type.of.feeding.interaction == FeedingType[a]) LifeStage <- unique(Subset1$Predator.lifestage) for (b in 1:length(LifeStage)){ # browser() Subset2 <- subset(Subset1, Predator.lifestage == LifeStage[b]) Model <- lm(log(Predator.mass)~log(Prey.mass), data = Subset2) Output <- summary(Model) if(is.null(Output$fstatistic[1])){ DF2 <- data.frame(FeedingType = FeedingType[a], LifeStage = LifeStage[b], Slope = NA, Intercept = NA, R2 = NA, f.value = NA, P.value = NA, stringsAsFactors=FALSE) } else{ DF2 <- data.frame(FeedingType = FeedingType[a], LifeStage = LifeStage[b], Slope = Output$coefficients[2,1], Intercept = Output$coefficients[1,1], R2 = Output$r.squared, f.value = Output$fstatistic[1], P.value = Output$coefficients[8]) } DF <- rbind(DF,DF2) } } write.csv(DF, "../results/PP_Regress_Results.csv", row.names=FALSE)
/week3/code/PP_Regress.R
no_license
zongyi2020/CMEECourseWork
R
false
false
2,072
r
## PP_Regress.R -- predator/prey mass regressions per feeding-interaction
## type and predator life stage; writes a faceted plot (PDF) and a
## per-cell regression-results CSV.

rm(list = ls())

MyDF <- read.csv("../data/EcolArchives-E089-51-D1.csv")
require(ggplot2)

# --- Plotting ---------------------------------------------------------------
pdf("../results/PP_Regress.pdf")
p <- ggplot(MyDF, aes(x = log(Prey.mass), y = log(Predator.mass),
                      colour = Predator.lifestage)) +
  geom_point(shape = 3) +
  labs(x = "Prey mass in grams", y = "Predator mass in grams") +
  geom_smooth(method = 'lm', fullrange = TRUE) +
  facet_grid(Type.of.feeding.interaction ~ ., scales = "free") +
  theme(legend.position = "bottom") +
  guides(col = guide_legend(nrow = 1))
print(p)
graphics.off()

# --- Regression per (feeding type, life stage) ------------------------------
FeedingType <- unique(MyDF$Type.of.feeding.interaction)

# PERF FIX: collect one-row data frames in a list and bind once at the
# end instead of growing DF with rbind() inside the loop (quadratic
# copying in the original).
results <- list()

for (a in seq_along(FeedingType)) {
  Subset1 <- subset(MyDF, Type.of.feeding.interaction == FeedingType[a])
  LifeStage <- unique(Subset1$Predator.lifestage)
  for (b in seq_along(LifeStage)) {
    Subset2 <- subset(Subset1, Predator.lifestage == LifeStage[b])
    Model <- lm(log(Predator.mass) ~ log(Prey.mass), data = Subset2)
    Output <- summary(Model)
    if (is.null(Output$fstatistic[1])) {
      # Too few points for an F statistic: record NAs for this cell.
      row <- data.frame(FeedingType = FeedingType[a],
                        LifeStage = LifeStage[b],
                        Slope = NA, Intercept = NA, R2 = NA,
                        f.value = NA, P.value = NA,
                        stringsAsFactors = FALSE)
    } else {
      # coefficients is a 2x4 matrix; [2, 4] is the slope's p-value --
      # the same cell the original's opaque column-major index [8] hit.
      row <- data.frame(FeedingType = FeedingType[a],
                        LifeStage = LifeStage[b],
                        Slope = Output$coefficients[2, 1],
                        Intercept = Output$coefficients[1, 1],
                        R2 = Output$r.squared,
                        f.value = Output$fstatistic[1],
                        P.value = Output$coefficients[2, 4])
    }
    results[[length(results) + 1]] <- row
  }
}

DF <- do.call(rbind, results)
write.csv(DF, "../results/PP_Regress_Results.csv", row.names = FALSE)
#################################################################################################### #################################################################################################### ## Tiling of an AOI (shapefile defined) ## Contact remi.dannunzio@fao.org ## 2019/03/11 #################################################################################################### #################################################################################################### ### load the parameters source('~/uga_activity_data/scripts/get_parameters.R') usernamelist <- paste0(mgmt_dir,'usernames_uga.csv') ### GET COUNTRY BOUNDARIES FROM THE WWW.GADM.ORG DATASET aoi <- getData('GADM', path=gadm_dir, country= countrycode, level=0) (bb <- extent(aoi)) ### What grid size do we need ? grid_size <- 20000 ## in meters ### GENERATE A GRID sqr_df <- generate_grid(aoi,grid_size/111320) nrow(sqr_df) ### Select a vector from location of another vector sqr_df_selected <- sqr_df[aoi,] nrow(sqr_df_selected) ### Give the output a decent name, with unique ID names(sqr_df_selected@data) <- "tileID" sqr_df_selected@data$tileID <- row(sqr_df_selected@data)[,1] ### Reproject in LAT LON tiles <- spTransform(sqr_df_selected,CRS("+init=epsg:4326")) aoi_geo <- spTransform(aoi,CRS("+init=epsg:4326")) ### Plot the results plot(tiles) plot(aoi_geo,add=T,border="blue") ### check against forest nonforest mask FNF_mask_proj <- paste0(lc_dir,"FNF_mask_2015_2017_proj.tif") FNF_mask_proj.shp <- paste0(lc_dir,"FNF_mask_2015_2017_proj.shp") plot(raster(FNF_mask_proj),add=T) ###################################### if(!file.exists(FNF_mask_proj.shp)){ system(sprintf("gdal_polygonize.py -mask %s %s -f 'ESRI Shapefile' %s ", FNF_mask_proj, FNF_mask_proj, FNF_mask_proj.shp )) } shp_mask <- readOGR(FNF_mask_proj.shp) # plot(shp_mask,add=T) shp_mask <- spTransform(shp_mask,CRS("+init=epsg:4326")) tiles@data$forest_mask <- over(tiles,shp_mask) subtile <- tiles[tiles@data$forest_mask$DN %in% 1 ,] 
subtile <- subtile[,"tileID"] subtile table(tiles$forest_mask) subtile <- tiles[tiles@data$forest_mask$DN %in% 1 ,] subtile <- subtile[,"tileID"] # plot(shp_mask,border="green") plot(subtile,add=T) ### Read the list of usernames users <- read.csv(usernamelist) ### Assign each tile with a username df <- data.frame(cbind(subtile@data[,"tileID"],users$Name)) names(df) <- c("tileID","username") df$tileID <- as.numeric(df$tileID) table(df$username) subtile@data <- df for(username in unique(df$username)){ ### Create a final subset corresponding to your username my_tiles <- subtile[subtile$tileID %in% df[df$username == username,"tileID"],] # plot(my_tiles,add=T,col="black") length(my_tiles) ### Export the final subset export_name <- str_replace_all(paste0("national_scale_",length(my_tiles),"_tiles_",username), " ","_") writeOGR(obj=my_tiles, dsn=paste(tile_dir,export_name,".kml",sep=""), layer= export_name, driver = "KML", overwrite_layer = T) } ### Export ALL TILES as KML export_name <- paste0("tiling_system_",countrycode) writeOGR(obj=subtile, dsn=paste(tile_dir,export_name,".kml",sep=""), layer= export_name, driver = "KML", overwrite_layer = T) my_tiles <- tiles[tiles$tileID %in% df[df$username == username,"tileID"],] plot(my_tiles,add=T,col="green") length(my_tiles) ### Export the ONE TILE IN THE subset export_name <- paste0("UGA_one_tile_",username) writeOGR(obj=my_tiles[1,], dsn=paste(tile_dir,export_name,".kml",sep=""), layer= export_name, driver = "KML", overwrite_layer = T)
/scripts/bfast_processing/create_tiling_system.R
no_license
yfinegold/uga_activity_data
R
false
false
3,885
r
####################################################################################################
####################################################################################################
## Tiling of an AOI (shapefile defined)
## Contact remi.dannunzio@fao.org
## 2019/03/11
####################################################################################################
####################################################################################################
### load the parameters
# NOTE(review): generate_grid(), the *_dir variables and countrycode are
# expected to come from this sourced script -- confirm.
source('~/uga_activity_data/scripts/get_parameters.R')
usernamelist <- paste0(mgmt_dir,'usernames_uga.csv')

### GET COUNTRY BOUNDARIES FROM THE WWW.GADM.ORG DATASET
aoi <- getData('GADM', path=gadm_dir, country= countrycode, level=0)
(bb <- extent(aoi))

### What grid size do we need ?
grid_size <- 20000 ## in meters

### GENERATE A GRID
# 111320 m is roughly one degree at the equator, so grid_size/111320
# converts the tile size from meters to degrees for the lat/lon grid.
sqr_df <- generate_grid(aoi,grid_size/111320)
nrow(sqr_df)

### Select a vector from location of another vector
# Spatial subsetting: keep only grid squares intersecting the AOI.
sqr_df_selected <- sqr_df[aoi,]
nrow(sqr_df_selected)

### Give the output a decent name, with unique ID
names(sqr_df_selected@data) <- "tileID"
sqr_df_selected@data$tileID <- row(sqr_df_selected@data)[,1]

### Reproject in LAT LON
tiles <- spTransform(sqr_df_selected,CRS("+init=epsg:4326"))
aoi_geo <- spTransform(aoi,CRS("+init=epsg:4326"))

### Plot the results
plot(tiles)
plot(aoi_geo,add=T,border="blue")

### check against forest nonforest mask
FNF_mask_proj <- paste0(lc_dir,"FNF_mask_2015_2017_proj.tif")
FNF_mask_proj.shp <- paste0(lc_dir,"FNF_mask_2015_2017_proj.shp")
plot(raster(FNF_mask_proj),add=T)

######################################
# Polygonize the raster mask once (shelling out to GDAL) if the
# shapefile does not exist yet.
if(!file.exists(FNF_mask_proj.shp)){
  system(sprintf("gdal_polygonize.py -mask %s %s -f 'ESRI Shapefile' %s ",
                 FNF_mask_proj,
                 FNF_mask_proj,
                 FNF_mask_proj.shp
  ))
}

shp_mask <- readOGR(FNF_mask_proj.shp)
# plot(shp_mask,add=T)
shp_mask <- spTransform(shp_mask,CRS("+init=epsg:4326"))

# Tag each tile with the overlaying mask polygon; DN == 1 marks forest.
tiles@data$forest_mask <- over(tiles,shp_mask)
subtile <- tiles[tiles@data$forest_mask$DN %in% 1 ,]
subtile <- subtile[,"tileID"]
subtile
table(tiles$forest_mask)

# NOTE(review): the two lines below recompute subtile identically to the
# two lines above -- apparently redundant; confirm before removing.
subtile <- tiles[tiles@data$forest_mask$DN %in% 1 ,]
subtile <- subtile[,"tileID"]
# plot(shp_mask,border="green")
plot(subtile,add=T)

### Read the list of usernames
users <- read.csv(usernamelist)

### Assign each tile with a username
# cbind() recycles the shorter of (tile IDs, user names) -- relies on the
# counts dividing evenly.
df <- data.frame(cbind(subtile@data[,"tileID"],users$Name))
names(df) <- c("tileID","username")
df$tileID <- as.numeric(df$tileID)
table(df$username)
subtile@data <- df

# Export one KML of assigned tiles per user.
for(username in unique(df$username)){
  ### Create a final subset corresponding to your username
  my_tiles <- subtile[subtile$tileID %in% df[df$username == username,"tileID"],]
  # plot(my_tiles,add=T,col="black")
  length(my_tiles)
  ### Export the final subset
  export_name <- str_replace_all(paste0("national_scale_",length(my_tiles),"_tiles_",username), " ","_")
  writeOGR(obj=my_tiles,
           dsn=paste(tile_dir,export_name,".kml",sep=""),
           layer= export_name,
           driver = "KML",
           overwrite_layer = T)
}

### Export ALL TILES as KML
export_name <- paste0("tiling_system_",countrycode)
writeOGR(obj=subtile,
         dsn=paste(tile_dir,export_name,".kml",sep=""),
         layer= export_name,
         driver = "KML",
         overwrite_layer = T)

# NOTE(review): `username` below is whatever value the for-loop left
# behind (its last iteration) -- confirm this trailing block is intended.
my_tiles <- tiles[tiles$tileID %in% df[df$username == username,"tileID"],]
plot(my_tiles,add=T,col="green")
length(my_tiles)

### Export the ONE TILE IN THE subset
export_name <- paste0("UGA_one_tile_",username)
writeOGR(obj=my_tiles[1,],
         dsn=paste(tile_dir,export_name,".kml",sep=""),
         layer= export_name,
         driver = "KML",
         overwrite_layer = T)
# DQI on number of jobs vs. building square footage # across census block and census tract # created by Kihong Kim # created in May 20, 2013 setwd("/home/kihong/DQI") library(MASS) library(RPostgreSQL) library(plyr) library(ggplot2) # connect to the PostgreSQL database conn <- dbConnect(PostgreSQL(), host="sapporo.usp.pdx.edu", user="smartdata", password="Smartaa00", dbname="portland") # read wac2010_bg wac2010_bg <- dbReadTable(conn, c("lehd","wac2010_bg")) # read buildings2010 buildings2010 <- dbReadTable(conn, c("rlis","buildings2010")) buildings2010 <- with(buildings2010, buildings2010[order(gid),]) buildings2010_bg <- ddply(buildings2010, .(state,county,tract,blockgroup), summarise, sqftBldg=sum(bldg_sqft)) # sqftBldg.tract <- ddply(buildings2010, .(state,county,tract), summarise, sqftBldg=sum(bldg_sqft)) # merge buildings2010 and wac2010_bg merged_bg <- merge(buildings2010_bg, wac2010_bg, by=c("state","county","tract","blockgroup")) # OLS regression analysis ols.jobs.bg.nojittering <- lm(sqftBldg~numjobs, data=merged_bg) summary(ols.jobs.bg.nojittering) ols.jobs.bg.jittering <- lm(sqftBldg~numjobsjit, data=merged_bg) summary(ols.jobs.bg.jittering) # identify the observations with either large residuals (outliers) or high leverages from OLS regression plots png("ols.jobs.bg.nojittering.png") op <- par(mfrow = c(2, 2), oma=c(0,0,1.1,0)) plot(ols.jobs.bg.nojittering, las=1) par(op) dev.off() png("ols.jobs.bg.jittering.png") op <- par(mfrow = c(2, 2), oma=c(0,0,1.1,0)) plot(ols.jobs.bg.jittering, las=1) par(op) dev.off() merged_bg[c(473,476,711),-8] # create a data set to detect outliers and leverages CD.ols.nojit <- cooks.distance(ols.jobs.bg.nojittering) RES.ols.nojit <- stdres(ols.jobs.bg.nojittering) RES.abs.ols.nojit <- abs(RES.ols.nojit) CD.ols.jit <- cooks.distance(ols.jobs.bg.jittering) RES.ols.jit <- stdres(ols.jobs.bg.jittering) RES.abs.ols.jit <- abs(RES.ols.jit) diagnose <- 
cbind(merged_bg,CD.ols.nojit,RES.ols.nojit,RES.abs.ols.nojit,CD.ols.jit,RES.ols.jit,RES.abs.ols.jit) # find out the observations that have relatively large values of Cook's Distance (a conventional cut-off point is 4/n, where n is the number of observations) diagnose[CD.ols.nojit>4/nrow(diagnose),c("state","county","tract","blockgroup","sqftBldg","numjobs","CD.ols.nojit")] diagnose[CD.ols.jit>4/nrow(diagnose),c("state","county","tract","blockgroup","sqftBldg","numjobsjit","CD.ols.jit")] # find out ten observations with the highest absolute residual values diagnose.sorted.nojit <- with(diagnose, diagnose[order(-RES.abs.ols.nojit),]) diagnose.sorted.nojit[1:10,c("state","county","tract","blockgroup","sqftBldg","numjobs","RES.ols.nojit","RES.abs.ols.nojit")] diagnose.sorted.jit <- with(diagnose, diagnose[order(-RES.abs.ols.jit),]) diagnose.sorted.jit[1:10,c("state","county","tract","blockgroup","sqftBldg","numjobsjit","RES.ols.jit","RES.abs.ols.jit")] # robust regression analysis # a compromise between excluding outliers or leverages entirely from the analysis and # including all the data points and treating all them equally in OLS regression rls.jobs.bg.nojittering <- rlm(sqftBldg~numjobs, data=merged_bg) summary(rls.jobs.bg.nojittering) rls.jobs.bg.jittering <- rlm(sqftBldg~numjobsjit, data=merged_bg) summary(rls.jobs.bg.jittering) # ignore for now summary(dqi.jobs.blockgroup) summary(dqi.jobs.blockgroup)$r.squared ggplot(merge.blockgroup, aes(x=numJobs,y=sqftBldg))+ geom_point(shape=1)+ labs(title="Building SQFT vs. Number of Jobs in 2010 \n across census block groups")+ ggsave(file="dqi.jobs.blockgroup.png") merge.tract <- merge(sqftBldg.tract, numJobs.tract, by=c("state","county","tract")) head(merge.tract) dqi.jobs.tract <- lm(sqftBldg~numJobs, merge.tract) summary(dqi.jobs.tract) summary(dqi.jobs.tract)$r.squared ggplot(merge.tract, aes(x=numJobs,y=sqftBldg))+ geom_point(shape=1)+ labs(title="Building SQFT vs. 
Number of Jobs in 2010 \n across census tracts")+ ggsave(file="dqi.jobs.tract.png")
/computeDQI_employment.R
no_license
KihongKim/data-quality-indicators
R
false
false
3,991
r
# DQI on number of jobs vs. building square footage
# across census block and census tract
# created by Kihong Kim
# created in May 20, 2013

setwd("/home/kihong/DQI")

library(MASS)
library(RPostgreSQL)
library(plyr)
library(ggplot2)

# connect to the PostgreSQL database
# NOTE(review): database credentials are hard-coded in source -- consider
# moving them to environment variables or a config file.
conn <- dbConnect(PostgreSQL(), host="sapporo.usp.pdx.edu", user="smartdata", password="Smartaa00", dbname="portland")

# read wac2010_bg (LEHD workplace-area characteristics, block-group level)
wac2010_bg <- dbReadTable(conn, c("lehd","wac2010_bg"))

# read buildings2010
buildings2010 <- dbReadTable(conn, c("rlis","buildings2010"))
buildings2010 <- with(buildings2010, buildings2010[order(gid),])
# Total building square footage per block group.
buildings2010_bg <- ddply(buildings2010, .(state,county,tract,blockgroup), summarise, sqftBldg=sum(bldg_sqft))
# sqftBldg.tract <- ddply(buildings2010, .(state,county,tract), summarise, sqftBldg=sum(bldg_sqft))

# merge buildings2010 and wac2010_bg
merged_bg <- merge(buildings2010_bg, wac2010_bg, by=c("state","county","tract","blockgroup"))

# OLS regression analysis (non-jittered and jittered job counts)
ols.jobs.bg.nojittering <- lm(sqftBldg~numjobs, data=merged_bg)
summary(ols.jobs.bg.nojittering)
ols.jobs.bg.jittering <- lm(sqftBldg~numjobsjit, data=merged_bg)
summary(ols.jobs.bg.jittering)

# identify the observations with either large residuals (outliers) or high leverages from OLS regression plots
png("ols.jobs.bg.nojittering.png")
op <- par(mfrow = c(2, 2), oma=c(0,0,1.1,0))
plot(ols.jobs.bg.nojittering, las=1)
par(op)
dev.off()
png("ols.jobs.bg.jittering.png")
op <- par(mfrow = c(2, 2), oma=c(0,0,1.1,0))
plot(ols.jobs.bg.jittering, las=1)
par(op)
dev.off()
# Inspect three specific rows flagged by the diagnostic plots
# (all columns except column 8).
merged_bg[c(473,476,711),-8]

# create a data set to detect outliers and leverages
# (Cook's distance and standardized residuals for both models)
CD.ols.nojit <- cooks.distance(ols.jobs.bg.nojittering)
RES.ols.nojit <- stdres(ols.jobs.bg.nojittering)
RES.abs.ols.nojit <- abs(RES.ols.nojit)
CD.ols.jit <- cooks.distance(ols.jobs.bg.jittering)
RES.ols.jit <- stdres(ols.jobs.bg.jittering)
RES.abs.ols.jit <- abs(RES.ols.jit)
diagnose <- cbind(merged_bg,CD.ols.nojit,RES.ols.nojit,RES.abs.ols.nojit,CD.ols.jit,RES.ols.jit,RES.abs.ols.jit)

# find out the observations that have relatively large values of Cook's Distance (a conventional cut-off point is 4/n, where n is the number of observations)
diagnose[CD.ols.nojit>4/nrow(diagnose),c("state","county","tract","blockgroup","sqftBldg","numjobs","CD.ols.nojit")]
diagnose[CD.ols.jit>4/nrow(diagnose),c("state","county","tract","blockgroup","sqftBldg","numjobsjit","CD.ols.jit")]

# find out ten observations with the highest absolute residual values
diagnose.sorted.nojit <- with(diagnose, diagnose[order(-RES.abs.ols.nojit),])
diagnose.sorted.nojit[1:10,c("state","county","tract","blockgroup","sqftBldg","numjobs","RES.ols.nojit","RES.abs.ols.nojit")]
diagnose.sorted.jit <- with(diagnose, diagnose[order(-RES.abs.ols.jit),])
diagnose.sorted.jit[1:10,c("state","county","tract","blockgroup","sqftBldg","numjobsjit","RES.ols.jit","RES.abs.ols.jit")]

# robust regression analysis
# a compromise between excluding outliers or leverages entirely from the analysis and
# including all the data points and treating all them equally in OLS regression
rls.jobs.bg.nojittering <- rlm(sqftBldg~numjobs, data=merged_bg)
summary(rls.jobs.bg.nojittering)
rls.jobs.bg.jittering <- rlm(sqftBldg~numjobsjit, data=merged_bg)
summary(rls.jobs.bg.jittering)

# ignore for now
# NOTE(review): everything below references objects never created in this
# script (dqi.jobs.blockgroup, merge.blockgroup, sqftBldg.tract,
# numJobs.tract) -- it will error if run as-is; also note the
# sqftBldg.tract ddply line above is commented out.
summary(dqi.jobs.blockgroup)
summary(dqi.jobs.blockgroup)$r.squared
ggplot(merge.blockgroup, aes(x=numJobs,y=sqftBldg))+
  geom_point(shape=1)+
  labs(title="Building SQFT vs. Number of Jobs in 2010 \n across census block groups")+
  ggsave(file="dqi.jobs.blockgroup.png")

merge.tract <- merge(sqftBldg.tract, numJobs.tract, by=c("state","county","tract"))
head(merge.tract)
dqi.jobs.tract <- lm(sqftBldg~numJobs, merge.tract)
summary(dqi.jobs.tract)
summary(dqi.jobs.tract)$r.squared
ggplot(merge.tract, aes(x=numJobs,y=sqftBldg))+
  geom_point(shape=1)+
  labs(title="Building SQFT vs. Number of Jobs in 2010 \n across census tracts")+
  ggsave(file="dqi.jobs.tract.png")
library(edgeR) library(xlsx) dir <- c("~/counts/ALS Mice/experimental/results/suppl/mge/") a <- grep("new", list.files(dir), value = TRUE) setwd(dir) length <- read.delim(file = "seqlength.txt", header = FALSE, sep = ",") gr_control <- c("control_3") gr_case <- c("tg_3") files_control <- grep(paste(gr_control),list.files(dir),value=TRUE) files_case <- grep(paste(gr_case),list.files(dir),value=TRUE) sampleFiles <- c(files_control, files_case) cond_control <- rep(paste(gr_control), length(files_control)) cond_case <- rep(paste(gr_case), length(files_case)) sampleCondition <- c(cond_control, cond_case) sampleTable<-data.frame(sampleName=sampleFiles, fileName=sampleFiles, condition=sampleCondition) y <- readDGE(files = sampleTable$sampleName, group = sampleTable$condition, labels = sampleTable$fileName) a <- DGEList(counts=y, group = sampleTable$condition) CountsTable <- as.data.frame(y$counts) cpm <- cpm(y) cpm <- as.data.frame(cpm(y)) cpm$rowsum <- rowSums(cpm) keep <- rowSums(cpm > 0.5) >= ncol(sampleTable) logCPM <- as.data.frame(cpm(y, log = TRUE, lib.size = colSums(counts) * normalized_lib_sizes)) logCPM <- logCPM[keep,] a <- a[keep,] a <- calcNormFactors(a, method = "TMM") design <- model.matrix(~sampleTable$condition) a <- estimateDisp(a,design) fit <- glmQLFit(a,design, robust = TRUE) qlf <- glmQLFTest(fit,coef=ncol(fit$design)) et_annot <- as.data.frame(topTags(qlf, n = nrow(logCPM), adjust.method = "BH")) top <- as.data.frame(topTags(qlf, n = 20)) et_annot <- as.data.frame(subset(et_annot, logCPM > 0)) et_annot <- as.data.frame(subset(et_annot, PValue < 0.05)) et_annot <- as.data.frame(subset(et_annot, FDR < 0.05)) et_annot <- as.data.frame(subset(et_annot, logFC > 0.5 | logFC < -0.5)) et_annot <- et_annot[complete.cases(et_annot), ] l <- length[(length$V1 %in% rownames(et_annot)),] et_annot$length <- l$V2 write.xlsx(et_annot, file = "RTs edgeR.xlsx", append = TRUE, sheetName = paste(gr_control, "_", gr_case))
/supplementary scripts/mix of various R scripts/rt_edger.R
no_license
aprezvykh/bioinformatics
R
false
false
1,980
r
## Differential expression of repeat elements with edgeR:
## reads per-sample count files, filters low-expression features, fits a
## quasi-likelihood GLM, and writes significant hits to an Excel sheet.
library(edgeR)
library(xlsx)

dir <- "~/counts/ALS Mice/experimental/results/suppl/mge/"
a <- grep("new", list.files(dir), value = TRUE)  # NOTE(review): overwritten below; kept for parity
setwd(dir)

## Feature lengths (V1 = feature id, V2 = length).  Renamed from `length`
## to avoid shadowing base::length().
seq_lengths <- read.delim(file = "seqlength.txt", header = FALSE, sep = ",")

gr_control <- "control_3"
gr_case    <- "tg_3"

files_control <- grep(gr_control, list.files(dir), value = TRUE)
files_case    <- grep(gr_case,    list.files(dir), value = TRUE)
sampleFiles   <- c(files_control, files_case)

cond_control    <- rep(gr_control, length(files_control))
cond_case       <- rep(gr_case,    length(files_case))
sampleCondition <- c(cond_control, cond_case)

sampleTable <- data.frame(sampleName = sampleFiles,
                          fileName   = sampleFiles,
                          condition  = sampleCondition)

y <- readDGE(files  = sampleTable$sampleName,
             group  = sampleTable$condition,
             labels = sampleTable$fileName)
a <- DGEList(counts = y, group = sampleTable$condition)
CountsTable <- as.data.frame(y$counts)

cpm <- as.data.frame(cpm(y))
cpm$rowsum <- rowSums(cpm)
## Keep features with CPM > 0.5 in at least ncol(sampleTable) samples.
## NOTE(review): the `rowsum` helper column is included in this count,
## matching the original behaviour -- confirm that is intended.
keep <- rowSums(cpm > 0.5) >= ncol(sampleTable)

## BUG FIX: the original called
##   cpm(y, log = TRUE, lib.size = colSums(counts) * normalized_lib_sizes)
## but neither `counts` nor `normalized_lib_sizes` is defined anywhere, so
## the script errored here.  cpm() already uses the library sizes stored in
## the DGEList, so request log-CPM directly.
logCPM <- as.data.frame(cpm(y, log = TRUE))
logCPM <- logCPM[keep, ]

a <- a[keep, ]
a <- calcNormFactors(a, method = "TMM")

design <- model.matrix(~sampleTable$condition)
a   <- estimateDisp(a, design)
fit <- glmQLFit(a, design, robust = TRUE)
qlf <- glmQLFTest(fit, coef = ncol(fit$design))

et_annot <- as.data.frame(topTags(qlf, n = nrow(logCPM), adjust.method = "BH"))
top      <- as.data.frame(topTags(qlf, n = 20))

## Significance / effect-size filters (subset() already returns a data.frame).
et_annot <- subset(et_annot, logCPM > 0)
et_annot <- subset(et_annot, PValue < 0.05)
et_annot <- subset(et_annot, FDR < 0.05)
et_annot <- subset(et_annot, logFC > 0.5 | logFC < -0.5)
et_annot <- et_annot[complete.cases(et_annot), ]

## BUG FIX: the original filtered seq_lengths with %in% and assigned V2
## positionally, which silently misaligns lengths whenever the file order
## differs from the topTags row order.  match() aligns by feature id.
et_annot$length <- seq_lengths$V2[match(rownames(et_annot), seq_lengths$V1)]

write.xlsx(et_annot, file = "RTs edgeR.xlsx", append = TRUE,
           sheetName = paste(gr_control, "_", gr_case))
c DCNF-Autarky [version 0.0.1]. c Copyright (c) 2018-2019 Swansea University. c c Input Clause Count: 37901 c Performing E1-Autarky iteration. c Remaining clauses count after E-Reduction: 37900 c c Performing E1-Autarky iteration. c Remaining clauses count after E-Reduction: 37900 c c Input Parameter (command line, file): c input filename QBFLIB/Sauer-Reimer/ISCAS89/s38584_PR_7_10.qdimacs c output filename /tmp/dcnfAutarky.dimacs c autarky level 1 c conformity level 0 c encoding type 2 c no.of var 13338 c no.of clauses 37901 c no.of taut cls 0 c c Output Parameters: c remaining no.of clauses 37900 c c QBFLIB/Sauer-Reimer/ISCAS89/s38584_PR_7_10.qdimacs 13338 37901 E1 [1] 0 286 12923 37900 RED
/code/dcnf-ankit-optimized/Results/QBFLIB-2018/E1/Experiments/Sauer-Reimer/ISCAS89/s38584_PR_7_10/s38584_PR_7_10.R
no_license
arey0pushpa/dcnf-autarky
R
false
false
729
r
c DCNF-Autarky [version 0.0.1]. c Copyright (c) 2018-2019 Swansea University. c c Input Clause Count: 37901 c Performing E1-Autarky iteration. c Remaining clauses count after E-Reduction: 37900 c c Performing E1-Autarky iteration. c Remaining clauses count after E-Reduction: 37900 c c Input Parameter (command line, file): c input filename QBFLIB/Sauer-Reimer/ISCAS89/s38584_PR_7_10.qdimacs c output filename /tmp/dcnfAutarky.dimacs c autarky level 1 c conformity level 0 c encoding type 2 c no.of var 13338 c no.of clauses 37901 c no.of taut cls 0 c c Output Parameters: c remaining no.of clauses 37900 c c QBFLIB/Sauer-Reimer/ISCAS89/s38584_PR_7_10.qdimacs 13338 37901 E1 [1] 0 286 12923 37900 RED
## For each alignment listed in a file-of-files: keep only the species of
## interest, write the trimmed fasta, and write a matching pruned tree.
library(phytools)
library(phangorn)
library(seqinr)

# Read filenames (file-of-files).
fof <- read.table("../HairlessBSmodels/alnfof.txt", stringsAsFactors = FALSE)  # FALSE, not F
fof <- fof$V1

# Location of alignments.
alndir <- "../HairlessBSmodels/mamm63nt.alns/trimallcds_wSpalax/"

# Master species tree.
master <- read.tree(text = readLines("../HairlessBSmodels/meredithplustree62sphg19rooted"))

# Desired directory for trimmed alignments.
finaldir <- "../HairlessBSmodels/trimmedalns/"
# Desired directory for trimmed trees.
treedir <- "../HairlessBSmodels/trimmedtrees/"

# All species (genome assembly ids) to include.
specstouse <- c("dasNov3", "triMan1", "loxAfr3", "oryAfe1", "eleEdw1",
                "sorAra2", "conCri1", "pteAle1", "myoDav1", "equCab2",
                "cerSim1", "vicPac2", "susScr3", "turTru2", "orcOrc1",
                "oviAri3", "bosTau7", "felCat5", "musFur1", "odoRosDi",
                "lepWed1", "tupChi1", "oryCun2", "jacJac1", "mm10",
                "criGri1", "speTri2", "hetGla2", "cavPor3", "otoGar3",
                "calJac3", "rheMac3", "chlSab1", "panTro4", "hg19")

# Check for typos - these should give the same number:
# length(specstouse)
# sum(specstouse %in% master$tip.label)

################################################################################
# Make trimmed alignments and pruned trees.
for (f in fof) {
  ## --- alignment ---
  curf   <- paste0(alndir, f)  # collapse="" was a no-op on a scalar
  curaln <- read.fasta(curf, as.string = TRUE)
  ## Output fasta line width: sequence length plus slack so each sequence
  ## stays on a single line.
  l <- nchar(curaln[[1]][1]) + 100
  curaln  <- curaln[names(curaln) %in% specstouse]
  outfile <- paste0(finaldir, f)
  write.fasta(sequences = curaln, names = names(curaln),
              file.out = outfile, as.string = TRUE, nbchar = l)

  ## --- tree ---
  ## Drop every master tip that is not present in the trimmed alignment.
  specsfortree <- names(curaln)
  totrim  <- master$tip.label[!(master$tip.label %in% specsfortree)]
  newtree <- drop.tip(master, totrim)  # removed dead `newtree <- master` assignment
  n <- paste0(strsplit(f, split = "[.]")[[1]][1], ".tre")
  treeoutfile <- paste0(treedir, n)
  write.tree(newtree, file = treeoutfile)
}
/BSModelGuide/trimAlnsofinterest.R
no_license
nclark-lab/bsmodels-dropout
R
false
false
1,869
r
## For each alignment listed in a file-of-files: keep only the species of
## interest, write the trimmed fasta, and write a matching pruned tree.
library(phytools)
library(phangorn)
library(seqinr)

# Read filenames (file-of-files).
fof <- read.table("../HairlessBSmodels/alnfof.txt", stringsAsFactors = FALSE)  # FALSE, not F
fof <- fof$V1

# Location of alignments.
alndir <- "../HairlessBSmodels/mamm63nt.alns/trimallcds_wSpalax/"

# Master species tree.
master <- read.tree(text = readLines("../HairlessBSmodels/meredithplustree62sphg19rooted"))

# Desired directory for trimmed alignments.
finaldir <- "../HairlessBSmodels/trimmedalns/"
# Desired directory for trimmed trees.
treedir <- "../HairlessBSmodels/trimmedtrees/"

# All species (genome assembly ids) to include.
specstouse <- c("dasNov3", "triMan1", "loxAfr3", "oryAfe1", "eleEdw1",
                "sorAra2", "conCri1", "pteAle1", "myoDav1", "equCab2",
                "cerSim1", "vicPac2", "susScr3", "turTru2", "orcOrc1",
                "oviAri3", "bosTau7", "felCat5", "musFur1", "odoRosDi",
                "lepWed1", "tupChi1", "oryCun2", "jacJac1", "mm10",
                "criGri1", "speTri2", "hetGla2", "cavPor3", "otoGar3",
                "calJac3", "rheMac3", "chlSab1", "panTro4", "hg19")

# Check for typos - these should give the same number:
# length(specstouse)
# sum(specstouse %in% master$tip.label)

################################################################################
# Make trimmed alignments and pruned trees.
for (f in fof) {
  ## --- alignment ---
  curf   <- paste0(alndir, f)  # collapse="" was a no-op on a scalar
  curaln <- read.fasta(curf, as.string = TRUE)
  ## Output fasta line width: sequence length plus slack so each sequence
  ## stays on a single line.
  l <- nchar(curaln[[1]][1]) + 100
  curaln  <- curaln[names(curaln) %in% specstouse]
  outfile <- paste0(finaldir, f)
  write.fasta(sequences = curaln, names = names(curaln),
              file.out = outfile, as.string = TRUE, nbchar = l)

  ## --- tree ---
  ## Drop every master tip that is not present in the trimmed alignment.
  specsfortree <- names(curaln)
  totrim  <- master$tip.label[!(master$tip.label %in% specsfortree)]
  newtree <- drop.tip(master, totrim)  # removed dead `newtree <- master` assignment
  n <- paste0(strsplit(f, split = "[.]")[[1]][1], ".tre")
  treeoutfile <- paste0(treedir, n)
  write.tree(newtree, file = treeoutfile)
}
## Constant-productivity model: tests whether productivity (juveniles per
## adult among captures) in the last `compare` years differs from the pooled
## earlier years, using a quasi-binomial GLM.
##
## Args:
##   x       : list with $ad.data and $jv.data data frames, each holding
##             site / year / totcaps / corrcaps columns.
##   compare : number of recent years to contrast against the rest.
##   offset  : if TRUE, apply a capture-effort offset via calc.offset().
##   cl      : confidence level for the index confidence limits.
## Returns: list(model=, parms=, test=) -- the fitted GLM, the per-year
##   index table, and a t-test of the final-year coefficient.
annc.model.prod <- function(x, compare=1, offset=TRUE, cl=0.95){
  data <- x # we're going to need this later, annoyingly

  ## Assemble one row per site/year with adult, juvenile and total captures.
  ad.new <- x$ad.data[ , c('site', 'year', 'totcaps', 'corrcaps') ]
  names(ad.new) <- c('site', 'year', 'adcaps', 'adexcaps')
  jv.new <- x$jv.data[ , c('site', 'year', 'totcaps', 'corrcaps') ]
  names(jv.new) <- c('site', 'year', 'jvcaps', 'jvexcaps')
  x <- merge(ad.new, jv.new)
  x$totcaps <- x$adcaps + x$jvcaps
  x$totexcaps <- x$adexcaps + x$jvexcaps
  x <- x[x$totcaps>0, ]  # no birds caught so doesn't contribute to model fit

  ## BUG FIX: the original did  x$year <- as.numeric(levels(x$year)) , which
  ## returns one value per *level* (wrong length, silently recycled).
  ## Indexing the levels by the factor recovers one numeric year per row.
  if ( is.factor(x$year) ) x$year <- as.numeric(levels(x$year))[x$year]

  nyrs <- max(x$year) - min(x$year) + 1
  if ( compare >= nyrs ) stop("Not enough years to compare with\n")

  ## Collapse the `compare` years before the final year into the reference
  ## level (1); the final year keeps its own level so it can be contrasted.
  ybreak <- max(x$year) - compare - 1
  yearf <- ifelse ( x$year > ybreak, 1, x$year )
  yearf[x$year==max(x$year)] <- max(x$year)
  x$yearf <- relevel(factor(yearf), 1)

  if (offset) {
    x <- calc.offset(x)  # effort offset from a helper defined elsewhere in the package
  } else {
    x$offset <- 0
  }

  ## Quasi-binomial GLM of juveniles vs total captures; include a site term
  ## only when more than one site is present.
  if( length(table(x$site)) > 1 )
    x.lm <- glm(as.matrix(cbind(jvcaps,totcaps)) ~ as.factor(site) + as.factor(yearf) - 1,
                family=quasibinomial, offset=offset, data=x)
  else
    x.lm <- glm(as.matrix(cbind(jvcaps,totcaps)) ~ as.factor(yearf) - 1,
                family=quasibinomial, offset=offset, data=x)

  ## Prediction frame: one row per calendar year, at the reference site.
  if( (compare+1) < nyrs )
    yearf1 <- c(min(x$year):ybreak, rep(1,compare), max(x$year))
  else
    yearf1 <- c(rep(1,(nyrs-1)), max(x$year))
  newdata <- as.data.frame(cbind(yearf=yearf1, site=rep(min(x$site), nyrs)))
  x.pred <- predict(x.lm, newdata, se.fit=TRUE)
  ## Centre the predictions on the penultimate fitted value.
  x.pred$fit <- x.pred$fit - x.pred$fit[length(x.pred$fit)-1]

  years <- c(min(x$year):max(x$year))
  res <- cbind(years, data.frame(cbind(parm=x.pred$fit,se=x.pred$se))) # necessary to stop factor conversion!
  ## NOTE: log back-transform rather than logistic!! gives no. jv per ad
  ## rather than simply the proportion of juveniles.
  res$index <- exp(res$parm)

  ## Annual indices from the year-by-year model, rescaled to match.
  res$annual <- ann.model.prod(data)$parms$index
  res$annual <- res$annual / mean(res$annual[yearf1==1]) # match the estimates

  ## Confidence limits on the index.
  cl.int <- qnorm(1-((1-cl)/2))
  res$lcl <- exp(res$parm - cl.int * res$se)
  res$ucl <- exp(res$parm + cl.int * res$se)

  ## t-test on the final-year coefficient (the last model parameter).
  parno <- length(coef(x.lm))
  slope <- coef(x.lm)[parno]
  slope.se <- sqrt(diag(vcov(x.lm)))[parno]
  tval <- slope/slope.se
  tsig <- 2*pt(abs(tval), x.lm$rank, lower.tail=FALSE)

  list(model=x.lm, parms=res,
       test=list(type='constant',nyrs=compare,slope=slope,slope.se=slope.se,tval=tval,tsig=tsig))
}
/R/annc.model.prod.R
no_license
Sophie-Bennett/cesr
R
false
false
2,533
r
## Constant-productivity model: tests whether productivity (juveniles per
## adult among captures) in the last `compare` years differs from the pooled
## earlier years, using a quasi-binomial GLM.
##
## Args:
##   x       : list with $ad.data and $jv.data data frames, each holding
##             site / year / totcaps / corrcaps columns.
##   compare : number of recent years to contrast against the rest.
##   offset  : if TRUE, apply a capture-effort offset via calc.offset().
##   cl      : confidence level for the index confidence limits.
## Returns: list(model=, parms=, test=) -- the fitted GLM, the per-year
##   index table, and a t-test of the final-year coefficient.
annc.model.prod <- function(x, compare=1, offset=TRUE, cl=0.95){
  data <- x # we're going to need this later, annoyingly

  ## Assemble one row per site/year with adult, juvenile and total captures.
  ad.new <- x$ad.data[ , c('site', 'year', 'totcaps', 'corrcaps') ]
  names(ad.new) <- c('site', 'year', 'adcaps', 'adexcaps')
  jv.new <- x$jv.data[ , c('site', 'year', 'totcaps', 'corrcaps') ]
  names(jv.new) <- c('site', 'year', 'jvcaps', 'jvexcaps')
  x <- merge(ad.new, jv.new)
  x$totcaps <- x$adcaps + x$jvcaps
  x$totexcaps <- x$adexcaps + x$jvexcaps
  x <- x[x$totcaps>0, ]  # no birds caught so doesn't contribute to model fit

  ## BUG FIX: the original did  x$year <- as.numeric(levels(x$year)) , which
  ## returns one value per *level* (wrong length, silently recycled).
  ## Indexing the levels by the factor recovers one numeric year per row.
  if ( is.factor(x$year) ) x$year <- as.numeric(levels(x$year))[x$year]

  nyrs <- max(x$year) - min(x$year) + 1
  if ( compare >= nyrs ) stop("Not enough years to compare with\n")

  ## Collapse the `compare` years before the final year into the reference
  ## level (1); the final year keeps its own level so it can be contrasted.
  ybreak <- max(x$year) - compare - 1
  yearf <- ifelse ( x$year > ybreak, 1, x$year )
  yearf[x$year==max(x$year)] <- max(x$year)
  x$yearf <- relevel(factor(yearf), 1)

  if (offset) {
    x <- calc.offset(x)  # effort offset from a helper defined elsewhere in the package
  } else {
    x$offset <- 0
  }

  ## Quasi-binomial GLM of juveniles vs total captures; include a site term
  ## only when more than one site is present.
  if( length(table(x$site)) > 1 )
    x.lm <- glm(as.matrix(cbind(jvcaps,totcaps)) ~ as.factor(site) + as.factor(yearf) - 1,
                family=quasibinomial, offset=offset, data=x)
  else
    x.lm <- glm(as.matrix(cbind(jvcaps,totcaps)) ~ as.factor(yearf) - 1,
                family=quasibinomial, offset=offset, data=x)

  ## Prediction frame: one row per calendar year, at the reference site.
  if( (compare+1) < nyrs )
    yearf1 <- c(min(x$year):ybreak, rep(1,compare), max(x$year))
  else
    yearf1 <- c(rep(1,(nyrs-1)), max(x$year))
  newdata <- as.data.frame(cbind(yearf=yearf1, site=rep(min(x$site), nyrs)))
  x.pred <- predict(x.lm, newdata, se.fit=TRUE)
  ## Centre the predictions on the penultimate fitted value.
  x.pred$fit <- x.pred$fit - x.pred$fit[length(x.pred$fit)-1]

  years <- c(min(x$year):max(x$year))
  res <- cbind(years, data.frame(cbind(parm=x.pred$fit,se=x.pred$se))) # necessary to stop factor conversion!
  ## NOTE: log back-transform rather than logistic!! gives no. jv per ad
  ## rather than simply the proportion of juveniles.
  res$index <- exp(res$parm)

  ## Annual indices from the year-by-year model, rescaled to match.
  res$annual <- ann.model.prod(data)$parms$index
  res$annual <- res$annual / mean(res$annual[yearf1==1]) # match the estimates

  ## Confidence limits on the index.
  cl.int <- qnorm(1-((1-cl)/2))
  res$lcl <- exp(res$parm - cl.int * res$se)
  res$ucl <- exp(res$parm + cl.int * res$se)

  ## t-test on the final-year coefficient (the last model parameter).
  parno <- length(coef(x.lm))
  slope <- coef(x.lm)[parno]
  slope.se <- sqrt(diag(vcov(x.lm)))[parno]
  tval <- slope/slope.se
  tsig <- 2*pt(abs(tval), x.lm$rank, lower.tail=FALSE)

  list(model=x.lm, parms=res,
       test=list(type='constant',nyrs=compare,slope=slope,slope.se=slope.se,tval=tval,tsig=tsig))
}
options(stringsAsFactors=FALSE) # IMPORTANT!! keep strings as characters

context("Testing Travelling Salesman Problem ")

## Seven cities from data(bordersgermany) are used in every test:
## Chemnitz, Cottbus, Dresden, Erfurt, Gera, Halle, Leipzig.

context("\tTest 01: is solution for a TSP calculated with TSP-Nearest-Neighbor Method correctly?")
test_that("Test for TSP-Nearest-Neighbor Method", {
  geo <- new("GeoSituation")
  # example taken from
  data(bordersgermany)
  # chemnitz, cottbus, dresden, erfurt, gera, halle, leipzig
  A <- c(10, 11, 14, 17, 24, 22, 35)
  for (a in A) {
    df <- cities[a, ]
    geo <- add(geo, new("Node", id = df$id, label = df$label, x = df$x, y = df$y))
  }
  t.costs <- 1
  s.node <- 3 # index of dresden in geo$nodes
  geo$tsp <- TSP.NearestNeighbor(geo, nodes = geo$nodes, StartNode = s.node)
  expect_true(round(geo$tsp$F) == round(631.8579))
  expect_true(geo$tsp$x[1, 6] == 1)
  expect_true(geo$tsp$x[2, 3] == 1)
  expect_true(geo$tsp$x[4, 2] == 1)
})

context("\tTest 02 TSP-Nearest-Neighbor Method + 2opt")
test_that("Test for TSP-Nearest-Neighbor Method + 2opt", {
  data(bordersgermany)
  geo <- new("GeoSituation")
  # example taken from
  # chemnitz, cottbus, dresden, erfurt, gera, halle, leipzig
  A <- c(10, 11, 14, 17, 24, 22, 35)
  for (a in A) {
    df <- cities[a, ]
    geo <- add(geo, new("Node", id = df$id, label = df$label, x = df$x, y = df$y))
  }
  t.costs <- 1
  s.node <- 3 # index of dresden in geo$nodes
  geo$tsp <- TSP.NearestNeighbor(geo, nodes = geo$nodes, StartNode = s.node)
  # nearest-neighbour tour must match Test 01 before improving it
  expect_true(round(geo$tsp$F) == round(631.8579))
  expect_true(geo$tsp$x[1, 6] == 1)
  expect_true(geo$tsp$x[2, 3] == 1)
  expect_true(geo$tsp$x[4, 2] == 1)
  # 2-opt improvement shortens the tour and changes one edge
  geo$tsp <- TSP.2OPT(geo, tsp = geo$tsp)
  expect_true(round(geo$tsp$F) == round(540.7254))
  expect_true(geo$tsp$x[1, 6] == 1)
  expect_true(geo$tsp$x[2, 3] == 1)
  expect_true(geo$tsp$x[4, 2] == 0)
})

## FIX: this context was mislabelled "Test 02" in the original (copy-paste).
context("\tTest 03 TSP-Nearest-Neighbor Method + 3opt")
test_that("Test for TSP-Nearest-Neighbor Method + 3opt", {
  geo <- new("GeoSituation")
  # example taken from
  data(bordersgermany)
  # chemnitz, cottbus, dresden, erfurt, gera, halle, leipzig
  A <- c(10, 11, 14, 17, 24, 22, 35)
  for (a in A) {
    df <- cities[a, ]
    geo <- add(geo, new("Node", id = df$id, label = df$label, x = df$x, y = df$y))
  }
  t.costs <- 1
  s.node <- 3 # index of dresden in geo$nodes
  geo$tsp <- TSP.NearestNeighbor(geo, nodes = geo$nodes, StartNode = s.node)
  # nearest-neighbour tour must match Test 01 before improving it
  expect_true(round(geo$tsp$F) == round(631.8579))
  expect_true(geo$tsp$x[1, 6] == 1)
  expect_true(geo$tsp$x[2, 3] == 1)
  expect_true(geo$tsp$x[4, 2] == 1)
  # 3-opt improvement reaches the same tour length as 2-opt here
  geo$tsp <- TSP.3OPT(geo, tsp = geo$tsp)
  expect_true(round(geo$tsp$F) == round(540.7254))
  expect_true(geo$tsp$x[1, 6] == 1)
  expect_true(geo$tsp$x[2, 3] == 1)
  expect_true(geo$tsp$x[4, 2] == 0)
})
/inst/tests/testTSP.R
permissive
felixlindemann/HNUORTools
R
false
false
2,736
r
options(stringsAsFactors=FALSE) # IMPORTANT!! keep strings as characters

context("Testing Travelling Salesman Problem ")

## Seven cities from data(bordersgermany) are used in every test:
## Chemnitz, Cottbus, Dresden, Erfurt, Gera, Halle, Leipzig.

context("\tTest 01: is solution for a TSP calculated with TSP-Nearest-Neighbor Method correctly?")
test_that("Test for TSP-Nearest-Neighbor Method", {
  geo <- new("GeoSituation")
  # example taken from
  data(bordersgermany)
  # chemnitz, cottbus, dresden, erfurt, gera, halle, leipzig
  A <- c(10, 11, 14, 17, 24, 22, 35)
  for (a in A) {
    df <- cities[a, ]
    geo <- add(geo, new("Node", id = df$id, label = df$label, x = df$x, y = df$y))
  }
  t.costs <- 1
  s.node <- 3 # index of dresden in geo$nodes
  geo$tsp <- TSP.NearestNeighbor(geo, nodes = geo$nodes, StartNode = s.node)
  expect_true(round(geo$tsp$F) == round(631.8579))
  expect_true(geo$tsp$x[1, 6] == 1)
  expect_true(geo$tsp$x[2, 3] == 1)
  expect_true(geo$tsp$x[4, 2] == 1)
})

context("\tTest 02 TSP-Nearest-Neighbor Method + 2opt")
test_that("Test for TSP-Nearest-Neighbor Method + 2opt", {
  data(bordersgermany)
  geo <- new("GeoSituation")
  # example taken from
  # chemnitz, cottbus, dresden, erfurt, gera, halle, leipzig
  A <- c(10, 11, 14, 17, 24, 22, 35)
  for (a in A) {
    df <- cities[a, ]
    geo <- add(geo, new("Node", id = df$id, label = df$label, x = df$x, y = df$y))
  }
  t.costs <- 1
  s.node <- 3 # index of dresden in geo$nodes
  geo$tsp <- TSP.NearestNeighbor(geo, nodes = geo$nodes, StartNode = s.node)
  # nearest-neighbour tour must match Test 01 before improving it
  expect_true(round(geo$tsp$F) == round(631.8579))
  expect_true(geo$tsp$x[1, 6] == 1)
  expect_true(geo$tsp$x[2, 3] == 1)
  expect_true(geo$tsp$x[4, 2] == 1)
  # 2-opt improvement shortens the tour and changes one edge
  geo$tsp <- TSP.2OPT(geo, tsp = geo$tsp)
  expect_true(round(geo$tsp$F) == round(540.7254))
  expect_true(geo$tsp$x[1, 6] == 1)
  expect_true(geo$tsp$x[2, 3] == 1)
  expect_true(geo$tsp$x[4, 2] == 0)
})

## FIX: this context was mislabelled "Test 02" in the original (copy-paste).
context("\tTest 03 TSP-Nearest-Neighbor Method + 3opt")
test_that("Test for TSP-Nearest-Neighbor Method + 3opt", {
  geo <- new("GeoSituation")
  # example taken from
  data(bordersgermany)
  # chemnitz, cottbus, dresden, erfurt, gera, halle, leipzig
  A <- c(10, 11, 14, 17, 24, 22, 35)
  for (a in A) {
    df <- cities[a, ]
    geo <- add(geo, new("Node", id = df$id, label = df$label, x = df$x, y = df$y))
  }
  t.costs <- 1
  s.node <- 3 # index of dresden in geo$nodes
  geo$tsp <- TSP.NearestNeighbor(geo, nodes = geo$nodes, StartNode = s.node)
  # nearest-neighbour tour must match Test 01 before improving it
  expect_true(round(geo$tsp$F) == round(631.8579))
  expect_true(geo$tsp$x[1, 6] == 1)
  expect_true(geo$tsp$x[2, 3] == 1)
  expect_true(geo$tsp$x[4, 2] == 1)
  # 3-opt improvement reaches the same tour length as 2-opt here
  geo$tsp <- TSP.3OPT(geo, tsp = geo$tsp)
  expect_true(round(geo$tsp$F) == round(540.7254))
  expect_true(geo$tsp$x[1, 6] == 1)
  expect_true(geo$tsp$x[2, 3] == 1)
  expect_true(geo$tsp$x[4, 2] == 0)
})
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/get_biogrid.R \name{get_biogrid} \alias{get_biogrid} \title{Downloading and filtering BIOGRID} \usage{ get_biogrid(species = "9606", version = "3.5.181", interactions = "physical") } \arguments{ \item{species}{numeric taxon of species} \item{version}{string of biogrid version} \item{interactions}{string stating either physical or genetic interactions} } \value{ biogrid data.frame with interactions } \description{ The function downloads the specified version of BioGRID for a particular taxon } \keyword{biogrid} \keyword{download}
/man/get_biogrid.Rd
no_license
sarbal/EGAD
R
false
true
614
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/get_biogrid.R \name{get_biogrid} \alias{get_biogrid} \title{Downloading and filtering BIOGRID} \usage{ get_biogrid(species = "9606", version = "3.5.181", interactions = "physical") } \arguments{ \item{species}{numeric taxon of species} \item{version}{string of biogrid version} \item{interactions}{string stating either physical or genetic interactions} } \value{ biogrid data.frame with interactions } \description{ The function downloads the specified version of BioGRID for a particular taxon } \keyword{biogrid} \keyword{download}
## Turn a Vector3 string column "(x, y, z)" into three numeric columns
## (<column_name>.x, <column_name>.y, <column_name>.z) and return the table.
##
## Args:
##   tab         : a data.table; modified in place via `:=` and also returned.
##   column_name : string, name of the column holding "(x, y, z)" values.
## Returns: `tab` with the three new numeric columns added.
vector3_to_columns <- function(tab, column_name) {
  xyz <- c("x", "y", "z")
  ## Strip the surrounding parentheses, then split on commas ->
  ## a list of 3-element character vectors, one per row.
  raw <- tab[, get(column_name)]
  splitted <- strsplit(substring(raw, 2, nchar(raw) - 1), ",")
  for (i in seq_along(xyz)) {               # seq_along instead of a manual counter
    new_name <- paste(column_name, xyz[i], sep = ".")
    ## vapply is type-stable (sapply could return a list on ragged input);
    ## a short split yields NA rather than an error.
    tab[, (new_name) := as.numeric(vapply(splitted,
                                          function(p) p[i],
                                          character(1)))]
  }
  return(tab)
}
/R/Preprocessing/Helpers/vector3-to-columns.R
no_license
hejtmy/iEEG-unity
R
false
false
520
r
## Turn a Vector3 string column "(x, y, z)" into three numeric columns
## (<column_name>.x, <column_name>.y, <column_name>.z) and return the table.
##
## Args:
##   tab         : a data.table; modified in place via `:=` and also returned.
##   column_name : string, name of the column holding "(x, y, z)" values.
## Returns: `tab` with the three new numeric columns added.
vector3_to_columns <- function(tab, column_name) {
  xyz <- c("x", "y", "z")
  ## Strip the surrounding parentheses, then split on commas ->
  ## a list of 3-element character vectors, one per row.
  raw <- tab[, get(column_name)]
  splitted <- strsplit(substring(raw, 2, nchar(raw) - 1), ",")
  for (i in seq_along(xyz)) {               # seq_along instead of a manual counter
    new_name <- paste(column_name, xyz[i], sep = ".")
    ## vapply is type-stable (sapply could return a list on ragged input);
    ## a short split yields NA rather than an error.
    tab[, (new_name) := as.numeric(vapply(splitted,
                                          function(p) p[i],
                                          character(1)))]
  }
  return(tab)
}
#-------------------------------------------------------------------#
#           Percent Bare Ground ~ Bee Species Richness              #
#-------------------------------------------------------------------#
# Research Question: How does the amount of bare ground present within
# the strips influence bee species richness?
# Objectives:
#   - Create model(s) to explore the relationship between bare ground
#     availability and bee species richness.
#   - Use the created model(s) to visualize the relationship graphically.

# Start ####
# Clear environment and set working directory.
# NOTE(review): rm(list = ls()) and setwd() in scripts are discouraged;
# kept to preserve the original interactive workflow.
rm(list = ls())
setwd("~/ISU/Project/Data")

# Load libraries
library(lubridate)
library(ggplot2)
library(ggpmisc)
library(dplyr)
library(lme4)
library(lmerTest)
library(MuMIn)
library(gridExtra)  # FIX: required for grid.arrange() at the end of the script

# Read in data
Quadrats <- read.csv("Plants/Quadrats.csv")
Bees <- read.csv("Bees/Bee IDs.csv")

# Use lubridate to allow R to read the dates
Quadrats$Date <- mdy(Quadrats$Date)
Quadrats$Year <- year(Quadrats$Date)
Bees$Date <- mdy(Bees$Date)
Bees$Year <- year(Bees$Date)

# Set BareGround column to numeric (must change to character first though)
Quadrats$BareGround <- as.numeric(as.character(Quadrats$BareGround))

# Calculate total bare ground per quadrat
bareground <- Quadrats %>%
  filter(!is.na(BareGround)) %>%
  group_by(Date, Site, Quadrat) %>%
  summarise(total.bareground = BareGround[1])

# Calculate average bare ground cover for each site and date
avg.bareground <- bareground %>%
  group_by(Date, Site) %>%
  summarise(avg.bareground = mean(total.bareground),
            number.quadrats = length(total.bareground))
# Two entries have only 9 quadrats included in the calculation due to
# absences in the original 2014 data set.

# Calculate number of bee species collected via all traps
bee.spp <- Bees %>%
  group_by(Site, Date) %>%
  filter(!is.na(Date)) %>%
  filter(Family != "Wasp") %>%
  filter(Binomial != "Wasp") %>%
  summarise(no.beespp = n_distinct(Binomial))

# Join the two datasets together
bareground.beespp <- full_join(bee.spp, avg.bareground, by = c("Date", "Site"))

# Remove dates without bare ground data
# NOTE(review): `bareground.bees` is created here but never used below.
bareground.bees <- bareground.beespp %>%
  na.omit(avg.bareground)

# Fill NAs in no.beespp with 0 (no bees were collected on these days)
bareground.beespp$no.beespp[is.na(bareground.beespp$no.beespp)] <- 0

# Create Year column
bareground.beespp$Year <- year(bareground.beespp$Date)

# Years 1-2 ####
#-------------------------------------------------------------------#
#                            Years 1-2                              #
#-------------------------------------------------------------------#
# Subset to include only 2014-2015
bareground.beespp12 <- bareground.beespp %>%
  filter(Year < 2016)

# Poisson GLMM of richness on bare ground with random intercepts for
# Site and Year (random effects, not fixed effects).
BGonBS12model <- glmer(no.beespp ~ avg.bareground + (1|Site) + (1|Year),
                       family = poisson,
                       data = bareground.beespp12)
summary(BGonBS12model)
AIC(BGonBS12model)

# Use MuMIn to get R-squared value of full model
r.squaredGLMM(BGonBS12model)

# Change "Year" column to factor.
bareground.beespp12$Year <- as.factor(bareground.beespp12$Year)

# Find coefficients of model
coef(summary(BGonBS12model))

# Percent Bare Ground vs. Bee Species Richness plot using ggplot2
BGonBS12plot <- ggplot(bareground.beespp12,
                       aes(x = avg.bareground, y = no.beespp)) +
  geom_point(aes(shape = Year, color = Year), size = 3) +
  geom_abline(intercept = coef(summary(BGonBS12model))[ , "Estimate"][1],
              slope = coef(summary(BGonBS12model))[ , "Estimate"][2]) +
  scale_color_manual(labels = c("2014", "2015"),
                     values = c("#FFB90F", "#000000")) +
  scale_shape_manual(labels = c("2014", "2015"),
                     values = c(15, 16)) +
  theme_bw() +
  labs(x = "Bare Ground (%)", y = "Bee Species Richness") +
  ggtitle("2014-2015\nInfluence of Bare Ground on \nBee Species Richness") +
  theme(plot.title = element_text(size = 15, face = "bold", hjust = 0.5)) +
  theme(legend.text = element_text(size = 10)) +
  theme(legend.title.align = 0.5)
BGonBS12plot

# Year 3 ####
#-------------------------------------------------------------------#
#                              Year 3                               #
#-------------------------------------------------------------------#
# Subset to include only 2016
bareground.beespp3 <- bareground.beespp %>%
  filter(Year == 2016)

# Poisson GLMM of richness on bare ground with a random intercept for Site
# (single year, so no Year term).
BGonBS3model <- glmer(no.beespp ~ avg.bareground + (1|Site),
                      family = poisson,
                      data = bareground.beespp3)
summary(BGonBS3model)
AIC(BGonBS3model)

# Use MuMIn to get R-squared value of full model
# BUG FIX: the original called r.squaredGLMM(BGonBS12model) here
# (copy-paste from the Years 1-2 section).
r.squaredGLMM(BGonBS3model)

# Change "Year" column to factor.
bareground.beespp3$Year <- as.factor(bareground.beespp3$Year)

# Find coefficients of model
coef(summary(BGonBS3model))

# Percent Bare Ground vs. Bee Species Richness plot using ggplot2
BGonBS3plot <- ggplot(bareground.beespp3,
                      aes(x = avg.bareground, y = no.beespp)) +
  geom_point(aes(shape = Year, color = Year), size = 3) +
  geom_abline(intercept = coef(summary(BGonBS3model))[ , "Estimate"][1],
              slope = coef(summary(BGonBS3model))[ , "Estimate"][2]) +
  scale_color_manual(labels = c("2016"), values = c("red3")) +
  scale_shape_manual(labels = c("2016"), values = c(17)) +
  theme_bw() +
  labs(x = "Bare Ground (%)", y = "Bee Species Richness") +
  ggtitle("2016\nInfluence of Bare Ground on \nBee Species Richness") +
  theme(plot.title = element_text(size = 15, face = "bold", hjust = 0.5)) +
  theme(legend.text = element_text(size = 10)) +
  theme(legend.title.align = 0.5)
BGonBS3plot

# Years 4-5 ####
#-------------------------------------------------------------------#
#                            Years 4-5                              #
#-------------------------------------------------------------------#
# Subset to include only years 2017-2018
bareground.beespp45 <- bareground.beespp %>%
  filter(Year > 2016)

# Poisson GLMM of richness on bare ground with random intercepts for
# Site and Year (random effects, not fixed effects as originally stated).
BGonBS45model <- glmer(no.beespp ~ avg.bareground + (1|Site) + (1|Year),
                       family = poisson,
                       data = bareground.beespp45)
summary(BGonBS45model)
AIC(BGonBS45model)

# Use MuMIn to get R-squared value of full model
r.squaredGLMM(BGonBS45model)

# Change "Year" column to factor.
bareground.beespp45$Year <- as.factor(bareground.beespp45$Year)

# Find coefficients of model
coef(summary(BGonBS45model))

# Percent Bare Ground vs. Bee Species Richness plot using ggplot2
BGonBS45plot <- ggplot(bareground.beespp45,
                       aes(x = avg.bareground, y = no.beespp)) +
  geom_point(aes(shape = Year, color = Year), size = 3) +
  geom_abline(intercept = coef(summary(BGonBS45model))[ , "Estimate"][1],
              slope = coef(summary(BGonBS45model))[ , "Estimate"][2]) +
  scale_color_manual(labels = c("2017", "2018"),
                     values = c("palegreen4", "orchid2")) +
  scale_shape_manual(labels = c("2017", "2018"),
                     values = c(18, 8)) +
  theme_bw() +
  labs(x = "Bare Ground (%)", y = "Bee Species Richness") +
  ggtitle("2017-2018\nInfluence of Bare Ground on \nBee Species Richness") +
  theme(plot.title = element_text(size = 15, face = "bold", hjust = 0.5)) +
  theme(legend.text = element_text(size = 10)) +
  theme(legend.title.align = 0.5)
BGonBS45plot

# Years 1-5 ####
#-------------------------------------------------------------------#
#                            Years 1-5                              #
#-------------------------------------------------------------------#
# Linear mixed model of richness on bare ground with random intercepts for
# Site and Year.
# NOTE(review): this section uses a Gaussian lmer while the per-period
# sections use Poisson glmer -- confirm this inconsistency is intended.
BGonBSmodel <- lmer(no.beespp ~ avg.bareground + (1|Site) + (1|Year),
                    data = bareground.beespp)
summary(BGonBSmodel)
AIC(BGonBSmodel)

# Use MuMIn to get R-squared value of full model
r.squaredGLMM(BGonBSmodel)

# Change "Year" column to factor.
bareground.beespp$Year <- as.factor(bareground.beespp$Year)

# Find coefficients of model for graph
coef(summary(BGonBSmodel))

# Percent Bare Ground vs. Bee Species Richness plot using ggplot2
BGonBS12345plot <- ggplot(bareground.beespp,
                          aes(x = avg.bareground, y = no.beespp)) +
  geom_point(aes(shape = Year, color = Year), size = 3) +
  geom_abline(intercept = coef(summary(BGonBSmodel))[ , "Estimate"][1],
              slope = coef(summary(BGonBSmodel))[ , "Estimate"][2]) +
  scale_color_manual(labels = c("2014", "2015", "2016", "2017", "2018"),
                     values = c("#FFB90F", "#000000", "red3", "palegreen4", "orchid2")) +
  scale_shape_manual(labels = c("2014", "2015", "2016", "2017", "2018"),
                     values = c(15, 16, 17, 18, 8)) +
  theme_bw() +
  labs(x = "Bare Ground (%)", y = "Bee Species Richness") +
  ggtitle("2014-2018\nInfluence of Bare Ground on \nBee Species Richness") +
  theme(plot.title = element_text(size = 15, face = "bold", hjust = 0.5)) +
  theme(legend.text = element_text(size = 10)) +
  theme(legend.title.align = 0.5)
BGonBS12345plot

# Multipanel graph showing plots for 2014-2015, 2016, 2017-2018, and 2014-2018.
# Annotation labels are hard-coded model summaries -- TODO confirm they match
# the fitted coefficients above if the data change.
BGonBS12plot.grid <- BGonBS12plot +
  expand_limits(y = c(0, 32)) +
  expand_limits(x = c(0, 70)) +
  geom_text(x = 60, y = 30, label = "y = 0.004x + 2.313", size = 4) +
  geom_text(x = 55, y = 29.2, label = "p = 0.365", size = 4) +
  theme(legend.position = "none")
BGonBS3plot.grid <- BGonBS3plot +
  expand_limits(y = c(0, 32)) +
  expand_limits(x = c(0, 70)) +
  labs(y = "") +
  geom_text(x = 60, y = 30, label = "y = -0.021x + 2.447", size = 4) +
  geom_text(x = 55, y = 29.2, label = "p = 0.061", size = 4) +
  theme(legend.position = "none")
BGonBS45plot.grid <- BGonBS45plot +
  expand_limits(y = c(0, 32)) +
  expand_limits(x = c(0, 70)) +
  labs(y = "") +
  geom_text(x = 60, y = 30, label = "y = 0.005x + 2.516", size = 4) +
  geom_text(x = 55, y = 29.2, label = "p = 0.209", size = 4) +
  theme(legend.position = "none")
BGonBS12345plot.grid <- BGonBS12345plot +
  expand_limits(y = c(0, 32)) +
  expand_limits(x = c(0, 70)) +
  labs(y = "") +
  geom_text(x = 60, y = 30, label = "y = -0.005x + 13.414", size = 4) +
  geom_text(x = 55, y = 29.2, label = "p = 0.890", size = 4)
grid.arrange(BGonBS12plot.grid, BGonBS3plot.grid,
             BGonBS45plot.grid, BGonBS12345plot.grid, ncol = 4)
/AnalysisScripts/BareGround~BeeSpecies.R
no_license
morganmackert/mmackert
R
false
false
11,297
r
#-------------------------------------------------------------------# # Percent Bare Ground ~ Bee Species Richness # #-------------------------------------------------------------------# #Research Question: How does the amount of bare ground present within the strips influence bee species richness? #Objectives: #Create model(s) to explore relationship between bare ground availability and bee species richness #Use created model(s) to visualize the relationship graphically #Start #### #Clear environment and set working directory rm(list=ls()) setwd("~/ISU/Project/Data") #Load libraries library(lubridate) library(ggplot2) library(ggpmisc) library(dplyr) library(lme4) library(lmerTest) library(MuMIn) #Read in data Quadrats <- read.csv("Plants/Quadrats.csv") Bees <- read.csv("Bees/Bee IDs.csv") #Use lubridate to allow R to read the dates Quadrats$Date <- mdy(Quadrats$Date) Quadrats$Year <- year(Quadrats$Date) Bees$Date <- mdy(Bees$Date) Bees$Year <- year(Bees$Date) #Set BareGround column to numeric (must change to character first though) Quadrats$BareGround <- as.numeric(as.character(Quadrats$BareGround)) #Calculate total bare ground bareground <- Quadrats %>% filter(!is.na(BareGround)) %>% group_by(Date, Site, Quadrat) %>% summarise(total.bareground = BareGround[1]) #Calculate average bare ground cover for each site and date avg.bareground <- bareground %>% group_by(Date, Site) %>% summarise(avg.bareground = mean(total.bareground), number.quadrats = length(total.bareground)) #Two entries have only 9 quadrats included in calculation due to absences in the original 2014 data set #Calculate number of bees collected via all traps bee.spp <- Bees %>% group_by(Site, Date) %>% filter(!is.na(Date)) %>% filter(Family != "Wasp") %>% filter(Binomial != "Wasp") %>% summarise(no.beespp = n_distinct(Binomial)) #Join the two datasets together bareground.beespp <- full_join(bee.spp, avg.bareground, by = c("Date", "Site")) #Remove dates from bareground.bees without bare ground data 
bareground.bees <- bareground.beespp %>% na.omit(avg.bareground) #Fill NAs in bareground.bees with 0 (no bees were collected on these days) bareground.beespp$no.beespp[is.na(bareground.beespp$no.beespp)] <- 0 #Create Year column bareground.beespp$Year <- year(bareground.beespp$Date) #Years 1-2 #### #-------------------------------------------------------------------# # Years 1-2 # #-------------------------------------------------------------------# #Subset bareground.etbees to include only 2014-2015 bareground.beespp12 <- bareground.beespp %>% filter(Year < 2016) #Model for bee abundance predicted by bare ground BGonBS12model <- glmer(no.beespp ~ avg.bareground + (1|Site) + (1|Year), family = poisson, data = bareground.beespp12) summary(BGonBS12model) AIC(BGonBS12model) #Use MuMIn to get R-squared value of full model r.squaredGLMM(BGonBS12model) #Change "Year" column to factor. bareground.beespp12$Year <- as.factor(bareground.beespp12$Year) #Find coefficients of model coef(summary(BGonBS12model)) #Morgan's plot: Percent Bare Ground vs. 
Bee Abundance plot using ggplot2 BGonBS12plot <- ggplot(bareground.beespp12, aes(x = avg.bareground, y = no.beespp)) + geom_point(aes(shape = Year, color = Year), size = 3) + geom_abline(intercept = coef(summary(BGonBS12model))[ , "Estimate"][1], slope = coef(summary(BGonBS12model))[ , "Estimate"][2]) + scale_color_manual(labels = c("2014", "2015"), values = c("#FFB90F", "#000000")) + scale_shape_manual(labels = c("2014", "2015"), values = c(15, 16)) + theme_bw() + labs(x = "Bare Ground (%)", y = "Bee Species Richness") + ggtitle("2014-2015\nInfluence of Bare Ground on \nBee Species Richness") + theme(plot.title = element_text(size = 15, face = "bold", hjust = 0.5)) + theme(legend.text = element_text(size = 10)) + theme(legend.title.align = 0.5) BGonBS12plot #Year 3 #### #-------------------------------------------------------------------# # Year 3 # #-------------------------------------------------------------------# #Subset bareground.etbees to include only 2016 bareground.beespp3 <- bareground.beespp %>% filter(Year == 2016) #Model for bee abundance predicted by bare ground BGonBS3model <- glmer(no.beespp ~ avg.bareground + (1|Site), family = poisson, data = bareground.beespp3) summary(BGonBS3model) AIC(BGonBS3model) #Use MuMIn to get R-squared value of full model r.squaredGLMM(BGonBS12model) #Change "Year" column to factor. bareground.beespp3$Year <- as.factor(bareground.beespp3$Year) #Find coefficients of model coef(summary(BGonBS3model)) #Morgan's plot: Percent Bare Ground vs. 
Bee Abundance plot using ggplot2 BGonBS3plot <- ggplot(bareground.beespp3, aes(x = avg.bareground, y = no.beespp)) + geom_point(aes(shape = Year, color = Year), size = 3) + geom_abline(intercept = coef(summary(BGonBS3model))[ , "Estimate"][1], slope = coef(summary(BGonBS3model))[ , "Estimate"][2]) + scale_color_manual(labels = c("2016"), values = c("red3")) + scale_shape_manual(labels = c("2016"), values = c(17)) + theme_bw() + labs(x = "Bare Ground (%)", y = "Bee Species Richness") + ggtitle("2016\nInfluence of Bare Ground on \nBee Species Richness") + theme(plot.title = element_text(size = 15, face = "bold", hjust = 0.5)) + theme(legend.text = element_text(size = 10)) + theme(legend.title.align = 0.5) BGonBS3plot #Years 4-5 #### #-------------------------------------------------------------------# # Years 4-5 # #-------------------------------------------------------------------# #Subset bareground.etbees to include only years 2017-2018 bareground.beespp45 <- bareground.beespp %>% filter(Year > 2016) #Model for bee abundance predicted by bare ground including Year and Site as fixed effects. BGonBS45model <- glmer(no.beespp ~ avg.bareground + (1|Site) + (1|Year), family = poisson, data = bareground.beespp45) summary(BGonBS45model) AIC(BGonBS45model) #Use MuMIn to get R-squared value of full model r.squaredGLMM(BGonBS45model) #Change "Year" column to factor. bareground.beespp45$Year <- as.factor(bareground.beespp45$Year) #Find coefficients of model coef(summary(BGonBS45model)) #Morgan's plot: Percent Bare Ground vs. 
Bee Abundance plot using ggplot2 BGonBS45plot <- ggplot(bareground.beespp45, aes(x = avg.bareground, y = no.beespp)) + geom_point(aes(shape = Year, color = Year), size = 3) + geom_abline(intercept = coef(summary(BGonBS45model))[ , "Estimate"][1], slope = coef(summary(BGonBS45model))[ , "Estimate"][2]) + scale_color_manual(labels = c("2017", "2018"), values = c("palegreen4", "orchid2")) + scale_shape_manual(labels = c("2017", "2018"), values = c(18, 8)) + theme_bw() + labs(x = "Bare Ground (%)", y = "Bee Species Richness") + ggtitle("2017-2018\nInfluence of Bare Ground on \nBee Species Richness") + theme(plot.title = element_text(size = 15, face = "bold", hjust = 0.5)) + theme(legend.text = element_text(size = 10)) + theme(legend.title.align = 0.5) BGonBS45plot #Years 1-5 #### #-------------------------------------------------------------------# # Years 1-5 # #-------------------------------------------------------------------# #Model for bee abundance predicted by bare ground including Year and Site as fixed effects. BGonBSmodel <- lmer(no.beespp ~ avg.bareground + (1|Site) + (1|Year), data = bareground.beespp) summary(BGonBSmodel) AIC(BGonBSmodel) #Use MuMIn to get R-squared value of full model r.squaredGLMM(BGonBSmodel) #Change "Year" column to factor. bareground.beespp$Year <- as.factor(bareground.beespp$Year) #Find coefficients of model for graph coef(summary(BGonBSmodel)) #Morgan's plot: Percent Bare Ground vs. 
Bee Abundance plot using ggplot2 BGonBS12345plot <- ggplot(bareground.beespp, aes(x = avg.bareground, y = no.beespp)) + geom_point(aes(shape = Year, color = Year), size = 3) + geom_abline(intercept = coef(summary(BGonBSmodel))[ , "Estimate"][1], slope = coef(summary(BGonBSmodel))[ , "Estimate"][2]) + scale_color_manual(labels = c("2014", "2015", "2016", "2017", "2018"), values = c("#FFB90F", "#000000", "red3", "palegreen4", "orchid2")) + scale_shape_manual(labels = c("2014", "2015", "2016", "2017", "2018"), values = c(15, 16, 17, 18, 8)) + theme_bw() + labs(x = "Bare Ground (%)", y = "Bee Species Richness") + ggtitle("2014-2018\nInfluence of Bare Ground on \nBee Species Richness") + theme(plot.title = element_text(size = 15, face = "bold", hjust = 0.5)) + theme(legend.text = element_text(size = 10)) + theme(legend.title.align = 0.5) BGonBS12345plot #Multipanel graph showing plots for 2014-2015, 2016, 2017-2018, and 2014-2018 BGonBS12plot.grid <- BGonBS12plot + expand_limits(y = c(0, 32)) + expand_limits(x = c(0, 70)) + geom_text(x = 60, y = 30, label = "y = 0.004x + 2.313", size = 4) + geom_text(x = 55, y = 29.2, label = "p = 0.365", size = 4) + theme(legend.position = "none") BGonBS3plot.grid <- BGonBS3plot + expand_limits(y = c(0, 32)) + expand_limits(x = c(0, 70)) + labs(y = "") + geom_text(x = 60, y = 30, label = "y = -0.021x + 2.447", size = 4) + geom_text(x = 55, y = 29.2, label = "p = 0.061", size = 4) + theme(legend.position = "none") BGonBS45plot.grid <- BGonBS45plot + expand_limits(y = c(0, 32)) + expand_limits(x = c(0, 70)) + labs(y = "") + geom_text(x = 60, y = 30, label = "y = 0.005x + 2.516", size = 4) + geom_text(x = 55, y = 29.2, label = "p = 0.209", size = 4) + theme(legend.position = "none") BGonBS12345plot.grid <- BGonBS12345plot + expand_limits(y = c(0, 32)) + expand_limits(x = c(0, 70)) + labs(y = "") + geom_text(x = 60, y = 30, label = "y = -0.005x + 13.414", size = 4) + geom_text(x = 55, y = 29.2, label = "p = 0.890", size = 4) 
grid.arrange(BGonBS12plot.grid, BGonBS3plot.grid, BGonBS45plot.grid, BGonBS12345plot.grid, ncol = 4)
\name{ss.1way} \alias{ss.1way} \title{ Sample size calculation for balanced one-way ANOVA models } \description{ Calculate sample size for one-way ANOVA models. } \usage{ ss.1way(k=k, alpha=alpha, beta=beta, f=NULL, delta=delta, sigma=sigma, B=B) } \arguments{ \item{k}{ Number of groups } \item{alpha}{ Significant level (Type I error probability) } \item{beta}{ Type II error probability (Power=1-beta) } \item{f}{ Effect size } \item{delta}{ The smallest difference among k group } \item{sigma}{ Standard deviation, i.e. square root of variance } \item{B}{ Iteration times, default number is 100 } } \details{ Beta is the type II error probability which equals 1-power. For example, if the target power is 85\% (=0.85), the corresponding beta equals 0.15. If effect size f is known, plug it in to the function; If delta and sigma are known instead of effect size, put NULL to f, or just miss f argument. } \value{ Object of class "power.htest", a list of the arguments (including the computed one) augmented with "method" and "note" elements. } \author{ Pengcheng Lu, Junhao Liu, and Devin Koestler. } \references{ Angela Dean & Daniel Voss (1999). Design and Analysis of Experiments. Springer. } \examples{ ## Example 1 ss.1way(k=5, alpha=0.05, beta=0.1, f=1.5, B=100) ## Example 2 ss.1way(k=5, alpha=0.05, beta=0.1, delta=1.5, sigma=1, B=100) ss.1way(k=5, alpha=0.05, beta=0.1, f=NULL, delta=1.5, sigma=1, B=100) } \keyword{One-Way ANOVA} \keyword{Sample Size}
/man/ss.1way.Rd
no_license
cran/pwr2
R
false
false
1,555
rd
\name{ss.1way} \alias{ss.1way} \title{ Sample size calculation for balanced one-way ANOVA models } \description{ Calculate sample size for one-way ANOVA models. } \usage{ ss.1way(k=k, alpha=alpha, beta=beta, f=NULL, delta=delta, sigma=sigma, B=B) } \arguments{ \item{k}{ Number of groups } \item{alpha}{ Significant level (Type I error probability) } \item{beta}{ Type II error probability (Power=1-beta) } \item{f}{ Effect size } \item{delta}{ The smallest difference among k group } \item{sigma}{ Standard deviation, i.e. square root of variance } \item{B}{ Iteration times, default number is 100 } } \details{ Beta is the type II error probability which equals 1-power. For example, if the target power is 85\% (=0.85), the corresponding beta equals 0.15. If effect size f is known, plug it in to the function; If delta and sigma are known instead of effect size, put NULL to f, or just miss f argument. } \value{ Object of class "power.htest", a list of the arguments (including the computed one) augmented with "method" and "note" elements. } \author{ Pengcheng Lu, Junhao Liu, and Devin Koestler. } \references{ Angela Dean & Daniel Voss (1999). Design and Analysis of Experiments. Springer. } \examples{ ## Example 1 ss.1way(k=5, alpha=0.05, beta=0.1, f=1.5, B=100) ## Example 2 ss.1way(k=5, alpha=0.05, beta=0.1, delta=1.5, sigma=1, B=100) ss.1way(k=5, alpha=0.05, beta=0.1, f=NULL, delta=1.5, sigma=1, B=100) } \keyword{One-Way ANOVA} \keyword{Sample Size}
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/311.Expec_Leng_ADJ_All.R \name{lengthAAS} \alias{lengthAAS} \title{Expected length and sum of length of Adjusted ArcSine method} \usage{ lengthAAS(n, alp, h, a, b) } \arguments{ \item{n}{- Number of trials} \item{alp}{- Alpha value (significance level required)} \item{h}{- Adding factor} \item{a}{- Beta parameters for hypo "p"} \item{b}{- Beta parameters for hypo "p"} } \value{ A dataframe with \item{sumLen}{ The sum of the expected length} \item{explMean}{ The mean of the expected length} \item{explSD}{ The Standard Deviation of the expected length} \item{explMax}{ The max of the expected length} \item{explLL}{ The Lower limit of the expected length calculated using mean - SD} \item{explUL}{ The Upper limit of the expected length calculated using mean + SD} } \description{ Expected length and sum of length of Adjusted ArcSine method } \details{ Evaluation of adjusted Wald-type interval for the arcsine transformation of the parameter p using sum of length of the \eqn{n + 1} intervals } \examples{ n= 10; alp=0.05; h=2;a=1;b=1; lengthAAS(n,alp,h,a,b) } \references{ [1] 1998 Agresti A and Coull BA. Approximate is better than "Exact" for interval estimation of binomial proportions. The American Statistician: 52; 119 - 126. [2] 1998 Newcombe RG. Two-sided confidence intervals for the single proportion: Comparison of seven methods. Statistics in Medicine: 17; 857 - 872. [3] 2008 Pires, A.M., Amado, C. Interval Estimators for a Binomial Proportion: Comparison of Twenty Methods. REVSTAT - Statistical Journal, 6, 165-197. 
} \seealso{ Other Expected length of adjusted methods: \code{\link{PlotexplAAS}()}, \code{\link{PlotexplAAll}()}, \code{\link{PlotexplALR}()}, \code{\link{PlotexplALT}()}, \code{\link{PlotexplASC}()}, \code{\link{PlotexplATW}()}, \code{\link{PlotexplAWD}()}, \code{\link{PlotlengthAAS}()}, \code{\link{PlotlengthAAll}()}, \code{\link{PlotlengthALR}()}, \code{\link{PlotlengthALT}()}, \code{\link{PlotlengthASC}()}, \code{\link{PlotlengthATW}()}, \code{\link{PlotlengthAWD}()}, \code{\link{lengthAAll}()}, \code{\link{lengthALR}()}, \code{\link{lengthALT}()}, \code{\link{lengthASC}()}, \code{\link{lengthATW}()}, \code{\link{lengthAWD}()} } \concept{Expected length of adjusted methods}
/man/lengthAAS.Rd
no_license
RajeswaranV/proportion
R
false
true
2,325
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/311.Expec_Leng_ADJ_All.R \name{lengthAAS} \alias{lengthAAS} \title{Expected length and sum of length of Adjusted ArcSine method} \usage{ lengthAAS(n, alp, h, a, b) } \arguments{ \item{n}{- Number of trials} \item{alp}{- Alpha value (significance level required)} \item{h}{- Adding factor} \item{a}{- Beta parameters for hypo "p"} \item{b}{- Beta parameters for hypo "p"} } \value{ A dataframe with \item{sumLen}{ The sum of the expected length} \item{explMean}{ The mean of the expected length} \item{explSD}{ The Standard Deviation of the expected length} \item{explMax}{ The max of the expected length} \item{explLL}{ The Lower limit of the expected length calculated using mean - SD} \item{explUL}{ The Upper limit of the expected length calculated using mean + SD} } \description{ Expected length and sum of length of Adjusted ArcSine method } \details{ Evaluation of adjusted Wald-type interval for the arcsine transformation of the parameter p using sum of length of the \eqn{n + 1} intervals } \examples{ n= 10; alp=0.05; h=2;a=1;b=1; lengthAAS(n,alp,h,a,b) } \references{ [1] 1998 Agresti A and Coull BA. Approximate is better than "Exact" for interval estimation of binomial proportions. The American Statistician: 52; 119 - 126. [2] 1998 Newcombe RG. Two-sided confidence intervals for the single proportion: Comparison of seven methods. Statistics in Medicine: 17; 857 - 872. [3] 2008 Pires, A.M., Amado, C. Interval Estimators for a Binomial Proportion: Comparison of Twenty Methods. REVSTAT - Statistical Journal, 6, 165-197. 
} \seealso{ Other Expected length of adjusted methods: \code{\link{PlotexplAAS}()}, \code{\link{PlotexplAAll}()}, \code{\link{PlotexplALR}()}, \code{\link{PlotexplALT}()}, \code{\link{PlotexplASC}()}, \code{\link{PlotexplATW}()}, \code{\link{PlotexplAWD}()}, \code{\link{PlotlengthAAS}()}, \code{\link{PlotlengthAAll}()}, \code{\link{PlotlengthALR}()}, \code{\link{PlotlengthALT}()}, \code{\link{PlotlengthASC}()}, \code{\link{PlotlengthATW}()}, \code{\link{PlotlengthAWD}()}, \code{\link{lengthAAll}()}, \code{\link{lengthALR}()}, \code{\link{lengthALT}()}, \code{\link{lengthASC}()}, \code{\link{lengthATW}()}, \code{\link{lengthAWD}()} } \concept{Expected length of adjusted methods}
# The commands for the example in Diggle, Ribeiro Jr and Christensen (2003) [bookchapter], # and Christensen and Ribeiro Jr (2002) [R-news]. # WARNING: RUNNING THIS IS VERY TIME-CONSUMING AND MEMORY-DEMANDING library(geoR) library(geoRglm) set.seed(2018) N = 81 # 36 64 81 ## Simulating data 二项分布 sim <- grf(grid = expand.grid(x = seq(0.0555, 0.944444, l = sqrt(N)), y = seq(0.0555, 0.944444, l = sqrt(N))), cov.pars = c(0.5, 0.2), kappa = 0.5, nugget = 0 ) # cov.pars 依次是 sigma^2 (partial sill) 和 phi (range parameter) sim$units.m <- rep(4, N) # N 个采样点 每个采样点的观察值服从二项分布,其值分别取 0,1,2,3 sim$prob <- exp(sim$data) / (1 + exp(sim$data)) sim$data <- rbinom(N, size = sim$units.m, prob = sim$prob) ## Visualising the data and the (unobserved) random effects 空间效应或者说平稳高斯过程 pdf(file = "binom-without-nugget-geoRglm.pdf",width = 8,height = 4) par(mfrow = c(1, 2), mar = c(2.3, 2.5, .5, .7), mgp = c(1.5, .6, 0), cex = 1) plot(c(0, 1), c(-0.1, 1), type = "n", xlab = "Horizontal Coordinate", ylab = "Vertical Coordinate") text(sim$coords[, 1], sim$coords[, 2], format(sim$prob, digits = 2), cex = 0.9) plot(c(0, 1), c(-0.1, 1), type = "n", xlab = "Horizontal Coordinate", ylab = "Vertical Coordinate") text(sim$coords[, 1], sim$coords[, 2], format(sim$data), cex = 1.1) points(sim$coords[c(1, 29), ], cex = 5.5) dev.off() ## Setting input options and running the function 各参数先验设置 ## beta.prior 均值向量参数 beta 的先验分布为正态分布,且先验分布中的参数分别是均值 beta 和标准差 beta.var ## phi.prior 范围参数 phi 的先验分布是 exponential 指数分布 且先验分布的均值 phi = 0.2 ## phi.discrete 表示 support points for the discretisation of the prior for the parameter phi. 
## 默认 tausq.rel = 0 无块金效应 ## sigma^2 的先验分布是 sc.inv.chisq (scaled inverse-chi^2 prior distribution) 逆卡方分布 自由度为 5 # sigmasq 表示 Parameter in the scaled inverse-chi^2 prior distribution for sigma^2 prior.sim <- prior.glm.control( beta.prior = "normal", beta = 0, beta.var = 1, phi.prior = "exponential", phi = 0.2, phi.discrete = seq(0.005, 0.3, l = 60), sigmasq.prior = "sc.inv.chisq", df.sigmasq = 5, sigmasq = 0.5 ) ## MCMC 使用 Langevin-Hastings 利用了 proposal distribution 中的梯度信息, ## 相比 random walk Metropolis 算法,在应用中有更好的效果 mcmc.sim <- mcmc.control(S.scale = 0.05, phi.scale = 0.015, thin = 100, burn.in = 10000) pred.grid <- expand.grid(x = seq(0.0125, 0.9875, l = 40), y = seq(0.0125, 0.9875, l = 40)) # 预测位置 40 x 40 = 1600 个 out.sim <- output.glm.control(sim.predict = TRUE) # 这一步很费时间 system.time({ run.sim <- binom.krige.bayes(sim, locations = pred.grid, prior = prior.sim, mcmc.input = mcmc.sim, output = out.sim) }, gcFirst = TRUE) df <- data.frame( beta = c(mean(run.sim$posterior$beta$sample), var(run.sim$posterior$beta$sample), quantile(run.sim$posterior$beta$sample, probs = c(2.5, 25, 50, 75, 97.5) / 100)), phi = c(mean(run.sim$posterior$phi$sample), var(run.sim$posterior$phi$sample), quantile(run.sim$posterior$phi$sample, probs = c(2.5, 25, 50, 75, 97.5) / 100)), sigmasq = c(mean(run.sim$posterior$sigmasq$sample), var(run.sim$posterior$sigmasq$sample), quantile(run.sim$posterior$sigmasq$sample, probs = c(2.5, 25, 50, 75, 97.5) / 100)) ) # 无块金效应 tau^2 = 0 kappa = 0.5 样本量 N = 6*6 = 36 # 参数真值 beta = 0 phi = 0.2 sigmasq = 0.5 knitr::kable(t(df), col.names = c("mean","var","2.5%","25%","50%","75%","97.5%"), digits = 3,format = "markdown", padding = 2) # marginal modes run.sim$posterior$beta$mean run.sim$posterior$beta$var run.sim$posterior$phi$mean run.sim$posterior$phi$var run.sim$posterior$sigmasq$mean run.sim$posterior$sigmasq$var # 64 个采样点 exp(S)/(1 + exp(S)) 后验分布的 5个分位点 loc_quant <- t(apply(run.sim$posterior$simulations, 1, quantile, probs = c(2.5, 25, 50, 75, 97.5) / 
100)) loc_prob <- cbind( mean = apply(run.sim$posterior$simulations, 1, mean), # 后验分布 p(x) 的均值 var = apply(run.sim$posterior$simulations, 1, var), # 后验分布 p(x) 的方差 sd = apply(run.sim$posterior$simulations, 1, sd), # 后验分布 p(x) 的标准差 loc_quant ) rownames(loc_prob) <- paste0("$p(x_{", seq(64), "})$") # 输出为 markdown 形式插入到 R Markdown 文档中 knitr::kable(loc_prob, digits = 3, format = "markdown", padding = 2) # 模拟过程的诊断 ## Autocorrelations pdf(file = "binom-without-nugget-geoRglm-acf.pdf",width = 6,height = 8) par(mfrow = c(3, 2), mar = c(2.3, 2.5, .5, .7), mgp = c(1.5, .6, 0), cex = 0.6) plot(run.sim$posterior$sim[1, ], type = "l", ylab = "S(0.056, 0.056)") acf(run.sim$posterior$sim[1, ], main = "") plot(run.sim$posterior$sim[29, ], type = "l", ylab = "S(0.563, 0.436)") acf(run.sim$posterior$sim[29, ], main = "") plot(run.sim$posterior$phi$sample, type = "l", ylab = "phi") acf(run.sim$posterior$phi$sample, main = "") dev.off() ## Plot of timeseries # 任意取两个位置观察其后验分布 pdf(file = "binom-without-nugget-geoRglm-ts.pdf",width = 6,height = 9) par(mfrow = c(3, 1), mar = c(2.3, 2.5, .5, .7), mgp = c(1.5, .6, 0), cex = 0.6) plot(run.sim$posterior$sim[1, ], type = "l", ylab = "S(0.056, 0.056)") plot(run.sim$posterior$sim[29, ], type = "l", ylab = "S(0.563, 0.436)") plot(run.sim$posterior$phi$sample, type = "l", ylab = "phi") dev.off() ## Predictions sim.predict <- apply(run.sim$pred$simulations, 1, mean) sim.predict.var <- apply(run.sim$pred$simulations, 1, var) pdf(file = "binom-without-nugget-geoRglm-pred.pdf",width = 6,height = 3) par(mfrow = c(1, 2), mar = c(2.3, 2.5, .5, .7), mgp = c(1.5, .6, 0), cex = 0.6) # 1600 个点的预测值 image( x = run.sim, locations = pred.grid, values.to.plot = sim.predict, col = gray(seq(1, 0, l = 30)), x.leg = c(0.1, 0.9), y.leg = c(-0.12, -0.07), cex = 1.0, xlab = "Horizontal Coordinate", ylab = "Vertical Coordinate" ) # 预测值对应的方差 image( x = run.sim, locations = pred.grid, values = sim.predict.var, col = gray(seq(1, 0, l = 30)), x.leg = c(0.1, 0.9), y.leg = 
c(-0.12, -0.07), cex = 1.0, xlab = "Horizontal Coordinate", ylab = "Vertical Coordinate" ) dev.off()
/code/05-sim-Langevin-Hastings-binom.R
permissive
ahualian/Thesis-Template-Bookdown
R
false
false
6,483
r
# The commands for the example in Diggle, Ribeiro Jr and Christensen (2003) [bookchapter], # and Christensen and Ribeiro Jr (2002) [R-news]. # WARNING: RUNNING THIS IS VERY TIME-CONSUMING AND MEMORY-DEMANDING library(geoR) library(geoRglm) set.seed(2018) N = 81 # 36 64 81 ## Simulating data 二项分布 sim <- grf(grid = expand.grid(x = seq(0.0555, 0.944444, l = sqrt(N)), y = seq(0.0555, 0.944444, l = sqrt(N))), cov.pars = c(0.5, 0.2), kappa = 0.5, nugget = 0 ) # cov.pars 依次是 sigma^2 (partial sill) 和 phi (range parameter) sim$units.m <- rep(4, N) # N 个采样点 每个采样点的观察值服从二项分布,其值分别取 0,1,2,3 sim$prob <- exp(sim$data) / (1 + exp(sim$data)) sim$data <- rbinom(N, size = sim$units.m, prob = sim$prob) ## Visualising the data and the (unobserved) random effects 空间效应或者说平稳高斯过程 pdf(file = "binom-without-nugget-geoRglm.pdf",width = 8,height = 4) par(mfrow = c(1, 2), mar = c(2.3, 2.5, .5, .7), mgp = c(1.5, .6, 0), cex = 1) plot(c(0, 1), c(-0.1, 1), type = "n", xlab = "Horizontal Coordinate", ylab = "Vertical Coordinate") text(sim$coords[, 1], sim$coords[, 2], format(sim$prob, digits = 2), cex = 0.9) plot(c(0, 1), c(-0.1, 1), type = "n", xlab = "Horizontal Coordinate", ylab = "Vertical Coordinate") text(sim$coords[, 1], sim$coords[, 2], format(sim$data), cex = 1.1) points(sim$coords[c(1, 29), ], cex = 5.5) dev.off() ## Setting input options and running the function 各参数先验设置 ## beta.prior 均值向量参数 beta 的先验分布为正态分布,且先验分布中的参数分别是均值 beta 和标准差 beta.var ## phi.prior 范围参数 phi 的先验分布是 exponential 指数分布 且先验分布的均值 phi = 0.2 ## phi.discrete 表示 support points for the discretisation of the prior for the parameter phi. 
## 默认 tausq.rel = 0 无块金效应 ## sigma^2 的先验分布是 sc.inv.chisq (scaled inverse-chi^2 prior distribution) 逆卡方分布 自由度为 5 # sigmasq 表示 Parameter in the scaled inverse-chi^2 prior distribution for sigma^2 prior.sim <- prior.glm.control( beta.prior = "normal", beta = 0, beta.var = 1, phi.prior = "exponential", phi = 0.2, phi.discrete = seq(0.005, 0.3, l = 60), sigmasq.prior = "sc.inv.chisq", df.sigmasq = 5, sigmasq = 0.5 ) ## MCMC 使用 Langevin-Hastings 利用了 proposal distribution 中的梯度信息, ## 相比 random walk Metropolis 算法,在应用中有更好的效果 mcmc.sim <- mcmc.control(S.scale = 0.05, phi.scale = 0.015, thin = 100, burn.in = 10000) pred.grid <- expand.grid(x = seq(0.0125, 0.9875, l = 40), y = seq(0.0125, 0.9875, l = 40)) # 预测位置 40 x 40 = 1600 个 out.sim <- output.glm.control(sim.predict = TRUE) # 这一步很费时间 system.time({ run.sim <- binom.krige.bayes(sim, locations = pred.grid, prior = prior.sim, mcmc.input = mcmc.sim, output = out.sim) }, gcFirst = TRUE) df <- data.frame( beta = c(mean(run.sim$posterior$beta$sample), var(run.sim$posterior$beta$sample), quantile(run.sim$posterior$beta$sample, probs = c(2.5, 25, 50, 75, 97.5) / 100)), phi = c(mean(run.sim$posterior$phi$sample), var(run.sim$posterior$phi$sample), quantile(run.sim$posterior$phi$sample, probs = c(2.5, 25, 50, 75, 97.5) / 100)), sigmasq = c(mean(run.sim$posterior$sigmasq$sample), var(run.sim$posterior$sigmasq$sample), quantile(run.sim$posterior$sigmasq$sample, probs = c(2.5, 25, 50, 75, 97.5) / 100)) ) # 无块金效应 tau^2 = 0 kappa = 0.5 样本量 N = 6*6 = 36 # 参数真值 beta = 0 phi = 0.2 sigmasq = 0.5 knitr::kable(t(df), col.names = c("mean","var","2.5%","25%","50%","75%","97.5%"), digits = 3,format = "markdown", padding = 2) # marginal modes run.sim$posterior$beta$mean run.sim$posterior$beta$var run.sim$posterior$phi$mean run.sim$posterior$phi$var run.sim$posterior$sigmasq$mean run.sim$posterior$sigmasq$var # 64 个采样点 exp(S)/(1 + exp(S)) 后验分布的 5个分位点 loc_quant <- t(apply(run.sim$posterior$simulations, 1, quantile, probs = c(2.5, 25, 50, 75, 97.5) / 
100)) loc_prob <- cbind( mean = apply(run.sim$posterior$simulations, 1, mean), # 后验分布 p(x) 的均值 var = apply(run.sim$posterior$simulations, 1, var), # 后验分布 p(x) 的方差 sd = apply(run.sim$posterior$simulations, 1, sd), # 后验分布 p(x) 的标准差 loc_quant ) rownames(loc_prob) <- paste0("$p(x_{", seq(64), "})$") # 输出为 markdown 形式插入到 R Markdown 文档中 knitr::kable(loc_prob, digits = 3, format = "markdown", padding = 2) # 模拟过程的诊断 ## Autocorrelations pdf(file = "binom-without-nugget-geoRglm-acf.pdf",width = 6,height = 8) par(mfrow = c(3, 2), mar = c(2.3, 2.5, .5, .7), mgp = c(1.5, .6, 0), cex = 0.6) plot(run.sim$posterior$sim[1, ], type = "l", ylab = "S(0.056, 0.056)") acf(run.sim$posterior$sim[1, ], main = "") plot(run.sim$posterior$sim[29, ], type = "l", ylab = "S(0.563, 0.436)") acf(run.sim$posterior$sim[29, ], main = "") plot(run.sim$posterior$phi$sample, type = "l", ylab = "phi") acf(run.sim$posterior$phi$sample, main = "") dev.off() ## Plot of timeseries # 任意取两个位置观察其后验分布 pdf(file = "binom-without-nugget-geoRglm-ts.pdf",width = 6,height = 9) par(mfrow = c(3, 1), mar = c(2.3, 2.5, .5, .7), mgp = c(1.5, .6, 0), cex = 0.6) plot(run.sim$posterior$sim[1, ], type = "l", ylab = "S(0.056, 0.056)") plot(run.sim$posterior$sim[29, ], type = "l", ylab = "S(0.563, 0.436)") plot(run.sim$posterior$phi$sample, type = "l", ylab = "phi") dev.off() ## Predictions sim.predict <- apply(run.sim$pred$simulations, 1, mean) sim.predict.var <- apply(run.sim$pred$simulations, 1, var) pdf(file = "binom-without-nugget-geoRglm-pred.pdf",width = 6,height = 3) par(mfrow = c(1, 2), mar = c(2.3, 2.5, .5, .7), mgp = c(1.5, .6, 0), cex = 0.6) # 1600 个点的预测值 image( x = run.sim, locations = pred.grid, values.to.plot = sim.predict, col = gray(seq(1, 0, l = 30)), x.leg = c(0.1, 0.9), y.leg = c(-0.12, -0.07), cex = 1.0, xlab = "Horizontal Coordinate", ylab = "Vertical Coordinate" ) # 预测值对应的方差 image( x = run.sim, locations = pred.grid, values = sim.predict.var, col = gray(seq(1, 0, l = 30)), x.leg = c(0.1, 0.9), y.leg = 
c(-0.12, -0.07), cex = 1.0, xlab = "Horizontal Coordinate", ylab = "Vertical Coordinate" ) dev.off()
# Adjusted Rand index between two partitions Rand <- function(tab,adjust=T) { ########################################################################## # The function computes the (adjusted) Rand index between two partitions # # Copyright Steve Horvath and Luohua Jiang, UCLA, 2003 # ########################################################################## # helper function choosenew <- function(n,k) { n <- c(n); out1 <- rep(0,length(n)); for (i in c(1:length(n)) ){ if ( n[i]<k ) {out1[i] <- 0} else {out1[i] <- choose(n[i],k) } } out1 } a <- 0; b <- 0; c <- 0; d <- 0; nn <- 0 n <- nrow(tab) for (i in 1:n) { for(j in 1:n) { a <- a+choosenew(tab[i,j],2) nj <- sum(tab[,j]) c <- c+choosenew(nj,2) } ni <- sum(tab[i,]) b <- b+choosenew(ni,2) nn <- nn+ni } if(adjust==T) { d <- choosenew(nn,2) adrand <- (a-(b*c/n)/d)/(0.5*(b+c/n)-(b*c/n)/d) adrand } else { b <- b-a c <- c/n-a d <- choosenew(nn,2)-a-b-c rand <- (a+d)/(a+b+c+d) rand } }
/AdjRand.R
no_license
yuepaang/Random-Forest-Clustering
R
false
false
1,097
r
# Adjusted Rand index between two partitions Rand <- function(tab,adjust=T) { ########################################################################## # The function computes the (adjusted) Rand index between two partitions # # Copyright Steve Horvath and Luohua Jiang, UCLA, 2003 # ########################################################################## # helper function choosenew <- function(n,k) { n <- c(n); out1 <- rep(0,length(n)); for (i in c(1:length(n)) ){ if ( n[i]<k ) {out1[i] <- 0} else {out1[i] <- choose(n[i],k) } } out1 } a <- 0; b <- 0; c <- 0; d <- 0; nn <- 0 n <- nrow(tab) for (i in 1:n) { for(j in 1:n) { a <- a+choosenew(tab[i,j],2) nj <- sum(tab[,j]) c <- c+choosenew(nj,2) } ni <- sum(tab[i,]) b <- b+choosenew(ni,2) nn <- nn+ni } if(adjust==T) { d <- choosenew(nn,2) adrand <- (a-(b*c/n)/d)/(0.5*(b+c/n)-(b*c/n)/d) adrand } else { b <- b-a c <- c/n-a d <- choosenew(nn,2)-a-b-c rand <- (a+d)/(a+b+c+d) rand } }
## ----chunk-setup, echo=FALSE, message=FALSE------------------------------------------------------------------------------------------------------- knitr::opts_chunk$set(size="footnotesize", comment = NA, highlight = TRUE) def.chunk.hook <- knitr::knit_hooks$get("chunk") knitr::knit_hooks$set(chunk = function(x, options) { x <- def.chunk.hook(x, options) ifelse(options$size != "normalsize", paste0("\\", options$size,"\n\n", x, "\n\n \\normalsize"), x) }) # options(width = 85) ## **유용한 웹 사이트**: R과 관련한 거의 모든 문제는 Googling (구글을 이용한 검색)을 통해 해결 가능(검색주제 + "in R" or "in R software")하고 많은 해답들이 아래 열거한 웹 페이지에 게시되어 있음. ## ## - R 프로그래밍에 대한 Q&A: [Stack Overflow](https://stackoverflow.com) ## - R 관련 웹 문서 모음: [Rpubs](https://rpubs.com/) ## - R package에 대한 raw source code 제공: [Github](https://github.com) ## - R을 이용한 통계 분석: [Statistical tools for high-throughput data analysis (STHDA)](http://www.sthda.com/english/) ## ## ----fig.align='center', echo=FALSE, fig.show='hold', out.width='90%'----------------------------------------------------------------------------- options(knitr.graphics.auto_pdf = TRUE) knitr::include_graphics('figures/Rorg-main-add.png', dpi = NA) ## ----fig.align='center', echo=FALSE, fig.show='hold', out.width='90%'----------------------------------------------------------------------------- knitr::include_graphics('figures/CRAN-korea-01.png', dpi = NA) ## ----fig.align='center', echo=FALSE, fig.show='hold', out.width='90%'----------------------------------------------------------------------------- knitr::include_graphics('figures/Rinstall-01.png', dpi = NA) ## ----fig.align='center', echo=FALSE, fig.show='hold', out.width='90%'----------------------------------------------------------------------------- knitr::include_graphics('figures/Rinstall-02.png', dpi = NA) ## 다음 하위폴더에 대한 간략 설멍 ## ## - **`base`**: R 실행 프로그램 ## - **`contrib`**: R package의 바이너리 파일 ## - **`Rtools`**: R package 개발 및 배포를 위한 프로그램 ## ## ----fig.align='center', echo=FALSE, fig.show='hold', 
out.width='90%'----------------------------------------------------------------------------- knitr::include_graphics('figures/Rinstall-03.png', dpi = NA) ## ----fig.align='center', echo=FALSE, fig.show='hold', out.width='90%'----------------------------------------------------------------------------- knitr::include_graphics('figures/R-install-F01.png', dpi = NA) ## ----fig.align='center', echo=FALSE, fig.show='hold', out.width='90%'----------------------------------------------------------------------------- knitr::include_graphics('figures/R-install-F02.png', dpi = NA) ## ----fig.align='center', echo=FALSE, fig.show='hold', out.width='90%'----------------------------------------------------------------------------- knitr::include_graphics('figures/R-install-F03.png', dpi = NA) ## ----fig.align='center', echo=FALSE, fig.show='hold', out.width='90%'----------------------------------------------------------------------------- knitr::include_graphics('figures/R-install-F04.png', dpi = NA) ## ----fig.align='center', echo=FALSE, fig.show='hold', out.width='90%'----------------------------------------------------------------------------- knitr::include_graphics('figures/R-install-F05.png', dpi = NA) ## ----fig.align='center', echo=FALSE, fig.show='hold', out.width='90%'----------------------------------------------------------------------------- knitr::include_graphics('figures/R-install-F06.png', dpi = NA) ## ----fig.align='center', echo=FALSE, fig.show='hold', out.width='90%'----------------------------------------------------------------------------- knitr::include_graphics('figures/R-install-F07.png', dpi = NA) ## ----fig.align='center', echo=FALSE, fig.show='hold', out.width='90%'----------------------------------------------------------------------------- knitr::include_graphics('figures/R-install-F08.png', dpi = NA) ## ----fig.align='center', echo=FALSE, fig.show='hold', out.width='90%'----------------------------------------------------------------------------- 
knitr::include_graphics('figures/R-install-F09.png', dpi = NA) ## ----r-console, fig.align='center', echo=FALSE, fig.show='hold', out.width='100%', fig.cap="Windows에서 R 실행화면(콘솔 창, SDI 모드)", ref.label='r-console'---- knitr::include_graphics('figures/Rgui.png', dpi = NA) ## **실습**: 설치된 R을 실행 후 보이는 R 콘솔(consle) 창에서 명령어를 실행하고 결과 확인 ## ----check-00, echo=TRUE, comment=NA-------------------------------------------------------------------------------------------------------------- # R의 설치 버전 및 현재 설정된 locale(언어, 시간대) 및 로딩된 R package 정보 출력 sessionInfo() ## ----check-01, echo=TRUE, comment=NA-------------------------------------------------------------------------------------------------------------- #문자열 출력 print("Hello R") #문자열 ## ----check-02, echo=TRUE, comment=NA-------------------------------------------------------------------------------------------------------------- # 수치형 값(scalar)을 변수에 할당(assign) # 여러 명령어를 한줄에 입력할 때에는 세미콜론(;)으로 구분 a = 9; b = 7 a b ## ----check-03, echo=TRUE, comment=NA-------------------------------------------------------------------------------------------------------------- a+b; a-b; a*b; a/b ## ----check-04, fig.align='center', fig.show='hold', echo=TRUE, fig.cap="정규분포 100개의 히스토그램"---------------------------------------------- # 난수 생성 시 값은 매번 달라지기 때문에 seed를 주어 일정값이 생성되도록 고정 # "="과 "<-"는 모두 동일한 기능을 가진 할당 연산자임 #평균이 0 이고 분산이 1인 정규분포에서 난수 100개 생성 set.seed(12345) # random seed 지정 x <- rnorm(100) # 난수 생성 hist(x) # 히스토그램 ## R 명령어 또는 전체 프로그램 소스 실행 시 매우 빈번히 오류가 나타나는데, 이를 해결할 수 있는 가장 좋은 방법은 앞에서 언급한 Google을 이용한 검색 또는 R 설치 시 자체적으로 내장되어 있는 도움말을 참고하는 것이 가장 효율적임. 
## ----tab-help, echo=FALSE, message=FALSE---------------------------------------------------------------------------------------------------------- # require(tidyverse) require(rmarkdown) require(knitr) require(kableExtra) `도움말 보기 명령어` <- c("`help` 또는 `?`", "`help.search` 또는 `??`", "`example`", "`vignette`") `설명` <- c("도움말 시스템 호출", "주어진 문자열을 포함한 문서 검색", "topic의 도움말 페이지에 있는 examples section 실행", "topic의 pdf 또는 html 레퍼런스 메뉴얼 불러오기") `사용법` <- c("`help(함수명)`", "`help.search(pattern)`", "`example(함수명)`", "`vignette(패키지명 또는 패턴)`") tab <- data.frame(`도움말 보기 명령어`, `설명`, `사용법`, check.names = F) options(kableExtra.html.bsTable = T) knitr::opts_knit$set(kable.force.latex = FALSE) kable(tab, align = "lll", escape = FALSE, booktabs = T, caption = "R help 관련 명령어 리스트") %>% kable_styling(bootstrap_options = c("condensed", "striped"), position = "center", font_size = 10, latex_options = c("striped", "HOLD_position")) %>% column_spec(2, width = "5cm") ## **Vignette** 의 활용 ## ## - `vignette()`에서 제공하는 문서는 데이터를 기반으로 사용하고자 하는 패키지의 실제 활용 예시를 작성한 문서이기 때문에 초보자들이 R 패키지 활용에 대한 접근성을 높혀줌. 
## - `browseVignettes()` 명령어를 통해 vignette을 제공하는 R 패키지 및 해당 vignette 문서 확인 가능 ## ## **실습**: R 설치 후 Rgui 에서 제공하는 편집기(R editor)에 명령어를 입력하고 실행 ## ## ----r-console-edit, fig.align='center', echo=FALSE, fig.show='hold', out.width='100%'------------------------------------------------------------ knitr::include_graphics('figures/r-console-edit.png', dpi = NA) ## ----check-edit, echo=TRUE, eval=FALSE, comment=NA, tidy=TRUE------------------------------------------------------------------------------------- # R에 내장된 cars 데이터셋 불러오기 # cars dataset에 포함된 변수들의 기초통계량 출력 # 2차원 산점도 data(cars) help(cars) # cars 데이터셋에 대한 설명 help 창에 출력 head(cars) # cars 데이터셋 처음 6개 행 데이터 출력 summary(cars) # cars 데이터셋 요약 plot(cars) # 변수가 2개인 경우 산점도 출력 ## ----check-edit-out, echo=FALSE, comment=NA, fig.cap="cars 데이터셋의 speed와 dist 간 2차원 산점도: speed는 자동차 속도(mph)이고 dist는 해당 속도에서 브레이크를 밟았을 때 멈출 때 까지 걸린 거리(ft)를 나타냄."---- # R에 내장된 cars 데이터셋 불러오기 # cars dataset에 포함된 변수들의 기초통계량 출력 # 2차원 산점도 data(cars) # help(cars) head(cars) summary(cars) plot(cars) ## ----fig.align='center', echo=FALSE, fig.show='hold', out.width='80%'----------------------------------------------------------------------------- knitr::include_graphics('figures/rstudio-homepage.png', dpi = NA) ## ----fig.align='center', echo=FALSE, fig.show='hold', out.width='70%'----------------------------------------------------------------------------- knitr::include_graphics('figures/rstudio-download.png', dpi = NA) ## ----fig.align='center', echo=FALSE, fig.show='hold', out.width='60%'----------------------------------------------------------------------------- knitr::include_graphics('figures/r-studio-download-02.png', dpi = NA) ## ----fig.align='center', echo=FALSE, fig.show='hold', out.widtht='60%'---------------------------------------------------------------------------- knitr::include_graphics('figures/Rstudio-installer.png', dpi = NA) ## ----fig.align='center', echo=FALSE, fig.show='hold', 
out.width='80%'----------------------------------------------------------------------------- knitr::include_graphics('figures/Rstudio-init.png', dpi = NA) ## ----rstudio-windows, fig.align='center', echo=FALSE, fig.show='hold', out.width='90%', fig.cap="RStudio 화면구성: 우하단 그림은 http://r-statistics.co/Top50-Ggplot2-Visualizations-MasterList-R-Code.html 에서 발췌", ref.label='rstudio-windows'---- knitr::include_graphics('figures/Rstudio-cap1.png', dpi = NA) ## ----rstudio-console, fig.align='center', echo=FALSE, fig.show="hold", out.width='80%', fig.cap="RStudio 콘솔창에서 명령어 실행 후 출력결과 화면", ref.label='rstudio-console'---- knitr::include_graphics('figures/rstudio-console.png', dpi = NA) ## ----rstudio-new-script, fig.align='center', echo=FALSE, fig.show="hold", out.width='80%', fig.cap="RStudio 스크립트 새로 열기", ref.label='rstudio-new-script'---- knitr::include_graphics('figures/rstudio-open-new-script.png', dpi = NA) ## RStudio는 코딩 및 소스 작성의 효율성을 위해 여러 가지 단축 키를 제공하고 있음. 단축키는 아래 그림과 같이 pull down 메뉴 `[Tools]` 또는 `[Help]`에서 `[Keyboard shortcut help]` 또는 `[Alt] + [Shift] + [K]` 단축키를 통해 확인할 수 있음. 또는 Rstudio cheatsheet에서 단축키에 대한 정보를 제공하는데 pull down 메뉴 `[Help]` $\rightarrow$ `[Cheatsheets]` $\rightarrow$ `[RStudio IDE Cheat Sheet]`을 선택하면 각 아이콘 및 메뉴 기능에 대한 개괄적 설명 확인 가능함. 
## ## ----rstudio-env, fig.align='center', echo=FALSE, fig.show='hold', out.width='90%', fig.cap="RStudio Environment 창 객체 상세 정보 및 스프레드 시트 출력 결과", ref.label='rstudio-env'---- knitr::include_graphics('figures/rstudio-environment.png', dpi = NA) ## ----rstudio-history, fig.align='center', echo=FALSE, fig.show='hold', out.width='90%'------------------------------------------------------------ knitr::include_graphics('figures/Rstudio-historywin.png', dpi = NA) ## ----rstudio-file, fig.align='center', echo=FALSE, fig.show='hold', out.width='80%'--------------------------------------------------------------- knitr::include_graphics('figures/Rstudio-file.png', dpi = NA) ## ----rstudio-plotwin, fig.align='center', echo=FALSE, fig.show='hold', out.width='80%'------------------------------------------------------------ knitr::include_graphics('figures/RStudio-plotwin.png', dpi = NA) ## ----rstudio-packagewin, fig.align='center', echo=FALSE, fig.show='hold', out.width='80%'--------------------------------------------------------- knitr::include_graphics('figures/RStudio-packagewin.png', dpi = NA) ## ----help, eval=FALSE----------------------------------------------------------------------------------------------------------------------------- ## help(lm) ## ----rstudio-helpwin, fig.align='center', echo=FALSE, fig.show='hold', out.width='80%'------------------------------------------------------------ knitr::include_graphics('figures/RStudio-helpwin.png', dpi = NA) ## ----rstudio-glob-menu, fig.align='center', echo=FALSE, fig.show="hold", out.width='80%'---------------------------------------------------------- knitr::include_graphics('figures/rstudio-glob-menu.png', dpi = NA) ## ----rstudio-glob-option, fig.align='center', echo=FALSE, fig.show="hold", out.width='80%', ref.label='rstudio-glob-option', fig.cap=fig_cap------ fig_cap <- "R General option 팝업 창" knitr::include_graphics('figures/rstudio-glob-option.png', dpi = NA) ## ----rstudio-wd-set, fig.align='center', 
echo=FALSE, fig.show="hold", out.width='80%'------------------------------------------------------------- knitr::include_graphics('figures/rstudio-wd-setting.JPG', dpi = NA) ## ------------------------------------------------------------------------------------------------------------------------------------------------- getwd() # 작업폴더 확인 GET Working Directory ## ------------------------------------------------------------------------------------------------------------------------------------------------- setwd("..") # 차상위 폴더로 이동 getwd() setwd("../..") # 차차상위 폴더로 이동 getwd() setwd("D:/Current-Workspace/Lecture/misc/") # 절대 폴더 명 입력 setwd("..") # dir() # 폴더 내 파일 명 출력 getwd() setwd("misc") # D:/Current-Workspace/Lecture 하위폴더인 misc 으로 이동 getwd() setwd("D:/Current-Workspace/Lecture/cnu-r-programming-lecture-note/") getwd() ## R에서 디렉토리 또는 폴더 구분자는 `/` 임. Windows에서 사용하는 구분자는 `\`인데, R에서 `\`는 특수문자로 간주하기 때문에 Windows 의 폴더명을 그대로 사용 시 에러 메세지를 출력함. 이를 해결하기 위해 Windows 경로명을 그대로 복사한 경우 경로 구분자 `\` 대신 `\\`로 변경 ## ## **실습**: `C:\r-project`를 컴퓨터에 생성 후 해당 폴더를 default 작업폴더로 설정 ## ----rstudio-code-option, fig.align='center', echo=FALSE, fig.show="hold", out.width='80%'-------------------------------------------------------- knitr::include_graphics('figures/rstudio-code-edit-option.png', dpi = NA) ## ----rstudio-code-display, fig.align='center', echo=FALSE, fig.show="hold", out.width='80%'------------------------------------------------------- knitr::include_graphics('figures/rstudio-code-display.png', dpi = NA) ## ----rstudio-code-saving, fig.align='center', echo=FALSE, fig.show="hold", out.width='80%'-------------------------------------------------------- knitr::include_graphics('figures/rstudio-code-saving.png', dpi = NA) ## ----rstudio-appearance, fig.align='center', echo=FALSE, fig.show="hold", out.width='80%'--------------------------------------------------------- knitr::include_graphics('figures/rstudio-appearance.png', dpi = NA) ## ----rstudio-pane-layout, fig.align='center', 
echo=FALSE, fig.show="hold", out.width='80%'-------------------------------------------------------- knitr::include_graphics('figures/rstudio-pane-layout.png', dpi = NA) ## **실습**: 개인 취향에 맞게 RStudio 에디터 및 theme을 변경해 보자!! ## ----rstudio-new-project-1, fig.align='center', echo=FALSE, fig.show="hold", out.width='80%'------------------------------------------------------ knitr::include_graphics('figures/R-newproject-01.png', dpi = NA) ## ----rstudio-new-project-2, fig.align='center', echo=FALSE, fig.show="hold", out.width='80%'------------------------------------------------------ knitr::include_graphics('figures/R-newproject-02.png', dpi = NA) ## ----rstudio-new-project-3, fig.align='center', echo=FALSE, fig.show="hold", out.width='80%'------------------------------------------------------ knitr::include_graphics('figures/R-newproject-03.png', dpi = NA) ## **실습**: 프로젝트 생성 ## ## - 위에서 설정한 작업폴더 내에 `학번-r-programming` 프로젝트 생성 ## - 생성한 프로젝트 폴더 내에 `docs`, `figures`, `script` 폴더 생성 ## ## **R 패키지(package)**: 특수 목적을 위한 로직으로 구성된 코드들의 집합으로 R에서 구동되는 분석툴을 통칭 ## ## - CRAN을 통해 배포: 3자가 이용하기 쉬움 $\rightarrow$ R 시스템 환경에서 패키지는 가장 중요한 역할 ## - CRAN [available package by name](https://cran.r-project.org/web/packages/available_packages_by_date.html) 또는 [available package by date](https://cran.r-project.org/web/packages/available_packages_by_name.html)에서 현재 등재된 패키지 리스트 확인 가능 ## - R console에서 `available.packages()` 함수를 통해서도 확인 가능 ## - 현재 CRAN 기준(2020-03-17) 배포된 패키지의 개수는 16045 개임 ## ## **목적**: RStudio 환경에서 패키지를 설치하고 불러오기 ## ----lib-path, comment=NA, tidy=TRUE-------------------------------------------------------------------------------------------------------------- .libPaths() ## ----window-env, fig.align='center', echo=FALSE, fig.show="hold", out.width='80%'----------------------------------------------------------------- knitr::include_graphics('figures/window-env-system.png', dpi = NA) ## ----window-env-var, fig.align='center', echo=FALSE, fig.show="hold", 
out.width='80%'------------------------------------------------------------- knitr::include_graphics('figures/window-env-var.png', dpi = NA) ## ----window-new-system-var, fig.align='center', echo=FALSE, fig.show="hold", out.width='90%'------------------------------------------------------ knitr::include_graphics('figures/window-new-system-var.png', dpi = NA) ## ----rstudio-package-install, fig.align='center', echo=FALSE, fig.show="hold", out.width='80%'---------------------------------------------------- knitr::include_graphics('figures/rstudio-package-install.png', dpi = NA) ## ----rstudio-package-win02, fig.align='center', echo=FALSE, fig.show="hold", out.width='80%'------------------------------------------------------ knitr::include_graphics('figures/rstudio-pack-win-02.png', dpi = NA) ## **실습**: `install.packages()` 함수를 이용해 `tidyverse` 패키지 설치 ## ---- eval=FALSE, comment=NA, tidy=TRUE----------------------------------------------------------------------------------------------------------- install.packages("tidyverse") ## **실습**: `tidyverse` 패키지 불러오기 ## ----multiple-package, eval=TRUE, warning=FALSE--------------------------------------------------------------------------------------------------- require(tidyverse) ## 실무에서 R의 활용능력은 패키지 활용 여부에 달려 있음. 즉, 목적에 맞는 업무를 수행하기 위해 가장 적합한 패키지를 찾고 활용하느냐에 따라 R 활용능력의 차이를 보임. 앞서 언급한 바와 같이 CRAN에 등록된 패키지는 16000 개가 넘지만, 이 중 많이 활용되고 있는 패키지의 수는 약 200 ~ 300 개 내외이고, 실제 데이터 분석 시 10 ~ 20개 정도의 패키지가 사용됨. 앞 예제에서 설치하고 불러온 `tidyverse` 패키지는 Hadley Wickham [@tidyverse2019]이 개발한 데이터 전처리 및 시각화 패키지 번들이고, 현재 R 프로그램 환경에 지대한 영향을 미침. 본 강의 "데이터프레임 가공 및 시각화"에서 해당 패키지 활용 방법을 배울 예정 ## 본 절에서 다루는 R 문법은 R 입문 시 객체(object)의 명명 규칙과 R 콘솔 창에서 가장 빈번하게 사용되는 기초적인 명령어만 다룰 예정임. 심화 내용은 2-3주 차에 다룰 예정임. 
## 알아두면 유용한(콘솔창에서 매우 많이 사용되는) 명령어 및 단축키 ## ## - `ls()`: 현재 R 작업공간에 저장된 모든 객체 리스트 출력 ## - `rm(object_name)`: `object_name`에 해당하는 객체 삭제 ## - `rm(list = ls())`: R 작업공간에 저장된 모든 객체들을 일괄 삭제 ## - 단축키 `[Ctrl] + [L]`: R 콘솔 창 일괄 청소 ## - 단축키 `[Ctrl] + [Shift] + [F10]`: R session 초기화 ## ## **예시** ## ---- comment=NA---------------------------------------------------------------------------------------------------------------------------------- x <- 7 y <- 1:30 #1에서 30까지 정수 입력 ls() #현재 작업공간 내 객체명 출력 ## ---- comment=NA---------------------------------------------------------------------------------------------------------------------------------- rm(x) # 객체 x 삭제 ls() rm(a,b) # 객체 a,b 동시 삭제 ls() # rm(list = ls()) # 모든 객체 삭제 ## ----assign-diff, comment=NA, error=TRUE---------------------------------------------------------------------------------------------------------- # mean(): 입력 벡터의 평균 계산 mean(y <- 1:5) y mean(x = 1:5) x ## ----objectName-ex01, echo = T, eval = T, prompt = F---------------------------------------------------------------------------------------------- # 1:10은 1부터 10까지 정수 생성 # 'c()'는 벡터 생성 함수 x <- c(1:10) # 1:10으로 구성된 행렬 생성 X <- matrix(c(1:10), nrow = 2, ncol = 5, byrow = T) x X # 논리형 객체 .x <- TRUE #FALSE .x # 알파벳 + 숫자 a1 <- seq(from = 1, to = 10, by = 2) # 한글 변수명 가수 <- c("Damian Rice", "Beatles", "최백호", "Queen", "Carlos Gardel", "BTS", "조용필") 가수 ## ----objName-ex02, comment=NA, error=TRUE--------------------------------------------------------------------------------------------------------- 3x <- 7 ## ----objName-ex03, comment=NA, error=TRUE--------------------------------------------------------------------------------------------------------- _x <- c("M", "M", "F") ## ----objName-ex04, comment=NA, error=TRUE--------------------------------------------------------------------------------------------------------- .3 <- 10 ## [R 기초 문법] 절과 마찬가지로 R Markdown을 이용해 최소한의 문서(`html` 문서)를 작성하고 생성하는 방법에 대해 기술함. R Markdown에 대한 보다 상세한 내용은 본 수업의 마지막 주차에 다룰 예정임. 
## ----rmarkdown-flow, fig.align='center', echo=FALSE, fig.show="hold", out.width='60%', fig.cap="R Markdown의 최종 결과물 산출과정(http://applied-r.com/project-reporting-template/)"---- knitr::include_graphics('figures/rmarkdown-flow.png', dpi = NA) ## RStudio를 처음 설치하고 위와 같이 진행할 경우 아래와 같은 패키지 설치 여부를 묻는 팝업 창이 나타남. 패키지 설치 여부에 `[Yes]`를 클릭하면 R Markdown 문서 생성을 위해 필요한 패키지들이 자동으로 설치 ## ----rmarkdown-new-01, fig.align='center', echo=FALSE, fig.show="hold", out.width='80%'----------------------------------------------------------- knitr::include_graphics('figures/rmarkdown-new-01.png', dpi = NA) ## ----rmarkdown-new-02, fig.align='center', echo=FALSE, fig.show="hold", out.width='80%'----------------------------------------------------------- knitr::include_graphics('figures/rmarkdown-new-02.png', dpi = NA) ## ----rmarkdown-new-03, fig.align='center', echo=FALSE, fig.show="hold", out.width='80%'----------------------------------------------------------- knitr::include_graphics('figures/rmarkdown-new-03.png', dpi = NA) ## ----rmarkdown-new-04, fig.align='center', echo=FALSE, fig.show="hold", out.width='80%'----------------------------------------------------------- knitr::include_graphics('figures/rmarkdown-new-04.png', dpi = NA) ## ----rmarkdown-new-out, fig.align='center', echo=FALSE, fig.show="hold", out.width='80%', fig.cap="test.html 문서 화면(저장 폴더 내 `test.html`을 크롬 브라우저로 실행)"---- knitr::include_graphics('figures/rmarkdown-new-out.png', dpi = NA) ## ----rmarkdown-part, fig.align='center', echo=FALSE, fig.show="hold", out.width='80%'------------------------------------------------------------- knitr::include_graphics('figures/rmarkdown-part.png', dpi = NA) ## ---- fig.show='hold'----------------------------------------------------------------------------------------------------------------------------- fit = lm(dist ~ speed, data = cars) b = coef(fit) plot(cars) abline(fit) ## ----knitr-logo, out.width='32.8%', 
fig.show='hold'----------------------------------------------------------------------------------------------- knitr::include_graphics(rep('figures/knit-logo.png', 3)) ## **Homework 1**: R Markdown 문서에 아래 내용을 포함한 문서를 `html` 파일 형식으로 출력 후 제출 ## ## - 간략한 자기소개 및 "통계 프로그래밍 언어" 수업에 대한 본인만의 목표 기술 ## - 본인이 setting 한 RStudio 구성 캡쳐 화면을 그림 파일로 저장하고 R Markdown 문서에 삽입(화면 캡쳐 시 생성 프로젝트 내 폴더 내용 반드시 포함) ## - 패키지 `ggplot2`를 불러오고 `cars` 데이터셋의 2차원 산점도(**hint**: `help(geom_point)` 또는 googling 활용)를 문서에 포함 ##
/code/01-overview.R
no_license
zorba78/cnu-r-programming-lecture-note
R
false
false
26,723
r
## ----chunk-setup, echo=FALSE, message=FALSE------------------------------------------------------------------------------------------------------- knitr::opts_chunk$set(size="footnotesize", comment = NA, highlight = TRUE) def.chunk.hook <- knitr::knit_hooks$get("chunk") knitr::knit_hooks$set(chunk = function(x, options) { x <- def.chunk.hook(x, options) ifelse(options$size != "normalsize", paste0("\\", options$size,"\n\n", x, "\n\n \\normalsize"), x) }) # options(width = 85) ## **유용한 웹 사이트**: R과 관련한 거의 모든 문제는 Googling (구글을 이용한 검색)을 통해 해결 가능(검색주제 + "in R" or "in R software")하고 많은 해답들이 아래 열거한 웹 페이지에 게시되어 있음. ## ## - R 프로그래밍에 대한 Q&A: [Stack Overflow](https://stackoverflow.com) ## - R 관련 웹 문서 모음: [Rpubs](https://rpubs.com/) ## - R package에 대한 raw source code 제공: [Github](https://github.com) ## - R을 이용한 통계 분석: [Statistical tools for high-throughput data analysis (STHDA)](http://www.sthda.com/english/) ## ## ----fig.align='center', echo=FALSE, fig.show='hold', out.width='90%'----------------------------------------------------------------------------- options(knitr.graphics.auto_pdf = TRUE) knitr::include_graphics('figures/Rorg-main-add.png', dpi = NA) ## ----fig.align='center', echo=FALSE, fig.show='hold', out.width='90%'----------------------------------------------------------------------------- knitr::include_graphics('figures/CRAN-korea-01.png', dpi = NA) ## ----fig.align='center', echo=FALSE, fig.show='hold', out.width='90%'----------------------------------------------------------------------------- knitr::include_graphics('figures/Rinstall-01.png', dpi = NA) ## ----fig.align='center', echo=FALSE, fig.show='hold', out.width='90%'----------------------------------------------------------------------------- knitr::include_graphics('figures/Rinstall-02.png', dpi = NA) ## 다음 하위폴더에 대한 간략 설멍 ## ## - **`base`**: R 실행 프로그램 ## - **`contrib`**: R package의 바이너리 파일 ## - **`Rtools`**: R package 개발 및 배포를 위한 프로그램 ## ## ----fig.align='center', echo=FALSE, fig.show='hold', 
out.width='90%'----------------------------------------------------------------------------- knitr::include_graphics('figures/Rinstall-03.png', dpi = NA) ## ----fig.align='center', echo=FALSE, fig.show='hold', out.width='90%'----------------------------------------------------------------------------- knitr::include_graphics('figures/R-install-F01.png', dpi = NA) ## ----fig.align='center', echo=FALSE, fig.show='hold', out.width='90%'----------------------------------------------------------------------------- knitr::include_graphics('figures/R-install-F02.png', dpi = NA) ## ----fig.align='center', echo=FALSE, fig.show='hold', out.width='90%'----------------------------------------------------------------------------- knitr::include_graphics('figures/R-install-F03.png', dpi = NA) ## ----fig.align='center', echo=FALSE, fig.show='hold', out.width='90%'----------------------------------------------------------------------------- knitr::include_graphics('figures/R-install-F04.png', dpi = NA) ## ----fig.align='center', echo=FALSE, fig.show='hold', out.width='90%'----------------------------------------------------------------------------- knitr::include_graphics('figures/R-install-F05.png', dpi = NA) ## ----fig.align='center', echo=FALSE, fig.show='hold', out.width='90%'----------------------------------------------------------------------------- knitr::include_graphics('figures/R-install-F06.png', dpi = NA) ## ----fig.align='center', echo=FALSE, fig.show='hold', out.width='90%'----------------------------------------------------------------------------- knitr::include_graphics('figures/R-install-F07.png', dpi = NA) ## ----fig.align='center', echo=FALSE, fig.show='hold', out.width='90%'----------------------------------------------------------------------------- knitr::include_graphics('figures/R-install-F08.png', dpi = NA) ## ----fig.align='center', echo=FALSE, fig.show='hold', out.width='90%'----------------------------------------------------------------------------- 
knitr::include_graphics('figures/R-install-F09.png', dpi = NA) ## ----r-console, fig.align='center', echo=FALSE, fig.show='hold', out.width='100%', fig.cap="Windows에서 R 실행화면(콘솔 창, SDI 모드)", ref.label='r-console'---- knitr::include_graphics('figures/Rgui.png', dpi = NA) ## **실습**: 설치된 R을 실행 후 보이는 R 콘솔(consle) 창에서 명령어를 실행하고 결과 확인 ## ----check-00, echo=TRUE, comment=NA-------------------------------------------------------------------------------------------------------------- # R의 설치 버전 및 현재 설정된 locale(언어, 시간대) 및 로딩된 R package 정보 출력 sessionInfo() ## ----check-01, echo=TRUE, comment=NA-------------------------------------------------------------------------------------------------------------- #문자열 출력 print("Hello R") #문자열 ## ----check-02, echo=TRUE, comment=NA-------------------------------------------------------------------------------------------------------------- # 수치형 값(scalar)을 변수에 할당(assign) # 여러 명령어를 한줄에 입력할 때에는 세미콜론(;)으로 구분 a = 9; b = 7 a b ## ----check-03, echo=TRUE, comment=NA-------------------------------------------------------------------------------------------------------------- a+b; a-b; a*b; a/b ## ----check-04, fig.align='center', fig.show='hold', echo=TRUE, fig.cap="정규분포 100개의 히스토그램"---------------------------------------------- # 난수 생성 시 값은 매번 달라지기 때문에 seed를 주어 일정값이 생성되도록 고정 # "="과 "<-"는 모두 동일한 기능을 가진 할당 연산자임 #평균이 0 이고 분산이 1인 정규분포에서 난수 100개 생성 set.seed(12345) # random seed 지정 x <- rnorm(100) # 난수 생성 hist(x) # 히스토그램 ## R 명령어 또는 전체 프로그램 소스 실행 시 매우 빈번히 오류가 나타나는데, 이를 해결할 수 있는 가장 좋은 방법은 앞에서 언급한 Google을 이용한 검색 또는 R 설치 시 자체적으로 내장되어 있는 도움말을 참고하는 것이 가장 효율적임. 
## ----tab-help, echo=FALSE, message=FALSE---------------------------------------------------------------------------------------------------------- # require(tidyverse) require(rmarkdown) require(knitr) require(kableExtra) `도움말 보기 명령어` <- c("`help` 또는 `?`", "`help.search` 또는 `??`", "`example`", "`vignette`") `설명` <- c("도움말 시스템 호출", "주어진 문자열을 포함한 문서 검색", "topic의 도움말 페이지에 있는 examples section 실행", "topic의 pdf 또는 html 레퍼런스 메뉴얼 불러오기") `사용법` <- c("`help(함수명)`", "`help.search(pattern)`", "`example(함수명)`", "`vignette(패키지명 또는 패턴)`") tab <- data.frame(`도움말 보기 명령어`, `설명`, `사용법`, check.names = F) options(kableExtra.html.bsTable = T) knitr::opts_knit$set(kable.force.latex = FALSE) kable(tab, align = "lll", escape = FALSE, booktabs = T, caption = "R help 관련 명령어 리스트") %>% kable_styling(bootstrap_options = c("condensed", "striped"), position = "center", font_size = 10, latex_options = c("striped", "HOLD_position")) %>% column_spec(2, width = "5cm") ## **Vignette** 의 활용 ## ## - `vignette()`에서 제공하는 문서는 데이터를 기반으로 사용하고자 하는 패키지의 실제 활용 예시를 작성한 문서이기 때문에 초보자들이 R 패키지 활용에 대한 접근성을 높혀줌. 
## - `browseVignettes()` 명령어를 통해 vignette을 제공하는 R 패키지 및 해당 vignette 문서 확인 가능 ## ## **실습**: R 설치 후 Rgui 에서 제공하는 편집기(R editor)에 명령어를 입력하고 실행 ## ## ----r-console-edit, fig.align='center', echo=FALSE, fig.show='hold', out.width='100%'------------------------------------------------------------ knitr::include_graphics('figures/r-console-edit.png', dpi = NA) ## ----check-edit, echo=TRUE, eval=FALSE, comment=NA, tidy=TRUE------------------------------------------------------------------------------------- # R에 내장된 cars 데이터셋 불러오기 # cars dataset에 포함된 변수들의 기초통계량 출력 # 2차원 산점도 data(cars) help(cars) # cars 데이터셋에 대한 설명 help 창에 출력 head(cars) # cars 데이터셋 처음 6개 행 데이터 출력 summary(cars) # cars 데이터셋 요약 plot(cars) # 변수가 2개인 경우 산점도 출력 ## ----check-edit-out, echo=FALSE, comment=NA, fig.cap="cars 데이터셋의 speed와 dist 간 2차원 산점도: speed는 자동차 속도(mph)이고 dist는 해당 속도에서 브레이크를 밟았을 때 멈출 때 까지 걸린 거리(ft)를 나타냄."---- # R에 내장된 cars 데이터셋 불러오기 # cars dataset에 포함된 변수들의 기초통계량 출력 # 2차원 산점도 data(cars) # help(cars) head(cars) summary(cars) plot(cars) ## ----fig.align='center', echo=FALSE, fig.show='hold', out.width='80%'----------------------------------------------------------------------------- knitr::include_graphics('figures/rstudio-homepage.png', dpi = NA) ## ----fig.align='center', echo=FALSE, fig.show='hold', out.width='70%'----------------------------------------------------------------------------- knitr::include_graphics('figures/rstudio-download.png', dpi = NA) ## ----fig.align='center', echo=FALSE, fig.show='hold', out.width='60%'----------------------------------------------------------------------------- knitr::include_graphics('figures/r-studio-download-02.png', dpi = NA) ## ----fig.align='center', echo=FALSE, fig.show='hold', out.widtht='60%'---------------------------------------------------------------------------- knitr::include_graphics('figures/Rstudio-installer.png', dpi = NA) ## ----fig.align='center', echo=FALSE, fig.show='hold', 
out.width='80%'----------------------------------------------------------------------------- knitr::include_graphics('figures/Rstudio-init.png', dpi = NA) ## ----rstudio-windows, fig.align='center', echo=FALSE, fig.show='hold', out.width='90%', fig.cap="RStudio 화면구성: 우하단 그림은 http://r-statistics.co/Top50-Ggplot2-Visualizations-MasterList-R-Code.html 에서 발췌", ref.label='rstudio-windows'---- knitr::include_graphics('figures/Rstudio-cap1.png', dpi = NA) ## ----rstudio-console, fig.align='center', echo=FALSE, fig.show="hold", out.width='80%', fig.cap="RStudio 콘솔창에서 명령어 실행 후 출력결과 화면", ref.label='rstudio-console'---- knitr::include_graphics('figures/rstudio-console.png', dpi = NA) ## ----rstudio-new-script, fig.align='center', echo=FALSE, fig.show="hold", out.width='80%', fig.cap="RStudio 스크립트 새로 열기", ref.label='rstudio-new-script'---- knitr::include_graphics('figures/rstudio-open-new-script.png', dpi = NA) ## RStudio는 코딩 및 소스 작성의 효율성을 위해 여러 가지 단축 키를 제공하고 있음. 단축키는 아래 그림과 같이 pull down 메뉴 `[Tools]` 또는 `[Help]`에서 `[Keyboard shortcut help]` 또는 `[Alt] + [Shift] + [K]` 단축키를 통해 확인할 수 있음. 또는 Rstudio cheatsheet에서 단축키에 대한 정보를 제공하는데 pull down 메뉴 `[Help]` $\rightarrow$ `[Cheatsheets]` $\rightarrow$ `[RStudio IDE Cheat Sheet]`을 선택하면 각 아이콘 및 메뉴 기능에 대한 개괄적 설명 확인 가능함. 
## ## ----rstudio-env, fig.align='center', echo=FALSE, fig.show='hold', out.width='90%', fig.cap="RStudio Environment 창 객체 상세 정보 및 스프레드 시트 출력 결과", ref.label='rstudio-env'---- knitr::include_graphics('figures/rstudio-environment.png', dpi = NA) ## ----rstudio-history, fig.align='center', echo=FALSE, fig.show='hold', out.width='90%'------------------------------------------------------------ knitr::include_graphics('figures/Rstudio-historywin.png', dpi = NA) ## ----rstudio-file, fig.align='center', echo=FALSE, fig.show='hold', out.width='80%'--------------------------------------------------------------- knitr::include_graphics('figures/Rstudio-file.png', dpi = NA) ## ----rstudio-plotwin, fig.align='center', echo=FALSE, fig.show='hold', out.width='80%'------------------------------------------------------------ knitr::include_graphics('figures/RStudio-plotwin.png', dpi = NA) ## ----rstudio-packagewin, fig.align='center', echo=FALSE, fig.show='hold', out.width='80%'--------------------------------------------------------- knitr::include_graphics('figures/RStudio-packagewin.png', dpi = NA) ## ----help, eval=FALSE----------------------------------------------------------------------------------------------------------------------------- ## help(lm) ## ----rstudio-helpwin, fig.align='center', echo=FALSE, fig.show='hold', out.width='80%'------------------------------------------------------------ knitr::include_graphics('figures/RStudio-helpwin.png', dpi = NA) ## ----rstudio-glob-menu, fig.align='center', echo=FALSE, fig.show="hold", out.width='80%'---------------------------------------------------------- knitr::include_graphics('figures/rstudio-glob-menu.png', dpi = NA) ## ----rstudio-glob-option, fig.align='center', echo=FALSE, fig.show="hold", out.width='80%', ref.label='rstudio-glob-option', fig.cap=fig_cap------ fig_cap <- "R General option 팝업 창" knitr::include_graphics('figures/rstudio-glob-option.png', dpi = NA) ## ----rstudio-wd-set, fig.align='center', 
echo=FALSE, fig.show="hold", out.width='80%'------------------------------------------------------------- knitr::include_graphics('figures/rstudio-wd-setting.JPG', dpi = NA) ## ------------------------------------------------------------------------------------------------------------------------------------------------- getwd() # 작업폴더 확인 GET Working Directory ## ------------------------------------------------------------------------------------------------------------------------------------------------- setwd("..") # 차상위 폴더로 이동 getwd() setwd("../..") # 차차상위 폴더로 이동 getwd() setwd("D:/Current-Workspace/Lecture/misc/") # 절대 폴더 명 입력 setwd("..") # dir() # 폴더 내 파일 명 출력 getwd() setwd("misc") # D:/Current-Workspace/Lecture 하위폴더인 misc 으로 이동 getwd() setwd("D:/Current-Workspace/Lecture/cnu-r-programming-lecture-note/") getwd() ## R에서 디렉토리 또는 폴더 구분자는 `/` 임. Windows에서 사용하는 구분자는 `\`인데, R에서 `\`는 특수문자로 간주하기 때문에 Windows 의 폴더명을 그대로 사용 시 에러 메세지를 출력함. 이를 해결하기 위해 Windows 경로명을 그대로 복사한 경우 경로 구분자 `\` 대신 `\\`로 변경 ## ## **실습**: `C:\r-project`를 컴퓨터에 생성 후 해당 폴더를 default 작업폴더로 설정 ## ----rstudio-code-option, fig.align='center', echo=FALSE, fig.show="hold", out.width='80%'-------------------------------------------------------- knitr::include_graphics('figures/rstudio-code-edit-option.png', dpi = NA) ## ----rstudio-code-display, fig.align='center', echo=FALSE, fig.show="hold", out.width='80%'------------------------------------------------------- knitr::include_graphics('figures/rstudio-code-display.png', dpi = NA) ## ----rstudio-code-saving, fig.align='center', echo=FALSE, fig.show="hold", out.width='80%'-------------------------------------------------------- knitr::include_graphics('figures/rstudio-code-saving.png', dpi = NA) ## ----rstudio-appearance, fig.align='center', echo=FALSE, fig.show="hold", out.width='80%'--------------------------------------------------------- knitr::include_graphics('figures/rstudio-appearance.png', dpi = NA) ## ----rstudio-pane-layout, fig.align='center', 
echo=FALSE, fig.show="hold", out.width='80%'-------------------------------------------------------- knitr::include_graphics('figures/rstudio-pane-layout.png', dpi = NA) ## **실습**: 개인 취향에 맞게 RStudio 에디터 및 theme을 변경해 보자!! ## ----rstudio-new-project-1, fig.align='center', echo=FALSE, fig.show="hold", out.width='80%'------------------------------------------------------ knitr::include_graphics('figures/R-newproject-01.png', dpi = NA) ## ----rstudio-new-project-2, fig.align='center', echo=FALSE, fig.show="hold", out.width='80%'------------------------------------------------------ knitr::include_graphics('figures/R-newproject-02.png', dpi = NA) ## ----rstudio-new-project-3, fig.align='center', echo=FALSE, fig.show="hold", out.width='80%'------------------------------------------------------ knitr::include_graphics('figures/R-newproject-03.png', dpi = NA) ## **실습**: 프로젝트 생성 ## ## - 위에서 설정한 작업폴더 내에 `학번-r-programming` 프로젝트 생성 ## - 생성한 프로젝트 폴더 내에 `docs`, `figures`, `script` 폴더 생성 ## ## **R 패키지(package)**: 특수 목적을 위한 로직으로 구성된 코드들의 집합으로 R에서 구동되는 분석툴을 통칭 ## ## - CRAN을 통해 배포: 3자가 이용하기 쉬움 $\rightarrow$ R 시스템 환경에서 패키지는 가장 중요한 역할 ## - CRAN [available package by name](https://cran.r-project.org/web/packages/available_packages_by_date.html) 또는 [available package by date](https://cran.r-project.org/web/packages/available_packages_by_name.html)에서 현재 등재된 패키지 리스트 확인 가능 ## - R console에서 `available.packages()` 함수를 통해서도 확인 가능 ## - 현재 CRAN 기준(2020-03-17) 배포된 패키지의 개수는 16045 개임 ## ## **목적**: RStudio 환경에서 패키지를 설치하고 불러오기 ## ----lib-path, comment=NA, tidy=TRUE-------------------------------------------------------------------------------------------------------------- .libPaths() ## ----window-env, fig.align='center', echo=FALSE, fig.show="hold", out.width='80%'----------------------------------------------------------------- knitr::include_graphics('figures/window-env-system.png', dpi = NA) ## ----window-env-var, fig.align='center', echo=FALSE, fig.show="hold", 
out.width='80%'------------------------------------------------------------- knitr::include_graphics('figures/window-env-var.png', dpi = NA) ## ----window-new-system-var, fig.align='center', echo=FALSE, fig.show="hold", out.width='90%'------------------------------------------------------ knitr::include_graphics('figures/window-new-system-var.png', dpi = NA) ## ----rstudio-package-install, fig.align='center', echo=FALSE, fig.show="hold", out.width='80%'---------------------------------------------------- knitr::include_graphics('figures/rstudio-package-install.png', dpi = NA) ## ----rstudio-package-win02, fig.align='center', echo=FALSE, fig.show="hold", out.width='80%'------------------------------------------------------ knitr::include_graphics('figures/rstudio-pack-win-02.png', dpi = NA) ## **실습**: `install.packages()` 함수를 이용해 `tidyverse` 패키지 설치 ## ---- eval=FALSE, comment=NA, tidy=TRUE----------------------------------------------------------------------------------------------------------- install.packages("tidyverse") ## **실습**: `tidyverse` 패키지 불러오기 ## ----multiple-package, eval=TRUE, warning=FALSE--------------------------------------------------------------------------------------------------- require(tidyverse) ## 실무에서 R의 활용능력은 패키지 활용 여부에 달려 있음. 즉, 목적에 맞는 업무를 수행하기 위해 가장 적합한 패키지를 찾고 활용하느냐에 따라 R 활용능력의 차이를 보임. 앞서 언급한 바와 같이 CRAN에 등록된 패키지는 16000 개가 넘지만, 이 중 많이 활용되고 있는 패키지의 수는 약 200 ~ 300 개 내외이고, 실제 데이터 분석 시 10 ~ 20개 정도의 패키지가 사용됨. 앞 예제에서 설치하고 불러온 `tidyverse` 패키지는 Hadley Wickham [@tidyverse2019]이 개발한 데이터 전처리 및 시각화 패키지 번들이고, 현재 R 프로그램 환경에 지대한 영향을 미침. 본 강의 "데이터프레임 가공 및 시각화"에서 해당 패키지 활용 방법을 배울 예정 ## 본 절에서 다루는 R 문법은 R 입문 시 객체(object)의 명명 규칙과 R 콘솔 창에서 가장 빈번하게 사용되는 기초적인 명령어만 다룰 예정임. 심화 내용은 2-3주 차에 다룰 예정임. 
## 알아두면 유용한(콘솔창에서 매우 많이 사용되는) 명령어 및 단축키 ## ## - `ls()`: 현재 R 작업공간에 저장된 모든 객체 리스트 출력 ## - `rm(object_name)`: `object_name`에 해당하는 객체 삭제 ## - `rm(list = ls())`: R 작업공간에 저장된 모든 객체들을 일괄 삭제 ## - 단축키 `[Ctrl] + [L]`: R 콘솔 창 일괄 청소 ## - 단축키 `[Ctrl] + [Shift] + [F10]`: R session 초기화 ## ## **예시** ## ---- comment=NA---------------------------------------------------------------------------------------------------------------------------------- x <- 7 y <- 1:30 #1에서 30까지 정수 입력 ls() #현재 작업공간 내 객체명 출력 ## ---- comment=NA---------------------------------------------------------------------------------------------------------------------------------- rm(x) # 객체 x 삭제 ls() rm(a,b) # 객체 a,b 동시 삭제 ls() # rm(list = ls()) # 모든 객체 삭제 ## ----assign-diff, comment=NA, error=TRUE---------------------------------------------------------------------------------------------------------- # mean(): 입력 벡터의 평균 계산 mean(y <- 1:5) y mean(x = 1:5) x ## ----objectName-ex01, echo = T, eval = T, prompt = F---------------------------------------------------------------------------------------------- # 1:10은 1부터 10까지 정수 생성 # 'c()'는 벡터 생성 함수 x <- c(1:10) # 1:10으로 구성된 행렬 생성 X <- matrix(c(1:10), nrow = 2, ncol = 5, byrow = T) x X # 논리형 객체 .x <- TRUE #FALSE .x # 알파벳 + 숫자 a1 <- seq(from = 1, to = 10, by = 2) # 한글 변수명 가수 <- c("Damian Rice", "Beatles", "최백호", "Queen", "Carlos Gardel", "BTS", "조용필") 가수 ## ----objName-ex02, comment=NA, error=TRUE--------------------------------------------------------------------------------------------------------- 3x <- 7 ## ----objName-ex03, comment=NA, error=TRUE--------------------------------------------------------------------------------------------------------- _x <- c("M", "M", "F") ## ----objName-ex04, comment=NA, error=TRUE--------------------------------------------------------------------------------------------------------- .3 <- 10 ## [R 기초 문법] 절과 마찬가지로 R Markdown을 이용해 최소한의 문서(`html` 문서)를 작성하고 생성하는 방법에 대해 기술함. R Markdown에 대한 보다 상세한 내용은 본 수업의 마지막 주차에 다룰 예정임. 
## ----rmarkdown-flow, fig.align='center', echo=FALSE, fig.show="hold", out.width='60%', fig.cap="R Markdown의 최종 결과물 산출과정(http://applied-r.com/project-reporting-template/)"---- knitr::include_graphics('figures/rmarkdown-flow.png', dpi = NA) ## RStudio를 처음 설치하고 위와 같이 진행할 경우 아래와 같은 패키지 설치 여부를 묻는 팝업 창이 나타남. 패키지 설치 여부에 `[Yes]`를 클릭하면 R Markdown 문서 생성을 위해 필요한 패키지들이 자동으로 설치 ## ----rmarkdown-new-01, fig.align='center', echo=FALSE, fig.show="hold", out.width='80%'----------------------------------------------------------- knitr::include_graphics('figures/rmarkdown-new-01.png', dpi = NA) ## ----rmarkdown-new-02, fig.align='center', echo=FALSE, fig.show="hold", out.width='80%'----------------------------------------------------------- knitr::include_graphics('figures/rmarkdown-new-02.png', dpi = NA) ## ----rmarkdown-new-03, fig.align='center', echo=FALSE, fig.show="hold", out.width='80%'----------------------------------------------------------- knitr::include_graphics('figures/rmarkdown-new-03.png', dpi = NA) ## ----rmarkdown-new-04, fig.align='center', echo=FALSE, fig.show="hold", out.width='80%'----------------------------------------------------------- knitr::include_graphics('figures/rmarkdown-new-04.png', dpi = NA) ## ----rmarkdown-new-out, fig.align='center', echo=FALSE, fig.show="hold", out.width='80%', fig.cap="test.html 문서 화면(저장 폴더 내 `test.html`을 크롬 브라우저로 실행)"---- knitr::include_graphics('figures/rmarkdown-new-out.png', dpi = NA) ## ----rmarkdown-part, fig.align='center', echo=FALSE, fig.show="hold", out.width='80%'------------------------------------------------------------- knitr::include_graphics('figures/rmarkdown-part.png', dpi = NA) ## ---- fig.show='hold'----------------------------------------------------------------------------------------------------------------------------- fit = lm(dist ~ speed, data = cars) b = coef(fit) plot(cars) abline(fit) ## ----knitr-logo, out.width='32.8%', 
fig.show='hold'----------------------------------------------------------------------------------------------- knitr::include_graphics(rep('figures/knit-logo.png', 3)) ## **Homework 1**: R Markdown 문서에 아래 내용을 포함한 문서를 `html` 파일 형식으로 출력 후 제출 ## ## - 간략한 자기소개 및 "통계 프로그래밍 언어" 수업에 대한 본인만의 목표 기술 ## - 본인이 setting 한 RStudio 구성 캡쳐 화면을 그림 파일로 저장하고 R Markdown 문서에 삽입(화면 캡쳐 시 생성 프로젝트 내 폴더 내용 반드시 포함) ## - 패키지 `ggplot2`를 불러오고 `cars` 데이터셋의 2차원 산점도(**hint**: `help(geom_point)` 또는 googling 활용)를 문서에 포함 ##
library(testthat) library(ggplot2) library(isa) test_check("isa")
/tests/testthat.R
permissive
Fazendaaa/RTutorial
R
false
false
67
r
library(testthat) library(ggplot2) library(isa) test_check("isa")
context("npMeanSingle") ones <- rep(1, 20) zeros <- rep(0, 20) res <- npMeanSingle(ones, mu = .5) theta.1 <- res$theta test_that("correct rejection: ones", expect_true(res$rejection)) test_that("D.alt > null value", expect_true(res$d.alternative[1] > res$null.value)) test_that("D.alt < null value", expect_true(res$d.alternative[2] < res$null.value)) res <- npMeanSingle(zeros, mu = .5) theta.2 <- res$theta test_that("correct rejection: zeros", expect_true(res$rejection)) test_that("D.alt > null value", expect_true(res$d.alternative[1] > res$null.value)) test_that("D.alt < null value", expect_true(res$d.alternative[2] < res$null.value)) res <- npMeanSingle(ones, mu = .5, alternative = "greater", alpha = 0.025) test_that("theta equal in two.sided and greater alternative with alpha / 2", expect_equal(theta.1, res$theta)) test_that("D.alt > null value", expect_true(res$d.alternative > res$null.value)) res <- npMeanSingle(ones, mu = .5, alternative = "less", alpha = 0.025) test_that("theta equal in two.sided and greater alternative with alpha / 2", expect_equal(theta.1, res$theta)) test_that("D.alt < null value", expect_true(res$d.alternative < res$null.value)) res <- npMeanSingle(zeros, mu = .5, alternative = "greater", alpha = 0.025) test_that("theta equal in two.sided and greater alternative with alpha / 2", expect_equal(theta.2, res$theta)) test_that("D.alt > null value", expect_true(res$d.alternative > res$null.value)) res <- npMeanSingle(zeros, mu = .5, alternative = "less", alpha = 0.025) test_that("theta equal in two.sided and greater alternative with alpha / 2", expect_equal(theta.2, res$theta)) test_that("D.alt < null value", expect_true(res$d.alternative < res$null.value)) res <- npMeanSingle(ones, mu = .5, alternative = "greater") test_that("theta unequal in two.sided and greater alternative", expect_true(theta.1 != res$theta)) test_that("D.alt > null value", expect_true(res$d.alternative > res$null.value)) res <- npMeanSingle(ones, mu = .5, alternative = 
"less") test_that("theta unequal in two.sided and greater alternative", expect_true(theta.1 != res$theta)) test_that("D.alt < null value", expect_true(res$d.alternative < res$null.value)) for(i in 1:5) { res <- npMeanSingle(rep(0.5, i), mu = .5) test_that(paste("Not possible to calculate theta: ", i, sep = ""), expect_null(res$theta)) } ## ## extreme case ## w <- rep(0, 100) w[1] <- .5 res <- npMeanSingle(w, mu = 0.0001, upper = 5) test_that("Correct rejection: extreme case", expect_true(res$rejection)) test_that("D.alt > null value: extreme case", expect_true(res$d.alternative > res$null.value)) res <- npMeanSingle(w, mu = 0.0001, upper = 5, alternative = "greater", alpha = 0.025) test_that("Correct rejection: extreme case", expect_true(res$rejection)) test_that("D.alt > null value: extreme case", expect_true(res$d.alternative > res$null.value)) res <- npMeanSingle(w, mu = 0.0001, upper = 5, alternative = "less", alpha = 0.025) test_that("Correct non-rejection: extreme case", expect_false(res$rejection)) ## d.alternative is a zero-length vector ## test_that("D.alt is null", ## expect_null(res$d.alternative)) res <- npMeanSingle(w, mu = .1, upper = 5, alternative = "less", alpha = 0.025) test_that("Correct rejection: extreme case", expect_false(res$rejection)) ## d.alt same as above ## test_that("D.alt is null", ## expect_null(res$d.alternative)) res <- npMeanSingle(w, mu = .5, upper = 5, alternative = "less", alpha = 0.025) test_that("Correct rejection: extreme case", expect_true(res$rejection)) test_that("D.alt < null value", expect_true(res$d.alternative < res$null.value)) res <- npMeanSingle(w, mu = 1, upper = 5, alternative = "less", alpha = 0.025) test_that("Correct rejection: extreme case", expect_true(res$rejection)) test_that("D.alt < null value", expect_true(res$d.alternative < res$null.value)) ## ## development of probability of rejection set.seed(567) w <- runif(40) resGreater <- npMeanSingle(w, mu = 0.01, upper = 5, alternative = "greater", alpha = 
0.025) probrejGreater.before <- resGreater$probrej resLess <- npMeanSingle(w, mu = 0.01, upper = 5, alternative = "less", alpha = 0.025) probrejLess.before <- resLess$probrej for(mu in seq(0.1, 1, by = 0.1)) { ## print(mu) resGreater <- npMeanSingle(w, mu = mu, upper = 5, alternative = "greater", alpha = 0.025) resLess <- npMeanSingle(w, mu = mu, upper = 5, alternative = "less", alpha = 0.025) test_that("Greater: probrej.before >= probrej.after", expect_true(probrejGreater.before >= resGreater$probrej)) test_that("Less: probrej.before <= probrej.after", expect_true(probrejLess.before <= resLess$probrej)) probrejLess.before <- resLess$probrej probrejGreater.before <- resGreater$probrej } set.seed(123) x <- runif(600) res <- npMeanSingle(x, mu = .8, lower = -2, upper = 2, alternative = "greater", alpha = 0.025) test_that("Correct non-rejection: large example, extra bounds", expect_false(res$rejection)) test_that("D.alt > null value: large example", expect_true(res$d.alternative > res$null.value)) res <- npMeanSingle(x, mu = .8, lower = -2, upper = 2, alternative = "less", alpha = 0.025) test_that("Correct rejection: large example, extra bounds", expect_true(res$rejection)) test_that("D.alt < null value: extreme case", expect_true(res$d.alternative < res$null.value)) ## ## another example ## x <- runif(45, max = 10) ## greater res <- npMeanSingle(x, mu = .8, lower = -1, upper = 12, alternative = "greater", alpha = 0.025) test_that("Correct rejection: extra bounds", expect_true(res$rejection)) test_that("D.alt > null value: large example", expect_true(res$d.alternative > res$null.value)) res <- npMeanSingle(x, mu = 8, lower = -1, upper = 12, alternative = "greater", alpha = 0.025) test_that("Correct non-rejection: extra bounds", expect_false(res$rejection)) test_that("D.alt > null value: large example", expect_true(res$d.alternative > res$null.value)) ## less res <- npMeanSingle(x, mu = .8, lower = -1, upper = 12, alternative = "less", alpha = 0.025) test_that("Correct 
non-rejection: extra bounds", expect_false(res$rejection)) test_that("D.alt < null value: large example", expect_true(res$d.alternative < res$null.value)) res <- npMeanSingle(x, mu = 8, lower = -1, upper = 12, alternative = "less", alpha = 0.025) test_that("Correct rejection: extra bounds", expect_true(res$rejection)) test_that("D.alt < null value: large example", expect_true(res$d.alternative < res$null.value)) set.seed(123) x <- runif(2) res <- npMeanSingle(x, mu = .3) test_that("npMeanSingle, no theta calculation. two-sided", expect_true(is.null(res$theta))) res <- npMeanSingle(x, mu = .3, alternative = "greater") test_that("npMeanSingle, no theta calculation. greater", expect_true(is.null(res$theta))) res <- npMeanSingle(x, mu = .3, alternative = "less") test_that("npMeanSingle, no theta calculation. less", expect_true(is.null(res$theta))) ## ## transBinomTest ## lower <- 0 upper <- 1 x <- c(rep(0, 4), rep(.5, 13), rep(1, 20)) x <- (x - lower)/(upper - lower) n <- length(x) test_that("transBinomtest, mixed rejection", expect_equal({ set.seed(1); p <- .6; transBinomTest(p, n, 0.03, list(x = x, xp = x - p)) }, 0.24926764)) test_that("transBinomtest, full rejection", expect_equal({ set.seed(1); p <- .5; transBinomTest(p, n, 0.03, list(x = x, xp = x - p)) }, 1)) test_that("transBinomtest, no rejection", expect_equal({ set.seed(1); p <- .7; transBinomTest(p, n, 0.03, list(x = x, xp = x - p)) }, 0)) x <- 1 - c(rep(0, 4), rep(.5, 13), rep(1, 20)) x <- (x - lower)/(upper - lower) n <- length(x) test_that("transBinomtest, mixed rejection", expect_equal({ set.seed(1); p <- .175; transBinomTest(p, n, 0.03, list(x = x, xp = x - p)) }, 0.26506823)) test_that("transBinomtest, full rejection", expect_equal({ set.seed(1); p <- .1; transBinomTest(p, n, 0.03, list(x = x, xp = x - p)) }, 1)) test_that("transBinomtest, no rejection", expect_equal({ set.seed(1); p <- .2; transBinomTest(p, n, 0.03, list(x = x, xp = x - p)) }, 0))
/tests/testthat/test_npMeanSingle.R
no_license
zauster/npExact
R
false
false
9,024
r
context("npMeanSingle") ones <- rep(1, 20) zeros <- rep(0, 20) res <- npMeanSingle(ones, mu = .5) theta.1 <- res$theta test_that("correct rejection: ones", expect_true(res$rejection)) test_that("D.alt > null value", expect_true(res$d.alternative[1] > res$null.value)) test_that("D.alt < null value", expect_true(res$d.alternative[2] < res$null.value)) res <- npMeanSingle(zeros, mu = .5) theta.2 <- res$theta test_that("correct rejection: zeros", expect_true(res$rejection)) test_that("D.alt > null value", expect_true(res$d.alternative[1] > res$null.value)) test_that("D.alt < null value", expect_true(res$d.alternative[2] < res$null.value)) res <- npMeanSingle(ones, mu = .5, alternative = "greater", alpha = 0.025) test_that("theta equal in two.sided and greater alternative with alpha / 2", expect_equal(theta.1, res$theta)) test_that("D.alt > null value", expect_true(res$d.alternative > res$null.value)) res <- npMeanSingle(ones, mu = .5, alternative = "less", alpha = 0.025) test_that("theta equal in two.sided and greater alternative with alpha / 2", expect_equal(theta.1, res$theta)) test_that("D.alt < null value", expect_true(res$d.alternative < res$null.value)) res <- npMeanSingle(zeros, mu = .5, alternative = "greater", alpha = 0.025) test_that("theta equal in two.sided and greater alternative with alpha / 2", expect_equal(theta.2, res$theta)) test_that("D.alt > null value", expect_true(res$d.alternative > res$null.value)) res <- npMeanSingle(zeros, mu = .5, alternative = "less", alpha = 0.025) test_that("theta equal in two.sided and greater alternative with alpha / 2", expect_equal(theta.2, res$theta)) test_that("D.alt < null value", expect_true(res$d.alternative < res$null.value)) res <- npMeanSingle(ones, mu = .5, alternative = "greater") test_that("theta unequal in two.sided and greater alternative", expect_true(theta.1 != res$theta)) test_that("D.alt > null value", expect_true(res$d.alternative > res$null.value)) res <- npMeanSingle(ones, mu = .5, alternative = 
"less") test_that("theta unequal in two.sided and greater alternative", expect_true(theta.1 != res$theta)) test_that("D.alt < null value", expect_true(res$d.alternative < res$null.value)) for(i in 1:5) { res <- npMeanSingle(rep(0.5, i), mu = .5) test_that(paste("Not possible to calculate theta: ", i, sep = ""), expect_null(res$theta)) } ## ## extreme case ## w <- rep(0, 100) w[1] <- .5 res <- npMeanSingle(w, mu = 0.0001, upper = 5) test_that("Correct rejection: extreme case", expect_true(res$rejection)) test_that("D.alt > null value: extreme case", expect_true(res$d.alternative > res$null.value)) res <- npMeanSingle(w, mu = 0.0001, upper = 5, alternative = "greater", alpha = 0.025) test_that("Correct rejection: extreme case", expect_true(res$rejection)) test_that("D.alt > null value: extreme case", expect_true(res$d.alternative > res$null.value)) res <- npMeanSingle(w, mu = 0.0001, upper = 5, alternative = "less", alpha = 0.025) test_that("Correct non-rejection: extreme case", expect_false(res$rejection)) ## d.alternative is a zero-length vector ## test_that("D.alt is null", ## expect_null(res$d.alternative)) res <- npMeanSingle(w, mu = .1, upper = 5, alternative = "less", alpha = 0.025) test_that("Correct rejection: extreme case", expect_false(res$rejection)) ## d.alt same as above ## test_that("D.alt is null", ## expect_null(res$d.alternative)) res <- npMeanSingle(w, mu = .5, upper = 5, alternative = "less", alpha = 0.025) test_that("Correct rejection: extreme case", expect_true(res$rejection)) test_that("D.alt < null value", expect_true(res$d.alternative < res$null.value)) res <- npMeanSingle(w, mu = 1, upper = 5, alternative = "less", alpha = 0.025) test_that("Correct rejection: extreme case", expect_true(res$rejection)) test_that("D.alt < null value", expect_true(res$d.alternative < res$null.value)) ## ## development of probability of rejection set.seed(567) w <- runif(40) resGreater <- npMeanSingle(w, mu = 0.01, upper = 5, alternative = "greater", alpha = 
0.025) probrejGreater.before <- resGreater$probrej resLess <- npMeanSingle(w, mu = 0.01, upper = 5, alternative = "less", alpha = 0.025) probrejLess.before <- resLess$probrej for(mu in seq(0.1, 1, by = 0.1)) { ## print(mu) resGreater <- npMeanSingle(w, mu = mu, upper = 5, alternative = "greater", alpha = 0.025) resLess <- npMeanSingle(w, mu = mu, upper = 5, alternative = "less", alpha = 0.025) test_that("Greater: probrej.before >= probrej.after", expect_true(probrejGreater.before >= resGreater$probrej)) test_that("Less: probrej.before <= probrej.after", expect_true(probrejLess.before <= resLess$probrej)) probrejLess.before <- resLess$probrej probrejGreater.before <- resGreater$probrej } set.seed(123) x <- runif(600) res <- npMeanSingle(x, mu = .8, lower = -2, upper = 2, alternative = "greater", alpha = 0.025) test_that("Correct non-rejection: large example, extra bounds", expect_false(res$rejection)) test_that("D.alt > null value: large example", expect_true(res$d.alternative > res$null.value)) res <- npMeanSingle(x, mu = .8, lower = -2, upper = 2, alternative = "less", alpha = 0.025) test_that("Correct rejection: large example, extra bounds", expect_true(res$rejection)) test_that("D.alt < null value: extreme case", expect_true(res$d.alternative < res$null.value)) ## ## another example ## x <- runif(45, max = 10) ## greater res <- npMeanSingle(x, mu = .8, lower = -1, upper = 12, alternative = "greater", alpha = 0.025) test_that("Correct rejection: extra bounds", expect_true(res$rejection)) test_that("D.alt > null value: large example", expect_true(res$d.alternative > res$null.value)) res <- npMeanSingle(x, mu = 8, lower = -1, upper = 12, alternative = "greater", alpha = 0.025) test_that("Correct non-rejection: extra bounds", expect_false(res$rejection)) test_that("D.alt > null value: large example", expect_true(res$d.alternative > res$null.value)) ## less res <- npMeanSingle(x, mu = .8, lower = -1, upper = 12, alternative = "less", alpha = 0.025) test_that("Correct 
non-rejection: extra bounds", expect_false(res$rejection)) test_that("D.alt < null value: large example", expect_true(res$d.alternative < res$null.value)) res <- npMeanSingle(x, mu = 8, lower = -1, upper = 12, alternative = "less", alpha = 0.025) test_that("Correct rejection: extra bounds", expect_true(res$rejection)) test_that("D.alt < null value: large example", expect_true(res$d.alternative < res$null.value)) set.seed(123) x <- runif(2) res <- npMeanSingle(x, mu = .3) test_that("npMeanSingle, no theta calculation. two-sided", expect_true(is.null(res$theta))) res <- npMeanSingle(x, mu = .3, alternative = "greater") test_that("npMeanSingle, no theta calculation. greater", expect_true(is.null(res$theta))) res <- npMeanSingle(x, mu = .3, alternative = "less") test_that("npMeanSingle, no theta calculation. less", expect_true(is.null(res$theta))) ## ## transBinomTest ## lower <- 0 upper <- 1 x <- c(rep(0, 4), rep(.5, 13), rep(1, 20)) x <- (x - lower)/(upper - lower) n <- length(x) test_that("transBinomtest, mixed rejection", expect_equal({ set.seed(1); p <- .6; transBinomTest(p, n, 0.03, list(x = x, xp = x - p)) }, 0.24926764)) test_that("transBinomtest, full rejection", expect_equal({ set.seed(1); p <- .5; transBinomTest(p, n, 0.03, list(x = x, xp = x - p)) }, 1)) test_that("transBinomtest, no rejection", expect_equal({ set.seed(1); p <- .7; transBinomTest(p, n, 0.03, list(x = x, xp = x - p)) }, 0)) x <- 1 - c(rep(0, 4), rep(.5, 13), rep(1, 20)) x <- (x - lower)/(upper - lower) n <- length(x) test_that("transBinomtest, mixed rejection", expect_equal({ set.seed(1); p <- .175; transBinomTest(p, n, 0.03, list(x = x, xp = x - p)) }, 0.26506823)) test_that("transBinomtest, full rejection", expect_equal({ set.seed(1); p <- .1; transBinomTest(p, n, 0.03, list(x = x, xp = x - p)) }, 1)) test_that("transBinomtest, no rejection", expect_equal({ set.seed(1); p <- .2; transBinomTest(p, n, 0.03, list(x = x, xp = x - p)) }, 0))
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/markvario.R \name{make_pp} \alias{make_pp} \title{Generate a point pattern object} \usage{ make_pp(sce.obj, coords_accessor) } \arguments{ \item{sce.obj}{SCE object with XY coordinates in reducedDim slot and normalized expression values that can be used for testing whether individual genes show expression patterns that correlate with the spatial location of the respective cells} \item{coords_accessor}{Indicate which reducedDim slot should be accessed to obtain a matrix of x and y coordinates of the cells} } \description{ Generate a point pattern object }
/man/make_pp.Rd
no_license
friedue/spaeti
R
false
true
640
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/markvario.R \name{make_pp} \alias{make_pp} \title{Generate a point pattern object} \usage{ make_pp(sce.obj, coords_accessor) } \arguments{ \item{sce.obj}{SCE object with XY coordinates in reducedDim slot and normalized expression values that can be used for testing whether individual genes show expression patterns that correlate with the spatial location of the respective cells} \item{coords_accessor}{Indicate which reducedDim slot should be accessed to obtain a matrix of x and y coordinates of the cells} } \description{ Generate a point pattern object }
# PURPOSE ----------------------------------------------------------------- # There are two main kinds o maps used in the rig count # updates: counties and states maps. # This file creats the base maps to build upon. # These maps cannot have Alaska and Hawaii 'shifted' to sit # below Arizone-Texas (cutting down on the bounding box). # This is due to the fact that shifting the Alaskan offshore # areas, independent of the state/counties of Alaska, causes # misalignment between the two. # This procedure is instead performed in 'protractions.R'. # PACKAGES ---------------------------------------------------------------- library(sf) library(sp) library(raster) library(dplyr) library(stringr) library(readr) library(ggplot2) library(rmapshaper) # IMPORT ------------------------------------------------------------------ # Use only county (to simplify this script) # Need to shift Alaska to a more manageable location counties_raw <- st_read("~/R/mymaps/data/cb_2017_us_county_20m.shp") %>% filter(STATEFP != "72") # Remove Puerto Rico # Baker Hughes' county names are all caps counties_raw <- counties_raw %>% mutate_at(vars(NAME), str_to_upper) # Add full state name if (!file.exists("data/maps/state_fips.txt")) download.file("https://www2.census.gov/geo/docs/reference/state.txt", "data/maps/state_fips.txt") fips <- read_delim("data/maps/state_fips.txt", delim = "|") counties_raw <- counties_raw %>% mutate_at(vars(STATEFP), as.character) %>% inner_join(fips %>% select(STATE, STATE_NAME) %>% mutate_at(vars(STATE_NAME), str_to_upper), by = c("STATEFP" = "STATE")) # Select and rename only the needed columns counties_raw <- counties_raw %>% select(state = STATE_NAME, county = NAME) # Raw states map states_raw <- counties_raw %>% group_by(state) %>% summarize() # SIMPLIFY ---------------------------------------------------------------- counties_sim <- counties_raw %>% ms_simplify() states_sim <- counties_sim %>% group_by(state) %>% summarize() # SAVE 
-------------------------------------------------------------------- save(counties_raw, file = "data/maps/us/counties_raw.RData") save(counties_sim, file = "data/maps/us/counties_sim.RData") save(states_raw, file = "data/maps/us/states_raw.RData") save(states_sim, file = "data/maps/us/states_sim.RData")
/scripts/tidy/maps/us_counties-and-states.R
no_license
seasmith/oil
R
false
false
2,336
r
# PURPOSE ----------------------------------------------------------------- # There are two main kinds o maps used in the rig count # updates: counties and states maps. # This file creats the base maps to build upon. # These maps cannot have Alaska and Hawaii 'shifted' to sit # below Arizone-Texas (cutting down on the bounding box). # This is due to the fact that shifting the Alaskan offshore # areas, independent of the state/counties of Alaska, causes # misalignment between the two. # This procedure is instead performed in 'protractions.R'. # PACKAGES ---------------------------------------------------------------- library(sf) library(sp) library(raster) library(dplyr) library(stringr) library(readr) library(ggplot2) library(rmapshaper) # IMPORT ------------------------------------------------------------------ # Use only county (to simplify this script) # Need to shift Alaska to a more manageable location counties_raw <- st_read("~/R/mymaps/data/cb_2017_us_county_20m.shp") %>% filter(STATEFP != "72") # Remove Puerto Rico # Baker Hughes' county names are all caps counties_raw <- counties_raw %>% mutate_at(vars(NAME), str_to_upper) # Add full state name if (!file.exists("data/maps/state_fips.txt")) download.file("https://www2.census.gov/geo/docs/reference/state.txt", "data/maps/state_fips.txt") fips <- read_delim("data/maps/state_fips.txt", delim = "|") counties_raw <- counties_raw %>% mutate_at(vars(STATEFP), as.character) %>% inner_join(fips %>% select(STATE, STATE_NAME) %>% mutate_at(vars(STATE_NAME), str_to_upper), by = c("STATEFP" = "STATE")) # Select and rename only the needed columns counties_raw <- counties_raw %>% select(state = STATE_NAME, county = NAME) # Raw states map states_raw <- counties_raw %>% group_by(state) %>% summarize() # SIMPLIFY ---------------------------------------------------------------- counties_sim <- counties_raw %>% ms_simplify() states_sim <- counties_sim %>% group_by(state) %>% summarize() # SAVE 
-------------------------------------------------------------------- save(counties_raw, file = "data/maps/us/counties_raw.RData") save(counties_sim, file = "data/maps/us/counties_sim.RData") save(states_raw, file = "data/maps/us/states_raw.RData") save(states_sim, file = "data/maps/us/states_sim.RData")
library(coiaf) # Declare file location here::i_am("scripts/alternate-filtering/split-file.R") # Path to data path <- "~/Desktop/Malaria/COI data/new-wsafs/" # Read in the real data wsaf_all_regions <- readRDS(paste0(path, "wsaf_intersecting.rds"))$wsaf_cleaned # Read base pf6 predictions base_pf6_predictions <- readRDS(here::here("data-outputs", "base_pf6.rds")) sample_region <- dplyr::select(base_pf6_predictions, name, Region) # Determine number of regions regions <- sample_region %>% dplyr::pull(Region) %>% unique() # Filter real data file to sample in each region and save purrr::walk(regions, function(i) { samples <- sample_region %>% dplyr::filter(Region == regions[i]) %>% dplyr::pull(name) wsaf_region <- wsaf_all_regions[rownames(wsaf_all_regions) %in% samples, ] saveRDS( wsaf_region, paste0(path, "intersecting-regions/region_", regions[i], ".rds") ) })
/archive/intersecting-data/split-file.R
permissive
bailey-lab/coiaf-real-data
R
false
false
907
r
library(coiaf) # Declare file location here::i_am("scripts/alternate-filtering/split-file.R") # Path to data path <- "~/Desktop/Malaria/COI data/new-wsafs/" # Read in the real data wsaf_all_regions <- readRDS(paste0(path, "wsaf_intersecting.rds"))$wsaf_cleaned # Read base pf6 predictions base_pf6_predictions <- readRDS(here::here("data-outputs", "base_pf6.rds")) sample_region <- dplyr::select(base_pf6_predictions, name, Region) # Determine number of regions regions <- sample_region %>% dplyr::pull(Region) %>% unique() # Filter real data file to sample in each region and save purrr::walk(regions, function(i) { samples <- sample_region %>% dplyr::filter(Region == regions[i]) %>% dplyr::pull(name) wsaf_region <- wsaf_all_regions[rownames(wsaf_all_regions) %in% samples, ] saveRDS( wsaf_region, paste0(path, "intersecting-regions/region_", regions[i], ".rds") ) })
# required libraries library(tidyverse) library(leaflet) library(leaflet.extras) # read geolocalized data -------------------------------------------------- d <- read_csv("dataset/escuelas_geolocalizado.csv") # prepare data ------------------------------------------------------------ # filter data by sector sector_estatal <- d %>% filter(sector == "Estatal") sector_privado <- d %>% filter(sector == "Privado") # create labels for map --------------------------------------------------- # create label for sector estatal label_estatal <- lapply(seq(nrow(sector_estatal)), function(i) { paste0('<br>', "Nombre: ", sector_estatal[i, "nombre"], '</br>', '<br>', "CUE: ", sector_estatal[i, "cue_anexo"], '</br>', '<br>', "Localidad: ", sector_estatal[i, "localidad"],'</br>', '<br>', "Jurisdiccion: ", sector_estatal[i, "jurisdiccion"], '</br>') }) saveRDS(label_estatal, file = "shiny/label_estatal.rds") # create label for sector privado label_privado <- lapply(seq(nrow(sector_privado)), function(i) { paste0('<br>', "Nombre: ", sector_privado[i, "nombre"], '</br>', '<br>', "CUE: ", sector_privado[i, "cue_anexo"], '</br>', '<br>', "Localidad: ", sector_privado[i, "localidad"],'</br>', '<br>', "Jurisdiccion: ", sector_privado[i, "jurisdiccion"], '</br>') }) saveRDS(label_privado, file = "shiny/label_privado.rds") # maps in leaftlet -------------------------------------------------------- # define color palette pal <- colorFactor(palette = c("darkred", "steelblue"), levels = c("Estatal", "Privado")) # create base map m <- d %>% leaflet() %>% addTiles(group = "OSM") %>% addProviderTiles("Stamen.TonerLite", group = "Toner") %>% addProviderTiles("CartoDB.DarkMatter", group = "Dark") %>% addResetMapButton() %>% setView(lat = -37.0147402, lng = -81.6698073, zoom = 4) # add sector estatal and sector privado to base map m %>% addCircleMarkers( data = sector_estatal, radius = 1, color = ~ pal(sector), label = lapply(label_estatal, htmltools::HTML), group = "Estatal", clusterOptions = 
markerClusterOptions(disableClusteringAtZoom = 8)) %>% addCircleMarkers( data = sector_privado, radius = 1, color = ~ pal(sector), label = lapply(label_privado, htmltools::HTML), group = "Privado", clusterOptions = markerClusterOptions(disableClusteringAtZoom = 8)) %>% addLayersControl(baseGroups = c("Toner", "Dark", "OSM"), overlayGroups = c("Estatal", "Privado"), position = "topleft") %>% addLegend(title = "Referencias", position = "bottomright" , pal = pal , values = c("Estatal", "Privado"))
/03_map.R
no_license
manocan/escuelas
R
false
false
2,722
r
# required libraries library(tidyverse) library(leaflet) library(leaflet.extras) # read geolocalized data -------------------------------------------------- d <- read_csv("dataset/escuelas_geolocalizado.csv") # prepare data ------------------------------------------------------------ # filter data by sector sector_estatal <- d %>% filter(sector == "Estatal") sector_privado <- d %>% filter(sector == "Privado") # create labels for map --------------------------------------------------- # create label for sector estatal label_estatal <- lapply(seq(nrow(sector_estatal)), function(i) { paste0('<br>', "Nombre: ", sector_estatal[i, "nombre"], '</br>', '<br>', "CUE: ", sector_estatal[i, "cue_anexo"], '</br>', '<br>', "Localidad: ", sector_estatal[i, "localidad"],'</br>', '<br>', "Jurisdiccion: ", sector_estatal[i, "jurisdiccion"], '</br>') }) saveRDS(label_estatal, file = "shiny/label_estatal.rds") # create label for sector privado label_privado <- lapply(seq(nrow(sector_privado)), function(i) { paste0('<br>', "Nombre: ", sector_privado[i, "nombre"], '</br>', '<br>', "CUE: ", sector_privado[i, "cue_anexo"], '</br>', '<br>', "Localidad: ", sector_privado[i, "localidad"],'</br>', '<br>', "Jurisdiccion: ", sector_privado[i, "jurisdiccion"], '</br>') }) saveRDS(label_privado, file = "shiny/label_privado.rds") # maps in leaftlet -------------------------------------------------------- # define color palette pal <- colorFactor(palette = c("darkred", "steelblue"), levels = c("Estatal", "Privado")) # create base map m <- d %>% leaflet() %>% addTiles(group = "OSM") %>% addProviderTiles("Stamen.TonerLite", group = "Toner") %>% addProviderTiles("CartoDB.DarkMatter", group = "Dark") %>% addResetMapButton() %>% setView(lat = -37.0147402, lng = -81.6698073, zoom = 4) # add sector estatal and sector privado to base map m %>% addCircleMarkers( data = sector_estatal, radius = 1, color = ~ pal(sector), label = lapply(label_estatal, htmltools::HTML), group = "Estatal", clusterOptions = 
markerClusterOptions(disableClusteringAtZoom = 8)) %>% addCircleMarkers( data = sector_privado, radius = 1, color = ~ pal(sector), label = lapply(label_privado, htmltools::HTML), group = "Privado", clusterOptions = markerClusterOptions(disableClusteringAtZoom = 8)) %>% addLayersControl(baseGroups = c("Toner", "Dark", "OSM"), overlayGroups = c("Estatal", "Privado"), position = "topleft") %>% addLegend(title = "Referencias", position = "bottomright" , pal = pal , values = c("Estatal", "Privado"))
# run_analysis.R
# Getting & Cleaning Data course project: merge the UCI HAR train/test
# sets, keep the mean/std measurements, give activities descriptive
# names, and produce a tidy summary with the average of each variable
# per subject and activity.
# Assumes the unzipped "UCI HAR Dataset" folder is under ~ (see paths).

#Load dplyr package first
library(dplyr)

#Load all train, test and feature files into R.
train_subject <- read.table("~/UCI HAR Dataset/train/subject_train.txt")
traindata <- read.table("~/UCI HAR Dataset/train/X_train.txt")
trainlabel <- read.table("~/UCI HAR Dataset/train/y_train.txt")
test_subject <- read.table("~/UCI HAR Dataset/test/subject_test.txt")
testdata <- read.table("~/UCI HAR Dataset/test/X_test.txt")
testlabel <- read.table("~/UCI HAR Dataset/test/y_test.txt")
featurename <- read.table("~/UCI HAR Dataset/features.txt")

#features.txt has 2 columns; only the second (the feature name) is needed.
featurename <- select(featurename, V2)
#Transpose rows into columns so it can be used as column names.
features <- t(featurename)

#1: bind test data and train data and name the columns accordingly
#   (test first, then train, consistently for every bind below):
mydata1 <- rbind(testdata, traindata)
names(mydata1) <- features

#2: bind test label and train label and name the column "activity"
mylabel1 <- rbind(testlabel, trainlabel)
names(mylabel1) <- "activity"

#3: bind test subjects and train subjects and name the column "subject"
mysubject <- rbind(test_subject, train_subject)
names(mysubject) <- "subject"

#4: bind all columns together.
#mytidy1 combines all training and test data -- answer to part (1).
mytidy1 <- cbind(mysubject, mylabel1, mydata1)

#-------------------------------------------------------------------------------
#Extract only the mean and standard deviation for each measurement:
#keep names matching subject, activity, std or mean -- but not meanFreq
#(the [^F] after "mean" rejects it).
mytidy2 <- mytidy1[, grepl("subject|activity|std|mean[^F]", colnames(mytidy1))]

#The column names are hard to read and operate on:
#remove hyphens, then "()", then lower-case everything.
names(mytidy2) <- gsub("-", "", names(mytidy2))
names(mytidy2) <- gsub("\\(\\)", "", names(mytidy2))
names(mytidy2) <- tolower(names(mytidy2))

#-------------------------------------------------------------------------------
#Name the activities with descriptive names instead of numbers.
#factor() maps each code 1..6 to its label in one step; the original
#chain of six gsub() calls did the same, one digit at a time.
mytidy2 <- mutate(mytidy2,
                  activity = as.character(
                    factor(activity,
                           levels = 1:6,
                           labels = c("WALKING",
                                      "WALKING_UPSTAIRS",
                                      "WALKING_DOWNSTAIRS",
                                      "SITTING",
                                      "STANDING",
                                      "LAYING"))))

#-------------------------------------------------------------------------------
#Create a new data set with the average of each variable for each
#activity and each subject.
mydf <- mytidy2
grouping <- group_by(mydf, subject, activity)
#summarise(across(...)) replaces the deprecated summarise_each()/funs().
mydf <- summarise(grouping, across(everything(), mean), .groups = "drop")
mydf
#-------------------------------------------------------------------------------
/run_analysis.R
no_license
kalleigh/getting-cleaning-data-project
R
false
false
3,669
r
# run_analysis.R
# Getting & Cleaning Data course project: merge the UCI HAR train/test
# sets, keep the mean/std measurements, give activities descriptive
# names, and produce a tidy summary with the average of each variable
# per subject and activity.
# Assumes the unzipped "UCI HAR Dataset" folder is under ~ (see paths).

#Load dplyr package first
library(dplyr)

#Load all train, test and feature files into R.
train_subject <- read.table("~/UCI HAR Dataset/train/subject_train.txt")
traindata <- read.table("~/UCI HAR Dataset/train/X_train.txt")
trainlabel <- read.table("~/UCI HAR Dataset/train/y_train.txt")
test_subject <- read.table("~/UCI HAR Dataset/test/subject_test.txt")
testdata <- read.table("~/UCI HAR Dataset/test/X_test.txt")
testlabel <- read.table("~/UCI HAR Dataset/test/y_test.txt")
featurename <- read.table("~/UCI HAR Dataset/features.txt")

#features.txt has 2 columns; only the second (the feature name) is needed.
featurename <- select(featurename, V2)
#Transpose rows into columns so it can be used as column names.
features <- t(featurename)

#1: bind test data and train data and name the columns accordingly
#   (test first, then train, consistently for every bind below):
mydata1 <- rbind(testdata, traindata)
names(mydata1) <- features

#2: bind test label and train label and name the column "activity"
mylabel1 <- rbind(testlabel, trainlabel)
names(mylabel1) <- "activity"

#3: bind test subjects and train subjects and name the column "subject"
mysubject <- rbind(test_subject, train_subject)
names(mysubject) <- "subject"

#4: bind all columns together.
#mytidy1 combines all training and test data -- answer to part (1).
mytidy1 <- cbind(mysubject, mylabel1, mydata1)

#-------------------------------------------------------------------------------
#Extract only the mean and standard deviation for each measurement:
#keep names matching subject, activity, std or mean -- but not meanFreq
#(the [^F] after "mean" rejects it).
mytidy2 <- mytidy1[, grepl("subject|activity|std|mean[^F]", colnames(mytidy1))]

#The column names are hard to read and operate on:
#remove hyphens, then "()", then lower-case everything.
names(mytidy2) <- gsub("-", "", names(mytidy2))
names(mytidy2) <- gsub("\\(\\)", "", names(mytidy2))
names(mytidy2) <- tolower(names(mytidy2))

#-------------------------------------------------------------------------------
#Name the activities with descriptive names instead of numbers.
#factor() maps each code 1..6 to its label in one step; the original
#chain of six gsub() calls did the same, one digit at a time.
mytidy2 <- mutate(mytidy2,
                  activity = as.character(
                    factor(activity,
                           levels = 1:6,
                           labels = c("WALKING",
                                      "WALKING_UPSTAIRS",
                                      "WALKING_DOWNSTAIRS",
                                      "SITTING",
                                      "STANDING",
                                      "LAYING"))))

#-------------------------------------------------------------------------------
#Create a new data set with the average of each variable for each
#activity and each subject.
mydf <- mytidy2
grouping <- group_by(mydf, subject, activity)
#summarise(across(...)) replaces the deprecated summarise_each()/funs().
mydf <- summarise(grouping, across(everything(), mean), .groups = "drop")
mydf
#-------------------------------------------------------------------------------
# Pull recent #hershey tweets, build a text corpus, and visualize term
# frequencies (word cloud + bar chart of terms with freq > 100).
# NOTE(review): searchTwitter() needs prior Twitter API authentication
# (twitteR::setup_twitter_oauth()) -- confirm it runs in this session.

# Load every package this script actually calls; the original relied on
# tm, wordcloud, RColorBrewer and twitteR being attached already.
library(twitteR)
library(tm)
library(SnowballC)
library(wordcloud)
library(RColorBrewer)
library(ggplot2)

tweets <- searchTwitter("#hershey", n = 15000, lang = "en", since = '2016-03-31')
tweets_text <- sapply(tweets, function(x) x$getText())
object.size(tweets)
head(tweets_text)
tail(tweets_text)

# "utf-8-mac" only exists on macOS; the commented alternative is portable.
tweets_text <- iconv(tweets_text, to = "utf-8-mac")
#tweets_text <- iconv(tweets_text, to = "utf-8", sub="")

tweet_Corpus <- Corpus(VectorSource(tweets_text))
# lazy takes a logical; the original passed the string 'TRUE'.
tweet_Corpus <- tm_map(tweet_Corpus, removeNumbers, lazy = TRUE)
tweet_Corpus <- tm_map(tweet_Corpus, stemDocument)

# Term-document matrix with campaign-specific stopwords on top of English.
tdm <- TermDocumentMatrix(tweet_Corpus,
                          control = list(removePunctuation = TRUE,
                                         stopwords = c("hershey", "chocolate", "sweet",
                                                       "%http%", "hersheys",
                                                       "httpstcokepoqp",
                                                       stopwords("english")),
                                         removeNumbers = TRUE,
                                         tolower = TRUE))

dtm <- DocumentTermMatrix(tweet_Corpus)
dtms <- removeSparseTerms(dtm, 0.1) # This makes a matrix that is 10% empty space, maximum.
inspect(dtms)
findFreqTerms(tdm)

# define tdm as matrix
m <- as.matrix(tdm)
# get word counts in decreasing order
word_freqs <- sort(rowSums(m), decreasing = TRUE)
# create a data frame with words and their frequencies
dm <- data.frame(word = names(word_freqs), freq = word_freqs)

# plot wordcloud
wordcloud(dm$word, dm$freq, random.order = FALSE, colors = brewer.pal(8, "Dark2"))

findAssocs(dtm, c("marketing", "job"), corlimit = 0.5) # specifying a correlation limit of 0.98
dtmss <- removeSparseTerms(dtm, 0.15) # This makes a matrix that is only 15% empty space, maximum.

# To plot the most frequently occuring words
wf <- data.frame(word = names(word_freqs), freq = word_freqs)
head(wf)
p <- ggplot(subset(wf, freq > 100), aes(word, freq))
p <- p + geom_bar(stat = "identity")
p <- p + theme(axis.text.x = element_text(angle = 45, hjust = 1))
p
/test.R
no_license
kshachi/test
R
false
false
1,872
r
# Pull recent #hershey tweets, build a text corpus, and visualize term
# frequencies (word cloud + bar chart of terms with freq > 100).
# NOTE(review): searchTwitter() needs prior Twitter API authentication
# (twitteR::setup_twitter_oauth()) -- confirm it runs in this session.

# Load every package this script actually calls; the original relied on
# tm, wordcloud, RColorBrewer and twitteR being attached already.
library(twitteR)
library(tm)
library(SnowballC)
library(wordcloud)
library(RColorBrewer)
library(ggplot2)

tweets <- searchTwitter("#hershey", n = 15000, lang = "en", since = '2016-03-31')
tweets_text <- sapply(tweets, function(x) x$getText())
object.size(tweets)
head(tweets_text)
tail(tweets_text)

# "utf-8-mac" only exists on macOS; the commented alternative is portable.
tweets_text <- iconv(tweets_text, to = "utf-8-mac")
#tweets_text <- iconv(tweets_text, to = "utf-8", sub="")

tweet_Corpus <- Corpus(VectorSource(tweets_text))
# lazy takes a logical; the original passed the string 'TRUE'.
tweet_Corpus <- tm_map(tweet_Corpus, removeNumbers, lazy = TRUE)
tweet_Corpus <- tm_map(tweet_Corpus, stemDocument)

# Term-document matrix with campaign-specific stopwords on top of English.
tdm <- TermDocumentMatrix(tweet_Corpus,
                          control = list(removePunctuation = TRUE,
                                         stopwords = c("hershey", "chocolate", "sweet",
                                                       "%http%", "hersheys",
                                                       "httpstcokepoqp",
                                                       stopwords("english")),
                                         removeNumbers = TRUE,
                                         tolower = TRUE))

dtm <- DocumentTermMatrix(tweet_Corpus)
dtms <- removeSparseTerms(dtm, 0.1) # This makes a matrix that is 10% empty space, maximum.
inspect(dtms)
findFreqTerms(tdm)

# define tdm as matrix
m <- as.matrix(tdm)
# get word counts in decreasing order
word_freqs <- sort(rowSums(m), decreasing = TRUE)
# create a data frame with words and their frequencies
dm <- data.frame(word = names(word_freqs), freq = word_freqs)

# plot wordcloud
wordcloud(dm$word, dm$freq, random.order = FALSE, colors = brewer.pal(8, "Dark2"))

findAssocs(dtm, c("marketing", "job"), corlimit = 0.5) # specifying a correlation limit of 0.98
dtmss <- removeSparseTerms(dtm, 0.15) # This makes a matrix that is only 15% empty space, maximum.

# To plot the most frequently occuring words
wf <- data.frame(word = names(word_freqs), freq = word_freqs)
head(wf)
p <- ggplot(subset(wf, freq > 100), aes(word, freq))
p <- p + geom_bar(stat = "identity")
p <- p + theme(axis.text.x = element_text(angle = 45, hjust = 1))
p
# Plot summaries of melting peaks fit by qpcR::meltcurve().
# This is an uuuugly format. Need to either improve this or can the whole script.
# Usage: plot_melt_summary.R peaksummary.csv

library(ggplot2)
library(grid)
library(DeLuciatoR)
library(ggplotTicks)

theme_set(theme_ggEHD())

# First trailing command-line argument = path to the peak-summary CSV.
peak_summary <- read.csv(commandArgs(trailingOnly = TRUE)[[1]])

pdf(
  file = "figs/multi_ctab_melt_npeak_tm-20150421.pdf", #FIXME hard-coded path
  width = 9,
  height = 6,
  pointsize = 24)

# Number of peaks per tissue, jittered, one point shape per extraction count.
plot(mirror_ticks(ggplot(
  peak_summary,
  aes(Tissue, npeak, shape = factor(Extractions)))
  + geom_point(position = position_jitter(w = 0.25, h = 0.1))
  + coord_flip()))

# Melting temperature of the main peak per tissue.
plot(mirror_ticks(ggplot(
  #peak_summary[peak_summary$main_tm > 77.5,],
  peak_summary,
  aes(Tissue, main_tm, shape = factor(Extractions)))
  + geom_point(position = position_jitter(w = 0.1, h = 0.25))
  + coord_flip()))

dev.off()
/R/plot_melt_summary.R
no_license
infotroph/Prairie_seq
R
false
false
857
r
# Plot summaries of melting peaks fit by qpcR::meltcurve().
# This is an uuuugly format. Need to either improve this or can the whole script.
# Usage: plot_melt_summary.R peaksummary.csv

library(ggplot2)
library(grid)
library(DeLuciatoR)
library(ggplotTicks)

theme_set(theme_ggEHD())

# First trailing command-line argument = path to the peak-summary CSV.
peak_summary <- read.csv(commandArgs(trailingOnly = TRUE)[[1]])

pdf(
  file = "figs/multi_ctab_melt_npeak_tm-20150421.pdf", #FIXME hard-coded path
  width = 9,
  height = 6,
  pointsize = 24)

# Number of peaks per tissue, jittered, one point shape per extraction count.
plot(mirror_ticks(ggplot(
  peak_summary,
  aes(Tissue, npeak, shape = factor(Extractions)))
  + geom_point(position = position_jitter(w = 0.25, h = 0.1))
  + coord_flip()))

# Melting temperature of the main peak per tissue.
plot(mirror_ticks(ggplot(
  #peak_summary[peak_summary$main_tm > 77.5,],
  peak_summary,
  aes(Tissue, main_tm, shape = factor(Extractions)))
  + geom_point(position = position_jitter(w = 0.1, h = 0.25))
  + coord_flip()))

dev.off()
# Triplet H-coefficients: H[h, j, k] = 1 - O[h, j, k] / EO[h, j, k],
# where O and EO are the observed and expected K x K x K count arrays.
#
# Args:
#   X, n, p : unused here; kept so the signature matches the package's
#             other H* helpers (callers pass them positionally/by name).
#   K       : number of items (array dimension).
#   J       : item labels, length K, used for all three dimnames.
#   EO, O   : expected and observed K x K x K arrays.
#             NOTE(review): the self-referential defaults (K = K, etc.)
#             make every argument effectively required -- evaluating a
#             default raises a "promise already under evaluation" error.
# Returns: a K x K x K numeric array with dimnames list(J, J, J).
Htrip <- function(X, K = K, n = n, J = J, p = p, EO = EO, O = O) {
  # Vectorized form of the original triple loop over 1 - O/EO;
  # array() enforces the K x K x K shape and attaches the labels.
  array(1 - O / EO, dim = c(K, K, K), dimnames = list(J, J, J))
}
/R/Htrip.R
no_license
SpyrosBalafas/mudfold
R
false
false
234
r
# Triplet H-coefficients: H[h, j, k] = 1 - O[h, j, k] / EO[h, j, k],
# where O and EO are the observed and expected K x K x K count arrays.
#
# Args:
#   X, n, p : unused here; kept so the signature matches the package's
#             other H* helpers (callers pass them positionally/by name).
#   K       : number of items (array dimension).
#   J       : item labels, length K, used for all three dimnames.
#   EO, O   : expected and observed K x K x K arrays.
#             NOTE(review): the self-referential defaults (K = K, etc.)
#             make every argument effectively required -- evaluating a
#             default raises a "promise already under evaluation" error.
# Returns: a K x K x K numeric array with dimnames list(J, J, J).
Htrip <- function(X, K = K, n = n, J = J, p = p, EO = EO, O = O) {
  # Vectorized form of the original triple loop over 1 - O/EO;
  # array() enforces the K x K x K shape and attaches the labels.
  array(1 - O / EO, dim = c(K, K, K), dimnames = list(J, J, J))
}
#CcSCB_Distance
#
# Distance analysis of loggerhead turtles
# in the SCB from aerial survey data.
# Tomo Eguchi
# 13 May 2016
#
# Review notes (code left unchanged):
# - Workflow: load segmented sighting data via CcSCB_functions.R,
#   rebuild transect effort from track files, post-stratify transects
#   inshore/offshore, fit a family of detection functions
#   (hn/hr x cosine adjustment x Beaufort covariate), compare by AIC,
#   and save the workspace.
# - NOTE(review): ds() and ds.gof() are Distance-package functions but
#   only mrds is attached here -- presumably CcSCB_functions.R loads
#   Distance; verify.
# - NOTE(review): rm(list = ls()) in a sourced script clobbers the
#   caller's workspace; consider removing.

rm(list=ls())
#library(unmarked) # --- seems to use discrete distance data only
#library(DSpat) # Models by Devin Johnson et al.
library(mrds) # Package by Jeff Laake et al.
source('CcSCB_functions.R')

# 2015 data that are split in 2 km chunks are here
# these were created using AirSegChopCc_2016_04_11.R on merged data files
# which were created using combineFiles.m in Matlab. AirSegChop script
# was first created by Elizabeth Becker and Karin Forney and was modified
# for turtle sightings by me.
# data are stored in CcSCB_functions.R
Sdata.2011$transectID <- paste(Sdata.2011$year, Sdata.2011$transectNum, sep = '_')
Sdata.2015$transectID <- paste(Sdata.2015$year, Sdata.2015$transectNum, sep = '_')

# compute the perpendicular distances in meters.
# alt, ft2m, deg2rad are presumably defined in CcSCB_functions.R -- verify.
Sdata$PerpDist <- ft2m(alt * tan(deg2rad(90 - abs(Sdata$DecAngle))))

# fix slon to all negative values:
Sdata$slon[Sdata$slon > 0] <- Sdata$slon[Sdata$slon > 0] - 360

#ccData <- subset(Sdata, species == 'cc')
#ccDataOn <- subset(ccData, Effort == 1)

# perp distance in km
ccData$distance <- ccData$PerpDist/1000

# transect effort data:
# this is for 2015 only. - need 2011? - probably not because
# effort is only needed for estimating density/abundance.
# no sightings from 2011
effortData <- read.table('Data/tmpTracks_Nov2016.txt', header = TRUE, sep = ",")

# first order lat/lon pairs so that offshore point is obvious
# for each track segment (western endpoint first in each row):
new.effort.data <- matrix(data = NA, nrow = dim(effortData)[1], ncol = 5)
for (k in 1:dim(effortData)[1]){
  if (effortData[k, 'Lon1'] < effortData[k, 'Lon2']){
    new.effort.data[k, ] <- unlist(as.vector(c(effortData[k, c('Lat1', 'Lon1', 'Lat2', 'Lon2', 'Line')])))
  } else {
    new.effort.data[k, ] <- unlist(as.vector(c(effortData[k, c('Lat2', 'Lon2', 'Lat1', 'Lon1', 'Line')])))
  }
}

# Nov 2016 - need to split each line into inshore and offshore for
# post-stratification. Need to find the splitting lat/lon and new
# line IDs.
effort.data.2 <- matrix(data = NA, nrow = (dim(new.effort.data)[1] * 2), ncol = 7)
transect.data <- read.table('Data/transectLines_strata.csv', header = TRUE, sep = ",")
#new.lines <- vector(mode = 'numeric', length = dim(new.effort.data)[1])
# NOTE(review): the counter below shadows base::c.  Calls like c(...)
# still resolve to the function (R skips non-function bindings when
# looking up a call), but renaming it would be safer.
c <- 1
for (k in 1:dim(new.effort.data)[1]){
  line.ID <- new.effort.data[k, 5]
  strata.lines <- transect.data[transect.data$ID == line.ID, ]
  if (dim(strata.lines)[1] > 1){
    lon.middle <- strata.lines[1, 'lon_inshore']
    # the segment goes over the middle point: split into two rows
    if (new.effort.data[k, 2] < lon.middle & new.effort.data[k, 4] > lon.middle){
      # W of the middle point
      effort.data.2[c, ] <- unlist(c(new.effort.data[k, c(1,2)],
                                     strata.lines[1, c('lat_inshore', 'lon_inshore')],
                                     new.effort.data[k, 5],
                                     strata.lines[1, 'line'],
                                     strata.lines[1, 'offshore']))
      c <- c + 1
      # E of the middle point
      effort.data.2[c, ] <- unlist(c(strata.lines[1, c('lat_inshore', 'lon_inshore')],
                                     new.effort.data[k, c(3,4)],
                                     new.effort.data[k, 5],
                                     strata.lines[2, 'line'],
                                     strata.lines[2, 'offshore']))
      c <- c + 1
      # both points are west of the middle point
    } else if (new.effort.data[k, 2] < lon.middle & new.effort.data[k, 4] < lon.middle){
      effort.data.2[c, ] <- unlist(c(new.effort.data[k, c(1:5)],
                                     strata.lines[1, 'line'],
                                     strata.lines[1, 'offshore']))
      c <- c + 1
      # both points are E of the middle point
    } else if (new.effort.data[k, 2] > lon.middle){
      effort.data.2[c, ] <- unlist(c(new.effort.data[k, c(1:5)],
                                     strata.lines[2, 'line'],
                                     strata.lines[2, 'offshore']))
      c <- c + 1
    }
  } else {
    # only one strata.lines - take all at once
    effort.data.2[c, ] <- unlist(c(new.effort.data[k, c(1:5)],
                                   strata.lines[1, c('line', 'offshore')]))
    c <- c + 1
  }
}
# drop the unused preallocated rows
effort.data.2 <- as.data.frame(na.omit(effort.data.2))
colnames(effort.data.2) <- c('Lat1', 'Lon1', 'Lat2', 'Lon2', 'oldID', 'Line', 'offshore')

# find distances between Lat1/Lon1 and Lat2/Lon2:
# create the new UTM based coordinates for Lat1/Lon1
# (latlon2sp and center.UTM come from CcSCB_functions.R -- verify)
effort.data.2.1 <- effort.data.2
colnames(effort.data.2.1) <- c('Y', 'X', 'Lat2', 'Lon2', 'oldID', 'Line', 'offshore')
effort.data.2.Sp <- latlon2sp(effort.data.2.1, center.UTM)
effort.data.2.2 <- effort.data.2.Sp@data
# rename again for Lat2/Lon2
colnames(effort.data.2.2) <- c('Y', 'X', 'oldID', 'Line', 'offshore', 'newX1', 'newY1')
effort.data.2.Sp <- latlon2sp(effort.data.2.2, center.UTM)
effort.data.2.3 <- effort.data.2.Sp@data
# rename again for Lat2/Lon2
colnames(effort.data.2.3) <- c('oldID', 'Line', 'offshore', 'newX1', 'newY1', 'newX2', 'newY2')
effort.data.3 <- cbind(effort.data.2.3,
                       effort.data.2[, c('Lat1', 'Lon1', 'Lat2', 'Lon2')])

# Straight-line distance between two 2-D points.
# NOTE(review): the inner "dist <-" assignment is needless (the value is
# returned anyway) and locally shadows stats::dist.
euclidDistance <- function(x1, x2) dist <- sqrt(sum((x1 - x2)^2))

library(foreach)
# per-segment track lengths in the projected (UTM-offset) coordinates
distances <- foreach(i = 1:nrow(effort.data.3), .combine = c) %do%
  euclidDistance(effort.data.3[i, c('newX1', 'newY1')],
                 effort.data.3[i, c('newX2', 'newY2')])
effort.data.3$Distance <- distances

# total surveyed effort per (post-stratified) line
lineIDs <- sort(unique(effort.data.2$Line))
effortByLine <- vector(mode = "numeric", length = length(lineIDs))
for (k in 1:length(lineIDs)){
  effortByLine[k] <- sum(effort.data.3$Distance[effort.data.3$Line == lineIDs[k]])
}

effort.data.3$Region <- 'inshore'
effort.data.3$Region[effort.data.3$offshore == 1] <- 'offshore'

effort.df <- data.frame(ID = lineIDs, effort = effortByLine)

# region table for ds(); total.area comes from CcSCB_functions.R -- verify
region <- data.frame(Region.Label = "SCB", Area = total.area)

# transect numbers need to be updated with new IDs
ccData$Region <- NA
line.vec <- vector(mode = 'numeric', length = nrow(ccData))
for (k in 1:nrow(ccData)){
  sighting.mlon <- ccData[k, 'mlon']
  sighting.line <- ccData[k, 'transectNum']
  strata.lines <- transect.data[transect.data$ID == sighting.line, ]
  if (nrow(strata.lines) > 1){
    if (sighting.mlon < strata.lines[1, 'lon_inshore']){
      line.vec[k] <- strata.lines[1, 'line']
    } else {
      line.vec[k] <- strata.lines[2, 'line']
    }
  } else {
    line.vec[k] <- strata.lines[1, 'line']
  }
  line <- transect.data[transect.data$line == line.vec[k],]
  # NOTE(review): ifelse() used purely for its side effect; a plain
  # if/else would be clearer here.
  ifelse(line$offshore == 0,
         ccData$Region[k] <- 'inshore',
         ccData$Region[k] <- 'offshore')
}
ccData$line.strata <- line.vec

obs <- data.frame(Region.Label = 'SCB',
                  object = seq(1, dim(ccData)[1]),
                  Sample.Label = ccData$line.strata)

# sample.table needs region, line IDs, and effort:
sample <- data.frame(Sample.Label = lineIDs,
                     Effort = effortByLine,
                     Region.Label = 'SCB')
# sample$Region.Label <- NA
# for (k in 1:nrow(sample)){
#   line <- transect.data[transect.data$line == sample$Sample.Label[k],]
#   ifelse(line$offshore == 0,
#          sample$Region.Label[k] <- 'inshore',
#          sample$Region.Label[k] <- 'offshore')
# }

# may not need to truncate because observers were instructed
# not to look too far away; changed from 15% to 1%
# Candidate detection functions: key = half-normal (hn) or hazard-rate
# (hr), with/without cosine adjustments and the Beaufort covariate.
run.hr.cos.bft <- ds(data = ccData, truncation="1%", key = "hr", formula = ~ Beaufort, adjustment = 'cos', region.table = region, sample.table = sample, obs.table = obs)
run.hr.cos.1 <- ds(data = ccData, truncation="1%", key = "hr", adjustment = "cos", formula = ~ 1, region.table = region, sample.table = sample, obs.table = obs)
run.hr.bft <- ds(data = ccData, truncation="1%", key = "hr", formula = ~ Beaufort, adjustment = NULL, region.table = region, sample.table = sample, obs.table = obs)
run.hn.bft <- ds(data = ccData, truncation="1%", key = "hn", adjustment = NULL, formula = ~ Beaufort, region.table = region, sample.table = sample, obs.table = obs)
run.hr.null.1 <- ds(data = ccData, truncation="1%", key = "hr", adjustment = NULL, formula = ~ 1, region.table = region, sample.table = sample, obs.table = obs)
run.hn.cos.bft <- ds(data = ccData, truncation="1%", key = "hn", formula = ~ Beaufort, adjustment = 'cos', region.table = region, sample.table = sample, obs.table = obs)
run.hn.cos.1 <- ds(data = ccData, truncation="1%", key = "hn", adjustment = "cos", formula = ~ 1, region.table = region, sample.table = sample, obs.table = obs)
run.hn.null.1 <- ds(data = ccData, truncation="1%", key = "hn", adjustment = NULL, formula = ~ 1, region.table = region, sample.table = sample, obs.table = obs)

# goodness-of-fit for each fitted model
gof.hn.null.1 <- ds.gof(run.hn.null.1)
gof.hr.null.1 <- ds.gof(run.hr.null.1)
gof.hr.cos.1 <- ds.gof(run.hr.cos.1)
gof.hn.cos.1 <- ds.gof(run.hn.cos.1)
gof.hr.cos.bft <- ds.gof(run.hr.cos.bft)
gof.hn.cos.bft <- ds.gof(run.hn.cos.bft)

# model comparison by AIC
AIC_all <- c('HN.Cos.Bft' = AIC(run.hn.cos.bft),
             'HR.Cos.Bft' = AIC(run.hr.cos.bft),
             'HN.Cos' = AIC(run.hn.cos.1),
             'HR.Cos' = AIC(run.hr.cos.1),
             'HN.Bft' = AIC(run.hn.bft),
             'HR.Bft' = AIC(run.hr.bft),
             'HN' = AIC(run.hn.null.1),
             'HR' = AIC(run.hr.null.1))
minAIC <- min(AIC_all)
deltaAIC <- AIC_all - min(AIC_all)

# AIC(run.hn.cos.bft)
# AIC(run.hr.cos.bft)
# AIC(run.hn.cos.1)
# AIC(run.hr.cos.1)
# AIC(run.hn.null.1)
# AIC(run.hr.null.1)

# HR + null is the best:
check.mono(run.hr.null.1$ddf, plot=T, n.pts=100)

# save the results - just the best one:
save(list = ls(), file = paste0("RData/HR_Null_1_NoStrata_out_", Sys.Date(), ".RData"))
/CcSCB_Distance_NoStrata_Nov2016.R
no_license
mteguchi/SCB_AerialSurvey
R
false
false
11,832
r
#CcSCB_Distance
#
# Distance analysis of loggerhead turtles
# in the SCB from aerial survey data.
# Tomo Eguchi
# 13 May 2016
#
# Review notes (code left unchanged):
# - Workflow: load segmented sighting data via CcSCB_functions.R,
#   rebuild transect effort from track files, post-stratify transects
#   inshore/offshore, fit a family of detection functions
#   (hn/hr x cosine adjustment x Beaufort covariate), compare by AIC,
#   and save the workspace.
# - NOTE(review): ds() and ds.gof() are Distance-package functions but
#   only mrds is attached here -- presumably CcSCB_functions.R loads
#   Distance; verify.
# - NOTE(review): rm(list = ls()) in a sourced script clobbers the
#   caller's workspace; consider removing.

rm(list=ls())
#library(unmarked) # --- seems to use discrete distance data only
#library(DSpat) # Models by Devin Johnson et al.
library(mrds) # Package by Jeff Laake et al.
source('CcSCB_functions.R')

# 2015 data that are split in 2 km chunks are here
# these were created using AirSegChopCc_2016_04_11.R on merged data files
# which were created using combineFiles.m in Matlab. AirSegChop script
# was first created by Elizabeth Becker and Karin Forney and was modified
# for turtle sightings by me.
# data are stored in CcSCB_functions.R
Sdata.2011$transectID <- paste(Sdata.2011$year, Sdata.2011$transectNum, sep = '_')
Sdata.2015$transectID <- paste(Sdata.2015$year, Sdata.2015$transectNum, sep = '_')

# compute the perpendicular distances in meters.
# alt, ft2m, deg2rad are presumably defined in CcSCB_functions.R -- verify.
Sdata$PerpDist <- ft2m(alt * tan(deg2rad(90 - abs(Sdata$DecAngle))))

# fix slon to all negative values:
Sdata$slon[Sdata$slon > 0] <- Sdata$slon[Sdata$slon > 0] - 360

#ccData <- subset(Sdata, species == 'cc')
#ccDataOn <- subset(ccData, Effort == 1)

# perp distance in km
ccData$distance <- ccData$PerpDist/1000

# transect effort data:
# this is for 2015 only. - need 2011? - probably not because
# effort is only needed for estimating density/abundance.
# no sightings from 2011
effortData <- read.table('Data/tmpTracks_Nov2016.txt', header = TRUE, sep = ",")

# first order lat/lon pairs so that offshore point is obvious
# for each track segment (western endpoint first in each row):
new.effort.data <- matrix(data = NA, nrow = dim(effortData)[1], ncol = 5)
for (k in 1:dim(effortData)[1]){
  if (effortData[k, 'Lon1'] < effortData[k, 'Lon2']){
    new.effort.data[k, ] <- unlist(as.vector(c(effortData[k, c('Lat1', 'Lon1', 'Lat2', 'Lon2', 'Line')])))
  } else {
    new.effort.data[k, ] <- unlist(as.vector(c(effortData[k, c('Lat2', 'Lon2', 'Lat1', 'Lon1', 'Line')])))
  }
}

# Nov 2016 - need to split each line into inshore and offshore for
# post-stratification. Need to find the splitting lat/lon and new
# line IDs.
effort.data.2 <- matrix(data = NA, nrow = (dim(new.effort.data)[1] * 2), ncol = 7)
transect.data <- read.table('Data/transectLines_strata.csv', header = TRUE, sep = ",")
#new.lines <- vector(mode = 'numeric', length = dim(new.effort.data)[1])
# NOTE(review): the counter below shadows base::c.  Calls like c(...)
# still resolve to the function (R skips non-function bindings when
# looking up a call), but renaming it would be safer.
c <- 1
for (k in 1:dim(new.effort.data)[1]){
  line.ID <- new.effort.data[k, 5]
  strata.lines <- transect.data[transect.data$ID == line.ID, ]
  if (dim(strata.lines)[1] > 1){
    lon.middle <- strata.lines[1, 'lon_inshore']
    # the segment goes over the middle point: split into two rows
    if (new.effort.data[k, 2] < lon.middle & new.effort.data[k, 4] > lon.middle){
      # W of the middle point
      effort.data.2[c, ] <- unlist(c(new.effort.data[k, c(1,2)],
                                     strata.lines[1, c('lat_inshore', 'lon_inshore')],
                                     new.effort.data[k, 5],
                                     strata.lines[1, 'line'],
                                     strata.lines[1, 'offshore']))
      c <- c + 1
      # E of the middle point
      effort.data.2[c, ] <- unlist(c(strata.lines[1, c('lat_inshore', 'lon_inshore')],
                                     new.effort.data[k, c(3,4)],
                                     new.effort.data[k, 5],
                                     strata.lines[2, 'line'],
                                     strata.lines[2, 'offshore']))
      c <- c + 1
      # both points are west of the middle point
    } else if (new.effort.data[k, 2] < lon.middle & new.effort.data[k, 4] < lon.middle){
      effort.data.2[c, ] <- unlist(c(new.effort.data[k, c(1:5)],
                                     strata.lines[1, 'line'],
                                     strata.lines[1, 'offshore']))
      c <- c + 1
      # both points are E of the middle point
    } else if (new.effort.data[k, 2] > lon.middle){
      effort.data.2[c, ] <- unlist(c(new.effort.data[k, c(1:5)],
                                     strata.lines[2, 'line'],
                                     strata.lines[2, 'offshore']))
      c <- c + 1
    }
  } else {
    # only one strata.lines - take all at once
    effort.data.2[c, ] <- unlist(c(new.effort.data[k, c(1:5)],
                                   strata.lines[1, c('line', 'offshore')]))
    c <- c + 1
  }
}
# drop the unused preallocated rows
effort.data.2 <- as.data.frame(na.omit(effort.data.2))
colnames(effort.data.2) <- c('Lat1', 'Lon1', 'Lat2', 'Lon2', 'oldID', 'Line', 'offshore')

# find distances between Lat1/Lon1 and Lat2/Lon2:
# create the new UTM based coordinates for Lat1/Lon1
# (latlon2sp and center.UTM come from CcSCB_functions.R -- verify)
effort.data.2.1 <- effort.data.2
colnames(effort.data.2.1) <- c('Y', 'X', 'Lat2', 'Lon2', 'oldID', 'Line', 'offshore')
effort.data.2.Sp <- latlon2sp(effort.data.2.1, center.UTM)
effort.data.2.2 <- effort.data.2.Sp@data
# rename again for Lat2/Lon2
colnames(effort.data.2.2) <- c('Y', 'X', 'oldID', 'Line', 'offshore', 'newX1', 'newY1')
effort.data.2.Sp <- latlon2sp(effort.data.2.2, center.UTM)
effort.data.2.3 <- effort.data.2.Sp@data
# rename again for Lat2/Lon2
colnames(effort.data.2.3) <- c('oldID', 'Line', 'offshore', 'newX1', 'newY1', 'newX2', 'newY2')
effort.data.3 <- cbind(effort.data.2.3,
                       effort.data.2[, c('Lat1', 'Lon1', 'Lat2', 'Lon2')])

# Straight-line distance between two 2-D points.
# NOTE(review): the inner "dist <-" assignment is needless (the value is
# returned anyway) and locally shadows stats::dist.
euclidDistance <- function(x1, x2) dist <- sqrt(sum((x1 - x2)^2))

library(foreach)
# per-segment track lengths in the projected (UTM-offset) coordinates
distances <- foreach(i = 1:nrow(effort.data.3), .combine = c) %do%
  euclidDistance(effort.data.3[i, c('newX1', 'newY1')],
                 effort.data.3[i, c('newX2', 'newY2')])
effort.data.3$Distance <- distances

# total surveyed effort per (post-stratified) line
lineIDs <- sort(unique(effort.data.2$Line))
effortByLine <- vector(mode = "numeric", length = length(lineIDs))
for (k in 1:length(lineIDs)){
  effortByLine[k] <- sum(effort.data.3$Distance[effort.data.3$Line == lineIDs[k]])
}

effort.data.3$Region <- 'inshore'
effort.data.3$Region[effort.data.3$offshore == 1] <- 'offshore'

effort.df <- data.frame(ID = lineIDs, effort = effortByLine)

# region table for ds(); total.area comes from CcSCB_functions.R -- verify
region <- data.frame(Region.Label = "SCB", Area = total.area)

# transect numbers need to be updated with new IDs
ccData$Region <- NA
line.vec <- vector(mode = 'numeric', length = nrow(ccData))
for (k in 1:nrow(ccData)){
  sighting.mlon <- ccData[k, 'mlon']
  sighting.line <- ccData[k, 'transectNum']
  strata.lines <- transect.data[transect.data$ID == sighting.line, ]
  if (nrow(strata.lines) > 1){
    if (sighting.mlon < strata.lines[1, 'lon_inshore']){
      line.vec[k] <- strata.lines[1, 'line']
    } else {
      line.vec[k] <- strata.lines[2, 'line']
    }
  } else {
    line.vec[k] <- strata.lines[1, 'line']
  }
  line <- transect.data[transect.data$line == line.vec[k],]
  # NOTE(review): ifelse() used purely for its side effect; a plain
  # if/else would be clearer here.
  ifelse(line$offshore == 0,
         ccData$Region[k] <- 'inshore',
         ccData$Region[k] <- 'offshore')
}
ccData$line.strata <- line.vec

obs <- data.frame(Region.Label = 'SCB',
                  object = seq(1, dim(ccData)[1]),
                  Sample.Label = ccData$line.strata)

# sample.table needs region, line IDs, and effort:
sample <- data.frame(Sample.Label = lineIDs,
                     Effort = effortByLine,
                     Region.Label = 'SCB')
# sample$Region.Label <- NA
# for (k in 1:nrow(sample)){
#   line <- transect.data[transect.data$line == sample$Sample.Label[k],]
#   ifelse(line$offshore == 0,
#          sample$Region.Label[k] <- 'inshore',
#          sample$Region.Label[k] <- 'offshore')
# }

# may not need to truncate because observers were instructed
# not to look too far away; changed from 15% to 1%
# Candidate detection functions: key = half-normal (hn) or hazard-rate
# (hr), with/without cosine adjustments and the Beaufort covariate.
run.hr.cos.bft <- ds(data = ccData, truncation="1%", key = "hr", formula = ~ Beaufort, adjustment = 'cos', region.table = region, sample.table = sample, obs.table = obs)
run.hr.cos.1 <- ds(data = ccData, truncation="1%", key = "hr", adjustment = "cos", formula = ~ 1, region.table = region, sample.table = sample, obs.table = obs)
run.hr.bft <- ds(data = ccData, truncation="1%", key = "hr", formula = ~ Beaufort, adjustment = NULL, region.table = region, sample.table = sample, obs.table = obs)
run.hn.bft <- ds(data = ccData, truncation="1%", key = "hn", adjustment = NULL, formula = ~ Beaufort, region.table = region, sample.table = sample, obs.table = obs)
run.hr.null.1 <- ds(data = ccData, truncation="1%", key = "hr", adjustment = NULL, formula = ~ 1, region.table = region, sample.table = sample, obs.table = obs)
run.hn.cos.bft <- ds(data = ccData, truncation="1%", key = "hn", formula = ~ Beaufort, adjustment = 'cos', region.table = region, sample.table = sample, obs.table = obs)
run.hn.cos.1 <- ds(data = ccData, truncation="1%", key = "hn", adjustment = "cos", formula = ~ 1, region.table = region, sample.table = sample, obs.table = obs)
run.hn.null.1 <- ds(data = ccData, truncation="1%", key = "hn", adjustment = NULL, formula = ~ 1, region.table = region, sample.table = sample, obs.table = obs)

# goodness-of-fit for each fitted model
gof.hn.null.1 <- ds.gof(run.hn.null.1)
gof.hr.null.1 <- ds.gof(run.hr.null.1)
gof.hr.cos.1 <- ds.gof(run.hr.cos.1)
gof.hn.cos.1 <- ds.gof(run.hn.cos.1)
gof.hr.cos.bft <- ds.gof(run.hr.cos.bft)
gof.hn.cos.bft <- ds.gof(run.hn.cos.bft)

# model comparison by AIC
AIC_all <- c('HN.Cos.Bft' = AIC(run.hn.cos.bft),
             'HR.Cos.Bft' = AIC(run.hr.cos.bft),
             'HN.Cos' = AIC(run.hn.cos.1),
             'HR.Cos' = AIC(run.hr.cos.1),
             'HN.Bft' = AIC(run.hn.bft),
             'HR.Bft' = AIC(run.hr.bft),
             'HN' = AIC(run.hn.null.1),
             'HR' = AIC(run.hr.null.1))
minAIC <- min(AIC_all)
deltaAIC <- AIC_all - min(AIC_all)

# AIC(run.hn.cos.bft)
# AIC(run.hr.cos.bft)
# AIC(run.hn.cos.1)
# AIC(run.hr.cos.1)
# AIC(run.hn.null.1)
# AIC(run.hr.null.1)

# HR + null is the best:
check.mono(run.hr.null.1$ddf, plot=T, n.pts=100)

# save the results - just the best one:
save(list = ls(), file = paste0("RData/HR_Null_1_NoStrata_out_", Sys.Date(), ".RData"))
# Auto-extracted runnable example for
# NISTunits::NISTwattPerMeterKtOukThUnITInchPerHourSqrFtDegF.
library(NISTunits)

### Name: NISTwattPerMeterKtOukThUnITInchPerHourSqrFtDegF
### Title: Convert watt per meter kelvin to British thermal unitIT inch per
###   hour square foot degree Fahrenheit
### Aliases: NISTwattPerMeterKtOukThUnITInchPerHourSqrFtDegF
### Keywords: programming

### ** Examples

# Convert 10 W/(m K) to BtuIT in/(h ft^2 degF).
NISTwattPerMeterKtOukThUnITInchPerHourSqrFtDegF(10)
/data/genthat_extracted_code/NISTunits/examples/NISTwattPerMeterKtOukThUnITInchPerHourSqrFtDegF.Rd.R
no_license
surayaaramli/typeRrh
R
false
false
356
r
# Auto-extracted runnable example for
# NISTunits::NISTwattPerMeterKtOukThUnITInchPerHourSqrFtDegF.
library(NISTunits)

### Name: NISTwattPerMeterKtOukThUnITInchPerHourSqrFtDegF
### Title: Convert watt per meter kelvin to British thermal unitIT inch per
###   hour square foot degree Fahrenheit
### Aliases: NISTwattPerMeterKtOukThUnITInchPerHourSqrFtDegF
### Keywords: programming

### ** Examples

# Convert 10 W/(m K) to BtuIT in/(h ft^2 degF).
NISTwattPerMeterKtOukThUnITInchPerHourSqrFtDegF(10)
##' Calculate the rank (day-of-year) of a date
##'
##' Returns the 1-based day-of-year of a date: Jan 1 is 1, Feb 1 is 32,
##' and Dec 31 is 365 (366 in leap years).
##' @title Calculate the rank of a date
##' @param x A date, given as a \code{Date} object or a "YYYY-MM-DD" string.
##' @param ... Other parameters (currently unused).
##' @return The rank of the date within its year, as a numeric value.
##' @export
##' @examples rankofday("2013-01-25");rankofday(as.Date("2013-01-25"))
##' @author Yishuo Deng
rankofday <- function(x, ...) {
  # Days elapsed since Jan 1 of the same year, plus one.  substr()
  # works for both Date and character input because it coerces its
  # argument to character first.
  year_start <- as.Date(paste(substr(x, 1, 4), "01-01", sep = "-"))
  as.numeric(as.Date(x) - year_start) + 1
}
/quantutils/R/rankofday.r
no_license
dengyishuo/dengyishuo.github.com
R
false
false
431
r
##' Calulates the rank of a Date ##' ##' Calulates the rank of a Date ##' @title Calulates the rank of a Date ##' @param Date A date. ##' @param ... Other parameters. ##' @return The rank of the date ##' @export ##' @examples rankofday("2013-01-25");rankofday(as.Date("2013-01-25")) ##' @author Yishuo Deng rankofday <- function(x,...){ as.numeric(as.Date(x)-as.Date(paste(substr(x,1,4),"01-01",sep="-")))+1; }
\name{gamsel-internal} \title{Internal gamsel functions} \alias{basis.subset} \alias{df.inv} \alias{fracdev} \alias{gendata} \alias{getmin} \alias{norm2} \alias{poly} \alias{getmin} \alias{summarynz} \alias{summaryplot} \alias{error.bars} \description{Internal gamsel functions} \usage{ basis.subset(basis.object,subset,...) } \author{Trevor Hastie} \details{These are not intended for use by users. } \keyword{internal}
/man/gamsel-internal.Rd
no_license
egenn/gamsel2
R
false
false
421
rd
\name{gamsel-internal} \title{Internal gamsel functions} \alias{basis.subset} \alias{df.inv} \alias{fracdev} \alias{gendata} \alias{getmin} \alias{norm2} \alias{poly} \alias{getmin} \alias{summarynz} \alias{summaryplot} \alias{error.bars} \description{Internal gamsel functions} \usage{ basis.subset(basis.object,subset,...) } \author{Trevor Hastie} \details{These are not intended for use by users. } \keyword{internal}
rdirichletCS <- function (n, alpha) { ## This actually allows matrix alpha, like the writeup of rdirichlet says, but not like the ## actual rdirichlet in MCMCpack. The rows of alpha correspond to separate draws. if (is.null(dim(alpha))) { l <- length(alpha) } else { l <- dim(alpha)[2] stopifnot(n == dim(alpha)[1]) } x <- matrix(rgamma(l * n, t(alpha)), nrow = l) sm <- rep(1, l) %*% x return(t(x)/as.vector(sm)) }
/Sims_svn/HiddenChain/trunk/rdirichletCS.R
no_license
Allisterh/VAR_Sims_rfvar
R
false
false
446
r
rdirichletCS <- function (n, alpha) { ## This actually allows matrix alpha, like the writeup of rdirichlet says, but not like the ## actual rdirichlet in MCMCpack. The rows of alpha correspond to separate draws. if (is.null(dim(alpha))) { l <- length(alpha) } else { l <- dim(alpha)[2] stopifnot(n == dim(alpha)[1]) } x <- matrix(rgamma(l * n, t(alpha)), nrow = l) sm <- rep(1, l) %*% x return(t(x)/as.vector(sm)) }
# LocusOfMarking aggregates # # This file is an AUTOTYP aggregation # # For questions, open an issue # # Copyright 2022 Taras Zakharko (CC BY 4.0). # ███████╗███████╗████████╗██╗ ██╗██████╗ # ██╔════╝██╔════╝╚══██╔══╝██║ ██║██╔══██╗ # ███████╗█████╗ ██║ ██║ ██║██████╔╝ # ╚════██║██╔══╝ ██║ ██║ ██║██╔═══╝ # ███████║███████╗ ██║ ╚██████╔╝██║ # ╚══════╝╚══════╝ ╚═╝ ╚═════╝ ╚═╝ # source("R/plugin-support.R") # convert snake case to camel case to_camel_case <- function(x) { str_split(x, "[- _]+") %>% map_chr(~ { # capitalize each word . <- map_chr(., ~ { substr(., 1L, 1L) <- toupper(substr(., 1L, 1L)); . }) # collapse them together str_flatten(., "") }) } # ███╗ ███╗ █████╗ ██████╗██████╗ ██████╗ ██████╗ ██████╗ ██╗ ███████╗ # ████╗ ████║██╔══██╗██╔════╝██╔══██╗██╔═══██╗██╔══██╗██╔═══██╗██║ ██╔════╝ # ██╔████╔██║███████║██║ ██████╔╝██║ ██║██████╔╝██║ ██║██║ █████╗ # ██║╚██╔╝██║██╔══██║██║ ██╔══██╗██║ ██║██╔══██╗██║ ██║██║ ██╔══╝ # ██║ ╚═╝ ██║██║ ██║╚██████╗██║ ██║╚██████╔╝██║ ██║╚██████╔╝███████╗███████╗ # ╚═╝ ╚═╝╚═╝ ╚═╝ ╚═════╝╚═╝ ╚═╝ ╚═════╝ ╚═╝ ╚═╝ ╚═════╝ ╚══════╝╚══════╝ # # Aggregate over default locus of marking for syntactic macrorelations # If we have multiple loci of marking (via multipe microrelatinons), we choose # the most representative relation (see code for procedure) DefaultLocusOfMarkingPerMacrorelation <- LocusOfMarkingPerMicrorelation %>% # unnnest the data and take only the default locus unnest(LocusOfMarking) %>% filter(IsDefaultLocusOfMarking) %>% # remove all unnessesary columns select( LID, Language, Microrelation, Macrorelation, LocusOfMarking, LocusOfMarkingBinned5, LocusOfMarkingBinned6 ) %>% distinct() %>% # determine the locus of markign for each macrorelation group_by(LID, Language, Macrorelation) %>% summarize( # no ambiguity, take the first row if(n_distinct(LocusOfMarking) == 1L) { select(cur_data(), -Microrelation)[1L, ] } else # disambiguate A (A-default comes first) if(Macrorelation[[1L]] == "A" && "A-default" %in% Microrelation) { 
filter(cur_data(), Microrelation == "A-default") %>% select(-Microrelation) } else # disambiguate A (Act comes next) if(Macrorelation[[1L]] == "A" && "Act" %in% Microrelation) { filter(cur_data(), Microrelation == "Act") %>% select(-Microrelation) } else # disambiguate P (U-default comes first) if(Macrorelation[[1L]] == "P" && "U-default" %in% Microrelation) { filter(cur_data(), Microrelation == "U-default") %>% select(-Microrelation) } else # disambiguate P (Pat comes next) if(Macrorelation[[1L]] == "P" && "Pat" %in% Microrelation) { filter(cur_data(), Microrelation == "Pat") %>% select(-Microrelation) } # no default marking can be established, return NA else { NA }, .groups = "drop" ) %>% # add glottocodes left_join(select(Register, LID, Glottocode), by = "LID") %>% select(LID, Glottocode, Language, everything()) # ███████╗██╗ ██╗███╗ ███╗███╗ ███╗ █████╗ ██████╗ ██╗ ██╗ # ██╔════╝██║ ██║████╗ ████║████╗ ████║██╔══██╗██╔══██╗╚██╗ ██╔╝ # ███████╗██║ ██║██╔████╔██║██╔████╔██║███████║██████╔╝ ╚████╔╝ # ╚════██║██║ ██║██║╚██╔╝██║██║╚██╔╝██║██╔══██║██╔══██╗ ╚██╔╝ # ███████║╚██████╔╝██║ ╚═╝ ██║██║ ╚═╝ ██║██║ ██║██║ ██║ ██║ # ╚══════╝ ╚═════╝ ╚═╝ ╚═╝╚═╝ ╚═╝╚═╝ ╚═╝╚═╝ ╚═╝ ╚═╝ # # Agregate various aspects of microrelation marking in a per-language # summary table # # Presense of head and dependent marking for various macrorelations MarkingPresence <- LocusOfMarkingPerMicrorelation %>% unnest(LocusOfMarking) %>% filter(IsDefaultLocusOfMarking) %>% filter(!is.na(Macrorelation)) %>% group_by(LID, Language, Macrorelation) %>% summarize( HasHeadMarking = any(LocusOfMarkingBinned5 %in% c("2", "H")), HasDependentMarking = any(LocusOfMarkingBinned5 %in% c("2", "D")), .groups = "drop" ) %>% mutate(Macrorelation = to_camel_case(Macrorelation)) %>% pivot_wider( names_from = Macrorelation, values_from = c(HasHeadMarking, HasDependentMarking), names_glue = "{.value}For{Macrorelation}", values_fill = FALSE ) %>% # combinations of role pairs mutate( HasHeadMarkingForSAndA = 
HasHeadMarkingForS & HasHeadMarkingForA, HasHeadMarkingForSOrA = HasHeadMarkingForS | HasHeadMarkingForA, HasHeadMarkingForAAndP = HasHeadMarkingForA & HasHeadMarkingForP, HasHeadMarkingForAOrP = HasHeadMarkingForA | HasHeadMarkingForP, HasDependentMarkingForSAndA = HasDependentMarkingForS & HasDependentMarkingForA, HasDependentMarkingForSOrA = HasDependentMarkingForS | HasDependentMarkingForA, HasDependentMarkingForAAndP = HasDependentMarkingForA & HasDependentMarkingForP, HasDependentMarkingForAOrP = HasDependentMarkingForA | HasDependentMarkingForP ) # Locus of marking for various microrelations conditioned by morphosyntactic categories MarkingPerMicrorelation <- LocusOfMarkingPerMicrorelation %>% unnest(LocusOfMarking) %>% mutate(RoleCatLabel = str_c( to_camel_case(Microrelation), if_else( IsDefaultLocusOfMarking, "", str_c("If", to_camel_case(as.character(LocusOfMarkingCategoryCondition) %|% "Other")) ) ) ) %>% select( LID, Language, RoleCatLabel, LocusOfMarking, LocusOfMarkingBinned5, LocusOfMarkingBinned6 ) %>% pivot_wider( names_from=RoleCatLabel, values_from=c(LocusOfMarking, LocusOfMarkingBinned5, LocusOfMarkingBinned6), names_glue = "{.value}For{RoleCatLabel}", values_fn = function(x) { x <- unique(x) if(length(x) > 1) "multiple" else x }, values_fill = NA ) # combine both into one very large wide table LocusOfMarkingPerLanguage <- inner_join( MarkingPresence, MarkingPerMicrorelation, by = c("LID", "Language") ) %>% # add glottocodes left_join(select(Register, LID, Glottocode), by = "LID") %>% select(LID, Glottocode, Language, everything()) %>% arrange(LID, Language) # TODO: improve this descriptor <- describe_data( ptype = tibble(), description = "Locus of marking, aggregated per language", computed = "LocusOfMarking.R", fields = c( .metadata$Register$fields[c("LID", "Language", "Glottocode")], map(setdiff(names(LocusOfMarkingPerLanguage), c("LID", "Language", "Glottocode")), ~ { descriptor <- describe_data( ptype = 
if(is.logical(LocusOfMarkingPerLanguage[[.]])) logical() else factor(), computed = "LocusOfMarking.R", description = "<pending>" ) # fix factors if(is.factor(descriptor$ptype)) { # variable name var <- gsub("For.+$", "", .) dd <- .metadata$LocusOfMarkingPerMicrorelation$fields$LocusOfMarking$element$fields[[var]] !is_null(dd) || abort("Unknown variable {var}") descriptor$levels <- add_row(dd$levels, level = "multiple", description = "multiple different loci" ) descriptor <- fix_metadata_levels(descriptor, LocusOfMarkingPerLanguage[[.]]) LocusOfMarkingPerLanguage[[.]] <<- factor( as.character(LocusOfMarkingPerLanguage[[.]]), levels = levels(descriptor$ptype) ) } descriptor }) %>% set_names(setdiff(names(LocusOfMarkingPerLanguage), c("LID", "Language", "Glottocode"))) ) ) export_dataset("LocusOfMarkingPerLanguage", LocusOfMarkingPerLanguage, descriptor, c("PerLanguageSummaries", "Morphology")) descriptor <- describe_data( ptype = tibble(), description = "Default locus of marking, aggregated per language and macrorelation", computed = "LocusOfMarking.R", fields = list( LID = .metadata$Register$fields$LID, Language = .metadata$Register$fields$Language, Glottocode = .metadata$Register$fields$Glottocode, Macrorelation = .metadata$LocusOfMarkingPerMicrorelation$fields$Macrorelation, LocusOfMarking = .metadata$LocusOfMarking$fields$LocusOfMarking, LocusOfMarkingBinned5 = .metadata$LocusOfMarking$fields$LocusOfMarkingBinned5, LocusOfMarkingBinned6 = .metadata$LocusOfMarking$fields$LocusOfMarkingBinned6 ) ) export_dataset("DefaultLocusOfMarkingPerMacrorelation", DefaultLocusOfMarkingPerMacrorelation, descriptor, "Morphology")
/aggregation-scripts/LocusOfMarking.R
permissive
autotyp/autotyp-data
R
false
false
10,180
r
# LocusOfMarking aggregates # # This file is an AUTOTYP aggregation # # For questions, open an issue # # Copyright 2022 Taras Zakharko (CC BY 4.0). # ███████╗███████╗████████╗██╗ ██╗██████╗ # ██╔════╝██╔════╝╚══██╔══╝██║ ██║██╔══██╗ # ███████╗█████╗ ██║ ██║ ██║██████╔╝ # ╚════██║██╔══╝ ██║ ██║ ██║██╔═══╝ # ███████║███████╗ ██║ ╚██████╔╝██║ # ╚══════╝╚══════╝ ╚═╝ ╚═════╝ ╚═╝ # source("R/plugin-support.R") # convert snake case to camel case to_camel_case <- function(x) { str_split(x, "[- _]+") %>% map_chr(~ { # capitalize each word . <- map_chr(., ~ { substr(., 1L, 1L) <- toupper(substr(., 1L, 1L)); . }) # collapse them together str_flatten(., "") }) } # ███╗ ███╗ █████╗ ██████╗██████╗ ██████╗ ██████╗ ██████╗ ██╗ ███████╗ # ████╗ ████║██╔══██╗██╔════╝██╔══██╗██╔═══██╗██╔══██╗██╔═══██╗██║ ██╔════╝ # ██╔████╔██║███████║██║ ██████╔╝██║ ██║██████╔╝██║ ██║██║ █████╗ # ██║╚██╔╝██║██╔══██║██║ ██╔══██╗██║ ██║██╔══██╗██║ ██║██║ ██╔══╝ # ██║ ╚═╝ ██║██║ ██║╚██████╗██║ ██║╚██████╔╝██║ ██║╚██████╔╝███████╗███████╗ # ╚═╝ ╚═╝╚═╝ ╚═╝ ╚═════╝╚═╝ ╚═╝ ╚═════╝ ╚═╝ ╚═╝ ╚═════╝ ╚══════╝╚══════╝ # # Aggregate over default locus of marking for syntactic macrorelations # If we have multiple loci of marking (via multipe microrelatinons), we choose # the most representative relation (see code for procedure) DefaultLocusOfMarkingPerMacrorelation <- LocusOfMarkingPerMicrorelation %>% # unnnest the data and take only the default locus unnest(LocusOfMarking) %>% filter(IsDefaultLocusOfMarking) %>% # remove all unnessesary columns select( LID, Language, Microrelation, Macrorelation, LocusOfMarking, LocusOfMarkingBinned5, LocusOfMarkingBinned6 ) %>% distinct() %>% # determine the locus of markign for each macrorelation group_by(LID, Language, Macrorelation) %>% summarize( # no ambiguity, take the first row if(n_distinct(LocusOfMarking) == 1L) { select(cur_data(), -Microrelation)[1L, ] } else # disambiguate A (A-default comes first) if(Macrorelation[[1L]] == "A" && "A-default" %in% Microrelation) { 
filter(cur_data(), Microrelation == "A-default") %>% select(-Microrelation) } else # disambiguate A (Act comes next) if(Macrorelation[[1L]] == "A" && "Act" %in% Microrelation) { filter(cur_data(), Microrelation == "Act") %>% select(-Microrelation) } else # disambiguate P (U-default comes first) if(Macrorelation[[1L]] == "P" && "U-default" %in% Microrelation) { filter(cur_data(), Microrelation == "U-default") %>% select(-Microrelation) } else # disambiguate P (Pat comes next) if(Macrorelation[[1L]] == "P" && "Pat" %in% Microrelation) { filter(cur_data(), Microrelation == "Pat") %>% select(-Microrelation) } # no default marking can be established, return NA else { NA }, .groups = "drop" ) %>% # add glottocodes left_join(select(Register, LID, Glottocode), by = "LID") %>% select(LID, Glottocode, Language, everything()) # ███████╗██╗ ██╗███╗ ███╗███╗ ███╗ █████╗ ██████╗ ██╗ ██╗ # ██╔════╝██║ ██║████╗ ████║████╗ ████║██╔══██╗██╔══██╗╚██╗ ██╔╝ # ███████╗██║ ██║██╔████╔██║██╔████╔██║███████║██████╔╝ ╚████╔╝ # ╚════██║██║ ██║██║╚██╔╝██║██║╚██╔╝██║██╔══██║██╔══██╗ ╚██╔╝ # ███████║╚██████╔╝██║ ╚═╝ ██║██║ ╚═╝ ██║██║ ██║██║ ██║ ██║ # ╚══════╝ ╚═════╝ ╚═╝ ╚═╝╚═╝ ╚═╝╚═╝ ╚═╝╚═╝ ╚═╝ ╚═╝ # # Agregate various aspects of microrelation marking in a per-language # summary table # # Presense of head and dependent marking for various macrorelations MarkingPresence <- LocusOfMarkingPerMicrorelation %>% unnest(LocusOfMarking) %>% filter(IsDefaultLocusOfMarking) %>% filter(!is.na(Macrorelation)) %>% group_by(LID, Language, Macrorelation) %>% summarize( HasHeadMarking = any(LocusOfMarkingBinned5 %in% c("2", "H")), HasDependentMarking = any(LocusOfMarkingBinned5 %in% c("2", "D")), .groups = "drop" ) %>% mutate(Macrorelation = to_camel_case(Macrorelation)) %>% pivot_wider( names_from = Macrorelation, values_from = c(HasHeadMarking, HasDependentMarking), names_glue = "{.value}For{Macrorelation}", values_fill = FALSE ) %>% # combinations of role pairs mutate( HasHeadMarkingForSAndA = 
HasHeadMarkingForS & HasHeadMarkingForA, HasHeadMarkingForSOrA = HasHeadMarkingForS | HasHeadMarkingForA, HasHeadMarkingForAAndP = HasHeadMarkingForA & HasHeadMarkingForP, HasHeadMarkingForAOrP = HasHeadMarkingForA | HasHeadMarkingForP, HasDependentMarkingForSAndA = HasDependentMarkingForS & HasDependentMarkingForA, HasDependentMarkingForSOrA = HasDependentMarkingForS | HasDependentMarkingForA, HasDependentMarkingForAAndP = HasDependentMarkingForA & HasDependentMarkingForP, HasDependentMarkingForAOrP = HasDependentMarkingForA | HasDependentMarkingForP ) # Locus of marking for various microrelations conditioned by morphosyntactic categories MarkingPerMicrorelation <- LocusOfMarkingPerMicrorelation %>% unnest(LocusOfMarking) %>% mutate(RoleCatLabel = str_c( to_camel_case(Microrelation), if_else( IsDefaultLocusOfMarking, "", str_c("If", to_camel_case(as.character(LocusOfMarkingCategoryCondition) %|% "Other")) ) ) ) %>% select( LID, Language, RoleCatLabel, LocusOfMarking, LocusOfMarkingBinned5, LocusOfMarkingBinned6 ) %>% pivot_wider( names_from=RoleCatLabel, values_from=c(LocusOfMarking, LocusOfMarkingBinned5, LocusOfMarkingBinned6), names_glue = "{.value}For{RoleCatLabel}", values_fn = function(x) { x <- unique(x) if(length(x) > 1) "multiple" else x }, values_fill = NA ) # combine both into one very large wide table LocusOfMarkingPerLanguage <- inner_join( MarkingPresence, MarkingPerMicrorelation, by = c("LID", "Language") ) %>% # add glottocodes left_join(select(Register, LID, Glottocode), by = "LID") %>% select(LID, Glottocode, Language, everything()) %>% arrange(LID, Language) # TODO: improve this descriptor <- describe_data( ptype = tibble(), description = "Locus of marking, aggregated per language", computed = "LocusOfMarking.R", fields = c( .metadata$Register$fields[c("LID", "Language", "Glottocode")], map(setdiff(names(LocusOfMarkingPerLanguage), c("LID", "Language", "Glottocode")), ~ { descriptor <- describe_data( ptype = 
if(is.logical(LocusOfMarkingPerLanguage[[.]])) logical() else factor(), computed = "LocusOfMarking.R", description = "<pending>" ) # fix factors if(is.factor(descriptor$ptype)) { # variable name var <- gsub("For.+$", "", .) dd <- .metadata$LocusOfMarkingPerMicrorelation$fields$LocusOfMarking$element$fields[[var]] !is_null(dd) || abort("Unknown variable {var}") descriptor$levels <- add_row(dd$levels, level = "multiple", description = "multiple different loci" ) descriptor <- fix_metadata_levels(descriptor, LocusOfMarkingPerLanguage[[.]]) LocusOfMarkingPerLanguage[[.]] <<- factor( as.character(LocusOfMarkingPerLanguage[[.]]), levels = levels(descriptor$ptype) ) } descriptor }) %>% set_names(setdiff(names(LocusOfMarkingPerLanguage), c("LID", "Language", "Glottocode"))) ) ) export_dataset("LocusOfMarkingPerLanguage", LocusOfMarkingPerLanguage, descriptor, c("PerLanguageSummaries", "Morphology")) descriptor <- describe_data( ptype = tibble(), description = "Default locus of marking, aggregated per language and macrorelation", computed = "LocusOfMarking.R", fields = list( LID = .metadata$Register$fields$LID, Language = .metadata$Register$fields$Language, Glottocode = .metadata$Register$fields$Glottocode, Macrorelation = .metadata$LocusOfMarkingPerMicrorelation$fields$Macrorelation, LocusOfMarking = .metadata$LocusOfMarking$fields$LocusOfMarking, LocusOfMarkingBinned5 = .metadata$LocusOfMarking$fields$LocusOfMarkingBinned5, LocusOfMarkingBinned6 = .metadata$LocusOfMarking$fields$LocusOfMarkingBinned6 ) ) export_dataset("DefaultLocusOfMarkingPerMacrorelation", DefaultLocusOfMarkingPerMacrorelation, descriptor, "Morphology")
library(tidyverse) library(ggpubr) setwd(dirname(rstudioapi::getActiveDocumentContext()$path)) load(file="../model_outputs/Rda_files/df_maintext.Rda") #order: conformity, memory, recent exp bias, risk-appetite #### Chi #### df_ABM_maintext = df_ABM_maintext %>% filter(EWA_sigma=="medium", EWA_rho=="medium", EWA_alpha=="risk-neutral", memory_window==10) %>% mutate(timestep=timestep+1) end_point = max(df_ABM_maintext$timestep) df = df_ABM_maintext %>% group_by(sim)%>% complete(timestep = seq(min(timestep), end_point, 1)) %>% arrange(timestep) %>% fill(graph_type,memory_window,EWA_sigma,EWA_rho,EWA_chi,EWA_alpha,sim,timestep,num_know_novel,num_produced_b,full_diffusion) p_chi_a = ggplot(df %>% group_by(sim) %>% filter(full_diffusion==T) %>% slice(head=1) %>% ungroup() %>% group_by(EWA_chi) %>% mutate(mean_timestep=mean(timestep)))+ geom_density(aes(x=timestep, color=as.factor(EWA_chi)),show.legend = F)+ geom_point(aes(group=as.factor(EWA_chi),x=mean_timestep, y=0, color=as.factor(EWA_chi)), show.legend = F)+ scale_color_viridis_d(option = "C", direction=-1, end=0.7)+ scale_x_continuous(limits=c(1,end_point))+ coord_trans(x = "sqrt")+ labs(x="Time",y="Prop. knowledgable",color="Conformity bias")+ theme_void() p_chi_b = ggplot(df, aes(x=timestep,y=num_know_novel,color=as.factor(EWA_chi)))+ geom_line(aes(group=sim),alpha=0.1)+ stat_summary(size=1, fun = mean, geom=c("line"))+ stat_summary(geom="errorbar", fun.data = "mean_cl_boot")+ stat_summary(geom="point", size=1.5, fun.data = "mean_cl_boot")+ scale_color_viridis_d(option = "C", direction=-1, end=0.7)+ scale_x_continuous(limits=c(1,end_point), breaks=c(1,25,100,200,350))+ coord_trans(x = "sqrt")+ scale_y_continuous(limits=c(0,1))+ labs(x="Time",y="Prop. 
knowledgable",color="Conformity bias")+ theme_classic() g_chi = ggarrange(p_chi_a,p_chi_b,ncol=1,nrow=2,heights=c(1,4),legend = "right",align="v",common.legend = T) #### memory #### load(file="../model_outputs/Rda_files/df_maintext.Rda") df_ABM_maintext = df_ABM_maintext %>% filter(EWA_chi=="linear bias", EWA_sigma=="medium", EWA_rho=="medium", EWA_alpha=="risk-neutral") %>% mutate(timestep=timestep+1) summary(df_ABM_maintext) end_point = max(df_ABM_maintext$timestep) df = df_ABM_maintext %>% group_by(sim)%>% complete(timestep = seq(min(timestep), end_point, 1)) %>% arrange(timestep) %>% fill(graph_type,memory_window,EWA_sigma,EWA_rho,EWA_chi,EWA_alpha,sim,timestep,num_know_novel,full_diffusion) p_mem_a = ggplot(df %>% group_by(sim) %>% filter(full_diffusion==T) %>% slice(head=1) %>% ungroup() %>% group_by(memory_window) %>% mutate(mean_timestep=mean(timestep)))+ geom_density(aes(x=timestep, color=as.factor(memory_window)),show.legend = F)+ geom_point(aes(group=as.factor(memory_window),x=mean_timestep,y=0, color=as.factor(memory_window)), show.legend = F)+ scale_color_viridis_d(option = "C", direction=-1, end=0.7)+ scale_x_continuous(limits=c(1,end_point))+ coord_trans(x = "sqrt")+ labs(x="Time",y="Prop. knowledgable",color="Memory window")+ theme_void() p_mem_b = ggplot(df, aes(x=timestep,y=num_know_novel,color=as.factor(memory_window)))+ geom_line(aes(group=sim),alpha=0.1)+ stat_summary(size=1, fun = mean, geom=c("line"))+ stat_summary(geom="errorbar", fun.data = "mean_cl_boot")+ stat_summary(geom="point", size=1.5, fun.data = "mean_cl_boot")+ scale_color_viridis_d(option = "C", direction=-1, end=0.7)+ scale_x_continuous(limits=c(1,end_point), breaks=c(1,25,100,200,350))+ coord_trans(x = "sqrt")+ scale_y_continuous(limits=c(0,1))+ labs(x="Time",y="Prop. 
knowledgable",color="Memory window")+ theme_classic() g_m = ggarrange(p_mem_a,p_mem_b,ncol=1,nrow=2,heights=c(1,4),legend = "right",align="v",common.legend = T) #### Rho #### load(file="../model_outputs/Rda_files/df_maintext.Rda") df_ABM_maintext = df_ABM_maintext %>% filter(EWA_chi=="linear bias", EWA_sigma=="medium", EWA_alpha=="risk-neutral", memory_window==10) %>% mutate(timestep=timestep+1) end_point = max(df_ABM_maintext$timestep) df = df_ABM_maintext %>% group_by(sim)%>% complete(timestep = seq(min(timestep), end_point, 1)) %>% arrange(timestep) %>% fill(graph_type,memory_window,EWA_sigma,EWA_rho,EWA_chi,EWA_alpha,sim,timestep,num_know_novel,full_diffusion) p_rho_a = ggplot(df %>% group_by(sim) %>% filter(full_diffusion==T) %>% slice(head=1) %>% ungroup() %>% group_by(EWA_rho) %>% mutate(mean_timestep=mean(timestep)))+ geom_density(aes(x=timestep, color=as.factor(EWA_rho)),show.legend = F)+ geom_point(aes(group=as.factor(EWA_rho),x=mean_timestep,y=0, color=as.factor(EWA_rho)), show.legend = F)+ scale_color_viridis_d(option = "C", direction=-1, end=0.7)+ scale_x_continuous(limits=c(1,end_point))+ coord_trans(x = "sqrt")+ labs(x="Time",y="Prop. knowledgable",color="Recent exp. bias")+ theme_void() p_rho_b = ggplot(df, aes(x=timestep,y=num_know_novel,color=as.factor(EWA_rho)))+ geom_line(aes(group=sim),alpha=0.1)+ stat_summary(size=1, fun = mean, geom=c("line"))+ stat_summary(geom="errorbar", fun.data = "mean_cl_boot")+ stat_summary(geom="point", size=1.5, fun.data = "mean_cl_boot")+ scale_color_viridis_d(option = "C", direction=-1, end=0.7)+ scale_x_continuous(limits=c(1,end_point), breaks=c(1,25,100,200,350))+ coord_trans(x = "sqrt")+ scale_y_continuous(limits=c(0,1))+ labs(x="Time",y="Prop. knowledgable",color="Recent exp. 
bias")+ theme_classic() g_rho = ggarrange(p_rho_a,p_rho_b,ncol=1,nrow=2,heights=c(1,4),legend = "right",align="v",common.legend = T) #### Alpha #### load(file="../model_outputs/Rda_files/df_maintext.Rda") df_ABM_maintext = df_ABM_maintext %>% filter(EWA_chi=="linear bias", EWA_rho=="medium", EWA_sigma=="medium", memory_window==10) %>% mutate(timestep=timestep+1) end_point = max(df_ABM_maintext$timestep) df = df_ABM_maintext %>% group_by(sim)%>% complete(timestep = seq(min(timestep), end_point, 1)) %>% arrange(timestep) %>% fill(graph_type,memory_window,EWA_sigma,EWA_rho,EWA_chi,EWA_alpha,sim,timestep,num_know_novel,full_diffusion) p_alpha_a = ggplot(df %>% group_by(sim) %>% filter(full_diffusion==T) %>% slice(head=1) %>% ungroup() %>% group_by(EWA_alpha) %>% mutate(mean_timestep=mean(timestep)))+ geom_density(aes(x=timestep, color=as.factor(EWA_alpha)),show.legend = F)+ geom_point(aes(group=as.factor(EWA_alpha),x=mean_timestep,y=0, color=as.factor(EWA_alpha)), show.legend = F)+ scale_color_viridis_d(option = "C", direction=-1, end=0.7)+ scale_x_continuous(limits=c(1,end_point))+ coord_trans(x = "sqrt")+ labs(x="Time",y="Prop. knowledgable",color="Risk-appetite")+ theme_void() p_alpha_b = ggplot(df, aes(x=timestep,y=num_know_novel,color=as.factor(EWA_alpha)))+ geom_line(aes(group=sim),alpha=0.1)+ stat_summary(size=1, fun = mean, geom=c("line"))+ stat_summary(geom="errorbar", fun.data = "mean_cl_boot")+ stat_summary(geom="point", size=1.5, fun.data = "mean_cl_boot")+ scale_color_viridis_d(option = "C", direction=-1, end=0.7)+ scale_x_continuous(limits=c(1,end_point), breaks=c(1,25,100,200,350))+ coord_trans(x = "sqrt")+ scale_y_continuous(limits=c(0,1))+ labs(x="Time",y="Prop. 
knowledgable",color="Risk-appetite")+ theme_classic() g_alpha = ggarrange(p_alpha_a,p_alpha_b,ncol=1,nrow=2,heights=c(1,4),legend = "right",align="v",common.legend = T) ggarrange(g_chi, g_m, g_rho, g_alpha, labels = c("A","B","C","D")) ggsave(file="../output/Fig_S_all_production_params.png",width=15,height=13,scale=2,units="cm")
/analysis/Figure_S_diffusion_all_params.R
no_license
michaelchimento/acquisition_production_abm
R
false
false
7,505
r
library(tidyverse) library(ggpubr) setwd(dirname(rstudioapi::getActiveDocumentContext()$path)) load(file="../model_outputs/Rda_files/df_maintext.Rda") #order: conformity, memory, recent exp bias, risk-appetite #### Chi #### df_ABM_maintext = df_ABM_maintext %>% filter(EWA_sigma=="medium", EWA_rho=="medium", EWA_alpha=="risk-neutral", memory_window==10) %>% mutate(timestep=timestep+1) end_point = max(df_ABM_maintext$timestep) df = df_ABM_maintext %>% group_by(sim)%>% complete(timestep = seq(min(timestep), end_point, 1)) %>% arrange(timestep) %>% fill(graph_type,memory_window,EWA_sigma,EWA_rho,EWA_chi,EWA_alpha,sim,timestep,num_know_novel,num_produced_b,full_diffusion) p_chi_a = ggplot(df %>% group_by(sim) %>% filter(full_diffusion==T) %>% slice(head=1) %>% ungroup() %>% group_by(EWA_chi) %>% mutate(mean_timestep=mean(timestep)))+ geom_density(aes(x=timestep, color=as.factor(EWA_chi)),show.legend = F)+ geom_point(aes(group=as.factor(EWA_chi),x=mean_timestep, y=0, color=as.factor(EWA_chi)), show.legend = F)+ scale_color_viridis_d(option = "C", direction=-1, end=0.7)+ scale_x_continuous(limits=c(1,end_point))+ coord_trans(x = "sqrt")+ labs(x="Time",y="Prop. knowledgable",color="Conformity bias")+ theme_void() p_chi_b = ggplot(df, aes(x=timestep,y=num_know_novel,color=as.factor(EWA_chi)))+ geom_line(aes(group=sim),alpha=0.1)+ stat_summary(size=1, fun = mean, geom=c("line"))+ stat_summary(geom="errorbar", fun.data = "mean_cl_boot")+ stat_summary(geom="point", size=1.5, fun.data = "mean_cl_boot")+ scale_color_viridis_d(option = "C", direction=-1, end=0.7)+ scale_x_continuous(limits=c(1,end_point), breaks=c(1,25,100,200,350))+ coord_trans(x = "sqrt")+ scale_y_continuous(limits=c(0,1))+ labs(x="Time",y="Prop. 
knowledgable",color="Conformity bias")+ theme_classic() g_chi = ggarrange(p_chi_a,p_chi_b,ncol=1,nrow=2,heights=c(1,4),legend = "right",align="v",common.legend = T) #### memory #### load(file="../model_outputs/Rda_files/df_maintext.Rda") df_ABM_maintext = df_ABM_maintext %>% filter(EWA_chi=="linear bias", EWA_sigma=="medium", EWA_rho=="medium", EWA_alpha=="risk-neutral") %>% mutate(timestep=timestep+1) summary(df_ABM_maintext) end_point = max(df_ABM_maintext$timestep) df = df_ABM_maintext %>% group_by(sim)%>% complete(timestep = seq(min(timestep), end_point, 1)) %>% arrange(timestep) %>% fill(graph_type,memory_window,EWA_sigma,EWA_rho,EWA_chi,EWA_alpha,sim,timestep,num_know_novel,full_diffusion) p_mem_a = ggplot(df %>% group_by(sim) %>% filter(full_diffusion==T) %>% slice(head=1) %>% ungroup() %>% group_by(memory_window) %>% mutate(mean_timestep=mean(timestep)))+ geom_density(aes(x=timestep, color=as.factor(memory_window)),show.legend = F)+ geom_point(aes(group=as.factor(memory_window),x=mean_timestep,y=0, color=as.factor(memory_window)), show.legend = F)+ scale_color_viridis_d(option = "C", direction=-1, end=0.7)+ scale_x_continuous(limits=c(1,end_point))+ coord_trans(x = "sqrt")+ labs(x="Time",y="Prop. knowledgable",color="Memory window")+ theme_void() p_mem_b = ggplot(df, aes(x=timestep,y=num_know_novel,color=as.factor(memory_window)))+ geom_line(aes(group=sim),alpha=0.1)+ stat_summary(size=1, fun = mean, geom=c("line"))+ stat_summary(geom="errorbar", fun.data = "mean_cl_boot")+ stat_summary(geom="point", size=1.5, fun.data = "mean_cl_boot")+ scale_color_viridis_d(option = "C", direction=-1, end=0.7)+ scale_x_continuous(limits=c(1,end_point), breaks=c(1,25,100,200,350))+ coord_trans(x = "sqrt")+ scale_y_continuous(limits=c(0,1))+ labs(x="Time",y="Prop. 
knowledgable",color="Memory window")+ theme_classic() g_m = ggarrange(p_mem_a,p_mem_b,ncol=1,nrow=2,heights=c(1,4),legend = "right",align="v",common.legend = T) #### Rho #### load(file="../model_outputs/Rda_files/df_maintext.Rda") df_ABM_maintext = df_ABM_maintext %>% filter(EWA_chi=="linear bias", EWA_sigma=="medium", EWA_alpha=="risk-neutral", memory_window==10) %>% mutate(timestep=timestep+1) end_point = max(df_ABM_maintext$timestep) df = df_ABM_maintext %>% group_by(sim)%>% complete(timestep = seq(min(timestep), end_point, 1)) %>% arrange(timestep) %>% fill(graph_type,memory_window,EWA_sigma,EWA_rho,EWA_chi,EWA_alpha,sim,timestep,num_know_novel,full_diffusion) p_rho_a = ggplot(df %>% group_by(sim) %>% filter(full_diffusion==T) %>% slice(head=1) %>% ungroup() %>% group_by(EWA_rho) %>% mutate(mean_timestep=mean(timestep)))+ geom_density(aes(x=timestep, color=as.factor(EWA_rho)),show.legend = F)+ geom_point(aes(group=as.factor(EWA_rho),x=mean_timestep,y=0, color=as.factor(EWA_rho)), show.legend = F)+ scale_color_viridis_d(option = "C", direction=-1, end=0.7)+ scale_x_continuous(limits=c(1,end_point))+ coord_trans(x = "sqrt")+ labs(x="Time",y="Prop. knowledgable",color="Recent exp. bias")+ theme_void() p_rho_b = ggplot(df, aes(x=timestep,y=num_know_novel,color=as.factor(EWA_rho)))+ geom_line(aes(group=sim),alpha=0.1)+ stat_summary(size=1, fun = mean, geom=c("line"))+ stat_summary(geom="errorbar", fun.data = "mean_cl_boot")+ stat_summary(geom="point", size=1.5, fun.data = "mean_cl_boot")+ scale_color_viridis_d(option = "C", direction=-1, end=0.7)+ scale_x_continuous(limits=c(1,end_point), breaks=c(1,25,100,200,350))+ coord_trans(x = "sqrt")+ scale_y_continuous(limits=c(0,1))+ labs(x="Time",y="Prop. knowledgable",color="Recent exp. 
bias")+ theme_classic() g_rho = ggarrange(p_rho_a,p_rho_b,ncol=1,nrow=2,heights=c(1,4),legend = "right",align="v",common.legend = T) #### Alpha #### load(file="../model_outputs/Rda_files/df_maintext.Rda") df_ABM_maintext = df_ABM_maintext %>% filter(EWA_chi=="linear bias", EWA_rho=="medium", EWA_sigma=="medium", memory_window==10) %>% mutate(timestep=timestep+1) end_point = max(df_ABM_maintext$timestep) df = df_ABM_maintext %>% group_by(sim)%>% complete(timestep = seq(min(timestep), end_point, 1)) %>% arrange(timestep) %>% fill(graph_type,memory_window,EWA_sigma,EWA_rho,EWA_chi,EWA_alpha,sim,timestep,num_know_novel,full_diffusion) p_alpha_a = ggplot(df %>% group_by(sim) %>% filter(full_diffusion==T) %>% slice(head=1) %>% ungroup() %>% group_by(EWA_alpha) %>% mutate(mean_timestep=mean(timestep)))+ geom_density(aes(x=timestep, color=as.factor(EWA_alpha)),show.legend = F)+ geom_point(aes(group=as.factor(EWA_alpha),x=mean_timestep,y=0, color=as.factor(EWA_alpha)), show.legend = F)+ scale_color_viridis_d(option = "C", direction=-1, end=0.7)+ scale_x_continuous(limits=c(1,end_point))+ coord_trans(x = "sqrt")+ labs(x="Time",y="Prop. knowledgable",color="Risk-appetite")+ theme_void() p_alpha_b = ggplot(df, aes(x=timestep,y=num_know_novel,color=as.factor(EWA_alpha)))+ geom_line(aes(group=sim),alpha=0.1)+ stat_summary(size=1, fun = mean, geom=c("line"))+ stat_summary(geom="errorbar", fun.data = "mean_cl_boot")+ stat_summary(geom="point", size=1.5, fun.data = "mean_cl_boot")+ scale_color_viridis_d(option = "C", direction=-1, end=0.7)+ scale_x_continuous(limits=c(1,end_point), breaks=c(1,25,100,200,350))+ coord_trans(x = "sqrt")+ scale_y_continuous(limits=c(0,1))+ labs(x="Time",y="Prop. 
knowledgable",color="Risk-appetite")+ theme_classic() g_alpha = ggarrange(p_alpha_a,p_alpha_b,ncol=1,nrow=2,heights=c(1,4),legend = "right",align="v",common.legend = T) ggarrange(g_chi, g_m, g_rho, g_alpha, labels = c("A","B","C","D")) ggsave(file="../output/Fig_S_all_production_params.png",width=15,height=13,scale=2,units="cm")
#### source functions neeeded cd_UNIQUE_NAME_Toy <- getwd() setwd(paste0(Sys.getenv("masters-thesis"), "/Vol")) source("vol_estimators.R") source("FlexibleFourierForm_Func.R") source("BV_Analysis_Func.R") source("PR_Func.R") # PR = Persistence and roughness setwd(cd_UNIQUE_NAME_Toy) rm(cd_UNIQUE_NAME_Toy) ################################## ########## DATA dt <- BV.get_SPY_data() #Gets first month of data ########### Vol estimation bucketLengthInMinutes <- 5 # Can't trust results if going lower bvS_List <- BV.data_deseason_BV_Func(dt = dt, bucketLengthInMinutes = bucketLengthInMinutes) #(bvS = BV^* in latex) # Now bvS_list constains results on: # Number of terms used in flexible fourier form to deseasonalize data (optimalP) # The Buckets used given by their endpoint (e.g. 2014-01-10 09:35) will be 5 minutes are the data starts # Number of obervations per bucket # A large data table bvSDTfff (fff = flexible fourier form) print(bvS_List$bvSDTfff) #Quite wide # Contains # The BV^* estimates in column bvS1 (note the 1 denotes the path, i.e. data viewed as a single path) # The volatility (simple calculated found in both theory an BV.data_deseason_BV_Func) # Bucket numbers, both day and intraday # Log vol # corrections: s for simple and n for nice (nice = fff) # Described in theory, LogVolsCorrect and LogVolnCorrect are the deseasonalized log vol. # Would always use LogVolnCorrect when needed # lagInd used to denote number of lags (here 5 minute periods) if night is not modelled as 0. Use with GREAT caution - it was only created to make a point. ####### Roughness estimation m <- 6 # Small lag paramter used in estimation. Described in theory. variogramDT <- PR.variogram_prep_DT(bvS_List$bvSDTfff) # Extract (and rename) columns alpha <- PR.est.alpha(variogramDT = variogramDT, m = m, bucketLengthInMinutes = bucketLengthInMinutes, OLS = T) # OLS is the nicer method, suggested use. 
alpha ####### Persistence estimation persistenceDT <- PR.persistence_prep_DT(bvS_List$bvSDTfff) # Extract (and rename) columns TradingDayLagMin <- floor(nrow(persistenceDT)^(1/4)) # Described in theory. Not the suggested value, just used for demonstration TradingDayLagMax <- floor(nrow(persistenceDT)^(1/3)) # Described in theory. Not the suggested value, just used for demonstration beta <- PR.est.beta(persistenceDT = persistenceDT, TradingDayLagMin = TradingDayLagMin, TradingDayLagMax = TradingDayLagMax, bucketLengthInMinutes = bucketLengthInMinutes) # OLS is the nicer method, suggested use. beta
/Module/Vol/Toy Example.R
no_license
SebastianGPedersen/masters-thesis
R
false
false
2,555
r
#### source functions neeeded cd_UNIQUE_NAME_Toy <- getwd() setwd(paste0(Sys.getenv("masters-thesis"), "/Vol")) source("vol_estimators.R") source("FlexibleFourierForm_Func.R") source("BV_Analysis_Func.R") source("PR_Func.R") # PR = Persistence and roughness setwd(cd_UNIQUE_NAME_Toy) rm(cd_UNIQUE_NAME_Toy) ################################## ########## DATA dt <- BV.get_SPY_data() #Gets first month of data ########### Vol estimation bucketLengthInMinutes <- 5 # Can't trust results if going lower bvS_List <- BV.data_deseason_BV_Func(dt = dt, bucketLengthInMinutes = bucketLengthInMinutes) #(bvS = BV^* in latex) # Now bvS_list constains results on: # Number of terms used in flexible fourier form to deseasonalize data (optimalP) # The Buckets used given by their endpoint (e.g. 2014-01-10 09:35) will be 5 minutes are the data starts # Number of obervations per bucket # A large data table bvSDTfff (fff = flexible fourier form) print(bvS_List$bvSDTfff) #Quite wide # Contains # The BV^* estimates in column bvS1 (note the 1 denotes the path, i.e. data viewed as a single path) # The volatility (simple calculated found in both theory an BV.data_deseason_BV_Func) # Bucket numbers, both day and intraday # Log vol # corrections: s for simple and n for nice (nice = fff) # Described in theory, LogVolsCorrect and LogVolnCorrect are the deseasonalized log vol. # Would always use LogVolnCorrect when needed # lagInd used to denote number of lags (here 5 minute periods) if night is not modelled as 0. Use with GREAT caution - it was only created to make a point. ####### Roughness estimation m <- 6 # Small lag paramter used in estimation. Described in theory. variogramDT <- PR.variogram_prep_DT(bvS_List$bvSDTfff) # Extract (and rename) columns alpha <- PR.est.alpha(variogramDT = variogramDT, m = m, bucketLengthInMinutes = bucketLengthInMinutes, OLS = T) # OLS is the nicer method, suggested use. 
alpha ####### Persistence estimation persistenceDT <- PR.persistence_prep_DT(bvS_List$bvSDTfff) # Extract (and rename) columns TradingDayLagMin <- floor(nrow(persistenceDT)^(1/4)) # Described in theory. Not the suggested value, just used for demonstration TradingDayLagMax <- floor(nrow(persistenceDT)^(1/3)) # Described in theory. Not the suggested value, just used for demonstration beta <- PR.est.beta(persistenceDT = persistenceDT, TradingDayLagMin = TradingDayLagMin, TradingDayLagMax = TradingDayLagMax, bucketLengthInMinutes = bucketLengthInMinutes) # OLS is the nicer method, suggested use. beta
#' Model selection #' #' Model selection using the stepwise procedure and the chosen criterion. #' #' The main goal of the package \code{bigstep} is to allow you to select a #' regression model using the stepwise procedure when data is very big, #' potentially larger than available RAM in your computer. What is more, the #' package gives you a lot of control over how this procedure should look like. #' At this moment, you can use one of these functions: \code{stepwise}, #' \code{forward}, \code{backward}, \code{fast_forward}, \code{multi_backward} #' and combinations of them. They can be treated as blocks from which the whole #' procedure of finding the best model is built. #' #' When your data is larger than RAM you have in your computer, it is #' impossible to read it in a normal way. Fortunately, in a process of building #' a regression model it is not necessary to have access to all predictors at the #' same time. Instead, you can read only a part of the matrix \code{X}, check #' all variables from that part and then read another one. To do that with this #' package, you only need to read the matrix \code{X} using #' \code{read.big.matrix} from \code{bigmemory} package. The \code{prepare_data} #' function has a parameter \code{maxp} which represents the maximum size (that #' is the number of elements) of one part. If \code{X} is bigger, it will be #' split. It will be done even if your matrix is big but you have enough RAM #' to read it in a normal way. It may seem unnecessary, but it is worth to do #' because R is not very efficient in dealing with big matrices. #' #' Another problem with a large number of predictors is choosing an appropriate #' criterion. Classical ones like AIC or BIC are bad choice because they will #' almost certainly select a model with two many variables [1]. You can use #' modifications of them like mBIC [2], mBIC2 [3], mAIC or mAIC2. 
In brief, #' these criteria have much heavier penalty for the number of parameters, so #' they prefer smaller models than their classic versions. #' #' If you want to read more, type \code{browseVignettes("bigstep")} #' #' @author Piotr Szulc #' #' @references #' [1] M. Bogdan, J.K. Ghosh, M. Zak-Szatkowska. Selecting explanatory #' variables with the modified version of Bayesian Information Criterion. #' Quality and Reliability Engineering International, 24:989-999, 2008. #' #' [2] M. Bogdan, J.K. Ghosh, R.W. Doerge. Modifying the Schwarz Bayesian #' Information Criterion to locate multiple interacting quantitative trait loci. #' Genetics, 167:989-999, 2004. #' #' [3] F. Frommlet, A. Chakrabarti, M. Murawska, M. Bogdan. Asymptotic Bayes #' optimality under sparsity for general distributions under the alternative, #' Technical report, arXiv:1005.4753v2, 2011. #' #' @examples #' \dontrun{ #' library(bigstep) #' #' ### small data #' set.seed(1) #' n <- 200 #' p <- 20 #' X <- matrix(rnorm(n * p), ncol = p) #' colnames(X) <- paste0("X", 1:p) #' y <- 1 + 0.4 * rowSums(X[, c(5, 10, 15, 20)]) + rnorm(n) #' #' data <- prepare_data(y, X) #' results <- stepwise(data, crit = aic) #' results$model #' summary(results) #' #' ### bigger data #' set.seed(1) #' n <- 1e3 #' p <- 1e4 #' X <- matrix(rnorm(p * n), ncol = p) #' colnames(X) <- paste0("X", 1:p) #' Xadd <- matrix(rnorm(5 * n), n, 5) # additional variables #' colnames(Xadd) <- paste0("Xadd", 1:5) #' y <- 0.2 * rowSums(X[, 1000 * (1:10)]) + Xadd[, 1] - 0.1 * Xadd[, 3] + rnorm(n) #' #' data <- prepare_data(y, X, Xadd = Xadd) #' data %>% #' reduce_matrix(minpv = 0.15) %>% #' stepwise(mbic) -> #' results #' summary(results) #' #' ### big data #' Xbig <- read.big.matrix("X.txt", sep = " ", header = TRUE, #' backingfile = "X.bin", descriptorfile = "X.desc") #' # Xbig <- attach.big.matrix("X.desc") # much faster #' y <- read.table("y.txt") #' # data <- prepare_data(y, Xbig) # slow because of checking NA #' data <- prepare_data(y, 
Xbig, na = FALSE) # set if you know that you do not have NA #' m <- data %>% #' reduce_matrix(minpv = 0.001) %>% #' fast_forward(crit = bic, maxf = 50) %>% #' multi_backward(crit = mbic) %>% #' stepwise(crit = mbic) #' summary(m) #' #' # more examples: type browseVignettes("bigstep") #' } #' #' @docType package #' @name bigstep #' @importFrom RcppEigen fastLmPure #' @importFrom speedglm speedglm.wfit #' @importFrom stats complete.cases binomial poisson #' @importFrom stats cor glm lm sd NULL
/R/bigstep.R
no_license
cran/bigstep
R
false
false
4,542
r
#' Model selection #' #' Model selection using the stepwise procedure and the chosen criterion. #' #' The main goal of the package \code{bigstep} is to allow you to select a #' regression model using the stepwise procedure when data is very big, #' potentially larger than available RAM in your computer. What is more, the #' package gives you a lot of control over how this procedure should look like. #' At this moment, you can use one of these functions: \code{stepwise}, #' \code{forward}, \code{backward}, \code{fast_forward}, \code{multi_backward} #' and combinations of them. They can be treated as blocks from which the whole #' procedure of finding the best model is built. #' #' When your data is larger than RAM you have in your computer, it is #' impossible to read it in a normal way. Fortunately, in a process of building #' a regression model it is not necessary to have access to all predictors at the #' same time. Instead, you can read only a part of the matrix \code{X}, check #' all variables from that part and then read another one. To do that with this #' package, you only need to read the matrix \code{X} using #' \code{read.big.matrix} from \code{bigmemory} package. The \code{prepare_data} #' function has a parameter \code{maxp} which represents the maximum size (that #' is the number of elements) of one part. If \code{X} is bigger, it will be #' split. It will be done even if your matrix is big but you have enough RAM #' to read it in a normal way. It may seem unnecessary, but it is worth to do #' because R is not very efficient in dealing with big matrices. #' #' Another problem with a large number of predictors is choosing an appropriate #' criterion. Classical ones like AIC or BIC are bad choice because they will #' almost certainly select a model with two many variables [1]. You can use #' modifications of them like mBIC [2], mBIC2 [3], mAIC or mAIC2. 
In brief, #' these criteria have much heavier penalty for the number of parameters, so #' they prefer smaller models than their classic versions. #' #' If you want to read more, type \code{browseVignettes("bigstep")} #' #' @author Piotr Szulc #' #' @references #' [1] M. Bogdan, J.K. Ghosh, M. Zak-Szatkowska. Selecting explanatory #' variables with the modified version of Bayesian Information Criterion. #' Quality and Reliability Engineering International, 24:989-999, 2008. #' #' [2] M. Bogdan, J.K. Ghosh, R.W. Doerge. Modifying the Schwarz Bayesian #' Information Criterion to locate multiple interacting quantitative trait loci. #' Genetics, 167:989-999, 2004. #' #' [3] F. Frommlet, A. Chakrabarti, M. Murawska, M. Bogdan. Asymptotic Bayes #' optimality under sparsity for general distributions under the alternative, #' Technical report, arXiv:1005.4753v2, 2011. #' #' @examples #' \dontrun{ #' library(bigstep) #' #' ### small data #' set.seed(1) #' n <- 200 #' p <- 20 #' X <- matrix(rnorm(n * p), ncol = p) #' colnames(X) <- paste0("X", 1:p) #' y <- 1 + 0.4 * rowSums(X[, c(5, 10, 15, 20)]) + rnorm(n) #' #' data <- prepare_data(y, X) #' results <- stepwise(data, crit = aic) #' results$model #' summary(results) #' #' ### bigger data #' set.seed(1) #' n <- 1e3 #' p <- 1e4 #' X <- matrix(rnorm(p * n), ncol = p) #' colnames(X) <- paste0("X", 1:p) #' Xadd <- matrix(rnorm(5 * n), n, 5) # additional variables #' colnames(Xadd) <- paste0("Xadd", 1:5) #' y <- 0.2 * rowSums(X[, 1000 * (1:10)]) + Xadd[, 1] - 0.1 * Xadd[, 3] + rnorm(n) #' #' data <- prepare_data(y, X, Xadd = Xadd) #' data %>% #' reduce_matrix(minpv = 0.15) %>% #' stepwise(mbic) -> #' results #' summary(results) #' #' ### big data #' Xbig <- read.big.matrix("X.txt", sep = " ", header = TRUE, #' backingfile = "X.bin", descriptorfile = "X.desc") #' # Xbig <- attach.big.matrix("X.desc") # much faster #' y <- read.table("y.txt") #' # data <- prepare_data(y, Xbig) # slow because of checking NA #' data <- prepare_data(y, 
Xbig, na = FALSE) # set if you know that you do not have NA #' m <- data %>% #' reduce_matrix(minpv = 0.001) %>% #' fast_forward(crit = bic, maxf = 50) %>% #' multi_backward(crit = mbic) %>% #' stepwise(crit = mbic) #' summary(m) #' #' # more examples: type browseVignettes("bigstep") #' } #' #' @docType package #' @name bigstep #' @importFrom RcppEigen fastLmPure #' @importFrom speedglm speedglm.wfit #' @importFrom stats complete.cases binomial poisson #' @importFrom stats cor glm lm sd NULL
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/mp.R \name{MPeggcomplex} \alias{MPeggcomplex} \title{MPeggcomplex} \usage{ MPeggcomplex(data, TAC.base) } \arguments{ \item{data}{list with data objects} \item{TAC.base}{TAC of previous year} \item{i}{year in the future} } \description{ MPeggcomplex } \details{ Based on the Gerromont and Butterworth rule. }
/man/MPeggcomplex.Rd
no_license
elisvb/CCAM
R
false
true
389
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/mp.R \name{MPeggcomplex} \alias{MPeggcomplex} \title{MPeggcomplex} \usage{ MPeggcomplex(data, TAC.base) } \arguments{ \item{data}{list with data objects} \item{TAC.base}{TAC of previous year} \item{i}{year in the future} } \description{ MPeggcomplex } \details{ Based on the Gerromont and Butterworth rule. }
##Update R version install.packages("installr") library(installr) updateR() ##Installing libraries install.packages("rtweet",dependencies = T) install.packages("dplyr",dependencies = T) install.packages("tidyr",dependencies = T) install.packages("tidytext",dependencies = T) install.packages("magrittr", dependencies = T) install.packages("ggplot2",dependencies=TRUE) install.packages("ggExtra",dependencies=TRUE) install.packages("purrr", dependencies = T) install.packages("tibble", dependencies = T) install.packages("rvest", dependencies = T) install.packages("ptstem",dependencies = T) install.packages("wordcloud2",dependencies = T) install.packages("RPostgreSQL") install.packages("httr") libraries <- function() { library(ggplot2) library(ggExtra) library(rtweet) library(dplyr) library(tidyr) library(tidytext) library(magrittr) library(textdata) library(purrr) library(tibble) library(twitteR) library(tidyverse) library(data.table) library(tidytext) library(glue) library(stringr) library(stringi) library(rvest) library(readr) #library(ptstem) library(wordcloud2) library(tm) #Database library(DBI) library(RODBC) library(odbc) library(RPostgreSQL) #http requests library(httr) } libraries() #Get data from NODE with http request req <- GET("http://localhost:3333/params") requestBody <- content(req) atributte <- requestBody["parameter"] valueSearch <- toString(atributte) #Conection with database ##DATABASE CONNECTION ## API KEYS ##setwd("C:/Users/Star/Documents/TCC/") ##Backup plan ##createTokenNoBrowser<- function(appName, consumerKey, consumerSecret, ## accessToken, accessTokenSecret) { ##app <- httr::oauth_app(appName, consumerKey, consumerSecret) ##params <- list(as_header = TRUE) ##credentials <- list(oauth_token = accessToken, ## oauth_token_secret = accessTokenSecret) ##token <- httr::Token1.0$new(endpoint = NULL, params = params, ## app = app, credentials = credentials) ##return(token) ##} ##Function to create token (not working) creatingToken <- function(appName, 
consumerKey, consumerSecret,accessToken,accesSecret) { token<-get_token() token if(is.null(token)) { tokenNew <- create_token(app = appName, consumer_key = consumerKey, consumer_secret = consumerSecret, access_token = accessToken, access_secret = accessSecret) tokenNew return(tokenNew) } else { token <- get_token() token return(token) } } ##token<-creatingToken(apiKeys[["appName"]],apiKeys[["consumerKey"]],apiKeys[["consumerSecret"]], ## apiKeys[["accessToken"]],apiKeys[["accessSecret"]]) ##Creating Token (working) tokenA<-create_token(app = apiKeys[["appName"]], consumer_key = apiKeys[["consumerKey"]], consumer_secret = apiKeys[["consumerSecret"]], access_token = apiKeys[["accessToken"]], access_secret = apiKeys[["accessSecret"]]) ##Getting token (working) tokenA<-get_token() ##brazil_coord <- lookup_coords(address = 'brazil',components = 'country:Brazil',apikey = apiKeys[["geomaps"]]) ##scotland_coord <- lookup_coords(address = "scotland",components = "country:Scotland",apikey = geomaps) ##Function to search tweets country1 <- search_tweets(q = {valueSearch}, n = 100, include_rts = FALSE,token = tokenA) country1 <- stream_tweets(q={valueSearch}, token = token) #country2 <- search_tweets(q = "minecraft", n = 100, include_rts = FALSE,token = token) ##Stream tweets for 30 seconds default ## ##Selecting only screen_name and text tweets.Country1 = country1 %>% select(screen_name,text) tweets.Country2 = country2 %>% select(screen_name,text) ##tweets.Country1 ##tweets.Country2 #Most relevant head(tweets.Country1$text) #Removing http elements tweets.Country1$stripped_text1 <- gsub("http\\S+","",tweets.Country1$text) #Use the unnest_tokens() to convert to lowercase and remove punctuation, and add ID tweets.Country1_stem <- tweets.Country1 %>% select(stripped_text1)%>%unnest_tokens(word,stripped_text1) ##head(tweets.Country1_stem) #Remove stop words from list of words cleaned_tweets.Country1 <- tweets.Country1_stem %>% anti_join(stop_words) ##head(cleaned_tweets.Country1) 
##head(tweets.Country1$text) #Removing http elements tweets.Country2$stripped_text2 <- gsub("http\\S+","",tweets.Country2$text) #Use the unnest_tokens() to convert to lowercase and remove punctuation, and add ID tweets.Country2_stem <- tweets.Country2 %>% select(stripped_text2)%>%unnest_tokens(word,stripped_text2) ##head(tweets.Country2_stem) #Remove stop words from list of words cleaned_tweets.Country2 <- tweets.Country2_stem %>% anti_join(stop_words) ##head(cleaned_tweets.Country2) ##head(tweets.Country2$text) #Top 10 Tweets in country1 tweets cleaned_tweets.Country1 %>% count(word,sort = TRUE)%>% top_n(10)%>% mutate(word = reorder(word,n))%>% ggplot(aes(x=word,y=n))+geom_col()+xlab(NULL)+coord_flip()+theme_classic()+ labs(x="Count",y="Unique Words",title = "Unique words found in country1 tweets") #Top 10 Tweets in country2 tweets cleaned_tweets.Country2 %>% count(word,sort = TRUE)%>% top_n(10)%>% mutate(word = reorder(word,n))%>% ggplot(aes(x=word,y=n))+geom_col()+xlab(NULL)+coord_flip()+theme_classic()+ labs(x="Count",y="Unique Words",title = "Unique words found in country2 tweets") ##Bing Senment Analaysis ##Joining by word, sentiment and sorting from most to least bing_country1 = cleaned_tweets.Country1 %>% inner_join(get_sentiments("bing")) %>% count(word,sentiment,sort = TRUE) %>% ungroup() ##Grouping by the top n words and showing as negative/positive bing_country1 %>% group_by(sentiment)%>% top_n(10)%>% ungroup()%>% mutate(word = reorder(word,n)) %>% ggplot(aes(word,n,fill = sentiment)) + geom_col(show.legend = FALSE) + facet_wrap(~sentiment,scales = "free_y") + labs(title = "Tweets containing 'search1'", y = "Contribution to sentiment", x = NULL) + coord_flip() + theme_bw() ##bing_country1$sentiment ##Sum negative and positive negativeCount <- sum(str_count(bing_country1$sentiment,"negative")) positiveCount <- sum(str_count(bing_country1$sentiment,"positive")) valueWord <- bing_country1$word[1] ##bing_country1$n ##Bing sentiment 2 ##Same as join country1 
bing_country2 = cleaned_tweets.Country2 %>% inner_join(get_sentiments("bing")) %>% count(word,sentiment,sort = TRUE) %>% ungroup() ##bing_country2 ##Same as group in country1 bing_country2 %>% group_by(sentiment)%>% top_n(10)%>% ungroup()%>% mutate(word = reorder(word,n)) %>% ggplot(aes(word,n,fill = sentiment)) + geom_col(show.legend = FALSE) + facet_wrap(~sentiment,scales = "free_y") + labs(title = "Tweets containing search2", y = "Contribution to sentiment", x = NULL) + coord_flip() + theme_bw() ##Inset values into database randomId <- sample(1:1000, 1, replace=TRUE) #Create query query <- 'INSERT INTO "Searchs" (id, "searchName","principalWord",positives,negatives)' values <- str_glue("VALUES ('{randomId}','{valueSearch}', '{valueWord}', '{positiveCount}', '{negativeCount}' )") total <- paste(query,values) #Insert data rs <- dbSendStatement(con, total) #Select data selectData <- dbGetQuery(con, 'SELECT "searchName" FROM "Searchs"')
/NLP.R
no_license
juaanluna/Analise-de-sentimentos
R
false
false
7,481
r
##Update R version install.packages("installr") library(installr) updateR() ##Installing libraries install.packages("rtweet",dependencies = T) install.packages("dplyr",dependencies = T) install.packages("tidyr",dependencies = T) install.packages("tidytext",dependencies = T) install.packages("magrittr", dependencies = T) install.packages("ggplot2",dependencies=TRUE) install.packages("ggExtra",dependencies=TRUE) install.packages("purrr", dependencies = T) install.packages("tibble", dependencies = T) install.packages("rvest", dependencies = T) install.packages("ptstem",dependencies = T) install.packages("wordcloud2",dependencies = T) install.packages("RPostgreSQL") install.packages("httr") libraries <- function() { library(ggplot2) library(ggExtra) library(rtweet) library(dplyr) library(tidyr) library(tidytext) library(magrittr) library(textdata) library(purrr) library(tibble) library(twitteR) library(tidyverse) library(data.table) library(tidytext) library(glue) library(stringr) library(stringi) library(rvest) library(readr) #library(ptstem) library(wordcloud2) library(tm) #Database library(DBI) library(RODBC) library(odbc) library(RPostgreSQL) #http requests library(httr) } libraries() #Get data from NODE with http request req <- GET("http://localhost:3333/params") requestBody <- content(req) atributte <- requestBody["parameter"] valueSearch <- toString(atributte) #Conection with database ##DATABASE CONNECTION ## API KEYS ##setwd("C:/Users/Star/Documents/TCC/") ##Backup plan ##createTokenNoBrowser<- function(appName, consumerKey, consumerSecret, ## accessToken, accessTokenSecret) { ##app <- httr::oauth_app(appName, consumerKey, consumerSecret) ##params <- list(as_header = TRUE) ##credentials <- list(oauth_token = accessToken, ## oauth_token_secret = accessTokenSecret) ##token <- httr::Token1.0$new(endpoint = NULL, params = params, ## app = app, credentials = credentials) ##return(token) ##} ##Function to create token (not working) creatingToken <- function(appName, 
consumerKey, consumerSecret,accessToken,accesSecret) { token<-get_token() token if(is.null(token)) { tokenNew <- create_token(app = appName, consumer_key = consumerKey, consumer_secret = consumerSecret, access_token = accessToken, access_secret = accessSecret) tokenNew return(tokenNew) } else { token <- get_token() token return(token) } } ##token<-creatingToken(apiKeys[["appName"]],apiKeys[["consumerKey"]],apiKeys[["consumerSecret"]], ## apiKeys[["accessToken"]],apiKeys[["accessSecret"]]) ##Creating Token (working) tokenA<-create_token(app = apiKeys[["appName"]], consumer_key = apiKeys[["consumerKey"]], consumer_secret = apiKeys[["consumerSecret"]], access_token = apiKeys[["accessToken"]], access_secret = apiKeys[["accessSecret"]]) ##Getting token (working) tokenA<-get_token() ##brazil_coord <- lookup_coords(address = 'brazil',components = 'country:Brazil',apikey = apiKeys[["geomaps"]]) ##scotland_coord <- lookup_coords(address = "scotland",components = "country:Scotland",apikey = geomaps) ##Function to search tweets country1 <- search_tweets(q = {valueSearch}, n = 100, include_rts = FALSE,token = tokenA) country1 <- stream_tweets(q={valueSearch}, token = token) #country2 <- search_tweets(q = "minecraft", n = 100, include_rts = FALSE,token = token) ##Stream tweets for 30 seconds default ## ##Selecting only screen_name and text tweets.Country1 = country1 %>% select(screen_name,text) tweets.Country2 = country2 %>% select(screen_name,text) ##tweets.Country1 ##tweets.Country2 #Most relevant head(tweets.Country1$text) #Removing http elements tweets.Country1$stripped_text1 <- gsub("http\\S+","",tweets.Country1$text) #Use the unnest_tokens() to convert to lowercase and remove punctuation, and add ID tweets.Country1_stem <- tweets.Country1 %>% select(stripped_text1)%>%unnest_tokens(word,stripped_text1) ##head(tweets.Country1_stem) #Remove stop words from list of words cleaned_tweets.Country1 <- tweets.Country1_stem %>% anti_join(stop_words) ##head(cleaned_tweets.Country1) 
##head(tweets.Country1$text) #Removing http elements tweets.Country2$stripped_text2 <- gsub("http\\S+","",tweets.Country2$text) #Use the unnest_tokens() to convert to lowercase and remove punctuation, and add ID tweets.Country2_stem <- tweets.Country2 %>% select(stripped_text2)%>%unnest_tokens(word,stripped_text2) ##head(tweets.Country2_stem) #Remove stop words from list of words cleaned_tweets.Country2 <- tweets.Country2_stem %>% anti_join(stop_words) ##head(cleaned_tweets.Country2) ##head(tweets.Country2$text) #Top 10 Tweets in country1 tweets cleaned_tweets.Country1 %>% count(word,sort = TRUE)%>% top_n(10)%>% mutate(word = reorder(word,n))%>% ggplot(aes(x=word,y=n))+geom_col()+xlab(NULL)+coord_flip()+theme_classic()+ labs(x="Count",y="Unique Words",title = "Unique words found in country1 tweets") #Top 10 Tweets in country2 tweets cleaned_tweets.Country2 %>% count(word,sort = TRUE)%>% top_n(10)%>% mutate(word = reorder(word,n))%>% ggplot(aes(x=word,y=n))+geom_col()+xlab(NULL)+coord_flip()+theme_classic()+ labs(x="Count",y="Unique Words",title = "Unique words found in country2 tweets") ##Bing Senment Analaysis ##Joining by word, sentiment and sorting from most to least bing_country1 = cleaned_tweets.Country1 %>% inner_join(get_sentiments("bing")) %>% count(word,sentiment,sort = TRUE) %>% ungroup() ##Grouping by the top n words and showing as negative/positive bing_country1 %>% group_by(sentiment)%>% top_n(10)%>% ungroup()%>% mutate(word = reorder(word,n)) %>% ggplot(aes(word,n,fill = sentiment)) + geom_col(show.legend = FALSE) + facet_wrap(~sentiment,scales = "free_y") + labs(title = "Tweets containing 'search1'", y = "Contribution to sentiment", x = NULL) + coord_flip() + theme_bw() ##bing_country1$sentiment ##Sum negative and positive negativeCount <- sum(str_count(bing_country1$sentiment,"negative")) positiveCount <- sum(str_count(bing_country1$sentiment,"positive")) valueWord <- bing_country1$word[1] ##bing_country1$n ##Bing sentiment 2 ##Same as join country1 
bing_country2 = cleaned_tweets.Country2 %>% inner_join(get_sentiments("bing")) %>% count(word,sentiment,sort = TRUE) %>% ungroup() ##bing_country2 ##Same as group in country1 bing_country2 %>% group_by(sentiment)%>% top_n(10)%>% ungroup()%>% mutate(word = reorder(word,n)) %>% ggplot(aes(word,n,fill = sentiment)) + geom_col(show.legend = FALSE) + facet_wrap(~sentiment,scales = "free_y") + labs(title = "Tweets containing search2", y = "Contribution to sentiment", x = NULL) + coord_flip() + theme_bw() ##Inset values into database randomId <- sample(1:1000, 1, replace=TRUE) #Create query query <- 'INSERT INTO "Searchs" (id, "searchName","principalWord",positives,negatives)' values <- str_glue("VALUES ('{randomId}','{valueSearch}', '{valueWord}', '{positiveCount}', '{negativeCount}' )") total <- paste(query,values) #Insert data rs <- dbSendStatement(con, total) #Select data selectData <- dbGetQuery(con, 'SELECT "searchName" FROM "Searchs"')
#--------------------------------------------------------------------------------------------- # Name: regional_rank_maps.r # Purpose: # Author: Christopher Tracey # Created: 2021-04-27 # Updated: 2021-05-17 # # Updates: # 2021-05-17 - code cleanup and documentation # # # To Do List/Future Ideas: # * #--------------------------------------------------------------------------------------------- #load the packages library(tidyverse) library(here) library(natserv) library(arcgisbinding) arc.check_product() # get the arc license # create a directory for this update unless it already exists ifelse(!dir.exists(here::here("_data")), dir.create(here::here("_data")), FALSE) ifelse(!dir.exists(here::here("_data","regRank")), dir.create(here::here("_data","regRank")), FALSE) # load spatial information from local geodatabase template_RegionalStatus <- arc.open(here::here("PNHP_ReportMaps.gdb","template_RegionalStatusInset")) # load the state boundaries template_RegionalStatus <- arc.select(template_RegionalStatus) template_RegionalStatus <- arc.data2sf(template_RegionalStatus) # load the species list species <- read.csv(here("tracked_species_universal_id_pa_20170530.csv"), stringsAsFactors=FALSE) # build th UID species$UID <- paste("ELEMENT_GLOBAL",species$ELEMENT_GLOBAL_OU_UID,species$ELEMENT.GLOBAL.UNIVERSAL.KEY,sep=".") # test of the for loop #get a list of SNAMEs to run the loop snames <-species[c("ELCODE","SNAME","UID")] #snames <- snames[substr(snames$ELCODE,1,1)=="P",] # only plants snames <- droplevels(snames) snames <- unique(snames) snames <- snames[order(snames$SNAME),] #### TEMP for demo snames <- snames[sample(nrow(snames), 10), ] # loop to get the data and make the maps for (i in 1:length(snames$UID)) { res <- list() # initialize an empty list delayedAssign("do.next", {next}) # some error catching if the results come back empty tryCatch(res <- ns_id(uid=snames$UID[i]), finally=print(snames$SNAME[i]), error=function(e) force(do.next)) # put the rank list into a 
variable for below constatus_US <- as.data.frame(res$elementNationals$elementSubnationals[match("US", res$elementNationals$nation$isoCode)]) constatus_US <- jsonlite::flatten(constatus_US) # gets rid of the nested data frame constatus_CA <- as.data.frame(res$elementNationals$elementSubnationals[match("CA", res$elementNationals$nation$isoCode)]) constatus_CA <- jsonlite::flatten(constatus_CA) # gets rid of the nested data frame # this handles species for which there are no conservatoin statuses... if(dim(constatus_CA)==c(0,0)&dim(constatus_US)==c(0,0)){ cat("There is no conservation statuses for this species, skipping...\n") } else { # combine the US and CA data. constatus <- rbind(constatus_US, constatus_CA) rm(constatus_US, constatus_CA) # clean up # select the lower of a seasonal rank library(stringr) constatus$smallest_number <- sapply( str_extract_all(constatus$roundedSRank, "[0-9]+"), function(x) min(as.integer(x)) ) constatus$roundedSRank <- ifelse(is.finite(constatus$smallest_number), paste("S",constatus$smallest_number, sep=""), constatus$roundedSRank) # combine the SNR, SU, SNA status constatus$roundedSRank[which(constatus$roundedSRank=="SNRN")] <- "SNR/SU/SNA" constatus$roundedSRank[which(constatus$roundedSRank=="SNR")] <- "SNR/SU/SNA" constatus$roundedSRank[which(constatus$roundedSRank=="SU")] <- "SNR/SU/SNA" constatus$roundedSRank[which(constatus$roundedSRank=="SNA")] <- "SNR/SU/SNA" # make a ordered factor of all the sranks #unique(constatus$roundedSRank) constatus$roundedSRank <- ordered(constatus$roundedSRank, levels=c("SX","SH","S1","S2","S3","S4","S5","SNR/SU/SNA")) tmpmap <- merge(template_RegionalStatus, constatus, by.x="subnation", by.y="subnation.subnationCode", all.x=TRUE) # build the plot a <- ggplot(data=tmpmap) + geom_sf(aes(fill=roundedSRank)) + scale_fill_manual( breaks=c("SX","SH","S1","S2","S3","S4","S5","SNR/SU/SNA"), values=c("SX"="#666666", "SH"="#98928B", "S1"="#E96B6B", "S2"="#F7AD75", "S3"="#FDE26E", "S4"="#7CD6F5", 
"S5"="#668BB3", "SNR/SU/SNA"="#E5CFC3"), labels=c("Presumed Extirpated (SX)","Possibly Extirpated (SH)","Critically Imperiled (S1)","Imperiled (S2)","Vulnerable (S3)","Apparently Secure (S4)","Secure (S5)","No Status Rank (SNR/SU/SNA)"), drop=FALSE, na.value="white") + # theme_void() + theme(legend.position="right") + theme(legend.title=element_blank()) + theme(legend.text = element_text(size=8)) # save the map as a png ggsave(filename=paste(here::here("_data","regRank"),"/","regRank_",gsub(" ","-",unique(snames$SNAME[i])),"_",gsub("-","",Sys.Date()),".png", sep=""), plot=a, width = 8, height = 6, units = c("in"), dpi = 200 ) } }
/PNHP_ReportMaps/regional_rank_maps.R
no_license
PNHP/DataManagement
R
false
false
4,895
r
#--------------------------------------------------------------------------------------------- # Name: regional_rank_maps.r # Purpose: # Author: Christopher Tracey # Created: 2021-04-27 # Updated: 2021-05-17 # # Updates: # 2021-05-17 - code cleanup and documentation # # # To Do List/Future Ideas: # * #--------------------------------------------------------------------------------------------- #load the packages library(tidyverse) library(here) library(natserv) library(arcgisbinding) arc.check_product() # get the arc license # create a directory for this update unless it already exists ifelse(!dir.exists(here::here("_data")), dir.create(here::here("_data")), FALSE) ifelse(!dir.exists(here::here("_data","regRank")), dir.create(here::here("_data","regRank")), FALSE) # load spatial information from local geodatabase template_RegionalStatus <- arc.open(here::here("PNHP_ReportMaps.gdb","template_RegionalStatusInset")) # load the state boundaries template_RegionalStatus <- arc.select(template_RegionalStatus) template_RegionalStatus <- arc.data2sf(template_RegionalStatus) # load the species list species <- read.csv(here("tracked_species_universal_id_pa_20170530.csv"), stringsAsFactors=FALSE) # build th UID species$UID <- paste("ELEMENT_GLOBAL",species$ELEMENT_GLOBAL_OU_UID,species$ELEMENT.GLOBAL.UNIVERSAL.KEY,sep=".") # test of the for loop #get a list of SNAMEs to run the loop snames <-species[c("ELCODE","SNAME","UID")] #snames <- snames[substr(snames$ELCODE,1,1)=="P",] # only plants snames <- droplevels(snames) snames <- unique(snames) snames <- snames[order(snames$SNAME),] #### TEMP for demo snames <- snames[sample(nrow(snames), 10), ] # loop to get the data and make the maps for (i in 1:length(snames$UID)) { res <- list() # initialize an empty list delayedAssign("do.next", {next}) # some error catching if the results come back empty tryCatch(res <- ns_id(uid=snames$UID[i]), finally=print(snames$SNAME[i]), error=function(e) force(do.next)) # put the rank list into a 
variable for below constatus_US <- as.data.frame(res$elementNationals$elementSubnationals[match("US", res$elementNationals$nation$isoCode)]) constatus_US <- jsonlite::flatten(constatus_US) # gets rid of the nested data frame constatus_CA <- as.data.frame(res$elementNationals$elementSubnationals[match("CA", res$elementNationals$nation$isoCode)]) constatus_CA <- jsonlite::flatten(constatus_CA) # gets rid of the nested data frame # this handles species for which there are no conservatoin statuses... if(dim(constatus_CA)==c(0,0)&dim(constatus_US)==c(0,0)){ cat("There is no conservation statuses for this species, skipping...\n") } else { # combine the US and CA data. constatus <- rbind(constatus_US, constatus_CA) rm(constatus_US, constatus_CA) # clean up # select the lower of a seasonal rank library(stringr) constatus$smallest_number <- sapply( str_extract_all(constatus$roundedSRank, "[0-9]+"), function(x) min(as.integer(x)) ) constatus$roundedSRank <- ifelse(is.finite(constatus$smallest_number), paste("S",constatus$smallest_number, sep=""), constatus$roundedSRank) # combine the SNR, SU, SNA status constatus$roundedSRank[which(constatus$roundedSRank=="SNRN")] <- "SNR/SU/SNA" constatus$roundedSRank[which(constatus$roundedSRank=="SNR")] <- "SNR/SU/SNA" constatus$roundedSRank[which(constatus$roundedSRank=="SU")] <- "SNR/SU/SNA" constatus$roundedSRank[which(constatus$roundedSRank=="SNA")] <- "SNR/SU/SNA" # make a ordered factor of all the sranks #unique(constatus$roundedSRank) constatus$roundedSRank <- ordered(constatus$roundedSRank, levels=c("SX","SH","S1","S2","S3","S4","S5","SNR/SU/SNA")) tmpmap <- merge(template_RegionalStatus, constatus, by.x="subnation", by.y="subnation.subnationCode", all.x=TRUE) # build the plot a <- ggplot(data=tmpmap) + geom_sf(aes(fill=roundedSRank)) + scale_fill_manual( breaks=c("SX","SH","S1","S2","S3","S4","S5","SNR/SU/SNA"), values=c("SX"="#666666", "SH"="#98928B", "S1"="#E96B6B", "S2"="#F7AD75", "S3"="#FDE26E", "S4"="#7CD6F5", 
"S5"="#668BB3", "SNR/SU/SNA"="#E5CFC3"), labels=c("Presumed Extirpated (SX)","Possibly Extirpated (SH)","Critically Imperiled (S1)","Imperiled (S2)","Vulnerable (S3)","Apparently Secure (S4)","Secure (S5)","No Status Rank (SNR/SU/SNA)"), drop=FALSE, na.value="white") + # theme_void() + theme(legend.position="right") + theme(legend.title=element_blank()) + theme(legend.text = element_text(size=8)) # save the map as a png ggsave(filename=paste(here::here("_data","regRank"),"/","regRank_",gsub(" ","-",unique(snames$SNAME[i])),"_",gsub("-","",Sys.Date()),".png", sep=""), plot=a, width = 8, height = 6, units = c("in"), dpi = 200 ) } }
library(uGMAR) ### Name: iterate_more ### Title: Maximum likelihood estimation of GMAR, StMAR or G-StMAR model ### with preliminary estimates ### Aliases: iterate_more ### ** Examples ## No test: # Estimate GMAR model with only 50 generations of genetic algorithm and # only 1 iteration in variable metric algorithm fit12 <- fitGSMAR(VIX, 1, 2, maxit=1, ngen=50) fit12 # Iterate more since iteration limit was reached fit12 <- iterate_more(fit12) fit12 ## End(No test)
/data/genthat_extracted_code/uGMAR/examples/iterate_more.Rd.R
no_license
surayaaramli/typeRrh
R
false
false
480
r
library(uGMAR) ### Name: iterate_more ### Title: Maximum likelihood estimation of GMAR, StMAR or G-StMAR model ### with preliminary estimates ### Aliases: iterate_more ### ** Examples ## No test: # Estimate GMAR model with only 50 generations of genetic algorithm and # only 1 iteration in variable metric algorithm fit12 <- fitGSMAR(VIX, 1, 2, maxit=1, ngen=50) fit12 # Iterate more since iteration limit was reached fit12 <- iterate_more(fit12) fit12 ## End(No test)
library("e1071") svm_test <- function(model, x, y=NULL) { set.seed(123) predictions <- predict(model, x, probability = TRUE) confusion_matrix <- if (!is.null(y)) table(predicted = predictions, observation = y) else NULL accuracy <- if (!is.null(y)) round((confusion_matrix["1","1"] + confusion_matrix["-1","-1"]) / nrow(x), 4) else NULL return(list(metrics=list(cm=confusion_matrix, accuracy = accuracy), output=predictions)) }
/steps/svm_test.R
permissive
ansi-code/bci-matrix-speller
R
false
false
448
r
library("e1071") svm_test <- function(model, x, y=NULL) { set.seed(123) predictions <- predict(model, x, probability = TRUE) confusion_matrix <- if (!is.null(y)) table(predicted = predictions, observation = y) else NULL accuracy <- if (!is.null(y)) round((confusion_matrix["1","1"] + confusion_matrix["-1","-1"]) / nrow(x), 4) else NULL return(list(metrics=list(cm=confusion_matrix, accuracy = accuracy), output=predictions)) }
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/core-data_repo.R \name{any_subject_loaded} \alias{any_subject_loaded} \title{Function to check if data repository has data} \usage{ any_subject_loaded(rave_data = getDefaultDataRepository()) } \arguments{ \item{rave_data}{internally used} } \description{ Function to check if data repository has data }
/man/any_subject_loaded.Rd
no_license
beauchamplab/rave
R
false
true
381
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/core-data_repo.R \name{any_subject_loaded} \alias{any_subject_loaded} \title{Function to check if data repository has data} \usage{ any_subject_loaded(rave_data = getDefaultDataRepository()) } \arguments{ \item{rave_data}{internally used} } \description{ Function to check if data repository has data }
context("inserting relations into header") test_that("relation2header is working", { expect_is(relation2header(Kenya_veg, "REFERENCE"), "vegtable") expect_is(relation2header(Kenya_veg, "REFERENCE", "YEAR"), "vegtable") expect_error(relation2header( Kenya_veg, "REFERENCE", c("the_ref", "the_id") )) expect_error(relation2header(Kenya_veg, "the_land_use")) })
/tests/testthat/test-relation2header.R
no_license
kamapu/vegtable
R
false
false
378
r
context("inserting relations into header") test_that("relation2header is working", { expect_is(relation2header(Kenya_veg, "REFERENCE"), "vegtable") expect_is(relation2header(Kenya_veg, "REFERENCE", "YEAR"), "vegtable") expect_error(relation2header( Kenya_veg, "REFERENCE", c("the_ref", "the_id") )) expect_error(relation2header(Kenya_veg, "the_land_use")) })
fluidPage( titlePanel("Observer demo"), fluidRow( column(4, cardPanel( sliderInput("n", "N:", min = 10, max = 1000, value = 200, step = 10) )), column(8, verbatimTextOutput("text"), br(), br(), p("In this example, what's visible in the client isn't", "what's interesting. The server is writing to a log", "file each time the slider value changes.") ) ) )
/055-observer-demo/ui.R
permissive
dmpe/shiny-examples
R
false
false
441
r
fluidPage( titlePanel("Observer demo"), fluidRow( column(4, cardPanel( sliderInput("n", "N:", min = 10, max = 1000, value = 200, step = 10) )), column(8, verbatimTextOutput("text"), br(), br(), p("In this example, what's visible in the client isn't", "what's interesting. The server is writing to a log", "file each time the slider value changes.") ) ) )
library(RCurl) library(readr) library(R.utils) library(tidyverse) library(vroom) url = "https://ftp.ncbi.nlm.nih.gov/genomes/Viruses/AllNuclMetadata/AllNuclMetadata.csv.gz" d = tryCatch(download.file(url, destfile = "./Source/AllNuclMetadata.csv.gz"), error = function(e){-999}) if(d == -999) { while (d == -999){ Sys.sleep(600) d = tryCatch(download.file(url, destfile = "./Source/AllNuclMetadata.csv.gz"), error = function(e){-999}) } } seq <- data.table::fread("./Source/AllNuclMetadata.csv.gz", select = c("#Accession", "Release_Date", "Species", "Host", "Collection_Date")) seq %>% rename(Accession = "#Accession") %>% vroom_write("./Source/sequences.csv")
/Code/02_1a_Download GenBank.R
permissive
viralemergence/virion
R
false
false
705
r
library(RCurl) library(readr) library(R.utils) library(tidyverse) library(vroom) url = "https://ftp.ncbi.nlm.nih.gov/genomes/Viruses/AllNuclMetadata/AllNuclMetadata.csv.gz" d = tryCatch(download.file(url, destfile = "./Source/AllNuclMetadata.csv.gz"), error = function(e){-999}) if(d == -999) { while (d == -999){ Sys.sleep(600) d = tryCatch(download.file(url, destfile = "./Source/AllNuclMetadata.csv.gz"), error = function(e){-999}) } } seq <- data.table::fread("./Source/AllNuclMetadata.csv.gz", select = c("#Accession", "Release_Date", "Species", "Host", "Collection_Date")) seq %>% rename(Accession = "#Accession") %>% vroom_write("./Source/sequences.csv")
##Create Plot 2 plot(t$Global_active_power~t$dateTime, type="l", ylab="Global Active Power (kilowatts)", xlab="") ##Save file dev.copy(png,"plot2.png", width=480, height=480) dev.off()
/Plot2.R
no_license
emildabrowski/ExData_Plotting1
R
false
false
186
r
##Create Plot 2 plot(t$Global_active_power~t$dateTime, type="l", ylab="Global Active Power (kilowatts)", xlab="") ##Save file dev.copy(png,"plot2.png", width=480, height=480) dev.off()
##' Missing value generator ##' ##' This function adds a binary variable to a given \code{lvm} model ##' and also a variable which is equal to the original variable where ##' the binary variable is equal to zero ##' ##' @title Missing value generator ##' @param object \code{lvm}-object. ##' @param formula The right hand side specifies the name of a latent ##' variable which is not always observed. The left hand side ##' specifies the name of a new variable which is equal to the latent ##' variable but has missing values. If given as a string then this ##' is used as the name of the latent (full-data) name, and the ##' observed data name is 'missing.data' ##' @param Rformula Missing data mechanism with left hand side ##' specifying the name of the observed data indicator (may also just ##' be given as a character instead of a formula) ##' @param missing.name Name of observed data variable (only used if ##' 'formula' was given as a character specifying the name of the ##' full-data variable) ##' @param suffix If missing.name is missing, then the name of the ##' oberved data variable will be the name of the full-data variable + ##' the suffix ##' @param ... Passed to binomial.lvm. ##' @return lvm object ##' @aliases Missing, Missing<- ##' @examples ##' library(lava) ##' set.seed(17) ##' m <- lvm(y0~x01+x02+x03) ##' m <- Missing(m,formula=x1~x01,Rformula=R1~0.3*x02+-0.7*x01,p=0.4) ##' sim(m,10) ##' ##' ##' m <- lvm(y~1) ##' m <- Missing(m,"y","r") ##' ## same as ##' ## m <- Missing(m,y~1,r~1) ##' sim(m,10) ##' ##' ## same as ##' m <- lvm(y~1) ##' Missing(m,"y") <- r~x ##' sim(m,10) ##' ##' m <- lvm(y~1) ##' m <- Missing(m,"y","r",suffix=".") ##' ## same as ##' ## m <- Missing(m,"y","r",missing.name="y.") ##' ## same as ##' ## m <- Missing(m,y.~y,"r") ##' sim(m,10) ##' ##' @export ##' @author Thomas A. 
Gerds <tag@@biostat.ku.dk> Missing <- function(object,formula,Rformula,missing.name,suffix="0",...){ if (is.character(Rformula)) { indicatorname <- Rformula Rformula <- toformula(Rformula,1) } else { indicatorname <- all.vars(Rformula)[1] } if (length(all.vars(formula))==1) formula <- all.vars(formula) if (is.character(formula)) { if (missing(missing.name)) missing.name <- paste0(formula,suffix) formula <- toformula(missing.name,formula) } newf <- update(formula,paste(".~.+",indicatorname)) if (is.null(distribution(object,indicatorname)[[1]]) || length(list(...))>0) { distribution(object,indicatorname) <- binomial.lvm(...) } transform(object,newf) <- function(u){ out <- u[,1] out[u[,2]==0] <- NA out } regression(object) <- Rformula object } ##' @export "Missing<-" <- function(object,formula,...,value) { Missing(object,formula,value,...) }
/R/Missing.R
no_license
cran/lava
R
false
false
2,811
r
##' Missing value generator ##' ##' This function adds a binary variable to a given \code{lvm} model ##' and also a variable which is equal to the original variable where ##' the binary variable is equal to zero ##' ##' @title Missing value generator ##' @param object \code{lvm}-object. ##' @param formula The right hand side specifies the name of a latent ##' variable which is not always observed. The left hand side ##' specifies the name of a new variable which is equal to the latent ##' variable but has missing values. If given as a string then this ##' is used as the name of the latent (full-data) name, and the ##' observed data name is 'missing.data' ##' @param Rformula Missing data mechanism with left hand side ##' specifying the name of the observed data indicator (may also just ##' be given as a character instead of a formula) ##' @param missing.name Name of observed data variable (only used if ##' 'formula' was given as a character specifying the name of the ##' full-data variable) ##' @param suffix If missing.name is missing, then the name of the ##' oberved data variable will be the name of the full-data variable + ##' the suffix ##' @param ... Passed to binomial.lvm. ##' @return lvm object ##' @aliases Missing, Missing<- ##' @examples ##' library(lava) ##' set.seed(17) ##' m <- lvm(y0~x01+x02+x03) ##' m <- Missing(m,formula=x1~x01,Rformula=R1~0.3*x02+-0.7*x01,p=0.4) ##' sim(m,10) ##' ##' ##' m <- lvm(y~1) ##' m <- Missing(m,"y","r") ##' ## same as ##' ## m <- Missing(m,y~1,r~1) ##' sim(m,10) ##' ##' ## same as ##' m <- lvm(y~1) ##' Missing(m,"y") <- r~x ##' sim(m,10) ##' ##' m <- lvm(y~1) ##' m <- Missing(m,"y","r",suffix=".") ##' ## same as ##' ## m <- Missing(m,"y","r",missing.name="y.") ##' ## same as ##' ## m <- Missing(m,y.~y,"r") ##' sim(m,10) ##' ##' @export ##' @author Thomas A. 
Gerds <tag@@biostat.ku.dk> Missing <- function(object,formula,Rformula,missing.name,suffix="0",...){ if (is.character(Rformula)) { indicatorname <- Rformula Rformula <- toformula(Rformula,1) } else { indicatorname <- all.vars(Rformula)[1] } if (length(all.vars(formula))==1) formula <- all.vars(formula) if (is.character(formula)) { if (missing(missing.name)) missing.name <- paste0(formula,suffix) formula <- toformula(missing.name,formula) } newf <- update(formula,paste(".~.+",indicatorname)) if (is.null(distribution(object,indicatorname)[[1]]) || length(list(...))>0) { distribution(object,indicatorname) <- binomial.lvm(...) } transform(object,newf) <- function(u){ out <- u[,1] out[u[,2]==0] <- NA out } regression(object) <- Rformula object } ##' @export "Missing<-" <- function(object,formula,...,value) { Missing(object,formula,value,...) }
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/point_grid.R \name{lawn_point_grid} \alias{lawn_point_grid} \title{Create a PointGrid} \usage{ lawn_point_grid(extent, cellSide, units = "kilometers", centered = TRUE, bboxIsMask = FALSE) } \arguments{ \item{extent}{(numeric) Extent in \code{[minX, minY, maxX, maxY]} order.} \item{cellSide}{(integer) the distance between points} \item{units}{(character) Units to use for cellWidth, one of 'miles' or 'kilometers' (default).} \item{centered}{(logical) adjust points position to center the grid into bbox. This parameter is going to be removed in the next major release, having the output always centered into bbox. Default: \code{TRUE}} \item{bboxIsMask}{if \code{TRUE}, and bbox is a Polygon or MultiPolygon, the grid Point will be created only if inside the bbox Polygon(s). Default: \code{FALSE}} } \value{ \link{data-FeatureCollection} grid of points. } \description{ Takes a bounding box and a cell depth and returns a set of \link{data-Point}'s in a grid } \examples{ lawn_point_grid(c(-77.3876, 38.7198, -76.9482, 39.0277), 30, 'miles') lawn_point_grid(c(-77.3876, 38.7198, -76.9482, 39.0277), 10, 'miles') lawn_point_grid(c(-77.3876, 38.7198, -76.9482, 39.0277), 3, 'miles') } \seealso{ Other interpolation: \code{\link{lawn_hex_grid}}, \code{\link{lawn_isolines}}, \code{\link{lawn_planepoint}}, \code{\link{lawn_square_grid}}, \code{\link{lawn_tin}}, \code{\link{lawn_triangle_grid}} }
/man/lawn_point_grid.Rd
permissive
SSGoveia/lawn
R
false
true
1,489
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/point_grid.R \name{lawn_point_grid} \alias{lawn_point_grid} \title{Create a PointGrid} \usage{ lawn_point_grid(extent, cellSide, units = "kilometers", centered = TRUE, bboxIsMask = FALSE) } \arguments{ \item{extent}{(numeric) Extent in \code{[minX, minY, maxX, maxY]} order.} \item{cellSide}{(integer) the distance between points} \item{units}{(character) Units to use for cellWidth, one of 'miles' or 'kilometers' (default).} \item{centered}{(logical) adjust points position to center the grid into bbox. This parameter is going to be removed in the next major release, having the output always centered into bbox. Default: \code{TRUE}} \item{bboxIsMask}{if \code{TRUE}, and bbox is a Polygon or MultiPolygon, the grid Point will be created only if inside the bbox Polygon(s). Default: \code{FALSE}} } \value{ \link{data-FeatureCollection} grid of points. } \description{ Takes a bounding box and a cell depth and returns a set of \link{data-Point}'s in a grid } \examples{ lawn_point_grid(c(-77.3876, 38.7198, -76.9482, 39.0277), 30, 'miles') lawn_point_grid(c(-77.3876, 38.7198, -76.9482, 39.0277), 10, 'miles') lawn_point_grid(c(-77.3876, 38.7198, -76.9482, 39.0277), 3, 'miles') } \seealso{ Other interpolation: \code{\link{lawn_hex_grid}}, \code{\link{lawn_isolines}}, \code{\link{lawn_planepoint}}, \code{\link{lawn_square_grid}}, \code{\link{lawn_tin}}, \code{\link{lawn_triangle_grid}} }
#define normalized change function. NEEDS WORK! #na.rm = TRUE? normchange <- function(x, y) { if (y>x) {(y-x)/(100-x)} else {if (y & x == 100 | y & x == 0) {NA} else {if (y == x) {0} else {if (y<x) {(y-x)/x} }}} } #Warning message: #In if (y > x) { : # the condition has length > 1 and only the first element will be used
/Scripts/depricated/norm_c_function.R
no_license
RobertMTalbot/Keck
R
false
false
374
r
#define normalized change function. NEEDS WORK! #na.rm = TRUE? normchange <- function(x, y) { if (y>x) {(y-x)/(100-x)} else {if (y & x == 100 | y & x == 0) {NA} else {if (y == x) {0} else {if (y<x) {(y-x)/x} }}} } #Warning message: #In if (y > x) { : # the condition has length > 1 and only the first element will be used
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/autograd.R \name{with_no_grad} \alias{with_no_grad} \title{Temporarily modify gradient recording.} \usage{ with_no_grad(code) } \arguments{ \item{code}{code to be executed with no gradient recording.} } \description{ Temporarily modify gradient recording. } \examples{ if (torch_is_installed()) { x <- torch_tensor(runif(5), requires_grad = TRUE) with_no_grad({ x$sub_(torch_tensor(as.numeric(1:5))) }) x x$grad } }
/man/with_no_grad.Rd
permissive
krzjoa/torch
R
false
true
497
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/autograd.R \name{with_no_grad} \alias{with_no_grad} \title{Temporarily modify gradient recording.} \usage{ with_no_grad(code) } \arguments{ \item{code}{code to be executed with no gradient recording.} } \description{ Temporarily modify gradient recording. } \examples{ if (torch_is_installed()) { x <- torch_tensor(runif(5), requires_grad = TRUE) with_no_grad({ x$sub_(torch_tensor(as.numeric(1:5))) }) x x$grad } }
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/measure_utils.R \name{pool_measured_tokens} \alias{pool_measured_tokens} \title{Pool token measurements into a single response measurement.} \usage{ pool_measured_tokens(tokens, token_pool) } \arguments{ \item{tokens}{A tibble of tokens and measurements as returned by `measure_tokens`.} \item{token_pool}{A function which accepts a list of token measurements and returns a numeric vector representing a single response measurement.} } \value{ A single pooled response measurement. } \description{ Pool token measurements into a single response measurement. }
/man/pool_measured_tokens.Rd
no_license
jlkravitz/texttest
R
false
true
639
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/measure_utils.R \name{pool_measured_tokens} \alias{pool_measured_tokens} \title{Pool token measurements into a single response measurement.} \usage{ pool_measured_tokens(tokens, token_pool) } \arguments{ \item{tokens}{A tibble of tokens and measurements as returned by `measure_tokens`.} \item{token_pool}{A function which accepts a list of token measurements and returns a numeric vector representing a single response measurement.} } \value{ A single pooled response measurement. } \description{ Pool token measurements into a single response measurement. }
library(shiny) library(ggplot2) library(DT) library(stringr) library(dplyr) load("movies.Rdata") # Define UI for application that plots features of movies --------------------- ui <- fluidPage( # Application title --------------------------------------------------------- titlePanel("Movie browser - without modules"), # Sidebar layout with a input and output definitions ------------------------ sidebarLayout( # Inputs: Select variables to plot ---------------------------------------- sidebarPanel( # Select variable for y-axis -------------------------------------------- selectInput(inputId = "y", label = "Y-axis:", choices = c("IMDB rating" = "imdb_rating", "IMDB number of votes" = "imdb_num_votes", "Critics Score" = "critics_score", "Audience Score" = "audience_score", "Runtime" = "runtime"), selected = "audience_score"), # Select variable for x-axis -------------------------------------------- selectInput(inputId = "x", label = "X-axis:", choices = c("IMDB rating" = "imdb_rating", "IMDB number of votes" = "imdb_num_votes", "Critics Score" = "critics_score", "Audience Score" = "audience_score", "Runtime" = "runtime"), selected = "critics_score"), # Select variable for color --------------------------------------------- selectInput(inputId = "z", label = "Color by:", choices = c("Genre" = "genre", "MPAA Rating" = "mpaa_rating", "Critics Rating" = "critics_rating", "Audience Rating" = "audience_rating"), selected = "mpaa_rating"), # Set alpha level ------------------------------------------------------- sliderInput(inputId = "alpha", label = "Alpha:", min = 0, max = 1, value = 0.5), # Set point size -------------------------------------------------------- sliderInput(inputId = "size", label = "Size:", min = 0, max = 5, value = 2), # Show data table ------------------------------------------------------- checkboxInput(inputId = "show_data", label = "Show data table", value = TRUE) ), # Output: ----------------------------------------------------------------- 
mainPanel( # Show scatterplot ------------------------------------------------------ tabsetPanel(id = "movies", tabPanel("Documentaries", plotOutput("scatterplot_doc"), dataTableOutput("moviestable_doc")), tabPanel("Feature Films", plotOutput("scatterplot_feature"), dataTableOutput("moviestable_feature")), tabPanel("TV Movies", plotOutput("scatterplot_tv"), dataTableOutput("moviestable_tv")) ) ) ) ) # Define server function required to create the scatterplot ------------------- server <- function(input, output, session) { # Create subsets for various title types ------------------------------------ docs <- reactive({ filter(movies, title_type == "Documentary") }) features <- reactive({ filter(movies, title_type == "Feature Film") }) tvs <- reactive({ filter(movies, title_type == "TV Movie") }) # Scatterplot for docs ------------------------------------------------------ output$scatterplot_doc <- renderPlot({ ggplot(data = docs(), aes_string(x = input$x, y = input$y, color = input$z)) + geom_point(alpha = input$alpha, size = input$size) + labs(x = toTitleCase(str_replace_all(input$x, "_", " ")), y = toTitleCase(str_replace_all(input$y, "_", " ")), color = toTitleCase(str_replace_all(input$z, "_", " ")) ) }) # Scatterplot for features -------------------------------------------------- output$scatterplot_feature <- renderPlot({ ggplot(data = features(), aes_string(x = input$x, y = input$y, color = input$z)) + geom_point(alpha = input$alpha, size = input$size) + labs(x = toTitleCase(str_replace_all(input$x, "_", " ")), y = toTitleCase(str_replace_all(input$y, "_", " ")), color = toTitleCase(str_replace_all(input$z, "_", " ")) ) }) # Scatterplot for tvs ------------------------------------------------------- output$scatterplot_tv <- renderPlot({ ggplot(data = tvs(), aes_string(x = input$x, y = input$y, color = input$z)) geom_point(alpha = input$alpha, size = input$size) + labs(x = toTitleCase(str_replace_all(input$x, "_", " ")), y = toTitleCase(str_replace_all(input$y, 
"_", " ")), color = toTitleCase(str_replace_all(input$z, "_", " ")) ) }) # Table for docs ------------------------------------------------------------ output$moviestable_doc <- DT::renderDataTable( if(input$show_data){ DT::datatable(data = docs()[, 1:7], options = list(pageLength = 10), rownames = FALSE) } ) # Table for features -------------------------------------------------------- output$moviestable_feature <- DT::renderDataTable( if(input$show_data){ DT::datatable(data = features()[, 1:7], options = list(pageLength = 10), rownames = FALSE) } ) # Table for tvs ------------------------------------------------------------- output$moviestable_tv <- DT::renderDataTable( if(input$show_data){ DT::datatable(data = tvs()[, 1:7], options = list(pageLength = 10), rownames = FALSE) } ) } # Run the application --------------------------------------------------------- shinyApp(ui = ui, server = server)
/apps/movies/movies_broken_02.R
no_license
tweep/shiny-training-2017-05-genentech
R
false
false
6,258
r
library(shiny) library(ggplot2) library(DT) library(stringr) library(dplyr) load("movies.Rdata") # Define UI for application that plots features of movies --------------------- ui <- fluidPage( # Application title --------------------------------------------------------- titlePanel("Movie browser - without modules"), # Sidebar layout with a input and output definitions ------------------------ sidebarLayout( # Inputs: Select variables to plot ---------------------------------------- sidebarPanel( # Select variable for y-axis -------------------------------------------- selectInput(inputId = "y", label = "Y-axis:", choices = c("IMDB rating" = "imdb_rating", "IMDB number of votes" = "imdb_num_votes", "Critics Score" = "critics_score", "Audience Score" = "audience_score", "Runtime" = "runtime"), selected = "audience_score"), # Select variable for x-axis -------------------------------------------- selectInput(inputId = "x", label = "X-axis:", choices = c("IMDB rating" = "imdb_rating", "IMDB number of votes" = "imdb_num_votes", "Critics Score" = "critics_score", "Audience Score" = "audience_score", "Runtime" = "runtime"), selected = "critics_score"), # Select variable for color --------------------------------------------- selectInput(inputId = "z", label = "Color by:", choices = c("Genre" = "genre", "MPAA Rating" = "mpaa_rating", "Critics Rating" = "critics_rating", "Audience Rating" = "audience_rating"), selected = "mpaa_rating"), # Set alpha level ------------------------------------------------------- sliderInput(inputId = "alpha", label = "Alpha:", min = 0, max = 1, value = 0.5), # Set point size -------------------------------------------------------- sliderInput(inputId = "size", label = "Size:", min = 0, max = 5, value = 2), # Show data table ------------------------------------------------------- checkboxInput(inputId = "show_data", label = "Show data table", value = TRUE) ), # Output: ----------------------------------------------------------------- 
# UI (continued): main panel with one tab per title type.
# NOTE(review): the opening of the UI definition (fluidPage/sidebarLayout and
# the input controls for x, y, z, alpha, size, show_data) is above this chunk.
mainPanel(
      # Show scatterplot ------------------------------------------------------
      tabsetPanel(
        id = "movies",
        tabPanel(
          "Documentaries",
          plotOutput("scatterplot_doc"),
          # Use DT::dataTableOutput to match DT::renderDataTable in the server;
          # an unqualified shiny::dataTableOutput would not render DT widgets.
          DT::dataTableOutput("moviestable_doc")
        ),
        tabPanel(
          "Feature Films",
          plotOutput("scatterplot_feature"),
          DT::dataTableOutput("moviestable_feature")
        ),
        tabPanel(
          "TV Movies",
          plotOutput("scatterplot_tv"),
          DT::dataTableOutput("moviestable_tv")
        )
      )
    )
  )
)

# Define server function required to create the scatterplot -------------------
server <- function(input, output, session) {

  # Create subsets for various title types ------------------------------------
  # `movies` is loaded earlier in the app (not visible in this chunk).
  docs <- reactive({
    filter(movies, title_type == "Documentary")
  })
  features <- reactive({
    filter(movies, title_type == "Feature Film")
  })
  tvs <- reactive({
    filter(movies, title_type == "TV Movie")
  })

  # Scatterplot for docs ------------------------------------------------------
  output$scatterplot_doc <- renderPlot({
    ggplot(data = docs(), aes_string(x = input$x, y = input$y, color = input$z)) +
      geom_point(alpha = input$alpha, size = input$size) +
      labs(
        x = toTitleCase(str_replace_all(input$x, "_", " ")),
        y = toTitleCase(str_replace_all(input$y, "_", " ")),
        color = toTitleCase(str_replace_all(input$z, "_", " "))
      )
  })

  # Scatterplot for features --------------------------------------------------
  output$scatterplot_feature <- renderPlot({
    ggplot(data = features(), aes_string(x = input$x, y = input$y, color = input$z)) +
      geom_point(alpha = input$alpha, size = input$size) +
      labs(
        x = toTitleCase(str_replace_all(input$x, "_", " ")),
        y = toTitleCase(str_replace_all(input$y, "_", " ")),
        color = toTitleCase(str_replace_all(input$z, "_", " "))
      )
  })

  # Scatterplot for tvs -------------------------------------------------------
  output$scatterplot_tv <- renderPlot({
    # BUG FIX: the original omitted the `+` between ggplot() and geom_point(),
    # so the points/labs were never added to the plot for this tab.
    ggplot(data = tvs(), aes_string(x = input$x, y = input$y, color = input$z)) +
      geom_point(alpha = input$alpha, size = input$size) +
      labs(
        x = toTitleCase(str_replace_all(input$x, "_", " ")),
        y = toTitleCase(str_replace_all(input$y, "_", " ")),
        color = toTitleCase(str_replace_all(input$z, "_", " "))
      )
  })

  # Table for docs ------------------------------------------------------------
  # Rendered only when the "show data" checkbox is ticked; columns 1:7 only.
  output$moviestable_doc <- DT::renderDataTable(
    if (input$show_data) {
      DT::datatable(
        data = docs()[, 1:7],
        options = list(pageLength = 10),
        rownames = FALSE
      )
    }
  )

  # Table for features --------------------------------------------------------
  output$moviestable_feature <- DT::renderDataTable(
    if (input$show_data) {
      DT::datatable(
        data = features()[, 1:7],
        options = list(pageLength = 10),
        rownames = FALSE
      )
    }
  )

  # Table for tvs -------------------------------------------------------------
  output$moviestable_tv <- DT::renderDataTable(
    if (input$show_data) {
      DT::datatable(
        data = tvs()[, 1:7],
        options = list(pageLength = 10),
        rownames = FALSE
      )
    }
  )
}

# Run the application ---------------------------------------------------------
shinyApp(ui = ui, server = server)
context("public API") test_that("styler can style package", { capture_output(expect_false({ styled <- style_pkg(testthat_file("public-api", "xyzpackage")) any(styled$changed) })) }) test_that("styler can style directory", { capture_output(expect_false({ styled <- style_dir(testthat_file("public-api", "xyzdir")) any(styled$changed) })) }) test_that("styler can style files", { # just one capture_output(expect_equivalent( { out <- style_file(c( testthat_file("public-api", "xyzfile", "random-script.R") ), strict = FALSE) out$changed }, rep(FALSE, 1) )) # multiple not in the same working directory capture_output(expect_equivalent( { out <- style_file(c( testthat_file("public-api", "xyzfile", "random-script.R"), testthat_file("public-api", "xyzfile", "subfolder", "random-script.R") ), strict = FALSE) out$changed }, rep(FALSE, 2) )) }) test_that("styler does not return error when there is no file to style", { capture_output(expect_error(style_dir( testthat_file("public-api", "xyzemptydir"), strict = FALSE ), NA)) }) context("public API - Rmd in style_file()") test_that("styler can style Rmd file", { capture_output(expect_false({ out <- style_file( testthat_file("public-api", "xyzfile_rmd", "random.Rmd"), strict = FALSE ) out$changed })) capture_output(expect_warning( styled <- style_file(testthat_file("public-api", "xyzfile_rmd", "random2.Rmd"), strict = FALSE) )) expect_false(styled$changed) }) test_that("styler handles malformed Rmd file and invalid R code in chunk", { capture_output(expect_warning( style_file(testthat_file("public-api", "xyzfile_rmd", "random3.Rmd"), strict = FALSE) )) capture_output(expect_warning( style_file(testthat_file("public-api", "xyzfile_rmd", "random4.Rmd"), strict = FALSE) )) }) context("messages are correct") test_that("messages (via cat()) of style_file are correct", { for (encoding in ls_testable_encodings()) { withr::with_options( list(cli.unicode = encoding == "utf8"), { # Message if scope > line_breaks and code changes output <- 
catch_style_file_output(c( "public-api", "xyzdir-dirty", "dirty-sample-with-scope-tokens.R" ), encoding = encoding) expect_known_value( output, testthat_file(paste0( "public-api/xyzdir-dirty/dirty-reference-with-scope-tokens-", encoding )) ) # No message if scope > line_breaks and code does not change output <- catch_style_file_output(c( "public-api", "xyzdir-dirty", "clean-sample-with-scope-tokens.R" ), encoding = encoding) expect_known_value( output, testthat_file(paste0( "public-api/xyzdir-dirty/clean-reference-with-scope-tokens-", encoding )) ) # No message if scope <= line_breaks even if code is changed. output <- catch_style_file_output(c( "public-api", "xyzdir-dirty", "dirty-sample-with-scope-spaces.R" ), encoding = encoding) expect_known_value( output, testthat_file(paste0( "public-api/xyzdir-dirty/dirty-reference-with-scope-spaces-", encoding )) ) } ) } }) context("public API - Rmd in style_dir()") test_that("styler can style R and Rmd files via style_dir()", { msg <- capture_output( style_dir(testthat_file("public-api", "xyz-r-and-rmd-dir"), filetype = c("R", "Rmd") ) ) expect_true(any(grepl("random-script-in-sub-dir.R", msg, fixed = TRUE))) expect_true(any(grepl("random-rmd-script.Rmd", msg, fixed = TRUE))) }) test_that("styler can style Rmd files only via style_dir()", { msg <- capture_output( style_dir(testthat_file("public-api", "xyz-r-and-rmd-dir"), filetype = "Rmd" ) ) expect_true(any(grepl("random-rmd-script.Rmd", msg, fixed = TRUE))) expect_false(any(grepl("random-script-in-sub-dir.R", msg, fixed = TRUE))) }) test_that("styler can style .r and .rmd files via style_dir()", { msg <- capture_output( style_dir(testthat_file("public-api", "xyz-r-and-rmd-dir"), filetype = c(".r", ".rmd") ) ) expect_true(any(grepl("random-script-in-sub-dir.R", msg, fixed = TRUE))) expect_true(any(grepl("random-rmd-script.Rmd", msg, fixed = TRUE))) }) context("public API - Rmd in style_pkg()") test_that("styler can style R and Rmd files via style_pkg()", { msg <- 
capture_output( style_pkg(testthat_file("public-api", "xyzpackage-rmd"), filetype = c("R", "Rmd") ) ) expect_true(any(grepl("hello-world.R", msg, fixed = TRUE))) expect_true(any(grepl("test-package-xyz.R", msg, fixed = TRUE))) expect_true(any(grepl("random.Rmd", msg, fixed = TRUE))) expect_true(any(grepl("README.Rmd", msg, fixed = TRUE))) expect_false(any(grepl("RcppExports.R", msg, fixed = TRUE))) }) test_that("styler can style Rmd files only via style_pkg()", { msg <- capture_output( style_pkg(testthat_file("public-api", "xyzpackage-rmd"), filetype = "Rmd" ) ) expect_false(any(grepl("hello-world.R", msg, fixed = TRUE))) expect_false(any(grepl("test-package-xyz.R", msg, fixed = TRUE))) expect_true(any(grepl("random.Rmd", msg, fixed = TRUE))) expect_true(any(grepl("README.Rmd", msg, fixed = TRUE))) expect_false(any(grepl("RcppExports.R", msg, fixed = TRUE))) }) test_that("insufficient R version returns error", { expect_error(stop_insufficient_r_version()) }) context("public API - Rnw in style_file()") test_that("styler can style Rnw file", { capture_output(expect_false({ out <- style_file( testthat_file("public-api", "xyzfile-rnw", "random.Rnw"), strict = FALSE ) out$changed })) capture_output(expect_warning( styled <- style_file(testthat_file("public-api", "xyzfile-rnw", "random2.Rnw"), strict = FALSE) )) expect_false(styled$changed) }) test_that("styler handles malformed Rnw file and invalid R code in chunk", { capture_output(expect_warning( style_file(testthat_file("public-api", "xyzfile-rnw", "random3.Rnw"), strict = FALSE) )) capture_output(expect_warning( style_file(testthat_file("public-api", "xyzfile-rnw", "random4.Rnw"), strict = FALSE) )) }) context("public API - Rnw in style_pkg()") test_that("styler can style R, Rmd and Rnw files via style_pkg()", { msg <- capture_output( style_pkg(testthat_file("public-api", "xyzpackage-rnw"), filetype = c("R", "Rmd", "Rnw") ) ) expect_true(any(grepl("hello-world.R", msg, fixed = TRUE))) 
expect_true(any(grepl("test-package-xyz.R", msg, fixed = TRUE))) expect_true(any(grepl("random.Rmd", msg, fixed = TRUE))) expect_true(any(grepl("random.Rnw", msg, fixed = TRUE))) expect_false(any(grepl("RcppExports.R", msg, fixed = TRUE))) }) test_that("styler can style Rnw files only via style_pkg()", { msg <- capture_output( style_pkg(testthat_file("public-api", "xyzpackage-rnw"), filetype = "Rnw" ) ) expect_false(any(grepl("hello-world.R", msg, fixed = TRUE))) expect_false(any(grepl("test-package-xyz.R", msg, fixed = TRUE))) expect_false(any(grepl("random.Rmd", msg, fixed = TRUE))) expect_true(any(grepl("random.Rnw", msg, fixed = TRUE))) expect_false(any(grepl("RcppExports.R", msg, fixed = TRUE))) })
/tests/testthat/test-public_api.R
no_license
jhaltiga/styler
R
false
false
7,480
r
context("public API") test_that("styler can style package", { capture_output(expect_false({ styled <- style_pkg(testthat_file("public-api", "xyzpackage")) any(styled$changed) })) }) test_that("styler can style directory", { capture_output(expect_false({ styled <- style_dir(testthat_file("public-api", "xyzdir")) any(styled$changed) })) }) test_that("styler can style files", { # just one capture_output(expect_equivalent( { out <- style_file(c( testthat_file("public-api", "xyzfile", "random-script.R") ), strict = FALSE) out$changed }, rep(FALSE, 1) )) # multiple not in the same working directory capture_output(expect_equivalent( { out <- style_file(c( testthat_file("public-api", "xyzfile", "random-script.R"), testthat_file("public-api", "xyzfile", "subfolder", "random-script.R") ), strict = FALSE) out$changed }, rep(FALSE, 2) )) }) test_that("styler does not return error when there is no file to style", { capture_output(expect_error(style_dir( testthat_file("public-api", "xyzemptydir"), strict = FALSE ), NA)) }) context("public API - Rmd in style_file()") test_that("styler can style Rmd file", { capture_output(expect_false({ out <- style_file( testthat_file("public-api", "xyzfile_rmd", "random.Rmd"), strict = FALSE ) out$changed })) capture_output(expect_warning( styled <- style_file(testthat_file("public-api", "xyzfile_rmd", "random2.Rmd"), strict = FALSE) )) expect_false(styled$changed) }) test_that("styler handles malformed Rmd file and invalid R code in chunk", { capture_output(expect_warning( style_file(testthat_file("public-api", "xyzfile_rmd", "random3.Rmd"), strict = FALSE) )) capture_output(expect_warning( style_file(testthat_file("public-api", "xyzfile_rmd", "random4.Rmd"), strict = FALSE) )) }) context("messages are correct") test_that("messages (via cat()) of style_file are correct", { for (encoding in ls_testable_encodings()) { withr::with_options( list(cli.unicode = encoding == "utf8"), { # Message if scope > line_breaks and code changes output <- 
catch_style_file_output(c( "public-api", "xyzdir-dirty", "dirty-sample-with-scope-tokens.R" ), encoding = encoding) expect_known_value( output, testthat_file(paste0( "public-api/xyzdir-dirty/dirty-reference-with-scope-tokens-", encoding )) ) # No message if scope > line_breaks and code does not change output <- catch_style_file_output(c( "public-api", "xyzdir-dirty", "clean-sample-with-scope-tokens.R" ), encoding = encoding) expect_known_value( output, testthat_file(paste0( "public-api/xyzdir-dirty/clean-reference-with-scope-tokens-", encoding )) ) # No message if scope <= line_breaks even if code is changed. output <- catch_style_file_output(c( "public-api", "xyzdir-dirty", "dirty-sample-with-scope-spaces.R" ), encoding = encoding) expect_known_value( output, testthat_file(paste0( "public-api/xyzdir-dirty/dirty-reference-with-scope-spaces-", encoding )) ) } ) } }) context("public API - Rmd in style_dir()") test_that("styler can style R and Rmd files via style_dir()", { msg <- capture_output( style_dir(testthat_file("public-api", "xyz-r-and-rmd-dir"), filetype = c("R", "Rmd") ) ) expect_true(any(grepl("random-script-in-sub-dir.R", msg, fixed = TRUE))) expect_true(any(grepl("random-rmd-script.Rmd", msg, fixed = TRUE))) }) test_that("styler can style Rmd files only via style_dir()", { msg <- capture_output( style_dir(testthat_file("public-api", "xyz-r-and-rmd-dir"), filetype = "Rmd" ) ) expect_true(any(grepl("random-rmd-script.Rmd", msg, fixed = TRUE))) expect_false(any(grepl("random-script-in-sub-dir.R", msg, fixed = TRUE))) }) test_that("styler can style .r and .rmd files via style_dir()", { msg <- capture_output( style_dir(testthat_file("public-api", "xyz-r-and-rmd-dir"), filetype = c(".r", ".rmd") ) ) expect_true(any(grepl("random-script-in-sub-dir.R", msg, fixed = TRUE))) expect_true(any(grepl("random-rmd-script.Rmd", msg, fixed = TRUE))) }) context("public API - Rmd in style_pkg()") test_that("styler can style R and Rmd files via style_pkg()", { msg <- 
capture_output( style_pkg(testthat_file("public-api", "xyzpackage-rmd"), filetype = c("R", "Rmd") ) ) expect_true(any(grepl("hello-world.R", msg, fixed = TRUE))) expect_true(any(grepl("test-package-xyz.R", msg, fixed = TRUE))) expect_true(any(grepl("random.Rmd", msg, fixed = TRUE))) expect_true(any(grepl("README.Rmd", msg, fixed = TRUE))) expect_false(any(grepl("RcppExports.R", msg, fixed = TRUE))) }) test_that("styler can style Rmd files only via style_pkg()", { msg <- capture_output( style_pkg(testthat_file("public-api", "xyzpackage-rmd"), filetype = "Rmd" ) ) expect_false(any(grepl("hello-world.R", msg, fixed = TRUE))) expect_false(any(grepl("test-package-xyz.R", msg, fixed = TRUE))) expect_true(any(grepl("random.Rmd", msg, fixed = TRUE))) expect_true(any(grepl("README.Rmd", msg, fixed = TRUE))) expect_false(any(grepl("RcppExports.R", msg, fixed = TRUE))) }) test_that("insufficient R version returns error", { expect_error(stop_insufficient_r_version()) }) context("public API - Rnw in style_file()") test_that("styler can style Rnw file", { capture_output(expect_false({ out <- style_file( testthat_file("public-api", "xyzfile-rnw", "random.Rnw"), strict = FALSE ) out$changed })) capture_output(expect_warning( styled <- style_file(testthat_file("public-api", "xyzfile-rnw", "random2.Rnw"), strict = FALSE) )) expect_false(styled$changed) }) test_that("styler handles malformed Rnw file and invalid R code in chunk", { capture_output(expect_warning( style_file(testthat_file("public-api", "xyzfile-rnw", "random3.Rnw"), strict = FALSE) )) capture_output(expect_warning( style_file(testthat_file("public-api", "xyzfile-rnw", "random4.Rnw"), strict = FALSE) )) }) context("public API - Rnw in style_pkg()") test_that("styler can style R, Rmd and Rnw files via style_pkg()", { msg <- capture_output( style_pkg(testthat_file("public-api", "xyzpackage-rnw"), filetype = c("R", "Rmd", "Rnw") ) ) expect_true(any(grepl("hello-world.R", msg, fixed = TRUE))) 
expect_true(any(grepl("test-package-xyz.R", msg, fixed = TRUE))) expect_true(any(grepl("random.Rmd", msg, fixed = TRUE))) expect_true(any(grepl("random.Rnw", msg, fixed = TRUE))) expect_false(any(grepl("RcppExports.R", msg, fixed = TRUE))) }) test_that("styler can style Rnw files only via style_pkg()", { msg <- capture_output( style_pkg(testthat_file("public-api", "xyzpackage-rnw"), filetype = "Rnw" ) ) expect_false(any(grepl("hello-world.R", msg, fixed = TRUE))) expect_false(any(grepl("test-package-xyz.R", msg, fixed = TRUE))) expect_false(any(grepl("random.Rmd", msg, fixed = TRUE))) expect_true(any(grepl("random.Rnw", msg, fixed = TRUE))) expect_false(any(grepl("RcppExports.R", msg, fixed = TRUE))) })
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/enrichment_score.R \name{plotRES} \alias{plotRES} \title{Plot Result of RES} \usage{ plotRES() } \value{ } \description{ \code{plotRES} } \examples{ }
/man/plotRES.Rd
no_license
TamasKiss26/easyRNA
R
false
true
231
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/enrichment_score.R \name{plotRES} \alias{plotRES} \title{Plot Result of RES} \usage{ plotRES() } \value{ } \description{ \code{plotRES} } \examples{ }
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/saver.R \name{saver} \alias{saver} \title{Runs SAVER} \usage{ saver( x, do.fast = TRUE, ncores = 1, size.factor = NULL, npred = NULL, pred.cells = NULL, pred.genes = NULL, pred.genes.only = FALSE, null.model = FALSE, mu = NULL, estimates.only = FALSE ) } \arguments{ \item{x}{An expression count matrix. The rows correspond to genes and the columns correspond to cells. Can be sparse.} \item{do.fast}{Approximates the prediction step. Default is TRUE.} \item{ncores}{Number of cores to use. Default is 1.} \item{size.factor}{Vector of cell size normalization factors. If \code{x} is already normalized or normalization is not desired, use \code{size.factor = 1}. Default uses mean library size normalization.} \item{npred}{Number of genes for regression prediction. Selects the top \code{npred} genes in terms of mean expression for regression prediction. Default is all genes.} \item{pred.cells}{Indices of cells to perform regression prediction. Default is all cells.} \item{pred.genes}{Indices of specific genes to perform regression prediction. Overrides \code{npred}. Default is all genes.} \item{pred.genes.only}{Return expression levels of only \code{pred.genes}. Default is FALSE (returns expression levels of all genes).} \item{null.model}{Whether to use mean gene expression as prediction.} \item{mu}{Matrix of prior means.} \item{estimates.only}{Only return SAVER estimates. Default is FALSE.} } \value{ If `estimates.only = TRUE`, then a matrix of SAVER estimates. If `estimates.only = FALSE`, a list with the following components \item{\code{estimate}}{Recovered (normalized) expression.} \item{\code{se}}{Standard error of estimates.} \item{\code{info}}{Information about dataset.} The \code{info} element is a list with the following components: \item{\code{size.factor}}{Size factor used for normalization.} \item{\code{maxcor}}{Maximum absolute correlation for each gene. 
2 if not calculated} \item{\code{lambda.max}}{Smallest value of lambda which gives the null model.} \item{\code{lambda.min}}{Value of lambda from which the prediction model is used} \item{\code{sd.cv}}{Difference in the number of standard deviations in deviance between the model with lowest cross-validation error and the null model} \item{\code{pred.time}}{Time taken to generate predictions.} \item{\code{var.time}}{Time taken to estimate variance.} \item{\code{maxcor}}{Maximum absolute correlation cutoff used to determine if a gene should be predicted.} \item{\code{lambda.coefs}}{Coefficients for estimating lambda with lowest cross-validation error.} \item{\code{total.time}}{Total time for SAVER estimation.} } \description{ Recovers expression using the SAVER method. } \details{ The SAVER method starts by estimating the prior mean and variance for the true expression level for each gene and cell. The prior mean is obtained through predictions from a LASSO Poisson regression for each gene implemented using the \code{glmnet} package. Then, the variance is estimated through maximum likelihood assuming constant variance, Fano factor, or coefficient of variation variance structure for each gene. The posterior distribution is calculated and the posterior mean is reported as the SAVER estimate. } \examples{ data("linnarsson") \dontrun{ system.time(linnarsson_saver <- saver(linnarsson, ncores = 12)) } # predictions for top 5 highly expressed genes \dontrun{ saver2 <- saver(linnarsson, npred = 5) } # predictions for certain genes \dontrun{ genes <- c("Thy1", "Mbp", "Stim2", "Psmc6", "Rps19") genes.ind <- which(rownames(linnarsson) %in% genes) saver3 <- saver(linnarsson, pred.genes = genes.ind) } # return only certain genes \dontrun{ saver4 <- saver(linnarsson, pred.genes = genes.ind, pred.genes.only = TRUE) } }
/ExpressionRecoveryMaterials/SAVER/man/saver.Rd
no_license
dapingtai/SingleCell
R
false
true
3,840
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/saver.R \name{saver} \alias{saver} \title{Runs SAVER} \usage{ saver( x, do.fast = TRUE, ncores = 1, size.factor = NULL, npred = NULL, pred.cells = NULL, pred.genes = NULL, pred.genes.only = FALSE, null.model = FALSE, mu = NULL, estimates.only = FALSE ) } \arguments{ \item{x}{An expression count matrix. The rows correspond to genes and the columns correspond to cells. Can be sparse.} \item{do.fast}{Approximates the prediction step. Default is TRUE.} \item{ncores}{Number of cores to use. Default is 1.} \item{size.factor}{Vector of cell size normalization factors. If \code{x} is already normalized or normalization is not desired, use \code{size.factor = 1}. Default uses mean library size normalization.} \item{npred}{Number of genes for regression prediction. Selects the top \code{npred} genes in terms of mean expression for regression prediction. Default is all genes.} \item{pred.cells}{Indices of cells to perform regression prediction. Default is all cells.} \item{pred.genes}{Indices of specific genes to perform regression prediction. Overrides \code{npred}. Default is all genes.} \item{pred.genes.only}{Return expression levels of only \code{pred.genes}. Default is FALSE (returns expression levels of all genes).} \item{null.model}{Whether to use mean gene expression as prediction.} \item{mu}{Matrix of prior means.} \item{estimates.only}{Only return SAVER estimates. Default is FALSE.} } \value{ If `estimates.only = TRUE`, then a matrix of SAVER estimates. If `estimates.only = FALSE`, a list with the following components \item{\code{estimate}}{Recovered (normalized) expression.} \item{\code{se}}{Standard error of estimates.} \item{\code{info}}{Information about dataset.} The \code{info} element is a list with the following components: \item{\code{size.factor}}{Size factor used for normalization.} \item{\code{maxcor}}{Maximum absolute correlation for each gene. 
2 if not calculated} \item{\code{lambda.max}}{Smallest value of lambda which gives the null model.} \item{\code{lambda.min}}{Value of lambda from which the prediction model is used} \item{\code{sd.cv}}{Difference in the number of standard deviations in deviance between the model with lowest cross-validation error and the null model} \item{\code{pred.time}}{Time taken to generate predictions.} \item{\code{var.time}}{Time taken to estimate variance.} \item{\code{maxcor}}{Maximum absolute correlation cutoff used to determine if a gene should be predicted.} \item{\code{lambda.coefs}}{Coefficients for estimating lambda with lowest cross-validation error.} \item{\code{total.time}}{Total time for SAVER estimation.} } \description{ Recovers expression using the SAVER method. } \details{ The SAVER method starts by estimating the prior mean and variance for the true expression level for each gene and cell. The prior mean is obtained through predictions from a LASSO Poisson regression for each gene implemented using the \code{glmnet} package. Then, the variance is estimated through maximum likelihood assuming constant variance, Fano factor, or coefficient of variation variance structure for each gene. The posterior distribution is calculated and the posterior mean is reported as the SAVER estimate. } \examples{ data("linnarsson") \dontrun{ system.time(linnarsson_saver <- saver(linnarsson, ncores = 12)) } # predictions for top 5 highly expressed genes \dontrun{ saver2 <- saver(linnarsson, npred = 5) } # predictions for certain genes \dontrun{ genes <- c("Thy1", "Mbp", "Stim2", "Psmc6", "Rps19") genes.ind <- which(rownames(linnarsson) %in% genes) saver3 <- saver(linnarsson, pred.genes = genes.ind) } # return only certain genes \dontrun{ saver4 <- saver(linnarsson, pred.genes = genes.ind, pred.genes.only = TRUE) } }
## Week 2 programming assignment, part 1 of 3.

#' Mean of a pollutant across a set of monitor CSV files.
#'
#' Lists the *.csv files in `datadir` (alphabetical order), reads the files
#' selected by `id` (positions in that listing), keeps only complete cases
#' across ALL columns (matching the original behaviour), and returns the
#' mean of the requested pollutant column.
#'
#' @param datadir directory containing the monitor CSV files
#' @param pollutant name of the column to average, e.g. "sulfate"
#' @param id integer vector of file indices to include (default 1:332)
#' @return numeric scalar: mean of `pollutant` over complete rows
pollutantmean <- function(datadir, pollutant, id = 1:332) {
  data.files <- list.files(path = datadir, pattern = "csv$", full.names = TRUE)
  # Read all requested files first, then bind once. The original grew a
  # data frame with rbind() inside a loop, which copies the accumulated
  # data on every iteration (O(n^2)).
  all.data <- do.call(rbind, lapply(data.files[id], read.csv))
  complete.rows <- complete.cases(all.data)
  mean(all.data[complete.rows, pollutant])
}
/pollutantmean.R
no_license
khalua/r-course-week-2
R
false
false
505
r
## Week 2 programming assignment, part 1 of 3.

#' Mean of a pollutant across a set of monitor CSV files.
#'
#' Lists the *.csv files in `datadir` (alphabetical order), reads the files
#' selected by `id` (positions in that listing), keeps only complete cases
#' across ALL columns (matching the original behaviour), and returns the
#' mean of the requested pollutant column.
#'
#' @param datadir directory containing the monitor CSV files
#' @param pollutant name of the column to average, e.g. "sulfate"
#' @param id integer vector of file indices to include (default 1:332)
#' @return numeric scalar: mean of `pollutant` over complete rows
pollutantmean <- function(datadir, pollutant, id = 1:332) {
  data.files <- list.files(path = datadir, pattern = "csv$", full.names = TRUE)
  # Read all requested files first, then bind once. The original grew a
  # data frame with rbind() inside a loop, which copies the accumulated
  # data on every iteration (O(n^2)).
  all.data <- do.call(rbind, lapply(data.files[id], read.csv))
  complete.rows <- complete.cases(all.data)
  mean(all.data[complete.rows, pollutant])
}
# Shiny ui.R for the "US VA Expense" dashboard: downloads VA.csv from GitHub
# and builds a navbar UI with a chart tab, a map tab and an info tab.
library(shiny)
library(dplyr)
library(reshape)
library(RCurl)
library(stringr)
library(markdown)

# Point RCurl at a CA bundle so the HTTPS download of the raw GitHub file works.
options(RCurlOptions = list(cainfo = system.file("CurlSSL", "cacert.pem", package = "RCurl")))
ghub <- "https://raw.githubusercontent.com/cherylb/markitzero/master/"
fileurl <- paste(ghub,"VA.csv", sep ="")
data <- getURL(fileurl)
df <- read.csv(text = data, stringsAsFactors = FALSE)

#'''''''''''''''''''''''''
# reshape data
# df$State <- as.character(df$State)
# df$State <- sapply(df$State,str_trim)
# df$MedicalGeneral <- df$medcare + df$genopex
# df$OtherExpense <- df$insur + df$const + df$loan
# df$AmtperVet <- df$TotalExpense/df$NumOfVeterans
#
# df <- df[c(2,3,4,5,6,8,13,15,16,17)]
# dfvadata <- melt(df, id=c("State","Year"))
# names <- c("State", "Year", "Description", "Value")
# names(dfvadata) <- names
# dfvadata <- dfvadata%>%filter(State != 0, Value != 0)
#
# dfvadata$State <- as.character(dfvadata$State)
# dfvadata$Description <- as.character(dfvadata$Description)
#
# # Add national total
# addtot <- dfvadata%>%group_by(Year, Description) %>%
# summarise(Value = sum(Value))%>%
# mutate(State = "National")
#
# addtot$Value[addtot$Description=="TotAmountperVet"] =
# addtot$Value[addtot$Description =="TotalExpense"]/
# addtot$Value[addtot$Description== "NumOfVeterans"]
#
# dfvadata <- rbind(dfvadata,addtot[c(4,1,2,3)])
# dfvadata <- dfvadata%>% replace(is.na(.), 0)
#
#'''''''''''''''''''''''''''''''''''''''''''''

# NOTE(review): the code below reads `dfvadata`, but its construction is
# commented out above — presumably `dfvadata` is created in server.R or a
# shared global scope; verify, otherwise this script errors at load time.

# set up UI options
selecttype <- as.list(sort(unique(dfvadata$Description)))
selectgeo <- as.list(c("National",sort(unique(dfvadata$State))))
# Name each choice after its own value so selectInput shows readable labels.
x <- unlist(selecttype)
names(selecttype) <- x
y <- unlist(selectgeo)
names(selectgeo) <- y

# UI for Veterans detail
shinyUI(navbarPage("US VA Expense",
  tabPanel("Charts",
    fluidPage(
      h3("VA Expenditures by Selected State(s) ($'s in 1000's)"),
      column(2,
        selectInput("Type", label = h4("Measure: "),
          choices = selecttype, selected = "AmtperVet"),
        br(),
        checkboxGroupInput("Geo", label = h4("Select States"),
          choices = selectgeo, selected = "Alabama")
      ),
      column(3,
        tableOutput("dash")
      )
    )
  ),
  tabPanel("US Map",
    h3("Average VA Spend per State"),
    h4("1999-2013"),
    tableOutput("maptastic")
  ),
  tabPanel("Information",
    includeMarkdown("writeup.Rmd")
  )
))
/New folder/ui.R
no_license
cherylb/markitzero
R
false
false
2,565
r
# Shiny ui.R for the "US VA Expense" dashboard: downloads VA.csv from GitHub
# and builds a navbar UI with a chart tab, a map tab and an info tab.
library(shiny)
library(dplyr)
library(reshape)
library(RCurl)
library(stringr)
library(markdown)

# Point RCurl at a CA bundle so the HTTPS download of the raw GitHub file works.
options(RCurlOptions = list(cainfo = system.file("CurlSSL", "cacert.pem", package = "RCurl")))
ghub <- "https://raw.githubusercontent.com/cherylb/markitzero/master/"
fileurl <- paste(ghub,"VA.csv", sep ="")
data <- getURL(fileurl)
df <- read.csv(text = data, stringsAsFactors = FALSE)

#'''''''''''''''''''''''''
# reshape data
# df$State <- as.character(df$State)
# df$State <- sapply(df$State,str_trim)
# df$MedicalGeneral <- df$medcare + df$genopex
# df$OtherExpense <- df$insur + df$const + df$loan
# df$AmtperVet <- df$TotalExpense/df$NumOfVeterans
#
# df <- df[c(2,3,4,5,6,8,13,15,16,17)]
# dfvadata <- melt(df, id=c("State","Year"))
# names <- c("State", "Year", "Description", "Value")
# names(dfvadata) <- names
# dfvadata <- dfvadata%>%filter(State != 0, Value != 0)
#
# dfvadata$State <- as.character(dfvadata$State)
# dfvadata$Description <- as.character(dfvadata$Description)
#
# # Add national total
# addtot <- dfvadata%>%group_by(Year, Description) %>%
# summarise(Value = sum(Value))%>%
# mutate(State = "National")
#
# addtot$Value[addtot$Description=="TotAmountperVet"] =
# addtot$Value[addtot$Description =="TotalExpense"]/
# addtot$Value[addtot$Description== "NumOfVeterans"]
#
# dfvadata <- rbind(dfvadata,addtot[c(4,1,2,3)])
# dfvadata <- dfvadata%>% replace(is.na(.), 0)
#
#'''''''''''''''''''''''''''''''''''''''''''''

# NOTE(review): the code below reads `dfvadata`, but its construction is
# commented out above — presumably `dfvadata` is created in server.R or a
# shared global scope; verify, otherwise this script errors at load time.

# set up UI options
selecttype <- as.list(sort(unique(dfvadata$Description)))
selectgeo <- as.list(c("National",sort(unique(dfvadata$State))))
# Name each choice after its own value so selectInput shows readable labels.
x <- unlist(selecttype)
names(selecttype) <- x
y <- unlist(selectgeo)
names(selectgeo) <- y

# UI for Veterans detail
shinyUI(navbarPage("US VA Expense",
  tabPanel("Charts",
    fluidPage(
      h3("VA Expenditures by Selected State(s) ($'s in 1000's)"),
      column(2,
        selectInput("Type", label = h4("Measure: "),
          choices = selecttype, selected = "AmtperVet"),
        br(),
        checkboxGroupInput("Geo", label = h4("Select States"),
          choices = selectgeo, selected = "Alabama")
      ),
      column(3,
        tableOutput("dash")
      )
    )
  ),
  tabPanel("US Map",
    h3("Average VA Spend per State"),
    h4("1999-2013"),
    tableOutput("maptastic")
  ),
  tabPanel("Information",
    includeMarkdown("writeup.Rmd")
  )
))
# Loads the raw sequence data for the Stat 220 final project and previews it.

# NOTE(review): clearing the workspace when the script is sourced is a
# surprising side effect for anyone else running it — consider removing.
rm(list = ls())

## change this to your own working directory where your data is saved
#workdir <- "C:/Users/morrisyau/Desktop/stat220/final"
#setwd(workdir)

# Read the CSV (expected in the working directory) and show the first rows
# as a quick sanity check.
data <- read.csv("SequenceData.csv")
head(data)
/ReadData.R
no_license
muherng/stat220
R
false
false
206
r
# Loads the raw sequence data for the Stat 220 final project and previews it.

# NOTE(review): clearing the workspace when the script is sourced is a
# surprising side effect for anyone else running it — consider removing.
rm(list = ls())

## change this to your own working directory where your data is saved
#workdir <- "C:/Users/morrisyau/Desktop/stat220/final"
#setwd(workdir)

# Read the CSV (expected in the working directory) and show the first rows
# as a quick sanity check.
data <- read.csv("SequenceData.csv")
head(data)
#######################################################
#
# Workshop on Politeness in Social Interaction
#
# Michael Yeomans
#
#######################################################
# Driver script: loads the workshop's packages and data, then runs each
# analysis by source()-ing the numbered scripts. The source() calls depend
# on the data frames read just above them, so run sections top to bottom.

# Run once
# install.packages(c("devtools","glmnet","politeness",
# "multiwayvcov","lmtest","pROC",
# "quanteda","tidyverse","spacyr"))
# devtools::install_github("myeomans/DTMtools")
# spacyr::spacy_install() # creates grammar parsing engine

# Run every time
library(politeness) # what we're here for
library(quanteda) # generic text analysis
library(tidyverse) # useful and ubiquitous
library(glmnet) # machine learning algorithm
library(pROC) # non-parametric accuracy using ROC
library(DTMtools) # Mike Y's special ngram extractor
library(multiwayvcov) # cluster-robust standard errors
library(lmtest) # regression models

spacyr::spacy_initialize() # turns on grammar parsing engine

#######################################################

# -------- Workflow Example 1 --------
# Communicating Warmth in Distributive Negotiations
# is Surprisingly Counterproductive

CWstudy1<-read.csv("data/CWstudy1.csv")

source("CWstudy1.R")
source("CWstudy1accuracy.R")

CWstudy3turns<-read.csv("data/CWstudy3turns.csv")
CWstudy3people<-read.csv("data/CWstudy3people.csv")

source("CWstudy3.R")

# -------- Workflow Example 2 --------
# Conversational Receptiveness:
# Improving Engagement with Opposing Views
##################################################

CRstudy1A<-read.csv("data/CRstudy1A.csv")
CRstudy1B<-read.csv("data/CRstudy1B.csv")

source("CRstudy1.R")
source("CRstudy1transfer.R")
source("CRstudy1pairwise.R")
source("CRstudy1interpret.R")

CRstudy2<-read.csv("data/CRstudy2.csv")

source("CRstudy2.R")
source("CRstudy2outcomes.R")
source("CRstudy2bmm.R")

#######################################################
/TableofContents.R
permissive
myeomans/politeWorkshop
R
false
false
1,912
r
#######################################################
#
# Workshop on Politeness in Social Interaction
#
# Michael Yeomans
#
#######################################################
# Driver script: loads the workshop's packages and data, then runs each
# analysis by source()-ing the numbered scripts. The source() calls depend
# on the data frames read just above them, so run sections top to bottom.

# Run once
# install.packages(c("devtools","glmnet","politeness",
# "multiwayvcov","lmtest","pROC",
# "quanteda","tidyverse","spacyr"))
# devtools::install_github("myeomans/DTMtools")
# spacyr::spacy_install() # creates grammar parsing engine

# Run every time
library(politeness) # what we're here for
library(quanteda) # generic text analysis
library(tidyverse) # useful and ubiquitous
library(glmnet) # machine learning algorithm
library(pROC) # non-parametric accuracy using ROC
library(DTMtools) # Mike Y's special ngram extractor
library(multiwayvcov) # cluster-robust standard errors
library(lmtest) # regression models

spacyr::spacy_initialize() # turns on grammar parsing engine

#######################################################

# -------- Workflow Example 1 --------
# Communicating Warmth in Distributive Negotiations
# is Surprisingly Counterproductive

CWstudy1<-read.csv("data/CWstudy1.csv")

source("CWstudy1.R")
source("CWstudy1accuracy.R")

CWstudy3turns<-read.csv("data/CWstudy3turns.csv")
CWstudy3people<-read.csv("data/CWstudy3people.csv")

source("CWstudy3.R")

# -------- Workflow Example 2 --------
# Conversational Receptiveness:
# Improving Engagement with Opposing Views
##################################################

CRstudy1A<-read.csv("data/CRstudy1A.csv")
CRstudy1B<-read.csv("data/CRstudy1B.csv")

source("CRstudy1.R")
source("CRstudy1transfer.R")
source("CRstudy1pairwise.R")
source("CRstudy1interpret.R")

CRstudy2<-read.csv("data/CRstudy2.csv")

source("CRstudy2.R")
source("CRstudy2outcomes.R")
source("CRstudy2bmm.R")

#######################################################
library(UsingR) ### Name: slc ### Title: Sodium-Lithium countertransport ### Aliases: slc ### Keywords: datasets ### ** Examples data(slc) hist(slc)
/data/genthat_extracted_code/UsingR/examples/slc.Rd.R
no_license
surayaaramli/typeRrh
R
false
false
156
r
library(UsingR) ### Name: slc ### Title: Sodium-Lithium countertransport ### Aliases: slc ### Keywords: datasets ### ** Examples data(slc) hist(slc)
#' Transform an object into HTML and copy it for export #' #' This function transforms its argument to HTML and then copy it to the #' clipboard or to a file for later use in an external application. #' #' @aliases copie copie.default clipcopy.default #' @param obj object to be copied #' @param ... arguments passed to \code{R2HTML::HTML} #' @details #' Under linux, this function requires that \code{xclip} is #' installed on the system to copy to the clipboard. #' @examples #' data(iris) #' tab <- table(cut(iris$Sepal.Length,8),cut(iris$Sepal.Width,4)) #' \dontrun{copie(tab)} #' ptab <- rprop(tab, percent=TRUE) #' \dontrun{clipcopy(ptab)} #' @seealso \code{\link[R2HTML]{HTML}}, \code{\link[questionr]{format.proptab}} #' @keywords connection #' @export `clipcopy` <- function (obj, ...) { UseMethod("clipcopy") } #' @export copie <- clipcopy #' @return \code{NULL} #' #' @rdname clipcopy #' @aliases copie.proptab #' @param append if TRUE, append to the file instead of replacing it #' @param file if TRUE, export to a file instead of the clipboard #' @param filename name of the file to export to #' @param clipboard.size under Windows, size of the clipboard in kB #' @export `clipcopy.default` <- function (obj, append=FALSE, file=FALSE, filename="temp.html", clipboard.size=4096, ...) { if (file) { conn <- file(filename, "w", encoding="Latin1") R2HTML::HTML(obj, file=conn, append=append) close(conn) return() } if (Sys.info()["sysname"] == "Windows") { connection.name <- paste("clipboard", format(clipboard.size, scientific=1000), sep="-") conn <- file(connection.name, "w", encoding="Latin1") } if (Sys.info()["sysname"] == "Darwin") conn <- pipe("pbcopy", "w", encoding="Latin1") if (Sys.info()["sysname"] == "Linux") conn <- pipe("xclip -i", "w", encoding="Latin1") R2HTML::HTML(obj, file = conn, append = append, ...) 
close(conn) } #' @export copie.default <- clipcopy.default #' @return \code{NULL} #' #' @rdname clipcopy #' @aliases copie.proptab #' @param percent whether to add a percent sign in each cell #' @param digits number of digits to display #' @param justify justification #' @seealso \code{\link[questionr]{clipcopy}}, \code{\link[questionr]{format.proptab}} #' @export `clipcopy.proptab` <- function (obj, percent=NULL, digits=NULL, justify="right", ...) { if (!inherits(obj, "proptab")) stop("Object is not of class proptab") obj <- format.proptab(obj, digits=digits, percent=percent, justify=justify) copie.default(obj, ...) } #' @export copie.proptab <- clipcopy.proptab #' Rename a data frame column #' #' #' @aliases renomme.variable #' @param df data frame #' @param old old name #' @param new new name #' @keywords manip #' @return A data frame with the column named "old" renamed as "new" #' @examples #' data(iris) #' str(iris) #' iris <- rename.variable(iris, "Species", "especes") #' str(iris) #' @export renomme.variable rename.variable `rename.variable` <- function (df, old, new) { names(df)[which(names(df)==old)] <- new df } #' @export renomme.variable <- rename.variable #' Determine all duplicate elements #' #' The native \link{duplicated} function determines which elements of a vector #' or data frame are duplicates of elements already observed in the vector or the #' data frame provided. Therefore, only the second occurence (or third or nth) #' of an element is considered as a duplicate. #' \code{duplicated2} is similar but will also mark the first occurence as a #' duplicate (see examples). #' #' @param x a vector, a data frame or a matrix #' @return A logical vector indicated wich elements are duplicated in \code{x}. 
#' @source \url{http://forums.cirad.fr/logiciel-R/viewtopic.php?p=2968} #' @seealso \link{duplicated} #' @examples #' df <- data.frame(x=c("a","b","c","b","d","c"),y=c(1,2,3,2,4,3)) #' df #' duplicated(df) #' duplicated2(df) #' @export duplicated2 `duplicated2` <- function(x){ if (sum(dup <- duplicated(x))==0) return(dup) if (class(x) %in% c("data.frame","matrix")) duplicated(rbind(x[dup,],x))[-(1:sum(dup))] else duplicated(c(x[dup],x))[-(1:sum(dup))] } #' Remove observations with missing values #' #' \code{na.rm} is similar to \link{na.omit} but allows to specify a list of #' variables to take into account. #' #' @param x a data frame #' @param v a list of variables #' @details #' If \code{v} is not specified, the result of \code{na.rm} will be the same as #' \link{na.omit}. If a list of variables is specified through \code{v}, only #' observations with a missing value (\code{NA}) for one of the specified #' variables will be removed from \code{x}. See examples. #' @author Joseph Larmarange <joseph@@larmarange.net> #' @seealso \link{na.omit} #' @examples #' df <- data.frame(x = c(1, 2, 3), y = c(0, 10, NA), z= c("a",NA,"b")) #' df #' na.omit(df) #' na.rm(df) #' na.rm(df, c("x","y")) #' na.rm(df, "z") #' @export na.rm `na.rm` <- function(x, v=NULL){ if (!is.data.frame(x)) x <- as.data.frame(x) if (is.null(v)) v <- names(x) r <- x[stats::complete.cases(x[v]),] return(r) } #' Remove unused levels #' #' This function removes unused levels of a factor or in a data.frame. See examples. #' #' @param x a factor or a data frame #' @param v a list of variables (optional, if \code{x} is a data frame) #' @details #' If \code{x} is a data frame, only factor variables of \code{x} will be impacted. #' If a list of variables is provided through \code{v}, only the unused levels of the #' specified variables will be removed. 
#' @author Joseph Larmarange <joseph@@larmarange.net> #' @examples #' df <- data.frame(v1=c("a","b","a","b"),v2=c("x","x","y","y")) #' df$v1 <- factor(df$v1,c("a","b","c")) #' df$v2 <- factor(df$v2,c("x","y","z")) #' df #' str(df) #' str(rm.unused.levels(df)) #' str(rm.unused.levels(df,"v1")) #' @export rm.unused.levels `rm.unused.levels` <- function(x, v=NULL) { if (!is.data.frame(x) & !is.factor(x)) stop("x must be a factor or a data.frame.") if (is.factor(x)) x <- factor(x) if (is.data.frame(x)) { if (is.null(v)) v <- names(x) for (i in 1:length(x)) { if (is.factor(x[[i]]) & names(x)[i] %in% v) x[[i]] <- factor(x[[i]]) } } return(x) }
/R/tools.R
no_license
serenity-r/questionr
R
false
false
6,217
r
#' Transform an object into HTML and copy it for export #' #' This function transforms its argument to HTML and then copy it to the #' clipboard or to a file for later use in an external application. #' #' @aliases copie copie.default clipcopy.default #' @param obj object to be copied #' @param ... arguments passed to \code{R2HTML::HTML} #' @details #' Under linux, this function requires that \code{xclip} is #' installed on the system to copy to the clipboard. #' @examples #' data(iris) #' tab <- table(cut(iris$Sepal.Length,8),cut(iris$Sepal.Width,4)) #' \dontrun{copie(tab)} #' ptab <- rprop(tab, percent=TRUE) #' \dontrun{clipcopy(ptab)} #' @seealso \code{\link[R2HTML]{HTML}}, \code{\link[questionr]{format.proptab}} #' @keywords connection #' @export `clipcopy` <- function (obj, ...) { UseMethod("clipcopy") } #' @export copie <- clipcopy #' @return \code{NULL} #' #' @rdname clipcopy #' @aliases copie.proptab #' @param append if TRUE, append to the file instead of replacing it #' @param file if TRUE, export to a file instead of the clipboard #' @param filename name of the file to export to #' @param clipboard.size under Windows, size of the clipboard in kB #' @export `clipcopy.default` <- function (obj, append=FALSE, file=FALSE, filename="temp.html", clipboard.size=4096, ...) { if (file) { conn <- file(filename, "w", encoding="Latin1") R2HTML::HTML(obj, file=conn, append=append) close(conn) return() } if (Sys.info()["sysname"] == "Windows") { connection.name <- paste("clipboard", format(clipboard.size, scientific=1000), sep="-") conn <- file(connection.name, "w", encoding="Latin1") } if (Sys.info()["sysname"] == "Darwin") conn <- pipe("pbcopy", "w", encoding="Latin1") if (Sys.info()["sysname"] == "Linux") conn <- pipe("xclip -i", "w", encoding="Latin1") R2HTML::HTML(obj, file = conn, append = append, ...) 
close(conn) } #' @export copie.default <- clipcopy.default #' @return \code{NULL} #' #' @rdname clipcopy #' @aliases copie.proptab #' @param percent whether to add a percent sign in each cell #' @param digits number of digits to display #' @param justify justification #' @seealso \code{\link[questionr]{clipcopy}}, \code{\link[questionr]{format.proptab}} #' @export `clipcopy.proptab` <- function (obj, percent=NULL, digits=NULL, justify="right", ...) { if (!inherits(obj, "proptab")) stop("Object is not of class proptab") obj <- format.proptab(obj, digits=digits, percent=percent, justify=justify) copie.default(obj, ...) } #' @export copie.proptab <- clipcopy.proptab #' Rename a data frame column #' #' #' @aliases renomme.variable #' @param df data frame #' @param old old name #' @param new new name #' @keywords manip #' @return A data frame with the column named "old" renamed as "new" #' @examples #' data(iris) #' str(iris) #' iris <- rename.variable(iris, "Species", "especes") #' str(iris) #' @export renomme.variable rename.variable `rename.variable` <- function (df, old, new) { names(df)[which(names(df)==old)] <- new df } #' @export renomme.variable <- rename.variable #' Determine all duplicate elements #' #' The native \link{duplicated} function determines which elements of a vector #' or data frame are duplicates of elements already observed in the vector or the #' data frame provided. Therefore, only the second occurence (or third or nth) #' of an element is considered as a duplicate. #' \code{duplicated2} is similar but will also mark the first occurence as a #' duplicate (see examples). #' #' @param x a vector, a data frame or a matrix #' @return A logical vector indicated wich elements are duplicated in \code{x}. 
#' @source \url{http://forums.cirad.fr/logiciel-R/viewtopic.php?p=2968} #' @seealso \link{duplicated} #' @examples #' df <- data.frame(x=c("a","b","c","b","d","c"),y=c(1,2,3,2,4,3)) #' df #' duplicated(df) #' duplicated2(df) #' @export duplicated2 `duplicated2` <- function(x){ if (sum(dup <- duplicated(x))==0) return(dup) if (class(x) %in% c("data.frame","matrix")) duplicated(rbind(x[dup,],x))[-(1:sum(dup))] else duplicated(c(x[dup],x))[-(1:sum(dup))] } #' Remove observations with missing values #' #' \code{na.rm} is similar to \link{na.omit} but allows to specify a list of #' variables to take into account. #' #' @param x a data frame #' @param v a list of variables #' @details #' If \code{v} is not specified, the result of \code{na.rm} will be the same as #' \link{na.omit}. If a list of variables is specified through \code{v}, only #' observations with a missing value (\code{NA}) for one of the specified #' variables will be removed from \code{x}. See examples. #' @author Joseph Larmarange <joseph@@larmarange.net> #' @seealso \link{na.omit} #' @examples #' df <- data.frame(x = c(1, 2, 3), y = c(0, 10, NA), z= c("a",NA,"b")) #' df #' na.omit(df) #' na.rm(df) #' na.rm(df, c("x","y")) #' na.rm(df, "z") #' @export na.rm `na.rm` <- function(x, v=NULL){ if (!is.data.frame(x)) x <- as.data.frame(x) if (is.null(v)) v <- names(x) r <- x[stats::complete.cases(x[v]),] return(r) } #' Remove unused levels #' #' This function removes unused levels of a factor or in a data.frame. See examples. #' #' @param x a factor or a data frame #' @param v a list of variables (optional, if \code{x} is a data frame) #' @details #' If \code{x} is a data frame, only factor variables of \code{x} will be impacted. #' If a list of variables is provided through \code{v}, only the unused levels of the #' specified variables will be removed. 
#' @author Joseph Larmarange <joseph@@larmarange.net> #' @examples #' df <- data.frame(v1=c("a","b","a","b"),v2=c("x","x","y","y")) #' df$v1 <- factor(df$v1,c("a","b","c")) #' df$v2 <- factor(df$v2,c("x","y","z")) #' df #' str(df) #' str(rm.unused.levels(df)) #' str(rm.unused.levels(df,"v1")) #' @export rm.unused.levels `rm.unused.levels` <- function(x, v=NULL) { if (!is.data.frame(x) & !is.factor(x)) stop("x must be a factor or a data.frame.") if (is.factor(x)) x <- factor(x) if (is.data.frame(x)) { if (is.null(v)) v <- names(x) for (i in 1:length(x)) { if (is.factor(x[[i]]) & names(x)[i] %in% v) x[[i]] <- factor(x[[i]]) } } return(x) }
##1. identify files to read in filesToProcess <- list.files(path = my_path, pattern = my_fileext, full.names = T, recursive = T) ##2. Iterate over each of those file names with lapply listOfFiles <- mclapply(filesToProcess, function(x) { mydf = fread(x, header=F, colClasses = c("character", "numeric"), nrows = my_nrows) setnames(mydf, colnames(mydf), c("rowid", sprintf("%s", str_extract(basename(x), pattern = my_filename)))) }, mc.preschedule = F, mc.cores = my_cores) ##3. Merge all of the objects in the list together with Reduce or data.table::merge.data.table. ## for faster and less memory usage, specify hidden function, data.table:::merge.data.table - although I've tested this function and works well, double check final merged output for discrepency, if any. out_cnt <- Reduce(function(x,y) {data.table:::merge.data.table(x,y, by = "rowid")}, listOfFiles) ##4. Export merged table in tsv and rds format write.table(out_cnt, sprintf("merged_%s_%s.tsv", make.names(format(Sys.time(),"%b_%d_%Y_%H_%M_%S_%Z")), my_out), quote=F, sep="\t", row.names=F) saveRDS(out_cnt, file = sprintf("merged_%s_%s.rds", make.names(format(Sys.time(),"%b_%d_%Y_%H_%M_%S_%Z")), my_out)) print(sprintf("Exported merged table within work directory in tsv and rds format with file name merged_%s_%s", make.names(format(Sys.time(),"%b_%d_%Y_%H_%M_%S_%Z")), my_out)) }
/src/merge_tables_datatable.R
no_license
shaman-narayanasamy/LAO-time-series
R
false
false
1,366
r
##1. identify files to read in filesToProcess <- list.files(path = my_path, pattern = my_fileext, full.names = T, recursive = T) ##2. Iterate over each of those file names with lapply listOfFiles <- mclapply(filesToProcess, function(x) { mydf = fread(x, header=F, colClasses = c("character", "numeric"), nrows = my_nrows) setnames(mydf, colnames(mydf), c("rowid", sprintf("%s", str_extract(basename(x), pattern = my_filename)))) }, mc.preschedule = F, mc.cores = my_cores) ##3. Merge all of the objects in the list together with Reduce or data.table::merge.data.table. ## for faster and less memory usage, specify hidden function, data.table:::merge.data.table - although I've tested this function and works well, double check final merged output for discrepency, if any. out_cnt <- Reduce(function(x,y) {data.table:::merge.data.table(x,y, by = "rowid")}, listOfFiles) ##4. Export merged table in tsv and rds format write.table(out_cnt, sprintf("merged_%s_%s.tsv", make.names(format(Sys.time(),"%b_%d_%Y_%H_%M_%S_%Z")), my_out), quote=F, sep="\t", row.names=F) saveRDS(out_cnt, file = sprintf("merged_%s_%s.rds", make.names(format(Sys.time(),"%b_%d_%Y_%H_%M_%S_%Z")), my_out)) print(sprintf("Exported merged table within work directory in tsv and rds format with file name merged_%s_%s", make.names(format(Sys.time(),"%b_%d_%Y_%H_%M_%S_%Z")), my_out)) }
library(shiny) library(ggplot2) library(tidyr) library(dplyr) library(plotly) # read in data population <- read.csv("pop.csv", stringsAsFactors = FALSE) life_expectancy <- read.csv("life_exp.csv", stringsAsFactors = FALSE, skip=4) fertility <- read.csv("fert.csv", stringsAsFactors = FALSE, skip=4) meta <- read.csv("metadata.csv", header = TRUE) # change df to long population <- gather(population, year, pop, X1960:X2014)[,c("Country.Name","year","pop","Country.Code")] life_expectancy <- gather(life_expectancy, year, life_exp, X1960:X2014)[,c("Country.Name","year","life_exp","Country.Code")] fertility <- gather(fertility, year, fert, X1960:X2014)[,c("Country.Name","year","fert","Country.Code")] # join dfs, get remove Xs in year combo_df <- inner_join(population, inner_join(life_expectancy, fertility)) combo_df$year <- substr(combo_df$year, 2, nchar(combo_df$year)) # use metadata to get region for each country combo_df <- inner_join(combo_df, meta[,c("Country.Code","Region")]) # get rid of empty regions combo_df[combo_df == ""] <- NA combo_df <- combo_df[!is.na(combo_df$Region),] #change some col names colnames(combo_df)[3] <- "Population" colnames(combo_df)[5] <- "Life.Expectancy" colnames(combo_df)[6] <- "Fertility" ui <- fluidPage( titlePanel("Gapminder World Development Indicators"), mainPanel( plotlyOutput("scatter"), sliderInput("year", "Year", min=1960, max=2014, step=1, value = 1960, width='600px', animate = animationOptions(interval=300), sep="") ), sidebarPanel( sliderInput("Population", "Population", value=5, min=1, max=10, step=1) ), fluidRow( column(3, helpText("Note: To view only one region, click", "on all the regions you aren't interested", "in looking at.")) ) ) server <- function(input, output){ output$scatter <- renderPlotly({ subset_plot <- combo_df[combo_df$year==input$year,] plt <- ggplot(subset_plot, aes(x=Life.Expectancy, y=Fertility, key=Country.Name)) plt <- plt + geom_point(aes(fill=Region, size=Population), alpha=.85, shape=21, 
colour="gray13") plt <- plt + scale_size(range = c(1, input$Population)*2, guide = 'none') plt <- plt + xlim(0, 90) + ylim(0, 9) plt <- plt + scale_fill_manual(values = c('royalblue','orangered','orange','forestgreen', 'darkmagenta','lightseagreen','indianred'), name="") plt <- plt + theme_bw() plt <- plt + xlab("Life Expectancy") + ylab("Fertility") plt <- plt + ggtitle("Life Expectancy compared to Fertility Rate") plt <- plt + theme(panel.border = element_blank(), axis.line = element_line(colour = "black"), axis.ticks.x = element_blank(), axis.ticks.y = element_blank()) }) } shinyApp(ui = ui, server = server)
/app.R
no_license
usfviz/hleiber-hw2
R
false
false
2,891
r
library(shiny) library(ggplot2) library(tidyr) library(dplyr) library(plotly) # read in data population <- read.csv("pop.csv", stringsAsFactors = FALSE) life_expectancy <- read.csv("life_exp.csv", stringsAsFactors = FALSE, skip=4) fertility <- read.csv("fert.csv", stringsAsFactors = FALSE, skip=4) meta <- read.csv("metadata.csv", header = TRUE) # change df to long population <- gather(population, year, pop, X1960:X2014)[,c("Country.Name","year","pop","Country.Code")] life_expectancy <- gather(life_expectancy, year, life_exp, X1960:X2014)[,c("Country.Name","year","life_exp","Country.Code")] fertility <- gather(fertility, year, fert, X1960:X2014)[,c("Country.Name","year","fert","Country.Code")] # join dfs, get remove Xs in year combo_df <- inner_join(population, inner_join(life_expectancy, fertility)) combo_df$year <- substr(combo_df$year, 2, nchar(combo_df$year)) # use metadata to get region for each country combo_df <- inner_join(combo_df, meta[,c("Country.Code","Region")]) # get rid of empty regions combo_df[combo_df == ""] <- NA combo_df <- combo_df[!is.na(combo_df$Region),] #change some col names colnames(combo_df)[3] <- "Population" colnames(combo_df)[5] <- "Life.Expectancy" colnames(combo_df)[6] <- "Fertility" ui <- fluidPage( titlePanel("Gapminder World Development Indicators"), mainPanel( plotlyOutput("scatter"), sliderInput("year", "Year", min=1960, max=2014, step=1, value = 1960, width='600px', animate = animationOptions(interval=300), sep="") ), sidebarPanel( sliderInput("Population", "Population", value=5, min=1, max=10, step=1) ), fluidRow( column(3, helpText("Note: To view only one region, click", "on all the regions you aren't interested", "in looking at.")) ) ) server <- function(input, output){ output$scatter <- renderPlotly({ subset_plot <- combo_df[combo_df$year==input$year,] plt <- ggplot(subset_plot, aes(x=Life.Expectancy, y=Fertility, key=Country.Name)) plt <- plt + geom_point(aes(fill=Region, size=Population), alpha=.85, shape=21, 
colour="gray13") plt <- plt + scale_size(range = c(1, input$Population)*2, guide = 'none') plt <- plt + xlim(0, 90) + ylim(0, 9) plt <- plt + scale_fill_manual(values = c('royalblue','orangered','orange','forestgreen', 'darkmagenta','lightseagreen','indianred'), name="") plt <- plt + theme_bw() plt <- plt + xlab("Life Expectancy") + ylab("Fertility") plt <- plt + ggtitle("Life Expectancy compared to Fertility Rate") plt <- plt + theme(panel.border = element_blank(), axis.line = element_line(colour = "black"), axis.ticks.x = element_blank(), axis.ticks.y = element_blank()) }) } shinyApp(ui = ui, server = server)
\name{yrbss} \alias{yrbss} \docType{data} \title{ Youth Risk Behavior Surveillance System (YRBSS) } \description{ The YRBSS contains surveys conducted from 1991-2013 nationwide for the United States as a whole and in multiple states and districts individually. } \usage{data(yrbss)} \format{ A data frame with 13583 observations on the following 13 variables. \describe{ \item{\code{age}}{Age of participant, ranging from 12 to 18} \item{\code{gender}}{Gender of participant, can be \code{male} or \code{female}} \item{\code{grade}}{Represents grade in high school} \item{\code{hispanic}}{Indicates a participant's status as \code{hispanic} or \code{not}} \item{\code{race}}{Indicates a participant's race} \item{\code{height}}{Self reported height in meters} \item{\code{weight}}{Self reported weight in kgs} \item{\code{helmet_12m}}{During the 12 months preceding the survey, how frequently the participant wore a helmet while riding a bicycle.} \item{\code{text_while_driving_30d}}{During the 30 days preceding the survey, how frequently the participant texted or emailed while driving.} \item{\code{physically_active_7d}}{Days per week that the participant is physically active} \item{\code{hours_tv_per_school_day}}{The average number of hours of TV watched by the participant on a schoolday.} \item{\code{strength_training_7d}}{Out of the 7 days preceding the survey, how many days the participant did exercises to strengthen or tone their muscles (such as push-ups, sit-ups, or weight lifting).} \item{\code{school_night_hours_sleep}}{On an average school night, the number of hours of sleep the participant gets.} } } \details{ %% ~~ If necessary, more details than the __description__ above ~~ } \source{ %% ~~ reference to a publication or URL from which the data were obtained ~~ The data is provided by the CDC at http://www.cdc.gov/healthyyouth/data/yrbs/index.htm. 
} \references{ %% ~~ possibly secondary sources and usages ~~ } \examples{ % data(yrbss) % ## maybe str(yrbss) ; plot(yrbss) ... } \keyword{datasets}
/r_supplement/draft_package_OIBiostat/OIBioStat/man/yrbss.Rd
no_license
morganfb/R_companion_source
R
false
false
2,090
rd
\name{yrbss} \alias{yrbss} \docType{data} \title{ Youth Risk Behavior Surveillance System (YRBSS) } \description{ The YRBSS contains surveys conducted from 1991-2013 nationwide for the United States as a whole and in multiple states and districts individually. } \usage{data(yrbss)} \format{ A data frame with 13583 observations on the following 13 variables. \describe{ \item{\code{age}}{Age of participant, ranging from 12 to 18} \item{\code{gender}}{Gender of participant, can be \code{male} or \code{female}} \item{\code{grade}}{Represents grade in high school} \item{\code{hispanic}}{Indicates a participant's status as \code{hispanic} or \code{not}} \item{\code{race}}{Indicates a participant's race} \item{\code{height}}{Self reported height in meters} \item{\code{weight}}{Self reported weight in kgs} \item{\code{helmet_12m}}{During the 12 months preceding the survey, how frequently the participant wore a helmet while riding a bicycle.} \item{\code{text_while_driving_30d}}{During the 30 days preceding the survey, how frequently the participant texted or emailed while driving.} \item{\code{physically_active_7d}}{Days per week that the participant is physically active} \item{\code{hours_tv_per_school_day}}{The average number of hours of TV watched by the participant on a schoolday.} \item{\code{strength_training_7d}}{Out of the 7 days preceding the survey, how many days the participant did exercises to strengthen or tone their muscles (such as push-ups, sit-ups, or weight lifting).} \item{\code{school_night_hours_sleep}}{On an average school night, the number of hours of sleep the participant gets.} } } \details{ %% ~~ If necessary, more details than the __description__ above ~~ } \source{ %% ~~ reference to a publication or URL from which the data were obtained ~~ The data is provided by the CDC at http://www.cdc.gov/healthyyouth/data/yrbs/index.htm. 
} \references{ %% ~~ possibly secondary sources and usages ~~ } \examples{ % data(yrbss) % ## maybe str(yrbss) ; plot(yrbss) ... } \keyword{datasets}
#' @title Generic Elsevier Search #' #' @description Runs GET on generic Elsevier Search #' @param query Query to run, not overall query, but `queryParam` query #' @param type Type of search. See \url{https://dev.elsevier.com/api_docs.html} #' @param search_type Type of search if \code{type = "search"}. #' See \url{https://dev.elsevier.com/api_docs.html} #' @param api_key Elsevier API key #' @param headers Headers passed to \code{\link{add_headers}}, #' passed to \code{\link{GET}} #' @param content_type Is the data content or feedback? #' @param root_http address to use for query #' @param http_end string to add to end of http specification #' (done using \code{paste0}) #' @param verbose Print messages from specification #' @param api_key_error Should there be an error if no API key? #' @param ... Options passed to queryParam for \code{\link{GET}} #' @return List of elements, content and the \code{GET} request #' @export #' @examples \dontrun{ #' query_string = "affil(hopkins)" #' # Use affiliation query #' s = generic_elsevier_api(query = query_string, #' type = "search", search_type = "affiliation", #' api_key = api_key, #' verbose = FALSE) #' #' # Use author query #' s = generic_elsevier_api(query = query_string, #' type = "search", search_type = "author", #' api_key = api_key, #' verbose = FALSE) #' #' # Query abstract by pii #' s = generic_elsevier_api(query = "", #' type = "abstract", http_end = "pii/S1053811915002700", #' api_key = api_key, #' verbose = FALSE) #' } #' @importFrom httr GET add_headers generic_elsevier_api <- function( query = NULL, type = c("search", "article", "entitlement", "recommendation", "object", "fragment", "abstract", "affiliation", "embase", "author", "serial", "nonserial", "subject", "holdings", "citation-count", "citations"), search_type = c("affiliation", "author", "scopus", "scidir", "scidir-object"), api_key = NULL, headers = NULL, content_type = c("content", "feedback"), root_http = "https://api.elsevier.com", http_end = NULL, 
verbose = TRUE, api_key_error = TRUE, ... ){ api_key = get_api_key(api_key, error = api_key_error) type = match.arg(type) content_type = match.arg(content_type) root_http = paste(root_http, content_type, sep = "/") search_type = switch(type, search = match.arg(search_type), embase = "article", serial = "title", nonserial = "title", entitlement = "entitlement", holdings = "report.url", "citation-count" = "citation-count", citations = "citations" ) if (type %in% c("entitlement","recommendation")) { type = "article" } if (type %in% c("citation-count", "citations")) { type = "abstract" } http = paste(type, search_type, sep = "/") if (!is.null(http_end)) { http = paste0(http, http_end) } http = gsub("/$", "", http) http = gsub("//", "/", http) http = paste(root_http, http, sep = "/") if (verbose) { parsed_url = httr::parse_url(http) parsed_url$query$APIKey = NULL parsed_url = httr::build_url(parsed_url) message(paste0("HTTP specified is:", parsed_url, "\n")) } qlist = list(...) qlist$query = query qlist$apiKey = api_key if (length(qlist) > 0) { r = GET(http, query = qlist, add_headers(headers) ) } else { r = GET(http, add_headers(headers) ) } cr = content(r) return(list(get_statement = r, content = cr)) }
/R/generic_elsevier_search.R
no_license
Kimberly-Yang/rscopus
R
false
false
3,672
r
#' @title Generic Elsevier Search #' #' @description Runs GET on generic Elsevier Search #' @param query Query to run, not overall query, but `queryParam` query #' @param type Type of search. See \url{https://dev.elsevier.com/api_docs.html} #' @param search_type Type of search if \code{type = "search"}. #' See \url{https://dev.elsevier.com/api_docs.html} #' @param api_key Elsevier API key #' @param headers Headers passed to \code{\link{add_headers}}, #' passed to \code{\link{GET}} #' @param content_type Is the data content or feedback? #' @param root_http address to use for query #' @param http_end string to add to end of http specification #' (done using \code{paste0}) #' @param verbose Print messages from specification #' @param api_key_error Should there be an error if no API key? #' @param ... Options passed to queryParam for \code{\link{GET}} #' @return List of elements, content and the \code{GET} request #' @export #' @examples \dontrun{ #' query_string = "affil(hopkins)" #' # Use affiliation query #' s = generic_elsevier_api(query = query_string, #' type = "search", search_type = "affiliation", #' api_key = api_key, #' verbose = FALSE) #' #' # Use author query #' s = generic_elsevier_api(query = query_string, #' type = "search", search_type = "author", #' api_key = api_key, #' verbose = FALSE) #' #' # Query abstract by pii #' s = generic_elsevier_api(query = "", #' type = "abstract", http_end = "pii/S1053811915002700", #' api_key = api_key, #' verbose = FALSE) #' } #' @importFrom httr GET add_headers generic_elsevier_api <- function( query = NULL, type = c("search", "article", "entitlement", "recommendation", "object", "fragment", "abstract", "affiliation", "embase", "author", "serial", "nonserial", "subject", "holdings", "citation-count", "citations"), search_type = c("affiliation", "author", "scopus", "scidir", "scidir-object"), api_key = NULL, headers = NULL, content_type = c("content", "feedback"), root_http = "https://api.elsevier.com", http_end = NULL, 
verbose = TRUE, api_key_error = TRUE, ... ){ api_key = get_api_key(api_key, error = api_key_error) type = match.arg(type) content_type = match.arg(content_type) root_http = paste(root_http, content_type, sep = "/") search_type = switch(type, search = match.arg(search_type), embase = "article", serial = "title", nonserial = "title", entitlement = "entitlement", holdings = "report.url", "citation-count" = "citation-count", citations = "citations" ) if (type %in% c("entitlement","recommendation")) { type = "article" } if (type %in% c("citation-count", "citations")) { type = "abstract" } http = paste(type, search_type, sep = "/") if (!is.null(http_end)) { http = paste0(http, http_end) } http = gsub("/$", "", http) http = gsub("//", "/", http) http = paste(root_http, http, sep = "/") if (verbose) { parsed_url = httr::parse_url(http) parsed_url$query$APIKey = NULL parsed_url = httr::build_url(parsed_url) message(paste0("HTTP specified is:", parsed_url, "\n")) } qlist = list(...) qlist$query = query qlist$apiKey = api_key if (length(qlist) > 0) { r = GET(http, query = qlist, add_headers(headers) ) } else { r = GET(http, add_headers(headers) ) } cr = content(r) return(list(get_statement = r, content = cr)) }
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/subject_pronoun_type.R \name{plot.subject_pronoun_type} \alias{plot.subject_pronoun_type} \title{Plots an subject_pronoun_type Object} \usage{ \method{plot}{subject_pronoun_type}(x, type = 1, ...) } \arguments{ \item{x}{The subject_pronoun_type object.} \item{type}{An integer of \code{1}, \code{2}, \code{3}) corresponding to 1 - heat map; 2 - lexical dispersion plot; 3 - facetted bar graph.} \item{\ldots}{Other arguments passed to \code{\link[qdap]{qheat}}, \code{\link[qdap]{dispersion_plot}}, or \code{\link[ggplot2]{facet_wrap}}.} } \description{ Plots an subject_pronoun_type object. }
/man/plot.subject_pronoun_type.Rd
no_license
hoodaly/qdap
R
false
true
677
rd
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/subject_pronoun_type.R
\name{plot.subject_pronoun_type}
\alias{plot.subject_pronoun_type}
\title{Plots a subject_pronoun_type Object}
\usage{
\method{plot}{subject_pronoun_type}(x, type = 1, ...)
}
\arguments{
\item{x}{The subject_pronoun_type object.}
\item{type}{An integer of \code{1}, \code{2}, or \code{3} corresponding to 1 - heat map; 2 - lexical dispersion plot; 3 - faceted bar graph.}
\item{\ldots}{Other arguments passed to \code{\link[qdap]{qheat}}, \code{\link[qdap]{dispersion_plot}}, or \code{\link[ggplot2]{facet_wrap}}.}
}
\description{
Plots a subject_pronoun_type object.
}
### Read in the year-101 (ROC calendar) comprehensive income tax totals per village ###
# NOTE(review): hard-coded absolute working directory; adjust for your machine.
setwd('/Users/sheng/Documents/DSP/Power_Hackathon')

# Peek at the first lines of the raw file to confirm encoding / layout.
readLines(file("origin_data/高雄市綜合所得.CSV", encoding = "UTF-8"), n = 3)
income101 <- read.csv("origin_data/高雄市綜合所得.CSV", skip = 1, header = TRUE,
                      stringsAsFactors = FALSE)

# Drop the per-district "other" and "total" summary rows.
library(dplyr)
income101 %>% filter(村里 != "其 他", 村里 != "合 計") -> income101

# Expand the annual data into bi-monthly records (months 01,03,05,07,09,11)
# and carry the year-101 figures forward to years 102-104.
# 898 = number of villages remaining after the filter above.
month_1 <- rep(c(1, 3, 5, 7, 9), each = 898)
month_2 <- rep(11, each = 898)
month <- c(paste('0', month_1, sep = ''), month_2)
income101_month <- NULL
for (i in 1:6) {
  income101_month <- bind_rows(income101_month, income101)
}
income101_month %>%
  mutate(月份 = month,
         行政區域 = paste(鄉鎮市區, 村里, sep = '')) -> income101_month
year <- rep(101:104, each = 5388)  # 5388 = 898 villages x 6 bi-monthly periods
income <- NULL
for (i in 1:4) {
  income <- bind_rows(income, income101_month)
}
income %>%
  mutate(統計年月 = paste(year, month, sep = ''),
         綜合所得IQR = 第三分位數 - 第一分位數) %>%
  select(統計年月, 行政區域, 納稅單位, 綜合所得總額, 平均數, 中位數,
         綜合所得IQR, 第一分位數, 第三分位數, 標準差, 變異係數) -> income
colnames(income) <- c(colnames(income)[1], '行政區域', '納稅單位', '綜合所得總額',
                      '綜合所得平均數', '綜合所得中位數', '綜合所得IQR',
                      '綜合所得Q1', '綜合所得Q3', '綜合所得標準差',
                      '綜合所得變異係數')
income %>% filter(substr(統計年月, 1, 3) == '104') -> income_104

# Export the expanded data.
write.csv(income, "origin_data/income_twomonth.csv", row.names = FALSE)
write.csv(income_104, "origin_data/104_income_twomonth.csv", row.names = FALSE)

# Remap villages whose names changed or that were consolidated, then merge.
income_104 <- read.csv("origin_data/104_income_twomonth.csv", stringsAsFactors = FALSE)
income_104[income_104$行政區域 == "左營區自立里", 2] <- "左營區明建里"
income_104[income_104$行政區域 == "左營區合羣里", 2] <- "左營區合群里"
income_104[income_104$行政區域 == "左營區復興里", 2] <- "左營區永清里"
income_104[income_104$行政區域 == "左營區自治里", 2] <- "左營區合群里"
income_104[income_104$行政區域 == "左營區自勉里", 2] <- "左營區崇實里"
income_104[income_104$行政區域 == "三民區港北里", 2] <- "三民區博愛里"
income_104[income_104$行政區域 == "鳳山區海風里", 2] <- "鳳山區海光里"
income_104[income_104$行政區域 == "岡山區臺上里", 2] <- "岡山區台上里"
income_104[income_104$行政區域 == "岡山區爲隨里", 2] <- "岡山區為隨里"
# FIX: the 左營區自治里 remapping appeared twice in the original; the second
# occurrence (a no-op after the first) has been removed.
income_104[income_104$行政區域 == "梓官區茄典里", 2] <- "梓官區典寶里"
income_104[income_104$行政區域 == "阿蓮區峰山里", 2] <- "阿蓮區峯山里"
income_104[income_104$行政區域 == "內門區內豐里", 2] <- "內門區內豊里"
income_104[income_104$行政區域 == "那瑪夏區達卡努瓦", 2] <- "那瑪夏區達卡努瓦里"

# Aggregate the merged villages and overwrite the year-104 file.
income_104 %>%
  filter(行政區域 != "鳳山區誠正里") %>%
  group_by(統計年月, 行政區域) %>%
  summarise(納稅單位_n = sum(納稅單位),
            綜合所得總額_n = sum(綜合所得總額),
            綜合所得平均數_n = mean(綜合所得平均數),
            綜合所得中位數_n = mean(綜合所得中位數)) %>%
  write.csv("origin_data/104_income_twomonth.csv", row.names = FALSE)
/Data_ETL_Code/匯入101各村里綜合所得稅所得總額.R
no_license
unityculture/Power_Hackathon
R
false
false
3,514
r
### Read in the year-101 (ROC calendar) comprehensive income tax totals per village ###
# NOTE(review): hard-coded absolute working directory; adjust for your machine.
setwd('/Users/sheng/Documents/DSP/Power_Hackathon')

# Peek at the first lines of the raw file to confirm encoding / layout.
readLines(file("origin_data/高雄市綜合所得.CSV", encoding = "UTF-8"), n = 3)
income101 <- read.csv("origin_data/高雄市綜合所得.CSV", skip = 1, header = TRUE,
                      stringsAsFactors = FALSE)

# Drop the per-district "other" and "total" summary rows.
library(dplyr)
income101 %>% filter(村里 != "其 他", 村里 != "合 計") -> income101

# Expand the annual data into bi-monthly records (months 01,03,05,07,09,11)
# and carry the year-101 figures forward to years 102-104.
# 898 = number of villages remaining after the filter above.
month_1 <- rep(c(1, 3, 5, 7, 9), each = 898)
month_2 <- rep(11, each = 898)
month <- c(paste('0', month_1, sep = ''), month_2)
income101_month <- NULL
for (i in 1:6) {
  income101_month <- bind_rows(income101_month, income101)
}
income101_month %>%
  mutate(月份 = month,
         行政區域 = paste(鄉鎮市區, 村里, sep = '')) -> income101_month
year <- rep(101:104, each = 5388)  # 5388 = 898 villages x 6 bi-monthly periods
income <- NULL
for (i in 1:4) {
  income <- bind_rows(income, income101_month)
}
income %>%
  mutate(統計年月 = paste(year, month, sep = ''),
         綜合所得IQR = 第三分位數 - 第一分位數) %>%
  select(統計年月, 行政區域, 納稅單位, 綜合所得總額, 平均數, 中位數,
         綜合所得IQR, 第一分位數, 第三分位數, 標準差, 變異係數) -> income
colnames(income) <- c(colnames(income)[1], '行政區域', '納稅單位', '綜合所得總額',
                      '綜合所得平均數', '綜合所得中位數', '綜合所得IQR',
                      '綜合所得Q1', '綜合所得Q3', '綜合所得標準差',
                      '綜合所得變異係數')
income %>% filter(substr(統計年月, 1, 3) == '104') -> income_104

# Export the expanded data.
write.csv(income, "origin_data/income_twomonth.csv", row.names = FALSE)
write.csv(income_104, "origin_data/104_income_twomonth.csv", row.names = FALSE)

# Remap villages whose names changed or that were consolidated, then merge.
income_104 <- read.csv("origin_data/104_income_twomonth.csv", stringsAsFactors = FALSE)
income_104[income_104$行政區域 == "左營區自立里", 2] <- "左營區明建里"
income_104[income_104$行政區域 == "左營區合羣里", 2] <- "左營區合群里"
income_104[income_104$行政區域 == "左營區復興里", 2] <- "左營區永清里"
income_104[income_104$行政區域 == "左營區自治里", 2] <- "左營區合群里"
income_104[income_104$行政區域 == "左營區自勉里", 2] <- "左營區崇實里"
income_104[income_104$行政區域 == "三民區港北里", 2] <- "三民區博愛里"
income_104[income_104$行政區域 == "鳳山區海風里", 2] <- "鳳山區海光里"
income_104[income_104$行政區域 == "岡山區臺上里", 2] <- "岡山區台上里"
income_104[income_104$行政區域 == "岡山區爲隨里", 2] <- "岡山區為隨里"
# FIX: the 左營區自治里 remapping appeared twice in the original; the second
# occurrence (a no-op after the first) has been removed.
income_104[income_104$行政區域 == "梓官區茄典里", 2] <- "梓官區典寶里"
income_104[income_104$行政區域 == "阿蓮區峰山里", 2] <- "阿蓮區峯山里"
income_104[income_104$行政區域 == "內門區內豐里", 2] <- "內門區內豊里"
income_104[income_104$行政區域 == "那瑪夏區達卡努瓦", 2] <- "那瑪夏區達卡努瓦里"

# Aggregate the merged villages and overwrite the year-104 file.
income_104 %>%
  filter(行政區域 != "鳳山區誠正里") %>%
  group_by(統計年月, 行政區域) %>%
  summarise(納稅單位_n = sum(納稅單位),
            綜合所得總額_n = sum(綜合所得總額),
            綜合所得平均數_n = mean(綜合所得平均數),
            綜合所得中位數_n = mean(綜合所得中位數)) %>%
  write.csv("origin_data/104_income_twomonth.csv", row.names = FALSE)
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/pulver.R
\name{write_databel}
\alias{write_databel}
\title{Converts matrix of type "double" to a \code{databel} object file}
\usage{
write_databel(data, file)
}
\arguments{
\item{data}{matrix of type "double"}
\item{file}{file name of a \code{databel} object or text file}
}
\description{
Converts a matrix of type "double" to a \code{databel} object file.
}
\references{
\url{https://www.genabel.org/packages/DatABEL}
}
/man/write_databel.Rd
no_license
smolnos/pulver
R
false
true
489
rd
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/pulver.R
\name{write_databel}
\alias{write_databel}
\title{Converts matrix of type "double" to a \code{databel} object file}
\usage{
write_databel(data, file)
}
\arguments{
\item{data}{matrix of type "double"}
\item{file}{file name of a \code{databel} object or text file}
}
\description{
Converts a matrix of type "double" to a \code{databel} object file.
}
\references{
\url{https://www.genabel.org/packages/DatABEL}
}
# Plot density histograms of the 20 most important features, colored by fault
# severity, and save each plot to ../figures/<attribute>.jpg.
# Assumes `train` and `importance_matrix` already exist in the workspace
# (produced upstream, e.g. by an xgboost importance run) -- TODO confirm.
library(ggplot2)

train$fault_severity <- as.factor(train$fault_severity)
important_list <- importance_matrix$Feature[1:20]

for (att in important_list) {
  # FIX: is.numeric() is TRUE for both integer and double columns and replaces
  # the fragile string comparison `class(x) == "numeric" | class(x) == "integer"`.
  if (is.numeric(train[[att]])) {
    # Density histogram, severity levels overlaid.
    plot <- ggplot(train, aes_string(att, fill = 'fault_severity')) +
      geom_histogram(alpha = 0.5, aes(y = ..density..),
                     position = 'identity', bins = 100) +
      ggtitle(paste0('Histogram of attribute: ', att))
    print(plot)
    temp <- ggplot_build(plot)$data[[1]]  # NOTE(review): computed but unused
    ggsave(paste0('../figures/', att, '.jpg'))
  }
}

# # Count histogram (alternative version, kept for reference)
# ggplot(train, aes_string(att, fill = 'fault_severity')) +
#   geom_histogram(alpha = 0.5, position = 'identity', bins = 100) +
#   ggtitle(paste0('Histogram of attribute: ', att))
/HomeDepot/R/Histogram_ggplot.R
no_license
nguyenhailong/Kaggle
R
false
false
812
r
# Plot density histograms of the 20 most important features, colored by fault
# severity, and save each plot to ../figures/<attribute>.jpg.
# Assumes `train` and `importance_matrix` already exist in the workspace
# (produced upstream, e.g. by an xgboost importance run) -- TODO confirm.
library(ggplot2)

train$fault_severity <- as.factor(train$fault_severity)
important_list <- importance_matrix$Feature[1:20]

for (att in important_list) {
  # FIX: is.numeric() is TRUE for both integer and double columns and replaces
  # the fragile string comparison `class(x) == "numeric" | class(x) == "integer"`.
  if (is.numeric(train[[att]])) {
    # Density histogram, severity levels overlaid.
    plot <- ggplot(train, aes_string(att, fill = 'fault_severity')) +
      geom_histogram(alpha = 0.5, aes(y = ..density..),
                     position = 'identity', bins = 100) +
      ggtitle(paste0('Histogram of attribute: ', att))
    print(plot)
    temp <- ggplot_build(plot)$data[[1]]  # NOTE(review): computed but unused
    ggsave(paste0('../figures/', att, '.jpg'))
  }
}

# # Count histogram (alternative version, kept for reference)
# ggplot(train, aes_string(att, fill = 'fault_severity')) +
#   geom_histogram(alpha = 0.5, position = 'identity', bins = 100) +
#   ggtitle(paste0('Histogram of attribute: ', att))
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/assetDatabaseApi.r \name{assetDatabase$deleteSecurityEntry} \alias{assetDatabase$deleteSecurityEntry} \title{Delete a security entry owned by the asset database.} \arguments{ \item{name}{The name of the security entry. For every backslash character (\\) in the security entry name, replace with asterisk (*). As an example, use domain*username instead of domain\\username.} \item{webId}{The ID of the asset database where the security entry will be deleted.} \item{applyToChildren}{If false, the new access permissions are only applied to the associated object. If true, the access permissions of children with any parent-child reference types will change when the permissions on the primary parent change.} \item{securityItem}{The security item of the desired security entries to be deleted. If the parameter is not specified, security entries of the 'Default' security item will be deleted.} } \value{ The security entry was deleted. } \description{ Delete a security entry owned by the asset database. }
/man/assetDatabase-cash-deleteSecurityEntry.Rd
permissive
eddyrene/PI-Web-API-Client-R
R
false
true
1,134
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/assetDatabaseApi.r \name{assetDatabase$deleteSecurityEntry} \alias{assetDatabase$deleteSecurityEntry} \title{Delete a security entry owned by the asset database.} \arguments{ \item{name}{The name of the security entry. For every backslash character (\\) in the security entry name, replace with asterisk (*). As an example, use domain*username instead of domain\\username.} \item{webId}{The ID of the asset database where the security entry will be deleted.} \item{applyToChildren}{If false, the new access permissions are only applied to the associated object. If true, the access permissions of children with any parent-child reference types will change when the permissions on the primary parent change.} \item{securityItem}{The security item of the desired security entries to be deleted. If the parameter is not specified, security entries of the 'Default' security item will be deleted.} } \value{ The security entry was deleted. } \description{ Delete a security entry owned by the asset database. }
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/usePackage.r \name{usePackage} \alias{usePackage} \title{usePackage} \usage{ usePackage(package) } \arguments{ \item{package}{package to be installed} } \description{ usePackage Convenience function for installing and loading R packages from CRAN } \examples{ usePackage("tidyverse") } \author{ Jed Carlson }
/man/usePackage.Rd
no_license
carjed/smaug
R
false
true
408
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/usePackage.r \name{usePackage} \alias{usePackage} \title{usePackage} \usage{ usePackage(package) } \arguments{ \item{package}{package to be installed} } \description{ usePackage Convenience function for installing and loading R packages from CRAN } \examples{ usePackage("tidyverse") } \author{ Jed Carlson }
## Exploratory data analysis course project 1
## Plot 3: R code that creates plot3.png
## (FIX: the original header said "Plot 1" although this script writes plot3.png)

# NOTE(review): hard-coded absolute path; adjust for your machine.
setwd("C:/Users/Brandon/Downloads/exdata-data-household_power_consumption")

# Read everything as character; numeric conversion is done per column below.
data1 <- read.table("household_power_consumption.txt", header = TRUE, sep = ";",
                    colClasses = "character")

# Keep only 1 Feb 2007 and 2 Feb 2007 (raw dates are in d/m/Y format).
data2 <- data1[grepl("^[12]/2/2007", data1[, 1]), ]

# Combine Date and Time into a single POSIXlt column, dropping the originals.
data2 <- cbind(strptime(paste(data2[, 1], data2[, 2]), format = "%d/%m/%Y %H:%M:%S"),
               data2[, -c(1, 2)])
names(data2) <- c("Date/Time", "Global_active_power", "Global_reactive_power",
                  "Voltage", "Global_intensity", "Sub_metering_1",
                  "Sub_metering_2", "Sub_metering_3")

png(filename = "plot3.png", width = 480, height = 480)

# FIX: the original used data2$Date, which resolved only through R's partial
# matching against the "Date/Time" column; reference the column explicitly.
# The first plot() call draws an empty frame (pch = NA); lines() adds the data.
plot(data2[["Date/Time"]], as.numeric(data2$Sub_metering_1), pch = NA,
     main = "", ylab = "Energy Sub Metering", xlab = "")
lines(data2[["Date/Time"]], as.numeric(data2$Sub_metering_1), col = "black")
lines(data2[["Date/Time"]], as.numeric(data2$Sub_metering_2), col = "red")
lines(data2[["Date/Time"]], as.numeric(data2$Sub_metering_3), col = "blue")
legend("topright", c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"),
       lty = c(1, 1, 1), lwd = c(1, 1, 1), col = c("black", "red", "blue"))

dev.off()
/plot3.R
no_license
Tru0402/ExData_Plotting1
R
false
false
1,234
r
## Exploratory data analysis course project 1
## Plot 3: R code that creates plot3.png
## (FIX: the original header said "Plot 1" although this script writes plot3.png)

# NOTE(review): hard-coded absolute path; adjust for your machine.
setwd("C:/Users/Brandon/Downloads/exdata-data-household_power_consumption")

# Read everything as character; numeric conversion is done per column below.
data1 <- read.table("household_power_consumption.txt", header = TRUE, sep = ";",
                    colClasses = "character")

# Keep only 1 Feb 2007 and 2 Feb 2007 (raw dates are in d/m/Y format).
data2 <- data1[grepl("^[12]/2/2007", data1[, 1]), ]

# Combine Date and Time into a single POSIXlt column, dropping the originals.
data2 <- cbind(strptime(paste(data2[, 1], data2[, 2]), format = "%d/%m/%Y %H:%M:%S"),
               data2[, -c(1, 2)])
names(data2) <- c("Date/Time", "Global_active_power", "Global_reactive_power",
                  "Voltage", "Global_intensity", "Sub_metering_1",
                  "Sub_metering_2", "Sub_metering_3")

png(filename = "plot3.png", width = 480, height = 480)

# FIX: the original used data2$Date, which resolved only through R's partial
# matching against the "Date/Time" column; reference the column explicitly.
# The first plot() call draws an empty frame (pch = NA); lines() adds the data.
plot(data2[["Date/Time"]], as.numeric(data2$Sub_metering_1), pch = NA,
     main = "", ylab = "Energy Sub Metering", xlab = "")
lines(data2[["Date/Time"]], as.numeric(data2$Sub_metering_1), col = "black")
lines(data2[["Date/Time"]], as.numeric(data2$Sub_metering_2), col = "red")
lines(data2[["Date/Time"]], as.numeric(data2$Sub_metering_3), col = "blue")
legend("topright", c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"),
       lty = c(1, 1, 1), lwd = c(1, 1, 1), col = c("black", "red", "blue"))

dev.off()
#' Non-parametric bootstrap for the latent Markov model with missing data.
#'
#' Refits the model on B resampled data sets (resampling subjects with
#' replacement) and returns bootstrap means and standard errors of the
#' parameter estimates.
#'
#' @param Y array of responses, resampled along its first dimension
#'   (subjects) -- assumed subjects x occasions x variables; confirm upstream.
#' @param X1 matrix of covariates for the initial probabilities.
#' @param X2 array of covariates for the transition probabilities.
#' @param param parameterization of Ga: "multilogit" or "difflogit".
#' @param Mu,Si,Be,Ga point estimates; used only for their dimensions.
#' @param B number of bootstrap replicates.
#' @return list with bootstrap means (mMu, mSi, mBe, mGa) and standard
#'   errors (seMu, seSi, seBe, seGa).
bootstrap.MISS <- function(Y, X1, X2, param = "multilogit",
                           Mu, Si, Be, Ga, B = 100) {
  # Accumulators for first and second moments of each parameter.
  mMu <- mSi <- mBe <- 0
  m2Mu <- m2Si <- m2Be <- 0
  if (param == "multilogit") {
    mGa <- 0
    m2Ga <- 0
  } else if (param == "difflogit") {
    mGa <- vector("list", 2)
    m2Ga <- vector("list", 2)
    # FIX: the original called matrix(0, dim(Ga[[i]])), which passes the whole
    # dim vector as `nrow` and yields a wrongly-shaped one-column matrix.
    # Allocate with explicit nrow/ncol instead.
    mGa[[1]] <- matrix(0, nrow = nrow(Ga[[1]]), ncol = ncol(Ga[[1]]))
    mGa[[2]] <- matrix(0, nrow = nrow(Ga[[2]]), ncol = ncol(Ga[[2]]))
    m2Ga[[1]] <- matrix(0, nrow = nrow(Ga[[1]]), ncol = ncol(Ga[[1]]))
    m2Ga[[2]] <- matrix(0, nrow = nrow(Ga[[2]]), ncol = ncol(Ga[[2]]))
  }
  # k = number of latent states, taken from the shape of Mu.
  # (r is derived for symmetry but only k is used below.)
  if (is.vector(Mu)) {
    r <- 1
    k <- length(Mu)
  } else {
    r <- nrow(Mu)
    k <- ncol(Mu)
  }

  n <- dim(Y)[1]  # number of subjects; loop-invariant, hoisted out of the loop
  for (b in 1:B) {
    cat("non-parametric boostrap sample n. ", b, "\n")
    # Resample subjects with replacement.
    ind <- sample(n, n, replace = TRUE)
    Yb <- Y[ind, , ]
    X1b <- X1[ind, ]
    X2b <- X2[ind, , ]
    out <- lmcovlatent.cont.MISS(Y = Yb, X1 = X1b, X2 = X2b, k = k,
                                 param = param, output = TRUE)
    # Running means / second moments, divided by B up front.
    mMu <- mMu + out$Mu / B
    mSi <- mSi + out$Si / B
    mBe <- mBe + out$Be / B
    if (param == "multilogit") {
      mGa <- mGa + out$Ga / B
      m2Ga <- m2Ga + out$Ga^2 / B
    } else if (param == "difflogit") {
      mGa[[1]] <- mGa[[1]] + out$Ga[[1]] / B
      mGa[[2]] <- mGa[[2]] + out$Ga[[2]] / B
      m2Ga[[1]] <- m2Ga[[1]] + out$Ga[[1]]^2 / B
      m2Ga[[2]] <- m2Ga[[2]] + out$Ga[[2]]^2 / B
    }
    m2Mu <- m2Mu + out$Mu^2 / B
    m2Si <- m2Si + out$Si^2 / B
    m2Be <- m2Be + out$Be^2 / B
  }

  # Bootstrap SD via sqrt(E[x^2] - E[x]^2).
  seMu <- sqrt(m2Mu - mMu^2)
  seSi <- sqrt(m2Si - mSi^2)
  seBe <- sqrt(m2Be - mBe^2)
  if (param == "multilogit") {
    seGa <- sqrt(m2Ga - mGa^2)
  } else if (param == "difflogit") {
    seGa <- vector("list", 2)
    seGa[[1]] <- sqrt(m2Ga[[1]] - mGa[[1]]^2)
    seGa[[2]] <- sqrt(m2Ga[[2]] - mGa[[2]]^2)
  }
  # Return the summary list explicitly (the original relied on the value of
  # a final assignment, which is returned invisibly).
  list(mMu = mMu, mSi = mSi, mBe = mBe, mGa = mGa,
       seMu = seMu, seSi = seSi, seBe = seBe, seGa = seGa)
}
/bootstrap.MISS.R
no_license
penful/HMContMiss
R
false
false
1,766
r
#' Non-parametric bootstrap for the latent Markov model with missing data.
#'
#' Refits the model on B resampled data sets (resampling subjects with
#' replacement) and returns bootstrap means and standard errors of the
#' parameter estimates.
#'
#' @param Y array of responses, resampled along its first dimension
#'   (subjects) -- assumed subjects x occasions x variables; confirm upstream.
#' @param X1 matrix of covariates for the initial probabilities.
#' @param X2 array of covariates for the transition probabilities.
#' @param param parameterization of Ga: "multilogit" or "difflogit".
#' @param Mu,Si,Be,Ga point estimates; used only for their dimensions.
#' @param B number of bootstrap replicates.
#' @return list with bootstrap means (mMu, mSi, mBe, mGa) and standard
#'   errors (seMu, seSi, seBe, seGa).
bootstrap.MISS <- function(Y, X1, X2, param = "multilogit",
                           Mu, Si, Be, Ga, B = 100) {
  # Accumulators for first and second moments of each parameter.
  mMu <- mSi <- mBe <- 0
  m2Mu <- m2Si <- m2Be <- 0
  if (param == "multilogit") {
    mGa <- 0
    m2Ga <- 0
  } else if (param == "difflogit") {
    mGa <- vector("list", 2)
    m2Ga <- vector("list", 2)
    # FIX: the original called matrix(0, dim(Ga[[i]])), which passes the whole
    # dim vector as `nrow` and yields a wrongly-shaped one-column matrix.
    # Allocate with explicit nrow/ncol instead.
    mGa[[1]] <- matrix(0, nrow = nrow(Ga[[1]]), ncol = ncol(Ga[[1]]))
    mGa[[2]] <- matrix(0, nrow = nrow(Ga[[2]]), ncol = ncol(Ga[[2]]))
    m2Ga[[1]] <- matrix(0, nrow = nrow(Ga[[1]]), ncol = ncol(Ga[[1]]))
    m2Ga[[2]] <- matrix(0, nrow = nrow(Ga[[2]]), ncol = ncol(Ga[[2]]))
  }
  # k = number of latent states, taken from the shape of Mu.
  # (r is derived for symmetry but only k is used below.)
  if (is.vector(Mu)) {
    r <- 1
    k <- length(Mu)
  } else {
    r <- nrow(Mu)
    k <- ncol(Mu)
  }

  n <- dim(Y)[1]  # number of subjects; loop-invariant, hoisted out of the loop
  for (b in 1:B) {
    cat("non-parametric boostrap sample n. ", b, "\n")
    # Resample subjects with replacement.
    ind <- sample(n, n, replace = TRUE)
    Yb <- Y[ind, , ]
    X1b <- X1[ind, ]
    X2b <- X2[ind, , ]
    out <- lmcovlatent.cont.MISS(Y = Yb, X1 = X1b, X2 = X2b, k = k,
                                 param = param, output = TRUE)
    # Running means / second moments, divided by B up front.
    mMu <- mMu + out$Mu / B
    mSi <- mSi + out$Si / B
    mBe <- mBe + out$Be / B
    if (param == "multilogit") {
      mGa <- mGa + out$Ga / B
      m2Ga <- m2Ga + out$Ga^2 / B
    } else if (param == "difflogit") {
      mGa[[1]] <- mGa[[1]] + out$Ga[[1]] / B
      mGa[[2]] <- mGa[[2]] + out$Ga[[2]] / B
      m2Ga[[1]] <- m2Ga[[1]] + out$Ga[[1]]^2 / B
      m2Ga[[2]] <- m2Ga[[2]] + out$Ga[[2]]^2 / B
    }
    m2Mu <- m2Mu + out$Mu^2 / B
    m2Si <- m2Si + out$Si^2 / B
    m2Be <- m2Be + out$Be^2 / B
  }

  # Bootstrap SD via sqrt(E[x^2] - E[x]^2).
  seMu <- sqrt(m2Mu - mMu^2)
  seSi <- sqrt(m2Si - mSi^2)
  seBe <- sqrt(m2Be - mBe^2)
  if (param == "multilogit") {
    seGa <- sqrt(m2Ga - mGa^2)
  } else if (param == "difflogit") {
    seGa <- vector("list", 2)
    seGa[[1]] <- sqrt(m2Ga[[1]] - mGa[[1]]^2)
    seGa[[2]] <- sqrt(m2Ga[[2]] - mGa[[2]]^2)
  }
  # Return the summary list explicitly (the original relied on the value of
  # a final assignment, which is returned invisibly).
  list(mMu = mMu, mSi = mSi, mBe = mBe, mGa = mGa,
       seMu = seMu, seSi = seSi, seBe = seBe, seGa = seGa)
}