content
large_stringlengths
0
6.46M
path
large_stringlengths
3
331
license_type
large_stringclasses
2 values
repo_name
large_stringlengths
5
125
language
large_stringclasses
1 value
is_vendor
bool
2 classes
is_generated
bool
2 classes
length_bytes
int64
4
6.46M
extension
large_stringclasses
75 values
text
stringlengths
0
6.46M
#' @name sprinkle_width
#' @title Adjust Table Cell Width
#'
#' @description Customize the width of a cell in a table. This may be done
#'   to improve the appearance of cells with long text.
#'
#' @param x An object of class \code{dust}
#' @param rows Either a numeric vector of rows in the tabular object to be
#'   modified or an object of class \code{call}. When a \code{call},
#'   generated by \code{quote(expression)}, the expression resolves to
#'   a logical vector the same length as the number of rows in the table.
#'   Sprinkles are applied to where the expression resolves to \code{TRUE}.
#' @param cols Either a numeric vector of columns in the tabular object to
#'   be modified, or a character vector of column names. A mixture of
#'   character and numeric indices is permissible.
#' @param width \code{numeric(1)}. Gives the width of the cell.
#' @param width_units \code{character(1)}. Gives the units for \code{width}.
#'   One of \code{c("pt", "px", "cm", "in", "\%")}
#' @param part A character string denoting which part of the table to modify.
#' @param fixed \code{logical(1)} indicating if the values in \code{rows}
#'   and \code{cols} should be read as fixed coordinate pairs. By default,
#'   sprinkles are applied at the intersection of \code{rows} and
#'   \code{cols}, meaning that the arguments do not have to share the same
#'   length. When \code{fixed = TRUE}, they must share the same length.
#' @param recycle A \code{character} one that determines how sprinkles are
#'   managed when the sprinkle input doesn't match the length of the region
#'   to be sprinkled. By default, recycling is turned off. Recycling
#'   may be performed across rows first (left to right, top to bottom),
#'   or down columns first (top to bottom, left to right).
#' @param ... Additional arguments to pass to other methods. Currently
#'   ignored.
#'
#' @details This sprinkle is only recognized by HTML and LaTeX. All of the
#'   \code{width_units} values are recognized by HTML. For LaTeX,
#'   \code{"px"} is converted to \code{"pt"}.
#'
#' @section Functional Requirements:
#' \enumerate{
#'  \item Correctly reassigns the appropriate elements of \code{width}
#'    and \code{width_units} columns in the table part.
#'  \item Casts an error if \code{x} is not a \code{dust} object.
#'  \item Casts an error if \code{width} is not \code{numeric}
#'  \item Casts an error if \code{width_units} is not one of
#'    \code{c("px", "pt", "in", "cm", "\%")}.
#'  \item Casts an error if \code{part} is not one of \code{"body"},
#'    \code{"head"}, \code{"foot"}, or \code{"interfoot"}
#'  \item Casts an error if \code{fixed} is not a \code{logical(1)}
#'  \item Casts an error if \code{recycle} is not one of \code{"none"},
#'    \code{"rows"}, or \code{"cols"}
#'  \item Casts an error if \code{recycle = "none"} and \code{width} does
#'    not have length 1.
#'  \item Correctly assigns values when \code{recycle} is not \code{"none"}
#'    and multiple values are given.
#'  \item Quietly accepts only the first value in \code{width_units} when
#'    \code{recycle = "none"}.
#' }
#'
#' The functional behavior of the \code{fixed} and \code{recycle} arguments
#' is not tested for this function. It is tested and validated in the
#' tests for \code{\link{index_to_sprinkle}}.
#'
#' @seealso \code{\link{sprinkle}},
#'   \code{\link{index_to_sprinkle}}
#'
#' @export

sprinkle_width <- function(x, rows = NULL, cols = NULL,
                           width = NULL, width_units = NULL,
                           part = c("body", "head", "foot", "interfoot", "table"),
                           fixed = FALSE,
                           recycle = c("none", "rows", "cols", "columns"),
                           ...)
{
  UseMethod("sprinkle_width")
}

#' @rdname sprinkle_width
#' @export

sprinkle_width.default <- function(x, rows = NULL, cols = NULL,
                                   width = NULL, width_units = NULL,
                                   part = c("body", "head", "foot", "interfoot", "table"),
                                   fixed = FALSE,
                                   recycle = c("none", "rows", "cols", "columns"),
                                   ...)
{
  coll <- checkmate::makeAssertCollection()

  # Resolve the target cell indices first; index_to_sprinkle also
  # validates x, rows, cols, fixed, part, and recycle, pushing any
  # failures onto `coll` rather than stopping immediately.
  indices <- index_to_sprinkle(x = x, rows = rows, cols = cols,
                               fixed = fixed, part = part,
                               recycle = recycle, coll = coll)

  recycle <- recycle[1]

  # May shorten width_units to its first element when recycle = "none".
  width_units <- sprinkle_width_index_assert(width = width,
                                             width_units = width_units,
                                             recycle = recycle,
                                             coll = coll)

  # Raise all collected validation failures at once.
  checkmate::reportAssertions(coll)

  part <- part[1]

  sprinkle_width_index(x = x, indices = indices, width = width,
                       width_units = width_units, part = part)
}

#' @rdname sprinkle_width
#' @export

sprinkle_width.dust_list <- function(x, rows = NULL, cols = NULL,
                                     width = NULL, width_units = NULL,
                                     part = c("body", "head", "foot", "interfoot", "table"),
                                     fixed = FALSE,
                                     recycle = c("none", "rows", "cols", "columns"),
                                     ...)
{
  # Apply the sprinkle to every dust object in the list, preserving
  # the dust_list class on the result.
  structure(
    lapply(X = x,
           FUN = sprinkle_width.default,
           rows = rows,
           cols = cols,
           width = width,
           width_units = width_units,
           part = part,
           fixed = fixed,
           recycle = recycle,
           ...),
    class = "dust_list"
  )
}

# Unexported Utility ------------------------------------------------
# These functions are to be used inside of the general `sprinkle` call.
# When used inside `sprinkle`, the indices are already determined, so
# only the `width` and `width_units` arguments need to be validated.
# The assert function is kept separate so it may be called earlier
# without attempting to perform the assignment.

# Validate `width` and `width_units`, pushing any failures onto the
# checkmate assertion collection `coll`. Returns `width_units`, which
# is quietly truncated to its first element when recycle = "none".
sprinkle_width_index_assert <- function(width = NULL, width_units = NULL,
                                        recycle = "none", coll)
{
  if (!is.null(width))
  {
    checkmate::assert_numeric(x = width,
                              add = coll,
                              .var.name = "width")

    if (recycle == "none" && length(width) != 1)
    {
      # FIX: dropped a redundant single-argument paste0() wrapper;
      # the message string is unchanged.
      coll$push("When `recycle = none`, `width` must have length 1")
    }
  }

  if (!is.null(width_units))
  {
    if (recycle == "none")
    {
      width_units <- width_units[1]
    }

    checkmate::assert_subset(x = width_units,
                             choices = c("px", "pt", "in", "cm", "%"),
                             add = coll,
                             .var.name = "width_units")
  }

  width_units
}

# Write `width` and `width_units` into the chosen table part at the
# pre-computed cell indices. A NULL input leaves that column untouched.
sprinkle_width_index <- function(x, indices, width = "",
                                 width_units = "", part)
{
  if (!is.null(width))
  {
    x[[part]][["width"]][indices] <- width
  }

  if (!is.null(width_units))
  {
    x[[part]][["width_units"]][indices] <- width_units
  }

  x
}
/R/sprinkle_width.R
no_license
cran/pixiedust
R
false
false
7,509
r
#' @name sprinkle_width
#' @title Adjust Table Cell Width
#'
#' @description Customize the width of a cell in a table. This may be done
#'   to improve the appearance of cells with long text.
#'
#' @param x An object of class \code{dust}
#' @param rows Either a numeric vector of rows in the tabular object to be
#'   modified or an object of class \code{call}. When a \code{call},
#'   generated by \code{quote(expression)}, the expression resolves to
#'   a logical vector the same length as the number of rows in the table.
#'   Sprinkles are applied to where the expression resolves to \code{TRUE}.
#' @param cols Either a numeric vector of columns in the tabular object to
#'   be modified, or a character vector of column names. A mixture of
#'   character and numeric indices is permissible.
#' @param width \code{numeric(1)}. Gives the width of the cell.
#' @param width_units \code{character(1)}. Gives the units for \code{width}.
#'   One of \code{c("pt", "px", "cm", "in", "\%")}
#' @param part A character string denoting which part of the table to modify.
#' @param fixed \code{logical(1)} indicating if the values in \code{rows}
#'   and \code{cols} should be read as fixed coordinate pairs. By default,
#'   sprinkles are applied at the intersection of \code{rows} and
#'   \code{cols}, meaning that the arguments do not have to share the same
#'   length. When \code{fixed = TRUE}, they must share the same length.
#' @param recycle A \code{character} one that determines how sprinkles are
#'   managed when the sprinkle input doesn't match the length of the region
#'   to be sprinkled. By default, recycling is turned off. Recycling
#'   may be performed across rows first (left to right, top to bottom),
#'   or down columns first (top to bottom, left to right).
#' @param ... Additional arguments to pass to other methods. Currently
#'   ignored.
#'
#' @details This sprinkle is only recognized by HTML and LaTeX. All of the
#'   \code{width_units} values are recognized by HTML. For LaTeX,
#'   \code{"px"} is converted to \code{"pt"}.
#'
#' @section Functional Requirements:
#' \enumerate{
#'  \item Correctly reassigns the appropriate elements of \code{width}
#'    and \code{width_units} columns in the table part.
#'  \item Casts an error if \code{x} is not a \code{dust} object.
#'  \item Casts an error if \code{width} is not \code{numeric}
#'  \item Casts an error if \code{width_units} is not one of
#'    \code{c("px", "pt", "in", "cm", "\%")}.
#'  \item Casts an error if \code{part} is not one of \code{"body"},
#'    \code{"head"}, \code{"foot"}, or \code{"interfoot"}
#'  \item Casts an error if \code{fixed} is not a \code{logical(1)}
#'  \item Casts an error if \code{recycle} is not one of \code{"none"},
#'    \code{"rows"}, or \code{"cols"}
#'  \item Casts an error if \code{recycle = "none"} and \code{width} does
#'    not have length 1.
#'  \item Correctly assigns values when \code{recycle} is not \code{"none"}
#'    and multiple values are given.
#'  \item Quietly accepts only the first value in \code{width_units} when
#'    \code{recycle = "none"}.
#' }
#'
#' The functional behavior of the \code{fixed} and \code{recycle} arguments
#' is not tested for this function. It is tested and validated in the
#' tests for \code{\link{index_to_sprinkle}}.
#'
#' @seealso \code{\link{sprinkle}},
#'   \code{\link{index_to_sprinkle}}
#'
#' @export

sprinkle_width <- function(x, rows = NULL, cols = NULL,
                           width = NULL, width_units = NULL,
                           part = c("body", "head", "foot", "interfoot", "table"),
                           fixed = FALSE,
                           recycle = c("none", "rows", "cols", "columns"),
                           ...)
{
  UseMethod("sprinkle_width")
}

#' @rdname sprinkle_width
#' @export

sprinkle_width.default <- function(x, rows = NULL, cols = NULL,
                                   width = NULL, width_units = NULL,
                                   part = c("body", "head", "foot", "interfoot", "table"),
                                   fixed = FALSE,
                                   recycle = c("none", "rows", "cols", "columns"),
                                   ...)
{
  coll <- checkmate::makeAssertCollection()

  # Resolve the target cell indices first; index_to_sprinkle also
  # validates x, rows, cols, fixed, part, and recycle, pushing any
  # failures onto `coll` rather than stopping immediately.
  indices <- index_to_sprinkle(x = x, rows = rows, cols = cols,
                               fixed = fixed, part = part,
                               recycle = recycle, coll = coll)

  recycle <- recycle[1]

  # May shorten width_units to its first element when recycle = "none".
  width_units <- sprinkle_width_index_assert(width = width,
                                             width_units = width_units,
                                             recycle = recycle,
                                             coll = coll)

  # Raise all collected validation failures at once.
  checkmate::reportAssertions(coll)

  part <- part[1]

  sprinkle_width_index(x = x, indices = indices, width = width,
                       width_units = width_units, part = part)
}

#' @rdname sprinkle_width
#' @export

sprinkle_width.dust_list <- function(x, rows = NULL, cols = NULL,
                                     width = NULL, width_units = NULL,
                                     part = c("body", "head", "foot", "interfoot", "table"),
                                     fixed = FALSE,
                                     recycle = c("none", "rows", "cols", "columns"),
                                     ...)
{
  # Apply the sprinkle to every dust object in the list, preserving
  # the dust_list class on the result.
  structure(
    lapply(X = x,
           FUN = sprinkle_width.default,
           rows = rows,
           cols = cols,
           width = width,
           width_units = width_units,
           part = part,
           fixed = fixed,
           recycle = recycle,
           ...),
    class = "dust_list"
  )
}

# Unexported Utility ------------------------------------------------
# These functions are to be used inside of the general `sprinkle` call.
# When used inside `sprinkle`, the indices are already determined, so
# only the `width` and `width_units` arguments need to be validated.
# The assert function is kept separate so it may be called earlier
# without attempting to perform the assignment.

# Validate `width` and `width_units`, pushing any failures onto the
# checkmate assertion collection `coll`. Returns `width_units`, which
# is quietly truncated to its first element when recycle = "none".
sprinkle_width_index_assert <- function(width = NULL, width_units = NULL,
                                        recycle = "none", coll)
{
  if (!is.null(width))
  {
    checkmate::assert_numeric(x = width,
                              add = coll,
                              .var.name = "width")

    if (recycle == "none" && length(width) != 1)
    {
      # FIX: dropped a redundant single-argument paste0() wrapper;
      # the message string is unchanged.
      coll$push("When `recycle = none`, `width` must have length 1")
    }
  }

  if (!is.null(width_units))
  {
    if (recycle == "none")
    {
      width_units <- width_units[1]
    }

    checkmate::assert_subset(x = width_units,
                             choices = c("px", "pt", "in", "cm", "%"),
                             add = coll,
                             .var.name = "width_units")
  }

  width_units
}

# Write `width` and `width_units` into the chosen table part at the
# pre-computed cell indices. A NULL input leaves that column untouched.
sprinkle_width_index <- function(x, indices, width = "",
                                 width_units = "", part)
{
  if (!is.null(width))
  {
    x[[part]][["width"]][indices] <- width
  }

  if (!is.null(width_units))
  {
    x[[part]][["width_units"]][indices] <- width_units
  }

  x
}
#' Reorder an x or y axis within facets
#'
#' See \code{tidytext::\link[tidytext:reorder_within]{reorder_within}} for details.
#'
#' @name reorder_within
#' @keywords internal
#' @export
#' @importFrom tidytext reorder_within
#' @return A ggplot2 object for reordering elements within facets, used in
#'   combination with
#'   \code{upstartr::\link[upstartr:scale_x_reordered]{scale_x_reordered}} or
#'   \code{upstartr::\link[upstartr:scale_y_reordered]{scale_y_reordered}}
NULL

#' Passed after reorder_within to reorder x-axis along facets
#'
#' See \code{tidytext::\link[tidytext:scale_x_reordered]{scale_x_reordered}} for details.
#'
#' @name scale_x_reordered
#' @keywords internal
#' @export
#' @importFrom tidytext scale_x_reordered
#' @return A scale object to be consumed by ggplot2, used in combination with
#'   \code{upstartr::\link[upstartr:reorder_within]{reorder_within}}.
NULL

# FIX: the title below previously said "x-axis" even though this stub
# re-exports the y-axis scale.
#' Passed after reorder_within to reorder y-axis along facets
#'
#' See \code{tidytext::\link[tidytext:scale_y_reordered]{scale_y_reordered}} for details.
#'
#' @name scale_y_reordered
#' @keywords internal
#' @export
#' @importFrom tidytext scale_y_reordered
#' @return A scale object to be consumed by ggplot2, used in combination with
#'   \code{upstartr::\link[upstartr:reorder_within]{reorder_within}}.
NULL

#' Create a continuous x-axis scale using percentages
#'
#' Convenience function to return a scale_x_continuous function using
#' percentage labels.
#'
#' @param ... All your usual continuous x-axis scale parameters.
#'
#' @return A scale object to be consumed by ggplot2.
#'
#' @export
scale_x_percent <- function(...) {
  # Delegate to ggplot2, fixing the label formatter to percentages.
  ggplot2::scale_x_continuous(labels = scales::percent, ...)
}

#' Create a continuous y-axis scale using percentages
#'
#' Convenience function to return a scale_y_continuous function using
#' percentage labels.
#'
#' @param ... All your usual continuous y-axis scale parameters.
#'
#' @return A scale object to be consumed by ggplot2.
#'
#' @export
scale_y_percent <- function(...) {
  # Delegate to ggplot2, fixing the label formatter to percentages.
  ggplot2::scale_y_continuous(labels = scales::percent, ...)
}
/R/graphics.R
no_license
cran/upstartr
R
false
false
2,070
r
#' Reorder an x or y axis within facets
#'
#' See \code{tidytext::\link[tidytext:reorder_within]{reorder_within}} for details.
#'
#' @name reorder_within
#' @keywords internal
#' @export
#' @importFrom tidytext reorder_within
#' @return A ggplot2 object for reordering elements within facets, used in
#'   combination with
#'   \code{upstartr::\link[upstartr:scale_x_reordered]{scale_x_reordered}} or
#'   \code{upstartr::\link[upstartr:scale_y_reordered]{scale_y_reordered}}
NULL

#' Passed after reorder_within to reorder x-axis along facets
#'
#' See \code{tidytext::\link[tidytext:scale_x_reordered]{scale_x_reordered}} for details.
#'
#' @name scale_x_reordered
#' @keywords internal
#' @export
#' @importFrom tidytext scale_x_reordered
#' @return A scale object to be consumed by ggplot2, used in combination with
#'   \code{upstartr::\link[upstartr:reorder_within]{reorder_within}}.
NULL

# FIX: the title below previously said "x-axis" even though this stub
# re-exports the y-axis scale.
#' Passed after reorder_within to reorder y-axis along facets
#'
#' See \code{tidytext::\link[tidytext:scale_y_reordered]{scale_y_reordered}} for details.
#'
#' @name scale_y_reordered
#' @keywords internal
#' @export
#' @importFrom tidytext scale_y_reordered
#' @return A scale object to be consumed by ggplot2, used in combination with
#'   \code{upstartr::\link[upstartr:reorder_within]{reorder_within}}.
NULL

#' Create a continuous x-axis scale using percentages
#'
#' Convenience function to return a scale_x_continuous function using
#' percentage labels.
#'
#' @param ... All your usual continuous x-axis scale parameters.
#'
#' @return A scale object to be consumed by ggplot2.
#'
#' @export
scale_x_percent <- function(...) {
  # Delegate to ggplot2, fixing the label formatter to percentages.
  ggplot2::scale_x_continuous(labels = scales::percent, ...)
}

#' Create a continuous y-axis scale using percentages
#'
#' Convenience function to return a scale_y_continuous function using
#' percentage labels.
#'
#' @param ... All your usual continuous y-axis scale parameters.
#'
#' @return A scale object to be consumed by ggplot2.
#'
#' @export
scale_y_percent <- function(...) {
  # Delegate to ggplot2, fixing the label formatter to percentages.
  ggplot2::scale_y_continuous(labels = scales::percent, ...)
}
#' @aliases calc.multiple,character,list-method
#' @rdname calc.multiple-method
# S4 dispatcher: routes a named calculation `type` to one of the four
# worker methods defined below. `input` is always a list, typically
# with elements objectA/objectB (see the workers).
setMethod(f='calc.multiple', signature=c('character', 'list'), definition=function(type, input) {
  # Nulling .SD/ID silences R CMD check notes about data.table's
  # non-standard evaluation; both are bound at evaluation time.
  .SD <- ID <- NULL
  if (!type %in% c('makePartitions', 'genMatMFull', 'makeAttackerProblem', 'calcFullProblem') ) {
    stop("calc.multiple:: argument 'type' is not valid!\n")
  }
  if ( type == 'makePartitions' ) {
    return(c_make_partitions(input))
  }
  if ( type == 'genMatMFull' ) {
    return(c_gen_mat_m(input))
  }
  if ( type == 'makeAttackerProblem' ) {
    return(c_make_att_prob(input))
  }
  if ( type == 'calcFullProblem' ) {
    return(c_calc_full_prob(input))
  }
} )

# Build the partition structure used when solving the problem group by
# group: for every combination of hierarchy levels ("group") it records
# which cell indices (positions in the problem's strIDs) belong to each
# sub-table. input$objectA: a problemInstance; input$objectB: a
# dimInfo-like object.
setMethod("c_make_partitions", signature=c("list"), definition=function(input) {
  pI <- input$objectA
  dimInfoObj <- input$objectB
  dimInfo <- g_dim_info(dimInfoObj)
  strIDs <- g_strID(pI)
  ## create classes and groups
  # One row of tmpDat per combination of levels across all dimensions;
  # `classes` (the level sums) orders groups from coarsest to finest.
  tmpDat <- expand.grid(lapply(1:length(dimInfo), function(x) { 1:g_nr_levels(dimInfo[[x]]) } ))
  groups <- apply(tmpDat, 1, function(x) { paste(x, collapse="-")})
  classes <- apply(tmpDat, 1, sum)
  sortOrder <- order(classes)
  classes <- classes[sortOrder]
  classesUnique <- unique(classes)
  groups <- groups[sortOrder]
  splitGroups <- split(groups, classes)
  ## create tables for all classes and groups
  final <- list()
  final$groups <- as.list(groups)
  final$indices <- list()
  # default_codes and levels
  default_codes <- lapply(1:length(dimInfo), function(x) { g_default_codes(dimInfo[[x]]) })
  dim_levels <- lapply(1:length(dimInfo), function(x) { g_levels(dimInfo[[x]]) })
  # data.table to merge on
  # N maps each strID back to its row position in the problem instance.
  df <- data.table(N=1:length(strIDs), strIDs=strIDs)
  setkey(df, strIDs)
  for (i in 1:length(groups)) {
    final$indices[[i]] <- list()
    # levs: the per-dimension level of this group, parsed from "a-b-c".
    levs <- as.integer(unlist(sapply(groups[[i]], strsplit, "-")))
    res <- list()
    for (z in 1:length(dimInfo)) {
      res[[z]] <- list()
      # Codes at this group's level and its immediate parent level.
      index <- which(g_levels(dimInfo[[z]]) %in% c(levs[z], levs[z]-1))
      codesDefault <- default_codes[[z]][index]
      if (levs[z] == 1) {
        # Top level: the code set is used as-is.
        res[[z]] <- codesDefault
      } else {
        # Split the selected codes into contiguous parent+children runs;
        # each run where the target level actually occurs becomes one
        # candidate code set for this dimension.
        levOrig <- dim_levels[[z]][index]
        diffs <- c(0,diff(levOrig))
        checkInd <- which(diffs == 1)-1
        out <- data.frame(index=index, levOrig=levOrig, codesDefault=codesDefault, ind=NA)
        out$ind[checkInd] <- 1
        checkInd <- c(checkInd, length(index))
        splitVec <- rep(0, length(index))
        for ( j in 2:length(checkInd) ) {
          if ( j < length(checkInd) ) {
            splitVec[checkInd[j-1]:(checkInd[j]-1)] <- j-1
          } else {
            splitVec[checkInd[j-1]:(checkInd[j])] <- j-1
          }
        }
        spl <- split(index, splitVec)
        # NOTE(review): `counter` is assigned but never used below.
        counter <- 1
        for (k in 1:length(spl)) {
          rowInd <- match(spl[[k]], out$index)
          tmp <- out[rowInd,]
          if ( any(tmp[,"levOrig"]==levs[z]) ) {
            tmp <- tmp[1:(max(which(tmp$levOrig==levs[z]))),]
            res[[z]][[length(res[[z]])+1]] <- sort(unique(as.character(tmp$codesDefault)))
          }
        }
      }
    }
    final$indices[[i]] <- list()
    # Cross all per-dimension candidate code sets; each combination is
    # one sub-table of this group.
    combs <- expand.grid(lapply(1:length(res), function(x) { 1:length(res[[x]]) }))
    final$indices[[i]] <- list(); length(final$indices[[i]]) <- nrow(combs)
    for (m in 1:nrow(combs)) {
      final.strIDs <- pasteStrVec(expand(lapply(1:ncol(combs), function(x) { res[[x]][[combs[m,x]]] })), ncol(combs))
      df2 <- data.table(strIDs=final.strIDs, key="strIDs")
      # Keyed join: translate the sub-table's strIDs into row numbers.
      final$indices[[i]][[m]] <- df[df2]$N
    }
  }
  final$nrGroups <- length(groups)
  final$nrTables <- sum(sapply(1:final$nrGroups, function(x) { length(final$indices[[x]]) }))
  return(final)
})

# Build the linear aggregation-constraint matrix M of the table: for
# every marginal cell, one row expressing that the contributing inner
# cells sum to it (coefficients +1 for contributors, -1 for the
# marginal, identified as the cell with the largest frequency).
setMethod("c_gen_mat_m", signature=c("list"), definition=function(input) {
  x <- input$objectA
  y <- input$objectB
  levelObj <- g_dim_info(y)
  strID <- g_strID(x)
  nrVars <- length(levelObj)
  nrCells <- g_nrVars(x)
  freqs <- g_freq(x)
  # Start from an empty sparse (triplet) matrix with one column per cell.
  constraintM <- init.simpleTriplet(type='simpleTriplet', input=list(mat=matrix(0, nrow=0, ncol=nrCells)))
  for ( i in 1:nrVars ) {
    lO <- levelObj[[i]]
    # f1: cell grouping by all dimensions except i; f2: by dimension i
    # only (substrings of strID selected via the str_info positions).
    keepList <- lapply(g_str_info(y)[-i], function(k) { seq(k[1], k[2]) })
    keepList2 <- lapply(g_str_info(y)[i], function(k) { seq(k[1], k[2]) })
    f1 <- f2 <- mySplitIndicesList(strID, keepList2)
    if ( nrVars > 1 ) {
      f1 <- mySplitIndicesList(strID, keepList)
    }
    dimlO <- g_dims(lO)
    if ( length(unique(f2)) != 1 ) {
      dimInd <- sapply(1:length(dimlO), function(x) { identical( sort(unique(f2)), dimlO[[x]]) } )
      if ( sum(dimInd) == 0 ) {
        for ( j in 1:length(g_dims(lO)) ) {
          splitInd <- which(f2 %in% g_dims(lO)[[j]])
          spl <- split(splitInd, f1[splitInd])
          for ( z in 1:length(spl) ) {
            # +1 for contributors, -1 for the marginal (max frequency);
            # sanity check: the signed frequency sum must be zero.
            ind <- rep(1,length(spl[[z]]))
            ind[which.max(freqs[spl[[z]]])] <- -1
            if ( !is.zero(sum(freqs[spl[[z]]]*ind)) ) {
              stop("something went wrong!\n")
            }
            constraintM <- c_add_row(constraintM, input=list(index=spl[[z]], values=ind))
          }
        }
      } else {
        splitInd <- which(f2 %in% g_dims(lO)[[which(dimInd==TRUE)]])
        ## only 1 dimension
        if ( nrVars > 1 ) {
          spl <- split(splitInd, f1[splitInd])
        } else {
          spl <- split(splitInd, rep(1, length(splitInd)))
        }
        for ( z in 1:length(spl) ) {
          ind <- rep(1,length(spl[[z]]))
          ind[which.max(freqs[spl[[z]]])] <- -1
          if ( !is.zero(sum(freqs[spl[[z]]]*ind)) ) {
            stop("something went wrong! (z=",z," und names(spl)[z]='",names(spl)[z],")\n")
          }
          # NOTE(review): this branch passes `value=` while the branch
          # above passes `values=` to c_add_row — presumably matched by
          # partial argument matching; confirm against c_add_row's
          # signature.
          constraintM <- c_add_row(constraintM, input=list(index=spl[[z]], value=ind))
        }
      }
    }
  }
  return(constraintM)
})

# Assemble the attacker's linear program from the aggregation matrix:
# variables are 2*nrVars bounded cell deviations plus one free dual per
# constraint; all rows are equalities with zero right-hand side.
setMethod("c_make_att_prob", signature=c("list"), definition=function(input) {
  x <- input$objectA
  y <- input$objectB
  nrVars <- g_nrVars(x)
  A <- c_gen_mat_m(input=list(objectA=x, objectB=y))
  ## calculating (logical) constraints for the master problem ##
  # idea: for each constraint at least 2 suppressions must
  # exist if one xi != 0! (http://www.eia.doe.gov/ices2/missing_papers.pdf)
  newCutsMaster <- init.cutList(type='empty', input=list(nrCols=nrVars))
  #xx <- lapply(1:g_nr_rows(A), function(x) {
  #  cols <- g_col_ind(g_row(A, input=list(x)))
  #  v <- rep(0, nrVars)
  #  v[cols] <- c(1, rep(-1, length(cols)))
  #  s_add_complete_constraint(newCutsMaster) <<- list(init.cutList(type='singleCut', input=list(vals=v, dir="<=", rhs=0)))
  #})
  ################################################################
  nrConstraints <- g_nr_rows(A)
  objective <- rep(0, length=2*nrVars+nrConstraints)
  # [I | -I | t(A)] column-bound together: positive/negative deviation
  # identities followed by the transposed constraint matrix.
  z1 <- init.simpleTriplet(type='simpleTripletDiag', input=list(nrRows=nrVars, negative=FALSE))
  z2 <- init.simpleTriplet(type='simpleTripletDiag', input=list(nrRows=nrVars, negative=TRUE))
  z <- c_bind(object=z1, input=list(z2, bindRow=FALSE))
  A <- c_bind(object=z, input=list(g_transpose(A), bindRow=FALSE))
  direction <- rep("==", g_nr_rows(A))
  rhs <- rep(0, g_nr_rows(A))
  types <- rep("C", g_nr_cols(A))
  # Deviation variables are non-negative; dual variables are free.
  boundsLower <- list(ind=1:g_nr_cols(A), val=c(rep(0, 2*nrVars), rep(-Inf, nrConstraints)))
  boundsUpper <- list(ind=1:g_nr_cols(A), val=c(rep(Inf, 2*nrVars), rep(Inf, nrConstraints)))
  aProb <- new("linProb",
    objective=objective,
    constraints=A,
    direction=direction,
    rhs=rhs,
    boundsLower=boundsLower,
    boundsUpper=boundsUpper,
    types=types)
  return(list(aProb=aProb, newCutsMaster=newCutsMaster))
})

# Build a complete sdcProblem from raw data (objectA) and dimension
# information (objectB): aggregate microdata, expand to the full table
# of all code combinations, fill every hierarchy margin, and wrap the
# result with a partition structure.
setMethod("c_calc_full_prob", signature=c("list"), definition=function(input) {
  # data.table NSE symbols, nulled for R CMD check.
  .SD <- ID <- id <- NULL
  x <- input$objectA
  y <- input$objectB
  time.start <- proc.time()
  datO <- g_raw_data(x)
  dimObj <- g_dim_info(y)
  # we have to aggregate if we are dealing with microdata
  if (g_is_microdata(x)) {
    rawData <- datO[, lapply(.SD, sum, na.rm=TRUE), by=key(datO), .SDcols=setdiff(colnames(datO), key(datO))]
  } else {
    rawData <- copy(datO)
  }
  ind.dimvars <- g_dimvar_ind(x)
  ind.freq <- g_freqvar_ind(x)
  codes <- list(); length(codes) <- length(ind.dimvars)
  # Normalize each dimension's codes to the standardized default codes;
  # error out if the observed codes match neither the original nor the
  # default coding of the hierarchy.
  for (i in 1:length(codes)) {
    codes[[i]] <- rawData[[ind.dimvars[i]]]
    cDefault <- g_default_codes(dimObj[[i]])
    cOriginal <- g_original_codes(dimObj[[i]])
    cOriginalDups <- g_dups(dimObj[[i]])
    # NOTE(review): cOriginalDupsUp is fetched but never used here.
    cOriginalDupsUp <- g_dups_up(dimObj[[i]])
    if (all(unique(codes[[i]]) %in% c(cOriginal, cOriginalDups))) {
      codes[[i]] <- c_match_default_codes(object=dimObj[[i]], input=rawData[[ind.dimvars[i]]])
      if (sum(is.na(codes[[i]]))>0) {
        stop(paste0("NA values in default codes have been generated for variable ", shQuote(names(dimObj)[i]),".\nPlease check the definition of this hierarchy!\n"))
      }
    } else if (all(unique(codes[[i]]) %in% cDefault)) {
      # cat("no recoding necessary!\n")
    } else {
      stop("c_calc_full_prob:: recoding not possible. Check your inputs!\n")
    }
  }
  ## calculate all possible combinations within the lowest levels of dim-vars
  ## if any combinations are missing (missing.codes), we have to set them to 0 later
  strID <- as.character(pasteStrVec(unlist(codes), length(codes)))
  exDims <- pasteStrVec(unlist(codes), length(codes))
  possDims <- sort(pasteStrVec(as.character(expand(lapply(dimObj, function(x) { g_minimal_default_codes(x) }), vector=TRUE)), length(dimObj)))
  missing.codes <- setdiff(possDims, exDims)
  ## fill the table
  # fullTabObj: one row for every combination of default codes across
  # all dimensions (the complete table skeleton).
  nrIndexvars <- length(ind.dimvars)
  fullDims <- lapply(dimObj, g_dims)
  allCodes <- expand(lapply(dimObj, g_default_codes), vector=FALSE)
  fullTabObj <- data.table(ID=1:length(allCodes[[1]]))
  for (i in 1:length(allCodes)) {
    fullTabObj[,colnames(rawData)[ind.dimvars][i]:=allCodes[[i]]]
  }
  setkeyv(fullTabObj, colnames(rawData)[ind.dimvars])
  fullTabObj[,ID:=NULL]
  ## revert rawData codes to default codes
  for (j in seq_along(ind.dimvars)) {
    v <- c_match_default_codes(object=dimObj[[j]], input=rawData[,get(names(dimObj)[j])])
    set(rawData, NULL, names(dimObj)[j], v)
  }
  setkeyv(rawData, colnames(rawData)[ind.dimvars])
  ## replace NAs in rawData by 0 (required for aggregation)
  # ind.na remembers the NA positions so they can be restored later.
  cols <- colnames(rawData)[(length(dimObj)+1):ncol(rawData)]
  ind.na <- list(); length(ind.na) <- length(cols); k <- 1
  for (j in cols) {
    ind.na[[k]] <- which(is.na(rawData[[j]]))
    set(rawData, ind.na[[k]], j, 0)
    k <- k+1
  }; rm(k)
  ## merge minDat to fullDat
  fullTabObj <- merge(fullTabObj, rawData, all.x=TRUE)
  ## set missing combinations of lowest levels to 0
  ## problematic are all levels that should exist, but do not exist
  ## they are filled with 0 so that we can aggregate
  dim.vars <- colnames(fullTabObj)[ind.dimvars]
  # performance improvement
  # Builds and evals "fullTabObj[,strID:=paste0(<dim1>, <dim2>, ...)]"
  # because the column names are only known at run time.
  cmd <- paste0("fullTabObj[,strID:=paste0(",dim.vars[1])
  if (length(dim.vars)>1) {
    for (i in 2:length(dim.vars)) {
      cmd <- paste0(cmd, ", ",dim.vars[i])
    }
  }
  cmd <- paste0(cmd,")]")
  eval(parse(text=cmd))
  strID <- fullTabObj$strID
  fullTabObj[,strID:=NULL]
  if (length(missing.codes) > 0) {
    index <- which(strID%in%missing.codes)
    for (i in 1:length(cols)) {
      set(fullTabObj, index, cols[i], 0)
    }
  }
  ## fill up missing dimensions
  not.finished <- TRUE
  # which indexvars have any hierarchy (not just the total?)
  # these indiecs specify the dim-variables we loop over
  useInds <- which(sapply(y@dimInfo, function(x) { length(x@codesOriginal)>1 }))
  fullTabObj[,id:=.I]
  cols <- (nrIndexvars+1):(ncol(fullTabObj)-1)
  col.names <- names(fullTabObj)[cols]
  # Repeatedly aggregate children into their parent codes, dimension by
  # dimension and level by level (deepest first), until the frequency
  # column has no NA left in the first row.
  while (not.finished) {
    for (i in useInds) {
      if (length(dim.vars) > 1) {
        setkeyv(fullTabObj, dim.vars[-i])
      } else {
        setkeyv(fullTabObj, dim.vars[1])
      }
      cur.dim <- dimObj[[i]]@dims
      for (j in length(cur.dim):1) {
        cur.levs <- cur.dim[[j]]
        # cur.levs[1] is the parent code, cur.levs[-1] its children.
        cmd <- paste0("out <- fullTabObj[",dim.vars[i],"%in% cur.levs[-1],]")
        eval(parse(text=cmd))
        if (length(dim.vars)==1) {
          out <- out[,lapply(.SD,sum), .SDcols=col.names]
        } else {
          out <- out[,lapply(.SD,sum), .SDcols=col.names, by=key(out)]
        }
        cmd <- paste0("row.ind <- fullTabObj[",dim.vars[i],"==cur.levs[1],id]")
        eval(parse(text=cmd))
        for (z in col.names) {
          cmd <- paste0("fullTabObj[id %in% row.ind,",z,":=out[[z]]]")
          eval(parse(text=cmd))
        }
      }
    }
    if (!is.na(fullTabObj[1,ind.freq,with=FALSE])) {
      not.finished <- FALSE
    } else {
      cat("nrMissings:",sum(is.na(fullTabObj$freq)),"\n")
    }
  }
  fullTabObj[,id:=NULL]
  nrV <- nrow(fullTabObj)
  f <- fullTabObj[[ind.freq]]
  # NOTE(review): this apply()-built strID is immediately recomputed by
  # the eval/parse block below ("performance improvement") and
  # overwritten — the apply() call looks redundant.
  strID <- apply(fullTabObj[,dim.vars,with=FALSE],1,paste0, collapse="")
  # performance improvement
  cmd <- paste0("fullTabObj[,strID:=paste0(",dim.vars[1])
  if (length(dim.vars)>1) {
    for (i in 2:length(dim.vars)) {
      cmd <- paste0(cmd, ", ",dim.vars[i])
    }
  }
  cmd <- paste0(cmd,")]")
  eval(parse(text=cmd))
  strID <- fullTabObj$strID
  fullTabObj[,strID:=NULL]
  # Optional sampling weights and numeric variables.
  w <- numVarsList <- NULL
  w.ind <- g_weightvar_ind(x)
  if ( !is.null(w.ind) ) {
    w <- fullTabObj[[w.ind]]
  }
  n.ind <- g_numvar_ind(x)
  if (!is.null(n.ind)) {
    numVarsList <- list(); length(numVarsList) <- length(n.ind)
    for (n in 1:length(n.ind)) {
      numVarsList[[n]] <- fullTabObj[[n.ind[n]]]
    }
  }
  if (length(n.ind) > 0) {
    names(numVarsList) <- colnames(g_raw_data(x))[n.ind]
  }
  ## replace 0 in rawData by NA if they have been replaced earlier
  for (i in 1:length(ind.na)) {
    if (length(ind.na[[i]]) > 0) {
      set(rawData, ind.na[[i]], cols[i], NA)
    }
  }
  s_raw_data(x) <- list(datO)
  # All cells start as "s" (suppressible); empty cells become "z".
  problemInstance <- new("problemInstance",
    strID=strID, Freq=f, w=w, numVars=numVarsList,
    lb=rep(0, nrV), ub=pmax(2*f, 5),
    LPL=rep(1, nrV), UPL=rep(1, nrV), SPL=rep(0, nrV),
    sdcStatus=rep("s", nrV)
  )
  problemInstance@sdcStatus[problemInstance@Freq==0] <- "z"
  partition <- c_make_partitions(input=list(objectA=problemInstance, objectB=y))
  sdcProblem <- new("sdcProblem",
    dataObj=x, dimInfo=y, problemInstance=problemInstance,
    partition=partition, startI=1, startJ=1,
    indicesDealtWith=NULL, elapsedTime=(proc.time()-time.start)[3]
  )
  return(sdcProblem)
})
/R/methods_multiple_classes.r
no_license
mattdowle/sdcTable
R
false
false
14,419
r
#' @aliases calc.multiple,character,list-method #' @rdname calc.multiple-method setMethod(f='calc.multiple', signature=c('character', 'list'), definition=function(type, input) { .SD <- ID <- NULL if (!type %in% c('makePartitions', 'genMatMFull', 'makeAttackerProblem', 'calcFullProblem') ) { stop("calc.multiple:: argument 'type' is not valid!\n") } if ( type == 'makePartitions' ) { return(c_make_partitions(input)) } if ( type == 'genMatMFull' ) { return(c_gen_mat_m(input)) } if ( type == 'makeAttackerProblem' ) { return(c_make_att_prob(input)) } if ( type == 'calcFullProblem' ) { return(c_calc_full_prob(input)) } } ) setMethod("c_make_partitions", signature=c("list"), definition=function(input) { pI <- input$objectA dimInfoObj <- input$objectB dimInfo <- g_dim_info(dimInfoObj) strIDs <- g_strID(pI) ## create classes and groups tmpDat <- expand.grid(lapply(1:length(dimInfo), function(x) { 1:g_nr_levels(dimInfo[[x]]) } )) groups <- apply(tmpDat, 1, function(x) { paste(x, collapse="-")}) classes <- apply(tmpDat, 1, sum) sortOrder <- order(classes) classes <- classes[sortOrder] classesUnique <- unique(classes) groups <- groups[sortOrder] splitGroups <- split(groups, classes) ## create tables for all classes and groups final <- list() final$groups <- as.list(groups) final$indices <- list() # default_codes and levels default_codes <- lapply(1:length(dimInfo), function(x) { g_default_codes(dimInfo[[x]]) }) dim_levels <- lapply(1:length(dimInfo), function(x) { g_levels(dimInfo[[x]]) }) # data.table to merge on df <- data.table(N=1:length(strIDs), strIDs=strIDs) setkey(df, strIDs) for (i in 1:length(groups)) { final$indices[[i]] <- list() levs <- as.integer(unlist(sapply(groups[[i]], strsplit, "-"))) res <- list() for (z in 1:length(dimInfo)) { res[[z]] <- list() index <- which(g_levels(dimInfo[[z]]) %in% c(levs[z], levs[z]-1)) codesDefault <- default_codes[[z]][index] if (levs[z] == 1) { res[[z]] <- codesDefault } else { levOrig <- dim_levels[[z]][index] diffs <- 
c(0,diff(levOrig)) checkInd <- which(diffs == 1)-1 out <- data.frame(index=index, levOrig=levOrig, codesDefault=codesDefault, ind=NA) out$ind[checkInd] <- 1 checkInd <- c(checkInd, length(index)) splitVec <- rep(0, length(index)) for ( j in 2:length(checkInd) ) { if ( j < length(checkInd) ) { splitVec[checkInd[j-1]:(checkInd[j]-1)] <- j-1 } else { splitVec[checkInd[j-1]:(checkInd[j])] <- j-1 } } spl <- split(index, splitVec) counter <- 1 for (k in 1:length(spl)) { rowInd <- match(spl[[k]], out$index) tmp <- out[rowInd,] if ( any(tmp[,"levOrig"]==levs[z]) ) { tmp <- tmp[1:(max(which(tmp$levOrig==levs[z]))),] res[[z]][[length(res[[z]])+1]] <- sort(unique(as.character(tmp$codesDefault))) } } } } final$indices[[i]] <- list() combs <- expand.grid(lapply(1:length(res), function(x) { 1:length(res[[x]]) })) final$indices[[i]] <- list(); length(final$indices[[i]]) <- nrow(combs) for (m in 1:nrow(combs)) { final.strIDs <- pasteStrVec(expand(lapply(1:ncol(combs), function(x) { res[[x]][[combs[m,x]]] })), ncol(combs)) df2 <- data.table(strIDs=final.strIDs, key="strIDs") final$indices[[i]][[m]] <- df[df2]$N } } final$nrGroups <- length(groups) final$nrTables <- sum(sapply(1:final$nrGroups, function(x) { length(final$indices[[x]]) })) return(final) }) setMethod("c_gen_mat_m", signature=c("list"), definition=function(input) { x <- input$objectA y <- input$objectB levelObj <- g_dim_info(y) strID <- g_strID(x) nrVars <- length(levelObj) nrCells <- g_nrVars(x) freqs <- g_freq(x) constraintM <- init.simpleTriplet(type='simpleTriplet', input=list(mat=matrix(0, nrow=0, ncol=nrCells))) for ( i in 1:nrVars ) { lO <- levelObj[[i]] keepList <- lapply(g_str_info(y)[-i], function(k) { seq(k[1], k[2]) }) keepList2 <- lapply(g_str_info(y)[i], function(k) { seq(k[1], k[2]) }) f1 <- f2 <- mySplitIndicesList(strID, keepList2) if ( nrVars > 1 ) { f1 <- mySplitIndicesList(strID, keepList) } dimlO <- g_dims(lO) if ( length(unique(f2)) != 1 ) { dimInd <- sapply(1:length(dimlO), function(x) { 
identical( sort(unique(f2)), dimlO[[x]]) } ) if ( sum(dimInd) == 0 ) { for ( j in 1:length(g_dims(lO)) ) { splitInd <- which(f2 %in% g_dims(lO)[[j]]) spl <- split(splitInd, f1[splitInd]) for ( z in 1:length(spl) ) { ind <- rep(1,length(spl[[z]])) ind[which.max(freqs[spl[[z]]])] <- -1 if ( !is.zero(sum(freqs[spl[[z]]]*ind)) ) { stop("something went wrong!\n") } constraintM <- c_add_row(constraintM, input=list(index=spl[[z]], values=ind)) } } } else { splitInd <- which(f2 %in% g_dims(lO)[[which(dimInd==TRUE)]]) ## only 1 dimension if ( nrVars > 1 ) { spl <- split(splitInd, f1[splitInd]) } else { spl <- split(splitInd, rep(1, length(splitInd))) } for ( z in 1:length(spl) ) { ind <- rep(1,length(spl[[z]])) ind[which.max(freqs[spl[[z]]])] <- -1 if ( !is.zero(sum(freqs[spl[[z]]]*ind)) ) { stop("something went wrong! (z=",z," und names(spl)[z]='",names(spl)[z],")\n") } constraintM <- c_add_row(constraintM, input=list(index=spl[[z]], value=ind)) } } } } return(constraintM) }) setMethod("c_make_att_prob", signature=c("list"), definition=function(input) { x <- input$objectA y <- input$objectB nrVars <- g_nrVars(x) A <- c_gen_mat_m(input=list(objectA=x, objectB=y)) ## calculating (logical) constraints for the master problem ## # idea: for each constraint at least 2 suppressions must # exist if one xi != 0! 
(http://www.eia.doe.gov/ices2/missing_papers.pdf) newCutsMaster <- init.cutList(type='empty', input=list(nrCols=nrVars)) #xx <- lapply(1:g_nr_rows(A), function(x) { # cols <- g_col_ind(g_row(A, input=list(x))) # v <- rep(0, nrVars) # v[cols] <- c(1, rep(-1, length(cols))) # s_add_complete_constraint(newCutsMaster) <<- list(init.cutList(type='singleCut', input=list(vals=v, dir="<=", rhs=0))) #}) ################################################################ nrConstraints <- g_nr_rows(A) objective <- rep(0, length=2*nrVars+nrConstraints) z1 <- init.simpleTriplet(type='simpleTripletDiag', input=list(nrRows=nrVars, negative=FALSE)) z2 <- init.simpleTriplet(type='simpleTripletDiag', input=list(nrRows=nrVars, negative=TRUE)) z <- c_bind(object=z1, input=list(z2, bindRow=FALSE)) A <- c_bind(object=z, input=list(g_transpose(A), bindRow=FALSE)) direction <- rep("==", g_nr_rows(A)) rhs <- rep(0, g_nr_rows(A)) types <- rep("C", g_nr_cols(A)) boundsLower <- list(ind=1:g_nr_cols(A), val=c(rep(0, 2*nrVars), rep(-Inf, nrConstraints))) boundsUpper <- list(ind=1:g_nr_cols(A), val=c(rep(Inf, 2*nrVars), rep(Inf, nrConstraints))) aProb <- new("linProb", objective=objective, constraints=A, direction=direction, rhs=rhs, boundsLower=boundsLower, boundsUpper=boundsUpper, types=types) return(list(aProb=aProb, newCutsMaster=newCutsMaster)) }) setMethod("c_calc_full_prob", signature=c("list"), definition=function(input) { .SD <- ID <- id <- NULL x <- input$objectA y <- input$objectB time.start <- proc.time() datO <- g_raw_data(x) dimObj <- g_dim_info(y) # we have to aggregate if we are dealing with microdata if (g_is_microdata(x)) { rawData <- datO[, lapply(.SD, sum, na.rm=TRUE), by=key(datO), .SDcols=setdiff(colnames(datO), key(datO))] } else { rawData <- copy(datO) } ind.dimvars <- g_dimvar_ind(x) ind.freq <- g_freqvar_ind(x) codes <- list(); length(codes) <- length(ind.dimvars) for (i in 1:length(codes)) { codes[[i]] <- rawData[[ind.dimvars[i]]] cDefault <- g_default_codes(dimObj[[i]]) 
cOriginal <- g_original_codes(dimObj[[i]]) cOriginalDups <- g_dups(dimObj[[i]]) cOriginalDupsUp <- g_dups_up(dimObj[[i]]) if (all(unique(codes[[i]]) %in% c(cOriginal, cOriginalDups))) { codes[[i]] <- c_match_default_codes(object=dimObj[[i]], input=rawData[[ind.dimvars[i]]]) if (sum(is.na(codes[[i]]))>0) { stop(paste0("NA values in default codes have been generated for variable ", shQuote(names(dimObj)[i]),".\nPlease check the definition of this hierarchy!\n")) } } else if (all(unique(codes[[i]]) %in% cDefault)) { # cat("no recoding necessary!\n") } else { stop("c_calc_full_prob:: recoding not possible. Check your inputs!\n") } } ## calculate all possible combinations within the lowest levels of dim-vars ## if any combinations are missing (missing.codes), we have to set them to 0 later strID <- as.character(pasteStrVec(unlist(codes), length(codes))) exDims <- pasteStrVec(unlist(codes), length(codes)) possDims <- sort(pasteStrVec(as.character(expand(lapply(dimObj, function(x) { g_minimal_default_codes(x) }), vector=TRUE)), length(dimObj))) missing.codes <- setdiff(possDims, exDims) ## fill the table nrIndexvars <- length(ind.dimvars) fullDims <- lapply(dimObj, g_dims) allCodes <- expand(lapply(dimObj, g_default_codes), vector=FALSE) fullTabObj <- data.table(ID=1:length(allCodes[[1]])) for (i in 1:length(allCodes)) { fullTabObj[,colnames(rawData)[ind.dimvars][i]:=allCodes[[i]]] } setkeyv(fullTabObj, colnames(rawData)[ind.dimvars]) fullTabObj[,ID:=NULL] ## revert rawData codes to default codes for (j in seq_along(ind.dimvars)) { v <- c_match_default_codes(object=dimObj[[j]], input=rawData[,get(names(dimObj)[j])]) set(rawData, NULL, names(dimObj)[j], v) } setkeyv(rawData, colnames(rawData)[ind.dimvars]) ## replace NAs in rawData by 0 (required for aggregation) cols <- colnames(rawData)[(length(dimObj)+1):ncol(rawData)] ind.na <- list(); length(ind.na) <- length(cols); k <- 1 for (j in cols) { ind.na[[k]] <- which(is.na(rawData[[j]])) set(rawData, ind.na[[k]], j, 0) k <- 
k+1 }; rm(k) ## merge minDat to fullDat fullTabObj <- merge(fullTabObj, rawData, all.x=TRUE) ## set missing combinations of lowest levels to 0 ## problematic are all levels that should exist, but do not exist ## they are filled with 0 so that we can aggregate dim.vars <- colnames(fullTabObj)[ind.dimvars] # performance improvement cmd <- paste0("fullTabObj[,strID:=paste0(",dim.vars[1]) if (length(dim.vars)>1) { for (i in 2:length(dim.vars)) { cmd <- paste0(cmd, ", ",dim.vars[i]) } } cmd <- paste0(cmd,")]") eval(parse(text=cmd)) strID <- fullTabObj$strID fullTabObj[,strID:=NULL] if (length(missing.codes) > 0) { index <- which(strID%in%missing.codes) for (i in 1:length(cols)) { set(fullTabObj, index, cols[i], 0) } } ## fill up missing dimensions not.finished <- TRUE # which indexvars have any hierarchy (not just the total?) # these indiecs specify the dim-variables we loop over useInds <- which(sapply(y@dimInfo, function(x) { length(x@codesOriginal)>1 })) fullTabObj[,id:=.I] cols <- (nrIndexvars+1):(ncol(fullTabObj)-1) col.names <- names(fullTabObj)[cols] while (not.finished) { for (i in useInds) { if (length(dim.vars) > 1) { setkeyv(fullTabObj, dim.vars[-i]) } else { setkeyv(fullTabObj, dim.vars[1]) } cur.dim <- dimObj[[i]]@dims for (j in length(cur.dim):1) { cur.levs <- cur.dim[[j]] cmd <- paste0("out <- fullTabObj[",dim.vars[i],"%in% cur.levs[-1],]") eval(parse(text=cmd)) if (length(dim.vars)==1) { out <- out[,lapply(.SD,sum), .SDcols=col.names] } else { out <- out[,lapply(.SD,sum), .SDcols=col.names, by=key(out)] } cmd <- paste0("row.ind <- fullTabObj[",dim.vars[i],"==cur.levs[1],id]") eval(parse(text=cmd)) for (z in col.names) { cmd <- paste0("fullTabObj[id %in% row.ind,",z,":=out[[z]]]") eval(parse(text=cmd)) } } } if (!is.na(fullTabObj[1,ind.freq,with=FALSE])) { not.finished <- FALSE } else { cat("nrMissings:",sum(is.na(fullTabObj$freq)),"\n") } } fullTabObj[,id:=NULL] nrV <- nrow(fullTabObj) f <- fullTabObj[[ind.freq]] strID <- 
apply(fullTabObj[,dim.vars,with=FALSE],1,paste0, collapse="") # performance improvement cmd <- paste0("fullTabObj[,strID:=paste0(",dim.vars[1]) if (length(dim.vars)>1) { for (i in 2:length(dim.vars)) { cmd <- paste0(cmd, ", ",dim.vars[i]) } } cmd <- paste0(cmd,")]") eval(parse(text=cmd)) strID <- fullTabObj$strID fullTabObj[,strID:=NULL] w <- numVarsList <- NULL w.ind <- g_weightvar_ind(x) if ( !is.null(w.ind) ) { w <- fullTabObj[[w.ind]] } n.ind <- g_numvar_ind(x) if (!is.null(n.ind)) { numVarsList <- list(); length(numVarsList) <- length(n.ind) for (n in 1:length(n.ind)) { numVarsList[[n]] <- fullTabObj[[n.ind[n]]] } } if (length(n.ind) > 0) { names(numVarsList) <- colnames(g_raw_data(x))[n.ind] } ## replace 0 in rawData by NA if they have been replaced earlier for (i in 1:length(ind.na)) { if (length(ind.na[[i]]) > 0) { set(rawData, ind.na[[i]], cols[i], NA) } } s_raw_data(x) <- list(datO) problemInstance <- new("problemInstance", strID=strID, Freq=f, w=w, numVars=numVarsList, lb=rep(0, nrV), ub=pmax(2*f, 5), LPL=rep(1, nrV), UPL=rep(1, nrV), SPL=rep(0, nrV), sdcStatus=rep("s", nrV) ) problemInstance@sdcStatus[problemInstance@Freq==0] <- "z" partition <- c_make_partitions(input=list(objectA=problemInstance, objectB=y)) sdcProblem <- new("sdcProblem", dataObj=x, dimInfo=y, problemInstance=problemInstance, partition=partition, startI=1, startJ=1, indicesDealtWith=NULL, elapsedTime=(proc.time()-time.start)[3] ) return(sdcProblem) })
# function for each method used in simulation func_EMLasso<-function(formula,data,dist){fit<-zipath(formula=formula,data=data,family=dist); idx<-which.min(fit$bic); coeff.final=list(count=fit$coefficients$count[,idx],zero=fit$coefficients$zero[,idx]); return(list(coefficients=coeff.final,aic=fit$aic[idx], bic=fit$bic[idx], loglik=fit$loglik[idx]))} func_grLasso<-function(data,yvar,xvars,zvars,group,dist){return(gooogle(data=data,yvar=yvar,xvars=xvars,zvars=zvars,group=group,dist=dist,penalty="grLasso"))} func_grMCP<-function(data,yvar,xvars,zvars,group,dist){return(gooogle(data=data,yvar=yvar,xvars=xvars,zvars=zvars,group=group,dist=dist,penalty="grMCP"))} func_grSCAD<-function(data,yvar,xvars,zvars,group,dist){return(gooogle(data=data,yvar=yvar,xvars=xvars,zvars=zvars,group=group,dist=dist,penalty="grSCAD"))} func_gBridge<-function(data,yvar,xvars,zvars,group,dist){return(gooogle(data=data,yvar=yvar,xvars=xvars,zvars=zvars,group=group,dist=dist,penalty="gBridge"))}
/Sim1_Cluster/Codes/penalty_func.R
no_license
sapgit/GooogleDoc
R
false
false
985
r
# function for each method used in simulation func_EMLasso<-function(formula,data,dist){fit<-zipath(formula=formula,data=data,family=dist); idx<-which.min(fit$bic); coeff.final=list(count=fit$coefficients$count[,idx],zero=fit$coefficients$zero[,idx]); return(list(coefficients=coeff.final,aic=fit$aic[idx], bic=fit$bic[idx], loglik=fit$loglik[idx]))} func_grLasso<-function(data,yvar,xvars,zvars,group,dist){return(gooogle(data=data,yvar=yvar,xvars=xvars,zvars=zvars,group=group,dist=dist,penalty="grLasso"))} func_grMCP<-function(data,yvar,xvars,zvars,group,dist){return(gooogle(data=data,yvar=yvar,xvars=xvars,zvars=zvars,group=group,dist=dist,penalty="grMCP"))} func_grSCAD<-function(data,yvar,xvars,zvars,group,dist){return(gooogle(data=data,yvar=yvar,xvars=xvars,zvars=zvars,group=group,dist=dist,penalty="grSCAD"))} func_gBridge<-function(data,yvar,xvars,zvars,group,dist){return(gooogle(data=data,yvar=yvar,xvars=xvars,zvars=zvars,group=group,dist=dist,penalty="gBridge"))}
# This is the server logic of a Shiny web application. You can run the library(shiny) # Define server logic required to draw a histogram shinyServer(function(input, output) { ## result from analysis in presentation mod<-lm(mpg~wt+qsec+am,data=mtcars) mpg_pred1<-reactive({ wtInput<-input$wt_input qsecInput<-input$qsec_input amInput<-as.numeric(input$am_input) predict(mod,newdata=data.frame(wt=wtInput,qsec=qsecInput,am=as.numeric(input$am_input))) }) output$plot1 <- renderPlot({ wtInput<-input$wt_input qsecInput<-input$qsec_input amInput<-as.numeric(input$am_input) plot(mtcars$wt,mtcars$mpg,xlab="Weight(1000 lbs)", ylab="Miles per gallon",xlim=c(1,6),ylim=c(5,40), pch=21) points(wtInput,mpg_pred1(),col="red",pch=19) }) output$mpg_pred<-renderText({ mpg_pred1() }) })
/server.R
no_license
ashw673/Developing-Data-Products-Project-3-1-00
R
false
false
966
r
# This is the server logic of a Shiny web application. You can run the library(shiny) # Define server logic required to draw a histogram shinyServer(function(input, output) { ## result from analysis in presentation mod<-lm(mpg~wt+qsec+am,data=mtcars) mpg_pred1<-reactive({ wtInput<-input$wt_input qsecInput<-input$qsec_input amInput<-as.numeric(input$am_input) predict(mod,newdata=data.frame(wt=wtInput,qsec=qsecInput,am=as.numeric(input$am_input))) }) output$plot1 <- renderPlot({ wtInput<-input$wt_input qsecInput<-input$qsec_input amInput<-as.numeric(input$am_input) plot(mtcars$wt,mtcars$mpg,xlab="Weight(1000 lbs)", ylab="Miles per gallon",xlim=c(1,6),ylim=c(5,40), pch=21) points(wtInput,mpg_pred1(),col="red",pch=19) }) output$mpg_pred<-renderText({ mpg_pred1() }) })
x <-53 print(x)
/AssessmentDropout/testing.R
no_license
thiliniperera/MOOCers
R
false
false
16
r
x <-53 print(x)
d 9 12 6 27 B 10 d 9 12 12 C 2 5 d 0 2 D 3 3 0 d 2 E 2 3 0 4 0 data1=matrix(c(0,10,2,3,2,9,0,5,3,3,12,9,0,0,0,6,12,0,0,4,27,12,2,2,0),5,5) fun_1(data1,1000,3,1) # para is the variance prior for the random position d # choose is choosing which one as the focal to let it's d value be 0 as standard # dist can be 1 or 2 # 1 is normal distribution and 2 is student t distribution # If you use t-distribution,the degree of freedom will be n-2, n is the total amount of species # Install the rstan package from stan website before running this function fun_1<-function(data1,para=1000,choose,dist=1,simulation=1000){ library(rstan) bayesiani_si<-" data{ int n; int y[n,n]; real sigma1; int focal; int dist; } parameters{ real <lower=-15,upper=15> d[n]; } transformed parameters{ real <lower=-15,upper=15> d1[n]; d1<-d; d1[focal]<-0; } model{ if (dist==2){ for (i in 1:n) {d[i]~student_t(n-2,0,sigma1);} }else{ for (i in 1:n) {d[i]~normal(0,sigma1);} } for (i in 1:(n-1)){ for(j in (i+1):n){ y[i,j]~binomial(y[i,j]+y[j,i],1/(1+exp(d1[j]-d1[i]))); } } } " n=nrow(data1) data=list(y=data1,n=n,focal=choose,dist=dist,sigma1=para) fit <- stan(model_code = bayesiani_si, model_name = "example11", data = data, iter = simulation, chains = 2, verbose = FALSE) combine<-function(string1){ n=length(string1) save='' for (i in 1:n) save=paste0(save,string1[i]) return (save) } getinverse<-function(a,b){ n=length(a) a_1<-sort(a,decreasing=TRUE,index.return=TRUE) after<-b[a_1$ix] sum1=0 for(i in 1:(n-1)){ for(j in (i+1):n){ if (after[j]>after[i]) sum1=sum1+1 } } return (sum1) } getarea<-function(a,b){ n=length(a) a=a+rnorm(n,0,0.1) b=b+rnorm(n,0,0.1) result<-rep(0,100) area<-0 for (i in 1:100){ a1=quantile(b,0.01*i) area=area+length(which(a<=a1))/n result[i]=length(which(a<=a1))/n } answer=list(area=area/100,result=result) } b=matrix(0,n,simulation) newletter=c(letters,LETTERS) for (j in 1:2){ a=fit@sim$samples[[j]] for (i in 1:n){ if (j==1) { b[i,1:(simulation/2)]=a[[n+i]][(simulation/2+1):simulation] 
}else if(j==2){ b[i,(simulation/2+1):simulation]=a[[n+i]][(simulation/2+1):simulation] } } } save1=NULL number=NULL times=simulation empirical<-rep(0,times) for (i in 1:times){ for(j in 1:times){ empirical[i]=empirical[i]+getinverse(b[,i],b[,j]) } } newmatrix<-matrix(0,n,n) mm<-rowMeans(b) ntime<-data1+t(data1) for(i in 1:n){ for(j in 1:n){ if(i!=j){ if(mm[i]>mm[j]){ newmatrix[i,j]=floor(1/(1+exp(mm[j]-mm[i]))*ntime[i,j])+1 }else{ newmatrix[i,j]=floor(1/(1+exp(mm[j]-mm[i]))*ntime[i,j]) } } } } data=list(y=newmatrix,n=n,focal=choose,dist=dist,sigma1=para) fit1 <- stan(model_code = bayesiani_si, model_name = "example11", data = data, iter = simulation, chains = 2, verbose = FALSE) b1=matrix(0,n,simulation) for (j in 1:2){ a=fit1@sim$samples[[j]] for (i in 1:n){ if (j==1) { b1[i,1:(simulation/2)]=a[[n+i]][(simulation/2+1):simulation] }else if(j==2){ b1[i,(simulation/2+1):simulation]=a[[n+i]][(simulation/2+1):simulation] } } } nullhypo<-rep(0,times) for (i in 1:times){ for(j in 1:times){ nullhypo[i]=nullhypo[i]+getinverse(b1[,i],b1[,j]) } } area1=getarea(nullhypo,empirical) ##do the simulation area fit2 <- stan(model_code = bayesiani_si, model_name = "example11", data = data, iter = simulation*2, chains = 2, verbose = FALSE) b2=matrix(0,n,simulation*2) for (j in 1:2){ a=fit2@sim$samples[[j]] for (i in 1:n){ if (j==1) { b2[i,1:simulation]=a[[n+i]][(simulation+1):simulation] }else if(j==2){ b2[i,(simulation+1):(2*simulation)]=a[[n+i]][(simulation+1):(2*simulation)] } } } em_area<-rep(0,5000) reference<-rep(0,2*simulation) for (i in 1:(2*times)){ for(j in 1:(2*times)){ reference[i]=reference[i]+getinverse(b2[,i],b2[,j]) } } for (i in 1:5000){ matrix1<-matrix(sample(2*times,2*times),2,times) ssss=getarea(reference[matrix1[1,]],reference[matrix1[2,]]) em_area[i]=ssss$area } area_pvalue<-length(which(em_area>area1$area))/5000 for (i in 1:simulation){ index=combine(newletter[sort(b[,i],index.return=TRUE,decreasing=TRUE)$ix]) if(any(save1==index)==FALSE){ save1=c(save1,index) 
number=c(number,1) }else if (any(save1==index)==TRUE){ number[which(save1==index)]=number[which(save1==index)]+1 } } result=sort(number,decreasing=TRUE,index.return=TRUE) prob=result$x[1:8]/simulation ranking=save1[result$ix[1:8]] kk=list(model=fit,ranking=ranking,prob=prob,lineartest_pvalue=area_pvalue) return (kk) }
/new/linear test.R
no_license
zihenghuang/compete
R
false
false
4,784
r
d 9 12 6 27 B 10 d 9 12 12 C 2 5 d 0 2 D 3 3 0 d 2 E 2 3 0 4 0 data1=matrix(c(0,10,2,3,2,9,0,5,3,3,12,9,0,0,0,6,12,0,0,4,27,12,2,2,0),5,5) fun_1(data1,1000,3,1) # para is the variance prior for the random position d # choose is choosing which one as the focal to let it's d value be 0 as standard # dist can be 1 or 2 # 1 is normal distribution and 2 is student t distribution # If you use t-distribution,the degree of freedom will be n-2, n is the total amount of species # Install the rstan package from stan website before running this function fun_1<-function(data1,para=1000,choose,dist=1,simulation=1000){ library(rstan) bayesiani_si<-" data{ int n; int y[n,n]; real sigma1; int focal; int dist; } parameters{ real <lower=-15,upper=15> d[n]; } transformed parameters{ real <lower=-15,upper=15> d1[n]; d1<-d; d1[focal]<-0; } model{ if (dist==2){ for (i in 1:n) {d[i]~student_t(n-2,0,sigma1);} }else{ for (i in 1:n) {d[i]~normal(0,sigma1);} } for (i in 1:(n-1)){ for(j in (i+1):n){ y[i,j]~binomial(y[i,j]+y[j,i],1/(1+exp(d1[j]-d1[i]))); } } } " n=nrow(data1) data=list(y=data1,n=n,focal=choose,dist=dist,sigma1=para) fit <- stan(model_code = bayesiani_si, model_name = "example11", data = data, iter = simulation, chains = 2, verbose = FALSE) combine<-function(string1){ n=length(string1) save='' for (i in 1:n) save=paste0(save,string1[i]) return (save) } getinverse<-function(a,b){ n=length(a) a_1<-sort(a,decreasing=TRUE,index.return=TRUE) after<-b[a_1$ix] sum1=0 for(i in 1:(n-1)){ for(j in (i+1):n){ if (after[j]>after[i]) sum1=sum1+1 } } return (sum1) } getarea<-function(a,b){ n=length(a) a=a+rnorm(n,0,0.1) b=b+rnorm(n,0,0.1) result<-rep(0,100) area<-0 for (i in 1:100){ a1=quantile(b,0.01*i) area=area+length(which(a<=a1))/n result[i]=length(which(a<=a1))/n } answer=list(area=area/100,result=result) } b=matrix(0,n,simulation) newletter=c(letters,LETTERS) for (j in 1:2){ a=fit@sim$samples[[j]] for (i in 1:n){ if (j==1) { b[i,1:(simulation/2)]=a[[n+i]][(simulation/2+1):simulation] 
}else if(j==2){ b[i,(simulation/2+1):simulation]=a[[n+i]][(simulation/2+1):simulation] } } } save1=NULL number=NULL times=simulation empirical<-rep(0,times) for (i in 1:times){ for(j in 1:times){ empirical[i]=empirical[i]+getinverse(b[,i],b[,j]) } } newmatrix<-matrix(0,n,n) mm<-rowMeans(b) ntime<-data1+t(data1) for(i in 1:n){ for(j in 1:n){ if(i!=j){ if(mm[i]>mm[j]){ newmatrix[i,j]=floor(1/(1+exp(mm[j]-mm[i]))*ntime[i,j])+1 }else{ newmatrix[i,j]=floor(1/(1+exp(mm[j]-mm[i]))*ntime[i,j]) } } } } data=list(y=newmatrix,n=n,focal=choose,dist=dist,sigma1=para) fit1 <- stan(model_code = bayesiani_si, model_name = "example11", data = data, iter = simulation, chains = 2, verbose = FALSE) b1=matrix(0,n,simulation) for (j in 1:2){ a=fit1@sim$samples[[j]] for (i in 1:n){ if (j==1) { b1[i,1:(simulation/2)]=a[[n+i]][(simulation/2+1):simulation] }else if(j==2){ b1[i,(simulation/2+1):simulation]=a[[n+i]][(simulation/2+1):simulation] } } } nullhypo<-rep(0,times) for (i in 1:times){ for(j in 1:times){ nullhypo[i]=nullhypo[i]+getinverse(b1[,i],b1[,j]) } } area1=getarea(nullhypo,empirical) ##do the simulation area fit2 <- stan(model_code = bayesiani_si, model_name = "example11", data = data, iter = simulation*2, chains = 2, verbose = FALSE) b2=matrix(0,n,simulation*2) for (j in 1:2){ a=fit2@sim$samples[[j]] for (i in 1:n){ if (j==1) { b2[i,1:simulation]=a[[n+i]][(simulation+1):simulation] }else if(j==2){ b2[i,(simulation+1):(2*simulation)]=a[[n+i]][(simulation+1):(2*simulation)] } } } em_area<-rep(0,5000) reference<-rep(0,2*simulation) for (i in 1:(2*times)){ for(j in 1:(2*times)){ reference[i]=reference[i]+getinverse(b2[,i],b2[,j]) } } for (i in 1:5000){ matrix1<-matrix(sample(2*times,2*times),2,times) ssss=getarea(reference[matrix1[1,]],reference[matrix1[2,]]) em_area[i]=ssss$area } area_pvalue<-length(which(em_area>area1$area))/5000 for (i in 1:simulation){ index=combine(newletter[sort(b[,i],index.return=TRUE,decreasing=TRUE)$ix]) if(any(save1==index)==FALSE){ save1=c(save1,index) 
number=c(number,1) }else if (any(save1==index)==TRUE){ number[which(save1==index)]=number[which(save1==index)]+1 } } result=sort(number,decreasing=TRUE,index.return=TRUE) prob=result$x[1:8]/simulation ranking=save1[result$ix[1:8]] kk=list(model=fit,ranking=ranking,prob=prob,lineartest_pvalue=area_pvalue) return (kk) }
# # This test file has been generated by kwb.test::create_test_files() # test_that("formatEventStatistics() works", { expect_error( kwb.monitoring:::formatEventStatistics() # argument "eventStatistics" is missing, with no default ) })
/tests/testthat/test-function-formatEventStatistics.R
permissive
rtmtemp/kwb.monitoring
R
false
false
264
r
# # This test file has been generated by kwb.test::create_test_files() # test_that("formatEventStatistics() works", { expect_error( kwb.monitoring:::formatEventStatistics() # argument "eventStatistics" is missing, with no default ) })
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/data.R \docType{data} \name{sample_trt_data} \alias{sample_trt_data} \title{metabolites information with subjectId and year as id} \format{A data frame with columns: \describe{ \item{subjectId}{an interger} \item{year}{ 0 or 1, an interger} \item{fishoilactive}{ 0 or 1, an interger} \item{vitdactive}{ 0 or 1, an interger} }} \source{ radomn generated } \usage{ sample_trt_data } \description{ metabolites information with subjectId and year as id } \examples{ \dontrun{ sample_trt_data } } \keyword{datasets}
/man/sample_trt_data.Rd
no_license
cautree/qa
R
false
true
594
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/data.R \docType{data} \name{sample_trt_data} \alias{sample_trt_data} \title{metabolites information with subjectId and year as id} \format{A data frame with columns: \describe{ \item{subjectId}{an interger} \item{year}{ 0 or 1, an interger} \item{fishoilactive}{ 0 or 1, an interger} \item{vitdactive}{ 0 or 1, an interger} }} \source{ radomn generated } \usage{ sample_trt_data } \description{ metabolites information with subjectId and year as id } \examples{ \dontrun{ sample_trt_data } } \keyword{datasets}
\name{predatGM} \alias{predatGM} \docType{data} \title{ Abundance data of predatory insects } \description{ In a field trial with 8 complete blocks, one genetically modified crop variety and three varieties without genetical modification (S1, S2, S3) have been cultivated. Note that S1 is genetically closely related to the GM variety, and mainly differs from GM by not containing the transformation, while S2 and S3 are conventional varieties, which are genetically not closely related to GM and S1. In each of the 24 plots, a certain taxonomic group of predatory insects has been trapped. Trapped individuals have been classified to the species level. A total of 33 different species has been observed. For each plot, the summed counts of each species over one cultivation period is given in the variables Sp1, Sp2,...Sp33. Among others, one question in research was: Does the genetic modified variety effect biodiversity of the (ecologically important, non-target) species? } \usage{data(predatGM)} \format{ A data frame with 32 observations on the following 35 variables. 
\describe{ \item{\code{Block}}{a numeric vector, values 1,...,8 indicate the blocks of the trial } \item{\code{Variety}}{a factor distinguishing the four varieties in the field trial, with levels \code{GM} (the genetically modified variety), \code{S1} (the near-isogenic, conventional variety), \code{S2} and \code{S3} (further conventional varieties)} \item{\code{Sp1}}{a numeric vector, observed counts of species 1} \item{\code{Sp2}}{a numeric vector, ...} \item{\code{Sp3}}{a numeric vector} \item{\code{Sp4}}{a numeric vector} \item{\code{Sp5}}{a numeric vector} \item{\code{Sp6}}{a numeric vector} \item{\code{Sp7}}{a numeric vector} \item{\code{Sp8}}{a numeric vector} \item{\code{Sp9}}{a numeric vector} \item{\code{Sp10}}{a numeric vector} \item{\code{Sp11}}{a numeric vector} \item{\code{Sp12}}{a numeric vector} \item{\code{Sp13}}{a numeric vector} \item{\code{Sp14}}{a numeric vector} \item{\code{Sp15}}{a numeric vector} \item{\code{Sp16}}{a numeric vector} \item{\code{Sp17}}{a numeric vector} \item{\code{Sp18}}{a numeric vector} \item{\code{Sp19}}{a numeric vector} \item{\code{Sp20}}{a numeric vector} \item{\code{Sp21}}{a numeric vector} \item{\code{Sp22}}{a numeric vector} \item{\code{Sp23}}{a numeric vector} \item{\code{Sp24}}{a numeric vector} \item{\code{Sp25}}{a numeric vector} \item{\code{Sp26}}{a numeric vector} \item{\code{Sp27}}{a numeric vector} \item{\code{Sp28}}{a numeric vector} \item{\code{Sp29}}{a numeric vector} \item{\code{Sp30}}{a numeric vector} \item{\code{Sp31}}{a numeric vector} \item{\code{Sp32}}{a numeric vector} \item{\code{Sp33}}{a numeric vector} } } \source{ Data set provided by Kai U. Priesnitz, Bavarian State Research Center for Agriculture, Institute for Plant Protection, Freising, Germany. 
} \examples{ data(predatGM) str(predatGM) # Display data as a mosaicplot # load("D:/Mueller/Biodiv/data/predatGM.rda") # Matrix of counts with appropriate names COUNTS<-as.matrix(predatGM[,3:35]) SPECNAM<-names(predatGM)[3:35] colnames(COUNTS)<-SPECNAM rownames(COUNTS)<-predatGM[,"Variety"] # Assign colors and order by decreasing total abundance COL<-grey(c(0,2,4,6,8,1,3,5,7)/8) DMO<-COUNTS[,order(colSums(COUNTS), decreasing=TRUE)] colnames(DMO)[15:33]<-"." # Mosaicplot par(mar=c(4,2,1,1)) mosaicplot(DMO, col=COL, las=2, off=15, main="", cex=1.1) mtext("A", side=3, line=-1.5, adj=0, cex=2) } \keyword{datasets}
/man/predatGM.Rd
no_license
shearer/simboot
R
false
false
3,689
rd
\name{predatGM} \alias{predatGM} \docType{data} \title{ Abundance data of predatory insects } \description{ In a field trial with 8 complete blocks, one genetically modified crop variety and three varieties without genetical modification (S1, S2, S3) have been cultivated. Note that S1 is genetically closely related to the GM variety, and mainly differs from GM by not containing the transformation, while S2 and S3 are conventional varieties, which are genetically not closely related to GM and S1. In each of the 24 plots, a certain taxonomic group of predatory insects has been trapped. Trapped individuals have been classified to the species level. A total of 33 different species has been observed. For each plot, the summed counts of each species over one cultivation period is given in the variables Sp1, Sp2,...Sp33. Among others, one question in research was: Does the genetic modified variety effect biodiversity of the (ecologically important, non-target) species? } \usage{data(predatGM)} \format{ A data frame with 32 observations on the following 35 variables. 
\describe{ \item{\code{Block}}{a numeric vector, values 1,...,8 indicate the blocks of the trial } \item{\code{Variety}}{a factor distinguishing the four varieties in the field trial, with levels \code{GM} (the genetically modified variety), \code{S1} (the near-isogenic, conventional variety), \code{S2} and \code{S3} (further conventional varieties)} \item{\code{Sp1}}{a numeric vector, observed counts of species 1} \item{\code{Sp2}}{a numeric vector, ...} \item{\code{Sp3}}{a numeric vector} \item{\code{Sp4}}{a numeric vector} \item{\code{Sp5}}{a numeric vector} \item{\code{Sp6}}{a numeric vector} \item{\code{Sp7}}{a numeric vector} \item{\code{Sp8}}{a numeric vector} \item{\code{Sp9}}{a numeric vector} \item{\code{Sp10}}{a numeric vector} \item{\code{Sp11}}{a numeric vector} \item{\code{Sp12}}{a numeric vector} \item{\code{Sp13}}{a numeric vector} \item{\code{Sp14}}{a numeric vector} \item{\code{Sp15}}{a numeric vector} \item{\code{Sp16}}{a numeric vector} \item{\code{Sp17}}{a numeric vector} \item{\code{Sp18}}{a numeric vector} \item{\code{Sp19}}{a numeric vector} \item{\code{Sp20}}{a numeric vector} \item{\code{Sp21}}{a numeric vector} \item{\code{Sp22}}{a numeric vector} \item{\code{Sp23}}{a numeric vector} \item{\code{Sp24}}{a numeric vector} \item{\code{Sp25}}{a numeric vector} \item{\code{Sp26}}{a numeric vector} \item{\code{Sp27}}{a numeric vector} \item{\code{Sp28}}{a numeric vector} \item{\code{Sp29}}{a numeric vector} \item{\code{Sp30}}{a numeric vector} \item{\code{Sp31}}{a numeric vector} \item{\code{Sp32}}{a numeric vector} \item{\code{Sp33}}{a numeric vector} } } \source{ Data set provided by Kai U. Priesnitz, Bavarian State Research Center for Agriculture, Institute for Plant Protection, Freising, Germany. 
} \examples{ data(predatGM) str(predatGM) # Display data as a mosaicplot # load("D:/Mueller/Biodiv/data/predatGM.rda") # Matrix of counts with appropriate names COUNTS<-as.matrix(predatGM[,3:35]) SPECNAM<-names(predatGM)[3:35] colnames(COUNTS)<-SPECNAM rownames(COUNTS)<-predatGM[,"Variety"] # Assign colors and order by decreasing total abundance COL<-grey(c(0,2,4,6,8,1,3,5,7)/8) DMO<-COUNTS[,order(colSums(COUNTS), decreasing=TRUE)] colnames(DMO)[15:33]<-"." # Mosaicplot par(mar=c(4,2,1,1)) mosaicplot(DMO, col=COL, las=2, off=15, main="", cex=1.1) mtext("A", side=3, line=-1.5, adj=0, cex=2) } \keyword{datasets}
## Alpine nursery: compute tree size indices from raw nursery measurements,
## save a cleaned data set, and draw exploratory plots against the
## container-assessment standard.
## NOTE(review): uses windows() graphics devices, so this script is
## Windows-only as written; silab and the polygon bounds in plot #7 are
## assumed to come from the sourced plot_objects.R -- TODO confirm.

source("functions_and_packages/plot_objects.R")

##standard
standard <- read.csv("reports/container_assessment.csv")
alpine_dat <- read.csv("reports/alpine_sizeindex.csv")

#function to calculate size index, etc from nursery data---------------

#format raw dataframe
alpine_dat$batch_id2 <- paste(alpine_dat$batch_id, alpine_dat$species, sep="-")

##need to replace 'mm' pots with appropriate volume (litres)
alpine_dat$volume <- gsub("300mm", 15, alpine_dat$volume)
alpine_dat$volume <- gsub("400mm", 35, alpine_dat$volume)
alpine_dat$volume <- gsub("500mm", 65, alpine_dat$volume)
alpine_dat$volume <- as.numeric(alpine_dat$volume)
print("all container volumes corrected")

#units and date formatting
alpine_dat$height_m <- alpine_dat$height/100
alpine_dat$date <- as.Date(alpine_dat$date, format = "%d/%m/%Y", tz="AEST")
print("date conversion worked")

#deal with species name
#check data quality

#calculate indices
alpine_dat$calliper300 <- with(alpine_dat, (diameter1+diameter2)/2)
alpine_dat$rcd <- with(alpine_dat, (rcd1+rcd2)/2)
alpine_dat$sizeindex <- with(alpine_dat, height_m * calliper300)
alpine_dat$slenderness1 <- with(alpine_dat, height_m/rcd)
alpine_dat$slenderness2 <- with(alpine_dat, height_m/calliper300)

#log data for plotting
alpine_dat$logSI <- with(alpine_dat, log10(sizeindex))
alpine_dat$logvol <- with(alpine_dat, log10(volume))
alpine_dat$logH <- with(alpine_dat, log10(height_m))
alpine_dat$logD <- with(alpine_dat, log10(calliper300))
alpine_dat$logRCD <- with(alpine_dat, log10(rcd))
alpine_dat$logslender <- with(alpine_dat, log10(slenderness2))

### clean data saved ------------------------------------------------------------------------------------------------
alp2 <- alpine_dat[, c("nursery","date", "species","batch_id","batch_id2","volume","calliper300","rcd",
                       "height_m", "sizeindex", "logSI", "logvol", "logH", "logD", "logRCD")]
write.csv(alp2, "calculated_data/alpine_clean.csv", row.names = FALSE)

#total number of observations
print(paste(nrow(alpine_dat), "trees measured"))

alpine_agg <- doBy::summaryBy(sizeindex+slenderness2 ~ species+batch_id, fun=mean, data=alpine_dat,
                              keep.names = TRUE)

#number of batches?
length(unique(alpine_agg$batch_id))
#number of species?
length(unique(alpine_agg$species))
#number of container sizes?
length(unique(alpine_dat$volume))

alpinevols <- doBy::summaryBy(sizeindex ~ species+volume, fun=mean, data=alpine_dat, keep.names = TRUE)

###plotting-------------------------------------------------------------------------------------------------------------
library(magicaxis)
library(RColorBrewer)

##plotbits
###large color palette (built from all qualitative RColorBrewer palettes)
n <- 22
qual_col_pals = brewer.pal.info[brewer.pal.info$category == 'qual',]
col_vector = unlist(mapply(brewer.pal, qual_col_pals$maxcolors, rownames(qual_col_pals)))
# pie(rep(1,n), col=sample(col_vector, n))

speciesnames <- unique(alpine_dat$species)
speciesnames2 <- data.frame(species = speciesnames, colorspec = col_vector[1:22])
alpine_dat2 <- merge(alpine_dat, speciesnames2)

#1. size index vs volume
windows(7,7)
#png(filename = "images/prelimdata.png", width = 5, height = 5, units = "in", res= 600)
par(mar=c(5,5,1,1),cex.axis=1, cex.lab=1.25,las=0,mgp=c(3,1,0))
plot(logSI ~ logvol, data=alpine_dat, xlab="Container volume (L)", ylab=silab, axes=FALSE,
     cex=1.25, bg=col_vector[alpine_dat$species],pch=21, xlim=c(.5,3.75), ylim=c(.5,3.5))
magicaxis::magaxis(side=c(1,2), unlog=c(1,2), frame.plot=FALSE)
#add assessment (min/max size-index envelope from the standard)
points(log10(min_size_index[1:36])~log10(container_volume[1:36]), data=standard, bg="grey65",
       type='l',lwd=2)
points(log10(max_size_index[1:36])~log10(container_volume[1:36]), data=standard, col="black",
       type='l', lwd=2)
legend("topleft", c("Max. size index", "Min. size index") ,pch=c(16, 21), cex=1.25, bty='n', inset=.01)
box()

#2. height vs volume
windows(7,7)
par(mar=c(5,5,1,1),cex.axis=1, cex.lab=1.25,las=0,mgp=c(3,1,0))
plot(logH ~ logvol, data=alpine_dat, xlab="Container volume (L)", ylab="Height (m)", axes=FALSE,
     cex=1.25, bg=col_vector[alpine_dat$species],pch=21, xlim=c(.5,3.75), ylim=c(-.5,1))
magicaxis::magaxis(side=c(1,2), unlog=c(1,2), frame.plot=FALSE)
box()

#3. calliper vs volume
windows(7,7)
par(mar=c(5,5,1,1),cex.axis=1, cex.lab=1.25,las=0,mgp=c(3,1,0))
plot(logD ~ logvol, data=alpine_dat, xlab="Container volume (L)", ylab="Stem Calliper @ 300mm (mm)",
     axes=FALSE, cex=1.25, bg=col_vector[alpine_dat$species],pch=21, xlim=c(.5,3.75), ylim=c(.5,2.25))
magicaxis::magaxis(side=c(1,2), unlog=c(1,2), frame.plot=FALSE)
box()

#4. rcd vs volume
windows(7,7)
par(mar=c(5,5,1,1),cex.axis=1, cex.lab=1.25,las=0,mgp=c(3,1,0))
plot(logRCD ~ logvol, data=alpine_dat, xlab="Container volume (L)", ylab="Root Collar Diameter (mm)",
     axes=FALSE, cex=1.25, bg=col_vector[alpine_dat$species],pch=21, xlim=c(1,3.75), ylim=c(1,2.25))
magicaxis::magaxis(side=c(1,2), unlog=c(1,2), frame.plot=FALSE)
box()

#4. slenderness index
windows(7,7)
par(mar=c(5,5,1,1),cex.axis=1, cex.lab=1.25,las=0,mgp=c(3,1,0))
plot(logslender ~ logvol, data=alpine_dat, xlab="Container volume (L)",
     ylab="Slenderness Index (Height:Calliper)", axes=FALSE, cex=1.25,
     bg=col_vector[alpine_dat$species],pch=21, xlim=c(.5,3.75), ylim=c(-1.5, -.75))
magicaxis::magaxis(side=c(1,2), unlog=c(1,2), frame.plot=FALSE)
box()

#5. height vs diameter
windows(7,7)
par(mar=c(5,5,1,1),cex.axis=1, cex.lab=1.25,las=0,mgp=c(3,1,0))
plot(calliper300 ~ height_m, data=alpine_dat, xlab="Height (m)", ylab="Diameter (mm)",xlim=c(0,8),
     ylim=c(0,175), cex=1.25, col=col_vector[alpine_dat$species],pch=1)
points(rcd ~ height_m, data=alpine_dat, cex=1.25, bg=col_vector[alpine_dat$species],pch=21)
box()
legend("topleft", c("Root Collar Diameter","Calliper @ 300mm") ,pch=c(16, 1), bg="black", cex=1,
       bty='n', inset=.01)

#6. Boxplots of size index at 25L for example
vol25 <- alpine_dat2[alpine_dat2$volume==25 , ]
vol25 <- droplevels(vol25)
species25 <- sort(unique(vol25$species))

# vol45 <- alpine_dat2[alpine_dat2$volume==45 , ]
# vol45 <- droplevels(vol45)
# species45 <- sort(unique(vol45$species))

vol100 <- alpine_dat2[alpine_dat2$volume==100, ]
vol100 <- droplevels(vol100)
species100 <- sort(unique(vol100$species))

windows(7,7)
par(mar=c(10,5,1,1),mgp=c(3,1,0))
boxplot(sizeindex ~ species, data=vol25, ylab="Size Index", las=2, xlab="", xaxt='n',
        outline=FALSE,col=col_vector[1:9])
axis(1, at = 1:9,labels=FALSE)
text(x=1:9, y=-3, species25, srt=45, xpd=TRUE, adj=1)
text(x=8.5, y=62.5, label="25L Containers", font=2)

# windows(7,7)
# par(mar=c(10,5,1,1),mgp=c(3,1,0))
# boxplot(sizeindex ~ species, data=vol45, ylab="Size Index", las=2, xlab="", xaxt='n', outline=FALSE, col=col_vector)
# axis(1, at = 1:8,labels=FALSE)
# text(x=1:8, y=8, species45, srt=45, xpd=TRUE, adj=1)
# text(x=7.5, y=150, label="45L Containers", font=2)

windows(7,7)
par(mar=c(10,5,1,1),mgp=c(3,1,0))
boxplot(sizeindex ~ species, data=vol100, ylab="Size Index", las=2, xlab="", xaxt='n',
        outline=FALSE, col=col_vector[c(1,10,4,11,7,8,12,13,14,15)])
axis(1, at = 1:10,labels=FALSE)
text(x=1:10, y=35, species100, srt=45, xpd=TRUE, adj=1)
text(x=9.5, y=310, label="100L Containers", font=2)

#7. size index vs volume (banksia vs corymbia vs saligna)
banksia <- alpine_dat[alpine_dat$species == "banksia_integrifolia",]
corymbia <- alpine_dat[alpine_dat$species == "corymbia_maculata",]
saligna <- alpine_dat[alpine_dat$species == "eucalyptus_saligna",]

leglab <- c("banksia_integrifolia","corymbia_maculata" ,"eucalyptus_saligna")

halfblue <- scales::alpha("blue",alpha=.5)
halfred <- scales::alpha("red",alpha=.5)
halfgreen <- scales::alpha("forestgreen",alpha=.5)

windows(7,7)
#png(filename = "images/prelimdata.png", width = 5, height = 5, units = "in", res= 600)
par(mar=c(5,5,1,1),cex.axis=1, cex.lab=1.25,las=0,mgp=c(3,1,0), cex=1.25)
plot(logSI ~ logvol, data=banksia, xlab="Container volume (L)", ylab=silab, axes=FALSE, cex=1.25,
     bg=halfred,pch=21, xlim=c(.5,3.75), ylim=c(.5,3.5))
points(logSI ~ logvol, data=saligna, bg=halfblue ,pch=21)
points(logSI ~ logvol, data=corymbia, bg=halfgreen, pch=21)
magicaxis::magaxis(side=c(1,2), unlog=c(1,2), frame.plot=FALSE)
#add assessment
polygon(x=c(minx,minx,maxx,maxx), y=c(minsimin, minsimax, maxsimax,maxsimin), lwd=2,lty=2,
        col=greytrans )
box()
# BUG FIX: original was legend(topleft, leglab, pch=21, bg=c(...)):
#  * topleft was an undefined symbol (runtime error) -- must be the string "topleft";
#  * bg= sets the legend BOX background; pt.bg= fills the pch-21 points;
#  * colour order now matches the plotted series (banksia=red, corymbia=green,
#    saligna=blue, as drawn above).
legend("topleft", leglab, pch=21, pt.bg=c(halfred, halfgreen, halfblue))
/master_scripts/nurseries/alpine_nursery.R
no_license
CourtneyCampany/treestockassessment
R
false
false
8,365
r
source("functions_and_packages/plot_objects.R") ##standard standard <- read.csv("reports/container_assessment.csv") alpine_dat <- read.csv("reports/alpine_sizeindex.csv") #function to calculate size index, etc from nursery data--------------- #format raw dataframe alpine_dat$batch_id2 <- paste(alpine_dat$batch_id, alpine_dat$species, sep="-") ##need to replace 'mm' plots with appropriate volume alpine_dat$volume <- gsub("300mm", 15, alpine_dat$volume) alpine_dat$volume <- gsub("400mm", 35, alpine_dat$volume) alpine_dat$volume <- gsub("500mm", 65, alpine_dat$volume) alpine_dat$volume <- as.numeric(alpine_dat$volume) print("all container volumes corrected") #units and date formatting alpine_dat$height_m <- alpine_dat$height/100 alpine_dat$date <- as.Date(alpine_dat$date, format = "%d/%m/%Y", tz="AEST") print("date conversion worked") #deal with species name #check data quality #calulate indices alpine_dat$calliper300 <- with(alpine_dat, (diameter1+diameter2)/2) alpine_dat$rcd <- with(alpine_dat, (rcd1+rcd2)/2) alpine_dat$sizeindex <- with(alpine_dat, height_m * calliper300) alpine_dat$slenderness1 <- with(alpine_dat, height_m/rcd) alpine_dat$slenderness2 <- with(alpine_dat, height_m/calliper300) #log data for plotting alpine_dat$logSI <- with(alpine_dat, log10(sizeindex)) alpine_dat$logvol <- with(alpine_dat, log10(volume)) alpine_dat$logH <- with(alpine_dat, log10(height_m)) alpine_dat$logD <- with(alpine_dat, log10(calliper300)) alpine_dat$logRCD <- with(alpine_dat, log10(rcd)) alpine_dat$logslender <- with(alpine_dat, log10(slenderness2)) ### clean data saved ------------------------------------------------------------------------------------------------ alp2 <- alpine_dat[, c("nursery","date", "species","batch_id","batch_id2","volume","calliper300","rcd", "height_m", "sizeindex", "logSI", "logvol", "logH", "logD", "logRCD")] write.csv(alp2, "calculated_data/alpine_clean.csv", row.names = FALSE) #total number of observations print(paste(nrow(alpine_dat), "trees 
measured")) alpine_agg <- doBy::summaryBy(sizeindex+slenderness2 ~ species+batch_id, fun=mean, data=alpine_dat, keep.names = TRUE) #number of batches? length(unique(alpine_agg$batch_id)) #number of species? length(unique(alpine_agg$species)) #number of container sizes? length(unique(alpine_dat$volume)) alpinevols <- doBy::summaryBy(sizeindex ~ species+volume, fun=mean, data=alpine_dat, keep.names = TRUE) ###plotting------------------------------------------------------------------------------------------------------------- library(magicaxis) library(RColorBrewer) ##plotbits ###large color palette n <- 22 qual_col_pals = brewer.pal.info[brewer.pal.info$category == 'qual',] col_vector = unlist(mapply(brewer.pal, qual_col_pals$maxcolors, rownames(qual_col_pals))) # pie(rep(1,n), col=sample(col_vector, n)) speciesnames <- unique(alpine_dat$species) speciesnames2 <- data.frame(species = speciesnames, colorspec = col_vector[1:22]) alpine_dat2 <- merge(alpine_dat, speciesnames2) #1. size index vs volume windows(7,7) #png(filename = "images/prelimdata.png", width = 5, height = 5, units = "in", res= 600) par(mar=c(5,5,1,1),cex.axis=1, cex.lab=1.25,las=0,mgp=c(3,1,0)) plot(logSI ~ logvol, data=alpine_dat, xlab="Container volume (L)", ylab=silab, axes=FALSE, cex=1.25, bg=col_vector[alpine_dat$species],pch=21, xlim=c(.5,3.75), ylim=c(.5,3.5)) magicaxis::magaxis(side=c(1,2), unlog=c(1,2), frame.plot=FALSE) #add assessment points(log10(min_size_index[1:36])~log10(container_volume[1:36]), data=standard, bg="grey65", type='l',lwd=2) points(log10(max_size_index[1:36])~log10(container_volume[1:36]), data=standard, col="black", type='l', lwd=2) legend("topleft", c("Max. size index", "Min. size index") ,pch=c(16, 21), cex=1.25, bty='n', inset=.01) box() #2. 
height vs volume windows(7,7) par(mar=c(5,5,1,1),cex.axis=1, cex.lab=1.25,las=0,mgp=c(3,1,0)) plot(logH ~ logvol, data=alpine_dat, xlab="Container volume (L)", ylab="Height (m)", axes=FALSE, cex=1.25, bg=col_vector[alpine_dat$species],pch=21, xlim=c(.5,3.75), ylim=c(-.5,1)) magicaxis::magaxis(side=c(1,2), unlog=c(1,2), frame.plot=FALSE) box() #3. calliper vs volume windows(7,7) par(mar=c(5,5,1,1),cex.axis=1, cex.lab=1.25,las=0,mgp=c(3,1,0)) plot(logD ~ logvol, data=alpine_dat, xlab="Container volume (L)", ylab="Stem Calliper @ 300mm (mm)", axes=FALSE, cex=1.25, bg=col_vector[alpine_dat$species],pch=21, xlim=c(.5,3.75), ylim=c(.5,2.25)) magicaxis::magaxis(side=c(1,2), unlog=c(1,2), frame.plot=FALSE) box() #4. rcd vs volume windows(7,7) par(mar=c(5,5,1,1),cex.axis=1, cex.lab=1.25,las=0,mgp=c(3,1,0)) plot(logRCD ~ logvol, data=alpine_dat, xlab="Container volume (L)", ylab="Root Collar Diameter (mm)", axes=FALSE, cex=1.25, bg=col_vector[alpine_dat$species],pch=21, xlim=c(1,3.75), ylim=c(1,2.25)) magicaxis::magaxis(side=c(1,2), unlog=c(1,2), frame.plot=FALSE) box() #4. slenderness index windows(7,7) par(mar=c(5,5,1,1),cex.axis=1, cex.lab=1.25,las=0,mgp=c(3,1,0)) plot(logslender ~ logvol, data=alpine_dat, xlab="Container volume (L)", ylab="Slenderness Index (Height:Calliper)", axes=FALSE, cex=1.25, bg=col_vector[alpine_dat$species],pch=21, xlim=c(.5,3.75), ylim=c(-1.5, -.75)) magicaxis::magaxis(side=c(1,2), unlog=c(1,2), frame.plot=FALSE) box() #5. height vs diameter windows(7,7) par(mar=c(5,5,1,1),cex.axis=1, cex.lab=1.25,las=0,mgp=c(3,1,0)) plot(calliper300 ~ height_m, data=alpine_dat, xlab="Height (m)", ylab="Diameter (mm)",xlim=c(0,8), ylim=c(0,175), cex=1.25, col=col_vector[alpine_dat$species],pch=1) points(rcd ~ height_m, data=alpine_dat, cex=1.25, bg=col_vector[alpine_dat$species],pch=21) box() legend("topleft", c("Root Collar Diameter","Calliper @ 300mm") ,pch=c(16, 1), bg="black", cex=1, bty='n', inset=.01) #6. 
Boxplots of size index at 25L for example vol25 <- alpine_dat2[alpine_dat2$volume==25 , ] vol25 <- droplevels(vol25) species25 <- sort(unique(vol25$species)) # vol45 <- alpine_dat2[alpine_dat2$volume==45 , ] # vol45 <- droplevels(vol45) # species45 <- sort(unique(vol45$species)) vol100 <- alpine_dat2[alpine_dat2$volume==100, ] vol100 <- droplevels(vol100) species100 <- sort(unique(vol100$species)) windows(7,7) par(mar=c(10,5,1,1),mgp=c(3,1,0)) boxplot(sizeindex ~ species, data=vol25, ylab="Size Index", las=2, xlab="", xaxt='n', outline=FALSE,col=col_vector[1:9]) axis(1, at = 1:9,labels=FALSE) text(x=1:9, y=-3, species25, srt=45, xpd=TRUE, adj=1) text(x=8.5, y=62.5, label="25L Containers", font=2) # windows(7,7) # par(mar=c(10,5,1,1),mgp=c(3,1,0)) # boxplot(sizeindex ~ species, data=vol45, ylab="Size Index", las=2, xlab="", xaxt='n', outline=FALSE, col=col_vector) # axis(1, at = 1:8,labels=FALSE) # text(x=1:8, y=8, species45, srt=45, xpd=TRUE, adj=1) # text(x=7.5, y=150, label="45L Containers", font=2) windows(7,7) par(mar=c(10,5,1,1),mgp=c(3,1,0)) boxplot(sizeindex ~ species, data=vol100, ylab="Size Index", las=2, xlab="", xaxt='n', outline=FALSE, col=col_vector[c(1,10,4,11,7,8,12,13,14,15)]) axis(1, at = 1:10,labels=FALSE) text(x=1:10, y=35, species100, srt=45, xpd=TRUE, adj=1) text(x=9.5, y=310, label="100L Containers", font=2) #7. 
size index vs volume (banksia vs cormbia) banksia <- alpine_dat[alpine_dat$species == "banksia_integrifolia",] corymbia <- alpine_dat[alpine_dat$species == "corymbia_maculata",] saligna <- alpine_dat[alpine_dat$species == "eucalyptus_saligna",] leglab <- c("banksia_integrifolia","corymbia_maculata" ,"eucalyptus_saligna") halfblue <- scales::alpha("blue",alpha=.5) halfred <- scales::alpha("red",alpha=.5) halfgreen <- scales::alpha("forestgreen",alpha=.5) windows(7,7) #png(filename = "images/prelimdata.png", width = 5, height = 5, units = "in", res= 600) par(mar=c(5,5,1,1),cex.axis=1, cex.lab=1.25,las=0,mgp=c(3,1,0), cex=1.25) plot(logSI ~ logvol, data=banksia, xlab="Container volume (L)", ylab=silab, axes=FALSE, cex=1.25, bg=halfred,pch=21, xlim=c(.5,3.75), ylim=c(.5,3.5)) points(logSI ~ logvol, data=saligna, bg=halfblue ,pch=21) points(logSI ~ logvol, data=corymbia, bg=halfgreen, pch=21) magicaxis::magaxis(side=c(1,2), unlog=c(1,2), frame.plot=FALSE) #add assessment polygon(x=c(minx,minx,maxx,maxx), y=c(minsimin, minsimax, maxsimax,maxsimin), lwd=2,lty=2, col=greytrans ) box() legend(topleft, leglab, pch=21, bg=c(halfred, halfblue, halfgreen))
## Feature-selection experiments for gender classification.
## Each function trains a learner (gbm or svm) under a different
## feature-selection strategy and prints performance measures.
## NOTE(review): all functions take no arguments and rely on GLOBAL objects
## existing beforehand: `training`/`testing` (data frames with a `gender`
## column) for Correlation(), and mlr tasks `train_task`/`test_task` for the
## rest -- confirm these are created by the calling script.

library(mlr)
library(caret)
library(ggplot2)
library(rJava)
library(RWeka)
library("FSelector")
library(FSelectorRcpp)
library(Biocomb)
library(rpart)
library(rpart.plot)
library(kernlab)
library(glmnet)
library(ROCR)
library(mlbench)

# Correlation-based feature selection (CFS, via Biocomb::select.cfs) followed
# by a GBM trained with caret; prints accuracy, AUC and kappa on the test set.
Correlation <- function(){
  set.seed(123)
  str(training$gender)
  # recode gender: "1" -> 1, everything else -> 2 (local copies of the globals)
  training$gender <- ifelse(training$gender == "1",1,2)
  testing$gender <- ifelse(testing$gender == "1",1,2)

  # Correlation Based Feature Selection:
  results <- select.cfs(training)
  features <- results$Biomarker

  train <- training[, features]
  train$gender <- factor(training$gender)
  test <- testing[,features]
  test$gender <- factor(testing$gender)

  control <- trainControl(method = "repeatedcv", number = 10, repeats = 3)
  model <- caret::train(gender~., data=train, method="gbm", preProcess="scale",
                        trControl=control, metric = "Kappa")

  tptrain <- predict(model, newdata = train)
  tpmodel <- predict(model, newdata = test)
  confusionMatrix(data = tpmodel, reference = test$gender)

  predictions_train <- predict(model, newdata = train)
  predictions_train <- ifelse(predictions_train == "1",1,2)
  predictions_test <- predict(model, newdata = test)
  predictions_test <- ifelse(predictions_test == "1",1,2)

  # Performance
  cm <- confusionMatrix(as.factor(predictions_test), reference = as.factor(testing$gender))
  # Accuracy:
  print(paste0("Testing Accuracy: " , cm$overall["Accuracy"]))
  # Auc :
  pred_test <- prediction(predictions_test, testing$gender)
  auc_test <- signif(attr(ROCR::performance(pred_test, "auc"), "y.values")[[1]], digits = 3)
  print(paste0("AUC for Testing: ",auc_test ))
  # Kappa:
  print(paste0("Kappa value for Testing: ", cm$overall["Kappa"] ))
}

# Chi-squared filter + GBM via mlr; tunes the filter percentage (fw.perc)
# by 10-fold CV on kappa, then refits with the tuned percentage.
ChiSquare <-function(){
  set.seed(123)
  model <- makeLearner("classif.gbm", predict.type = "prob")
  lrn = makeFilterWrapper(learner = model, fw.method = "FSelector_chi.squared")
  ps = makeParamSet(makeNumericParam("fw.perc", lower = 0, upper = 1))
  rdesc = makeResampleDesc("CV", iters = 10)
  res = tuneParams(lrn, task = train_task, resampling = rdesc, par.set = ps,
                   measures = kappa, control = makeTuneControlGrid())

  # BUG FIX: the original also passed C = res$x$C and sigma = res$x$sigma here.
  # Those are SVM hyperparameters that were never part of the tuned param set
  # (ps only contains fw.perc), so both were NULL -- leftovers from an SVM
  # variant of this function.
  t.tree <- makeFilterWrapper(learner = model, fw.method = "FSelector_chi.squared",
                              fw.perc = res$x$fw.perc)

  #train the model
  t.rpart <- mlr::train(t.tree, train_task)
  getLearnerModel(t.rpart)
  length(getFilteredFeatures(t.rpart))

  #make predictions
  tptrain <- predict(t.rpart, train_task)
  tpmodel <- predict(t.rpart, test_task)

  #Performance Measures:
  print("Confusion Matrix for Test Data: "); print(calculateConfusionMatrix(tpmodel))
  print("Training Time for Train Data: ");print(mlr::performance(tptrain, measures = timetrain, model = t.rpart))
  print("Accuracy, AUC for Test Data: ");print(mlr::performance(tpmodel, measures = list(acc,auc,kappa), model = t.rpart))
}

# Information-gain filter + SVM via mlr; tunes fw.perc by 10-fold CV on kappa.
InfoGain <-function(){
  set.seed(123)
  model <- makeLearner("classif.svm", predict.type = "prob")
  lrn = makeFilterWrapper(learner = model, fw.method = "FSelectorRcpp_information.gain")
  ps = makeParamSet(makeNumericParam("fw.perc", lower = 0, upper = 1))
  rdesc = makeResampleDesc("CV", iters = 10)
  res = tuneParams(lrn, task = train_task, resampling = rdesc, par.set = ps,
                   measures = kappa, control = makeTuneControlGrid())
  t.tree <- makeFilterWrapper(learner = model, fw.method = "FSelectorRcpp_information.gain",
                              fw.perc = res$x$fw.perc)

  #train the model
  t.rpart <- mlr::train(t.tree, train_task)
  getLearnerModel(t.rpart)
  length(getFilteredFeatures(t.rpart))

  #make predictions
  tptrain <- predict(t.rpart, train_task)
  tpmodel <- predict(t.rpart, test_task)

  #Performance Measures:
  print("Confusion Matrix for Test Data: "); print(calculateConfusionMatrix(tpmodel))
  print("Training Time for Train Data: ");print(mlr::performance(tptrain, measures = timetrain, model = t.rpart))
  print("Accuracy, AUC for Test Data: ");print(mlr::performance(tpmodel, measures = list(acc,auc,kappa), model = t.rpart))
}

# Sequential forward selection (SFS) wrapper around an SVM.
# NOTE(review): this one selects on acc while the other strategies use
# kappa -- confirm the difference is intentional.
SequentialForward <-function(){
  set.seed(123)
  model <- makeLearner("classif.svm", predict.type = "prob")
  rdesc = makeResampleDesc("CV", iters = 10)
  lrn = makeFeatSelWrapper(model, resampling = rdesc,measures = acc,
                           control = makeFeatSelControlSequential(method = "sfs", alpha = 0.02),
                           show.info = TRUE)
  #train the model
  t.rpart <- mlr::train(lrn, train_task)
  sfeats <- getFeatSelResult(t.rpart)
  length(sfeats$x)
  #make predictions
  tptrain <- predict(t.rpart, train_task)
  tpmodel <- predict(t.rpart, test_task)
  #Performance Measures:
  print("Confusion Matrix for Test Data: "); print(calculateConfusionMatrix(tpmodel))
  print("Training Time for Train Data: ");print(mlr::performance(tptrain, measures = timetrain, model = t.rpart))
  print("Accuracy, AUC for Test Data: ");print(mlr::performance(tpmodel, measures = list(acc,auc,kappa), model = t.rpart))
}

# Sequential backward selection (SBS) wrapper around an SVM.
SequentialBackward <-function(){
  set.seed(123)
  model <- makeLearner("classif.svm", predict.type = "prob")
  rdesc = makeResampleDesc("CV", iters = 10)
  lrn = makeFeatSelWrapper(model, resampling = rdesc,
                           control = makeFeatSelControlSequential(method = "sbs", alpha = 0.02),
                           show.info = TRUE)
  #train the model
  t.rpart <- mlr::train(lrn, train_task)
  sfeats <- getFeatSelResult(t.rpart)
  length(sfeats$x)
  #make predictions
  tptrain <- predict(t.rpart, train_task)
  tpmodel <- predict(t.rpart, test_task)
  #Performance Measures:
  print("Confusion Matrix for Test Data: "); print(calculateConfusionMatrix(tpmodel))
  print("Training Time for Train Data: ");print(mlr::performance(tptrain, measures = timetrain, model = t.rpart))
  print("Accuracy, AUC for Test Data: ");print(mlr::performance(tpmodel, measures = list(acc,auc,kappa), model = t.rpart))
}

# Genetic-algorithm feature selection (10 generations) around an SVM.
Genetic <-function(){
  set.seed(123)
  model <- makeLearner("classif.svm", predict.type = "prob")
  rdesc = makeResampleDesc("CV", iters = 10)
  lrn = makeFeatSelWrapper(model, resampling = rdesc,
                           control = makeFeatSelControlGA(maxit = 10),
                           show.info = TRUE, measures = kappa)
  #train the model
  t.rpart <- mlr::train(lrn, train_task)
  sfeats <- getFeatSelResult(t.rpart)
  length(sfeats$x)
  #make predictions
  tptrain <- predict(t.rpart, train_task)
  tpmodel <- predict(t.rpart, test_task)
  #Performance Measures:
  print("Confusion Matrix for Test Data: "); print(calculateConfusionMatrix(tpmodel))
  print("Training Time for Train Data: ");print(mlr::performance(tptrain, measures = timetrain, model = t.rpart))
  print("Accuracy, AUC for Test Data: ");print(mlr::performance(tpmodel, measures = list(acc,auc,kappa), model = t.rpart))
}

# Random-search feature selection (10 iterations) around an SVM.
Random <-function(){
  set.seed(123)
  model <- makeLearner("classif.svm", predict.type = "prob")
  rdesc = makeResampleDesc("CV", iters = 10)
  lrn = makeFeatSelWrapper(model, resampling = rdesc,
                           control = makeFeatSelControlRandom(maxit = 10),
                           show.info = TRUE, measures = kappa)
  #train the model
  t.rpart <- mlr::train(lrn, train_task)
  sfeats <- getFeatSelResult(t.rpart)
  length(sfeats$x)
  #make predictions
  tptrain <- predict(t.rpart, train_task)
  tpmodel <- predict(t.rpart, test_task)
  #Performance Measures:
  print("Confusion Matrix for Test Data: "); print(calculateConfusionMatrix(tpmodel))
  print("Training Time for Train Data: ");print(mlr::performance(tptrain, measures = timetrain, model = t.rpart))
  print("Accuracy, AUC for Test Data: ");print(mlr::performance(tpmodel, measures = list(acc,auc,kappa), model = t.rpart))
}
/GBMGB.R
no_license
AtrayeeNeog/Cardio-Classifier-HIWi
R
false
false
8,143
r
library(mlr) library(caret) library(ggplot2) library(rJava) library(RWeka) library("FSelector") library(FSelectorRcpp) library(Biocomb) library(mlr) library(rpart) library(rpart.plot) library(kernlab) library(glmnet) library(ROCR) library(mlbench) Correlation <- function(){ set.seed(123) str(training$gender) training$gender <- ifelse(training$gender == "1",1,2) testing$gender <- ifelse(testing$gender == "1",1,2) # Correlation Based Feature Selection: results <- select.cfs(training) features <- results$Biomarker train <- training[, features] train$gender <- factor(training$gender) test <- testing[,features] test$gender <- factor(testing$gender) control <- trainControl(method = "repeatedcv", number = 10, repeats = 3) model <- caret::train(gender~., data=train, method="gbm", preProcess="scale", trControl=control, metric = "Kappa") tptrain <- predict(model, newdata = train) tpmodel <- predict(model, newdata = test) confusionMatrix(data = tpmodel, reference = test$gender) predictions_train <- predict(model, newdata = train) predictions_train <- ifelse(predictions_train == "1",1,2) predictions_train predictions_test <- predict(model, newdata = test) predictions_test <- ifelse(predictions_test == "1",1,2) predictions_test testing$gender # Performance cm <- confusionMatrix(as.factor(predictions_test), reference = as.factor(testing$gender)) # Accuracy: print(paste0("Testing Accuracy: " , cm$overall["Accuracy"])) # Auc : pred_test <- prediction(predictions_test, testing$gender) auc_test <- signif(attr(ROCR::performance(pred_test, "auc"), "y.values")[[1]], digits = 3) print(paste0("AUC for Testing: ",auc_test )) # Kappa: print(paste0("Kappa value for Testing: ", cm$overall["Kappa"] )) } ChiSquare <-function(){ set.seed(123) model <- makeLearner("classif.gbm", predict.type = "prob") lrn = makeFilterWrapper(learner = model, fw.method = "FSelector_chi.squared") ps = makeParamSet(makeNumericParam("fw.perc", lower = 0, upper = 1)) rdesc = makeResampleDesc("CV", iters = 10) res = 
tuneParams(lrn, task = train_task, resampling = rdesc, par.set = ps, measures = kappa, control = makeTuneControlGrid()) t.tree <- makeFilterWrapper(learner = model, fw.method = "FSelector_chi.squared", fw.perc = res$x$fw.perc, C = res$x$C, sigma = res$x$sigma) #train the model t.rpart <- mlr::train(t.tree, train_task) getLearnerModel(t.rpart) t.rpart length(getFilteredFeatures(t.rpart)) #make predictions tptrain <- predict(t.rpart, train_task) tpmodel <- predict(t.rpart, test_task) #Performance Measures: print("Confusion Matrix for Test Data: "); print(calculateConfusionMatrix(tpmodel)) print("Training Time for Train Data: ");print(mlr::performance(tptrain, measures = timetrain, model = t.rpart)) print("Accuracy, AUC for Test Data: ");print(mlr::performance(tpmodel, measures = list(acc,auc,kappa), model = t.rpart)) } InfoGain <-function(){ set.seed(123) model <- makeLearner("classif.svm", predict.type = "prob") lrn = makeFilterWrapper(learner = model, fw.method = "FSelectorRcpp_information.gain") ps = makeParamSet(makeNumericParam("fw.perc", lower = 0, upper = 1)) rdesc = makeResampleDesc("CV", iters = 10) res = tuneParams(lrn, task = train_task, resampling = rdesc, par.set = ps, measures = kappa, control = makeTuneControlGrid()) t.tree <- makeFilterWrapper(learner = model, fw.method = "FSelectorRcpp_information.gain", fw.perc = res$x$fw.perc) #train the model t.rpart <- mlr::train(t.tree, train_task) getLearnerModel(t.rpart) t.rpart length(getFilteredFeatures(t.rpart)) #make predictions tptrain <- predict(t.rpart, train_task) tpmodel <- predict(t.rpart, test_task) #Performance Measures: print("Confusion Matrix for Test Data: "); print(calculateConfusionMatrix(tpmodel)) print("Training Time for Train Data: ");print(mlr::performance(tptrain, measures = timetrain, model = t.rpart)) print("Accuracy, AUC for Test Data: ");print(mlr::performance(tpmodel, measures = list(acc,auc,kappa), model = t.rpart)) } SequentialForward <-function(){ set.seed(123) model <- 
makeLearner("classif.svm", predict.type = "prob") rdesc = makeResampleDesc("CV", iters = 10) lrn = makeFeatSelWrapper(model, resampling = rdesc,measures = acc, control = makeFeatSelControlSequential(method = "sfs", alpha = 0.02), show.info = TRUE) #train the model t.rpart <- mlr::train(lrn, train_task) sfeats <- getFeatSelResult(t.rpart) t.rpart length(sfeats$x) #make predictions tptrain <- predict(t.rpart, train_task) tpmodel <- predict(t.rpart, test_task) #Performance Measures: print("Confusion Matrix for Test Data: "); print(calculateConfusionMatrix(tpmodel)) print("Training Time for Train Data: ");print(mlr::performance(tptrain, measures = timetrain, model = t.rpart)) print("Accuracy, AUC for Test Data: ");print(mlr::performance(tpmodel, measures = list(acc,auc,kappa), model = t.rpart)) } SequentialBackward <-function(){ set.seed(123) model <- makeLearner("classif.svm", predict.type = "prob") rdesc = makeResampleDesc("CV", iters = 10) lrn = makeFeatSelWrapper(model, resampling = rdesc, control = makeFeatSelControlSequential(method = "sbs", alpha = 0.02), show.info = TRUE) #train the model t.rpart <- mlr::train(lrn, train_task) sfeats <- getFeatSelResult(t.rpart) t.rpart length(sfeats$x) #make predictions tptrain <- predict(t.rpart, train_task) tpmodel <- predict(t.rpart, test_task) #Performance Measures: print("Confusion Matrix for Test Data: "); print(calculateConfusionMatrix(tpmodel)) print("Training Time for Train Data: ");print(mlr::performance(tptrain, measures = timetrain, model = t.rpart)) print("Accuracy, AUC for Test Data: ");print(mlr::performance(tpmodel, measures = list(acc,auc,kappa), model = t.rpart)) } Genetic <-function(){ set.seed(123) model <- makeLearner("classif.svm", predict.type = "prob") rdesc = makeResampleDesc("CV", iters = 10) lrn = makeFeatSelWrapper(model, resampling = rdesc, control = makeFeatSelControlGA(maxit = 10), show.info = TRUE, measures = kappa) #train the model t.rpart <- mlr::train(lrn, train_task) sfeats <- 
getFeatSelResult(t.rpart) t.rpart length(sfeats$x) #make predictions tptrain <- predict(t.rpart, train_task) tpmodel <- predict(t.rpart, test_task) #Performance Measures: print("Confusion Matrix for Test Data: "); print(calculateConfusionMatrix(tpmodel)) print("Training Time for Train Data: ");print(mlr::performance(tptrain, measures = timetrain, model = t.rpart)) print("Accuracy, AUC for Test Data: ");print(mlr::performance(tpmodel, measures = list(acc,auc,kappa), model = t.rpart)) } Random <-function(){ set.seed(123) model <- makeLearner("classif.svm", predict.type = "prob") rdesc = makeResampleDesc("CV", iters = 10) lrn = makeFeatSelWrapper(model, resampling = rdesc, control = makeFeatSelControlRandom(maxit = 10), show.info = TRUE, measures = kappa) #train the model t.rpart <- mlr::train(lrn, train_task) sfeats <- getFeatSelResult(t.rpart) t.rpart length(sfeats$x) #make predictions tptrain <- predict(t.rpart, train_task) tpmodel <- predict(t.rpart, test_task) #Performance Measures: print("Confusion Matrix for Test Data: "); print(calculateConfusionMatrix(tpmodel)) print("Training Time for Train Data: ");print(mlr::performance(tptrain, measures = timetrain, model = t.rpart)) print("Accuracy, AUC for Test Data: ");print(mlr::performance(tpmodel, measures = list(acc,auc,kappa), model = t.rpart)) }
# plots of local eQTL locations
#
# Draws the genetic map for chromosomes 1-19 and overlays, for each of six
# tissues, the map positions of the expression traits stored in the dgve
# cache. Output: ../SuppFigs/figS14.eps.
library(qtl)

# tissue order -- presumably matches the order of elements in dgve; verify
tissues <- c("adipose", "gastroc", "hypo", "islet", "kidney", "liver")
source("colors.R")                 # provides CCcolor and darkgray -- TODO confirm

# NOTE(review): attach() puts the saved objects (f2g/pmap, dgve) on the search
# path; load()-ing into a named environment would be the safer idiom here.
attach("../Analysis/FinalData/aligned_geno_with_pmap.RData")
attach("../Analysis/R/Rcache/dgve.RData")

# first marker position on each chromosome, used below to shift positions so
# every chromosome starts at 0
minloc <- sapply(pull.map(f2g), min)

source("my_plot_map.R")            # provides my.plot.map()

postscript("../SuppFigs/figS14.eps", height=8.4, width=6.5, pointsize=12,
           onefile=FALSE, horizontal=FALSE)
par(mar=c(3.0,3.3,0.6,0.6))
my.plot.map(f2g, chr=1:19, tichwidth=0.1, xlim=c(0.5, 19.9), main="", darkgray=darkgray)
for(i in seq(along=tissues)) {
  # column names of the "obsg" attribute are "chr@pos" strings; split into a
  # two-column numeric matrix (chromosome, position)
  eqtl <- matrix(as.numeric(unlist(strsplit(colnames(attr(dgve[[i]], "obsg")), "@"))),
                 ncol=2, byrow=TRUE)
  eqtl <- data.frame(chr=factor(eqtl[,1], 1:19), pos=eqtl[,2])
  # interpPositions needs row names on its input -- presumably; verify
  rownames(eqtl) <- as.character(1:nrow(eqtl))
  # convert positions on the physical map (pmap) to the genetic map of f2g
  eqtl <- qtl::interpPositions(eqtl, pmap, pull.map(f2g))
  eqtl[,1] <- as.numeric(as.character(eqtl[,1]))
  # horizontal offset of 0.12 per tissue so the six tissues do not overplot
  points(eqtl[,1]+0.12*i, eqtl[,3]-minloc[eqtl[,1]],
         bg=CCcolor[c(1:3,5,6,8)[i]], pch=21, cex=0.6)
}
legend("bottomright", pch=21, pt.bg=CCcolor[c(1:3,5,6,8)], tissues, bg="white")
dev.off()
/R/local_eqtl_locations.R
permissive
kbroman/Paper_SampleMixups
R
false
false
1,087
r
# plots of local eQTL locations library(qtl) tissues <- c("adipose", "gastroc", "hypo", "islet", "kidney", "liver") source("colors.R") attach("../Analysis/FinalData/aligned_geno_with_pmap.RData") attach("../Analysis/R/Rcache/dgve.RData") minloc <- sapply(pull.map(f2g), min) source("my_plot_map.R") postscript("../SuppFigs/figS14.eps", height=8.4, width=6.5, pointsize=12, onefile=FALSE, horizontal=FALSE) par(mar=c(3.0,3.3,0.6,0.6)) my.plot.map(f2g, chr=1:19, tichwidth=0.1, xlim=c(0.5, 19.9), main="", darkgray=darkgray) for(i in seq(along=tissues)) { eqtl <- matrix(as.numeric(unlist(strsplit(colnames(attr(dgve[[i]], "obsg")), "@"))), ncol=2, byrow=TRUE) eqtl <- data.frame(chr=factor(eqtl[,1], 1:19), pos=eqtl[,2]) rownames(eqtl) <- as.character(1:nrow(eqtl)) eqtl <- qtl::interpPositions(eqtl, pmap, pull.map(f2g)) eqtl[,1] <- as.numeric(as.character(eqtl[,1])) points(eqtl[,1]+0.12*i, eqtl[,3]-minloc[eqtl[,1]], bg=CCcolor[c(1:3,5,6,8)[i]], pch=21, cex=0.6) } legend("bottomright", pch=21, pt.bg=CCcolor[c(1:3,5,6,8)], tissues, bg="white") dev.off()
#
#  sparsebnUtils-lambdas.R
#  sparsebnUtils
#
#  Created by Bryon Aragam (local) on 1/22/16.
#  Copyright (c) 2014-2017 Bryon Aragam. All rights reserved.
#

#
# PACKAGE SPARSEBNUTILS: Lambdas
#
#   CONTENTS:
#       generate.lambdas
#       gen_lambdas
#

#' generate.lambdas
#'
#' Convenience function for creating a grid of lambdas.
#'
#' See Section 5.3 of \href{http://jmlr.org/papers/v16/aragam15a.html}{Aragam and Zhou (2015)}
#' for a discussion of regularization paths (also, solution paths).
#'
#' @param lambda.max Maximum value of lambda; in terms of the algorithm this is the initial value
#'                   of the regularization parameter in the solution path. Must be positive.
#' @param lambdas.ratio Ratio between the maximum lambda value and the minimum lambda value in the
#'                      solution path. Must lie in (0, 1].
#' @param lambdas.length Number of values to include (at least 1).
#' @param scale Which scale to use: Either \code{"linear"} or \code{"log"}.
#'
#' @export
generate.lambdas <- function(lambda.max,
                             lambdas.ratio = 1e-3,
                             lambdas.length = 50,
                             scale = "linear"
){
    ### Fail early on nonsensical grids instead of silently producing NaN/Inf.
    stopifnot(is.numeric(lambda.max), length(lambda.max) == 1, lambda.max > 0)
    stopifnot(is.numeric(lambdas.ratio), lambdas.ratio > 0, lambdas.ratio <= 1)

    lambda.min <- lambdas.ratio * lambda.max
    gen_lambdas(lambda.max = lambda.max,
                lambda.min = lambda.min,
                lambdas.length = lambdas.length,
                scale = scale)
} # END GENERATE.LAMBDAS

# gen_lambdas
#
# Internal implementation of generate.lambdas: returns a decreasing grid of
# lambdas.length values from lambda.max down to lambda.min, spaced either
# linearly or logarithmically.
gen_lambdas <- function(lambda.max,
                        lambda.min,
                        lambdas.length = 50,
                        scale = "linear"
){
    stopifnot(lambdas.length >= 1)

    if(scale == "linear"){
        lambdas <- seq(lambda.max, lambda.min, length.out = lambdas.length)
    } else if(scale == "log"){
        stopifnot(lambda.min > 0) # log spacing is undefined otherwise
        ### FIX: use length.out instead of an explicit step of
        ### log(ratio)/(lambdas.length-1) -- the explicit step divides by zero
        ### when lambdas.length == 1 and is less exact at the endpoints.
        lambdas <- exp(seq(log(lambda.max), log(lambda.min), length.out = lambdas.length))
    } else{
        stop("Invalid input for scale argument! Must be either 'log' or 'linear'.")
    }

    lambdas
} # END GEN_LAMBDAS
/R/sparsebnUtils-lambdas.R
no_license
itsrainingdata/sparsebnUtils
R
false
false
2,105
r
#
#  sparsebnUtils-lambdas.R
#  sparsebnUtils
#
#  Created by Bryon Aragam (local) on 1/22/16.
#  Copyright (c) 2014-2017 Bryon Aragam. All rights reserved.
#

#
# PACKAGE SPARSEBNUTILS: Lambdas
#
#   CONTENTS:
#       generate.lambdas
#       gen_lambdas
#

#' generate.lambdas
#'
#' Convenience function for creating a grid of lambdas.
#'
#' See Section 5.3 of \href{http://jmlr.org/papers/v16/aragam15a.html}{Aragam and Zhou (2015)}
#' for a discussion of regularization paths (also, solution paths).
#'
#' @param lambda.max Maximum value of lambda; in terms of the algorithm this is the initial value
#'                   of the regularization parameter in the solution path.
#' @param lambdas.ratio Ratio between the maximum lambda value and the minimum lambda value in the solution
#'                      path.
#' @param lambdas.length Number of values to include.
#' @param scale Which scale to use: Either \code{"linear"} or \code{"log"}.
#'
#' @return Numeric vector of length \code{lambdas.length}, decreasing from
#'         \code{lambda.max} to \code{lambdas.ratio * lambda.max}.
#'
#' @export
generate.lambdas <- function(lambda.max,
                             lambdas.ratio = 1e-3,
                             lambdas.length = 50,
                             scale = "linear"
){
    # The smallest lambda in the path is a fixed fraction of the largest.
    lambda.min <- lambdas.ratio * lambda.max

    gen_lambdas(lambda.max = lambda.max,
                lambda.min = lambda.min,
                lambdas.length = lambdas.length,
                scale = scale)
} # END GENERATE.LAMBDAS

# gen_lambdas
#
# Internal implementation of generate.lambdas: builds a decreasing grid of
# lambdas from lambda.max down to lambda.min, spaced either linearly or
# logarithmically.
gen_lambdas <- function(lambda.max,
                        lambda.min,
                        lambdas.length = 50,
                        scale = "linear"
){
    stopifnot(is.numeric(lambda.max), is.numeric(lambda.min),
              lambdas.length >= 1)

    if(scale == "linear"){
        lambdas <- seq(lambda.max, lambda.min, length.out = lambdas.length)
    } else if(scale == "log"){
        if(lambda.max <= 0 || lambda.min <= 0){
            stop("Both lambda.max and lambda.min must be positive when scale = 'log'.")
        }
        # Equally spaced on the log scale. Using length.out (instead of the
        # previous positional `by` step log(ratio)/(length-1)) guarantees
        # exactly `lambdas.length` values ending exactly at lambda.min --
        # a computed floating-point `by` can drop the final point, and it
        # divides by zero when lambdas.length == 1.
        lambdas <- exp(seq(log(lambda.max), log(lambda.min),
                           length.out = lambdas.length))
    } else{
        stop("Invalid input for scale argument! Must be either 'log' or 'linear'.")
    }

    lambdas
} # END GEN_LAMBDAS
#' Build the "individual fit" plot configuration
#'
#' This function can be used to obtain individual prediction and compare with
#' observed data and population prediction for each individual separately.
#'
#' @param labels plot texts: labels, axis, ...
#' @param facets list of facet settings (nrow/ncol)
#' @param dname name of dataset to be used
#' @param ipred_line \code{list} individual-prediction (IPRED) line geom aesthetics
#' @param pred_line \code{list} population-prediction (PRED) line geom aesthetics
#' @param point \code{list} some point geom properties aesthetics
#' @param bloq \code{pmxBLOQ} object created by \code{\link{pmx_bloq}}
#' @param is.legend \code{logical} if TRUE add a legend
#' @param use.finegrid \code{logical} if FALSE use predictions data set
#' @param ... others graphics arguments passed to \code{\link{pmx_gpar}} internal object.
#'
#' @return individual fit object
#' @family plot_pmx
#' @seealso \code{\link{plot_pmx.individual}}
individual <- function(labels,
                       facets = NULL,
                       dname = NULL,
                       ipred_line = NULL,
                       pred_line = NULL,
                       point = NULL,
                       bloq = NULL,
                       is.legend,
                       use.finegrid,
                       ...) {
  assert_that(is_list(facets))
  assert_that(is_string_or_null(dname))
  assert_that(is_list(labels))
  # Without a fine grid, fall back to the model "predictions" data set.
  if (!use.finegrid) dname <- "predictions"
  structure(list(
    ptype = "IND",
    strat = TRUE,
    is.legend = is.legend,
    use.finegrid = use.finegrid,
    dname = dname,
    aess = list(x = "TIME", y1 = "PRED", y2 = "IPRED"),
    labels = labels,
    point = point,
    ipred_line = ipred_line,
    pred_line = pred_line,
    facets = facets,
    bloq = bloq,
    gp = pmx_gpar(labels = labels, is.legend = is.legend, ...)
  ), class = c("individual", "pmx_gpar"))
}

# Second element of `color` when there is one, otherwise the fallback "red";
# used for the "ignored" observations in the legend.
get_invcolor <- function(color){
  if (length(color) > 1) color[2] else "red"
}

#' Plot individual predictions
#'
#' This function can be used to plot individual prediction and compare with
#' observed data and population prediction for each individual separately.
#'
#' @param x individual object
#' @param dx data set
#' @param ... not used for the moment
#'
#' @return a list of ggplot2
#' @export
#' @import ggplot2
#' @import data.table
#' @family plot_pmx
#'
plot_pmx.individual <- function(x, dx, ...) {
  # Silence R CMD check NOTE about non-standard evaluation of `ID`.
  ID <- NULL
  dx$maxValue <- 0
  ## plot
  if (x$dname == "predictions") cat("USE predictions data set \n")
  strat.facet <- x[["strat.facet"]]
  strat.color <- x[["strat.color"]]
  # Facet by ID, optionally crossed with the stratification variable.
  wrap.formula <- if (!is.null(strat.facet)) {
    wrap_formula(strat.facet, "ID")
  } else {
    formula("~ID")
  }
  get_page <- with(x, {
    # ---- observed-data layer ------------------------------------------
    # NOTE(review): if `point` is NULL, `point.shape` is never assigned yet
    # is still referenced below when building the legend -- confirm callers
    # always supply `point`.
    p_point <- if (!is.null(point)) {
      point$data <- if (is.null(bloq)) {
        input
      } else {
        # Drop censored observations (cens == 1 or -1) from the point layer.
        input[!get(bloq$cens) %in% c(1, -1)]
      }
      point.shape <- point$shape
      point$shape <- NULL
      # Last observation time per individual; observations at that final
      # time are labelled "ignored", earlier ones "accepted".
      max_y <- aggregate(TIME ~ ID, data = dx, max)
      colnames(max_y) <- c("ID", "maxValue")
      dx <- base::merge(dx, max_y, by = "ID", all.x = TRUE)
      dx$isobserv <- with(dx, TIME < maxValue)
      point$data <- base::merge(point$data, max_y, by = "ID")
      point$data$isobserv <- ifelse(point$data$TIME < point$data$maxValue,
                                    "accepted", "ignored")
      points <- copy(point)
      points$colour <- NULL
      do.call(geom_point, points)
    }
    # ---- BLOQ layer ----------------------------------------------------
    p_bloq <- if (!is.null(bloq)) {
      bloq$data <- x$input[get(bloq$cens) != 0]
      if (length(bloq$data$ID) > 0) {
        ## While cens may be in the dataset, all the data in the fit may be uncensored
        if (bloq$limit %in% names(bloq$data)) {
          bloq$data[!is.na(get(bloq$limit)), "y_end" := as.numeric(get(bloq$limit))]
          bloq$mapping <- aes_string(
            xend = "TIME",
            yend = "y_end"
          )
          bloq$cens <- bloq$limit <- NULL
          do.call(geom_segment, bloq)
        }
      }
    }
    # ---- legend scaffolding -------------------------------------------
    # Legend rows: accepted [, ignored], individual predictions,
    # population predictions. The condition is hoisted so it is evaluated
    # once instead of three times.
    has_ignored <- any(point$data$isobserv == "ignored")
    n <- if (has_ignored) 3 else 2
    linetype_values <- c(rep("solid", n), "dashed")
    linetype_labels <- if (has_ignored) {
      c("accepted", "ignored", "individual predictions", "population predictions")
    } else {
      c("accepted", "individual predictions", "population predictions")
    }
    shape_values <- c(rep(point.shape, n + 1))
    shape_values_leg <- c(rep(point.shape, n - 1), rep(20, 2))
    size_values <- c(rep(1, n - 1), ipred_line$size, pred_line$size)
    colour_values <- if (has_ignored) {
      c(point$colour[1], get_invcolor(point$colour), ipred_line$colour, pred_line$colour)
    } else {
      c(point$colour[1], ipred_line$colour, pred_line$colour)
    }
    keywidth_values <- c(rep(0, n - 1), rep(2, 2))
    # ---- assemble the plot --------------------------------------------
    p <- ggplot(dx, aes(TIME, DV, shape = isobserv, colour = isobserv)) +
      p_point +
      geom_line(
        aes(
          y = IPRED,
          linetype = "individual predictions",
          colour = "individual predictions"
        ),
        size = ipred_line$size
      ) +
      geom_line(
        aes(
          y = PRED,
          linetype = "population predictions",
          colour = "population predictions"
        ),
        size = pred_line$size
      ) +
      scale_linetype_manual(values = setNames(linetype_values, linetype_labels), guide = FALSE) +
      scale_shape_manual(values = setNames(shape_values, linetype_labels), guide = FALSE) +
      scale_colour_manual(
        values = setNames(colour_values, linetype_labels),
        guide = guide_legend(
          override.aes = list(
            linetype = linetype_values,
            shape = shape_values_leg,
            size = size_values
          ),
          title = NULL,
          keywidth = keywidth_values
        )
      ) +
      p_bloq
    gp$is.legend <- is.legend
    p <- plot_pmx(gp, p)
    ## split pages
    npages <- ceiling(with(
      facets,
      length(unique(dx$ID)) / nrow / ncol
    ))
    # Returned closure: `i` selects which page(s) to render; NULL means all.
    function(i) {
      res <- list()
      if (is.null(i)) i <- seq_len(npages)
      i <- intersect(i, seq_len(npages))
      res <- lapply(i, function(x) {
        facets$page <- x
        facets$facets <- wrap.formula
        if (is.null(facets$labeller)) {
          facets$labeller <- labeller(ID = function(x) sprintf("ID: %s", x))
        }
        p + do.call(facet_wrap_paginate, facets)
      })
      if (length(res) == 1) res[[1]] else res
    }
  })
  get_page
}
/R/plot-individual.R
no_license
csetraynor/ggPMX
R
false
false
7,103
r
#' Build the "individual fit" plot configuration
#'
#' This function can be used to obtain individual prediction and compare with
#' observed data and population prediction for each individual separately.
#'
#' @param labels plot texts: labels, axis, ...
#' @param facets list of facet settings (nrow/ncol)
#' @param dname name of dataset to be used
#' @param ipred_line \code{list} individual-prediction (IPRED) line geom aesthetics
#' @param pred_line \code{list} population-prediction (PRED) line geom aesthetics
#' @param point \code{list} some point geom properties aesthetics
#' @param bloq \code{pmxBLOQ} object created by \code{\link{pmx_bloq}}
#' @param is.legend \code{logical} if TRUE add a legend
#' @param use.finegrid \code{logical} if FALSE use predictions data set
#' @param ... others graphics arguments passed to \code{\link{pmx_gpar}} internal object.
#'
#' @return individual fit object
#' @family plot_pmx
#' @seealso \code{\link{plot_pmx.individual}}
individual <- function(labels,
                       facets = NULL,
                       dname = NULL,
                       ipred_line = NULL,
                       pred_line = NULL,
                       point = NULL,
                       bloq = NULL,
                       is.legend,
                       use.finegrid,
                       ...) {
  assert_that(is_list(facets))
  assert_that(is_string_or_null(dname))
  assert_that(is_list(labels))
  # Without a fine grid, fall back to the model "predictions" data set.
  if (!use.finegrid) dname <- "predictions"
  structure(list(
    ptype = "IND",
    strat = TRUE,
    is.legend = is.legend,
    use.finegrid = use.finegrid,
    dname = dname,
    aess = list(x = "TIME", y1 = "PRED", y2 = "IPRED"),
    labels = labels,
    point = point,
    ipred_line = ipred_line,
    pred_line = pred_line,
    facets = facets,
    bloq = bloq,
    gp = pmx_gpar(labels = labels, is.legend = is.legend, ...)
  ), class = c("individual", "pmx_gpar"))
}

# Second element of `color` when there is one, otherwise the fallback "red";
# used for the "ignored" observations in the legend.
get_invcolor <- function(color){
  if (length(color) > 1) color[2] else "red"
}

#' Plot individual predictions
#'
#' This function can be used to plot individual prediction and compare with
#' observed data and population prediction for each individual separately.
#'
#' @param x individual object
#' @param dx data set
#' @param ... not used for the moment
#'
#' @return a list of ggplot2
#' @export
#' @import ggplot2
#' @import data.table
#' @family plot_pmx
#'
plot_pmx.individual <- function(x, dx, ...) {
  # Silence R CMD check NOTE about non-standard evaluation of `ID`.
  ID <- NULL
  dx$maxValue <- 0
  ## plot
  if (x$dname == "predictions") cat("USE predictions data set \n")
  strat.facet <- x[["strat.facet"]]
  strat.color <- x[["strat.color"]]
  # Facet by ID, optionally crossed with the stratification variable.
  wrap.formula <- if (!is.null(strat.facet)) {
    wrap_formula(strat.facet, "ID")
  } else {
    formula("~ID")
  }
  get_page <- with(x, {
    # ---- observed-data layer ------------------------------------------
    # NOTE(review): if `point` is NULL, `point.shape` is never assigned yet
    # is still referenced below when building the legend -- confirm callers
    # always supply `point`.
    p_point <- if (!is.null(point)) {
      point$data <- if (is.null(bloq)) {
        input
      } else {
        # Drop censored observations (cens == 1 or -1) from the point layer.
        input[!get(bloq$cens) %in% c(1, -1)]
      }
      point.shape <- point$shape
      point$shape <- NULL
      # Last observation time per individual; observations at that final
      # time are labelled "ignored", earlier ones "accepted".
      max_y <- aggregate(TIME ~ ID, data = dx, max)
      colnames(max_y) <- c("ID", "maxValue")
      dx <- base::merge(dx, max_y, by = "ID", all.x = TRUE)
      dx$isobserv <- with(dx, TIME < maxValue)
      point$data <- base::merge(point$data, max_y, by = "ID")
      point$data$isobserv <- ifelse(point$data$TIME < point$data$maxValue,
                                    "accepted", "ignored")
      points <- copy(point)
      points$colour <- NULL
      do.call(geom_point, points)
    }
    # ---- BLOQ layer ----------------------------------------------------
    p_bloq <- if (!is.null(bloq)) {
      bloq$data <- x$input[get(bloq$cens) != 0]
      if (length(bloq$data$ID) > 0) {
        ## While cens may be in the dataset, all the data in the fit may be uncensored
        if (bloq$limit %in% names(bloq$data)) {
          bloq$data[!is.na(get(bloq$limit)), "y_end" := as.numeric(get(bloq$limit))]
          bloq$mapping <- aes_string(
            xend = "TIME",
            yend = "y_end"
          )
          bloq$cens <- bloq$limit <- NULL
          do.call(geom_segment, bloq)
        }
      }
    }
    # ---- legend scaffolding -------------------------------------------
    # Legend rows: accepted [, ignored], individual predictions,
    # population predictions. The condition is hoisted so it is evaluated
    # once instead of three times.
    has_ignored <- any(point$data$isobserv == "ignored")
    n <- if (has_ignored) 3 else 2
    linetype_values <- c(rep("solid", n), "dashed")
    linetype_labels <- if (has_ignored) {
      c("accepted", "ignored", "individual predictions", "population predictions")
    } else {
      c("accepted", "individual predictions", "population predictions")
    }
    shape_values <- c(rep(point.shape, n + 1))
    shape_values_leg <- c(rep(point.shape, n - 1), rep(20, 2))
    size_values <- c(rep(1, n - 1), ipred_line$size, pred_line$size)
    colour_values <- if (has_ignored) {
      c(point$colour[1], get_invcolor(point$colour), ipred_line$colour, pred_line$colour)
    } else {
      c(point$colour[1], ipred_line$colour, pred_line$colour)
    }
    keywidth_values <- c(rep(0, n - 1), rep(2, 2))
    # ---- assemble the plot --------------------------------------------
    p <- ggplot(dx, aes(TIME, DV, shape = isobserv, colour = isobserv)) +
      p_point +
      geom_line(
        aes(
          y = IPRED,
          linetype = "individual predictions",
          colour = "individual predictions"
        ),
        size = ipred_line$size
      ) +
      geom_line(
        aes(
          y = PRED,
          linetype = "population predictions",
          colour = "population predictions"
        ),
        size = pred_line$size
      ) +
      scale_linetype_manual(values = setNames(linetype_values, linetype_labels), guide = FALSE) +
      scale_shape_manual(values = setNames(shape_values, linetype_labels), guide = FALSE) +
      scale_colour_manual(
        values = setNames(colour_values, linetype_labels),
        guide = guide_legend(
          override.aes = list(
            linetype = linetype_values,
            shape = shape_values_leg,
            size = size_values
          ),
          title = NULL,
          keywidth = keywidth_values
        )
      ) +
      p_bloq
    gp$is.legend <- is.legend
    p <- plot_pmx(gp, p)
    ## split pages
    npages <- ceiling(with(
      facets,
      length(unique(dx$ID)) / nrow / ncol
    ))
    # Returned closure: `i` selects which page(s) to render; NULL means all.
    function(i) {
      res <- list()
      if (is.null(i)) i <- seq_len(npages)
      i <- intersect(i, seq_len(npages))
      res <- lapply(i, function(x) {
        facets$page <- x
        facets$facets <- wrap.formula
        if (is.null(facets$labeller)) {
          facets$labeller <- labeller(ID = function(x) sprintf("ID: %s", x))
        }
        p + do.call(facet_wrap_paginate, facets)
      })
      if (length(res) == 1) res[[1]] else res
    }
  })
  get_page
}
#devtools::install_github("daattali/shinyjs")
#install.packages("shinyBS")
#install.packages("shinythemes")
#install.packages("shinyjs")

library(shinyBS)      # bootstrap widgets: popovers, tooltips
library(shinythemes)  # background/layout themes
library(shiny)
library(shinyjs)      # javascript helpers: show/hide, hover

options(shiny.deprecation.messages = FALSE)

# Accepted MIME types / extensions shared by every upload widget below.
csv_accept <- c('text/csv', 'text/comma-separated-values',
                'text/tab-separated-values', 'text/plain', '.csv', '.tsv')

# shinyUI functon is the entire user interface
shinyUI(fluidPage(tagList(
  useShinyjs(),

  # Top-level navigation: one tabPanel per page
  navbarPage(
    "Gene Set Omic Analysis (GSOA)",
    id = "page-nav",
    theme = shinytheme("yeti"),
    # NOTE(review): `collapsable` is the legacy spelling of this argument;
    # recent shiny releases use `collapsible` -- confirm the installed shiny
    # version before renaming.
    collapsable = TRUE,

    # ---- Main tab: run GSOA -----------------------------------------------
    tabPanel(
      "Run GSOA",
      br(), br(),
      HTML('<center><img src="./GSOA_Logo_Code2.png"></center>'),
      br(),
      HTML('<center><h4> GSOA identifies gene sets that differ between two phenotypes by integrating evidence from multi-omic data</h4></center>'),

      # Column 1: data file inputs
      div(
        id = "inputs",
        column(
          4, offset = 2,
          br(),
          div(style = "height: 85px;",
              fileInput('dataFile', 'Genomic Data File *required',
                        multiple = TRUE, width = "85%", accept = csv_accept)),
          div(style = "height: 85px;",
              fileInput('classFile', 'Sample Class File *required',
                        width = "85%", accept = csv_accept)),
          div(style = "height: 85px;",
              fileInput('gmtFile', 'Gene Sets (GMT file) *required',
                        width = "85%", accept = csv_accept)),

          # Optional multi-omic uploads, hidden behind a toggle link
          a(id = "toggleAdvanced", "Show/hide multi-omic data inputs"),
          hidden(div(
            id = "advanced",
            div(style = "height: 85px;",
                fileInput('dataFile2', 'DNA Sequencing Data.',
                          width = "85%", accept = csv_accept)),
            div(style = "height: 85px;",
                fileInput('dataFile3', 'Copy Number Variation Data.',
                          width = "85%", accept = csv_accept)),
            div(style = "height: 85px;",
                fileInput('dataFile4', 'Other Genomic Data.',
                          width = "85%", accept = csv_accept)),
            div(style = "height: 85px;",
                fileInput('dataFile5', 'Other Genomic Data.',
                          width = "85%", accept = csv_accept))
          )),

          div(style = "height: 30px;",
              checkboxInput("GFRN_sigs", label = "Include Bild Lab Signatures",
                            value = FALSE)),
          bsPopover(
            id = "GFRN_sigs",
            title = "Select if you want to run all Bild lab signatures",
            content = "Bild lab signatures consist of experimentally generated RNA-sequencing signatures from the growth factor receptor network",
            placement = "right",
            trigger = "hover"
          )
        )
      ),

      ##################
      # Column 2: filtering / run parameters
      ##################
      column(
        4, offset = 1,
        br(),
        div(style = "height: 85px;",
            numericInput("Variance", "% Variance to Filter",
                         value = 10, width = "70%", min = 1, max = 90)),
        div(style = "height: 85px;",
            numericInput("LowExpression", "% Low Gene Expression to Filter",
                         value = 10, width = "70%", min = 1, max = 90)),
        div(style = "height: 65px;",
            textInput('results_h', value = "", width = "70%",
                      label = 'E-mail Address')),
        br(),

        # Advanced machine-learning parameters, hidden behind a toggle link
        a(id = "toggleAdvanced2", "Show/hide additional parameters"),
        hidden(div(
          id = "advanced2",
          div(style = "height: 85px;",
              selectInput('Algorithm', label = 'Machine Learning Algorithm.',
                          choices = c("svm", "rf"), width = "70%")),
          div(style = "height: 85px;",
              numericInput("CrossValidation", "# of Cross Validations",
                           value = 5, min = 1, width = "70%", max = 5)),
          div(style = "height: 85px;",
              numericInput('Iterations', label = 'Number of Iterations.',
                           value = 1000, min = 1, width = "70%", max = 1000))
        )),
        br(), br(),
        actionButton("run", "Run", width = "60%", icon("paper-plane"),
                     style = "color: black; background-color:green; border-color: grey; padding:9px"),
        bsTooltip(id = "run", title = "Click run once files are uploaded",
                  placement = "right", trigger = "hover"),
        br(), br(),
        actionButton("refresh", "Refresh Inputs", width = "60%", icon("paper-plane"),
                     style = "color: black; background-color: grey; border-color: grey; padding:9px")
      ),

      # Rendered Objects (full-width output area)
      column(
        12,
        br(), br(),
        # message-handler.js allows the server to push error messages
        singleton(tags$head(tags$script(src = "message-handler.js"))),
        dataTableOutput("results")
      )
    ),

    # ---- Documentation tabs -------------------------------------------------
    tabPanel("Instructions for Use", includeMarkdown("doc/instructions.md")),
    tabPanel("Demo Data", includeMarkdown("doc/demo.md")),
    tabPanel("About", includeMarkdown("doc/about.md")),
    tabPanel("Code", includeMarkdown("doc/code.md")),
    tabPanel("Contact", includeMarkdown("doc/contact.md")),
    tabPanel("Software Updates", includeMarkdown("doc/version.md"))
  )
))))
See "Instructions for Use" section for file and parameter details',align= "left"), #h5('To run GSOA with', a(href = 'https://drive.google.com/open?id=0B-HGhGW-uF4AODNhMnVHSmUwdHM', 'Demo Files', target="_blank"), 'download all 3 files from drive,', 'and upload.', style="color:black"), #column(4, wellPanel( # offset=10, # h5('Download data after run is complete.',style="color:black"), # downloadButton('downloadData', 'Download Results'))), # put things side by side #fluidRow( # column(8, # p('', style="border:0.1px; border-style:solid; border-color:grey; padding: 0.01em;background:grey"), # column(8, # numericInput("LowExpression", "% Low Gene Expression to Filter",value=10, min = 1, max = 90, width="50%"))), #HTML('<style> hr.hasClass{ border-width: 4px; border:0px; height:1.5px; background-color:red;} </style> <hr class="hasClass">'), #not sure #tags$head(tags$style(HTML("#page-nav > li:first-child { display: none; }" ))), #Demo File Information #column(12, , #style = " font-size:17px" #HTML('<hr style="color: black; size="10";>'), #tags$div(HTML("<center><strong>GSOA identifies gene sets that differ between two phenotypes by integrating evidence from multi-omic data using machine learning algorithms</strong></center>")), #p('', style="border:0.75px; border-style:solid; border-color:black; padding: 0.02em;background:black"), #HTML('<style> hr.hasClass{ border-width: 4px; border:0px; height:1.5px; background-color:black;} </style> <hr class="hasClass">'), #h3('Gene Set Omic Analysis (GSOA)', style="color:white;border:1px; border-style:solid; border-color:grey; padding: 0.5em; background:#54b4eb", align= "center"), # sidebarLayout( # sidebarPanel( # # # main panel # mainPanel( # tags$style(type="text/css", # ".shiny-output-error { visibility: hidden; }", # ".shiny-output-error:before { visibility: hidden; }"), # , # #verbatimTextOutput("outFile"), # plotOutput('plot1') # )) # # ) #checkboxInput('header', 'Header', TRUE), #radioButtons('sep', 'Separator', # 
c(Comma=',', # Semicolon=';', # Tab='\t'), # ','), # div( # style = "height: 30px;", # checkboxInput("checkbox", label = "Include Hallmark Analysis", value = FALSE) # ), # bsPopover( # id = "checkbox", # title = "Select if you want to run the Hallmark Analysis", # content = "The hallmark gene sets represent 50 important pathways curated by the Molecular Signatures Database with the goal of reducing redundancy " , # placement = "right", # trigger = "hover" # ),
/ui.R
no_license
smacneil1/shiny_ui
R
false
false
8,802
r
#devtools::install_github("daattali/shinyjs") #install.packages("shinyBS") #install.packages("shinythemes") #install.packages("shinyjs") library(shinyBS) #for bootstrap methods library(shinythemes) #for background layout library(shiny) library(shinyjs) #javascript for hovers options(shiny.deprecation.messages=FALSE) # shinyUI functon is the entire user interface shinyUI(fluidPage(tagList( useShinyjs(), # contains all tabs navbarPage( "Gene Set Omic Analysis (GSOA)", id = "page-nav", theme = shinytheme("yeti"), collapsable = TRUE, # GSOA tab tabPanel("Run GSOA", br(), br(), HTML('<center><img src="./GSOA_Logo_Code2.png"></center>'), br(), HTML('<center><h4> GSOA identifies gene sets that differ between two phenotypes by integrating evidence from multi-omic data</h4></center>'), # Column 1: Data Files div(id = "inputs", column( 4, offset = 2, br(), #div(style="height: 100px;",fileInput('dataFile', label = h5("Gene Expression Data ", tags$style(type = "text/css", "#q1 {vertical-align: bottom;}"), #bsButton("q1", label = "", icon = icon("question"), size = "extra-small")), #width="85%", accept = c( 'text/csv','text/comma-separated-values','text/tab-separated-values', 'text/plain', '.csv', '.tsv'))), #bsPopover(id = "q1", title = "Tidy data", content = paste0("You should read the ", a("tidy data paper", href = "http://vita.had.co.nz/papers/tidy-data.pdf",target="_blank")), placement = "right", trigger = "click", options = list(container = "body")), div( style = "height: 85px;", fileInput('dataFile', 'Genomic Data File *required', multiple = T, width = "85%",accept = c('text/csv','text/comma-separated-values','text/tab-separated-values', 'text/plain','.csv','.tsv'))), div( style = "height: 85px;", fileInput( 'classFile', 'Sample Class File *required', width = "85%", accept = c( 'text/csv','text/comma-separated-values','text/tab-separated-values', 'text/plain','.csv','.tsv' ))), div( style = "height: 85px;", fileInput( 'gmtFile','Gene Sets (GMT file) *required', width = 
"85%", accept = c( 'text/csv','text/comma-separated-values','text/tab-separated-values', 'text/plain','.csv','.tsv' ))), #hidden things a(id = "toggleAdvanced", "Show/hide multi-omic data inputs"), hidden(div( id = "advanced", div( style = "height: 85px;", fileInput( 'dataFile2', 'DNA Sequencing Data.', width = "85%",accept = c( 'text/csv','text/comma-separated-values','text/tab-separated-values', 'text/plain','.csv','.tsv' ))), div( style = "height: 85px;",fileInput( 'dataFile3','Copy Number Variation Data.',width = "85%",accept = c('text/csv','text/comma-separated-values','text/tab-separated-values', 'text/plain','.csv','.tsv' ))), div(style = "height: 85px;", fileInput( 'dataFile4', 'Other Genomic Data.', width = "85%", accept = c( 'text/csv','text/comma-separated-values','text/tab-separated-values', 'text/plain','.csv','.tsv' ))), div( style = "height: 85px;", fileInput( 'dataFile5', 'Other Genomic Data.', width = "85%",accept = c('text/csv','text/comma-separated-values','text/tab-separated-values', 'text/plain','.csv','.tsv'))) )), div( style = "height: 30px;",checkboxInput("GFRN_sigs", label = "Include Bild Lab Signatures", value = FALSE)), bsPopover( id = "GFRN_sigs", title = "Select if you want to run all Bild lab signatures", content = "Bild lab signatures consists of experimentallt generated RNA-sequencing signatures from the growth factor receptor network " , placement = "right", trigger = "hover" ) )), ################## # Column 2: Variance ################## column( 4, offset = 1, br(), div(style = "height: 85px;", numericInput( "Variance", "% Variance to Filter",value = 10,width = "70%",min = 1,max = 90 )), div(style = "height: 85px;", numericInput( "LowExpression","% Low Gene Expression to Filter",value = 10,width = "70%", min = 1,max = 90)), div( style = "height: 65px;", textInput('results_h',value = "",width = "70%",label = 'E-mail Address' )), br(), # more hidden stuff a(id = "toggleAdvanced2", "Show/hide additional paramters"), hidden(div(id = 
"advanced2", div(style = "height: 85px;",selectInput('Algorithm', label = 'Machine Learning Algorithm.', choices = c("svm", "rf"), width = "70%" )), div( style = "height: 85px;",numericInput("CrossValidation","# of Cross Validations",value = 5, min = 1, width = "70%", max = 5 )), div( style = "height: 85px;", numericInput('Iterations', label = 'Number of Iterations.',value = 1000, min = 1,width = "70%",max = 1000)) )), br(), br(), actionButton("run", "Run",width = "60%", icon("paper-plane"),style = "color: black; background-color:green; border-color: grey; padding:9px" ), bsTooltip( id = "run",title = "Click run once files are uploaded",placement = "right", trigger = "hover" ), br(), br(), actionButton("refresh", "Refresh Inputs", width = "60%", icon("paper-plane"),style = "color: black; background-color: grey; border-color: grey; padding:9px" ) ), # Rendered Objects column(12, br(), br(), #HTML('<h4> File Validation </h4>'), #verbatimTextOutput("path"), #allows for message errors singleton(tags$head(tags$script(src = "message-handler.js"))), #htmlOutput("sessionInfo"), dataTableOutput("results") )), # Other Tabs tabPanel("Instructions for Use",includeMarkdown("doc/instructions.md")), tabPanel("Demo Data",includeMarkdown("doc/demo.md")), tabPanel("About", includeMarkdown("doc/about.md")), tabPanel("Code", includeMarkdown("doc/code.md")), tabPanel("Contact", includeMarkdown("doc/contact.md")), tabPanel("Software Updates", includeMarkdown("doc/version.md")) )))) ### NOT USING #h5('Please load your data files and specify GSOA parameters. 
See "Instructions for Use" section for file and parameter details',align= "left"), #h5('To run GSOA with', a(href = 'https://drive.google.com/open?id=0B-HGhGW-uF4AODNhMnVHSmUwdHM', 'Demo Files', target="_blank"), 'download all 3 files from drive,', 'and upload.', style="color:black"), #column(4, wellPanel( # offset=10, # h5('Download data after run is complete.',style="color:black"), # downloadButton('downloadData', 'Download Results'))), # put things side by side #fluidRow( # column(8, # p('', style="border:0.1px; border-style:solid; border-color:grey; padding: 0.01em;background:grey"), # column(8, # numericInput("LowExpression", "% Low Gene Expression to Filter",value=10, min = 1, max = 90, width="50%"))), #HTML('<style> hr.hasClass{ border-width: 4px; border:0px; height:1.5px; background-color:red;} </style> <hr class="hasClass">'), #not sure #tags$head(tags$style(HTML("#page-nav > li:first-child { display: none; }" ))), #Demo File Information #column(12, , #style = " font-size:17px" #HTML('<hr style="color: black; size="10";>'), #tags$div(HTML("<center><strong>GSOA identifies gene sets that differ between two phenotypes by integrating evidence from multi-omic data using machine learning algorithms</strong></center>")), #p('', style="border:0.75px; border-style:solid; border-color:black; padding: 0.02em;background:black"), #HTML('<style> hr.hasClass{ border-width: 4px; border:0px; height:1.5px; background-color:black;} </style> <hr class="hasClass">'), #h3('Gene Set Omic Analysis (GSOA)', style="color:white;border:1px; border-style:solid; border-color:grey; padding: 0.5em; background:#54b4eb", align= "center"), # sidebarLayout( # sidebarPanel( # # # main panel # mainPanel( # tags$style(type="text/css", # ".shiny-output-error { visibility: hidden; }", # ".shiny-output-error:before { visibility: hidden; }"), # , # #verbatimTextOutput("outFile"), # plotOutput('plot1') # )) # # ) #checkboxInput('header', 'Header', TRUE), #radioButtons('sep', 'Separator', # 
c(Comma=',', # Semicolon=';', # Tab='\t'), # ','), # div( # style = "height: 30px;", # checkboxInput("checkbox", label = "Include Hallmark Analysis", value = FALSE) # ), # bsPopover( # id = "checkbox", # title = "Select if you want to run the Hallmark Analysis", # content = "The hallmark gene sets represent 50 important pathways curated by the Molecular Signatures Database with the goal of reducing redundancy " , # placement = "right", # trigger = "hover" # ),
## Include one-off failures in 'compilation' or 'runtime' with full test name ## (e.g. '/ arg1 = numericArray(nDim=3) arg2 = logicalArray(nDim=3)'. If many ## tests are failing and adding them all to math_knownFailures becomes ## overwhelming, consider removing the op's entries in math_argTypes in ## testing_operatorLists.R. ################## ## unary operators ################## nCompiler:::updateOperatorDef( c('min', 'max', 'all', 'any', 'exp', 'log', 'rsqrt', 'sqrt', 'tanh', 'cube', 'square'), 'testing', 'known_failures', list( math = list( compilation = list('numericScalar', 'integerScalar', 'logicalScalar') ), AD = list( compilation = list('numericScalar') ) ) ) nCompiler:::updateOperatorDef( c('abs'), 'testing', 'known_failures', list( math = list( compilation = list('numericScalar', 'integerScalar', 'logicalScalar') ), AD = list( compilation = list('numericScalar'), runtime = list('numericVector(7)') ) ) ) nCompiler:::updateOperatorDef( c('atan', 'logit', 'mean', 'prod'), 'testing', 'known_failures', list( math = list( compilation = list('numericScalar', 'integerScalar', 'logicalScalar') ), AD = list( compilation = list('numericScalar', 'numericVector(7)') ) ) ) ################### ## binary operators ################### nCompiler:::updateOperatorDef( c('-'), 'testing', 'known_failures', list( AD = list( compilation = list( ## This case fails to compile in an nClass when AD is on, but does ## compile as an nFunction. 
c('numericVector(7)', 'numericScalar') ) ) ) ) nCompiler:::updateOperatorDef( c('+'), 'testing', 'known_failures', list( AD = list( runtime = list( c('numericVector(7)', 'numericScalar') ) ) ) ) nCompiler:::updateOperatorDef( c('/'), 'testing', 'known_failures', list( AD = list( runtime = list( c('numericVector(7)', 'numericVector(7)') ) ) ) ) nCompiler:::updateOperatorDef( c('pmin', 'pmax'), 'testing', 'known_failures', list( math = list( compilation = make_argType_tuples('numericScalar', 'integerScalar', 'logicalScalar'), runtime = c( make_argType_tuples('numericMatrix', 'integerMatrix', 'logicalMatrix', 'numericArray(nDim=3)', 'integerArray(nDim=3)', 'logicalArray(nDim=3)', rhs = 'numericVector'), ## in R, when first arg is scalar the result is a vector, so swapping ## args won't work for scalar and matrix/array combos make_argType_tuples('numericScalar', 'integerScalar', 'logicalScalar', rhs = c('numericMatrix', 'integerMatrix', 'logicalMatrix', 'numericArray(nDim=3)', 'integerArray(nDim=3)', 'logicalArray(nDim=3)')) ) ) ) ) nCompiler:::updateOperatorDef( c('&', '|'), 'testing', 'known_failures', list( math = list( compilation = c( ## doesn't work when one arg is Eigen::Tensor and the other is scalar make_argType_tuples('numericVector', 'integerVector', 'logicalVector', 'numericMatrix', 'integerMatrix', 'logicalMatrix', 'numericArray(nDim=3)', 'integerArray(nDim=3)', 'logicalArray(nDim=3)', rhs = c('numericScalar', 'integerScalar', 'logicalScalar')), make_argType_tuples('numericScalar', 'integerScalar', 'logicalScalar', rhs = c('numericVector', 'integerVector', 'logicalVector', 'numericMatrix', 'integerMatrix', 'logicalMatrix', 'numericArray(nDim=3)', 'integerArray(nDim=3)', 'logicalArray(nDim=3)')) ) ) ) ) nCompiler:::updateOperatorDef( c('%%'), 'testing', 'known_failures', list( math = list( ## %% currently only works for arguments that promote to integers compilation = c( ## we only test with rhs scalar... 
see math_operatorLists.R make_argType_tuples('numericScalar', 'integerScalar', 'logicalScalar', 'numericVector', 'integerVector', 'logicalVector', 'numericMatrix', 'integerMatrix', 'logicalMatrix', 'numericArray(nDim=3)', 'integerArray(nDim=3)', 'logicalArray(nDim=3)', rhs = c('numericScalar')), make_argType_tuples('numericScalar', 'numericVector', 'numericMatrix', 'numericArray(nDim=3)', rhs = c('logicalScalar', 'integerScalar')) ) ) ) ) nCompiler:::updateOperatorDef( c('^'), 'testing', 'known_failures', list( math = list( compilation = c( ## current doesn't work for scalar bases make_argType_tuples('numericScalar', 'integerScalar', 'logicalScalar') ) ) ) )
/nCompiler/tests/testthat/known_failures.R
permissive
joe-nano/nCompiler
R
false
false
5,123
r
## Include one-off failures in 'compilation' or 'runtime' with full test name ## (e.g. '/ arg1 = numericArray(nDim=3) arg2 = logicalArray(nDim=3)'. If many ## tests are failing and adding them all to math_knownFailures becomes ## overwhelming, consider removing the op's entries in math_argTypes in ## testing_operatorLists.R. ################## ## unary operators ################## nCompiler:::updateOperatorDef( c('min', 'max', 'all', 'any', 'exp', 'log', 'rsqrt', 'sqrt', 'tanh', 'cube', 'square'), 'testing', 'known_failures', list( math = list( compilation = list('numericScalar', 'integerScalar', 'logicalScalar') ), AD = list( compilation = list('numericScalar') ) ) ) nCompiler:::updateOperatorDef( c('abs'), 'testing', 'known_failures', list( math = list( compilation = list('numericScalar', 'integerScalar', 'logicalScalar') ), AD = list( compilation = list('numericScalar'), runtime = list('numericVector(7)') ) ) ) nCompiler:::updateOperatorDef( c('atan', 'logit', 'mean', 'prod'), 'testing', 'known_failures', list( math = list( compilation = list('numericScalar', 'integerScalar', 'logicalScalar') ), AD = list( compilation = list('numericScalar', 'numericVector(7)') ) ) ) ################### ## binary operators ################### nCompiler:::updateOperatorDef( c('-'), 'testing', 'known_failures', list( AD = list( compilation = list( ## This case fails to compile in an nClass when AD is on, but does ## compile as an nFunction. 
c('numericVector(7)', 'numericScalar') ) ) ) ) nCompiler:::updateOperatorDef( c('+'), 'testing', 'known_failures', list( AD = list( runtime = list( c('numericVector(7)', 'numericScalar') ) ) ) ) nCompiler:::updateOperatorDef( c('/'), 'testing', 'known_failures', list( AD = list( runtime = list( c('numericVector(7)', 'numericVector(7)') ) ) ) ) nCompiler:::updateOperatorDef( c('pmin', 'pmax'), 'testing', 'known_failures', list( math = list( compilation = make_argType_tuples('numericScalar', 'integerScalar', 'logicalScalar'), runtime = c( make_argType_tuples('numericMatrix', 'integerMatrix', 'logicalMatrix', 'numericArray(nDim=3)', 'integerArray(nDim=3)', 'logicalArray(nDim=3)', rhs = 'numericVector'), ## in R, when first arg is scalar the result is a vector, so swapping ## args won't work for scalar and matrix/array combos make_argType_tuples('numericScalar', 'integerScalar', 'logicalScalar', rhs = c('numericMatrix', 'integerMatrix', 'logicalMatrix', 'numericArray(nDim=3)', 'integerArray(nDim=3)', 'logicalArray(nDim=3)')) ) ) ) ) nCompiler:::updateOperatorDef( c('&', '|'), 'testing', 'known_failures', list( math = list( compilation = c( ## doesn't work when one arg is Eigen::Tensor and the other is scalar make_argType_tuples('numericVector', 'integerVector', 'logicalVector', 'numericMatrix', 'integerMatrix', 'logicalMatrix', 'numericArray(nDim=3)', 'integerArray(nDim=3)', 'logicalArray(nDim=3)', rhs = c('numericScalar', 'integerScalar', 'logicalScalar')), make_argType_tuples('numericScalar', 'integerScalar', 'logicalScalar', rhs = c('numericVector', 'integerVector', 'logicalVector', 'numericMatrix', 'integerMatrix', 'logicalMatrix', 'numericArray(nDim=3)', 'integerArray(nDim=3)', 'logicalArray(nDim=3)')) ) ) ) ) nCompiler:::updateOperatorDef( c('%%'), 'testing', 'known_failures', list( math = list( ## %% currently only works for arguments that promote to integers compilation = c( ## we only test with rhs scalar... 
see math_operatorLists.R make_argType_tuples('numericScalar', 'integerScalar', 'logicalScalar', 'numericVector', 'integerVector', 'logicalVector', 'numericMatrix', 'integerMatrix', 'logicalMatrix', 'numericArray(nDim=3)', 'integerArray(nDim=3)', 'logicalArray(nDim=3)', rhs = c('numericScalar')), make_argType_tuples('numericScalar', 'numericVector', 'numericMatrix', 'numericArray(nDim=3)', rhs = c('logicalScalar', 'integerScalar')) ) ) ) ) nCompiler:::updateOperatorDef( c('^'), 'testing', 'known_failures', list( math = list( compilation = c( ## current doesn't work for scalar bases make_argType_tuples('numericScalar', 'integerScalar', 'logicalScalar') ) ) ) )
#' displays basic information about the app #' #' @param id #' @param input #' @param output #' @param ui #' @export #' infoPanel=function(id,input=NULL,output=NULL,ui=T){ ns=NS(id) if (ui){ HTML(paste0("<br>data taken from WHO/EURO site. See ", "<a href='https://gateway.euro.who.int/en/datasets/european-health-for-all-database/'>", "link</a> for further details. Data available for educational and private use only." )) } else { stop("server side not currently needed") } }
/EuData/R/infoPanel.r
no_license
willeda1/schoolGIS
R
false
false
579
r
#' displays basic information about the app #' #' @param id #' @param input #' @param output #' @param ui #' @export #' infoPanel=function(id,input=NULL,output=NULL,ui=T){ ns=NS(id) if (ui){ HTML(paste0("<br>data taken from WHO/EURO site. See ", "<a href='https://gateway.euro.who.int/en/datasets/european-health-for-all-database/'>", "link</a> for further details. Data available for educational and private use only." )) } else { stop("server side not currently needed") } }
library(tidyverse) #Se cargan los datos datos_HEI <- read.csv2("/cloud/project/Clases/HEI_cost data variable subset.csv") BMI_sedentario <- datos_HEI %>% select(daycode, BMI, activity_level) %>% dplyr::filter(daycode == 1 & activity_level == 1) %>% select(BMI) %>% unlist() %>% as.vector() BMI_activo <- datos_HEI %>% select(daycode, BMI, activity_level) %>% dplyr::filter(daycode == 1 & activity_level == 3) %>% select(BMI)%>% unlist() %>% as.vector() BMI_fumador <- datos_HEI %>% select(daycode, BMI, smoker) %>% dplyr::filter(daycode == 1 & smoker == 1) %>% select(BMI)%>% unlist() %>% as.vector() BMI_colesterol <- datos_HEI %>% select(daycode, BMI, doc_chol) %>% dplyr::filter(daycode == 1 & doc_chol == 1) %>% select(BMI)%>% unlist() %>% as.vector() BMI_obesoantes <- datos_HEI %>% select(daycode, BMI, sr_overweight) %>% dplyr::filter(daycode == 1 & sr_overweight == 1) %>% select(BMI)%>% unlist() %>% as.vector() BMI_blancos_nohispanos <- datos_HEI %>% select(daycode, BMI, nhw) %>% dplyr::filter(daycode == 1 & nhw == 1) %>% select(BMI)%>% unlist() %>% as.vector() BMI_negros_nohispanos <- datos_HEI %>% select(daycode, BMI, nhb) %>% dplyr::filter(daycode == 1 & nhb == 1) %>% select(BMI)%>% unlist() %>% as.vector()
/Contraste_hipotesis/Datos parcial.R
no_license
kamecon/Stats2
R
false
false
1,319
r
library(tidyverse) #Se cargan los datos datos_HEI <- read.csv2("/cloud/project/Clases/HEI_cost data variable subset.csv") BMI_sedentario <- datos_HEI %>% select(daycode, BMI, activity_level) %>% dplyr::filter(daycode == 1 & activity_level == 1) %>% select(BMI) %>% unlist() %>% as.vector() BMI_activo <- datos_HEI %>% select(daycode, BMI, activity_level) %>% dplyr::filter(daycode == 1 & activity_level == 3) %>% select(BMI)%>% unlist() %>% as.vector() BMI_fumador <- datos_HEI %>% select(daycode, BMI, smoker) %>% dplyr::filter(daycode == 1 & smoker == 1) %>% select(BMI)%>% unlist() %>% as.vector() BMI_colesterol <- datos_HEI %>% select(daycode, BMI, doc_chol) %>% dplyr::filter(daycode == 1 & doc_chol == 1) %>% select(BMI)%>% unlist() %>% as.vector() BMI_obesoantes <- datos_HEI %>% select(daycode, BMI, sr_overweight) %>% dplyr::filter(daycode == 1 & sr_overweight == 1) %>% select(BMI)%>% unlist() %>% as.vector() BMI_blancos_nohispanos <- datos_HEI %>% select(daycode, BMI, nhw) %>% dplyr::filter(daycode == 1 & nhw == 1) %>% select(BMI)%>% unlist() %>% as.vector() BMI_negros_nohispanos <- datos_HEI %>% select(daycode, BMI, nhb) %>% dplyr::filter(daycode == 1 & nhb == 1) %>% select(BMI)%>% unlist() %>% as.vector()
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/BETS.arch_test.R \name{BETS.arch_test} \alias{BETS.arch_test} \title{Perform an ARCH test} \usage{ BETS.arch_test(x, lags = 12, demean = FALSE, alpha = 0.5) } \arguments{ \item{x}{A \code{ts} object. The time series} \item{lags}{An \code{integer}. Maximum number of lags} \item{demean}{A \code{boolean}. Should the series be demeaned?} \item{alpha}{A \code{numeric} value. Significance level} } \value{ A \code{list} with the results of the ARCH test } \description{ Performs an ARCH test and show the results. Formerly, this function was part of FinTS, now an obsoleted package. } \author{ Spencer Graves \email{spencer.graves@prodsyse.com}, Talitha Speranza \email{talitha.speranza@fgv.br} }
/man/BETS.arch_test.Rd
no_license
pedrocostaferreira/BETS
R
false
true
775
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/BETS.arch_test.R \name{BETS.arch_test} \alias{BETS.arch_test} \title{Perform an ARCH test} \usage{ BETS.arch_test(x, lags = 12, demean = FALSE, alpha = 0.5) } \arguments{ \item{x}{A \code{ts} object. The time series} \item{lags}{An \code{integer}. Maximum number of lags} \item{demean}{A \code{boolean}. Should the series be demeaned?} \item{alpha}{A \code{numeric} value. Significance level} } \value{ A \code{list} with the results of the ARCH test } \description{ Performs an ARCH test and show the results. Formerly, this function was part of FinTS, now an obsoleted package. } \author{ Spencer Graves \email{spencer.graves@prodsyse.com}, Talitha Speranza \email{talitha.speranza@fgv.br} }
#' calc_norm_factors #' #' Calculate normalization factors in a two step process: #' #' 1) summarize every region for each sample (default summary function is max) #' #' 2) caclulate a value to cap each sample to based on regions (default is 95th #' quantile). #' #' The uderlying assumption here is that meaningful enrichment is present at the #' majority of regions provided. If prevalence varies by a specific factor, say #' ChIP-seq targets with different characteristics - ie. when analyzing TSSes #' for H3K4me3 and an infrequent transcription factor it is more appropriate to #' specify appropriate quantile cutoffs per factor. #' #' @param full_dt a data.table, as returned by ssvFetch*(..., return_data.table. #' = TRUE) #' @param value_ character, attribute in full_dt to normalzie. #' @param cap_value_ character, new attribute name specifying values to cap to. #' @param by1 character vector, specifies attributes relevant to step 1. #' @param by2 character vector, specifies attributes relevant to step 1 and 2. #' @param aggFUN1 function called on value_ with by = c(by1, by2) in step 1. #' @param aggFUN2 function called on result of aggFUN1 with by = by2 in step 2. #' #' @return data.table mapping by2 to cap_value_. 
#' @export #' #' @examples #' calc_norm_factors(CTCF_in_10a_profiles_dt) #' calc_norm_factors(CTCF_in_10a_profiles_dt, #' aggFUN1 = mean, aggFUN2 = function(x)quantile(x, .5)) calc_norm_factors = function(full_dt, value_ = "y", cap_value_ = "y_cap_value", by1 = "id", by2 = "sample", aggFUN1 = max, aggFUN2 = function(x)quantile(x, .95) ){ value_summary_ = NULL #reserve binding for data.table stopifnot(data.table::is.data.table(full_dt)) stopifnot(value_ %in% colnames(full_dt)) stopifnot(by1 %in% colnames(full_dt)) stopifnot(by2 %in% colnames(full_dt)) cap_dt = full_dt[, list(value_summary_ = aggFUN1(get(value_))), c(by1, by2)][, list(value_capped_ = aggFUN2(value_summary_)), c(by2)] data.table::setnames(cap_dt, "value_capped_", cap_value_) cap_dt[] } #' append_ynorm #' #' see \code{\link{calc_norm_factors}} for normalization details. #' #' @param full_dt a data.table, as returned by ssvFetch*(..., return_data.table #' = TRUE). #' @param value_ character, attribute in full_dt to normalzie. #' @param cap_value_ character, new attribute name specifying values to cap to. #' @param norm_value_ character, new attribute name specifying normalized values. #' @param by1 character vector, specifies attributes relevant to step 1. #' @param by2 character vector, specifies attributes relevant to step 1 and 2. #' @param aggFUN1 function called on value_ with by = c(by1, by2) in step 1. #' @param aggFUN2 function called on result of aggFUN1 with by = by2 in step 2. #' @param cap_dt optionally, provide user generated by2 to cap_value_ mapping #' @param do_not_cap if TRUE, normalized values are not capped to 1. Default is FALSE. #' @param force_append if TRUE, any previous cap_value or norm_value is overridden. Default is FALSE. #' #' @return data.table, full_dt with cap_value_ and norm_value_ values appended. 
#' @export #' #' @examples #' append_ynorm(CTCF_in_10a_profiles_dt) #' append_ynorm(CTCF_in_10a_profiles_dt, #' aggFUN1 = mean, aggFUN2 = function(x)quantile(x, .5)) append_ynorm = function(full_dt, value_ = "y", cap_value_ = "y_cap_value", norm_value_ = "y_norm", by1 = "id", by2 = "sample", aggFUN1 = max, aggFUN2 = function(x)quantile(x, .95), cap_dt = NULL, do_not_cap = FALSE, force_append = FALSE ){ stopifnot(data.table::is.data.table(full_dt)) stopifnot(value_ %in% colnames(full_dt)) stopifnot(by1 %in% colnames(full_dt)) stopifnot(by2 %in% colnames(full_dt)) if(force_append){ full_dt[[cap_value_]] = NULL full_dt[[norm_value_]] = NULL } stopifnot(!cap_value_ %in% colnames(full_dt)) stopifnot(!norm_value_ %in% colnames(full_dt)) if(is.null(cap_dt)){ cap_dt = calc_norm_factors(full_dt, value_, cap_value_, by1, by2, aggFUN1, aggFUN2) }else{ stopifnot(cap_value_ %in% colnames(cap_dt)) stopifnot(by2 %in% colnames(cap_dt)) } full_dt = merge(full_dt, cap_dt, by = by2) data.table::set(full_dt, NULL, norm_value_, full_dt[[value_]]) if(!do_not_cap){ full_dt[get(norm_value_) > get(cap_value_), (norm_value_) := get(cap_value_)] } full_dt[, (norm_value_) := get((norm_value_)) / get(cap_value_)] full_dt[] }
/R/functions_normalization.R
no_license
jrboyd/seqsetvis
R
false
false
4,875
r
#' calc_norm_factors #' #' Calculate normalization factors in a two step process: #' #' 1) summarize every region for each sample (default summary function is max) #' #' 2) caclulate a value to cap each sample to based on regions (default is 95th #' quantile). #' #' The uderlying assumption here is that meaningful enrichment is present at the #' majority of regions provided. If prevalence varies by a specific factor, say #' ChIP-seq targets with different characteristics - ie. when analyzing TSSes #' for H3K4me3 and an infrequent transcription factor it is more appropriate to #' specify appropriate quantile cutoffs per factor. #' #' @param full_dt a data.table, as returned by ssvFetch*(..., return_data.table. #' = TRUE) #' @param value_ character, attribute in full_dt to normalzie. #' @param cap_value_ character, new attribute name specifying values to cap to. #' @param by1 character vector, specifies attributes relevant to step 1. #' @param by2 character vector, specifies attributes relevant to step 1 and 2. #' @param aggFUN1 function called on value_ with by = c(by1, by2) in step 1. #' @param aggFUN2 function called on result of aggFUN1 with by = by2 in step 2. #' #' @return data.table mapping by2 to cap_value_. 
#' @export #' #' @examples #' calc_norm_factors(CTCF_in_10a_profiles_dt) #' calc_norm_factors(CTCF_in_10a_profiles_dt, #' aggFUN1 = mean, aggFUN2 = function(x)quantile(x, .5)) calc_norm_factors = function(full_dt, value_ = "y", cap_value_ = "y_cap_value", by1 = "id", by2 = "sample", aggFUN1 = max, aggFUN2 = function(x)quantile(x, .95) ){ value_summary_ = NULL #reserve binding for data.table stopifnot(data.table::is.data.table(full_dt)) stopifnot(value_ %in% colnames(full_dt)) stopifnot(by1 %in% colnames(full_dt)) stopifnot(by2 %in% colnames(full_dt)) cap_dt = full_dt[, list(value_summary_ = aggFUN1(get(value_))), c(by1, by2)][, list(value_capped_ = aggFUN2(value_summary_)), c(by2)] data.table::setnames(cap_dt, "value_capped_", cap_value_) cap_dt[] } #' append_ynorm #' #' see \code{\link{calc_norm_factors}} for normalization details. #' #' @param full_dt a data.table, as returned by ssvFetch*(..., return_data.table #' = TRUE). #' @param value_ character, attribute in full_dt to normalzie. #' @param cap_value_ character, new attribute name specifying values to cap to. #' @param norm_value_ character, new attribute name specifying normalized values. #' @param by1 character vector, specifies attributes relevant to step 1. #' @param by2 character vector, specifies attributes relevant to step 1 and 2. #' @param aggFUN1 function called on value_ with by = c(by1, by2) in step 1. #' @param aggFUN2 function called on result of aggFUN1 with by = by2 in step 2. #' @param cap_dt optionally, provide user generated by2 to cap_value_ mapping #' @param do_not_cap if TRUE, normalized values are not capped to 1. Default is FALSE. #' @param force_append if TRUE, any previous cap_value or norm_value is overridden. Default is FALSE. #' #' @return data.table, full_dt with cap_value_ and norm_value_ values appended. 
#' @export #' #' @examples #' append_ynorm(CTCF_in_10a_profiles_dt) #' append_ynorm(CTCF_in_10a_profiles_dt, #' aggFUN1 = mean, aggFUN2 = function(x)quantile(x, .5)) append_ynorm = function(full_dt, value_ = "y", cap_value_ = "y_cap_value", norm_value_ = "y_norm", by1 = "id", by2 = "sample", aggFUN1 = max, aggFUN2 = function(x)quantile(x, .95), cap_dt = NULL, do_not_cap = FALSE, force_append = FALSE ){ stopifnot(data.table::is.data.table(full_dt)) stopifnot(value_ %in% colnames(full_dt)) stopifnot(by1 %in% colnames(full_dt)) stopifnot(by2 %in% colnames(full_dt)) if(force_append){ full_dt[[cap_value_]] = NULL full_dt[[norm_value_]] = NULL } stopifnot(!cap_value_ %in% colnames(full_dt)) stopifnot(!norm_value_ %in% colnames(full_dt)) if(is.null(cap_dt)){ cap_dt = calc_norm_factors(full_dt, value_, cap_value_, by1, by2, aggFUN1, aggFUN2) }else{ stopifnot(cap_value_ %in% colnames(cap_dt)) stopifnot(by2 %in% colnames(cap_dt)) } full_dt = merge(full_dt, cap_dt, by = by2) data.table::set(full_dt, NULL, norm_value_, full_dt[[value_]]) if(!do_not_cap){ full_dt[get(norm_value_) > get(cap_value_), (norm_value_) := get(cap_value_)] } full_dt[, (norm_value_) := get((norm_value_)) / get(cap_value_)] full_dt[] }
#Logistic Regression #read.csv(file.choose(), header=T) data <- read.csv("binary.csv", header = T) print(str(data)) #convert admit, rank into categorical varibales data$admit <- as.factor(data$admit) data$rank <- as.factor(data$rank) print(str(data)) #Two way table of factor variables xtabs(~admit + rank, data=data) #there is no zero rows, data looks good. #Partition data train and test set.seed(1234) ind <- sample(2, nrow(data), replace=T, prob=c(0.8,0.2)) training <- data[ind == 1,] testing <- data[ind == 2,] #Logistic regression model model <- glm(admit ~ gre + gpa + rank, data = training, family = 'binomial') print(summary(model)) #gre is not significant so we can drop #Logistic regression model model <- glm(admit ~ gpa + rank, data = training, family = 'binomial') print(summary(model)) p1 <- predict(model, training, type='response') print(head(p1)) print(head(training)) #misclassification error train data pred1 <- ifelse(p1>0.5, 1, 0) tab1 <- table(Predicted = pred1, Actual=training$admit) print(tab1) #gives confustion matrix error <- 1 - sum(diag(tab1))/sum(tab1) print(error) #misclassification error test data p2 <- predict(model, testing, type='response') pred2 <- ifelse(p2>0.5, 1, 0) tab2 <- table(Predicted = pred2, Actual=testing$admit) print(tab2) #gives confustion matrix error <- 1 - sum(diag(tab2))/sum(tab2) print(error) #Goodness of fit test p_value <- with(model, pchisq(null.deviance - deviance, df.null - df.residual, lower.tail = FALSE)) print(p_value) #since this value is so small, this model is significant
/LogisticRegressionWithStudentData.R
no_license
venkatram64/rwork
R
false
false
1,578
r
#Logistic Regression #read.csv(file.choose(), header=T) data <- read.csv("binary.csv", header = T) print(str(data)) #convert admit, rank into categorical varibales data$admit <- as.factor(data$admit) data$rank <- as.factor(data$rank) print(str(data)) #Two way table of factor variables xtabs(~admit + rank, data=data) #there is no zero rows, data looks good. #Partition data train and test set.seed(1234) ind <- sample(2, nrow(data), replace=T, prob=c(0.8,0.2)) training <- data[ind == 1,] testing <- data[ind == 2,] #Logistic regression model model <- glm(admit ~ gre + gpa + rank, data = training, family = 'binomial') print(summary(model)) #gre is not significant so we can drop #Logistic regression model model <- glm(admit ~ gpa + rank, data = training, family = 'binomial') print(summary(model)) p1 <- predict(model, training, type='response') print(head(p1)) print(head(training)) #misclassification error train data pred1 <- ifelse(p1>0.5, 1, 0) tab1 <- table(Predicted = pred1, Actual=training$admit) print(tab1) #gives confustion matrix error <- 1 - sum(diag(tab1))/sum(tab1) print(error) #misclassification error test data p2 <- predict(model, testing, type='response') pred2 <- ifelse(p2>0.5, 1, 0) tab2 <- table(Predicted = pred2, Actual=testing$admit) print(tab2) #gives confustion matrix error <- 1 - sum(diag(tab2))/sum(tab2) print(error) #Goodness of fit test p_value <- with(model, pchisq(null.deviance - deviance, df.null - df.residual, lower.tail = FALSE)) print(p_value) #since this value is so small, this model is significant
#折线图、柱状图 series_rectangular <- function(data,type,itemStyle=FALSE){ list_name <- colnames(data) colnames(data) <- NULL data <- as.list(data) if(length(type)==1){type <- rep(type,length(data))} for(i in seq(length(data))){ #加一个0是为了保证当只有一列的时候,转jason的时候可以带上[]方括号 data[[i]] <- list(name=unbox(list_name[i]),type=unbox(type[i]),data=data[[i]], itemStyle=list(normal=list(label=list(show=unbox(itemStyle),formatter=unbox('{c}'))))) } return(paste("series:",jsonlite::toJSON(data,auto_unbox=FALSE),sep="")) } #饼图 series_pie <- function(data,type='pie',radius='55%'){ list_name <- colnames(data) colnames(data) <- NULL data <- as.list(data) for(i in seq(length(data))){ data[[i]] <- list(name=list_name[i],value=data[[i]]) } paste("series:",jsonlite::toJSON(list(list(type='pie',radius='55%',data=data)),auto_unbox=TRUE),sep="") } #地图 series_map <- function(data, mapType, itemStyle.normal=FALSE, itemStyle.emphasis=FALSE){ list_name <- colnames(data) colnames(data) <- NULL data <- as.list(data) for(i in seq(length(data))){ data[[i]] <- list(name=list_name[i],value=data[[i]]) } paste("series:", jsonlite::toJSON(list(list(type='map', mapType=mapType, data=data, itemStyle=list(normal=list(label=list(show=itemStyle.normal)), emphasis=list(label=list(show=itemStyle.emphasis)) ))),,auto_unbox=TRUE) ,sep="") } #漏斗图 series_funnel <- function(data,type='funnel', sort='descending', gap=5,width='100%',x=0,y=0){ list_name <- colnames(data) colnames(data) <- NULL data <- as.list(data) for(i in seq(length(data))){ data[[i]] <- list(name=list_name[i],value=data[[i]]) } paste("series:", jsonlite::toJSON(list(list(data=data, type=type, sort=sort, gap=gap, width=width, x=x, y=y, itemStyle=list(normal=list(label=list(show=TRUE,position='inside'))) )),auto_unbox=TRUE) ,sep="") }
/R/series.R
no_license
xizhiming/RechartsCustom
R
false
false
2,494
r
#折线图、柱状图 series_rectangular <- function(data,type,itemStyle=FALSE){ list_name <- colnames(data) colnames(data) <- NULL data <- as.list(data) if(length(type)==1){type <- rep(type,length(data))} for(i in seq(length(data))){ #加一个0是为了保证当只有一列的时候,转jason的时候可以带上[]方括号 data[[i]] <- list(name=unbox(list_name[i]),type=unbox(type[i]),data=data[[i]], itemStyle=list(normal=list(label=list(show=unbox(itemStyle),formatter=unbox('{c}'))))) } return(paste("series:",jsonlite::toJSON(data,auto_unbox=FALSE),sep="")) } #饼图 series_pie <- function(data,type='pie',radius='55%'){ list_name <- colnames(data) colnames(data) <- NULL data <- as.list(data) for(i in seq(length(data))){ data[[i]] <- list(name=list_name[i],value=data[[i]]) } paste("series:",jsonlite::toJSON(list(list(type='pie',radius='55%',data=data)),auto_unbox=TRUE),sep="") } #地图 series_map <- function(data, mapType, itemStyle.normal=FALSE, itemStyle.emphasis=FALSE){ list_name <- colnames(data) colnames(data) <- NULL data <- as.list(data) for(i in seq(length(data))){ data[[i]] <- list(name=list_name[i],value=data[[i]]) } paste("series:", jsonlite::toJSON(list(list(type='map', mapType=mapType, data=data, itemStyle=list(normal=list(label=list(show=itemStyle.normal)), emphasis=list(label=list(show=itemStyle.emphasis)) ))),,auto_unbox=TRUE) ,sep="") } #漏斗图 series_funnel <- function(data,type='funnel', sort='descending', gap=5,width='100%',x=0,y=0){ list_name <- colnames(data) colnames(data) <- NULL data <- as.list(data) for(i in seq(length(data))){ data[[i]] <- list(name=list_name[i],value=data[[i]]) } paste("series:", jsonlite::toJSON(list(list(data=data, type=type, sort=sort, gap=gap, width=width, x=x, y=y, itemStyle=list(normal=list(label=list(show=TRUE,position='inside'))) )),auto_unbox=TRUE) ,sep="") }
# Load R's "USPersonalExpenditure" dataset using the "data()" function # This will produce a data frame called `USPersonalExpenditure` data("USPersonalExpenditure") # The variable USPersonalExpenditure is now accessible to you. Unfortunately, # it's not a data frame (it's actually what is called a matrix) # Test this using the `is.data.frame()` function is.data.frame(USPersonalExpenditure) # Luckily, you can simply pass the USPersonalExpenditure variable as an argument # to the `data.frame()` function to convert it a data farm. Do this, storing the # result in a new variable personal.expenditure <- data.frame(USPersonalExpenditure, stringsAsFactors = FALSE) # What are the column names of your dataframe? colnames(personal.expenditure) # Why are they so strange? Think about whether you could use a number like 1940 # with dollar notation! # What are the row names of your dataframe? rownames(personal.expenditure) # Create a column "category" that is equal to your rownames personal.expenditure$category <- rownames(personal.expenditure) View(personal.expenditure) # How much money was spent on personal care in 1940? personal.expenditure$X1940[personal.expenditure$category == "Personal Care"] # How much money was spent on Food and Tobacco in 1960? personal.expenditure$X1960[personal.expenditure$category == "Food and Tobacco"] # What was the highest expenditure category in 1960? personal.expenditure$X1960[max(personal.expenditure$X1960)] # Hint: use the `max()` function to find the largest, then compare that to the column # Define a function `DetectHighest` that takes in a year as a parameter, and # returns the highest spending category of that year # Using your function, determine the highest spending category of each year
/exercise-3/exercise.R
permissive
CarameIMacchiato/module9-dataframes
R
false
false
1,759
r
# Load R's "USPersonalExpenditure" dataset using the "data()" function # This will produce a data frame called `USPersonalExpenditure` data("USPersonalExpenditure") # The variable USPersonalExpenditure is now accessible to you. Unfortunately, # it's not a data frame (it's actually what is called a matrix) # Test this using the `is.data.frame()` function is.data.frame(USPersonalExpenditure) # Luckily, you can simply pass the USPersonalExpenditure variable as an argument # to the `data.frame()` function to convert it a data farm. Do this, storing the # result in a new variable personal.expenditure <- data.frame(USPersonalExpenditure, stringsAsFactors = FALSE) # What are the column names of your dataframe? colnames(personal.expenditure) # Why are they so strange? Think about whether you could use a number like 1940 # with dollar notation! # What are the row names of your dataframe? rownames(personal.expenditure) # Create a column "category" that is equal to your rownames personal.expenditure$category <- rownames(personal.expenditure) View(personal.expenditure) # How much money was spent on personal care in 1940? personal.expenditure$X1940[personal.expenditure$category == "Personal Care"] # How much money was spent on Food and Tobacco in 1960? personal.expenditure$X1960[personal.expenditure$category == "Food and Tobacco"] # What was the highest expenditure category in 1960? personal.expenditure$X1960[max(personal.expenditure$X1960)] # Hint: use the `max()` function to find the largest, then compare that to the column # Define a function `DetectHighest` that takes in a year as a parameter, and # returns the highest spending category of that year # Using your function, determine the highest spending category of each year
\name{departures} \alias{departures} \docType{data} \title{ Total monthly departures from Australia } \description{ Overseas departures from Australia: permanent departures, long-term (more than one year) residents departing, long-term (more than one year) visitors departing, short-term (less than one year) residents departing and short-term (less than one year) visitors departing. January 1976 - February 2016. } \usage{data(departures)} \format{Multiple monthly time series of class \code{mts} containing the following series: \describe{ \item{\code{permanent}}{a monthly time series of the permanent departures from Australia.} \item{\code{reslong}}{a monthly time series of the long-term resident departures from Australia.} \item{\code{vislong}}{a monthly time series of the long-term visitor departures from Australia.} \item{\code{resshort}}{a monthly time series of the short-term resident departures from Australia.} \item{\code{visshort}}{a monthly time series of the short-term visitor departures from Australia.} }} \source{Australian Bureau of Statistics. Catalogue No 3401.02.} \examples{ plot(departures) } \keyword{datasets}
/man/departures.Rd
no_license
mhdella/fpp
R
false
false
1,201
rd
\name{departures} \alias{departures} \docType{data} \title{ Total monthly departures from Australia } \description{ Overseas departures from Australia: permanent departures, long-term (more than one year) residents departing, long-term (more than one year) visitors departing, short-term (less than one year) residents departing and short-term (less than one year) visitors departing. January 1976 - February 2016. } \usage{data(departures)} \format{Multiple monthly time series of class \code{mts} containing the following series: \describe{ \item{\code{permanent}}{a monthly time series of the permanent departures from Australia.} \item{\code{reslong}}{a monthly time series of the long-term resident departures from Australia.} \item{\code{vislong}}{a monthly time series of the long-term visitor departures from Australia.} \item{\code{resshort}}{a monthly time series of the short-term resident departures from Australia.} \item{\code{visshort}}{a monthly time series of the short-term visitor departures from Australia.} }} \source{Australian Bureau of Statistics. Catalogue No 3401.02.} \examples{ plot(departures) } \keyword{datasets}
require(psych) require(car) require(lmtest) require(sandwich) require(boot) require(tidyverse) require(cAIC4) require(r2glmm) require(lme4) require(lmerTest) require(influence.ME) require(lattice) source("GraphPlot.R") sample_3 <- read.csv("https://raw.githubusercontent.com/kekecsz/PSYP13_Data_analysis_class-2019/master/home_sample_3.csv") sample_4 <- read.csv("https://raw.githubusercontent.com/kekecsz/PSYP13_Data_analysis_class-2019/master/home_sample_4.csv") View(sample_3) summary(sample_3) # change mindfulness - delete that case which is wrong (because max here is 6.05) sample_3[-c(195), ] sample_3_without195<- sample_3[-c(195), ] summary(sample_3_without195) describe(sample_3_without195) #change Female to female sample_3_cleaned <- sample_3_without195 %>% mutate(sex = droplevels(replace(sex, sex == "Female", "female"))) summary(sample_3_cleaned) describe(sample_3_cleaned) # checking data 4 summary(sample_4) describe(sample_4) # asign hospital as a group factor sample_3_cleaned = sample_3_cleaned %>% mutate(hospital = factor(hospital)) # Building random intercept model: mod_rnd_int_3 = lmer(pain ~ sex + age + STAI_trait + pain_cat + cortisol_serum + mindfulness + (1|hospital), data = sample_3_cleaned) # coefficients and the confidence intervals of the coefficients: coef_table(mod_rnd_int_3) summary(mod_rnd_int_3) confint(mod_rnd_int_3) require(lmfor) merMod(mod_rnd_int_3) # doesn't work require(sjstats) std_beta(mod_rnd_int_3) sample_3_cleaned = sample_3_cleaned %>% mutate(resid = residuals(mod_rnd_int_3)) # We also create a copy of our data object and save the residuals in a variable we call resid. 
## ASSUMPTIONS OF THE MODEL: library(psych) # for pairs.panels library(tidyverse) # for tidy code and ggplot library(influence.ME) # for influence (this will also load the lme4 package) library(lattice) # for qqmath library(lme4) # for mixed models library(lmerTest) library(insight) # Report the variance components for the fixed effects, the random intercept, #and the residuals (from the model on data file 3)” get_variance_fixed(mod_rnd_int_3) get_variance_intercept(mod_rnd_int_3) get_variance_residual(mod_rnd_int_3) ### 1 # Outliers influence_observation = influence(mod_rnd_int_3, obs = T)$alt.fixed # this can take a minute or so influence_group = influence(mod_rnd_int_3, group = "hospital")$alt.fixed data_plot_inflience = as_tibble(influence_group) %>% gather(colnames(influence_group), value = coefficient, key = predictor) data_plot_inflience %>% ggplot() + aes(x = 1, y = coefficient, group = predictor) + geom_violin() + facet_wrap( ~ predictor, scales = "free") # so what does it mean? 
### 2 # Normality openGraph() qqmath(mod_rnd_int_3, id=0.05) qqmath(ranef(mod_rnd_int_3), col="lightcoral") ### 3 # Linearity openGraph() plot(mod_rnd_int_3, arg = "pearson", col = "mediumvioletred") # residuals for sex + age + STAI_trait + pain_cat + cortisol_serum + mindfulness plot(cars) par(mfrow=c(2,2)) plot(cars) dev.off() sample_3_cleaned %>% ggplot() + aes(x = pain, y = resid, col = "aquamarine3") + geom_point() sample_3_cleaned %>% ggplot() + aes(x = sex, y = resid, col = "aquamarine3") + geom_point() sample_3_cleaned %>% ggplot() + aes(x = age, y = resid, col = "aquamarine3") + geom_point() sample_3_cleaned %>% ggplot() + aes(x = STAI_trait, y = resid, col = "aquamarine3") + geom_point() sample_3_cleaned %>% ggplot() + aes(x = pain_cat, y = resid, col = "aquamarine3") + geom_point() sample_3_cleaned %>% ggplot() + aes(x = cortisol_serum, y = resid, col = "aquamarine3") + geom_point() sample_3_cleaned %>% ggplot() + aes(x = mindfulness, y = resid, col = "aquamarine3") + geom_point() ### 4 # Homoscedasticity plot(mod_rnd_int_3, arg = "pearson") homosced_mod = lm(resid^2 ~ hospital, data = sample_3_cleaned) summary(homosced_mod) # Check the complete model F-test p-value. If it is < 0.05, # heteroscedasticity on the cluster level might be problematic. In my case p = 0.4242, so there should be no problems # and since p is sooo far from significance, it's not necessary to do the next step in exercise. # So we can continue with multicolinearity. ### 5 # Multicollinearity openGraph() pairs.panels(sample_3_cleaned[,c("pain", "sex", "age", "STAI_trait", "pain_cat","cortisol_serum", "mindfulness")], col = "red", lm = T) # if non of them is above 0.8, it's good. # Once the model is built, note the model coefficients and the confidence # intervals of the coefficients for all fixed effect predictors, # and compare them to the ones obtained in assignment 1. 
# confidence interval: confint(mod_rnd_int_3) coef(mod_rnd_int_3) # marginal R squared library(nlme) library(lme4) r2beta(mod_rnd_int_3, method = "nsj", data = sample_3_cleaned) # another way - marginal and conditional R squared: require(MuMIn) r.squaredGLMM(mod_rnd_int_3) ### ## equation regression: #y = 3,41 + 0,30*sexmale + (-0,06)*age + (-0,01)*STAI_trait + 0,08*pain_cat + 0,47 * cortisol_serum + (-0,18)*mindfulness View(sample_4) summary(sample_4) # there are some negative income values, but since we're not gonna use income, we can just leave it like that #negative household_income is weird but we will keep it since the models to not include the variable of household income and the other variables of those participants seemed fine #predict pain with equation of data file 3 on data file 4 pred_sample_4 <- predict(mod_rnd_int_3, newdata = sample_4, allow.new.levels =TRUE) pred_sample_4 #RSS: you have real values and predicted values, and you look how big is the difference RSS = sum((sample_4$pain - predict(mod_rnd_int_3, sample_4, allow.new.levels =TRUE))^2) #pred_data_sample_4)^2) RSS #There you look at the means. mod_mean <- lm(pain ~ 1, data = sample_3_cleaned) # if we want to compare, we put data 3 mod_mean TSS = sum((sample_3$pain - predict(mod_mean, sample_3_cleaned, allow.new.levels = TRUE))^2) TSS R2 = 1 - (RSS/TSS) R2 # R^2 the same model, but the variance which is explained on the data set 4 #we compare it with marginal and conditional R2 of the model. 
#new linear model: based on the variable that has the highest value from the old model, we make a new model: mod_rnd_int_3 # we see that cortisol_serum has the highest value #linear mixed model mod_rnd_int_4 = lmer(pain ~ cortisol_serum +( cortisol_serum | hospital), data = sample_3_cleaned) mod_rnd_int_4 #plotting pred_slope = predict(mod_rnd_int_4) #how cortisol predicts pain in each hospital openGraph() sample_3_cleaned %>% ggplot() + aes(y = pain, x = cortisol_serum, group = hospital) + geom_point(aes(color = hospital), size = 4) + geom_line(color = "red", aes(y = pred_slope, x = cortisol_serum)) + facet_wrap(~hospital, ncol = 2) ###### # exploring clustering in the data (this is last step?): sample_3_cleaned %>% ggplot() + aes(y = pain, x = age, STAI_trait, pain_cat, cortisol_serum, cortisol_saliva, mindfulness, weight) + geom_point(aes(color = hospital), size = 10) + geom_smooth(method = "lm", se = F) int_plot = sample_3_cleaned %>% ggplot() + aes(y = pain, x = age, STAI_trait, pain_cat, cortisol_serum, cortisol_saliva, mindfulness, weight, color = hospital) + geom_point(size = 4) + geom_smooth(method = "lm", se = F, fullrange = TRUE) int_plot openGraph() # this is just something: ######################################################################################################################################## ######################################################################################################################################## # ## Check the dataset # In the following section we visualize the change of average pain over time (using geom_point), # with the confidence interval of the mean estimate included (using geom_errorbar). # (in the code below we first calculate the means and confidence intervals before plugging them in to ggplot) # change sex, ID and hospital into numeric variables? 
home_assignment_data_3$sex <- as.numeric(as.factor(home_assignment_data_3$sex)) home_assignment_data_3$hospital <- as.numeric(as.factor(home_assignment_data_3$hospital)) # designate which are the repeated varibales ?? which ones ?? repeated_variables = c ("sex", "age", "STAI_trait", "pain_cat", "cortisol_serum", "mindfulness", "weight") # fit a random intercept model including the random intercept of hospital-ID, # and the fixed effect predictors you used in assignment 1. mod_rep_int = lmer(pain ~ sex +age + STAI_trait + pain_cat + cortisol_serum + mindfulness + weight + (1|hospital), data = home_assignment_data_3) ######################## FROM EXERCISE 17 ################### mod_rnd_int = lmer(pain ~ sex +age + STAI_trait + pain_cat + cortisol_serum + mindfulness + weight + (1|hospital), data = home_assignment_data_3)
/HA Q3 Zoltan.R
no_license
LinaMarija/PSYP13
R
false
false
8,936
r
require(psych) require(car) require(lmtest) require(sandwich) require(boot) require(tidyverse) require(cAIC4) require(r2glmm) require(lme4) require(lmerTest) require(influence.ME) require(lattice) source("GraphPlot.R") sample_3 <- read.csv("https://raw.githubusercontent.com/kekecsz/PSYP13_Data_analysis_class-2019/master/home_sample_3.csv") sample_4 <- read.csv("https://raw.githubusercontent.com/kekecsz/PSYP13_Data_analysis_class-2019/master/home_sample_4.csv") View(sample_3) summary(sample_3) # change mindfulness - delete that case which is wrong (because max here is 6.05) sample_3[-c(195), ] sample_3_without195<- sample_3[-c(195), ] summary(sample_3_without195) describe(sample_3_without195) #change Female to female sample_3_cleaned <- sample_3_without195 %>% mutate(sex = droplevels(replace(sex, sex == "Female", "female"))) summary(sample_3_cleaned) describe(sample_3_cleaned) # checking data 4 summary(sample_4) describe(sample_4) # asign hospital as a group factor sample_3_cleaned = sample_3_cleaned %>% mutate(hospital = factor(hospital)) # Building random intercept model: mod_rnd_int_3 = lmer(pain ~ sex + age + STAI_trait + pain_cat + cortisol_serum + mindfulness + (1|hospital), data = sample_3_cleaned) # coefficients and the confidence intervals of the coefficients: coef_table(mod_rnd_int_3) summary(mod_rnd_int_3) confint(mod_rnd_int_3) require(lmfor) merMod(mod_rnd_int_3) # doesn't work require(sjstats) std_beta(mod_rnd_int_3) sample_3_cleaned = sample_3_cleaned %>% mutate(resid = residuals(mod_rnd_int_3)) # We also create a copy of our data object and save the residuals in a variable we call resid. 
## ASSUMPTIONS OF THE MODEL: library(psych) # for pairs.panels library(tidyverse) # for tidy code and ggplot library(influence.ME) # for influence (this will also load the lme4 package) library(lattice) # for qqmath library(lme4) # for mixed models library(lmerTest) library(insight) # Report the variance components for the fixed effects, the random intercept, #and the residuals (from the model on data file 3)” get_variance_fixed(mod_rnd_int_3) get_variance_intercept(mod_rnd_int_3) get_variance_residual(mod_rnd_int_3) ### 1 # Outliers influence_observation = influence(mod_rnd_int_3, obs = T)$alt.fixed # this can take a minute or so influence_group = influence(mod_rnd_int_3, group = "hospital")$alt.fixed data_plot_inflience = as_tibble(influence_group) %>% gather(colnames(influence_group), value = coefficient, key = predictor) data_plot_inflience %>% ggplot() + aes(x = 1, y = coefficient, group = predictor) + geom_violin() + facet_wrap( ~ predictor, scales = "free") # so what does it mean? 
### 2 # Normality openGraph() qqmath(mod_rnd_int_3, id=0.05) qqmath(ranef(mod_rnd_int_3), col="lightcoral") ### 3 # Linearity openGraph() plot(mod_rnd_int_3, arg = "pearson", col = "mediumvioletred") # residuals for sex + age + STAI_trait + pain_cat + cortisol_serum + mindfulness plot(cars) par(mfrow=c(2,2)) plot(cars) dev.off() sample_3_cleaned %>% ggplot() + aes(x = pain, y = resid, col = "aquamarine3") + geom_point() sample_3_cleaned %>% ggplot() + aes(x = sex, y = resid, col = "aquamarine3") + geom_point() sample_3_cleaned %>% ggplot() + aes(x = age, y = resid, col = "aquamarine3") + geom_point() sample_3_cleaned %>% ggplot() + aes(x = STAI_trait, y = resid, col = "aquamarine3") + geom_point() sample_3_cleaned %>% ggplot() + aes(x = pain_cat, y = resid, col = "aquamarine3") + geom_point() sample_3_cleaned %>% ggplot() + aes(x = cortisol_serum, y = resid, col = "aquamarine3") + geom_point() sample_3_cleaned %>% ggplot() + aes(x = mindfulness, y = resid, col = "aquamarine3") + geom_point() ### 4 # Homoscedasticity plot(mod_rnd_int_3, arg = "pearson") homosced_mod = lm(resid^2 ~ hospital, data = sample_3_cleaned) summary(homosced_mod) # Check the complete model F-test p-value. If it is < 0.05, # heteroscedasticity on the cluster level might be problematic. In my case p = 0.4242, so there should be no problems # and since p is sooo far from significance, it's not necessary to do the next step in exercise. # So we can continue with multicolinearity. ### 5 # Multicollinearity openGraph() pairs.panels(sample_3_cleaned[,c("pain", "sex", "age", "STAI_trait", "pain_cat","cortisol_serum", "mindfulness")], col = "red", lm = T) # if non of them is above 0.8, it's good. # Once the model is built, note the model coefficients and the confidence # intervals of the coefficients for all fixed effect predictors, # and compare them to the ones obtained in assignment 1. 
# confidence interval: confint(mod_rnd_int_3) coef(mod_rnd_int_3) # marginal R squared library(nlme) library(lme4) r2beta(mod_rnd_int_3, method = "nsj", data = sample_3_cleaned) # another way - marginal and conditional R squared: require(MuMIn) r.squaredGLMM(mod_rnd_int_3) ### ## equation regression: #y = 3,41 + 0,30*sexmale + (-0,06)*age + (-0,01)*STAI_trait + 0,08*pain_cat + 0,47 * cortisol_serum + (-0,18)*mindfulness View(sample_4) summary(sample_4) # there are some negative income values, but since we're not gonna use income, we can just leave it like that #negative household_income is weird but we will keep it since the models to not include the variable of household income and the other variables of those participants seemed fine #predict pain with equation of data file 3 on data file 4 pred_sample_4 <- predict(mod_rnd_int_3, newdata = sample_4, allow.new.levels =TRUE) pred_sample_4 #RSS: you have real values and predicted values, and you look how big is the difference RSS = sum((sample_4$pain - predict(mod_rnd_int_3, sample_4, allow.new.levels =TRUE))^2) #pred_data_sample_4)^2) RSS #There you look at the means. mod_mean <- lm(pain ~ 1, data = sample_3_cleaned) # if we want to compare, we put data 3 mod_mean TSS = sum((sample_3$pain - predict(mod_mean, sample_3_cleaned, allow.new.levels = TRUE))^2) TSS R2 = 1 - (RSS/TSS) R2 # R^2 the same model, but the variance which is explained on the data set 4 #we compare it with marginal and conditional R2 of the model. 
#new linear model: based on the variable that has the highest value from the old model, we make a new model: mod_rnd_int_3 # we see that cortisol_serum has the highest value #linear mixed model mod_rnd_int_4 = lmer(pain ~ cortisol_serum +( cortisol_serum | hospital), data = sample_3_cleaned) mod_rnd_int_4 #plotting pred_slope = predict(mod_rnd_int_4) #how cortisol predicts pain in each hospital openGraph() sample_3_cleaned %>% ggplot() + aes(y = pain, x = cortisol_serum, group = hospital) + geom_point(aes(color = hospital), size = 4) + geom_line(color = "red", aes(y = pred_slope, x = cortisol_serum)) + facet_wrap(~hospital, ncol = 2) ###### # exploring clustering in the data (this is last step?): sample_3_cleaned %>% ggplot() + aes(y = pain, x = age, STAI_trait, pain_cat, cortisol_serum, cortisol_saliva, mindfulness, weight) + geom_point(aes(color = hospital), size = 10) + geom_smooth(method = "lm", se = F) int_plot = sample_3_cleaned %>% ggplot() + aes(y = pain, x = age, STAI_trait, pain_cat, cortisol_serum, cortisol_saliva, mindfulness, weight, color = hospital) + geom_point(size = 4) + geom_smooth(method = "lm", se = F, fullrange = TRUE) int_plot openGraph() # this is just something: ######################################################################################################################################## ######################################################################################################################################## # ## Check the dataset # In the following section we visualize the change of average pain over time (using geom_point), # with the confidence interval of the mean estimate included (using geom_errorbar). # (in the code below we first calculate the means and confidence intervals before plugging them in to ggplot) # change sex, ID and hospital into numeric variables? 
home_assignment_data_3$sex <- as.numeric(as.factor(home_assignment_data_3$sex)) home_assignment_data_3$hospital <- as.numeric(as.factor(home_assignment_data_3$hospital)) # designate which are the repeated varibales ?? which ones ?? repeated_variables = c ("sex", "age", "STAI_trait", "pain_cat", "cortisol_serum", "mindfulness", "weight") # fit a random intercept model including the random intercept of hospital-ID, # and the fixed effect predictors you used in assignment 1. mod_rep_int = lmer(pain ~ sex +age + STAI_trait + pain_cat + cortisol_serum + mindfulness + weight + (1|hospital), data = home_assignment_data_3) ######################## FROM EXERCISE 17 ################### mod_rnd_int = lmer(pain ~ sex +age + STAI_trait + pain_cat + cortisol_serum + mindfulness + weight + (1|hospital), data = home_assignment_data_3)
VoteGermany2013 <- data.frame(Party=c( "CDU/CSU", "SPD", "LINKE","GRUENE"), Result=c(311,193,64,63)) seats <- function(N,M, r0=2.5){ radii <- seq(r0, 1, len=M) counts <- numeric(M) pts = do.call(rbind, lapply(1:M, function(i){ counts[i] <<- round(N*radii[i]/sum(radii[i:M])) theta <- seq(0, pi, len = counts[i]) N <<- N - counts[i] data.frame(x=radii[i]*cos(theta), y=radii[i]*sin(theta), r=i, theta=theta) } ) ) pts = pts[order(-pts$theta,-pts$r),] pts } election <- function(seats, counts){ stopifnot(sum(counts)==nrow(seats)) seats$party = rep(1:length(counts),counts) seats } layout = seats(631,16) result = election(layout, VoteGermany2013$Result) # no overall majority!!! plot(result$x, result$y, col=result$party,pch=19, asp=1)
/plotter_elec.R
no_license
goyanedelv/chile-elections-2017
R
false
false
916
r
VoteGermany2013 <- data.frame(Party=c( "CDU/CSU", "SPD", "LINKE","GRUENE"), Result=c(311,193,64,63)) seats <- function(N,M, r0=2.5){ radii <- seq(r0, 1, len=M) counts <- numeric(M) pts = do.call(rbind, lapply(1:M, function(i){ counts[i] <<- round(N*radii[i]/sum(radii[i:M])) theta <- seq(0, pi, len = counts[i]) N <<- N - counts[i] data.frame(x=radii[i]*cos(theta), y=radii[i]*sin(theta), r=i, theta=theta) } ) ) pts = pts[order(-pts$theta,-pts$r),] pts } election <- function(seats, counts){ stopifnot(sum(counts)==nrow(seats)) seats$party = rep(1:length(counts),counts) seats } layout = seats(631,16) result = election(layout, VoteGermany2013$Result) # no overall majority!!! plot(result$x, result$y, col=result$party,pch=19, asp=1)
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/events.R \name{details_type_event} \alias{details_type_event} \title{Autogenerated documentation for event type} \usage{ details_type_event() } \description{ Autogenerated documentation for event type } \keyword{internal}
/man/details_type_event.Rd
no_license
cran/IBMPopSim
R
false
true
300
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/events.R \name{details_type_event} \alias{details_type_event} \title{Autogenerated documentation for event type} \usage{ details_type_event() } \description{ Autogenerated documentation for event type } \keyword{internal}
library(readr) library(DEoptimR) set.seed(1234) rm(list=ls()) winSize <- 90 df <- as.data.frame(read_csv("~/Dropbox/Francois2/PPP L1=2 L2=2 iter=500 lag=6 SR= 0.537 .csv")) colnames(df)[113:123] plot(df$TR, type="l") colnames(df)[113:123] df[(winSize+1):nrow(df),113:123] allCoefs <- c() for(i in 113:123) { allCoefs <- c(allCoefs, df[(winSize+1):nrow(df),i]) } plot(density(allCoefs)) plot(density(df[(winSize+1):nrow(df), "DC"])) plot(((winSize+1):nrow(df)),df$ParaPP[(winSize+1):nrow(df)], type="l") plot(((winSize+1):nrow(df)),df$rPPP[(winSize+1):nrow(df)], type="l") plot(((winSize+1):nrow(df)),df$wEK[(winSize+1):nrow(df)], type="l") colnames(df)[124:132] for(t in (winSize+1):nrow(df)) { print(sum(df[t, 124:132])) }
/analysis.R
no_license
turzes/PPPcodes
R
false
false
768
r
library(readr) library(DEoptimR) set.seed(1234) rm(list=ls()) winSize <- 90 df <- as.data.frame(read_csv("~/Dropbox/Francois2/PPP L1=2 L2=2 iter=500 lag=6 SR= 0.537 .csv")) colnames(df)[113:123] plot(df$TR, type="l") colnames(df)[113:123] df[(winSize+1):nrow(df),113:123] allCoefs <- c() for(i in 113:123) { allCoefs <- c(allCoefs, df[(winSize+1):nrow(df),i]) } plot(density(allCoefs)) plot(density(df[(winSize+1):nrow(df), "DC"])) plot(((winSize+1):nrow(df)),df$ParaPP[(winSize+1):nrow(df)], type="l") plot(((winSize+1):nrow(df)),df$rPPP[(winSize+1):nrow(df)], type="l") plot(((winSize+1):nrow(df)),df$wEK[(winSize+1):nrow(df)], type="l") colnames(df)[124:132] for(t in (winSize+1):nrow(df)) { print(sum(df[t, 124:132])) }
#' Title testMat
#'
#' This function is used when the user needs to specify the integration
#' limits for a selected peak. Each thermogram in the input list is first
#' validated/standardised with checkmat(), the traces for all heating rates
#' are stacked into one long table, the strongest peak of each trace is
#' located with pracma::findpeaks(), the detected integration limits are then
#' overridden by the user-supplied l.lim/r.lim, and a running integral (ri)
#' plus its derivative column (dadt) are appended per rate.
#'
#' @param a list of data tables of the checked thermograms using checkmat,
#'   obtained at different rates
#' @param l.lim left limit of the running integral (row index; default 1)
#' @param r.lim right limit of the running integral; when NULL it defaults to
#'   the number of rows of the first thermogram in \code{a}
#' @param toselect vector passed to checkmat() as \code{selected}, mapping
#'   the input columns onto the layout checkmat expects
#'
#' @return data table ready to be used by all the methods for kinetic
#'   analysis included in the package
#' @export
#' @import data.table pracma
#' @examples
#' require(data.table)
#' npoints=1000
#' x=seq(1,npoints)
#' y=(dnorm(x, mean=npoints/2, sd=npoints/10))
#' x=seq(1,1000)
#' x2=seq(200,500,length.out=1000)
#' dat=data.frame(x,x2,y)
#' colnames(dat) <- c("time.seconds", "temperature.s","heat.flow")
#' dat=data.table(dat)
#' dat2=dat
#' dat$rates=20
#' dat2$rates=50
#' toTest=list(dat,dat2)
#' tested=testMat(toTest)
testMat<-function(a, l.lim=1, r.lim=NULL,toselect=c(0,1,2,0,0,0,3,4) ){
  # NULL-bind column names used in data.table NSE below, to silence
  # "no visible binding" notes from R CMD check.
  heat.flow<-id<-rate<-time.minutes<-T.deg<-NULL
  # Default right limit: the full length of the first thermogram.
  if(is.null(r.lim)){r.lim<-NROW(a[[1]])}
  # Validate/standardise every thermogram, then rebuild each as a data.table.
  a.check<-lapply(seq(1,length(a)), function(x) checkmat(data.frame(a[[x]]),selected=toselect) )
  a.dt <-lapply(seq(1,length(a)), function(x) data.table(data.frame(a.check[[x]])))
  # Stack all rates into one long table; 'rate' mirrors the id column.
  b<-rbindlist(a.dt)
  b$rate<-b$id
  # Locate (up to) the two strongest peaks of each trace, strongest first.
  a.peaks <- b[,list(res.list = list(findpeaks(heat.flow,sortstr=TRUE,npeaks=2))),by=id]
  a.peaks$rate<-a.peaks$id
  # Keep only the strongest peak (row 1 of each findpeaks result).
  ref.peak=1
  a.peaks <- data.table(data.table(a.peaks$rate),rbindlist((lapply(a.peaks$res.list, function(x) data.table(t(x[ref.peak,]))))))
  colnames(a.peaks)<- c("rate","peak.value","ind.max","left.lim","right.lim")
  a.peaks.old <-a.peaks
  # Override the automatically detected limits with the user-supplied ones.
  a.peaks$left.lim=l.lim
  a.peaks$right.lim=r.lim
  # Running integral of heat flow vs time for each rate within the limits;
  # ri() is a package helper — keep only its 'ds' component.
  a.mat<- lapply(unique(b$rate),function(x) ri(b[b$rate==x]$time.seconds,b[b$rate==x]$heat.flow,a.peaks[rate==x]))
  a.mat <- lapply(a.mat, function(x) x$ds)
  ap<- data.table(b,rbindlist(a.mat))
  # Drop helper columns returned by ri(), then add the derivative of the
  # running integral per rate (presumably da/dt — computed by dadx()).
  ap[,c("x","y"):=NULL]
  ap[,c("dadt"):=dadx(time.minutes,ri),by=list(rate)]
  # Column aliases expected by the downstream kinetic-analysis methods.
  ap$id_cycle=ap$id
  ap$time.minutes.zero<-ap$time.minutes
  ap$time.seconds.zero<-ap$time.seconds
  ap$y.loess=ap$heat.flow
  return(ap)
}
/R/testMat.R
no_license
sere3s/takos
R
false
false
2,138
r
#' Title testMat
#'
#' This function is used when the user needs to specify the integration
#' limits for a selected peak. Each thermogram in the input list is first
#' validated/standardised with checkmat(), the traces for all heating rates
#' are stacked into one long table, the strongest peak of each trace is
#' located with pracma::findpeaks(), the detected integration limits are then
#' overridden by the user-supplied l.lim/r.lim, and a running integral (ri)
#' plus its derivative column (dadt) are appended per rate.
#'
#' @param a list of data tables of the checked thermograms using checkmat,
#'   obtained at different rates
#' @param l.lim left limit of the running integral (row index; default 1)
#' @param r.lim right limit of the running integral; when NULL it defaults to
#'   the number of rows of the first thermogram in \code{a}
#' @param toselect vector passed to checkmat() as \code{selected}, mapping
#'   the input columns onto the layout checkmat expects
#'
#' @return data table ready to be used by all the methods for kinetic
#'   analysis included in the package
#' @export
#' @import data.table pracma
#' @examples
#' require(data.table)
#' npoints=1000
#' x=seq(1,npoints)
#' y=(dnorm(x, mean=npoints/2, sd=npoints/10))
#' x=seq(1,1000)
#' x2=seq(200,500,length.out=1000)
#' dat=data.frame(x,x2,y)
#' colnames(dat) <- c("time.seconds", "temperature.s","heat.flow")
#' dat=data.table(dat)
#' dat2=dat
#' dat$rates=20
#' dat2$rates=50
#' toTest=list(dat,dat2)
#' tested=testMat(toTest)
testMat<-function(a, l.lim=1, r.lim=NULL,toselect=c(0,1,2,0,0,0,3,4) ){
  # NULL-bind column names used in data.table NSE below, to silence
  # "no visible binding" notes from R CMD check.
  heat.flow<-id<-rate<-time.minutes<-T.deg<-NULL
  # Default right limit: the full length of the first thermogram.
  if(is.null(r.lim)){r.lim<-NROW(a[[1]])}
  # Validate/standardise every thermogram, then rebuild each as a data.table.
  a.check<-lapply(seq(1,length(a)), function(x) checkmat(data.frame(a[[x]]),selected=toselect) )
  a.dt <-lapply(seq(1,length(a)), function(x) data.table(data.frame(a.check[[x]])))
  # Stack all rates into one long table; 'rate' mirrors the id column.
  b<-rbindlist(a.dt)
  b$rate<-b$id
  # Locate (up to) the two strongest peaks of each trace, strongest first.
  a.peaks <- b[,list(res.list = list(findpeaks(heat.flow,sortstr=TRUE,npeaks=2))),by=id]
  a.peaks$rate<-a.peaks$id
  # Keep only the strongest peak (row 1 of each findpeaks result).
  ref.peak=1
  a.peaks <- data.table(data.table(a.peaks$rate),rbindlist((lapply(a.peaks$res.list, function(x) data.table(t(x[ref.peak,]))))))
  colnames(a.peaks)<- c("rate","peak.value","ind.max","left.lim","right.lim")
  a.peaks.old <-a.peaks
  # Override the automatically detected limits with the user-supplied ones.
  a.peaks$left.lim=l.lim
  a.peaks$right.lim=r.lim
  # Running integral of heat flow vs time for each rate within the limits;
  # ri() is a package helper — keep only its 'ds' component.
  a.mat<- lapply(unique(b$rate),function(x) ri(b[b$rate==x]$time.seconds,b[b$rate==x]$heat.flow,a.peaks[rate==x]))
  a.mat <- lapply(a.mat, function(x) x$ds)
  ap<- data.table(b,rbindlist(a.mat))
  # Drop helper columns returned by ri(), then add the derivative of the
  # running integral per rate (presumably da/dt — computed by dadx()).
  ap[,c("x","y"):=NULL]
  ap[,c("dadt"):=dadx(time.minutes,ri),by=list(rate)]
  # Column aliases expected by the downstream kinetic-analysis methods.
  ap$id_cycle=ap$id
  ap$time.minutes.zero<-ap$time.minutes
  ap$time.seconds.zero<-ap$time.seconds
  ap$y.loess=ap$heat.flow
  return(ap)
}
## v0.5 brings more date and time support
## Parse the TOML example file and print the resulting R list.
print(RcppTOML::tomlparse("dates_and_times.toml"))
/data/genthat_extracted_code/RcppTOML/tests/dates_and_times.R
no_license
surayaaramli/typeRrh
R
false
false
93
r
## v0.5 brings more date and time support
## Parse the TOML example file and print the resulting R list.
print(RcppTOML::tomlparse("dates_and_times.toml"))
#' Extract column locations from SPSS file syntax
#'
#' Parses the fixed-width column-specification section of an SPSS syntax
#' file into a table of variable names, start/end positions and types, and
#' writes the result to data/intermediate/labels/ as a CSV.
#'
#' @param dataset_labels character vector of lines from the SPSS syntax
#'   file (as returned by readLines)
#' @param file_out_prefix prefix used for the output CSV file name
#' @param variables_line_start first index of the column-location block
#' @param variables_line_end last index of the column-location block
#'
#' @return a tibble with one row per variable: name, numeric start/end
#'   positions and type
process_column_locations = function(dataset_labels, file_out_prefix, variables_line_start, variables_line_end){

  column_locations = dataset_labels[variables_line_start:variables_line_end]

  df_out = data.frame(column_locations = column_locations,
                      stringsAsFactors = FALSE)

  # Each line looks like "NAME  start - end  TYPE"; split it apart with
  # whitespace-anchored regexes.
  df_out = df_out %>%
    mutate(variable_name = str_extract(column_locations, "[^\\s]+"),
           variable_start_position = str_extract(column_locations, "\\s[^\\s]+"),
           variable_end_position = str_extract(column_locations, "(?<=-\\s)[^\\s]+"),
           variable_type = str_extract(column_locations, "[^\\s]+$")
    ) %>%
    mutate(across(c(variable_start_position, variable_end_position), as.numeric)) %>%
    as.tbl()

  write_csv(df_out, glue('data/intermediate/labels/{file_out_prefix}_column_locations.csv'))

  return(df_out)
}

#' Extract value labels from SPSS file syntax
#'
#' Parses the VALUE LABELS section of an SPSS syntax file. Variables in that
#' section are given either as a whitespace-delimited list or as a range in
#' the form "x to y"; ranges are expanded against the column locations
#' extracted by process_column_locations(). Writes the result to
#' data/intermediate/labels/ as a CSV.
#'
#' @param dataset_labels character vector of lines from the SPSS syntax file
#' @param df_col_locations output of process_column_locations(), used to
#'   expand "x to y" variable ranges
#' @param file_out_prefix prefix used for the output CSV file name
#' @param value_labels_start first index of the value-labels block
#' @param value_labels_end last index of the value-labels block
#'
#' @return a tibble with one row per (variable, value) pair and its label
process_value_labels = function(dataset_labels, df_col_locations, file_out_prefix, value_labels_start, value_labels_end){

  value_labels = dataset_labels[(value_labels_start):value_labels_end]

  # Locations of start points for different variables: lines beginning with
  # "/" open a new variable's label list.
  value_starts = c(1, which(str_detect(value_labels, "^\\/")))
  value_ends = c(lead(value_starts)[1:(length(value_starts)-1)], value_labels_end - value_labels_start)

  df_value_label_positions = data.frame(variable = value_starts,
                                        labels_start = value_starts+1,
                                        labels_end = value_ends-1)

  # Extract the labels for each value: first token is the value, the final
  # quoted string is the label.
  df_value_labels = pmap(list(df_value_label_positions$variable,
                              df_value_label_positions$labels_start,
                              df_value_label_positions$labels_end),
                         function(x,y,z){
                           data.frame(variable = value_labels[x],
                                      variable_level_labels = value_labels[y:z],
                                      stringsAsFactors = FALSE) %>%
                             mutate(values = str_extract(variable_level_labels, '([^\\s]+)')) %>%
                             mutate(labels = str_extract(variable_level_labels, '\\"([^\\"]+)\\"(\\s)*$'))
                         }) %>%
    bind_rows() %>%
    as.tbl()

  # Labels are either a whitespace delimited list, or a range in the form
  # 'x to y'.
  df_value_labels = df_value_labels %>%
    mutate(variable = str_replace_all(variable, '/', '')) %>%
    mutate(variable_to_from = str_extract_all(variable, "[^\\s]+\\sto\\s[^\\s]+")) %>%
    mutate(variable = str_extract_all(variable, "[^\\s]+")) %>%
    mutate(variable_from = map(variable_to_from, function(x){
      str_extract(x, "^[^\\s]+")
    }),
    variable_to = map(variable_to_from, function(x){
      str_extract(x, "[^\\s]+$")
    }))

  # Extract the range variables: expand each "from to to" pair into every
  # variable whose start position lies between the endpoints.
  df_var_to_from = df_value_labels %>%
    filter(map(variable_to_from, length) > 0) %>%
    mutate(variables_to_from = map2(variable_from, variable_to, function(x, y){

      variables_start = df_col_locations %>%
        filter(variable_name %in% x) %>%
        pull(variable_start_position)

      variables_end = df_col_locations %>%
        filter(variable_name %in% y) %>%
        pull(variable_start_position)

      variable_ranges = map2(variables_start, variables_end, function(x, y){
        df_col_locations %>%
          filter(variable_start_position >= x,
                 variable_start_position <= y) %>%
          pull(variable_name)
      }) %>%
        reduce(c)

      return(variable_ranges)
    }))

  # Extract the space delimited variables.
  df_value_labels = df_value_labels %>%
    filter(map(variable_to_from, length) == 0) %>%
    select(variable, values, labels) %>%
    unnest()

  df_var_to_from = df_var_to_from %>%
    select(variable = variables_to_from, values, labels) %>%
    unnest()

  df_value_labels = df_value_labels %>%
    bind_rows(df_var_to_from)

  # Clean up output: strip slashes and quotation marks, trim whitespace.
  df_value_labels = df_value_labels %>%
    mutate(variable = str_replace(variable, "\\/", "")) %>%
    mutate(across(where(is.character), ~str_replace_all(., "\"", "") %>% trimws()))

  write_csv(df_value_labels, glue('data/intermediate/labels/{file_out_prefix}_value_labels.csv'))

  return(df_value_labels)
}

#' Extract variable-level labels from SPSS file syntax
#'
#' Parses the VARIABLE LABELS section, pairing each variable name with its
#' quoted label, and writes the result to data/intermediate/labels/ as a CSV.
#'
#' @param dataset_labels character vector of lines from the SPSS syntax file
#' @param file_out_prefix prefix used for the output CSV file name
#' @param variables_labels_start first index of the variable-labels block
#' @param variables_labels_end last index of the variable-labels block
#'
#' @return a tibble with columns variable_name and variable_label
process_variable_labels = function(dataset_labels, file_out_prefix, variables_labels_start, variables_labels_end){

  variable_labels = dataset_labels[variables_labels_start:variables_labels_end]

  df_out = data.frame(variable_labels = variable_labels,
                      stringsAsFactors = FALSE)

  # Name is everything before the first quote; the label is the quoted rest.
  df_out = df_out %>%
    mutate(variable_name = str_extract(variable_labels, '[^\\"]+'),
           variable_label = str_extract(variable_labels, '\\"(.)+')) %>%
    mutate(across(where(is.character), trimws)) %>%
    # Remove quotation marks around labels
    mutate(across(where(is.character), function(x){
      str_replace(x, '^\\"', '') %>%
        str_replace('\\"$', '')
    })) %>%
    select(-variable_labels) %>%
    as.tbl()

  write_csv(df_out, glue('data/intermediate/labels/{file_out_prefix}_variable_labels.csv'))

  return(df_out)
}

#' Driver: extract all label metadata from one SPSS syntax file
#'
#' Reads the syntax file and runs the three extractors above; each writes
#' its own CSV as a side effect. The line/index arguments delimit the three
#' sections within the file and must be determined by inspecting it.
process_spss_labels = function(file_in, file_out_prefix, variables_line_start, variables_line_end, variables_labels_start, variables_labels_end, value_labels_start, value_labels_end){

  # Read column locations in raw dataset
  dataset_labels = readLines(file_in)

  df_col_locations = process_column_locations(dataset_labels, file_out_prefix, variables_line_start, variables_line_end)

  # Read variable names
  df_var_names = process_variable_labels(dataset_labels, file_out_prefix, variables_labels_start, variables_labels_end)

  # Read variable level labels
  df_value_labels = process_value_labels(dataset_labels, df_col_locations, file_out_prefix, value_labels_start, value_labels_end)
}
/R/process-data-labels.R
no_license
sam-harvey/PISA-2012
R
false
false
7,818
r
#' Extract column locations from SPSS file syntax
#'
#' Parses the fixed-width column-specification section of an SPSS syntax
#' file into a table of variable names, start/end positions and types, and
#' writes the result to data/intermediate/labels/ as a CSV.
#'
#' @param dataset_labels character vector of lines from the SPSS syntax
#'   file (as returned by readLines)
#' @param file_out_prefix prefix used for the output CSV file name
#' @param variables_line_start first index of the column-location block
#' @param variables_line_end last index of the column-location block
#'
#' @return a tibble with one row per variable: name, numeric start/end
#'   positions and type
process_column_locations = function(dataset_labels, file_out_prefix, variables_line_start, variables_line_end){

  column_locations = dataset_labels[variables_line_start:variables_line_end]

  df_out = data.frame(column_locations = column_locations,
                      stringsAsFactors = FALSE)

  # Each line looks like "NAME  start - end  TYPE"; split it apart with
  # whitespace-anchored regexes.
  df_out = df_out %>%
    mutate(variable_name = str_extract(column_locations, "[^\\s]+"),
           variable_start_position = str_extract(column_locations, "\\s[^\\s]+"),
           variable_end_position = str_extract(column_locations, "(?<=-\\s)[^\\s]+"),
           variable_type = str_extract(column_locations, "[^\\s]+$")
    ) %>%
    mutate(across(c(variable_start_position, variable_end_position), as.numeric)) %>%
    as.tbl()

  write_csv(df_out, glue('data/intermediate/labels/{file_out_prefix}_column_locations.csv'))

  return(df_out)
}

#' Extract value labels from SPSS file syntax
#'
#' Parses the VALUE LABELS section of an SPSS syntax file. Variables in that
#' section are given either as a whitespace-delimited list or as a range in
#' the form "x to y"; ranges are expanded against the column locations
#' extracted by process_column_locations(). Writes the result to
#' data/intermediate/labels/ as a CSV.
#'
#' @param dataset_labels character vector of lines from the SPSS syntax file
#' @param df_col_locations output of process_column_locations(), used to
#'   expand "x to y" variable ranges
#' @param file_out_prefix prefix used for the output CSV file name
#' @param value_labels_start first index of the value-labels block
#' @param value_labels_end last index of the value-labels block
#'
#' @return a tibble with one row per (variable, value) pair and its label
process_value_labels = function(dataset_labels, df_col_locations, file_out_prefix, value_labels_start, value_labels_end){

  value_labels = dataset_labels[(value_labels_start):value_labels_end]

  # Locations of start points for different variables: lines beginning with
  # "/" open a new variable's label list.
  value_starts = c(1, which(str_detect(value_labels, "^\\/")))
  value_ends = c(lead(value_starts)[1:(length(value_starts)-1)], value_labels_end - value_labels_start)

  df_value_label_positions = data.frame(variable = value_starts,
                                        labels_start = value_starts+1,
                                        labels_end = value_ends-1)

  # Extract the labels for each value: first token is the value, the final
  # quoted string is the label.
  df_value_labels = pmap(list(df_value_label_positions$variable,
                              df_value_label_positions$labels_start,
                              df_value_label_positions$labels_end),
                         function(x,y,z){
                           data.frame(variable = value_labels[x],
                                      variable_level_labels = value_labels[y:z],
                                      stringsAsFactors = FALSE) %>%
                             mutate(values = str_extract(variable_level_labels, '([^\\s]+)')) %>%
                             mutate(labels = str_extract(variable_level_labels, '\\"([^\\"]+)\\"(\\s)*$'))
                         }) %>%
    bind_rows() %>%
    as.tbl()

  # Labels are either a whitespace delimited list, or a range in the form
  # 'x to y'.
  df_value_labels = df_value_labels %>%
    mutate(variable = str_replace_all(variable, '/', '')) %>%
    mutate(variable_to_from = str_extract_all(variable, "[^\\s]+\\sto\\s[^\\s]+")) %>%
    mutate(variable = str_extract_all(variable, "[^\\s]+")) %>%
    mutate(variable_from = map(variable_to_from, function(x){
      str_extract(x, "^[^\\s]+")
    }),
    variable_to = map(variable_to_from, function(x){
      str_extract(x, "[^\\s]+$")
    }))

  # Extract the range variables: expand each "from to to" pair into every
  # variable whose start position lies between the endpoints.
  df_var_to_from = df_value_labels %>%
    filter(map(variable_to_from, length) > 0) %>%
    mutate(variables_to_from = map2(variable_from, variable_to, function(x, y){

      variables_start = df_col_locations %>%
        filter(variable_name %in% x) %>%
        pull(variable_start_position)

      variables_end = df_col_locations %>%
        filter(variable_name %in% y) %>%
        pull(variable_start_position)

      variable_ranges = map2(variables_start, variables_end, function(x, y){
        df_col_locations %>%
          filter(variable_start_position >= x,
                 variable_start_position <= y) %>%
          pull(variable_name)
      }) %>%
        reduce(c)

      return(variable_ranges)
    }))

  # Extract the space delimited variables.
  df_value_labels = df_value_labels %>%
    filter(map(variable_to_from, length) == 0) %>%
    select(variable, values, labels) %>%
    unnest()

  df_var_to_from = df_var_to_from %>%
    select(variable = variables_to_from, values, labels) %>%
    unnest()

  df_value_labels = df_value_labels %>%
    bind_rows(df_var_to_from)

  # Clean up output: strip slashes and quotation marks, trim whitespace.
  df_value_labels = df_value_labels %>%
    mutate(variable = str_replace(variable, "\\/", "")) %>%
    mutate(across(where(is.character), ~str_replace_all(., "\"", "") %>% trimws()))

  write_csv(df_value_labels, glue('data/intermediate/labels/{file_out_prefix}_value_labels.csv'))

  return(df_value_labels)
}

#' Extract variable-level labels from SPSS file syntax
#'
#' Parses the VARIABLE LABELS section, pairing each variable name with its
#' quoted label, and writes the result to data/intermediate/labels/ as a CSV.
#'
#' @param dataset_labels character vector of lines from the SPSS syntax file
#' @param file_out_prefix prefix used for the output CSV file name
#' @param variables_labels_start first index of the variable-labels block
#' @param variables_labels_end last index of the variable-labels block
#'
#' @return a tibble with columns variable_name and variable_label
process_variable_labels = function(dataset_labels, file_out_prefix, variables_labels_start, variables_labels_end){

  variable_labels = dataset_labels[variables_labels_start:variables_labels_end]

  df_out = data.frame(variable_labels = variable_labels,
                      stringsAsFactors = FALSE)

  # Name is everything before the first quote; the label is the quoted rest.
  df_out = df_out %>%
    mutate(variable_name = str_extract(variable_labels, '[^\\"]+'),
           variable_label = str_extract(variable_labels, '\\"(.)+')) %>%
    mutate(across(where(is.character), trimws)) %>%
    # Remove quotation marks around labels
    mutate(across(where(is.character), function(x){
      str_replace(x, '^\\"', '') %>%
        str_replace('\\"$', '')
    })) %>%
    select(-variable_labels) %>%
    as.tbl()

  write_csv(df_out, glue('data/intermediate/labels/{file_out_prefix}_variable_labels.csv'))

  return(df_out)
}

#' Driver: extract all label metadata from one SPSS syntax file
#'
#' Reads the syntax file and runs the three extractors above; each writes
#' its own CSV as a side effect. The line/index arguments delimit the three
#' sections within the file and must be determined by inspecting it.
process_spss_labels = function(file_in, file_out_prefix, variables_line_start, variables_line_end, variables_labels_start, variables_labels_end, value_labels_start, value_labels_end){

  # Read column locations in raw dataset
  dataset_labels = readLines(file_in)

  df_col_locations = process_column_locations(dataset_labels, file_out_prefix, variables_line_start, variables_line_end)

  # Read variable names
  df_var_names = process_variable_labels(dataset_labels, file_out_prefix, variables_labels_start, variables_labels_end)

  # Read variable level labels
  df_value_labels = process_value_labels(dataset_labels, df_col_locations, file_out_prefix, value_labels_start, value_labels_end)
}
###########################################################################
# Introductory IPM exercises
###########################################################################
# Here, we introduce an extremely simple IPM for the long-lived alpine
# perennial plant Dracocephalum austriacum. The analyses minimize the
# complexity of the R code in order to make the model transparent. We
# calculate basic population statistics, including population growth rate,
# sensitivity, elasticity, and passage times, throughout to check the
# plausibility of the model.

# OVERVIEW — the document is organized as follows:
#  A. plots for data exploration
#  B. parameter estimation for regressions
#  C. build vital rate functions
#  D. make a kernel
#  E. basic analyses

# set up directory structure. we'll place a temp folder on your desktop to
# store some plots
if(!file.exists('~/Desktop/Temp_IPM_output')) dir.create('~/Desktop/Temp_IPM_output')
# set this as the working directory
setwd('~/Desktop/Temp_IPM_output')

# -------------------------------------------------------------------
# A. plots for data exploration
# -------------------------------------------------------------------
# read in data. you'll have to set your own file path here. the data is a
# .csv file included in the same folder as this file
d=read.csv(
  '~/Dropbox/Projects/ipms/teachIPMs/2_Beginner/Intro_to_IPMs/Exercises/Intro_to_IPMs_Exercises_Data.csv')

# you'll notice that adults are stored at the beginning of the data set with
# values for size (measured in 2001) and sizeNext (measured in 2002). the
# number of seeds and flowering status were measured in 2001.
head(d)
# and that new recruits are stored at the end and were only observed in the
# second survey (2002)
tail(d)

# make some plots - figure 1
par(mfrow=c(2,2),mar=c(4,4,2,1))
plot(d$size,jitter(d$surv),main='Survival') # jittered to see easier
plot(d$size,d$sizeNext,main='Growth/Shrinkage/Stasis')
plot(d$size,d$fec.seed,main='Seeds') # jittered to see easier
hist(d$sizeNext[is.na(d$size)],main='Size of Recruits')
# note that we use NAs for missing or non-applicable data rather than 0 or
# some other indicator because this causes them to be automatically excluded
# from r's regression functions.

# the model we'll build takes the general form
#   n[z',t+1]=Integral{ ( P[z',z]+ F[z',z] ) n[z,t] dz}
# where z' is size at time t+1 and z is size at time t. P is the
# growth/survival kernel and F is the fecundity kernel. these will be
# decomposed further as:
#   P[z',z]=growth[z',z] * survival[z]
#   F[z',z]=flowering_probability[z] * #seeds_per_individual[z] *
#           establishment_probability * recruit_size[z']
# we'll begin building the regressions for each of these vital rates in the
# next section.

# -------------------------------------------------------------------
# B. regressions — build regressions for vital rate functions
# -------------------------------------------------------------------
# 0. set up parameter list for regressions
# this sets up a list of the model parameters. these parameters will be
# estimated and recorded below.
params=data.frame(
  surv.int=NA,
  surv.slope=NA,
  growth.int=NA,
  growth.slope=NA,
  growth.sd=NA,
  seed.int=NA,
  seed.slope=NA,
  recruit.size.mean=NA,
  recruit.size.sd=NA,
  establishment.prob=NA
)

# 1. survival regression (logistic: survival probability vs size)
surv.reg=glm(surv~size,data=d,family=binomial())
summary(surv.reg)
params$surv.int=coefficients(surv.reg)[1]
params$surv.slope=coefficients(surv.reg)[2]

# 2. growth regression (Gaussian: sizeNext vs size)
growth.reg=lm(sizeNext~size,data=d)
summary(growth.reg)
params$growth.int=coefficients(growth.reg)[1]
params$growth.slope=coefficients(growth.reg)[2]
params$growth.sd=sd(resid(growth.reg))

# 3. seeds regression
# note that we are just pooling all individuals into this regression
# regardless of whether they flowered or not. a later exercise will be to
# explicitly model flowering probability. i.e. for the moment we're taking
# flowering_probability[z]=1.
seed.reg=glm(fec.seed~size,data=d,family=poisson())
summary(seed.reg)
params$seed.int=coefficients(seed.reg)[1]
params$seed.slope=coefficients(seed.reg)[2]

# 4. size distribution of recruits
# in the dataframe, recruits are those individuals who have a value for
# sizeNext but not for size
params$recruit.size.mean=mean(d$sizeNext[is.na(d$size)])
params$recruit.size.sd=sd(d$sizeNext[is.na(d$size)])

# 5. establishment probability
# these data represent a single year's worth of data, hence establishment
# probability can be estimated by dividing the number of observed recruits
# by the number of seeds. the growth/survival measurements were taken in
# year t while the recruit sizes were measured in year t+1.
params$establishment.prob=sum(is.na(d$size))/sum(d$fec.seed,na.rm=TRUE)

# 6. plot the models over the data - figure 2
# you might want to save this plot for later reference. to do that, just
# uncomment the lines below, which work on a mac. not sure if pc's will like
# the system command. this saves to your working directory.
# pdf('Intro_IPM_data.pdf',h=8,w=12)
par(mfrow=c(2,2),mar=c(4,4,2,1))
xx=seq(0,8,by=.01)
plot(d$size,d$sizeNext,main='Growth/Shrinkage/Stasis')
lines(xx,predict(growth.reg,data.frame(size=xx)),col='red',lwd=3)
plot(d$size,jitter(d$surv),main='Survival') # jittered to see easier
lines(xx,predict(surv.reg,data.frame(size=xx),type='response'),
      col='red',lwd=3)
plot(d$size,d$fec.seed,main='Seeds') # jittered to see easier
lines(xx,predict(seed.reg,data.frame(size=xx),type='response'),
      col='red',lwd=3)
hist(d$sizeNext[is.na(d$size)],main='Size of Recruits',freq=FALSE)
lines(xx,dnorm(xx,params$recruit.size.mean,params$recruit.size.sd),
      col='red',lwd=3)
# dev.off()
# system('open Intro_IPM_data.pdf')

# -------------------------------------------------------------------
# C. build vital rate functions
# -------------------------------------------------------------------
# each of the functions below represents one or more of the vital rates.
# these functions are used to build the IPM and use output (the
# coefficients) from the regressions developed above.
# these functions represent the modeler's decision about how to decompose
# the life cycle. in this very simple example, we model (1) survival,
# (2) growth, (3) seed number, (4) the seedling size distribution and
# (5) the establishment probability. these last three functions are combined
# in the model for fecundity below. in practice, we'd need to decide what
# regressions to build in part B in advance, but it's easier to digest this
# section after you've seen the regressions above, so fortunately we built
# all the right regressions already. in practice, sections B and C are
# iterative — one might inform the other.

## vital rate functions

# 1. probability of surviving (inverse-logit of the survival regression)
s.x=function(x,params) {
  u=exp(params$surv.int+params$surv.slope*x)
  return(u/(1+u))
}

# 2. growth function (normal density of sizeNext around the regression mean)
g.yx=function(xp,x,params) {
  dnorm(xp,mean=params$growth.int+params$growth.slope*x,sd=params$growth.sd)
}

# 3. reproduction function
# establishment probability * recruit-size density * expected seed count
f.yx=function(xp,x,params) {
  params$establishment.prob*
    dnorm(xp,mean=params$recruit.size.mean,sd=params$recruit.size.sd)*
    exp(params$seed.int+params$seed.slope*x)
}

# -------------------------------------------------------------------
# D. make a kernel
# -------------------------------------------------------------------
# In this section, we combine the vital rate functions to build the
# discretized IPM kernel, which we'll call the IPM matrix. These steps are
# performed behind the scenes in the IPMpack package for convenience, but we
# show them here for illustrative purposes. To integrate, we begin by
# defining the boundary points (b; the edges of the cells defining the
# matrix), mesh points (y; the centers of the cells and the points at which
# the matrix is evaluated for the midpoint rule of numerical integration),
# and step size (h; the widths of the cells). The integration limits
# (min.size and max.size) span the range of sizes observed in the data set,
# and then some.

# 1. boundary points b, mesh points y and step size h
# integration limits - these limits span the range of sizes observed in the
# data set, and then some.
min.size=.9*min(c(d$size,d$sizeNext),na.rm=T)
max.size=1.1*max(c(d$size,d$sizeNext),na.rm=T)
# number of cells in the discretized kernel
n=100
# boundary points (the edges of the cells defining the kernel)
b=min.size+c(0:n)*(max.size-min.size)/n
# mesh points (midpoints of the cells)
y=0.5*(b[1:n]+b[2:(n+1)])
# width of the cells
h=y[2]-y[1]

# 2. make component kernels
# Next, we make the IPM matrices. The function outer() evaluates the matrix
# at all pairwise combinations of the two vectors y and y and returns
# matrices representing the kernel components for growth and fecundity,
# respectively. For the numerical integration, we're using the midpoint rule
# (the simplest option) to estimate the area under a curve. The midpoint
# rule assumes a rectangular approximation. The heights of the rectangles
# are given by the outer function and the width of the rectangles is h.
par(mfrow=c(2,3))
G=h*outer(y,y,g.yx,params=params) # growth kernel
image(y,y,t(G),main='growth kernel') # plot it
S=s.x(y,params=params) # survival
plot(y,S,type='l',main='survival') # plot it
P=G # placeholder; redefine P on the next line
for(i in 1:n) P[,i]=G[,i]*S[i] # growth/survival kernel
image(y,y,t(P),main='survival/growth kernel') # plot it
abline(0,1,lwd=3) # plot 1:1, which represents stasis
F=h*outer(y,y,f.yx,params=params) # reproduction kernel
image(y,y,t(F),main='fecundity kernel') # plot it
K=P+F # full kernel
image(y,y,t(K),main='full kernel') # plot it
# sometimes it's hard to see both — the fecundity part of the kernel swamps
# the growth/survival part, so here's a plotting trick to level out the
# kernel
image(y,y,t(K)^.3,main='full kernel^0.3') # plot it

# so what did the outer function just do to make the kernel? the way we've
# used it, it takes all pairwise combinations of the first argument (y) with
# the second (also y), and does some operation to those combinations. here
# are a few examples:
tmp1=outer(y,y,'+')
image(y,y,t(tmp1))
tmp2=outer(y,y,'*')
image(y,y,t(tmp2))
tmp3=outer(y,exp(y),'+')
image(y,exp(y),t(tmp3))
# in the IPM case, the operation that we want is not a simple '+' or '*',
# but the slightly more complicated growth function, given in the third
# argument (g.yx). to use the growth function, we need to specify the
# regression coefficients, so the fourth argument supplies the list 'params'
# that we defined above, which are passed to g.yx.

# -------------------------------------------------------------------
# E. basic analyses
# -------------------------------------------------------------------
# Analyses usually begin with obtaining the eigenvalues and eigenvectors
# (v-left; w-right) of the matrix. These are useful for understanding the
# asymptotic dynamics. The dominant eigenvalue gives the asymptotic
# population growth rate (lam). The right eigenvector gives the stable stage
# distribution and the left eigenvector gives the reproductive value, when
# normalized.

# 1. get lambda, v, w
(lam=Re(eigen(K)$values[1])) # should be 1.013391
w.eigen=Re(eigen(K)$vectors[,1])
stable.dist=w.eigen/sum(w.eigen)
v.eigen=Re(eigen(t(K))$vectors[,1])
repro.val=v.eigen/v.eigen[1]

# The eigen-things can be combined to obtain the sensitivity and elasticity
# matrices.
# 2. compute elasticity and sensitivity matrices
v.dot.w=sum(stable.dist*repro.val)*h
sens=outer(repro.val,stable.dist)/v.dot.w
elas=matrix(as.vector(sens)*as.vector(K)/lam,nrow=n)

# 3. plot results
# you might want to save this plot for comparison with later versions. to do
# that, just uncomment the lines below, which work on a mac. not sure if
# pc's will like the system command. this saves to your working directory.
# pdf('IPM_output_v1.pdf',h=8,w=12)
par(mfrow=c(2,3))
image(y,y,t(K), xlab="Size (t)",ylab="Size (t+1)",col=topo.colors(100),
      main="Kernel")
contour(y,y,t(K), add = TRUE, drawlabels = TRUE)
plot(y,stable.dist,xlab="Size",type="l",main="Stable size distribution")
plot(y,repro.val,xlab="Size",type="l",main="Reproductive values")
image(y,y,t(elas),xlab="Size (t)",ylab="Size (t+1)",main="Elasticity")
image(y,y,t(sens),xlab="Size (t)",ylab="Size (t+1)",
      main="Sensitivity")
# dev.off()
# system('open IPM_output_v1.pdf')
/1_19_16_Rstudio_Intro/Intro_to_IPMs_Exercises.r
no_license
cmerow/Stats_Lunch
R
false
false
13,428
r
########################################################################### ########################################################################### ########################################################################### # Introductory IPM exercises ########################################################################### ########################################################################### ########################################################################### #Here, we introduce an extremely simple IPM for a the long- live alpine perennial plant Dracocephalum austriacum. The analyses minimize the complexity of the R code in order to make the model transparent. We calculate basic population statistics, including population growth rate, sensitivity, elasticity, and passage times throughout to check the plausibility of the model. # OVERVIEW # the document is organized as follows # A. plots for data exploration # B. parameter estimation for regressions # C. build vital rate functions # D. make a kernel # E. basic analyses # set up directory structure. we'll place a temp folder on your desktop to store some plots if(!file.exists('~/Desktop/Temp_IPM_output')) dir.create('~/Desktop/Temp_IPM_output') # set this as the working directory setwd('~/Desktop/Temp_IPM_output') # ------------------------------------------------------------------- # ------------------------------------------------------------------- # A. plots for data exploration # ------------------------------------------------------------------- # ------------------------------------------------------------------- # read in data. you'll have to set your own file path here. 
the data is a .csv file included in the same folder as this file d=read.csv( '~/Dropbox/Projects/ipms/teachIPMs/2_Beginner/Intro_to_IPMs/Exercises/Intro_to_IPMs_Exercises_Data.csv') # you'll notice that adults are stored at the beginning of the data set with values for size (measured in 2001) and sizeNext (measured in 2002). the number of seeds and flowering status were measured in 2001. head(d) # and that new recruits are stored at the end and were only observed in the second survey (2002) tail(d) # make some plots - figure 1 par(mfrow=c(2,2),mar=c(4,4,2,1)) plot(d$size,jitter(d$surv),main='Survival') # jittered to see easier plot(d$size,d$sizeNext,main='Growth/Shrinkage/Stasis') plot(d$size,d$fec.seed,main='Seeds') # jittered to see easier hist(d$sizeNext[is.na(d$size)],main='Size of Recruits') # note that we use NAs for missing or non-applicable data rather than 0 or some other indicator because this causes them to be automatically excluded from r's regression functions. # the model we'll build takes the general form # n[z',t+1]=Integral{ ( P[z',z]+ F[z',z] ) n[z,t] dz} # where z' is size at time t+1 and z is size at time t. P is the growth/survival kernel and F is the fecundity kernel. these will be decomposed further as: # P[z',z]=growth[z',z] * survival[z] # F[z',z]=flowering_probability[z] * #seeds_per_individual[z] * establishment_probability * recruit_size[z'] # we'll begin building the regressions for each of these vital rates in the next section. # ------------------------------------------------------------------- # ------------------------------------------------------------------- # B. regressions build regressions for vital rate functions # ------------------------------------------------------------------- # ------------------------------------------------------------------- # 0. set up parameter list for regressions # this sets up a list of the model parameters. these parameters will be estimated and recorded below. 
params=data.frame( surv.int=NA, surv.slope=NA, growth.int=NA, growth.slope=NA, growth.sd=NA, seed.int=NA, seed.slope=NA, recruit.size.mean=NA, recruit.size.sd=NA, establishment.prob=NA ) # 1. survival regression surv.reg=glm(surv~size,data=d,family=binomial()) summary(surv.reg) params$surv.int=coefficients(surv.reg)[1] params$surv.slope=coefficients(surv.reg)[2] # 2. growth regression growth.reg=lm(sizeNext~size,data=d) summary(growth.reg) params$growth.int=coefficients(growth.reg)[1] params$growth.slope=coefficients(growth.reg)[2] params$growth.sd=sd(resid(growth.reg)) # 3. seeds regression # note that we are just pooling all individuals into this regression regardless of whether they flowered or not. a later exercise will be to explicitly model flowering probability. i.e. for the moment we're taking flowering_probability[z]=1. seed.reg=glm(fec.seed~size,data=d,family=poisson()) summary(seed.reg) params$seed.int=coefficients(seed.reg)[1] params$seed.slope=coefficients(seed.reg)[2] # 4. size distribution of recruits # in the dataframe, recruits are those individuals who have a value for sizeNext but not for size params$recruit.size.mean=mean(d$sizeNext[is.na(d$size)]) params$recruit.size.sd=sd(d$sizeNext[is.na(d$size)]) # 5. establishment probability # these data represent a single year's worth of data, hence establishment probability can be estimated by dividing the number of observed recruits by the number of seeds. hence the growth/survival measurements were taken in year t which the recruit sizes were measured in year t+1. params$establishment.prob=sum(is.na(d$size))/sum(d$fec.seed,na.rm=TRUE) # 6. plot the models over the data - figure 2 # you might want to save this plot for later reference. to do that, just uncomment the lines below, which work on a mac. not sure if pc's will like the system command. this saves to your working directory. 
# pdf('Intro_IPM_data.pdf',h=8,w=12) par(mfrow=c(2,2),mar=c(4,4,2,1)) xx=seq(0,8,by=.01) plot(d$size,d$sizeNext,main='Growth/Shrinkage/Stasis') lines(xx,predict(growth.reg,data.frame(size=xx)),col='red',lwd=3) plot(d$size,jitter(d$surv),main='Survival') # jittered to see easier lines(xx,predict(surv.reg,data.frame(size=xx),type='response'), col='red',lwd=3) plot(d$size,d$fec.seed,main='Seeds') # jittered to see easier lines(xx,predict(seed.reg,data.frame(size=xx),type='response'), col='red',lwd=3) hist(d$sizeNext[is.na(d$size)],main='Size of Recruits',freq=FALSE) lines(xx,dnorm(xx,params$recruit.size.mean,params$recruit.size.sd), col='red',lwd=3) # dev.off() # system('open Intro_IPM_data.pdf') # ------------------------------------------------------------------- # ------------------------------------------------------------------- # C. build vital rate functions # ------------------------------------------------------------------- # ------------------------------------------------------------------- # each of the functions below represents one or more of the vital rates. these functions are used to build the IPM and use output (the coefficients) from the regressions developed above. # these functions represent the modeler's decision about how to decompose the life cycle. in this very simple example, we model (1) survival, (2) growth, (3) seed number, (4) the seedling size distribution and (5) the establishment probability. these last three functions are combined in the model for fecundity below. in practice, we'd need to decide what regressions to build in part B in advance, but it's easier to digest this section after you've seen the regressions above, so fortunately we built all the right regressions already. in practice, sections B and C are iterative one might inform one another. ## vital rate functions # 1. probability of surviving s.x=function(x,params) { u=exp(params$surv.int+params$surv.slope*x) return(u/(1+u)) } # 2. 
growth function g.yx=function(xp,x,params) { dnorm(xp,mean=params$growth.int+params$growth.slope*x,sd=params$growth.sd) } # 3. reproduction function f.yx=function(xp,x,params) { params$establishment.prob* dnorm(xp,mean=params$recruit.size.mean,sd=params$recruit.size.sd)* exp(params$seed.int+params$seed.slope*x) } # ------------------------------------------------------------------- # ------------------------------------------------------------------- # D. make a kernel # ------------------------------------------------------------------- # ------------------------------------------------------------------- # In this section, we combine the vital rate functions to build the discretized IPM kernel, which we’ll call the IPM matrix (e.g. shown in Fig. 2c in the main text).These steps are performed behind the scenes in the IPMpack package used in Appendices C-G for convenience, but we show them here for illustrative purposes. To integrate, we begin by defining the boundary points (b; the edges of the cells defining the matrix), mesh points (y; the centers of the cells defining the matrix and the points at which the matrix is evaluated for the midpoint rule of numerical integration), and step size (h; the widths of the cells). The integration limits (min.size and max.size) span the range of sizes observed in the data set, and then some. # 1. boundary points b, mesh points y and step size h # integration limits - these limits span the range of sizes observed in the data set, and then some. min.size=.9*min(c(d$size,d$sizeNext),na.rm=T) max.size=1.1*max(c(d$size,d$sizeNext),na.rm=T) # number of cells in the discretized kernel n=100 # boundary points (the edges of the cells defining the kernel) b=min.size+c(0:n)*(max.size-min.size)/n # mesh points (midpoints of the cells) y=0.5*(b[1:n]+b[2:(n+1)]) # width of the cells h=y[2]-y[1] # 2. make component kernels # Next, we make the IPM matrices. 
The function outer() evaluates the matrix at all pairwise combinations of the two vectors y and y and returns matrices representing the kernel components for growth and fecundity, respectively. For the numerical integration, we’re using the midpoint rule (the simplest option) estimate the area under a curve. The midpoint rule assumes a rectangular approximation. The heights of the rectangles are given by the outer function and the width of the rectangles is h. par(mfrow=c(2,3)) G=h*outer(y,y,g.yx,params=params) # growth kernel image(y,y,t(G),main='growth kernel') # plot it S=s.x(y,params=params) # survival plot(y,S,type='l',main='survival') # plot it P=G # placeholder;redefine P on the next line for(i in 1:n) P[,i]=G[,i]*S[i] # growth/survival kernel image(y,y,t(P),main='survival/growth kernel') # plot it abline(0,1,lwd=3) # plot 1:1, which represents stasis F=h*outer(y,y,f.yx,params=params) # reproduction kernel image(y,y,t(F),main='fecundity kernel') # plot it K=P+F # full kernel image(y,y,t(K),main='full kernel') # plot it # sometimes it's hard to see both the fecundity part of the kernel swamps the growth/survival part, so here's a plotting trick to level out the kernel image(y,y,t(K)^.3,main='full kernel^0.3') # plot it # so what did the outer function just do to make the kernel? the way we've used it, it takes all pairwise combinations of the the first argument (y) with the second (also y), and does some operation to those combinations. here are a few examples: tmp1=outer(y,y,'+') image(y,y,t(tmp1)) tmp2=outer(y,y,'*') image(y,y,t(tmp2)) tmp3=outer(y,exp(y),'+') image(y,exp(y),t(tmp3)) # in the IPM case, the operation that we want is not a simple '+' or '*', but the slightly more complicated growth function, given in the third argument (g.yx). to use the growth function, we need to specify the regression coefficients, so the fourth argument supplies the list 'params' that we defined above, which aer passed to g.yx. 
# ------------------------------------------------------------------- # ------------------------------------------------------------------- # E. basic analyses # ------------------------------------------------------------------- # ------------------------------------------------------------------- # Analyses usually begin with obtaining the eigenvalues (λ) and eigenvectors (v-left; w-right) of the matrix. These are useful for understanding the asymptotic dynamics. The dominant eigenvalue gives the asymptotic population growth rate (lam). The right eigenvector gives the stable stage distribution and the left eigenvector gives the reproductive value, when normalized. # 1. get lamda,v,w (lam=Re(eigen(K)$values[1])) # should be 1.013391 w.eigen=Re(eigen(K)$vectors[,1]) stable.dist=w.eigen/sum(w.eigen) v.eigen=Re(eigen(t(K))$vectors[,1]) repro.val=v.eigen/v.eigen[1] # The eigen-things can be combined to obtain the sensitivity and elasticity matrices. # 2. compute elasticity and sensitivity matrices v.dot.w=sum(stable.dist*repro.val)*h sens=outer(repro.val,stable.dist)/v.dot.w elas=matrix(as.vector(sens)*as.vector(K)/lam,nrow=n) # 3. plot results # you might want to save this plot for comparison with later versions. to do that, just uncomment the lines below, which work on a mac. not sure if pc's will like the system command. this saves to your working directory. # pdf('IPM_output_v1.pdf',h=8,w=12) par(mfrow=c(2,3)) image(y,y,t(K), xlab="Size (t)",ylab="Size (t+1)",col=topo.colors(100), main="Kernel") contour(y,y,t(K), add = TRUE, drawlabels = TRUE) plot(y,stable.dist,xlab="Size",type="l",main="Stable size distribution") plot(y,repro.val,xlab="Size",type="l",main="Reproductive values") image(y,y,t(elas),xlab="Size (t)",ylab="Size (t+1)",main="Elasticity") image(y,y,t(sens),xlab="Size (t)",ylab="Size (t+1)", main="Sensitivity") # dev.off() # system('open IPM_output_v1.pdf')
\name{dmr.linclas-package} \alias{dmr.linclas-package} \alias{dmr.linclas} \docType{package} \title{A package with example functions from the book Cichosz, P. (2015): Data Mining Algorithms: Explained Using R} \description{A package with example functions from Chapter 5 of the book Cichosz, P. (2015): Data Mining Algorithms: Explained Using R. See Appendix B or http://www.wiley.com/go/data_mining_algorithms for more details.} \details{See examples in Chapter 5.} \author{ Pawel Cichosz <p.cichosz@elka.pw.edu.pl> } \references{Cichosz, P. (2015): Data Mining Algorithms: Explained Using R. Wiley.} \keyword{package} \keyword{models} \seealso{ \code{\link[dmr.util:dmr.util-package]{dmr.util}} \code{\link[dmr.linreg:dmr.linreg-package]{dmr.linreg}} \code{\link[dmr.claseval:dmr.claseval-package]{dmr.claseval}} } \examples{ }
/man/dmr.linclas-package.Rd
no_license
42n4/dmr.linclas
R
false
false
839
rd
\name{dmr.linclas-package} \alias{dmr.linclas-package} \alias{dmr.linclas} \docType{package} \title{A package with example functions from the book Cichosz, P. (2015): Data Mining Algorithms: Explained Using R} \description{A package with example functions from Chapter 5 of the book Cichosz, P. (2015): Data Mining Algorithms: Explained Using R. See Appendix B or http://www.wiley.com/go/data_mining_algorithms for more details.} \details{See examples in Chapter 5.} \author{ Pawel Cichosz <p.cichosz@elka.pw.edu.pl> } \references{Cichosz, P. (2015): Data Mining Algorithms: Explained Using R. Wiley.} \keyword{package} \keyword{models} \seealso{ \code{\link[dmr.util:dmr.util-package]{dmr.util}} \code{\link[dmr.linreg:dmr.linreg-package]{dmr.linreg}} \code{\link[dmr.claseval:dmr.claseval-package]{dmr.claseval}} } \examples{ }
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/test.R \name{plot_top_losses} \alias{plot_top_losses} \title{Plot_top_losses} \usage{ plot_top_losses( interp, k, largest = TRUE, figsize = c(19.2, 10.8), ..., dpi = 90 ) } \arguments{ \item{interp}{interpretation object} \item{k}{number of images} \item{largest}{largest} \item{figsize}{plot size} \item{...}{additional parameters to pass} \item{dpi}{dots per inch} } \value{ None } \description{ Plot_top_losses } \examples{ \dontrun{ # get interperetation from learn object, the model. interp = ClassificationInterpretation_from_learner(learn) interp \%>\% plot_top_losses(k = 9, figsize = c(15,11)) } }
/man/plot_top_losses.Rd
permissive
yangxhcaf/fastai
R
false
true
706
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/test.R \name{plot_top_losses} \alias{plot_top_losses} \title{Plot_top_losses} \usage{ plot_top_losses( interp, k, largest = TRUE, figsize = c(19.2, 10.8), ..., dpi = 90 ) } \arguments{ \item{interp}{interpretation object} \item{k}{number of images} \item{largest}{largest} \item{figsize}{plot size} \item{...}{additional parameters to pass} \item{dpi}{dots per inch} } \value{ None } \description{ Plot_top_losses } \examples{ \dontrun{ # get interperetation from learn object, the model. interp = ClassificationInterpretation_from_learner(learn) interp \%>\% plot_top_losses(k = 9, figsize = c(15,11)) } }
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/nml_template_path.R \name{nml_template_path} \alias{nml_template_path} \title{Return the path to a current NML file template} \usage{ nml_template_path() } \description{ The NML file can change with updated versions of GLM. This returns a path to a valid NML template file matching the current GLM version. } \examples{ \dontrun{ file.edit(nml_template_path()) } } \author{ Luke Winslow, Jordan Read } \keyword{methods}
/man/nml_template_path.Rd
no_license
GLEON/GLM3r
R
false
true
500
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/nml_template_path.R \name{nml_template_path} \alias{nml_template_path} \title{Return the path to a current NML file template} \usage{ nml_template_path() } \description{ The NML file can change with updated versions of GLM. This returns a path to a valid NML template file matching the current GLM version. } \examples{ \dontrun{ file.edit(nml_template_path()) } } \author{ Luke Winslow, Jordan Read } \keyword{methods}
# Exercise 1: creating data frames

# Points the Seahawks scored in each of their first 4 games of the season
# (looked up from the published box scores)
seahawks_points_scored <- c(14, 35, 21, 22)

# Points scored against the Seahawks in the same 4 games
points_scored_against_seahawks <- c(22, 13, 40, 31)

# Assemble both vectors into a single data frame of game results; the column
# names are taken from the vector names
games <- data.frame(
  seahawks_points_scored,
  points_scored_against_seahawks,
  stringsAsFactors = FALSE
)

# Point differential per game (positive means the Seahawks outscored the
# opponent)
games$diff <- with(games, seahawks_points_scored - points_scored_against_seahawks)

# A game is won exactly when the point differential is positive
games$won <- games$diff > 0

# Label each game (row) with the name of the opponent played
opponent_names <- c("Redhawks", "Giants", "Panthers", "Patriots")
rownames(games) <- opponent_names

# Display the finished data frame
print(games)
/chapter-10-exercises/exercise-1/exercise.R
permissive
RamouJ/book-exercises
R
false
false
1,277
r
# Exercise 1: creating data frames

# Points the Seahawks scored in each of their first 4 games of the season
# (looked up from the published box scores)
seahawks_points_scored <- c(14, 35, 21, 22)

# Points scored against the Seahawks in the same 4 games
points_scored_against_seahawks <- c(22, 13, 40, 31)

# Assemble both vectors into a single data frame of game results; the column
# names are taken from the vector names
games <- data.frame(
  seahawks_points_scored,
  points_scored_against_seahawks,
  stringsAsFactors = FALSE
)

# Point differential per game (positive means the Seahawks outscored the
# opponent)
games$diff <- with(games, seahawks_points_scored - points_scored_against_seahawks)

# A game is won exactly when the point differential is positive
games$won <- games$diff > 0

# Label each game (row) with the name of the opponent played
opponent_names <- c("Redhawks", "Giants", "Panthers", "Patriots")
rownames(games) <- opponent_names

# Display the finished data frame
print(games)
% Generated by roxygen2 (4.1.1): do not edit by hand % Please edit documentation in R/evaluate-f-statistics.R \name{evaluate_f4} \alias{evaluate_f4} \title{Evaluates an \eqn{f_4} statistics in a given environment.} \usage{ evaluate_f4(graph, env, W, X, Y, Z) } \arguments{ \item{graph}{The admixture graph.} \item{env}{The environment containing the graph parameters.} \item{W}{First population/sample.} \item{X}{Second population/sample.} \item{Y}{Third population/sample.} \item{Z}{Fourth population/sample.} } \value{ The \eqn{f_4} value specified by the graph and the environment. } \description{ Evaluates an \eqn{f_4} statistics in a given environment. }
/man/evaluate_f4.Rd
no_license
KalleLeppala/admixture_graph
R
false
false
667
rd
% Generated by roxygen2 (4.1.1): do not edit by hand % Please edit documentation in R/evaluate-f-statistics.R \name{evaluate_f4} \alias{evaluate_f4} \title{Evaluates an \eqn{f_4} statistics in a given environment.} \usage{ evaluate_f4(graph, env, W, X, Y, Z) } \arguments{ \item{graph}{The admixture graph.} \item{env}{The environment containing the graph parameters.} \item{W}{First population/sample.} \item{X}{Second population/sample.} \item{Y}{Third population/sample.} \item{Z}{Fourth population/sample.} } \value{ The \eqn{f_4} value specified by the graph and the environment. } \description{ Evaluates an \eqn{f_4} statistics in a given environment. }
#' @name morphomap-package #' @docType package #' @aliases morphomap #' @title 2D and 3D cortical thickness maps and cross sectional geometry #' @author Antonio Profico, Luca Bondioli, Pasquale Raia, Paul O'Higgins, Damiano Marchi #' @description Tool to process long bone meshes (shape data, morphometric maps and cross-sectional geometry) #' @import Arothron #' @import lattice #' @import mgcv #' @import Rvcg #' @import Morpho #' @import rgl #' @import geometry #' @import colorRamps #' @import DescTools #' @import grDevices #' @import graphics #' @import rgdal #' @import utils #' @importFrom colorRamps blue2green2red #' @importFrom grDevices dev.off tiff #' @importFrom oce matrixSmooth #' @importFrom sp point.in.polygon CRS #' @importFrom raster raster flip writeRaster #' @importFrom graphics plot points polygon text #' @importFrom stats IQR quantile sd weighted.mean #' @importFrom utils setTxtProgressBar txtProgressBar NULL
/R/morphomap.R
no_license
AProfico/morphomap
R
false
false
967
r
#' @name morphomap-package #' @docType package #' @aliases morphomap #' @title 2D and 3D cortical thickness maps and cross sectional geometry #' @author Antonio Profico, Luca Bondioli, Pasquale Raia, Paul O'Higgins, Damiano Marchi #' @description Tool to process long bone meshes (shape data, morphometric maps and cross-sectional geometry) #' @import Arothron #' @import lattice #' @import mgcv #' @import Rvcg #' @import Morpho #' @import rgl #' @import geometry #' @import colorRamps #' @import DescTools #' @import grDevices #' @import graphics #' @import rgdal #' @import utils #' @importFrom colorRamps blue2green2red #' @importFrom grDevices dev.off tiff #' @importFrom oce matrixSmooth #' @importFrom sp point.in.polygon CRS #' @importFrom raster raster flip writeRaster #' @importFrom graphics plot points polygon text #' @importFrom stats IQR quantile sd weighted.mean #' @importFrom utils setTxtProgressBar txtProgressBar NULL
## Validation results from an autopls object

# R2 for an autopls model.  Re-classes the object as 'mvr' so the call falls
# through to the pls package's R2 method rather than dispatching back here.
#   estimate : which estimate to report (forwarded to pls::R2)
#   nc       : 'inherit' = use the latent vectors chosen for this model,
#              'all' = components 1..object$ncomp, or an explicit number
#   ic       : include the intercept ("component 0") in the output?
R2.autopls <- function (object, estimate, nc = 'inherit', ic = FALSE, ...)
{
  lv <- get.lv (object)      # number of latent vectors selected for the model
  class (object) <- 'mvr'    # delegate to the pls package from here on
  if (nc == 'inherit')
    out <- R2 (object, estimate, ncomp = lv, intercept = ic, ...)
  else if (nc == 'all')
    out <- R2 (object, estimate, ncomp = 1:object$ncomp, intercept = ic, ...)
  else
    out <- R2 (object, estimate, ncomp = nc, intercept = ic, ...)
  return (out)
}

# RMSEP for an autopls model; same delegation pattern and arguments as
# R2.autopls in this file.
RMSEP.autopls <- function (object, estimate, nc = 'inherit', ic = FALSE, ...)
{
  lv <- get.lv (object)      # number of latent vectors selected for the model
  class (object) <- 'mvr'    # delegate to the pls package from here on
  if (nc == 'inherit') {
    out <- RMSEP (object, estimate, ncomp = lv, intercept = ic, ...)
  } else if (nc == 'all')
    out <- RMSEP (object, estimate, ncomp = 1:object$ncomp, intercept = ic, ...)
  else
    out <- RMSEP (object, estimate, ncomp = nc, intercept = ic, ...)
  return (out)
}

## Jackknife test for an autopls object
# Forwards to the pls package's jack.test with either the fitted number of
# latent vectors (nc = 'inherit') or an explicitly supplied one.
jack.test.autopls <- function (object, nc = 'inherit')
{
  lv <- get.lv (object)
  class (object) <- 'mvr'
  if (nc == 'inherit') jack.test (object, ncomp = lv)
  else jack.test (object, ncomp = nc)
}

## Get validation results for all iterations and numbers of latent vectors
# Builds a max(ncomp) x niter matrix of R2 or RMSEP values, one column per
# model-selection iteration; cells beyond an iteration's component count
# remain NA.
#   method   : 'R2' or 'RMSEP' (selects which statistic is computed)
#   estimate : forwarded to R2()/RMSEP()
#   ic       : intercept flag, forwarded as well
metaval <- function (object, method, estimate, ic)
{
  niter <- length (object$metapls$lv.history)
  # collect the number of components fitted in each iteration
  ncomp <- vector ()
  for (i in 1:niter) ncomp <- c(ncomp, object$iterations [[i]] $ncomp)
  res <- matrix (NA, nrow = max (ncomp), ncol = niter)
  rownames (res) <- paste ('LV.', 1:max (ncomp), sep = '')
  colnames (res) <- paste ('Run.', 1:niter, sep = '')
  for (i in 1:niter)
  {
    # restore the model state of iteration i, then query all components
    tmp <- set.iter (object, i, verbose = FALSE)
    if (method == 'R2')
      res [1:ncomp [i], i] <- R2 (tmp, estimate = estimate, nc = 'all', ic = ic) $val
    if (method == 'RMSEP')
      res [1:ncomp [i], i] <- RMSEP (tmp, estimate = estimate, nc = 'all', ic = ic) $val
  }
  return (res)
}

# Repeated cross-validation: runs k independent rounds of s-fold CV on the
# model's predictor/response data, prints mean and sd of R2 and RMSE across
# rounds, and invisibly returns the full per-round results.
repeatedCV <- function (object, k = 100, segments = 4)
{
  pred <- object$model$X
  targ <- object$model$Y
  scaling <- object$metapls$scaling
  method <- object$method
  nlv <- get.lv (object)
  prep <- object$metapls$preprocessing
  ## Preprocessing if appropriate
  if (prep != 'none') pred <- prepro (pred, method = prep)
  ## Prepare input
  set <- data.frame (Y = targ, X = I (pred))
  r2vec <- rmsevec <- rep (NA, k)   # preallocate one slot per CV round
  for (i in 1:k) ## Outer loop
  {
    mod <- plsr (Y ~ X, data = set, scale = scaling, method = method,
      validation = 'CV', segments = segments, ncomp = nlv)
    # NOTE(review): 'nc =' here appears to rely on matching the 'ncomp'
    # argument of the R2/RMSEP call for a plain 'mvr' fit — confirm dispatch
    r2vec [i] <- R2 (mod, estimate = 'CV', nc = nlv, intercept = FALSE)$val
    rmsevec [i] <- RMSEP (mod, estimate = 'CV', nc = nlv, intercept = FALSE)$val
  }
  result <- list (call = sys.call (), R2.mean = mean (r2vec),
    R2.sd = sd (r2vec), RMSE.mean = mean (rmsevec),
    RMSE.sd = sd (rmsevec), RMSE = rmsevec, R2 = r2vec)
  ## Screen output
  cat (paste ('mean R2 in ', k, ' runs of ', segments, '-fold CV: ',
    format (mean (r2vec), digits = 3), sep = ''))
  cat (paste (' (sd: ', format (sd (r2vec), digits = 3), ')\n', sep = ''))
  cat (paste ('mean RMSE in ', k, ' runs of ', segments, '-fold CV: ',
    format (mean (rmsevec), digits = 3), sep = ''))
  cat (paste (' (sd: ', format (sd (rmsevec), digits = 3), ') \n', sep = ''))
  invisible (result)
}

# Cross-validation with user-defined segments: 'valist' is passed to plsr()
# as the 'segments' argument (presumably a list of row-index vectors, one per
# segment — confirm against callers).  Returns a named vector with the
# validation R2 and RMSE.
clusterCV <- function (object, valist)
{
  pred <- object$model$X
  targ <- object$model$Y
  scaling <- object$metapls$scaling
  method <- object$method
  nlv <- get.lv (object)
  prep <- object$metapls$preprocessing
  ## Preprocessing if appropriate
  if (prep != 'none') pred <- prepro (pred, method = prep)
  ## Prepare input
  # NOTE(review): 'set' is built here but the plsr() call below uses
  # targ ~ pred directly, so 'set' is unused in this function
  set <- data.frame (Y = targ, X = I (pred))
  mod <- plsr (targ ~ pred, ncomp = nlv, method = method, scale = scaling,
    validation = 'CV', segments = valist)
  r2 <- R2 (mod, estimate = 'CV', nc = nlv, intercept = FALSE)$val
  rmse <- RMSEP (mod, estimate = 'CV', nc = nlv, intercept = FALSE)$val
  res <- unlist (c(r2, rmse))
  names (res) <- c("R2val","RMSEval")
  return (res)
}
/autopls/R/autoplsVAL.R
no_license
ingted/R-Examples
R
false
false
4,356
r
## Validation results from an autopls object

# R2 for an autopls model.  Re-classes the object as 'mvr' so the call falls
# through to the pls package's R2 method rather than dispatching back here.
#   estimate : which estimate to report (forwarded to pls::R2)
#   nc       : 'inherit' = use the latent vectors chosen for this model,
#              'all' = components 1..object$ncomp, or an explicit number
#   ic       : include the intercept ("component 0") in the output?
R2.autopls <- function (object, estimate, nc = 'inherit', ic = FALSE, ...)
{
  lv <- get.lv (object)      # number of latent vectors selected for the model
  class (object) <- 'mvr'    # delegate to the pls package from here on
  if (nc == 'inherit')
    out <- R2 (object, estimate, ncomp = lv, intercept = ic, ...)
  else if (nc == 'all')
    out <- R2 (object, estimate, ncomp = 1:object$ncomp, intercept = ic, ...)
  else
    out <- R2 (object, estimate, ncomp = nc, intercept = ic, ...)
  return (out)
}

# RMSEP for an autopls model; same delegation pattern and arguments as
# R2.autopls in this file.
RMSEP.autopls <- function (object, estimate, nc = 'inherit', ic = FALSE, ...)
{
  lv <- get.lv (object)      # number of latent vectors selected for the model
  class (object) <- 'mvr'    # delegate to the pls package from here on
  if (nc == 'inherit') {
    out <- RMSEP (object, estimate, ncomp = lv, intercept = ic, ...)
  } else if (nc == 'all')
    out <- RMSEP (object, estimate, ncomp = 1:object$ncomp, intercept = ic, ...)
  else
    out <- RMSEP (object, estimate, ncomp = nc, intercept = ic, ...)
  return (out)
}

## Jackknife test for an autopls object
# Forwards to the pls package's jack.test with either the fitted number of
# latent vectors (nc = 'inherit') or an explicitly supplied one.
jack.test.autopls <- function (object, nc = 'inherit')
{
  lv <- get.lv (object)
  class (object) <- 'mvr'
  if (nc == 'inherit') jack.test (object, ncomp = lv)
  else jack.test (object, ncomp = nc)
}

## Get validation results for all iterations and numbers of latent vectors
# Builds a max(ncomp) x niter matrix of R2 or RMSEP values, one column per
# model-selection iteration; cells beyond an iteration's component count
# remain NA.
#   method   : 'R2' or 'RMSEP' (selects which statistic is computed)
#   estimate : forwarded to R2()/RMSEP()
#   ic       : intercept flag, forwarded as well
metaval <- function (object, method, estimate, ic)
{
  niter <- length (object$metapls$lv.history)
  # collect the number of components fitted in each iteration
  ncomp <- vector ()
  for (i in 1:niter) ncomp <- c(ncomp, object$iterations [[i]] $ncomp)
  res <- matrix (NA, nrow = max (ncomp), ncol = niter)
  rownames (res) <- paste ('LV.', 1:max (ncomp), sep = '')
  colnames (res) <- paste ('Run.', 1:niter, sep = '')
  for (i in 1:niter)
  {
    # restore the model state of iteration i, then query all components
    tmp <- set.iter (object, i, verbose = FALSE)
    if (method == 'R2')
      res [1:ncomp [i], i] <- R2 (tmp, estimate = estimate, nc = 'all', ic = ic) $val
    if (method == 'RMSEP')
      res [1:ncomp [i], i] <- RMSEP (tmp, estimate = estimate, nc = 'all', ic = ic) $val
  }
  return (res)
}

# Repeated cross-validation: runs k independent rounds of s-fold CV on the
# model's predictor/response data, prints mean and sd of R2 and RMSE across
# rounds, and invisibly returns the full per-round results.
repeatedCV <- function (object, k = 100, segments = 4)
{
  pred <- object$model$X
  targ <- object$model$Y
  scaling <- object$metapls$scaling
  method <- object$method
  nlv <- get.lv (object)
  prep <- object$metapls$preprocessing
  ## Preprocessing if appropriate
  if (prep != 'none') pred <- prepro (pred, method = prep)
  ## Prepare input
  set <- data.frame (Y = targ, X = I (pred))
  r2vec <- rmsevec <- rep (NA, k)   # preallocate one slot per CV round
  for (i in 1:k) ## Outer loop
  {
    mod <- plsr (Y ~ X, data = set, scale = scaling, method = method,
      validation = 'CV', segments = segments, ncomp = nlv)
    # NOTE(review): 'nc =' here appears to rely on matching the 'ncomp'
    # argument of the R2/RMSEP call for a plain 'mvr' fit — confirm dispatch
    r2vec [i] <- R2 (mod, estimate = 'CV', nc = nlv, intercept = FALSE)$val
    rmsevec [i] <- RMSEP (mod, estimate = 'CV', nc = nlv, intercept = FALSE)$val
  }
  result <- list (call = sys.call (), R2.mean = mean (r2vec),
    R2.sd = sd (r2vec), RMSE.mean = mean (rmsevec),
    RMSE.sd = sd (rmsevec), RMSE = rmsevec, R2 = r2vec)
  ## Screen output
  cat (paste ('mean R2 in ', k, ' runs of ', segments, '-fold CV: ',
    format (mean (r2vec), digits = 3), sep = ''))
  cat (paste (' (sd: ', format (sd (r2vec), digits = 3), ')\n', sep = ''))
  cat (paste ('mean RMSE in ', k, ' runs of ', segments, '-fold CV: ',
    format (mean (rmsevec), digits = 3), sep = ''))
  cat (paste (' (sd: ', format (sd (rmsevec), digits = 3), ') \n', sep = ''))
  invisible (result)
}

# Cross-validation with user-defined segments: 'valist' is passed to plsr()
# as the 'segments' argument (presumably a list of row-index vectors, one per
# segment — confirm against callers).  Returns a named vector with the
# validation R2 and RMSE.
clusterCV <- function (object, valist)
{
  pred <- object$model$X
  targ <- object$model$Y
  scaling <- object$metapls$scaling
  method <- object$method
  nlv <- get.lv (object)
  prep <- object$metapls$preprocessing
  ## Preprocessing if appropriate
  if (prep != 'none') pred <- prepro (pred, method = prep)
  ## Prepare input
  # NOTE(review): 'set' is built here but the plsr() call below uses
  # targ ~ pred directly, so 'set' is unused in this function
  set <- data.frame (Y = targ, X = I (pred))
  mod <- plsr (targ ~ pred, ncomp = nlv, method = method, scale = scaling,
    validation = 'CV', segments = valist)
  r2 <- R2 (mod, estimate = 'CV', nc = nlv, intercept = FALSE)$val
  rmse <- RMSEP (mod, estimate = 'CV', nc = nlv, intercept = FALSE)$val
  res <- unlist (c(r2, rmse))
  names (res) <- c("R2val","RMSEval")
  return (res)
}
/smoking_effect_predictions.R
no_license
paty-oliveira/SmokingEffectAdenocarcinoma
R
false
false
18,273
r
# Framingham heart study: logistic regression models for 10-year coronary
# heart disease (CHD) risk (Analytics Edge week 3 exercise script).
# NOTE(review): hard-coded absolute path — this script only runs as-is on the
# original author's machine.
setwd('/Users/ashiswin/Documents/School/Analytics Edge/Week 3/Heart Disease')
framing <- read.csv('framingham.csv')

# Keep period-1 observations with no prevalent CHD at baseline
heart1 <- subset(framing, PERIOD == 1 & PREVCHD == 0)

# Outcome: 1 if CHD occurred within 10 years (TIMECHD appears to be in days,
# hence the division by 365 — confirm against the data dictionary)
heart1$TENCHD <- as.integer((heart1$TIMECHD) / 365 <= 10)
table(heart1$TENCHD)
644 / (644 + 3596)   # baseline event rate implied by the table above
colnames(heart1)
# Keep the first 21 columns plus column 40 — presumably the baseline
# covariates and the freshly appended TENCHD; verify against colnames(heart1)
heart1 <- heart1[, c(1:21, 40)]

library(caTools)
set.seed(1)   # fixed seed so the split below is reproducible
# Split 65/35 while preserving the TENCHD class balance in both parts
spl <- sample.split(heart1$TENCHD, SplitRatio = 0.65)
head(spl)
training <- subset(heart1, spl == TRUE)
test <- subset(heart1, spl == FALSE)
table(training$TENCHD)
table(test$TENCHD)

# Model 1: logistic regression on all available predictors
model1 <- glm(TENCHD ~ ., data = training, family = "binomial")
summary(model1)

# Model 2: reduced logistic regression on five selected predictors
model2 <- glm(TENCHD ~ SEX + AGE + SYSBP + CIGPDAY + GLUCOSE,
  data = training, family = 'binomial')
summary(model2)

# Confusion matrix on the test set at a 0.5 probability threshold
predict2 <- predict(model2, newdata = test, type = "response")
table(predict2 >= 0.5, test$TENCHD)
/Week 3/Heart Disease/Heart Disease.R
no_license
ashiswin/TheAnalyticsEdge
R
false
false
831
r
# Framingham heart study: logistic regression models for 10-year coronary
# heart disease (CHD) risk (Analytics Edge week 3 exercise script).
# NOTE(review): hard-coded absolute path — this script only runs as-is on the
# original author's machine.
setwd('/Users/ashiswin/Documents/School/Analytics Edge/Week 3/Heart Disease')
framing <- read.csv('framingham.csv')

# Keep period-1 observations with no prevalent CHD at baseline
heart1 <- subset(framing, PERIOD == 1 & PREVCHD == 0)

# Outcome: 1 if CHD occurred within 10 years (TIMECHD appears to be in days,
# hence the division by 365 — confirm against the data dictionary)
heart1$TENCHD <- as.integer((heart1$TIMECHD) / 365 <= 10)
table(heart1$TENCHD)
644 / (644 + 3596)   # baseline event rate implied by the table above
colnames(heart1)
# Keep the first 21 columns plus column 40 — presumably the baseline
# covariates and the freshly appended TENCHD; verify against colnames(heart1)
heart1 <- heart1[, c(1:21, 40)]

library(caTools)
set.seed(1)   # fixed seed so the split below is reproducible
# Split 65/35 while preserving the TENCHD class balance in both parts
spl <- sample.split(heart1$TENCHD, SplitRatio = 0.65)
head(spl)
training <- subset(heart1, spl == TRUE)
test <- subset(heart1, spl == FALSE)
table(training$TENCHD)
table(test$TENCHD)

# Model 1: logistic regression on all available predictors
model1 <- glm(TENCHD ~ ., data = training, family = "binomial")
summary(model1)

# Model 2: reduced logistic regression on five selected predictors
model2 <- glm(TENCHD ~ SEX + AGE + SYSBP + CIGPDAY + GLUCOSE,
  data = training, family = 'binomial')
summary(model2)

# Confusion matrix on the test set at a 0.5 probability threshold
predict2 <- predict(model2, newdata = test, type = "response")
table(predict2 >= 0.5, test$TENCHD)
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/EPFR.r \name{sql.1mFloMo} \alias{sql.1mFloMo} \title{sql.1mFloMo} \usage{ sql.1mFloMo(x, y, n, w, h, u = "All") } \arguments{ \item{x}{= the YYYYMM for which you want data} \item{y}{= a string vector of factors to be computed, the last element of which is the type of fund used} \item{n}{= any of StockFlows/China/Japan/CSI300/Energy} \item{w}{= T/F depending on whether you are checking ftp} \item{h}{= breakdown filter (e.g. All/GeoId/DomicileId)} \item{u}{= share-class filter (one of All/Inst/Retail)} } \description{ Generates the SQL query to get the data for 1mFloMo for individual stocks } \seealso{ Other sql: \code{\link{sql.1dActWtTrend.Alloc}}, \code{\link{sql.1dActWtTrend.Final}}, \code{\link{sql.1dActWtTrend.Flow}}, \code{\link{sql.1dActWtTrend.select}}, \code{\link{sql.1dActWtTrend.topline.from}}, \code{\link{sql.1dActWtTrend.topline}}, \code{\link{sql.1dActWtTrend.underlying.basic}}, \code{\link{sql.1dActWtTrend.underlying}}, \code{\link{sql.1dActWtTrend}}, \code{\link{sql.1dFloMo.CountryId.List}}, \code{\link{sql.1dFloMo.FI}}, \code{\link{sql.1dFloMo.Rgn}}, \code{\link{sql.1dFloMo.Sec.topline}}, \code{\link{sql.1dFloMo.filter}}, \code{\link{sql.1dFloMo.grp}}, \code{\link{sql.1dFloMo.select.wrapper}}, \code{\link{sql.1dFloMo.select}}, \code{\link{sql.1dFloMo.underlying}}, \code{\link{sql.1dFloMoAggr}}, \code{\link{sql.1dFloMo}}, \code{\link{sql.1dFloTrend.Alloc.data}}, \code{\link{sql.1dFloTrend.Alloc.fetch}}, \code{\link{sql.1dFloTrend.Alloc.final}}, \code{\link{sql.1dFloTrend.Alloc.from}}, \code{\link{sql.1dFloTrend.Alloc.purge}}, \code{\link{sql.1dFloTrend.Alloc}}, \code{\link{sql.1dFloTrend.select}}, \code{\link{sql.1dFloTrend.underlying}}, \code{\link{sql.1dFloTrend}}, \code{\link{sql.1dFundCt}}, \code{\link{sql.1dFundRet}}, \code{\link{sql.1dION}}, \code{\link{sql.1mActWt.underlying}}, \code{\link{sql.1mActWtIncrPct}}, \code{\link{sql.1mActWtTrend.underlying}}, 
\code{\link{sql.1mActWtTrend}}, \code{\link{sql.1mActWt}}, \code{\link{sql.1mAllocD.from}}, \code{\link{sql.1mAllocD.select}}, \code{\link{sql.1mAllocD.topline.from}}, \code{\link{sql.1mAllocD}}, \code{\link{sql.1mAllocMo.select}}, \code{\link{sql.1mAllocMo.underlying.from}}, \code{\link{sql.1mAllocMo.underlying.pre}}, \code{\link{sql.1mAllocMo}}, \code{\link{sql.1mAllocSkew.topline.from}}, \code{\link{sql.1mAllocSkew}}, \code{\link{sql.1mBullish.Alloc}}, \code{\link{sql.1mBullish.Final}}, \code{\link{sql.1mChActWt}}, \code{\link{sql.1mFloTrend.underlying}}, \code{\link{sql.1mFloTrend}}, \code{\link{sql.1mFundCt}}, \code{\link{sql.1mHoldAum}}, \code{\link{sql.1mSRIAdvisorPct}}, \code{\link{sql.1wFlow.Corp}}, \code{\link{sql.ActWtDiff2}}, \code{\link{sql.Allocation.Sec.FinsExREst}}, \code{\link{sql.Allocation.Sec}}, \code{\link{sql.Allocations.bulk.EqWtAvg}}, \code{\link{sql.Allocations.bulk.Single}}, \code{\link{sql.Allocation}}, \code{\link{sql.BenchIndex.duplication}}, \code{\link{sql.Bullish}}, \code{\link{sql.DailyFlo}}, \code{\link{sql.Diff}}, \code{\link{sql.Dispersion}}, \code{\link{sql.FloMo.Funds}}, \code{\link{sql.Flow}}, \code{\link{sql.Foreign}}, \code{\link{sql.FundHistory.macro}}, \code{\link{sql.FundHistory.sf}}, \code{\link{sql.FundHistory}}, \code{\link{sql.HSIdmap}}, \code{\link{sql.HerdingLSV}}, \code{\link{sql.Holdings.bulk.wrapper}}, \code{\link{sql.Holdings.bulk}}, \code{\link{sql.Holdings}}, \code{\link{sql.ION}}, \code{\link{sql.MonthlyAlloc}}, \code{\link{sql.MonthlyAssetsEnd}}, \code{\link{sql.Mo}}, \code{\link{sql.Overweight}}, \code{\link{sql.RDSuniv}}, \code{\link{sql.ReportDate}}, \code{\link{sql.SRI}}, \code{\link{sql.ShareClass}}, \code{\link{sql.TopDownAllocs.items}}, \code{\link{sql.TopDownAllocs.underlying}}, \code{\link{sql.TopDownAllocs}}, \code{\link{sql.Trend}}, \code{\link{sql.and}}, \code{\link{sql.arguments}}, \code{\link{sql.bcp}}, \code{\link{sql.breakdown}}, \code{\link{sql.case}}, \code{\link{sql.close}}, 
\code{\link{sql.connect.wrapper}}, \code{\link{sql.connect}}, \code{\link{sql.cross.border}}, \code{\link{sql.datediff}}, \code{\link{sql.declare}}, \code{\link{sql.delete}}, \code{\link{sql.drop}}, \code{\link{sql.exists}}, \code{\link{sql.extra.domicile}}, \code{\link{sql.index}}, \code{\link{sql.into}}, \code{\link{sql.in}}, \code{\link{sql.isin.old.to.new}}, \code{\link{sql.label}}, \code{\link{sql.map.classif}}, \code{\link{sql.mat.cofactor}}, \code{\link{sql.mat.crossprod.vector}}, \code{\link{sql.mat.crossprod}}, \code{\link{sql.mat.determinant}}, \code{\link{sql.mat.flip}}, \code{\link{sql.mat.multiply}}, \code{\link{sql.median}}, \code{\link{sql.nonneg}}, \code{\link{sql.query.underlying}}, \code{\link{sql.query}}, \code{\link{sql.regr}}, \code{\link{sql.tbl}}, \code{\link{sql.ui}}, \code{\link{sql.unbracket}}, \code{\link{sql.update}}, \code{\link{sql.yield.curve.1dFloMo}}, \code{\link{sql.yield.curve}}, \code{\link{sql.yyyymmdd}}, \code{\link{sql.yyyymm}} } \keyword{sql.1mFloMo}
/man/sql.1mFloMo.Rd
no_license
vsrimurthy/EPFR
R
false
true
5,189
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/EPFR.r \name{sql.1mFloMo} \alias{sql.1mFloMo} \title{sql.1mFloMo} \usage{ sql.1mFloMo(x, y, n, w, h, u = "All") } \arguments{ \item{x}{= the YYYYMM for which you want data} \item{y}{= a string vector of factors to be computed, the last element of which is the type of fund used} \item{n}{= any of StockFlows/China/Japan/CSI300/Energy} \item{w}{= T/F depending on whether you are checking ftp} \item{h}{= breakdown filter (e.g. All/GeoId/DomicileId)} \item{u}{= share-class filter (one of All/Inst/Retail)} } \description{ Generates the SQL query to get the data for 1mFloMo for individual stocks } \seealso{ Other sql: \code{\link{sql.1dActWtTrend.Alloc}}, \code{\link{sql.1dActWtTrend.Final}}, \code{\link{sql.1dActWtTrend.Flow}}, \code{\link{sql.1dActWtTrend.select}}, \code{\link{sql.1dActWtTrend.topline.from}}, \code{\link{sql.1dActWtTrend.topline}}, \code{\link{sql.1dActWtTrend.underlying.basic}}, \code{\link{sql.1dActWtTrend.underlying}}, \code{\link{sql.1dActWtTrend}}, \code{\link{sql.1dFloMo.CountryId.List}}, \code{\link{sql.1dFloMo.FI}}, \code{\link{sql.1dFloMo.Rgn}}, \code{\link{sql.1dFloMo.Sec.topline}}, \code{\link{sql.1dFloMo.filter}}, \code{\link{sql.1dFloMo.grp}}, \code{\link{sql.1dFloMo.select.wrapper}}, \code{\link{sql.1dFloMo.select}}, \code{\link{sql.1dFloMo.underlying}}, \code{\link{sql.1dFloMoAggr}}, \code{\link{sql.1dFloMo}}, \code{\link{sql.1dFloTrend.Alloc.data}}, \code{\link{sql.1dFloTrend.Alloc.fetch}}, \code{\link{sql.1dFloTrend.Alloc.final}}, \code{\link{sql.1dFloTrend.Alloc.from}}, \code{\link{sql.1dFloTrend.Alloc.purge}}, \code{\link{sql.1dFloTrend.Alloc}}, \code{\link{sql.1dFloTrend.select}}, \code{\link{sql.1dFloTrend.underlying}}, \code{\link{sql.1dFloTrend}}, \code{\link{sql.1dFundCt}}, \code{\link{sql.1dFundRet}}, \code{\link{sql.1dION}}, \code{\link{sql.1mActWt.underlying}}, \code{\link{sql.1mActWtIncrPct}}, \code{\link{sql.1mActWtTrend.underlying}}, 
\code{\link{sql.1mActWtTrend}}, \code{\link{sql.1mActWt}}, \code{\link{sql.1mAllocD.from}}, \code{\link{sql.1mAllocD.select}}, \code{\link{sql.1mAllocD.topline.from}}, \code{\link{sql.1mAllocD}}, \code{\link{sql.1mAllocMo.select}}, \code{\link{sql.1mAllocMo.underlying.from}}, \code{\link{sql.1mAllocMo.underlying.pre}}, \code{\link{sql.1mAllocMo}}, \code{\link{sql.1mAllocSkew.topline.from}}, \code{\link{sql.1mAllocSkew}}, \code{\link{sql.1mBullish.Alloc}}, \code{\link{sql.1mBullish.Final}}, \code{\link{sql.1mChActWt}}, \code{\link{sql.1mFloTrend.underlying}}, \code{\link{sql.1mFloTrend}}, \code{\link{sql.1mFundCt}}, \code{\link{sql.1mHoldAum}}, \code{\link{sql.1mSRIAdvisorPct}}, \code{\link{sql.1wFlow.Corp}}, \code{\link{sql.ActWtDiff2}}, \code{\link{sql.Allocation.Sec.FinsExREst}}, \code{\link{sql.Allocation.Sec}}, \code{\link{sql.Allocations.bulk.EqWtAvg}}, \code{\link{sql.Allocations.bulk.Single}}, \code{\link{sql.Allocation}}, \code{\link{sql.BenchIndex.duplication}}, \code{\link{sql.Bullish}}, \code{\link{sql.DailyFlo}}, \code{\link{sql.Diff}}, \code{\link{sql.Dispersion}}, \code{\link{sql.FloMo.Funds}}, \code{\link{sql.Flow}}, \code{\link{sql.Foreign}}, \code{\link{sql.FundHistory.macro}}, \code{\link{sql.FundHistory.sf}}, \code{\link{sql.FundHistory}}, \code{\link{sql.HSIdmap}}, \code{\link{sql.HerdingLSV}}, \code{\link{sql.Holdings.bulk.wrapper}}, \code{\link{sql.Holdings.bulk}}, \code{\link{sql.Holdings}}, \code{\link{sql.ION}}, \code{\link{sql.MonthlyAlloc}}, \code{\link{sql.MonthlyAssetsEnd}}, \code{\link{sql.Mo}}, \code{\link{sql.Overweight}}, \code{\link{sql.RDSuniv}}, \code{\link{sql.ReportDate}}, \code{\link{sql.SRI}}, \code{\link{sql.ShareClass}}, \code{\link{sql.TopDownAllocs.items}}, \code{\link{sql.TopDownAllocs.underlying}}, \code{\link{sql.TopDownAllocs}}, \code{\link{sql.Trend}}, \code{\link{sql.and}}, \code{\link{sql.arguments}}, \code{\link{sql.bcp}}, \code{\link{sql.breakdown}}, \code{\link{sql.case}}, \code{\link{sql.close}}, 
\code{\link{sql.connect.wrapper}}, \code{\link{sql.connect}}, \code{\link{sql.cross.border}}, \code{\link{sql.datediff}}, \code{\link{sql.declare}}, \code{\link{sql.delete}}, \code{\link{sql.drop}}, \code{\link{sql.exists}}, \code{\link{sql.extra.domicile}}, \code{\link{sql.index}}, \code{\link{sql.into}}, \code{\link{sql.in}}, \code{\link{sql.isin.old.to.new}}, \code{\link{sql.label}}, \code{\link{sql.map.classif}}, \code{\link{sql.mat.cofactor}}, \code{\link{sql.mat.crossprod.vector}}, \code{\link{sql.mat.crossprod}}, \code{\link{sql.mat.determinant}}, \code{\link{sql.mat.flip}}, \code{\link{sql.mat.multiply}}, \code{\link{sql.median}}, \code{\link{sql.nonneg}}, \code{\link{sql.query.underlying}}, \code{\link{sql.query}}, \code{\link{sql.regr}}, \code{\link{sql.tbl}}, \code{\link{sql.ui}}, \code{\link{sql.unbracket}}, \code{\link{sql.update}}, \code{\link{sql.yield.curve.1dFloMo}}, \code{\link{sql.yield.curve}}, \code{\link{sql.yyyymmdd}}, \code{\link{sql.yyyymm}} } \keyword{sql.1mFloMo}
library(ape) testtree <- read.tree("12836_0.txt") unrooted_tr <- unroot(testtree) write.tree(unrooted_tr, file="12836_0_unrooted.txt")
/codeml_files/newick_trees_processed/12836_0/rinput.R
no_license
DaniBoo/cyanobacteria_project
R
false
false
137
r
library(ape) testtree <- read.tree("12836_0.txt") unrooted_tr <- unroot(testtree) write.tree(unrooted_tr, file="12836_0_unrooted.txt")
library(testthat) library(RCMake) cat("run-all.R\n") test_package("RCMake")
/tests/run-all.R
no_license
wush978/RCMake
R
false
false
76
r
library(testthat) library(RCMake) cat("run-all.R\n") test_package("RCMake")
##################################### ## Gapfilling artisanal fishing data ##################################### ############################################### ## Pressure: alien invasions gap-filling ############################################### library(dplyr) library(tidyr) ## disaggregation regions d_regions <- read.csv('../ohi-global/global2015/gapFilling/dissaggregated_gap_fill.csv') %>% select(rgn_id = rgn_id_2013, region_id_2012) %>% mutate(gap_fill_1 = "disagg2012_gap_fill") %>% arrange(region_id_2012) ## hb (there is a non-disagregated version of these data used for NP, these should be scaled and used here!): hb <- read.csv('globalprep/pressures_artisanal_fp/v2012/data/p_fp_art_hb_disaggregateNature2012.csv') %>% mutate(pressures.score = ifelse(rgn_id %in% d_regions$rgn_id, "1", "0")) write.csv(hb, 'globalprep/pressures_artisanal_fp/v2012/data/p_fp_art_hb_disaggregateNature2012_gf.csv', row.names=FALSE) ## lb We didn't gapfill these data, but there was gapfilling prior to OHI: ## According to notes, data were ## supplied for 59 countries and then estimated for remaining. ## Look in original pressures paper for more details ## Seems like this would be available in here: Y:\mnt\storage\marine_threats\work\artisinal\datatables\Artisanal Models ## But I couldn't find anything that looked like original data. ## UPDATE 2016: The new data for this pressure has no gapfilling lb <- read.csv('globalprep/pressures_artisanal_fp/v2013/data/fp_art_lb_2013_NEW.csv') %>% mutate(pressures_score = 0) %>% select(rgn_id, pressure.score=pressures_score) write.csv(lb, 'globalprep/pressures_artisanal_fp/v2013/data/fp_art_lb_2013_NEW_gf.csv', row.names=FALSE)
/globalprep/prs_fish/v2013/gapfilling_v2012andv2013.R
no_license
OHI-Science/ohiprep_v2018
R
false
false
1,747
r
##################################### ## Gapfilling artisanal fishing data ##################################### ############################################### ## Pressure: alien invasions gap-filling ############################################### library(dplyr) library(tidyr) ## disaggregation regions d_regions <- read.csv('../ohi-global/global2015/gapFilling/dissaggregated_gap_fill.csv') %>% select(rgn_id = rgn_id_2013, region_id_2012) %>% mutate(gap_fill_1 = "disagg2012_gap_fill") %>% arrange(region_id_2012) ## hb (there is a non-disagregated version of these data used for NP, these should be scaled and used here!): hb <- read.csv('globalprep/pressures_artisanal_fp/v2012/data/p_fp_art_hb_disaggregateNature2012.csv') %>% mutate(pressures.score = ifelse(rgn_id %in% d_regions$rgn_id, "1", "0")) write.csv(hb, 'globalprep/pressures_artisanal_fp/v2012/data/p_fp_art_hb_disaggregateNature2012_gf.csv', row.names=FALSE) ## lb We didn't gapfill these data, but there was gapfilling prior to OHI: ## According to notes, data were ## supplied for 59 countries and then estimated for remaining. ## Look in original pressures paper for more details ## Seems like this would be available in here: Y:\mnt\storage\marine_threats\work\artisinal\datatables\Artisanal Models ## But I couldn't find anything that looked like original data. ## UPDATE 2016: The new data for this pressure has no gapfilling lb <- read.csv('globalprep/pressures_artisanal_fp/v2013/data/fp_art_lb_2013_NEW.csv') %>% mutate(pressures_score = 0) %>% select(rgn_id, pressure.score=pressures_score) write.csv(lb, 'globalprep/pressures_artisanal_fp/v2013/data/fp_art_lb_2013_NEW_gf.csv', row.names=FALSE)
library(dplyr) library(ggplot2) library(ambient) ##worley noise with distance2sub grid <- long_grid(seq(1, 10, length.out = 1000), seq(1, 10, length.out = 1000)) %>% mutate(noise = gen_worley(x, y, value = "distance2sub")) ggplot() + geom_raster(data = grid, aes(x, y, fill = noise)) + theme_void() + theme(legend.position = "none") #worley noise with distance2sub and distance_ind 1-5 grid <- long_grid(seq(1, 10, length.out = 1000), seq(1, 10, length.out = 1000)) %>% mutate(noise = gen_worley(x, y, value = "distance2sub", jitter = 0.4, distance_ind = c(1, 5))) ggplot() + geom_raster(data = grid, aes(x, y, fill = noise)) + scale_fill_gradientn(colors = c("black", "#47C2C9", "#E384BD", "white")) + theme_void() + theme(legend.position = "none") #worley noise with distance2div and ridged fractal grid <- long_grid(x = seq(1, 10, length.out = 1000), y = seq(1, 10, length.out = 1000)) %>% mutate(fractal = fracture(gen_worley, ridged, value = "distance2div", octaves = 8, x = x, y = y)) ggplot() + geom_raster(data = grid, aes(x, y, fill = fractal)) + theme_void() + theme(legend.position = "none") #simplex noise after two rounds of seed point warping grid <- long_grid(x = seq(0, 10, length.out = 1000), y = seq(0, 10, length.out = 1000)) %>% mutate( x1 = x + gen_perlin(x = x, y = y, frequency = 1), y1 = y + gen_perlin(x = x, y = y, frequency = 2), x2 = x1 + gen_simplex(x = x1, y = y1, frequency = 1), y2 = y1 + gen_simplex(x = x1, y = y1, frequency = 3), simplex_warp = gen_simplex(x = x1, y = y2) ) ggplot() + geom_raster(data = grid, aes(x, y, fill = simplex_warp)) + scale_fill_gradientn(colors = c('#253852', '#51222f', '#b53435', '#ecbb51', "#eeccc2"), guide = "none") + theme_void() + theme(legend.position = "none") #simplex ridged fractal blended with warped spheres, masked by worley distance2mul grid <- long_grid(x = seq(0, 10, length.out = 1000), y = seq(0, 10, length.out = 1000)) %>% mutate( x1 = x + gen_simplex(x, y) / 2, y1 = y + gen_simplex(x, y) * 2, worley = 
gen_worley(x, y, value = 'distance2mul', jitter = 0.5), simplex_frac = fracture(gen_simplex, ridged, octaves = 10, x = x, y = y), full = blend(normalise(worley), normalise(simplex_frac), gen_spheres(x1, y1)) ) ggplot() + geom_raster(data = grid, aes(x, y, fill = full)) + scale_fill_gradientn(colors = c("black", "#DC1F24", "#EDE8E8","#4BC4CB"), guide = "none") + theme_void() + theme(legend.position = "none", plot.background = element_blank(), panel.background = element_blank()) #worley noise warped by worley noise with distance2div and ridged fractal grid <- long_grid(seq(1, 10, length.out = 1000), seq(1, 10, length.out = 1000)) %>% mutate( x1 = x + fracture(gen_worley, ridged, octaves = 8, x = x, y = y, value = "distance2div", distance = "euclidean", distance_ind = c(1, 2), jitter = 0.4), y1 = y + fracture(gen_worley, ridged, octaves = 8, x = x, y = y, value = "distance2div", distance = "euclidean", distance_ind = c(1, 2), jitter = 0.4), simplex_warp = gen_worley(x = x1, y = y1, value = "distance") ) ggplot() + geom_raster(data = grid, aes(x, y, fill = simplex_warp)) + theme_void() + theme(legend.position = "none") #seed points warped by distance2div worley ridged fractal, blending worley and worley ridged fractal with cubic noise grid <- long_grid(seq(1, 10, length.out = 1000), seq(1, 10, length.out = 1000)) %>% mutate( x1 = x + fracture(gen_worley, ridged, octaves = 8, x = x, y = y, value = "distance2div", distance = "euclidean", distance_ind = c(1, 2), jitter = 0.5), y1 = y + fracture(gen_worley, ridged, octaves = 8, x = x, y = y, value = "distance2div", distance = "euclidean", distance_ind = c(1, 3), jitter = 0.4), worley_warp = gen_worley(x = x1, y = y1, value = "distance", jitter = 0.4, distance = "manhattan"), worley_warp2 = fracture(gen_worley, ridged, octaves = 8, x = x1, y = y1, value = "distance2div", distance = "euclidean", distance_ind = c(1, 2), jitter = 0.5), cubic = gen_cubic(x = x * 3, y = y / 3), blend = blend(normalize(cubic), worley_warp, 
worley_warp2) ) ggplot() + geom_raster(data = grid, aes(x, y, fill = blend)) + scale_fill_gradientn(colors = c('#f0efe2', '#363d4a', '#7b8a56', '#ff9369', '#f4c172'), guide = "none") + theme_void() + theme(legend.position = "none")
/August_noise/noise_examples.R
no_license
2008haas/aRt
R
false
false
4,541
r
library(dplyr) library(ggplot2) library(ambient) ##worley noise with distance2sub grid <- long_grid(seq(1, 10, length.out = 1000), seq(1, 10, length.out = 1000)) %>% mutate(noise = gen_worley(x, y, value = "distance2sub")) ggplot() + geom_raster(data = grid, aes(x, y, fill = noise)) + theme_void() + theme(legend.position = "none") #worley noise with distance2sub and distance_ind 1-5 grid <- long_grid(seq(1, 10, length.out = 1000), seq(1, 10, length.out = 1000)) %>% mutate(noise = gen_worley(x, y, value = "distance2sub", jitter = 0.4, distance_ind = c(1, 5))) ggplot() + geom_raster(data = grid, aes(x, y, fill = noise)) + scale_fill_gradientn(colors = c("black", "#47C2C9", "#E384BD", "white")) + theme_void() + theme(legend.position = "none") #worley noise with distance2div and ridged fractal grid <- long_grid(x = seq(1, 10, length.out = 1000), y = seq(1, 10, length.out = 1000)) %>% mutate(fractal = fracture(gen_worley, ridged, value = "distance2div", octaves = 8, x = x, y = y)) ggplot() + geom_raster(data = grid, aes(x, y, fill = fractal)) + theme_void() + theme(legend.position = "none") #simplex noise after two rounds of seed point warping grid <- long_grid(x = seq(0, 10, length.out = 1000), y = seq(0, 10, length.out = 1000)) %>% mutate( x1 = x + gen_perlin(x = x, y = y, frequency = 1), y1 = y + gen_perlin(x = x, y = y, frequency = 2), x2 = x1 + gen_simplex(x = x1, y = y1, frequency = 1), y2 = y1 + gen_simplex(x = x1, y = y1, frequency = 3), simplex_warp = gen_simplex(x = x1, y = y2) ) ggplot() + geom_raster(data = grid, aes(x, y, fill = simplex_warp)) + scale_fill_gradientn(colors = c('#253852', '#51222f', '#b53435', '#ecbb51', "#eeccc2"), guide = "none") + theme_void() + theme(legend.position = "none") #simplex ridged fractal blended with warped spheres, masked by worley distance2mul grid <- long_grid(x = seq(0, 10, length.out = 1000), y = seq(0, 10, length.out = 1000)) %>% mutate( x1 = x + gen_simplex(x, y) / 2, y1 = y + gen_simplex(x, y) * 2, worley = 
gen_worley(x, y, value = 'distance2mul', jitter = 0.5), simplex_frac = fracture(gen_simplex, ridged, octaves = 10, x = x, y = y), full = blend(normalise(worley), normalise(simplex_frac), gen_spheres(x1, y1)) ) ggplot() + geom_raster(data = grid, aes(x, y, fill = full)) + scale_fill_gradientn(colors = c("black", "#DC1F24", "#EDE8E8","#4BC4CB"), guide = "none") + theme_void() + theme(legend.position = "none", plot.background = element_blank(), panel.background = element_blank()) #worley noise warped by worley noise with distance2div and ridged fractal grid <- long_grid(seq(1, 10, length.out = 1000), seq(1, 10, length.out = 1000)) %>% mutate( x1 = x + fracture(gen_worley, ridged, octaves = 8, x = x, y = y, value = "distance2div", distance = "euclidean", distance_ind = c(1, 2), jitter = 0.4), y1 = y + fracture(gen_worley, ridged, octaves = 8, x = x, y = y, value = "distance2div", distance = "euclidean", distance_ind = c(1, 2), jitter = 0.4), simplex_warp = gen_worley(x = x1, y = y1, value = "distance") ) ggplot() + geom_raster(data = grid, aes(x, y, fill = simplex_warp)) + theme_void() + theme(legend.position = "none") #seed points warped by distance2div worley ridged fractal, blending worley and worley ridged fractal with cubic noise grid <- long_grid(seq(1, 10, length.out = 1000), seq(1, 10, length.out = 1000)) %>% mutate( x1 = x + fracture(gen_worley, ridged, octaves = 8, x = x, y = y, value = "distance2div", distance = "euclidean", distance_ind = c(1, 2), jitter = 0.5), y1 = y + fracture(gen_worley, ridged, octaves = 8, x = x, y = y, value = "distance2div", distance = "euclidean", distance_ind = c(1, 3), jitter = 0.4), worley_warp = gen_worley(x = x1, y = y1, value = "distance", jitter = 0.4, distance = "manhattan"), worley_warp2 = fracture(gen_worley, ridged, octaves = 8, x = x1, y = y1, value = "distance2div", distance = "euclidean", distance_ind = c(1, 2), jitter = 0.5), cubic = gen_cubic(x = x * 3, y = y / 3), blend = blend(normalize(cubic), worley_warp, 
worley_warp2) ) ggplot() + geom_raster(data = grid, aes(x, y, fill = blend)) + scale_fill_gradientn(colors = c('#f0efe2', '#363d4a', '#7b8a56', '#ff9369', '#f4c172'), guide = "none") + theme_void() + theme(legend.position = "none")
##Script to summarise hybrid data collected from galooli #import libraries library(xlsx) library(dplyr) ### read files hybriddata1 <- read.xlsx("Summary - Configurable - ATC Uganda.xlsx", sheetName = "data", colClasses = "character") hybriddata2 <- read.xlsx("Summary - Configurable - ATC Uganda 2.xlsx", sheetName = "data", colClasses = "character") ####clean data ##clean names of hybriddata1 names(hybriddata1) <- sapply(names(hybriddata1), function(x) gsub("\\(", "", x)) names(hybriddata1) <- sapply(names(hybriddata1), function(x) gsub("\\)", "", x)) names(hybriddata1) <- sapply(names(hybriddata1), function(x) gsub("\\-", "", x)) names(hybriddata1) <- sapply(names(hybriddata1), function(x) gsub("\\,", "", x)) names(hybriddata1) <- sapply(names(hybriddata1), function(x) gsub("\\.", "", x)) names(hybriddata1) <- sapply(names(hybriddata1), function(x) gsub(" ", "", x)) names(hybriddata1) <- tolower(names(hybriddata1)) ##clean names of hybriddata2 names(hybriddata2) <- sapply(names(hybriddata2), function(x) gsub("\\(", "", x)) names(hybriddata2) <- sapply(names(hybriddata2), function(x) gsub("\\)", "", x)) names(hybriddata2) <- sapply(names(hybriddata2), function(x) gsub("\\-", "", x)) names(hybriddata2) <- sapply(names(hybriddata2), function(x) gsub("\\,", "", x)) names(hybriddata2) <- sapply(names(hybriddata2), function(x) gsub("\\.", "", x)) names(hybriddata2) <- sapply(names(hybriddata2), function(x) gsub(" ", "", x)) names(hybriddata2) <- tolower(names(hybriddata2)) ##clean rows of hybriddata 1 hybriddata1$usagetimedg1 <- as.numeric(as.character(hybriddata1$usagetimedg1)) hybriddata1$usagetimebattery <- as.numeric(as.character(hybriddata1$usagetimebattery)) hybriddata1$usagetimeunknown <- as.numeric(as.character(hybriddata1$usagetimeunknown)) hybriddata1$dg2ignitioncount <- as.numeric(as.character(hybriddata1$dg1ignitioncount)) hybriddata1$usagetimegrid <- as.numeric(as.character(hybriddata1$usagetimegrid)) hybriddata1$usagetimesolar <- 
as.numeric(as.character(hybriddata1$usagetimesolar)) hybriddata1$generalinformationsiteid <- as.numeric(as.character(hybriddata1$generalinformationsiteid)) hybriddata1$generalinformationsitename <- as.character(hybriddata1$generalinformationsitename) hybriddata1$atskwhtotal <- as.numeric(as.character(hybriddata1$atskwhtotal)) ##added line hybriddata1$extrafield8 <- as.character(hybriddata1$extrafield8) hybriddata1$extrafield8 <- sapply(hybriddata1$extrafield8, function(x) gsub("\\-", "", x)) hybriddata1$extrafield8 <- sapply(hybriddata1$extrafield8, function(x) gsub(" ", "", x)) hybriddata1$extrafield8 <- tolower(hybriddata1$extrafield8) ##clean rows of hybriddata 2 hybriddata2$usagetimedg1 <- as.numeric(as.character(hybriddata2$usagetimedg1)) hybriddata2$usagetimebattery <- as.numeric(as.character(hybriddata2$usagetimebattery)) hybriddata2$usagetimeunknown <- as.numeric(as.character(hybriddata2$usagetimeunknown)) hybriddata2$dg2ignitioncount <- as.numeric(as.character(hybriddata2$dg1ignitioncount)) hybriddata2$usagetimegrid <- as.numeric(as.character(hybriddata2$usagetimegrid)) hybriddata2$usagetimesolar <- as.numeric(as.character(hybriddata2$usagetimesolar)) hybriddata2$generalinformationsiteid <- as.numeric(as.character(hybriddata2$generalinformationsiteid)) hybriddata2$generalinformationsitename <- as.character(hybriddata2$generalinformationsitename) hybriddata2$atskwhtotal <- as.numeric(as.character(hybriddata2$atskwhtotal)) hybriddata2$extrafield8 <- as.character(hybriddata2$extrafield8) hybriddata2$extrafield8 <- sapply(hybriddata2$extrafield8, function(x) gsub("\\-", "", x)) hybriddata2$extrafield8 <- sapply(hybriddata2$extrafield8, function(x) gsub(" ", "", x)) hybriddata2$extrafield8 <- tolower(hybriddata2$extrafield8) ##04/04/2019 code ##filter sites with hybrids hybriddata1 <- hybriddata1 %>% filter(generalinformationhybridstatus == "YES") hybriddata2 <- hybriddata2 %>% filter(generalinformationhybridstatus == "YES") ##clean out sites with zero kwh 
hybriddata1 <- hybriddata1 %>% filter(atskwhtotal != 0) hybriddata2 <- hybriddata2 %>% filter(atskwhtotal != 0) ## create column with kW hybriddata1 <- hybriddata1 %>% mutate(siteload = atskwhtotal/24) hybriddata2 <- hybriddata2 %>% mutate(siteload = atskwhtotal/24) ##sites with hybrid and having grid hybridgrid1 <- hybriddata1 %>% filter(sitelayout %in% c("TL_UG_OUTDOOR_ONGRID", "TL_UG_INDOOR_ONGRID_HYBRID_REV03", "TL_UG_INDOOR_ONGRID_HYBRID", "TL_UG_INDOOR_ONGRID_HYBRID_REV2", "TL_UG_INDOOR_ONGRID_REV01", "TL_UG_INDOOR_ONGRID_REV2", "TL_UG_OUTDOOR_ONGRID", " TL_UG_OUTDOOR_ONGRID_HYBRID_REV1", "TL_UG_INDOOR_ONGRID_HYBRID_REV0", "TL_UG_INDOOR_ONGRID_HYBRID_REV1", "TL_UG_INDOOR_ONGRID_REV0", " TL_UG_INDOOR_ONGRID_REV1", " TL_UG_OUTDOOR_ONGRID_HYBRID", "TL_UG_OUTDOOR_ONGRID_REV2")) ##sites with hybrid and not having grid hybridfull1 <- hybriddata1 %>% filter(!(sitelayout %in% c("TL_UG_OUTDOOR_ONGRID", "TL_UG_INDOOR_ONGRID_HYBRID_REV03", "TL_UG_INDOOR_ONGRID_HYBRID", "TL_UG_INDOOR_ONGRID_HYBRID_REV2", "TL_UG_INDOOR_ONGRID_REV01", "TL_UG_INDOOR_ONGRID_REV2", "TL_UG_OUTDOOR_ONGRID", " TL_UG_OUTDOOR_ONGRID_HYBRID_REV1", "TL_UG_INDOOR_ONGRID_HYBRID_REV0", "TL_UG_INDOOR_ONGRID_HYBRID_REV1", "TL_UG_INDOOR_ONGRID_REV0", " TL_UG_INDOOR_ONGRID_REV1", " TL_UG_OUTDOOR_ONGRID_HYBRID", "TL_UG_OUTDOOR_ONGRID_REV2"))) ##site with hybrid and solar hybridsolar1 <- hybriddata1 %>% filter(sitelayout %in% c("TL_UG_INDOOR_OFFGRID_HYBRID_SOLAR_REV03", "TL_UG_INDOOR_OFFGRID_HYBRID_SOLAR", "TL_UG_INDOOR_OFFGRID_HYBRID_SOLAR_REV2", "TL_UG_OUTDOOR_OFFGRID_HYBRID_SOLAR")) ##sites with hybrid only nameshybridonly <- c("TL_UG_INDOOR_OFFGRID", "TL_UG_INDOOR_OFFGRID_HYBRID", "TL_UG_INDOOR_OFFGRID_HYBRID_REV0", "TL_UG_INDOOR_OFFGRID_HYBRID_REV1", "TL_UG_INDOOR_OFFGRID_HYBRID_REV2", "TL_UG_INDOOR_OFFGRID_REV0", "TL_UG_INDOOR_OFFGRID_REV1", "TL_UG_INDOOR_OFFGRID_REV2", "TL_UG_OUTDOOR_OFFGRID", "TL_UG_OUTDOOR_OFFGRID_HYBRID", "TL_UG_OUTDOOR_OFFGRID_HYBRID_REV03") ##sites with only hybrid hybridonly1 <- 
hybriddata1 %>% filter(sitelayout %in% nameshybridonly) ##hybrid only sites with LiBs hybridonlylibs <- hybridonly1 %>% filter(extrafield8 %in% c("libincell", "liblgchem", "libacmereime")) #hybrid only libs hybridonlylibs <- hybridonly1 %>% filter(extrafield8 %in% c("libincell", "liblgchem", "libacmereime")) ####Visualisation graphs #site load Vs battery time ggloadbatterytime <- ggplot(data = hybridonly1, aes(usagetimebattery, siteload)) #ggloadbatterytime + geom_point() + geom_smooth() #ggloadbatterytime + geom_point() + geom_smooth(method = "lm") #ggloadbatterytime <- ggplot(data = hybridonly1, aes(usagetimebattery, siteload, color = extrafield8)) #ggloadbatterytime + geom_point() + geom_smooth(method = "lm") #ggloadbatterytime + geom_point() + geom_smooth(method = "lm", se = FALSE) #gghybridonlylib <- ggplot(data = hybridonlylibs, aes(usagetimebattery, siteload)) #gghybridonlylib + geom_point() #gghybridonlylib + geom_point() + geom_smooth(method = "lm") #gghybridonlylib <- ggplot(data = hybridonlylibs, aes(usagetimebattery, siteload, color = extrafield8)) #gghybridonlylib + geom_point() + geom_smooth(method = "lm") #gghybridonlylib + geom_boxplot() #ggplot(data = hybridsolar1, aes(usagetimesolar, fill = extrafield8)) + geom_histogram() + facet_grid(.~extrafield8) png(filename = "siteloaddgignitionsolar.png", width = 960, height = 960) qplot(siteload, data = hybridsolar1, fill = dg1ignitioncount, alpha = .5, facets = ~dg1ignitioncount) + geom_vline(xintercept = 2) + labs(title = "Ignition counts as per site loads on solar sites") dev.off() png(filename = "siteloaddgignitionlib.png", width = 960, height = 960) qplot(siteload, data = hybridonlylibs, fill = dg1ignitioncount, alpha = .5, facets = ~dg1ignitioncount) + geom_vline(xintercept = 2) + labs(title = "Ignition counts as per site loads on LiB sites") dev.off()
/hybrid_summary.R
no_license
AllanKavuma/Hybrid-and-Solar-data-analysis-on-Telecommunication-sites
R
false
false
8,820
r
##Script to summarise hybrid data collected from galooli #import libraries library(xlsx) library(dplyr) ### read files hybriddata1 <- read.xlsx("Summary - Configurable - ATC Uganda.xlsx", sheetName = "data", colClasses = "character") hybriddata2 <- read.xlsx("Summary - Configurable - ATC Uganda 2.xlsx", sheetName = "data", colClasses = "character") ####clean data ##clean names of hybriddata1 names(hybriddata1) <- sapply(names(hybriddata1), function(x) gsub("\\(", "", x)) names(hybriddata1) <- sapply(names(hybriddata1), function(x) gsub("\\)", "", x)) names(hybriddata1) <- sapply(names(hybriddata1), function(x) gsub("\\-", "", x)) names(hybriddata1) <- sapply(names(hybriddata1), function(x) gsub("\\,", "", x)) names(hybriddata1) <- sapply(names(hybriddata1), function(x) gsub("\\.", "", x)) names(hybriddata1) <- sapply(names(hybriddata1), function(x) gsub(" ", "", x)) names(hybriddata1) <- tolower(names(hybriddata1)) ##clean names of hybriddata2 names(hybriddata2) <- sapply(names(hybriddata2), function(x) gsub("\\(", "", x)) names(hybriddata2) <- sapply(names(hybriddata2), function(x) gsub("\\)", "", x)) names(hybriddata2) <- sapply(names(hybriddata2), function(x) gsub("\\-", "", x)) names(hybriddata2) <- sapply(names(hybriddata2), function(x) gsub("\\,", "", x)) names(hybriddata2) <- sapply(names(hybriddata2), function(x) gsub("\\.", "", x)) names(hybriddata2) <- sapply(names(hybriddata2), function(x) gsub(" ", "", x)) names(hybriddata2) <- tolower(names(hybriddata2)) ##clean rows of hybriddata 1 hybriddata1$usagetimedg1 <- as.numeric(as.character(hybriddata1$usagetimedg1)) hybriddata1$usagetimebattery <- as.numeric(as.character(hybriddata1$usagetimebattery)) hybriddata1$usagetimeunknown <- as.numeric(as.character(hybriddata1$usagetimeunknown)) hybriddata1$dg2ignitioncount <- as.numeric(as.character(hybriddata1$dg1ignitioncount)) hybriddata1$usagetimegrid <- as.numeric(as.character(hybriddata1$usagetimegrid)) hybriddata1$usagetimesolar <- 
as.numeric(as.character(hybriddata1$usagetimesolar)) hybriddata1$generalinformationsiteid <- as.numeric(as.character(hybriddata1$generalinformationsiteid)) hybriddata1$generalinformationsitename <- as.character(hybriddata1$generalinformationsitename) hybriddata1$atskwhtotal <- as.numeric(as.character(hybriddata1$atskwhtotal)) ##added line hybriddata1$extrafield8 <- as.character(hybriddata1$extrafield8) hybriddata1$extrafield8 <- sapply(hybriddata1$extrafield8, function(x) gsub("\\-", "", x)) hybriddata1$extrafield8 <- sapply(hybriddata1$extrafield8, function(x) gsub(" ", "", x)) hybriddata1$extrafield8 <- tolower(hybriddata1$extrafield8) ##clean rows of hybriddata 2 hybriddata2$usagetimedg1 <- as.numeric(as.character(hybriddata2$usagetimedg1)) hybriddata2$usagetimebattery <- as.numeric(as.character(hybriddata2$usagetimebattery)) hybriddata2$usagetimeunknown <- as.numeric(as.character(hybriddata2$usagetimeunknown)) hybriddata2$dg2ignitioncount <- as.numeric(as.character(hybriddata2$dg1ignitioncount)) hybriddata2$usagetimegrid <- as.numeric(as.character(hybriddata2$usagetimegrid)) hybriddata2$usagetimesolar <- as.numeric(as.character(hybriddata2$usagetimesolar)) hybriddata2$generalinformationsiteid <- as.numeric(as.character(hybriddata2$generalinformationsiteid)) hybriddata2$generalinformationsitename <- as.character(hybriddata2$generalinformationsitename) hybriddata2$atskwhtotal <- as.numeric(as.character(hybriddata2$atskwhtotal)) hybriddata2$extrafield8 <- as.character(hybriddata2$extrafield8) hybriddata2$extrafield8 <- sapply(hybriddata2$extrafield8, function(x) gsub("\\-", "", x)) hybriddata2$extrafield8 <- sapply(hybriddata2$extrafield8, function(x) gsub(" ", "", x)) hybriddata2$extrafield8 <- tolower(hybriddata2$extrafield8) ##04/04/2019 code ##filter sites with hybrids hybriddata1 <- hybriddata1 %>% filter(generalinformationhybridstatus == "YES") hybriddata2 <- hybriddata2 %>% filter(generalinformationhybridstatus == "YES") ##clean out sites with zero kwh 
hybriddata1 <- hybriddata1 %>% filter(atskwhtotal != 0) hybriddata2 <- hybriddata2 %>% filter(atskwhtotal != 0) ## create column with kW hybriddata1 <- hybriddata1 %>% mutate(siteload = atskwhtotal/24) hybriddata2 <- hybriddata2 %>% mutate(siteload = atskwhtotal/24) ##sites with hybrid and having grid hybridgrid1 <- hybriddata1 %>% filter(sitelayout %in% c("TL_UG_OUTDOOR_ONGRID", "TL_UG_INDOOR_ONGRID_HYBRID_REV03", "TL_UG_INDOOR_ONGRID_HYBRID", "TL_UG_INDOOR_ONGRID_HYBRID_REV2", "TL_UG_INDOOR_ONGRID_REV01", "TL_UG_INDOOR_ONGRID_REV2", "TL_UG_OUTDOOR_ONGRID", " TL_UG_OUTDOOR_ONGRID_HYBRID_REV1", "TL_UG_INDOOR_ONGRID_HYBRID_REV0", "TL_UG_INDOOR_ONGRID_HYBRID_REV1", "TL_UG_INDOOR_ONGRID_REV0", " TL_UG_INDOOR_ONGRID_REV1", " TL_UG_OUTDOOR_ONGRID_HYBRID", "TL_UG_OUTDOOR_ONGRID_REV2")) ##sites with hybrid and not having grid hybridfull1 <- hybriddata1 %>% filter(!(sitelayout %in% c("TL_UG_OUTDOOR_ONGRID", "TL_UG_INDOOR_ONGRID_HYBRID_REV03", "TL_UG_INDOOR_ONGRID_HYBRID", "TL_UG_INDOOR_ONGRID_HYBRID_REV2", "TL_UG_INDOOR_ONGRID_REV01", "TL_UG_INDOOR_ONGRID_REV2", "TL_UG_OUTDOOR_ONGRID", " TL_UG_OUTDOOR_ONGRID_HYBRID_REV1", "TL_UG_INDOOR_ONGRID_HYBRID_REV0", "TL_UG_INDOOR_ONGRID_HYBRID_REV1", "TL_UG_INDOOR_ONGRID_REV0", " TL_UG_INDOOR_ONGRID_REV1", " TL_UG_OUTDOOR_ONGRID_HYBRID", "TL_UG_OUTDOOR_ONGRID_REV2"))) ##site with hybrid and solar hybridsolar1 <- hybriddata1 %>% filter(sitelayout %in% c("TL_UG_INDOOR_OFFGRID_HYBRID_SOLAR_REV03", "TL_UG_INDOOR_OFFGRID_HYBRID_SOLAR", "TL_UG_INDOOR_OFFGRID_HYBRID_SOLAR_REV2", "TL_UG_OUTDOOR_OFFGRID_HYBRID_SOLAR")) ##sites with hybrid only nameshybridonly <- c("TL_UG_INDOOR_OFFGRID", "TL_UG_INDOOR_OFFGRID_HYBRID", "TL_UG_INDOOR_OFFGRID_HYBRID_REV0", "TL_UG_INDOOR_OFFGRID_HYBRID_REV1", "TL_UG_INDOOR_OFFGRID_HYBRID_REV2", "TL_UG_INDOOR_OFFGRID_REV0", "TL_UG_INDOOR_OFFGRID_REV1", "TL_UG_INDOOR_OFFGRID_REV2", "TL_UG_OUTDOOR_OFFGRID", "TL_UG_OUTDOOR_OFFGRID_HYBRID", "TL_UG_OUTDOOR_OFFGRID_HYBRID_REV03") ##sites with only hybrid hybridonly1 <- 
hybriddata1 %>% filter(sitelayout %in% nameshybridonly) ##hybrid only sites with LiBs hybridonlylibs <- hybridonly1 %>% filter(extrafield8 %in% c("libincell", "liblgchem", "libacmereime")) #hybrid only libs hybridonlylibs <- hybridonly1 %>% filter(extrafield8 %in% c("libincell", "liblgchem", "libacmereime")) ####Visualisation graphs #site load Vs battery time ggloadbatterytime <- ggplot(data = hybridonly1, aes(usagetimebattery, siteload)) #ggloadbatterytime + geom_point() + geom_smooth() #ggloadbatterytime + geom_point() + geom_smooth(method = "lm") #ggloadbatterytime <- ggplot(data = hybridonly1, aes(usagetimebattery, siteload, color = extrafield8)) #ggloadbatterytime + geom_point() + geom_smooth(method = "lm") #ggloadbatterytime + geom_point() + geom_smooth(method = "lm", se = FALSE) #gghybridonlylib <- ggplot(data = hybridonlylibs, aes(usagetimebattery, siteload)) #gghybridonlylib + geom_point() #gghybridonlylib + geom_point() + geom_smooth(method = "lm") #gghybridonlylib <- ggplot(data = hybridonlylibs, aes(usagetimebattery, siteload, color = extrafield8)) #gghybridonlylib + geom_point() + geom_smooth(method = "lm") #gghybridonlylib + geom_boxplot() #ggplot(data = hybridsolar1, aes(usagetimesolar, fill = extrafield8)) + geom_histogram() + facet_grid(.~extrafield8) png(filename = "siteloaddgignitionsolar.png", width = 960, height = 960) qplot(siteload, data = hybridsolar1, fill = dg1ignitioncount, alpha = .5, facets = ~dg1ignitioncount) + geom_vline(xintercept = 2) + labs(title = "Ignition counts as per site loads on solar sites") dev.off() png(filename = "siteloaddgignitionlib.png", width = 960, height = 960) qplot(siteload, data = hybridonlylibs, fill = dg1ignitioncount, alpha = .5, facets = ~dg1ignitioncount) + geom_vline(xintercept = 2) + labs(title = "Ignition counts as per site loads on LiB sites") dev.off()
#' Group in Azure Active Directory #' #' Class representing an AAD group. #' #' @docType class #' @section Fields: #' - `token`: The token used to authenticate with the Graph host. #' - `tenant`: The Azure Active Directory tenant for this group. #' - `type`: always "group" for a group object. #' - `properties`: The group properties. #' @section Methods: #' - `new(...)`: Initialize a new group object. Do not call this directly; see 'Initialization' below. #' - `delete(confirm=TRUE)`: Delete a group. By default, ask for confirmation first. #' - `update(...)`: Update the group information in Azure Active Directory. #' - `do_operation(...)`: Carry out an arbitrary operation on the group. #' - `sync_fields()`: Synchronise the R object with the app data in Azure Active Directory. #' - `list_members(type=c("user", "group", "application", "servicePrincipal"), filter=NULL, n=Inf)`: Return a list of all members of this group. Specify the `type` argument to limit the result to specific object type(s). #' - `list_owners(type=c("user", "group", "application", "servicePrincipal"), filter=NULL, n=Inf)`: Return a list of all owners of this group. Specify the `type` argument to limit the result to specific object type(s). #' #' @section Initialization: #' Creating new objects of this class should be done via the `create_group` and `get_group` methods of the [ms_graph] and [az_app] classes. Calling the `new()` method for this class only constructs the R object; it does not call the Microsoft Graph API to create the actual group. #' #' @section List methods: #' All `list_*` methods have `filter` and `n` arguments to limit the number of results. The former should be an [OData expression](https://docs.microsoft.com/en-us/graph/query-parameters#filter-parameter) as a string to filter the result set on. The latter should be a number setting the maximum number of (filtered) results to return. The default values are `filter=NULL` and `n=Inf`. 
If `n=NULL`, the `ms_graph_pager` iterator object is returned instead to allow manual iteration over the results. #' #' Support in the underlying Graph API for OData queries is patchy. Not all endpoints that return lists of objects support filtering, and if they do, they may not allow all of the defined operators. If your filtering expression results in an error, you can carry out the operation without filtering and then filter the results on the client side. #' @seealso #' [ms_graph], [az_app], [az_user], [az_object] #' #' [Microsoft Graph overview](https://docs.microsoft.com/en-us/graph/overview), #' [REST API reference](https://docs.microsoft.com/en-us/graph/api/overview?view=graph-rest-1.0) #' #' @examples #' \dontrun{ #' #' gr <- get_graph_login() #' usr <- gr$get_user("myname@aadtenant.com") #' #' grps <- usr$list_group_memberships() #' grp <- gr$get_group(grps[1]) #' #' grp$list_members() #' grp$list_owners() #' #' # capping the number of results #' grp$list_members(n=10) #' #' # get the pager object for a listing method #' pager <- grp$list_members(n=NULL) #' pager$value #' #' } #' @format An R6 object of class `az_group`, inheriting from `az_object`. 
#' @export az_group <- R6::R6Class("az_group", inherit=az_object, public=list( initialize=function(token, tenant=NULL, properties=NULL) { self$type <- "group" private$api_type <- "groups" super$initialize(token, tenant, properties) }, list_members=function(type=c("user", "group", "application", "servicePrincipal"), filter=NULL, n=Inf) { opts <- list(`$filter`=filter, `$count`=if(!is.null(filter)) "true") hdrs <- if(!is.null(filter)) httr::add_headers(consistencyLevel="eventual") pager <- self$get_list_pager(self$do_operation("members", options=opts, hdrs), type_filter=type) extract_list_values(pager, n) }, list_owners=function(type=c("user", "group", "application", "servicePrincipal"), filter=NULL, n=Inf) { opts <- list(`$filter`=filter, `$count`=if(!is.null(filter)) "true") hdrs <- if(!is.null(filter)) httr::add_headers(consistencyLevel="eventual") pager <- self$get_list_pager(self$do_operation("owners", options=opts, hdrs), type_filter=type) extract_list_values(pager, n) }, print=function(...) { group_type <- if("Unified" %in% self$properties$groupTypes) "Microsoft 365" else if(!self$properties$mailEnabled) "Security" else if(self$properties$securityEnabled) "Mail-enabled security" else "Distribution" cat("<", group_type, " group '", self$properties$displayName, "'>\n", sep="") cat(" directory id:", self$properties$id, "\n") cat(" description:", self$properties$description, "\n") cat("---\n") cat(format_public_methods(self)) invisible(self) } ))
/R/az_group.R
no_license
cran/AzureGraph
R
false
false
4,959
r
#' Group in Azure Active Directory #' #' Class representing an AAD group. #' #' @docType class #' @section Fields: #' - `token`: The token used to authenticate with the Graph host. #' - `tenant`: The Azure Active Directory tenant for this group. #' - `type`: always "group" for a group object. #' - `properties`: The group properties. #' @section Methods: #' - `new(...)`: Initialize a new group object. Do not call this directly; see 'Initialization' below. #' - `delete(confirm=TRUE)`: Delete a group. By default, ask for confirmation first. #' - `update(...)`: Update the group information in Azure Active Directory. #' - `do_operation(...)`: Carry out an arbitrary operation on the group. #' - `sync_fields()`: Synchronise the R object with the app data in Azure Active Directory. #' - `list_members(type=c("user", "group", "application", "servicePrincipal"), filter=NULL, n=Inf)`: Return a list of all members of this group. Specify the `type` argument to limit the result to specific object type(s). #' - `list_owners(type=c("user", "group", "application", "servicePrincipal"), filter=NULL, n=Inf)`: Return a list of all owners of this group. Specify the `type` argument to limit the result to specific object type(s). #' #' @section Initialization: #' Creating new objects of this class should be done via the `create_group` and `get_group` methods of the [ms_graph] and [az_app] classes. Calling the `new()` method for this class only constructs the R object; it does not call the Microsoft Graph API to create the actual group. #' #' @section List methods: #' All `list_*` methods have `filter` and `n` arguments to limit the number of results. The former should be an [OData expression](https://docs.microsoft.com/en-us/graph/query-parameters#filter-parameter) as a string to filter the result set on. The latter should be a number setting the maximum number of (filtered) results to return. The default values are `filter=NULL` and `n=Inf`. 
If `n=NULL`, the `ms_graph_pager` iterator object is returned instead to allow manual iteration over the results. #' #' Support in the underlying Graph API for OData queries is patchy. Not all endpoints that return lists of objects support filtering, and if they do, they may not allow all of the defined operators. If your filtering expression results in an error, you can carry out the operation without filtering and then filter the results on the client side. #' @seealso #' [ms_graph], [az_app], [az_user], [az_object] #' #' [Microsoft Graph overview](https://docs.microsoft.com/en-us/graph/overview), #' [REST API reference](https://docs.microsoft.com/en-us/graph/api/overview?view=graph-rest-1.0) #' #' @examples #' \dontrun{ #' #' gr <- get_graph_login() #' usr <- gr$get_user("myname@aadtenant.com") #' #' grps <- usr$list_group_memberships() #' grp <- gr$get_group(grps[1]) #' #' grp$list_members() #' grp$list_owners() #' #' # capping the number of results #' grp$list_members(n=10) #' #' # get the pager object for a listing method #' pager <- grp$list_members(n=NULL) #' pager$value #' #' } #' @format An R6 object of class `az_group`, inheriting from `az_object`. 
#' @export az_group <- R6::R6Class("az_group", inherit=az_object, public=list( initialize=function(token, tenant=NULL, properties=NULL) { self$type <- "group" private$api_type <- "groups" super$initialize(token, tenant, properties) }, list_members=function(type=c("user", "group", "application", "servicePrincipal"), filter=NULL, n=Inf) { opts <- list(`$filter`=filter, `$count`=if(!is.null(filter)) "true") hdrs <- if(!is.null(filter)) httr::add_headers(consistencyLevel="eventual") pager <- self$get_list_pager(self$do_operation("members", options=opts, hdrs), type_filter=type) extract_list_values(pager, n) }, list_owners=function(type=c("user", "group", "application", "servicePrincipal"), filter=NULL, n=Inf) { opts <- list(`$filter`=filter, `$count`=if(!is.null(filter)) "true") hdrs <- if(!is.null(filter)) httr::add_headers(consistencyLevel="eventual") pager <- self$get_list_pager(self$do_operation("owners", options=opts, hdrs), type_filter=type) extract_list_values(pager, n) }, print=function(...) { group_type <- if("Unified" %in% self$properties$groupTypes) "Microsoft 365" else if(!self$properties$mailEnabled) "Security" else if(self$properties$securityEnabled) "Mail-enabled security" else "Distribution" cat("<", group_type, " group '", self$properties$displayName, "'>\n", sep="") cat(" directory id:", self$properties$id, "\n") cat(" description:", self$properties$description, "\n") cat("---\n") cat(format_public_methods(self)) invisible(self) } ))
#Loading the required libraries library('caret') #Seeting the random seed set.seed(1) #Loading the hackathon dataset data <- read.csv('data.csv') str(data) #sum(is.na(data)) #preProcValues <- preProcess(data, method = c("medianImpute","center","scale")) #library('RANN') #sum(is.na(data_processed)) data_processed <- data #Spliting training set into two parts based on outcome: 75% and 25% index <- createDataPartition(data_processed$class, p=0.8, list=FALSE) trainSet <- data_processed[ index,] testSet <- data_processed[-index,] #Defining the training controls for multiple models fitControl <- trainControl( method = "repeatedcv", number = 5, savePredictions = 'final', classProbs = T) #Defining the predictors and outcome predictors<-c("X1", "X2", "X3", "X4") outcomeName <- "class" #Training the random forest model model_rf <- train(trainSet[,predictors],trainSet[,outcomeName], method='rf', trControl=fitControl, tuneLength=3) #Predicting using random forest model testSet$pred_rf <- predict(object = model_rf, testSet[, predictors]) confusionMatrix(testSet$class, testSet$pred_rf) a <- confusionMatrix(testSet$class, testSet$pred_rf)$overall['Accuracy'] #Training the knn model model_knn <- train(trainSet[,predictors],trainSet[,outcomeName],method='knn',trControl=fitControl,tuneLength=3) #Predicting using knn model testSet$pred_knn <- predict(object = model_knn,testSet[,predictors]) #Checking the accuracy of the random forest model confusionMatrix(testSet$class,testSet$pred_knn) b <- confusionMatrix(testSet$class, testSet$pred_knn)$overall['Accuracy'] #Training the naive model model_naive <- train(trainSet[,predictors],trainSet[,outcomeName],method='naive_bayes',trControl=fitControl,tuneLength=3) #Predicting using knn model testSet$pred_naive <- predict(object = model_naive,testSet[,predictors]) #Checking the accuracy of the random forest model confusionMatrix(testSet$class,testSet$pred_naive) c <- confusionMatrix(testSet$class, testSet$pred_naive)$overall['Accuracy'] a b c 
trainSet$OOF_pred_rf<-model_rf$pred$R[order(model_rf$pred$rowIndex)] trainSet$OOF_pred_knn<-model_knn$pred$R[order(model_knn$pred$rowIndex)] trainSet$OOF_pred_naive<-model_naive$pred$R[order(model_naive$pred$rowIndex)] testSet$OOF_pred_rf<-predict(model_rf,testSet[predictors],type='prob')$R testSet$OOF_pred_knn<-predict(model_knn,testSet[predictors],type='prob')$R testSet$OOF_pred_naive<-predict(model_naive,testSet[predictors],type='prob')$R predictors_top<-c('OOF_pred_rf','OOF_pred_knn','OOF_pred_naive') model_gbm<- train(trainSet[,predictors_top],trainSet[,outcomeName],method='gbm',trControl=fitControl,tuneLength=3) testSet$gbm_stacked<-predict(model_gbm,testSet[,predictors_top]) testSet$gbm_stacked testSet$class confusionMatrix(testSet$class,testSet$gbm_stacked)$overall['Accuracy']
/R Code/Final Codes/gbm_stack_94.R
no_license
s-ramyalakshmi/ML-project
R
false
false
2,846
r
#Loading the required libraries library('caret') #Seeting the random seed set.seed(1) #Loading the hackathon dataset data <- read.csv('data.csv') str(data) #sum(is.na(data)) #preProcValues <- preProcess(data, method = c("medianImpute","center","scale")) #library('RANN') #sum(is.na(data_processed)) data_processed <- data #Spliting training set into two parts based on outcome: 75% and 25% index <- createDataPartition(data_processed$class, p=0.8, list=FALSE) trainSet <- data_processed[ index,] testSet <- data_processed[-index,] #Defining the training controls for multiple models fitControl <- trainControl( method = "repeatedcv", number = 5, savePredictions = 'final', classProbs = T) #Defining the predictors and outcome predictors<-c("X1", "X2", "X3", "X4") outcomeName <- "class" #Training the random forest model model_rf <- train(trainSet[,predictors],trainSet[,outcomeName], method='rf', trControl=fitControl, tuneLength=3) #Predicting using random forest model testSet$pred_rf <- predict(object = model_rf, testSet[, predictors]) confusionMatrix(testSet$class, testSet$pred_rf) a <- confusionMatrix(testSet$class, testSet$pred_rf)$overall['Accuracy'] #Training the knn model model_knn <- train(trainSet[,predictors],trainSet[,outcomeName],method='knn',trControl=fitControl,tuneLength=3) #Predicting using knn model testSet$pred_knn <- predict(object = model_knn,testSet[,predictors]) #Checking the accuracy of the random forest model confusionMatrix(testSet$class,testSet$pred_knn) b <- confusionMatrix(testSet$class, testSet$pred_knn)$overall['Accuracy'] #Training the naive model model_naive <- train(trainSet[,predictors],trainSet[,outcomeName],method='naive_bayes',trControl=fitControl,tuneLength=3) #Predicting using knn model testSet$pred_naive <- predict(object = model_naive,testSet[,predictors]) #Checking the accuracy of the random forest model confusionMatrix(testSet$class,testSet$pred_naive) c <- confusionMatrix(testSet$class, testSet$pred_naive)$overall['Accuracy'] a b c 
trainSet$OOF_pred_rf<-model_rf$pred$R[order(model_rf$pred$rowIndex)] trainSet$OOF_pred_knn<-model_knn$pred$R[order(model_knn$pred$rowIndex)] trainSet$OOF_pred_naive<-model_naive$pred$R[order(model_naive$pred$rowIndex)] testSet$OOF_pred_rf<-predict(model_rf,testSet[predictors],type='prob')$R testSet$OOF_pred_knn<-predict(model_knn,testSet[predictors],type='prob')$R testSet$OOF_pred_naive<-predict(model_naive,testSet[predictors],type='prob')$R predictors_top<-c('OOF_pred_rf','OOF_pred_knn','OOF_pred_naive') model_gbm<- train(trainSet[,predictors_top],trainSet[,outcomeName],method='gbm',trControl=fitControl,tuneLength=3) testSet$gbm_stacked<-predict(model_gbm,testSet[,predictors_top]) testSet$gbm_stacked testSet$class confusionMatrix(testSet$class,testSet$gbm_stacked)$overall['Accuracy']
library(netdiffuseR) ### Name: classify_adopters ### Title: Classify adopters accordingly to Time of Adoption and Threshold ### levels. ### Aliases: classify_adopters classify classify_adopters.diffnet ### classify_adopters.default ftable.diffnet_adopters ### as.data.frame.diffnet_adopters plot.diffnet_adopters ### ** Examples # Classifying brfarmers ----------------------------------------------------- x <- brfarmersDiffNet diffnet.toa(x)[x$toa==max(x$toa, na.rm = TRUE)] <- NA out <- classify_adopters(x) # This is one way round( with(out, ftable(toa, thr, dnn=c("Time of Adoption", "Threshold")))/ nnodes(x[!is.na(x$toa)])*100, digits=2) # This is other ftable(out) # Can be coerced into a data.frame, e.g. ------------------------------------ ## Not run: ##D View(classify(brfarmersDiffNet)) ##D cbind(as.data.frame(classify(brfarmersDiffNet)), brfarmersDiffNet$toa) ## End(Not run) # Creating a mosaic plot with the medical innovations ----------------------- x <- classify(medInnovationsDiffNet) plot(x)
/data/genthat_extracted_code/netdiffuseR/examples/classify_adopters.Rd.R
no_license
surayaaramli/typeRrh
R
false
false
1,038
r
library(netdiffuseR) ### Name: classify_adopters ### Title: Classify adopters accordingly to Time of Adoption and Threshold ### levels. ### Aliases: classify_adopters classify classify_adopters.diffnet ### classify_adopters.default ftable.diffnet_adopters ### as.data.frame.diffnet_adopters plot.diffnet_adopters ### ** Examples # Classifying brfarmers ----------------------------------------------------- x <- brfarmersDiffNet diffnet.toa(x)[x$toa==max(x$toa, na.rm = TRUE)] <- NA out <- classify_adopters(x) # This is one way round( with(out, ftable(toa, thr, dnn=c("Time of Adoption", "Threshold")))/ nnodes(x[!is.na(x$toa)])*100, digits=2) # This is other ftable(out) # Can be coerced into a data.frame, e.g. ------------------------------------ ## Not run: ##D View(classify(brfarmersDiffNet)) ##D cbind(as.data.frame(classify(brfarmersDiffNet)), brfarmersDiffNet$toa) ## End(Not run) # Creating a mosaic plot with the medical innovations ----------------------- x <- classify(medInnovationsDiffNet) plot(x)
throw <- function(throwCount, diceCount = 3, diceFaceCount = 6) { samples = array(dim = throwCount) for(throw in 1:throwCount) { samples[throw] <- sum(sample( 1:diceFaceCount, diceCount, replace = T)) } return(tabulate(samples, diceCount * diceFaceCount)) } plot(throw(10), col=rainbow(10), type = 'b', xlab = "Aantal ogen", ylab = "aantal keer voorgekomen",sub = "ten times") plot(throw(100), col=rainbow(10), type = 'b', xlab = "Aantal ogen", ylab = "aantal keer voorgekomen",sub = "one hundred times") plot(throw(100), col=rainbow(10), type = 'b', xlab = "Aantal ogen", ylab = "aantal keer voorgekomen",sub = "one hundred times") plot(throw(1000), col=rainbow(10), type = 'b', xlab = "Aantal ogen", ylab = "aantal keer voorgekomen",sub = "one thousand times") plot(throw(10000), col=rainbow(10), type = 'b', xlab = "Aantal ogen", ylab = "aantal keer voorgekomen",sub = "ten thousand times")
/1.5.r
no_license
royb3/statistiekMetR
R
false
false
907
r
throw <- function(throwCount, diceCount = 3, diceFaceCount = 6) { samples = array(dim = throwCount) for(throw in 1:throwCount) { samples[throw] <- sum(sample( 1:diceFaceCount, diceCount, replace = T)) } return(tabulate(samples, diceCount * diceFaceCount)) } plot(throw(10), col=rainbow(10), type = 'b', xlab = "Aantal ogen", ylab = "aantal keer voorgekomen",sub = "ten times") plot(throw(100), col=rainbow(10), type = 'b', xlab = "Aantal ogen", ylab = "aantal keer voorgekomen",sub = "one hundred times") plot(throw(100), col=rainbow(10), type = 'b', xlab = "Aantal ogen", ylab = "aantal keer voorgekomen",sub = "one hundred times") plot(throw(1000), col=rainbow(10), type = 'b', xlab = "Aantal ogen", ylab = "aantal keer voorgekomen",sub = "one thousand times") plot(throw(10000), col=rainbow(10), type = 'b', xlab = "Aantal ogen", ylab = "aantal keer voorgekomen",sub = "ten thousand times")
# +-----------------------------+ +-------------------------------+ # | Main R process | | Subprocess 1 | # | +------------------------+ | | +---------------------------+ | # | | test_dir_parallel() | | | | test_file() | | # | | +-------------------+ | | | | +-----------------------+ | | # | | | Event loop |< ------+ | | | SubprocessReporter | | | # | | +-------------------+ | | | | | | +-------------------+ | | | # | | | | | | | | | | test_that() | | | | # | | v | | | | | | +-------------------+ | | | # | | +-------------------+ | | | | | | | | | | # | | | Progress2Reporter | | | | | | | v | | | # | | +-------------------+ | | | | | | +-------------------+ | | | # | +------------------------+ | |--------| signalCondition() | | | | # +-----------------------------+ | | | | +-------------------+ | | | # | | | +-----------------------+ | | # | | +---------------------------+ | # | +-------------------------------+ # | +-------------------------------+ # |--| Subprocess 2 | # | +-------------------------------+ # | +-------------------------------+ # +--| Subprocess 3 | # +-------------------------------+ # ... # # ## Notes # # * Subprocesses run `callr::r_session` R sessions. They are re-used, # one R session can be used for several test_file() calls. # * Helper and setup files are loaded in the subprocesses after this. # * The main process puts all test files in the task queue, and then # runs an event loop. test_files_parallel <- function( test_dir, test_package, test_paths, load_helpers = TRUE, reporter = default_reporter(), env = NULL, stop_on_failure = FALSE, stop_on_warning = FALSE, load_package = c("none", "installed", "source") ) { reporters <- test_files_reporter(reporter) # TODO: support timeouts. 20-30s for each file by default? 
num_workers <- min(default_num_cpus(), length(test_paths)) message( "Starting ", num_workers, " test process", if (num_workers != 1) "es" ) # Set up work queue ------------------------------------------ queue <- NULL withr::defer(queue_teardown(queue)) # Start workers in parallel and add test tasks to queue. queue <- queue_setup( test_paths = test_paths, test_package = test_package, test_dir = test_dir, load_helpers = load_helpers, num_workers = num_workers, load_package = load_package ) with_reporter(reporters$multi, { parallel_update <- reporter$capabilities$parallel_update if (parallel_update) { parallel_event_loop_smooth(queue, reporters) } else { parallel_event_loop_chunky(queue, reporters) } }) test_files_check(reporters$list$get_results(), stop_on_failure = stop_on_failure, stop_on_warning = stop_on_warning ) } default_num_cpus <- function() { # Use common option, if set ncpus <- getOption("Ncpus", NULL) if (!is.null(ncpus) && !is_integer(ncpus)) { stop("`getOption(Ncpus)` must be integer") } if (!is.null(ncpus)) { return(ncpus) } # Otherwise detect. If we cannot detect, then compromise. if (ps::ps_is_supported()) { ncpus <- ps::ps_cpu_count() } else { ncpus <- 2L } # But allow capping with an env var max_env <- Sys.getenv("TESTTHAT_MAX_CPUS", "") if (max_env != "") { max_env <- as.integer(max_env) if (is.na(max_env)) abort("TESTTHAT_MAX_CPUS must be an integer") ncpus <- min(ncpus, max_env) } ncpus } parallel_event_loop_smooth <- function(queue, reporters) { update_interval <- 0.1 next_update <- proc.time()[[3]] + update_interval while (!queue$is_idle()) { # How much time do we have to poll before the next UI update? 
now <- proc.time()[[3]] poll_time <- max(next_update - now, 0) next_update <- now + update_interval msgs <- queue$poll(poll_time) updated <- FALSE for (x in msgs) { if (x$code != PROCESS_MSG) { next } m <- x$message if (!inherits(m, "testthat_message")) { message(m) next } if (m$cmd != "DONE") { reporters$multi$start_file(m$filename) do.call(reporters$multi[[m$cmd]], m$args) updated <- TRUE } } # We need to spin, even if there were no events if (!updated) reporters$multi$update() } } parallel_event_loop_chunky <- function(queue, reporters) { files <- list() while (!queue$is_idle()) { msgs <- queue$poll(Inf) for (x in msgs) { if (x$code != PROCESS_MSG) { next } m <- x$message if (!inherits(m, "testthat_message")) { message(m) next } # Record all events until we get end of file, then we replay them all # with the local reporters. This prevents out of order reporting. if (m$cmd != "DONE") { files[[m$filename]] <- append(files[[m$filename]], list(m)) } else { replay_events(reporters$multi, files[[m$filename]]) reporters$multi$end_context_if_started() files[[m$filename]] <- NULL } } } } replay_events <- function(reporter, events) { for (event in events) { do.call(reporter[[event$cmd]], event$args) } } queue_setup <- function(test_paths, test_package, test_dir, num_workers, load_helpers, load_package) { # TODO: observe `load_package`, but the "none" default is not # OK for the subprocess, because it'll not have the tested package if (load_package == "none") load_package <- "source" # TODO: similarly, load_helpers = FALSE, coming from devtools, # is not appropriate in the subprocess load_helpers <- TRUE test_package <- test_package %||% Sys.getenv("TESTTHAT_PKG") # TODO: meaningful error if startup fails load_hook <- expr(asNamespace("testthat")$queue_process_setup( test_package = !!test_package, test_dir = !!test_dir, load_helpers = !!load_helpers, load_package = !!load_package )) queue <- task_q$new(concurrency = num_workers, load_hook = load_hook) fun <- 
transport_fun(function(path) asNamespace("testthat")$queue_task(path)) for (path in test_paths) { queue$push(fun, list(path)) } queue } queue_process_setup <- function(test_package, test_dir, load_helpers, load_package) { env <- asNamespace("testthat")$test_files_setup_env( test_package, test_dir, load_package ) asNamespace("testthat")$test_files_setup_state( test_dir = test_dir, test_package = test_package, load_helpers = load_helpers, env = env, .env = .GlobalEnv ) # Save test environment in global env where it can easily be retrieved .GlobalEnv$.test_env <- env } queue_task <- function(path) { env <- .GlobalEnv$.test_env withr::local_envvar(c("TESTTHAT_PARALLEL" = "true")) reporters <- test_files_reporter(SubprocessReporter$new()) with_reporter(reporters$multi, test_one_file(path, env = env)) NULL } # Clean up subprocesses: we call teardown methods, but we only give them a # second, before killing the whole process tree using ps's env var marker # method. queue_teardown <- function(queue) { if (is.null(queue)) { return() } tasks <- queue$list_tasks() num <- nrow(tasks) clean_fn <- function() { withr::deferred_run(.GlobalEnv) } topoll <- list() for (i in seq_len(num)) { if (!is.null(tasks$worker[[i]])) { tasks$worker[[i]]$call(clean_fn) close(tasks$worker[[i]]$get_input_connection()) topoll <- c(topoll, tasks$worker[[i]]$get_poll_connection()) } } limit <- Sys.time() + 1 while (length(topoll) > 0 && (timeout <- limit - Sys.time()) > 0) { timeout <- as.double(timeout, units = "secs") * 1000 pr <- processx::poll(topoll, as.integer(timeout)) topoll <- topoll[pr != "ready"] } for (i in seq_len(num)) { if (!is.null(tasks$worker[[i]])) { # TODO: kill_tree() only works on Linux, Win, macOS tasks$worker[[i]]$kill_tree() } } } # Reporter that just forwards events in the subprocess back to the main process # # Ideally, these messages would be throttled, i.e. 
if the test code # emits many expectation conditions fast, SubprocessReporter should # collect several of them and only emit a condition a couple of times # a second. End-of-test and end-of-file events would be transmitted # immediately. SubprocessReporter <- R6::R6Class("SubprocessReporter", inherit = Reporter, public = list( start_file = function(filename) { private$filename <- filename private$event("start_file", filename) }, start_test = function(context, test) { private$event("start_test", context, test) }, start_context = function(context) { private$event("start_context", context) }, add_result = function(context, test, result) { if (inherits(result, "expectation_success")) { # Strip bulky components to reduce data transfer cost result[] <- result[c("message", "test")] } private$event("add_result", context, test, result) }, end_test = function(context, test) { private$event("end_test", context, test) }, end_context = function(context) { private$event("end_context", context) }, end_file = function() { private$event("end_file") }, end_reporter = function() { private$event("DONE") } ), private = list( filename = NULL, event = function(cmd, ...) { msg <- list( code = PROCESS_MSG, cmd = cmd, filename = private$filename, time = proc.time()[[3]], args = list(...) ) class(msg) <- c("testthat_message", "callr_message", "condition") signalCondition(msg) } ) )
/R/parallel.R
permissive
pat-s/testthat
R
false
false
10,506
r
# +-----------------------------+ +-------------------------------+ # | Main R process | | Subprocess 1 | # | +------------------------+ | | +---------------------------+ | # | | test_dir_parallel() | | | | test_file() | | # | | +-------------------+ | | | | +-----------------------+ | | # | | | Event loop |< ------+ | | | SubprocessReporter | | | # | | +-------------------+ | | | | | | +-------------------+ | | | # | | | | | | | | | | test_that() | | | | # | | v | | | | | | +-------------------+ | | | # | | +-------------------+ | | | | | | | | | | # | | | Progress2Reporter | | | | | | | v | | | # | | +-------------------+ | | | | | | +-------------------+ | | | # | +------------------------+ | |--------| signalCondition() | | | | # +-----------------------------+ | | | | +-------------------+ | | | # | | | +-----------------------+ | | # | | +---------------------------+ | # | +-------------------------------+ # | +-------------------------------+ # |--| Subprocess 2 | # | +-------------------------------+ # | +-------------------------------+ # +--| Subprocess 3 | # +-------------------------------+ # ... # # ## Notes # # * Subprocesses run `callr::r_session` R sessions. They are re-used, # one R session can be used for several test_file() calls. # * Helper and setup files are loaded in the subprocesses after this. # * The main process puts all test files in the task queue, and then # runs an event loop. test_files_parallel <- function( test_dir, test_package, test_paths, load_helpers = TRUE, reporter = default_reporter(), env = NULL, stop_on_failure = FALSE, stop_on_warning = FALSE, load_package = c("none", "installed", "source") ) { reporters <- test_files_reporter(reporter) # TODO: support timeouts. 20-30s for each file by default? 
num_workers <- min(default_num_cpus(), length(test_paths)) message( "Starting ", num_workers, " test process", if (num_workers != 1) "es" ) # Set up work queue ------------------------------------------ queue <- NULL withr::defer(queue_teardown(queue)) # Start workers in parallel and add test tasks to queue. queue <- queue_setup( test_paths = test_paths, test_package = test_package, test_dir = test_dir, load_helpers = load_helpers, num_workers = num_workers, load_package = load_package ) with_reporter(reporters$multi, { parallel_update <- reporter$capabilities$parallel_update if (parallel_update) { parallel_event_loop_smooth(queue, reporters) } else { parallel_event_loop_chunky(queue, reporters) } }) test_files_check(reporters$list$get_results(), stop_on_failure = stop_on_failure, stop_on_warning = stop_on_warning ) } default_num_cpus <- function() { # Use common option, if set ncpus <- getOption("Ncpus", NULL) if (!is.null(ncpus) && !is_integer(ncpus)) { stop("`getOption(Ncpus)` must be integer") } if (!is.null(ncpus)) { return(ncpus) } # Otherwise detect. If we cannot detect, then compromise. if (ps::ps_is_supported()) { ncpus <- ps::ps_cpu_count() } else { ncpus <- 2L } # But allow capping with an env var max_env <- Sys.getenv("TESTTHAT_MAX_CPUS", "") if (max_env != "") { max_env <- as.integer(max_env) if (is.na(max_env)) abort("TESTTHAT_MAX_CPUS must be an integer") ncpus <- min(ncpus, max_env) } ncpus } parallel_event_loop_smooth <- function(queue, reporters) { update_interval <- 0.1 next_update <- proc.time()[[3]] + update_interval while (!queue$is_idle()) { # How much time do we have to poll before the next UI update? 
now <- proc.time()[[3]] poll_time <- max(next_update - now, 0) next_update <- now + update_interval msgs <- queue$poll(poll_time) updated <- FALSE for (x in msgs) { if (x$code != PROCESS_MSG) { next } m <- x$message if (!inherits(m, "testthat_message")) { message(m) next } if (m$cmd != "DONE") { reporters$multi$start_file(m$filename) do.call(reporters$multi[[m$cmd]], m$args) updated <- TRUE } } # We need to spin, even if there were no events if (!updated) reporters$multi$update() } } parallel_event_loop_chunky <- function(queue, reporters) { files <- list() while (!queue$is_idle()) { msgs <- queue$poll(Inf) for (x in msgs) { if (x$code != PROCESS_MSG) { next } m <- x$message if (!inherits(m, "testthat_message")) { message(m) next } # Record all events until we get end of file, then we replay them all # with the local reporters. This prevents out of order reporting. if (m$cmd != "DONE") { files[[m$filename]] <- append(files[[m$filename]], list(m)) } else { replay_events(reporters$multi, files[[m$filename]]) reporters$multi$end_context_if_started() files[[m$filename]] <- NULL } } } } replay_events <- function(reporter, events) { for (event in events) { do.call(reporter[[event$cmd]], event$args) } } queue_setup <- function(test_paths, test_package, test_dir, num_workers, load_helpers, load_package) { # TODO: observe `load_package`, but the "none" default is not # OK for the subprocess, because it'll not have the tested package if (load_package == "none") load_package <- "source" # TODO: similarly, load_helpers = FALSE, coming from devtools, # is not appropriate in the subprocess load_helpers <- TRUE test_package <- test_package %||% Sys.getenv("TESTTHAT_PKG") # TODO: meaningful error if startup fails load_hook <- expr(asNamespace("testthat")$queue_process_setup( test_package = !!test_package, test_dir = !!test_dir, load_helpers = !!load_helpers, load_package = !!load_package )) queue <- task_q$new(concurrency = num_workers, load_hook = load_hook) fun <- 
transport_fun(function(path) asNamespace("testthat")$queue_task(path)) for (path in test_paths) { queue$push(fun, list(path)) } queue } queue_process_setup <- function(test_package, test_dir, load_helpers, load_package) { env <- asNamespace("testthat")$test_files_setup_env( test_package, test_dir, load_package ) asNamespace("testthat")$test_files_setup_state( test_dir = test_dir, test_package = test_package, load_helpers = load_helpers, env = env, .env = .GlobalEnv ) # Save test environment in global env where it can easily be retrieved .GlobalEnv$.test_env <- env } queue_task <- function(path) { env <- .GlobalEnv$.test_env withr::local_envvar(c("TESTTHAT_PARALLEL" = "true")) reporters <- test_files_reporter(SubprocessReporter$new()) with_reporter(reporters$multi, test_one_file(path, env = env)) NULL } # Clean up subprocesses: we call teardown methods, but we only give them a # second, before killing the whole process tree using ps's env var marker # method. queue_teardown <- function(queue) { if (is.null(queue)) { return() } tasks <- queue$list_tasks() num <- nrow(tasks) clean_fn <- function() { withr::deferred_run(.GlobalEnv) } topoll <- list() for (i in seq_len(num)) { if (!is.null(tasks$worker[[i]])) { tasks$worker[[i]]$call(clean_fn) close(tasks$worker[[i]]$get_input_connection()) topoll <- c(topoll, tasks$worker[[i]]$get_poll_connection()) } } limit <- Sys.time() + 1 while (length(topoll) > 0 && (timeout <- limit - Sys.time()) > 0) { timeout <- as.double(timeout, units = "secs") * 1000 pr <- processx::poll(topoll, as.integer(timeout)) topoll <- topoll[pr != "ready"] } for (i in seq_len(num)) { if (!is.null(tasks$worker[[i]])) { # TODO: kill_tree() only works on Linux, Win, macOS tasks$worker[[i]]$kill_tree() } } } # Reporter that just forwards events in the subprocess back to the main process # # Ideally, these messages would be throttled, i.e. 
if the test code # emits many expectation conditions fast, SubprocessReporter should # collect several of them and only emit a condition a couple of times # a second. End-of-test and end-of-file events would be transmitted # immediately. SubprocessReporter <- R6::R6Class("SubprocessReporter", inherit = Reporter, public = list( start_file = function(filename) { private$filename <- filename private$event("start_file", filename) }, start_test = function(context, test) { private$event("start_test", context, test) }, start_context = function(context) { private$event("start_context", context) }, add_result = function(context, test, result) { if (inherits(result, "expectation_success")) { # Strip bulky components to reduce data transfer cost result[] <- result[c("message", "test")] } private$event("add_result", context, test, result) }, end_test = function(context, test) { private$event("end_test", context, test) }, end_context = function(context) { private$event("end_context", context) }, end_file = function() { private$event("end_file") }, end_reporter = function() { private$event("DONE") } ), private = list( filename = NULL, event = function(cmd, ...) { msg <- list( code = PROCESS_MSG, cmd = cmd, filename = private$filename, time = proc.time()[[3]], args = list(...) ) class(msg) <- c("testthat_message", "callr_message", "condition") signalCondition(msg) } ) )
require(dplyr) library(dplyr) #step1 Read data featuresname <- read.table("features.txt", stringsAsFactors = FALSE) activitiesdescription <- read.table("activity_labels.txt", stringsAsFactors = FALSE) trdata <- read.table("./train/X_train.txt", stringsAsFactors = FALSE) tedata <- read.table("./test/X_test.txt", stringsAsFactors = FALSE) tractivities <- read.table("./train/y_train.txt", stringsAsFactors = FALSE) teactivities <- read.table("./test/y_test.txt", stringsAsFactors = FALSE) tesubject <- read.table("./test/subject_test.txt", stringsAsFactors = FALSE) trsubject <- read.table("./train/subject_train.txt", stringsAsFactors = FALSE) #sept2 append both train,test, subjects and activites data in one originDataTotal <- rbind(trdata, tedata) activitiesTotal <- rbind(tractivities, teactivities) subjectTotal <- rbind(trsubject, tesubject) #step3 select only the mean and std colums from the features list finalcolumnsId <- featuresname$V1[grepl('mean\\(\\)|std\\(\\)', featuresname$V2)] datastep1 <- select(originDataTotal, finalcolumnsId) selActivities <- activitiesdescription$V2[activitiesTotal$V1] featurenames <- featuresname$V2[finalcolumnsId] colnames(datastep1) <- featurenames datastep1 <- cbind(datastep1, "subject" = subjectTotal$V1) datastep2 <- cbind(datastep1, "activity" = selActivities) #step4 generate the final dataset finaldata <- datastep2 %>% group_by(activity, subject) %>% summarise_each(funs(mean)) #step5 generate the file write.table(finaldata, "finaldata.txt", row.names = FALSE)
/run_analysis.R
no_license
jvillarreal3261/DataClean
R
false
false
1,556
r
require(dplyr) library(dplyr) #step1 Read data featuresname <- read.table("features.txt", stringsAsFactors = FALSE) activitiesdescription <- read.table("activity_labels.txt", stringsAsFactors = FALSE) trdata <- read.table("./train/X_train.txt", stringsAsFactors = FALSE) tedata <- read.table("./test/X_test.txt", stringsAsFactors = FALSE) tractivities <- read.table("./train/y_train.txt", stringsAsFactors = FALSE) teactivities <- read.table("./test/y_test.txt", stringsAsFactors = FALSE) tesubject <- read.table("./test/subject_test.txt", stringsAsFactors = FALSE) trsubject <- read.table("./train/subject_train.txt", stringsAsFactors = FALSE) #sept2 append both train,test, subjects and activites data in one originDataTotal <- rbind(trdata, tedata) activitiesTotal <- rbind(tractivities, teactivities) subjectTotal <- rbind(trsubject, tesubject) #step3 select only the mean and std colums from the features list finalcolumnsId <- featuresname$V1[grepl('mean\\(\\)|std\\(\\)', featuresname$V2)] datastep1 <- select(originDataTotal, finalcolumnsId) selActivities <- activitiesdescription$V2[activitiesTotal$V1] featurenames <- featuresname$V2[finalcolumnsId] colnames(datastep1) <- featurenames datastep1 <- cbind(datastep1, "subject" = subjectTotal$V1) datastep2 <- cbind(datastep1, "activity" = selActivities) #step4 generate the final dataset finaldata <- datastep2 %>% group_by(activity, subject) %>% summarise_each(funs(mean)) #step5 generate the file write.table(finaldata, "finaldata.txt", row.names = FALSE)
# This scripts impements the causalMatch procedure library(tidyr) # Match each of the observations match_to_k <- function(target, initial, t = 1, X) { # Select only the individuals according to the Tr indicator from Target target <- target$dfObserved[target$dfObserved$t == t, X, drop = F] # Select only covariates from the initial location initialwithY <- initial$dfObserved[initial$dfObserved$t == t, , drop = F] initial <- initial$dfObserved[initial$dfObserved$t == t, X, drop = F] # Create the list in which all observations are going to be saved DistAll <- list() # Start loop for (i in 1:nrow(target)) { # Take the distance from each other observation from the initial dataset Distances <- numeric() Names <- character() for (j in 1:nrow(initial)) { TwoRows <- rbind(target[i, , drop = F], initial[j, , drop = F]) computeDist <- dist(TwoRows) toReturn <- as.numeric(computeDist) names(toReturn) <- rownames(initial[j, , drop = F]) Distances[j] <- toReturn Names[j] <- names(toReturn) } Distances <- setNames(Distances, Names) DistAll[[rownames(target[i,,drop=F])]] <- Distances } class(DistAll) <- 'CausalMatchDist' attr(DistAll, 'initial.set') <- initialwithY attr(DistAll, 'target.set') <- target return(DistAll) } # Get the outcomes get_outcomes <- function(CausalMatchDist) { if (class(CausalMatchDist) != 'CausalMatchDist') return(NULL) initial <- attr(CausalMatchDist, 'initial.set') ys <- list() outcome <- numeric() for (i in CausalMatchDist) { whichYs <- names(which(i == min(i))) ys <- append(ys, whichYs) y <- mean(initial[rownames(initial) == whichYs, 'y']) outcome <- c(outcome, y) } attr(outcome, 'index.list') <- ys return(outcome) } causalMatch <- function(target, initial, X, seed = NULL) { target$assignTreatment(seed) # first for loop dist_t <- match_to_k(target, initial, t = 1, X) matched_y_t <- get_outcomes(dist_t) matched_y_t_mean <- mean(matched_y_t) # second for loop dist_c <- match_to_k(target, initial, t = 0, X) matched_y_c <- get_outcomes(dist_c) 
matched_y_c_mean <- mean(matched_y_c) prediction <- matched_y_t_mean - matched_y_c_mean listtoReturn <- list( initial_ate = initial$ate, target_ate = target$ate, predicted_ate = prediction ) attr(listtoReturn, 'prediction.error') <- (prediction - target$ate)^2 attr(listtoReturn, 'targetinitial.error') <- (initial$ate - target$ate)^2 print('Done. Success. Matched.') return(listtoReturn) }
/functions/causalMatch.R
no_license
lubospernis/dissertation
R
false
false
2,596
r
# This scripts impements the causalMatch procedure library(tidyr) # Match each of the observations match_to_k <- function(target, initial, t = 1, X) { # Select only the individuals according to the Tr indicator from Target target <- target$dfObserved[target$dfObserved$t == t, X, drop = F] # Select only covariates from the initial location initialwithY <- initial$dfObserved[initial$dfObserved$t == t, , drop = F] initial <- initial$dfObserved[initial$dfObserved$t == t, X, drop = F] # Create the list in which all observations are going to be saved DistAll <- list() # Start loop for (i in 1:nrow(target)) { # Take the distance from each other observation from the initial dataset Distances <- numeric() Names <- character() for (j in 1:nrow(initial)) { TwoRows <- rbind(target[i, , drop = F], initial[j, , drop = F]) computeDist <- dist(TwoRows) toReturn <- as.numeric(computeDist) names(toReturn) <- rownames(initial[j, , drop = F]) Distances[j] <- toReturn Names[j] <- names(toReturn) } Distances <- setNames(Distances, Names) DistAll[[rownames(target[i,,drop=F])]] <- Distances } class(DistAll) <- 'CausalMatchDist' attr(DistAll, 'initial.set') <- initialwithY attr(DistAll, 'target.set') <- target return(DistAll) } # Get the outcomes get_outcomes <- function(CausalMatchDist) { if (class(CausalMatchDist) != 'CausalMatchDist') return(NULL) initial <- attr(CausalMatchDist, 'initial.set') ys <- list() outcome <- numeric() for (i in CausalMatchDist) { whichYs <- names(which(i == min(i))) ys <- append(ys, whichYs) y <- mean(initial[rownames(initial) == whichYs, 'y']) outcome <- c(outcome, y) } attr(outcome, 'index.list') <- ys return(outcome) } causalMatch <- function(target, initial, X, seed = NULL) { target$assignTreatment(seed) # first for loop dist_t <- match_to_k(target, initial, t = 1, X) matched_y_t <- get_outcomes(dist_t) matched_y_t_mean <- mean(matched_y_t) # second for loop dist_c <- match_to_k(target, initial, t = 0, X) matched_y_c <- get_outcomes(dist_c) 
matched_y_c_mean <- mean(matched_y_c) prediction <- matched_y_t_mean - matched_y_c_mean listtoReturn <- list( initial_ate = initial$ate, target_ate = target$ate, predicted_ate = prediction ) attr(listtoReturn, 'prediction.error') <- (prediction - target$ate)^2 attr(listtoReturn, 'targetinitial.error') <- (initial$ate - target$ate)^2 print('Done. Success. Matched.') return(listtoReturn) }
## Reading and subseting the data data <- fread("household_power_consumption.txt", na.strings = "?", stringsAsFactors = FALSE) data_sub <- subset(data, Date == "1/2/2007" | Date == "2/2/2007") ## Change format DT <- strptime(paste(data_sub$Date, data_sub$Time, sep = " "), "%d/%m/%Y %H:%M:%S") GAP <- as.numeric(data_sub$Global_active_power) SM1 <- as.numeric(data_sub$Sub_metering_1) SM2 <- as.numeric(data_sub$Sub_metering_2) SM3 <- as.numeric(data_sub$Sub_metering_3) VOL <- as.numeric(data_sub$Voltage) GRP <- as.numeric(data_sub$Global_reactive_power) ## Plot 4 png("plot4.png", width = 480, height = 480) par(mfrow = c(2,2)) plot(DT, GAP, type = "l" , xlab = " ", ylab = "Global Active Power") plot(DT, VOL , type = "l" , xlab = "datetime", ylab = "Voltage" ) plot(DT, SM1, type = "l" , xlab = " ", ylab = "Energy sub metering") lines(DT, SM2, type = "l", col = "red") lines(DT, SM3, type = "l", col = "blue") legend("topright", c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"), col = c("black" , "red" , "blue") , lty = 1, lwd = 2.5) plot(DT, GRP, type = "l", xlab = "datetime", ylab = "Global_reactive_power") dev.off()
/plot4.R
no_license
victoriadm13/Exploratory-Data-Analysis
R
false
false
1,148
r
## Reading and subseting the data data <- fread("household_power_consumption.txt", na.strings = "?", stringsAsFactors = FALSE) data_sub <- subset(data, Date == "1/2/2007" | Date == "2/2/2007") ## Change format DT <- strptime(paste(data_sub$Date, data_sub$Time, sep = " "), "%d/%m/%Y %H:%M:%S") GAP <- as.numeric(data_sub$Global_active_power) SM1 <- as.numeric(data_sub$Sub_metering_1) SM2 <- as.numeric(data_sub$Sub_metering_2) SM3 <- as.numeric(data_sub$Sub_metering_3) VOL <- as.numeric(data_sub$Voltage) GRP <- as.numeric(data_sub$Global_reactive_power) ## Plot 4 png("plot4.png", width = 480, height = 480) par(mfrow = c(2,2)) plot(DT, GAP, type = "l" , xlab = " ", ylab = "Global Active Power") plot(DT, VOL , type = "l" , xlab = "datetime", ylab = "Voltage" ) plot(DT, SM1, type = "l" , xlab = " ", ylab = "Energy sub metering") lines(DT, SM2, type = "l", col = "red") lines(DT, SM3, type = "l", col = "blue") legend("topright", c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"), col = c("black" , "red" , "blue") , lty = 1, lwd = 2.5) plot(DT, GRP, type = "l", xlab = "datetime", ylab = "Global_reactive_power") dev.off()
\name{gmm_fit_kmeans} \alias{gmm_fit_kmeans} \docType{methods} \title{Estimate Gaussian Mixture parameters from kmeans.} \description{ Estimates parameters for Gaussian mixture using kmeans. } \usage{ gmm_fit_kmeans( x, n ) } \arguments{ \item{x}{data vector} \item{n}{number of mixture components} } \value{ Vector of 3*\emph{n} mixture parameters, where \emph{n} is number of mixture components. Structure of p vector is p = c( A1, A2, ..., A\emph{n}, mu1, mu2, ..., mu\emph{n}, sigma1, sigma2, ..., sigma\emph{n} ), where A\emph{i} is the proportion of \emph{i}-th component, mu\emph{i} is the location of \emph{i}-th component, sigma\emph{i} is the scale of \emph{i}-th component. } \author{Andrius Merkys}
/man/gmm_fit_kmeans.Rd
no_license
merkys/MixtureFitting
R
false
false
751
rd
\name{gmm_fit_kmeans} \alias{gmm_fit_kmeans} \docType{methods} \title{Estimate Gaussian Mixture parameters from kmeans.} \description{ Estimates parameters for Gaussian mixture using kmeans. } \usage{ gmm_fit_kmeans( x, n ) } \arguments{ \item{x}{data vector} \item{n}{number of mixture components} } \value{ Vector of 3*\emph{n} mixture parameters, where \emph{n} is number of mixture components. Structure of p vector is p = c( A1, A2, ..., A\emph{n}, mu1, mu2, ..., mu\emph{n}, sigma1, sigma2, ..., sigma\emph{n} ), where A\emph{i} is the proportion of \emph{i}-th component, mu\emph{i} is the location of \emph{i}-th component, sigma\emph{i} is the scale of \emph{i}-th component. } \author{Andrius Merkys}
require(RIdeogram) data(human_karyotype, package="RIdeogram") data(gene_density, package="RIdeogram") data(Random_RNAs_500, package="RIdeogram") ideogram(karyotype = human_karyotype) ideogram(karyotype, overlaid = NULL, label = NULL, label_type = NULL, synteny = NULL, colorset1, colorset2, width, Lx, Ly, output = "chromosome.svg") ideogram(karyotype = human_karyotype, overlaid = gene_density) convertSVG("chromosome.svg", device = "png")
/rideogram.R
no_license
ArvinZoy/Gene_clusters
R
false
false
445
r
require(RIdeogram) data(human_karyotype, package="RIdeogram") data(gene_density, package="RIdeogram") data(Random_RNAs_500, package="RIdeogram") ideogram(karyotype = human_karyotype) ideogram(karyotype, overlaid = NULL, label = NULL, label_type = NULL, synteny = NULL, colorset1, colorset2, width, Lx, Ly, output = "chromosome.svg") ideogram(karyotype = human_karyotype, overlaid = gene_density) convertSVG("chromosome.svg", device = "png")
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/data.R \docType{data} \name{messy_data} \alias{messy_data} \alias{messyData} \title{Messy clinical trial data} \format{ A data frame with 33 observations on the following 7 variables. This data has been designed to show reshaping/tidying of data. \describe{ \item{\code{Subject}}{A numeric vector giving the subject ID} \item{\code{Placebo.1}}{A numeric vector giving the subjects observed value on treatment Placebo at time 1} \item{\code{Placebo.2}}{A numeric vector giving the subjects observed value on treatment Placebo at time 2} \item{\code{Drug1.1}}{A numeric vector giving the subjects observed value on treatment Drug1 at time 1} \item{\code{Drug1.2}}{A numeric vector giving the subjects observed value on treatment Drug1 at time 2} \item{\code{Drug2.1}}{A numeric vector giving the subjects observed value on treatment Drug2 at time 1} \item{\code{Drug2.2}}{A numeric vector giving the subjects observed value on treatment Drug2 at time 2} } } \source{ Simulated data } \usage{ messy_data messyData } \description{ Simulated dataset for examples of reshaping data } \details{ This dataset has be renamed using tidyverse-style snake_case naming conventions. However the original name of the dataset has been kept to ensure backwards compatibility with the book SAMS Teach Yourself R in 24 Hours (ISBN: 978-0-672-33848-9). } \keyword{datasets}
/man/messy_data.Rd
no_license
MangoTheCat/mangoTraining
R
false
true
1,450
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/data.R \docType{data} \name{messy_data} \alias{messy_data} \alias{messyData} \title{Messy clinical trial data} \format{ A data frame with 33 observations on the following 7 variables. This data has been designed to show reshaping/tidying of data. \describe{ \item{\code{Subject}}{A numeric vector giving the subject ID} \item{\code{Placebo.1}}{A numeric vector giving the subjects observed value on treatment Placebo at time 1} \item{\code{Placebo.2}}{A numeric vector giving the subjects observed value on treatment Placebo at time 2} \item{\code{Drug1.1}}{A numeric vector giving the subjects observed value on treatment Drug1 at time 1} \item{\code{Drug1.2}}{A numeric vector giving the subjects observed value on treatment Drug1 at time 2} \item{\code{Drug2.1}}{A numeric vector giving the subjects observed value on treatment Drug2 at time 1} \item{\code{Drug2.2}}{A numeric vector giving the subjects observed value on treatment Drug2 at time 2} } } \source{ Simulated data } \usage{ messy_data messyData } \description{ Simulated dataset for examples of reshaping data } \details{ This dataset has be renamed using tidyverse-style snake_case naming conventions. However the original name of the dataset has been kept to ensure backwards compatibility with the book SAMS Teach Yourself R in 24 Hours (ISBN: 978-0-672-33848-9). } \keyword{datasets}
# icd codes # --------------------------------------- # Date of first in-patient diagnosis - ICD10 h5readAttributes(h5.fn,"f.41280") dateICD10 = h5read(h5.fn,"f.41280/f.41280") colnames(dateICD10) = h5readAttributes(h5.fn,"f.41280/f.41280")$f.41280 rownames(dateICD10) <- sample.id[,1] # Date of first in-patient diagnosis - ICD9 h5readAttributes(h5.fn,"f.41281") dateICD9 = h5read(h5.fn,"f.41281/f.41281") colnames(dateICD9) = h5readAttributes(h5.fn,"f.41281/f.41281")$f.41281 rownames(dateICD9) <- sample.id[,1] # # externalCauses data-coding 19 # h5readAttributes(h5.fn,"f.41201") # externalCauses_datacoding19 = h5read(h5.fn,"f.41201/f.41201") # colnames(externalCauses_datacoding19) = h5readAttributes(h5.fn,"f.41201/f.41201")$f.41201 # rownames(externalCauses_datacoding19) <- sample.id[,1] # Diagnoses - ICD10 h5readAttributes(h5.fn,"f.41270") ICD10 = h5read(h5.fn,"f.41270/f.41270") colnames(ICD10) = h5readAttributes(h5.fn,"f.41270/f.41270")$f.41270 rownames(ICD10) <- sample.id[,1] # Diagnoses - ICD9 h5readAttributes(h5.fn,"f.41271") ICD9 = h5read(h5.fn,"f.41271/f.41271") colnames(ICD9) = h5readAttributes(h5.fn,"f.41271/f.41271")$f.41271 rownames(ICD9) <- sample.id[,1] #Death status h5readAttributes(h5.fn,"f.40001") causeOfDeath = h5read(h5.fn,"f.40001/f.40001") colnames(causeOfDeath) = h5readAttributes(h5.fn,"f.40001/f.40001")$f.40001 rownames(causeOfDeath) <- sample.id[,1] icd_codes_list = list(ICD10 = ICD10, dateICD10= dateICD10, ICD9 = ICD9, dateICD9 = dateICD9 )
/scripts/prs/icd.R
no_license
barbarathorslund/warfarin
R
false
false
1,558
r
# icd codes # --------------------------------------- # Date of first in-patient diagnosis - ICD10 h5readAttributes(h5.fn,"f.41280") dateICD10 = h5read(h5.fn,"f.41280/f.41280") colnames(dateICD10) = h5readAttributes(h5.fn,"f.41280/f.41280")$f.41280 rownames(dateICD10) <- sample.id[,1] # Date of first in-patient diagnosis - ICD9 h5readAttributes(h5.fn,"f.41281") dateICD9 = h5read(h5.fn,"f.41281/f.41281") colnames(dateICD9) = h5readAttributes(h5.fn,"f.41281/f.41281")$f.41281 rownames(dateICD9) <- sample.id[,1] # # externalCauses data-coding 19 # h5readAttributes(h5.fn,"f.41201") # externalCauses_datacoding19 = h5read(h5.fn,"f.41201/f.41201") # colnames(externalCauses_datacoding19) = h5readAttributes(h5.fn,"f.41201/f.41201")$f.41201 # rownames(externalCauses_datacoding19) <- sample.id[,1] # Diagnoses - ICD10 h5readAttributes(h5.fn,"f.41270") ICD10 = h5read(h5.fn,"f.41270/f.41270") colnames(ICD10) = h5readAttributes(h5.fn,"f.41270/f.41270")$f.41270 rownames(ICD10) <- sample.id[,1] # Diagnoses - ICD9 h5readAttributes(h5.fn,"f.41271") ICD9 = h5read(h5.fn,"f.41271/f.41271") colnames(ICD9) = h5readAttributes(h5.fn,"f.41271/f.41271")$f.41271 rownames(ICD9) <- sample.id[,1] #Death status h5readAttributes(h5.fn,"f.40001") causeOfDeath = h5read(h5.fn,"f.40001/f.40001") colnames(causeOfDeath) = h5readAttributes(h5.fn,"f.40001/f.40001")$f.40001 rownames(causeOfDeath) <- sample.id[,1] icd_codes_list = list(ICD10 = ICD10, dateICD10= dateICD10, ICD9 = ICD9, dateICD9 = dateICD9 )
# use this for interactive setup # library(h2o) # library(testthat) # h2o.startLogging() # conn = h2o.init() test.apply <- function() { a_initial <- data.frame( v1=c(1,0,1,0,1,0,1,0,1,0), v2=c(2,2,2,2,2,2,2,2,2,2), v3=c(3,3,3,3,3,3,3,3,3,3), v4=c(3,2,3,2,3,2,3,2,3,2) ) a <- a_initial b <- apply(a, 1, sum) a.h2o <- as.h2o(a_initial, destination_frame="r.hex") b.h2o <- apply(a.h2o, 1, sum) b.h2o.R <- as.matrix(b.h2o) b b.h2o.R expect_that(all(b == b.h2o.R), equals(T)) b <- apply(a, 2, sum) b.h2o <- apply(a.h2o, 2, sum) b.h2o.R <- as.matrix(b.h2o) b b.h2o.R expect_that(all(b == b.h2o.R), equals(T)) } doTest("Test for apply.", test.apply)
/h2o-r/tests/testdir_munging/exec/runit_pub-685.R
permissive
StephRoark/h2o-3
R
false
false
762
r
# use this for interactive setup # library(h2o) # library(testthat) # h2o.startLogging() # conn = h2o.init() test.apply <- function() { a_initial <- data.frame( v1=c(1,0,1,0,1,0,1,0,1,0), v2=c(2,2,2,2,2,2,2,2,2,2), v3=c(3,3,3,3,3,3,3,3,3,3), v4=c(3,2,3,2,3,2,3,2,3,2) ) a <- a_initial b <- apply(a, 1, sum) a.h2o <- as.h2o(a_initial, destination_frame="r.hex") b.h2o <- apply(a.h2o, 1, sum) b.h2o.R <- as.matrix(b.h2o) b b.h2o.R expect_that(all(b == b.h2o.R), equals(T)) b <- apply(a, 2, sum) b.h2o <- apply(a.h2o, 2, sum) b.h2o.R <- as.matrix(b.h2o) b b.h2o.R expect_that(all(b == b.h2o.R), equals(T)) } doTest("Test for apply.", test.apply)
library(dplyr) # 1.1.1 Reading training datasets x_train <- read.table("C:/Users/Lenovo/Desktop/coursera2/Getting_and_Cleaning_DataProject/UCI HAR Dataset/train/X_train.txt") y_train <- read.table("C:/Users/Lenovo/Desktop/coursera2/Getting_and_Cleaning_DataProject/UCI HAR Dataset/train/y_train.txt") subject_train <- read.table("C:/Users/Lenovo/Desktop/coursera2/Getting_and_Cleaning_DataProject/UCI HAR Dataset/train/subject_train.txt") # 1.1.2 Reading test datasets x_test <- read.table("C:/Users/Lenovo/Desktop/coursera2/Getting_and_Cleaning_DataProject/UCI HAR Dataset/test/X_test.txt") y_test <- read.table("C:/Users/Lenovo/Desktop/coursera2/Getting_and_Cleaning_DataProject/UCI HAR Dataset/test/y_test.txt") subject_test <- read.table("C:/Users/Lenovo/Desktop/coursera2/Getting_and_Cleaning_DataProject/UCI HAR Dataset/test/subject_test.txt") # 1.1.3 Reading feature vector features <- read.table("C:/Users/Lenovo/Desktop/coursera2/Getting_and_Cleaning_DataProject/UCI HAR Dataset/features.txt") # 1.1.4 Reading activity labels activityLabels = read.table("C:/Users/Lenovo/Desktop/coursera2/Getting_and_Cleaning_DataProject/UCI HAR Dataset/activity_labels.txt") # 1.2 Assigning variable names colnames(x_train) <- features[,2] colnames(y_train) <- "activityID" colnames(subject_train) <- "subjectID" colnames(x_test) <- features[,2] colnames(y_test) <- "activityID" colnames(subject_test) <- "subjectID" colnames(activityLabels) <- c("activityID", "activityType") # 1.3 Merging all datasets into one set alltrain <- cbind(y_train, subject_train, x_train) alltest <- cbind(y_test, subject_test, x_test) finaldataset <- rbind(alltrain, alltest) # 2. 
Extracting only the measurements on the mean and sd for each measurement # 2.1 Reading column names colNames <- colnames(finaldataset) # 2.2 Create vector for defining ID, mean, and sd mean_and_std <- (grepl("activityID", colNames) | grepl("subjectID", colNames) | grepl("mean..", colNames) | grepl("std...", colNames) ) # 2.3 Making nessesary subset setforMeanandStd <- finaldataset[ , mean_and_std == TRUE] # 3. Use descriptive activity names setWithActivityNames <- merge(setforMeanandStd, activityLabels, by = "activityID", all.x = TRUE) # 4. Label the data set with descriptive variable names # see 1.3, 2.2, 2.3 # 5. Creating a second, independent tidy data set with the avg of each variable for each activity and subject # 5.1 Making a second tidy data set tidySet <- aggregate(. ~subjectID + activityID, setWithActivityNames, mean) tidySet <- tidySet[order(tidySet$subjectID, tidySet$activityID), ] # 5.2 Writing second tidy data set into a txt file write.table(tidySet, "tidySet.txt", row.names = FALSE)
/run_analysis.R
no_license
yashBhardwaJ09/Getting_and_Cleaning_DataProject
R
false
false
2,805
r
library(dplyr) # 1.1.1 Reading training datasets x_train <- read.table("C:/Users/Lenovo/Desktop/coursera2/Getting_and_Cleaning_DataProject/UCI HAR Dataset/train/X_train.txt") y_train <- read.table("C:/Users/Lenovo/Desktop/coursera2/Getting_and_Cleaning_DataProject/UCI HAR Dataset/train/y_train.txt") subject_train <- read.table("C:/Users/Lenovo/Desktop/coursera2/Getting_and_Cleaning_DataProject/UCI HAR Dataset/train/subject_train.txt") # 1.1.2 Reading test datasets x_test <- read.table("C:/Users/Lenovo/Desktop/coursera2/Getting_and_Cleaning_DataProject/UCI HAR Dataset/test/X_test.txt") y_test <- read.table("C:/Users/Lenovo/Desktop/coursera2/Getting_and_Cleaning_DataProject/UCI HAR Dataset/test/y_test.txt") subject_test <- read.table("C:/Users/Lenovo/Desktop/coursera2/Getting_and_Cleaning_DataProject/UCI HAR Dataset/test/subject_test.txt") # 1.1.3 Reading feature vector features <- read.table("C:/Users/Lenovo/Desktop/coursera2/Getting_and_Cleaning_DataProject/UCI HAR Dataset/features.txt") # 1.1.4 Reading activity labels activityLabels = read.table("C:/Users/Lenovo/Desktop/coursera2/Getting_and_Cleaning_DataProject/UCI HAR Dataset/activity_labels.txt") # 1.2 Assigning variable names colnames(x_train) <- features[,2] colnames(y_train) <- "activityID" colnames(subject_train) <- "subjectID" colnames(x_test) <- features[,2] colnames(y_test) <- "activityID" colnames(subject_test) <- "subjectID" colnames(activityLabels) <- c("activityID", "activityType") # 1.3 Merging all datasets into one set alltrain <- cbind(y_train, subject_train, x_train) alltest <- cbind(y_test, subject_test, x_test) finaldataset <- rbind(alltrain, alltest) # 2. 
Extracting only the measurements on the mean and sd for each measurement # 2.1 Reading column names colNames <- colnames(finaldataset) # 2.2 Create vector for defining ID, mean, and sd mean_and_std <- (grepl("activityID", colNames) | grepl("subjectID", colNames) | grepl("mean..", colNames) | grepl("std...", colNames) ) # 2.3 Making nessesary subset setforMeanandStd <- finaldataset[ , mean_and_std == TRUE] # 3. Use descriptive activity names setWithActivityNames <- merge(setforMeanandStd, activityLabels, by = "activityID", all.x = TRUE) # 4. Label the data set with descriptive variable names # see 1.3, 2.2, 2.3 # 5. Creating a second, independent tidy data set with the avg of each variable for each activity and subject # 5.1 Making a second tidy data set tidySet <- aggregate(. ~subjectID + activityID, setWithActivityNames, mean) tidySet <- tidySet[order(tidySet$subjectID, tidySet$activityID), ] # 5.2 Writing second tidy data set into a txt file write.table(tidySet, "tidySet.txt", row.names = FALSE)
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/res.R \name{res_binom} \alias{res_binom} \title{Binomial Residuals} \usage{ res_binom(x, size = 1, prob = 0.5, type = "dev", simulate = FALSE) } \arguments{ \item{x}{A non-negative whole numeric vector of values.} \item{size}{A non-negative whole numeric vector of the number of trials.} \item{prob}{A numeric vector of values between 0 and 1 of the probability of success.} \item{type}{A string of the residual type. 'raw' for raw residuals 'dev' for deviance residuals and 'data' for the data.} \item{simulate}{A flag specifying whether to simulate residuals.} } \value{ An numeric vector of the corresponding residuals. } \description{ Binomial Residuals } \examples{ res_binom(c(0, 1, 2), 2, 0.3) } \seealso{ Other res_dist: \code{\link{res_bern}()}, \code{\link{res_beta_binom}()}, \code{\link{res_gamma_pois_zi}()}, \code{\link{res_gamma_pois}()}, \code{\link{res_gamma}()}, \code{\link{res_lnorm}()}, \code{\link{res_neg_binom}()}, \code{\link{res_norm}()}, \code{\link{res_pois_zi}()}, \code{\link{res_pois}()}, \code{\link{res_skewnorm}()}, \code{\link{res_student}()} } \concept{res_dist}
/man/res_binom.Rd
permissive
poissonconsulting/extras
R
false
true
1,182
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/res.R \name{res_binom} \alias{res_binom} \title{Binomial Residuals} \usage{ res_binom(x, size = 1, prob = 0.5, type = "dev", simulate = FALSE) } \arguments{ \item{x}{A non-negative whole numeric vector of values.} \item{size}{A non-negative whole numeric vector of the number of trials.} \item{prob}{A numeric vector of values between 0 and 1 of the probability of success.} \item{type}{A string of the residual type. 'raw' for raw residuals 'dev' for deviance residuals and 'data' for the data.} \item{simulate}{A flag specifying whether to simulate residuals.} } \value{ An numeric vector of the corresponding residuals. } \description{ Binomial Residuals } \examples{ res_binom(c(0, 1, 2), 2, 0.3) } \seealso{ Other res_dist: \code{\link{res_bern}()}, \code{\link{res_beta_binom}()}, \code{\link{res_gamma_pois_zi}()}, \code{\link{res_gamma_pois}()}, \code{\link{res_gamma}()}, \code{\link{res_lnorm}()}, \code{\link{res_neg_binom}()}, \code{\link{res_norm}()}, \code{\link{res_pois_zi}()}, \code{\link{res_pois}()}, \code{\link{res_skewnorm}()}, \code{\link{res_student}()} } \concept{res_dist}
library(MDplot) ### Name: load_ramachandran ### Title: Load dihedral information (Ramachandran plot input) ### Aliases: load_ramachandran ### Keywords: Ramachandran ### ** Examples # GROMOS load_ramachandran( system.file( "extdata/ramachandran_example.txt.gz", package = "MDplot" ) ) # GROMACS load_ramachandran( system.file( "extdata/ramachandran_example_GROMACS.txt.gz", package = "MDplot" ), mdEngine = "GROMACS" ) # AMBER load_ramachandran( system.file( "extdata/ramachandran_example_AMBER.txt.gz", package = "MDplot" ), mdEngine = "AMBER" )
/data/genthat_extracted_code/MDplot/examples/load_ramchandran.Rd.R
no_license
surayaaramli/typeRrh
R
false
false
593
r
library(MDplot) ### Name: load_ramachandran ### Title: Load dihedral information (Ramachandran plot input) ### Aliases: load_ramachandran ### Keywords: Ramachandran ### ** Examples # GROMOS load_ramachandran( system.file( "extdata/ramachandran_example.txt.gz", package = "MDplot" ) ) # GROMACS load_ramachandran( system.file( "extdata/ramachandran_example_GROMACS.txt.gz", package = "MDplot" ), mdEngine = "GROMACS" ) # AMBER load_ramachandran( system.file( "extdata/ramachandran_example_AMBER.txt.gz", package = "MDplot" ), mdEngine = "AMBER" )
\name{dfbetaPlots} \alias{dfbetaPlots} \alias{dfbetasPlots} \alias{dfbetaPlots.lm} \alias{dfbetasPlots.lm} \title{dfbeta and dfbetas Index Plots} \description{ These functions display index plots of dfbeta (effect on coefficients of deleting each observation in turn) and dfbetas (effect on coefficients of deleting each observation in turn, standardized by a deleted estimate of the coefficient standard error). In the plot of dfbeta, horizontal lines are drawn at 0 and +/- one standard error; in the plot of dfbetas, horizontal lines are drawn and 0 and +/- 1. } \usage{ dfbetaPlots(model, ...) dfbetasPlots(model, ...) \method{dfbetaPlots}{lm}(model, terms= ~ ., intercept=FALSE, layout=NULL, ask, main, xlab, ylab, labels=rownames(dfbeta), id.method="y", id.n=if(id.method[1]=="identify") Inf else 0, id.cex=1, id.col=carPalette()[1], id.location="lr", col=carPalette()[1], grid=TRUE, ...) \method{dfbetasPlots}{lm}(model, terms=~., intercept=FALSE, layout=NULL, ask, main, xlab, ylab, labels=rownames(dfbetas), id.method="y", id.n=if(id.method[1]=="identify") Inf else 0, id.cex=1, id.col=carPalette()[1], id.location="lr", col=carPalette()[1], grid=TRUE, ...) } \arguments{ \item{model}{model object produced by \code{lm} or \code{glm}. } \item{terms}{ A one-sided formula that specifies a subset of the terms in the model. One dfbeta or dfbetas plot is drawn for each regressor. The default \code{~.} is to plot against all terms in the model with the exception of an intercept. For example, the specification \code{terms = ~.-X3} would plot against all terms except for \code{X3}. If this argument is a quoted name of one of the terms, the index plot is drawn for that term only. } \item{intercept}{Include the intercept in the plots; default is \code{FALSE}.} \item{layout}{ If set to a value like \code{c(1, 1)} or \code{c(4, 3)}, the layout of the graph will have this many rows and columns. If not set, the program will select an appropriate layout. 
If the number of graphs exceed nine, you must select the layout yourself, or you will get a maximum of nine per page. If \code{layout=NA}, the function does not set the layout and the user can use the \code{par} function to control the layout, for example to have plots from two models in the same graphics window. } \item{main}{The title of the graph; if missing, one will be supplied. } \item{xlab}{Horizontal axis label; defaults to \code{"Index"}.} \item{ylab}{Vertical axis label; defaults to coefficient name.} \item{ask}{If \code{TRUE}, ask the user before drawing the next plot; if \code{FALSE}, the default, don't ask. } \item{\dots}{optional additional arguments to be passed to \code{\link{plot}}, \code{\link{points}}, and \code{\link{showLabels}}}. \item{id.method, labels, id.n, id.cex, id.col, id.location}{Arguments for the labelling of points. The default is \code{id.n=0} for labeling no points. See \code{\link{showLabels}} for details of these arguments. } \item{col}{color for points; defaults to the first entry in the color \code{\link{carPalette}}.} \item{grid}{If \code{TRUE}, the default, a light-gray background grid is put on the graph} } \value{ \code{NULL}. These functions are used for their side effect: producing plots. } \references{ Fox, J. (2016) \emph{Applied Regression Analysis and Generalized Linear Models}, Third Edition. Sage. Fox, J. and Weisberg, S. (2019) \emph{An R Companion to Applied Regression}, Third Edition, Sage. } \author{John Fox \email{jfox@mcmaster.ca}} \seealso{\code{\link{dfbeta}} ,\code{\link{dfbetas}}} \examples{ dfbetaPlots(lm(prestige ~ income + education + type, data=Duncan)) dfbetasPlots(glm(partic != "not.work" ~ hincome + children, data=Womenlf, family=binomial)) } \keyword{hplot} \keyword{regression}
/man/dfbetaPlots.Rd
no_license
cran/car
R
false
false
3,884
rd
\name{dfbetaPlots} \alias{dfbetaPlots} \alias{dfbetasPlots} \alias{dfbetaPlots.lm} \alias{dfbetasPlots.lm} \title{dfbeta and dfbetas Index Plots} \description{ These functions display index plots of dfbeta (effect on coefficients of deleting each observation in turn) and dfbetas (effect on coefficients of deleting each observation in turn, standardized by a deleted estimate of the coefficient standard error). In the plot of dfbeta, horizontal lines are drawn at 0 and +/- one standard error; in the plot of dfbetas, horizontal lines are drawn and 0 and +/- 1. } \usage{ dfbetaPlots(model, ...) dfbetasPlots(model, ...) \method{dfbetaPlots}{lm}(model, terms= ~ ., intercept=FALSE, layout=NULL, ask, main, xlab, ylab, labels=rownames(dfbeta), id.method="y", id.n=if(id.method[1]=="identify") Inf else 0, id.cex=1, id.col=carPalette()[1], id.location="lr", col=carPalette()[1], grid=TRUE, ...) \method{dfbetasPlots}{lm}(model, terms=~., intercept=FALSE, layout=NULL, ask, main, xlab, ylab, labels=rownames(dfbetas), id.method="y", id.n=if(id.method[1]=="identify") Inf else 0, id.cex=1, id.col=carPalette()[1], id.location="lr", col=carPalette()[1], grid=TRUE, ...) } \arguments{ \item{model}{model object produced by \code{lm} or \code{glm}. } \item{terms}{ A one-sided formula that specifies a subset of the terms in the model. One dfbeta or dfbetas plot is drawn for each regressor. The default \code{~.} is to plot against all terms in the model with the exception of an intercept. For example, the specification \code{terms = ~.-X3} would plot against all terms except for \code{X3}. If this argument is a quoted name of one of the terms, the index plot is drawn for that term only. } \item{intercept}{Include the intercept in the plots; default is \code{FALSE}.} \item{layout}{ If set to a value like \code{c(1, 1)} or \code{c(4, 3)}, the layout of the graph will have this many rows and columns. If not set, the program will select an appropriate layout. 
If the number of graphs exceed nine, you must select the layout yourself, or you will get a maximum of nine per page. If \code{layout=NA}, the function does not set the layout and the user can use the \code{par} function to control the layout, for example to have plots from two models in the same graphics window. } \item{main}{The title of the graph; if missing, one will be supplied. } \item{xlab}{Horizontal axis label; defaults to \code{"Index"}.} \item{ylab}{Vertical axis label; defaults to coefficient name.} \item{ask}{If \code{TRUE}, ask the user before drawing the next plot; if \code{FALSE}, the default, don't ask. } \item{\dots}{optional additional arguments to be passed to \code{\link{plot}}, \code{\link{points}}, and \code{\link{showLabels}}}. \item{id.method, labels, id.n, id.cex, id.col, id.location}{Arguments for the labelling of points. The default is \code{id.n=0} for labeling no points. See \code{\link{showLabels}} for details of these arguments. } \item{col}{color for points; defaults to the first entry in the color \code{\link{carPalette}}.} \item{grid}{If \code{TRUE}, the default, a light-gray background grid is put on the graph} } \value{ \code{NULL}. These functions are used for their side effect: producing plots. } \references{ Fox, J. (2016) \emph{Applied Regression Analysis and Generalized Linear Models}, Third Edition. Sage. Fox, J. and Weisberg, S. (2019) \emph{An R Companion to Applied Regression}, Third Edition, Sage. } \author{John Fox \email{jfox@mcmaster.ca}} \seealso{\code{\link{dfbeta}} ,\code{\link{dfbetas}}} \examples{ dfbetaPlots(lm(prestige ~ income + education + type, data=Duncan)) dfbetasPlots(glm(partic != "not.work" ~ hincome + children, data=Womenlf, family=binomial)) } \keyword{hplot} \keyword{regression}
\name{RcmdrSurvivalTPlugin-package} \alias{RcmdrSurvivalTPlugin-package} \alias{RcmdrSurvivalTPlugin} \docType{package} \title{ Install the SurvivalT Rcmdr Plug-In } \description{ This package provides an Rcmdr \dQuote{plug-in} based on the survival package for easier student access to survival analysis. } \details{ \tabular{ll}{ Package: \tab RcmdrSurvivalTPlugin\cr Type: \tab Package\cr Version: \tab 1.0-7\cr Date: \tab 2008-11-18\cr License: \tab GPL version 2 or newer\cr } } \author{ Daniel Leucuta <danny.ldc@gmail.com> Maintainer: Daniel Leucuta <danny.ldc@gmail.com> } \keyword{ package } \seealso{ \code{\link[Rcmdr]{Rcmdr}}. }
/man/RcmdrPlugin.SurvivalT-package.Rd
no_license
cran/RcmdrPlugin.SurvivalT
R
false
false
676
rd
\name{RcmdrSurvivalTPlugin-package} \alias{RcmdrSurvivalTPlugin-package} \alias{RcmdrSurvivalTPlugin} \docType{package} \title{ Install the SurvivalT Rcmdr Plug-In } \description{ This package provides an Rcmdr \dQuote{plug-in} based on the survival package for easier student access to survival analysis. } \details{ \tabular{ll}{ Package: \tab RcmdrSurvivalTPlugin\cr Type: \tab Package\cr Version: \tab 1.0-7\cr Date: \tab 2008-11-18\cr License: \tab GPL version 2 or newer\cr } } \author{ Daniel Leucuta <danny.ldc@gmail.com> Maintainer: Daniel Leucuta <danny.ldc@gmail.com> } \keyword{ package } \seealso{ \code{\link[Rcmdr]{Rcmdr}}. }
addJob <- function(name, exec_file, cpu, disc, ram, price, deadline, folder_path, partialResultsVars = c()) { if (is.null(user_session_token)) { stop("NOT LOGGED IN") } body <- list( name = name, exec_file = exec_file, CPU = cpu, disc = disc, RAM = ram, price = price, deadline = deadline, partialResultsVars = partialResultsVars ) r <- PUT( paste(base_url, '/job/', sep = ''), add_headers(Authorization = paste('Bearer', user_session_token)), body = toJSON(body, simplifyVector = TRUE, flatten = TRUE) ) print(fromJSON(content(r, "text"))) if (r$status_code == 200) { job <- fromJSON(content(r, "text"), flatten = TRUE) id <- job$id print("Job added.") View(as.data.frame(job), 'Job created') print('Saving current environment data') RData_filename = paste(id, "_input.RData", sep = "") save.image(RData_filename) print('Sending environment data') r2 <- POST( paste(base_url, '/job/', id, '/data', sep = ''), add_headers(Authorization = paste('Bearer', user_session_token)), body = upload_file(RData_filename) ) if (r2$status_code == 200) { unlink(RData_filename) print('Environment data sent, zipping code folder') wd <- getwd() setwd(folder_path) files <- list.files(folder_path) print(files) zip( paste(wd, id, sep = '/'), unlist(files, use.names = FALSE), extras = '-j', zip = 'C:/Program Files/7-Zip/zip.exe' ) setwd(wd) print('Sending code') r3 <- POST( paste(base_url, '/job/', id, '/code', sep = ''), content_type("application/octet-stream"), add_headers(Authorization = paste('Bearer', user_session_token)), body = upload_file(paste(id, 'zip', sep = '.')) ) if (r3$status_code == 200) { print('Job sucessfully added.') unlink(paste(id, 'zip', sep = '.')) } } } } viewJob <- function(id) { if (is.null(user_session_token)) { stop("NOT LOGGED IN") } r <- GET(paste(base_url, '/job/', id, sep = ''), add_headers(Authorization = paste('Bearer', user_session_token)), enconde = 'json') if (r$status_code == 200) { job <- fromJSON(content(r, "text"), flatten = TRUE) View(as.data.frame(job), 
paste('Machine', id)) } } listJobs <- function(page = 1) { if (is.null(user_session_token)) { stop("NOT LOGGED IN") } r <- GET( paste(base_url, '/job/?page=', page, sep = ''), add_headers(Authorization = paste('Bearer', user_session_token)), enconde = 'json' ) if (r$status_code == 200) { job <- fromJSON(content(r, "text"), flatten = TRUE) View(as.data.frame(job$results), 'Machines List') } } loadJob <- function(id, envir = .GlobalEnv) { download.file( url = paste(base_url, '/job/', id, '/output', sep = ''), destfile = paste(getwd() , '/', id, '_out', '.RData', sep = ''), method = 'curl' ) load(paste(getwd() , '/', id, '_out', '.RData', sep = ''), envir = envir) } loadPartialResult <- function(jobId, partialVar, loadToVar = NULL) { if (is.null(loadToVar)) { loadToVar = partialVar } pVar <- GET( paste( base_url, '/job/', jobId, '/partialResults/', partialVar, sep = '' ), #todo job id add_headers(Authorization = paste('Bearer', user_session_token)), enconde = 'json' ) cc <- pVar$content ccD <- unserialize(cc) assign(loadToVar, ccD, envir = .GlobalEnv) return (ccD) } loadPartialResultHistory <- function(jobId, partialVar, loadToVar = NULL) { if (is.null(loadToVar)) { loadToVar = partialVar } r <- GET( paste( base_url, '/job/', jobId, '/partialResults/', partialVar, '/history', sep = '' ), #todo job id add_headers(Authorization = paste('Bearer', user_session_token)), enconde = 'json' ) if (r$status_code == 200) { history <- fromJSON(content(r, "text")) result <- list() i <- 1 for (value in history$data) { result[[i]] <- unserialize(as.raw(unlist(value, use.names = FALSE))) i<- i + 1 } df <-data.frame(partialResult = integer(length(result))) df$partialResult <- result assign(loadToVar, df, envir = .GlobalEnv) return(df) } }
/job.R
no_license
Ahlid/P2P-VC-R-Cli
R
false
false
4,715
r
addJob <- function(name, exec_file, cpu, disc, ram, price, deadline, folder_path, partialResultsVars = c()) { if (is.null(user_session_token)) { stop("NOT LOGGED IN") } body <- list( name = name, exec_file = exec_file, CPU = cpu, disc = disc, RAM = ram, price = price, deadline = deadline, partialResultsVars = partialResultsVars ) r <- PUT( paste(base_url, '/job/', sep = ''), add_headers(Authorization = paste('Bearer', user_session_token)), body = toJSON(body, simplifyVector = TRUE, flatten = TRUE) ) print(fromJSON(content(r, "text"))) if (r$status_code == 200) { job <- fromJSON(content(r, "text"), flatten = TRUE) id <- job$id print("Job added.") View(as.data.frame(job), 'Job created') print('Saving current environment data') RData_filename = paste(id, "_input.RData", sep = "") save.image(RData_filename) print('Sending environment data') r2 <- POST( paste(base_url, '/job/', id, '/data', sep = ''), add_headers(Authorization = paste('Bearer', user_session_token)), body = upload_file(RData_filename) ) if (r2$status_code == 200) { unlink(RData_filename) print('Environment data sent, zipping code folder') wd <- getwd() setwd(folder_path) files <- list.files(folder_path) print(files) zip( paste(wd, id, sep = '/'), unlist(files, use.names = FALSE), extras = '-j', zip = 'C:/Program Files/7-Zip/zip.exe' ) setwd(wd) print('Sending code') r3 <- POST( paste(base_url, '/job/', id, '/code', sep = ''), content_type("application/octet-stream"), add_headers(Authorization = paste('Bearer', user_session_token)), body = upload_file(paste(id, 'zip', sep = '.')) ) if (r3$status_code == 200) { print('Job sucessfully added.') unlink(paste(id, 'zip', sep = '.')) } } } } viewJob <- function(id) { if (is.null(user_session_token)) { stop("NOT LOGGED IN") } r <- GET(paste(base_url, '/job/', id, sep = ''), add_headers(Authorization = paste('Bearer', user_session_token)), enconde = 'json') if (r$status_code == 200) { job <- fromJSON(content(r, "text"), flatten = TRUE) View(as.data.frame(job), 
paste('Machine', id)) } } listJobs <- function(page = 1) { if (is.null(user_session_token)) { stop("NOT LOGGED IN") } r <- GET( paste(base_url, '/job/?page=', page, sep = ''), add_headers(Authorization = paste('Bearer', user_session_token)), enconde = 'json' ) if (r$status_code == 200) { job <- fromJSON(content(r, "text"), flatten = TRUE) View(as.data.frame(job$results), 'Machines List') } } loadJob <- function(id, envir = .GlobalEnv) { download.file( url = paste(base_url, '/job/', id, '/output', sep = ''), destfile = paste(getwd() , '/', id, '_out', '.RData', sep = ''), method = 'curl' ) load(paste(getwd() , '/', id, '_out', '.RData', sep = ''), envir = envir) } loadPartialResult <- function(jobId, partialVar, loadToVar = NULL) { if (is.null(loadToVar)) { loadToVar = partialVar } pVar <- GET( paste( base_url, '/job/', jobId, '/partialResults/', partialVar, sep = '' ), #todo job id add_headers(Authorization = paste('Bearer', user_session_token)), enconde = 'json' ) cc <- pVar$content ccD <- unserialize(cc) assign(loadToVar, ccD, envir = .GlobalEnv) return (ccD) } loadPartialResultHistory <- function(jobId, partialVar, loadToVar = NULL) { if (is.null(loadToVar)) { loadToVar = partialVar } r <- GET( paste( base_url, '/job/', jobId, '/partialResults/', partialVar, '/history', sep = '' ), #todo job id add_headers(Authorization = paste('Bearer', user_session_token)), enconde = 'json' ) if (r$status_code == 200) { history <- fromJSON(content(r, "text")) result <- list() i <- 1 for (value in history$data) { result[[i]] <- unserialize(as.raw(unlist(value, use.names = FALSE))) i<- i + 1 } df <-data.frame(partialResult = integer(length(result))) df$partialResult <- result assign(loadToVar, df, envir = .GlobalEnv) return(df) } }
library(DLMtool) ### Name: LstepCC1 ### Title: Step-wise Constant Catch ### Aliases: LstepCC1 LstepCC2 LstepCC3 LstepCC4 ### ** Examples LstepCC1(1, Data=DLMtool::SimulatedData, plot=TRUE) LstepCC2(1, Data=DLMtool::SimulatedData, plot=TRUE) LstepCC3(1, Data=DLMtool::SimulatedData, plot=TRUE) LstepCC4(1, Data=DLMtool::SimulatedData, plot=TRUE)
/data/genthat_extracted_code/DLMtool/examples/LstepCC1.Rd.R
no_license
surayaaramli/typeRrh
R
false
false
353
r
library(DLMtool) ### Name: LstepCC1 ### Title: Step-wise Constant Catch ### Aliases: LstepCC1 LstepCC2 LstepCC3 LstepCC4 ### ** Examples LstepCC1(1, Data=DLMtool::SimulatedData, plot=TRUE) LstepCC2(1, Data=DLMtool::SimulatedData, plot=TRUE) LstepCC3(1, Data=DLMtool::SimulatedData, plot=TRUE) LstepCC4(1, Data=DLMtool::SimulatedData, plot=TRUE)
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/cleanup.R \name{batch_cleanup} \alias{batch_cleanup} \title{Cleanup Batch Processing} \usage{ batch_cleanup( path, force = FALSE, remaining = FALSE, failed = NA, recursive = FALSE, silent = FALSE ) } \arguments{ \item{path}{A string of the path to the directory with the files for processing.} \item{force}{A flag specifying whether to delete configuration and log files even if there are files remaining to be processed.} \item{remaining}{A flag specifying whether to delete any files that are remaining to be processed (only applied when \code{force = TRUE}). Files that have been processed are never deleted.} \item{failed}{A logical scalar specifying how to treat files that previously failed to process. If FALSE (the default) failed files are excluded, if NA they are included and if TRUE they are only included.} \item{recursive}{A flag specifying whether to recurse into subdirectories when cleaning up. This is unrelated to the \code{recurse} option of \code{\link[=batch_config]{batch_config()}} and is only expected to be used if the user has neglected to clean up multiple nested directories.} \item{silent}{A flag specifying whether to suppress warnings (and messages).} } \value{ A named logical vector indicating which directories were successfully cleaned up. } \description{ Deletes configuration file created by \code{\link[=batch_config]{batch_config()}} and log file created by \code{\link[=batch_run]{batch_run()}}. } \details{ The \code{\link[=batch_completed]{batch_completed()}} function can be used to test if batch processing is complete. } \examples{ path <- tempdir() write.csv(mtcars, file.path(path, "file1.csv")) batch_config(function(x) TRUE, path, regexp = "[.]csv$") batch_run(path, ask = FALSE) batch_cleanup(path) unlink(file.path(path, "file1.csv")) } \seealso{ \code{\link[=batch_process]{batch_process()}} }
/man/batch_cleanup.Rd
permissive
cran/batchr
R
false
true
1,942
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/cleanup.R \name{batch_cleanup} \alias{batch_cleanup} \title{Cleanup Batch Processing} \usage{ batch_cleanup( path, force = FALSE, remaining = FALSE, failed = NA, recursive = FALSE, silent = FALSE ) } \arguments{ \item{path}{A string of the path to the directory with the files for processing.} \item{force}{A flag specifying whether to delete configuration and log files even if there are files remaining to be processed.} \item{remaining}{A flag specifying whether to delete any files that are remaining to be processed (only applied when \code{force = TRUE}). Files that have been processed are never deleted.} \item{failed}{A logical scalar specifying how to treat files that previously failed to process. If FALSE (the default) failed files are excluded, if NA they are included and if TRUE they are only included.} \item{recursive}{A flag specifying whether to recurse into subdirectories when cleaning up. This is unrelated to the \code{recurse} option of \code{\link[=batch_config]{batch_config()}} and is only expected to be used if the user has neglected to clean up multiple nested directories.} \item{silent}{A flag specifying whether to suppress warnings (and messages).} } \value{ A named logical vector indicating which directories were successfully cleaned up. } \description{ Deletes configuration file created by \code{\link[=batch_config]{batch_config()}} and log file created by \code{\link[=batch_run]{batch_run()}}. } \details{ The \code{\link[=batch_completed]{batch_completed()}} function can be used to test if batch processing is complete. } \examples{ path <- tempdir() write.csv(mtcars, file.path(path, "file1.csv")) batch_config(function(x) TRUE, path, regexp = "[.]csv$") batch_run(path, ask = FALSE) batch_cleanup(path) unlink(file.path(path, "file1.csv")) } \seealso{ \code{\link[=batch_process]{batch_process()}} }
# Import data ------------------------------------------------------------- ffs.data=read_csv("data/input/cms-ffs-costs/Extracted Data/Aged Only/aged11.csv", skip=2, col_names=FALSE, na="*") ffs.data=ffs.data[,1:15] names(ffs.data) = c("ssa","state","county_name","parta_enroll", "parta_reimb","parta_percap","parta_reimb_unadj", "parta_percap_unadj","parta_ime","parta_dsh", "parta_gme","partb_enroll", "partb_reimb","partb_percap", "mean_risk") final.ffs.costs <- ffs.data %>% select(ssa,state,county_name,parta_enroll,parta_reimb, partb_enroll,partb_reimb,mean_risk) %>% mutate(year=2011, ssa=as.numeric(ssa)) %>% mutate_at(vars(parta_enroll, parta_reimb, partb_enroll, partb_reimb, mean_risk),~str_replace_all(.,",","")) %>% mutate_at(vars(parta_enroll, parta_reimb, partb_enroll, partb_reimb, mean_risk),as.numeric)
/R_code/8_ffs-costs-2011.R
no_license
smonto2/Medicare-Advantage
R
false
false
996
r
# Import data ------------------------------------------------------------- ffs.data=read_csv("data/input/cms-ffs-costs/Extracted Data/Aged Only/aged11.csv", skip=2, col_names=FALSE, na="*") ffs.data=ffs.data[,1:15] names(ffs.data) = c("ssa","state","county_name","parta_enroll", "parta_reimb","parta_percap","parta_reimb_unadj", "parta_percap_unadj","parta_ime","parta_dsh", "parta_gme","partb_enroll", "partb_reimb","partb_percap", "mean_risk") final.ffs.costs <- ffs.data %>% select(ssa,state,county_name,parta_enroll,parta_reimb, partb_enroll,partb_reimb,mean_risk) %>% mutate(year=2011, ssa=as.numeric(ssa)) %>% mutate_at(vars(parta_enroll, parta_reimb, partb_enroll, partb_reimb, mean_risk),~str_replace_all(.,",","")) %>% mutate_at(vars(parta_enroll, parta_reimb, partb_enroll, partb_reimb, mean_risk),as.numeric)
#Per essere sicuri di cancellare tutto! rm(list=ls()) ########## Here I use JAGS!!!! #Load the library library(rjags) # to interface R with JAGS ## Rheumatoid arthritis is an autoimmune disease characterized by ## chronic synovial inflammation and destruction of cartilage and bone ## in the joints. The Rotterdam Early Arthritis Cohort (REACH) study was ## initiated in 2004 to investigate the development of rheumatoid arthritis ## in patients with early manifestations of joint impairment. ## Information regarding basic patient characteristics, serological ## measurements, and patterns of disease involvement at baseline has ## been gathered in 681 recruited patients. It is of interest ## to know which of the following 12 factors are potentially ## associated with the development of rheumatoid arthritis considered as ## a binary (yes/no) outcome: ## ACCP (cyclic citrullinated peptide antibody), ## age, ## ESR (erythrocyte sedimentation rate), ## DC (duration of complaints in days), ## stiffness (duration of morning stiffness in minutes), ## RF (rheumatoid factor), ## gender, ## Sym (symmetrical pattern of joint inflammation;yes/no), ## SJC (swollen joint count), ## TJC (tender joint count), ## BCPH (bilateral compression pain in hands; yes/no), ## BCPF (bilateral compression pain in feet; yes/no). ##The standard approach to analyze these data would be to use ## logistic/probit regression combined with some off-the-shelf variable ## selection method. The F -to-out backward selection with p D 0:05 yields a ## model with the following variables: ACCP, ESR, DC, Sym, SJC, and BCPH. ## The model with the most favorable value of the AIC selected after an ## exhaustive model evaluation contains two extra variables: RF and stiffness. ## Which of these models provide the best approximation to the true ## underlying relationships is, if at all possible, difficult to assess. 
reach <- read.table("REACH_data.txt",header=T) #the design matrix x <- as.matrix(reach[,1:12]) ## the response vector Y <- as.vector(reach[,13]) #number of observations N <- dim(x)[1] #number of covariates p <- dim(x)[2] ###Parameters of the quasi-spike slab prior #We fix the shape of the inverse gamma to be 2 (finte mean) # while, as suggestd Iswaran Rao (2003) jasa, we fix v1=1 a=2 v1=1 #We consider setting 2 of hyperparameters var_sp <- 0.00027 var_sl <- 2.7 ##Then as a conseguence, ## Remark if X~t-student with ## scale parameter sigma and nu>2 df ## var(X)=sigma*(nu/(nu-2)) ##Then b <- var_sl*(a-1)/v1 b v0 <- var_sp*(a-1)/b v0 ##### delta s1 <- sqrt(b*v0/a) s2 <- sqrt(b*v1/a) rr <- (s2/s1)^(2/(2*a+1)) dd <- sqrt(2*a*(1-rr)/(rr/s2^2-1/s1^2 ) ) dd ########################################### library("MCMCpack") # In the next lines, I will use some plot-tricks of R, # to draw the quasi-spike and slab distribution mistrures we will usre # as prior in this code curve(0.5*dinvgamma(x,shape=a,scale=v1*b)+0.5*dinvgamma(x,shape=a,scale=v0*b),from=0,to=10,type="n",ylab="prior density") curve(0.5*dinvgamma(x,shape=a,scale=v0*b),from=0.000001,to=0.2,add=T,col="red",lty=2,lwd=2) text(2,0.05,"quasi-spike component",col="red") curve(0.5*dinvgamma(x,shape=a,scale=v1*b),from=0,to=10,add=T,col="blue",lty=2,lwd=2) text(7,0.05,"slab component",col="blue") ############################################# ### To plot the quasi-spike and slab prior we need the ### distribution of a t-student with scale parameter sigma ### and center mu tdist <- function(x,nu,sig=1,mu=0){ out <- lgamma((nu+1)/2)-lgamma(nu/2)-1/2*log(pi*nu)-log(sig)-((nu+1)/2)*log( 1+1/nu*( (x-mu)/sig)^2) return(exp(out)) } ##Then we compute the scale parameter for the quasi-spike and the slab ## prior s1 <- sqrt(b*v0/a) s2 <- sqrt(b*v1/a) ##And we try to visualize curve(0.5*tdist(x,nu=2*a,sig=s1)+0.5*tdist(x,nu=2*a,sig=s2),from=-5,to=5,type="l",ylab="prior density") 
curve(0.5*tdist(x,nu=2*a,sig=s1),from=-5,to=5,add=T,col="red",lty=2,lwd=2) text(1.8,7,"quasi-spike component",col="red") curve(0.5*tdist(x,nu=2*a,sig=s2),from=-5,to=5,add=T,col="blue",lty=2,lwd=2) text(-2,0.3,"slab component",col="blue") curve(0.5*tdist(x,nu=2*a,sig=s1)+0.5*tdist(x,nu=2*a,sig=s2),from=-5,to=5,add=T) ###We will use jags to implement the model data_win <-list(N=N, p=p, Y = Y,x=as.matrix(x),v0=v0,v1=v1,a=a,b=b) ## A list of initial value for the MCMC algorithm # that WinBUGS will implement inits = function() { list( beta0=1.0, beta=rep(1,p), tau=1.0, g=rep(0,p),.RNG.seed=321,.RNG.name = 'base::Wichmann-Hill') } ######################### # The function jags.model() compile the jags model, moreover it performs #and adaptive burn-in for 'nchain' parallel chains. #As input we have to provide: the model, the data, #the initial condition of the chains, the number of chains, and the number of #adaptations (burn inn). model=jags.model("NMIG_probit.bug",data=data_win,n.adapt=1000,inits=inits,n.chains=1) ####Posterior parameter WinBUGS has to save param <- c("beta0","beta","sigma2","g","mdl") ## Some other information for the ## MCMC algorihm #number of iterations ###The probit model is computationally more intensive ## I run a chain previusly with the usual choice of nit <- 50000 #thinning thin <-10 #to be fast here #nit=100 #thin=1 ##The command coda.samle() calls jags from R passing the data and initial #value just defined output_nmig=coda.samples(model=model,variable.names=param,n.iter=nit,thin=thin) #save(output_nmig,file="nmig_set1.dat") load("nmig_set1.dat") # the output is an mcmc object of the library coda str(output_nmig) ### A qalitative analysis of the convergence of the posteror chais plot(output_nmig) ### To work wit the posterior chain it is better ### to cast output to be an array object of R output_nmig <- as.matrix(output_nmig) ###Some variable selection thecniques: # The median probability moodel (MPM) # pick variables with estimated 
posterior inclusion probabilities # higher than 0.5 # Notice that the estimated posterior inclusion probabilities are the # posterior means of the gamma variables (in the code we called g) ##We save the posterior chain of the inclusion variable in post.g colnames(output_nmig) post.g <- output_nmig[,14:25] # then we compute the sample mean , column by column apply(post.g,2,"mean") post_mean_g <- apply(post.g,2,"mean") ## to produce a bar plot of the posterior inclusion probabilities #postscript("reach_nmig_set2.ps") barplot(post_mean_g,names.arg=colnames(x),main="Posterior inclusion probabilities set 2") #Finally we can add a horizontal line abline(h=0.5,col="red",lty=2,lwd=3) #dev.off() # The variable included in the median probability model are: mp_model<-post_mean_g>0.5 #mp_model is a logical vector with TRUE if the corresponding # parameters is included in to the model FALSE otherwise mp_model ## Frequency of the selected variables post_mean_g[mp_model] ##trick to see whech variable choose with the... paste(colnames(x),1:p,sep="")[mp_model] ## close all the graphical device dev.off() ###### ###### Highest posterior density model (HPD) # The following function has as input an integer and a base p. # As a result it returns a vector containing the representation in # in the base p of the integer n. ##Note that n%%base indicates n mod base and %/% indicates integer division. 
as.binary <- function(n,base=2) { if(n==0){return(0)} out <- NULL while(n > 0) { out <- c(n%%base , out) n <- n %/% base } names(out) <- NULL return(out) } ##In the following function I will use the as.binary function ## and two time a paste() trick to write down automatically ## in a nice form wich variable are included in a visited model ##It have as input the number of the model n and the number of covariate n.cov wich.variable <- function(n,n.cov){ bin <- rev(as.binary(n)) n.bin <- length(bin) logic.bin <- rep(FALSE,n.cov+1) for(i in 1:n.bin){ logic.bin[i]=(bin[i]==1) } out <- paste(paste("x",0:p,sep="")[logic.bin],collapse="_") return(out) } ### We start to analize how many models have been visited ## by the posterior chain: length(unique( output_nmig[,"mdl"])) ## Now we compute the posterior frequency of the visited models visited_models<-table(output_nmig[,"mdl"]) visited_models #We can visualize the table of the visited models x11() barplot(visited_models) ## Let's sort the table to see which are the "top ten" top <- sort(visited_models,decreasing=T)[1:10] top ## In the following lines we will use the as.binary with a "paste()" trick ## to visualize wich variable is included in the most visited models numeric_top <- as.numeric(names(top)) for(j in 1:10){ names(top)[j]=wich.variable(numeric_top[j],p) } top ############################# ######### Th Hard Shrinckage (HS; hard thresholding/selection shrinkage) #variables are included whenever the absolute value of the estimated # coefficient (e.g., posterior mean) exceeds some threshold value. # Remark we will base the interval decision criterion for HS on a # one standard deviation interval around the posterior mean. 
#posterior of the beta parmeter ##We save the posterior chain of the inclusion variable in post.g colnames(output_nmig) post.beta <- output_nmig[,1:12] # then we compute the sample mean , column by column mean.beta.post <- apply(post.beta,2,"mean") sd.beta.post <- apply(post.beta,2,"sd") require(plotrix) #postscript("HS_decision_nmig_set2.ps") plotCI(x=1:p, y=mean.beta.post, uiw=sd.beta.post,lwd=1.5, main="Decision intervals for HS, nmig-set1") abline(h=0,col="blue") #dev.off() ## ####################
/script lezione/Lesson Material/Linear and generalized linear models/CovariateSelection/nmig_set1.R.r
no_license
matty9210455/Stat.-Bayesiana
R
false
false
9,669
r
#Per essere sicuri di cancellare tutto! rm(list=ls()) ########## Here I use JAGS!!!! #Load the library library(rjags) # to interface R with JAGS ## Rheumatoid arthritis is an autoimmune disease characterized by ## chronic synovial inflammation and destruction of cartilage and bone ## in the joints. The Rotterdam Early Arthritis Cohort (REACH) study was ## initiated in 2004 to investigate the development of rheumatoid arthritis ## in patients with early manifestations of joint impairment. ## Information regarding basic patient characteristics, serological ## measurements, and patterns of disease involvement at baseline has ## been gathered in 681 recruited patients. It is of interest ## to know which of the following 12 factors are potentially ## associated with the development of rheumatoid arthritis considered as ## a binary (yes/no) outcome: ## ACCP (cyclic citrullinated peptide antibody), ## age, ## ESR (erythrocyte sedimentation rate), ## DC (duration of complaints in days), ## stiffness (duration of morning stiffness in minutes), ## RF (rheumatoid factor), ## gender, ## Sym (symmetrical pattern of joint inflammation;yes/no), ## SJC (swollen joint count), ## TJC (tender joint count), ## BCPH (bilateral compression pain in hands; yes/no), ## BCPF (bilateral compression pain in feet; yes/no). ##The standard approach to analyze these data would be to use ## logistic/probit regression combined with some off-the-shelf variable ## selection method. The F -to-out backward selection with p D 0:05 yields a ## model with the following variables: ACCP, ESR, DC, Sym, SJC, and BCPH. ## The model with the most favorable value of the AIC selected after an ## exhaustive model evaluation contains two extra variables: RF and stiffness. ## Which of these models provide the best approximation to the true ## underlying relationships is, if at all possible, difficult to assess. 
reach <- read.table("REACH_data.txt",header=T) #the design matrix x <- as.matrix(reach[,1:12]) ## the response vector Y <- as.vector(reach[,13]) #number of observations N <- dim(x)[1] #number of covariates p <- dim(x)[2] ###Parameters of the quasi-spike slab prior #We fix the shape of the inverse gamma to be 2 (finte mean) # while, as suggestd Iswaran Rao (2003) jasa, we fix v1=1 a=2 v1=1 #We consider setting 2 of hyperparameters var_sp <- 0.00027 var_sl <- 2.7 ##Then as a conseguence, ## Remark if X~t-student with ## scale parameter sigma and nu>2 df ## var(X)=sigma*(nu/(nu-2)) ##Then b <- var_sl*(a-1)/v1 b v0 <- var_sp*(a-1)/b v0 ##### delta s1 <- sqrt(b*v0/a) s2 <- sqrt(b*v1/a) rr <- (s2/s1)^(2/(2*a+1)) dd <- sqrt(2*a*(1-rr)/(rr/s2^2-1/s1^2 ) ) dd ########################################### library("MCMCpack") # In the next lines, I will use some plot-tricks of R, # to draw the quasi-spike and slab distribution mistrures we will usre # as prior in this code curve(0.5*dinvgamma(x,shape=a,scale=v1*b)+0.5*dinvgamma(x,shape=a,scale=v0*b),from=0,to=10,type="n",ylab="prior density") curve(0.5*dinvgamma(x,shape=a,scale=v0*b),from=0.000001,to=0.2,add=T,col="red",lty=2,lwd=2) text(2,0.05,"quasi-spike component",col="red") curve(0.5*dinvgamma(x,shape=a,scale=v1*b),from=0,to=10,add=T,col="blue",lty=2,lwd=2) text(7,0.05,"slab component",col="blue") ############################################# ### To plot the quasi-spike and slab prior we need the ### distribution of a t-student with scale parameter sigma ### and center mu tdist <- function(x,nu,sig=1,mu=0){ out <- lgamma((nu+1)/2)-lgamma(nu/2)-1/2*log(pi*nu)-log(sig)-((nu+1)/2)*log( 1+1/nu*( (x-mu)/sig)^2) return(exp(out)) } ##Then we compute the scale parameter for the quasi-spike and the slab ## prior s1 <- sqrt(b*v0/a) s2 <- sqrt(b*v1/a) ##And we try to visualize curve(0.5*tdist(x,nu=2*a,sig=s1)+0.5*tdist(x,nu=2*a,sig=s2),from=-5,to=5,type="l",ylab="prior density") 
curve(0.5*tdist(x,nu=2*a,sig=s1),from=-5,to=5,add=T,col="red",lty=2,lwd=2) text(1.8,7,"quasi-spike component",col="red") curve(0.5*tdist(x,nu=2*a,sig=s2),from=-5,to=5,add=T,col="blue",lty=2,lwd=2) text(-2,0.3,"slab component",col="blue") curve(0.5*tdist(x,nu=2*a,sig=s1)+0.5*tdist(x,nu=2*a,sig=s2),from=-5,to=5,add=T) ###We will use jags to implement the model data_win <-list(N=N, p=p, Y = Y,x=as.matrix(x),v0=v0,v1=v1,a=a,b=b) ## A list of initial value for the MCMC algorithm # that WinBUGS will implement inits = function() { list( beta0=1.0, beta=rep(1,p), tau=1.0, g=rep(0,p),.RNG.seed=321,.RNG.name = 'base::Wichmann-Hill') } ######################### # The function jags.model() compile the jags model, moreover it performs #and adaptive burn-in for 'nchain' parallel chains. #As input we have to provide: the model, the data, #the initial condition of the chains, the number of chains, and the number of #adaptations (burn inn). model=jags.model("NMIG_probit.bug",data=data_win,n.adapt=1000,inits=inits,n.chains=1) ####Posterior parameter WinBUGS has to save param <- c("beta0","beta","sigma2","g","mdl") ## Some other information for the ## MCMC algorihm #number of iterations ###The probit model is computationally more intensive ## I run a chain previusly with the usual choice of nit <- 50000 #thinning thin <-10 #to be fast here #nit=100 #thin=1 ##The command coda.samle() calls jags from R passing the data and initial #value just defined output_nmig=coda.samples(model=model,variable.names=param,n.iter=nit,thin=thin) #save(output_nmig,file="nmig_set1.dat") load("nmig_set1.dat") # the output is an mcmc object of the library coda str(output_nmig) ### A qalitative analysis of the convergence of the posteror chais plot(output_nmig) ### To work wit the posterior chain it is better ### to cast output to be an array object of R output_nmig <- as.matrix(output_nmig) ###Some variable selection thecniques: # The median probability moodel (MPM) # pick variables with estimated 
posterior inclusion probabilities # higher than 0.5 # Notice that the estimated posterior inclusion probabilities are the # posterior means of the gamma variables (in the code we called g) ##We save the posterior chain of the inclusion variable in post.g colnames(output_nmig) post.g <- output_nmig[,14:25] # then we compute the sample mean , column by column apply(post.g,2,"mean") post_mean_g <- apply(post.g,2,"mean") ## to produce a bar plot of the posterior inclusion probabilities #postscript("reach_nmig_set2.ps") barplot(post_mean_g,names.arg=colnames(x),main="Posterior inclusion probabilities set 2") #Finally we can add a horizontal line abline(h=0.5,col="red",lty=2,lwd=3) #dev.off() # The variable included in the median probability model are: mp_model<-post_mean_g>0.5 #mp_model is a logical vector with TRUE if the corresponding # parameters is included in to the model FALSE otherwise mp_model ## Frequency of the selected variables post_mean_g[mp_model] ##trick to see whech variable choose with the... paste(colnames(x),1:p,sep="")[mp_model] ## close all the graphical device dev.off() ###### ###### Highest posterior density model (HPD) # The following function has as input an integer and a base p. # As a result it returns a vector containing the representation in # in the base p of the integer n. ##Note that n%%base indicates n mod base and %/% indicates integer division. 
as.binary <- function(n,base=2) { if(n==0){return(0)} out <- NULL while(n > 0) { out <- c(n%%base , out) n <- n %/% base } names(out) <- NULL return(out) } ##In the following function I will use the as.binary function ## and two time a paste() trick to write down automatically ## in a nice form wich variable are included in a visited model ##It have as input the number of the model n and the number of covariate n.cov wich.variable <- function(n,n.cov){ bin <- rev(as.binary(n)) n.bin <- length(bin) logic.bin <- rep(FALSE,n.cov+1) for(i in 1:n.bin){ logic.bin[i]=(bin[i]==1) } out <- paste(paste("x",0:p,sep="")[logic.bin],collapse="_") return(out) } ### We start to analize how many models have been visited ## by the posterior chain: length(unique( output_nmig[,"mdl"])) ## Now we compute the posterior frequency of the visited models visited_models<-table(output_nmig[,"mdl"]) visited_models #We can visualize the table of the visited models x11() barplot(visited_models) ## Let's sort the table to see which are the "top ten" top <- sort(visited_models,decreasing=T)[1:10] top ## In the following lines we will use the as.binary with a "paste()" trick ## to visualize wich variable is included in the most visited models numeric_top <- as.numeric(names(top)) for(j in 1:10){ names(top)[j]=wich.variable(numeric_top[j],p) } top ############################# ######### Th Hard Shrinckage (HS; hard thresholding/selection shrinkage) #variables are included whenever the absolute value of the estimated # coefficient (e.g., posterior mean) exceeds some threshold value. # Remark we will base the interval decision criterion for HS on a # one standard deviation interval around the posterior mean. 
#posterior of the beta parmeter ##We save the posterior chain of the inclusion variable in post.g colnames(output_nmig) post.beta <- output_nmig[,1:12] # then we compute the sample mean , column by column mean.beta.post <- apply(post.beta,2,"mean") sd.beta.post <- apply(post.beta,2,"sd") require(plotrix) #postscript("HS_decision_nmig_set2.ps") plotCI(x=1:p, y=mean.beta.post, uiw=sd.beta.post,lwd=1.5, main="Decision intervals for HS, nmig-set1") abline(h=0,col="blue") #dev.off() ## ####################
library(ELLsae) context("Testing if the argument seed actualy works for setting seed in C++") # example data from "clustermeans" df.survey <- data.frame(y = c(1,2,3,4,1,2,3,4,1), a = c(1,2,3,1,2,3,1,2,3), b = c(5,6,4,8,2,6,9,8,5)) df.census <- data.frame(a = (c(1,2,3,1,2,3,1,2,3,1,2,3,1,2,3,1)), b = c(5,3,7,2,5,4,7,5,1,1,7,9,5,4,7,2), d = c(11,12,13,14,15,16,17,18,19,10,12,13,8,9,7,8)) random_control <- c() for (i in 1:7) { ellsae(model = y ~ a + b, clustermeans = "b", survey = df.survey, census = df.census, location_survey = "a", seed = 12345, output = "all", n_boot = 50L)$yboot_est random_control[i] <- runif(1) } test_that("setting a seed actually leads to the same result every time (ellsae)", { expect_equal(ellsae(model = y ~ a + b, clustermeans = "b", survey = df.survey, census = df.census, location_survey = "a", seed = 12345, output = "all", n_boot = 50L)$yboot_est, ellsae(model = y ~ a + b, clustermeans = "b", survey = df.survey, census = df.census, location_survey = "a", seed = 12345, output = "all", n_boot = 50L)$yboot_est) }) test_that("The seed is actually randomized again after the function is done (ellsae)", { expect_gte(length(unique(random_control)), 2) }) test_that("setting a seed actually leads to the same result every time (ellsae_big)", { expect_equal(ellsae_big(model = y ~ a + b, clustermeans = "b", survey = df.survey, census = df.census, location_survey = "a", seed = 12345, output = "all", n_boot = 50L)$yboot_est, ellsae_big(model = y ~ a + b, clustermeans = "b", survey = df.survey, census = df.census, location_survey = "a", seed = 12345, output = "all", n_boot = 50L)$yboot_est) }) random_control_big <- c() for (i in 1:7) { ellsae_big(model = y ~ a + b, clustermeans = "b", survey = df.survey, census = df.census, location_survey = "a", seed = 12345, output = "all", n_boot = 50L)$yboot_est random_control_big[i] <- runif(1) } test_that("The seed is actually randomized again after the function is done (ellsae_big)", { 
expect_gte(length(unique(random_control_big)), 2) })
/tests/testthat/test_seed_Argument.R
no_license
fsuettmann/ELLsae
R
false
false
2,637
r
library(ELLsae) context("Testing if the argument seed actualy works for setting seed in C++") # example data from "clustermeans" df.survey <- data.frame(y = c(1,2,3,4,1,2,3,4,1), a = c(1,2,3,1,2,3,1,2,3), b = c(5,6,4,8,2,6,9,8,5)) df.census <- data.frame(a = (c(1,2,3,1,2,3,1,2,3,1,2,3,1,2,3,1)), b = c(5,3,7,2,5,4,7,5,1,1,7,9,5,4,7,2), d = c(11,12,13,14,15,16,17,18,19,10,12,13,8,9,7,8)) random_control <- c() for (i in 1:7) { ellsae(model = y ~ a + b, clustermeans = "b", survey = df.survey, census = df.census, location_survey = "a", seed = 12345, output = "all", n_boot = 50L)$yboot_est random_control[i] <- runif(1) } test_that("setting a seed actually leads to the same result every time (ellsae)", { expect_equal(ellsae(model = y ~ a + b, clustermeans = "b", survey = df.survey, census = df.census, location_survey = "a", seed = 12345, output = "all", n_boot = 50L)$yboot_est, ellsae(model = y ~ a + b, clustermeans = "b", survey = df.survey, census = df.census, location_survey = "a", seed = 12345, output = "all", n_boot = 50L)$yboot_est) }) test_that("The seed is actually randomized again after the function is done (ellsae)", { expect_gte(length(unique(random_control)), 2) }) test_that("setting a seed actually leads to the same result every time (ellsae_big)", { expect_equal(ellsae_big(model = y ~ a + b, clustermeans = "b", survey = df.survey, census = df.census, location_survey = "a", seed = 12345, output = "all", n_boot = 50L)$yboot_est, ellsae_big(model = y ~ a + b, clustermeans = "b", survey = df.survey, census = df.census, location_survey = "a", seed = 12345, output = "all", n_boot = 50L)$yboot_est) }) random_control_big <- c() for (i in 1:7) { ellsae_big(model = y ~ a + b, clustermeans = "b", survey = df.survey, census = df.census, location_survey = "a", seed = 12345, output = "all", n_boot = 50L)$yboot_est random_control_big[i] <- runif(1) } test_that("The seed is actually randomized again after the function is done (ellsae_big)", { 
expect_gte(length(unique(random_control_big)), 2) })
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/plotseries.R \name{getConvexHull} \alias{getConvexHull} \title{Compute coordinates of a closed convex hull for data points} \usage{ getConvexHull(points) } \arguments{ \item{points}{matrix or data frame with coordinates of the points} } \description{ Compute coordinates of a closed convex hull for data points }
/man/getConvexHull.Rd
permissive
svkucheryavski/mdatools
R
false
true
391
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/plotseries.R \name{getConvexHull} \alias{getConvexHull} \title{Compute coordinates of a closed convex hull for data points} \usage{ getConvexHull(points) } \arguments{ \item{points}{matrix or data frame with coordinates of the points} } \description{ Compute coordinates of a closed convex hull for data points }
#' Species fitness under a Ricker-type (RK) competition model.
#'
#' Fitness is log(lambda) scaled by the species' competitive response.
#' Non-positive growth rates have no defined fitness (log() undefined),
#' so NA is returned for those entries. Vectorized over `lambda`; the
#' original scalar behaviour is unchanged (value for lambda > 0, NA
#' otherwise), but vector input no longer errors on the `if` condition.
#'
#' @param lambda Numeric vector of per-capita growth rates.
#' @param competitive_response Numeric scalar competitive response term
#'   (assumed scalar — TODO confirm against callers).
#' @return Numeric vector the same length as `lambda`, NA where
#'   `lambda` is missing or <= 0.
RK_species_fitness <- function(lambda, competitive_response){
  out <- rep(NA_real_, length(lambda))
  # Guard both NA and non-positive rates before taking the log.
  positive <- !is.na(lambda) & lambda > 0
  out[positive] <- log(lambda[positive]) / competitive_response
  out
}
/R/RK_species_fitness.R
permissive
RadicalCommEcol/cxr
R
false
false
138
r
# Ricker-model species fitness: log(lambda) divided by the species'
# competitive response. A non-positive growth rate has no defined
# fitness, so NA is returned in that case (scalar input expected).
RK_species_fitness <- function(lambda, competitive_response){
  if (lambda <= 0) {
    return(NA)
  }
  log(lambda) / competitive_response
}
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/data.R \docType{data} \name{kawel_boehm.ref} \alias{kawel_boehm.ref} \title{Parameters for Cardiovascular Magnetic Resonance} \source{ "Kawel-Boehm N, Hetzel SJ, Ambale-Venkatesh B, et al. Reference ranges (“normal values”) for cardiovascular magnetic resonance (CMR) in adults and children: 2020 update. Journal of Cardiovascular Magnetic Resonance 2020;22(1):87." } \usage{ kawel_boehm.ref } \description{ Parameters for Cardiovascular Magnetic Resonance }
/man/kawel_boehm.ref.Rd
no_license
cran/childsds
R
false
true
541
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/data.R \docType{data} \name{kawel_boehm.ref} \alias{kawel_boehm.ref} \title{Parameters for Cardiovascular Magnetic Resonance} \source{ "Kawel-Boehm N, Hetzel SJ, Ambale-Venkatesh B, et al. Reference ranges (“normal values”) for cardiovascular magnetic resonance (CMR) in adults and children: 2020 update. Journal of Cardiovascular Magnetic Resonance 2020;22(1):87." } \usage{ kawel_boehm.ref } \description{ Parameters for Cardiovascular Magnetic Resonance }
\description{no Description} \name{getReleaseFlux14} \alias{getReleaseFlux14} \usage{getReleaseFlux14(object)} \title{This function } \arguments{ \item{object}{see the method arguments for details} } \section{Methods}{ \code{\link{getReleaseFlux14,Model_14-method}}\cr}
/pkg/man/getReleaseFlux14.Rd
no_license
Dong-po/SoilR-exp
R
false
false
270
rd
\description{no Description} \name{getReleaseFlux14} \alias{getReleaseFlux14} \usage{getReleaseFlux14(object)} \title{This function } \arguments{ \item{object}{see the method arguments for details} } \section{Methods}{ \code{\link{getReleaseFlux14,Model_14-method}}\cr}
################ # workspace source('../main/setup.R') library(amen) source('../main/binPerfHelpers.R') ################ ################ # load data load('../main/nigeriaMatList_acled_v7.rda') # loads yList object load('../main/ameResults.rda') ################ ################ # function to run k-fold cross validation analysis using ame ameOutSamp = function( yList=yList, xDyadL=NULL, xRowL=NULL, xColL=NULL, seed=6886, R=2, model='bin', intercept=TRUE, rvar=TRUE, cvar=TRUE, symmetric=FALSE, burn=10000, nscan=20000, odens=25, folds=30, cores=6 ){ ################ # divide dataset into folds randomly set.seed(seed) yListFolds = lapply(yList, function(y){ yFold=matrix(sample(1:folds, length(y), replace=TRUE), nrow=nrow(y),ncol=ncol(y), dimnames=dimnames(y)) diag(yFold) = NA return(yFold) }) ################ ################ # run models by fold yCrossValTrain = lapply(1:folds, function(f){ yListMiss = lapply(1:length(yList), function(t){ foldID = yListFolds[[t]] ; y = yList[[t]] foldID[foldID==f]=NA ; y=y*foldID return(y) }) names(yListMiss) = names(yList) return(yListMiss) }) ; names(yCrossValTrain) = char(1:folds) # run ame by fold loadPkg(c('doParallel', 'foreach')) cl=makeCluster(cores) ; registerDoParallel(cl) fitCrossVal <- foreach(ii=1:length(yCrossValTrain), .packages=c('amen')) %dopar%{ fit=ame_repL( Y=yCrossValTrain[[ii]], Xdyad=xDyadL, Xrow=xRowL, Xcol=xColL, symmetric=symmetric, rvar=rvar, cvar=cvar, R=R, model=model, intercept=intercept, seed=seed, burn=burn, nscan=nscan, odens=odens, plot=FALSE, gof=TRUE, periodicSave=FALSE ) return(fit) } stopCluster(cl) ; names(fitCrossVal) = char(1:folds) # get preds outPerf = do.call('rbind', lapply(1:folds, function(f){ fitFoldPred = fitCrossVal[[f]]$'EZ' do.call('rbind', lapply(1:length(fitFoldPred), function(t){ predT = fitFoldPred[[t]] foldID = yListFolds[[t]] ; y = yList[[t]] covarMissInfo = design_array_listwisedel(xRowL[[t]], xColL[[t]], xDyadL[[t]], intercept, nrow(y)) covarMissInfo = apply(covarMissInfo, 
c(1,2), sum) covarMissInfo[!is.na(covarMissInfo)] = 1 foldID[foldID!=f]=NA ; foldID[!is.na(foldID)] = 1 y=y*foldID*covarMissInfo ; predT=predT*foldID res=na.omit(data.frame(actual=c(y), pred=c(predT), fold=f, stringsAsFactors=FALSE)) res$pred = pnorm(res$pred) return(res) }) ) }) ) # get binperfhelpers loadPkg(c('ROCR', 'RColorBrewer', 'caTools')) source('../main/binPerfHelpers.R') # get perf stats aucByFold=do.call('rbind', lapply(1:folds, function(f){ slice = outPerf[outPerf$fold==f,] if(length(unique(slice$actual))==1){ return(NULL) } perf=cbind(fold=f, aucROC=getAUC(slice$pred, slice$actual), aucPR=auc_pr(slice$actual, slice$pred) ) return(perf) } )) aucROC=getAUC(outPerf$pred, outPerf$actual) aucPR=auc_pr(outPerf$actual, outPerf$pred) ################ # org output and return out=list( yCrossValTrain=yCrossValTrain, fitCrossVal=fitCrossVal, outPerf=outPerf, aucByFold=aucByFold, aucROC=aucROC, aucPR=aucPR ) return(out) } ################ ################ # load data load('../main/nigeriaMatList_acled_v7.rda') # loads yList object load('../main/exoVars.rda') # load xNodeL, xDyadL # focus on post 2000 data [few actors beforehand] yrs = char(2000:2016) yList = yList[yrs] ; xDyadL = xDyadL[yrs] ; xNodeL = xNodeL[yrs] ############### ################ # set up model specs subListArray = function(lA, vars, dims=2){ if(dims==2){ return( lapply(lA, function(x){ x[,vars, drop=FALSE] }) ) } if(dims==3){ return( lapply(lA, function(x){ x[,,vars,drop=FALSE] }) ) } } designArrays = list( base=list( senCovar=subListArray(xNodeL, c('riotsProtestsAgainst', 'vioCivEvents', 'groupSpread'), 2), recCovar=subListArray(xNodeL, c('riotsProtestsAgainst', 'vioCivEvents', 'groupSpread'), 2), dyadCovar=subListArray(xDyadL, c('govActor', 'postBoko', 'elecYear', 'ngbrConfCount'), 3) ) ) ranks = seq(0,20,5) results = lapply(ranks, function(rank){ mod = ameOutSamp( yList=yList, xDyadL=designArrays$base$dyadCovar, xRowL=designArrays$base$senCovar, xColL=designArrays$base$recCovar, R=rank ) 
return(mod) }) ################ ############################ # org results predDfs = lapply(1:length(ranks), function(i){ out = cbind( results[[i]]$outPerf, model=paste0('AME (K=',ranks[i],')')) }) names(predDfs) = paste0('AME (K=', ranks, ')') # tabular data aucSumm=do.call('rbind', lapply(predDfs,function(x){ aucROC=getAUC(x$pred,x$actual) ; aucPR=auc_pr(x$actual,x$pred) return( c('AUC'=aucROC,'AUC (PR)'=aucPR) ) }) ) aucSumm = trim(format(round(aucSumm, 3), nsmall=2)) aucSumm = aucSumm[nrow(aucSumm):1,] # viz results print.xtable( xtable(aucSumm, align='lcc', caption='Out-of-sample performance statistics by varying dimensions of multiplicative effects in AME.', label='tab:ame_vark' ), include.rownames=TRUE, sanitize.text.function = identity, hline.after=c(0,0,1,nrow(aucSumm),nrow(aucSumm)), size='normalsize', file='floats/tableA1.tex' ) ################
/replArchive/appendix/tableA1.R
no_license
s7minhas/conflictEvolution
R
false
false
5,079
r
################ # workspace source('../main/setup.R') library(amen) source('../main/binPerfHelpers.R') ################ ################ # load data load('../main/nigeriaMatList_acled_v7.rda') # loads yList object load('../main/ameResults.rda') ################ ################ # function to run k-fold cross validation analysis using ame ameOutSamp = function( yList=yList, xDyadL=NULL, xRowL=NULL, xColL=NULL, seed=6886, R=2, model='bin', intercept=TRUE, rvar=TRUE, cvar=TRUE, symmetric=FALSE, burn=10000, nscan=20000, odens=25, folds=30, cores=6 ){ ################ # divide dataset into folds randomly set.seed(seed) yListFolds = lapply(yList, function(y){ yFold=matrix(sample(1:folds, length(y), replace=TRUE), nrow=nrow(y),ncol=ncol(y), dimnames=dimnames(y)) diag(yFold) = NA return(yFold) }) ################ ################ # run models by fold yCrossValTrain = lapply(1:folds, function(f){ yListMiss = lapply(1:length(yList), function(t){ foldID = yListFolds[[t]] ; y = yList[[t]] foldID[foldID==f]=NA ; y=y*foldID return(y) }) names(yListMiss) = names(yList) return(yListMiss) }) ; names(yCrossValTrain) = char(1:folds) # run ame by fold loadPkg(c('doParallel', 'foreach')) cl=makeCluster(cores) ; registerDoParallel(cl) fitCrossVal <- foreach(ii=1:length(yCrossValTrain), .packages=c('amen')) %dopar%{ fit=ame_repL( Y=yCrossValTrain[[ii]], Xdyad=xDyadL, Xrow=xRowL, Xcol=xColL, symmetric=symmetric, rvar=rvar, cvar=cvar, R=R, model=model, intercept=intercept, seed=seed, burn=burn, nscan=nscan, odens=odens, plot=FALSE, gof=TRUE, periodicSave=FALSE ) return(fit) } stopCluster(cl) ; names(fitCrossVal) = char(1:folds) # get preds outPerf = do.call('rbind', lapply(1:folds, function(f){ fitFoldPred = fitCrossVal[[f]]$'EZ' do.call('rbind', lapply(1:length(fitFoldPred), function(t){ predT = fitFoldPred[[t]] foldID = yListFolds[[t]] ; y = yList[[t]] covarMissInfo = design_array_listwisedel(xRowL[[t]], xColL[[t]], xDyadL[[t]], intercept, nrow(y)) covarMissInfo = apply(covarMissInfo, 
c(1,2), sum) covarMissInfo[!is.na(covarMissInfo)] = 1 foldID[foldID!=f]=NA ; foldID[!is.na(foldID)] = 1 y=y*foldID*covarMissInfo ; predT=predT*foldID res=na.omit(data.frame(actual=c(y), pred=c(predT), fold=f, stringsAsFactors=FALSE)) res$pred = pnorm(res$pred) return(res) }) ) }) ) # get binperfhelpers loadPkg(c('ROCR', 'RColorBrewer', 'caTools')) source('../main/binPerfHelpers.R') # get perf stats aucByFold=do.call('rbind', lapply(1:folds, function(f){ slice = outPerf[outPerf$fold==f,] if(length(unique(slice$actual))==1){ return(NULL) } perf=cbind(fold=f, aucROC=getAUC(slice$pred, slice$actual), aucPR=auc_pr(slice$actual, slice$pred) ) return(perf) } )) aucROC=getAUC(outPerf$pred, outPerf$actual) aucPR=auc_pr(outPerf$actual, outPerf$pred) ################ # org output and return out=list( yCrossValTrain=yCrossValTrain, fitCrossVal=fitCrossVal, outPerf=outPerf, aucByFold=aucByFold, aucROC=aucROC, aucPR=aucPR ) return(out) } ################ ################ # load data load('../main/nigeriaMatList_acled_v7.rda') # loads yList object load('../main/exoVars.rda') # load xNodeL, xDyadL # focus on post 2000 data [few actors beforehand] yrs = char(2000:2016) yList = yList[yrs] ; xDyadL = xDyadL[yrs] ; xNodeL = xNodeL[yrs] ############### ################ # set up model specs subListArray = function(lA, vars, dims=2){ if(dims==2){ return( lapply(lA, function(x){ x[,vars, drop=FALSE] }) ) } if(dims==3){ return( lapply(lA, function(x){ x[,,vars,drop=FALSE] }) ) } } designArrays = list( base=list( senCovar=subListArray(xNodeL, c('riotsProtestsAgainst', 'vioCivEvents', 'groupSpread'), 2), recCovar=subListArray(xNodeL, c('riotsProtestsAgainst', 'vioCivEvents', 'groupSpread'), 2), dyadCovar=subListArray(xDyadL, c('govActor', 'postBoko', 'elecYear', 'ngbrConfCount'), 3) ) ) ranks = seq(0,20,5) results = lapply(ranks, function(rank){ mod = ameOutSamp( yList=yList, xDyadL=designArrays$base$dyadCovar, xRowL=designArrays$base$senCovar, xColL=designArrays$base$recCovar, R=rank ) 
return(mod) }) ################ ############################ # org results predDfs = lapply(1:length(ranks), function(i){ out = cbind( results[[i]]$outPerf, model=paste0('AME (K=',ranks[i],')')) }) names(predDfs) = paste0('AME (K=', ranks, ')') # tabular data aucSumm=do.call('rbind', lapply(predDfs,function(x){ aucROC=getAUC(x$pred,x$actual) ; aucPR=auc_pr(x$actual,x$pred) return( c('AUC'=aucROC,'AUC (PR)'=aucPR) ) }) ) aucSumm = trim(format(round(aucSumm, 3), nsmall=2)) aucSumm = aucSumm[nrow(aucSumm):1,] # viz results print.xtable( xtable(aucSumm, align='lcc', caption='Out-of-sample performance statistics by varying dimensions of multiplicative effects in AME.', label='tab:ame_vark' ), include.rownames=TRUE, sanitize.text.function = identity, hline.after=c(0,0,1,nrow(aucSumm),nrow(aucSumm)), size='normalsize', file='floats/tableA1.tex' ) ################
library(party, lib="/data/home/eayrey/R") library(readr, lib="/data/home/eayrey/R") FUSION_Metrics <- read_csv("H:/Temp/convnet_3d/Leaf_On_Data/FUSION_Metrics.csv") withheld <- read_csv("H:/Temp/convnet_3d/Leaf_On_Data/withheld.csv", col_names = FALSE) FUSION_Metrics$Biomass_AG[is.na(FUSION_Metrics$Biomass_AG)] <- 0 testF=FUSION_Metrics[as.integer(c(withheld[0:1000,1])$X1+1),] validF=FUSION_Metrics[as.integer(c(withheld[1001:2000,1])$X1+1),] trainF=FUSION_Metrics[-as.integer(c(withheld[,1])$X1+1),] trainF=trainF[sample(nrow(trainF),350),] X='Biomass_AG' cov=colnames(trainF)[7:length(colnames(trainF))] Biomass_Mod=cforest(as.formula((paste(X,'~',paste(cov,collapse='+')))), data=trainF, controls= cforest_control(mtry=round(length(cov)/3), ntree=25, trace=TRUE)) errorF=c() preds=c() for (i in 1:nrow(testF)){ #predF=exp(predict(Tree_Count_Mod_F,testF[i,11:65])) predF=predict(Biomass_Mod,newdata=testF[i,7:length(colnames(trainF))]) trueF=testF$Biomass_AG[i] errorF=append(errorF,(predF-trueF)) preds=append(preds, predF) print(i) } RMSEf=sqrt(mean(errorF^2)) RMSEf_Perc=RMSEf/mean(testF$Biomass_AG) Bias=mean(errorF) Bias_Perc=mean(errorF)/mean(testF$Biomass_AG) Abs_Bias=mean(abs(errorF)) R_squared=1-sum((testF$Biomass_AG-preds)^2)/sum((testF$Biomass_AG-mean(testF$Biomass_AG))^2) X='Biomass_AG' importance=varimp(Biomass_Mod, conditional = TRUE, threshold=.65) importance=stack(importance) colnames(importance)=c("imp", "name") importance= importance[order(-importance[,1]),] bads=tail(importance, round(nrow(importance)*.15)) cov=as.character(importance$name) all_metricsF=c(R_squared, RMSEf,RMSEf_Perc,Bias,Bias_Perc,Abs_Bias,paste(X,'~',paste(cov,collapse='+')) ) print(paste(RMSEf, 'starting iterations...')) while (length(cov)>5){ cov=as.character(importance[!importance[,1] %in% bads[,1],]$name) Biomass_Mod_F2=cforest(as.formula((paste(X,'~',paste(cov,collapse='+')))), data=trainF, controls= cforest_control(mtry=round(length(cov)/3), ntree=50, trace=TRUE)) errorF=c() predsF=c() 
for (i in 1:nrow(testF)){ predF=predict(Biomass_Mod_F2,newdata=testF[i,7:length(colnames(trainF))]) trueF=testF$Biomass_AG[i] errorF=append(errorF,(predF-trueF)) predsF=append(predsF, predF) } #RMSE rmse=sqrt(mean(errorF^2)) #RMSE Perc rmse_perc=rmse/mean(testF$Biomass_AG) #Bias bias=mean(errorF) #Perc_Bias perc_bias=mean(errorF)/mean(testF$Biomass_AG) R_squared=1-sum((testF$Biomass_AG-predsF)^2)/sum((testF$Biomass_AG-mean(testF$Biomass_AG))^2) #MAE mae=mean(abs(errorF)) all_metricsF=rbind(all_metricsF, c(R_squared,rmse,rmse_perc,bias,perc_bias,mae, paste(X,'~',paste(cov,collapse='+')))) importance=varimp(Biomass_Mod_F2, conditional = TRUE, threshold=.65) importance=stack(importance) colnames(importance)=c("imp", "name") importance= importance[order(-importance[,1]),] number=ifelse(round(nrow(importance)/10)>0,round(nrow(importance)/10),1) bads=tail(importance, number) print(paste(rmse ,paste(X,'~',paste(cov,collapse='+')))) } colnames(metrics)=c("r2", "RMSE", "RMSE_Perc", "bias", "bias_Perc", "MAE", "covs") ####################################################################################################################### TC_Mod_F=cforest(SW_Percent ~ 
Percentage_all_returns_above_mean+Percentage_all_returns_above_2+Total_return_count+Total_all_returns+Elev_variance+Return_1_count+Elev_P70+Elev_AAD+Canopy_relief_ratio+all_rtns_above_mean__frst_rtns+Elev_stddev+Elev_P10+Total_first_returns+First_returns_above_mode+Elev_P05+Elev_L_kurtosis+All_returns_above_mean+Elev_skewness+Elev_L_skewness+Elev_MAD_median+Elev_CV+Elev_L_CV+Percentage_all_returns_above_mode+Elev_P40+All_returns_above_mode+Elev_P20+Percentage_first_returns_above_mean+Percentage_first_returns_above_2+Elev_L4+Elev_mean+Elev_MAD_mode+Elev_P60+Elev_P25+Elev_kurtosis+Elev_L1+Elev_P30+Elev_P80+Elev_P90+Elev_P95+Elev_SQRT_mean_SQ+Elev_P75+Elev_mode+Elev_P99+All_returns_above_2+Percentage_first_returns_above_mode+all_rtns_above_mode__frst_rtns+First_returns_above_2+First_returns_above_mean+Elev_P01+Elev_CURT_mean_CUBE+Elev_maximum+Elev_IQ, data=trainF,importance=TRUE, ntree=1000) errorsF=c() preds=c() for (i in 1:nrow(testF)){ predF=predict(TC_Mod_F,newdata=testF[i,6:53]) trueF=testF$Tree_Count[i] errorF=append(errorF,(predF-trueF)) preds=append(preds, predF) } RMSEf=sqrt(mean(errorF^2)) RMSEf_Perc=RMSEf/mean(testF$Tree_Count) Bias=mean(errorF) Bias_Perc=mean(errorF)/mean(testF$Tree_Count) Abs_Bias=mean(abs(errorF)) R_squared=1-sum((testF$Tree_Count-preds)^2)/sum((testF$Tree_Count-mean(testF$Tree_Count))^2) #Biomass_Mod_F=randomForest(Biomass_AG ~ 
total_mean+Percentile_20+Perc_Above_15m+Perc_Above_20m+heightB_90+countB_P20+Percentile_40+Percentile_60+heightB_max+basal_area_in+mean_top_ht+heightB_75+rugoseL_mean+total_median+SIMH+Percentile_80+scan_angle_sd+Perc_Above_5m+mean_ht+Percentile_95+mean_obscured+skew_dists_NB+heightLM_mean+pt2WSedge_max+SD_crown_area_B+total_sd+height_75+mean_pointy+sd_residuals+height_max+biomass_tot+height_mean+heightB_25+basal_area_tot+perc_in_tally+max_crown_area_B+crown_volume_tot+heightB_mean+biomass_in+prod+heightB_med+Perc_Above_P80+encroaching_tree_count+Perc_Above_25m+count_P10+countB_P10+tot_top_ht+Perc_Above_10m+Perc_Above_P60+rugoseL_sd+kurt_dists_B+mean_growing_spaceNB+csr+countLM_P10+rugose_WS_perc+out_in_ratioC+sd_off_cent+mean_top_positivity+Perc_Above_P95+height_25+rugoseS_sd+sd_growing_spaceNB+mean_off_cent+total_range+Perc_Above_P20+Perc_Above_P40+Perc_Above_35m+kurt_dists_NB+mean_dists_B+SD_dists_B+max_growing_space+pt2Sedge_mean+GINIH+out_in_ratioH+mean_crown_area_B+watershed_count_inplot+mean_crown_area_NB+encroach_area+crown_volume_in+mean_residuals+mean_positivity+mean_raw_pointy+pt2WSedge_sd+countB_P30+countB+height_sd+count_P30+countLM_P30+sd_shadowed+sky_view_areas_m+neighbor_top_dist_m+rugoseS_count+softwoodyness+heightB_min+cell_ppm+sky_view_areas_sd+count_P20+pt2WSedge_mean+pt2Sedge_max+heightB_sd+max_crown_area_NB+sd_obscured+countLM+SD_dists_NB+rugoseS_mean+neighbor_top_dist_sd+sd_positivity, # data=trainF,importance=TRUE, ntree=1000)
/Parametric_Modelling/RF_Cond_Imps.R
no_license
jtpils/3D-Convolutional-Neural-Networks-with-LiDAR
R
false
false
6,224
r
library(party, lib="/data/home/eayrey/R") library(readr, lib="/data/home/eayrey/R") FUSION_Metrics <- read_csv("H:/Temp/convnet_3d/Leaf_On_Data/FUSION_Metrics.csv") withheld <- read_csv("H:/Temp/convnet_3d/Leaf_On_Data/withheld.csv", col_names = FALSE) FUSION_Metrics$Biomass_AG[is.na(FUSION_Metrics$Biomass_AG)] <- 0 testF=FUSION_Metrics[as.integer(c(withheld[0:1000,1])$X1+1),] validF=FUSION_Metrics[as.integer(c(withheld[1001:2000,1])$X1+1),] trainF=FUSION_Metrics[-as.integer(c(withheld[,1])$X1+1),] trainF=trainF[sample(nrow(trainF),350),] X='Biomass_AG' cov=colnames(trainF)[7:length(colnames(trainF))] Biomass_Mod=cforest(as.formula((paste(X,'~',paste(cov,collapse='+')))), data=trainF, controls= cforest_control(mtry=round(length(cov)/3), ntree=25, trace=TRUE)) errorF=c() preds=c() for (i in 1:nrow(testF)){ #predF=exp(predict(Tree_Count_Mod_F,testF[i,11:65])) predF=predict(Biomass_Mod,newdata=testF[i,7:length(colnames(trainF))]) trueF=testF$Biomass_AG[i] errorF=append(errorF,(predF-trueF)) preds=append(preds, predF) print(i) } RMSEf=sqrt(mean(errorF^2)) RMSEf_Perc=RMSEf/mean(testF$Biomass_AG) Bias=mean(errorF) Bias_Perc=mean(errorF)/mean(testF$Biomass_AG) Abs_Bias=mean(abs(errorF)) R_squared=1-sum((testF$Biomass_AG-preds)^2)/sum((testF$Biomass_AG-mean(testF$Biomass_AG))^2) X='Biomass_AG' importance=varimp(Biomass_Mod, conditional = TRUE, threshold=.65) importance=stack(importance) colnames(importance)=c("imp", "name") importance= importance[order(-importance[,1]),] bads=tail(importance, round(nrow(importance)*.15)) cov=as.character(importance$name) all_metricsF=c(R_squared, RMSEf,RMSEf_Perc,Bias,Bias_Perc,Abs_Bias,paste(X,'~',paste(cov,collapse='+')) ) print(paste(RMSEf, 'starting iterations...')) while (length(cov)>5){ cov=as.character(importance[!importance[,1] %in% bads[,1],]$name) Biomass_Mod_F2=cforest(as.formula((paste(X,'~',paste(cov,collapse='+')))), data=trainF, controls= cforest_control(mtry=round(length(cov)/3), ntree=50, trace=TRUE)) errorF=c() predsF=c() 
for (i in 1:nrow(testF)){ predF=predict(Biomass_Mod_F2,newdata=testF[i,7:length(colnames(trainF))]) trueF=testF$Biomass_AG[i] errorF=append(errorF,(predF-trueF)) predsF=append(predsF, predF) } #RMSE rmse=sqrt(mean(errorF^2)) #RMSE Perc rmse_perc=rmse/mean(testF$Biomass_AG) #Bias bias=mean(errorF) #Perc_Bias perc_bias=mean(errorF)/mean(testF$Biomass_AG) R_squared=1-sum((testF$Biomass_AG-predsF)^2)/sum((testF$Biomass_AG-mean(testF$Biomass_AG))^2) #MAE mae=mean(abs(errorF)) all_metricsF=rbind(all_metricsF, c(R_squared,rmse,rmse_perc,bias,perc_bias,mae, paste(X,'~',paste(cov,collapse='+')))) importance=varimp(Biomass_Mod_F2, conditional = TRUE, threshold=.65) importance=stack(importance) colnames(importance)=c("imp", "name") importance= importance[order(-importance[,1]),] number=ifelse(round(nrow(importance)/10)>0,round(nrow(importance)/10),1) bads=tail(importance, number) print(paste(rmse ,paste(X,'~',paste(cov,collapse='+')))) } colnames(metrics)=c("r2", "RMSE", "RMSE_Perc", "bias", "bias_Perc", "MAE", "covs") ####################################################################################################################### TC_Mod_F=cforest(SW_Percent ~ 
Percentage_all_returns_above_mean+Percentage_all_returns_above_2+Total_return_count+Total_all_returns+Elev_variance+Return_1_count+Elev_P70+Elev_AAD+Canopy_relief_ratio+all_rtns_above_mean__frst_rtns+Elev_stddev+Elev_P10+Total_first_returns+First_returns_above_mode+Elev_P05+Elev_L_kurtosis+All_returns_above_mean+Elev_skewness+Elev_L_skewness+Elev_MAD_median+Elev_CV+Elev_L_CV+Percentage_all_returns_above_mode+Elev_P40+All_returns_above_mode+Elev_P20+Percentage_first_returns_above_mean+Percentage_first_returns_above_2+Elev_L4+Elev_mean+Elev_MAD_mode+Elev_P60+Elev_P25+Elev_kurtosis+Elev_L1+Elev_P30+Elev_P80+Elev_P90+Elev_P95+Elev_SQRT_mean_SQ+Elev_P75+Elev_mode+Elev_P99+All_returns_above_2+Percentage_first_returns_above_mode+all_rtns_above_mode__frst_rtns+First_returns_above_2+First_returns_above_mean+Elev_P01+Elev_CURT_mean_CUBE+Elev_maximum+Elev_IQ, data=trainF,importance=TRUE, ntree=1000) errorsF=c() preds=c() for (i in 1:nrow(testF)){ predF=predict(TC_Mod_F,newdata=testF[i,6:53]) trueF=testF$Tree_Count[i] errorF=append(errorF,(predF-trueF)) preds=append(preds, predF) } RMSEf=sqrt(mean(errorF^2)) RMSEf_Perc=RMSEf/mean(testF$Tree_Count) Bias=mean(errorF) Bias_Perc=mean(errorF)/mean(testF$Tree_Count) Abs_Bias=mean(abs(errorF)) R_squared=1-sum((testF$Tree_Count-preds)^2)/sum((testF$Tree_Count-mean(testF$Tree_Count))^2) #Biomass_Mod_F=randomForest(Biomass_AG ~ 
total_mean+Percentile_20+Perc_Above_15m+Perc_Above_20m+heightB_90+countB_P20+Percentile_40+Percentile_60+heightB_max+basal_area_in+mean_top_ht+heightB_75+rugoseL_mean+total_median+SIMH+Percentile_80+scan_angle_sd+Perc_Above_5m+mean_ht+Percentile_95+mean_obscured+skew_dists_NB+heightLM_mean+pt2WSedge_max+SD_crown_area_B+total_sd+height_75+mean_pointy+sd_residuals+height_max+biomass_tot+height_mean+heightB_25+basal_area_tot+perc_in_tally+max_crown_area_B+crown_volume_tot+heightB_mean+biomass_in+prod+heightB_med+Perc_Above_P80+encroaching_tree_count+Perc_Above_25m+count_P10+countB_P10+tot_top_ht+Perc_Above_10m+Perc_Above_P60+rugoseL_sd+kurt_dists_B+mean_growing_spaceNB+csr+countLM_P10+rugose_WS_perc+out_in_ratioC+sd_off_cent+mean_top_positivity+Perc_Above_P95+height_25+rugoseS_sd+sd_growing_spaceNB+mean_off_cent+total_range+Perc_Above_P20+Perc_Above_P40+Perc_Above_35m+kurt_dists_NB+mean_dists_B+SD_dists_B+max_growing_space+pt2Sedge_mean+GINIH+out_in_ratioH+mean_crown_area_B+watershed_count_inplot+mean_crown_area_NB+encroach_area+crown_volume_in+mean_residuals+mean_positivity+mean_raw_pointy+pt2WSedge_sd+countB_P30+countB+height_sd+count_P30+countLM_P30+sd_shadowed+sky_view_areas_m+neighbor_top_dist_m+rugoseS_count+softwoodyness+heightB_min+cell_ppm+sky_view_areas_sd+count_P20+pt2WSedge_mean+pt2Sedge_max+heightB_sd+max_crown_area_NB+sd_obscured+countLM+SD_dists_NB+rugoseS_mean+neighbor_top_dist_sd+sd_positivity, # data=trainF,importance=TRUE, ntree=1000)
unemployment <- read.csv("data/unemployment_rate.csv", header=T) data_ori_seas <- data.frame(unemployment[2:3]) data_original <- data.frame(unemployment[2]) data_seasonal <- data.frame(unemployment[3]) ts_ori_seas <- ts(data_ori_seas, start=c(2000,1), frequency=12) ts_original <- ts(data_original, start=c(2000,1), frequency=12) ts_seasonal <- ts(data_seasonal, start=c(2000,1), frequency=12) plot(ts_ori_seas, xlab="년도", col="black", main="실업률 비교(원계열, 계절조정계열)")
/scripts/script1.R
no_license
cypowers/study_forecast_method_midterm
R
false
false
499
r
unemployment <- read.csv("data/unemployment_rate.csv", header=T) data_ori_seas <- data.frame(unemployment[2:3]) data_original <- data.frame(unemployment[2]) data_seasonal <- data.frame(unemployment[3]) ts_ori_seas <- ts(data_ori_seas, start=c(2000,1), frequency=12) ts_original <- ts(data_original, start=c(2000,1), frequency=12) ts_seasonal <- ts(data_seasonal, start=c(2000,1), frequency=12) plot(ts_ori_seas, xlab="년도", col="black", main="실업률 비교(원계열, 계절조정계열)")
#### 1. Load libraries and source functions #### library(dplyr) library(helpeR) # devtools::install_github("mhesselbarth/helpeR") library(purrr) library(readr) purrr::walk(list.files(path = "1_Setup_Functions", pattern = ".R", full.names = TRUE), function(x) source(x)) # metrics not comparable between landscapes with different area absolute_metrics <- c("ca", "ndca", "np", "pafrac", "pr", "ta", "tca", "te") # if of repetition simulation_design$id <- rep(1:(nrow(simulation_design) / 50), times = 50) overwrite <- FALSE # dont overwrite if file already exists #### 1. Low AC #### sampling_low_ac <- readr::read_rds(path = paste0(getwd(), "/3_Output/sampling_low_ac_50.rds")) true_value_low_ac <- readr::read_rds(path = paste0(getwd(), "/3_Output/true_value_low_ac_50.rds")) # remove non-comparable metrics and add unique id for(i in 1:length(true_value_low_ac)) { true_value_low_ac[[i]] <- dplyr::filter(true_value_low_ac[[i]], !(metric %in% absolute_metrics)) true_value_low_ac[[i]] <- dplyr::mutate(true_value_low_ac[[i]], layer = i) } true_value_low_ac <- dplyr::bind_rows(true_value_low_ac) # remove non-comparable metrics and add unique ids for(i in 1:length(sampling_low_ac)) { sampling_low_ac[[i]] <- dplyr::filter(sampling_low_ac[[i]], !(metric %in% absolute_metrics)) sampling_low_ac[[i]] <- dplyr::mutate(sampling_low_ac[[i]], landscape_id = simulation_design$i[i], simulation_id = simulation_design$id[i]) } # join value of whole landscape, calculate sample mean and calculate nRMSE deviation_low_ac <- bind_rows(sampling_low_ac) %>% dplyr::left_join(true_value_low_ac, by = c("landscape_id" = "layer", "level" = "level", "class" = "class", "id" = "id", "metric" = "metric"), suffix = c("_sample", "_true")) %>% dplyr::group_by(simulation_id, landscape_id, level, class, metric) %>% dplyr::summarise(n = n(), value_true = unique(value_true), estimate = mean(value_sample, na.rm = TRUE), var = var(value_sample, na.rm = TRUE) / (n - 1)) %>% dplyr::mutate(bias = estimate - value_true, 
mse = var + (bias ^ 2), rmse = sqrt(mse), nrmse = rmse / estimate) helpeR::save_rds(object = deviation_low_ac, filename = "deviation_low_ac_50.rds", path = paste0(getwd(), "/3_Output"), overwrite = overwrite) # rm(sampling_low_ac, true_value_low_ac) #### 2. Medium AC #### sampling_medium_ac <- readr::read_rds(path = paste0(getwd(), "/3_Output/sampling_medium_ac_50.rds")) true_value_medium_ac <- readr::read_rds(path = paste0(getwd(), "/3_Output/true_value_medium_ac_50.rds")) # remove non-comparable metrics and add unique id for(i in 1:length(true_value_medium_ac)) { true_value_medium_ac[[i]] <- dplyr::filter(true_value_medium_ac[[i]], !(metric %in% absolute_metrics)) true_value_medium_ac[[i]] <- dplyr::mutate(true_value_medium_ac[[i]], layer = i) } true_value_medium_ac <- dplyr::bind_rows(true_value_medium_ac) # remove non-comparable metrics and add unique id for(i in 1:length(sampling_medium_ac)) { sampling_medium_ac[[i]] <- dplyr::filter(sampling_medium_ac[[i]], !(metric %in% absolute_metrics)) sampling_medium_ac[[i]] <- dplyr::mutate(sampling_medium_ac[[i]], landscape_id = simulation_design$i[i], simulation_id = simulation_design$id[i]) } # join value of whole landscape, calculate sample mean and calculate nRMSE deviation_medium_ac <- bind_rows(sampling_medium_ac) %>% dplyr::left_join(true_value_medium_ac, by = c("landscape_id" = "layer", "level" = "level", "class" = "class", "id" = "id", "metric" = "metric"), suffix = c("_sample", "_true")) %>% dplyr::group_by(simulation_id, landscape_id, level, class, metric) %>% dplyr::summarise(n = n(), value_true = unique(value_true), estimate = mean(value_sample, na.rm = TRUE), var = var(value_sample, na.rm = TRUE) / (n - 1)) %>% dplyr::mutate(bias = estimate - value_true, mse = var + (bias ^ 2), rmse = sqrt(mse), nrmse = rmse / estimate) helpeR::save_rds(object = deviation_medium_ac, filename = "deviation_medium_ac_50.rds", path = paste0(getwd(), "/3_Output"), overwrite = overwrite) # rm(sampling_medium_ac, 
true_value_medium_ac) #### 3. High AC #### sampling_high_ac <- readr::read_rds(path = paste0(getwd(), "/3_Output/sampling_high_ac_50.rds")) true_value_high_ac <- readr::read_rds(path = paste0(getwd(), "/3_Output/true_value_high_ac_50.rds")) # remove non-comparable metrics and add unique id for(i in 1:length(true_value_high_ac)) { true_value_high_ac[[i]] <- dplyr::filter(true_value_high_ac[[i]], !(metric %in% absolute_metrics)) true_value_high_ac[[i]] <- dplyr::mutate(true_value_high_ac[[i]], layer = i) } true_value_high_ac <- dplyr::bind_rows(true_value_high_ac) # remove non-comparable metrics and add unique id for(i in 1:length(sampling_high_ac)) { sampling_high_ac[[i]] <- dplyr::filter(sampling_high_ac[[i]], !(metric %in% absolute_metrics)) sampling_high_ac[[i]] <- dplyr::mutate(sampling_high_ac[[i]], landscape_id = simulation_design$i[i], simulation_id = simulation_design$id[i]) } # join value of whole landscape, calculate sample mean and calculate nRMSE deviation_high_ac <- bind_rows(sampling_high_ac) %>% dplyr::left_join(true_value_high_ac, by = c("landscape_id" = "layer", "level" = "level", "class" = "class", "id" = "id", "metric" = "metric"), suffix = c("_sample", "_true")) %>% dplyr::group_by(simulation_id, landscape_id, level, class, metric) %>% dplyr::summarise(n = n(), value_true = unique(value_true), estimate = mean(value_sample, na.rm = TRUE), var = var(value_sample, na.rm = TRUE) / (n - 1)) %>% dplyr::mutate(bias = estimate - value_true, mse = var + (bias ^ 2), rmse = sqrt(mse), nrmse = rmse / estimate) helpeR::save_rds(object = deviation_high_ac, filename = "deviation_high_ac_50.rds", path = paste0(getwd(), "/3_Output"), overwrite = overwrite) # rm(sampling_high_ac, true_value_high_ac)
/2_Experiment/4_compare_values.R
no_license
mhesselbarth/Hesselbarth_et_al_2019_Ecography
R
false
false
7,565
r
#### 1. Load libraries and source functions #### library(dplyr) library(helpeR) # devtools::install_github("mhesselbarth/helpeR") library(purrr) library(readr) purrr::walk(list.files(path = "1_Setup_Functions", pattern = ".R", full.names = TRUE), function(x) source(x)) # metrics not comparable between landscapes with different area absolute_metrics <- c("ca", "ndca", "np", "pafrac", "pr", "ta", "tca", "te") # if of repetition simulation_design$id <- rep(1:(nrow(simulation_design) / 50), times = 50) overwrite <- FALSE # dont overwrite if file already exists #### 1. Low AC #### sampling_low_ac <- readr::read_rds(path = paste0(getwd(), "/3_Output/sampling_low_ac_50.rds")) true_value_low_ac <- readr::read_rds(path = paste0(getwd(), "/3_Output/true_value_low_ac_50.rds")) # remove non-comparable metrics and add unique id for(i in 1:length(true_value_low_ac)) { true_value_low_ac[[i]] <- dplyr::filter(true_value_low_ac[[i]], !(metric %in% absolute_metrics)) true_value_low_ac[[i]] <- dplyr::mutate(true_value_low_ac[[i]], layer = i) } true_value_low_ac <- dplyr::bind_rows(true_value_low_ac) # remove non-comparable metrics and add unique ids for(i in 1:length(sampling_low_ac)) { sampling_low_ac[[i]] <- dplyr::filter(sampling_low_ac[[i]], !(metric %in% absolute_metrics)) sampling_low_ac[[i]] <- dplyr::mutate(sampling_low_ac[[i]], landscape_id = simulation_design$i[i], simulation_id = simulation_design$id[i]) } # join value of whole landscape, calculate sample mean and calculate nRMSE deviation_low_ac <- bind_rows(sampling_low_ac) %>% dplyr::left_join(true_value_low_ac, by = c("landscape_id" = "layer", "level" = "level", "class" = "class", "id" = "id", "metric" = "metric"), suffix = c("_sample", "_true")) %>% dplyr::group_by(simulation_id, landscape_id, level, class, metric) %>% dplyr::summarise(n = n(), value_true = unique(value_true), estimate = mean(value_sample, na.rm = TRUE), var = var(value_sample, na.rm = TRUE) / (n - 1)) %>% dplyr::mutate(bias = estimate - value_true, 
mse = var + (bias ^ 2), rmse = sqrt(mse), nrmse = rmse / estimate) helpeR::save_rds(object = deviation_low_ac, filename = "deviation_low_ac_50.rds", path = paste0(getwd(), "/3_Output"), overwrite = overwrite) # rm(sampling_low_ac, true_value_low_ac) #### 2. Medium AC #### sampling_medium_ac <- readr::read_rds(path = paste0(getwd(), "/3_Output/sampling_medium_ac_50.rds")) true_value_medium_ac <- readr::read_rds(path = paste0(getwd(), "/3_Output/true_value_medium_ac_50.rds")) # remove non-comparable metrics and add unique id for(i in 1:length(true_value_medium_ac)) { true_value_medium_ac[[i]] <- dplyr::filter(true_value_medium_ac[[i]], !(metric %in% absolute_metrics)) true_value_medium_ac[[i]] <- dplyr::mutate(true_value_medium_ac[[i]], layer = i) } true_value_medium_ac <- dplyr::bind_rows(true_value_medium_ac) # remove non-comparable metrics and add unique id for(i in 1:length(sampling_medium_ac)) { sampling_medium_ac[[i]] <- dplyr::filter(sampling_medium_ac[[i]], !(metric %in% absolute_metrics)) sampling_medium_ac[[i]] <- dplyr::mutate(sampling_medium_ac[[i]], landscape_id = simulation_design$i[i], simulation_id = simulation_design$id[i]) } # join value of whole landscape, calculate sample mean and calculate nRMSE deviation_medium_ac <- bind_rows(sampling_medium_ac) %>% dplyr::left_join(true_value_medium_ac, by = c("landscape_id" = "layer", "level" = "level", "class" = "class", "id" = "id", "metric" = "metric"), suffix = c("_sample", "_true")) %>% dplyr::group_by(simulation_id, landscape_id, level, class, metric) %>% dplyr::summarise(n = n(), value_true = unique(value_true), estimate = mean(value_sample, na.rm = TRUE), var = var(value_sample, na.rm = TRUE) / (n - 1)) %>% dplyr::mutate(bias = estimate - value_true, mse = var + (bias ^ 2), rmse = sqrt(mse), nrmse = rmse / estimate) helpeR::save_rds(object = deviation_medium_ac, filename = "deviation_medium_ac_50.rds", path = paste0(getwd(), "/3_Output"), overwrite = overwrite) # rm(sampling_medium_ac, 
true_value_medium_ac) #### 3. High AC #### sampling_high_ac <- readr::read_rds(path = paste0(getwd(), "/3_Output/sampling_high_ac_50.rds")) true_value_high_ac <- readr::read_rds(path = paste0(getwd(), "/3_Output/true_value_high_ac_50.rds")) # remove non-comparable metrics and add unique id for(i in 1:length(true_value_high_ac)) { true_value_high_ac[[i]] <- dplyr::filter(true_value_high_ac[[i]], !(metric %in% absolute_metrics)) true_value_high_ac[[i]] <- dplyr::mutate(true_value_high_ac[[i]], layer = i) } true_value_high_ac <- dplyr::bind_rows(true_value_high_ac) # remove non-comparable metrics and add unique id for(i in 1:length(sampling_high_ac)) { sampling_high_ac[[i]] <- dplyr::filter(sampling_high_ac[[i]], !(metric %in% absolute_metrics)) sampling_high_ac[[i]] <- dplyr::mutate(sampling_high_ac[[i]], landscape_id = simulation_design$i[i], simulation_id = simulation_design$id[i]) } # join value of whole landscape, calculate sample mean and calculate nRMSE deviation_high_ac <- bind_rows(sampling_high_ac) %>% dplyr::left_join(true_value_high_ac, by = c("landscape_id" = "layer", "level" = "level", "class" = "class", "id" = "id", "metric" = "metric"), suffix = c("_sample", "_true")) %>% dplyr::group_by(simulation_id, landscape_id, level, class, metric) %>% dplyr::summarise(n = n(), value_true = unique(value_true), estimate = mean(value_sample, na.rm = TRUE), var = var(value_sample, na.rm = TRUE) / (n - 1)) %>% dplyr::mutate(bias = estimate - value_true, mse = var + (bias ^ 2), rmse = sqrt(mse), nrmse = rmse / estimate) helpeR::save_rds(object = deviation_high_ac, filename = "deviation_high_ac_50.rds", path = paste0(getwd(), "/3_Output"), overwrite = overwrite) # rm(sampling_high_ac, true_value_high_ac)
rm(list = ls()) name <- c('Ira A', 'David A', 'Todd A', 'Robert B', 'Yosef C', 'James C', 'Francesca A', 'David F', 'Rocky G', 'Peter J', 'Anne K', 'Kristen N', 'Ray N', 'James P', 'Peter S', 'George S', 'Ellen S', 'Bruce V') height <- c(5 + 4 / 12, 6 + 11 / 12, 5 + 11 / 12, 5 + 11 / 12, 6, 5 + 10 / 12, 5 + 10 / 12, 5 + 11 / 12, 5 + 3 / 12, 5 + 10 / 12, 5 + 8 / 12, 5 + 7 / 12, 5 + 10 / 12, 5 + 9 / 12, 5 + 10.5 / 12, 5 + 10.5 / 12, 5 + 10 / 12, 6) faculty <- data.frame(name, height) save(faculty, file = 'faculty') set.seed(1) ; mean(sample(faculty$height, 5)) report <- rbind( c(var(faculty$height), var(faculty$height[-2])), summary(faculty$height)[c(2, 5)], summary(faculty$height[-2])[c(2, 5)]) dimnames(report) <- list( c('sigma^2', 'IQR with David A.', 'IQR without'), c(' ', ' ')) round(report, 2)
/scripts/ch08/faculty.r
no_license
StefanoCiotti/MyProgectsFirst
R
false
false
850
r
rm(list = ls()) name <- c('Ira A', 'David A', 'Todd A', 'Robert B', 'Yosef C', 'James C', 'Francesca A', 'David F', 'Rocky G', 'Peter J', 'Anne K', 'Kristen N', 'Ray N', 'James P', 'Peter S', 'George S', 'Ellen S', 'Bruce V') height <- c(5 + 4 / 12, 6 + 11 / 12, 5 + 11 / 12, 5 + 11 / 12, 6, 5 + 10 / 12, 5 + 10 / 12, 5 + 11 / 12, 5 + 3 / 12, 5 + 10 / 12, 5 + 8 / 12, 5 + 7 / 12, 5 + 10 / 12, 5 + 9 / 12, 5 + 10.5 / 12, 5 + 10.5 / 12, 5 + 10 / 12, 6) faculty <- data.frame(name, height) save(faculty, file = 'faculty') set.seed(1) ; mean(sample(faculty$height, 5)) report <- rbind( c(var(faculty$height), var(faculty$height[-2])), summary(faculty$height)[c(2, 5)], summary(faculty$height[-2])[c(2, 5)]) dimnames(report) <- list( c('sigma^2', 'IQR with David A.', 'IQR without'), c(' ', ' ')) round(report, 2)
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/cpue.r \name{plotstand} \alias{plotstand} \title{plotstand - plot optimum model from standLM vs Geometric mean} \usage{ plotstand(stnd, bars = FALSE, geo = 0, P = 95, catch = NA, usefont = 7) } \arguments{ \item{stnd}{is the list output from standLM} \item{bars}{is a logical T or F determining whether to put confidence bounds around each estimate; defaults to FALSE} \item{geo}{is an estimate of the original geometric mean catch rate across all years. If this is > 0.0 it is used to rescale the graph to the nominal scale, otherwise the mean of each time-series will be 1.0, which simplifies visual comparisons. geo defaults to 0.0.} \item{P}{is the percentile used for the log-normal confidence bounds, if they are plotted; defaults to 95.} \item{catch}{if it is desired to plot the catch as well as the CPUE then a vector of catches needs to be input here} \item{usefont}{enables the font used in the plot to be modified. Most publications appear to prefer usefont=1; defaults to 7 - Times bold} } \value{ a plot of the model with the smallest AIC (solid line) and the geometric mean (model 1, always = LnCE ~ Year, the dashed line). 'Year' could be some other time step. } \description{ plot optimum model from standLM vs Geometric mean. Has options that allow for log-normls P% intervals around each time period's parameter estimate. Also can rescale the graph to have an average the same as the geometric mean average of the original time series of data. } \examples{ \dontrun{ data(sps) splabel = "SpeciesName" labelM <- c("Year","Vessel","Month") sps1 <- makecategorical(labelM[1:3],sps) mods <- makemodels(labelM) out <- standLM(mods,sps1,splabel) plotprep() plotstand(out, bars=TRUE, P=90,geo=100.0,usefont=1) plotstand(out) } }
/man/plotstand.Rd
no_license
haddonm/cede
R
false
true
1,891
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/cpue.r \name{plotstand} \alias{plotstand} \title{plotstand - plot optimum model from standLM vs Geometric mean} \usage{ plotstand(stnd, bars = FALSE, geo = 0, P = 95, catch = NA, usefont = 7) } \arguments{ \item{stnd}{is the list output from standLM} \item{bars}{is a logical T or F determining whether to put confidence bounds around each estimate; defaults to FALSE} \item{geo}{is an estimate of the original geometric mean catch rate across all years. If this is > 0.0 it is used to rescale the graph to the nominal scale, otherwise the mean of each time-series will be 1.0, which simplifies visual comparisons. geo defaults to 0.0.} \item{P}{is the percentile used for the log-normal confidence bounds, if they are plotted; defaults to 95.} \item{catch}{if it is desired to plot the catch as well as the CPUE then a vector of catches needs to be input here} \item{usefont}{enables the font used in the plot to be modified. Most publications appear to prefer usefont=1; defaults to 7 - Times bold} } \value{ a plot of the model with the smallest AIC (solid line) and the geometric mean (model 1, always = LnCE ~ Year, the dashed line). 'Year' could be some other time step. } \description{ plot optimum model from standLM vs Geometric mean. Has options that allow for log-normls P% intervals around each time period's parameter estimate. Also can rescale the graph to have an average the same as the geometric mean average of the original time series of data. } \examples{ \dontrun{ data(sps) splabel = "SpeciesName" labelM <- c("Year","Vessel","Month") sps1 <- makecategorical(labelM[1:3],sps) mods <- makemodels(labelM) out <- standLM(mods,sps1,splabel) plotprep() plotstand(out, bars=TRUE, P=90,geo=100.0,usefont=1) plotstand(out) } }
\name{DataFrame-class} \docType{class} \alias{class:DataFrame} \alias{DataFrame-class} % accessor \alias{nrow,DataFrame-method} \alias{ncol,DataFrame-method} \alias{rownames,DataFrame-method} \alias{colnames,DataFrame-method} \alias{rownames<-,DataFrame-method} \alias{colnames<-,DataFrame-method} % constructor \alias{DataFrame} % subsetting \alias{[,DataFrame-method} \alias{[<-,DataFrame-method} \alias{[[<-,DataFrame-method} \alias{[[,DataFrame-method} % coercion \alias{as.data.frame,DataFrame-method} \alias{as.matrix,DataFrame-method} \alias{coerce,matrix,DataFrame-method} \alias{coerce,vector,DataFrame-method} \alias{coerce,list,DataFrame-method} \alias{coerce,integer,DataFrame-method} \alias{coerce,Vector,DataFrame-method} \alias{coerce,data.frame,DataFrame-method} \alias{coerce,data.table,DataFrame-method} \alias{coerce,NULL,DataFrame-method} \alias{coerce,table,DataFrame-method} \alias{coerce,AsIs,DataFrame-method} \alias{coerce,DataFrame,data.frame-method} \alias{coerce,xtabs,DataFrame-method} \alias{coerce,ANY,DataFrame-method} \alias{coerce,SimpleList,DataFrame-method} % combining \alias{bindROWS,DataFrame-method} \alias{rbind,DataFrame-method} \alias{c,DataFrame-method} \alias{cbind,DataFrame-method} \title{DataFrame objects} \description{ The \code{DataFrame} class extends the \link{DataTable} virtual class and supports the storage of any type of object (with \code{length} and \code{[} methods) as columns. } \details{ On the whole, the \code{DataFrame} behaves very similarly to \code{data.frame}, in terms of construction, subsetting, splitting, combining, etc. The most notable exception is that the row names are optional. This means calling \code{rownames(x)} will return \code{NULL} if there are no row names. Of course, it could return \code{seq_len(nrow(x))}, but returning \code{NULL} informs, for example, combination functions that no row names are desired (they are often a luxury when dealing with large data). 
As \code{DataFrame} derives from \code{\linkS4class{Vector}}, it is possible to set an \code{annotation} string. Also, another \code{DataFrame} can hold metadata on the columns. For a class to be supported as a column, it must have \code{length} and \code{[} methods, where \code{[} supports subsetting only by \code{i} and respects \code{drop=FALSE}. Optionally, a method may be defined for the \code{showAsCell} generic, which should return a vector of the same length as the subset of the column passed to it. This vector is then placed into a \code{data.frame} and converted to text with \code{format}. Thus, each element of the vector should be some simple, usually character, representation of the corresponding element in the column. } \section{Constructor}{ \describe{\code{DataFrame(..., row.names = NULL, check.names = TRUE, stringsAsFactors)}: Constructs a \code{DataFrame} in similar fashion to \code{\link{data.frame}}. Each argument in \code{...} is coerced to a \code{DataFrame} and combined column-wise. No special effort is expended to automatically determine the row names from the arguments. The row names should be given in \code{row.names}; otherwise, there are no row names. This is by design, as row names are normally undesirable when data is large. If \code{check.names} is \code{TRUE}, the column names will be checked for syntactic validity and made unique, if necessary. To store an object of a class that does not support coercion to \code{DataFrame}, wrap it in \code{I()}. The class must still have methods for \code{length} and \code{[}. The \code{stringsAsFactors} argument is ignored. The coercion of column arguments to DataFrame determines whether strings become factors. } } \section{Accessors}{ In the following code snippets, \code{x} is a \code{DataFrame}. \describe{ \item{}{\code{dim(x)}: Get the length two integer vector indicating in the first and second element the number of rows and columns, respectively. 
} \item{}{\code{dimnames(x)}, \code{dimnames(x) <- value}: Get and set the two element list containing the row names (character vector of length \code{nrow(x)} or \code{NULL}) and the column names (character vector of length \code{ncol(x)}). } } } \section{Coercion}{ \describe{ \item{}{\code{as(from, "DataFrame")}: By default, constructs a new \code{DataFrame} with \code{from} as its only column. If \code{from} is a \code{matrix} or \code{data.frame}, all of its columns become columns in the new \code{DataFrame}. If \code{from} is a list, each element becomes a column, recycling as necessary. Note that for the \code{DataFrame} to behave correctly, each column object must support element-wise subsetting via the \code{[} method and return the number of elements with \code{length}. It is recommended to use the \code{DataFrame} constructor, rather than this interface. } \item{}{\code{as.list(x)}: Coerces \code{x}, a \code{DataFrame}, to a \code{list}. } \item{}{\code{as.data.frame(x, row.names=NULL, optional=FALSE)}: Coerces \code{x}, a \code{DataFrame}, to a \code{data.frame}. Each column is coerced to a \code{data.frame} and then column bound together. If \code{row.names} is \code{NULL}, they are retrieved from \code{x}, if it has any. Otherwise, they are inferred by the \code{data.frame} constructor. NOTE: conversion of \code{x} to a \code{data.frame} is not supported if \code{x} contains any \code{list}, \code{SimpleList}, or \code{CompressedList} columns. } \item{}{\code{as(from, "data.frame")}: Coerces a \code{DataFrame} to a \code{data.frame} by calling \code{as.data.frame(from)}. } \item{}{\code{as.matrix(x)}: Coerces the \code{DataFrame} to a \code{matrix}, if possible. } } } \section{Subsetting}{ In the following code snippets, \code{x} is a \code{DataFrame}. 
\describe{ \item{}{\code{x[i,j,drop]}: Behaves very similarly to the \code{\link{[.data.frame}} method, except \code{i} can be a logical \code{Rle} object and subsetting by \code{matrix} indices is not supported. Indices containing \code{NA}'s are also not supported. } \item{}{\code{x[i,j] <- value}: Behaves very similarly to the \code{\link{[<-.data.frame}} method. } \item{}{\code{x[[i]]}: Behaves very similarly to the \code{\link{[[.data.frame}} method, except arguments \code{j} and \code{exact} are not supported. Column name matching is always exact. Subsetting by matrices is not supported. } \item{}{\code{x[[i]] <- value}: Behaves very similarly to the \code{\link{[[<-.data.frame}} method, except argument \code{j} is not supported. } } } \section{Combining}{ In the following code snippets, \code{x} is a \code{DataFrame}. \describe{ \item{}{ \code{rbind(...)}: Creates a new \code{DataFrame} by combining the rows of the \code{DataFrame} objects in \code{...}. Very similar to \code{\link{rbind.data.frame}}, except in the handling of row names. If all elements have row names, they are concatenated and made unique. Otherwise, the result does not have row names. The return value inherits its metadata from the first argument. } \item{}{ \code{cbind(...)}: Creates a new \code{DataFrame} by combining the columns of the \code{DataFrame} objects in \code{...}. Very similar to \code{\link{cbind.data.frame}}. The return value inherits its metadata from the first argument. } } } \author{Michael Lawrence} \seealso{ \itemize{ \item \link{DataTable} and \link{SimpleList} which DataFrame extends directly. 
} } \examples{ score <- c(1L, 3L, NA) counts <- c(10L, 2L, NA) row.names <- c("one", "two", "three") df <- DataFrame(score) # single column df[["score"]] df <- DataFrame(score, row.names = row.names) #with row names rownames(df) df <- DataFrame(vals = score) # explicit naming df[["vals"]] # arrays ary <- array(1:4, c(2,1,2)) sw <- DataFrame(I(ary)) # a data.frame sw <- DataFrame(swiss) as.data.frame(sw) # swiss, without row names # now with row names sw <- DataFrame(swiss, row.names = rownames(swiss)) as.data.frame(sw) # swiss # subsetting sw[] # identity subset sw[,] # same sw[NULL] # no columns sw[,NULL] # no columns sw[NULL,] # no rows ## select columns sw[1:3] sw[,1:3] # same as above sw[,"Fertility"] sw[,c(TRUE, FALSE, FALSE, FALSE, FALSE, FALSE)] ## select rows and columns sw[4:5, 1:3] sw[1] # one-column DataFrame ## the same sw[, 1, drop = FALSE] sw[, 1] # a (unnamed) vector sw[[1]] # the same sw[["Fertility"]] sw[["Fert"]] # should return 'NULL' sw[1,] # a one-row DataFrame sw[1,, drop=TRUE] # a list ## duplicate row, unique row names are created sw[c(1, 1:2),] ## indexing by row names sw["Courtelary",] subsw <- sw[1:5,1:4] subsw["C",] # partially matches ## row and column names cn <- paste("X", seq_len(ncol(swiss)), sep = ".") colnames(sw) <- cn colnames(sw) rn <- seq(nrow(sw)) rownames(sw) <- rn rownames(sw) ## column replacement df[["counts"]] <- counts df[["counts"]] df[[3]] <- score df[["X"]] df[[3]] <- NULL # deletion } \keyword{classes} \keyword{methods}
/man/DataFrame-class.Rd
no_license
Liubuntu/S4Vectors-1
R
false
false
9,414
rd
\name{DataFrame-class} \docType{class} \alias{class:DataFrame} \alias{DataFrame-class} % accessor \alias{nrow,DataFrame-method} \alias{ncol,DataFrame-method} \alias{rownames,DataFrame-method} \alias{colnames,DataFrame-method} \alias{rownames<-,DataFrame-method} \alias{colnames<-,DataFrame-method} % constructor \alias{DataFrame} % subsetting \alias{[,DataFrame-method} \alias{[<-,DataFrame-method} \alias{[[<-,DataFrame-method} \alias{[[,DataFrame-method} % coercion \alias{as.data.frame,DataFrame-method} \alias{as.matrix,DataFrame-method} \alias{coerce,matrix,DataFrame-method} \alias{coerce,vector,DataFrame-method} \alias{coerce,list,DataFrame-method} \alias{coerce,integer,DataFrame-method} \alias{coerce,Vector,DataFrame-method} \alias{coerce,data.frame,DataFrame-method} \alias{coerce,data.table,DataFrame-method} \alias{coerce,NULL,DataFrame-method} \alias{coerce,table,DataFrame-method} \alias{coerce,AsIs,DataFrame-method} \alias{coerce,DataFrame,data.frame-method} \alias{coerce,xtabs,DataFrame-method} \alias{coerce,ANY,DataFrame-method} \alias{coerce,SimpleList,DataFrame-method} % combining \alias{bindROWS,DataFrame-method} \alias{rbind,DataFrame-method} \alias{c,DataFrame-method} \alias{cbind,DataFrame-method} \title{DataFrame objects} \description{ The \code{DataFrame} class extends the \link{DataTable} virtual class and supports the storage of any type of object (with \code{length} and \code{[} methods) as columns. } \details{ On the whole, the \code{DataFrame} behaves very similarly to \code{data.frame}, in terms of construction, subsetting, splitting, combining, etc. The most notable exception is that the row names are optional. This means calling \code{rownames(x)} will return \code{NULL} if there are no row names. Of course, it could return \code{seq_len(nrow(x))}, but returning \code{NULL} informs, for example, combination functions that no row names are desired (they are often a luxury when dealing with large data). 
As \code{DataFrame} derives from \code{\linkS4class{Vector}}, it is possible to set an \code{annotation} string. Also, another \code{DataFrame} can hold metadata on the columns. For a class to be supported as a column, it must have \code{length} and \code{[} methods, where \code{[} supports subsetting only by \code{i} and respects \code{drop=FALSE}. Optionally, a method may be defined for the \code{showAsCell} generic, which should return a vector of the same length as the subset of the column passed to it. This vector is then placed into a \code{data.frame} and converted to text with \code{format}. Thus, each element of the vector should be some simple, usually character, representation of the corresponding element in the column. } \section{Constructor}{ \describe{\code{DataFrame(..., row.names = NULL, check.names = TRUE, stringsAsFactors)}: Constructs a \code{DataFrame} in similar fashion to \code{\link{data.frame}}. Each argument in \code{...} is coerced to a \code{DataFrame} and combined column-wise. No special effort is expended to automatically determine the row names from the arguments. The row names should be given in \code{row.names}; otherwise, there are no row names. This is by design, as row names are normally undesirable when data is large. If \code{check.names} is \code{TRUE}, the column names will be checked for syntactic validity and made unique, if necessary. To store an object of a class that does not support coercion to \code{DataFrame}, wrap it in \code{I()}. The class must still have methods for \code{length} and \code{[}. The \code{stringsAsFactors} argument is ignored. The coercion of column arguments to DataFrame determines whether strings become factors. } } \section{Accessors}{ In the following code snippets, \code{x} is a \code{DataFrame}. \describe{ \item{}{\code{dim(x)}: Get the length two integer vector indicating in the first and second element the number of rows and columns, respectively. 
} \item{}{\code{dimnames(x)}, \code{dimnames(x) <- value}: Get and set the two element list containing the row names (character vector of length \code{nrow(x)} or \code{NULL}) and the column names (character vector of length \code{ncol(x)}). } } } \section{Coercion}{ \describe{ \item{}{\code{as(from, "DataFrame")}: By default, constructs a new \code{DataFrame} with \code{from} as its only column. If \code{from} is a \code{matrix} or \code{data.frame}, all of its columns become columns in the new \code{DataFrame}. If \code{from} is a list, each element becomes a column, recycling as necessary. Note that for the \code{DataFrame} to behave correctly, each column object must support element-wise subsetting via the \code{[} method and return the number of elements with \code{length}. It is recommended to use the \code{DataFrame} constructor, rather than this interface. } \item{}{\code{as.list(x)}: Coerces \code{x}, a \code{DataFrame}, to a \code{list}. } \item{}{\code{as.data.frame(x, row.names=NULL, optional=FALSE)}: Coerces \code{x}, a \code{DataFrame}, to a \code{data.frame}. Each column is coerced to a \code{data.frame} and then column bound together. If \code{row.names} is \code{NULL}, they are retrieved from \code{x}, if it has any. Otherwise, they are inferred by the \code{data.frame} constructor. NOTE: conversion of \code{x} to a \code{data.frame} is not supported if \code{x} contains any \code{list}, \code{SimpleList}, or \code{CompressedList} columns. } \item{}{\code{as(from, "data.frame")}: Coerces a \code{DataFrame} to a \code{data.frame} by calling \code{as.data.frame(from)}. } \item{}{\code{as.matrix(x)}: Coerces the \code{DataFrame} to a \code{matrix}, if possible. } } } \section{Subsetting}{ In the following code snippets, \code{x} is a \code{DataFrame}. 
\describe{ \item{}{\code{x[i,j,drop]}: Behaves very similarly to the \code{\link{[.data.frame}} method, except \code{i} can be a logical \code{Rle} object and subsetting by \code{matrix} indices is not supported. Indices containing \code{NA}'s are also not supported. } \item{}{\code{x[i,j] <- value}: Behaves very similarly to the \code{\link{[<-.data.frame}} method. } \item{}{\code{x[[i]]}: Behaves very similarly to the \code{\link{[[.data.frame}} method, except arguments \code{j} and \code{exact} are not supported. Column name matching is always exact. Subsetting by matrices is not supported. } \item{}{\code{x[[i]] <- value}: Behaves very similarly to the \code{\link{[[<-.data.frame}} method, except argument \code{j} is not supported. } } } \section{Combining}{ In the following code snippets, \code{x} is a \code{DataFrame}. \describe{ \item{}{ \code{rbind(...)}: Creates a new \code{DataFrame} by combining the rows of the \code{DataFrame} objects in \code{...}. Very similar to \code{\link{rbind.data.frame}}, except in the handling of row names. If all elements have row names, they are concatenated and made unique. Otherwise, the result does not have row names. The return value inherits its metadata from the first argument. } \item{}{ \code{cbind(...)}: Creates a new \code{DataFrame} by combining the columns of the \code{DataFrame} objects in \code{...}. Very similar to \code{\link{cbind.data.frame}}. The return value inherits its metadata from the first argument. } } } \author{Michael Lawrence} \seealso{ \itemize{ \item \link{DataTable} and \link{SimpleList} which DataFrame extends directly. 
} } \examples{ score <- c(1L, 3L, NA) counts <- c(10L, 2L, NA) row.names <- c("one", "two", "three") df <- DataFrame(score) # single column df[["score"]] df <- DataFrame(score, row.names = row.names) #with row names rownames(df) df <- DataFrame(vals = score) # explicit naming df[["vals"]] # arrays ary <- array(1:4, c(2,1,2)) sw <- DataFrame(I(ary)) # a data.frame sw <- DataFrame(swiss) as.data.frame(sw) # swiss, without row names # now with row names sw <- DataFrame(swiss, row.names = rownames(swiss)) as.data.frame(sw) # swiss # subsetting sw[] # identity subset sw[,] # same sw[NULL] # no columns sw[,NULL] # no columns sw[NULL,] # no rows ## select columns sw[1:3] sw[,1:3] # same as above sw[,"Fertility"] sw[,c(TRUE, FALSE, FALSE, FALSE, FALSE, FALSE)] ## select rows and columns sw[4:5, 1:3] sw[1] # one-column DataFrame ## the same sw[, 1, drop = FALSE] sw[, 1] # a (unnamed) vector sw[[1]] # the same sw[["Fertility"]] sw[["Fert"]] # should return 'NULL' sw[1,] # a one-row DataFrame sw[1,, drop=TRUE] # a list ## duplicate row, unique row names are created sw[c(1, 1:2),] ## indexing by row names sw["Courtelary",] subsw <- sw[1:5,1:4] subsw["C",] # partially matches ## row and column names cn <- paste("X", seq_len(ncol(swiss)), sep = ".") colnames(sw) <- cn colnames(sw) rn <- seq(nrow(sw)) rownames(sw) <- rn rownames(sw) ## column replacement df[["counts"]] <- counts df[["counts"]] df[[3]] <- score df[["X"]] df[[3]] <- NULL # deletion } \keyword{classes} \keyword{methods}
## Plot 4: four-panel figure for the Electric Power Consumption assignment.
## Reads "household_power_consumption.txt" (semicolon-separated, "?" = NA),
## subsets the observations from 2007-02-01 and 2007-02-02, and writes a
## 2x2 panel of line plots to "plot4.png".

## Set working directory (this should be the folder where the unzipped file
## "household_power_consumption.txt" is located).
setwd("~/Documents/Couersa/4 Exploratory Data Analysis/Assignments/Assignement 1/")

## Load the data into R; "?" marks missing values in the raw file.
data <- read.table("./household_power_consumption.txt", header = TRUE,
                   sep = ";", na.strings = "?",
                   colClasses = c("character", "character", "numeric",
                                  "numeric", "numeric", "numeric",
                                  "numeric", "numeric", "numeric"))

## Create a new variable combining the existing Date and Time variables.
data["Date_Time"] <- paste(data$Date, data$Time, sep = "-")
data$Date_Time <- as.character(data$Date_Time)

## Convert the combined column and columns one and two into
## date/time classes.
data$Date_Time <- strptime(data$Date_Time, "%d/%m/%Y-%H:%M:%S")
data$Date <- as.Date(data$Date, "%d/%m/%Y")
data$Time <- strptime(data$Time, "%H:%M:%S")

## Make a subset of the two days of interest.
datasub <- subset(data, data$Date == as.Date("01/02/2007", "%d/%m/%Y") |
                        data$Date == as.Date("02/02/2007", "%d/%m/%Y"))

## Open the PNG device and set the global parameters for multiple graphs on
## the same page; with mfcol the panels are filled column by column.
png("plot4.png")
par(mfcol = c(2, 2), mar = c(4, 4, 1, 1))

## First plot ("Global Active Power").
plot(datasub$Date_Time, datasub$Global_active_power, type = "l",
     xlab = "", ylab = "Global Active Power")

## Second plot ("Energy sub metering").
## FIX: xlab/ylab were removed from the points() calls below — they are not
## graphical parameters for points() and only triggered warnings.
plot(datasub$Date_Time, datasub$Sub_metering_1, col = "Black", type = "l",
     xlab = "", ylab = "Energy sub metering")
points(datasub$Date_Time, datasub$Sub_metering_2, col = "red", type = "l")
points(datasub$Date_Time, datasub$Sub_metering_3, col = "blue", type = "l")
legend("topright", pch = "-", col = c("black", "red", "blue"),
       legend = c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"),
       cex = 0.7)

## Third plot ("Voltage").
plot(datasub$Date_Time, datasub$Voltage, type = "l",
     xlab = "datetime", ylab = "Voltage")

## Fourth plot ("Global_reactive_power").
plot(datasub$Date_Time, datasub$Global_reactive_power, type = "l",
     ylab = "Global_reactive_power", xlab = "datetime")

dev.off()
/plot4.r
no_license
trygvebo/ExData_Plotting1
R
false
false
2,035
r
## Plot 4: four-panel figure for the Electric Power Consumption assignment.
## Reads "household_power_consumption.txt" (semicolon-separated, "?" = NA),
## subsets the observations from 2007-02-01 and 2007-02-02, and writes a
## 2x2 panel of line plots to "plot4.png".

## Set working directory (this should be the folder where the unzipped file
## "household_power_consumption.txt" is located).
setwd("~/Documents/Couersa/4 Exploratory Data Analysis/Assignments/Assignement 1/")

## Load the data into R; "?" marks missing values in the raw file.
data <- read.table("./household_power_consumption.txt", header = TRUE,
                   sep = ";", na.strings = "?",
                   colClasses = c("character", "character", "numeric",
                                  "numeric", "numeric", "numeric",
                                  "numeric", "numeric", "numeric"))

## Create a new variable combining the existing Date and Time variables.
data["Date_Time"] <- paste(data$Date, data$Time, sep = "-")
data$Date_Time <- as.character(data$Date_Time)

## Convert the combined column and columns one and two into
## date/time classes.
data$Date_Time <- strptime(data$Date_Time, "%d/%m/%Y-%H:%M:%S")
data$Date <- as.Date(data$Date, "%d/%m/%Y")
data$Time <- strptime(data$Time, "%H:%M:%S")

## Make a subset of the two days of interest.
datasub <- subset(data, data$Date == as.Date("01/02/2007", "%d/%m/%Y") |
                        data$Date == as.Date("02/02/2007", "%d/%m/%Y"))

## Open the PNG device and set the global parameters for multiple graphs on
## the same page; with mfcol the panels are filled column by column.
png("plot4.png")
par(mfcol = c(2, 2), mar = c(4, 4, 1, 1))

## First plot ("Global Active Power").
plot(datasub$Date_Time, datasub$Global_active_power, type = "l",
     xlab = "", ylab = "Global Active Power")

## Second plot ("Energy sub metering").
## FIX: xlab/ylab were removed from the points() calls below — they are not
## graphical parameters for points() and only triggered warnings.
plot(datasub$Date_Time, datasub$Sub_metering_1, col = "Black", type = "l",
     xlab = "", ylab = "Energy sub metering")
points(datasub$Date_Time, datasub$Sub_metering_2, col = "red", type = "l")
points(datasub$Date_Time, datasub$Sub_metering_3, col = "blue", type = "l")
legend("topright", pch = "-", col = c("black", "red", "blue"),
       legend = c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"),
       cex = 0.7)

## Third plot ("Voltage").
plot(datasub$Date_Time, datasub$Voltage, type = "l",
     xlab = "datetime", ylab = "Voltage")

## Fourth plot ("Global_reactive_power").
plot(datasub$Date_Time, datasub$Global_reactive_power, type = "l",
     ylab = "Global_reactive_power", xlab = "datetime")

dev.off()
#' @examples #' \dontrun{ #' res <- dfp_createProducts(request_data) #' }
/examples/examples-dfp_createProducts.R
no_license
JhuangSiteAnalytics/rdfp
R
false
false
74
r
#' @examples #' \dontrun{ #' res <- dfp_createProducts(request_data) #' }
diferencias.adelante <- function(dataX, dataY) {
  # Forward differences of dataY with respect to dataX.
  # dataX: fixed points used to build the polynomial
  # dataY: known values at dataX
  # Returns a vector the same length as dataX: the n - 1 forward
  # differences, with the last one repeated (evaluated "backwards"
  # for the final point).
  n <- length(dataX)
  diferencias <- diff(dataY) / diff(dataX)  # forward differences
  ultimo <- diferencias[n - 1]              # last difference, reused at the end
  return(c(diferencias, ultimo))
}

HermiteNo <- function(dataX, dataY, x) {
  # Hermite interpolation at a point by solving a linear system for the
  # coefficients of a polynomial of degree 2n - 1.
  # dataX (vector): fixed points used to build the polynomial
  # dataY (vector): known values at dataX
  # x: point to interpolate
  # NOTE(review): depends on pol.no.naive(), defined elsewhere, to
  # evaluate the polynomial from its coefficients — confirm it is loaded.
  n <- length(dataX)

  # Matrix matching the function values (rows: points, cols: monomials).
  M1 <- matrix(rep(0, 2 * n * n), nrow = n)
  M1[, 1] <- 1
  for (i in 2:(n * 2)) {  # careful with the indices
    M1[, i] <- dataX^(i - 1)
  }

  # Matrix matching the first derivative.
  M2 <- matrix(rep(0, 2 * n * n), nrow = n)
  M2[, 1] <- 0
  M2[, 2] <- 1
  for (i in 3:(n * 2)) {
    M2[, i] <- (i - 1) * dataX^(i - 2)  # careful with the indices
  }

  # Right-hand side: the known values and the first differences.
  b1 <- dataY
  b2 <- diferencias.adelante(dataX, dataY)
  b <- c(b1, b2)

  # Stack the matrices and solve for the coefficients.
  M <- rbind(M1, M2)
  coeficientes <- solve(M, b)
  res <- pol.no.naive(x, coeficientes)
  return(res)
}

Hermite.closure <- function(dataX, dataY) {
  # Closure that fixes the data points and returns a one-argument
  # interpolating function.
  # dataX (vector): fixed points used to build the polynomial
  # dataY (vector): known values at dataX
  function(x) {
    HermiteNo(dataX, dataY, x)
  }
}

###########
datax <- c(0, 1)
datay <- c(1, 2)
# BUG FIX: the original passed the undefined name `dataY` (a parameter name
# of the functions above) instead of the local `datay`, which errored the
# first time the returned closure was evaluated.
Hermite <- Hermite.closure(datax, datay)
/Punto2/InterpolaciónHermite.R
no_license
juanpablorp96/TallerInterpolacion
R
false
false
1,809
r
diferencias.adelante <- function(dataX, dataY) {
  # Forward differences of dataY with respect to dataX.
  # dataX: fixed points used to build the polynomial
  # dataY: known values at dataX
  # Returns a vector the same length as dataX: the n - 1 forward
  # differences, with the last one repeated (evaluated "backwards"
  # for the final point).
  n <- length(dataX)
  diferencias <- diff(dataY) / diff(dataX)  # forward differences
  ultimo <- diferencias[n - 1]              # last difference, reused at the end
  return(c(diferencias, ultimo))
}

HermiteNo <- function(dataX, dataY, x) {
  # Hermite interpolation at a point by solving a linear system for the
  # coefficients of a polynomial of degree 2n - 1.
  # dataX (vector): fixed points used to build the polynomial
  # dataY (vector): known values at dataX
  # x: point to interpolate
  # NOTE(review): depends on pol.no.naive(), defined elsewhere, to
  # evaluate the polynomial from its coefficients — confirm it is loaded.
  n <- length(dataX)

  # Matrix matching the function values (rows: points, cols: monomials).
  M1 <- matrix(rep(0, 2 * n * n), nrow = n)
  M1[, 1] <- 1
  for (i in 2:(n * 2)) {  # careful with the indices
    M1[, i] <- dataX^(i - 1)
  }

  # Matrix matching the first derivative.
  M2 <- matrix(rep(0, 2 * n * n), nrow = n)
  M2[, 1] <- 0
  M2[, 2] <- 1
  for (i in 3:(n * 2)) {
    M2[, i] <- (i - 1) * dataX^(i - 2)  # careful with the indices
  }

  # Right-hand side: the known values and the first differences.
  b1 <- dataY
  b2 <- diferencias.adelante(dataX, dataY)
  b <- c(b1, b2)

  # Stack the matrices and solve for the coefficients.
  M <- rbind(M1, M2)
  coeficientes <- solve(M, b)
  res <- pol.no.naive(x, coeficientes)
  return(res)
}

Hermite.closure <- function(dataX, dataY) {
  # Closure that fixes the data points and returns a one-argument
  # interpolating function.
  # dataX (vector): fixed points used to build the polynomial
  # dataY (vector): known values at dataX
  function(x) {
    HermiteNo(dataX, dataY, x)
  }
}

###########
datax <- c(0, 1)
datay <- c(1, 2)
# BUG FIX: the original passed the undefined name `dataY` (a parameter name
# of the functions above) instead of the local `datay`, which errored the
# first time the returned closure was evaluated.
Hermite <- Hermite.closure(datax, datay)
# Multiple plots in one window: draw the same histogram of mtcars$cyl in a
# 2 x 2 grid four times, titling each panel with its number.
# mfrow fills the grid row by row; mfcol fills it column by column.

par(mfrow = c(2, 2))
for (panel in seq_len(4)) {
  hist(mtcars$cyl, main = panel)
}

par(mfcol = c(2, 2))
for (panel in seq_len(4)) {
  hist(mtcars$cyl, main = panel)
}
/graphs/mfrow1.R
no_license
dupadhyaya/dspgmsc2017
R
false
false
158
r
# Multiple plots in one window: draw the same histogram of mtcars$cyl in a
# 2 x 2 grid four times, titling each panel with its number.
# mfrow fills the grid row by row; mfcol fills it column by column.

par(mfrow = c(2, 2))
for (panel in seq_len(4)) {
  hist(mtcars$cyl, main = panel)
}

par(mfcol = c(2, 2))
for (panel in seq_len(4)) {
  hist(mtcars$cyl, main = panel)
}
#################
# Day 2, Session 2: Manipulating Data
# April 2, 2021
# Heili Lowman
#################

# This script is from Day 2, Session 2 of the Introduction to R for
# Ecologists workshop.

# Load packages.
library(tidyverse)
library(palmerpenguins)

# Load in data.
penguins_raw <- palmerpenguins::penguins

# View the data.
View(penguins_raw)

# Look at the structure of the data.
str(penguins_raw)

# select() function - choose certain columns.
penguin1 <- select(penguins_raw, species, island, sex, year)

# Use "-" to remove selected columns.
penguin2 <- select(penguins_raw, -bill_length_mm)

# Filter
# filter() function - keep rows matching certain conditions.
penguin3 <- filter(penguins_raw, sex == "male") # filtering for only male records

penguin4 <- filter(penguins_raw, body_mass_g > 4000 & sex == "female") # filtering for large, female penguins
# "&" stands for "and", "|" stands for "or"

# Mutate
# mutate() function - create new columns.
penguin5 <- mutate(penguins_raw, bill_area_mm2 = bill_length_mm * bill_depth_mm) # create new bill area column

penguin6 <- mutate(penguins_raw,
                   size_category = case_when(
                     body_mass_g < 3000 ~ "XS",
                     body_mass_g >= 3000 & body_mass_g < 4000 ~ "S",
                     body_mass_g >= 4000 & body_mass_g < 5000 ~ "M",
                     body_mass_g >= 5000 ~ "L")) # creating new column with categories by body mass

# Challenge:
# Create a new dataset where only penguins found on Dream Island are included.
penguin_dream <- filter(penguins_raw, island == "Dream") # "==" is for matching

# Create a new dataset where there's a new column called "penguin_index"
# which adds together flipper length and body mass.
penguin_new <- mutate(penguins_raw, penguin_index = flipper_length_mm + body_mass_g)

# Piping
# Pipes look like %>%
# Shortcuts: mac: command + shift + m
#            pc:  ctrl + shift + m

# Create our first pipe!
penguin_pipe <- penguins_raw %>% # Create a new dataset called "penguin_pipe". First, start with raw data, and then...
  filter(body_mass_g > 3000) %>% # filter by body mass, and then...
  select(species, island, bill_depth_mm) # select only certain columns.

# Challenge: Using the pipe, create a new dataset with data collected before
# 2009 (filter()) and keep only the following columns - body mass, sex, and
# year (select()).
penguin_pipe2 <- penguins_raw %>% # raw data, and then...
  filter(year < 2009) %>% # filter for year, and then...
  select(body_mass_g, sex, year) # pull out 3 columns.

# Summary statistics
penguin_mean <- penguins_raw %>% # raw data
  group_by(sex) %>% # grouping by sex
  summarize(mean_mass = mean(body_mass_g, na.rm=TRUE)) %>% # new column "mean_mass" from the original body_mass_g column
  ungroup() # DON'T FORGET TO UNGROUP! (after using group_by)

# BONUS CONTENT:
# And you can group by multiple columns.
penguin_isl_sp <- penguins_raw %>%
  group_by(island,species) %>%
  summarize(mean_mass = mean(body_mass_g, na.rm=TRUE)) %>%
  ungroup()

# Base statistical functions in R:
# mean               = mean()
# maximum            = max()
# minimum            = min()
# median             = median()
# standard deviation = sd()

# Challenge: Create a new dataset that groups by species and sex and then
# calculates the statistic of your choice for flipper length.
penguin_sp_sx <- penguins_raw %>%
  group_by(species, sex) %>%
  summarize(max_flipper = max(flipper_length_mm, na.rm=TRUE)) %>%
  ungroup()

# Back to our regularly scheduled lesson:
# Long and wide formats

# Create a smaller penguin dataset.
penguins_count <- penguins_raw %>% # raw data
  group_by(year, island) %>% # group by year and island
  summarize(count = n()) %>% # count records
  ungroup() # always end with ungroup

# Examine data.
View(penguins_count)

# pivot_wider()
# Shortens the dataset - increasing columns, decreasing rows.
penguins_wide <- penguins_count %>% # tiny dataset
  pivot_wider(names_from = island, values_from = count)

# pivot_longer()
# Lengthens the dataset - increasing rows, decreasing columns.
penguins_long <- penguins_wide %>% # wide dataset
  pivot_longer(Biscoe:Torgersen, names_to = "island", values_to = "count")
# ":" allows us to select consecutive columns

# Export a dataset.
write_csv(penguins_long, "data/penguins_long.csv")
# Send to data folder, once dataset is manipulated.

# End of script.
/live_coding/Day2_Sess2_manipulating_data.R
permissive
anamtk/ucsb_r_workshop
R
false
false
4,354
r
#################
# Day 2, Session 2: Manipulating Data
# April 2, 2021
# Heili Lowman
#################

# This script is from Day 2, Session 2 of the Introduction to R for
# Ecologists workshop.

# Load packages.
library(tidyverse)
library(palmerpenguins)

# Load in data.
penguins_raw <- palmerpenguins::penguins

# View the data.
View(penguins_raw)

# Look at the structure of the data.
str(penguins_raw)

# select() function - choose certain columns.
penguin1 <- select(penguins_raw, species, island, sex, year)

# Use "-" to remove selected columns.
penguin2 <- select(penguins_raw, -bill_length_mm)

# Filter
# filter() function - keep rows matching certain conditions.
penguin3 <- filter(penguins_raw, sex == "male") # filtering for only male records

penguin4 <- filter(penguins_raw, body_mass_g > 4000 & sex == "female") # filtering for large, female penguins
# "&" stands for "and", "|" stands for "or"

# Mutate
# mutate() function - create new columns.
penguin5 <- mutate(penguins_raw, bill_area_mm2 = bill_length_mm * bill_depth_mm) # create new bill area column

penguin6 <- mutate(penguins_raw,
                   size_category = case_when(
                     body_mass_g < 3000 ~ "XS",
                     body_mass_g >= 3000 & body_mass_g < 4000 ~ "S",
                     body_mass_g >= 4000 & body_mass_g < 5000 ~ "M",
                     body_mass_g >= 5000 ~ "L")) # creating new column with categories by body mass

# Challenge:
# Create a new dataset where only penguins found on Dream Island are included.
penguin_dream <- filter(penguins_raw, island == "Dream") # "==" is for matching

# Create a new dataset where there's a new column called "penguin_index"
# which adds together flipper length and body mass.
penguin_new <- mutate(penguins_raw, penguin_index = flipper_length_mm + body_mass_g)

# Piping
# Pipes look like %>%
# Shortcuts: mac: command + shift + m
#            pc:  ctrl + shift + m

# Create our first pipe!
penguin_pipe <- penguins_raw %>% # Create a new dataset called "penguin_pipe". First, start with raw data, and then...
  filter(body_mass_g > 3000) %>% # filter by body mass, and then...
  select(species, island, bill_depth_mm) # select only certain columns.

# Challenge: Using the pipe, create a new dataset with data collected before
# 2009 (filter()) and keep only the following columns - body mass, sex, and
# year (select()).
penguin_pipe2 <- penguins_raw %>% # raw data, and then...
  filter(year < 2009) %>% # filter for year, and then...
  select(body_mass_g, sex, year) # pull out 3 columns.

# Summary statistics
penguin_mean <- penguins_raw %>% # raw data
  group_by(sex) %>% # grouping by sex
  summarize(mean_mass = mean(body_mass_g, na.rm=TRUE)) %>% # new column "mean_mass" from the original body_mass_g column
  ungroup() # DON'T FORGET TO UNGROUP! (after using group_by)

# BONUS CONTENT:
# And you can group by multiple columns.
penguin_isl_sp <- penguins_raw %>%
  group_by(island,species) %>%
  summarize(mean_mass = mean(body_mass_g, na.rm=TRUE)) %>%
  ungroup()

# Base statistical functions in R:
# mean               = mean()
# maximum            = max()
# minimum            = min()
# median             = median()
# standard deviation = sd()

# Challenge: Create a new dataset that groups by species and sex and then
# calculates the statistic of your choice for flipper length.
penguin_sp_sx <- penguins_raw %>%
  group_by(species, sex) %>%
  summarize(max_flipper = max(flipper_length_mm, na.rm=TRUE)) %>%
  ungroup()

# Back to our regularly scheduled lesson:
# Long and wide formats

# Create a smaller penguin dataset.
penguins_count <- penguins_raw %>% # raw data
  group_by(year, island) %>% # group by year and island
  summarize(count = n()) %>% # count records
  ungroup() # always end with ungroup

# Examine data.
View(penguins_count)

# pivot_wider()
# Shortens the dataset - increasing columns, decreasing rows.
penguins_wide <- penguins_count %>% # tiny dataset
  pivot_wider(names_from = island, values_from = count)

# pivot_longer()
# Lengthens the dataset - increasing rows, decreasing columns.
penguins_long <- penguins_wide %>% # wide dataset
  pivot_longer(Biscoe:Torgersen, names_to = "island", values_to = "count")
# ":" allows us to select consecutive columns

# Export a dataset.
write_csv(penguins_long, "data/penguins_long.csv")
# Send to data folder, once dataset is manipulated.

# End of script.
# Scrape and clean NCAA men's basketball scores from ncaa.com scoreboard
# pages. Also contains commented-out exploration of Excel/CSV rankings data.
# BUG FIX: the two section separators below were bare runs of "-" in the
# original file, which are not valid standalone R expressions and broke
# parsing of the statements that followed; they are now comments.

library(tidyverse)
library(readxl)
library(rvest)

# #if someone can think of better var name dont be afraid to use it
#
# #Cleaning season 2020-2021
# Year0 <- read_excel("NCAA Statistics.xlsx",sheet = "2020-2021")
# #the w-L column didn't come in right
# View(Year0)
#
# #cleaning season 2019-2020
# Year1 <- read_excel("NCAA Statistics.xlsx", sheet = "2019-2020")
# #split the W-L column into two columns
# a <- str_split(Year1$`W-L`,"-")
# for (i in 1:dim(Year1)[1]) {
#   Year1$W[i] <- a[[i]][1] %>% as.numeric()
#   Year1$L[i] <- a[[i]][2] %>% as.numeric()
# }
# Year1 <- Year1 %>% mutate('W-L'=NULL) #removed W-L column
#
# #Cleaning season 2018-2019
# Year2 <- read_excel("NCAA Statistics.xlsx", sheet = "2018-2019")
# #split the W-L column into two columns
# b <- str_split(Year2$`W-L`,"-")
# for (i in 1:dim(Year2)[1]) {
#   Year2$W[i] <- b[[i]][1] %>% as.numeric()
#   Year2$L[i] <- b[[i]][2] %>% as.numeric()
# }
# Year2 <- Year2 %>% mutate('W-L'=NULL) #removed W-L column

# ------------------------------------------------------------------------------
# from https://www.ncaa.com/scoreboard/basketball-men/d1/2021/03/10/all-conf
url1 <- "https://www.ncaa.com/scoreboard/basketball-men/d1/"
end <- "/all-conf"
url <- c()
lis <- c()
df <- c()
i <- 1

# This takes a while to run if the date range is big.
# Looking at March games through today:
dates <- seq(as.Date("2021-03-01"), as.Date("2021-03-14"), by="days")
dates <- format(as.Date(dates), "%Y/%m/%d")

# For each date above, scrape the finished games: one day at a time [i],
# one game per element of lis[[i]].
for (date in dates) {
  url[i] <- paste0(url1, date, end)
  lis[[i]] <- url[i] %>%
    read_html() %>%
    html_nodes(".gamePod.gamePod-type-game.status-final") %>%
    html_text()
  df <- c(df, lis[[i]])
  i <- i + 1
}

# Strip the scoreboard markup text down to team names and scores.
df <- gsub("[ |0-2][0-9]\n\n", "", df) # gets rid of seed number
df <- gsub(" (OT)", "", df, fixed = TRUE) # get rid of OT and FINAL marks
df <- gsub(" (2OT)", "", df, fixed = TRUE)
df <- gsub(" (3OT)", "", df, fixed = TRUE)
df <- gsub(" (4OT)", "", df, fixed = TRUE)
df <- gsub(" (5OT)", "", df, fixed = TRUE)
df <- gsub("FINAL", "", df)
df <- gsub("\n", "", df)
# get rid of extra whitespaces and newline characters
df <- gsub(" ", "", df)
df <- as_tibble(df)

# Separate team names and scores from one string per game.
scores <- c()
teams <- c()
for (i in seq_len(nrow(df))) {  # seq_len is safe when df has zero rows
  if (grepl("\\d", df[i, 1])) {
    # Regular expressions, reference:
    # https://rstudio.com/wp-content/uploads/2016/09/RegExCheatsheet.pdf
    scores[i] <- regmatches(df[i, 1], gregexpr("\\d+", df[i, 1])) # the runs of digits
    teams[i] <- regmatches(df[i, 1], gregexpr("\\D+", df[i, 1])) # the runs of non-digits
  }
}

# Combine scores and teams into a data frame.
games_score <- as_tibble(do.call(rbind, scores))
games_team <- as_tibble(do.call(rbind, teams))
colnames(games_score) <- c("score1", "score2")
colnames(games_team) <- c("team1", "team2")
games <- cbind(games_team, games_score) %>% as_tibble()
games$team1 <- trimws(games$team1) # trim leading whitespace in front of some team names
games$team2 <- trimws(games$team2)
View(games)

# ------------------------------------------------------------------------------
# https://www.sports-reference.com/cbb/boxscores/index.cgi?month=03&day=1&year=2021
url1 <- "https://www.sports-reference.com/cbb/boxscores/index.cgi?month="
day <- "&day="
year <- "&year="
url <- c()
df <- c()
for (i in 1:2) { # day
  url[i] <- paste0(url1, "3", day, i, year, "2021")
  lis[[i]] <- url[i] %>%
    read_html() %>%
    html_nodes(".gamePod.gamePod-type-game.status-final") %>%
    html_text()
  # we don't want the data in a list, and it would get too messy to
  # concatenate the lis variable in the loop (I think anyways)
  df <- c(df, lis[[i]])
}

# ------------------------------------------------------------------------------
# #this is the same data Nicole found
# # from http://web1.ncaa.org/stats/StatsSrv/rankings
# # WL_per <- read_csv("rankings.csv",skip = 11,n_max = 354)
# WL_per <- WL_per[-c(351),]
# WL_per$Rank <- c(seq(1,350),rep("NR",3))
# View(WL_per)
#
# Iscor_Offense <- read_csv("rankings.csv",skip = 372,n_max = 354,
#                           col_types = cols(
#                             Rank = col_character(), #bc we have some unranked teams
#                             Name = col_character(),
#                             GM = col_double(),
#                             `W-L` = col_character(),
#                             PTS = col_double(),
#                             PPG = col_double()
#                           ))
# Iscor_Offense <- Iscor_Offense[-c(351),]
# Iscor_Offense$Rank <- c(seq(1,350),rep("NR",3))
# view(Iscor_Offense)
# # PTS ~ cumulative points in season
# # PPG ~ average points per game
#
# Iscor_Defense <-
/data_cleaning.R
no_license
SeanCranston/MinneAnalytics
R
false
false
5,038
r
# Scrape and clean NCAA men's basketball scores from ncaa.com scoreboard
# pages. Also contains commented-out exploration of Excel/CSV rankings data.
# BUG FIX: the two section separators below were bare runs of "-" in the
# original file, which are not valid standalone R expressions and broke
# parsing of the statements that followed; they are now comments.

library(tidyverse)
library(readxl)
library(rvest)

# #if someone can think of better var name dont be afraid to use it
#
# #Cleaning season 2020-2021
# Year0 <- read_excel("NCAA Statistics.xlsx",sheet = "2020-2021")
# #the w-L column didn't come in right
# View(Year0)
#
# #cleaning season 2019-2020
# Year1 <- read_excel("NCAA Statistics.xlsx", sheet = "2019-2020")
# #split the W-L column into two columns
# a <- str_split(Year1$`W-L`,"-")
# for (i in 1:dim(Year1)[1]) {
#   Year1$W[i] <- a[[i]][1] %>% as.numeric()
#   Year1$L[i] <- a[[i]][2] %>% as.numeric()
# }
# Year1 <- Year1 %>% mutate('W-L'=NULL) #removed W-L column
#
# #Cleaning season 2018-2019
# Year2 <- read_excel("NCAA Statistics.xlsx", sheet = "2018-2019")
# #split the W-L column into two columns
# b <- str_split(Year2$`W-L`,"-")
# for (i in 1:dim(Year2)[1]) {
#   Year2$W[i] <- b[[i]][1] %>% as.numeric()
#   Year2$L[i] <- b[[i]][2] %>% as.numeric()
# }
# Year2 <- Year2 %>% mutate('W-L'=NULL) #removed W-L column

# ------------------------------------------------------------------------------
# from https://www.ncaa.com/scoreboard/basketball-men/d1/2021/03/10/all-conf
url1 <- "https://www.ncaa.com/scoreboard/basketball-men/d1/"
end <- "/all-conf"
url <- c()
lis <- c()
df <- c()
i <- 1

# This takes a while to run if the date range is big.
# Looking at March games through today:
dates <- seq(as.Date("2021-03-01"), as.Date("2021-03-14"), by="days")
dates <- format(as.Date(dates), "%Y/%m/%d")

# For each date above, scrape the finished games: one day at a time [i],
# one game per element of lis[[i]].
for (date in dates) {
  url[i] <- paste0(url1, date, end)
  lis[[i]] <- url[i] %>%
    read_html() %>%
    html_nodes(".gamePod.gamePod-type-game.status-final") %>%
    html_text()
  df <- c(df, lis[[i]])
  i <- i + 1
}

# Strip the scoreboard markup text down to team names and scores.
df <- gsub("[ |0-2][0-9]\n\n", "", df) # gets rid of seed number
df <- gsub(" (OT)", "", df, fixed = TRUE) # get rid of OT and FINAL marks
df <- gsub(" (2OT)", "", df, fixed = TRUE)
df <- gsub(" (3OT)", "", df, fixed = TRUE)
df <- gsub(" (4OT)", "", df, fixed = TRUE)
df <- gsub(" (5OT)", "", df, fixed = TRUE)
df <- gsub("FINAL", "", df)
df <- gsub("\n", "", df)
# get rid of extra whitespaces and newline characters
df <- gsub(" ", "", df)
df <- as_tibble(df)

# Separate team names and scores from one string per game.
scores <- c()
teams <- c()
for (i in seq_len(nrow(df))) {  # seq_len is safe when df has zero rows
  if (grepl("\\d", df[i, 1])) {
    # Regular expressions, reference:
    # https://rstudio.com/wp-content/uploads/2016/09/RegExCheatsheet.pdf
    scores[i] <- regmatches(df[i, 1], gregexpr("\\d+", df[i, 1])) # the runs of digits
    teams[i] <- regmatches(df[i, 1], gregexpr("\\D+", df[i, 1])) # the runs of non-digits
  }
}

# Combine scores and teams into a data frame.
games_score <- as_tibble(do.call(rbind, scores))
games_team <- as_tibble(do.call(rbind, teams))
colnames(games_score) <- c("score1", "score2")
colnames(games_team) <- c("team1", "team2")
games <- cbind(games_team, games_score) %>% as_tibble()
games$team1 <- trimws(games$team1) # trim leading whitespace in front of some team names
games$team2 <- trimws(games$team2)
View(games)

# ------------------------------------------------------------------------------
# https://www.sports-reference.com/cbb/boxscores/index.cgi?month=03&day=1&year=2021
url1 <- "https://www.sports-reference.com/cbb/boxscores/index.cgi?month="
day <- "&day="
year <- "&year="
url <- c()
df <- c()
for (i in 1:2) { # day
  url[i] <- paste0(url1, "3", day, i, year, "2021")
  lis[[i]] <- url[i] %>%
    read_html() %>%
    html_nodes(".gamePod.gamePod-type-game.status-final") %>%
    html_text()
  # we don't want the data in a list, and it would get too messy to
  # concatenate the lis variable in the loop (I think anyways)
  df <- c(df, lis[[i]])
}

# ------------------------------------------------------------------------------
# #this is the same data Nicole found
# # from http://web1.ncaa.org/stats/StatsSrv/rankings
# # WL_per <- read_csv("rankings.csv",skip = 11,n_max = 354)
# WL_per <- WL_per[-c(351),]
# WL_per$Rank <- c(seq(1,350),rep("NR",3))
# View(WL_per)
#
# Iscor_Offense <- read_csv("rankings.csv",skip = 372,n_max = 354,
#                           col_types = cols(
#                             Rank = col_character(), #bc we have some unranked teams
#                             Name = col_character(),
#                             GM = col_double(),
#                             `W-L` = col_character(),
#                             PTS = col_double(),
#                             PPG = col_double()
#                           ))
# Iscor_Offense <- Iscor_Offense[-c(351),]
# Iscor_Offense$Rank <- c(seq(1,350),rep("NR",3))
# view(Iscor_Offense)
# # PTS ~ cumulative points in season
# # PPG ~ average points per game
#
# Iscor_Defense <-
# bias.R
# created 2007-12-05
# Aron Eklund
#
# Per-array bias metrics and bias/batch correction for Affymetrix data.
# Functions taking an AffyBatch / ExpressionSet require affy and Biobase;
# quantileNormalize, batchCorrection and calcCM also work on plain matrices.

# Compute four per-array bias metrics for an AffyBatch:
# median and IQR of the log2 PM intensities, IQR of the RMA expression
# values, and the AffyRNAdeg degradation slope.
# Returns a data.frame with one row per array.
getBiasMetrics <- function(x.batch, x.rma = rma(x.batch)) {
  x.pm <- log2(pm(x.batch))
  message('Calculating degradation scores...')
  degradation <- affy::AffyRNAdeg(x.batch)$slope
  data.frame(
    pm.median = apply(x.pm, 2, median),
    pm.IQR = apply(x.pm, 2, IQR),
    rma.IQR = apply(exprs(x.rma), 2, IQR),
    degradation = degradation
  )
}

# Extended bias metrics: the four metrics from getBiasMetrics plus the MAS5
# present-call rate, mean RMA expression of the spike-in / rRNA / alu control
# probe sets, and median intensities of the array border elements.
getBiasMetrics2 <- function(x.batch, x.rma = rma(x.batch)) {
  ## first do the four original bias metrics
  x.pm <- log2(pm(x.batch))
  pm.median <- apply(x.pm, 2, median)
  pm.IQR <- apply(x.pm, 2, IQR)
  rma.IQR <- apply(exprs(x.rma), 2, IQR)
  message('Calculating degradation scores...')
  degradation <- affy::AffyRNAdeg(x.batch)$slope
  ## now the "extended" bias metrics
  message('Calculating MAS5 calls...')
  present.calls <- try(apply(exprs(mas5calls(x.batch)) == 'P', 2, mean),
                       silent = TRUE)
  # inherits() is the robust way to detect a try() failure
  # (class(x) == "try-error" breaks when the object has multiple classes)
  if (inherits(present.calls, "try-error")) {
    present.calls <- rep(as.numeric(NA), length(pm.median))
  }
  ## probe-specific bias metrics
  ## (all probe sets may not be present on all arrays -- so we use safety functions)
  availableProbes <- featureNames(x.rma)
  getMeans <- function(probes) {
    ok <- intersect(probes, availableProbes)
    colMeans(exprs(x.rma)[ok, , drop = FALSE])
  }
  pr.spikes.mRNA <- c("AFFX-LysX-3_at", "AFFX-LysX-5_at", "AFFX-LysX-M_at",
                      "AFFX-PheX-3_at", "AFFX-PheX-5_at", "AFFX-PheX-M_at",
                      "AFFX-ThrX-3_at", "AFFX-ThrX-5_at", "AFFX-ThrX-M_at",
                      "AFFX-DapX-3_at", "AFFX-DapX-5_at", "AFFX-DapX-M_at")
  pr.spikes.cRNA <- c("AFFX-BioB-3_at", "AFFX-BioB-5_at", "AFFX-BioB-M_at",
                      "AFFX-BioC-3_at", "AFFX-BioC-5_at",
                      "AFFX-BioDn-3_at", "AFFX-BioDn-5_at",
                      "AFFX-CreX-3_at", "AFFX-CreX-5_at")
  pr.rRNA <- c("AFFX-HUMRGE/M10098_3_at", "AFFX-HUMRGE/M10098_5_at",
               "AFFX-HUMRGE/M10098_M_at",
               "AFFX-M27830_5_at", "AFFX-M27830_M_at")
  pr.alu <- "AFFX-hum_alu_at"
  rma.spikes.mRNA <- getMeans(pr.spikes.mRNA)
  rma.spikes.cRNA <- getMeans(pr.spikes.cRNA)
  rma.rRNA <- getMeans(pr.rRNA)
  rma.alu <- getMeans(pr.alu)
  # compute the border elements once (the original called borders() twice)
  brd <- borders(x.batch)
  border.plus <- sapply(brd, function(x) median(unlist(x$plus)))
  border.minus <- sapply(brd, function(x) median(unlist(x$minus)))
  data.frame(pm.median, pm.IQR, rma.IQR, degradation, present.calls,
             rma.spikes.mRNA, rma.spikes.cRNA, rma.rRNA, rma.alu,
             border.plus, border.minus)
}

# Extract the border elements of each array, split into the alternating
# "plus" and "minus" checkerboard positions along each edge.
# Returns a named list (one entry per sample) of list(plus =, minus =).
borders <- function(x.batch) {
  nr <- nrow(x.batch)
  nc <- ncol(x.batch)
  # convert (x, y) array coordinates to a linear index into exprs()
  xy2i <- function(x, y) x + (nr * (y - 1))
  sb <- (nr %% 2) # sign on bottom
  sr <- (nc %% 2) # sign on right
  index.plus <- list(
    bottom = xy2i(seq(1 + sb, nc, by = 2), nr),
    left = xy2i(1, seq(1, nr, by = 2)),
    top = xy2i(seq(1, nc, by = 2), 1),
    right = xy2i(nc, seq(1 + sr, nr, by = 2))
  )
  index.minus <- list(
    bottom = xy2i(seq(2 - sb, nc, by = 2), nr),
    left = xy2i(1, seq(2, nr, by = 2)),
    top = xy2i(seq(2, nc, by = 2), 1),
    right = xy2i(nc, seq(2 - sr, nr, by = 2))
  )
  out <- lapply(seq_len(ncol(exprs(x.batch))), function(j) {
    plus <- lapply(index.plus, function(i) exprs(x.batch)[i, j])
    minus <- lapply(index.minus, function(i) exprs(x.batch)[i, j])
    list(plus = plus, minus = minus)
  })
  names(out) <- sampleNames(x.batch)
  out
}

# Regress each probe set (row) on the supplied bias metrics and keep the
# residuals, shifted back so that the original row means are preserved.
# Accepts and returns either an expression-set object or a plain matrix.
biasCorrection <- function(x, metrics) {
  # inherits() replaces class(x) %in% c(...): with R >= 4.0 a plain matrix
  # has class c("matrix", "array"), so %in% gave a length-2 condition.
  if (inherits(x, c('exprSet', 'ExpressionSet'))) {
    in.mat <- Biobase::exprs(x)
  } else {
    in.mat <- x
  }
  fit <- lm(t(in.mat) ~ ., data = metrics)
  res <- t(residuals(fit))
  fixed <- sweep(res, 1, rowMeans(in.mat), FUN = '+')
  if (inherits(x, c('exprSet', 'ExpressionSet'))) {
    out <- x
    Biobase::exprs(out) <- fixed
  } else {
    out <- fixed
  }
  return(out)
}

# Read the scan date out of the DatHeader line of (possibly gzipped) CEL
# files. Returns a named Date vector, one element per file.
getScanDate <- function(filenames = list.celfiles()) {
  DatHeaders <- rep('', length(filenames))
  for (i in seq_along(filenames)) {
    con <- gzfile(filenames[i])
    DatHeaders[i] <- grep('DatHeader', readLines(con, n = 20), value = TRUE)
    close(con)
  }
  # the header embeds the scan date as mm/dd/yy
  dateInUSAformat <- sub("^.*(../../..).*$", "\\1", DatHeaders, perl = TRUE)
  dateInDateFormat <- as.Date(dateInUSAformat, "%m/%d/%y")
  names(dateInDateFormat) <- filenames
  dateInDateFormat
}

# Correlation map: split the rows of x into n consecutive bins and summarize
# (via FUN) the pairwise correlations between bins. Returns a list suitable
# for image()-style plotting plus the per-cell correlation counts.
calcCM <- function(x, n = 50, use = 'all', method = 'pearson',
                   FUN = median, ...) {
  if (inherits(x, c('exprSet', 'ExpressionSet'))) x <- Biobase::exprs(x)
  x <- t(as.matrix(x))
  b <- round(seq(0, ncol(x), length = n + 1))  # bin boundaries
  z <- matrix(NA, nrow = n, ncol = n)
  count <- matrix(NA, nrow = n, ncol = n)
  for (i in seq_len(n)) {
    which.i <- (b[i] + 1):b[i + 1]
    for (j in i:n) {  # upper triangle only; mirrored below
      which.j <- (b[j] + 1):b[j + 1]
      myCor <- cor(x[, which.i], x[, which.j], use = use, method = method)
      if (i == j) {
        # on the diagonal, drop self-correlations
        myCor <- myCor[lower.tri(myCor)]
        z[i, j] <- FUN(myCor, ...)
        count[i, j] <- length(myCor)
      } else {
        z[i, j] <- z[j, i] <- FUN(myCor, ...)
        count[i, j] <- count[j, i] <- length(myCor)
      }
    }
  }
  return(list(x = b, y = b, z = z, count = count,
              call = paste(as.character(match.call()), collapse = " ")))
}

# Quantile-normalize the columns (margin = 2) of a matrix: every column is
# mapped onto the mean sorted profile. Ties are broken randomly.
quantileNormalize <- function(x, margin = 2) {
  v <- rowMeans(apply(x, margin, sort))
  x.qn <- apply(x, margin, function(y) v[rank(y, ties.method = 'random')])
  rownames(x.qn) <- rownames(x)
  return(x.qn)
}

# Remove per-batch mean shifts: subtract each row's batch mean and add back
# the overall row mean. b gives the batch label of each column; no NAs
# allowed. Accepts and returns either an expression-set object or a matrix.
batchCorrection <- function(x, b) {
  stopifnot(all(!is.na(b)))
  stopifnot(length(b) == ncol(x))
  if (inherits(x, c("exprSet", "ExpressionSet"))) {
    in.mat <- Biobase::exprs(x)
  } else {
    in.mat <- x
  }
  b <- factor(b)
  batchMeans <- sapply(levels(b), function(a)
    rowMeans(in.mat[, b == a, drop = FALSE]))
  adj <- batchMeans - rowMeans(in.mat)
  fixed <- in.mat - adj[, b]
  if (inherits(x, c("exprSet", "ExpressionSet"))) {
    out <- x
    Biobase::exprs(out) <- fixed
  } else {
    out <- fixed
  }
  out
}
/R/bias.R
no_license
aroneklund/bias
R
false
false
6,203
r
# # bias.R # created 2007-12-05 # Aron Eklund # # # getBiasMetrics <- function(x.batch, x.rma = rma(x.batch)) { x.pm <- log2(pm(x.batch)) message('Calculating degradation scores...') degradation <- affy::AffyRNAdeg(x.batch)$slope data.frame( pm.median = apply(x.pm, 2, median), pm.IQR = apply(x.pm, 2, IQR), rma.IQR = apply(exprs(x.rma), 2, IQR), degradation = degradation ) } getBiasMetrics2 <- function(x.batch, x.rma = rma(x.batch)) { ## first do the four original bias metrics x.pm <- log2(pm(x.batch)) pm.median <- apply(x.pm, 2, median) pm.IQR <- apply(x.pm, 2, IQR) rma.IQR <- apply(exprs(x.rma), 2, IQR) message('Calculating degradation scores...') degradation <- affy::AffyRNAdeg(x.batch)$slope ## now the "extended" bias metrics message('Calculating MAS5 calls...') present.calls <- try(apply(exprs(mas5calls(x.batch)) == 'P', 2, mean), silent = TRUE) if(class(present.calls) == "try-error") { present.calls <- rep(as.numeric(NA), length(pm.median)) } ## probe-specific bias metrics ## (all probe sets may not be present on all arrays -- so we use safety functions) availableProbes <- featureNames(x.rma) getMeans <- function(probes) { ok <- intersect(probes, availableProbes) colMeans(exprs(x.rma)[ok, , drop=FALSE]) } pr.spikes.mRNA <- c("AFFX-LysX-3_at", "AFFX-LysX-5_at", "AFFX-LysX-M_at", "AFFX-PheX-3_at", "AFFX-PheX-5_at", "AFFX-PheX-M_at", "AFFX-ThrX-3_at", "AFFX-ThrX-5_at", "AFFX-ThrX-M_at", "AFFX-DapX-3_at", "AFFX-DapX-5_at", "AFFX-DapX-M_at" ) pr.spikes.cRNA <- c("AFFX-BioB-3_at", "AFFX-BioB-5_at", "AFFX-BioB-M_at", "AFFX-BioC-3_at", "AFFX-BioC-5_at", "AFFX-BioDn-3_at", "AFFX-BioDn-5_at", "AFFX-CreX-3_at", "AFFX-CreX-5_at" ) pr.rRNA <- c("AFFX-HUMRGE/M10098_3_at", "AFFX-HUMRGE/M10098_5_at", "AFFX-HUMRGE/M10098_M_at", "AFFX-M27830_5_at", "AFFX-M27830_M_at") pr.alu <- "AFFX-hum_alu_at" rma.spikes.mRNA <- getMeans(pr.spikes.mRNA) rma.spikes.cRNA <- getMeans(pr.spikes.cRNA) rma.rRNA <- getMeans(pr.rRNA) rma.alu <- getMeans(pr.alu) border.plus <- sapply(borders(x.batch), 
function(x) median(unlist(x$plus))) border.minus <- sapply(borders(x.batch), function(x) median(unlist(x$minus))) data.frame( pm.median, pm.IQR, rma.IQR, degradation, present.calls, rma.spikes.mRNA, rma.spikes.cRNA, rma.rRNA, rma.alu, border.plus, border.minus ) } borders <- function(x.batch) { nr <- nrow(x.batch) nc <- ncol(x.batch) xy2i <- function(x, y) x + (nr * (y - 1)) sb <- (nr %% 2) # sign on bottom sr <- (nc %% 2) # sign on right index.plus <- list( bottom = xy2i(seq(1 + sb, nc, by=2), nr), left = xy2i(1, seq(1, nr, by=2)), top = xy2i(seq(1, nc, by=2), 1), right = xy2i(nc, seq(1 + sr, nr, by=2)) ) index.minus <- list(bottom = xy2i(seq(2 - sb, nc, by=2), nr), left = xy2i(1, seq(2, nr, by=2)), top = xy2i(seq(2, nc, by=2), 1), right = xy2i(nc, seq(2 - sr, nr, by=2)) ) out <- lapply(1:ncol(exprs(x.batch)), function(j) { plus <- lapply(index.plus, function(i) exprs(x.batch)[i, j]) minus <- lapply(index.minus, function(i) exprs(x.batch)[i, j]) list(plus = plus, minus = minus) }) names(out) <- sampleNames(x.batch) out } biasCorrection <- function(x, metrics) { if( class(x) %in% c('exprSet', 'ExpressionSet')) { in.mat <- Biobase::exprs(x) } else { in.mat <- x } fit <- lm(t(in.mat) ~ ., data = metrics) res <- t(residuals(fit)) fixed <- sweep(res, 1, rowMeans(in.mat), FUN = '+') if( class(x) %in% c('exprSet', 'ExpressionSet')) { out <- x Biobase::exprs(out) <- fixed } else { out <- fixed } return(out) } getScanDate <- function(filenames = list.celfiles()) { DatHeaders <- rep('', length(filenames)) for (i in seq(along = filenames)) { con <- gzfile(filenames[i]) DatHeaders[i] <- grep('DatHeader', readLines(con, n = 20), value = TRUE) close(con) } dateInUSAformat <- sub("^.*(../../..).*$", "\\1", DatHeaders, perl = TRUE) dateInDateFormat <- as.Date(dateInUSAformat, "%m/%d/%y") names(dateInDateFormat) <- filenames dateInDateFormat } calcCM <- function(x, n = 50, use = 'all', method = 'pearson', FUN = median, ...) 
{ if( class(x) %in% c('exprSet', 'ExpressionSet')) x <- Biobase::exprs(x) x <- t(as.matrix(x)) b <- round(seq(0, ncol(x), length = n + 1)) z <- matrix(NA, nrow = n, ncol = n) count <- matrix(NA, nrow = n, ncol = n) for (i in 1:n) { which.i <- (b[i] + 1):b[i + 1] for (j in i:n) { which.j <- (b[j] + 1):b[j + 1] myCor <- cor( x[,which.i], x[, which.j], use = use, method = method ) if( i == j ) { myCor <- myCor[lower.tri(myCor)] z[i,j] <- FUN(myCor, ...) count[i,j] <- length(myCor) } else { z[i,j] <- z[j,i] <- FUN(myCor, ...) count[i,j] <- count[j,i] <- length(myCor) } } } return(list(x = b, y = b, z = z, count = count, call = paste( as.character(match.call()), collapse=" " ) )) } quantileNormalize <- function(x, margin = 2) { v <- rowMeans(apply(x, margin, sort)) x.qn <- apply(x, margin, function(y) v[rank(y, ties.method = 'random')] ) rownames(x.qn) <- rownames(x) return(x.qn) } batchCorrection <- function(x, b) { stopifnot( all( ! is.na(b) ) ) stopifnot(length(b) == ncol(x)) if (class(x) %in% c("exprSet", "ExpressionSet")) { in.mat <- Biobase::exprs(x) } else { in.mat <- x } b <- factor(b) batchMeans <- sapply(levels(b), function(a) rowMeans(in.mat[,b == a, drop = FALSE]) ) adj <- batchMeans - rowMeans(in.mat) fixed <- in.mat - adj[,b] if (class(x) %in% c("exprSet", "ExpressionSet")) { out <- x Biobase::exprs(out) <- fixed } else { out <- fixed } out }
library(readr) library(plyr) library(dplyr) parse_lines <- function(line) { data.frame( name = line[1], speed = as.numeric(line[4]), runtime = as.numeric(line[7]), resttime = as.numeric(line[14]) ) } df <- read_lines("input.txt") %>% strsplit(" ") %>% ldply(parse_lines) # distances after 2503 seconds end <- 2503 df %>% mutate(distance = speed * runtime * floor(end / (runtime + resttime)) + speed * pmin(runtime, end %% (runtime + resttime))) %>% filter(distance == max(distance)) # 2640
/day14/1-distance.R
no_license
pdil/adventR
R
false
false
533
r
library(readr) library(plyr) library(dplyr) parse_lines <- function(line) { data.frame( name = line[1], speed = as.numeric(line[4]), runtime = as.numeric(line[7]), resttime = as.numeric(line[14]) ) } df <- read_lines("input.txt") %>% strsplit(" ") %>% ldply(parse_lines) # distances after 2503 seconds end <- 2503 df %>% mutate(distance = speed * runtime * floor(end / (runtime + resttime)) + speed * pmin(runtime, end %% (runtime + resttime))) %>% filter(distance == max(distance)) # 2640
#@since 1.8.2 require tk require tkextlib/bwidget require tkextlib/bwidget/messagedlg = class Tk::BWidget::SelectFont < Tk::BWidget::MessageDlg extend Tk == Class Methods --- load_font #@todo == Instance Methods --- create #@todo #@include(selectfont/SelectFont__Dialog) #@include(selectfont/SelectFont__Toolbar) #@end
/target/rubydoc/refm/api/src/tkextlib/bwidget/selectfont.rd
no_license
nacyot/omegat-rurima-ruby
R
false
false
334
rd
#@since 1.8.2 require tk require tkextlib/bwidget require tkextlib/bwidget/messagedlg = class Tk::BWidget::SelectFont < Tk::BWidget::MessageDlg extend Tk == Class Methods --- load_font #@todo == Instance Methods --- create #@todo #@include(selectfont/SelectFont__Dialog) #@include(selectfont/SelectFont__Toolbar) #@end
# R script to run author supplied code, typically used to install additional R packages
# contains placeholders which are inserted by the compile script
# NOTE: this script is executed in the chroot context; check paths!

# Point the default repository at the CRAN cloud mirror.
r <- getOption("repos")
r["CRAN"] <- "http://cloud.r-project.org"
options(repos=r)

# ======================================================================
# packages go here
install.packages("remotes")
install.packages("tidyverse")
install.packages("scales")

# installs Rcpp, rlang, BH
install.packages("later")

# NOTE(review): jsonlite 1.6 is installed a second time further down from the
# cloud mirror; one of the two lines is presumably redundant -- confirm.
install.packages("https://cran.r-project.org/src/contrib/jsonlite_1.6.tar.gz", type="source", repos=NULL)
install.packages("rjson")
install.packages("listenv")
install.packages("anytime")
install.packages("readr")
install.packages("heatmaply")

# Pinned versions below are installed with upgrade="never" so their
# dependencies are not silently bumped.
# installs magrittr, promises, R6
remotes::install_version("httpuv", version = "1.4.5.1", repos = "http://cloud.r-project.org", upgrade="never")
# installs crayon, digest, htmltools, mime, sourcetools, xtable
remotes::install_version("shiny", version = "1.2.0", repos = "http://cloud.r-project.org", upgrade="never")
# installs askpass, assertthat, base64enc, cli, colorspace, crosstalk, curl,
# data.table, dplyr, fansi, ggplot2, glue, gtable, hexbin, htmlwidgets, httr,
# labeling, lattice, lazyeval, mgcv, munsell, nlme, openssl, pillar,
# pkgconfig, plogr, plyr, purrr, RColorBrewer, reshape2, scales, stringi,
# stringr, sys, tibble, tidyr, tidyselect, utf8, viridisLite, withr, yaml
remotes::install_version("plotly", version = "4.9.1", repos = "http://cloud.r-project.org", upgrade="never")

# Source tarballs pinned to exact versions (repos=NULL installs from URL).
install.packages("https://cloud.r-project.org/src/contrib/assertthat_0.2.1.tar.gz", type="source", repos=NULL)
install.packages("https://cloud.r-project.org/src/contrib/xml2_1.2.2.tar.gz", type="source", repos=NULL)
install.packages("https://cloud.r-project.org/src/contrib/triebeard_0.3.0.tar.gz", type="source", repos=NULL)
install.packages("https://cloud.r-project.org/src/contrib/Archive/urltools/urltools_1.7.2.tar.gz",
                 type="source", repos=NULL)
install.packages("https://cloud.r-project.org/src/contrib/jsonlite_1.6.tar.gz", type="source", repos=NULL)
install.packages("https://cloud.r-project.org/src/contrib/webutils_1.0.tar.gz", type="source", repos=NULL)
install.packages("https://cloud.r-project.org/src/contrib/brotli_1.2.tar.gz", type="source", repos=NULL)
install.packages("https://cloud.r-project.org/src/contrib/reqres_0.2.3.tar.gz", type="source", repos=NULL)
install.packages("https://cloud.r-project.org/src/contrib/uuid_0.1-2.tar.gz", type="source", repos=NULL)
install.packages("https://cloud.r-project.org/src/contrib/base64enc_0.1-3.tar.gz", type="source", repos=NULL)
install.packages("https://cloud.r-project.org/src/contrib/codetools_0.2-16.tar.gz", type="source", repos=NULL)
install.packages("https://cloud.r-project.org/src/contrib/globals_0.12.5.tar.gz", type="source", repos=NULL)
install.packages("https://cloud.r-project.org/src/contrib/Archive/future/future_1.11.1.1.tar.gz", type="source", repos=NULL)

# fiery and friends
install.packages("https://cloud.r-project.org/src/contrib/routr_0.4.0.tar.gz", type="source", repos=NULL)
install.packages("https://cloud.r-project.org/src/contrib/fiery_1.1.2.tar.gz", type="source", repos=NULL)

# dash components (pinned to specific commits / the dev branch)
remotes::install_github("plotly/dash-table", ref="042ad65")
remotes::install_github("plotly/dash-html-components", ref="17da1f4")
remotes::install_github("plotly/dash-core-components", ref="cc1e654")
remotes::install_github("plotly/dashR", ref="dev", dependencies=FALSE)
/init.R
permissive
UBC-MDS/DSCI_532_group_208_DashR_Job
R
false
false
3,537
r
# R script to run author supplied code, typically used to install additional R packages # contains placeholders which are inserted by the compile script # NOTE: this script is executed in the chroot context; check paths! r <- getOption("repos") r["CRAN"] <- "http://cloud.r-project.org" options(repos=r) # ====================================================================== # packages go here install.packages("remotes") install.packages("tidyverse") install.packages("scales") # installs Rcpp, rlang, BH install.packages("later") install.packages("https://cran.r-project.org/src/contrib/jsonlite_1.6.tar.gz", type="source", repos=NULL) install.packages("rjson") install.packages("listenv") install.packages("anytime") install.packages("readr") install.packages("heatmaply") # installs magrittr, promises, R6 remotes::install_version("httpuv", version = "1.4.5.1", repos = "http://cloud.r-project.org", upgrade="never") # installs crayon, digest, htmltools, mime, sourcetools, xtable remotes::install_version("shiny", version = "1.2.0", repos = "http://cloud.r-project.org", upgrade="never") # installs askpass, assertthat, base64enc, cli, colorspace, crosstalk, curl, data.table, dplyr, fansi, ggplot2, glue, gtable, hexbin, htmlwidgets, httr, labeling, lattice, lazyeval, mgcv, munsell, nlme, openssl, pillar, pkgconfig, plogr, plyr, purrr, RColorBrewer, reshape2, scales, stringi, stringr, sys, tibble, tidyr, tidyselect, utf8, viridisLite, withr, yaml remotes::install_version("plotly", version = "4.9.1", repos = "http://cloud.r-project.org", upgrade="never") install.packages("https://cloud.r-project.org/src/contrib/assertthat_0.2.1.tar.gz", type="source", repos=NULL) install.packages("https://cloud.r-project.org/src/contrib/xml2_1.2.2.tar.gz", type="source", repos=NULL) install.packages("https://cloud.r-project.org/src/contrib/triebeard_0.3.0.tar.gz", type="source", repos=NULL) install.packages("https://cloud.r-project.org/src/contrib/Archive/urltools/urltools_1.7.2.tar.gz", 
type="source", repos=NULL) install.packages("https://cloud.r-project.org/src/contrib/jsonlite_1.6.tar.gz", type="source", repos=NULL) install.packages("https://cloud.r-project.org/src/contrib/webutils_1.0.tar.gz", type="source", repos=NULL) install.packages("https://cloud.r-project.org/src/contrib/brotli_1.2.tar.gz", type="source", repos=NULL) install.packages("https://cloud.r-project.org/src/contrib/reqres_0.2.3.tar.gz", type="source", repos=NULL) install.packages("https://cloud.r-project.org/src/contrib/uuid_0.1-2.tar.gz", type="source", repos=NULL) install.packages("https://cloud.r-project.org/src/contrib/base64enc_0.1-3.tar.gz", type="source", repos=NULL) install.packages("https://cloud.r-project.org/src/contrib/codetools_0.2-16.tar.gz", type="source", repos=NULL) install.packages("https://cloud.r-project.org/src/contrib/globals_0.12.5.tar.gz", type="source", repos=NULL) install.packages("https://cloud.r-project.org/src/contrib/Archive/future/future_1.11.1.1.tar.gz", type="source", repos=NULL) # fiery and friends install.packages("https://cloud.r-project.org/src/contrib/routr_0.4.0.tar.gz", type="source", repos=NULL) install.packages("https://cloud.r-project.org/src/contrib/fiery_1.1.2.tar.gz", type="source", repos=NULL) # dash components remotes::install_github("plotly/dash-table", ref="042ad65") remotes::install_github("plotly/dash-html-components", ref="17da1f4") remotes::install_github("plotly/dash-core-components", ref="cc1e654") remotes::install_github("plotly/dashR", ref="dev", dependencies=FALSE)
# Association Rules - Groceries data set ####
# Tutorial walk-through of market-basket analysis with arules/arulesViz,
# meant to be run interactively line by line.
library(arules)    #install first
library(arulesViz) #install first
library(datasets)  # no need to install, just load it; reqd for Groceries
data('Groceries')  # different format - transaction format
Groceries

# Structure of Groceries
str(Groceries)
Groceries
arules::LIST(Groceries[1:6]) #another view
arules::inspect(Groceries[1:5])

# Find Frequent Itemsets
# .01 * 9835; A + B + C = 3 items, A + B + C + D : 4 items
frequentItems = eclat (Groceries, parameter = list(supp = 0.01, minlen= 2, maxlen = 5))
#frequentItems = eclat (Groceries, parameter = list(minlen= 3))
inspect(frequentItems[1:88])
frequentItems
inspect(frequentItems[10:100])
#inspect(frequentItems[100:122])

# Descending sort of frequent itemsets by count : 1 to 25 itemsets
inspect(sort (frequentItems, by="count", decreasing=TRUE)[1:25])
inspect(sort (frequentItems, by="count", decreasing=F)[1:25])
# Support is : support(A&B) = n(A&B)/ N

# Plot the Frequency Plot
itemFrequencyPlot(Groceries, topN = 15,type="absolute")
itemFrequencyPlot(Groceries, topN = 10, type='relative')
abline(h=0.15)

# Create rules and the relationship between items
# parameters are min filter conditions
rules = apriori(Groceries, parameter = list(supp = 0.005, conf = 0.5, minlen=2))
rules
inspect (rules[1:15])

# Sort rules by confidence / lift and see the data
rulesc <- sort (rules, by="confidence", decreasing=TRUE)
inspect(rulesc[1:5])
rulesl <- sort (rules, by="lift", decreasing=TRUE)
inspect (rulesl[1:5])
# which items have strong confidence and lift

# How To Control The Number Of Rules in Output ?
# maxlen, minlen, supp, conf
rules2 = apriori (Groceries, parameter = list (supp = 0.001, conf = 0.5, minlen=2, maxlen=3))
rules2 #no of rules
rules  #earlier rules
inspect(rules2[1:15])

# Find out what events were influenced by a given event - from already created rules
subset1 = subset(rules2, subset=rhs %in% "whole milk")
inspect(subset1) # rhs has milk
subset1 = subset(rules2, subset=rhs %in% 'bottled beer' )
inspect(subset1) # rhs has beer
#inspect(rules2)
subset2 = subset(rules2, subset=lhs %ain% c('baking powder','soda') )
inspect(subset2) # all items %ain%
subset2a = subset(rules2, subset=lhs %in% c('baking powder','soda') )
inspect(subset2a) # any of the items %in%

# RHS, confidence, sort by lift
subset3 = subset(rules2, subset=rhs %in% 'bottled beer' & confidence > .5, by = 'lift', decreasing = T)
inspect(subset3)
# sometimes there may be no rules, change few parameters
subset4 = subset(rules2, subset=lhs %in% 'bottled beer' & rhs %in% 'whole milk' )
inspect(subset4)
subset4b = subset(rules2, subset=rhs %in% 'bottled beer' )
inspect(subset4b)
# no such rules

library(arulesViz) #install first
# https://cran.r-project.org/web/packages/arulesViz/vignettes/arulesViz.pdf
# Visualizing The Rules -----
subset1
rules2
inspect(subset2)
plot(subset1[1:2])
plot(subset1[1:2], measure=c("support", "lift"), shading="confidence") # change the axes
plot(rules2[1:100], measure=c("support", "lift"), shading="confidence")

# Find what factors influenced an event 'X' - create fresh rules
rules3 = apriori (data=Groceries, parameter=list (supp=0.002,conf = 0.7), appearance = list (default="lhs",rhs="whole milk"), control = list (verbose=F))
inspect(rules3[1:5])
inspect(rules3)
# rhs as it is, lhs to have tropical fruit or herbs
rules4 = apriori (data=Groceries, parameter=list (supp=0.001,conf = 0.4), appearance = list (default="rhs",lhs=c('tropical fruit','herbs')), control = list (verbose=F))
inspect(rules4[1:5])
inspect(rules4)
plot(subset4)
# legend to filter
# legend to condition
commands # lhs - means left hand side, or antecendent # rhs - mean right hand side, or consequent # items - items, that make up itemsets # %in% - matches any # %ain% - matches all # %pin% - matches partially # default - no restrictions applied # & - additional restrictions on lift, confidence etc. #summarise Association Rules / Market Basket analysis #load libraries - arules, arulesViz #load dataset in Transaction Format eg Groceries #Find frequentitems set #Find rules as per parameters #Parameters - min support, min confidence, minlen, maxlen #sort - confidence, lift, count, support #subset of rules - lhs, rhs, confidence, all, any item #plot of the rules #find interesting rules - high lift, high confidence #put strategy in place - location, bundle, discounts, advertisement
/MarketBasket.R
no_license
Gajendra001/Analytics
R
false
false
4,399
r
# Association Rules - Groceries data set #### library(arules) #install first library(arulesViz) #install first library(datasets) # no need to install, just load it reqd for Groceries data('Groceries') #different format - transaction format Groceries #Structure of Groceries str(Groceries) Groceries arules::LIST(Groceries[1:6]) #another view arules::inspect(Groceries[1:5]) #Find Frequent Itemset #.01 * 9835; A + B + C = 3 items, A + B + C + D : 4 items frequentItems = eclat (Groceries, parameter = list(supp = 0.01, minlen= 2, maxlen = 5)) #frequentItems = eclat (Groceries, parameter = list(minlen= 3)) inspect(frequentItems[1:88]) frequentItems inspect(frequentItems[10:100]) #inspect(frequentItems[100:122]) #Descending Sort frequent items by count : 1 to 25 itemsets inspect(sort (frequentItems, by="count", decreasing=TRUE)[1:25]) inspect(sort (frequentItems, by="count", decreasing=F)[1:25]) #Support is : support(A&B) = n(A&B)/ N #Plot the Frequency Plot itemFrequencyPlot(Groceries, topN = 15,type="absolute") itemFrequencyPlot(Groceries, topN = 10, type='relative') abline(h=0.15) # Create rules and the relationship between items #parameters are min filter conditions rules = apriori(Groceries, parameter = list(supp = 0.005, conf = 0.5, minlen=2)) rules inspect (rules[1:15]) #Sort Rules by confidence, lift and see the data rulesc <- sort (rules, by="confidence", decreasing=TRUE) inspect(rulesc[1:5]) rulesl <- sort (rules, by="lift", decreasing=TRUE) inspect (rulesl[1:5]) #which items have strong confidence and lift #How To Control The Number Of Rules in Output ? 
#maxlen, minlen, supp, conf rules2 = apriori (Groceries, parameter = list (supp = 0.001, conf = 0.5, minlen=2, maxlen=3)) rules2 #no of rules rules #earlier rules inspect(rules2[1:15]) #Find out what events were influenced by a given event - from already created rules subset1 = subset(rules2, subset=rhs %in% "whole milk") inspect(subset1) # rhs has milk subset1 = subset(rules2, subset=rhs %in% 'bottled beer' ) inspect(subset1) #rhs has beer #inspect(rules2) subset2 = subset(rules2, subset=lhs %ain% c('baking powder','soda') ) inspect(subset2) # all items %ain% subset2a = subset(rules2, subset=lhs %in% c('baking powder','soda') ) inspect(subset2a) # any of the items %in% #RHS, Confidence, sort by Lift subset3 = subset(rules2, subset=rhs %in% 'bottled beer' & confidence > .5, by = 'lift', decreasing = T) inspect(subset3) #sometimes there may be no rules, change few parameters subset4 = subset(rules2, subset=lhs %in% 'bottled beer' & rhs %in% 'whole milk' ) inspect(subset4) subset4b = subset(rules2, subset=rhs %in% 'bottled beer' ) inspect(subset4b) #no such rules library(arulesViz) #install first #https://cran.r-project.org/web/packages/arulesViz/vignettes/arulesViz.pdf #Visualizing The Rules ----- subset1 rules2 inspect(subset2) plot(subset1[1:2]) plot(subset1[1:2], measure=c("support", "lift"), shading="confidence") #change the axis plot(rules2[1:100], measure=c("support", "lift"), shading="confidence") # #Find what factors influenced an event ‘X’ - create fresh Rules rules3 = apriori (data=Groceries, parameter=list (supp=0.002,conf = 0.7), appearance = list (default="lhs",rhs="whole milk"), control = list (verbose=F)) inspect(rules3[1:5]) inspect(rules3) #rhs as it is, lhs to have tropical fruit or herbs rules4 = apriori (data=Groceries, parameter=list (supp=0.001,conf = 0.4), appearance = list (default="rhs",lhs=c('tropical fruit','herbs')), control = list (verbose=F)) inspect(rules4[1:5]) inspect(rules4) plot(subset4) #legend to filter #legend to condition 
commands # lhs - means left hand side, or antecendent # rhs - mean right hand side, or consequent # items - items, that make up itemsets # %in% - matches any # %ain% - matches all # %pin% - matches partially # default - no restrictions applied # & - additional restrictions on lift, confidence etc. #summarise Association Rules / Market Basket analysis #load libraries - arules, arulesViz #load dataset in Transaction Format eg Groceries #Find frequentitems set #Find rules as per parameters #Parameters - min support, min confidence, minlen, maxlen #sort - confidence, lift, count, support #subset of rules - lhs, rhs, confidence, all, any item #plot of the rules #find interesting rules - high lift, high confidence #put strategy in place - location, bundle, discounts, advertisement
# Compares models using leave-one-out (LOO) expected log posterior density (elpd) library(tidyverse) library(ggthemes) library(loo) wong <- c("#000000", "#e69f00", "#56b4e9", "#009e73", "#f0e442", "#0072b2", "#d55e00", "#cc79a7") setwd("/Users/adkinsty/Box/LeeLab/Experiments/Exp_files/reach/") models <- c("MEG", "MELV", "MEU", "MSWU", "UH", "H") meg_loo <- loo(readRDS("modeling/exp2/stan/rds/MEG.rds") ,cores = 1) mleg_loo <- loo(readRDS("modeling/exp2/stan/rds/MLEG.rds"),cores = 1) meu_loo <- loo(readRDS("modeling/exp2/stan/rds/MEU.rds") ,cores = 1) mswu_loo <- loo(readRDS("modeling/exp2/stan/rds/MSWU.rds") ,cores = 1) h_loo <- loo(readRDS("modeling/exp2/stan/rds/H.rds"),cores = 1) uh_loo <- loo(readRDS("modeling/exp2/stan/rds/UH.rds"),cores = 1) #ulh_loo <- loo(readRDS("modeling/exp2/stan/rds/ULH.rds"),cores = 1) #meg_vp_loo <- loo(readRDS("modeling/exp2/stan/rds/MEG_vp.rds") ,cores = 1) comparison <- loo_compare(list(meg = meg_loo, mleg = mleg_loo, meu = meu_loo, mswu = mswu_loo, uh = uh_loo, h = h_loo)) loo_compare(list(meg = meg_loo, mleg = mleg_loo, h = h_loo)) models_plt <- c("MELV", "MEG", "MSWU", "UH") labels <- c("LA","MEG","NLVP","UH") pdat <- tibble(ELPD = c(mleg_loo$elpd_loo, meg_loo$elpd_loo, mswu_loo$elpd_loo, uh_loo$elpd_loo), se = c(mleg_loo$se_elpd, meg_loo$se_elpd, mswu_loo$se_elpd, uh_loo$se_elpd), M = models_plt) %>% mutate(M = factor(M, levels=models_plt)) pdat %>% ggplot(aes(x=M,y=ELPD,ymax=ELPD+se,ymin=ELPD-se,colour=M)) + geom_point() + geom_errorbar(width=0) + scale_x_discrete("Model",labels=labels) + scale_y_continuous("LOO ELPD") + scale_colour_manual("Model", values=wong[c(2,4,3,1)], labels=labels) + theme_tufte(base_size=12,base_family="sans") + theme(axis.line=element_line(size=.25), legend.pos="none") ggsave("visuals/raw/exp2/revision/exp2_LOOIC.pdf",units="in",height=3,width=3)
/modeling/exp2/compare_loo_revision.R
no_license
adkinsty/reach
R
false
false
2,011
r
# Compares models using leave-one-out (LOO) expected log posterior density (elpd) library(tidyverse) library(ggthemes) library(loo) wong <- c("#000000", "#e69f00", "#56b4e9", "#009e73", "#f0e442", "#0072b2", "#d55e00", "#cc79a7") setwd("/Users/adkinsty/Box/LeeLab/Experiments/Exp_files/reach/") models <- c("MEG", "MELV", "MEU", "MSWU", "UH", "H") meg_loo <- loo(readRDS("modeling/exp2/stan/rds/MEG.rds") ,cores = 1) mleg_loo <- loo(readRDS("modeling/exp2/stan/rds/MLEG.rds"),cores = 1) meu_loo <- loo(readRDS("modeling/exp2/stan/rds/MEU.rds") ,cores = 1) mswu_loo <- loo(readRDS("modeling/exp2/stan/rds/MSWU.rds") ,cores = 1) h_loo <- loo(readRDS("modeling/exp2/stan/rds/H.rds"),cores = 1) uh_loo <- loo(readRDS("modeling/exp2/stan/rds/UH.rds"),cores = 1) #ulh_loo <- loo(readRDS("modeling/exp2/stan/rds/ULH.rds"),cores = 1) #meg_vp_loo <- loo(readRDS("modeling/exp2/stan/rds/MEG_vp.rds") ,cores = 1) comparison <- loo_compare(list(meg = meg_loo, mleg = mleg_loo, meu = meu_loo, mswu = mswu_loo, uh = uh_loo, h = h_loo)) loo_compare(list(meg = meg_loo, mleg = mleg_loo, h = h_loo)) models_plt <- c("MELV", "MEG", "MSWU", "UH") labels <- c("LA","MEG","NLVP","UH") pdat <- tibble(ELPD = c(mleg_loo$elpd_loo, meg_loo$elpd_loo, mswu_loo$elpd_loo, uh_loo$elpd_loo), se = c(mleg_loo$se_elpd, meg_loo$se_elpd, mswu_loo$se_elpd, uh_loo$se_elpd), M = models_plt) %>% mutate(M = factor(M, levels=models_plt)) pdat %>% ggplot(aes(x=M,y=ELPD,ymax=ELPD+se,ymin=ELPD-se,colour=M)) + geom_point() + geom_errorbar(width=0) + scale_x_discrete("Model",labels=labels) + scale_y_continuous("LOO ELPD") + scale_colour_manual("Model", values=wong[c(2,4,3,1)], labels=labels) + theme_tufte(base_size=12,base_family="sans") + theme(axis.line=element_line(size=.25), legend.pos="none") ggsave("visuals/raw/exp2/revision/exp2_LOOIC.pdf",units="in",height=3,width=3)
library(shiny) # Define UI for application that draws a histogram shinyUI(navbarPage("Poll Analytics", tabPanel("Introduction", #plotOutput("background"), column(10, offset=3, h1(strong("Twitter Data Analytics"))), hr(),hr(),hr(), helpText("Click on the tabs above to check out the statistics."), hr(), column(8,offset=2, plotOutput("word_cloud"), hr(), column(5,offset=3, textInput("search","Search",value="",placeholder = "Type to see trend..")) ) ), tabPanel("Candidate Popularity", h2("Who is more Popular?"), plotOutput("popularity_plot"), hr(), h4("We have analyzed thousands of tweets over a range of days to figure out who is being talked about the most."), h5("\nClick on the radio button below to see the variation of each Candidates popularity."), column(12,offset=1,radioButtons("radio_button", label = h3("Candidates"), choices = list("All"=1,"Hillary Clinton"=2,"Bernie Sanders"=3, "Donald Trump"=4,"Marco Rubio"=5,"Ted Cruz"=6,"Ben Carson"=7), selected=1, inline=T)) ), tabPanel("Always on Twitter", #!!!For this, you need to have the indiviual json files for each #candidates. This is provided in the candidate files in the #tar archive.!!! h2("Who is most Active on Twitter?"), helpText("In the digital age that we live in, being active on Social Media can make or break a campaign. 
By accessing the Presidential candidates user timelines, we can determine who among the top 6 use their Twitter handle the most."), plotOutput("most.active"), hr(),hr(), column(12,offset=4, helpText("Have a closer look by choosing your candidate."), selectInput("select",label=h3("Candidate"), choices = list("Select"=1,"Hillary Clinton"=2,"Bernie Sanders"=3, "Donald Trump"=4,"Marco Rubio"=5,"Ted Cruz"=6,"Ben Carson"=7), selected=1)) ), tabPanel("Trending Hashtags", column(12,offset=4,h2("Trending Hashtags")), hr(), br(), column(12,offest=4,helpText("Select the date to find the top five trending hashtags.")) , column(12,offset=4,dateInput("date","Date", value="2016-02-28",min="2016-02-28", max=Sys.Date()), column(12, offset=1,tableOutput("hashtag_table"))) )))
/prob5/ui.R
no_license
mithunatri/EDA-with-R
R
false
false
3,023
r
library(shiny) # Define UI for application that draws a histogram shinyUI(navbarPage("Poll Analytics", tabPanel("Introduction", #plotOutput("background"), column(10, offset=3, h1(strong("Twitter Data Analytics"))), hr(),hr(),hr(), helpText("Click on the tabs above to check out the statistics."), hr(), column(8,offset=2, plotOutput("word_cloud"), hr(), column(5,offset=3, textInput("search","Search",value="",placeholder = "Type to see trend..")) ) ), tabPanel("Candidate Popularity", h2("Who is more Popular?"), plotOutput("popularity_plot"), hr(), h4("We have analyzed thousands of tweets over a range of days to figure out who is being talked about the most."), h5("\nClick on the radio button below to see the variation of each Candidates popularity."), column(12,offset=1,radioButtons("radio_button", label = h3("Candidates"), choices = list("All"=1,"Hillary Clinton"=2,"Bernie Sanders"=3, "Donald Trump"=4,"Marco Rubio"=5,"Ted Cruz"=6,"Ben Carson"=7), selected=1, inline=T)) ), tabPanel("Always on Twitter", #!!!For this, you need to have the indiviual json files for each #candidates. This is provided in the candidate files in the #tar archive.!!! h2("Who is most Active on Twitter?"), helpText("In the digital age that we live in, being active on Social Media can make or break a campaign. 
By accessing the Presidential candidates user timelines, we can determine who among the top 6 use their Twitter handle the most."), plotOutput("most.active"), hr(),hr(), column(12,offset=4, helpText("Have a closer look by choosing your candidate."), selectInput("select",label=h3("Candidate"), choices = list("Select"=1,"Hillary Clinton"=2,"Bernie Sanders"=3, "Donald Trump"=4,"Marco Rubio"=5,"Ted Cruz"=6,"Ben Carson"=7), selected=1)) ), tabPanel("Trending Hashtags", column(12,offset=4,h2("Trending Hashtags")), hr(), br(), column(12,offest=4,helpText("Select the date to find the top five trending hashtags.")) , column(12,offset=4,dateInput("date","Date", value="2016-02-28",min="2016-02-28", max=Sys.Date()), column(12, offset=1,tableOutput("hashtag_table"))) )))
#### Build GSE95630_Tang_fetal_tissues datasets #### ### Step1 Expression data filePath <- "/data8t_4/JH/scRNA_seq/GEO/Normal_tissues/GSE103239_Tang_digestive_tract/GSE95630_fetal_tissues/GSE95630_Digestion_TPM_new.txt.gz" fetal.tissues <- read.table(gzfile(filePath), header = T) geneNames <- fetal.tissues[,1] rownames(fetal.tissues) <- geneNames fetal.tissues <-fetal.tissues[, !(names(fetal.tissues) %in% "Gene")] head(fetal.tissues[1:10,1:10]) summary(colSums(fetal.tissues)) # the resault showed this was no-transformed tpm value ### Step2 Phenotype information ## extract from sample names sampleName <- colnames(fetal.tissues) pd <- as.data.frame(sampleName) pd$tissue <- unlist(lapply(strsplit(sampleName,"_"), '[[', 1)) pd$emDays <-unlist(lapply(strsplit(sampleName,"_"), '[[', 2)) pd$pateintID <- unlist(lapply(strsplit(sampleName,"_"), '[[', 3)) pd$cellNum <- unlist(lapply(strsplit(sampleName,"_"), '[[', 4)) ## from supplementary data cellTypeCluster <- readRDS("/data8t_4/JH/MyJobs/Colon_SC_Project/GSE103239_Tang_GI_development/GSE95630_fetal_tissues/GSE95630_Tang_fetal_tissues_cellTypeCluster.rds") ### Step3 Gene features # they don`t provide ### Step4 Build datasets GSE95630_Tang_fetal_tissues_datasets <- list(Fetal.colon.TPM = fetal.tissues, pd = pd, cellTypeCluster = cellTypeCluster) saveRDS(GSE95630_Tang_fetal_tissues_datasets, file = "GSE95630_Tang_fetal_tissues_datasets.rds")
/Normal_cell_reference/pre_Step1_Build_normal_cell_dataset/Build_GSE95630_Tang_fetal_colon_datasets.R
no_license
haojiang9999/HCA_script
R
false
false
1,519
r
#### Build GSE95630_Tang_fetal_tissues datasets #### ### Step1 Expression data filePath <- "/data8t_4/JH/scRNA_seq/GEO/Normal_tissues/GSE103239_Tang_digestive_tract/GSE95630_fetal_tissues/GSE95630_Digestion_TPM_new.txt.gz" fetal.tissues <- read.table(gzfile(filePath), header = T) geneNames <- fetal.tissues[,1] rownames(fetal.tissues) <- geneNames fetal.tissues <-fetal.tissues[, !(names(fetal.tissues) %in% "Gene")] head(fetal.tissues[1:10,1:10]) summary(colSums(fetal.tissues)) # the resault showed this was no-transformed tpm value ### Step2 Phenotype information ## extract from sample names sampleName <- colnames(fetal.tissues) pd <- as.data.frame(sampleName) pd$tissue <- unlist(lapply(strsplit(sampleName,"_"), '[[', 1)) pd$emDays <-unlist(lapply(strsplit(sampleName,"_"), '[[', 2)) pd$pateintID <- unlist(lapply(strsplit(sampleName,"_"), '[[', 3)) pd$cellNum <- unlist(lapply(strsplit(sampleName,"_"), '[[', 4)) ## from supplementary data cellTypeCluster <- readRDS("/data8t_4/JH/MyJobs/Colon_SC_Project/GSE103239_Tang_GI_development/GSE95630_fetal_tissues/GSE95630_Tang_fetal_tissues_cellTypeCluster.rds") ### Step3 Gene features # they don`t provide ### Step4 Build datasets GSE95630_Tang_fetal_tissues_datasets <- list(Fetal.colon.TPM = fetal.tissues, pd = pd, cellTypeCluster = cellTypeCluster) saveRDS(GSE95630_Tang_fetal_tissues_datasets, file = "GSE95630_Tang_fetal_tissues_datasets.rds")
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/conta_functions.R \name{get_s3_folders} \alias{get_s3_folders} \title{Get folders under a specified s3 directory} \usage{ get_s3_folders(s3_path) } \arguments{ \item{s3_path}{to display the contents} } \description{ Returns the set of folders under a given s3 path. get_bucket_df normally returns all folders and files recursively, this method parses get_bucket_df output to return only folders directly under the specified dir. }
/man/get_s3_folders.Rd
permissive
Dxiaomai/conta
R
false
true
509
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/conta_functions.R \name{get_s3_folders} \alias{get_s3_folders} \title{Get folders under a specified s3 directory} \usage{ get_s3_folders(s3_path) } \arguments{ \item{s3_path}{to display the contents} } \description{ Returns the set of folders under a given s3 path. get_bucket_df normally returns all folders and files recursively, this method parses get_bucket_df output to return only folders directly under the specified dir. }
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/scripts.R \name{run_civis} \alias{run_civis} \title{Evaluate an R expression in a Civis Platform container} \usage{ run_civis(expr, ...) } \arguments{ \item{expr}{code to evaluate} \item{...}{arguments to \code{\link{CivisFuture}}} } \description{ Evaluate an R expression in a Civis Platform container } \details{ \code{run_civis} blocks until completion. For non-blocking calls, use futures directly with \code{\link{civis_platform}}. Attempts are made at detecting and installing necessary packages within the container, and detecting global variables required in \code{expr}. } \examples{ \dontrun{ run_civis(2+2) # specify required resources, and a specific image run_civis(2+2, required_resources = list(cpu = 1024, memory = 2048), docker_image_name='image', docker_image_tag = 'latest') } } \seealso{ Other script_utils: \code{\link{civis_script}}, \code{\link{fetch_output_file_ids}}, \code{\link{run_template}} } \concept{script_utils}
/man/run_civis.Rd
no_license
mheilman/civis-r
R
false
true
1,036
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/scripts.R \name{run_civis} \alias{run_civis} \title{Evaluate an R expression in a Civis Platform container} \usage{ run_civis(expr, ...) } \arguments{ \item{expr}{code to evaluate} \item{...}{arguments to \code{\link{CivisFuture}}} } \description{ Evaluate an R expression in a Civis Platform container } \details{ \code{run_civis} blocks until completion. For non-blocking calls, use futures directly with \code{\link{civis_platform}}. Attempts are made at detecting and installing necessary packages within the container, and detecting global variables required in \code{expr}. } \examples{ \dontrun{ run_civis(2+2) # specify required resources, and a specific image run_civis(2+2, required_resources = list(cpu = 1024, memory = 2048), docker_image_name='image', docker_image_tag = 'latest') } } \seealso{ Other script_utils: \code{\link{civis_script}}, \code{\link{fetch_output_file_ids}}, \code{\link{run_template}} } \concept{script_utils}
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/separate.R \name{separate_dt} \alias{separate_dt} \title{Separate a character column into two columns using a regular expression separator} \usage{ separate_dt( .data, separated_colname, into, sep = "[^[:alnum:]]+", remove = TRUE ) } \arguments{ \item{.data}{A data frame.} \item{separated_colname}{Column to be separated, can be a character or alias.} \item{into}{Character vector of length 2.} \item{sep}{Separator between columns.} \item{remove}{If \code{TRUE}, remove input column from output data frame.} } \description{ Given either regular expression, \code{separate_dt()} turns a single character column into two columns. } \examples{ df <- data.frame(x = c(NA, "a.b", "a.d", "b.c")) df \%>\% separate_dt(x, c("A", "B")) # equals to df \%>\% separate_dt("x", c("A", "B")) # If you just want the second variable: df \%>\% separate_dt(x,into = c(NA,"B")) } \seealso{ \code{\link[tidyr]{separate}}, \code{\link[tidyfst]{unite_dt}} }
/man/separate_dt.Rd
permissive
hope-data-science/tidyfst
R
false
true
1,031
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/separate.R \name{separate_dt} \alias{separate_dt} \title{Separate a character column into two columns using a regular expression separator} \usage{ separate_dt( .data, separated_colname, into, sep = "[^[:alnum:]]+", remove = TRUE ) } \arguments{ \item{.data}{A data frame.} \item{separated_colname}{Column to be separated, can be a character or alias.} \item{into}{Character vector of length 2.} \item{sep}{Separator between columns.} \item{remove}{If \code{TRUE}, remove input column from output data frame.} } \description{ Given either regular expression, \code{separate_dt()} turns a single character column into two columns. } \examples{ df <- data.frame(x = c(NA, "a.b", "a.d", "b.c")) df \%>\% separate_dt(x, c("A", "B")) # equals to df \%>\% separate_dt("x", c("A", "B")) # If you just want the second variable: df \%>\% separate_dt(x,into = c(NA,"B")) } \seealso{ \code{\link[tidyr]{separate}}, \code{\link[tidyfst]{unite_dt}} }