content
large_stringlengths
0
6.46M
path
large_stringlengths
3
331
license_type
large_stringclasses
2 values
repo_name
large_stringlengths
5
125
language
large_stringclasses
1 value
is_vendor
bool
2 classes
is_generated
bool
2 classes
length_bytes
int64
4
6.46M
extension
large_stringclasses
75 values
text
stringlengths
0
6.46M
### =========================================================================
### TENxMatrixSeed objects
### -------------------------------------------------------------------------
### A TENxMatrixSeed object is a lightweight on-disk representation of the
### sparse matrix stored (in CSC layout, i.e. one run of nonzero values per
### column) in a 10x Genomics HDF5 file. It caches the dimensions, dimnames,
### and per-column data ranges at construction time so that the getters below
### never need to touch the file.

setClass("TENxMatrixSeed",
    contains="Array",
    representation(
        filepath="character",   # Absolute path to the HDF5 file so the
                                # object doesn't break when the user
                                # changes the working directory (e.g. with
                                # setwd()).
        group="character",      # Name of the group in the HDF5 file
                                # containing the 10x Genomics data.
        dim="integer",          # Matrix dimensions c(nrow, ncol), cached
                                # at construction time.
        dimnames="list",        # list(rownames, colnames); either element
                                # can be NULL.
        col_ranges="data.frame" # One row per matrix column: 'start' is the
                                # 1-based offset of the column's first
                                # nonzero value in the "data" dataset and
                                # 'width' its number of nonzero values.
                                # Can't use an IRanges object for this at the
                                # moment because they don't support Linteger
                                # start/end values yet.
    )
)

### - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
### path() getter/setter
###

### Does NOT access the file.
setMethod("path", "TENxMatrixSeed", function(object) object@filepath)

### Just a placeholder for now. Doesn't actually allow changing the path of
### the object yet: only re-setting the path to its current (normalized)
### value is accepted; anything else is an error.
setReplaceMethod("path", "TENxMatrixSeed",
    function(object, value)
    {
        new_filepath <- normarg_path(value, "the supplied path",
                                            "10x Genomics dataset")
        old_filepath <- path(object)
        if (new_filepath != old_filepath)
            stop(wmsg("changing the path of a TENxMatrixSeed object ",
                      "is not supported yet"))
        object
    }
)

### - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
### dim() getter
###

### Does NOT access the file.
setMethod("dim", "TENxMatrixSeed", function(x) x@dim)

### - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
### dimnames() getter
###

### Does NOT access the file. Collapses list(NULL, NULL) to NULL.
setMethod("dimnames", "TENxMatrixSeed",
    function(x) DelayedArray:::simplify_NULL_dimnames(x@dimnames)
)

### - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
### Low-level internal disk data extractors
###
### All the 10xGenomics components are monodimensional.
### Read a monodimensional component ("shape", "indptr", "data", "indices",
### etc.) from HDF5 group 'group', optionally restricted to the linear
### selection described by 'start'/'count', and return it as an ordinary
### vector.
.read_tenx_component <- function(filepath, group, name, start=NULL, count=NULL,
                                 as.integer=FALSE)
{
    name <- paste0(group, "/", name)
    ## h5mread() expects one start/count per dimension; the components are
    ## monodimensional so wrap the supplied values in a length-1 list.
    if (!is.null(start))
        start <- list(start)
    if (!is.null(count))
        count <- list(count)
    as.vector(h5mread(filepath, name, starts=start, counts=count,
                      as.integer=as.integer))
}

### Return the dimensions of the matrix.
.get_tenx_shape <- function(filepath, group)
    .read_tenx_component(filepath, group, "shape")

### Return the CSC column pointers (0-based, one value per column plus one).
.get_tenx_indptr <- function(filepath, group)
    .read_tenx_component(filepath, group, "indptr")

### Return (a subset of) the nonzero values.
.get_tenx_data <- function(filepath, group, start=NULL, count=NULL)
    .read_tenx_component(filepath, group, "data",
                         start=start, count=count)

### The row indices in the HDF5 file are 0-based but we return them 1-based.
.get_tenx_row_indices <- function(filepath, group, start=NULL, count=NULL)
    .read_tenx_component(filepath, group, "indices",
                         start=start, count=count, as.integer=TRUE) + 1L

### Return the rownames of the matrix, or NULL if the "genes" dataset is
### absent from the group.
.get_tenx_genes <- function(filepath, group)
{
    if (!h5exists(filepath, paste0(group, "/genes")))
        return(NULL)
    .read_tenx_component(filepath, group, "genes")
}

### Currently unused.
.get_tenx_gene_names <- function(filepath, group)
{
    if (!h5exists(filepath, paste0(group, "/gene_names")))
        return(NULL)
    .read_tenx_component(filepath, group, "gene_names")
}

### Return the colnames of the matrix, or NULL if the "barcodes" dataset is
### absent from the group.
.get_tenx_barcodes <- function(filepath, group)
{
    if (!h5exists(filepath, paste0(group, "/barcodes")))
        return(NULL)
    .read_tenx_component(filepath, group, "barcodes")
}

### - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
### .get_data_indices_by_col()
###

### S4Vectors:::fancy_mseq() does not accept 'offset' of type double yet so
### we implement a version that does.
### Will this work if sum(lengths) is > .Machine$integer.max?
### Generate the concatenation of the sequences
### (offset[i]+1):(offset[i]+lengths[i]), like S4Vectors:::fancy_mseq(), but
### accept 'offset' of type double (S4Vectors:::fancy_mseq() doesn't yet).
### 'lengths' is coerced to double up front so that cumsum() and sum() can't
### overflow to NA when sum(lengths) is > .Machine$integer.max (both would
### on an integer vector); this answers the long-vector concern above.
### Always returns a double vector.
.fancy_mseq <- function(lengths, offset=0)
{
    lengths <- as.double(lengths)  # avoid integer overflow in cumsum()/sum()
    lengths_len <- length(lengths)
    if (lengths_len == 0L)
        return(numeric(0))
    ## 'offsets[i]' shifts the running sequence so that the i-th chunk
    ## restarts at offset[i]+1 ('offset' is recycled along 'lengths').
    offsets <- offset - cumsum(c(0, lengths[-lengths_len]))
    seq_len(sum(lengths)) + rep.int(offsets, lengths)
}

### 'j' must be an integer vector containing valid col indices.
### Return data indices in a NumericList object parallel to 'j' i.e. with
### one list element per col index in 'j'. The k-th list element contains
### the 1-based positions, in the "data" dataset, of the nonzero values of
### col 'j[k]'.
.get_data_indices_by_col <- function(x, j)
{
    col_ranges <- S4Vectors:::extract_data_frame_rows(x@col_ranges, j)
    start2 <- col_ranges[ , "start"]
    width2 <- col_ranges[ , "width"]
    idx2 <- .fancy_mseq(width2, offset=start2 - 1L)
    ### Will this work if 'idx2' is a long vector?
    relist(idx2, PartitioningByWidth(width2))
}

### - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
### .extract_data_from_adjacent_cols()
###

### 'j1' and 'j2' must be 2 single integers representing a valid range of
### col indices.
### If 'as.sparse=FALSE', return a NumericList or IntegerList object parallel
### to 'j1:j2' i.e. with one list element per col index in 'j1:j2'.
### If 'as.sparse=TRUE', return a SparseArraySeed object.
.extract_data_from_adjacent_cols <- function(x, j1, j2, as.sparse=FALSE)
{
    j12 <- j1:j2
    ## Cols j1:j2 occupy one contiguous run in the "data" dataset, so a
    ## single linear read fetches all of them.
    start <- x@col_ranges[j1, "start"]
    count_per_col <- x@col_ranges[j12, "width"]
    count <- sum(count_per_col)
    ans_nzdata <- .get_tenx_data(x@filepath, x@group,
                                 start=start, count=count)
    if (!as.sparse)
        return(relist(ans_nzdata, PartitioningByWidth(count_per_col)))
    row_indices <- .get_tenx_row_indices(x@filepath, x@group,
                                         start=start, count=count)
    col_indices <- rep.int(j12, count_per_col)
    ans_nzindex <- cbind(row_indices, col_indices, deparse.level=0L)
    SparseArraySeed(dim(x), ans_nzindex, ans_nzdata, check=FALSE)
}

### - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
### .extract_nonzero_data_by_col()
###

### Extract nonzero data using the "random" method.
### This method is based on h5mread( , starts=list(start)) which retrieves
### an arbitrary/random subset of the data.
### 'j' must be an integer vector containing valid col indices. It cannot
### be NULL.
.random_extract_nonzero_data_by_col <- function(x, j)
{
    data_indices <- .get_data_indices_by_col(x, j)
    idx2 <- unlist(data_indices, use.names=FALSE)
    data <- .get_tenx_data(x@filepath, x@group, start=idx2)
    ## Restore the per-column grouping of 'data_indices'.
    relist(data, data_indices)
}

### Extract nonzero data using the "linear" method.
### This method is based on h5mread( , starts=list(start), counts=list(count))
### which retrieves a linear subset of the data and should be more efficient
### than doing h5mread( , starts=list(seq(start, length.out=count))).
### 'j' must be NULL or an integer vector containing valid col indices. It
### should not be empty.
.linear_extract_nonzero_data_by_col <- function(x, j)
{
    if (is.null(j)) {
        j1 <- 1L
        j2 <- ncol(x)
    } else {
        stopifnot(is.numeric(j), length(j) != 0L)
        j1 <- min(j)
        j2 <- max(j)
    }
    ## Reads ALL the cols in range j1:j2, then keeps (and reorders) only
    ## those requested in 'j'.
    nonzero_data <- .extract_data_from_adjacent_cols(x, j1, j2)
    if (is.null(j))
        return(nonzero_data)
    nonzero_data[match(j, j1:j2)]
}

### Resolve method "auto" to "random" or "linear" based on how densely the
### requested cols 'j' cover the range min(j):max(j). Non-"auto" values are
### returned unchanged.
.normarg_method <- function(method, j)
{
    if (method != "auto")
        return(method)
    if (is.null(j))
        return("linear")
    if (length(j) == 0L)
        return("random")
    j1 <- min(j)
    j2 <- max(j)
    ## 'ratio' is > 0 and <= 1. A value close to 1 indicates that the columns
    ## to extract are close from each other (a value of 1 indicating that
    ## they are adjacent e.g. j <- 18:25). A value close to 0 indicates that
    ## they are far apart from each other i.e. that they are separated by many
    ## columns that are not requested. The "linear" method is very efficient
    ## when 'ratio' is close to 1. It is so much more efficient than the
    ## "random" method (typically 10x or 20x faster) that we choose it when
    ## 'ratio' is >= 0.2
    ratio <- length(j) / (j2 - j1 + 1L)
    if (ratio >= 0.2) "linear" else "random"
}

### 'j' must be NULL or an integer vector containing valid col indices.
### Return a NumericList or IntegerList object parallel to 'j' i.e. with
### one list element per col index in 'j'. Dispatches to the "random" or
### "linear" extraction helper, choosing automatically by default (see
### .normarg_method() for the heuristic).
.extract_nonzero_data_by_col <- function(x, j,
    method=c("auto", "random", "linear"))
{
    method <- match.arg(method)
    method <- .normarg_method(method, j)
    if (method == "random") {
        .random_extract_nonzero_data_by_col(x, j)
    } else {
        .linear_extract_nonzero_data_by_col(x, j)
    }
}

### - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
### .load_SparseArraySeed_from_TENxMatrixSeed()
###

### Load sparse data using the "random" method.
### This method is based on h5mread( , starts=list(start)) which retrieves
### an arbitrary/random subset of the data.
### 'i' must be NULL or an integer vector containing valid row indices.
### 'j' must be an integer vector containing valid col indices. It cannot
### be NULL.
### Both 'i' and 'j' can contain duplicates. Duplicates in 'i' have no effect
### on the output but duplicates in 'j' will produce duplicates in the output.
### Return a SparseArraySeed object.
.random_load_SparseArraySeed_from_TENxMatrixSeed <- function(x, i, j)
{
    stopifnot(is.null(i) || is.numeric(i), is.numeric(j))
    data_indices <- .get_data_indices_by_col(x, j)
    idx2 <- unlist(data_indices, use.names=FALSE)
    ## Row indices are read first so the row filter below can shrink 'idx2'
    ## before the nonzero values themselves are read.
    row_indices <- .get_tenx_row_indices(x@filepath, x@group, start=idx2)
    col_indices <- rep.int(j, lengths(data_indices))
    if (!is.null(i)) {
        ## Keep only the nonzero values whose row index is in 'i'.
        keep_idx <- which(row_indices %in% i)
        idx2 <- idx2[keep_idx]
        row_indices <- row_indices[keep_idx]
        col_indices <- col_indices[keep_idx]
    }
    ans_nzindex <- cbind(row_indices, col_indices, deparse.level=0L)
    ans_nzdata <- .get_tenx_data(x@filepath, x@group, start=idx2)
    SparseArraySeed(dim(x), ans_nzindex, ans_nzdata, check=FALSE)
}

### Load sparse data using the "linear" method.
### This method is based on h5mread( , starts=list(start), counts=list(count))
### which retrieves a linear subset of the data and should be more efficient
### than doing h5mread( , starts=list(seq(start, length.out=count))).
### 'j' must be NULL or a non-empty integer vector containing valid
### col indices. The output is not affected by duplicates in 'j'.
### Return a SparseArraySeed object.
.linear_load_SparseArraySeed_from_TENxMatrixSeed <- function(x, j)
{
    if (is.null(j)) {
        j1 <- 1L
        j2 <- ncol(x)
    } else {
        stopifnot(is.numeric(j), length(j) != 0L)
        j1 <- min(j)
        j2 <- max(j)
    }
    ## Note: this loads the data for ALL the cols in range j1:j2, including
    ## cols not in 'j'; the caller is expected to subset afterwards.
    .extract_data_from_adjacent_cols(x, j1, j2, as.sparse=TRUE)
}

### Duplicates in 'index[[1]]' are ok and won't affect the output.
### Duplicates in 'index[[2]]' are ok but might introduce duplicates
### in the output so should be avoided.
### Return a SparseArraySeed object.
.load_SparseArraySeed_from_TENxMatrixSeed <- function(x, index,
    method=c("auto", "random", "linear"))
{
    i <- index[[1L]]
    j <- index[[2L]]
    method <- match.arg(method)
    method <- .normarg_method(method, j)
    if (method == "random") {
        .random_load_SparseArraySeed_from_TENxMatrixSeed(x, i, j)
    } else {
        .linear_load_SparseArraySeed_from_TENxMatrixSeed(x, j)
    }
}

### - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
### extract_array()
###

### Workhorse behind the "extract_array" method below: load the requested
### sparse block from disk, then densify it in memory.
.extract_array_from_TENxMatrixSeed <- function(x, index)
{
    ans_dim <- DelayedArray:::get_Nindex_lengths(index, dim(x))
    if (any(ans_dim == 0L)) {
        ## Return an empty matrix (a zero-length read keeps the storage
        ## type of the "data" dataset).
        data0 <- .get_tenx_data(x@filepath, x@group, start=integer(0))
        return(array(data0, dim=ans_dim))
    }
    sas <- .load_SparseArraySeed_from_TENxMatrixSeed(x, index)  # I/O
    extract_array(sas, index)                                   # in-memory
}

setMethod("extract_array", "TENxMatrixSeed",
    .extract_array_from_TENxMatrixSeed
)

### - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
### chunkdim() getter
###

### Does NOT access the file.
### One chunk per column, matching the CSC layout of the data on disk.
setMethod("chunkdim", "TENxMatrixSeed", function(x) c(nrow(x), 1L))

### - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
### Constructor
###

### Validate the layout of HDF5 group 'group' in file 'filepath' and cache
### dim, dimnames, and the per-column data ranges in a new TENxMatrixSeed
### object. Accesses the file.
TENxMatrixSeed <- function(filepath, group="mm10")
{
    filepath <- normarg_path(filepath, "'filepath'", "10x Genomics dataset")
    if (!isSingleString(group))
        stop(wmsg("'group' must be a single string specifying the name ",
                  "of the group in the HDF5 file containing the ",
                  "10x Genomics data"))
    if (group == "")
        stop(wmsg("'group' cannot be the empty string"))

    ## dim
    dim <- .get_tenx_shape(filepath, group)
    stopifnot(length(dim) == 2L)

    ## dimnames ("genes" and "barcodes" datasets are optional but, when
    ## present, must be parallel to the corresponding dimension).
    rownames <- .get_tenx_genes(filepath, group)
    stopifnot(is.null(rownames) || length(rownames) == dim[[1L]])
    colnames <- .get_tenx_barcodes(filepath, group)
    stopifnot(is.null(colnames) || length(colnames) == dim[[2L]])
    dimnames <- list(rownames, colnames)

    ## col_ranges -- sanity-check the CSC invariants (indptr brackets the
    ## "data"/"indices" datasets) before deriving per-column start/width.
    data_len <- h5length(filepath, paste0(group, "/data"))
    indices_len <- h5length(filepath, paste0(group, "/indices"))
    stopifnot(data_len == indices_len)
    indptr <- .get_tenx_indptr(filepath, group)
    stopifnot(length(indptr) == dim[[2L]] + 1L,
              indptr[[1L]] == 0L,
              indptr[[length(indptr)]] == data_len)
    col_ranges <- data.frame(start=indptr[-length(indptr)] + 1,
                             width=as.integer(diff(indptr)))

    new2("TENxMatrixSeed", filepath=filepath,
                           group=group,
                           dim=dim,
                           dimnames=dimnames,
                           col_ranges=col_ranges)
}

### - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
### Show
###

setMethod("show", "TENxMatrixSeed",
    function(object)
    {
        cat(DelayedArray:::array_as_one_line_summary(object), ":\n", sep="")
        cat("# dirname: ", dirname(object), "\n", sep="")
        cat("# basename: ", basename(object), "\n", sep="")
        cat("# group: ", object@group, "\n", sep="")
    }
)

### - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
### Taking advantage of sparsity
###

### Fraction of matrix elements that are zero. Accesses the file (to get
### the length of the "data" dataset).
setMethod("sparsity", "TENxMatrixSeed",
    function(x)
    {
        data_len <- h5length(x@filepath, paste0(x@group, "/data"))
        1 - data_len / length(x)
    }
)
### This is about **structural** sparsity, not about quantitative sparsity
### measured by sparsity().
setMethod("is_sparse", "TENxMatrixSeed", function(x) TRUE)

### Workhorse behind the "extract_sparse_array" method below.
.extract_sparse_array_from_TENxMatrixSeed <- function(x, index)
{
    sas <- .load_SparseArraySeed_from_TENxMatrixSeed(x, index)  # I/O
    extract_sparse_array(sas, index)                            # in-memory
}

setMethod("extract_sparse_array", "TENxMatrixSeed",
    .extract_sparse_array_from_TENxMatrixSeed
)

### The default "read_sparse_block" method defined in DelayedArray would work
### just fine on a TENxMatrixSeed object (thanks to the "extract_sparse_array"
### method for TENxMatrixSeed objects defined above), but we overwrite it with
### the method below which is slightly more efficient. That's because the
### method below calls read_sparse_block() on the SparseArraySeed object
### returned by .load_SparseArraySeed_from_TENxMatrixSeed() and this is
### faster than calling extract_sparse_array() on the same object (which
### is what the "extract_sparse_array" method for TENxMatrixSeed would do
### when called by the default "read_sparse_block" method).
### Not sure the difference is significant enough for this extra method to
### be worth it though, because, time is really dominated by I/O here, that
### is, by the call to .load_SparseArraySeed_from_TENxMatrixSeed().
.read_sparse_block_from_TENxMatrixSeed <- function(x, viewport)
{
    index <- makeNindexFromArrayViewport(viewport, expand.RangeNSBS=TRUE)
    sas <- .load_SparseArraySeed_from_TENxMatrixSeed(x, index)  # I/O
    ## Unlike the "extract_sparse_array" method for TENxMatrixSeed objects
    ## defined above, we use read_sparse_block() here, which is faster than
    ## using extract_sparse_array().
    read_sparse_block(sas, viewport)                            # in-memory
}

setMethod("read_sparse_block", "TENxMatrixSeed",
    .read_sparse_block_from_TENxMatrixSeed
)

### Return a NumericList or IntegerList object parallel to 'j' i.e. with
### one list element per col index in 'j'.
### Spelling: "nonzero" preferred over "non-zero".
### See:
### https://gcc.gnu.org/ml/gcc/2001-10/msg00610.html
setGeneric("extractNonzeroDataByCol", signature="x",
    function(x, j) standardGeneric("extractNonzeroDataByCol")
)

### 'j' is normalized to positive integer col indices before dispatching to
### the internal extractor (presumably this also accepts colnames via the
### 'colnames(x)' argument -- confirm against DelayedArray).
setMethod("extractNonzeroDataByCol", "TENxMatrixSeed",
    function(x, j)
    {
        j <- DelayedArray:::normalizeSingleBracketSubscript2(j, ncol(x),
                                                             colnames(x))
        .extract_nonzero_data_by_col(x, j)
    }
)

### - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
### Coercion to dgCMatrix
###

### Read the full "indices", "indptr", and "data" components and hand them
### to Matrix::sparseMatrix(). Note that this realizes the entire dataset
### in memory.
.from_TENxMatrixSeed_to_dgCMatrix <- function(from)
{
    row_indices <- .get_tenx_row_indices(from@filepath, from@group)
    indptr <- .get_tenx_indptr(from@filepath, from@group)
    data <- .get_tenx_data(from@filepath, from@group)
    sparseMatrix(i=row_indices, p=indptr, x=data,
                 dims=dim(from), dimnames=dimnames(from))
}

setAs("TENxMatrixSeed", "dgCMatrix", .from_TENxMatrixSeed_to_dgCMatrix)
setAs("TENxMatrixSeed", "sparseMatrix", .from_TENxMatrixSeed_to_dgCMatrix)
/R/TENxMatrixSeed-class.R
no_license
muschellij2/HDF5Array
R
false
false
17,938
r
### ========================================================================= ### TENxMatrixSeed objects ### ------------------------------------------------------------------------- setClass("TENxMatrixSeed", contains="Array", representation( filepath="character", # Absolute path to the HDF5 file so the # object doesn't break when the user # changes the working directory (e.g. with # setwd()). group="character", # Name of the group in the HDF5 file # containing the 10x Genomics data. dim="integer", dimnames="list", col_ranges="data.frame" # Can't use an IRanges object for this at the # moment because they don't support Linteger # start/end values yet. ) ) ### - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - ### path() getter/setter ### ### Does NOT access the file. setMethod("path", "TENxMatrixSeed", function(object) object@filepath) ### Just a placeholder for now. Doesn't actually allow changing the path of ### the object yet. setReplaceMethod("path", "TENxMatrixSeed", function(object, value) { new_filepath <- normarg_path(value, "the supplied path", "10x Genomics dataset") old_filepath <- path(object) if (new_filepath != old_filepath) stop(wmsg("changing the path of a TENxMatrixSeed object ", "is not supported yet")) object } ) ### - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - ### dim() getter ### ### Does NOT access the file. setMethod("dim", "TENxMatrixSeed", function(x) x@dim) ### - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - ### dimnames() getter ### ### Does NOT access the file. setMethod("dimnames", "TENxMatrixSeed", function(x) DelayedArray:::simplify_NULL_dimnames(x@dimnames) ) ### - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - ### Low-level internal disk data extractors ### ### All the 10xGenomics components are monodimensional. 
.read_tenx_component <- function(filepath, group, name, start=NULL, count=NULL, as.integer=FALSE) { name <- paste0(group, "/", name) if (!is.null(start)) start <- list(start) if (!is.null(count)) count <- list(count) as.vector(h5mread(filepath, name, starts=start, counts=count, as.integer=as.integer)) } ### Return the dimensions of the matrix. .get_tenx_shape <- function(filepath, group) .read_tenx_component(filepath, group, "shape") .get_tenx_indptr <- function(filepath, group) .read_tenx_component(filepath, group, "indptr") .get_tenx_data <- function(filepath, group, start=NULL, count=NULL) .read_tenx_component(filepath, group, "data", start=start, count=count) ### The row indices in the HDF5 file are 0-based but we return them 1-based. .get_tenx_row_indices <- function(filepath, group, start=NULL, count=NULL) .read_tenx_component(filepath, group, "indices", start=start, count=count, as.integer=TRUE) + 1L ### Return the rownames of the matrix. .get_tenx_genes <- function(filepath, group) { if (!h5exists(filepath, paste0(group, "/genes"))) return(NULL) .read_tenx_component(filepath, group, "genes") } ### Currently unused. .get_tenx_gene_names <- function(filepath, group) { if (!h5exists(filepath, paste0(group, "/gene_names"))) return(NULL) .read_tenx_component(filepath, group, "gene_names") } ### Return the colnames of the matrix. .get_tenx_barcodes <- function(filepath, group) { if (!h5exists(filepath, paste0(group, "/barcodes"))) return(NULL) .read_tenx_component(filepath, group, "barcodes") } ### - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - ### .get_data_indices_by_col() ### ### S4Vectors:::fancy_mseq() does not accept 'offset' of type double yet so ### we implement a version that does. ### Will this work if sum(lengths) is > .Machine$integer.max? 
.fancy_mseq <- function(lengths, offset=0) { lengths_len <- length(lengths) if (lengths_len == 0L) return(numeric(0)) offsets <- offset - cumsum(c(0L, lengths[-lengths_len])) seq_len(sum(lengths)) + rep.int(offsets, lengths) } ### 'j' must be an integer vector containing valid col indices. ### Return data indices in a NumericList object parallel to 'j' i.e. with ### one list element per col index in 'j'. .get_data_indices_by_col <- function(x, j) { col_ranges <- S4Vectors:::extract_data_frame_rows(x@col_ranges, j) start2 <- col_ranges[ , "start"] width2 <- col_ranges[ , "width"] idx2 <- .fancy_mseq(width2, offset=start2 - 1L) ### Will this work if 'idx2' is a long vector? relist(idx2, PartitioningByWidth(width2)) } ### - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - ### .extract_data_from_adjacent_cols() ### ### 'j1' and 'j2' must be 2 single integers representing a valid range of ### col indices. ### If 'as.sparse=FALSE', return a NumericList or IntegerList object parallel ### to 'j1:j2' i.e. with one list element per col index in 'j1:j2'. ### If 'as.sparse=TRUE', return a SparseArraySeed object. .extract_data_from_adjacent_cols <- function(x, j1, j2, as.sparse=FALSE) { j12 <- j1:j2 start <- x@col_ranges[j1, "start"] count_per_col <- x@col_ranges[j12, "width"] count <- sum(count_per_col) ans_nzdata <- .get_tenx_data(x@filepath, x@group, start=start, count=count) if (!as.sparse) return(relist(ans_nzdata, PartitioningByWidth(count_per_col))) row_indices <- .get_tenx_row_indices(x@filepath, x@group, start=start, count=count) col_indices <- rep.int(j12, count_per_col) ans_nzindex <- cbind(row_indices, col_indices, deparse.level=0L) SparseArraySeed(dim(x), ans_nzindex, ans_nzdata, check=FALSE) } ### - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - ### .extract_nonzero_data_by_col() ### ### Extract nonzero data using the "random" method. 
### This method is based on h5mread( , starts=list(start)) which retrieves ### an arbitrary/random subset of the data. ### 'j' must be an integer vector containing valid col indices. It cannot ### be NULL. .random_extract_nonzero_data_by_col <- function(x, j) { data_indices <- .get_data_indices_by_col(x, j) idx2 <- unlist(data_indices, use.names=FALSE) data <- .get_tenx_data(x@filepath, x@group, start=idx2) relist(data, data_indices) } ### Extract nonzero data using the "linear" method. ### This method is based on h5mread( , starts=list(start), counts=list(count)) ### which retrieves a linear subset of the data and should be more efficient ### than doing h5mread( , starts=list(seq(start, length.out=count))). ### 'j' must be NULL or an integer vector containing valid col indices. It ### should not be empty. .linear_extract_nonzero_data_by_col <- function(x, j) { if (is.null(j)) { j1 <- 1L j2 <- ncol(x) } else { stopifnot(is.numeric(j), length(j) != 0L) j1 <- min(j) j2 <- max(j) } nonzero_data <- .extract_data_from_adjacent_cols(x, j1, j2) if (is.null(j)) return(nonzero_data) nonzero_data[match(j, j1:j2)] } .normarg_method <- function(method, j) { if (method != "auto") return(method) if (is.null(j)) return("linear") if (length(j) == 0L) return("random") j1 <- min(j) j2 <- max(j) ## 'ratio' is > 0 and <= 1. A value close to 1 indicates that the columns ## to extract are close from each other (a value of 1 indicating that ## they are adjacent e.g. j <- 18:25). A value close to 0 indicates that ## they are far apart from each other i.e. that they are separated by many ## columns that are not requested. The "linear" method is very efficient ## when 'ratio' is close to 1. It is so much more efficient than the ## "random" method (typically 10x or 20x faster) that we choose it when ## 'ratio' is >= 0.2 ratio <- length(j) / (j2 - j1 + 1L) if (ratio >= 0.2) "linear" else "random" } ### 'j' must be NULL or an integer vector containing valid col indices. 
### Return a NumericList or IntegerList object parallel to 'j' i.e. with ### one list element per col index in 'j'. .extract_nonzero_data_by_col <- function(x, j, method=c("auto", "random", "linear")) { method <- match.arg(method) method <- .normarg_method(method, j) if (method == "random") { .random_extract_nonzero_data_by_col(x, j) } else { .linear_extract_nonzero_data_by_col(x, j) } } ### - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - ### .load_SparseArraySeed_from_TENxMatrixSeed() ### ### Load sparse data using the "random" method. ### This method is based on h5mread( , starts=list(start)) which retrieves ### an arbitrary/random subset of the data. ### 'i' must be NULL or an integer vector containing valid row indices. ### 'j' must be an integer vector containing valid col indices. It cannot ### be NULL. ### Both 'i' and 'j' can contain duplicates. Duplicates in 'i' have no effect ### on the output but duplicates in 'j' will produce duplicates in the output. ### Return a SparseArraySeed object. .random_load_SparseArraySeed_from_TENxMatrixSeed <- function(x, i, j) { stopifnot(is.null(i) || is.numeric(i), is.numeric(j)) data_indices <- .get_data_indices_by_col(x, j) idx2 <- unlist(data_indices, use.names=FALSE) row_indices <- .get_tenx_row_indices(x@filepath, x@group, start=idx2) col_indices <- rep.int(j, lengths(data_indices)) if (!is.null(i)) { keep_idx <- which(row_indices %in% i) idx2 <- idx2[keep_idx] row_indices <- row_indices[keep_idx] col_indices <- col_indices[keep_idx] } ans_nzindex <- cbind(row_indices, col_indices, deparse.level=0L) ans_nzdata <- .get_tenx_data(x@filepath, x@group, start=idx2) SparseArraySeed(dim(x), ans_nzindex, ans_nzdata, check=FALSE) } ### Load sparse data using the "linear" method. ### This method is based on h5mread( , starts=list(start), counts=list(count)) ### which retrieves a linear subset of the data and should be more efficient ### than doing h5mread( , starts=list(seq(start, length.out=count))). 
### 'j' must be NULL or a non-empty integer vector containing valid ### col indices. The output is not affected by duplicates in 'j'. ### Return a SparseArraySeed object. .linear_load_SparseArraySeed_from_TENxMatrixSeed <- function(x, j) { if (is.null(j)) { j1 <- 1L j2 <- ncol(x) } else { stopifnot(is.numeric(j), length(j) != 0L) j1 <- min(j) j2 <- max(j) } .extract_data_from_adjacent_cols(x, j1, j2, as.sparse=TRUE) } ### Duplicates in 'index[[1]]' are ok and won't affect the output. ### Duplicates in 'index[[2]]' are ok but might introduce duplicates ### in the output so should be avoided. ### Return a SparseArraySeed object. .load_SparseArraySeed_from_TENxMatrixSeed <- function(x, index, method=c("auto", "random", "linear")) { i <- index[[1L]] j <- index[[2L]] method <- match.arg(method) method <- .normarg_method(method, j) if (method == "random") { .random_load_SparseArraySeed_from_TENxMatrixSeed(x, i, j) } else { .linear_load_SparseArraySeed_from_TENxMatrixSeed(x, j) } } ### - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - ### extract_array() ### .extract_array_from_TENxMatrixSeed <- function(x, index) { ans_dim <- DelayedArray:::get_Nindex_lengths(index, dim(x)) if (any(ans_dim == 0L)) { ## Return an empty matrix. data0 <- .get_tenx_data(x@filepath, x@group, start=integer(0)) return(array(data0, dim=ans_dim)) } sas <- .load_SparseArraySeed_from_TENxMatrixSeed(x, index) # I/O extract_array(sas, index) # in-memory } setMethod("extract_array", "TENxMatrixSeed", .extract_array_from_TENxMatrixSeed ) ### - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - ### chunkdim() getter ### ### Does NOT access the file. 
setMethod("chunkdim", "TENxMatrixSeed", function(x) c(nrow(x), 1L)) ### - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - ### Constructor ### TENxMatrixSeed <- function(filepath, group="mm10") { filepath <- normarg_path(filepath, "'filepath'", "10x Genomics dataset") if (!isSingleString(group)) stop(wmsg("'group' must be a single string specifying the name ", "of the group in the HDF5 file containing the ", "10x Genomics data")) if (group == "") stop(wmsg("'group' cannot be the empty string")) ## dim dim <- .get_tenx_shape(filepath, group) stopifnot(length(dim) == 2L) ## dimnames rownames <- .get_tenx_genes(filepath, group) stopifnot(is.null(rownames) || length(rownames) == dim[[1L]]) colnames <- .get_tenx_barcodes(filepath, group) stopifnot(is.null(colnames) || length(colnames) == dim[[2L]]) dimnames <- list(rownames, colnames) ## col_ranges data_len <- h5length(filepath, paste0(group, "/data")) indices_len <- h5length(filepath, paste0(group, "/indices")) stopifnot(data_len == indices_len) indptr <- .get_tenx_indptr(filepath, group) stopifnot(length(indptr) == dim[[2L]] + 1L, indptr[[1L]] == 0L, indptr[[length(indptr)]] == data_len) col_ranges <- data.frame(start=indptr[-length(indptr)] + 1, width=as.integer(diff(indptr))) new2("TENxMatrixSeed", filepath=filepath, group=group, dim=dim, dimnames=dimnames, col_ranges=col_ranges) } ### - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - ### Show ### setMethod("show", "TENxMatrixSeed", function(object) { cat(DelayedArray:::array_as_one_line_summary(object), ":\n", sep="") cat("# dirname: ", dirname(object), "\n", sep="") cat("# basename: ", basename(object), "\n", sep="") cat("# group: ", object@group, "\n", sep="") } ) ### - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - ### Taking advantage of sparsity ### setMethod("sparsity", "TENxMatrixSeed", function(x) { data_len <- h5length(x@filepath, paste0(x@group, "/data")) 1 - data_len / length(x) } ) 
### This is about **structural** sparsity, not about quantitative sparsity ### measured by sparsity(). setMethod("is_sparse", "TENxMatrixSeed", function(x) TRUE) .extract_sparse_array_from_TENxMatrixSeed <- function(x, index) { sas <- .load_SparseArraySeed_from_TENxMatrixSeed(x, index) # I/O extract_sparse_array(sas, index) # in-memory } setMethod("extract_sparse_array", "TENxMatrixSeed", .extract_sparse_array_from_TENxMatrixSeed ) ### The default "read_sparse_block" method defined in DelayedArray would work ### just fine on a TENxMatrixSeed object (thanks to the "extract_sparse_array" ### method for TENxMatrixSeed objects defined above), but we overwrite it with ### the method below which is slightly more efficient. That's because the ### method below calls read_sparse_block() on the SparseArraySeed object ### returned by .load_SparseArraySeed_from_TENxMatrixSeed() and this is ### faster than calling extract_sparse_array() on the same object (which ### is what the "extract_sparse_array" method for TENxMatrixSeed would do ### when called by the default "read_sparse_block" method). ### Not sure the difference is significant enough for this extra method to ### be worth it though, because, time is really dominated by I/O here, that ### is, by the call to .load_SparseArraySeed_from_TENxMatrixSeed(). .read_sparse_block_from_TENxMatrixSeed <- function(x, viewport) { index <- makeNindexFromArrayViewport(viewport, expand.RangeNSBS=TRUE) sas <- .load_SparseArraySeed_from_TENxMatrixSeed(x, index) # I/O ## Unlike the "extract_sparse_array" method for TENxMatrixSeed objects ## defined above, we use read_sparse_block() here, which is faster than ## using extract_sparse_array(). read_sparse_block(sas, viewport) # in-memory } setMethod("read_sparse_block", "TENxMatrixSeed", .read_sparse_block_from_TENxMatrixSeed ) ### Return a NumericList or IntegerList object parallel to 'j' i.e. with ### one list element per col index in 'j'. ### Spelling: "nonzero" preferred over "non-zero". 
See: ### https://gcc.gnu.org/ml/gcc/2001-10/msg00610.html setGeneric("extractNonzeroDataByCol", signature="x", function(x, j) standardGeneric("extractNonzeroDataByCol") ) setMethod("extractNonzeroDataByCol", "TENxMatrixSeed", function(x, j) { j <- DelayedArray:::normalizeSingleBracketSubscript2(j, ncol(x), colnames(x)) .extract_nonzero_data_by_col(x, j) } ) ### - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - ### Coercion to dgCMatrix ### .from_TENxMatrixSeed_to_dgCMatrix <- function(from) { row_indices <- .get_tenx_row_indices(from@filepath, from@group) indptr <- .get_tenx_indptr(from@filepath, from@group) data <- .get_tenx_data(from@filepath, from@group) sparseMatrix(i=row_indices, p=indptr, x=data, dims=dim(from), dimnames=dimnames(from)) } setAs("TENxMatrixSeed", "dgCMatrix", .from_TENxMatrixSeed_to_dgCMatrix) setAs("TENxMatrixSeed", "sparseMatrix", .from_TENxMatrixSeed_to_dgCMatrix)
library(ape) testtree <- read.tree("6618_5.txt") unrooted_tr <- unroot(testtree) write.tree(unrooted_tr, file="6618_5_unrooted.txt")
/codeml_files/newick_trees_processed/6618_5/rinput.R
no_license
DaniBoo/cyanobacteria_project
R
false
false
135
r
library(ape) testtree <- read.tree("6618_5.txt") unrooted_tr <- unroot(testtree) write.tree(unrooted_tr, file="6618_5_unrooted.txt")
% Generated by roxygen2 (4.1.1): do not edit by hand % Please edit documentation in R/functions.r \name{adMCMC} \alias{adMCMC} \title{Estimate a continuous posterior distribution via adaptive MCMC.} \usage{ adMCMC(inits, logLik, logPrior, nIter, nThin, logScaleMCMC = TRUE, blocks = NULL, ...) } \arguments{ \item{inits}{Vector of initial parameter values. The log likelihood function and prior need to have finite values for these parameters.} \item{logLik}{A function giving the log likelihood value for the model. The function should take parameter values to be estimated via an argument called pars.} \item{logPrior}{A function giving the log prior density for the parameters. The function should take a single argument named pars. For parameter values that are out of range the function should return -Inf.} \item{nIter}{The size of the MCMC sample.} \item{nThin}{The number of thining iterations in between each saved sample.} \item{logScaleMCMC}{If true, the MCMC is run on the log scale of all parameters. Requires that all parameters are positive.} \item{blocks}{May be used for blocking MCMC updates. In this case a vector of integers of the same length as inits where each unique integer represents a block. If NULL, no blocking is applied.} \item{...}{Further arguments passed to \code{logLik}.} } \value{ An \code{\link[coda]{mcmc}} object. } \description{ The function is intended to be used with computationally expensive likelihoods, and little attention has been giving to speed up the adaptive MCMC algorithm itself. The underlying algorithm is an adaptive MCMC sampler with adaptive global scaling and using the empirical covariance matrix to generate proposals. } \examples{ ## Simulate a cohort with four stages, sampled at 15 occasions with 30 initial individuals in ## each sample. 
cdata = simMNCohort(seq(1,50, length.out=15), N0 = 30, rGammaSD, mu = c(5, 15, 8), cv = c(.5,.5,.5), mortRate = c(0.01, 0.01,0.01,0.01), Narg = "N") ## Define a likelihood function using the function mnLogLik and rGammaSD. ## Note that it must have the argument named 'pars'. exLogLik = function(pars, data, nSim) { mnLogLik(data, rStageDur = rGammaSD, mu = pars[1:3], cv = rep(pars[4], 3), mortRate = rep(pars[5], 4), N = nSim) } ## Define the log-prior function using bounded uniform priors. exLogPrior = function(pars) { mu = pars[1:3] cv = pars[4] mr = pars[5] sum(log(all(c(cv < 2,mu < 30,mr < 1,cv > 0,mu > 0,mr > 0)))) } \dontrun{ ## Fit model inits = c(10,10,10,1,.001) names(inits) = c(paste0("mu", 1:3), "cv", "mr") mcmc = adMCMC(inits, exLogLik, exLogPrior, nIter = 1000, nThin = 10, data = cdata, nSim = 1000) plot(mcmc) } }
/man/adMCMC.Rd
no_license
jknape/stageFreq
R
false
false
2,733
rd
% Generated by roxygen2 (4.1.1): do not edit by hand % Please edit documentation in R/functions.r \name{adMCMC} \alias{adMCMC} \title{Estimate a continuous posterior distribution via adaptive MCMC.} \usage{ adMCMC(inits, logLik, logPrior, nIter, nThin, logScaleMCMC = TRUE, blocks = NULL, ...) } \arguments{ \item{inits}{Vector of initial parameter values. The log likelihood function and prior need to have finite values for these parameters.} \item{logLik}{A function giving the log likelihood value for the model. The function should take parameter values to be estimated via an argument called pars.} \item{logPrior}{A function giving the log prior density for the parameters. The function should take a single argument named pars. For parameter values that are out of range the function should return -Inf.} \item{nIter}{The size of the MCMC sample.} \item{nThin}{The number of thining iterations in between each saved sample.} \item{logScaleMCMC}{If true, the MCMC is run on the log scale of all parameters. Requires that all parameters are positive.} \item{blocks}{May be used for blocking MCMC updates. In this case a vector of integers of the same length as inits where each unique integer represents a block. If NULL, no blocking is applied.} \item{...}{Further arguments passed to \code{logLik}.} } \value{ An \code{\link[coda]{mcmc}} object. } \description{ The function is intended to be used with computationally expensive likelihoods, and little attention has been giving to speed up the adaptive MCMC algorithm itself. The underlying algorithm is an adaptive MCMC sampler with adaptive global scaling and using the empirical covariance matrix to generate proposals. } \examples{ ## Simulate a cohort with four stages, sampled at 15 occasions with 30 initial individuals in ## each sample. 
cdata = simMNCohort(seq(1,50, length.out=15), N0 = 30, rGammaSD, mu = c(5, 15, 8), cv = c(.5,.5,.5), mortRate = c(0.01, 0.01,0.01,0.01), Narg = "N") ## Define a likelihood function using the function mnLogLik and rGammaSD. ## Note that it must have the argument named 'pars'. exLogLik = function(pars, data, nSim) { mnLogLik(data, rStageDur = rGammaSD, mu = pars[1:3], cv = rep(pars[4], 3), mortRate = rep(pars[5], 4), N = nSim) } ## Define the log-prior function using bounded uniform priors. exLogPrior = function(pars) { mu = pars[1:3] cv = pars[4] mr = pars[5] sum(log(all(c(cv < 2,mu < 30,mr < 1,cv > 0,mu > 0,mr > 0)))) } \dontrun{ ## Fit model inits = c(10,10,10,1,.001) names(inits) = c(paste0("mu", 1:3), "cv", "mr") mcmc = adMCMC(inits, exLogLik, exLogPrior, nIter = 1000, nThin = 10, data = cdata, nSim = 1000) plot(mcmc) } }
#************************************ # # (C) Copyright IBM Corp. 2015 # # Author: Bradley J Eck # #************************************ #' Epanet's Net1 Example #' #' A dataset created by reading the Net1.inp file #' distributed with Epanet using this package's #' read.inp() function. #' #' @name Net1rpt #' @docType data #' @usage Net1rpt #' @format An object of class \code{epanet.rpt} created by \link{read.inp}. NULL
/epanetReader/R/Net1rpt-data.r
no_license
ingted/R-Examples
R
false
false
445
r
#************************************ # # (C) Copyright IBM Corp. 2015 # # Author: Bradley J Eck # #************************************ #' Epanet's Net1 Example #' #' A dataset created by reading the Net1.inp file #' distributed with Epanet using this package's #' read.inp() function. #' #' @name Net1rpt #' @docType data #' @usage Net1rpt #' @format An object of class \code{epanet.rpt} created by \link{read.inp}. NULL
## Load libraries ================================ library(tidyverse) library(lubridate) library(forecast) ## Load preprocessed data ======================== powerData <- readRDS('./output/powerData.RDS') ## Create data per week per submeter ========================== powerWeek <- powerData %>% group_by(year, month, day, week, weekday, day) %>% summarise('meter1' = mean(Sub_metering_1), 'meter2' = mean(Sub_metering_2), 'meter3' = mean(Sub_metering_3), 'meternon' = mean(Sub_unnumbered)) %>% mutate(date = as_date(paste(year, month, day, sep="-"), "%Y-%m-%d"), totalpower = meter1 + meter2 + meter3 + meternon) ggplot(powerWeek, aes(x = date, y = meter1)) + geom_line() powerWeekTS <- powerData %>% filter(year == 2008, week %in% c(2,3,4)) %>% mutate(date = as_date(paste(year, month, day, sep="-"), "%Y-%m-%d"), totalpower = Global_active_power * 1000 / 60) powerWeekTS <- ts(powerWeek$totalpower, frequency = 365, start = c(powerWeek$year[1], powerWeek$day[1])) library(doParallel) ## Prepare clusters ================================= cl <- makeCluster(3) registerDoParallel(cl) taylor <- msts(powerWeek$totalpower, seasonal.periods=c(60, 1440, 10080)) taylor.fit <- tbats(taylor) plot(forecast(taylor.fit, h = 1440)) test <- HoltWinters(powerWeekTS) plot(test, ylim = c(0, 25)) forecasttest <- forecast(test, h = 90) plot(forecasttest) # # y <- head(powerWeek, 1000) # View(y) decomp <- stl(powerWeekTS, s.window="periodic") ap.sa <- seasadj(decomp) autoplot(cbind(powerWeekTS, SeasonallyAdjusted=ap.sa)) + xlab("Year") + ylab("Number of passengers (thousands)") ap.sa %>% diff() %>% ggtsdisplay(main="") fit <- auto.arima(ap.sa) checkresiduals(fit) autoplot(forecast(fit))
/IoT/R/week_profile.R
no_license
Taros007/ubiqum_projects
R
false
false
1,807
r
## Load libraries ================================ library(tidyverse) library(lubridate) library(forecast) ## Load preprocessed data ======================== powerData <- readRDS('./output/powerData.RDS') ## Create data per week per submeter ========================== powerWeek <- powerData %>% group_by(year, month, day, week, weekday, day) %>% summarise('meter1' = mean(Sub_metering_1), 'meter2' = mean(Sub_metering_2), 'meter3' = mean(Sub_metering_3), 'meternon' = mean(Sub_unnumbered)) %>% mutate(date = as_date(paste(year, month, day, sep="-"), "%Y-%m-%d"), totalpower = meter1 + meter2 + meter3 + meternon) ggplot(powerWeek, aes(x = date, y = meter1)) + geom_line() powerWeekTS <- powerData %>% filter(year == 2008, week %in% c(2,3,4)) %>% mutate(date = as_date(paste(year, month, day, sep="-"), "%Y-%m-%d"), totalpower = Global_active_power * 1000 / 60) powerWeekTS <- ts(powerWeek$totalpower, frequency = 365, start = c(powerWeek$year[1], powerWeek$day[1])) library(doParallel) ## Prepare clusters ================================= cl <- makeCluster(3) registerDoParallel(cl) taylor <- msts(powerWeek$totalpower, seasonal.periods=c(60, 1440, 10080)) taylor.fit <- tbats(taylor) plot(forecast(taylor.fit, h = 1440)) test <- HoltWinters(powerWeekTS) plot(test, ylim = c(0, 25)) forecasttest <- forecast(test, h = 90) plot(forecasttest) # # y <- head(powerWeek, 1000) # View(y) decomp <- stl(powerWeekTS, s.window="periodic") ap.sa <- seasadj(decomp) autoplot(cbind(powerWeekTS, SeasonallyAdjusted=ap.sa)) + xlab("Year") + ylab("Number of passengers (thousands)") ap.sa %>% diff() %>% ggtsdisplay(main="") fit <- auto.arima(ap.sa) checkresiduals(fit) autoplot(forecast(fit))
library(tm) library(topicmodels) dtm<-readRDS("dtm.rds") idx<-unlist(lapply(which(grepl('clinton', dtm$dimnames$Terms)), function(x) which(dtm$j %in% x))) dtm.clinton <- dtm[ sort(unique(dtm$i[idx])),] dtm.clinton <- removeSparseTerms(dtm.clinton,0.995) ui.clinton = unique(dtm.clinton$i) dtm.clinton = dtm.clinton[ui.clinton,] idx<-unlist(lapply(which(grepl('trump', dtm$dimnames$Terms)), function(x) which(dtm$j %in% x))) dtm.trump <- dtm[ sort(unique(dtm$i[idx])),] dtm.trump <- removeSparseTerms(dtm.trump,0.995) ui.trump = unique(dtm.trump$i) dtm.trump = dtm.trump[ui.trump,] #Set parameters for Gibbs sampling burnin <- 4000 iter <- 6000 thin <- 20 seed <-c(1:10) nstart <- 10 best <- TRUE #Number of topics k <- 20 # Clinton lda.clinton <-LDA(dtm.clinton,k, method="Gibbs", control=list(nstart=nstart, seed = seed, best=best, burnin = burnin, iter = iter, thin=thin)) clinton.topics <- as.matrix(topics(lda.clinton)) clinton.terms <- as.matrix(terms(lda.clinton,20)) # Trump lda.trump <-LDA(dtm.trump,k, method="Gibbs", control=list(nstart=nstart, seed = seed, best=best, burnin = burnin, iter = iter, thin=thin)) trump.topics <- as.matrix(topics(lda.trump)) trump.terms <- as.matrix(terms(lda.trump,20)) int.hist = function(x,ylab="Frequency",...) { barplot(table(factor(x,levels=min(x):max(x))),space=0,xaxt="n",ylab=ylab,...);axis(1) }
/clinton_trump_analysis2.R
no_license
milkha/FBElec16
R
false
false
1,452
r
library(tm) library(topicmodels) dtm<-readRDS("dtm.rds") idx<-unlist(lapply(which(grepl('clinton', dtm$dimnames$Terms)), function(x) which(dtm$j %in% x))) dtm.clinton <- dtm[ sort(unique(dtm$i[idx])),] dtm.clinton <- removeSparseTerms(dtm.clinton,0.995) ui.clinton = unique(dtm.clinton$i) dtm.clinton = dtm.clinton[ui.clinton,] idx<-unlist(lapply(which(grepl('trump', dtm$dimnames$Terms)), function(x) which(dtm$j %in% x))) dtm.trump <- dtm[ sort(unique(dtm$i[idx])),] dtm.trump <- removeSparseTerms(dtm.trump,0.995) ui.trump = unique(dtm.trump$i) dtm.trump = dtm.trump[ui.trump,] #Set parameters for Gibbs sampling burnin <- 4000 iter <- 6000 thin <- 20 seed <-c(1:10) nstart <- 10 best <- TRUE #Number of topics k <- 20 # Clinton lda.clinton <-LDA(dtm.clinton,k, method="Gibbs", control=list(nstart=nstart, seed = seed, best=best, burnin = burnin, iter = iter, thin=thin)) clinton.topics <- as.matrix(topics(lda.clinton)) clinton.terms <- as.matrix(terms(lda.clinton,20)) # Trump lda.trump <-LDA(dtm.trump,k, method="Gibbs", control=list(nstart=nstart, seed = seed, best=best, burnin = burnin, iter = iter, thin=thin)) trump.topics <- as.matrix(topics(lda.trump)) trump.terms <- as.matrix(terms(lda.trump,20)) int.hist = function(x,ylab="Frequency",...) { barplot(table(factor(x,levels=min(x):max(x))),space=0,xaxt="n",ylab=ylab,...);axis(1) }
#Tracks EON_ Figure 1B, Figure 2A, Fig S5A ##Read Data EONs timewtmengod<- read.csv2("wtmengod_EONSkin.csv", header=TRUE) View(timewtmengod) subtabletEON = rbind( timewtmengod[grep( "EON", timewtmengod[, 1] ), ]) View(subtabletEON) ## Read data Control WTEON_Dis = rbind( subtabletEON[grep( "wt", subtabletEON[, 1] ), ]) View(WTEON_Dis) ## Transform values for A, C & E, and store all values for col 3 in sym sym = list(); for( n in c( "1wt_A1EON", "1wt_A2EON", "2wt_A1EON", "2wt_A2EON", "3wt_A1EON", "3wt_A2EON", "1wt_C1EON", "1wt_C2EON", "2wt_C1EON", "2wt_C2EON", "3wt_C1EON", "3wt_C2EON", "1wt_E1EON", "1wt_E2EON", "2wt_E1EON", "2wt_E2EON", "3wt_E1EON", "3wt_E2EON" ) ){ sym[[n]] <- WTEON_Dis [WTEON_Dis$Track.name == n, 3]*-1; } for( n in c( "1wt_B1EON", "1wt_B2EON", "2wt_B1EON", "2wt_B2EON", "3wt_B1EON", "3wt_B2EON", "1wt_D1EON", "1wt_D2EON", "2wt_D1EON", "2wt_D2EON", "3wt_D1EON", "3wt_D2EON", "1wt_F1EON", "1wt_F2EON", "2wt_F1EON", "2wt_F2EON", "3wt_F1EON", "3wt_F2EON" ) ){ sym[[n]] <- WTEON_Dis [WTEON_Dis$Track.name == n, 3]; } ## Define plot function addPoints = function( tabToPlot, varToSelect = WTEON_Dis[WTEON_Dis$Track.name == "1wt_A1EON", 2], Thr = 37, col1 = 1, col2 = 8 ){ points( tabToPlot[varToSelect < Thr, 1:2], col = col1, lwd = 3, type = 'l' ); points( tabToPlot[varToSelect > Thr - 1, 1:2], col = col2, lwd = 3, type = 'l' ); } ## Plot1 : Anterior Plot WT - Fig 1B plot( 1, type = "n", xlim = c( -100, 100 ), ylim = c( -100, 120 ), main = "", xlab = "", ylab = "" ); for( n in c( "1wt_A1EON", "1wt_A2EON", "1wt_B1EON", "1wt_B2EON", "2wt_A1EON", "2wt_A2EON", "2wt_B1EON", "2wt_B2EON", "3wt_A1EON", "3wt_A2EON", "3wt_B1EON", "3wt_B2EON" ) ){ addPoints( tabToPlot = cbind( sym[[n]], WTEON_Dis[WTEON_Dis$Track.name == n, 4] ) ); } abline( h = 0, col = 1 ); abline( v = 0, col = 1 ); ## Plot2 : Middel WT- Fig 1B plot( 1, type = "n", xlim = c( -100, 100 ), ylim = c( -100, 120 ), main = "", xlab = "", ylab = "" ); for( n in c( "1wt_C1EON", "1wt_C2EON", "1wt_D1EON", 
"1wt_D2EON", "2wt_C1EON", "2wt_C2EON", "2wt_D1EON", "2wt_D2EON", "3wt_C1EON", "3wt_C2EON", "3wt_D1EON", "3wt_D2EON" ) ){ addPoints( tabToPlot = cbind( sym[[n]], WTEON_Dis[WTEON_Dis$Track.name == n, 4] ) ); } abline( h = 0, col = 1 ); abline( v = 0, col = 1 ); ## Plot3 : Posterior WT- Fig 1B plot( 1, type = "n", xlim = c( -100, 100 ), ylim = c( -100, 120 ), main = "", xlab = "", ylab = "" ); for( n in c( "1wt_E1EON", "1wt_E2EON", "1wt_F1EON", "1wt_F2EON", "2wt_E1EON", "2wt_E2EON", "2wt_F1EON", "2wt_F2EON", "3wt_E1EON", "3wt_E2EON", "3wt_F1EON", "3wt_F2EON" ) ){ addPoints( tabToPlot = cbind( sym[[n]], WTEON_Dis[WTEON_Dis$Track.name == n, 4] ) ); } abline( h = 0, col = 1 ); abline( v = 0, col = 1 ); #### Read data neurog1mut ngEON_Dis = rbind( subtabletEON[grep( "ng", subtabletEON[, 1] ), ]) View(ngEON_Dis) ## Transform values for A, C & E, and store all values for col 3 in ngsym ngsym = list(); for( n in c( "1ng_A1EON", "2ng_A1EON", "2ng_A2EON", "3ng_A1EON", "3ng_A2EON", "1ng_C1EON", "2ng_C1EON", "2ng_C2EON", "3ng_C1EON", "3ng_C2EON", "1ng_E1EON", "2ng_E1EON", "2ng_E2EON","2ng_F1EON", "2ng_F2EON" ) ){ ngsym[[n]] <- ngEON_Dis [ngEON_Dis$Track.name == n, 3]*-1; } for( n in c( "1ng_B1EON", "1ng_A2EON", "1ng_B2EON", "2ng_B1EON", "2ng_B2EON", "3ng_B1EON", "3ng_B2EON", "1ng_C2EON", "1ng_D1EON", "1ng_D2EON", "2ng_D1EON", "2ng_D2EON", "3ng_D1EON", "3ng_D2EON", "1ng_E2EON", "1ng_F1EON", "1ng_F2EON", "3ng_E1EON", "3ng_E2EON","3ng_F1EON", "3ng_F2EON" ) ){ ngsym[[n]] <- ngEON_Dis [ngEON_Dis$Track.name == n, 3]; } ## Define plot function addPoints = function( tabngToPlot, varngToSelect = ngEON_Dis[ngEON_Dis$Track.name == "1ng_A1EON", 2], Thr = 37, col1 = 6, col2 = 8 ){ points( tabngToPlot[varngToSelect < Thr, 1:2], col = col1, lwd = 3, type = 'l' ); points( tabngToPlot[varngToSelect > Thr - 1, 1:2], col = col2, lwd = 3, type = 'l' ); } ## Plot1 : Anterior Plot ng- Fig 1B plot( 1, type = "n", xlim = c( -100, 100 ), ylim = c( -100, 120 ), main = "", xlab = "", ylab = "" ); for( n 
in c( "1ng_A1EON", "1ng_A2EON", "1ng_B1EON", "1ng_B2EON", "2ng_A1EON", "2ng_A2EON", "2ng_B1EON", "2ng_B2EON", "3ng_A1EON", "3ng_A2EON", "3ng_B1EON", "3ng_B2EON" ) ){ addPoints( tabngToPlot = cbind( ngsym[[n]], ngEON_Dis[ngEON_Dis$Track.name == n, 4] ) ); } abline( h = 0, col = 1 ); abline( v = 0, col = 1 ); ## Plot2 : Middel ng- Fig 1B plot( 1, type = "n", xlim = c( -100, 100 ), ylim = c( -100, 120 ), main = "", xlab = "", ylab = "" ); for( n in c( "1ng_C1EON", "1ng_C2EON", "1ng_D1EON", "1ng_D2EON", "2ng_C1EON", "2ng_C2EON", "2ng_D1EON", "2ng_D2EON", "3ng_C1EON", "3ng_C2EON", "3ng_D1EON", "3ng_D2EON" ) ){ addPoints( tabngToPlot = cbind( ngsym[[n]], ngEON_Dis[ngEON_Dis$Track.name == n, 4] ) ); } abline( h = 0, col = 1 ); abline( v = 0, col = 1 ); ## Plot3 : Posterior ng- Fig 1B plot( 1, type = "n", xlim = c( -100, 100 ), ylim = c( -100, 120 ), main = "", xlab = "", ylab = "" ); for( n in c( "1ng_E1EON", "1ng_E2EON", "1ng_F1EON", "1ng_F2EON", "2ng_E1EON", "2ng_E2EON", "2ng_F1EON", "2ng_F2EON", "3ng_E1EON", "3ng_E2EON", "3ng_F1EON", "3ng_F2EON" ) ){ addPoints( tabngToPlot = cbind( ngsym[[n]], ngEON_Dis[ngEON_Dis$Track.name == n, 4] ) ); } abline( h = 0, col = 1 ); abline( v = 0, col = 1 ); ###Figure 2A & Figure S5A #### Read data ody mutant odEON_Dis = rbind( subtabletEON[grep( "od", subtabletEON[, 1] ), ]) View(odEON_Dis) ## Transform values and store all values for col 3 in odsym odsym = list(); for( n in c( "1od_A1EON", "1od_A2EON", "2od_A1EON", "2od_A2EON","2od_B1EON", "3od_A1EON", "3od_A2EON", "1od_C1EON","1od_C2EON", "1od_D1EON", "2od_C1EON", "2od_C2EON", "3od_C1EON", "3od_C2EON", "1od_E1EON","1od_E2EON", "2od_E1EON", "2od_E2EON","3od_E1EON", "3od_E2EON" ) ){ odsym[[n]] <- odEON_Dis [odEON_Dis$Track.name == n, 3]*-1; } for( n in c( "1od_B1EON", "1od_B2EON", "2od_B2EON", "3od_B1EON", "3od_B2EON", "1od_D2EON", "2od_D1EON", "2od_D2EON", "3od_D1EON", "3od_D2EON", "1od_F1EON", "1od_F2EON", "2od_F1EON", "2od_F2EON", "3od_F1EON", "3od_F2EON" ) ){ odsym[[n]] <- 
odEON_Dis [odEON_Dis$Track.name == n, 3]; } ## Define plot function addPoints = function( tabodToPlot, varodToSelect = odEON_Dis[odEON_Dis$Track.name == "1od_A1EON", 2], Thr = 37, col1 = 4, col2 = 8 ){ points( tabodToPlot[varodToSelect < Thr, 1:2], col = col1, lwd = 3, type = 'l' ); points( tabodToPlot[varodToSelect > Thr - 1, 1:2], col = col2, lwd = 3, type = 'l' ); } ## Plot1 : Anterior Plot od - Figure 2A plot( 1, type = "n", xlim = c( -100, 100 ), ylim = c( -100, 120 ), main = "", xlab = "", ylab = "" ); for( n in c( "1od_A1EON", "1od_A2EON", "1od_B1EON", "1od_B2EON", "2od_A1EON", "2od_A2EON", "2od_B1EON", "2od_B2EON", "3od_A1EON", "3od_A2EON", "3od_B1EON", "3od_B2EON" ) ){ addPoints( tabodToPlot = cbind( odsym[[n]], odEON_Dis[odEON_Dis$Track.name == n, 4] ) ); } abline( h = 0, col = 1 ); abline( v = 0, col = 1 ); ## Plot2 : Middel od - Fig S5A plot( 1, type = "n", xlim = c( -100, 100 ), ylim = c( -100, 120 ), main = "", xlab = "", ylab = "" ); for( n in c( "1od_C1EON", "1od_C2EON", "1od_D1EON", "1od_D2EON", "2od_C1EON", "2od_C2EON", "2od_D1EON", "2od_D2EON", "3od_C1EON", "3od_C2EON", "3od_D1EON", "3od_D2EON" ) ){ addPoints( tabodToPlot = cbind( odsym[[n]], odEON_Dis[odEON_Dis$Track.name == n, 4] ) ); } abline( h = 0, col = 1 ); abline( v = 0, col = 1 ); ## Plot3 : Posterior od - Fig S5A plot( 1, type = "n", xlim = c( -100, 100 ), ylim = c( -100, 120 ), main = "", xlab = "", ylab = "" ); for( n in c( "1od_E1EON", "1od_E2EON", "1od_F1EON", "1od_F2EON", "2od_E1EON", "2od_E2EON", "2od_F1EON", "2od_F2EON", "3od_E1EON", "3od_E2EON", "3od_F1EON", "3od_F2EON" ) ){ addPoints( tabodToPlot = cbind( odsym[[n]], odEON_Dis[odEON_Dis$Track.name == n, 4] ) ); } abline( h = 0, col = 1 ); abline( v = 0, col = 1 ); #### Read data medusa (me) mutant meEON_Dis = rbind( subtabletEON[grep( "me", subtabletEON[, 1] ), ]) View(meEON_Dis) ## Transform values and store all values for col 3 in mesym mesym = list(); for( n in c( "1me_B1EON", "1me_B2EON", "2me_B2EON", "2me_B1EON", 
"3me_B1EON", "3me_B2EON", "2me_D1EON", "2me_D2EON", "3me_D1EON", "3me_D2EON", "1me_E1EON","1me_E2EON","1me_F2EON","2me_F1EON", "2me_F2EON", "3me_F1EON", "3me_F2EON" ) ){ mesym[[n]] <- meEON_Dis [meEON_Dis$Track.name == n, 3]*-1; } for( n in c( "1me_A1EON", "1me_A2EON", "2me_A1EON", "2me_A2EON","3me_A1EON", "3me_A2EON", "1me_C1EON","1me_C2EON", "1me_D1EON", "1me_D2EON", "2me_C1EON", "2me_C2EON", "3me_C1EON", "3me_C2EON", "1me_F1EON", "2me_E1EON", "2me_E2EON", "3me_E1EON", "3me_E2EON" ) ){ mesym[[n]] <- meEON_Dis [meEON_Dis$Track.name == n, 3]; } ## Define plot function addPoints = function( tabmeToPlot, varmeToSelect = meEON_Dis[meEON_Dis$Track.name == "1me_A1EON", 2], Thr = 37, col1 = 3, col2 = 8 ){ points( tabmeToPlot[varmeToSelect < Thr, 1:2], col = col1, lwd = 3, type = 'l' ); points( tabmeToPlot[varmeToSelect > Thr - 1, 1:2], col = col2, lwd = 3, type = 'l' ); } ## Plot1 : Anterior Plot me- Figure 2A plot( 1, type = "n", xlim = c( -100, 100 ), ylim = c( -100, 120 ), main = "", xlab = "", ylab = "" ); for( n in c( "1me_A1EON", "1me_A2EON", "1me_B1EON", "1me_B2EON", "2me_A1EON", "2me_A2EON", "2me_B1EON", "2me_B2EON", "3me_A1EON", "3me_A2EON", "3me_B1EON", "3me_B2EON" ) ){ addPoints( tabmeToPlot = cbind( mesym[[n]], meEON_Dis[meEON_Dis$Track.name == n, 4] ) ); } abline( h = 0, col = 1 ); abline( v = 0, col = 1 ); ## Plot2 : Middel me - Fig S5A plot( 1, type = "n", xlim = c( -100, 100 ), ylim = c( -100, 120 ), main = "", xlab = "", ylab = "" ); for( n in c( "1me_C1EON", "1me_C2EON", "1me_D1EON", "1me_D2EON", "2me_C1EON", "2me_C2EON", "2me_D1EON", "2me_D2EON", "3me_C1EON", "3me_C2EON", "3me_D1EON", "3me_D2EON" ) ){ addPoints( tabmeToPlot = cbind( mesym[[n]], meEON_Dis[meEON_Dis$Track.name == n, 4] ) ); } abline( h = 0, col = 1 ); abline( v = 0, col = 1 ); ## Plot3 : Posterior me - Fig S5A plot( 1, type = "n", xlim = c( -100, 100 ), ylim = c( -100, 120 ), main = "", xlab = "", ylab = "" ); for( n in c( "1me_E1EON", "1me_E2EON", "1me_F1EON", "1me_F2EON", "2me_E1EON", 
"2me_E2EON", "2me_F1EON", "2me_F2EON", "3me_E1EON", "3me_E2EON", "3me_F1EON", "3me_F2EON" ) ){ addPoints( tabmeToPlot = cbind( mesym[[n]], meEON_Dis[meEON_Dis$Track.name == n, 4] ) ); } abline( h = 0, col = 1 ); abline( v = 0, col = 1 ); ######@### Figure 3C ## Define plot function WT Ant EON addPoints = function( tabToPlot, varToSelect = WTEON_Dis[WTEON_Dis$Track.name == "1wt_A1EON", 2], Thr = 37, col1 = 1, col2 = 8 ){ points( tabToPlot[varToSelect < Thr, 1:2], col = col1, lwd = 3, type = 'l' ); points( tabToPlot[varToSelect > Thr - 1, 1:2], col = col2, lwd = 3, type = 'l' ); } ## Plot1 : Anterior Plot WT - Fig 1B plot( 1, type = "n", xlim = c( -100, 100 ), ylim = c( -100, 120 ), main = "", xlab = "", ylab = "" ); for( n in c( "1wt_A1EON", "1wt_A2EON", "1wt_B1EON", "1wt_B2EON", "2wt_A1EON", "2wt_A2EON", "2wt_B1EON", "2wt_B2EON", "3wt_A1EON", "3wt_A2EON", "3wt_B1EON", "3wt_B2EON" ) ){ addPoints( tabToPlot = cbind( sym[[n]], WTEON_Dis[WTEON_Dis$Track.name == n, 4] ) ); } abline( h = 0, col = 1 ); abline( v = 0, col = 1 ); ## Define plot function neurog1 Ant EON addPoints = function( tabngToPlot, varngToSelect = ngEON_Dis[ngEON_Dis$Track.name == "1ng_A1EON", 2], Thr = 37, col1 = 6, col2 = 8 ){ points( tabngToPlot[varngToSelect < Thr, 1:2], col = col1, lwd = 3, type = 'l' ); points( tabngToPlot[varngToSelect > Thr - 1, 1:2], col = col2, lwd = 3, type = 'l' ); } ## Plot2 : Anterior Plot ng- Fig 3C plot( 1, type = "n", xlim = c( -100, 100 ), ylim = c( -100, 120 ), main = "", xlab = "", ylab = "" ); for( n in c( "1ng_A1EON", "1ng_A2EON", "1ng_B1EON", "1ng_B2EON", "2ng_A1EON", "2ng_A2EON", "2ng_B1EON", "2ng_B2EON", "3ng_A1EON", "3ng_A2EON", "3ng_B1EON", "3ng_B2EON" ) ){ addPoints( tabngToPlot = cbind( ngsym[[n]], ngEON_Dis[ngEON_Dis$Track.name == n, 4] ) ); } abline( h = 0, col = 1 ); abline( v = 0, col = 1 ); # Read data neurog1 mutant + cxcr4b = rescue resEON_Dis <-read.csv2("res_EON_Dis.csv", header=TRUE ) View(resEON_Dis) ## Transform values and store all values for 
col 3 in ressym ressym = list(); for( n in c( "1res_B1EON", "1res_B3EON" ) ){ ressym[[n]] <- resEON_Dis [resEON_Dis$Track.name == n, 3]*-1; } for( n in c( "1res_A1EON", "1res_A4EON", "1res_A5EON", "1res_B2EON", "2res_A1EON", "2res_A2EON","3res_B1EON", "4res_B1EON", "4res_B2EON", "4res_B3EON") ){ ressym[[n]] <- resEON_Dis [resEON_Dis$Track.name == n, 3]; } ## Define plot function rescue ANT EON addPoints = function( tabresToPlot, varresToSelect = resEON_Dis[resEON_Dis$Track.name == "1res_A1EON", 2], Thr = 37, col1 = 5, col2 = 8 ){ points( tabresToPlot[varresToSelect < Thr, 1:2], col = col1, lwd = 3, type = 'l' ); points( tabresToPlot[varresToSelect > Thr - 1, 1:2], col = col2, lwd = 3, type = 'l' ); } ## Plot3 : Anterior Plot Res - Figure 3C plot( 1, type = "n", xlim = c( -100, 100 ), ylim = c( -100, 120 ), main = "", xlab = "", ylab = "" ); for( n in c( "1res_A1EON", "1res_A4EON", "1res_A5EON","1res_B1EON", "1res_B2EON", "1res_B3EON" , "2res_A1EON", "2res_A2EON","3res_B1EON", "4res_B1EON", "4res_B2EON", "4res_B3EON") ){ addPoints( tabresToPlot = cbind( ressym[[n]], resEON_Dis[resEON_Dis$Track.name == n, 4] ) ); } abline( h = 0, col = 1 ); abline( v = 0, col = 1 ); ######Read data cxcr4b surexpression surEON_Dis <-read.csv2("sur_EON_Dis.csv", header=TRUE ) View(surEON_Dis) ## Transform value and store all values in sursym sursym = list(); for( n in c( "1sur_B1EON", "1sur_B2EON", "1sur_B3EON", "3sur_A1EON","3sur_A2EON", "3sur_A3EON", "3sur_A4EON" ) ){ sursym[[n]] <- surEON_Dis [surEON_Dis$Track.name == n, 3]*-1; } for( n in c( "1sur_A1EON", "2sur_A1EON", "2sur_A2EON", "2sur_A4EON", "4sur_B1EON", "4sur_B2EON" ) ){ sursym[[n]] <- surEON_Dis [surEON_Dis$Track.name == n, 3]; } ## Define plot function overexpression plot 4 EON addPoints = function( tabsurToPlot, varsurToSelect = surEON_Dis[surEON_Dis$Track.name == "1sur_A1EON", 2], Thr = 37, col1 = "lightblue", col2 = 8 ){ points( tabsurToPlot[varsurToSelect < Thr, 1:2], col = col1, lwd = 3, type = 'l' ); points( 
tabsurToPlot[varsurToSelect > Thr - 1, 1:2], col = col2, lwd = 3, type = 'l' ); } ## Plot4 : Anterior Plot sur - Figure 3C plot( 1, type = "n", xlim = c( -100, 100 ), ylim = c( -100, 120 ), main = "", xlab = "", ylab = "" ); for( n in c( "1sur_A1EON", "1sur_B1EON", "1sur_B2EON", "1sur_B3EON", "2sur_A1EON", "2sur_A2EON", "2sur_A4EON", "3sur_A1EON", "3sur_A2EON", "3sur_A3EON", "3sur_A4EON", "4sur_B1EON", "4sur_B2EON" ) ){ addPoints( tabsurToPlot = cbind( sursym[[n]], surEON_Dis[surEON_Dis$Track.name == n, 4] ) ); } abline( h = 0, col = 1 ); abline( v = 0, col = 1 ); ####
/Figure1B_2A_S5A_3C_EONs_control_neurog1_ody_me_res_sur_Track.R
no_license
BladerLab/Aguillon_2020
R
false
false
14,677
r
## EON track plots -- Figure 1B, Figure 2A, Figure S5A, Figure 3C.
## Genotypes: control (wt), neurog1 (ng), ody (od), medusa (me) mutants,
## neurog1 + cxcr4b rescue (res), and cxcr4b overexpression (sur).
##
## Refactor note: the original script repeated the same mirror/plot boilerplate
## and redefined addPoints() once per genotype. That logic now lives in two
## helpers (mirror_tracks, plot_tracks) plus track-name generators; the plotted
## output is unchanged.

## Read data: EON tracks for wt / me / ng / od.
timewtmengod <- read.csv2("wtmengod_EONSkin.csv", header = TRUE)
View(timewtmengod)
subtabletEON <- rbind(timewtmengod[grep("EON", timewtmengod[, 1]), ])
View(subtabletEON)

## Helpers -----------------------------------------------------------------

# Mirror the displacement coordinate (column 3) so tracks from both sides of
# the embryo overlay: names in `neg` are multiplied by -1, names in `pos` are
# kept as-is. Returns a list keyed by track name.
mirror_tracks <- function(dat, neg, pos) {
  sym <- list()
  for (n in neg) {
    sym[[n]] <- dat[dat$Track.name == n, 3] * -1
  }
  for (n in pos) {
    sym[[n]] <- dat[dat$Track.name == n, 3]
  }
  sym
}

# Draw one panel: empty frame, then each track in `tracks` as a line,
# coloured col1 before time threshold Thr and col2 from Thr - 1 onward.
# NOTE(review): as in the original script, `var_select` is the time column of
# ONE fixed reference track and is reused to subset EVERY track -- confirm all
# tracks share that time base (a length mismatch would silently recycle).
plot_tracks <- function(dat, sym, tracks, var_select, col1, col2 = 8, Thr = 37) {
  plot(1, type = "n", xlim = c(-100, 100), ylim = c(-100, 120),
       main = "", xlab = "", ylab = "")
  for (n in tracks) {
    tab <- cbind(sym[[n]], dat[dat$Track.name == n, 4])
    points(tab[var_select < Thr, 1:2], col = col1, lwd = 3, type = 'l')
    points(tab[var_select > Thr - 1, 1:2], col = col2, lwd = 3, type = 'l')
  }
  abline(h = 0, col = 1)
  abline(v = 0, col = 1)
}

# Track names follow "<embryo><genotype>_<region letter><track>EON" with
# embryos 1:3 and tracks 1:2. Build the 12 names for one region's letter pair
# (A/B = anterior, C/D = middle, E/F = posterior).
region_tracks <- function(genotype, letter_pair) {
  unlist(lapply(1:3, function(i) {
    unlist(lapply(letter_pair, function(L) paste0(i, genotype, "_", L, 1:2, "EON")))
  }))
}

# Same naming scheme: all six names per letter, for a set of letters; used for
# the regular wild-type mirror lists (A/C/E mirrored, B/D/F kept).
letter_tracks <- function(genotype, letters) {
  unlist(lapply(letters, function(L) {
    unlist(lapply(1:3, function(i) paste0(i, genotype, "_", L, 1:2, "EON")))
  }))
}

## Control (wt) -- Figure 1B ----------------------------------------------
WTEON_Dis <- rbind(subtabletEON[grep("wt", subtabletEON[, 1]), ])
View(WTEON_Dis)

sym <- mirror_tracks(WTEON_Dis,
                     neg = letter_tracks("wt", c("A", "C", "E")),
                     pos = letter_tracks("wt", c("B", "D", "F")))
wt_ref <- WTEON_Dis[WTEON_Dis$Track.name == "1wt_A1EON", 2]

## Plot1 anterior, Plot2 middle, Plot3 posterior (all Fig 1B; col1 = black).
plot_tracks(WTEON_Dis, sym, region_tracks("wt", c("A", "B")), wt_ref, col1 = 1)
plot_tracks(WTEON_Dis, sym, region_tracks("wt", c("C", "D")), wt_ref, col1 = 1)
plot_tracks(WTEON_Dis, sym, region_tracks("wt", c("E", "F")), wt_ref, col1 = 1)

## neurog1 mutant (ng) -- Figure 1B ---------------------------------------
ngEON_Dis <- rbind(subtabletEON[grep("ng", subtabletEON[, 1]), ])
View(ngEON_Dis)

# Mirror lists are irregular for the mutants; kept verbatim from the data.
ngsym <- mirror_tracks(
  ngEON_Dis,
  neg = c("1ng_A1EON", "2ng_A1EON", "2ng_A2EON", "3ng_A1EON", "3ng_A2EON",
          "1ng_C1EON", "2ng_C1EON", "2ng_C2EON", "3ng_C1EON", "3ng_C2EON",
          "1ng_E1EON", "2ng_E1EON", "2ng_E2EON", "2ng_F1EON", "2ng_F2EON"),
  pos = c("1ng_B1EON", "1ng_A2EON", "1ng_B2EON", "2ng_B1EON", "2ng_B2EON",
          "3ng_B1EON", "3ng_B2EON", "1ng_C2EON", "1ng_D1EON", "1ng_D2EON",
          "2ng_D1EON", "2ng_D2EON", "3ng_D1EON", "3ng_D2EON", "1ng_E2EON",
          "1ng_F1EON", "1ng_F2EON", "3ng_E1EON", "3ng_E2EON", "3ng_F1EON",
          "3ng_F2EON"))
ng_ref <- ngEON_Dis[ngEON_Dis$Track.name == "1ng_A1EON", 2]

plot_tracks(ngEON_Dis, ngsym, region_tracks("ng", c("A", "B")), ng_ref, col1 = 6)
plot_tracks(ngEON_Dis, ngsym, region_tracks("ng", c("C", "D")), ng_ref, col1 = 6)
plot_tracks(ngEON_Dis, ngsym, region_tracks("ng", c("E", "F")), ng_ref, col1 = 6)

## ody mutant (od) -- Figure 2A (anterior) & Fig S5A (middle/posterior) ----
odEON_Dis <- rbind(subtabletEON[grep("od", subtabletEON[, 1]), ])
View(odEON_Dis)

odsym <- mirror_tracks(
  odEON_Dis,
  neg = c("1od_A1EON", "1od_A2EON", "2od_A1EON", "2od_A2EON", "2od_B1EON",
          "3od_A1EON", "3od_A2EON", "1od_C1EON", "1od_C2EON", "1od_D1EON",
          "2od_C1EON", "2od_C2EON", "3od_C1EON", "3od_C2EON", "1od_E1EON",
          "1od_E2EON", "2od_E1EON", "2od_E2EON", "3od_E1EON", "3od_E2EON"),
  pos = c("1od_B1EON", "1od_B2EON", "2od_B2EON", "3od_B1EON", "3od_B2EON",
          "1od_D2EON", "2od_D1EON", "2od_D2EON", "3od_D1EON", "3od_D2EON",
          "1od_F1EON", "1od_F2EON", "2od_F1EON", "2od_F2EON", "3od_F1EON",
          "3od_F2EON"))
od_ref <- odEON_Dis[odEON_Dis$Track.name == "1od_A1EON", 2]

plot_tracks(odEON_Dis, odsym, region_tracks("od", c("A", "B")), od_ref, col1 = 4)  # Fig 2A
plot_tracks(odEON_Dis, odsym, region_tracks("od", c("C", "D")), od_ref, col1 = 4)  # Fig S5A
plot_tracks(odEON_Dis, odsym, region_tracks("od", c("E", "F")), od_ref, col1 = 4)  # Fig S5A

## medusa mutant (me) -- Figure 2A (anterior) & Fig S5A (middle/posterior) -
meEON_Dis <- rbind(subtabletEON[grep("me", subtabletEON[, 1]), ])
View(meEON_Dis)

mesym <- mirror_tracks(
  meEON_Dis,
  neg = c("1me_B1EON", "1me_B2EON", "2me_B2EON", "2me_B1EON", "3me_B1EON",
          "3me_B2EON", "2me_D1EON", "2me_D2EON", "3me_D1EON", "3me_D2EON",
          "1me_E1EON", "1me_E2EON", "1me_F2EON", "2me_F1EON", "2me_F2EON",
          "3me_F1EON", "3me_F2EON"),
  pos = c("1me_A1EON", "1me_A2EON", "2me_A1EON", "2me_A2EON", "3me_A1EON",
          "3me_A2EON", "1me_C1EON", "1me_C2EON", "1me_D1EON", "1me_D2EON",
          "2me_C1EON", "2me_C2EON", "3me_C1EON", "3me_C2EON", "1me_F1EON",
          "2me_E1EON", "2me_E2EON", "3me_E1EON", "3me_E2EON"))
me_ref <- meEON_Dis[meEON_Dis$Track.name == "1me_A1EON", 2]

plot_tracks(meEON_Dis, mesym, region_tracks("me", c("A", "B")), me_ref, col1 = 3)  # Fig 2A
plot_tracks(meEON_Dis, mesym, region_tracks("me", c("C", "D")), me_ref, col1 = 3)  # Fig S5A
plot_tracks(meEON_Dis, mesym, region_tracks("me", c("E", "F")), me_ref, col1 = 3)  # Fig S5A

## Figure 3C ---------------------------------------------------------------
# Anterior panels again for wt and ng (same data and colours as above).
plot_tracks(WTEON_Dis, sym, region_tracks("wt", c("A", "B")), wt_ref, col1 = 1)
plot_tracks(ngEON_Dis, ngsym, region_tracks("ng", c("A", "B")), ng_ref, col1 = 6)

# neurog1 mutant + cxcr4b = rescue.
resEON_Dis <- read.csv2("res_EON_Dis.csv", header = TRUE)
View(resEON_Dis)
ressym <- mirror_tracks(
  resEON_Dis,
  neg = c("1res_B1EON", "1res_B3EON"),
  pos = c("1res_A1EON", "1res_A4EON", "1res_A5EON", "1res_B2EON", "2res_A1EON",
          "2res_A2EON", "3res_B1EON", "4res_B1EON", "4res_B2EON", "4res_B3EON"))
res_ref <- resEON_Dis[resEON_Dis$Track.name == "1res_A1EON", 2]
plot_tracks(resEON_Dis, ressym,
            c("1res_A1EON", "1res_A4EON", "1res_A5EON", "1res_B1EON",
              "1res_B2EON", "1res_B3EON", "2res_A1EON", "2res_A2EON",
              "3res_B1EON", "4res_B1EON", "4res_B2EON", "4res_B3EON"),
            res_ref, col1 = 5)

# cxcr4b overexpression.
surEON_Dis <- read.csv2("sur_EON_Dis.csv", header = TRUE)
View(surEON_Dis)
sursym <- mirror_tracks(
  surEON_Dis,
  neg = c("1sur_B1EON", "1sur_B2EON", "1sur_B3EON", "3sur_A1EON", "3sur_A2EON",
          "3sur_A3EON", "3sur_A4EON"),
  pos = c("1sur_A1EON", "2sur_A1EON", "2sur_A2EON", "2sur_A4EON", "4sur_B1EON",
          "4sur_B2EON"))
sur_ref <- surEON_Dis[surEON_Dis$Track.name == "1sur_A1EON", 2]
plot_tracks(surEON_Dis, sursym,
            c("1sur_A1EON", "1sur_B1EON", "1sur_B2EON", "1sur_B3EON",
              "2sur_A1EON", "2sur_A2EON", "2sur_A4EON", "3sur_A1EON",
              "3sur_A2EON", "3sur_A3EON", "3sur_A4EON", "4sur_B1EON",
              "4sur_B2EON"),
            sur_ref, col1 = "lightblue")
#' Pixel_shuffle
#'
#' Rearranges elements in a tensor of shape \eqn{(*, C \times r^2, H, W)} to a
#' tensor of shape \eqn{(*, C, H \times r, W \times r)}, where \eqn{r} is the
#' upscale factor (the "sub-pixel convolution" layout).
#'
#' @param input (Tensor) the input tensor
#' @param upscale_factor (int) factor to increase spatial resolution by
#'
#' @return A tensor with the channel dimension redistributed into the two
#'   spatial dimensions, as described above.
#'
#' @export
nnf_pixel_shuffle <- function(input, upscale_factor) {
  # Thin wrapper: delegates directly to the C-backed torch operator;
  # no computation or validation happens at the R level.
  torch_pixel_shuffle(input, upscale_factor)
}
/R/nnf-pixelshuffle.R
permissive
mlverse/torch
R
false
false
389
r
#' Pixel_shuffle #' #' Rearranges elements in a tensor of shape \eqn{(*, C \times r^2, H, W)} to a #' tensor of shape \eqn{(*, C, H \times r, W \times r)}. #' #' @param input (Tensor) the input tensor #' @param upscale_factor (int) factor to increase spatial resolution by #' #' @export nnf_pixel_shuffle <- function(input, upscale_factor) { torch_pixel_shuffle(input, upscale_factor) }
## plot2.R -- Global Active Power over 2007-02-01..02, saved to plot2.png.
## Fixes: dropped rm(list = ls()) and attach()/detach() (script anti-patterns),
## spelled out header = TRUE, and drew directly to the PNG device instead of
## dev.copy() from a screen device (which fails in non-interactive sessions).

# Read the full dataset; "?" marks missing values in the source file.
data <- read.table("household_power_consumption.txt",
                   header = TRUE, sep = ";", na.strings = "?")

# Convert the date variable to Date class.
data$Date <- as.Date(data$Date, format = "%d/%m/%Y")

# Subset to the two days of interest.
data <- subset(data, subset = (Date >= "2007-02-01" & Date <= "2007-02-02"))

# Combine date and time into a single POSIXct datetime column.
data$datetime <- as.POSIXct(strptime(paste(data$Date, data$Time),
                                     "%Y-%m-%d %H:%M:%S"))

# Plot 2: line chart of active power against datetime, 480x480 PNG.
png(file = "plot2.png", height = 480, width = 480)
plot(Global_active_power ~ datetime, data = data, type = "l",
     ylab = "Global Active Power (kilowatts)", xlab = "")
dev.off()
/plot2.R
no_license
Joel11-N/ExploratoryDataAnalisisCourseProject
R
false
false
681
r
rm(list = ls()) data <- read.table("household_power_consumption.txt", header = T, sep = ";", na.strings = "?") # convert the date variable to Date class data$Date <- as.Date(data$Date, format = "%d/%m/%Y") # Subset the data data <- subset(data, subset = (Date >= "2007-02-01" & Date <= "2007-02-02")) # Convert dates and times data$datetime <- strptime(paste(data$Date, data$Time), "%Y-%m-%d %H:%M:%S") # Plot 2 data$datetime <- as.POSIXct(data$datetime) attach(data) plot(Global_active_power ~ datetime, type = "l", ylab = "Global Active Power (kilowatts)", xlab = "") dev.copy(png, file = "plot2.png", height = 480, width = 480) dev.off() detach(data)
## Plot 3 -- energy sub-metering over 2007-02-01..02, saved to plot3.png.
## Fixes: the original `data <- 0; if (length(data) == 1)` sentinel could never
## trigger (a failed read.table() raises an error rather than returning a
## scalar), so the download fallback was dead code -- replaced with an explicit
## file.exists() check. Also: Sub_metering_3 now goes through as.character()
## like the other two columns (as.numeric() on a factor yields level codes,
## not values, under pre-4.0 R), and 1:length(x) became seq_along(x).

data_file <- "household_power_consumption.txt"
zip_file  <- "household_power_consumption.zip"

## Read the data file from the working directory, downloading it first if
## it is absent.
if (file.exists(data_file)) {
  data <- read.table(data_file, header = TRUE, sep = ";")
} else {
  code <- download.file(
    "https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip",
    zip_file)
  if (code != 0) {
    stop("The data file cannot be downloaded!")
  }
  data <- read.table(unz(zip_file, data_file), header = TRUE, sep = ";")
}

## Rows for the two days of interest (dates stored as d/m/yyyy strings).
day1 <- data[data$Date == '1/2/2007', ]
day2 <- data[data$Date == '2/2/2007', ]
days <- rbind(day1, day2)

## x axis: one unit per observation.
times <- seq_along(days$Time)

## Ensure numeric type. as.character() first guards against factor columns
## (the pre-R 4.0 stringsAsFactors = TRUE default).
days$Sub_metering_1 <- as.numeric(as.character(days$Sub_metering_1))
days$Sub_metering_2 <- as.numeric(as.character(days$Sub_metering_2))
days$Sub_metering_3 <- as.numeric(as.character(days$Sub_metering_3))

# Save the plot into a 480x480 png file.
png('plot3.png', width = 480, height = 480)

# Draw the three sub-metering series; axes added manually so the x axis can
# show day labels at start / middle / end.
plot(times, days$Sub_metering_1, type = "l", xlab = "",
     ylab = "Energy sub metering", axes = FALSE,
     ylim = c(0, max(days$Sub_metering_1)))
lines(times, days$Sub_metering_2, col = "red")
lines(times, days$Sub_metering_3, col = "blue")
axis(1, pos = -1.5, at = c(0, max(times) / 2, max(times)),
     labels = c("Thu", "Fri", "Sat"))
axis(2)

# Legend in the top-right region (coordinates are in data units).
legend(1866, 40, c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"),
       lty = c(1, 1), lwd = c(2.5, 2.5, 2.5), col = c("black", "red", "blue"))
box()

# Close the device to flush the file.
dev.off()
/plot3.R
no_license
dpol2000/ExData_Plotting1
R
false
false
1,722
r
## Plot 3 ## try to read the data file from the working directory data <- 0 data <- read.table('household_power_consumption.txt', header = TRUE, sep=";") ## if the file is absent, download it from the internet if (length(data)==1) { code <- download.file("https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip", "household_power_consumption.zip") if (code==0) { data <- read.table(unz("household_power_consumption.zip", "household_power_consumption.txt"), header = TRUE, sep=";") } else { stop("The data file cannot be downloaded!") } } ## get data from two days of interest day1 <- data[data$Date=='1/2/2007',] day2 <- data[data$Date=='2/2/2007',] days <- rbind(day1, day2) ## create a vector for the y axis times <- c(1:length(days$Time)) ## convert data into numeric format days$Sub_metering_1 <- as.numeric(as.character(days$Sub_metering_1)) days$Sub_metering_2 <- as.numeric(as.character(days$Sub_metering_2)) days$Sub_metering_3 <- as.numeric(days$Sub_metering_3) # set saving the plot into a png file png('plot3.png', width = 480, height = 480) # draw the plot plot(times, days$Sub_metering_1, type="l", xlab="", ylab="Energy sub metering", axes=FALSE, ylim=c(0,max(days$Sub_metering_1))) lines(times, days$Sub_metering_2, col="red") lines(times, days$Sub_metering_3, col="blue") axis(1, pos=-1.5, at=c(0,max(times)/2,max(times)), labels=c("Thu", "Fri", "Sat")) axis(2) # add legend legend(1866, 40, c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"), lty=c(1,1),lwd=c(2.5,2.5, 2.5),col=c("black", "red","blue")) box() # saving is finished dev.off()
## Exercise scaffold: geometric mean. Students extract the script below into a
## function moyenne_geo() and make the four tests pass.
## Fix: each tryCatch() previously had NO error handler, so a missing or
## broken moyenne_geo() aborted the whole script at the first test instead of
## reporting a failure; an error handler now prints the same failure message.

vecteur_1 <- 1:5
vecteur_2 <- c(1, 4, 9, -2, -1, 4:9)
vecteur_3 <- c(1, 4, 9, -2, 1)
vecteur_4 <- c(1, 4, 9, NA, -1)

# Geometric mean of vecteur_1, written as a plain script.
n_1 <- length(vecteur_1)
prod(vecteur_1)^(1 / n_1)

# Exercises ---------------------------------------------------------------
# This file provides the script computing the geometric mean. Tasks:
# 1. Assign the geometric-mean values to valeur_vecteur_1 and
#    valeur_vecteur_2.
# 2. Extract the script into a function moyenne_geo.
# 3. Run the tests; the first two pass.
# 4. We want to handle negative numbers. Implement the formula from the
#    board. Test 3 passes.
# 5. We want to ignore NAs. Use an argument of the mean function to ignore
#    NAs. Test 4 passes.

valeur_vecteur_1 <- NA
valeur_vecteur_2 <- NA

tryCatch(
  {
    comparaison <- all.equal(moyenne_geo(vecteur_1), valeur_vecteur_1)
    if(!isTRUE(comparaison)) {
      print('Le vecteur 1 a échoué')
    }
  },
  error = function(e) print('Le vecteur 1 a échoué')
)

tryCatch(
  {
    comparaison <- all.equal(moyenne_geo(vecteur_2), valeur_vecteur_2)
    if(!isTRUE(comparaison)) {
      print('Le vecteur 2 a échoué')
    }
  },
  error = function(e) print('Le vecteur 2 a échoué')
)

tryCatch(
  {
    comparaison <- all.equal(moyenne_geo(vecteur_3), -2.352158, tolerance = 1e-5)
    if(!isTRUE(comparaison)) {
      print('Le vecteur 3 a échoué')
    }
  },
  error = function(e) print('Le vecteur 3 a échoué')
)

tryCatch(
  {
    comparaison <- all.equal(moyenne_geo(vecteur_4), -2.44949, tolerance = 1e-5)
    if(!isTRUE(comparaison)) {
      print('Le vecteur 4 a échoué')
    }
  },
  error = function(e) print('Le vecteur 4 a échoué')
)
/Exercices/partie_1/exemple_1.R
no_license
vigou3/tests-automatises-en-r
R
false
false
1,598
r
vecteur_1 <- 1:5 vecteur_2 <- c(1, 4, 9, -2, -1, 4:9) vecteur_3 <- c(1, 4, 9, -2, 1) vecteur_4 <- c(1, 4, 9, NA, -1) n_1 <- length(vecteur_1) prod(vecteur_1)^(1 / n_1) # Exercices --------------------------------------------------------------- # Dans ce document, on vous fournit le script pour calculer la moyenne # géométrique. Tâches : # 1. Attribuer les valeurs de la moyenne géométrique à valeur_vecteur_1 # et valeur_vecteur_2. # 2. Extraire le script dans une fonction moyenne_geo. # 3. Exécuter les tests, et les deux premiers passent. # 4. On désire gérer les nombres négatifs. Coder la formule au tableau. # Le test 3 passe. # 5. On désire ignorer les NA. Utiliser un argument de la fonction mean # pour ignorer les NA. Le test 4 passe. valeur_vecteur_1 <- NA valeur_vecteur_2 <- NA tryCatch( { comparaison <- all.equal(moyenne_geo(vecteur_1), valeur_vecteur_1) if(!isTRUE(comparaison)) { print('Le vecteur 1 a échoué') } } ) tryCatch( { comparaison <- all.equal(moyenne_geo(vecteur_2), valeur_vecteur_2) if(!isTRUE(comparaison)) { print('Le vecteur 2 a échoué') } } ) tryCatch( { comparaison <- all.equal(moyenne_geo(vecteur_3), -2.352158, tolerance = 1e-5) if(!isTRUE(comparaison)) { print('Le vecteur 3 a échoué') } } ) tryCatch( { comparaison <- all.equal(moyenne_geo(vecteur_4), -2.44949, tolerance = 1e-5) if(!isTRUE(comparaison)) { print('Le vecteur 4 a échoué') } } )
## Predicting photosynthesis.
# A = mmol CO2 m-2 s-1
# A = 7.309*log(height) - 23.612   (note that log() in R is the natural log)
# Equation for total leaf area: van Pelt et al. 2016? (stand-in)
# LA: y = -0.0819x^2 + 10.867x - 176.95

####### Estimated productivity for every combination of height and potential
####### G fraction, based on total LA and photosynthesis (TSF - respiration
####### penalty). Build the function, then a z matrix via outer().

# Productivity at a given height and G leaf fraction.
# NOTE(review): outer() below calls this as FUN(G_frac, height), i.e. with the
# arguments swapped relative to this signature. Left unchanged to preserve the
# existing results, but confirm which argument order is intended.
productivity <- function(height, G_frac) {
  ((-0.0819 * height^2) + (10.867 * height) - 176.95) *
    ((7.309 * log(height) - 23.612) * (1 / G_frac))
}

# z matrix of estimated photosynthetic output across the leaf investment
# space; `G_frac` and `height` vectors are defined elsewhere in the project.
photosynth <- outer(G_frac, height, productivity)
rownames(photosynth) <- height
colnames(photosynth) <- G_frac
class(photosynth)

####### Estimated uptake (hydraulic flux) across the same investment space.
# Uptake for L leaves = 0.0017*height^2 - 0.1135*height + 5.8981
# Uptake for G leaves = 22.85 g/m^2/min

# Total foliar water uptake: L-leaf uptake scaled by L leaf area plus G-leaf
# uptake scaled by G leaf area (leaf area split by G_frac).
up <- function(height, G_frac) {
  ((0.0017 * height^2 - 0.1135 * height + 5.8981) *
     ((-0.0819 * height^2 + 10.867 * height - 176.95) * (1 / G_frac))) +
    (22.85 * ((-0.0819 * height^2 + 10.867 * height - 176.95) * G_frac))
}

# BUG FIX: the original passed `productivity` to outer() here, so `uptake`
# silently duplicated `photosynth` and `up` was never used. The comment said
# "applies the function up" -- now it actually does.
uptake <- outer(G_frac, height, up)
/scripts/paper 2/photosyth and uptake functions.R
no_license
alanaroseo/fogdata
R
false
false
1,733
r
#predicting photosythesis #A = mmol CO2 m-2 s-1 #A=7.309*log(height) - 23.612 #note that log in R is ln #equation for total leaf area:van pelt et al 2016? #LA = x*height....blah #y = -0.0819x2 + 10.867x - 176.95#stand in ####### calculate estimated productivity given different potential G fractions across all heights based on total LA and photosythesis (TSF-respiration penalty) ######Make a function and calculate a z matrix productivity <- function(height, G_frac){ ((-0.0819*height^2) + (10.867*height) - 176.95)*((7.309*log(height) - 23.612)*(1/G_frac)) } #the function "productivity" calculates the estimated range of photosynthetic output across the leaf investment space (all combos of height and G fraction) photosynth <- outer(G_frac,height,productivity)#outer() function applies the function "productivity" at every combination of gs and VPD. Seconds is the z axis (a matrix) rownames(photosynth) = height colnames(photosynth) = G_frac class(photosynth) ####### calculate uptake (hydraulic flux)given different potential G fractions across all heights ######Make a function and calculate a z matrix #uptake for L leaves = 0.0017*height^2 - 0.1135*height + 5.8981 #uptake for G leaves = 22.85 g/m^2/min up <- function(height, G_frac){ ((0.0017*height^2 - 0.1135*height + 5.8981)*((-0.0819*height^2 + 10.867*height - 176.95)*(1/G_frac)))+(22.85*((-0.0819*height^2 + 10.867*height - 176.95)*G_frac)) } #the function "up" calculates the estimated range of foliar water uptake across the leaf investment space (all combos of height and G fraction) uptake <- outer(G_frac,height,productivity)#outer() function applies the function "up" at every combination of gs and VPD. Seconds is the z axis (a matrix)
#' Build a dumbbell chart of budget allocation changes (2018/19 vs 2020/21).
#'
#' Selects the 10 departments with the largest increase (graph_type == 'top')
#' or the 10 with the smallest/most negative change (anything else) in
#' `Budget Difference`, and plots actual 2018-19 vs budgeted 2020-21 totals.
#' Relies on `data_for_analysis` existing in the calling environment.
#'
#' @param graph_type 'top' for the largest increases, otherwise the smallest.
#' @return A ggplot object.
prepare_dumbell <- function(graph_type){
  # Pick the 10 extreme departments by budget difference.
  if(graph_type == 'top'){
    data_for_graph <- data_for_analysis %>% arrange(`Budget Difference`) %>% tail(10)
  } else {
    data_for_graph <- data_for_analysis %>% arrange(`Budget Difference`) %>% head(10)
  }
  graph_for_analysis <- data_for_graph
  # Freeze the row order as factor levels so ggplot keeps departments sorted
  # by budget difference instead of alphabetically.
  graph_for_analysis$Particulars <- factor(graph_for_analysis$Particulars,
                                           levels = as.character(graph_for_analysis$Particulars))
  grapher <- ggplot(graph_for_analysis,
                    aes(x = `Actual 2018-2019 Total`,
                        xend = `Budget 2020-2021 Total`,
                        y = Particulars, group = Particulars)) +
    geom_dumbbell(color = "#0e668b", size = 0.75, point.colour.l = "#0e668b") +
    scale_x_continuous() +
    # BUG FIX: `caption` was previously passed to glue::glue(), where named
    # arguments are interpolation bindings -- it was silently dropped and
    # never shown on the plot; it belongs to labs(). The subtitle also said
    # "20 departments" while the code selects 10.
    labs(x = NULL, y = NULL,
         title = "Budget Allocation (In Crores): 2018/19 vs 2020/21",
         subtitle = stringr::str_to_title(glue::glue(
           "{graph_type} 10 departments in terms of increase in budget allocation")),
         caption = "Source: OpenBudgetsIndia") +
    theme(plot.title = element_text(hjust = 0.5, face = "bold"),
          plot.background = element_rect(fill = "#f7f7f7"),
          panel.background = element_rect(fill = "#f7f7f7"),
          panel.grid.minor = element_blank(),
          panel.grid.major.y = element_blank(),
          panel.grid.major.x = element_line(),
          axis.ticks = element_blank(),
          legend.position = "top",
          panel.border = element_blank())
  return(grapher)
}
/utils/helpers.R
no_license
apoorv74/OBI-data-explorer
R
false
false
1,436
r
prepare_dumbell <- function(graph_type){ if(graph_type == 'top'){ data_for_graph <- data_for_analysis %>% arrange(`Budget Difference`) %>% tail(10) } else { data_for_graph <- data_for_analysis %>% arrange(`Budget Difference`) %>% head(10) } graph_for_analysis <- data_for_graph graph_for_analysis$Particulars <- factor(graph_for_analysis$Particulars, levels = as.character(graph_for_analysis$Particulars)) grapher <- ggplot(graph_for_analysis, aes(x=`Actual 2018-2019 Total`, xend=`Budget 2020-2021 Total`, y=Particulars, group=Particulars)) + geom_dumbbell(color="#0e668b", size=0.75, point.colour.l="#0e668b") + scale_x_continuous() + labs(x=NULL, y=NULL, title="Budget Allocation (In Crores): 2018/19 vs 2020/21", subtitle=stringr::str_to_title(glue::glue("{graph_type} 20 departments in terms of increase in budget allocation", caption="Source: OpenBudgetsIndia"))) + theme(plot.title = element_text(hjust=0.5, face="bold"), plot.background=element_rect(fill="#f7f7f7"), panel.background=element_rect(fill="#f7f7f7"), panel.grid.minor=element_blank(), panel.grid.major.y=element_blank(), panel.grid.major.x=element_line(), axis.ticks=element_blank(), legend.position="top", panel.border=element_blank()) return(grapher) }
#' Gets a table's sample
#'
#' Builds a `SELECT ... ORDER BY random() LIMIT n` query -- optionally
#' filtered on a single column/value pair -- and runs it on the supplied
#' connection.
#'
#' @param con connection
#' @param tbl table name
#' @param column optional column name to apply filter
#' @param value optional column value to apply filter
#' @param n number of rows, defaults to 10.
#'
#' @return a data.frame sample according to specified arguments.
#' @export
#'
psql_stj_sample <- function(con, tbl = NULL, column = NULL, value = NULL, n = 10) {
  # Choose the unfiltered or filtered form of the query up front; glue_sql()
  # quotes identifiers ({`...`}) and escapes values ({...}) safely.
  query <- if (is.null(column)) {
    glue::glue_sql("SElECT * FROM {`tbl`} ORDER BY random() LIMIT {n}",
                   .con = con)
  } else {
    glue::glue_sql("SElECT * FROM {`tbl`} WHERE {`tbl`}.{`column`} = {value} ORDER BY random() LIMIT {n}",
                   .con = con)
  }

  DBI::dbGetQuery(con, query)
}
/R/psql_stj_sample.R
permissive
thgmoraes/stj
R
false
false
866
r
#' Gets a table's sample #' #' @param con connection #' @param tbl table name #' @param column optional column name to apply filter #' @param value optional column value to apply filter #' @param n number of rows, defaults to 10. #' #' @return a data.frame sample according to specified arguements. #' @export #' psql_stj_sample <- function(con, tbl = NULL, column = NULL, value = NULL, n = 10) { if (is.null(column)) { query <- glue::glue_sql("SElECT * FROM {`tbl`} ORDER BY random() LIMIT {n}", .con = con ) } else { query <- glue::glue_sql("SElECT * FROM {`tbl`} WHERE {`tbl`}.{`column`} = {value} ORDER BY random() LIMIT {n}", .con = con ) } DBI::dbGetQuery(con, query) }
# Auto-extracted example for spectrolab's max.spectra method (taken from the
# package Rd file; the ### lines below are Rd section markers).
library(spectrolab)

### Name: max.spectra
### Title: Maximum reflectance
### Aliases: max.spectra

### ** Examples

# NOTE(review): the duplicated library() call comes from the Rd extraction
# process; it is harmless.
library(spectrolab)
# Build a spectra object from the bundled example matrix; column 1 holds the
# sample names (name_idx = 1).
spec = as.spectra(spec_matrix_example, name_idx = 1)
# Dispatches to the max.spectra method documented above.
max(spec)
/data/genthat_extracted_code/spectrolab/examples/max.spectra.Rd.R
no_license
surayaaramli/typeRrh
R
false
false
204
r
library(spectrolab) ### Name: max.spectra ### Title: Maximum reflectance ### Aliases: max.spectra ### ** Examples library(spectrolab) spec = as.spectra(spec_matrix_example, name_idx = 1) max(spec)
# Personal R learning notes: assignment, environments, math helpers, special
# values, vectors, factors, and data frames.
# NOTE(review): this is a scratch/notes file -- several lines below (the
# bare math calls) error when sourced as-is; they appear to be reminders of
# function names rather than runnable code.

variable_assignment <- 'Data can be in double quotations' # Single line comment
"Multi line"
assign('variable_assignment2',3) # keep in quotes if not declared

# new environment: assign() takes an explicit environment argument, and $ / [[
# also work on environments.
my.environment <- new.env()
assign('var1',10, my.environment)
my.environment$var2 = 1

my.environment1 <- new.env()
assign('var1',10, my.environment1)
my.environment1$var2 = 1

india <-new.env()
assign("temp",30,india)
india[["var_4"]]=10

# maths
# NOTE(review): these calls have no arguments and will error if executed;
# presumably meant as a list of useful math functions.
abs()
log()
exp()
factorial()

## Special constants
pi

## Special Numbers
# Inf -Inf ## Infinity and -Infininty
# NaN ## Not a number
# months <- c('Item1', 'Item2')

## Types of operations on vector
## Type 1: vector -> gives a single value
## Type 2: vector -> operation on each item of vector
## Type 3: multiple vector -> pairwise operation
### Eg: Addition, ==

## Complex Numbers
a <- 1 + 6i
b <- 6 + 2i
a + b

## Factors of a set
a <- c(1,2,3,4,5,1,2,3)
b <- factor(a)
b

# NOTE(review): `c` is being used as a variable name here, shadowing the
# built-in c() function for the rest of the session.
c <- c(a,a,a,a)
c
c = data.frame(a,a,a,a)
c

BMI = data.frame(
  gender = c('M','F'),
  height = c(1,2)
)
BMI
/Language/R/Suyog_R.R
permissive
ankschoubey/Personal-Developer-Notes
R
false
false
1,040
r
variable_assignment <- 'Data can be in double quotations' # Single line comment "Multi line" assign('variable_assignment2',3) # keep in quotes if not declared # new environment my.environment <- new.env() assign('var1',10, my.environment) my.environment$var2 = 1 my.environment1 <- new.env() assign('var1',10, my.environment1) my.environment1$var2 = 1 india <-new.env() assign("temp",30,india) india[["var_4"]]=10 # maths abs() log() exp() factorial() ## Special constants pi ## Special Numbers # Inf -Inf ## Infinity and -Infininty # NaN ## Not a number # months <- c('Item1', 'Item2') ## Types of operations on vector ## Type 1: vector -> gives a single value ## Type 2: vector -> operation on each item of vector ## Type 3: multiple vector -> pairwise operation ### Eg: Addition, == ## Complex Numbers a <- 1 + 6i b <- 6 + 2i a + b ## Factors of a set a <- c(1,2,3,4,5,1,2,3) b <- factor(a) b c <- c(a,a,a,a) c c = data.frame(a,a,a,a) c BMI = data.frame( gender = c('M','F'), height = c(1,2) ) BMI
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/Rfunctions_GC.R \name{krig.mh.stx_carA_smple} \alias{krig.mh.stx_carA_smple} \title{Inverse-Gamma DLM MCMC Simulation} \usage{ krig.mh.stx_carA_smple(i, datpred, parms, cov_names, linmod, nsize = 800) } \arguments{ \item{dat}{vector of length \emph{T} containing a time series of VI data} } \value{ List containing MCMC samples of \eqn{\sigma_e^2}, \eqn{\sigma_w^2}, and \eqn{\theta_{1:T}} (if \code{save.theta = TRUE}) \describe{ \item{sigma2s:}{mc x 2 matrix containing MCMC samples of \eqn{\sigma_e^2} (first column) and \eqn{\sigma_w^2} (second column)} \item{thetas:}{if \code{save_value = TRUE}, a T x p x mc array containing MCMC samples of the latent states} } } \description{ MCMC simulation of the reduced Fourier-form Bayesian DLM with conjugate Inverse-Gamma priors. } \details{ blah blah blah }
/man/krig.mh.stx_carA_smple.Rd
no_license
reich-group/Google-Car
R
false
true
887
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/Rfunctions_GC.R \name{krig.mh.stx_carA_smple} \alias{krig.mh.stx_carA_smple} \title{Inverse-Gamma DLM MCMC Simulation} \usage{ krig.mh.stx_carA_smple(i, datpred, parms, cov_names, linmod, nsize = 800) } \arguments{ \item{dat}{vector of length \emph{T} containing a time series of VI data} } \value{ List containing MCMC samples of \eqn{\sigma_e^2}, \eqn{\sigma_w^2}, and \eqn{\theta_{1:T}} (if \code{save.theta = TRUE}) \describe{ \item{sigma2s:}{mc x 2 matrix containing MCMC samples of \eqn{\sigma_e^2} (first column) and \eqn{\sigma_w^2} (second column)} \item{thetas:}{if \code{save_value = TRUE}, a T x p x mc array containing MCMC samples of the latent states} } } \description{ MCMC simulation of the reduced Fourier-form Bayesian DLM with conjugate Inverse-Gamma priors. } \details{ blah blah blah }
nullirwsva = function (dat, n.sv, B = 5) { n <- ncol(dat) m <- nrow(dat) mod = as.matrix(rep(1, n), ncol = 1) Id <- diag(n) resid <- dat %*% (Id - mod %*% solve(t(mod) %*% mod) %*% t(mod)) uu <- eigen(t(resid) %*% resid) vv <- uu$vectors ndf <- n - dim(mod)[2] pprob <- rep(1, m) one <- rep(1, n) Id <- diag(n) df1 <- dim(mod)[2] + n.sv rm(resid) cat(paste("Iteration (out of", B, "):")) for (i in 1:B) { mod.gam <- cbind(mod, uu$vectors[, 1:n.sv]) mod0.gam <- cbind(mod) ptmp <- f.pvalue(dat, mod.gam, mod0.gam) pprob.gam <- (1 - edge.lfdr(ptmp)) dats <- dat * pprob.gam uu <- eigen(t(dats) %*% dats) cat(paste(i, " ")) } sv = svd(dats)$v[, 1:n.sv] retval <- list(sv = sv, pprob.gam = pprob.gam, n.sv = n.sv) return(retval) }
/Mengjie/single_cell/R/nullirwsva.R
no_license
MeileiJiang/robust-against-heterogeneity
R
false
false
837
r
nullirwsva = function (dat, n.sv, B = 5) { n <- ncol(dat) m <- nrow(dat) mod = as.matrix(rep(1, n), ncol = 1) Id <- diag(n) resid <- dat %*% (Id - mod %*% solve(t(mod) %*% mod) %*% t(mod)) uu <- eigen(t(resid) %*% resid) vv <- uu$vectors ndf <- n - dim(mod)[2] pprob <- rep(1, m) one <- rep(1, n) Id <- diag(n) df1 <- dim(mod)[2] + n.sv rm(resid) cat(paste("Iteration (out of", B, "):")) for (i in 1:B) { mod.gam <- cbind(mod, uu$vectors[, 1:n.sv]) mod0.gam <- cbind(mod) ptmp <- f.pvalue(dat, mod.gam, mod0.gam) pprob.gam <- (1 - edge.lfdr(ptmp)) dats <- dat * pprob.gam uu <- eigen(t(dats) %*% dats) cat(paste(i, " ")) } sv = svd(dats)$v[, 1:n.sv] retval <- list(sv = sv, pprob.gam = pprob.gam, n.sv = n.sv) return(retval) }
library(shiny) require(RJSONIO) require(ggplot2) require(scales) # This function is used to access the SODA API and download the data from the NJ Open # Data website. The funtion takes the year as an input and downloads the first 1000 # entries. GetData<- function(n){ # Returns the dataframe from the download. The dataframe is 1000 rows by 1 column. # The filters are applied by the string after the '?'. The 'master' record type # was used because it had the YTD earnings. The alternative was to use the # 'detail' record type which had current period payroll entries. URL<-paste("http://data.nj.gov/resource/iqwc-r2w7.json?record_type=master&calendar_year=",n,sep="") RawData<-fromJSON(URL) Temp<- data.frame(salary= as.numeric(sapply(RawData,"[[","master_ytd_earnings")), stringsAsFactors = F) L<-list(mu=mean(Temp$salary), SD=sd(Temp$salary), values=Temp$salary) L } shinyServer( function(input,output){ # Use reactive functions so the data gets re-queried whenever the year input # is changed by the user. Data<- reactive({GetData({input$year})}) output$mu <- renderText(Data()$mu) output$SD <- renderText((Data()[2])) # Need to use reactive again to update these values when the data set # is queried. Norm<-reactive({ xval<- seq(0,150000,length=10000) yval<- dnorm(xval,mean=as.numeric(Data()$mu), sd=as.numeric(Data()$SD)) data.frame(x=xval,y=yval) }) # Calculate the theoretical percentile the user's salary would # fall into. Use theoretical percentile as a statistical # inference of where they user would fall. Percentile<- reactive(pnorm(as.numeric({input$isalary}), mean=as.numeric(unlist(Data()[1])), sd=as.numeric(Data()[2]) )) # Draw the plot. The vertical line is the user's salary. 
output$plot1<- renderPlot({ ggplot(Norm()) + aes(x=x,y=y) + geom_line(size=1.5) + labs(title="Distribution of Salaries for New Jersey State Employees", y= element_blank()) + scale_x_continuous(labels=comma, limits=c(0,150000)) + geom_vline(xintercept={input$isalary}, colour="blue", size=2) + theme(plot.title=element_text(size=rel(2)), axis.text.y=element_blank(), panel.background=element_rect(fill="white",colour="black"), panel.grid.major=element_blank(), panel.grid.minor= element_blank()) }) # Craft the sentence at the bottom of the chart. The statement includes # a binary value indicating if the user's salary is greater than or less # than the mean. It also gives the mean salary during the year. Bigger<-reactive({ifelse({input$isalary}< as.numeric(Data()[1]),"less","more")}) output$statement<- renderText(paste("Your salary is ", Bigger()[1], " than the average NJ state worker salary in ", {input$year}, ". The average pay that year was $", format(round(as.numeric(Data()[1]),0),big.mark = ","), " and you made more money than ", round(Percentile(),3)*100, "% of the state employees. Congrats!", sep="")) } )
/server.R
no_license
kuhnrl30/NJPayroll
R
false
false
3,651
r
library(shiny) require(RJSONIO) require(ggplot2) require(scales) # This function is used to access the SODA API and download the data from the NJ Open # Data website. The funtion takes the year as an input and downloads the first 1000 # entries. GetData<- function(n){ # Returns the dataframe from the download. The dataframe is 1000 rows by 1 column. # The filters are applied by the string after the '?'. The 'master' record type # was used because it had the YTD earnings. The alternative was to use the # 'detail' record type which had current period payroll entries. URL<-paste("http://data.nj.gov/resource/iqwc-r2w7.json?record_type=master&calendar_year=",n,sep="") RawData<-fromJSON(URL) Temp<- data.frame(salary= as.numeric(sapply(RawData,"[[","master_ytd_earnings")), stringsAsFactors = F) L<-list(mu=mean(Temp$salary), SD=sd(Temp$salary), values=Temp$salary) L } shinyServer( function(input,output){ # Use reactive functions so the data gets re-queried whenever the year input # is changed by the user. Data<- reactive({GetData({input$year})}) output$mu <- renderText(Data()$mu) output$SD <- renderText((Data()[2])) # Need to use reactive again to update these values when the data set # is queried. Norm<-reactive({ xval<- seq(0,150000,length=10000) yval<- dnorm(xval,mean=as.numeric(Data()$mu), sd=as.numeric(Data()$SD)) data.frame(x=xval,y=yval) }) # Calculate the theoretical percentile the user's salary would # fall into. Use theoretical percentile as a statistical # inference of where they user would fall. Percentile<- reactive(pnorm(as.numeric({input$isalary}), mean=as.numeric(unlist(Data()[1])), sd=as.numeric(Data()[2]) )) # Draw the plot. The vertical line is the user's salary. 
output$plot1<- renderPlot({ ggplot(Norm()) + aes(x=x,y=y) + geom_line(size=1.5) + labs(title="Distribution of Salaries for New Jersey State Employees", y= element_blank()) + scale_x_continuous(labels=comma, limits=c(0,150000)) + geom_vline(xintercept={input$isalary}, colour="blue", size=2) + theme(plot.title=element_text(size=rel(2)), axis.text.y=element_blank(), panel.background=element_rect(fill="white",colour="black"), panel.grid.major=element_blank(), panel.grid.minor= element_blank()) }) # Craft the sentence at the bottom of the chart. The statement includes # a binary value indicating if the user's salary is greater than or less # than the mean. It also gives the mean salary during the year. Bigger<-reactive({ifelse({input$isalary}< as.numeric(Data()[1]),"less","more")}) output$statement<- renderText(paste("Your salary is ", Bigger()[1], " than the average NJ state worker salary in ", {input$year}, ". The average pay that year was $", format(round(as.numeric(Data()[1]),0),big.mark = ","), " and you made more money than ", round(Percentile(),3)*100, "% of the state employees. Congrats!", sep="")) } )
\name{psmelt} \alias{psmelt} \title{Melt phyloseq data object into large data.frame} \usage{ psmelt(physeq) } \arguments{ \item{physeq}{(Required). An \code{\link{otu_table-class}} or \code{\link{phyloseq-class}}. Function most useful for phyloseq-class.} } \value{ A \code{\link{data.frame}}-class table. } \description{ The psmelt function is a specialized melt function for melting phyloseq objects (instances of the phyloseq class), usually for the purpose of graphics production in ggplot2-based phyloseq-generated graphics. It relies heavily on the \code{\link[reshape]{melt}} and \code{\link{merge}} functions. Note that ``melted'' phyloseq data is stored much less efficiently, and so RAM storage issues could arise with a smaller dataset (smaller number of samples/OTUs/variables) than one might otherwise expect. For average-sized datasets, however, this should not be a problem. Because the number of OTU entries has a large effect on the RAM requirement, methods to reduce the number of separate OTU entries, for instance by agglomerating based on phylogenetic distance using \code{\link{tipglom}}, can help alleviate RAM usage problems. This function is made user-accessible for flexibility, but is also used extensively by plot functions in phyloseq. } \examples{ data("GlobalPatterns") gp.ch = subset_taxa(GlobalPatterns, Phylum == "Chlamydiae") mdf = psmelt(gp.ch) nrow(mdf) ncol(mdf) colnames(mdf) head(rownames(mdf)) # Create a ggplot similar to library("ggplot2") p = ggplot(mdf, aes(x=SampleType, y=Abundance, fill=Genus)) p = p + geom_bar(color="black", stat="identity", position="stack") print(p) } \seealso{ \code{\link{plot_bar}} \code{\link[reshape]{melt}} \code{\link{merge}} }
/man/psmelt.Rd
no_license
BioinformaticsArchive/phyloseq
R
false
false
1,761
rd
\name{psmelt} \alias{psmelt} \title{Melt phyloseq data object into large data.frame} \usage{ psmelt(physeq) } \arguments{ \item{physeq}{(Required). An \code{\link{otu_table-class}} or \code{\link{phyloseq-class}}. Function most useful for phyloseq-class.} } \value{ A \code{\link{data.frame}}-class table. } \description{ The psmelt function is a specialized melt function for melting phyloseq objects (instances of the phyloseq class), usually for the purpose of graphics production in ggplot2-based phyloseq-generated graphics. It relies heavily on the \code{\link[reshape]{melt}} and \code{\link{merge}} functions. Note that ``melted'' phyloseq data is stored much less efficiently, and so RAM storage issues could arise with a smaller dataset (smaller number of samples/OTUs/variables) than one might otherwise expect. For average-sized datasets, however, this should not be a problem. Because the number of OTU entries has a large effect on the RAM requirement, methods to reduce the number of separate OTU entries, for instance by agglomerating based on phylogenetic distance using \code{\link{tipglom}}, can help alleviate RAM usage problems. This function is made user-accessible for flexibility, but is also used extensively by plot functions in phyloseq. } \examples{ data("GlobalPatterns") gp.ch = subset_taxa(GlobalPatterns, Phylum == "Chlamydiae") mdf = psmelt(gp.ch) nrow(mdf) ncol(mdf) colnames(mdf) head(rownames(mdf)) # Create a ggplot similar to library("ggplot2") p = ggplot(mdf, aes(x=SampleType, y=Abundance, fill=Genus)) p = p + geom_bar(color="black", stat="identity", position="stack") print(p) } \seealso{ \code{\link{plot_bar}} \code{\link[reshape]{melt}} \code{\link{merge}} }
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/pltltn.R \name{pltltn} \alias{pltltn} \title{Correction for p>>n for an object of class \code{Bvs}} \usage{ pltltn(object) } \arguments{ \item{object}{An object of class \code{Bvs} obtained with \code{GibbsBvs}} } \value{ \code{pltltn} returns a list with the following elements: \item{pS }{An estimation of the probability that the true model is irregular (k>n)} \item{postprobdim }{A corrected estimation of the posterior probabilities over the dimensions} \item{inclprob }{A corrected estimation of the posterior inclusion probabilities} } \description{ In cases where p>>n and the true model is expected to be sparse, it is very unlikely that the Gibbs sampling will sample models in the singular subset of the model space (models with k>n). Nevertheless, depending on how large is p/n and the strenght of the signal, this part of the model space could be very influential in the final response. } \details{ From an object created with GibbsBvs and prior probabilities specified as Scott-Berger, this function provides an estimation of the posterior probability of models with k>n which is a measure of the importance of these models. In summary, when this probability is large, the sample size is not large enough to beat such large p. Additionally, \code{pltltn} gives corrections of the posterior inclusion probabilities and posterior probabilities of dimension of the true model. } \examples{ \dontrun{ data(riboflavin, package="hdi") set.seed(16091956) #the following sentence took 37.3 minutes in a single core #(a trick to see the evolution of the algorithm is to monitor #the files created by the function. #you can see the working directory running #tempdir() #copy this path in the clipboard. 
Then open another R session #and from there (once the simulating process is running and the burnin has completed) #write #system("wc (path from clipboard)/AllBF") #the number here appearing is the number of completed iterations # testRB<- GibbsBvs(formula=y~., data=riboflavin, prior.betas="Robust", init.model="null", time.test=F, n.iter=10000, n.burnin=1000) set.seed(16091956) system.time( testRB<- GibbsBvs(formula=y~., data=riboflavin, prior.betas="Robust", init.model="null", time.test=F, n.iter=10000, n.burnin=1000) ) #notice the large sparsity of the result since #the great majority of covariates are not influential: boxplot(testRB$inclprob) testRB$inclprob[testRB$inclprob>.5] #xYOAB_at xYXLE_at # 0.9661 0.6502 #we can discharge all covariates except xYOAB_at and xYXLE_at #the method does not reach to inform about xYXLE_at and its posterior #probability is only slightly bigger than its prior probability #We see that dimensions of visited models are small: plot(testRB, option="d", xlim=c(0,100)) #so the part of the model space with singular models (k>n) #has not been explored. #To correct this issue we run: corrected.testRB<- pltltn(testRB) #Estimate of the posterior probability of the # model space with singular models is: 0 #Meaning that it is extremely unlikely that the true model is such that k>n #The corrected inclusion probabilities can be accessed through #corrected.testRB but, in this case, these are essentially the same as in the #original object (due to the unimportance of the singular part of the model space) } } \references{ Berger, J.O., Garcia-Donato, G., Martínez-Beneito M.A. and Peña, V. (2016) Bayesian variable selection in high dimensional problems without assumptions on prior model probabilities. arXiv:1607.02993 } \seealso{ See \code{\link[BayesVarSel]{GibbsBvs}} for creating objects of the class \code{Bvs}. } \author{ Gonzalo Garcia-Donato Maintainer: <gonzalo.garciadonato@uclm.es> }
/man/pltltn.Rd
no_license
comodin19/BayesVarSel
R
false
true
3,959
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/pltltn.R \name{pltltn} \alias{pltltn} \title{Correction for p>>n for an object of class \code{Bvs}} \usage{ pltltn(object) } \arguments{ \item{object}{An object of class \code{Bvs} obtained with \code{GibbsBvs}} } \value{ \code{pltltn} returns a list with the following elements: \item{pS }{An estimation of the probability that the true model is irregular (k>n)} \item{postprobdim }{A corrected estimation of the posterior probabilities over the dimensions} \item{inclprob }{A corrected estimation of the posterior inclusion probabilities} } \description{ In cases where p>>n and the true model is expected to be sparse, it is very unlikely that the Gibbs sampling will sample models in the singular subset of the model space (models with k>n). Nevertheless, depending on how large is p/n and the strenght of the signal, this part of the model space could be very influential in the final response. } \details{ From an object created with GibbsBvs and prior probabilities specified as Scott-Berger, this function provides an estimation of the posterior probability of models with k>n which is a measure of the importance of these models. In summary, when this probability is large, the sample size is not large enough to beat such large p. Additionally, \code{pltltn} gives corrections of the posterior inclusion probabilities and posterior probabilities of dimension of the true model. } \examples{ \dontrun{ data(riboflavin, package="hdi") set.seed(16091956) #the following sentence took 37.3 minutes in a single core #(a trick to see the evolution of the algorithm is to monitor #the files created by the function. #you can see the working directory running #tempdir() #copy this path in the clipboard. 
Then open another R session #and from there (once the simulating process is running and the burnin has completed) #write #system("wc (path from clipboard)/AllBF") #the number here appearing is the number of completed iterations # testRB<- GibbsBvs(formula=y~., data=riboflavin, prior.betas="Robust", init.model="null", time.test=F, n.iter=10000, n.burnin=1000) set.seed(16091956) system.time( testRB<- GibbsBvs(formula=y~., data=riboflavin, prior.betas="Robust", init.model="null", time.test=F, n.iter=10000, n.burnin=1000) ) #notice the large sparsity of the result since #the great majority of covariates are not influential: boxplot(testRB$inclprob) testRB$inclprob[testRB$inclprob>.5] #xYOAB_at xYXLE_at # 0.9661 0.6502 #we can discharge all covariates except xYOAB_at and xYXLE_at #the method does not reach to inform about xYXLE_at and its posterior #probability is only slightly bigger than its prior probability #We see that dimensions of visited models are small: plot(testRB, option="d", xlim=c(0,100)) #so the part of the model space with singular models (k>n) #has not been explored. #To correct this issue we run: corrected.testRB<- pltltn(testRB) #Estimate of the posterior probability of the # model space with singular models is: 0 #Meaning that it is extremely unlikely that the true model is such that k>n #The corrected inclusion probabilities can be accessed through #corrected.testRB but, in this case, these are essentially the same as in the #original object (due to the unimportance of the singular part of the model space) } } \references{ Berger, J.O., Garcia-Donato, G., Martínez-Beneito M.A. and Peña, V. (2016) Bayesian variable selection in high dimensional problems without assumptions on prior model probabilities. arXiv:1607.02993 } \seealso{ See \code{\link[BayesVarSel]{GibbsBvs}} for creating objects of the class \code{Bvs}. } \author{ Gonzalo Garcia-Donato Maintainer: <gonzalo.garciadonato@uclm.es> }
## Function to run joint models with simulated data joint_model <- function(structured_data, unstructured_data, dat1, biasfield, plotting=FALSE, mesh.edge = c(20,40), # added mesh.offset = c(5,20), # added resolution = c(10,10)){ # added #packages library(INLA) library(reshape2) library(rgeos) library(fields) max_x <- max(biasfield$x) max_y <- max(biasfield$y) #preparation - mesh construction - use the loc.domain argument mesh <- inla.mesh.2d(loc.domain = biasfield[,c(1,2)], max.edge=mesh.edge, cutoff=2, offset = mesh.offset) #plot the mesh to see what it looks like if(plotting == TRUE){ par(mfrow=c(1,1)) plot(mesh)} ##set the spde representation to be the mesh just created spde <- inla.spde2.matern(mesh) #make A matrix for structured data - should this be pulling the x and y coordinates for the location? structured_data_A <- inla.spde.make.A(mesh = mesh, loc = as.matrix(structured_data[,2:3])) #make A matrix for unstructured data unstructured_data_A <- inla.spde.make.A(mesh = mesh, loc = as.matrix(unstructured_data[,1:2])) # Joint model # One spatial field # Uses (as far as I can tell) Simpson approach for PP data # Binomial model for PA data # Using cloglog # create integration stack loc.d <- t(matrix(c(0,0,max_x,0,max_x,max_y,0,max_y,0,0), 2)) #make dual mesh dd <- deldir::deldir(mesh$loc[, 1], mesh$loc[, 2]) tiles <- deldir::tile.list(dd) #make domain into spatial polygon domainSP <- SpatialPolygons(list(Polygons( list(Polygon(loc.d)), '0'))) #intersection between domain and dual mesh poly.gpc <- as(domainSP@polygons[[1]]@Polygons[[1]]@coords, "gpc.poly") # w now contains area of voronoi polygons w <- sapply(tiles, function(p) rgeos::area.poly(rgeos::intersect(as(cbind(p$x, p$y), "gpc.poly"), poly.gpc))) #check some have 0 weight table(w>0) nv <- mesh$n n <- nrow(unstructured_data) #change data to include 0s for nodes and 1s for presences y.pp <- rep(0:1, c(nv, n)) #add expectation vector (area for integration points/nodes and 0 for presences) e.pp <- c(w, 
rep(0, n)) #diagonal matrix for integration point A matrix imat <- Diagonal(nv, rep(1, nv)) A.pp <- rBind(imat, unstructured_data_A) #get covariate for integration points covariate = dat1$gridcov[Reduce('cbind', nearest.pixel(mesh$loc[,1], mesh$loc[,2], im(dat1$gridcov)))] #unstructured data stack with integration points stk_unstructured_data <- inla.stack(data=list(y=cbind(y.pp, NA), e = e.pp), effects=list(list(data.frame(interceptB=rep(1,nv+n)), env = c(covariate, unstructured_data$env)), list(uns_field=1:spde$n.spde)), A=list(1,A.pp), tag="unstructured_data") #stack for structured data #note intercept with different name stk_structured_data <- inla.stack(data=list(y=cbind(NA, structured_data$presence), Ntrials = rep(1, nrow(structured_data))), effects=list(list(data.frame(interceptA=rep(1,length(structured_data$x)), env = structured_data$env)), list(str_field=1:spde$n.spde)), A=list(1,structured_data_A), tag="structured_data") ##NOTE: doesn't use the copy function initially stk <- inla.stack(stk_unstructured_data, stk_structured_data) # join.stack <- stk # source("Create prediction stack.R") join.stack <- create_prediction_stack(stk, resolution=resolution, biasfield = biasfield, dat1 = dat1, mesh, spde) formulaJ = y ~ interceptA + interceptB + env + f(uns_field, model = spde) + f(str_field, copy = "uns_field", fixed = TRUE) -1 result <- inla(formulaJ,family=c("poisson", "binomial"), data=inla.stack.data(join.stack), control.predictor=list(A=inla.stack.A(join.stack), compute=TRUE), control.family = list(list(link = "log"), list(link = "cloglog")), E = inla.stack.data(join.stack)$e, Ntrials = inla.stack.data(join.stack)$Ntrials, control.compute = list(dic = FALSE, cpo = FALSE, waic = FALSE) ) ##project the mesh onto the initial simulated grid 100x100 cells in dimension proj1<-inla.mesh.projector(mesh, ylim=c(1,max_y), xlim=c(1,max_x), dims=c(max_x,max_y)) ##pull out the mean of the random field for the NPMS model xmean1 <- inla.mesh.project(proj1, 
result$summary.random$uns_field$mean) ##plot the estimated random field # plot with the original library(fields) # some of the commands below were giving warnings as not graphical parameters - I have fixed what I can # scales and col.region did nothing on my version if(plotting == TRUE){ #png("joint_model.png") # option to save output #, height = 1000, width = 2500, pointsize = 30) par(mfrow=c(1,1)) image.plot(1:max_x,1:max_y, xmean1, col=tim.colors(), xlab='', ylab='', main="Joint-mean of r.f", asp=1) #zlim=c(-3,3)) # image.plot(list(x=dat1$Lam$xcol*100, # y=dat1$Lam$yrow*100, # z=t(dat1$rf.s)), # main='Truth', # asp=1, # zlim=c(-3,3)) # make sure scale = same # points(structured_data[structured_data[,4] %in% 0,2:3], pch=16, col='white') #absences # points(structured_data[structured_data[,4] %in% 1,2:3], pch=16, col='black') ##plot the standard deviation of random field xsd1 <- inla.mesh.project(proj1, result$summary.random$uns_field$sd) image.plot(1:max_x,1:max_y, xsd1, col=tim.colors(), xlab='', ylab='', main="Joint-sd of r.f", asp=1) #dev.off() } result$summary.fixed return(list(join.stack = join.stack, result = result)) }
/Run models joint.R
no_license
ssarahas/MScProject_ISDM
R
false
false
7,076
r
## Function to run joint models with simulated data joint_model <- function(structured_data, unstructured_data, dat1, biasfield, plotting=FALSE, mesh.edge = c(20,40), # added mesh.offset = c(5,20), # added resolution = c(10,10)){ # added #packages library(INLA) library(reshape2) library(rgeos) library(fields) max_x <- max(biasfield$x) max_y <- max(biasfield$y) #preparation - mesh construction - use the loc.domain argument mesh <- inla.mesh.2d(loc.domain = biasfield[,c(1,2)], max.edge=mesh.edge, cutoff=2, offset = mesh.offset) #plot the mesh to see what it looks like if(plotting == TRUE){ par(mfrow=c(1,1)) plot(mesh)} ##set the spde representation to be the mesh just created spde <- inla.spde2.matern(mesh) #make A matrix for structured data - should this be pulling the x and y coordinates for the location? structured_data_A <- inla.spde.make.A(mesh = mesh, loc = as.matrix(structured_data[,2:3])) #make A matrix for unstructured data unstructured_data_A <- inla.spde.make.A(mesh = mesh, loc = as.matrix(unstructured_data[,1:2])) # Joint model # One spatial field # Uses (as far as I can tell) Simpson approach for PP data # Binomial model for PA data # Using cloglog # create integration stack loc.d <- t(matrix(c(0,0,max_x,0,max_x,max_y,0,max_y,0,0), 2)) #make dual mesh dd <- deldir::deldir(mesh$loc[, 1], mesh$loc[, 2]) tiles <- deldir::tile.list(dd) #make domain into spatial polygon domainSP <- SpatialPolygons(list(Polygons( list(Polygon(loc.d)), '0'))) #intersection between domain and dual mesh poly.gpc <- as(domainSP@polygons[[1]]@Polygons[[1]]@coords, "gpc.poly") # w now contains area of voronoi polygons w <- sapply(tiles, function(p) rgeos::area.poly(rgeos::intersect(as(cbind(p$x, p$y), "gpc.poly"), poly.gpc))) #check some have 0 weight table(w>0) nv <- mesh$n n <- nrow(unstructured_data) #change data to include 0s for nodes and 1s for presences y.pp <- rep(0:1, c(nv, n)) #add expectation vector (area for integration points/nodes and 0 for presences) e.pp <- c(w, 
rep(0, n)) #diagonal matrix for integration point A matrix imat <- Diagonal(nv, rep(1, nv)) A.pp <- rBind(imat, unstructured_data_A) #get covariate for integration points covariate = dat1$gridcov[Reduce('cbind', nearest.pixel(mesh$loc[,1], mesh$loc[,2], im(dat1$gridcov)))] #unstructured data stack with integration points stk_unstructured_data <- inla.stack(data=list(y=cbind(y.pp, NA), e = e.pp), effects=list(list(data.frame(interceptB=rep(1,nv+n)), env = c(covariate, unstructured_data$env)), list(uns_field=1:spde$n.spde)), A=list(1,A.pp), tag="unstructured_data") #stack for structured data #note intercept with different name stk_structured_data <- inla.stack(data=list(y=cbind(NA, structured_data$presence), Ntrials = rep(1, nrow(structured_data))), effects=list(list(data.frame(interceptA=rep(1,length(structured_data$x)), env = structured_data$env)), list(str_field=1:spde$n.spde)), A=list(1,structured_data_A), tag="structured_data") ##NOTE: doesn't use the copy function initially stk <- inla.stack(stk_unstructured_data, stk_structured_data) # join.stack <- stk # source("Create prediction stack.R") join.stack <- create_prediction_stack(stk, resolution=resolution, biasfield = biasfield, dat1 = dat1, mesh, spde) formulaJ = y ~ interceptA + interceptB + env + f(uns_field, model = spde) + f(str_field, copy = "uns_field", fixed = TRUE) -1 result <- inla(formulaJ,family=c("poisson", "binomial"), data=inla.stack.data(join.stack), control.predictor=list(A=inla.stack.A(join.stack), compute=TRUE), control.family = list(list(link = "log"), list(link = "cloglog")), E = inla.stack.data(join.stack)$e, Ntrials = inla.stack.data(join.stack)$Ntrials, control.compute = list(dic = FALSE, cpo = FALSE, waic = FALSE) ) ##project the mesh onto the initial simulated grid 100x100 cells in dimension proj1<-inla.mesh.projector(mesh, ylim=c(1,max_y), xlim=c(1,max_x), dims=c(max_x,max_y)) ##pull out the mean of the random field for the NPMS model xmean1 <- inla.mesh.project(proj1, 
result$summary.random$uns_field$mean) ##plot the estimated random field # plot with the original library(fields) # some of the commands below were giving warnings as not graphical parameters - I have fixed what I can # scales and col.region did nothing on my version if(plotting == TRUE){ #png("joint_model.png") # option to save output #, height = 1000, width = 2500, pointsize = 30) par(mfrow=c(1,1)) image.plot(1:max_x,1:max_y, xmean1, col=tim.colors(), xlab='', ylab='', main="Joint-mean of r.f", asp=1) #zlim=c(-3,3)) # image.plot(list(x=dat1$Lam$xcol*100, # y=dat1$Lam$yrow*100, # z=t(dat1$rf.s)), # main='Truth', # asp=1, # zlim=c(-3,3)) # make sure scale = same # points(structured_data[structured_data[,4] %in% 0,2:3], pch=16, col='white') #absences # points(structured_data[structured_data[,4] %in% 1,2:3], pch=16, col='black') ##plot the standard deviation of random field xsd1 <- inla.mesh.project(proj1, result$summary.random$uns_field$sd) image.plot(1:max_x,1:max_y, xsd1, col=tim.colors(), xlab='', ylab='', main="Joint-sd of r.f", asp=1) #dev.off() } result$summary.fixed return(list(join.stack = join.stack, result = result)) }
##manual deisotope deisotope <- function(data=...){ require(stringr) str_which(data$isotopes, pattern = "\\+\\d") data_1 <- data[-str_which(data$isotopes, pattern = "\\+\\d|\\d\\+"),] return(data_1)}
/deisotope.R
no_license
tuhulab/bfi-wholegrain
R
false
false
213
r
##manual deisotope deisotope <- function(data=...){ require(stringr) str_which(data$isotopes, pattern = "\\+\\d") data_1 <- data[-str_which(data$isotopes, pattern = "\\+\\d|\\d\\+"),] return(data_1)}
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/s.SGD.R \name{s.SGD} \alias{s.SGD} \title{Stochastic Gradient Descent (SGD) [C, R]} \usage{ s.SGD( x, y = NULL, x.test = NULL, y.test = NULL, x.name = NULL, y.name = NULL, model = NULL, model.control = list(lambda1 = 0, lambda2 = 0), sgd.control = list(method = "ai-sgd"), upsample = FALSE, downsample = FALSE, resample.seed = NULL, print.plot = TRUE, plot.fitted = NULL, plot.predicted = NULL, plot.theme = getOption("rt.fit.theme", "lightgrid"), question = NULL, verbose = TRUE, outdir = NULL, save.mod = ifelse(!is.null(outdir), TRUE, FALSE), ... ) } \arguments{ \item{x}{Numeric vector or matrix / data frame of features i.e. independent variables} \item{y}{Numeric vector of outcome, i.e. dependent variable} \item{x.test}{Numeric vector or matrix / data frame of testing set features Columns must correspond to columns in \code{x}} \item{y.test}{Numeric vector of testing set outcome} \item{x.name}{Character: Name for feature set} \item{y.name}{Character: Name for outcome} \item{model}{character specifying the model to be used: \code{"lm"} (linear model), \code{"glm"} (generalized linear model), \code{"cox"} (Cox proportional hazards model), \code{"gmm"} (generalized method of moments), \code{"m"} (M-estimation). See \sQuote{Details}.} \item{model.control}{a list of parameters for controlling the model. \describe{ \item{\code{family} (\code{"glm"})}{a description of the error distribution and link function to be used in the model. This can be a character string naming a family function, a family function or the result of a call to a family function. (See \code{\link[stats]{family}} for details of family functions.)} \item{\code{rank} (\code{"glm"})}{logical. Should the rank of the design matrix be checked?} \item{\code{fn} (\code{"gmm"})}{a function \eqn{g(\theta,x)} which returns a \eqn{k}-vector corresponding to the \eqn{k} moment conditions. 
It is a required argument if \code{gr} not specified.} \item{\code{gr} (\code{"gmm"})}{a function to return the gradient. If unspecified, a finite-difference approximation will be used.} \item{\code{nparams} (\code{"gmm"})}{number of model parameters. This is automatically determined for other models.} \item{\code{type} (\code{"gmm"})}{character specifying the generalized method of moments procedure: \code{"twostep"} (Hansen, 1982), \code{"iterative"} (Hansen et al., 1996). Defaults to \code{"iterative"}.} \item{\code{wmatrix} (\code{"gmm"})}{weighting matrix to be used in the loss function. Defaults to the identity matrix.} \item{\code{loss} (\code{"m"})}{character specifying the loss function to be used in the estimating equation. Default is the Huber loss.} \item{\code{lambda1}}{L1 regularization parameter. Default is 0.} \item{\code{lambda2}}{L2 regularization parameter. Default is 0.} }} \item{sgd.control}{an optional list of parameters for controlling the estimation. \describe{ \item{\code{method}}{character specifying the method to be used: \code{"sgd"}, \code{"implicit"}, \code{"asgd"}, \code{"ai-sgd"}, \code{"momentum"}, \code{"nesterov"}. Default is \code{"ai-sgd"}. See \sQuote{Details}.} \item{\code{lr}}{character specifying the learning rate to be used: \code{"one-dim"}, \code{"one-dim-eigen"}, \code{"d-dim"}, \code{"adagrad"}, \code{"rmsprop"}. Default is \code{"one-dim"}. See \sQuote{Details}.} \item{\code{lr.control}}{vector of scalar hyperparameters one can set dependent on the learning rate. For hyperparameters aimed to be left as default, specify \code{NA} in the corresponding entries. See \sQuote{Details}.} \item{\code{start}}{starting values for the parameter estimates. Default is random initialization around zero.} \item{\code{size}}{number of SGD estimates to store for diagnostic purposes (distributed log-uniformly over total number of iterations)} \item{\code{reltol}}{relative convergence tolerance. 
The algorithm stops if it is unable to change the relative mean squared difference in the parameters by more than the amount. Default is \code{1e-05}.} \item{\code{npasses}}{the maximum number of passes over the data. Default is 3.} \item{\code{pass}}{logical. Should \code{tol} be ignored and run the algorithm for all of \code{npasses}?} \item{\code{shuffle}}{logical. Should the algorithm shuffle the data set including for each pass?} \item{\code{verbose}}{logical. Should the algorithm print progress?} }} \item{upsample}{Logical: If TRUE, upsample cases to balance outcome classes (for Classification only) Caution: upsample will randomly sample with replacement if the length of the majority class is more than double the length of the class you are upsampling, thereby introducing randomness} \item{resample.seed}{Integer: If provided, will be used to set the seed during upsampling. Default = NULL (random seed)} \item{print.plot}{Logical: if TRUE, produce plot using \code{mplot3} Takes precedence over \code{plot.fitted} and \code{plot.predicted}. Default = TRUE} \item{plot.fitted}{Logical: if TRUE, plot True (y) vs Fitted} \item{plot.predicted}{Logical: if TRUE, plot True (y.test) vs Predicted. Requires \code{x.test} and \code{y.test}} \item{plot.theme}{Character: "zero", "dark", "box", "darkbox"} \item{question}{Character: the question you are attempting to answer with this model, in plain language.} \item{verbose}{Logical: If TRUE, print summary to screen.} \item{outdir}{Path to output directory. If defined, will save Predicted vs. True plot, if available, as well as full model output, if \code{save.mod} is TRUE} \item{save.mod}{Logical: If TRUE, save all output to an RDS file in \code{outdir} \code{save.mod} is TRUE by default if an \code{outdir} is defined. 
If set to TRUE, and no \code{outdir} is defined, outdir defaults to \code{paste0("./s.", mod.name)}} \item{...}{Additional arguments to be passed to \code{sgd.control}} } \value{ Object of class \pkg{rtemis} } \description{ Train a model by Stochastic Gradient Descent using \code{sgd::sgd} } \details{ From \code{sgd::sgd}: "Models: The Cox model assumes that the survival data is ordered when passed in, i.e., such that the risk set of an observation i is all data points after it." } \seealso{ \link{elevate} for external cross-validation Other Supervised Learning: \code{\link{s.ADABOOST}()}, \code{\link{s.ADDTREE}()}, \code{\link{s.BART}()}, \code{\link{s.BAYESGLM}()}, \code{\link{s.BRUTO}()}, \code{\link{s.C50}()}, \code{\link{s.CART}()}, \code{\link{s.CTREE}()}, \code{\link{s.DA}()}, \code{\link{s.ET}()}, \code{\link{s.EVTREE}()}, \code{\link{s.GAM.default}()}, \code{\link{s.GAM.formula}()}, \code{\link{s.GAMSELX2}()}, \code{\link{s.GAMSELX}()}, \code{\link{s.GAMSEL}()}, \code{\link{s.GAM}()}, \code{\link{s.GBM3}()}, \code{\link{s.GBM}()}, \code{\link{s.GLMNET}()}, \code{\link{s.GLM}()}, \code{\link{s.GLS}()}, \code{\link{s.H2ODL}()}, \code{\link{s.H2OGBM}()}, \code{\link{s.H2ORF}()}, \code{\link{s.IRF}()}, \code{\link{s.KNN}()}, \code{\link{s.LDA}()}, \code{\link{s.LM}()}, \code{\link{s.MARS}()}, \code{\link{s.MLRF}()}, \code{\link{s.NBAYES}()}, \code{\link{s.NLA}()}, \code{\link{s.NLS}()}, \code{\link{s.NW}()}, \code{\link{s.POLYMARS}()}, \code{\link{s.PPR}()}, \code{\link{s.PPTREE}()}, \code{\link{s.QDA}()}, \code{\link{s.QRNN}()}, \code{\link{s.RANGER}()}, \code{\link{s.RFSRC}()}, \code{\link{s.RF}()}, \code{\link{s.SPLS}()}, \code{\link{s.SVM}()}, \code{\link{s.TFN}()}, \code{\link{s.XGBLIN}()}, \code{\link{s.XGB}()} } \author{ Efstathios D. Gennatas } \concept{Supervised Learning}
/man/s.SGD.Rd
no_license
zeta1999/rtemis
R
false
true
7,725
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/s.SGD.R \name{s.SGD} \alias{s.SGD} \title{Stochastic Gradient Descent (SGD) [C, R]} \usage{ s.SGD( x, y = NULL, x.test = NULL, y.test = NULL, x.name = NULL, y.name = NULL, model = NULL, model.control = list(lambda1 = 0, lambda2 = 0), sgd.control = list(method = "ai-sgd"), upsample = FALSE, downsample = FALSE, resample.seed = NULL, print.plot = TRUE, plot.fitted = NULL, plot.predicted = NULL, plot.theme = getOption("rt.fit.theme", "lightgrid"), question = NULL, verbose = TRUE, outdir = NULL, save.mod = ifelse(!is.null(outdir), TRUE, FALSE), ... ) } \arguments{ \item{x}{Numeric vector or matrix / data frame of features i.e. independent variables} \item{y}{Numeric vector of outcome, i.e. dependent variable} \item{x.test}{Numeric vector or matrix / data frame of testing set features Columns must correspond to columns in \code{x}} \item{y.test}{Numeric vector of testing set outcome} \item{x.name}{Character: Name for feature set} \item{y.name}{Character: Name for outcome} \item{model}{character specifying the model to be used: \code{"lm"} (linear model), \code{"glm"} (generalized linear model), \code{"cox"} (Cox proportional hazards model), \code{"gmm"} (generalized method of moments), \code{"m"} (M-estimation). See \sQuote{Details}.} \item{model.control}{a list of parameters for controlling the model. \describe{ \item{\code{family} (\code{"glm"})}{a description of the error distribution and link function to be used in the model. This can be a character string naming a family function, a family function or the result of a call to a family function. (See \code{\link[stats]{family}} for details of family functions.)} \item{\code{rank} (\code{"glm"})}{logical. Should the rank of the design matrix be checked?} \item{\code{fn} (\code{"gmm"})}{a function \eqn{g(\theta,x)} which returns a \eqn{k}-vector corresponding to the \eqn{k} moment conditions. 
It is a required argument if \code{gr} not specified.} \item{\code{gr} (\code{"gmm"})}{a function to return the gradient. If unspecified, a finite-difference approximation will be used.} \item{\code{nparams} (\code{"gmm"})}{number of model parameters. This is automatically determined for other models.} \item{\code{type} (\code{"gmm"})}{character specifying the generalized method of moments procedure: \code{"twostep"} (Hansen, 1982), \code{"iterative"} (Hansen et al., 1996). Defaults to \code{"iterative"}.} \item{\code{wmatrix} (\code{"gmm"})}{weighting matrix to be used in the loss function. Defaults to the identity matrix.} \item{\code{loss} (\code{"m"})}{character specifying the loss function to be used in the estimating equation. Default is the Huber loss.} \item{\code{lambda1}}{L1 regularization parameter. Default is 0.} \item{\code{lambda2}}{L2 regularization parameter. Default is 0.} }} \item{sgd.control}{an optional list of parameters for controlling the estimation. \describe{ \item{\code{method}}{character specifying the method to be used: \code{"sgd"}, \code{"implicit"}, \code{"asgd"}, \code{"ai-sgd"}, \code{"momentum"}, \code{"nesterov"}. Default is \code{"ai-sgd"}. See \sQuote{Details}.} \item{\code{lr}}{character specifying the learning rate to be used: \code{"one-dim"}, \code{"one-dim-eigen"}, \code{"d-dim"}, \code{"adagrad"}, \code{"rmsprop"}. Default is \code{"one-dim"}. See \sQuote{Details}.} \item{\code{lr.control}}{vector of scalar hyperparameters one can set dependent on the learning rate. For hyperparameters aimed to be left as default, specify \code{NA} in the corresponding entries. See \sQuote{Details}.} \item{\code{start}}{starting values for the parameter estimates. Default is random initialization around zero.} \item{\code{size}}{number of SGD estimates to store for diagnostic purposes (distributed log-uniformly over total number of iterations)} \item{\code{reltol}}{relative convergence tolerance. 
The algorithm stops if it is unable to change the relative mean squared difference in the parameters by more than the amount. Default is \code{1e-05}.} \item{\code{npasses}}{the maximum number of passes over the data. Default is 3.} \item{\code{pass}}{logical. Should \code{tol} be ignored and run the algorithm for all of \code{npasses}?} \item{\code{shuffle}}{logical. Should the algorithm shuffle the data set including for each pass?} \item{\code{verbose}}{logical. Should the algorithm print progress?} }} \item{upsample}{Logical: If TRUE, upsample cases to balance outcome classes (for Classification only) Caution: upsample will randomly sample with replacement if the length of the majority class is more than double the length of the class you are upsampling, thereby introducing randomness} \item{resample.seed}{Integer: If provided, will be used to set the seed during upsampling. Default = NULL (random seed)} \item{print.plot}{Logical: if TRUE, produce plot using \code{mplot3} Takes precedence over \code{plot.fitted} and \code{plot.predicted}. Default = TRUE} \item{plot.fitted}{Logical: if TRUE, plot True (y) vs Fitted} \item{plot.predicted}{Logical: if TRUE, plot True (y.test) vs Predicted. Requires \code{x.test} and \code{y.test}} \item{plot.theme}{Character: "zero", "dark", "box", "darkbox"} \item{question}{Character: the question you are attempting to answer with this model, in plain language.} \item{verbose}{Logical: If TRUE, print summary to screen.} \item{outdir}{Path to output directory. If defined, will save Predicted vs. True plot, if available, as well as full model output, if \code{save.mod} is TRUE} \item{save.mod}{Logical: If TRUE, save all output to an RDS file in \code{outdir} \code{save.mod} is TRUE by default if an \code{outdir} is defined. 
If set to TRUE, and no \code{outdir} is defined, outdir defaults to \code{paste0("./s.", mod.name)}} \item{...}{Additional arguments to be passed to \code{sgd.control}} } \value{ Object of class \pkg{rtemis} } \description{ Train a model by Stochastic Gradient Descent using \code{sgd::sgd} } \details{ From \code{sgd::sgd}: "Models: The Cox model assumes that the survival data is ordered when passed in, i.e., such that the risk set of an observation i is all data points after it." } \seealso{ \link{elevate} for external cross-validation Other Supervised Learning: \code{\link{s.ADABOOST}()}, \code{\link{s.ADDTREE}()}, \code{\link{s.BART}()}, \code{\link{s.BAYESGLM}()}, \code{\link{s.BRUTO}()}, \code{\link{s.C50}()}, \code{\link{s.CART}()}, \code{\link{s.CTREE}()}, \code{\link{s.DA}()}, \code{\link{s.ET}()}, \code{\link{s.EVTREE}()}, \code{\link{s.GAM.default}()}, \code{\link{s.GAM.formula}()}, \code{\link{s.GAMSELX2}()}, \code{\link{s.GAMSELX}()}, \code{\link{s.GAMSEL}()}, \code{\link{s.GAM}()}, \code{\link{s.GBM3}()}, \code{\link{s.GBM}()}, \code{\link{s.GLMNET}()}, \code{\link{s.GLM}()}, \code{\link{s.GLS}()}, \code{\link{s.H2ODL}()}, \code{\link{s.H2OGBM}()}, \code{\link{s.H2ORF}()}, \code{\link{s.IRF}()}, \code{\link{s.KNN}()}, \code{\link{s.LDA}()}, \code{\link{s.LM}()}, \code{\link{s.MARS}()}, \code{\link{s.MLRF}()}, \code{\link{s.NBAYES}()}, \code{\link{s.NLA}()}, \code{\link{s.NLS}()}, \code{\link{s.NW}()}, \code{\link{s.POLYMARS}()}, \code{\link{s.PPR}()}, \code{\link{s.PPTREE}()}, \code{\link{s.QDA}()}, \code{\link{s.QRNN}()}, \code{\link{s.RANGER}()}, \code{\link{s.RFSRC}()}, \code{\link{s.RF}()}, \code{\link{s.SPLS}()}, \code{\link{s.SVM}()}, \code{\link{s.TFN}()}, \code{\link{s.XGBLIN}()}, \code{\link{s.XGB}()} } \author{ Efstathios D. Gennatas } \concept{Supervised Learning}
#' Prepare object for argument \code{design} of \code{spsurvey.analysis()}
#'
#' This function returns an object to feed the argument \code{design} when
#' creating an object of class \code{spsurvey.analysis}.
#'
#' The argument \code{design} used to create object of class
#' \code{spsurvey.analysis} requires a series of inputs. However, it can be fed
#' with data about site ID and coordinates. \code{coordenadas()} returns a data
#' frame that provides this information, assuming that all other design
#' variables are provided manually in the arguments list.
#'
#' @param x Object of class \code{SpatialPointsDataFrame} from
#' which site ID and XY coordinates are to be returned.
#' @return An object of class \code{data.frame} containing three columns with
#' names \code{siteID}, \code{xcoord}, and \code{ycoord}.
#' @author Alessandro Samuel-Rosa \email{alessandrosamuelrosa@@gmail.com}
#' @seealso \code{\link[pedometrics]{gcpDiff}},
#' \code{\link[spsurvey]{cont.analysis}}.
#' @references Kincaid, T. M. and Olsen, A. R. (2013). spsurvey: Spatial Survey
#' Design and Analysis. R package version 2.6. URL:
#' <\url{http://www.epa.gov/nheerl/arm/}>.
#' @keywords methods
#' @export
#' @examples
#'
#' \dontrun{
#' ## Create an spsurvey.analysis object
#' my.spsurvey <-
#'   spsurvey.analysis(design = coordenadas(my.data),
#'                     data.cont = delta(ref.data, my.data),
#'                     popcorrect = TRUE, pcfsize = length(my.data$id),
#'                     support = rep(1, length(my.data$id)),
#'                     wgt = rep(1, length(my.data$id)), vartype = "SRS")
#' }
#'
# FUNCTION #####################################################################
coordenadas <- function(x) {
  # Fail early if suggested packages are not installed.
  pkg <- c("sp")
  missing_pkg <- !vapply(pkg, requireNamespace, logical(1), quietly = TRUE)
  if (any(missing_pkg)) {
    # BUG FIX: the old message was built with paste(..., sep = "") and
    # printed "...but notinstalled: "; the space is now included.
    stop("Package(s) needed for this function to work but not installed: ",
         paste(pkg[missing_pkg], collapse = " "), call. = FALSE)
  }
  # Bind site IDs to their XY coordinates, order rows by numeric site ID,
  # and standardize column names as expected by spsurvey.
  coo <- data.frame(x$siteID, sp::coordinates(x))
  coo <- coo[order(as.numeric(x$siteID)), ]
  colnames(coo) <- c("siteID", "xcoord", "ycoord")
  row.names(coo) <- NULL
  coo
}
/pedometrics/R/coordenadas.R
no_license
ingted/R-Examples
R
false
false
2,272
r
#' Prepare object for argument \code{design} of \code{spsurvey.analysis()}
#'
#' This function returns an object to feed the argument \code{design} when
#' creating an object of class \code{spsurvey.analysis}.
#'
#' The argument \code{design} used to create object of class
#' \code{spsurvey.analysis} requires a series of inputs. However, it can be fed
#' with data about site ID and coordinates. \code{coordenadas()} returns a data
#' frame that provides this information, assuming that all other design
#' variables are provided manually in the arguments list.
#'
#' @param x Object of class \code{SpatialPointsDataFrame} from
#' which site ID and XY coordinates are to be returned.
#' @return An object of class \code{data.frame} containing three columns with
#' names \code{siteID}, \code{xcoord}, and \code{ycoord}.
#' @author Alessandro Samuel-Rosa \email{alessandrosamuelrosa@@gmail.com}
#' @seealso \code{\link[pedometrics]{gcpDiff}},
#' \code{\link[spsurvey]{cont.analysis}}.
#' @references Kincaid, T. M. and Olsen, A. R. (2013). spsurvey: Spatial Survey
#' Design and Analysis. R package version 2.6. URL:
#' <\url{http://www.epa.gov/nheerl/arm/}>.
#' @keywords methods
#' @export
#' @examples
#'
#' \dontrun{
#' ## Create an spsurvey.analysis object
#' my.spsurvey <-
#'   spsurvey.analysis(design = coordenadas(my.data),
#'                     data.cont = delta(ref.data, my.data),
#'                     popcorrect = TRUE, pcfsize = length(my.data$id),
#'                     support = rep(1, length(my.data$id)),
#'                     wgt = rep(1, length(my.data$id)), vartype = "SRS")
#' }
#'
# FUNCTION #####################################################################
coordenadas <- function(x) {
  # Fail early if suggested packages are not installed.
  pkg <- c("sp")
  missing_pkg <- !vapply(pkg, requireNamespace, logical(1), quietly = TRUE)
  if (any(missing_pkg)) {
    # BUG FIX: the old message was built with paste(..., sep = "") and
    # printed "...but notinstalled: "; the space is now included.
    stop("Package(s) needed for this function to work but not installed: ",
         paste(pkg[missing_pkg], collapse = " "), call. = FALSE)
  }
  # Bind site IDs to their XY coordinates, order rows by numeric site ID,
  # and standardize column names as expected by spsurvey.
  coo <- data.frame(x$siteID, sp::coordinates(x))
  coo <- coo[order(as.numeric(x$siteID)), ]
  colnames(coo) <- c("siteID", "xcoord", "ycoord")
  row.names(coo) <- NULL
  coo
}
### discreteRoot.R ---
##----------------------------------------------------------------------
## Author: Brice Ozenne
## Created: nov 22 2017 (13:39)
## Version:
## Last-Updated: Feb 10 2023 (09:19)
## By: Thomas Alexander Gerds
## Update #: 250
##----------------------------------------------------------------------
##
### Commentary:
##
### Change Log:
##----------------------------------------------------------------------
##
### Code:

## * discreteRoot - Documentation
#' @title Dichotomic search for monotone function
#' @description Find the root of a monotone function on a discrete grid of value using dichotomic search
#' @name discreteRoot
#'
#' @param fn [function] objective function to minimize in absolute value.
#' @param grid [vector] possible minimizers.
#' @param increasing [logical] is the function fn increasing?
#' @param check [logical] should the program check that fn takes a different sign for the first vs. the last value of the grid?
#' @param tol [numeric] the absolute convergence tolerance.

## * discreteRoot
#' @rdname discreteRoot
#' @export
discreteRoot <- function(fn, grid, increasing = TRUE, check = TRUE,
                         tol = .Machine$double.eps ^ 0.5) {

    n.grid <- length(grid)
    value.grid <- rep(NA, n.grid)   # cache of fn evaluated on the grid
    iter <- 1
    ncv <- TRUE                     # "not converged" flag
    iSet <- 1:n.grid                # grid indices still candidate
    ## +1 when fn is increasing, -1 when decreasing: orients the bisection.
    factor <- c(-1, 1)[increasing + 1]

    ### ** Check
    if (check) {
        value.grid[1] <- fn(grid[1])
        value.grid[n.grid] <- fn(grid[n.grid])
        ## BUG FIX: the previous code compared sign(value.grid[1]) against the
        ## RAW value value.grid[n.grid]; both end points must go through
        ## sign() for the "no sign change" test to be meaningful.
        if (sign(value.grid[1]) == sign(value.grid[n.grid])) {
            return(list(par = NA,
                        value = NA,
                        counts = 0,
                        cv = 1,
                        message = "Cannot find a solution because the function does not change sign \n"))
        }
        if (increasing[[1]] && value.grid[[1]] > value.grid[[n.grid]]) {
            return(list(par = NA,
                        value = NA,
                        counts = 0,
                        cv = 1,
                        message = "Cannot find a solution - argument \'increasing\' does not match the variations of the functions \n"))
        }
        if (!increasing[[1]] && value.grid[[1]] < value.grid[[n.grid]]) {
            return(list(par = NA,
                        value = NA,
                        counts = 0,
                        cv = 1,
                        message = "Cannot find a solution - argument \'increasing\' does not match the variations of the functions \n"))
        }
    }

    ### ** Expore the grid using dichotomic search
    while (iter[[1]] <= n.grid[[1]] && ncv[[1]] == TRUE && length(iSet) > 0) {
        iMiddle <- ceiling(length(iSet) / 2)
        iIndexInSet <- iSet[iMiddle]
        if (check[[1]] == FALSE || iIndexInSet %in% c(1, n.grid) == FALSE) {
            ## if the current index we are looking at has not already been computed,
            ## then evaluate the objective function.
            ## this is only the case when check is TRUE and we look at the borders
            value.grid[iIndexInSet] <- fn(grid[iIndexInSet])
        }
        if (is.na(value.grid[iIndexInSet])) {
            ## handle NA value by just removing the observation from the set
            ## of possibilities.
            ## BUG FIX: setdiff(iSet, iMiddle) removed the VALUE iMiddle from
            ## iSet, not the element at POSITION iMiddle; drop by position.
            iSet <- iSet[-iMiddle]
            iter <- iter + 1
        } else if (factor * value.grid[iIndexInSet] > tol) {
            ## look in subgrid corresponding to the lowest values (left part)
            iSet <- iSet[setdiff(1:iMiddle, iMiddle)]
            iter <- iter + 1
        } else if (factor * value.grid[iIndexInSet] < -tol) {
            ## look in subgrid corresponding to the largest values (right part)
            iN.set <- length(iSet)
            iSet <- iSet[setdiff(iMiddle:iN.set, iMiddle)]
            iter <- iter + 1
        } else {
            ## convergence: |fn| within tol at this grid point
            ncv <- FALSE
            solution <- grid[iIndexInSet]
            value <- value.grid[iIndexInSet]
        }
    }

    ### ** If did not find a value whose image matched tol, give the closest solution
    if (ncv) {
        iIndexInSet <- which.min(abs(value.grid))
        ncv <- FALSE
        solution <- grid[iIndexInSet]
        value <- value.grid[iIndexInSet]
    }

    return(list(par = solution,
                value = value,
                ## grid = setNames(value.grid,grid),
                counts = iter,
                cv = ncv,
                message = NULL))
}

## * boot2pvalue - Documentation
#' @title Compute the p.value from the distribution under H1
#' @description Compute the p.value associated with the estimated statistic
#' using a bootstrap sample of its distribution under H1.
#'
#' @param x [numeric vector] a vector of bootstrap estimates of the statistic.
#' @param null [numeric] value of the statistic under the null hypothesis.
#' @param estimate [numeric] the estimated statistic.
#' @param FUN.ci [function] the function used to compute the confidence interval.
#' Must take \code{x}, \code{alternative}, \code{conf.level} and \code{sign.estimate} as arguments
#' and only return the relevant limit (either upper or lower) of the confidence interval.
#' @param alternative [character] a character string specifying the alternative hypothesis, must be one of "two.sided" (default), "greater" or "less".
#' @param tol [numeric] the absolute convergence tolerance.
#' @details
#' For test statistic close to 0, this function returns 1. \cr \cr
#'
#' For positive test statistic, this function search the quantile alpha such that:
#'\itemize{
#' \item \code{quantile(x, probs = alpha)=0} when the argument alternative is set to \code{"greater"}.
#' \item \code{quantile(x, probs = 0.5*alpha)=0} when the argument alternative is set to \code{"two.sided"}.
#' }
#' If the argument alternative is set to \code{"less"}, it returns 1. \cr \cr
#'
#' For negative test statistic, this function search the quantile alpha such that:
#' \itemize{
#' \item \code{quantile(x, probs = 1-alpha)=0} when the argument alternative is set to \code{"less"}.
#' \item \code{quantile(x, probs = 1-0.5*alpha)=0} when the argument alternative is set to \code{"two.sided"}.
#' }
#' If the argument alternative is set to \code{"greater"}, it returns 1.
#'
#' @examples
#' set.seed(10)
#'
#' #### no effect ####
#' x <- rnorm(1e3)
#' boot2pvalue(x, null = 0, estimate = mean(x), alternative = "two.sided")
#' ## expected value of 1
#' boot2pvalue(x, null = 0, estimate = mean(x), alternative = "greater")
#' ## expected value of 0.5
#' boot2pvalue(x, null = 0, estimate = mean(x), alternative = "less")
#' ## expected value of 0.5
#'
#' #### positive effect ####
#' x <- rnorm(1e3, mean = 1)
#' boot2pvalue(x, null = 0, estimate = 1, alternative = "two.sided")
#' ## expected value of 0.32 = 2*pnorm(q = 0, mean = -1) = 2*mean(x<=0)
#' boot2pvalue(x, null = 0, estimate = 1, alternative = "greater")
#' ## expected value of 0.16 = pnorm(q = 0, mean = 1) = mean(x<=0)
#' boot2pvalue(x, null = 0, estimate = 1, alternative = "less")
#' ## expected value of 0.84 = 1-pnorm(q = 0, mean = 1) = mean(x>=0)
#'
#' #### negative effect ####
#' x <- rnorm(1e3, mean = -1)
#' boot2pvalue(x, null = 0, estimate = -1, alternative = "two.sided")
#' ## expected value of 0.32 = 2*(1-pnorm(q = 0, mean = -1)) = 2*mean(x>=0)
#' boot2pvalue(x, null = 0, estimate = -1, alternative = "greater")
#' ## expected value of 0.84 = pnorm(q = 0, mean = -1) = mean(x<=0)
#' boot2pvalue(x, null = 0, estimate = -1, alternative = "less") # pnorm(q = 0, mean = -1)
#' ## expected value of 0.16 = 1-pnorm(q = 0, mean = -1) = mean(x>=0)

## * boot2pvalue
#' @rdname boot2pvalue
#' @export
boot2pvalue <- function(x, null, estimate = NULL, alternative = "two.sided",
                        FUN.ci = quantileCI, tol = .Machine$double.eps ^ 0.5) {

    x.boot <- na.omit(x)
    n.boot <- length(x.boot)
    ## test statistic implied by the bootstrap sample
    statistic.boot <- mean(x.boot) - null
    if (is.null(estimate)) {
        statistic <- statistic.boot
    } else {
        statistic <- estimate - null
        if (sign(statistic.boot) != sign(statistic)) {
            warning("the estimate and the average bootstrap estimate do not have same sign \n")
        }
    }
    sign.statistic <- statistic >= 0

    if (abs(statistic) < tol) { ## too small test statistic
        p.value <- 1
    } else if (n.boot < 10) { ## too few bootstrap samples
        p.value <- as.numeric(NA)
    } else if (all(x.boot > null)) { ## clear p.value
        p.value <- switch(alternative,
                          "two.sided" = 0,
                          "less" = 1,
                          "greater" = 0)
    } else if (all(x.boot < null)) { ## clear p.value
        p.value <- switch(alternative,
                          "two.sided" = 0,
                          "less" = 0,
                          "greater" = 1)
    } else { ## need search to obtain p.value
        ## when the p.value=1-coverage increases, does the quantile increases?
        increasing <- switch(alternative,
                             "two.sided" = sign.statistic,
                             "less" = FALSE,
                             "greater" = TRUE)
        ## grid of confidence level
        grid <- seq(0, by = 1/n.boot, length.out = n.boot)
        ## search for critical confidence level: the p.value at which the
        ## relevant CI limit crosses the null value
        resSearch <- discreteRoot(fn = function(p.value) {
            CI <- FUN.ci(x = x.boot,
                         p.value = p.value,
                         alternative = alternative,
                         sign.estimate = sign.statistic)
            return(CI[1] - null)
        },
        grid = grid,
        increasing = increasing,
        check = FALSE)

        ## check change sign around the returned quantile: the CI limit must
        ## cross the null between par-1/n.boot and par+1/n.boot
        sign.before <- sign(FUN.ci(x = x.boot,
                                   p.value = max(0, resSearch$par - 1/n.boot),
                                   alternative = alternative,
                                   sign.estimate = sign.statistic) - null)
        sign.after <- sign(FUN.ci(x = x.boot,
                                  p.value = min(1, resSearch$par + 1/n.boot),
                                  alternative = alternative,
                                  sign.estimate = sign.statistic) - null)
        ##
        if (is.na(resSearch$value[[1]]) || is.na(sign.before[[1]]) || is.na(sign.after[[1]]) ||
            length(resSearch$value) == 0 ||
            resSearch$par[[1]] < 0 || resSearch$par[[1]] > 1 ||
            sign.before[[1]] == sign.after[[1]]) {
            warning("incorrect convergence of the algorithm finding the critical quantile \n",
                    "p-value may not be reliable \n")
        }
        p.value <- resSearch$par
    }

    if (p.value %in% c(0, 1)) {
        message("Estimated p-value of ", p.value,
                " - consider increasing the number of bootstrap samples \n")
    }

    return(p.value)
}

## * quantileCI
## Return the single CI limit relevant for the requested alternative:
## for "two.sided", the lower limit when the estimate is positive and the
## upper limit otherwise.
quantileCI <- function(x, alternative, p.value, sign.estimate, ...) {
    probs <- switch(alternative,
                    "two.sided" = c(p.value/2, 1 - p.value/2)[2 - sign.estimate], ## if positive p.value/2 otherwise 1-p.value/2
                    "less" = 1 - p.value,
                    "greater" = p.value)
    return(quantile(x, probs = probs)[1])
}

##----------------------------------------------------------------------
### discreteRoot.R ends here
/R/discreteRoot.R
no_license
tagteam/riskRegression
R
false
false
11,254
r
### discreteRoot.R ---
##----------------------------------------------------------------------
## Author: Brice Ozenne
## Created: nov 22 2017 (13:39)
## Version:
## Last-Updated: Feb 10 2023 (09:19)
## By: Thomas Alexander Gerds
## Update #: 250
##----------------------------------------------------------------------
##
### Commentary:
##
### Change Log:
##----------------------------------------------------------------------
##
### Code:

## * discreteRoot - Documentation
#' @title Dichotomic search for monotone function
#' @description Find the root of a monotone function on a discrete grid of value using dichotomic search
#' @name discreteRoot
#'
#' @param fn [function] objective function to minimize in absolute value.
#' @param grid [vector] possible minimizers.
#' @param increasing [logical] is the function fn increasing?
#' @param check [logical] should the program check that fn takes a different sign for the first vs. the last value of the grid?
#' @param tol [numeric] the absolute convergence tolerance.

## * discreteRoot
#' @rdname discreteRoot
#' @export
discreteRoot <- function(fn, grid, increasing = TRUE, check = TRUE,
                         tol = .Machine$double.eps ^ 0.5) {

    n.grid <- length(grid)
    value.grid <- rep(NA, n.grid)   # cache of fn evaluated on the grid
    iter <- 1
    ncv <- TRUE                     # "not converged" flag
    iSet <- 1:n.grid                # grid indices still candidate
    ## +1 when fn is increasing, -1 when decreasing: orients the bisection.
    factor <- c(-1, 1)[increasing + 1]

    ### ** Check
    if (check) {
        value.grid[1] <- fn(grid[1])
        value.grid[n.grid] <- fn(grid[n.grid])
        ## BUG FIX: the previous code compared sign(value.grid[1]) against the
        ## RAW value value.grid[n.grid]; both end points must go through
        ## sign() for the "no sign change" test to be meaningful.
        if (sign(value.grid[1]) == sign(value.grid[n.grid])) {
            return(list(par = NA,
                        value = NA,
                        counts = 0,
                        cv = 1,
                        message = "Cannot find a solution because the function does not change sign \n"))
        }
        if (increasing[[1]] && value.grid[[1]] > value.grid[[n.grid]]) {
            return(list(par = NA,
                        value = NA,
                        counts = 0,
                        cv = 1,
                        message = "Cannot find a solution - argument \'increasing\' does not match the variations of the functions \n"))
        }
        if (!increasing[[1]] && value.grid[[1]] < value.grid[[n.grid]]) {
            return(list(par = NA,
                        value = NA,
                        counts = 0,
                        cv = 1,
                        message = "Cannot find a solution - argument \'increasing\' does not match the variations of the functions \n"))
        }
    }

    ### ** Expore the grid using dichotomic search
    while (iter[[1]] <= n.grid[[1]] && ncv[[1]] == TRUE && length(iSet) > 0) {
        iMiddle <- ceiling(length(iSet) / 2)
        iIndexInSet <- iSet[iMiddle]
        if (check[[1]] == FALSE || iIndexInSet %in% c(1, n.grid) == FALSE) {
            ## if the current index we are looking at has not already been computed,
            ## then evaluate the objective function.
            ## this is only the case when check is TRUE and we look at the borders
            value.grid[iIndexInSet] <- fn(grid[iIndexInSet])
        }
        if (is.na(value.grid[iIndexInSet])) {
            ## handle NA value by just removing the observation from the set
            ## of possibilities.
            ## BUG FIX: setdiff(iSet, iMiddle) removed the VALUE iMiddle from
            ## iSet, not the element at POSITION iMiddle; drop by position.
            iSet <- iSet[-iMiddle]
            iter <- iter + 1
        } else if (factor * value.grid[iIndexInSet] > tol) {
            ## look in subgrid corresponding to the lowest values (left part)
            iSet <- iSet[setdiff(1:iMiddle, iMiddle)]
            iter <- iter + 1
        } else if (factor * value.grid[iIndexInSet] < -tol) {
            ## look in subgrid corresponding to the largest values (right part)
            iN.set <- length(iSet)
            iSet <- iSet[setdiff(iMiddle:iN.set, iMiddle)]
            iter <- iter + 1
        } else {
            ## convergence: |fn| within tol at this grid point
            ncv <- FALSE
            solution <- grid[iIndexInSet]
            value <- value.grid[iIndexInSet]
        }
    }

    ### ** If did not find a value whose image matched tol, give the closest solution
    if (ncv) {
        iIndexInSet <- which.min(abs(value.grid))
        ncv <- FALSE
        solution <- grid[iIndexInSet]
        value <- value.grid[iIndexInSet]
    }

    return(list(par = solution,
                value = value,
                ## grid = setNames(value.grid,grid),
                counts = iter,
                cv = ncv,
                message = NULL))
}

## * boot2pvalue - Documentation
#' @title Compute the p.value from the distribution under H1
#' @description Compute the p.value associated with the estimated statistic
#' using a bootstrap sample of its distribution under H1.
#'
#' @param x [numeric vector] a vector of bootstrap estimates of the statistic.
#' @param null [numeric] value of the statistic under the null hypothesis.
#' @param estimate [numeric] the estimated statistic.
#' @param FUN.ci [function] the function used to compute the confidence interval.
#' Must take \code{x}, \code{alternative}, \code{conf.level} and \code{sign.estimate} as arguments
#' and only return the relevant limit (either upper or lower) of the confidence interval.
#' @param alternative [character] a character string specifying the alternative hypothesis, must be one of "two.sided" (default), "greater" or "less".
#' @param tol [numeric] the absolute convergence tolerance.
#' @details
#' For test statistic close to 0, this function returns 1. \cr \cr
#'
#' For positive test statistic, this function search the quantile alpha such that:
#'\itemize{
#' \item \code{quantile(x, probs = alpha)=0} when the argument alternative is set to \code{"greater"}.
#' \item \code{quantile(x, probs = 0.5*alpha)=0} when the argument alternative is set to \code{"two.sided"}.
#' }
#' If the argument alternative is set to \code{"less"}, it returns 1. \cr \cr
#'
#' For negative test statistic, this function search the quantile alpha such that:
#' \itemize{
#' \item \code{quantile(x, probs = 1-alpha)=0} when the argument alternative is set to \code{"less"}.
#' \item \code{quantile(x, probs = 1-0.5*alpha)=0} when the argument alternative is set to \code{"two.sided"}.
#' }
#' If the argument alternative is set to \code{"greater"}, it returns 1.
#'
#' @examples
#' set.seed(10)
#'
#' #### no effect ####
#' x <- rnorm(1e3)
#' boot2pvalue(x, null = 0, estimate = mean(x), alternative = "two.sided")
#' ## expected value of 1
#' boot2pvalue(x, null = 0, estimate = mean(x), alternative = "greater")
#' ## expected value of 0.5
#' boot2pvalue(x, null = 0, estimate = mean(x), alternative = "less")
#' ## expected value of 0.5
#'
#' #### positive effect ####
#' x <- rnorm(1e3, mean = 1)
#' boot2pvalue(x, null = 0, estimate = 1, alternative = "two.sided")
#' ## expected value of 0.32 = 2*pnorm(q = 0, mean = -1) = 2*mean(x<=0)
#' boot2pvalue(x, null = 0, estimate = 1, alternative = "greater")
#' ## expected value of 0.16 = pnorm(q = 0, mean = 1) = mean(x<=0)
#' boot2pvalue(x, null = 0, estimate = 1, alternative = "less")
#' ## expected value of 0.84 = 1-pnorm(q = 0, mean = 1) = mean(x>=0)
#'
#' #### negative effect ####
#' x <- rnorm(1e3, mean = -1)
#' boot2pvalue(x, null = 0, estimate = -1, alternative = "two.sided")
#' ## expected value of 0.32 = 2*(1-pnorm(q = 0, mean = -1)) = 2*mean(x>=0)
#' boot2pvalue(x, null = 0, estimate = -1, alternative = "greater")
#' ## expected value of 0.84 = pnorm(q = 0, mean = -1) = mean(x<=0)
#' boot2pvalue(x, null = 0, estimate = -1, alternative = "less") # pnorm(q = 0, mean = -1)
#' ## expected value of 0.16 = 1-pnorm(q = 0, mean = -1) = mean(x>=0)

## * boot2pvalue
#' @rdname boot2pvalue
#' @export
boot2pvalue <- function(x, null, estimate = NULL, alternative = "two.sided",
                        FUN.ci = quantileCI, tol = .Machine$double.eps ^ 0.5) {

    x.boot <- na.omit(x)
    n.boot <- length(x.boot)
    ## test statistic implied by the bootstrap sample
    statistic.boot <- mean(x.boot) - null
    if (is.null(estimate)) {
        statistic <- statistic.boot
    } else {
        statistic <- estimate - null
        if (sign(statistic.boot) != sign(statistic)) {
            warning("the estimate and the average bootstrap estimate do not have same sign \n")
        }
    }
    sign.statistic <- statistic >= 0

    if (abs(statistic) < tol) { ## too small test statistic
        p.value <- 1
    } else if (n.boot < 10) { ## too few bootstrap samples
        p.value <- as.numeric(NA)
    } else if (all(x.boot > null)) { ## clear p.value
        p.value <- switch(alternative,
                          "two.sided" = 0,
                          "less" = 1,
                          "greater" = 0)
    } else if (all(x.boot < null)) { ## clear p.value
        p.value <- switch(alternative,
                          "two.sided" = 0,
                          "less" = 0,
                          "greater" = 1)
    } else { ## need search to obtain p.value
        ## when the p.value=1-coverage increases, does the quantile increases?
        increasing <- switch(alternative,
                             "two.sided" = sign.statistic,
                             "less" = FALSE,
                             "greater" = TRUE)
        ## grid of confidence level
        grid <- seq(0, by = 1/n.boot, length.out = n.boot)
        ## search for critical confidence level: the p.value at which the
        ## relevant CI limit crosses the null value
        resSearch <- discreteRoot(fn = function(p.value) {
            CI <- FUN.ci(x = x.boot,
                         p.value = p.value,
                         alternative = alternative,
                         sign.estimate = sign.statistic)
            return(CI[1] - null)
        },
        grid = grid,
        increasing = increasing,
        check = FALSE)

        ## check change sign around the returned quantile: the CI limit must
        ## cross the null between par-1/n.boot and par+1/n.boot
        sign.before <- sign(FUN.ci(x = x.boot,
                                   p.value = max(0, resSearch$par - 1/n.boot),
                                   alternative = alternative,
                                   sign.estimate = sign.statistic) - null)
        sign.after <- sign(FUN.ci(x = x.boot,
                                  p.value = min(1, resSearch$par + 1/n.boot),
                                  alternative = alternative,
                                  sign.estimate = sign.statistic) - null)
        ##
        if (is.na(resSearch$value[[1]]) || is.na(sign.before[[1]]) || is.na(sign.after[[1]]) ||
            length(resSearch$value) == 0 ||
            resSearch$par[[1]] < 0 || resSearch$par[[1]] > 1 ||
            sign.before[[1]] == sign.after[[1]]) {
            warning("incorrect convergence of the algorithm finding the critical quantile \n",
                    "p-value may not be reliable \n")
        }
        p.value <- resSearch$par
    }

    if (p.value %in% c(0, 1)) {
        message("Estimated p-value of ", p.value,
                " - consider increasing the number of bootstrap samples \n")
    }

    return(p.value)
}

## * quantileCI
## Return the single CI limit relevant for the requested alternative:
## for "two.sided", the lower limit when the estimate is positive and the
## upper limit otherwise.
quantileCI <- function(x, alternative, p.value, sign.estimate, ...) {
    probs <- switch(alternative,
                    "two.sided" = c(p.value/2, 1 - p.value/2)[2 - sign.estimate], ## if positive p.value/2 otherwise 1-p.value/2
                    "less" = 1 - p.value,
                    "greater" = p.value)
    return(quantile(x, probs = probs)[1])
}

##----------------------------------------------------------------------
### discreteRoot.R ends here
\name{get.model} \alias{get.model} \title{Get Model} \description{ Get the model for a node. } \usage{ get.model(domain, node, model.nodes) } \arguments{ \item{domain}{an RHugin domain.} \item{node}{a character string specifying the name of a node in \code{domain}.} \item{model.nodes}{an optional character vector of \emph{model nodes}. If provided, an empty model suitable for \code{node} is returned. Use \code{model.nodes = character(0)} to create an empty model containing a single expression.} } \details{ This function has two uses. The first is simply to retrieve the model from a node (an error is generated if the node does not have a model). In this case, the \code{model.nodes} argument must be omitted. The second use is to create a template for a node's model. In this case, the \code{model.nodes} argument must be provided. } \value{ a \code{data.frame}-like object with class \code{RHugin.model}. There is one column for each model node and a final column named \code{Expression} containing the expression for each configuration of the model nodes. When there are no model nodes, the \code{Expression} column will have a single entry containing the model's expression. } \references{ HUGIN API Reference Manual \url{http://download.hugin.com/webdocs/manuals/api-manual.pdf}: \code{h_node_new_model} and \code{h_node_get_model}. } \author{Kjell Konis \email{kjell.konis@icloud.com}} \examples{ # Create an RHugin domain hd <- hugin.domain() # Add node add.node(hd, "Node", states = 0:10) # Generate a template for Node's model model <- get.model(hd, "Node", character(0)) # Enter an expression can call set.model model$Expression <- "Poisson (2.25)" set.model(hd, "Node", model) # Retrieve the model from Note get.model(hd, "Node") } \keyword{programming}
/man/get.model.Rd
no_license
huginexpert/RHugin
R
false
false
1,799
rd
\name{get.model} \alias{get.model} \title{Get Model} \description{ Get the model for a node. } \usage{ get.model(domain, node, model.nodes) } \arguments{ \item{domain}{an RHugin domain.} \item{node}{a character string specifying the name of a node in \code{domain}.} \item{model.nodes}{an optional character vector of \emph{model nodes}. If provided, an empty model suitable for \code{node} is returned. Use \code{model.nodes = character(0)} to create an empty model containing a single expression.} } \details{ This function has two uses. The first is simply to retrieve the model from a node (an error is generated if the node does not have a model). In this case, the \code{model.nodes} argument must be omitted. The second use is to create a template for a node's model. In this case, the \code{model.nodes} argument must be provided. } \value{ a \code{data.frame}-like object with class \code{RHugin.model}. There is one column for each model node and a final column named \code{Expression} containing the expression for each configuration of the model nodes. When there are no model nodes, the \code{Expression} column will have a single entry containing the model's expression. } \references{ HUGIN API Reference Manual \url{http://download.hugin.com/webdocs/manuals/api-manual.pdf}: \code{h_node_new_model} and \code{h_node_get_model}. } \author{Kjell Konis \email{kjell.konis@icloud.com}} \examples{ # Create an RHugin domain hd <- hugin.domain() # Add node add.node(hd, "Node", states = 0:10) # Generate a template for Node's model model <- get.model(hd, "Node", character(0)) # Enter an expression can call set.model model$Expression <- "Poisson (2.25)" set.model(hd, "Node", model) # Retrieve the model from Note get.model(hd, "Node") } \keyword{programming}
library(dplyr) library(class) library(e1071) library(xgboost) library(magrittr) library(dplyr) library(Matrix) set.seed(123) sample_split = function(data,size,n){ sample_data <- data[sample(1:nrow(data), size*3), ] sample_split <-split(sample_data, rep(1:3, length.out = nrow(sample_data), each = ceiling(nrow(sample_data)/3))) return(sample_split[[n]]) } normalize <- function(x) { return ((x - min(x)) / (max(x) - min(x))) } train_data = read.csv('train_data.csv') test_data = read.csv('test_data.csv') sample_size = c(5000, 10000, 20000) dat_500_1 = sample_split(train_data, sample_size[1], 1) SVM = function(data){ model = svm(as.factor(label) ~ ., data = data, kernel = "linear", scale = T) pred = predict(model, test_data[,-1]) return(pred) } XGB = function(data){ train.label = as.integer(as.factor(data$label))-1 train_matrix = as.matrix(data[,-1]) test.label = as.integer(as.factor(test_data$label))-1 test_matrix = as.matrix(test_data[,-1]) xgb.train = xgb.DMatrix(data=train_matrix,label=train.label) xgb.test = xgb.DMatrix(data=test_matrix,label=test.label) model = xgboost(data = xgb.train, max.depth = 30, eta = 0.001, nthread = 2, nrounds = 2, num_class = length(unique(data$label)), objective = "multi:softprob") pred <- predict(model, newdata = xgb.test,reshape=T) pred = as.data.frame(pred) colnames(pred) = levels(as.factor(test_data$label)) pred$prediction = apply(pred,1,function(x) colnames(pred)[which.max(x)]) pred$label = levels(as.factor(test_data$label))[train.label+1] return(pred$prediction) } models = c(SVM, XGB) ## add your models scoreboard = data.frame() for (k in 1:length(models)) { for (i in 1:length(sample_size)) { for (j in 1:3) { train = sample_split(train_data, sample_size[i], j) start_time <- Sys.time() pred = models[[k]](train) end_time <- Sys.time() sys_time = end_time - start_time Model = paste('Model',k) Data = paste('dat_',sample_size[i],'_',j, sep = '') A = sample_size[i]/60000 B = min(1,sys_time/60) C = sum(test_data$label == 
pred)/NROW(test_data$label) Points = 0.15 * A + 0.1 * B + 0.75 * C score_row = data.frame(Model, 'Sample Size' = sample_size[i], Data, A, B, C, Points) scoreboard = rbind(scoreboard,score_row) } }} print(scoreboard)
/Midterm Project - Heyden.R
no_license
drd4/Practical-Machine-Learning-Project
R
false
false
2,410
r
library(dplyr) library(class) library(e1071) library(xgboost) library(magrittr) library(dplyr) library(Matrix) set.seed(123) sample_split = function(data,size,n){ sample_data <- data[sample(1:nrow(data), size*3), ] sample_split <-split(sample_data, rep(1:3, length.out = nrow(sample_data), each = ceiling(nrow(sample_data)/3))) return(sample_split[[n]]) } normalize <- function(x) { return ((x - min(x)) / (max(x) - min(x))) } train_data = read.csv('train_data.csv') test_data = read.csv('test_data.csv') sample_size = c(5000, 10000, 20000) dat_500_1 = sample_split(train_data, sample_size[1], 1) SVM = function(data){ model = svm(as.factor(label) ~ ., data = data, kernel = "linear", scale = T) pred = predict(model, test_data[,-1]) return(pred) } XGB = function(data){ train.label = as.integer(as.factor(data$label))-1 train_matrix = as.matrix(data[,-1]) test.label = as.integer(as.factor(test_data$label))-1 test_matrix = as.matrix(test_data[,-1]) xgb.train = xgb.DMatrix(data=train_matrix,label=train.label) xgb.test = xgb.DMatrix(data=test_matrix,label=test.label) model = xgboost(data = xgb.train, max.depth = 30, eta = 0.001, nthread = 2, nrounds = 2, num_class = length(unique(data$label)), objective = "multi:softprob") pred <- predict(model, newdata = xgb.test,reshape=T) pred = as.data.frame(pred) colnames(pred) = levels(as.factor(test_data$label)) pred$prediction = apply(pred,1,function(x) colnames(pred)[which.max(x)]) pred$label = levels(as.factor(test_data$label))[train.label+1] return(pred$prediction) } models = c(SVM, XGB) ## add your models scoreboard = data.frame() for (k in 1:length(models)) { for (i in 1:length(sample_size)) { for (j in 1:3) { train = sample_split(train_data, sample_size[i], j) start_time <- Sys.time() pred = models[[k]](train) end_time <- Sys.time() sys_time = end_time - start_time Model = paste('Model',k) Data = paste('dat_',sample_size[i],'_',j, sep = '') A = sample_size[i]/60000 B = min(1,sys_time/60) C = sum(test_data$label == 
pred)/NROW(test_data$label) Points = 0.15 * A + 0.1 * B + 0.75 * C score_row = data.frame(Model, 'Sample Size' = sample_size[i], Data, A, B, C, Points) scoreboard = rbind(scoreboard,score_row) } }} print(scoreboard)
testlist <- list(testX = c(191493125665849920, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), trainX = structure(c(1.78844646178735e+212, 1.93075223605916e+156, 121373.193669204, 1.26689771433298e+26, 2.46020195254853e+129, 8.54794497535107e-83, 2.61907806894971e-213, 1.5105425626729e+200, 6.51877713351675e+25, 4.40467528702727e-93, 7.6427933587945, 34208333744.1307, 1.6400690920442e-111, 3.9769673154778e-304, 4.76127371594362e-307, 8.63819952335095e+122, 1.18662128550178e-59, 1128.83285802937, 3.80478583615452e-72, 1.21321365773924e-195, 9.69744674150153e-268, 8.98899319496613e+272, 7.63669788330223e+285, 3.85830749537493e+266, 2.65348875902107e+136, 8.14965241967603e+92, 6.64773495141057e-171, 1.55228780425777e-91, 8.25550184376779e+105, 1.18572662524891e+134, 1.04113208597565e+183, 1.01971211553913e-259, 1.23680594512923e-165, 5.24757023065221e+62, 3.41816623041351e-96 ), .Dim = c(5L, 7L))) result <- do.call(dann:::calc_distance_C,testlist) str(result)
/dann/inst/testfiles/calc_distance_C/AFL_calc_distance_C/calc_distance_C_valgrind_files/1609867321-test.R
no_license
akhikolla/updated-only-Issues
R
false
false
1,199
r
testlist <- list(testX = c(191493125665849920, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), trainX = structure(c(1.78844646178735e+212, 1.93075223605916e+156, 121373.193669204, 1.26689771433298e+26, 2.46020195254853e+129, 8.54794497535107e-83, 2.61907806894971e-213, 1.5105425626729e+200, 6.51877713351675e+25, 4.40467528702727e-93, 7.6427933587945, 34208333744.1307, 1.6400690920442e-111, 3.9769673154778e-304, 4.76127371594362e-307, 8.63819952335095e+122, 1.18662128550178e-59, 1128.83285802937, 3.80478583615452e-72, 1.21321365773924e-195, 9.69744674150153e-268, 8.98899319496613e+272, 7.63669788330223e+285, 3.85830749537493e+266, 2.65348875902107e+136, 8.14965241967603e+92, 6.64773495141057e-171, 1.55228780425777e-91, 8.25550184376779e+105, 1.18572662524891e+134, 1.04113208597565e+183, 1.01971211553913e-259, 1.23680594512923e-165, 5.24757023065221e+62, 3.41816623041351e-96 ), .Dim = c(5L, 7L))) result <- do.call(dann:::calc_distance_C,testlist) str(result)
ui<- fluidPage( tags$head( tags$style( ".h2,h2 { font-size: 50px; font-weight: bold; font-family: Microsoft JhengHei; color: white; position: relative; top: 15px;};"), tags$style( "#currentTime{ color: white; font-size: 80px; font-weight: bold; font-family: Century Gothic; position: relative; top: 120px; };"), tags$style( "#text1{ color: white; font-size: 22px; font-weight: bold; font-family: Microsoft JhengHei; position: relative; top: 160px; left: 20px; };"), tags$style( "#text2{ color: white; font-size: 22px; font-weight: bold; font-family: Microsoft JhengHei; position: relative; top: 160px; left: 30px; };"), tags$style( "#text3{ color: white; font-size: 22px; font-weight: bold; font-family: Microsoft JhengHei; position: relative; top: 160px; left: 30px; };"), tags$style( "#projecttable{ position: relative; top: 50px; border: 0px;};"), tags$style( "#donut_plot1{ position: relative; top: -150px; left: -10px};"), tags$style( "#donut_plot2{ position: relative; top: -150px; left: -10px};"), tags$style( "#casemeantime_plot{ position: relative; top: -150px; left: -10px};"), tags$style( "#day30bar{ position: relative; top: -280px; left: -25px};"), tags$style( "#myplot_dataplot{ position: relative; top: 75px};"), tags$style( "#day180_done_area{ position: relative; top: 120px};"), tags$style( "#projectplot{ position: relative; left: -350px};"), tags$style( "#case_upper{ color: white; font-size: 15px; font-weight: bold; font-family: Microsoft JhengHei; position: relative; top: 110px; left: -25px };"), tags$style( "#case_lower{ color: white; font-size: 15px; font-weight: bold; font-family: Microsoft JhengHei; position: relative; top: 270px; left: -25px };") ), titlePanel("2019-01-21 XX部案件Dashboard") , fluidRow( column(2, DTOutput("projecttable"), textOutput("currentTime") ), column(7, plotOutput("projectplot",width = "160%" ,height = "700px"), plotOutput("myplot_dataplot",width = "100%" ,height = "125px"), plotOutput("day180_done_area",width = "102%" ,height = "150px") ), 
column(1, textOutput("text1"),plotOutput("casemeantime_plot",width = "125%" ,height = "400px"), textOutput("case_upper"), textOutput("case_lower")), column(1, textOutput("text2"),plotOutput("donut_plot1",width = "125%" ,height = "400px")), column(1, textOutput("text3"),plotOutput("donut_plot2",width = "125%" ,height = "400px")), fluidRow( fixedRow(column(3,plotOutput("day30bar",width = "100%" ,height = "900px"))) ) ), setBackgroundColor(color = "black") )
/dashboard_for_workload/ui.R
no_license
renardbao/shiny_project
R
false
false
3,225
r
ui<- fluidPage( tags$head( tags$style( ".h2,h2 { font-size: 50px; font-weight: bold; font-family: Microsoft JhengHei; color: white; position: relative; top: 15px;};"), tags$style( "#currentTime{ color: white; font-size: 80px; font-weight: bold; font-family: Century Gothic; position: relative; top: 120px; };"), tags$style( "#text1{ color: white; font-size: 22px; font-weight: bold; font-family: Microsoft JhengHei; position: relative; top: 160px; left: 20px; };"), tags$style( "#text2{ color: white; font-size: 22px; font-weight: bold; font-family: Microsoft JhengHei; position: relative; top: 160px; left: 30px; };"), tags$style( "#text3{ color: white; font-size: 22px; font-weight: bold; font-family: Microsoft JhengHei; position: relative; top: 160px; left: 30px; };"), tags$style( "#projecttable{ position: relative; top: 50px; border: 0px;};"), tags$style( "#donut_plot1{ position: relative; top: -150px; left: -10px};"), tags$style( "#donut_plot2{ position: relative; top: -150px; left: -10px};"), tags$style( "#casemeantime_plot{ position: relative; top: -150px; left: -10px};"), tags$style( "#day30bar{ position: relative; top: -280px; left: -25px};"), tags$style( "#myplot_dataplot{ position: relative; top: 75px};"), tags$style( "#day180_done_area{ position: relative; top: 120px};"), tags$style( "#projectplot{ position: relative; left: -350px};"), tags$style( "#case_upper{ color: white; font-size: 15px; font-weight: bold; font-family: Microsoft JhengHei; position: relative; top: 110px; left: -25px };"), tags$style( "#case_lower{ color: white; font-size: 15px; font-weight: bold; font-family: Microsoft JhengHei; position: relative; top: 270px; left: -25px };") ), titlePanel("2019-01-21 XX部案件Dashboard") , fluidRow( column(2, DTOutput("projecttable"), textOutput("currentTime") ), column(7, plotOutput("projectplot",width = "160%" ,height = "700px"), plotOutput("myplot_dataplot",width = "100%" ,height = "125px"), plotOutput("day180_done_area",width = "102%" ,height = "150px") ), 
column(1, textOutput("text1"),plotOutput("casemeantime_plot",width = "125%" ,height = "400px"), textOutput("case_upper"), textOutput("case_lower")), column(1, textOutput("text2"),plotOutput("donut_plot1",width = "125%" ,height = "400px")), column(1, textOutput("text3"),plotOutput("donut_plot2",width = "125%" ,height = "400px")), fluidRow( fixedRow(column(3,plotOutput("day30bar",width = "100%" ,height = "900px"))) ) ), setBackgroundColor(color = "black") )
#Plot 3 with(study_data, plot(datetime, Sub_metering_1, type = "l", xlab = "", ylab = "Energy sub metering")) with(study_data, lines(datetime, Sub_metering_2, type = "l", col = "red")) with(study_data, lines(datetime, Sub_metering_3, type = "l", col = "blue")) legend("topright", lty=1, col = c("black", "red", "blue"), legend = c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3")) dev.copy(png, file = "plot3.png", width = 480, height = 480) dev.off()
/plot3.R
no_license
yuanyuantulip/Exploratory-Data-Analysis-course-project-1
R
false
false
488
r
#Plot 3 with(study_data, plot(datetime, Sub_metering_1, type = "l", xlab = "", ylab = "Energy sub metering")) with(study_data, lines(datetime, Sub_metering_2, type = "l", col = "red")) with(study_data, lines(datetime, Sub_metering_3, type = "l", col = "blue")) legend("topright", lty=1, col = c("black", "red", "blue"), legend = c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3")) dev.copy(png, file = "plot3.png", width = 480, height = 480) dev.off()
# validateModel.R # third piece of code for the clearScience Demo Project # takes the output from buildModel(), and fits that model # on held out validation data validateModel <- function(returnTwo){ # SOURCE LIBRARIES require(randomForest) require(ggplot2) require(ROCR) # DEFINE VARIABLES rfERFit <- returnTwo$rfERFit validExpress <- returnTwo$returnOne$validExpress validScore <- returnTwo$returnOne$validScore # VALIDATE & VISUALIZE WITH HELD OUT VALIDATION COHORT cat("[1] Evaluating predictions on held out validation set\n") validScoreHat <- predict(rfERFit, t(validExpress), type = "prob") validScoreHat <- validScoreHat[ , 2] validScoreDF <- as.data.frame(cbind(validScore, validScoreHat)) colnames(validScoreDF) <- c("yValid", "yValidHat") cat("[2] Producing diagnostic boxplot of predictions in the held out validation set\n") validBoxPlot <- ggplot(validScoreDF, aes(factor(yValid), yValidHat)) + geom_boxplot() + geom_jitter(aes(colour = as.factor(yValid)), size = 4) + opts(title = "ER Random Forest Model Indepenedent Validation Set") + ylab("Validation Set ER Prediction") + xlab("True ER Status") + opts(plot.title = theme_text(size = 14)) # Alternative visualization (density plots) cat("[3] Producing diagnostic density plot of predictions in the held out validation set\n") validDensPlot <- ggplot(validScoreDF, aes(yValidHat, fill = factor(yValid))) + geom_density(alpha = 0.3) + ylab("Density") + xlab("True ER Status") + opts(plot.title = theme_text(size = 14)) # EVALUATE VALIDATION MODEL PERFORMANCE erPred <- prediction(as.numeric(validScoreHat), as.numeric(validScore)) erPerf <- performance(erPred, "tpr", "fpr") erAUC <- performance(erPred, "auc") # FIND YOUDEN'S J POINT AND OPTIMAL SENSITIVITY AND SPECIFICITY erRFPerf <- performance(erPred, "sens", "spec") youdensJ <- erRFPerf@x.values[[1]] + erRFPerf@y.values[[1]] - 1 jMax <- which.max(youdensJ) optCut <- erPerf@alpha.values[[1]][jMax] optSens <- unlist(erRFPerf@x.values)[jMax] optSpec <- 
unlist(erRFPerf@y.values)[jMax] rankSum <- wilcox.test(validScoreHat[validScore == 0], validScoreHat[validScore == 1]) ## CREATE A ROC CURVE USING GGPLOT cat("[4] Assessing model performance via ROC curve in held out validation set\n") dfPerf <- as.data.frame(cbind(unlist(erPerf@x.values), unlist(erPerf@y.values))) colnames(dfPerf) <- c("FalsePositiveRate", "TruePositiveRate") rocCurve <- ggplot(dfPerf, aes(FalsePositiveRate, TruePositiveRate)) + geom_line() + geom_abline(slope = 1, colour = "red") + opts(title = "Validation Cohort ROC Curve") + ylab("False Positive Rate") + xlab("True Positive Rate") + opts(plot.title = theme_text(size = 14)) ## RETURN return(list("validScoreDF" = validScoreDF, "validBoxPlot" = validBoxPlot, "validDensPlot" = validDensPlot, "rankSum" = rankSum, "rocCurve" = rocCurve, "sensitivity" = optSens, "specificity" = optSpec, "auc" = erAUC@y.values)) }
/analysisFunctions/validateModel.R
no_license
erichhuang/clearScience-demo
R
false
false
3,340
r
# validateModel.R # third piece of code for the clearScience Demo Project # takes the output from buildModel(), and fits that model # on held out validation data validateModel <- function(returnTwo){ # SOURCE LIBRARIES require(randomForest) require(ggplot2) require(ROCR) # DEFINE VARIABLES rfERFit <- returnTwo$rfERFit validExpress <- returnTwo$returnOne$validExpress validScore <- returnTwo$returnOne$validScore # VALIDATE & VISUALIZE WITH HELD OUT VALIDATION COHORT cat("[1] Evaluating predictions on held out validation set\n") validScoreHat <- predict(rfERFit, t(validExpress), type = "prob") validScoreHat <- validScoreHat[ , 2] validScoreDF <- as.data.frame(cbind(validScore, validScoreHat)) colnames(validScoreDF) <- c("yValid", "yValidHat") cat("[2] Producing diagnostic boxplot of predictions in the held out validation set\n") validBoxPlot <- ggplot(validScoreDF, aes(factor(yValid), yValidHat)) + geom_boxplot() + geom_jitter(aes(colour = as.factor(yValid)), size = 4) + opts(title = "ER Random Forest Model Indepenedent Validation Set") + ylab("Validation Set ER Prediction") + xlab("True ER Status") + opts(plot.title = theme_text(size = 14)) # Alternative visualization (density plots) cat("[3] Producing diagnostic density plot of predictions in the held out validation set\n") validDensPlot <- ggplot(validScoreDF, aes(yValidHat, fill = factor(yValid))) + geom_density(alpha = 0.3) + ylab("Density") + xlab("True ER Status") + opts(plot.title = theme_text(size = 14)) # EVALUATE VALIDATION MODEL PERFORMANCE erPred <- prediction(as.numeric(validScoreHat), as.numeric(validScore)) erPerf <- performance(erPred, "tpr", "fpr") erAUC <- performance(erPred, "auc") # FIND YOUDEN'S J POINT AND OPTIMAL SENSITIVITY AND SPECIFICITY erRFPerf <- performance(erPred, "sens", "spec") youdensJ <- erRFPerf@x.values[[1]] + erRFPerf@y.values[[1]] - 1 jMax <- which.max(youdensJ) optCut <- erPerf@alpha.values[[1]][jMax] optSens <- unlist(erRFPerf@x.values)[jMax] optSpec <- 
unlist(erRFPerf@y.values)[jMax] rankSum <- wilcox.test(validScoreHat[validScore == 0], validScoreHat[validScore == 1]) ## CREATE A ROC CURVE USING GGPLOT cat("[4] Assessing model performance via ROC curve in held out validation set\n") dfPerf <- as.data.frame(cbind(unlist(erPerf@x.values), unlist(erPerf@y.values))) colnames(dfPerf) <- c("FalsePositiveRate", "TruePositiveRate") rocCurve <- ggplot(dfPerf, aes(FalsePositiveRate, TruePositiveRate)) + geom_line() + geom_abline(slope = 1, colour = "red") + opts(title = "Validation Cohort ROC Curve") + ylab("False Positive Rate") + xlab("True Positive Rate") + opts(plot.title = theme_text(size = 14)) ## RETURN return(list("validScoreDF" = validScoreDF, "validBoxPlot" = validBoxPlot, "validDensPlot" = validDensPlot, "rankSum" = rankSum, "rocCurve" = rocCurve, "sensitivity" = optSens, "specificity" = optSpec, "auc" = erAUC@y.values)) }
rm(list = ls()) library(Daniel) library(dplyr) library(nnet) CalcCImultinom <- function(fit) { s <- summary(fit) coef <- s$coefficients ses <- s$standard.errors ci.1 <- coef[1,2] + c(-1, 1)*1.96*ses[1, 2] ci.2 <- coef[2,2] + c(-1, 1)*1.96*ses[2, 2] return(rbind(ci.1,ci.2)) } #key # A, B,C,D,E,F - betaE[2] = 1.25, 1.5, 1.75, 2, 2.25, 2.5 # A,B,C, D, E,F - betaU = 2,3,4,5,6,7 patt <- "EE" beta0 <- c(-6, -5) betaE <- c(log(2.5), log(2.25)) betaU <- c(log(6), log(1/1.5)) sigmaU <- 1 n.sample <- 50000 n.sim <- 1000 AllY <- matrix(nr = n.sim, nc = 3) sace.diff1 <- sace.diff2 <- ace.diff1 <- ace.diff2 <- sace.or1 <- sace.or2 <- ace.or1 <- ace.or2 <- or.approx1 <- or.approx2 <- or.approx.true1 <- or.approx.true2 <- pop.never.s1 <- pop.never.s2 <- vector(length = n.sim) ci1 <- ci2 <- matrix(nr = n.sim, nc = 2) for (j in 1:n.sim) { CatIndex(j) # Simulate genetic score U <- rnorm(n.sample, 0, sd = sigmaU) #### Calcualte probabilites for each subtype with and without the exposure #### e1E0 <- exp(beta0[1] + betaU[1]*U) e1E1 <- exp(beta0[1] + betaE[1] + betaU[1]*U) e2E0 <- exp(beta0[2] + betaU[2]*U) e2E1 <- exp(beta0[2] + betaE[2] + betaU[2]*U) prE0Y1 <- e1E0/(1 + e1E0 + e2E0) prE0Y2 <- e2E0/(1 + e1E0 + e2E0) prE1Y1 <- e1E1/(1 + e1E1 + e2E1) prE1Y2 <- e2E1/(1 + e1E1 + e2E1) probsE0 <- cbind(prE0Y1, prE0Y2, 1 - prE0Y1 - prE0Y2) probsE1 <- cbind(prE1Y1, prE1Y2, 1 - prE1Y1 - prE1Y2) # Simulate subtypes # Yctrl <- Ytrt <- vector(length = n.sample) X <- rbinom(n = n.sample, 1, 0.5) for (i in 1:n.sample) { Yctrl[i] <- sample(c(1,2,0), 1, replace = T, prob = probsE0[i, ]) Ytrt[i] <- sample(c(1,2,0), 1, replace = T, prob = probsE1[i, ]) } Y <- (1-X)*Yctrl + X*Ytrt AllY[j, ] <- table(Y) Y1ctrl <- Yctrl==1 Y1trt <- Ytrt==1 Y2ctrl <- Yctrl==2 Y2trt <- Ytrt==2 pop.never.s1[j] <- mean(Y1ctrl==0 & Y1trt==0) pop.never.s2[j] <- mean(Y2ctrl==0 & Y2trt==0) # estimate causal parameters sace.diff1[j] <- mean((Y1trt - Y1ctrl)[Y2ctrl==0 & Y2trt==0]) sace.diff2[j]<- mean((Y2trt - Y2ctrl)[Y1ctrl==0 & 
Y1trt==0]) ace.diff1[j] <- mean((Y1trt[Y2trt==0 & X==1]) - mean(Y1ctrl[Y2ctrl==0 & X==0])) ace.diff2[j] <- mean((Y2trt[Y1trt==0 & X==1]) - mean(Y2ctrl[Y1ctrl==0 & X==0])) # Ypo <- c(Yctrl, Ytrt) # Upo <- rep(U,2) # Xpo <- rep(x = c(0,1), each = n.sample) # fit.full.po <- multinom(Ypo~ Xpo + Upo) # fit.po <- multinom(Ypo~ Xpo) fit <- multinom(Y~ X) cis <- CalcCImultinom(fit) ci1[j, ] <- cis[1, ] ci2[j, ] <- cis[2, ] Y1only <- Y[Y<2] X1only <- X[Y<2] U1only <-U[Y<2] Y2only <- Y[Y!=1] X2only <- X[Y!=1] U2only <-U[Y!=1] Y2only[Y2only>0] <- 1 vec.for.or.1only <- c(sum((1 - Y1only) * (1 - X1only)) , sum(Y1only * (1 - X1only)), sum((1 - Y1only) * X1only), sum(Y1only*X1only)) vec.for.or.2only <- c(sum((1 - Y2only) * (1 - X2only)) , sum(Y2only * (1 - X2only)), sum((1 - Y2only) * X2only), sum(Y2only*X2only)) ace.or1[j] <- CalcOR(vec.for.or.1only) ace.or2[j] <- CalcOR(vec.for.or.2only) Y1only.sace <- Y[Ytrt <2 & Yctrl < 2] X1only.sace <- X[Ytrt <2 & Yctrl < 2] U1only.sace <-U[Ytrt <2 & Yctrl < 2] Y2only.sace <- Y[Ytrt!=1 & Y1ctrl!=1] X2only.sace <- X[Ytrt!=1 & Y1ctrl!=1] U2only.sace <-U[Ytrt!=1 & Y1ctrl!=1] Y2only.sace[Y2only.sace>0] <- 1 vec.for.or.sace1 <- c(sum((1 - Y1only.sace) * (1 - X1only.sace)) , sum(Y1only.sace * (1 - X1only.sace)), sum((1 - Y1only.sace) * X1only.sace), sum(Y1only.sace*X1only.sace)) vec.for.or.sace2 <- c(sum((1 - Y2only.sace) * (1 - X2only.sace)) , sum(Y2only.sace * (1 - X2only.sace)), sum((1 - Y2only.sace) * X2only.sace), sum(Y2only.sace*X2only.sace)) sace.or1[j] <- CalcOR(vec.for.or.sace1) sace.or2[j] <- CalcOR(vec.for.or.sace2) Y1 <- Y==1 Y2 <- Y==2 fit.logistic.Y1 <- glm(Y1 ~ X, family = "binomial") fit.logistic.true.Y1 <- glm(Y1 ~ X + U, family = "binomial") fit.logistic.Y2 <- glm(Y2 ~ X, family = "binomial") fit.logistic.true.Y2 <- glm(Y2 ~ X + U, family = "binomial") or.approx1[j] <- exp(coef(fit.logistic.Y1)[2]) or.approx.true1[j] <- exp(coef(fit.logistic.true.Y1)[2]) or.approx2[j] <- exp(coef(fit.logistic.Y2)[2]) or.approx.true2[j] <- 
exp(coef(fit.logistic.true.Y2)[2]) } save.image(paste0("CMPEn50krareScen19a",patt,".RData"))
/Simulations/Scripts/R/Rare/Scenario 19a/CMPEn50KrareScen19aEE.R
no_license
yadevi/CausalMPE
R
false
false
4,224
r
rm(list = ls()) library(Daniel) library(dplyr) library(nnet) CalcCImultinom <- function(fit) { s <- summary(fit) coef <- s$coefficients ses <- s$standard.errors ci.1 <- coef[1,2] + c(-1, 1)*1.96*ses[1, 2] ci.2 <- coef[2,2] + c(-1, 1)*1.96*ses[2, 2] return(rbind(ci.1,ci.2)) } #key # A, B,C,D,E,F - betaE[2] = 1.25, 1.5, 1.75, 2, 2.25, 2.5 # A,B,C, D, E,F - betaU = 2,3,4,5,6,7 patt <- "EE" beta0 <- c(-6, -5) betaE <- c(log(2.5), log(2.25)) betaU <- c(log(6), log(1/1.5)) sigmaU <- 1 n.sample <- 50000 n.sim <- 1000 AllY <- matrix(nr = n.sim, nc = 3) sace.diff1 <- sace.diff2 <- ace.diff1 <- ace.diff2 <- sace.or1 <- sace.or2 <- ace.or1 <- ace.or2 <- or.approx1 <- or.approx2 <- or.approx.true1 <- or.approx.true2 <- pop.never.s1 <- pop.never.s2 <- vector(length = n.sim) ci1 <- ci2 <- matrix(nr = n.sim, nc = 2) for (j in 1:n.sim) { CatIndex(j) # Simulate genetic score U <- rnorm(n.sample, 0, sd = sigmaU) #### Calcualte probabilites for each subtype with and without the exposure #### e1E0 <- exp(beta0[1] + betaU[1]*U) e1E1 <- exp(beta0[1] + betaE[1] + betaU[1]*U) e2E0 <- exp(beta0[2] + betaU[2]*U) e2E1 <- exp(beta0[2] + betaE[2] + betaU[2]*U) prE0Y1 <- e1E0/(1 + e1E0 + e2E0) prE0Y2 <- e2E0/(1 + e1E0 + e2E0) prE1Y1 <- e1E1/(1 + e1E1 + e2E1) prE1Y2 <- e2E1/(1 + e1E1 + e2E1) probsE0 <- cbind(prE0Y1, prE0Y2, 1 - prE0Y1 - prE0Y2) probsE1 <- cbind(prE1Y1, prE1Y2, 1 - prE1Y1 - prE1Y2) # Simulate subtypes # Yctrl <- Ytrt <- vector(length = n.sample) X <- rbinom(n = n.sample, 1, 0.5) for (i in 1:n.sample) { Yctrl[i] <- sample(c(1,2,0), 1, replace = T, prob = probsE0[i, ]) Ytrt[i] <- sample(c(1,2,0), 1, replace = T, prob = probsE1[i, ]) } Y <- (1-X)*Yctrl + X*Ytrt AllY[j, ] <- table(Y) Y1ctrl <- Yctrl==1 Y1trt <- Ytrt==1 Y2ctrl <- Yctrl==2 Y2trt <- Ytrt==2 pop.never.s1[j] <- mean(Y1ctrl==0 & Y1trt==0) pop.never.s2[j] <- mean(Y2ctrl==0 & Y2trt==0) # estimate causal parameters sace.diff1[j] <- mean((Y1trt - Y1ctrl)[Y2ctrl==0 & Y2trt==0]) sace.diff2[j]<- mean((Y2trt - Y2ctrl)[Y1ctrl==0 & 
Y1trt==0]) ace.diff1[j] <- mean((Y1trt[Y2trt==0 & X==1]) - mean(Y1ctrl[Y2ctrl==0 & X==0])) ace.diff2[j] <- mean((Y2trt[Y1trt==0 & X==1]) - mean(Y2ctrl[Y1ctrl==0 & X==0])) # Ypo <- c(Yctrl, Ytrt) # Upo <- rep(U,2) # Xpo <- rep(x = c(0,1), each = n.sample) # fit.full.po <- multinom(Ypo~ Xpo + Upo) # fit.po <- multinom(Ypo~ Xpo) fit <- multinom(Y~ X) cis <- CalcCImultinom(fit) ci1[j, ] <- cis[1, ] ci2[j, ] <- cis[2, ] Y1only <- Y[Y<2] X1only <- X[Y<2] U1only <-U[Y<2] Y2only <- Y[Y!=1] X2only <- X[Y!=1] U2only <-U[Y!=1] Y2only[Y2only>0] <- 1 vec.for.or.1only <- c(sum((1 - Y1only) * (1 - X1only)) , sum(Y1only * (1 - X1only)), sum((1 - Y1only) * X1only), sum(Y1only*X1only)) vec.for.or.2only <- c(sum((1 - Y2only) * (1 - X2only)) , sum(Y2only * (1 - X2only)), sum((1 - Y2only) * X2only), sum(Y2only*X2only)) ace.or1[j] <- CalcOR(vec.for.or.1only) ace.or2[j] <- CalcOR(vec.for.or.2only) Y1only.sace <- Y[Ytrt <2 & Yctrl < 2] X1only.sace <- X[Ytrt <2 & Yctrl < 2] U1only.sace <-U[Ytrt <2 & Yctrl < 2] Y2only.sace <- Y[Ytrt!=1 & Y1ctrl!=1] X2only.sace <- X[Ytrt!=1 & Y1ctrl!=1] U2only.sace <-U[Ytrt!=1 & Y1ctrl!=1] Y2only.sace[Y2only.sace>0] <- 1 vec.for.or.sace1 <- c(sum((1 - Y1only.sace) * (1 - X1only.sace)) , sum(Y1only.sace * (1 - X1only.sace)), sum((1 - Y1only.sace) * X1only.sace), sum(Y1only.sace*X1only.sace)) vec.for.or.sace2 <- c(sum((1 - Y2only.sace) * (1 - X2only.sace)) , sum(Y2only.sace * (1 - X2only.sace)), sum((1 - Y2only.sace) * X2only.sace), sum(Y2only.sace*X2only.sace)) sace.or1[j] <- CalcOR(vec.for.or.sace1) sace.or2[j] <- CalcOR(vec.for.or.sace2) Y1 <- Y==1 Y2 <- Y==2 fit.logistic.Y1 <- glm(Y1 ~ X, family = "binomial") fit.logistic.true.Y1 <- glm(Y1 ~ X + U, family = "binomial") fit.logistic.Y2 <- glm(Y2 ~ X, family = "binomial") fit.logistic.true.Y2 <- glm(Y2 ~ X + U, family = "binomial") or.approx1[j] <- exp(coef(fit.logistic.Y1)[2]) or.approx.true1[j] <- exp(coef(fit.logistic.true.Y1)[2]) or.approx2[j] <- exp(coef(fit.logistic.Y2)[2]) or.approx.true2[j] <- 
exp(coef(fit.logistic.true.Y2)[2]) } save.image(paste0("CMPEn50krareScen19a",patt,".RData"))
\name{interpolate.score} \alias{interpolate.score} \title{ Interpolate the \code{\link{score()}} of a (sparse) signal down to base pair level. } \description{ Tiling arrays have a lower density of information per kbase. To compare data measured with such techniques with basepair resolution obtained from NGS, it can be useful to interpolate.score the sparser data. } \usage{ interpolate.score(granges, seqnames = NULL, wanted.dist = function(dists){median(dists)}, max.dist = function(dists){3*median(dists)}, every = 1) } \arguments{ \item{granges}{ The \code{GRanges} object whose \code{score} is to be interpolated. The \code{\link{seqlength}}'s of this object may not be NULL. } \item{seqnames}{ The subset of seqnames for which to do the interpolation. If \code{NULL}, all \code{unique(seqnames(granges))} are taken. } \item{wanted.dist}{ Passed to \code{\link{uuutils::zeroterminate.islands}} } \item{max.dist}{ Passed to \code{\link{uuutils::zeroterminate.islands}} } \item{every}{ Interpolate every this many base pairs. } } \details{ To avoid interpolation over too long distances, the 'islands' in the data are first 'zero-terminated' on either side using \code{\link{uuutils::zeroterminate.islands}}. This makes sure that, for the simple linear interpolation done subsequently, the complete gap is interpolated as having the value zero. } \value{ A \code{GRanges} object with, with 1-width ranges at the interpolate.scored points, and \code{score} the interpolated values. } \author{ Philip Lijnzaad <plijnzaad@gmail.com> } \seealso{ \code{\link{uuutils::zeroterminate.islands}},\code{\link{granges.apply}} } \note{ Don't be seduced by the luscious curves of spline interpolation. They are visually attractive but inappropriate as they overshoot wildly. Better stick to the straight and narrow, as in real life. 
} \examples{ gr <- GRanges(ranges=IRanges(start=seq(1, 1000, by=100),width=1), score=rnorm(10), seqnames=factor('foo'),strand='*', seqlengths=c(foo=1000)) interpolation <- interpolate.score(gr) } \keyword{misc}
/ngs/R/ngsutils/man/interpolate.score.Rd
no_license
plijnzaad/phtools
R
false
false
2,235
rd
\name{interpolate.score} \alias{interpolate.score} \title{ Interpolate the \code{\link{score()}} of a (sparse) signal down to base pair level. } \description{ Tiling arrays have a lower density of information per kbase. To compare data measured with such techniques with basepair resolution obtained from NGS, it can be useful to interpolate.score the sparser data. } \usage{ interpolate.score(granges, seqnames = NULL, wanted.dist = function(dists){median(dists)}, max.dist = function(dists){3*median(dists)}, every = 1) } \arguments{ \item{granges}{ The \code{GRanges} object whose \code{score} is to be interpolated. The \code{\link{seqlength}}'s of this object may not be NULL. } \item{seqnames}{ The subset of seqnames for which to do the interpolation. If \code{NULL}, all \code{unique(seqnames(granges))} are taken. } \item{wanted.dist}{ Passed to \code{\link{uuutils::zeroterminate.islands}} } \item{max.dist}{ Passed to \code{\link{uuutils::zeroterminate.islands}} } \item{every}{ Interpolate every this many base pairs. } } \details{ To avoid interpolation over too long distances, the 'islands' in the data are first 'zero-terminated' on either side using \code{\link{uuutils::zeroterminate.islands}}. This makes sure that, for the simple linear interpolation done subsequently, the complete gap is interpolated as having the value zero. } \value{ A \code{GRanges} object with, with 1-width ranges at the interpolate.scored points, and \code{score} the interpolated values. } \author{ Philip Lijnzaad <plijnzaad@gmail.com> } \seealso{ \code{\link{uuutils::zeroterminate.islands}},\code{\link{granges.apply}} } \note{ Don't be seduced by the luscious curves of spline interpolation. They are visually attractive but inappropriate as they overshoot wildly. Better stick to the straight and narrow, as in real life. 
} \examples{ gr <- GRanges(ranges=IRanges(start=seq(1, 1000, by=100),width=1), score=rnorm(10), seqnames=factor('foo'),strand='*', seqlengths=c(foo=1000)) interpolation <- interpolate.score(gr) } \keyword{misc}
fcsFile<-system.file("extdata/List-modeDataFiles","int-10_events_6_parameters.fcs",package="gatingMLData") gateFile <- system.file("extdata/Gating-MLFiles","23TrLn.xml",package="gatingMLData") csvFile<-paste(system.file("extdata/ExpectedResults/23TrLn",package="gatingMLData")) flowEnv=new.env() read.gatingML(gateFile,flowEnv) fcs <- read.FCS(fcsFile,transformation=FALSE) test.TrLnG1<- function() { gateId<-"TrLnG1" csvFile<-paste(csvFile,"/",gateId,".txt",sep="") expectedResult<-read.csv(csvFile,header=TRUE) flowUtils:::performGateTest(gateId,fcs,expectedResult,flowEnv) } test.TrLnG2<- function() { gateId<-"TrLnG2" csvFile<-paste(csvFile,"/",gateId,".txt",sep="") expectedResult<-read.csv(csvFile,header=TRUE) flowUtils:::performGateTest(gateId,fcs,expectedResult,flowEnv) } test.TrLnG3<- function() { gateId<-"TrLnG3" csvFile<-paste(csvFile,"/",gateId,".txt",sep="") expectedResult<-read.csv(csvFile,header=TRUE) flowUtils:::performGateTest(gateId,fcs,expectedResult,flowEnv) } test.TrLnG4<- function() { gateId<-"TrLnG4" csvFile<-paste(csvFile,"/",gateId,".txt",sep="") expectedResult<-read.csv(csvFile,header=TRUE) flowUtils:::performGateTest(gateId,fcs,expectedResult,flowEnv) } test.TrLnG5<- function() { gateId<-"TrLnG5" csvFile<-paste(csvFile,"/",gateId,".txt",sep="") expectedResult<-read.csv(csvFile,header=TRUE) flowUtils:::performGateTest(gateId,fcs,expectedResult,flowEnv) } test.TrLnG6<- function() { gateId<-"TrLnG6" csvFile<-paste(csvFile,"/",gateId,".txt",sep="") expectedResult<-read.csv(csvFile,header=TRUE) flowUtils:::performGateTest(gateId,fcs,expectedResult,flowEnv) }
/inst/RUnitScript_Files/runit.23TrLn.R
no_license
jspidlen/flowUtils
R
false
false
1,689
r
fcsFile<-system.file("extdata/List-modeDataFiles","int-10_events_6_parameters.fcs",package="gatingMLData") gateFile <- system.file("extdata/Gating-MLFiles","23TrLn.xml",package="gatingMLData") csvFile<-paste(system.file("extdata/ExpectedResults/23TrLn",package="gatingMLData")) flowEnv=new.env() read.gatingML(gateFile,flowEnv) fcs <- read.FCS(fcsFile,transformation=FALSE) test.TrLnG1<- function() { gateId<-"TrLnG1" csvFile<-paste(csvFile,"/",gateId,".txt",sep="") expectedResult<-read.csv(csvFile,header=TRUE) flowUtils:::performGateTest(gateId,fcs,expectedResult,flowEnv) } test.TrLnG2<- function() { gateId<-"TrLnG2" csvFile<-paste(csvFile,"/",gateId,".txt",sep="") expectedResult<-read.csv(csvFile,header=TRUE) flowUtils:::performGateTest(gateId,fcs,expectedResult,flowEnv) } test.TrLnG3<- function() { gateId<-"TrLnG3" csvFile<-paste(csvFile,"/",gateId,".txt",sep="") expectedResult<-read.csv(csvFile,header=TRUE) flowUtils:::performGateTest(gateId,fcs,expectedResult,flowEnv) } test.TrLnG4<- function() { gateId<-"TrLnG4" csvFile<-paste(csvFile,"/",gateId,".txt",sep="") expectedResult<-read.csv(csvFile,header=TRUE) flowUtils:::performGateTest(gateId,fcs,expectedResult,flowEnv) } test.TrLnG5<- function() { gateId<-"TrLnG5" csvFile<-paste(csvFile,"/",gateId,".txt",sep="") expectedResult<-read.csv(csvFile,header=TRUE) flowUtils:::performGateTest(gateId,fcs,expectedResult,flowEnv) } test.TrLnG6<- function() { gateId<-"TrLnG6" csvFile<-paste(csvFile,"/",gateId,".txt",sep="") expectedResult<-read.csv(csvFile,header=TRUE) flowUtils:::performGateTest(gateId,fcs,expectedResult,flowEnv) }
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/pdfAndProb.R \name{getProbForDiscrete} \alias{getProbForDiscrete} \title{Get a probability of a discrete value.} \usage{ getProbForDiscrete(data, value) } \arguments{ \item{data}{vector of observations that have the same type as the given value.} \item{value}{a single observation of the same type as the data vector.} } \value{ the probability of value given data. } \description{ Similar to @seealso \code{estimatePdf}, this function returns the probability for a discrete value, given some observations. } \note{ If no observations are given, then this function will warn and return a probability of zero for the value given. While we could technically return positive infinity, 0 is more suitable in the context of Bayesian inferencing. } \examples{ mmb::getProbForDiscrete(data = c(), value = iris[1,]$Species) mmb::getProbForDiscrete(data = iris$Species, value = iris[1,]$Species) } \author{ Sebastian Hönel \href{mailto:sebastian.honel@lnu.se}{sebastian.honel@lnu.se} } \keyword{likelihood} \keyword{probability}
/man/getProbForDiscrete.Rd
no_license
cran/mmb
R
false
true
1,134
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/pdfAndProb.R \name{getProbForDiscrete} \alias{getProbForDiscrete} \title{Get a probability of a discrete value.} \usage{ getProbForDiscrete(data, value) } \arguments{ \item{data}{vector of observations that have the same type as the given value.} \item{value}{a single observation of the same type as the data vector.} } \value{ the probability of value given data. } \description{ Similar to @seealso \code{estimatePdf}, this function returns the probability for a discrete value, given some observations. } \note{ If no observations are given, then this function will warn and return a probability of zero for the value given. While we could technically return positive infinity, 0 is more suitable in the context of Bayesian inferencing. } \examples{ mmb::getProbForDiscrete(data = c(), value = iris[1,]$Species) mmb::getProbForDiscrete(data = iris$Species, value = iris[1,]$Species) } \author{ Sebastian Hönel \href{mailto:sebastian.honel@lnu.se}{sebastian.honel@lnu.se} } \keyword{likelihood} \keyword{probability}
# 1. Loading and Cleaning Data ------------------------------------------------------------ # This creates a clean data file to merge with pre-NOV 2020 abstractions # This cleans data from final NOV 2020 abstraction (N=93 aggregate studies) library(dplyr) setwd("/Users/kristinandrejko/Box/01-Research/02-SP-AMR/LancetMicrobe-Revision/PneumoAMR-LMicrobe") addt_93 <- read.csv("data/data_addt_93_final.csv") #Last pull of data from the 2007-2009 papers and 2020 update completed 12-2-20, note I manually removed Okade data from excel length(unique(addt_93$studyID)) #93 #pcv_intro <- read_excel("data/PCV Vaccine Intro.xlsx") #found tiny errors on intro date, update to new sheet pcv_intro <- read_excel("data/PCV Vaccine Intro Updated Nov2020.xlsx") #found tiny errors on intro date, update to new sheetincome_dta <- read_excel("data/income_status_worldbank.xlsx") gbd_region_new <- read_excel("data/gbd_region_new_r.xlsx") world_bank_gdp_new <- read_excel("data/world_bank_gdp_new2.xlsx") addt_93 <- addt_93 %>% dplyr::select(-serotype.data.) all_data4 <- addt_93 length(unique(all_data4$studyID)) #93 #levels(factor(extra_data3$country[!(extra_data3$country %in% all_data2$country)])) #this was used when merging names levels(factor(all_data4$country[!(all_data4$country %in% world_bank_gdp_new$country)])) #this was used when merging names #Countries to fix from addt_93: # England -> United Kingdom # United States -> United States of America # Taiwan all_data4$country <- as.character(all_data4$country) all_data4$country[all_data4$country == "United States"] <- "United States of America" all_data4$country[all_data4$country == "United States "] <- "United States of America" all_data4$country[all_data4$country == "Taiwan"] <- "China" all_data4$country[all_data4$country == "England"] <- "United Kingdom" #1. 
Merge Data newmerge <- merge(gbd_region_new, pcv_intro, by = "country", all = T) #replace gbd_region_new with who_region newmerge <- merge(newmerge, income_dta, by = "country", all = T) #2. Merge with wide_dta fulldta <- left_join(all_data4, newmerge, by = "country") #added all_data instead of wide_dta length(unique(all_data4$country)) length(unique(fulldta$country)) #3. Merge with GDP per capital - haven't done this yet! #3.1 Items which are in fulldta which are not in world_bank_gdp_new levels(factor(fulldta$country[!(fulldta$country %in% gbd_region_new$country)])) levels(factor(fulldta$country[!(fulldta$country %in% pcv_intro$country)])) levels(factor(fulldta$country[!(fulldta$country %in% world_bank_gdp_new$country)])) #4. melt world_bank_gdp (need to convert world_bank_gdp into data frame for melt to work) world_bank_gdp_new_v2 <- as.data.frame(world_bank_gdp_new) world_bank_gdp_new_v2$'2020' <- world_bank_gdp_new_v2$"2019" #add 2020 data that is the same as 2019 world_bank_gdp_melt <- melt(data = world_bank_gdp_new_v2, id.vars= "country", measure.vars= c("1973", "1974", "1974", "1978", "1979", "1980", "1981", "1982", "1983", "1984", "1985", "1986", "1987", "1989", "1990", "1991", "1992", "1993", "1994", "1995", "1996", "1997", "1998", "1999", "2000", "2001", "2002", "2003", "2004", "2005", "2006", "2007", "2008", "2009", "2010", "2011", "2012", "2013", "2014", "2015", "2016", "2017", "2018", "2019", "2020"), variable.name = "midpoint_yr", value.name = "gdp") colnames(world_bank_gdp_melt)[2] <- "midpoint_yr" colnames(world_bank_gdp_melt)[3] <- "gdp" #4.1. 
convert world_bank_gdp_melt from factor to numeric world_bank_gdp_melt$midpoint_yr <- as.numeric(levels(world_bank_gdp_melt$midpoint_yr))[world_bank_gdp_melt$midpoint_yr] #5 Create midpoint_yr value fulldta <- fulldta %>% mutate(midpoint_yr = ((sample_collection_endyear - sample_collection_startyear) / 2) + sample_collection_startyear) #5.1 Round midpoint_year up fulldta$midpoint_yr <- round(fulldta$midpoint_yr, digits = 0) #6. Merge GDP with data set- a lot of these are NA's becuase midpoint year is not a whole number fulldta <- left_join(fulldta, world_bank_gdp_melt, by = c("country", "midpoint_yr")) #Check where there are not values of GDP and manually replace na_df <- fulldta[is.na(fulldta$gdp),] fulldta[293, "gdp"] <- 15068.982 #Turkey GDP in 2020 nrow(na_df) #7. Add yr since vax (used start year of sample collection rather than midpoint yr bc otherwise 90 studies had pre = 0 and yr since vax >0) fulldta <- fulldta %>% mutate(yr_since_vax = case_when(as.numeric(sample_collection_startyear) - as.numeric(pcv_intro_year) > 0 ~ sample_collection_startyear - as.numeric(pcv_intro_year), as.numeric(sample_collection_startyear) - as.numeric(pcv_intro_year) == 0 ~ 0, as.numeric(sample_collection_startyear) - as.numeric(pcv_intro_year) < 0 ~ 0, pcv_intro_year == "NI" ~ 0)) #pre length(which(is.na(fulldta$yr_since_vax))) check <- fulldta %>% dplyr::select(pcv_intro_year, sample_collection_endyear, sample_collection_startyear, yr_since_vax) #View(check) #8. 
Add prepost variable fulldta <- fulldta %>% mutate(prepost = case_when(as.numeric(pcv_intro_year) == sample_collection_startyear ~ "no_analysis", as.numeric(pcv_intro_year) == sample_collection_endyear ~ "no_analysis", sample_collection_startyear - as.numeric(pcv_intro_year) == 1 ~ "no_analysis", as.numeric(pcv_intro_year) > sample_collection_endyear ~ "naive", sample_collection_startyear - as.numeric(pcv_intro_year) >= 3 ~ "not_naive", #changed from 5 #sample_collection_startyear - as.numeric(pcv_intro_year) == 4 ~ "no_analysis", #sample_collection_startyear - as.numeric(pcv_intro_year) == 3 ~ "no_analysis", sample_collection_startyear - as.numeric(pcv_intro_year) == 2 ~ "no_analysis", pcv_intro_year == "NI" ~ "naive")) fulldta$prepost[is.na(fulldta$prepost)] <- "no_analysis" length(which(is.na(fulldta$prepost))) #8.1 Add prepost variable for meta-regression analysis fulldta <- fulldta %>% mutate(prepost_meta = case_when(yr_since_vax >= 1 ~ "post", yr_since_vax == 0 ~ "pre")) # check <- fulldta %>% # dplyr::select(prepost_meta, yr_since_vax) # View(check) #9. Recode study ID to r_id studyid_df <- fulldta %>% group_by(studyID) %>% summarize(var = mean(gdp)) studyid_df <- data.frame(studyid_df) studyid_df <- studyid_df %>% mutate(r_id = row_number()) length(unique(studyid_df$studyID)) length(unique(studyid_df$r_id)) #392 studyid_df <- studyid_df[,-c(2)] fulldta <- merge(studyid_df, fulldta, by = "studyID") nrow(fulldta) names(fulldta) fulldta$r_id <- fulldta$r_id + 5000 #add 450 to each of the r_id to not mess up the earlier stuff -> CHANGE TO 5000 unique(fulldta$r_id) clsi_list <- c("National Committee for Clinical Laboratory Standards", "National Committee for Clinical Laboratory Standards " , "NCCLS", "CLSI were used for sus, int, res; EUCAST was used for benzylpenicillin") for(i in 1:nrow(fulldta)){ fulldta$criteria_cl[i] <- ifelse(fulldta$criteria_specify[i] %in% clsi_list, 1, fulldta$criteria[i]) } #Clean drug_class variable!! 
fulldta$drug_class <- as.character(fulldta$drug) #fulldta$drug_class <- as.character(fulldta$drug_class) fulldta$drug_class[fulldta$drug_class == "Ampicillin"] <- "penicillin" fulldta$drug_class[fulldta$drug_class == "Amoxacillin"] <- "penicillin" fulldta$drug_class[fulldta$drug_class == "Penicilin"] <- "penicillin" fulldta$drug_class[fulldta$drug_class == "penicillin"] <- "penicillin" fulldta$drug_class[fulldta$drug_class == "Ceftriaxone"] <- "3gen_cephalosporin" fulldta$drug_class[fulldta$drug_class == "Tetracycline"] <- "tetracycline" fulldta$drug_class[fulldta$drug_class == "Cotrimoxazole"] <- "SXT" fulldta$drug_class[fulldta$drug_class == "Cefotaxime"] <- "3gen_cephalosporin" fulldta$drug_class[fulldta$drug_class == "Ceftoxamine"] <- "3gen_cephalosporin" fulldta$drug_class[fulldta$drug_class == "Ceftiaxone"] <- "3gen_cephalosporin" fulldta$drug_class[fulldta$drug_class == "Cefuroxime"] <- "3gen_cephalosporin" fulldta$drug_class[fulldta$drug_class == "Cefuroxime "] <- "3gen_cephalosporin" fulldta$drug_class[fulldta$drug_class == "Amoxicillin"] <- "penicillin" fulldta$drug_class[fulldta$drug_class == "Clindamycin"] <- "tetracycline" #CHECK fulldta$drug_class[fulldta$drug_class == "Cefaclor"] <- "3gen_cephalosporin" #CHECK fulldta$drug_class[fulldta$drug_class == "Imipenem"] <- "imipenem" #REMOVE fulldta$drug_class[fulldta$drug_class == "Levofloxacin"] <- "fluoroquinolone" #CHECK fulldta$drug_class[fulldta$drug_class == "Vancomycin"] <- "vancomycin" #CHECK fulldta$drug_class[fulldta$drug_class == "Chloramphenicol"] <- "chloramphenicol" #CHECK fulldta$drug_class[fulldta$drug_class == "Ciprofloxacin"] <- "ciprofloxacin" #CHECK fulldta$drug_class[fulldta$drug_class == "Linezolid"] <- "linezolid" fulldta$drug_class[fulldta$drug_class == "Penicillin"] <- "penicillin" fulldta$drug_class[fulldta$drug_class == "Erythromycin"] <- "macrolide" fulldta$drug_class[fulldta$drug_class == "Azithromycn"] <- "macrolide" table(fulldta$drug, fulldta$drug_class) extra_data4 <- 
fulldta length(unique(extra_data4$studyID)) #93 length(unique(extra_data4$r_id)) #93 range(extra_data4$studyID, na.rm = T); range(extra_data4$r_id, na.rm = T) save(extra_data4, file = "data/data_120220.rda") View(extra_data4) #Take extra_data4 and select doi, country, sample_collection_startyear, sample_collection_endyear, prepost finalTable <- extra_data4 %>% dplyr::select(author, doi, pub_year, country, sample_collection_startyear, sample_collection_endyear, prepost, total_isolates_drug) View(finalTable)
/code/00-PneumoAMR-LMicrobe-DataCleaning-AddtStudies.R
no_license
joelewnard/amrPneumo
R
false
false
10,224
r
# 1. Loading and Cleaning Data ------------------------------------------------------------ # This creates a clean data file to merge with pre-NOV 2020 abstractions # This cleans data from final NOV 2020 abstraction (N=93 aggregate studies) library(dplyr) setwd("/Users/kristinandrejko/Box/01-Research/02-SP-AMR/LancetMicrobe-Revision/PneumoAMR-LMicrobe") addt_93 <- read.csv("data/data_addt_93_final.csv") #Last pull of data from the 2007-2009 papers and 2020 update completed 12-2-20, note I manually removed Okade data from excel length(unique(addt_93$studyID)) #93 #pcv_intro <- read_excel("data/PCV Vaccine Intro.xlsx") #found tiny errors on intro date, update to new sheet pcv_intro <- read_excel("data/PCV Vaccine Intro Updated Nov2020.xlsx") #found tiny errors on intro date, update to new sheetincome_dta <- read_excel("data/income_status_worldbank.xlsx") gbd_region_new <- read_excel("data/gbd_region_new_r.xlsx") world_bank_gdp_new <- read_excel("data/world_bank_gdp_new2.xlsx") addt_93 <- addt_93 %>% dplyr::select(-serotype.data.) all_data4 <- addt_93 length(unique(all_data4$studyID)) #93 #levels(factor(extra_data3$country[!(extra_data3$country %in% all_data2$country)])) #this was used when merging names levels(factor(all_data4$country[!(all_data4$country %in% world_bank_gdp_new$country)])) #this was used when merging names #Countries to fix from addt_93: # England -> United Kingdom # United States -> United States of America # Taiwan all_data4$country <- as.character(all_data4$country) all_data4$country[all_data4$country == "United States"] <- "United States of America" all_data4$country[all_data4$country == "United States "] <- "United States of America" all_data4$country[all_data4$country == "Taiwan"] <- "China" all_data4$country[all_data4$country == "England"] <- "United Kingdom" #1. 
Merge Data newmerge <- merge(gbd_region_new, pcv_intro, by = "country", all = T) #replace gbd_region_new with who_region newmerge <- merge(newmerge, income_dta, by = "country", all = T) #2. Merge with wide_dta fulldta <- left_join(all_data4, newmerge, by = "country") #added all_data instead of wide_dta length(unique(all_data4$country)) length(unique(fulldta$country)) #3. Merge with GDP per capital - haven't done this yet! #3.1 Items which are in fulldta which are not in world_bank_gdp_new levels(factor(fulldta$country[!(fulldta$country %in% gbd_region_new$country)])) levels(factor(fulldta$country[!(fulldta$country %in% pcv_intro$country)])) levels(factor(fulldta$country[!(fulldta$country %in% world_bank_gdp_new$country)])) #4. melt world_bank_gdp (need to convert world_bank_gdp into data frame for melt to work) world_bank_gdp_new_v2 <- as.data.frame(world_bank_gdp_new) world_bank_gdp_new_v2$'2020' <- world_bank_gdp_new_v2$"2019" #add 2020 data that is the same as 2019 world_bank_gdp_melt <- melt(data = world_bank_gdp_new_v2, id.vars= "country", measure.vars= c("1973", "1974", "1974", "1978", "1979", "1980", "1981", "1982", "1983", "1984", "1985", "1986", "1987", "1989", "1990", "1991", "1992", "1993", "1994", "1995", "1996", "1997", "1998", "1999", "2000", "2001", "2002", "2003", "2004", "2005", "2006", "2007", "2008", "2009", "2010", "2011", "2012", "2013", "2014", "2015", "2016", "2017", "2018", "2019", "2020"), variable.name = "midpoint_yr", value.name = "gdp") colnames(world_bank_gdp_melt)[2] <- "midpoint_yr" colnames(world_bank_gdp_melt)[3] <- "gdp" #4.1. 
convert world_bank_gdp_melt from factor to numeric world_bank_gdp_melt$midpoint_yr <- as.numeric(levels(world_bank_gdp_melt$midpoint_yr))[world_bank_gdp_melt$midpoint_yr] #5 Create midpoint_yr value fulldta <- fulldta %>% mutate(midpoint_yr = ((sample_collection_endyear - sample_collection_startyear) / 2) + sample_collection_startyear) #5.1 Round midpoint_year up fulldta$midpoint_yr <- round(fulldta$midpoint_yr, digits = 0) #6. Merge GDP with data set- a lot of these are NA's becuase midpoint year is not a whole number fulldta <- left_join(fulldta, world_bank_gdp_melt, by = c("country", "midpoint_yr")) #Check where there are not values of GDP and manually replace na_df <- fulldta[is.na(fulldta$gdp),] fulldta[293, "gdp"] <- 15068.982 #Turkey GDP in 2020 nrow(na_df) #7. Add yr since vax (used start year of sample collection rather than midpoint yr bc otherwise 90 studies had pre = 0 and yr since vax >0) fulldta <- fulldta %>% mutate(yr_since_vax = case_when(as.numeric(sample_collection_startyear) - as.numeric(pcv_intro_year) > 0 ~ sample_collection_startyear - as.numeric(pcv_intro_year), as.numeric(sample_collection_startyear) - as.numeric(pcv_intro_year) == 0 ~ 0, as.numeric(sample_collection_startyear) - as.numeric(pcv_intro_year) < 0 ~ 0, pcv_intro_year == "NI" ~ 0)) #pre length(which(is.na(fulldta$yr_since_vax))) check <- fulldta %>% dplyr::select(pcv_intro_year, sample_collection_endyear, sample_collection_startyear, yr_since_vax) #View(check) #8. 
Add prepost variable fulldta <- fulldta %>% mutate(prepost = case_when(as.numeric(pcv_intro_year) == sample_collection_startyear ~ "no_analysis", as.numeric(pcv_intro_year) == sample_collection_endyear ~ "no_analysis", sample_collection_startyear - as.numeric(pcv_intro_year) == 1 ~ "no_analysis", as.numeric(pcv_intro_year) > sample_collection_endyear ~ "naive", sample_collection_startyear - as.numeric(pcv_intro_year) >= 3 ~ "not_naive", #changed from 5 #sample_collection_startyear - as.numeric(pcv_intro_year) == 4 ~ "no_analysis", #sample_collection_startyear - as.numeric(pcv_intro_year) == 3 ~ "no_analysis", sample_collection_startyear - as.numeric(pcv_intro_year) == 2 ~ "no_analysis", pcv_intro_year == "NI" ~ "naive")) fulldta$prepost[is.na(fulldta$prepost)] <- "no_analysis" length(which(is.na(fulldta$prepost))) #8.1 Add prepost variable for meta-regression analysis fulldta <- fulldta %>% mutate(prepost_meta = case_when(yr_since_vax >= 1 ~ "post", yr_since_vax == 0 ~ "pre")) # check <- fulldta %>% # dplyr::select(prepost_meta, yr_since_vax) # View(check) #9. Recode study ID to r_id studyid_df <- fulldta %>% group_by(studyID) %>% summarize(var = mean(gdp)) studyid_df <- data.frame(studyid_df) studyid_df <- studyid_df %>% mutate(r_id = row_number()) length(unique(studyid_df$studyID)) length(unique(studyid_df$r_id)) #392 studyid_df <- studyid_df[,-c(2)] fulldta <- merge(studyid_df, fulldta, by = "studyID") nrow(fulldta) names(fulldta) fulldta$r_id <- fulldta$r_id + 5000 #add 450 to each of the r_id to not mess up the earlier stuff -> CHANGE TO 5000 unique(fulldta$r_id) clsi_list <- c("National Committee for Clinical Laboratory Standards", "National Committee for Clinical Laboratory Standards " , "NCCLS", "CLSI were used for sus, int, res; EUCAST was used for benzylpenicillin") for(i in 1:nrow(fulldta)){ fulldta$criteria_cl[i] <- ifelse(fulldta$criteria_specify[i] %in% clsi_list, 1, fulldta$criteria[i]) } #Clean drug_class variable!! 
fulldta$drug_class <- as.character(fulldta$drug) #fulldta$drug_class <- as.character(fulldta$drug_class) fulldta$drug_class[fulldta$drug_class == "Ampicillin"] <- "penicillin" fulldta$drug_class[fulldta$drug_class == "Amoxacillin"] <- "penicillin" fulldta$drug_class[fulldta$drug_class == "Penicilin"] <- "penicillin" fulldta$drug_class[fulldta$drug_class == "penicillin"] <- "penicillin" fulldta$drug_class[fulldta$drug_class == "Ceftriaxone"] <- "3gen_cephalosporin" fulldta$drug_class[fulldta$drug_class == "Tetracycline"] <- "tetracycline" fulldta$drug_class[fulldta$drug_class == "Cotrimoxazole"] <- "SXT" fulldta$drug_class[fulldta$drug_class == "Cefotaxime"] <- "3gen_cephalosporin" fulldta$drug_class[fulldta$drug_class == "Ceftoxamine"] <- "3gen_cephalosporin" fulldta$drug_class[fulldta$drug_class == "Ceftiaxone"] <- "3gen_cephalosporin" fulldta$drug_class[fulldta$drug_class == "Cefuroxime"] <- "3gen_cephalosporin" fulldta$drug_class[fulldta$drug_class == "Cefuroxime "] <- "3gen_cephalosporin" fulldta$drug_class[fulldta$drug_class == "Amoxicillin"] <- "penicillin" fulldta$drug_class[fulldta$drug_class == "Clindamycin"] <- "tetracycline" #CHECK fulldta$drug_class[fulldta$drug_class == "Cefaclor"] <- "3gen_cephalosporin" #CHECK fulldta$drug_class[fulldta$drug_class == "Imipenem"] <- "imipenem" #REMOVE fulldta$drug_class[fulldta$drug_class == "Levofloxacin"] <- "fluoroquinolone" #CHECK fulldta$drug_class[fulldta$drug_class == "Vancomycin"] <- "vancomycin" #CHECK fulldta$drug_class[fulldta$drug_class == "Chloramphenicol"] <- "chloramphenicol" #CHECK fulldta$drug_class[fulldta$drug_class == "Ciprofloxacin"] <- "ciprofloxacin" #CHECK fulldta$drug_class[fulldta$drug_class == "Linezolid"] <- "linezolid" fulldta$drug_class[fulldta$drug_class == "Penicillin"] <- "penicillin" fulldta$drug_class[fulldta$drug_class == "Erythromycin"] <- "macrolide" fulldta$drug_class[fulldta$drug_class == "Azithromycn"] <- "macrolide" table(fulldta$drug, fulldta$drug_class) extra_data4 <- 
fulldta length(unique(extra_data4$studyID)) #93 length(unique(extra_data4$r_id)) #93 range(extra_data4$studyID, na.rm = T); range(extra_data4$r_id, na.rm = T) save(extra_data4, file = "data/data_120220.rda") View(extra_data4) #Take extra_data4 and select doi, country, sample_collection_startyear, sample_collection_endyear, prepost finalTable <- extra_data4 %>% dplyr::select(author, doi, pub_year, country, sample_collection_startyear, sample_collection_endyear, prepost, total_isolates_drug) View(finalTable)
# Revision Date: 01-02-2016 outlyx <- function(x, iqr=TRUE, iqrP=2, pc=1, mv=TRUE, mvP=0.15, plot=TRUE, ...) { # {{{ ### Computes outliers within methylomic datasets # Arguments: # x : Methylumi/Minfi object or raw betas. # # pc : The desired principal component for outlier identification # # iqr : Logical, to indicate whether to determine outliers by # interquartile ranges. # # iqrP : The number of interquartile ranges one wishes to # discriminate from the upper and lower quartiles. # Default = 2, Tukey's rule suggests 1.5 # # mv : Logical, to indicate whether to determine outliers # using distance measures using a modified version of pcout # from mvoutlier. # # mvP : The threshold bywhich one wishes to screen # data based on the final weight output from # the pcout function # Value between 0 and 1. Default is 0.15 # plot : Logical, to indicate if a graphical device to display # sample outlyingness. # ### # Initialising objects to be used: df <- list() # Converted into dataframe later on. # Removing probes with NA values. x <- na.omit(x) if(iqr){ # {{{ pccompbetx <- prcomp(x, retx=FALSE) out1 <- iqrFun(pccompbetx$rot, pc=pc, iqrP=iqrP) v1 <- colnames(x) %in% out1[[1]]==TRUE df[["iqr"]] <- v1 low <- min(c(min(pccompbetx$rot[,pc]),out1[["low"]]))-out1[[2]] # Used if Plot=T upp <- max(c(max(pccompbetx$rot[,pc]),out1[["hi"]]))+out1[[2]] # Used if Plot=T } # }}} if(mv){ # {{{ tbetx <- t(x) out2 <- mvFun(tbetx, mvP=mvP) v2 <- colnames(x) %in% out2[[1]]==TRUE df[["mv"]] <- v2 } # }}} if(plot&mv&iqr){ plot(pccompbetx$rot[,pc], out2[[2]], xlim=c(low, upp), xlab="Transformed Betas", ylab="Final Weight", ...) abline(v=c(out1[["low"]],out1[["hi"]]), h=mvP, lty=2) rect(xleft=low, xright=out1[["low"]], ybottom=0, ytop=mvP, col="red", density=10) rect(xleft=out1[["hi"]], xright=upp, ybottom=0, ytop=mvP, col="red", density=10) } # Possibly a better way to do the below. 
df[["outliers"]] <- if(mv){df[["mv"]]==TRUE}&if(iqr){df[["iqr"]]==TRUE} df <- data.frame(df) rownames(df) <- colnames(x) return(df) } # }}} iqrFun <- function(x, pc, iqrP){ # {{{ quantilesbx <- apply(x, 2, quantile) # fivenum also works IQR <- quantilesbx[4, pc] - quantilesbx[2, pc] thresh <- iqrP*IQR hiOutlyx <- quantilesbx[4,pc] + thresh # Upper threshold loOutlyx <- quantilesbx[2,pc] - thresh # Lower threshold outhi <- x[, pc] > hiOutlyx # Upper Outliers outlo <- x[, pc] < loOutlyx # Lower Outliers return(list(c(rownames(x)[outhi], rownames(x)[outlo]), IQR, "hi" = hiOutlyx, "low" = loOutlyx)) } # }}} mvFun <- function(x, mvP, ...){ # {{{ pcoutbetx <- pcouted(x,...) #pcout(x) outmvbetx <- pcoutbetx < mvP #pcoutbetx$wfinal < mvP return(list(rownames(as.matrix(pcoutbetx[outmvbetx])), pcoutbetx)) } # }}} pcouted <- function(x,explvar=0.99,crit.M1=1/3,crit.c1=2.5, crit.M2=1/4,crit.c2=0.99,cs=0.25, outbound=0.25, ...){ # {{{ # Modified version of the pcout function from mvoutlier to # calculated distance measures for methylomic data. # Two minor things have been changed, mainly how the function # copes with x.mad values = 0 and the output. # # x.mad=apply(x,2,mad) x <- x[,!x.mad==0] # Cheat to allow it to continue to function. 
x.mad <- x.mad[!x.mad==0] p = ncol(x) n = nrow(x) # # PHASE 1: # Step 1: robustly sphere the data: x.sc <- scale(x,apply(x,2,median),x.mad) # Step 2: PC decomposition; compute p*, robustly sphere: x.svd <- svd(scale(x.sc,TRUE,FALSE)) a <- x.svd$d^2/(n-1) p1 <- (1:p)[(cumsum(a)/sum(a)>explvar)][1] x.pc <- x.sc%*%x.svd$v[,1:p1] xpc.sc <- scale(x.pc,apply(x.pc,2,median),apply(x.pc,2,mad)) # Step 3: compute robust kurtosis weights, transform to distances: wp <- abs(apply(xpc.sc^4,2,mean)-3) xpcw.sc <- xpc.sc%*%diag(wp/sum(wp)) xpc.norm <- sqrt(apply(xpcw.sc^2,1,sum)) x.dist1 <- xpc.norm*sqrt(qchisq(0.5,p1))/median(xpc.norm) # Step 4: determine weights according to translated biweight: M1 <- quantile(x.dist1,crit.M1) const1 <- median(x.dist1)+crit.c1*mad(x.dist1) w1 <- (1-((x.dist1-M1)/(const1-M1))^2)^2 w1[x.dist1<M1] <- 1 w1[x.dist1>const1] <- 0 # # PHASE 2: # Step 5: compute Euclidean norms of PCs and their distances: xpc.norm <- sqrt(apply(xpc.sc^2,1,sum)) x.dist2 <- xpc.norm*sqrt(qchisq(0.5,p1))/median(xpc.norm) # Step 6: determine weight according to translated biweight: M2 <- sqrt(qchisq(crit.M2,p1)) const2 <- sqrt(qchisq(crit.c2,p1)) w2 <- (1-((x.dist2-M2)/(const2-M2))^2)^2 w2[x.dist2<M2] <- 1 w2[x.dist2>const2] <- 0 # # Combine PHASE1 and PHASE 2: compute final weights: # Changed output slightly wfinal <- (w1+cs)*(w2+cs)/((1+cs)^2) return(wfinal) } # }}}
/R/outlyx.R
no_license
schalkwyk/wateRmelon
R
false
false
5,248
r
# Revision Date: 01-02-2016

outlyx <- function(x, iqr = TRUE, iqrP = 2, pc = 1,
                   mv = TRUE, mvP = 0.15, plot = TRUE, ...) { # {{{
    ### Computes outliers within methylomic datasets.
    # Arguments:
    #  x    : Methylumi/Minfi object or raw betas (probes x samples).
    #  pc   : The desired principal component for outlier identification.
    #  iqr  : Logical; determine outliers by interquartile ranges.
    #  iqrP : Number of interquartile ranges used to discriminate from the
    #         upper and lower quartiles.  Default = 2; Tukey's rule
    #         suggests 1.5.
    #  mv   : Logical; determine outliers using distance measures from a
    #         modified version of pcout (mvoutlier).
    #  mvP  : Threshold on the final weight output of the pcout function,
    #         between 0 and 1.  Default is 0.15.
    #  plot : Logical; display sample outlyingness graphically.
    # Value: data.frame with one row per sample (rownames = colnames(x)),
    #        logical columns for each enabled method ("iqr", "mv") and an
    #        "outliers" column (conjunction of the enabled methods).

    if (!iqr && !mv)
        stop("at least one of 'iqr' or 'mv' must be TRUE")

    df <- list()      # per-method flags, converted to a data.frame later
    x <- na.omit(x)   # remove probes with NA values

    if (iqr) { # {{{
        pccompbetx <- prcomp(x, retx = FALSE)
        # $rotation spelled out (original relied on partial matching: $rot)
        out1 <- iqrFun(pccompbetx$rotation, pc = pc, iqrP = iqrP)
        df[["iqr"]] <- colnames(x) %in% out1[[1]]
        # Plot limits, used only when plot = TRUE
        low <- min(c(min(pccompbetx$rotation[, pc]), out1[["low"]])) - out1[[2]]
        upp <- max(c(max(pccompbetx$rotation[, pc]), out1[["hi"]])) + out1[[2]]
    } # }}}

    if (mv) { # {{{
        tbetx <- t(x)
        out2 <- mvFun(tbetx, mvP = mvP)
        df[["mv"]] <- colnames(x) %in% out2[[1]]
    } # }}}

    if (plot && mv && iqr) {
        # The logical argument `plot` does not shadow graphics::plot here:
        # R's call lookup skips non-function bindings.
        plot(pccompbetx$rotation[, pc], out2[[2]], xlim = c(low, upp),
             xlab = "Transformed Betas", ylab = "Final Weight", ...)
        abline(v = c(out1[["low"]], out1[["hi"]]), h = mvP, lty = 2)
        rect(xleft = low, xright = out1[["low"]], ybottom = 0, ytop = mvP,
             col = "red", density = 10)
        rect(xleft = out1[["hi"]], xright = upp, ybottom = 0, ytop = mvP,
             col = "red", density = 10)
    }

    # Combine the enabled methods.  The original
    # `if(mv){...} & if(iqr){...}` evaluated to NULL when a method was
    # disabled (an `if` without `else` returns NULL), making the result
    # logical(0) and breaking data.frame(); a disabled method now
    # contributes TRUE so the other method's flags pass through.
    df[["outliers"]] <- (if (mv) df[["mv"]] else TRUE) &
        (if (iqr) df[["iqr"]] else TRUE)
    df <- data.frame(df)
    rownames(df) <- colnames(x)
    return(df)
} # }}}

iqrFun <- function(x, pc, iqrP) { # {{{
    # Flag samples whose score on principal component `pc` lies more than
    # `iqrP` interquartile ranges beyond the upper/lower quartile.
    # Returns: list(outlier rownames, IQR, "hi" = upper threshold,
    #               "low" = lower threshold).
    quantilesbx <- apply(x, 2, quantile)          # fivenum also works
    IQR <- quantilesbx[4, pc] - quantilesbx[2, pc]
    thresh <- iqrP * IQR
    hiOutlyx <- quantilesbx[4, pc] + thresh       # upper threshold
    loOutlyx <- quantilesbx[2, pc] - thresh       # lower threshold
    outhi <- x[, pc] > hiOutlyx                   # upper outliers
    outlo <- x[, pc] < loOutlyx                   # lower outliers
    return(list(c(rownames(x)[outhi], rownames(x)[outlo]), IQR,
                "hi" = hiOutlyx, "low" = loOutlyx))
} # }}}

mvFun <- function(x, mvP, ...) { # {{{
    # Flag samples whose multivariate final weight (from pcouted) falls
    # below mvP.  Returns: list(outlier names, full weight vector).
    pcoutbetx <- pcouted(x, ...)
    outmvbetx <- pcoutbetx < mvP
    return(list(rownames(as.matrix(pcoutbetx[outmvbetx])), pcoutbetx))
} # }}}

pcouted <- function(x, explvar = 0.99, crit.M1 = 1/3, crit.c1 = 2.5,
                    crit.M2 = 1/4, crit.c2 = 0.99, cs = 0.25,
                    outbound = 0.25, ...) { # {{{
    # Modified version of mvoutlier::pcout computing a final outlyingness
    # weight per sample for methylomic data.  Differences from the original:
    # columns with MAD == 0 are dropped (instead of failing) and only the
    # final weight vector is returned.
    x.mad <- apply(x, 2, mad)
    x <- x[, !x.mad == 0]        # drop zero-MAD columns so scaling is defined
    x.mad <- x.mad[!x.mad == 0]
    p <- ncol(x)
    n <- nrow(x)
    #
    # PHASE 1:
    # Step 1: robustly sphere the data.
    x.sc <- scale(x, apply(x, 2, median), x.mad)
    # Step 2: PC decomposition; compute p*, robustly sphere.
    x.svd <- svd(scale(x.sc, TRUE, FALSE))
    a <- x.svd$d^2 / (n - 1)
    p1 <- (1:p)[(cumsum(a) / sum(a) > explvar)][1]
    x.pc <- x.sc %*% x.svd$v[, 1:p1]
    xpc.sc <- scale(x.pc, apply(x.pc, 2, median), apply(x.pc, 2, mad))
    # Step 3: compute robust kurtosis weights, transform to distances.
    wp <- abs(apply(xpc.sc^4, 2, mean) - 3)
    xpcw.sc <- xpc.sc %*% diag(wp / sum(wp))
    xpc.norm <- sqrt(apply(xpcw.sc^2, 1, sum))
    x.dist1 <- xpc.norm * sqrt(qchisq(0.5, p1)) / median(xpc.norm)
    # Step 4: determine weights according to translated biweight.
    M1 <- quantile(x.dist1, crit.M1)
    const1 <- median(x.dist1) + crit.c1 * mad(x.dist1)
    w1 <- (1 - ((x.dist1 - M1) / (const1 - M1))^2)^2
    w1[x.dist1 < M1] <- 1
    w1[x.dist1 > const1] <- 0
    #
    # PHASE 2:
    # Step 5: compute Euclidean norms of PCs and their distances.
    xpc.norm <- sqrt(apply(xpc.sc^2, 1, sum))
    x.dist2 <- xpc.norm * sqrt(qchisq(0.5, p1)) / median(xpc.norm)
    # Step 6: determine weight according to translated biweight.
    M2 <- sqrt(qchisq(crit.M2, p1))
    const2 <- sqrt(qchisq(crit.c2, p1))
    w2 <- (1 - ((x.dist2 - M2) / (const2 - M2))^2)^2
    w2[x.dist2 < M2] <- 1
    w2[x.dist2 > const2] <- 0
    #
    # Combine PHASE 1 and PHASE 2: compute final weights.
    # Changed output slightly relative to mvoutlier::pcout.
    wfinal <- (w1 + cs) * (w2 + cs) / ((1 + cs)^2)
    return(wfinal)
} # }}}
c DCNF-Autarky [version 0.0.1]. c Copyright (c) 2018-2019 Swansea University. c c Input Clause Count: 77330 c Performing E1-Autarky iteration. c Remaining clauses count after E-Reduction: 75034 c c Performing E1-Autarky iteration. c Remaining clauses count after E-Reduction: 75034 c c Input Parameter (command line, file): c input filename QBFLIB/Gent-Rowley/Connect4/connect_7x6_3_W.qdimacs c output filename /tmp/dcnfAutarky.dimacs c autarky level 1 c conformity level 0 c encoding type 2 c no.of var 97742 c no.of clauses 77330 c no.of taut cls 0 c c Output Parameters: c remaining no.of clauses 75034 c c QBFLIB/Gent-Rowley/Connect4/connect_7x6_3_W.qdimacs 97742 77330 E1 [10101 10102 10103 10104 10105 10106 10107 10108 10109 10110 10111 10112 10113 10114 10115 10116 10117 10118 10119 10120 10121 10122 10123 10124 10125 10126 10127 10128 10129 10130 10131 10132 10133 10134 10135 10136 10137 10138 10139 10140 10141 10142 10201 10202 10203 10204 10205 10206 10207 10208 10209 10210 10211 10212 10213 10214 10215 10216 10217 10218 10219 10220 10221 10222 10223 10224 10225 10226 10227 10228 10229 10230 10231 10232 10233 10234 10235 10236 10237 10238 10239 10240 10241 10242 10301 10302 10303 10304 10305 10306 10307 10308 10309 10310 10311 10312 10313 10314 10315 10316 10317 10318 10319 10320 10321 10322 10323 10324 10325 10326 10327 10328 10329 10330 10331 10332 10333 10334 10335 10336 10337 10338 10339 10340 10341 10342 10401 10402 10403 10404 10405 10406 10407 10408 10409 10410 10411 10412 10413 10414 10415 10416 10417 10418 10419 10420 10421 10422 10423 10424 10425 10426 10427 10428 10429 10430 10431 10432 10433 10434 10435 10436 10437 10438 10439 10440 10441 10442 10501 10502 10503 10504 10505 10506 10507 10508 10509 10510 10511 10512 10513 10514 10515 10516 10517 10518 10519 10520 10521 10522 10523 10524 10525 10526 10527 10528 10529 10530 10531 10532 10533 10534 10535 10536 10537 10538 10539 10540 10541 10542 10601 10602 10603 10604 10605 10606 10607 10608 10609 10610 
10611 10612 10613 10614 10615 10616 10617 10618 10619 10620 10621 10622 10623 10624 10625 10626 10627 10628 10629 10630 10631 10632 10633 10634 10635 10636 10637 10638 10639 10640 10641 10642 10701 10702 10703 10704 10705 10706 10707 10708 10709 10710 10711 10712 10713 10714 10715 10716 10717 10718 10719 10720 10721 10722 10723 10724 10725 10726 10727 10728 10729 10730 10731 10732 10733 10734 10735 10736 10737 10738 10739 10740 10741 10742 20101 20102 20103 20104 20105 20106 20107 20108 20109 20110 20111 20112 20113 20114 20115 20116 20117 20118 20119 20120 20121 20122 20123 20124 20125 20126 20127 20128 20129 20130 20131 20132 20133 20134 20135 20136 20137 20138 20139 20140 20141 20142 20201 20202 20203 20204 20205 20206 20207 20208 20209 20210 20211 20212 20213 20214 20215 20216 20217 20218 20219 20220 20221 20222 20223 20224 20225 20226 20227 20228 20229 20230 20231 20232 20233 20234 20235 20236 20237 20238 20239 20240 20241 20242 20301 20302 20303 20304 20305 20306 20307 20308 20309 20310 20311 20312 20313 20314 20315 20316 20317 20318 20319 20320 20321 20322 20323 20324 20325 20326 20327 20328 20329 20330 20331 20332 20333 20334 20335 20336 20337 20338 20339 20340 20341 20342 20401 20402 20403 20404 20405 20406 20407 20408 20409 20410 20411 20412 20413 20414 20415 20416 20417 20418 20419 20420 20421 20422 20423 20424 20425 20426 20427 20428 20429 20430 20431 20432 20433 20434 20435 20436 20437 20438 20439 20440 20441 20442 20501 20502 20503 20504 20505 20506 20507 20508 20509 20510 20511 20512 20513 20514 20515 20516 20517 20518 20519 20520 20521 20522 20523 20524 20525 20526 20527 20528 20529 20530 20531 20532 20533 20534 20535 20536 20537 20538 20539 20540 20541 20542 20601 20602 20603 20604 20605 20606 20607 20608 20609 20610 20611 20612 20613 20614 20615 20616 20617 20618 20619 20620 20621 20622 20623 20624 20625 20626 20627 20628 20629 20630 20631 20632 20633 20634 20635 20636 20637 20638 20639 20640 20641 20642 20701 20702 20703 20704 20705 20706 20707 
20708 20709 20710 20711 20712 20713 20714 20715 20716 20717 20718 20719 20720 20721 20722 20723 20724 20725 20726 20727 20728 20729 20730 20731 20732 20733 20734 20735 20736 20737 20738 20739 20740 20741 20742 80101 80102 80103 80104 80105 80106 80107 80108 80109 80110 80111 80112 80113 80114 80115 80116 80117 80118 80119 80120 80121 80122 80123 80124 80125 80126 80127 80128 80129 80130 80131 80132 80133 80134 80135 80136 80137 80138 80139 80140 80141 80201 80202 80203 80204 80205 80206 80207 80208 80209 80210 80211 80212 80213 80214 80215 80216 80217 80218 80219 80220 80221 80222 80223 80224 80225 80226 80227 80228 80229 80230 80231 80232 80233 80234 80235 80236 80237 80238 80239 80240 80241 80301 80302 80303 80304 80305 80306 80307 80308 80309 80310 80311 80312 80313 80314 80315 80316 80317 80318 80319 80320 80321 80322 80323 80324 80325 80326 80327 80328 80329 80330 80331 80332 80333 80334 80335 80336 80337 80338 80339 80340 80341 80401 80402 80403 80404 80405 80406 80407 80408 80409 80410 80411 80412 80413 80414 80415 80416 80417 80418 80419 80420 80421 80422 80423 80424 80425 80426 80427 80428 80429 80430 80431 80432 80433 80434 80435 80436 80437 80438 80439 80440 80441 80501 80502 80503 80504 80505 80506 80507 80508 80509 80510 80511 80512 80513 80514 80515 80516 80517 80518 80519 80520 80521 80522 80523 80524 80525 80526 80527 80528 80529 80530 80531 80532 80533 80534 80535 80536 80537 80538 80539 80540 80541 80601 80602 80603 80604 80605 80606 80607 80608 80609 80610 80611 80612 80613 80614 80615 80616 80617 80618 80619 80620 80621 80622 80623 80624 80625 80626 80627 80628 80629 80630 80631 80632 80633 80634 80635 80636 80637 80638 80639 80640 80641 80701 80702 80703 80704 80705 80706 80707 80708 80709 80710 80711 80712 80713 80714 80715 80716 80717 80718 80719 80720 80721 80722 80723 80724 80725 80726 80727 80728 80729 80730 80731 80732 80733 80734 80735 80736 80737 80738 80739 80740 80741 81102 81104 81106 81108 81110 81112 81114 81116 81118 81120 81122 
81124 81126 81128 81130 81132 81134 81136 81138 81140 81202 81204 81206 81208 81210 81212 81214 81216 81218 81220 81222 81224 81226 81228 81230 81232 81234 81236 81238 81240 81302 81304 81306 81308 81310 81312 81314 81316 81318 81320 81322 81324 81326 81328 81330 81332 81334 81336 81338 81340 81402 81404 81406 81408 81410 81412 81414 81416 81418 81420 81422 81424 81426 81428 81430 81432 81434 81436 81438 81440 81502 81504 81506 81508 81510 81512 81514 81516 81518 81520 81522 81524 81526 81528 81530 81532 81534 81536 81538 81540 81602 81604 81606 81608 81610 81612 81614 81616 81618 81620 81622 81624 81626 81628 81630 81632 81634 81636 81638 81640 81702 81704 81706 81708 81710 81712 81714 81716 81718 81720 81722 81724 81726 81728 81730 81732 81734 81736 81738 81740 82102 82104 82106 82108 82110 82112 82114 82116 82118 82120 82122 82124 82126 82128 82130 82132 82134 82136 82138 82140 82202 82204 82206 82208 82210 82212 82214 82216 82218 82220 82222 82224 82226 82228 82230 82232 82234 82236 82238 82240 82302 82304 82306 82308 82310 82312 82314 82316 82318 82320 82322 82324 82326 82328 82330 82332 82334 82336 82338 82340 82402 82404 82406 82408 82410 82412 82414 82416 82418 82420 82422 82424 82426 82428 82430 82432 82434 82436 82438 82440 82502 82504 82506 82508 82510 82512 82514 82516 82518 82520 82522 82524 82526 82528 82530 82532 82534 82536 82538 82540 82602 82604 82606 82608 82610 82612 82614 82616 82618 82620 82622 82624 82626 82628 82630 82632 82634 82636 82638 82640 82702 82704 82706 82708 82710 82712 82714 82716 82718 82720 82722 82724 82726 82728 82730 82732 82734 82736 82738 82740 83102 83104 83106 83108 83110 83112 83114 83116 83118 83120 83122 83124 83126 83128 83130 83132 83134 83136 83138 83140 83202 83204 83206 83208 83210 83212 83214 83216 83218 83220 83222 83224 83226 83228 83230 83232 83234 83236 83238 83240 83302 83304 83306 83308 83310 83312 83314 83316 83318 83320 83322 83324 83326 83328 83330 83332 83334 83336 83338 83340 83402 83404 83406 83408 
83410 83412 83414 83416 83418 83420 83422 83424 83426 83428 83430 83432 83434 83436 83438 83440 83502 83504 83506 83508 83510 83512 83514 83516 83518 83520 83522 83524 83526 83528 83530 83532 83534 83536 83538 83540 83602 83604 83606 83608 83610 83612 83614 83616 83618 83620 83622 83624 83626 83628 83630 83632 83634 83636 83638 83640 83702 83704 83706 83708 83710 83712 83714 83716 83718 83720 83722 83724 83726 83728 83730 83732 83734 83736 83738 83740 84102 84104 84106 84108 84110 84112 84114 84116 84118 84120 84122 84124 84126 84128 84130 84132 84134 84136 84138 84140 84202 84204 84206 84208 84210 84212 84214 84216 84218 84220 84222 84224 84226 84228 84230 84232 84234 84236 84238 84240 84302 84304 84306 84308 84310 84312 84314 84316 84318 84320 84322 84324 84326 84328 84330 84332 84334 84336 84338 84340 84402 84404 84406 84408 84410 84412 84414 84416 84418 84420 84422 84424 84426 84428 84430 84432 84434 84436 84438 84440 84502 84504 84506 84508 84510 84512 84514 84516 84518 84520 84522 84524 84526 84528 84530 84532 84534 84536 84538 84540 84602 84604 84606 84608 84610 84612 84614 84616 84618 84620 84622 84624 84626 84628 84630 84632 84634 84636 84638 84640 84702 84704 84706 84708 84710 84712 84714 84716 84718 84720 84722 84724 84726 84728 84730 84732 84734 84736 84738 84740 85102 85104 85106 85108 85110 85112 85114 85116 85118 85120 85122 85124 85126 85128 85130 85132 85134 85136 85138 85140 85202 85204 85206 85208 85210 85212 85214 85216 85218 85220 85222 85224 85226 85228 85230 85232 85234 85236 85238 85240 85302 85304 85306 85308 85310 85312 85314 85316 85318 85320 85322 85324 85326 85328 85330 85332 85334 85336 85338 85340 85402 85404 85406 85408 85410 85412 85414 85416 85418 85420 85422 85424 85426 85428 85430 85432 85434 85436 85438 85440 85502 85504 85506 85508 85510 85512 85514 85516 85518 85520 85522 85524 85526 85528 85530 85532 85534 85536 85538 85540 85602 85604 85606 85608 85610 85612 85614 85616 85618 85620 85622 85624 85626 85628 85630 85632 85634 
85636 85638 85640 85702 85704 85706 85708 85710 85712 85714 85716 85718 85720 85722 85724 85726 85728 85730 85732 85734 85736 85738 85740 86102 86104 86106 86108 86110 86112 86114 86116 86118 86120 86122 86124 86126 86128 86130 86132 86134 86136 86138 86140 86202 86204 86206 86208 86210 86212 86214 86216 86218 86220 86222 86224 86226 86228 86230 86232 86234 86236 86238 86240 86302 86304 86306 86308 86310 86312 86314 86316 86318 86320 86322 86324 86326 86328 86330 86332 86334 86336 86338 86340 86402 86404 86406 86408 86410 86412 86414 86416 86418 86420 86422 86424 86426 86428 86430 86432 86434 86436 86438 86440 86502 86504 86506 86508 86510 86512 86514 86516 86518 86520 86522 86524 86526 86528 86530 86532 86534 86536 86538 86540 86602 86604 86606 86608 86610 86612 86614 86616 86618 86620 86622 86624 86626 86628 86630 86632 86634 86636 86638 86640 86702 86704 86706 86708 86710 86712 86714 86716 86718 86720 86722 86724 86726 86728 86730 86732 86734 86736 86738 86740 90101 90102 90103 90104 90105 90106 90107 90108 90109 90110 90111 90112 90113 90114 90115 90116 90117 90118 90119 90120 90121 90122 90123 90124 90125 90126 90127 90128 90129 90130 90131 90132 90133 90134 90135 90136 90137 90138 90139 90140 90141 90201 90202 90203 90204 90205 90206 90207 90208 90209 90210 90211 90212 90213 90214 90215 90216 90217 90218 90219 90220 90221 90222 90223 90224 90225 90226 90227 90228 90229 90230 90231 90232 90233 90234 90235 90236 90237 90238 90239 90240 90241 90301 90302 90303 90304 90305 90306 90307 90308 90309 90310 90311 90312 90313 90314 90315 90316 90317 90318 90319 90320 90321 90322 90323 90324 90325 90326 90327 90328 90329 90330 90331 90332 90333 90334 90335 90336 90337 90338 90339 90340 90341 90401 90402 90403 90404 90405 90406 90407 90408 90409 90410 90411 90412 90413 90414 90415 90416 90417 90418 90419 90420 90421 90422 90423 90424 90425 90426 90427 90428 90429 90430 90431 90432 90433 90434 90435 90436 90437 90438 90439 90440 90441 90501 90502 90503 90504 90505 90506 
90507 90508 90509 90510 90511 90512 90513 90514 90515 90516 90517 90518 90519 90520 90521 90522 90523 90524 90525 90526 90527 90528 90529 90530 90531 90532 90533 90534 90535 90536 90537 90538 90539 90540 90541 90601 90602 90603 90604 90605 90606 90607 90608 90609 90610 90611 90612 90613 90614 90615 90616 90617 90618 90619 90620 90621 90622 90623 90624 90625 90626 90627 90628 90629 90630 90631 90632 90633 90634 90635 90636 90637 90638 90639 90640 90641 90701 90702 90703 90704 90705 90706 90707 90708 90709 90710 90711 90712 90713 90714 90715 90716 90717 90718 90719 90720 90721 90722 90723 90724 90725 90726 90727 90728 90729 90730 90731 90732 90733 90734 90735 90736 90737 90738 90739 90740 90741 91101 91103 91105 91107 91109 91111 91113 91115 91117 91119 91121 91123 91125 91127 91129 91131 91133 91135 91137 91139 91141 91201 91203 91205 91207 91209 91211 91213 91215 91217 91219 91221 91223 91225 91227 91229 91231 91233 91235 91237 91239 91241 91301 91303 91305 91307 91309 91311 91313 91315 91317 91319 91321 91323 91325 91327 91329 91331 91333 91335 91337 91339 91341 91401 91403 91405 91407 91409 91411 91413 91415 91417 91419 91421 91423 91425 91427 91429 91431 91433 91435 91437 91439 91441 91501 91503 91505 91507 91509 91511 91513 91515 91517 91519 91521 91523 91525 91527 91529 91531 91533 91535 91537 91539 91541 91601 91603 91605 91607 91609 91611 91613 91615 91617 91619 91621 91623 91625 91627 91629 91631 91633 91635 91637 91639 91641 91701 91703 91705 91707 91709 91711 91713 91715 91717 91719 91721 91723 91725 91727 91729 91731 91733 91735 91737 91739 91741 92101 92103 92105 92107 92109 92111 92113 92115 92117 92119 92121 92123 92125 92127 92129 92131 92133 92135 92137 92139 92141 92201 92203 92205 92207 92209 92211 92213 92215 92217 92219 92221 92223 92225 92227 92229 92231 92233 92235 92237 92239 92241 92301 92303 92305 92307 92309 92311 92313 92315 92317 92319 92321 92323 92325 92327 92329 92331 92333 92335 92337 92339 92341 92401 92403 92405 92407 92409 92411 
92413 92415 92417 92419 92421 92423 92425 92427 92429 92431 92433 92435 92437 92439 92441 92501 92503 92505 92507 92509 92511 92513 92515 92517 92519 92521 92523 92525 92527 92529 92531 92533 92535 92537 92539 92541 92601 92603 92605 92607 92609 92611 92613 92615 92617 92619 92621 92623 92625 92627 92629 92631 92633 92635 92637 92639 92641 92701 92703 92705 92707 92709 92711 92713 92715 92717 92719 92721 92723 92725 92727 92729 92731 92733 92735 92737 92739 92741 93101 93103 93105 93107 93109 93111 93113 93115 93117 93119 93121 93123 93125 93127 93129 93131 93133 93135 93137 93139 93141 93201 93203 93205 93207 93209 93211 93213 93215 93217 93219 93221 93223 93225 93227 93229 93231 93233 93235 93237 93239 93241 93301 93303 93305 93307 93309 93311 93313 93315 93317 93319 93321 93323 93325 93327 93329 93331 93333 93335 93337 93339 93341 93401 93403 93405 93407 93409 93411 93413 93415 93417 93419 93421 93423 93425 93427 93429 93431 93433 93435 93437 93439 93441 93501 93503 93505 93507 93509 93511 93513 93515 93517 93519 93521 93523 93525 93527 93529 93531 93533 93535 93537 93539 93541 93601 93603 93605 93607 93609 93611 93613 93615 93617 93619 93621 93623 93625 93627 93629 93631 93633 93635 93637 93639 93641 93701 93703 93705 93707 93709 93711 93713 93715 93717 93719 93721 93723 93725 93727 93729 93731 93733 93735 93737 93739 93741 94101 94103 94105 94107 94109 94111 94113 94115 94117 94119 94121 94123 94125 94127 94129 94131 94133 94135 94137 94139 94141 94201 94203 94205 94207 94209 94211 94213 94215 94217 94219 94221 94223 94225 94227 94229 94231 94233 94235 94237 94239 94241 94301 94303 94305 94307 94309 94311 94313 94315 94317 94319 94321 94323 94325 94327 94329 94331 94333 94335 94337 94339 94341 94401 94403 94405 94407 94409 94411 94413 94415 94417 94419 94421 94423 94425 94427 94429 94431 94433 94435 94437 94439 94441 94501 94503 94505 94507 94509 94511 94513 94515 94517 94519 94521 94523 94525 94527 94529 94531 94533 94535 94537 94539 94541 94601 94603 94605 
94607 94609 94611 94613 94615 94617 94619 94621 94623 94625 94627 94629 94631 94633 94635 94637 94639 94641 94701 94703 94705 94707 94709 94711 94713 94715 94717 94719 94721 94723 94725 94727 94729 94731 94733 94735 94737 94739 94741 95101 95103 95105 95107 95109 95111 95113 95115 95117 95119 95121 95123 95125 95127 95129 95131 95133 95135 95137 95139 95141 95201 95203 95205 95207 95209 95211 95213 95215 95217 95219 95221 95223 95225 95227 95229 95231 95233 95235 95237 95239 95241 95301 95303 95305 95307 95309 95311 95313 95315 95317 95319 95321 95323 95325 95327 95329 95331 95333 95335 95337 95339 95341 95401 95403 95405 95407 95409 95411 95413 95415 95417 95419 95421 95423 95425 95427 95429 95431 95433 95435 95437 95439 95441 95501 95503 95505 95507 95509 95511 95513 95515 95517 95519 95521 95523 95525 95527 95529 95531 95533 95535 95537 95539 95541 95601 95603 95605 95607 95609 95611 95613 95615 95617 95619 95621 95623 95625 95627 95629 95631 95633 95635 95637 95639 95641 95701 95703 95705 95707 95709 95711 95713 95715 95717 95719 95721 95723 95725 95727 95729 95731 95733 95735 95737 95739 95741 96101 96103 96105 96107 96109 96111 96113 96115 96117 96119 96121 96123 96125 96127 96129 96131 96133 96135 96137 96139 96141 96201 96203 96205 96207 96209 96211 96213 96215 96217 96219 96221 96223 96225 96227 96229 96231 96233 96235 96237 96239 96241 96301 96303 96305 96307 96309 96311 96313 96315 96317 96319 96321 96323 96325 96327 96329 96331 96333 96335 96337 96339 96341 96401 96403 96405 96407 96409 96411 96413 96415 96417 96419 96421 96423 96425 96427 96429 96431 96433 96435 96437 96439 96441 96501 96503 96505 96507 96509 96511 96513 96515 96517 96519 96521 96523 96525 96527 96529 96531 96533 96535 96537 96539 96541 96601 96603 96605 96607 96609 96611 96613 96615 96617 96619 96621 96623 96625 96627 96629 96631 96633 96635 96637 96639 96641 96701 96703 96705 96707 96709 96711 96713 96715 96717 96719 96721 96723 96725 96727 96729 96731 96733 96735 96737 96739 96741] 
0 147 18092 75034 RED
/code/dcnf-ankit-optimized/Results/QBFLIB-2018/E1/Experiments/Gent-Rowley/Connect4/connect_7x6_3_W/connect_7x6_3_W.R
no_license
arey0pushpa/dcnf-autarky
R
false
false
18,033
r
c DCNF-Autarky [version 0.0.1]. c Copyright (c) 2018-2019 Swansea University. c c Input Clause Count: 77330 c Performing E1-Autarky iteration. c Remaining clauses count after E-Reduction: 75034 c c Performing E1-Autarky iteration. c Remaining clauses count after E-Reduction: 75034 c c Input Parameter (command line, file): c input filename QBFLIB/Gent-Rowley/Connect4/connect_7x6_3_W.qdimacs c output filename /tmp/dcnfAutarky.dimacs c autarky level 1 c conformity level 0 c encoding type 2 c no.of var 97742 c no.of clauses 77330 c no.of taut cls 0 c c Output Parameters: c remaining no.of clauses 75034 c c QBFLIB/Gent-Rowley/Connect4/connect_7x6_3_W.qdimacs 97742 77330 E1 [10101 10102 10103 10104 10105 10106 10107 10108 10109 10110 10111 10112 10113 10114 10115 10116 10117 10118 10119 10120 10121 10122 10123 10124 10125 10126 10127 10128 10129 10130 10131 10132 10133 10134 10135 10136 10137 10138 10139 10140 10141 10142 10201 10202 10203 10204 10205 10206 10207 10208 10209 10210 10211 10212 10213 10214 10215 10216 10217 10218 10219 10220 10221 10222 10223 10224 10225 10226 10227 10228 10229 10230 10231 10232 10233 10234 10235 10236 10237 10238 10239 10240 10241 10242 10301 10302 10303 10304 10305 10306 10307 10308 10309 10310 10311 10312 10313 10314 10315 10316 10317 10318 10319 10320 10321 10322 10323 10324 10325 10326 10327 10328 10329 10330 10331 10332 10333 10334 10335 10336 10337 10338 10339 10340 10341 10342 10401 10402 10403 10404 10405 10406 10407 10408 10409 10410 10411 10412 10413 10414 10415 10416 10417 10418 10419 10420 10421 10422 10423 10424 10425 10426 10427 10428 10429 10430 10431 10432 10433 10434 10435 10436 10437 10438 10439 10440 10441 10442 10501 10502 10503 10504 10505 10506 10507 10508 10509 10510 10511 10512 10513 10514 10515 10516 10517 10518 10519 10520 10521 10522 10523 10524 10525 10526 10527 10528 10529 10530 10531 10532 10533 10534 10535 10536 10537 10538 10539 10540 10541 10542 10601 10602 10603 10604 10605 10606 10607 10608 10609 10610 
10611 10612 10613 10614 10615 10616 10617 10618 10619 10620 10621 10622 10623 10624 10625 10626 10627 10628 10629 10630 10631 10632 10633 10634 10635 10636 10637 10638 10639 10640 10641 10642 10701 10702 10703 10704 10705 10706 10707 10708 10709 10710 10711 10712 10713 10714 10715 10716 10717 10718 10719 10720 10721 10722 10723 10724 10725 10726 10727 10728 10729 10730 10731 10732 10733 10734 10735 10736 10737 10738 10739 10740 10741 10742 20101 20102 20103 20104 20105 20106 20107 20108 20109 20110 20111 20112 20113 20114 20115 20116 20117 20118 20119 20120 20121 20122 20123 20124 20125 20126 20127 20128 20129 20130 20131 20132 20133 20134 20135 20136 20137 20138 20139 20140 20141 20142 20201 20202 20203 20204 20205 20206 20207 20208 20209 20210 20211 20212 20213 20214 20215 20216 20217 20218 20219 20220 20221 20222 20223 20224 20225 20226 20227 20228 20229 20230 20231 20232 20233 20234 20235 20236 20237 20238 20239 20240 20241 20242 20301 20302 20303 20304 20305 20306 20307 20308 20309 20310 20311 20312 20313 20314 20315 20316 20317 20318 20319 20320 20321 20322 20323 20324 20325 20326 20327 20328 20329 20330 20331 20332 20333 20334 20335 20336 20337 20338 20339 20340 20341 20342 20401 20402 20403 20404 20405 20406 20407 20408 20409 20410 20411 20412 20413 20414 20415 20416 20417 20418 20419 20420 20421 20422 20423 20424 20425 20426 20427 20428 20429 20430 20431 20432 20433 20434 20435 20436 20437 20438 20439 20440 20441 20442 20501 20502 20503 20504 20505 20506 20507 20508 20509 20510 20511 20512 20513 20514 20515 20516 20517 20518 20519 20520 20521 20522 20523 20524 20525 20526 20527 20528 20529 20530 20531 20532 20533 20534 20535 20536 20537 20538 20539 20540 20541 20542 20601 20602 20603 20604 20605 20606 20607 20608 20609 20610 20611 20612 20613 20614 20615 20616 20617 20618 20619 20620 20621 20622 20623 20624 20625 20626 20627 20628 20629 20630 20631 20632 20633 20634 20635 20636 20637 20638 20639 20640 20641 20642 20701 20702 20703 20704 20705 20706 20707 
20708 20709 20710 20711 20712 20713 20714 20715 20716 20717 20718 20719 20720 20721 20722 20723 20724 20725 20726 20727 20728 20729 20730 20731 20732 20733 20734 20735 20736 20737 20738 20739 20740 20741 20742 80101 80102 80103 80104 80105 80106 80107 80108 80109 80110 80111 80112 80113 80114 80115 80116 80117 80118 80119 80120 80121 80122 80123 80124 80125 80126 80127 80128 80129 80130 80131 80132 80133 80134 80135 80136 80137 80138 80139 80140 80141 80201 80202 80203 80204 80205 80206 80207 80208 80209 80210 80211 80212 80213 80214 80215 80216 80217 80218 80219 80220 80221 80222 80223 80224 80225 80226 80227 80228 80229 80230 80231 80232 80233 80234 80235 80236 80237 80238 80239 80240 80241 80301 80302 80303 80304 80305 80306 80307 80308 80309 80310 80311 80312 80313 80314 80315 80316 80317 80318 80319 80320 80321 80322 80323 80324 80325 80326 80327 80328 80329 80330 80331 80332 80333 80334 80335 80336 80337 80338 80339 80340 80341 80401 80402 80403 80404 80405 80406 80407 80408 80409 80410 80411 80412 80413 80414 80415 80416 80417 80418 80419 80420 80421 80422 80423 80424 80425 80426 80427 80428 80429 80430 80431 80432 80433 80434 80435 80436 80437 80438 80439 80440 80441 80501 80502 80503 80504 80505 80506 80507 80508 80509 80510 80511 80512 80513 80514 80515 80516 80517 80518 80519 80520 80521 80522 80523 80524 80525 80526 80527 80528 80529 80530 80531 80532 80533 80534 80535 80536 80537 80538 80539 80540 80541 80601 80602 80603 80604 80605 80606 80607 80608 80609 80610 80611 80612 80613 80614 80615 80616 80617 80618 80619 80620 80621 80622 80623 80624 80625 80626 80627 80628 80629 80630 80631 80632 80633 80634 80635 80636 80637 80638 80639 80640 80641 80701 80702 80703 80704 80705 80706 80707 80708 80709 80710 80711 80712 80713 80714 80715 80716 80717 80718 80719 80720 80721 80722 80723 80724 80725 80726 80727 80728 80729 80730 80731 80732 80733 80734 80735 80736 80737 80738 80739 80740 80741 81102 81104 81106 81108 81110 81112 81114 81116 81118 81120 81122 
81124 81126 81128 81130 81132 81134 81136 81138 81140 81202 81204 81206 81208 81210 81212 81214 81216 81218 81220 81222 81224 81226 81228 81230 81232 81234 81236 81238 81240 81302 81304 81306 81308 81310 81312 81314 81316 81318 81320 81322 81324 81326 81328 81330 81332 81334 81336 81338 81340 81402 81404 81406 81408 81410 81412 81414 81416 81418 81420 81422 81424 81426 81428 81430 81432 81434 81436 81438 81440 81502 81504 81506 81508 81510 81512 81514 81516 81518 81520 81522 81524 81526 81528 81530 81532 81534 81536 81538 81540 81602 81604 81606 81608 81610 81612 81614 81616 81618 81620 81622 81624 81626 81628 81630 81632 81634 81636 81638 81640 81702 81704 81706 81708 81710 81712 81714 81716 81718 81720 81722 81724 81726 81728 81730 81732 81734 81736 81738 81740 82102 82104 82106 82108 82110 82112 82114 82116 82118 82120 82122 82124 82126 82128 82130 82132 82134 82136 82138 82140 82202 82204 82206 82208 82210 82212 82214 82216 82218 82220 82222 82224 82226 82228 82230 82232 82234 82236 82238 82240 82302 82304 82306 82308 82310 82312 82314 82316 82318 82320 82322 82324 82326 82328 82330 82332 82334 82336 82338 82340 82402 82404 82406 82408 82410 82412 82414 82416 82418 82420 82422 82424 82426 82428 82430 82432 82434 82436 82438 82440 82502 82504 82506 82508 82510 82512 82514 82516 82518 82520 82522 82524 82526 82528 82530 82532 82534 82536 82538 82540 82602 82604 82606 82608 82610 82612 82614 82616 82618 82620 82622 82624 82626 82628 82630 82632 82634 82636 82638 82640 82702 82704 82706 82708 82710 82712 82714 82716 82718 82720 82722 82724 82726 82728 82730 82732 82734 82736 82738 82740 83102 83104 83106 83108 83110 83112 83114 83116 83118 83120 83122 83124 83126 83128 83130 83132 83134 83136 83138 83140 83202 83204 83206 83208 83210 83212 83214 83216 83218 83220 83222 83224 83226 83228 83230 83232 83234 83236 83238 83240 83302 83304 83306 83308 83310 83312 83314 83316 83318 83320 83322 83324 83326 83328 83330 83332 83334 83336 83338 83340 83402 83404 83406 83408 
83410 83412 83414 83416 83418 83420 83422 83424 83426 83428 83430 83432 83434 83436 83438 83440 83502 83504 83506 83508 83510 83512 83514 83516 83518 83520 83522 83524 83526 83528 83530 83532 83534 83536 83538 83540 83602 83604 83606 83608 83610 83612 83614 83616 83618 83620 83622 83624 83626 83628 83630 83632 83634 83636 83638 83640 83702 83704 83706 83708 83710 83712 83714 83716 83718 83720 83722 83724 83726 83728 83730 83732 83734 83736 83738 83740 84102 84104 84106 84108 84110 84112 84114 84116 84118 84120 84122 84124 84126 84128 84130 84132 84134 84136 84138 84140 84202 84204 84206 84208 84210 84212 84214 84216 84218 84220 84222 84224 84226 84228 84230 84232 84234 84236 84238 84240 84302 84304 84306 84308 84310 84312 84314 84316 84318 84320 84322 84324 84326 84328 84330 84332 84334 84336 84338 84340 84402 84404 84406 84408 84410 84412 84414 84416 84418 84420 84422 84424 84426 84428 84430 84432 84434 84436 84438 84440 84502 84504 84506 84508 84510 84512 84514 84516 84518 84520 84522 84524 84526 84528 84530 84532 84534 84536 84538 84540 84602 84604 84606 84608 84610 84612 84614 84616 84618 84620 84622 84624 84626 84628 84630 84632 84634 84636 84638 84640 84702 84704 84706 84708 84710 84712 84714 84716 84718 84720 84722 84724 84726 84728 84730 84732 84734 84736 84738 84740 85102 85104 85106 85108 85110 85112 85114 85116 85118 85120 85122 85124 85126 85128 85130 85132 85134 85136 85138 85140 85202 85204 85206 85208 85210 85212 85214 85216 85218 85220 85222 85224 85226 85228 85230 85232 85234 85236 85238 85240 85302 85304 85306 85308 85310 85312 85314 85316 85318 85320 85322 85324 85326 85328 85330 85332 85334 85336 85338 85340 85402 85404 85406 85408 85410 85412 85414 85416 85418 85420 85422 85424 85426 85428 85430 85432 85434 85436 85438 85440 85502 85504 85506 85508 85510 85512 85514 85516 85518 85520 85522 85524 85526 85528 85530 85532 85534 85536 85538 85540 85602 85604 85606 85608 85610 85612 85614 85616 85618 85620 85622 85624 85626 85628 85630 85632 85634 
85636 85638 85640 85702 85704 85706 85708 85710 85712 85714 85716 85718 85720 85722 85724 85726 85728 85730 85732 85734 85736 85738 85740 86102 86104 86106 86108 86110 86112 86114 86116 86118 86120 86122 86124 86126 86128 86130 86132 86134 86136 86138 86140 86202 86204 86206 86208 86210 86212 86214 86216 86218 86220 86222 86224 86226 86228 86230 86232 86234 86236 86238 86240 86302 86304 86306 86308 86310 86312 86314 86316 86318 86320 86322 86324 86326 86328 86330 86332 86334 86336 86338 86340 86402 86404 86406 86408 86410 86412 86414 86416 86418 86420 86422 86424 86426 86428 86430 86432 86434 86436 86438 86440 86502 86504 86506 86508 86510 86512 86514 86516 86518 86520 86522 86524 86526 86528 86530 86532 86534 86536 86538 86540 86602 86604 86606 86608 86610 86612 86614 86616 86618 86620 86622 86624 86626 86628 86630 86632 86634 86636 86638 86640 86702 86704 86706 86708 86710 86712 86714 86716 86718 86720 86722 86724 86726 86728 86730 86732 86734 86736 86738 86740 90101 90102 90103 90104 90105 90106 90107 90108 90109 90110 90111 90112 90113 90114 90115 90116 90117 90118 90119 90120 90121 90122 90123 90124 90125 90126 90127 90128 90129 90130 90131 90132 90133 90134 90135 90136 90137 90138 90139 90140 90141 90201 90202 90203 90204 90205 90206 90207 90208 90209 90210 90211 90212 90213 90214 90215 90216 90217 90218 90219 90220 90221 90222 90223 90224 90225 90226 90227 90228 90229 90230 90231 90232 90233 90234 90235 90236 90237 90238 90239 90240 90241 90301 90302 90303 90304 90305 90306 90307 90308 90309 90310 90311 90312 90313 90314 90315 90316 90317 90318 90319 90320 90321 90322 90323 90324 90325 90326 90327 90328 90329 90330 90331 90332 90333 90334 90335 90336 90337 90338 90339 90340 90341 90401 90402 90403 90404 90405 90406 90407 90408 90409 90410 90411 90412 90413 90414 90415 90416 90417 90418 90419 90420 90421 90422 90423 90424 90425 90426 90427 90428 90429 90430 90431 90432 90433 90434 90435 90436 90437 90438 90439 90440 90441 90501 90502 90503 90504 90505 90506 
90507 90508 90509 90510 90511 90512 90513 90514 90515 90516 90517 90518 90519 90520 90521 90522 90523 90524 90525 90526 90527 90528 90529 90530 90531 90532 90533 90534 90535 90536 90537 90538 90539 90540 90541 90601 90602 90603 90604 90605 90606 90607 90608 90609 90610 90611 90612 90613 90614 90615 90616 90617 90618 90619 90620 90621 90622 90623 90624 90625 90626 90627 90628 90629 90630 90631 90632 90633 90634 90635 90636 90637 90638 90639 90640 90641 90701 90702 90703 90704 90705 90706 90707 90708 90709 90710 90711 90712 90713 90714 90715 90716 90717 90718 90719 90720 90721 90722 90723 90724 90725 90726 90727 90728 90729 90730 90731 90732 90733 90734 90735 90736 90737 90738 90739 90740 90741 91101 91103 91105 91107 91109 91111 91113 91115 91117 91119 91121 91123 91125 91127 91129 91131 91133 91135 91137 91139 91141 91201 91203 91205 91207 91209 91211 91213 91215 91217 91219 91221 91223 91225 91227 91229 91231 91233 91235 91237 91239 91241 91301 91303 91305 91307 91309 91311 91313 91315 91317 91319 91321 91323 91325 91327 91329 91331 91333 91335 91337 91339 91341 91401 91403 91405 91407 91409 91411 91413 91415 91417 91419 91421 91423 91425 91427 91429 91431 91433 91435 91437 91439 91441 91501 91503 91505 91507 91509 91511 91513 91515 91517 91519 91521 91523 91525 91527 91529 91531 91533 91535 91537 91539 91541 91601 91603 91605 91607 91609 91611 91613 91615 91617 91619 91621 91623 91625 91627 91629 91631 91633 91635 91637 91639 91641 91701 91703 91705 91707 91709 91711 91713 91715 91717 91719 91721 91723 91725 91727 91729 91731 91733 91735 91737 91739 91741 92101 92103 92105 92107 92109 92111 92113 92115 92117 92119 92121 92123 92125 92127 92129 92131 92133 92135 92137 92139 92141 92201 92203 92205 92207 92209 92211 92213 92215 92217 92219 92221 92223 92225 92227 92229 92231 92233 92235 92237 92239 92241 92301 92303 92305 92307 92309 92311 92313 92315 92317 92319 92321 92323 92325 92327 92329 92331 92333 92335 92337 92339 92341 92401 92403 92405 92407 92409 92411 
92413 92415 92417 92419 92421 92423 92425 92427 92429 92431 92433 92435 92437 92439 92441 92501 92503 92505 92507 92509 92511 92513 92515 92517 92519 92521 92523 92525 92527 92529 92531 92533 92535 92537 92539 92541 92601 92603 92605 92607 92609 92611 92613 92615 92617 92619 92621 92623 92625 92627 92629 92631 92633 92635 92637 92639 92641 92701 92703 92705 92707 92709 92711 92713 92715 92717 92719 92721 92723 92725 92727 92729 92731 92733 92735 92737 92739 92741 93101 93103 93105 93107 93109 93111 93113 93115 93117 93119 93121 93123 93125 93127 93129 93131 93133 93135 93137 93139 93141 93201 93203 93205 93207 93209 93211 93213 93215 93217 93219 93221 93223 93225 93227 93229 93231 93233 93235 93237 93239 93241 93301 93303 93305 93307 93309 93311 93313 93315 93317 93319 93321 93323 93325 93327 93329 93331 93333 93335 93337 93339 93341 93401 93403 93405 93407 93409 93411 93413 93415 93417 93419 93421 93423 93425 93427 93429 93431 93433 93435 93437 93439 93441 93501 93503 93505 93507 93509 93511 93513 93515 93517 93519 93521 93523 93525 93527 93529 93531 93533 93535 93537 93539 93541 93601 93603 93605 93607 93609 93611 93613 93615 93617 93619 93621 93623 93625 93627 93629 93631 93633 93635 93637 93639 93641 93701 93703 93705 93707 93709 93711 93713 93715 93717 93719 93721 93723 93725 93727 93729 93731 93733 93735 93737 93739 93741 94101 94103 94105 94107 94109 94111 94113 94115 94117 94119 94121 94123 94125 94127 94129 94131 94133 94135 94137 94139 94141 94201 94203 94205 94207 94209 94211 94213 94215 94217 94219 94221 94223 94225 94227 94229 94231 94233 94235 94237 94239 94241 94301 94303 94305 94307 94309 94311 94313 94315 94317 94319 94321 94323 94325 94327 94329 94331 94333 94335 94337 94339 94341 94401 94403 94405 94407 94409 94411 94413 94415 94417 94419 94421 94423 94425 94427 94429 94431 94433 94435 94437 94439 94441 94501 94503 94505 94507 94509 94511 94513 94515 94517 94519 94521 94523 94525 94527 94529 94531 94533 94535 94537 94539 94541 94601 94603 94605 
94607 94609 94611 94613 94615 94617 94619 94621 94623 94625 94627 94629 94631 94633 94635 94637 94639 94641 94701 94703 94705 94707 94709 94711 94713 94715 94717 94719 94721 94723 94725 94727 94729 94731 94733 94735 94737 94739 94741 95101 95103 95105 95107 95109 95111 95113 95115 95117 95119 95121 95123 95125 95127 95129 95131 95133 95135 95137 95139 95141 95201 95203 95205 95207 95209 95211 95213 95215 95217 95219 95221 95223 95225 95227 95229 95231 95233 95235 95237 95239 95241 95301 95303 95305 95307 95309 95311 95313 95315 95317 95319 95321 95323 95325 95327 95329 95331 95333 95335 95337 95339 95341 95401 95403 95405 95407 95409 95411 95413 95415 95417 95419 95421 95423 95425 95427 95429 95431 95433 95435 95437 95439 95441 95501 95503 95505 95507 95509 95511 95513 95515 95517 95519 95521 95523 95525 95527 95529 95531 95533 95535 95537 95539 95541 95601 95603 95605 95607 95609 95611 95613 95615 95617 95619 95621 95623 95625 95627 95629 95631 95633 95635 95637 95639 95641 95701 95703 95705 95707 95709 95711 95713 95715 95717 95719 95721 95723 95725 95727 95729 95731 95733 95735 95737 95739 95741 96101 96103 96105 96107 96109 96111 96113 96115 96117 96119 96121 96123 96125 96127 96129 96131 96133 96135 96137 96139 96141 96201 96203 96205 96207 96209 96211 96213 96215 96217 96219 96221 96223 96225 96227 96229 96231 96233 96235 96237 96239 96241 96301 96303 96305 96307 96309 96311 96313 96315 96317 96319 96321 96323 96325 96327 96329 96331 96333 96335 96337 96339 96341 96401 96403 96405 96407 96409 96411 96413 96415 96417 96419 96421 96423 96425 96427 96429 96431 96433 96435 96437 96439 96441 96501 96503 96505 96507 96509 96511 96513 96515 96517 96519 96521 96523 96525 96527 96529 96531 96533 96535 96537 96539 96541 96601 96603 96605 96607 96609 96611 96613 96615 96617 96619 96621 96623 96625 96627 96629 96631 96633 96635 96637 96639 96641 96701 96703 96705 96707 96709 96711 96713 96715 96717 96719 96721 96723 96725 96727 96729 96731 96733 96735 96737 96739 96741] 
0 147 18092 75034 RED
% Generated by roxygen2 (4.1.1): do not edit by hand % Please edit documentation in R/3_ComplexModels.R \name{M_M_INF} \alias{M_M_INF} \title{Obtains the main characteristics of a M/M/\eqn{\infty} queueing model} \usage{ M_M_INF(lambda = 3, mu = 6) } \arguments{ \item{lambda}{Mean arrival rate} \item{mu}{Mean service rate} } \value{ Returns the next information of a M/M/\eqn{\infty} model: \item{rho}{Constant coefficient: \eqn{\lambda/\mu}} \item{barrho}{Traffic intensity: \ifelse{latex}{\eqn{\bar{\rho}}}{\out{<i>&#862;&rho;</i>}}} \item{p0}{Probability of empty system: \ifelse{latex}{\eqn{P_{0}}}{\out{<i>P<sub>0</sub></i>}}} \item{l}{Number of customers in the system: \eqn{L}} \item{lq}{Number of customers in the queue: \ifelse{latex}{\eqn{L_q} (\eqn{L_q} = 0 in this model)}{\out{<i>L<sub>q</sub> (L<sub>q</sub> = 0 in this model)</i>}}} \item{w}{Waiting time in the system: \eqn{W}} \item{wq}{Waiting time in the queue: \ifelse{latex}{\eqn{W_q} (\eqn{W_q} = 0 in this model)}{\out{<i>W<sub>q</sub> (W<sub>q</sub> = 0 in this model)</i>}}} \item{eff}{System efficiency: \ifelse{latex}{\eqn{Eff = W/(W-W_q)}}{\out{<i>Eff = W/(W-W<sub>q</sub></i>)}}} } \description{ Obtains the main characteristics of a M/M/\eqn{\infty} queueing model } \examples{ #The number of people turning on their television sets #on Saturday evening during prime time can be described #rather well by a Poisson distribution with a mean of #100000/hr. #There are five major TV stations, and a given person #choose among these essentially at random. #Surveys have also shown that the average person tunes #in for 90 min and that viewing times are approximately #exponentially distributed. M_M_INF(lambda=100000/5, mu=60/90) } \seealso{ Other AnaliticalModels: \code{\link{ClosedJacksonNetwork}}; \code{\link{M_M_1_INF_H}}; \code{\link{M_M_1_K}}; \code{\link{M_M_1}}; \code{\link{M_M_S_INF_H_Y}}; \code{\link{M_M_S_INF_H}}; \code{\link{M_M_S_K}}; \code{\link{M_M_S}}; \code{\link{OpenJacksonNetwork}} }
/man/M_M_INF.Rd
no_license
ghobbs9495/arqas
R
false
false
2,043
rd
% Generated by roxygen2 (4.1.1): do not edit by hand % Please edit documentation in R/3_ComplexModels.R \name{M_M_INF} \alias{M_M_INF} \title{Obtains the main characteristics of a M/M/\eqn{\infty} queueing model} \usage{ M_M_INF(lambda = 3, mu = 6) } \arguments{ \item{lambda}{Mean arrival rate} \item{mu}{Mean service rate} } \value{ Returns the next information of a M/M/\eqn{\infty} model: \item{rho}{Constant coefficient: \eqn{\lambda/\mu}} \item{barrho}{Traffic intensity: \ifelse{latex}{\eqn{\bar{\rho}}}{\out{<i>&#862;&rho;</i>}}} \item{p0}{Probability of empty system: \ifelse{latex}{\eqn{P_{0}}}{\out{<i>P<sub>0</sub></i>}}} \item{l}{Number of customers in the system: \eqn{L}} \item{lq}{Number of customers in the queue: \ifelse{latex}{\eqn{L_q} (\eqn{L_q} = 0 in this model)}{\out{<i>L<sub>q</sub> (L<sub>q</sub> = 0 in this model)</i>}}} \item{w}{Waiting time in the system: \eqn{W}} \item{wq}{Waiting time in the queue: \ifelse{latex}{\eqn{W_q} (\eqn{W_q} = 0 in this model)}{\out{<i>W<sub>q</sub> (W<sub>q</sub> = 0 in this model)</i>}}} \item{eff}{System efficiency: \ifelse{latex}{\eqn{Eff = W/(W-W_q)}}{\out{<i>Eff = W/(W-W<sub>q</sub></i>)}}} } \description{ Obtains the main characteristics of a M/M/\eqn{\infty} queueing model } \examples{ #The number of people turning on their television sets #on Saturday evening during prime time can be described #rather well by a Poisson distribution with a mean of #100000/hr. #There are five major TV stations, and a given person #choose among these essentially at random. #Surveys have also shown that the average person tunes #in for 90 min and that viewing times are approximately #exponentially distributed. M_M_INF(lambda=100000/5, mu=60/90) } \seealso{ Other AnaliticalModels: \code{\link{ClosedJacksonNetwork}}; \code{\link{M_M_1_INF_H}}; \code{\link{M_M_1_K}}; \code{\link{M_M_1}}; \code{\link{M_M_S_INF_H_Y}}; \code{\link{M_M_S_INF_H}}; \code{\link{M_M_S_K}}; \code{\link{M_M_S}}; \code{\link{OpenJacksonNetwork}} }
#' DSAIDE: A package to learn about Dynamical Systems Approaches to Infectious #' Disease Epidemiology #' #' The DSAIDE package provides a number of shiny apps that simulate various #' infectious disease dynamics models By manipulating the models and working #' through the instructions provided within the shiny UI, you can learn about #' some important concepts in infectious disease epidemiology. You will also #' learn how models can be used to study such concepts. #' #' Start the main menu of the package by calling dsaidemenu(). #' #' To learn more about how to use the package, see the vignette #' or the short introduction on the package github repository. #' https://github.com/ahgroup/DSAIDE #' #' @docType package #' @name DSAIDE NULL
/R/DSAIDE.R
no_license
cgolden1993/DSAIDE
R
false
false
750
r
#' DSAIDE: A package to learn about Dynamical Systems Approaches to Infectious #' Disease Epidemiology #' #' The DSAIDE package provides a number of shiny apps that simulate various #' infectious disease dynamics models By manipulating the models and working #' through the instructions provided within the shiny UI, you can learn about #' some important concepts in infectious disease epidemiology. You will also #' learn how models can be used to study such concepts. #' #' Start the main menu of the package by calling dsaidemenu(). #' #' To learn more about how to use the package, see the vignette #' or the short introduction on the package github repository. #' https://github.com/ahgroup/DSAIDE #' #' @docType package #' @name DSAIDE NULL
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/Sql.R \name{lowLevelQuerySql.ffdf} \alias{lowLevelQuerySql.ffdf} \title{Low level function for retrieving data to an ffdf object} \usage{ lowLevelQuerySql.ffdf(connection, query = "", batchSize = "auto", datesAsString = FALSE) } \arguments{ \item{connection}{The connection to the database server.} \item{query}{The SQL statement to retrieve the data} \item{batchSize}{The number of rows that will be retrieved at a time from the server. A larger batchSize means less calls to the server so better performance, but too large a batchSize could lead to out-of-memory errors. The default is "auto", meaning heuristics will determine the appropriate batch size.} \item{datesAsString}{Should dates be imported as character vectors, our should they be converted to R's date format?} } \value{ A ffdf object containing the data. If there are 0 rows, a regular data frame is returned instead (ffdf cannot have 0 rows) } \description{ This is the equivalent of the \code{\link{querySql.ffdf}} function, except no error report is written when an error occurs. } \details{ Retrieves data from the database server and stores it in an ffdf object. This allows very large data sets to be retrieved without running out of memory. }
/man/lowLevelQuerySql.ffdf.Rd
permissive
TriNetX/DatabaseConnector
R
false
true
1,300
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/Sql.R \name{lowLevelQuerySql.ffdf} \alias{lowLevelQuerySql.ffdf} \title{Low level function for retrieving data to an ffdf object} \usage{ lowLevelQuerySql.ffdf(connection, query = "", batchSize = "auto", datesAsString = FALSE) } \arguments{ \item{connection}{The connection to the database server.} \item{query}{The SQL statement to retrieve the data} \item{batchSize}{The number of rows that will be retrieved at a time from the server. A larger batchSize means less calls to the server so better performance, but too large a batchSize could lead to out-of-memory errors. The default is "auto", meaning heuristics will determine the appropriate batch size.} \item{datesAsString}{Should dates be imported as character vectors, our should they be converted to R's date format?} } \value{ A ffdf object containing the data. If there are 0 rows, a regular data frame is returned instead (ffdf cannot have 0 rows) } \description{ This is the equivalent of the \code{\link{querySql.ffdf}} function, except no error report is written when an error occurs. } \details{ Retrieves data from the database server and stores it in an ffdf object. This allows very large data sets to be retrieved without running out of memory. }
# user options my.runs<- c('Single_species', # Single species 'Diri_Uniform_noNumber_noMesh_Sum_type2', 'Diri_Uniform_noNumber_noMesh_Sum_type3', 'Diri_Uniform_noNumber_noMesh_noSum_type2', 'Diri_Uniform_noNumber_noMesh_noSum_type3', 'Diri_Size_noNumber_noMesh_noSum_type2', 'Diri_Size_noNumber_noMesh_Sum_type2', 'Diri_Size_Number_noMesh_Sum_type2', 'Diri_Size_Number_noMesh_noSum_type2', 'Diri_Size_noNumber_noMesh_Sum_type2_ALK', 'lNorm_Size_noNumber_noMesh_Sum_type2_ALK', 'lNorm_Size_noNumber_noMesh_Sum_type2', 'lNorm_Size_Number_noMesh_Sum_type2', 'lNorm_Size_Number_noMesh_noSum_type2' ) #my.runs<-c('lNorm_Size_noNumber_noMesh_Sum_type2_ALK') area<-c('NorthSea','Baltic')[1] do.run<- T # run the assessment (or just present results) # make use of number model by species (if Number options has been chosen) # FUL GLT HEG KTW GBG GNT PUF RAZ RAJ GUR W_M N_M W_H N_H GSE HBP COD WHG HAD POK use.number.model<- c( F, F, F, F, F, F, F, F, F, F, F, F, F, F, F, F, T, T, F, T) use.size.model<-use.number.model #use.size.model<- c( F, F, F, F, F, F, F, F, F, F, F, F, F, F, F, F, T, T, F, T) # Cod Whiting Haddock Saithe Herring Sandeel Nor. 
pout Sprat Plaice Sole mesh.selction<-c( -1, -1, -1, -1, -1, -1, -1, -1, -1, -1) file.copy(file.path(data.path,"SMS_org.dat"),file.path(data.path,"SMS.dat"),overwrite=TRUE) ###### end user options ################### cleanup() setwd(data.path) oldDir<-data.path copySMSfiles<-function(scenario.dir) { if (file.exists(scenario.dir)) unlink(scenario.dir,recursive = T) dir.create(scenario.dir,showWarnings = FALSE) SMS.files.single<-c("area_names.in","natmor.in","canum.in","west.in","weca.in","propmat.in","fleet_catch.in", "fleet_names.in","fleet_info.dat","just_one.in","sms.psv","species_names.in", "SSB_R.in","Prediction_F.in","reference_points.in","predict_stock_N.in", "proportion_M_and_F_before_spawning.in","proportion_landed.in", "Exploitation_pattern.in","covariance_N.in", "HCR_options.dat","sms.dat", "SMS.exe") for (from.file in SMS.files.single) { to.file<-file.path(scenario.dir,from.file) file.copy(from.file, to.file, overwrite = TRUE) } SMS.files.multi<-c("alk_stom.in","ALK_all.in","consum.in","Length_weight_relations.in","lsea.in","N_haul_at_length.in", "natmor1.in","other_food.in","season_overlap.in","stom_pred_length_at_sizecl.in","stom_struc_at_length.in", "stomcon_at_length.in","stomlen_at_length.in","stomweight_at_length.in","stomnumber_at_length.in","stomtype_at_length.in","pred_prey_size_range_param.in", "incl_stom.in","temperature.in") for (from.file in SMS.files.multi) { to.file<-file.path(scenario.dir,from.file) file.copy(from.file, to.file, overwrite = TRUE) } SMS.files.area<-c("stock_distribution.in","predator_area_presence.in") if (control@no.areas > 1) for (from.file in SMS.files.area) { to.file<-file.path(scenario.dir,from.file) file.copy(from.file, to.file, overwrite = TRUE) } if (area=='NorthSea') { SMS.NS.files<-c("zero_catch_season_ages.in","zero_catch_year_season.in","F_q_ini.in","known_recruitment.in","other_pred_N.in") for (from.file in SMS.NS.files) { to.file<-file.path(scenario.dir,from.file) file.copy(from.file, to.file, overwrite = 
TRUE) } } } if (do.run) { # read data and options into FLR objects control<-read.FLSMS.control() # write all the files in a number of directories for (r in my.runs){ # retrospective runs are made in a separate directory my.dir<-file.path(oldDir,r) copySMSfiles(my.dir) setwd(my.dir) if (area=='NorthSea') { #default settings control@incl.stom.all<- 1 control@use.Nbar<- 0 control@M2.iterations<- 3 control@max.M2.sum2<- 3 control@stom.likelihood<- 1 control@stomach.variance<- 3 control@simple.ALK<- 0 control@consum<- 0 control@size.select.model<- 2 # 2=prey weights from stom_obs, 3=from l-W relation control@L50.mesh[]<- -1 control@size.selection[]<- 0 control@sum.stom.like[]<- 0 control@stom.obs.var[]<- 1 # FUL GLT HEG KTW GBG GNT PUF RAZ RAJ GUR W_M N_M W_H N_H GSE HBP COD WHG HAD POK control@stom.max.sumP[]<- c(100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 500, 100, 100, 100) control@var.scale.stom[]<- 1 # FUL GLT HEG KTW GBG GNT PUF RAZ RAJ GUR W_M N_M W_H N_H GSE HBP COD WHG HAD POK control@size.other.food.suit<- c( 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1 , 0, 0, 0, 0, 0, 1, 1, 1) control@min.stom.cont[]<- 1E-09 control@max.stom.sampl[]<- 1000 control@stom.type.include[]<- 2 # FUL GLT HEG KTW GBG GNT PUF RAZ RAJ GUR W_M N_M W_H N_H GSE HBP COD WHG HAD POK control@prey.pred.size.fac<- c(5, 5, 5, 5, 5, 5, 5, 5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 50, 50, 0.5, 0.6, 0.5, 0.5) control@use.overlap<- 0 control@phase.vulnera<- 2 control@phase.other.suit.slope<- 2 control@phase.pref.size.ratio<- -1 control@phase.pref.size.ratio.correction<- -1 control@phase.prey.size.adjustment<- -1 control@phase.var.size.ratio<- -1 control@phase.season.overlap<- 2 control@phase.stom.var<- 2 control@phase.mesh.adjust<- -1 # common settings for size selection other.food.uniform<- paste("# FUL GLT HEG KTW GBG GNT PUF RAZ RAJ GUR W_M N_M W_H N_H GSE HBP COD WHG HAD POK\n", " 1E6 1E5 1E7 1E7 1E7 1E7 1E5 1E5 1E6 1E6 1E6 1E7 1E6 1E5 1E6 1E6 1E6 1E6 1E7 1E6\n") #Other food, 
size selection other.food.size <- paste("# FUL GLT HEG KTW GBG GNT PUF RAZ RAJ GUR W_M N_M W_H N_H GSE HBP COD WHG HAD POK\n", " 1E6 1E5 1E7 1E7 1E7 1E7 1E5 1E5 1E6 1E6 1E7 1E7 1E6 1E5 1E6 1E6 1E6 1E5 1E7 1E6\n") # FUL GLT HEG KTW GBG GNT PUF RAZ RAJ GUR W_M N_M W_H N_H GSE HBP COD WHG HAD POK var.scale.stom<- c( 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) var.scale.stom.uniform<-c( 10, 10, 10, 10, 10, 10, 10, 10, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) var.scale.stom.uniform<- c( 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) # FUL GLT HEG KTW GBG GNT PUF RAZ RAJ GUR W_M N_M W_H N_H GSE HBP COD WHG HAD POK sum.stom.like<- c( 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) no.sum.stom.like<- c( 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) } if (area=='Baltic' & F) { other.food.uniform<- paste("# COD\n", " 1E6 \n") other.food.size <- paste("# COD\n", " 1E6 \n") var.scale.stom<- c(0.1) var.scale.stom.uniform<-c(0.1) } control@test.output <- 0 if (grepl('Single',r)) bio.interact<-F else bio.interact<-T if (grepl('Diri_',r)) control@stomach.variance<- 3 if (grepl('lNorm_',r)) control@stomach.variance<- 1 if (grepl('_ALK',r)) control@simple.ALK<-1 if (grepl('_Uniform',r)) { control@size.selection[]<-0 control@var.scale.stom<-var.scale.stom.uniform control@phase.pref.size.ratio<- -2 control@phase.var.size.ratio<- -2 cat(other.food.uniform,file="other_food.in") } if (grepl('_Size',r)) { do.size<-T control@size.selection[]<-0; control@size.selection[use.size.model]<-1 control@var.scale.stom<-var.scale.stom control@phase.pref.size.ratio<-2 control@phase.var.size.ratio<-2 cat(other.food.size,file="other_food.in") } else do.size<-F if (grepl('_Number_',r)) { control@stom.likelihood<-2 do.number<-T } else { control@stom.likelihood<-1 do.number<-F } if (grepl('noSum',r)) control@sum.stom.like[]<-no.sum.stom.like else if (grepl('_Sum',r)) control@sum.stom.like[]<-sum.stom.like if (grepl('Single',r)) bio.interact<-F else 
bio.interact<-T if (grepl('_type1',r)) { control@stom.type.include[ ]<-1 if (do.number) control@stom.type.include[use.number.model]<-3 } if (grepl('_type2',r)) { control@stom.type.include[ ]<-2 if (do.number) control@stom.type.include[use.number.model]<-3 if (do.size) control@stom.type.include[use.size.model]<-3 } if (grepl('_type3',r)) { control@stom.type.include[ ]<-3 } #if (grepl('74',r)) control@first.year.model<-1974 write.FLSMS.control(control,write.multi=bio.interact) cat("\nDoing run:",r,"\n") do.a.full.SMS.run(outdir=my.dir, rundir=my.dir, label="run_", # label for output cleanup=F, # delete files in the deleteFiles variable? do.single=T, # run SMS in single species mode do.multi.1=bio.interact, # Make preliminary estimate of "predation parameters" do.multi.2=bio.interact, # Run the full model, with simultaneously estimation of all parameters except the stomach variance parameter do.multi.2.redo=bio.interact, # bio.interact, # Run the full model, with simultaneously estimation of all parameters do.multi.2.redo.Nbar=F, # Run the full model, with simultaneously estimation of all parameters, Use mean stock numbers (Nbar) for predation do.hessian=F, # Make the Hessian matrix and estimate uncertainties do.MCMC=F, # Prepare for MCMC analysis mcmc=1000,mcsave=100, # Options for MCMS analysis do.prediction=F, # Make a prediction pause=F, # Make a pause between each stage Screen.show=F, # show the output on screen, or save it in file do.run=T, # Make the run immediately, or just make the batch file for the run deleteFiles=NA , # clean up in files before the run is made new.version=F) # copy a (new) version of the sms program from the program directory (default=FALSE) # if (!file.copy("summary.out", paste("summary",y,".out",sep=""), overwrite = TRUE)) stop(paste("Retro stopped: something went wrong in copying summary.dat for year:",y)) # file.remove("summary.out") } } dirs<-my.runs labels<-my.runs setwd(oldDir) oldRoot<-root; root<-oldDir cleanup() 
source(file.path(prog.path,"compare_runs_objective_function.R")) source(file.path(prog.path,"compare_runs.R")) source(file.path(prog.path,"compare_runs_prey_size_selection.r")) #if (bio.interact) source(file.path(prog.path,"compare_runs_M2.R")) #if (bio.interact) source(file.path(prog.path,"compare_runs_N.R")) # data.path<-file.path(data.path,'Size') ; setwd(data.path)
/SMS_r_prog/r_prog_less_frequently_used/script-number_model.r
permissive
ices-eg/wg_WGSAM
R
false
false
12,425
r
# user options my.runs<- c('Single_species', # Single species 'Diri_Uniform_noNumber_noMesh_Sum_type2', 'Diri_Uniform_noNumber_noMesh_Sum_type3', 'Diri_Uniform_noNumber_noMesh_noSum_type2', 'Diri_Uniform_noNumber_noMesh_noSum_type3', 'Diri_Size_noNumber_noMesh_noSum_type2', 'Diri_Size_noNumber_noMesh_Sum_type2', 'Diri_Size_Number_noMesh_Sum_type2', 'Diri_Size_Number_noMesh_noSum_type2', 'Diri_Size_noNumber_noMesh_Sum_type2_ALK', 'lNorm_Size_noNumber_noMesh_Sum_type2_ALK', 'lNorm_Size_noNumber_noMesh_Sum_type2', 'lNorm_Size_Number_noMesh_Sum_type2', 'lNorm_Size_Number_noMesh_noSum_type2' ) #my.runs<-c('lNorm_Size_noNumber_noMesh_Sum_type2_ALK') area<-c('NorthSea','Baltic')[1] do.run<- T # run the assessment (or just present results) # make use of number model by species (if Number options has been chosen) # FUL GLT HEG KTW GBG GNT PUF RAZ RAJ GUR W_M N_M W_H N_H GSE HBP COD WHG HAD POK use.number.model<- c( F, F, F, F, F, F, F, F, F, F, F, F, F, F, F, F, T, T, F, T) use.size.model<-use.number.model #use.size.model<- c( F, F, F, F, F, F, F, F, F, F, F, F, F, F, F, F, T, T, F, T) # Cod Whiting Haddock Saithe Herring Sandeel Nor. 
pout Sprat Plaice Sole mesh.selction<-c( -1, -1, -1, -1, -1, -1, -1, -1, -1, -1) file.copy(file.path(data.path,"SMS_org.dat"),file.path(data.path,"SMS.dat"),overwrite=TRUE) ###### end user options ################### cleanup() setwd(data.path) oldDir<-data.path copySMSfiles<-function(scenario.dir) { if (file.exists(scenario.dir)) unlink(scenario.dir,recursive = T) dir.create(scenario.dir,showWarnings = FALSE) SMS.files.single<-c("area_names.in","natmor.in","canum.in","west.in","weca.in","propmat.in","fleet_catch.in", "fleet_names.in","fleet_info.dat","just_one.in","sms.psv","species_names.in", "SSB_R.in","Prediction_F.in","reference_points.in","predict_stock_N.in", "proportion_M_and_F_before_spawning.in","proportion_landed.in", "Exploitation_pattern.in","covariance_N.in", "HCR_options.dat","sms.dat", "SMS.exe") for (from.file in SMS.files.single) { to.file<-file.path(scenario.dir,from.file) file.copy(from.file, to.file, overwrite = TRUE) } SMS.files.multi<-c("alk_stom.in","ALK_all.in","consum.in","Length_weight_relations.in","lsea.in","N_haul_at_length.in", "natmor1.in","other_food.in","season_overlap.in","stom_pred_length_at_sizecl.in","stom_struc_at_length.in", "stomcon_at_length.in","stomlen_at_length.in","stomweight_at_length.in","stomnumber_at_length.in","stomtype_at_length.in","pred_prey_size_range_param.in", "incl_stom.in","temperature.in") for (from.file in SMS.files.multi) { to.file<-file.path(scenario.dir,from.file) file.copy(from.file, to.file, overwrite = TRUE) } SMS.files.area<-c("stock_distribution.in","predator_area_presence.in") if (control@no.areas > 1) for (from.file in SMS.files.area) { to.file<-file.path(scenario.dir,from.file) file.copy(from.file, to.file, overwrite = TRUE) } if (area=='NorthSea') { SMS.NS.files<-c("zero_catch_season_ages.in","zero_catch_year_season.in","F_q_ini.in","known_recruitment.in","other_pred_N.in") for (from.file in SMS.NS.files) { to.file<-file.path(scenario.dir,from.file) file.copy(from.file, to.file, overwrite = 
TRUE) } } } if (do.run) { # read data and options into FLR objects control<-read.FLSMS.control() # write all the files in a number of directories for (r in my.runs){ # retrospective runs are made in a separate directory my.dir<-file.path(oldDir,r) copySMSfiles(my.dir) setwd(my.dir) if (area=='NorthSea') { #default settings control@incl.stom.all<- 1 control@use.Nbar<- 0 control@M2.iterations<- 3 control@max.M2.sum2<- 3 control@stom.likelihood<- 1 control@stomach.variance<- 3 control@simple.ALK<- 0 control@consum<- 0 control@size.select.model<- 2 # 2=prey weights from stom_obs, 3=from l-W relation control@L50.mesh[]<- -1 control@size.selection[]<- 0 control@sum.stom.like[]<- 0 control@stom.obs.var[]<- 1 # FUL GLT HEG KTW GBG GNT PUF RAZ RAJ GUR W_M N_M W_H N_H GSE HBP COD WHG HAD POK control@stom.max.sumP[]<- c(100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 500, 100, 100, 100) control@var.scale.stom[]<- 1 # FUL GLT HEG KTW GBG GNT PUF RAZ RAJ GUR W_M N_M W_H N_H GSE HBP COD WHG HAD POK control@size.other.food.suit<- c( 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1 , 0, 0, 0, 0, 0, 1, 1, 1) control@min.stom.cont[]<- 1E-09 control@max.stom.sampl[]<- 1000 control@stom.type.include[]<- 2 # FUL GLT HEG KTW GBG GNT PUF RAZ RAJ GUR W_M N_M W_H N_H GSE HBP COD WHG HAD POK control@prey.pred.size.fac<- c(5, 5, 5, 5, 5, 5, 5, 5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 50, 50, 0.5, 0.6, 0.5, 0.5) control@use.overlap<- 0 control@phase.vulnera<- 2 control@phase.other.suit.slope<- 2 control@phase.pref.size.ratio<- -1 control@phase.pref.size.ratio.correction<- -1 control@phase.prey.size.adjustment<- -1 control@phase.var.size.ratio<- -1 control@phase.season.overlap<- 2 control@phase.stom.var<- 2 control@phase.mesh.adjust<- -1 # common settings for size selection other.food.uniform<- paste("# FUL GLT HEG KTW GBG GNT PUF RAZ RAJ GUR W_M N_M W_H N_H GSE HBP COD WHG HAD POK\n", " 1E6 1E5 1E7 1E7 1E7 1E7 1E5 1E5 1E6 1E6 1E6 1E7 1E6 1E5 1E6 1E6 1E6 1E6 1E7 1E6\n") #Other food, 
size selection other.food.size <- paste("# FUL GLT HEG KTW GBG GNT PUF RAZ RAJ GUR W_M N_M W_H N_H GSE HBP COD WHG HAD POK\n", " 1E6 1E5 1E7 1E7 1E7 1E7 1E5 1E5 1E6 1E6 1E7 1E7 1E6 1E5 1E6 1E6 1E6 1E5 1E7 1E6\n") # FUL GLT HEG KTW GBG GNT PUF RAZ RAJ GUR W_M N_M W_H N_H GSE HBP COD WHG HAD POK var.scale.stom<- c( 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) var.scale.stom.uniform<-c( 10, 10, 10, 10, 10, 10, 10, 10, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) var.scale.stom.uniform<- c( 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) # FUL GLT HEG KTW GBG GNT PUF RAZ RAJ GUR W_M N_M W_H N_H GSE HBP COD WHG HAD POK sum.stom.like<- c( 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) no.sum.stom.like<- c( 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) } if (area=='Baltic' & F) { other.food.uniform<- paste("# COD\n", " 1E6 \n") other.food.size <- paste("# COD\n", " 1E6 \n") var.scale.stom<- c(0.1) var.scale.stom.uniform<-c(0.1) } control@test.output <- 0 if (grepl('Single',r)) bio.interact<-F else bio.interact<-T if (grepl('Diri_',r)) control@stomach.variance<- 3 if (grepl('lNorm_',r)) control@stomach.variance<- 1 if (grepl('_ALK',r)) control@simple.ALK<-1 if (grepl('_Uniform',r)) { control@size.selection[]<-0 control@var.scale.stom<-var.scale.stom.uniform control@phase.pref.size.ratio<- -2 control@phase.var.size.ratio<- -2 cat(other.food.uniform,file="other_food.in") } if (grepl('_Size',r)) { do.size<-T control@size.selection[]<-0; control@size.selection[use.size.model]<-1 control@var.scale.stom<-var.scale.stom control@phase.pref.size.ratio<-2 control@phase.var.size.ratio<-2 cat(other.food.size,file="other_food.in") } else do.size<-F if (grepl('_Number_',r)) { control@stom.likelihood<-2 do.number<-T } else { control@stom.likelihood<-1 do.number<-F } if (grepl('noSum',r)) control@sum.stom.like[]<-no.sum.stom.like else if (grepl('_Sum',r)) control@sum.stom.like[]<-sum.stom.like if (grepl('Single',r)) bio.interact<-F else 
bio.interact<-T if (grepl('_type1',r)) { control@stom.type.include[ ]<-1 if (do.number) control@stom.type.include[use.number.model]<-3 } if (grepl('_type2',r)) { control@stom.type.include[ ]<-2 if (do.number) control@stom.type.include[use.number.model]<-3 if (do.size) control@stom.type.include[use.size.model]<-3 } if (grepl('_type3',r)) { control@stom.type.include[ ]<-3 } #if (grepl('74',r)) control@first.year.model<-1974 write.FLSMS.control(control,write.multi=bio.interact) cat("\nDoing run:",r,"\n") do.a.full.SMS.run(outdir=my.dir, rundir=my.dir, label="run_", # label for output cleanup=F, # delete files in the deleteFiles variable? do.single=T, # run SMS in single species mode do.multi.1=bio.interact, # Make preliminary estimate of "predation parameters" do.multi.2=bio.interact, # Run the full model, with simultaneously estimation of all parameters except the stomach variance parameter do.multi.2.redo=bio.interact, # bio.interact, # Run the full model, with simultaneously estimation of all parameters do.multi.2.redo.Nbar=F, # Run the full model, with simultaneously estimation of all parameters, Use mean stock numbers (Nbar) for predation do.hessian=F, # Make the Hessian matrix and estimate uncertainties do.MCMC=F, # Prepare for MCMC analysis mcmc=1000,mcsave=100, # Options for MCMS analysis do.prediction=F, # Make a prediction pause=F, # Make a pause between each stage Screen.show=F, # show the output on screen, or save it in file do.run=T, # Make the run immediately, or just make the batch file for the run deleteFiles=NA , # clean up in files before the run is made new.version=F) # copy a (new) version of the sms program from the program directory (default=FALSE) # if (!file.copy("summary.out", paste("summary",y,".out",sep=""), overwrite = TRUE)) stop(paste("Retro stopped: something went wrong in copying summary.dat for year:",y)) # file.remove("summary.out") } } dirs<-my.runs labels<-my.runs setwd(oldDir) oldRoot<-root; root<-oldDir cleanup() 
source(file.path(prog.path,"compare_runs_objective_function.R")) source(file.path(prog.path,"compare_runs.R")) source(file.path(prog.path,"compare_runs_prey_size_selection.r")) #if (bio.interact) source(file.path(prog.path,"compare_runs_M2.R")) #if (bio.interact) source(file.path(prog.path,"compare_runs_N.R")) # data.path<-file.path(data.path,'Size') ; setwd(data.path)
% Generated by roxygen2 (4.1.1): do not edit by hand % Please edit documentation in R/data.R \docType{data} \name{CH19TA07} \alias{CH19TA07} \title{CH19TA07} \format{\preformatted{'data.frame': 12 obs. of 4 variables: $ V1: int 47 43 46 40 62 68 67 71 41 39 ... $ V2: int 1 1 1 1 2 2 2 2 3 3 ... $ V3: int 1 1 2 2 1 1 2 2 1 1 ... $ V4: int 1 2 1 2 1 2 1 2 1 2 ... }} \usage{ CH19TA07 } \description{ CH19TA07 } \keyword{datasets}
/man/CH19TA07.Rd
no_license
bryangoodrich/ALSM
R
false
false
440
rd
% Generated by roxygen2 (4.1.1): do not edit by hand % Please edit documentation in R/data.R \docType{data} \name{CH19TA07} \alias{CH19TA07} \title{CH19TA07} \format{\preformatted{'data.frame': 12 obs. of 4 variables: $ V1: int 47 43 46 40 62 68 67 71 41 39 ... $ V2: int 1 1 1 1 2 2 2 2 3 3 ... $ V3: int 1 1 2 2 1 1 2 2 1 1 ... $ V4: int 1 2 1 2 1 2 1 2 1 2 ... }} \usage{ CH19TA07 } \description{ CH19TA07 } \keyword{datasets}
# Run Boruta feature selection on `dataset` and return the variable-importance
# statistics, ordered ascending by mean importance.
#
# dataset      - data frame; its first column (assumed to be an ID) is dropped
#                before fitting
# label_column - name of the outcome column; it is renamed to "Label"
#                internally so the formula `Label ~ .` applies
#
# Returns: the attStats() data frame with the feature names copied into
# column 7, sorted by meanImp.
function(dataset, label_column = "Label") {
  # Normalise the outcome column name so the fixed formula below works
  colnames(dataset)[which(colnames(dataset) == label_column)] <- "Label"

  # Fit Boruta on every column except the first (ID), silently (doTrace = 0)
  boruta_fit <- Boruta(Label ~ ., data = dataset[, -1], doTrace = 0)

  # Resolve tentative attributes, then pull the importance statistics table
  importance <- attStats(TentativeRoughFix(boruta_fit))

  # Keep the feature names as a 7th column next to the six attStats columns
  importance[, 7] <- row.names(importance)

  # Order ascending by mean importance
  importance <- importance[order(importance$meanImp), ]

  # to view plot type print(Boruta_plots)
  # Boruta_plots = plot(boruta_fit, cex.axis = 1, las = 2, xlab = "", main = "Variable Importance")
  return(importance)
  # assign(x = "Boruta_plots", value = Boruta_plots, envir = globalenv())
}
/SKRYPTY/R_SCRIPTS/Scripts/Boruta_feature_selection.R
no_license
MMandziej/praca_magisterska
R
false
false
725
r
# Run Boruta feature selection on `dataset` and return the variable-importance
# statistics, ordered ascending by mean importance.
#
# dataset      - data frame; its first column (assumed to be an ID) is dropped
#                before fitting
# label_column - name of the outcome column; it is renamed to "Label"
#                internally so the formula `Label ~ .` applies
#
# Returns: the attStats() data frame with the feature names copied into
# column 7, sorted by meanImp.
function(dataset, label_column = "Label") {
  # Normalise the outcome column name so the fixed formula below works
  colnames(dataset)[which(colnames(dataset) == label_column)] <- "Label"

  # Fit Boruta on every column except the first (ID), silently (doTrace = 0)
  boruta_fit <- Boruta(Label ~ ., data = dataset[, -1], doTrace = 0)

  # Resolve tentative attributes, then pull the importance statistics table
  importance <- attStats(TentativeRoughFix(boruta_fit))

  # Keep the feature names as a 7th column next to the six attStats columns
  importance[, 7] <- row.names(importance)

  # Order ascending by mean importance
  importance <- importance[order(importance$meanImp), ]

  # to view plot type print(Boruta_plots)
  # Boruta_plots = plot(boruta_fit, cex.axis = 1, las = 2, xlab = "", main = "Variable Importance")
  return(importance)
  # assign(x = "Boruta_plots", value = Boruta_plots, envir = globalenv())
}
# Conclusion: use fipscounty. 3271 fipscounties, 410 CBSAs. To compare to Datawork we aggregate JOLTS data to CBSAs. # Data aggregations needed: for JOLTS, Datawork, and Openjobs, a table with CBSA/County/Month-Yr/#jobs/#stem jobs
/src/jobsCountTables/makeDataworkJobsTable.R
no_license
uva-bi-sdad/stem_edu
R
false
false
233
r
# Conclusion: use fipscounty. 3271 fipscounties, 410 CBSAs. To compare to Datawork we aggregate JOLTS data to CBSAs. # Data aggregations needed: for JOLTS, Datawork, and Openjobs, a table with CBSA/County/Month-Yr/#jobs/#stem jobs
library(xgboost) library(Matrix) library(data.table) #library(bit64, pos = .Machine$integer.max) #junk package that won't let use commercially and also won't let unmask functions even when you specify predescence library(vtreat) library(outliers) library(R.utils) remove(list = ls()) setwd("C:/Users/Laurae/Documents/Data Science/Santander/") ##set your own working directory train <- read.csv("train.csv") test <- read.csv("test.csv") #train <- as.data.frame(fread("train.csv", header = TRUE, sep = ",")) #test <- as.data.frame(fread("test.csv", header = TRUE, sep = ",")) #unloadNamespace("bit64") #detach("package:bit64", unload=TRUE, force=TRUE) train_temp <- train test_temp <- test # removing ID train_temp$ID <- NULL test_temp$ID <- NULL # extracting label train_target <- train$TARGET train_temp$TARGET <- NULL ##### 0 count per line count0 <- function(x) { return( sum(x == 0) ) } train_temp$n0 <- apply(train_temp, 1, FUN=count0) test_temp$n0 <- apply(test_temp, 1, FUN=count0) ##### Removing constant features cat("\n## Removing the constants features.\n") for (f in names(train_temp)) { if (length(unique(train_temp[[f]])) == 1) { cat(f, "is constant in train. We delete it.\n") train_temp[[f]] <- NULL test_temp[[f]] <- NULL } } ##### Removing identical features features_pair <- combn(names(train_temp), 2, simplify = F) toRemove <- c() for(pair in features_pair) { f1 <- pair[1] f2 <- pair[2] if (!(f1 %in% toRemove) & !(f2 %in% toRemove)) { if (all(train_temp[[f1]] == train_temp[[f2]])) { cat(f1, "and", f2, "are equals.\n") toRemove <- c(toRemove, f2) } } } feature.names <- setdiff(names(train_temp), toRemove) train_temp <- train_temp[, feature.names] test_temp <- test_temp[, feature.names] # ~~~~ GRADIENT DESCENT ASSOCIATION RULE TESTING optimized_func <- function(target, scores, min_score, min_node, false_neg, cutoff) { # cutoff has two values: the minimum and maximum. Everything between is dished out. 
if (cutoff[2] < cutoff[1]) { known <- integer(0) } else { known <- target[which(((scores >= cutoff[2]) == TRUE) | ((scores <= cutoff[1]) == TRUE))] #takes values in exterior to [cutoff1, cutoff2], else dishes out empty numeric } if (length(known) >= min_node) { #if cutoff not leading to empty variable best <- ifelse(sum(known == 1) == 0, 0, ifelse(((sum(known == 0) / sum(known == 1)) >= min_score) & (sum(known == 1) <= false_neg), sum(known == 1) / sum(known == 0), 999)) #return 0 if pure rule, else return the probability if ratio over 25 (better than random), else return 999 (worse than random) } else { best <- 999 # node empty } return(best) # objective: return the purest node } # THIS IS FOR UNIVARIATE prog_bar <- txtProgressBar(style = 3) data_scores <- rbind(train_temp, test_temp) for (i in colnames(train_temp)) { data_scores[[i]] <- scores(data_scores[[i]]) #cat("Computed ", i, "'s scores. (", which(i == colnames(train_temp)), "/", length(colnames(train_temp)), ")\n", sep = "") setTxtProgressBar(prog_bar, which(i == colnames(train_temp))/length(colnames(train_temp))) } close(prog_bar) data_scores_parsed <- data.frame(matrix(ncol = ncol(data_scores)+1, nrow = nrow(data_scores))) colnames(data_scores_parsed) <- c(colnames(data_scores), "Final") data_scores_parsed$Final <- rep(1, nrow(data_scores)) minimal_score <- 25 #don't accept any node under the allowed score minimal_node <- 5 #don't accept any node containing under that specific amount of samples false_negatives <- 2 #allow at most 1 false negative | higher allows a more permissive algorithm, lower makes it very difficult to converge for (i in colnames(train_temp)) { data_scores_parsed[[i]] <- rep(1, nrow(data_scores)) scoring_input <- data_scores[[i]][1:nrow(train)] #get scores from train set min_allowance <- min(scoring_input) #get the maximum allowed score max_allowance <- max(scoring_input) #get the maximum allowed score optimized_output <- optim(par = c(min_allowance, max_allowance), optimized_func, 
method = "L-BFGS-B", target = train_target, scores = scoring_input, min_score = minimal_score, min_node = minimal_node, false_neg = false_negatives, lower = min_allowance, upper = max_allowance, control = list(maxit = 1000, trace = 0)) cat("[", which(i == colnames(train_temp)), ": ", i, "] ", ifelse(optimized_output$value >= 999, "Failed to optimize with gradient descent (you should loose conditions!).", paste("Best node: ", ifelse(optimized_output$value == 0, "Inf", 1/optimized_output$value), ":1 (", round(optimized_output$value*100, digits = 3) , "%) [ ", sum((scoring_input >= optimized_output$par[2]) | (scoring_input <= optimized_output$par[1])), "(train) | ", sum((data_scores[[i]][(nrow(train)+1):NROW(data_scores_parsed[[i]])] >= optimized_output$par[2]) | (data_scores[[i]][(nrow(train)+1):NROW(data_scores_parsed[[i]])] <= optimized_output$par[1])), "(test) ] for params (", optimized_output$par[1], ", ", optimized_output$par[2], ").", sep = "")), sep = "") if ((optimized_output$value >= 999) | (sum((data_scores[[i]][(nrow(train)+1):NROW(data_scores_parsed[[i]])] >= optimized_output$par[2]) | (data_scores[[i]][(nrow(train)+1):NROW(data_scores_parsed[[i]])] <= optimized_output$par[1]))) == 0) { #do nothing cat(" | was useless!\n", sep = "") } else { data_output <- ifelse(optimized_output$value == 0, 0, optimized_output$value) data_scores_parsed[[i]][(scoring_input >= optimized_output$par[2]) | (scoring_input <= optimized_output$par[1])] <- data_output data_scores_parsed$Final <- data_scores_parsed$Final * data_scores_parsed[[i]] cat(" | was stored!\n", sep = "") } } cat("\n-----\nSummary:\nTrain rows soft-ruled: ", nrow(train) - sum(data_scores_parsed$Final[1:nrow(train)] == 1), " (pure: ", sum(data_scores_parsed$Final[1:nrow(train)] == 0), ")\nTest rows soft-ruled: ", nrow(test) - sum(data_scores_parsed$Final[(nrow(train)+1):nrow(data_scores_parsed)] == 1), " (pure: ", sum(data_scores_parsed$Final[(nrow(train)+1):nrow(data_scores_parsed)] == 0), ")", sep = "") # 
THIS IS FOR BIVARIATE scored_rows <- rep(1, nrow(train_temp)+nrow(test_temp)) minimal_score <- 25 #don't accept any node under the allowed score minimal_node <- 25 #don't accept any node containing under that specific amount of samples false_negatives <- 0 #allow at most 1 false negative | higher allows a more permissive algorithm, lower makes it very difficult to converge #for bivariate outliers = use Mahalonobis distance Counter <- 0 MaxCounter <- ncol(train_temp)*(ncol(train_temp) - 1) MaxChar <- 0 Paster <- paste("%0", nchar(MaxCounter, "width"), "d", sep = "") TrainRows <- 1:nrow(train_temp) TestRows <- (nrow(train_temp)+1):(nrow(train_temp)+nrow(test_temp)) StartTime <- System$currentTimeMillis() for (i in colnames(train_temp)) { for (j in colnames(train_temp)[-which(i == colnames(train_temp))]) { #print text for default checking Counter <- Counter + 1 #tempText <- paste("\r[", ( (which(i == colnames(train_temp)) - 1) * ncol(train_temp) ) + ( which(j == colnames(train_temp))) , "/", ncol(train_temp)*ncol(train_temp), "]: ", i, ":", j, " parsed!", sep = "") CurrentTime <- System$currentTimeMillis() SpentTime <- (CurrentTime - StartTime) / 1000 tempText <- paste("\r[", sprintf(Paster, Counter) , "/", MaxCounter, " | CPU = ", round(SpentTime, digits = 2), "s | ETA = ", round((MaxCounter - Counter) * SpentTime / Counter, 2), "s]: ", i, ":", j, " parsed!", sep = "") #cat(ifelse(nchar(tempText) < MaxChar, paste(tempText, rep(" ", MaxChar - nchar(tempText) + 1), sep = ""), tempText), sep = "") cat(tempText, sep = "") #merge columns #tempCol <- as.data.frame(cbind(c(train_temp[[i]], test_temp[[i]]), c(train_temp[[j]], test_temp[[j]]))) tempCol <- data.frame(v1 = c(train_temp[[i]], test_temp[[i]]), v2 = c(train_temp[[j]], test_temp[[j]]), check.names = FALSE, stringsAsFactors = FALSE) #compute Mahalonobis distance (df, m, sx) with near-zero tolerance to avoid unexpected interruptions if (sum(cor(tempCol)) > 3.999999) { #give up computing the inverse matrix and 
Mahalonobis distance due to singularity/ill-conditioned matrix } else { #try compute inverse matrix and Mahalonobis distance tryCatch(tempCol <- mahalanobis(tempCol, colMeans(tempCol), cov(tempCol), tol=1e-30)) } if (class(tempCol) == "data.frame") { #computation failed, ignore what to do MaxChar <- nchar(tempText) cat("\r", rep(" ", MaxChar), sep = "") CurrentTime <- System$currentTimeMillis() SpentTime <- (CurrentTime - StartTime) / 1000 tempText <- paste("\r[", sprintf(Paster, Counter) , "/", MaxCounter, " | CPU = ", round(SpentTime, digits = 2), "s | ETA = ", round((MaxCounter - Counter) * SpentTime / Counter, 2), "s]: ", i, ":", j, " is singular.\n", sep = "") cat(tempText, sep = "") } else { #successful computation, continue #score the data against outliers locally data_scores <- scores(tempCol) scoring_input <- data_scores[TrainRows] #get scores from train set min_allowance <- min(scoring_input) #get the maximum allowed score max_allowance <- max(scoring_input) #get the maximum allowed score #gradient descent the outliers to find local isolated nodes optimized_output <- optim(par = c(min_allowance, max_allowance), optimized_func, method = "L-BFGS-B", target = train_target, scores = scoring_input, min_score = minimal_score, min_node = minimal_node, false_neg = false_negatives, lower = min_allowance, upper = max_allowance, control = list(maxit = 1000, trace = 0)) if (!(optimized_output$value == 0)) { #not pure node? #has no value for us #overwrite print MaxChar <- nchar(tempText) cat("\r", rep(" ", MaxChar), sep = "") } else { #pure node? 
#has value for us #MaxChar <- 0 #compute rows found tempRows <- (data_scores >= optimized_output$par[2]) | (data_scores <= optimized_output$par[1]) tempRows_train <- sum(tempRows[TrainRows]) tempRows_test <- sum(tempRows[TestRows]) #update target rows tempInt <- sum(scored_rows[TestRows] == 0) scored_rows[tempRows] <- 0 tempInt <- sum(scored_rows[TestRows] == 0) - tempInt #rewrite the current line #cat("\r", rep(" ", nchar(tempText)), sep = "") #cat("\r[", ( (which(i == colnames(train_temp)) - 1) * ncol(train_temp) ) + ( which(j == colnames(train_temp))) , "/", ncol(train_temp)*ncol(train_temp), "]: ", i, ":", j, " analysis led to: ", length(tempRows_train), "|", length(tempRows_test), " (", length(scored_rows[scored_rows[1:nrow(train_temp)] == 0]), "|", length(scored_rows[scored_rows[(nrow(train_temp)+1):(nrow(train_temp)+nrow(test_temp))] == 0]), ")", sep = "") CurrentTime <- System$currentTimeMillis() SpentTime <- (CurrentTime - StartTime) / 1000 cat("\r[", sprintf(Paster, Counter) , "/", MaxCounter, " | CPU = ", round(SpentTime, digits = 2), "s | ETA = ", round((MaxCounter - Counter) * SpentTime / Counter, 2),"s]: ", i, ":", j, " analysis led to: ", tempRows_train, "|", tempRows_test, " (", sum(scored_rows[TrainRows] == 0), "|", sum(scored_rows[TestRows] == 0), ")", sep = "") if (tempInt == 0) { #if it added nothing to our test set cat(" - No improvement.\n", sep = "") } else { #if it added something to our test set cat(" | improved slightly! (+", tempInt, ")\n", sep = "") } } } } } # CHOOSE YOUR SUBMISSION!!! submission <- read.csv("submission_rules.csv") #take the best script? 
tempInt <- which(data_scores_parsed$Final[76021:151838] == 0) submission$TARGET[tempInt] #submission$TARGET <- submission$TARGET * data_scores_parsed$Final[76021:151838] submission$TARGET[submission$TARGET == 0] <- 0.00000001 #because we know some are wrong submission$TARGET[tempInt] <- 0 #because we know we are right there write.csv(submission, file = "submission_rules_out.csv", row.names = FALSE)
/scripts/Two-Way Outliers.R
no_license
Laurae2/Santander
R
false
false
12,365
r
library(xgboost) library(Matrix) library(data.table) #library(bit64, pos = .Machine$integer.max) #junk package that won't let use commercially and also won't let unmask functions even when you specify predescence library(vtreat) library(outliers) library(R.utils) remove(list = ls()) setwd("C:/Users/Laurae/Documents/Data Science/Santander/") ##set your own working directory train <- read.csv("train.csv") test <- read.csv("test.csv") #train <- as.data.frame(fread("train.csv", header = TRUE, sep = ",")) #test <- as.data.frame(fread("test.csv", header = TRUE, sep = ",")) #unloadNamespace("bit64") #detach("package:bit64", unload=TRUE, force=TRUE) train_temp <- train test_temp <- test # removing ID train_temp$ID <- NULL test_temp$ID <- NULL # extracting label train_target <- train$TARGET train_temp$TARGET <- NULL ##### 0 count per line count0 <- function(x) { return( sum(x == 0) ) } train_temp$n0 <- apply(train_temp, 1, FUN=count0) test_temp$n0 <- apply(test_temp, 1, FUN=count0) ##### Removing constant features cat("\n## Removing the constants features.\n") for (f in names(train_temp)) { if (length(unique(train_temp[[f]])) == 1) { cat(f, "is constant in train. We delete it.\n") train_temp[[f]] <- NULL test_temp[[f]] <- NULL } } ##### Removing identical features features_pair <- combn(names(train_temp), 2, simplify = F) toRemove <- c() for(pair in features_pair) { f1 <- pair[1] f2 <- pair[2] if (!(f1 %in% toRemove) & !(f2 %in% toRemove)) { if (all(train_temp[[f1]] == train_temp[[f2]])) { cat(f1, "and", f2, "are equals.\n") toRemove <- c(toRemove, f2) } } } feature.names <- setdiff(names(train_temp), toRemove) train_temp <- train_temp[, feature.names] test_temp <- test_temp[, feature.names] # ~~~~ GRADIENT DESCENT ASSOCIATION RULE TESTING optimized_func <- function(target, scores, min_score, min_node, false_neg, cutoff) { # cutoff has two values: the minimum and maximum. Everything between is dished out. 
if (cutoff[2] < cutoff[1]) { known <- integer(0) } else { known <- target[which(((scores >= cutoff[2]) == TRUE) | ((scores <= cutoff[1]) == TRUE))] #takes values in exterior to [cutoff1, cutoff2], else dishes out empty numeric } if (length(known) >= min_node) { #if cutoff not leading to empty variable best <- ifelse(sum(known == 1) == 0, 0, ifelse(((sum(known == 0) / sum(known == 1)) >= min_score) & (sum(known == 1) <= false_neg), sum(known == 1) / sum(known == 0), 999)) #return 0 if pure rule, else return the probability if ratio over 25 (better than random), else return 999 (worse than random) } else { best <- 999 # node empty } return(best) # objective: return the purest node } # THIS IS FOR UNIVARIATE prog_bar <- txtProgressBar(style = 3) data_scores <- rbind(train_temp, test_temp) for (i in colnames(train_temp)) { data_scores[[i]] <- scores(data_scores[[i]]) #cat("Computed ", i, "'s scores. (", which(i == colnames(train_temp)), "/", length(colnames(train_temp)), ")\n", sep = "") setTxtProgressBar(prog_bar, which(i == colnames(train_temp))/length(colnames(train_temp))) } close(prog_bar) data_scores_parsed <- data.frame(matrix(ncol = ncol(data_scores)+1, nrow = nrow(data_scores))) colnames(data_scores_parsed) <- c(colnames(data_scores), "Final") data_scores_parsed$Final <- rep(1, nrow(data_scores)) minimal_score <- 25 #don't accept any node under the allowed score minimal_node <- 5 #don't accept any node containing under that specific amount of samples false_negatives <- 2 #allow at most 1 false negative | higher allows a more permissive algorithm, lower makes it very difficult to converge for (i in colnames(train_temp)) { data_scores_parsed[[i]] <- rep(1, nrow(data_scores)) scoring_input <- data_scores[[i]][1:nrow(train)] #get scores from train set min_allowance <- min(scoring_input) #get the maximum allowed score max_allowance <- max(scoring_input) #get the maximum allowed score optimized_output <- optim(par = c(min_allowance, max_allowance), optimized_func, 
method = "L-BFGS-B", target = train_target, scores = scoring_input, min_score = minimal_score, min_node = minimal_node, false_neg = false_negatives, lower = min_allowance, upper = max_allowance, control = list(maxit = 1000, trace = 0)) cat("[", which(i == colnames(train_temp)), ": ", i, "] ", ifelse(optimized_output$value >= 999, "Failed to optimize with gradient descent (you should loose conditions!).", paste("Best node: ", ifelse(optimized_output$value == 0, "Inf", 1/optimized_output$value), ":1 (", round(optimized_output$value*100, digits = 3) , "%) [ ", sum((scoring_input >= optimized_output$par[2]) | (scoring_input <= optimized_output$par[1])), "(train) | ", sum((data_scores[[i]][(nrow(train)+1):NROW(data_scores_parsed[[i]])] >= optimized_output$par[2]) | (data_scores[[i]][(nrow(train)+1):NROW(data_scores_parsed[[i]])] <= optimized_output$par[1])), "(test) ] for params (", optimized_output$par[1], ", ", optimized_output$par[2], ").", sep = "")), sep = "") if ((optimized_output$value >= 999) | (sum((data_scores[[i]][(nrow(train)+1):NROW(data_scores_parsed[[i]])] >= optimized_output$par[2]) | (data_scores[[i]][(nrow(train)+1):NROW(data_scores_parsed[[i]])] <= optimized_output$par[1]))) == 0) { #do nothing cat(" | was useless!\n", sep = "") } else { data_output <- ifelse(optimized_output$value == 0, 0, optimized_output$value) data_scores_parsed[[i]][(scoring_input >= optimized_output$par[2]) | (scoring_input <= optimized_output$par[1])] <- data_output data_scores_parsed$Final <- data_scores_parsed$Final * data_scores_parsed[[i]] cat(" | was stored!\n", sep = "") } } cat("\n-----\nSummary:\nTrain rows soft-ruled: ", nrow(train) - sum(data_scores_parsed$Final[1:nrow(train)] == 1), " (pure: ", sum(data_scores_parsed$Final[1:nrow(train)] == 0), ")\nTest rows soft-ruled: ", nrow(test) - sum(data_scores_parsed$Final[(nrow(train)+1):nrow(data_scores_parsed)] == 1), " (pure: ", sum(data_scores_parsed$Final[(nrow(train)+1):nrow(data_scores_parsed)] == 0), ")", sep = "") # 
THIS IS FOR BIVARIATE scored_rows <- rep(1, nrow(train_temp)+nrow(test_temp)) minimal_score <- 25 #don't accept any node under the allowed score minimal_node <- 25 #don't accept any node containing under that specific amount of samples false_negatives <- 0 #allow at most 1 false negative | higher allows a more permissive algorithm, lower makes it very difficult to converge #for bivariate outliers = use Mahalonobis distance Counter <- 0 MaxCounter <- ncol(train_temp)*(ncol(train_temp) - 1) MaxChar <- 0 Paster <- paste("%0", nchar(MaxCounter, "width"), "d", sep = "") TrainRows <- 1:nrow(train_temp) TestRows <- (nrow(train_temp)+1):(nrow(train_temp)+nrow(test_temp)) StartTime <- System$currentTimeMillis() for (i in colnames(train_temp)) { for (j in colnames(train_temp)[-which(i == colnames(train_temp))]) { #print text for default checking Counter <- Counter + 1 #tempText <- paste("\r[", ( (which(i == colnames(train_temp)) - 1) * ncol(train_temp) ) + ( which(j == colnames(train_temp))) , "/", ncol(train_temp)*ncol(train_temp), "]: ", i, ":", j, " parsed!", sep = "") CurrentTime <- System$currentTimeMillis() SpentTime <- (CurrentTime - StartTime) / 1000 tempText <- paste("\r[", sprintf(Paster, Counter) , "/", MaxCounter, " | CPU = ", round(SpentTime, digits = 2), "s | ETA = ", round((MaxCounter - Counter) * SpentTime / Counter, 2), "s]: ", i, ":", j, " parsed!", sep = "") #cat(ifelse(nchar(tempText) < MaxChar, paste(tempText, rep(" ", MaxChar - nchar(tempText) + 1), sep = ""), tempText), sep = "") cat(tempText, sep = "") #merge columns #tempCol <- as.data.frame(cbind(c(train_temp[[i]], test_temp[[i]]), c(train_temp[[j]], test_temp[[j]]))) tempCol <- data.frame(v1 = c(train_temp[[i]], test_temp[[i]]), v2 = c(train_temp[[j]], test_temp[[j]]), check.names = FALSE, stringsAsFactors = FALSE) #compute Mahalonobis distance (df, m, sx) with near-zero tolerance to avoid unexpected interruptions if (sum(cor(tempCol)) > 3.999999) { #give up computing the inverse matrix and 
Mahalonobis distance due to singularity/ill-conditioned matrix } else { #try compute inverse matrix and Mahalonobis distance tryCatch(tempCol <- mahalanobis(tempCol, colMeans(tempCol), cov(tempCol), tol=1e-30)) } if (class(tempCol) == "data.frame") { #computation failed, ignore what to do MaxChar <- nchar(tempText) cat("\r", rep(" ", MaxChar), sep = "") CurrentTime <- System$currentTimeMillis() SpentTime <- (CurrentTime - StartTime) / 1000 tempText <- paste("\r[", sprintf(Paster, Counter) , "/", MaxCounter, " | CPU = ", round(SpentTime, digits = 2), "s | ETA = ", round((MaxCounter - Counter) * SpentTime / Counter, 2), "s]: ", i, ":", j, " is singular.\n", sep = "") cat(tempText, sep = "") } else { #successful computation, continue #score the data against outliers locally data_scores <- scores(tempCol) scoring_input <- data_scores[TrainRows] #get scores from train set min_allowance <- min(scoring_input) #get the maximum allowed score max_allowance <- max(scoring_input) #get the maximum allowed score #gradient descent the outliers to find local isolated nodes optimized_output <- optim(par = c(min_allowance, max_allowance), optimized_func, method = "L-BFGS-B", target = train_target, scores = scoring_input, min_score = minimal_score, min_node = minimal_node, false_neg = false_negatives, lower = min_allowance, upper = max_allowance, control = list(maxit = 1000, trace = 0)) if (!(optimized_output$value == 0)) { #not pure node? #has no value for us #overwrite print MaxChar <- nchar(tempText) cat("\r", rep(" ", MaxChar), sep = "") } else { #pure node? 
#has value for us #MaxChar <- 0 #compute rows found tempRows <- (data_scores >= optimized_output$par[2]) | (data_scores <= optimized_output$par[1]) tempRows_train <- sum(tempRows[TrainRows]) tempRows_test <- sum(tempRows[TestRows]) #update target rows tempInt <- sum(scored_rows[TestRows] == 0) scored_rows[tempRows] <- 0 tempInt <- sum(scored_rows[TestRows] == 0) - tempInt #rewrite the current line #cat("\r", rep(" ", nchar(tempText)), sep = "") #cat("\r[", ( (which(i == colnames(train_temp)) - 1) * ncol(train_temp) ) + ( which(j == colnames(train_temp))) , "/", ncol(train_temp)*ncol(train_temp), "]: ", i, ":", j, " analysis led to: ", length(tempRows_train), "|", length(tempRows_test), " (", length(scored_rows[scored_rows[1:nrow(train_temp)] == 0]), "|", length(scored_rows[scored_rows[(nrow(train_temp)+1):(nrow(train_temp)+nrow(test_temp))] == 0]), ")", sep = "") CurrentTime <- System$currentTimeMillis() SpentTime <- (CurrentTime - StartTime) / 1000 cat("\r[", sprintf(Paster, Counter) , "/", MaxCounter, " | CPU = ", round(SpentTime, digits = 2), "s | ETA = ", round((MaxCounter - Counter) * SpentTime / Counter, 2),"s]: ", i, ":", j, " analysis led to: ", tempRows_train, "|", tempRows_test, " (", sum(scored_rows[TrainRows] == 0), "|", sum(scored_rows[TestRows] == 0), ")", sep = "") if (tempInt == 0) { #if it added nothing to our test set cat(" - No improvement.\n", sep = "") } else { #if it added something to our test set cat(" | improved slightly! (+", tempInt, ")\n", sep = "") } } } } } # CHOOSE YOUR SUBMISSION!!! submission <- read.csv("submission_rules.csv") #take the best script? 
tempInt <- which(data_scores_parsed$Final[76021:151838] == 0) submission$TARGET[tempInt] #submission$TARGET <- submission$TARGET * data_scores_parsed$Final[76021:151838] submission$TARGET[submission$TARGET == 0] <- 0.00000001 #because we know some are wrong submission$TARGET[tempInt] <- 0 #because we know we are right there write.csv(submission, file = "submission_rules_out.csv", row.names = FALSE)
# Law-of-large-numbers demo: estimate the probability of one face of a fair
# coin from samples of increasing size; the relative frequency converges to 0.5.

# Define the sample space.
# BUG FIX: the original call factor(c(0, 1), levels = c("cara", "cruz"))
# produced an all-NA factor, because the values 0/1 do not appear among the
# levels; `labels =` is the argument that attaches names to the codes.
m <- factor(c(0, 1), labels = c("cara", "cruz"))

# Numeric encoding of the sample space (0 = cara, 1 = cruz) so the relative
# frequency of "cruz" can be computed as a mean. This overwrites the factor
# version, exactly as the original script did.
m <- c(0, 1)

# Relative frequency of 1s for growing sample sizes; mean(x) is the idiomatic
# (and numerically identical) form of sum(x) / length(x).
x <- sample(m, 5, replace = TRUE)
mean(x)

x <- sample(m, 25, replace = TRUE)
mean(x)

x <- sample(m, 100, replace = TRUE)
mean(x)

x <- sample(m, 1000, replace = TRUE)
mean(x)

x <- sample(m, 1000000, replace = TRUE)
mean(x)
/probabilidad/probabilidad.R
no_license
jrlacalle/material_docente
R
false
false
348
r
# Law-of-large-numbers demo: estimate the probability of one face of a fair
# coin from samples of increasing size; the relative frequency converges to 0.5.

# Define the sample space.
# BUG FIX: the original call factor(c(0, 1), levels = c("cara", "cruz"))
# produced an all-NA factor, because the values 0/1 do not appear among the
# levels; `labels =` is the argument that attaches names to the codes.
m <- factor(c(0, 1), labels = c("cara", "cruz"))

# Numeric encoding of the sample space (0 = cara, 1 = cruz) so the relative
# frequency of "cruz" can be computed as a mean. This overwrites the factor
# version, exactly as the original script did.
m <- c(0, 1)

# Relative frequency of 1s for growing sample sizes; mean(x) is the idiomatic
# (and numerically identical) form of sum(x) / length(x).
x <- sample(m, 5, replace = TRUE)
mean(x)

x <- sample(m, 25, replace = TRUE)
mean(x)

x <- sample(m, 100, replace = TRUE)
mean(x)

x <- sample(m, 1000, replace = TRUE)
mean(x)

x <- sample(m, 1000000, replace = TRUE)
mean(x)
# This script gets all documents (e.g. public comments) from regulations.gov # UNDER CONSTRUCTION FOR V4 ## load required packages source("setup.R") # I keep keep data I have already downloaded in ascending order in a directory called "ascending" directory <- "ascending" list.files(here::here(directory)) # The api call below loads data in ascending order. # To start with the earliest comment, set page to 1 # To dowload more recent comments, either change order to "DESC" or specify a page of results to start at page <- 1 # set defaults for regulations.gov api call url <- "https://api.data.gov" rpp <- 1000 # 1000 = max results per page order <- "ASC" # DESC = Decending, ASC = Ascending sortby <- "postedDate" #docketId (Docket ID) docId (Document ID) title (Title) postedDate (Posted Date) agency (Agency) documentType (Document Type) submitterName (Submitter Name) organization (Organization) pages <- c(1, (seq(100000000)*rpp)+1) # up to 100,000,000 results documenttype <- "PS" # "N%2BPR%2BFR%2BPS%2BSR%2BO" ## N: Notice, ## PR: Proposed Rule, ## FR: Rule, ## O: Other, ## SR: Supporting & Related Material, ## PS: Public Submission ## add your API Key source("api-key.R") api_key <- api_key ## initial API call (first page of 1000 results) raw.result <- GET( url = url, path = paste0( "/regulations/v4/documents?api_key=", api_key, "&rpp=", rpp, "&so=", order, "&sb=", sortby, "&dct=", "PS", # searching Public Submissions "&po=", pages[page] ) ) raw.result$status_code # extract content to list content <- fromJSON(rawToChar(raw.result$content)) # make a data frame d <- as.data.frame(content[[1]]) unique(d$postedDate) # to strings (to gaurentee consistent classes) #FIXME mutate_at/if if("organization" %in% names(d)){d$organization %<>% as.character()} if("commentDueDate" %in% names(d)){d$commentDueDate %<>% as.character()} if("commentStartDate" %in% names(d)){d$commentStartDate %<>% as.character()} if("postedDate" %in% names(d)){d$postedDate %<>% as.character()} # initialize 
page <- page error <- 0 skip <- NA ################################################################################## # If adding to saved results, first run: load("data/comments.Rdata") # loop until API fails for more than 1 hour while (error < 61) { # if returning errors for more than 1 hr # API call raw.result <- GET( url = url, path = paste0( "/regulations/v3/documents?api_key=", api_key, "&rpp=", rpp, "&so=", order, "&sb=", sortby, "&dct=", "PS", "&po=", pages[page] ) ) # API call error counter ifelse(raw.result$status_code != 200, error <- error + 1, error <- 0) # If call fails, wait a minute if (error > 0) { message(print(paste("Error", raw.result$status_code,"on page", page, "- waiting", error,"minutes" ))) Sys.sleep(60) } # If call works, merge in new data if (raw.result$status_code == 200) { # extract content to list content <- fromJSON(rawToChar(raw.result$content)) # to data frame temp <- as.data.frame(content[[1]]) if("organization" %in% names(temp)){temp$organization %<>% as.character()} if("commentDueDate" %in% names(temp)){temp$commentDueDate %<>% as.character()} if("commentStartDate" %in% names(temp)){temp$commentStartDate %<>% as.character()} if("postedDate" %in% names(temp)){temp$postedDate %<>% as.character()} # merge with previous pages silently suppressMessages( d %<>% full_join(temp) ) message(paste("Page", page, "added", Sys.time())) page <- page + 1 } # If server error more than twice, skip if (raw.result$status_code == 500 & error > 1) { message(paste("Skipping page", page)) skip <- c(skip, page) page <- page + 1 } # save after each half million docs (it takes ~30 minutes to get 500k and you don't want to start over if you hit an error) if (grepl("000$|500$", page)){ message("Saving", paste0(page, "comments.Rdata")) save(d, page, skip, file = here::here(directory, paste0(page, "comments.Rdata"))) d <- temp } }# END LOOP # Save last comments save(d, "lastcomments.Rdata") save(d, page, skip, file = here::here(directory, "lastcomments.Rdata") 
) save.image() # Save recent comments save(d, file = "data/recentcomments.Rdata") tail(d %>% drop_na(postedDate) %>% .$postedDate) max(d$postedDate, na.rm = T) min(d$postedDate, na.rm = T)
/functions/regulations-gov-get-all-comments4.R
no_license
zoeang/rulemaking
R
false
false
4,501
r
# This script gets all documents (e.g. public comments) from regulations.gov # UNDER CONSTRUCTION FOR V4 ## load required packages source("setup.R") # I keep keep data I have already downloaded in ascending order in a directory called "ascending" directory <- "ascending" list.files(here::here(directory)) # The api call below loads data in ascending order. # To start with the earliest comment, set page to 1 # To dowload more recent comments, either change order to "DESC" or specify a page of results to start at page <- 1 # set defaults for regulations.gov api call url <- "https://api.data.gov" rpp <- 1000 # 1000 = max results per page order <- "ASC" # DESC = Decending, ASC = Ascending sortby <- "postedDate" #docketId (Docket ID) docId (Document ID) title (Title) postedDate (Posted Date) agency (Agency) documentType (Document Type) submitterName (Submitter Name) organization (Organization) pages <- c(1, (seq(100000000)*rpp)+1) # up to 100,000,000 results documenttype <- "PS" # "N%2BPR%2BFR%2BPS%2BSR%2BO" ## N: Notice, ## PR: Proposed Rule, ## FR: Rule, ## O: Other, ## SR: Supporting & Related Material, ## PS: Public Submission ## add your API Key source("api-key.R") api_key <- api_key ## initial API call (first page of 1000 results) raw.result <- GET( url = url, path = paste0( "/regulations/v4/documents?api_key=", api_key, "&rpp=", rpp, "&so=", order, "&sb=", sortby, "&dct=", "PS", # searching Public Submissions "&po=", pages[page] ) ) raw.result$status_code # extract content to list content <- fromJSON(rawToChar(raw.result$content)) # make a data frame d <- as.data.frame(content[[1]]) unique(d$postedDate) # to strings (to gaurentee consistent classes) #FIXME mutate_at/if if("organization" %in% names(d)){d$organization %<>% as.character()} if("commentDueDate" %in% names(d)){d$commentDueDate %<>% as.character()} if("commentStartDate" %in% names(d)){d$commentStartDate %<>% as.character()} if("postedDate" %in% names(d)){d$postedDate %<>% as.character()} # initialize 
page <- page error <- 0 skip <- NA ################################################################################## # If adding to saved results, first run: load("data/comments.Rdata") # loop until API fails for more than 1 hour while (error < 61) { # if returning errors for more than 1 hr # API call raw.result <- GET( url = url, path = paste0( "/regulations/v3/documents?api_key=", api_key, "&rpp=", rpp, "&so=", order, "&sb=", sortby, "&dct=", "PS", "&po=", pages[page] ) ) # API call error counter ifelse(raw.result$status_code != 200, error <- error + 1, error <- 0) # If call fails, wait a minute if (error > 0) { message(print(paste("Error", raw.result$status_code,"on page", page, "- waiting", error,"minutes" ))) Sys.sleep(60) } # If call works, merge in new data if (raw.result$status_code == 200) { # extract content to list content <- fromJSON(rawToChar(raw.result$content)) # to data frame temp <- as.data.frame(content[[1]]) if("organization" %in% names(temp)){temp$organization %<>% as.character()} if("commentDueDate" %in% names(temp)){temp$commentDueDate %<>% as.character()} if("commentStartDate" %in% names(temp)){temp$commentStartDate %<>% as.character()} if("postedDate" %in% names(temp)){temp$postedDate %<>% as.character()} # merge with previous pages silently suppressMessages( d %<>% full_join(temp) ) message(paste("Page", page, "added", Sys.time())) page <- page + 1 } # If server error more than twice, skip if (raw.result$status_code == 500 & error > 1) { message(paste("Skipping page", page)) skip <- c(skip, page) page <- page + 1 } # save after each half million docs (it takes ~30 minutes to get 500k and you don't want to start over if you hit an error) if (grepl("000$|500$", page)){ message("Saving", paste0(page, "comments.Rdata")) save(d, page, skip, file = here::here(directory, paste0(page, "comments.Rdata"))) d <- temp } }# END LOOP # Save last comments save(d, "lastcomments.Rdata") save(d, page, skip, file = here::here(directory, "lastcomments.Rdata") 
) save.image() # Save recent comments save(d, file = "data/recentcomments.Rdata") tail(d %>% drop_na(postedDate) %>% .$postedDate) max(d$postedDate, na.rm = T) min(d$postedDate, na.rm = T)
#!/usr/bin/env Rscript suppressMessages(library(Matrix)) suppressMessages(library(parallel)) suppressMessages(library(optparse)) option_list <- list(make_option(c("-m", "--mutation_data"), action = "store", type = "character", help = "Mutation Matrix", default = NULL), make_option(c("-e", '--expression_data'), action = "store", type = "character", help = "Expression Matrix", default = NULL), make_option(c("-n", "--network_data"), action = "store", type = "character", help = "Network Data", default = NULL), make_option(c("-t", "--num_threads"), action = "store", type = "integer", help = "Number of Threads", default = 2), make_option(c("-o", "--output_prefix"), action = "store", type = "character", help = "Output Prefix", default = "NetworkProfile")) opt <- parse_args(OptionParser(option_list = option_list)) if(is.null(opt$mutation_data) || is.null(opt$network_data) || is.null(opt$expression_data)){ warning("!Provide data files") quit(save = "no") } message("...loading data") mut.mat <- readRDS(opt$mutation_data) expr.mat <- readRDS(opt$expression_data) ppi <- readRDS(opt$network_data) message("...processing data") if(length(intersect(rownames(mut.mat), intersect(rownames(expr.mat), rownames(ppi)))) == 0){ warning("!Feature names do not match") quit(save = "no") } ## Filter Samples ## samples.common <- intersect(colnames(mut.mat), colnames(expr.mat)) mut.mat <- mut.mat[,colnames(mut.mat) %in% samples.common] expr.mat <- expr.mat[,colnames(expr.mat) %in% samples.common] ## Filter Features ## idx <- colnames(ppi) %in% na.omit(intersect(rownames(mut.mat), rownames(expr.mat))) ppi <- ppi[idx, idx] idx <- colSums(ppi) == 0 ppi <- ppi[!idx,!idx] mut.mat <- mut.mat[match(rownames(ppi),table=rownames(mut.mat)),] mut.mat <- mut.mat[,colSums(mut.mat) > 8] expr.mat <- expr.mat[match(rownames(ppi),table=rownames(expr.mat)),] expr.mat <- expr.mat[,match(colnames(mut.mat),table=colnames(expr.mat))] expr.mat <- t(scale(t(expr.mat))) gc() network.prof <- sapply(1:ncol(mut.mat), 
function(i){ cat(sprintf('\rProcessing Sample: %d',i)) ## Weighted Network ## edge.idx <- which(ppi, arr.ind = TRUE, useNames = FALSE) edge.idx <- cbind(edge.idx, apply(edge.idx, 1, function(x){ sqrt(expr.mat[x[1],i]^2 + expr.mat[x[2],i]^2) })) edge.idx[is.na(edge.idx[,3]),3] <- 0.1 ## Small dysregulation ppi.w <- sparseMatrix(i = edge.idx[,1], j = edge.idx[,2], x = edge.idx[,3], dims = c(nrow(ppi), nrow(ppi)), dimnames = list(rownames(ppi), rownames(ppi)), use.last.ij = TRUE) ppi.w <- ppi.w %*% Matrix::Diagonal(x = Matrix::colSums(ppi.w)^-1) ## Original ## p0 <- Matrix(mut.mat[,i], ncol = 1, nrow = nrow(ppi.w)) p0 <- p0 / sum(p0) pt <- Matrix(1/nrow(ppi.w), ncol = 1, nrow = nrow(ppi.w)) delta <- 1 count <- 1 r <- 0.75 while(delta > 1e-16 && count < 100){ px <- (1-r) * ppi.w %*% pt + r * p0 delta <- sum(abs(px - pt)) count <- count + 1 pt <- px } res.orig <- pt ## Random ## nSeed <- sum(mut.mat[,i]) cl <- makeCluster(opt$num_threads) clusterExport(cl, varlist = c('ppi.w', 'r', 'nSeed'), envir = environment()) res.random <- parSapply(cl, 1:1000, function(k){ require(Matrix) p0 <- Matrix(0, ncol = 1, nrow = nrow(ppi.w)) p0[sample(1:nrow(ppi.w), size = nSeed, replace = FALSE), 1] <- 1/nSeed pt <- Matrix(1/nrow(ppi.w), ncol = 1, nrow = nrow(ppi.w)) delta <- 1 count <- 1 while(delta > 1e-16 && count < 100){ px <- (1-r) * ppi.w %*% pt + r * p0 delta <- sum(abs(px - pt)) count <- count + 1 pt <- px } return(as.vector(pt)) }) stopCluster(cl) ## Significance ## res.random <- log10(res.random) mean.vals <- apply(res.random, 1, function(x){mean(x, na.rm = TRUE)}) sd.vals <- apply(res.random, 1, function(x){sd(x, na.rm = TRUE)}) p.vals <- pnorm(log10(as.vector(res.orig)), mean = mean.vals, sd = sd.vals, lower.tail = FALSE) p.vals <- p.adjust(p.vals, method = 'fdr') return(setNames(p.vals, nm = rownames(ppi.w))) }) colnames(network.prof) <- colnames(mut.mat) out.file <- sprintf("%s/%s_NetworkProfile.rds", getwd(), opt$output_prefix) message("...saving results to file ", 
out.file) saveRDS(network.prof, file = out.file) quit(save = "no")
/NetworkProfilesWeighted.R
no_license
ardadurmaz/pancancer-fsm
R
false
false
4,543
r
#!/usr/bin/env Rscript suppressMessages(library(Matrix)) suppressMessages(library(parallel)) suppressMessages(library(optparse)) option_list <- list(make_option(c("-m", "--mutation_data"), action = "store", type = "character", help = "Mutation Matrix", default = NULL), make_option(c("-e", '--expression_data'), action = "store", type = "character", help = "Expression Matrix", default = NULL), make_option(c("-n", "--network_data"), action = "store", type = "character", help = "Network Data", default = NULL), make_option(c("-t", "--num_threads"), action = "store", type = "integer", help = "Number of Threads", default = 2), make_option(c("-o", "--output_prefix"), action = "store", type = "character", help = "Output Prefix", default = "NetworkProfile")) opt <- parse_args(OptionParser(option_list = option_list)) if(is.null(opt$mutation_data) || is.null(opt$network_data) || is.null(opt$expression_data)){ warning("!Provide data files") quit(save = "no") } message("...loading data") mut.mat <- readRDS(opt$mutation_data) expr.mat <- readRDS(opt$expression_data) ppi <- readRDS(opt$network_data) message("...processing data") if(length(intersect(rownames(mut.mat), intersect(rownames(expr.mat), rownames(ppi)))) == 0){ warning("!Feature names do not match") quit(save = "no") } ## Filter Samples ## samples.common <- intersect(colnames(mut.mat), colnames(expr.mat)) mut.mat <- mut.mat[,colnames(mut.mat) %in% samples.common] expr.mat <- expr.mat[,colnames(expr.mat) %in% samples.common] ## Filter Features ## idx <- colnames(ppi) %in% na.omit(intersect(rownames(mut.mat), rownames(expr.mat))) ppi <- ppi[idx, idx] idx <- colSums(ppi) == 0 ppi <- ppi[!idx,!idx] mut.mat <- mut.mat[match(rownames(ppi),table=rownames(mut.mat)),] mut.mat <- mut.mat[,colSums(mut.mat) > 8] expr.mat <- expr.mat[match(rownames(ppi),table=rownames(expr.mat)),] expr.mat <- expr.mat[,match(colnames(mut.mat),table=colnames(expr.mat))] expr.mat <- t(scale(t(expr.mat))) gc() network.prof <- sapply(1:ncol(mut.mat), 
function(i){ cat(sprintf('\rProcessing Sample: %d',i)) ## Weighted Network ## edge.idx <- which(ppi, arr.ind = TRUE, useNames = FALSE) edge.idx <- cbind(edge.idx, apply(edge.idx, 1, function(x){ sqrt(expr.mat[x[1],i]^2 + expr.mat[x[2],i]^2) })) edge.idx[is.na(edge.idx[,3]),3] <- 0.1 ## Small dysregulation ppi.w <- sparseMatrix(i = edge.idx[,1], j = edge.idx[,2], x = edge.idx[,3], dims = c(nrow(ppi), nrow(ppi)), dimnames = list(rownames(ppi), rownames(ppi)), use.last.ij = TRUE) ppi.w <- ppi.w %*% Matrix::Diagonal(x = Matrix::colSums(ppi.w)^-1) ## Original ## p0 <- Matrix(mut.mat[,i], ncol = 1, nrow = nrow(ppi.w)) p0 <- p0 / sum(p0) pt <- Matrix(1/nrow(ppi.w), ncol = 1, nrow = nrow(ppi.w)) delta <- 1 count <- 1 r <- 0.75 while(delta > 1e-16 && count < 100){ px <- (1-r) * ppi.w %*% pt + r * p0 delta <- sum(abs(px - pt)) count <- count + 1 pt <- px } res.orig <- pt ## Random ## nSeed <- sum(mut.mat[,i]) cl <- makeCluster(opt$num_threads) clusterExport(cl, varlist = c('ppi.w', 'r', 'nSeed'), envir = environment()) res.random <- parSapply(cl, 1:1000, function(k){ require(Matrix) p0 <- Matrix(0, ncol = 1, nrow = nrow(ppi.w)) p0[sample(1:nrow(ppi.w), size = nSeed, replace = FALSE), 1] <- 1/nSeed pt <- Matrix(1/nrow(ppi.w), ncol = 1, nrow = nrow(ppi.w)) delta <- 1 count <- 1 while(delta > 1e-16 && count < 100){ px <- (1-r) * ppi.w %*% pt + r * p0 delta <- sum(abs(px - pt)) count <- count + 1 pt <- px } return(as.vector(pt)) }) stopCluster(cl) ## Significance ## res.random <- log10(res.random) mean.vals <- apply(res.random, 1, function(x){mean(x, na.rm = TRUE)}) sd.vals <- apply(res.random, 1, function(x){sd(x, na.rm = TRUE)}) p.vals <- pnorm(log10(as.vector(res.orig)), mean = mean.vals, sd = sd.vals, lower.tail = FALSE) p.vals <- p.adjust(p.vals, method = 'fdr') return(setNames(p.vals, nm = rownames(ppi.w))) }) colnames(network.prof) <- colnames(mut.mat) out.file <- sprintf("%s/%s_NetworkProfile.rds", getwd(), opt$output_prefix) message("...saving results to file ", 
out.file) saveRDS(network.prof, file = out.file) quit(save = "no")
squirrels <- read.table('http://www.isi-stats.com/isi2/data/Squirrels.txt', header=T) glimpse(squirrels) squirrels <- squirrels %>% mutate(Location = as.factor(Location)) contrasts(squirrels$Location) <- contr.sum squirrels.lm <- lm(Length~Location,data = squirrels) summary(squirrels.lm) anova(squirrels.lm) R2 <- summary(squirrels.lm)$r.squared M<-5000 stats.df<-data.frame(trial=seq(1,M),stat=NA) for(j in 1:M){ squirrels$shuffled.cat<-sample(squirrels$Location) shuff.lm<-lm(Length~shuffled.cat,data=squirrels) stats.df[j,]$stat<-summary(shuff.lm)$r.squared }
/Lesson5 Code From Class.R
no_license
nick3703/MA376
R
false
false
579
r
squirrels <- read.table('http://www.isi-stats.com/isi2/data/Squirrels.txt', header=T) glimpse(squirrels) squirrels <- squirrels %>% mutate(Location = as.factor(Location)) contrasts(squirrels$Location) <- contr.sum squirrels.lm <- lm(Length~Location,data = squirrels) summary(squirrels.lm) anova(squirrels.lm) R2 <- summary(squirrels.lm)$r.squared M<-5000 stats.df<-data.frame(trial=seq(1,M),stat=NA) for(j in 1:M){ squirrels$shuffled.cat<-sample(squirrels$Location) shuff.lm<-lm(Length~shuffled.cat,data=squirrels) stats.df[j,]$stat<-summary(shuff.lm)$r.squared }
rm(list=ls()) setwd("~/Dropbox/enseignement/ENSAI/EvaluationDecisionsPubliques/TP") require(FactoMineR) # Importation des donnees temperature <- read.table("data/temperatures.csv",header = TRUE, sep = ";", dec = ".", row.names = 1) ##Stat descriptives (on verifie l'importation des donnees) summary(temperature) # Graph bivariee avec la region en couleur plot(temperature[,1:12], col=temperature[,17]) # Liste des villes rownames(temperature) # On regarde la matrice de correlation pour savoir si l'ACP est pertinente correlations <- round(cor(temperature[,-17]), 2) # Attention pour visualiser, il vaut mieux inverser les colonnes (plus c'est rouge, plus c'est corrélé) image(correlations[,16:1]) # On met en individu actif les capitales et en variables actives les mesures au cours des 12 mois res <- PCA(temperature, ind.sup = 24:35, quanti.sup = 13:16, quali.sup = 17, graph = FALSE) # Pour retenir le nombre d'axes barplot(res$eig[,1]) # Nuage des individus dans le premier plan factoriel avec la region en couleur plot(res, choix = "ind", habillage = 17) # Resume des sorties de l'ACP summary(res) # Resume pratique des axes en fonction des variables de départ dimdesc(res) # Cercle des correlations (quel bel effet taille!) plot(res, choix = "var") # On regarde ce qui se passe dans le plan 2/3 (mais en faite l'axe 3 n'explique rien car pas d'inertie) plot(res, choix = "ind", axes = c(2,3), habillage = 17) plot(res, choix = "var", axes = c(2,3), habillage = 17) # Ellipses de confiance en supposant que les variables suivent une Gaussienne multivariée conditionnelle à la variable quali plotellipses(res, cex=.8, level = .95)
/enseignements/rcode/mspes/Exercice1.R
permissive
masedki/masedki.github.io
R
false
false
1,644
r
rm(list=ls()) setwd("~/Dropbox/enseignement/ENSAI/EvaluationDecisionsPubliques/TP") require(FactoMineR) # Importation des donnees temperature <- read.table("data/temperatures.csv",header = TRUE, sep = ";", dec = ".", row.names = 1) ##Stat descriptives (on verifie l'importation des donnees) summary(temperature) # Graph bivariee avec la region en couleur plot(temperature[,1:12], col=temperature[,17]) # Liste des villes rownames(temperature) # On regarde la matrice de correlation pour savoir si l'ACP est pertinente correlations <- round(cor(temperature[,-17]), 2) # Attention pour visualiser, il vaut mieux inverser les colonnes (plus c'est rouge, plus c'est corrélé) image(correlations[,16:1]) # On met en individu actif les capitales et en variables actives les mesures au cours des 12 mois res <- PCA(temperature, ind.sup = 24:35, quanti.sup = 13:16, quali.sup = 17, graph = FALSE) # Pour retenir le nombre d'axes barplot(res$eig[,1]) # Nuage des individus dans le premier plan factoriel avec la region en couleur plot(res, choix = "ind", habillage = 17) # Resume des sorties de l'ACP summary(res) # Resume pratique des axes en fonction des variables de départ dimdesc(res) # Cercle des correlations (quel bel effet taille!) plot(res, choix = "var") # On regarde ce qui se passe dans le plan 2/3 (mais en faite l'axe 3 n'explique rien car pas d'inertie) plot(res, choix = "ind", axes = c(2,3), habillage = 17) plot(res, choix = "var", axes = c(2,3), habillage = 17) # Ellipses de confiance en supposant que les variables suivent une Gaussienne multivariée conditionnelle à la variable quali plotellipses(res, cex=.8, level = .95)
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/faraway-package.R \docType{data} \name{chicago} \alias{chicago} \title{Chicago insurance redlining} \format{ This dataframe contains the following columns \describe{ \item{race}{ racial composition in percent minority } \item{fire}{ fires per 100 housing units } \item{theft}{ theft per 1000 population } \item{age}{ percent of housing units built before 1939 } \item{involact}{ new FAIR plan policies and renewals per 100 housing units } \item{income}{ median family income in thousands of dollars} \item{side}{ North or South side of Chicago} } } \source{ Adapted from "Data : A Collection of Problems from Many Fields for the Student and Research Worker" by D. Andrews and A. Herzberg published by Springer-Verlag, in 1985 } \description{ Data from a 1970's study on the relationship between insurance redlining in Chicago and racial composition, fire and theft rates, age of housing and income in 47 zip codes. } \keyword{datasets}
/man/chicago.Rd
no_license
cran/faraway
R
false
true
1,016
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/faraway-package.R \docType{data} \name{chicago} \alias{chicago} \title{Chicago insurance redlining} \format{ This dataframe contains the following columns \describe{ \item{race}{ racial composition in percent minority } \item{fire}{ fires per 100 housing units } \item{theft}{ theft per 1000 population } \item{age}{ percent of housing units built before 1939 } \item{involact}{ new FAIR plan policies and renewals per 100 housing units } \item{income}{ median family income in thousands of dollars} \item{side}{ North or South side of Chicago} } } \source{ Adapted from "Data : A Collection of Problems from Many Fields for the Student and Research Worker" by D. Andrews and A. Herzberg published by Springer-Verlag, in 1985 } \description{ Data from a 1970's study on the relationship between insurance redlining in Chicago and racial composition, fire and theft rates, age of housing and income in 47 zip codes. } \keyword{datasets}
crtrate_UCB = rep(NA,itertimes) exp_reg_UCB = rep(NA,itertimes) post_reg_UCB = rep(NA,itertimes) sum_reg_UCB = rep(NA,itertimes) for(ldai in 1:itertimes){ usbcal = function(diff,N,N1,n){ ans = (8*N^2*log(N)/(sqrt(N*(4*sqrt(2*log(N)*(2*log(N)+N*diff^2))+8*log(N)+N*diff^2))-diff*N)^2-N1)/n #ans=0.8 #print((sqrt(2*log(N)/(ans*n+N1))-sqrt(2*log(N)/(N-ans*n-N1))+diff)) return(ans) } correct_rpm = rep(0,num_iter) for(iter in 1:num_iter){ #alpha1_store[iter,1] = control_size*p1[iter] #beta1_store[iter,1] = control_size - alpha1_store[iter,1] alpha1_store[iter,1] = floor(control_size*p1[iter]+0.5) beta1_store[iter,1] = control_size-alpha1_store[iter,1] #alpha1_store[iter,1] = 0 #beta1_store[iter,1] = 0 alpha2_store[iter,1] = 0 beta2_store[iter,1] = 0 idx_1 = 0 idx_2 = 0 stop = 0 for(i in 1:num_time){ if((alpha1_store[iter,i]==0)||(alpha2_store[iter,i]==0)) prob_1=0.5 else prob_1 = usbcal(alpha1_store[iter,i]/(alpha1_store[iter,i]+beta1_store[iter,i])-alpha2_store[iter,i]/(alpha2_store[iter,i]+beta2_store[iter,i]),(alpha1_store[iter,i]+beta1_store[iter,i]+alpha2_store[iter,i]+beta2_store[iter,i]+num_batch),(alpha1_store[iter,i]+beta1_store[iter,i]),num_batch) prob_1 = max(0,min(1,prob_1)) #if (stop==0) #prob_1 = (prob_1-0.5)*(1-(num_time-i)/num_time*0.5)+0.5 n_p1 = rbinom(1,size=num_actual_batch[i], prob=prob_1) n_p2 = num_actual_batch[i] - n_p1 s = 0 if(n_p1>0) s = sum(data1[iter,idx_1:(idx_1+n_p1-1)]) alpha1_store[iter,i+1] = alpha1_store[iter,i] + s beta1_store[iter,i+1] = beta1_store[iter,i] + n_p1 - s idx_1 = idx_1 + n_p1 s = 0 if(n_p2>0) s = sum(data2[iter,idx_2:(idx_2+n_p2-1)]) alpha2_store[iter,i+1] = alpha2_store[iter,i] + s beta2_store[iter,i+1] = beta2_store[iter,i] + n_p2 - s idx_2 = idx_2 + n_p2 if(p1[iter]>=p2[iter]) loss_rpm[iter,i] = n_p2*(p1[iter]-p2[iter]) else loss_rpm[iter,i] = n_p1*(p2[iter]-p1[iter]) } post_loss[iter] = pos_time * num_batch *abs(p1[iter]-p2[iter]) 
if(((alpha1_store[iter,num_time+1]/beta1_store[iter,num_time+1])>=(alpha2_store[iter,num_time+1]/beta2_store[iter,num_time+1]))==(p1[iter]>=p2[iter])){ correct_rpm[iter] = 1 post_loss[iter] = 0 } cum_rew[iter] = sum(data1[iter,1:idx_1])+sum(data2[iter,1:idx_2]) if(((alpha1_store[iter,num_time+1]/beta1_store[iter,num_time+1])>=(alpha2_store[iter,num_time+1]/beta2_store[iter,num_time+1]))==(p1[iter]>=p2[iter])) post_cum_rew[iter] = rbinom(1,size=pos_time * num_batch, prob=p1[iter]) else post_cum_rew[iter] = rbinom(1,size=pos_time * num_batch, prob=p2[iter]) } crtrate_UCB[ldai]=(sum(correct_rpm)) sum_reg_UCB[ldai]=(mean(rowSums(loss_rpm)+post_loss)) exp_reg_UCB[ldai]=(mean(rowSums(loss_rpm))) post_reg_UCB[ldai]=(mean(post_loss)) } print(mean(crtrate_UCB)) print(mean(sum_reg_UCB))
/Experiment/UCB.R
no_license
liangdai/online-experiment
R
false
false
2,766
r
crtrate_UCB = rep(NA,itertimes) exp_reg_UCB = rep(NA,itertimes) post_reg_UCB = rep(NA,itertimes) sum_reg_UCB = rep(NA,itertimes) for(ldai in 1:itertimes){ usbcal = function(diff,N,N1,n){ ans = (8*N^2*log(N)/(sqrt(N*(4*sqrt(2*log(N)*(2*log(N)+N*diff^2))+8*log(N)+N*diff^2))-diff*N)^2-N1)/n #ans=0.8 #print((sqrt(2*log(N)/(ans*n+N1))-sqrt(2*log(N)/(N-ans*n-N1))+diff)) return(ans) } correct_rpm = rep(0,num_iter) for(iter in 1:num_iter){ #alpha1_store[iter,1] = control_size*p1[iter] #beta1_store[iter,1] = control_size - alpha1_store[iter,1] alpha1_store[iter,1] = floor(control_size*p1[iter]+0.5) beta1_store[iter,1] = control_size-alpha1_store[iter,1] #alpha1_store[iter,1] = 0 #beta1_store[iter,1] = 0 alpha2_store[iter,1] = 0 beta2_store[iter,1] = 0 idx_1 = 0 idx_2 = 0 stop = 0 for(i in 1:num_time){ if((alpha1_store[iter,i]==0)||(alpha2_store[iter,i]==0)) prob_1=0.5 else prob_1 = usbcal(alpha1_store[iter,i]/(alpha1_store[iter,i]+beta1_store[iter,i])-alpha2_store[iter,i]/(alpha2_store[iter,i]+beta2_store[iter,i]),(alpha1_store[iter,i]+beta1_store[iter,i]+alpha2_store[iter,i]+beta2_store[iter,i]+num_batch),(alpha1_store[iter,i]+beta1_store[iter,i]),num_batch) prob_1 = max(0,min(1,prob_1)) #if (stop==0) #prob_1 = (prob_1-0.5)*(1-(num_time-i)/num_time*0.5)+0.5 n_p1 = rbinom(1,size=num_actual_batch[i], prob=prob_1) n_p2 = num_actual_batch[i] - n_p1 s = 0 if(n_p1>0) s = sum(data1[iter,idx_1:(idx_1+n_p1-1)]) alpha1_store[iter,i+1] = alpha1_store[iter,i] + s beta1_store[iter,i+1] = beta1_store[iter,i] + n_p1 - s idx_1 = idx_1 + n_p1 s = 0 if(n_p2>0) s = sum(data2[iter,idx_2:(idx_2+n_p2-1)]) alpha2_store[iter,i+1] = alpha2_store[iter,i] + s beta2_store[iter,i+1] = beta2_store[iter,i] + n_p2 - s idx_2 = idx_2 + n_p2 if(p1[iter]>=p2[iter]) loss_rpm[iter,i] = n_p2*(p1[iter]-p2[iter]) else loss_rpm[iter,i] = n_p1*(p2[iter]-p1[iter]) } post_loss[iter] = pos_time * num_batch *abs(p1[iter]-p2[iter]) 
if(((alpha1_store[iter,num_time+1]/beta1_store[iter,num_time+1])>=(alpha2_store[iter,num_time+1]/beta2_store[iter,num_time+1]))==(p1[iter]>=p2[iter])){ correct_rpm[iter] = 1 post_loss[iter] = 0 } cum_rew[iter] = sum(data1[iter,1:idx_1])+sum(data2[iter,1:idx_2]) if(((alpha1_store[iter,num_time+1]/beta1_store[iter,num_time+1])>=(alpha2_store[iter,num_time+1]/beta2_store[iter,num_time+1]))==(p1[iter]>=p2[iter])) post_cum_rew[iter] = rbinom(1,size=pos_time * num_batch, prob=p1[iter]) else post_cum_rew[iter] = rbinom(1,size=pos_time * num_batch, prob=p2[iter]) } crtrate_UCB[ldai]=(sum(correct_rpm)) sum_reg_UCB[ldai]=(mean(rowSums(loss_rpm)+post_loss)) exp_reg_UCB[ldai]=(mean(rowSums(loss_rpm))) post_reg_UCB[ldai]=(mean(post_loss)) } print(mean(crtrate_UCB)) print(mean(sum_reg_UCB))
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/EPFR.r \name{sql.median} \alias{sql.median} \title{sql.median} \usage{ sql.median(x, y, n, w = 0.5) } \arguments{ \item{x}{= column on which computation is run} \item{y}{= column on which partitioning is performed} \item{n}{= SQL statement} \item{w}{= desired ptile break point} } \description{ median (or alternate ptile point) of <x> within <y> } \seealso{ Other sql: \code{\link{sql.1dActWtTrend.Alloc}}, \code{\link{sql.1dActWtTrend.Final}}, \code{\link{sql.1dActWtTrend.Flow}}, \code{\link{sql.1dActWtTrend.select}}, \code{\link{sql.1dActWtTrend.topline.from}}, \code{\link{sql.1dActWtTrend.topline}}, \code{\link{sql.1dActWtTrend.underlying.basic}}, \code{\link{sql.1dActWtTrend.underlying}}, \code{\link{sql.1dActWtTrend}}, \code{\link{sql.1dFloMo.CountryId.List}}, \code{\link{sql.1dFloMo.FI}}, \code{\link{sql.1dFloMo.Rgn}}, \code{\link{sql.1dFloMo.Sec.topline}}, \code{\link{sql.1dFloMo.filter}}, \code{\link{sql.1dFloMo.grp}}, \code{\link{sql.1dFloMo.select.wrapper}}, \code{\link{sql.1dFloMo.select}}, \code{\link{sql.1dFloMo.underlying}}, \code{\link{sql.1dFloMoAggr}}, \code{\link{sql.1dFloMo}}, \code{\link{sql.1dFloTrend.Alloc.data}}, \code{\link{sql.1dFloTrend.Alloc.fetch}}, \code{\link{sql.1dFloTrend.Alloc.final}}, \code{\link{sql.1dFloTrend.Alloc.from}}, \code{\link{sql.1dFloTrend.Alloc.purge}}, \code{\link{sql.1dFloTrend.Alloc}}, \code{\link{sql.1dFloTrend.select}}, \code{\link{sql.1dFloTrend.underlying}}, \code{\link{sql.1dFloTrend}}, \code{\link{sql.1dFundCt}}, \code{\link{sql.1dFundRet}}, \code{\link{sql.1dION}}, \code{\link{sql.1mActWt.underlying}}, \code{\link{sql.1mActWtIncrPct}}, \code{\link{sql.1mActWtTrend.underlying}}, \code{\link{sql.1mActWtTrend}}, \code{\link{sql.1mActWt}}, \code{\link{sql.1mAllocD.from}}, \code{\link{sql.1mAllocD.select}}, \code{\link{sql.1mAllocD.topline.from}}, \code{\link{sql.1mAllocD}}, \code{\link{sql.1mAllocMo.select}}, 
\code{\link{sql.1mAllocMo.underlying.from}}, \code{\link{sql.1mAllocMo.underlying.pre}}, \code{\link{sql.1mAllocMo}}, \code{\link{sql.1mAllocSkew.topline.from}}, \code{\link{sql.1mAllocSkew}}, \code{\link{sql.1mBullish.Alloc}}, \code{\link{sql.1mBullish.Final}}, \code{\link{sql.1mChActWt}}, \code{\link{sql.1mFloMo}}, \code{\link{sql.1mFloTrend.underlying}}, \code{\link{sql.1mFloTrend}}, \code{\link{sql.1mFundCt}}, \code{\link{sql.1mHoldAum}}, \code{\link{sql.1mSRIAdvisorPct}}, \code{\link{sql.1wFlow.Corp}}, \code{\link{sql.ActWtDiff2}}, \code{\link{sql.Allocation.Sec.FinsExREst}}, \code{\link{sql.Allocation.Sec}}, \code{\link{sql.Allocations.bulk.EqWtAvg}}, \code{\link{sql.Allocations.bulk.Single}}, \code{\link{sql.Allocation}}, \code{\link{sql.BenchIndex.duplication}}, \code{\link{sql.Bullish}}, \code{\link{sql.DailyFlo}}, \code{\link{sql.Diff}}, \code{\link{sql.Dispersion}}, \code{\link{sql.FloMo.Funds}}, \code{\link{sql.Flow}}, \code{\link{sql.Foreign}}, \code{\link{sql.FundHistory.macro}}, \code{\link{sql.FundHistory.sf}}, \code{\link{sql.FundHistory}}, \code{\link{sql.HSIdmap}}, \code{\link{sql.HerdingLSV}}, \code{\link{sql.Holdings.bulk.wrapper}}, \code{\link{sql.Holdings.bulk}}, \code{\link{sql.Holdings}}, \code{\link{sql.ION}}, \code{\link{sql.MonthlyAlloc}}, \code{\link{sql.MonthlyAssetsEnd}}, \code{\link{sql.Mo}}, \code{\link{sql.Overweight}}, \code{\link{sql.RDSuniv}}, \code{\link{sql.ReportDate}}, \code{\link{sql.SRI}}, \code{\link{sql.ShareClass}}, \code{\link{sql.TopDownAllocs.items}}, \code{\link{sql.TopDownAllocs.underlying}}, \code{\link{sql.TopDownAllocs}}, \code{\link{sql.Trend}}, \code{\link{sql.and}}, \code{\link{sql.arguments}}, \code{\link{sql.bcp}}, \code{\link{sql.breakdown}}, \code{\link{sql.case}}, \code{\link{sql.close}}, \code{\link{sql.connect.wrapper}}, \code{\link{sql.connect}}, \code{\link{sql.cross.border}}, \code{\link{sql.datediff}}, \code{\link{sql.declare}}, \code{\link{sql.delete}}, \code{\link{sql.drop}}, 
\code{\link{sql.exists}}, \code{\link{sql.extra.domicile}}, \code{\link{sql.index}}, \code{\link{sql.into}}, \code{\link{sql.in}}, \code{\link{sql.isin.old.to.new}}, \code{\link{sql.label}}, \code{\link{sql.map.classif}}, \code{\link{sql.mat.cofactor}}, \code{\link{sql.mat.crossprod.vector}}, \code{\link{sql.mat.crossprod}}, \code{\link{sql.mat.determinant}}, \code{\link{sql.mat.flip}}, \code{\link{sql.mat.multiply}}, \code{\link{sql.nonneg}}, \code{\link{sql.query.underlying}}, \code{\link{sql.query}}, \code{\link{sql.regr}}, \code{\link{sql.tbl}}, \code{\link{sql.ui}}, \code{\link{sql.unbracket}}, \code{\link{sql.update}}, \code{\link{sql.yield.curve.1dFloMo}}, \code{\link{sql.yield.curve}}, \code{\link{sql.yyyymmdd}}, \code{\link{sql.yyyymm}} } \keyword{sql.median}
/man/sql.median.Rd
no_license
vsrimurthy/EPFR
R
false
true
4,936
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/EPFR.r \name{sql.median} \alias{sql.median} \title{sql.median} \usage{ sql.median(x, y, n, w = 0.5) } \arguments{ \item{x}{= column on which computation is run} \item{y}{= column on which partitioning is performed} \item{n}{= SQL statement} \item{w}{= desired ptile break point} } \description{ median (or alternate ptile point) of <x> within <y> } \seealso{ Other sql: \code{\link{sql.1dActWtTrend.Alloc}}, \code{\link{sql.1dActWtTrend.Final}}, \code{\link{sql.1dActWtTrend.Flow}}, \code{\link{sql.1dActWtTrend.select}}, \code{\link{sql.1dActWtTrend.topline.from}}, \code{\link{sql.1dActWtTrend.topline}}, \code{\link{sql.1dActWtTrend.underlying.basic}}, \code{\link{sql.1dActWtTrend.underlying}}, \code{\link{sql.1dActWtTrend}}, \code{\link{sql.1dFloMo.CountryId.List}}, \code{\link{sql.1dFloMo.FI}}, \code{\link{sql.1dFloMo.Rgn}}, \code{\link{sql.1dFloMo.Sec.topline}}, \code{\link{sql.1dFloMo.filter}}, \code{\link{sql.1dFloMo.grp}}, \code{\link{sql.1dFloMo.select.wrapper}}, \code{\link{sql.1dFloMo.select}}, \code{\link{sql.1dFloMo.underlying}}, \code{\link{sql.1dFloMoAggr}}, \code{\link{sql.1dFloMo}}, \code{\link{sql.1dFloTrend.Alloc.data}}, \code{\link{sql.1dFloTrend.Alloc.fetch}}, \code{\link{sql.1dFloTrend.Alloc.final}}, \code{\link{sql.1dFloTrend.Alloc.from}}, \code{\link{sql.1dFloTrend.Alloc.purge}}, \code{\link{sql.1dFloTrend.Alloc}}, \code{\link{sql.1dFloTrend.select}}, \code{\link{sql.1dFloTrend.underlying}}, \code{\link{sql.1dFloTrend}}, \code{\link{sql.1dFundCt}}, \code{\link{sql.1dFundRet}}, \code{\link{sql.1dION}}, \code{\link{sql.1mActWt.underlying}}, \code{\link{sql.1mActWtIncrPct}}, \code{\link{sql.1mActWtTrend.underlying}}, \code{\link{sql.1mActWtTrend}}, \code{\link{sql.1mActWt}}, \code{\link{sql.1mAllocD.from}}, \code{\link{sql.1mAllocD.select}}, \code{\link{sql.1mAllocD.topline.from}}, \code{\link{sql.1mAllocD}}, \code{\link{sql.1mAllocMo.select}}, 
\code{\link{sql.1mAllocMo.underlying.from}}, \code{\link{sql.1mAllocMo.underlying.pre}}, \code{\link{sql.1mAllocMo}}, \code{\link{sql.1mAllocSkew.topline.from}}, \code{\link{sql.1mAllocSkew}}, \code{\link{sql.1mBullish.Alloc}}, \code{\link{sql.1mBullish.Final}}, \code{\link{sql.1mChActWt}}, \code{\link{sql.1mFloMo}}, \code{\link{sql.1mFloTrend.underlying}}, \code{\link{sql.1mFloTrend}}, \code{\link{sql.1mFundCt}}, \code{\link{sql.1mHoldAum}}, \code{\link{sql.1mSRIAdvisorPct}}, \code{\link{sql.1wFlow.Corp}}, \code{\link{sql.ActWtDiff2}}, \code{\link{sql.Allocation.Sec.FinsExREst}}, \code{\link{sql.Allocation.Sec}}, \code{\link{sql.Allocations.bulk.EqWtAvg}}, \code{\link{sql.Allocations.bulk.Single}}, \code{\link{sql.Allocation}}, \code{\link{sql.BenchIndex.duplication}}, \code{\link{sql.Bullish}}, \code{\link{sql.DailyFlo}}, \code{\link{sql.Diff}}, \code{\link{sql.Dispersion}}, \code{\link{sql.FloMo.Funds}}, \code{\link{sql.Flow}}, \code{\link{sql.Foreign}}, \code{\link{sql.FundHistory.macro}}, \code{\link{sql.FundHistory.sf}}, \code{\link{sql.FundHistory}}, \code{\link{sql.HSIdmap}}, \code{\link{sql.HerdingLSV}}, \code{\link{sql.Holdings.bulk.wrapper}}, \code{\link{sql.Holdings.bulk}}, \code{\link{sql.Holdings}}, \code{\link{sql.ION}}, \code{\link{sql.MonthlyAlloc}}, \code{\link{sql.MonthlyAssetsEnd}}, \code{\link{sql.Mo}}, \code{\link{sql.Overweight}}, \code{\link{sql.RDSuniv}}, \code{\link{sql.ReportDate}}, \code{\link{sql.SRI}}, \code{\link{sql.ShareClass}}, \code{\link{sql.TopDownAllocs.items}}, \code{\link{sql.TopDownAllocs.underlying}}, \code{\link{sql.TopDownAllocs}}, \code{\link{sql.Trend}}, \code{\link{sql.and}}, \code{\link{sql.arguments}}, \code{\link{sql.bcp}}, \code{\link{sql.breakdown}}, \code{\link{sql.case}}, \code{\link{sql.close}}, \code{\link{sql.connect.wrapper}}, \code{\link{sql.connect}}, \code{\link{sql.cross.border}}, \code{\link{sql.datediff}}, \code{\link{sql.declare}}, \code{\link{sql.delete}}, \code{\link{sql.drop}}, 
\code{\link{sql.exists}}, \code{\link{sql.extra.domicile}}, \code{\link{sql.index}}, \code{\link{sql.into}}, \code{\link{sql.in}}, \code{\link{sql.isin.old.to.new}}, \code{\link{sql.label}}, \code{\link{sql.map.classif}}, \code{\link{sql.mat.cofactor}}, \code{\link{sql.mat.crossprod.vector}}, \code{\link{sql.mat.crossprod}}, \code{\link{sql.mat.determinant}}, \code{\link{sql.mat.flip}}, \code{\link{sql.mat.multiply}}, \code{\link{sql.nonneg}}, \code{\link{sql.query.underlying}}, \code{\link{sql.query}}, \code{\link{sql.regr}}, \code{\link{sql.tbl}}, \code{\link{sql.ui}}, \code{\link{sql.unbracket}}, \code{\link{sql.update}}, \code{\link{sql.yield.curve.1dFloMo}}, \code{\link{sql.yield.curve}}, \code{\link{sql.yyyymmdd}}, \code{\link{sql.yyyymm}} } \keyword{sql.median}
library(raster) # Package to handle raster-formatted spatial data library(rasterVis) # The rasterVis package complements the raster package, providing a set of methods for enhanced visualization and interaction # Defines visualisation methods with 'levelplot' library(dismo) # Dismo has the SDM analyses for maxent and support vector machines used by R library(rgeos) # To define circles with a radius around the subsampled points # geos is a geometry engine, need to install package to access these capabilities (such as defining circumfrances) library(rJava) library(rgdal) # Provides access to projection/transformation operations from a different library # Coordinate referancing system** library(sp) # Coordinate referancing system library(ncdf4) # Opens access to read and write on netCDF files library(kernlab) # Required for support vector machines # installed and running BUT UNSURE of function library(grDevices) # For colouring maps library(colorRamps) #Allows easy construction of color palettes #Loading data for project now #Ensure WD is in correct place WILL BE IN NEW PLACE FOR EACH SPECIES setwd("~/Documents/UoY/Dissertation/Pout") locs = read.csv("Pout_Severn_UTM.csv", header=T, sep = ",") #loading severn files #had to add the file location for R to access the severn files, is this right? 
dry_always<-raster("Severn_unaltered Pout/always_dry_masked.tif") tidal_range<-raster("Severn_unaltered Pout/tidal_range_masked.tif") subtidal<-raster("Severn_unaltered Pout/subtidal_masked.tif") min_elev<-raster("Severn_unaltered Pout/min_elev_masked.tif") max_velocity<-raster("Severn_unaltered Pout/max_vel_masked.tif") max_elev<-raster("Severn_unaltered Pout/max_elev_masked.tif") mask_2<-raster("Severn_unaltered Pout/mask2.tif") intertidal<-raster("Severn_unaltered Pout/intertidal_masked.tif") depth<-raster("Severn_unaltered Pout/bathy_masked.tif") avg_velocity<-raster("Severn_unaltered Pout/av_vel_masked.tif") #ALL raster data is uploaded here mask<-depth #DO NOT HAVE 'distance_to_coast' comparison in our data set as in MaxEnt Code #DO NOT HAVE 'lat and lon' tifs as in MaxEnt Code # Extract depth values to table of species co-ordinates locs_ext=extract(depth, locs[,c("X","Y")]) #this has created a VALUE of depth for each single point as dictated by x&y coordinates from species data #now each species seen has a depth based on its coordinates in the depth raster file we are given!! # Build a data frame of species occurrence data and depth data locs = data.frame(locs, locs_ext) # added locs_ext to the final column in locs file so now coordinates for species can be coupled with their depth in teh same file # Remove points with NA values for depth, i.e. on land locs = subset(locs, !is.na(locs_ext)) e = extent(depth) #subset extracted all values and rows with 'na' from the locs_ext column # WHAT DOES EXTENT DO?! # without using the 'mask' technique above will this still remove all 'land' data above? #what is "e"?? - is it simply giving the 'extent' of the data set in a min and max of x and y? 
# Create sequences of X and Y values to define a grid # this a 1x1 km grid xgrid = seq(e@xmin,e@xmax,1000) ygrid = seq(e@ymin,e@ymax,1000) #"seq()" works by 'from', 'to', 'by incremental step' #generated a sequence from xmin value to xmax value in "e" that increase by 1000 # Identify occurrence points within each grid cell, then draw one at random subs = c() for(i in 1:(length(xgrid)-1)) { for(j in 1:(length(ygrid)-1)) { gridsq = subset(locs, Y > ygrid[j] & Y < ygrid[j+1] & X > xgrid[i] & X < xgrid[i+1]) if(dim(gridsq)[1]>0) { subs = rbind(subs, gridsq[sample(1:dim(gridsq)[1],1 ), ]) } } } dim(locs);dim(subs) # Confirm that you have a smaller dataset than you started with (1st number) #for is an argument that will loop a desired action on a given value in a vector #length will get value the legth of vectors and factors in a defined object ##this a loop going through x values (every 1000m) and at each new x square, looping through all the y's related to that x (and so on for all the x values) #gridsq is a complex way of saying the square is greater than the start of one x/y value and less than the next one after it #rbind & cbind combine/create a matrix by rows (rbind) or columns (cbind) of the two seperate vector sets # Assign correct co-ordinate reference system to subset coordinates <- cbind(subs$X, subs$Y) subs_df <- SpatialPointsDataFrame(coordinates, subs, proj4string=CRS("+proj=utm +zone=30 ellps=WGS84")) #cbind of subs$X and subs$Y created a new data set/matrix called coordinates that only has coordinate data in it! # we create 20,000 random "background points". There are other ways to do this, but start with this. 
#NOTE psa <- randomPoints(mask, 20000, ext=e) #need to make sure all is up-to-date: previous error due to 'dismo' not being updated # Stack raster layers into one variable #NOTE WITHOUT INTERTIDAL LAYER env_uk<-stack(depth,max_elev,avg_velocity,subtidal,tidal_range) # Pull environmental data for the sumbsampled-presence points from the raster stack presence_uk= extract(env_uk, subs_df[,c("X","Y")]) #Warning messages: transforming SpatialPoints to the CRS of the Raster? # Pull environmental data for the pseudo-absence points from the raster stack pseudo_uk = extract(env_uk, psa) # Build some useful dataframes, with two columns of coordinates followed by the environmental variables. For the presence points: presence_uk = data.frame(X=subs_df$X, Y=subs_df$Y, presence_uk) #HOW IS THIS DIFFERENT TO ABOVE FUCNTION WITH "EXTRACT"? # Convert psa from atomic vector matrix to data.frame psapoints=data.frame(psa) # Bind co-ordinates coordinates <- cbind(psapoints$x, psapoints$y) # Create spatial data frame of pseudo absences psadf <- SpatialPointsDataFrame(coordinates, psapoints, proj4string=CRS("+proj=utm +zone=30 ellps=WGS84")) # Build dataframe, with two columns of coordinates followed by the 5 environmental variables. For the pseudo-absences: psadfx = psadf@coords colnames(psadfx) = c("X","Y") pseudo_uk = data.frame(cbind(psadfx,pseudo_uk)) # Vector of group assignments splitting the subsampled presence points data fram with environmental data into 5 groups group_p = kfold(presence_uk, 5) #kfold partitions a data set k times (in this case 5 times) for model testing purposes # Repeat above step for pseudo-absence points group_a = kfold(pseudo_uk, 5) # create output required for the loop evaluations = list(5) models = list(5) # where it says maxent - you may need to swap this for other functions if you're exploring different models # Note that some model may need different inputs etc. Read the docs to figure this out. # This is our k-fold test. 
You will want to spend a bit of time making predictions on each of the 5 sub-models # created here to check you can make decent predictions even with missing data for (test in 1:5) { # Then we use test and the kfold groupings to divide the presence and absence points: train_p = presence_uk[group_p!=test, c("X","Y")] train_a = pseudo_uk[group_a!=test, c("X","Y")] test_p = presence_uk[group_p==test, c("X","Y")] test_a = pseudo_uk[group_a==test, c("X","Y")] # Now, estimate a maxent model using the "training" points and the environmental data. This may take a few moments to run: models[test] = maxent(env_uk, p=train_p, a=train_a) # To validate the model, we use the appropriately named function. # Produces warning message about implicit list embedding being depreciated. May fail in future versions of R evaluations[test] = evaluate(test_p, test_a, models[[test]], env_uk) } # print out the AUC for the k-fold tests # ideally should be > 0.75 for all cat("K-FOLD AUC: ") for (test in 1:5) { cat(paste0(evaluations[[test]]@auc,",")) } #IF ONE WANTED to visualise the 1st model NEED TO FINISH WITH THE OTHER 4 KFOLDS ## pred <- predict(models[[1]], env_uk) ## plot(pred) #-could visualise all k-fold models to show they are the same (strong statistical result) #BUT DONT NEED TO # Assess Spatial Sorting Bias (SSB) pres_train_me <- train_p pres_test_me <- test_p back_train_me <- train_a back_test_me <- test_a sb <- ssb(pres_test_me, back_test_me, pres_train_me) sb[,1] / sb[,2] #creates a model of spacial biasing to compare to given preditions # Adjust for SSB if present via distance based point-wise sampling i <- pwdSample(pres_test_me, back_test_me, pres_train_me, n=1, tr=0.1) pres_test_pwd_me <- pres_test_me[!is.na(i[,1]), ] back_test_pwd_me <- back_test_me[na.omit(as.vector(i)), ] sb2 <- ssb(pres_test_pwd_me, back_test_pwd_me, pres_train_me) sb2[1]/ sb2[2] #creates full model without any K fold statistics etc pres_points = presence_uk[c("X","Y")] abs_points = pseudo_uk[c("X","Y")] 
# create full maxent with all points model <- maxent(env_uk, p=pres_points, a=abs_points) #turn model into prediction that can be plotted into a raster pred_PredFull <- predict(model, env_uk) #to see model and obtain jpeg plot(pred_PredFull) #Gives AUC for full model (pred_PredFull) evaluate_full <- evaluate(presence_uk[c("X","Y")], pseudo_uk[c("X","Y")], model, env_uk) #see what AUC is by typing it in evaluate_full #see what the specific sensitivity values is of species #use value givenas a base level or higher that one would expect to see species (compare to evaluate_full) message(threshold(evaluate_full)$spec_sens) #check response curves to see if they change the FULL MODEL response(model) #creates a raster file in the WD #will be useful when putting a file into qgis! writeRaster(pred_PredFull, filename="pred5_me.tif", options="INTERLEAVE=BAND", overwrite=TRUE)
/5th Code Pout -Inter-Max_vel-Min_fs-ADry.R
no_license
laxmack21/Severn-Estuary-SDMS
R
false
false
9,623
r
library(raster) # Package to handle raster-formatted spatial data library(rasterVis) # The rasterVis package complements the raster package, providing a set of methods for enhanced visualization and interaction # Defines visualisation methods with 'levelplot' library(dismo) # Dismo has the SDM analyses for maxent and support vector machines used by R library(rgeos) # To define circles with a radius around the subsampled points # geos is a geometry engine, need to install package to access these capabilities (such as defining circumfrances) library(rJava) library(rgdal) # Provides access to projection/transformation operations from a different library # Coordinate referancing system** library(sp) # Coordinate referancing system library(ncdf4) # Opens access to read and write on netCDF files library(kernlab) # Required for support vector machines # installed and running BUT UNSURE of function library(grDevices) # For colouring maps library(colorRamps) #Allows easy construction of color palettes #Loading data for project now #Ensure WD is in correct place WILL BE IN NEW PLACE FOR EACH SPECIES setwd("~/Documents/UoY/Dissertation/Pout") locs = read.csv("Pout_Severn_UTM.csv", header=T, sep = ",") #loading severn files #had to add the file location for R to access the severn files, is this right? 
dry_always<-raster("Severn_unaltered Pout/always_dry_masked.tif") tidal_range<-raster("Severn_unaltered Pout/tidal_range_masked.tif") subtidal<-raster("Severn_unaltered Pout/subtidal_masked.tif") min_elev<-raster("Severn_unaltered Pout/min_elev_masked.tif") max_velocity<-raster("Severn_unaltered Pout/max_vel_masked.tif") max_elev<-raster("Severn_unaltered Pout/max_elev_masked.tif") mask_2<-raster("Severn_unaltered Pout/mask2.tif") intertidal<-raster("Severn_unaltered Pout/intertidal_masked.tif") depth<-raster("Severn_unaltered Pout/bathy_masked.tif") avg_velocity<-raster("Severn_unaltered Pout/av_vel_masked.tif") #ALL raster data is uploaded here mask<-depth #DO NOT HAVE 'distance_to_coast' comparison in our data set as in MaxEnt Code #DO NOT HAVE 'lat and lon' tifs as in MaxEnt Code # Extract depth values to table of species co-ordinates locs_ext=extract(depth, locs[,c("X","Y")]) #this has created a VALUE of depth for each single point as dictated by x&y coordinates from species data #now each species seen has a depth based on its coordinates in the depth raster file we are given!! # Build a data frame of species occurrence data and depth data locs = data.frame(locs, locs_ext) # added locs_ext to the final column in locs file so now coordinates for species can be coupled with their depth in teh same file # Remove points with NA values for depth, i.e. on land locs = subset(locs, !is.na(locs_ext)) e = extent(depth) #subset extracted all values and rows with 'na' from the locs_ext column # WHAT DOES EXTENT DO?! # without using the 'mask' technique above will this still remove all 'land' data above? #what is "e"?? - is it simply giving the 'extent' of the data set in a min and max of x and y? 
# Create sequences of X and Y values to define a grid # this a 1x1 km grid xgrid = seq(e@xmin,e@xmax,1000) ygrid = seq(e@ymin,e@ymax,1000) #"seq()" works by 'from', 'to', 'by incremental step' #generated a sequence from xmin value to xmax value in "e" that increase by 1000 # Identify occurrence points within each grid cell, then draw one at random subs = c() for(i in 1:(length(xgrid)-1)) { for(j in 1:(length(ygrid)-1)) { gridsq = subset(locs, Y > ygrid[j] & Y < ygrid[j+1] & X > xgrid[i] & X < xgrid[i+1]) if(dim(gridsq)[1]>0) { subs = rbind(subs, gridsq[sample(1:dim(gridsq)[1],1 ), ]) } } } dim(locs);dim(subs) # Confirm that you have a smaller dataset than you started with (1st number) #for is an argument that will loop a desired action on a given value in a vector #length will get value the legth of vectors and factors in a defined object ##this a loop going through x values (every 1000m) and at each new x square, looping through all the y's related to that x (and so on for all the x values) #gridsq is a complex way of saying the square is greater than the start of one x/y value and less than the next one after it #rbind & cbind combine/create a matrix by rows (rbind) or columns (cbind) of the two seperate vector sets # Assign correct co-ordinate reference system to subset coordinates <- cbind(subs$X, subs$Y) subs_df <- SpatialPointsDataFrame(coordinates, subs, proj4string=CRS("+proj=utm +zone=30 ellps=WGS84")) #cbind of subs$X and subs$Y created a new data set/matrix called coordinates that only has coordinate data in it! # we create 20,000 random "background points". There are other ways to do this, but start with this. 
#NOTE psa <- randomPoints(mask, 20000, ext=e) #need to make sure all is up-to-date: previous error due to 'dismo' not being updated # Stack raster layers into one variable #NOTE WITHOUT INTERTIDAL LAYER env_uk<-stack(depth,max_elev,avg_velocity,subtidal,tidal_range) # Pull environmental data for the sumbsampled-presence points from the raster stack presence_uk= extract(env_uk, subs_df[,c("X","Y")]) #Warning messages: transforming SpatialPoints to the CRS of the Raster? # Pull environmental data for the pseudo-absence points from the raster stack pseudo_uk = extract(env_uk, psa) # Build some useful dataframes, with two columns of coordinates followed by the environmental variables. For the presence points: presence_uk = data.frame(X=subs_df$X, Y=subs_df$Y, presence_uk) #HOW IS THIS DIFFERENT TO ABOVE FUCNTION WITH "EXTRACT"? # Convert psa from atomic vector matrix to data.frame psapoints=data.frame(psa) # Bind co-ordinates coordinates <- cbind(psapoints$x, psapoints$y) # Create spatial data frame of pseudo absences psadf <- SpatialPointsDataFrame(coordinates, psapoints, proj4string=CRS("+proj=utm +zone=30 ellps=WGS84")) # Build dataframe, with two columns of coordinates followed by the 5 environmental variables. For the pseudo-absences: psadfx = psadf@coords colnames(psadfx) = c("X","Y") pseudo_uk = data.frame(cbind(psadfx,pseudo_uk)) # Vector of group assignments splitting the subsampled presence points data fram with environmental data into 5 groups group_p = kfold(presence_uk, 5) #kfold partitions a data set k times (in this case 5 times) for model testing purposes # Repeat above step for pseudo-absence points group_a = kfold(pseudo_uk, 5) # create output required for the loop evaluations = list(5) models = list(5) # where it says maxent - you may need to swap this for other functions if you're exploring different models # Note that some model may need different inputs etc. Read the docs to figure this out. # This is our k-fold test. 
You will want to spend a bit of time making predictions on each of the 5 sub-models # created here to check you can make decent predictions even with missing data for (test in 1:5) { # Then we use test and the kfold groupings to divide the presence and absence points: train_p = presence_uk[group_p!=test, c("X","Y")] train_a = pseudo_uk[group_a!=test, c("X","Y")] test_p = presence_uk[group_p==test, c("X","Y")] test_a = pseudo_uk[group_a==test, c("X","Y")] # Now, estimate a maxent model using the "training" points and the environmental data. This may take a few moments to run: models[test] = maxent(env_uk, p=train_p, a=train_a) # To validate the model, we use the appropriately named function. # Produces warning message about implicit list embedding being depreciated. May fail in future versions of R evaluations[test] = evaluate(test_p, test_a, models[[test]], env_uk) } # print out the AUC for the k-fold tests # ideally should be > 0.75 for all cat("K-FOLD AUC: ") for (test in 1:5) { cat(paste0(evaluations[[test]]@auc,",")) } #IF ONE WANTED to visualise the 1st model NEED TO FINISH WITH THE OTHER 4 KFOLDS ## pred <- predict(models[[1]], env_uk) ## plot(pred) #-could visualise all k-fold models to show they are the same (strong statistical result) #BUT DONT NEED TO # Assess Spatial Sorting Bias (SSB) pres_train_me <- train_p pres_test_me <- test_p back_train_me <- train_a back_test_me <- test_a sb <- ssb(pres_test_me, back_test_me, pres_train_me) sb[,1] / sb[,2] #creates a model of spacial biasing to compare to given preditions # Adjust for SSB if present via distance based point-wise sampling i <- pwdSample(pres_test_me, back_test_me, pres_train_me, n=1, tr=0.1) pres_test_pwd_me <- pres_test_me[!is.na(i[,1]), ] back_test_pwd_me <- back_test_me[na.omit(as.vector(i)), ] sb2 <- ssb(pres_test_pwd_me, back_test_pwd_me, pres_train_me) sb2[1]/ sb2[2] #creates full model without any K fold statistics etc pres_points = presence_uk[c("X","Y")] abs_points = pseudo_uk[c("X","Y")] 
# create full maxent with all points model <- maxent(env_uk, p=pres_points, a=abs_points) #turn model into prediction that can be plotted into a raster pred_PredFull <- predict(model, env_uk) #to see model and obtain jpeg plot(pred_PredFull) #Gives AUC for full model (pred_PredFull) evaluate_full <- evaluate(presence_uk[c("X","Y")], pseudo_uk[c("X","Y")], model, env_uk) #see what AUC is by typing it in evaluate_full #see what the specific sensitivity values is of species #use value givenas a base level or higher that one would expect to see species (compare to evaluate_full) message(threshold(evaluate_full)$spec_sens) #check response curves to see if they change the FULL MODEL response(model) #creates a raster file in the WD #will be useful when putting a file into qgis! writeRaster(pred_PredFull, filename="pred5_me.tif", options="INTERLEAVE=BAND", overwrite=TRUE)
d1 <- tibble::tibble(a = 1:5, b = letters[1:5]) d2 <- tibble::tibble(a = c(1,3:6), b = letters[1:5]) d3 <- tibble::tibble(c = 1:5) d4 <- tibble::tibble(c = c(1:5,5)) d5 <- tibble::tibble(a = 1:5) d6 <- tibble::tibble(c = 1:4) check_cardinality_0_n(d1, a, d2, a) check_cardinality_0_n(d1, a, d3, c) check_cardinality_1_n(d1, a, d3, c) check_cardinality_1_1(d1, a, d3, c) check_cardinality_1_1(d1, a, d4, c) check_cardinality_1_1(d4, c, d1, a) check_set_equality(d1, a, d3, c) check_cardinality_0_1(d1, a, d2, a) check_cardinality_0_1(d1, a, d4, c) check_cardinality_0_1(d4, c, d1, a) check_cardinality_0_n(d4, c, d5, a) check_cardinality_0_n(d5, a, d4, c) check_cardinality_1_1(d5, a, d4, c) check_cardinality_0_1(d5, a, d6, c)
/scratch/other_test.R
permissive
philipp-baumann/dm
R
false
false
730
r
d1 <- tibble::tibble(a = 1:5, b = letters[1:5]) d2 <- tibble::tibble(a = c(1,3:6), b = letters[1:5]) d3 <- tibble::tibble(c = 1:5) d4 <- tibble::tibble(c = c(1:5,5)) d5 <- tibble::tibble(a = 1:5) d6 <- tibble::tibble(c = 1:4) check_cardinality_0_n(d1, a, d2, a) check_cardinality_0_n(d1, a, d3, c) check_cardinality_1_n(d1, a, d3, c) check_cardinality_1_1(d1, a, d3, c) check_cardinality_1_1(d1, a, d4, c) check_cardinality_1_1(d4, c, d1, a) check_set_equality(d1, a, d3, c) check_cardinality_0_1(d1, a, d2, a) check_cardinality_0_1(d1, a, d4, c) check_cardinality_0_1(d4, c, d1, a) check_cardinality_0_n(d4, c, d5, a) check_cardinality_0_n(d5, a, d4, c) check_cardinality_1_1(d5, a, d4, c) check_cardinality_0_1(d5, a, d6, c)
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/plot.bivden.R, R/plot.msden.R, R/plot.rrs.R, % R/plot.rrst.R, R/plot.stden.R \name{plot.bivden} \alias{plot.bivden} \alias{plot.rrs} \alias{plot.msden} \alias{plot.stden} \alias{plot.rrst} \title{Plotting sparr objects} \usage{ \method{plot}{bivden}(x, what = c("z", "edge", "bw"), add.pts = FALSE, auto.axes = TRUE, override.par = TRUE, ...) \method{plot}{msden}(x, what = c("z", "edge", "bw"), sleep = 0.2, override.par = TRUE, ...) \method{plot}{rrs}(x, auto.axes = TRUE, tol.show = TRUE, tol.type = c("upper", "lower", "two.sided"), tol.args = list(levels = 0.05, lty = 1, drawlabels = TRUE), ...) \method{plot}{rrst}(x, tselect = NULL, type = c("joint", "conditional"), fix.range = FALSE, tol.show = TRUE, tol.type = c("upper", "lower", "two.sided"), tol.args = list(levels = 0.05, lty = 1, drawlabels = TRUE), sleep = 0.2, override.par = TRUE, expscale = FALSE, ...) \method{plot}{stden}(x, tselect = NULL, type = c("joint", "conditional"), fix.range = FALSE, sleep = 0.2, override.par = TRUE, ...) } \arguments{ \item{x}{An object of class \code{\link{bivden}}, \code{\link{stden}}, \code{\link{rrs}}, \code{\link{rrst}}, or \code{\link{msden}}.} \item{what}{A character string to select plotting of result (\code{"z"}; default); edge-correction surface (\code{"edge"}); or variable bandwidth surface (\code{"bw"}).} \item{add.pts}{Logical value indicating whether to add the observations to the image plot using default \code{\link{points}}.} \item{auto.axes}{Logical value indicating whether to display the plot with automatically added x-y axes and an `L' box in default styles.} \item{override.par}{Logical value indicating whether to override the existing graphics device parameters prior to plotting, resetting \code{mfrow} and \code{mar}. 
See `Details' for when you might want to disable this.} \item{...}{Additional graphical parameters to be passed to \code{\link[spatstat.geom]{plot.im}}, or in one instance, to \code{\link[spatstat.geom]{plot.ppp}} (see `Details').} \item{sleep}{Single positive numeric value giving the amount of time (in seconds) to \code{\link[base]{Sys.sleep}} before drawing the next image in the animation.} \item{tol.show}{Logical value indicating whether to show pre-computed tolerance contours on the plot(s). The object \code{x} must already have the relevant \emph{p}-value surface(s) stored in order for this argument to have any effect.} \item{tol.type}{A character string used to control the type of tolerance contour displayed; a test for elevated risk (\code{"upper"}), decreased risk (\code{"lower"}), or a two-tailed test (\code{two.sided}).} \item{tol.args}{A named list of valid arguments to be passed directly to \code{\link[graphics]{contour}} to control the appearance of plotted contours. Commonly used items are \code{levels}, \code{lty}, \code{lwd} and \code{drawlabels}.} \item{tselect}{Either a single numeric value giving the time at which to return the plot, or a vector of length 2 giving an interval of times over which to plot. This argument must respect the stored temporal bound in \code{x$tlim}, else an error will be thrown. By default, the full set of images (i.e. over the entire available time span) is plotted.} \item{type}{A character string to select plotting of joint/unconditional spatiotemporal estimate (default) or conditional spatial density given time.} \item{fix.range}{Logical value indicating whether use the same color scale limits for each plot in the sequence. Ignored if the user supplies a pre-defined \code{\link[spatstat.geom]{colourmap}} to the \code{col} argument, which is matched to \code{...} above and passed to \code{\link[spatstat.geom]{plot.im}}. See `Examples'.} \item{expscale}{Logical value indicating whether to force a raw-risk scale. 
Useful for users wishing to plot a log-relative risk surface, but to have the raw-risk displayed on the colour ribbon.} } \value{ Plots to the relevant graphics device. } \description{ \code{plot} methods for classes \code{\link{bivden}}, \code{\link{stden}}, \code{\link{rrs}}, \code{\link{rrst}} and \code{\link{msden}}. } \details{ In all instances, visualisation is deferred to \code{\link[spatstat.geom]{plot.im}}, for which there are a variety of customisations available the user can access via \code{...}. The one exception is when plotting observation-specific \code{"diggle"} edge correction factors---in this instance, a plot of the spatial observations is returned with size proportional to the influence of each correction weight. When plotting a \code{\link{rrs}} object, a pre-computed \emph{p}-value surface (see argument \code{tolerate} in \code{\link{risk}}) will automatically be superimposed at a significance level of 0.05. Greater flexibility in visualisation is gained by using \code{\link{tolerance}} in conjunction with \code{\link{contour}}. An \code{\link{msden}}, \code{\link{stden}}, or \code{\link{rrst}} object is plotted as an animation, one pixel image after another, separated by \code{sleep} seconds. If instead you intend the individual images to be plotted in an array of images, you should first set up your plot device layout, and ensure \code{override.par = FALSE} so that the function does not reset these device parameters itself. In such an instance, one might also want to set \code{sleep = 0}. 
} \examples{ \donttest{ data(pbc) data(fmd) data(burk) # 'bivden' object pbcden <- bivariate.density(split(pbc)$case,h0=3,hp=2,adapt=TRUE,davies.baddeley=0.05,verbose=FALSE) plot(pbcden) plot(pbcden,what="bw",main="PBC cases\\n variable bandwidth surface",xlab="Easting",ylab="Northing") # 'stden' object burkden <- spattemp.density(burk$cases,tres=128) # observation times are stored in marks(burk$cases) plot(burkden,fix.range=TRUE,sleep=0.1) # animation plot(burkden,tselect=c(1000,3000),type="conditional") # spatial densities conditional on each time # 'rrs' object pbcrr <- risk(pbc,h0=4,hp=3,adapt=TRUE,tolerate=TRUE,davies.baddeley=0.025,edge="diggle") plot(pbcrr) # default plot(pbcrr,tol.args=list(levels=c(0.05,0.01),lty=2:1,col="seagreen4"),auto.axes=FALSE) # 'rrst' object f <- spattemp.density(fmd$cases,h=6,lambda=8) g <- bivariate.density(fmd$controls,h0=6) fmdrr <- spattemp.risk(f,g,tolerate=TRUE) plot(fmdrr,sleep=0.1,fix.range=TRUE) plot(fmdrr,type="conditional",sleep=0.1,tol.type="two.sided", tol.args=list(levels=0.05,drawlabels=FALSE)) # 'msden' object pbcmult <- multiscale.density(split(pbc)$case,h0=4,h0fac=c(0.25,2.5)) plot(pbcmult) # densities plot(pbcmult,what="edge") # edge correction surfaces plot(pbcmult,what="bw") # bandwidth surfaces } } \author{ T.M. Davies }
/sparr/man/plotsparr.Rd
no_license
albrizre/spatstat.revdep
R
false
true
6,711
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/plot.bivden.R, R/plot.msden.R, R/plot.rrs.R, % R/plot.rrst.R, R/plot.stden.R \name{plot.bivden} \alias{plot.bivden} \alias{plot.rrs} \alias{plot.msden} \alias{plot.stden} \alias{plot.rrst} \title{Plotting sparr objects} \usage{ \method{plot}{bivden}(x, what = c("z", "edge", "bw"), add.pts = FALSE, auto.axes = TRUE, override.par = TRUE, ...) \method{plot}{msden}(x, what = c("z", "edge", "bw"), sleep = 0.2, override.par = TRUE, ...) \method{plot}{rrs}(x, auto.axes = TRUE, tol.show = TRUE, tol.type = c("upper", "lower", "two.sided"), tol.args = list(levels = 0.05, lty = 1, drawlabels = TRUE), ...) \method{plot}{rrst}(x, tselect = NULL, type = c("joint", "conditional"), fix.range = FALSE, tol.show = TRUE, tol.type = c("upper", "lower", "two.sided"), tol.args = list(levels = 0.05, lty = 1, drawlabels = TRUE), sleep = 0.2, override.par = TRUE, expscale = FALSE, ...) \method{plot}{stden}(x, tselect = NULL, type = c("joint", "conditional"), fix.range = FALSE, sleep = 0.2, override.par = TRUE, ...) } \arguments{ \item{x}{An object of class \code{\link{bivden}}, \code{\link{stden}}, \code{\link{rrs}}, \code{\link{rrst}}, or \code{\link{msden}}.} \item{what}{A character string to select plotting of result (\code{"z"}; default); edge-correction surface (\code{"edge"}); or variable bandwidth surface (\code{"bw"}).} \item{add.pts}{Logical value indicating whether to add the observations to the image plot using default \code{\link{points}}.} \item{auto.axes}{Logical value indicating whether to display the plot with automatically added x-y axes and an `L' box in default styles.} \item{override.par}{Logical value indicating whether to override the existing graphics device parameters prior to plotting, resetting \code{mfrow} and \code{mar}. 
See `Details' for when you might want to disable this.} \item{...}{Additional graphical parameters to be passed to \code{\link[spatstat.geom]{plot.im}}, or in one instance, to \code{\link[spatstat.geom]{plot.ppp}} (see `Details').} \item{sleep}{Single positive numeric value giving the amount of time (in seconds) to \code{\link[base]{Sys.sleep}} before drawing the next image in the animation.} \item{tol.show}{Logical value indicating whether to show pre-computed tolerance contours on the plot(s). The object \code{x} must already have the relevant \emph{p}-value surface(s) stored in order for this argument to have any effect.} \item{tol.type}{A character string used to control the type of tolerance contour displayed; a test for elevated risk (\code{"upper"}), decreased risk (\code{"lower"}), or a two-tailed test (\code{two.sided}).} \item{tol.args}{A named list of valid arguments to be passed directly to \code{\link[graphics]{contour}} to control the appearance of plotted contours. Commonly used items are \code{levels}, \code{lty}, \code{lwd} and \code{drawlabels}.} \item{tselect}{Either a single numeric value giving the time at which to return the plot, or a vector of length 2 giving an interval of times over which to plot. This argument must respect the stored temporal bound in \code{x$tlim}, else an error will be thrown. By default, the full set of images (i.e. over the entire available time span) is plotted.} \item{type}{A character string to select plotting of joint/unconditional spatiotemporal estimate (default) or conditional spatial density given time.} \item{fix.range}{Logical value indicating whether use the same color scale limits for each plot in the sequence. Ignored if the user supplies a pre-defined \code{\link[spatstat.geom]{colourmap}} to the \code{col} argument, which is matched to \code{...} above and passed to \code{\link[spatstat.geom]{plot.im}}. See `Examples'.} \item{expscale}{Logical value indicating whether to force a raw-risk scale. 
Useful for users wishing to plot a log-relative risk surface, but to have the raw-risk displayed on the colour ribbon.} } \value{ Plots to the relevant graphics device. } \description{ \code{plot} methods for classes \code{\link{bivden}}, \code{\link{stden}}, \code{\link{rrs}}, \code{\link{rrst}} and \code{\link{msden}}. } \details{ In all instances, visualisation is deferred to \code{\link[spatstat.geom]{plot.im}}, for which there are a variety of customisations available the user can access via \code{...}. The one exception is when plotting observation-specific \code{"diggle"} edge correction factors---in this instance, a plot of the spatial observations is returned with size proportional to the influence of each correction weight. When plotting a \code{\link{rrs}} object, a pre-computed \emph{p}-value surface (see argument \code{tolerate} in \code{\link{risk}}) will automatically be superimposed at a significance level of 0.05. Greater flexibility in visualisation is gained by using \code{\link{tolerance}} in conjunction with \code{\link{contour}}. An \code{\link{msden}}, \code{\link{stden}}, or \code{\link{rrst}} object is plotted as an animation, one pixel image after another, separated by \code{sleep} seconds. If instead you intend the individual images to be plotted in an array of images, you should first set up your plot device layout, and ensure \code{override.par = FALSE} so that the function does not reset these device parameters itself. In such an instance, one might also want to set \code{sleep = 0}. 
} \examples{ \donttest{ data(pbc) data(fmd) data(burk) # 'bivden' object pbcden <- bivariate.density(split(pbc)$case,h0=3,hp=2,adapt=TRUE,davies.baddeley=0.05,verbose=FALSE) plot(pbcden) plot(pbcden,what="bw",main="PBC cases\\n variable bandwidth surface",xlab="Easting",ylab="Northing") # 'stden' object burkden <- spattemp.density(burk$cases,tres=128) # observation times are stored in marks(burk$cases) plot(burkden,fix.range=TRUE,sleep=0.1) # animation plot(burkden,tselect=c(1000,3000),type="conditional") # spatial densities conditional on each time # 'rrs' object pbcrr <- risk(pbc,h0=4,hp=3,adapt=TRUE,tolerate=TRUE,davies.baddeley=0.025,edge="diggle") plot(pbcrr) # default plot(pbcrr,tol.args=list(levels=c(0.05,0.01),lty=2:1,col="seagreen4"),auto.axes=FALSE) # 'rrst' object f <- spattemp.density(fmd$cases,h=6,lambda=8) g <- bivariate.density(fmd$controls,h0=6) fmdrr <- spattemp.risk(f,g,tolerate=TRUE) plot(fmdrr,sleep=0.1,fix.range=TRUE) plot(fmdrr,type="conditional",sleep=0.1,tol.type="two.sided", tol.args=list(levels=0.05,drawlabels=FALSE)) # 'msden' object pbcmult <- multiscale.density(split(pbc)$case,h0=4,h0fac=c(0.25,2.5)) plot(pbcmult) # densities plot(pbcmult,what="edge") # edge correction surfaces plot(pbcmult,what="bw") # bandwidth surfaces } } \author{ T.M. Davies }
## plot3.R -- Energy sub-metering plot for 2007-02-01 and 2007-02-02.
## Reads the UCI "Individual household electric power consumption" data,
## keeps the two days of interest, and writes a 480x480 PNG showing the
## three sub-metering series over time with a legend.

# Location of the raw semicolon-separated data file.
thefile <- "./data/household_power_consumption.txt"

# "?" marks missing values in this dataset; map it to NA on read.
# FIX: the argument is `na.strings` -- the original `na.string` only
# worked via partial argument matching.
thedata <- read.table(thefile, header = TRUE, sep = ";", na.strings = "?")

# Subset to the analysis period, then free the (large) full table.
subset_data <- subset(thedata, Date %in% c("1/2/2007", "2/2/2007"))
rm(thedata)

# Open the PNG device before drawing.
png("plot3.png", width = 480, height = 480)

# Coerce the sub-metering columns to numeric.
# FIX: as.numeric() has no `na.rm` argument; the original passed
# `na.rm = TRUE`, which was silently ignored, so it is dropped here.
sub_metering1 <- as.numeric(subset_data$Sub_metering_1)
sub_metering2 <- as.numeric(subset_data$Sub_metering_2)
sub_metering3 <- as.numeric(subset_data$Sub_metering_3)

# Combine the Date and Time columns into one timestamp for the x axis.
date_time <- strptime(paste(subset_data$Date, subset_data$Time, sep = " "),
                      "%d/%m/%Y %H:%M:%S")

# Draw the first series, then overlay the other two.
with(subset_data, {
  plot(date_time, sub_metering1, type = "l",
       xlab = "", ylab = "Energy sub metering")
  lines(date_time, sub_metering2, type = "l", col = "red")
  lines(date_time, sub_metering3, type = "l", col = "blue")
})

# Legend matching the three series' colours.
legend("topright",
       c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"),
       lty = 1, col = c("black", "red", "blue"))

# Close the device so the file is flushed to disk.
dev.off()
/plot3.R
no_license
kurowska/ExData_Plotting1
R
false
false
1,213
r
#define the file thefile <- "./data/household_power_consumption.txt" #read the file thedata <- read.table(thefile, header=TRUE, sep=";", na.string="?") #subset data to contain only the important period subset_data <- subset(thedata, Date %in% c("1/2/2007","2/2/2007")) rm(thedata) #open a png device png("plot3.png", width=480, height=480) #get sub_metering variables, use as.numeric(), because R says it's not numeric sub_metering1 <- as.numeric(subset_data$Sub_metering_1, na.rm = TRUE) sub_metering2 <- as.numeric(subset_data$Sub_metering_2, na.rm = TRUE) sub_metering3 <- as.numeric(subset_data$Sub_metering_3, na.rm = TRUE) #get date and time columns into 1 variable with strptime date_time <- strptime(paste(subset_data$Date, subset_data$Time, sep=" "), "%d/%m/%Y %H:%M:%S") #make the first plot with(subset_data,{ plot(date_time, sub_metering1, type="l", xlab="", ylab="Energy sub metering") #add data to the plot lines(date_time, sub_metering2, type="l", col="red") lines(date_time, sub_metering3, type="l", col="blue") }) #add legend legend("topright", c("Sub_metering_1","Sub_metering_2","Sub_metering_3"), lty=1, col=c("black","red","blue")) #close the device dev.off()
# *********************************************************************************************
# Title     : Seeded KMeans
# Function  : seeded_kmeans
# Created by: Owner
# Created on: 2021/01/26
# URL       : https://dicits.ugr.es/software/SSLR/reference/seeded_kmeans.html
# *********************************************************************************************

# <Overview>
# - At initialisation there are as many clusters as classes that have labelled data,
#   i.e. each cluster centre equals the mean of the labelled data of one class.

# <Syntax>
# seeded_kmeans(max_iter = 10, method = "euclidean")

# <Contents>
# 0 Setup
# 1 Data creation
# 2 Modelling

# 0 Setup -------------------------------------------------------------------------

library(tidyverse)
library(caret)
library(SSLR)
library(tidymodels)

# Prepare the data (standard iris dataset)
data <- iris

# 1 Data creation --------------------------------------------------------------------

# Get the column index of the label column
cls <- which(colnames(iris) == "Species")

# Replace the labels of the unlabelled portion with NA
# (keep 20% labelled, stratified by Species)
set.seed(1)
labeled.index <- createDataPartition(data$Species, p = .2, list = FALSE)
data[-labeled.index,cls] <- NA

# 2 Modelling ----------------------------------------------------------------------

# Build the model
m <- seeded_kmeans() %>% fit(Species ~ ., data)

# Get the cluster assignments
# --- k-means, so this outputs cluster numbers
labels <- m %>% cluster_labels()

# Check: attach the assignments back to the data
data %>% mutate(label = unlist(cluster_labels(m)))
/library/SSLR/function/clustering/seeded_kmeans.R
no_license
delta0726/r-semi_supervised
R
false
false
1,541
r
# ********************************************************************************************* # Title : Seeded KMeans # Function : seeded_kmeans # Created by: Owner # Created on: 2021/01/26 # URL : https://dicits.ugr.es/software/SSLR/reference/seeded_kmeans.html # ********************************************************************************************* # <概要> # - 初期化時にラベル付きデータの存在するクラスの数、つまり特定のクラスのラベル付きデータの平均と同じ数のクラスターが存在する # <構文> # seeded_kmeans(max_iter = 10, method = "euclidean") # <目次> # 0 準備 # 1 データ作成 # 2 モデリング # 0 準備 ------------------------------------------------------------------------------- library(tidyverse) library(caret) library(SSLR) library(tidymodels) # データ準備 data <- iris # 1 データ作成 -------------------------------------------------------------------------- # 列番号の取得 cls <- which(colnames(iris) == "Species") # ラベルをNAに置換 set.seed(1) labeled.index <- createDataPartition(data$Species, p = .2, list = FALSE) data[-labeled.index,cls] <- NA # 2 モデリング ---------------------------------------------------------------------------- # モデル構築 m <- seeded_kmeans() %>% fit(Species ~ ., data) # ラベル分類の取得 # --- k-meansなので番号を出力 labels <- m %>% cluster_labels() # 確認 data %>% mutate(label = unlist(cluster_labels(m)))
library(tidyverse)
library(data.table)
library(plotly)

#################################################
#                   ANALYSIS                    #
#################################################

## Foster-care outcomes for Native American children (AmIAKN == "Yes")
## compared with non-Native children and the overall population.
data <- read.csv("Data/data.csv")

native_only <- data %>% filter(AmIAKN == "Yes")
non_native <- data %>% filter(AmIAKN == "No")

total_kids <- nrow(data)
native_kids <- nrow(native_only)
non_native_kids <- nrow(non_native)

## Count + percent breakdown of current placement settings (CurPlSet)
## for one group of children. (Replaces three copy-pasted pipelines.)
placement_summary <- function(df) {
  df %>%
    select(CurPlSet) %>%
    group_by(CurPlSet) %>%
    mutate(count = n()) %>%
    distinct(CurPlSet, .keep_all = TRUE) %>%
    ungroup() %>%
    mutate(percent = signif((count * 100) / sum(count), digits = 4))
}

native_placement <- placement_summary(native_only)
overall_placement <- placement_summary(data)
non_native_placement <- placement_summary(non_native)

## Mean of `x` to 4 significant digits; optionally treat NA as 0
## (this mirrors the per-group calculations in the original script).
avg4 <- function(x, na_as_zero = TRUE) {
  if (na_as_zero) x[is.na(x)] <- 0
  signif(sum(x) / length(x), digits = 4)
}

## Average number of removals (TotalRem) per group.
## FIX: the original omitted the NA->0 guard for the native group only,
## which would have yielded NA had any native TotalRem been missing;
## all three groups now use the same NA handling.
avg_rems = data.table(
  native = c("Native", "Non-Native", "Overall"),
  num_rems = c(avg4(native_only$TotalRem),
               avg4(non_native$TotalRem),
               avg4(data$TotalRem))
)

## Average number of placements (NumPlep) per group.
## No NA guard here, matching the original behaviour.
avg_num_plep = data.table(
  native = c("Native", "Non-Native", "Overall"),
  num_pleps = c(avg4(native_only$NumPlep, na_as_zero = FALSE),
                avg4(non_native$NumPlep, na_as_zero = FALSE),
                avg4(data$NumPlep, na_as_zero = FALSE))
)

## Percent of children with more than one removal ("repeaters") vs not,
## for one group. (Replaces three copy-pasted pipelines; the native
## version divided by `native_kids`, which equals the ungrouped n().)
repeater_summary <- function(df) {
  df %>%
    select(TotalRem) %>%
    mutate(isRep = ifelse(TotalRem > 1, TRUE, FALSE)) %>%
    group_by(isRep) %>%
    mutate(count = n()) %>%
    ungroup() %>%
    mutate(percent = signif((count * 100) / n(), digits = 4)) %>%
    distinct(percent, .keep_all = TRUE) %>%
    select(isRep, percent)
}

native_rep <- repeater_summary(native_only)
overall_rep <- repeater_summary(data)
non_native_rep <- repeater_summary(non_native)

## Long-format table used for comparison; row order follows the
## group-by output order of the summaries above.
avg_repeater <- data.frame(
  native = c(rep("native", 2), rep("non-native", 2), rep("overall", 2)),
  repeater = rep(c(TRUE, FALSE), 3),
  percent = c(native_rep$percent, non_native_rep$percent, overall_rep$percent)
)

## Average lifetime length of stay in foster care (days); NA counts as 0.
avg_days <- function(x) {
  x[is.na(x)] <- 0
  round(sum(x) / length(x))
}

native_LifeLOS <- avg_days(native_only$LifeLOS)
overall_LifeLOS <- avg_days(data$LifeLOS)
non_native_LifeLOS <- avg_days(non_native$LifeLOS)

## FIX: the original paired the "Non-Native" label with the *overall*
## average even though the non-native average was computed (and then
## never used); the non-native value is used here.
avg_LifeLOS <- data.frame(
  native = c("Native", "Non-Native"),
  LifeLOS = c(native_LifeLOS, non_native_LifeLOS)
)

## Do native children placed with native caretakers have a shorter stay
## (LifeLOS) and fewer placements (NumPlep)?
by_caretaker <- native_only %>%
  select(NumPlep, LifeLOS, RF1AMAKN, RF1ASIAN, RF1BLKAA, RF1NHOPI,
         RF1WHITE, RF1UTOD) %>%
  mutate(caretaker_native = ifelse(RF1AMAKN == "Yes", "Yes", "No")) %>%
  mutate(caretaker_native = ifelse(is.na(caretaker_native),
                                   "No caretaker", caretaker_native)) %>%
  group_by(caretaker_native) %>%
  mutate(Frequency = n(),
         LifeLOS = sum(LifeLOS) / n(),
         NumPlep = sum(NumPlep) / n()) %>%
  distinct(caretaker_native, .keep_all = TRUE) %>%
  select(NumPlep, LifeLOS, caretaker_native, Frequency)

#################################################
#                     PLOTS                     #
#################################################

## Shared fonts / palette for all figures.
bubblefont <- list(size = 15, color = '#ffefc1',
                   family = 'alte_haas_groteskregular')
titlefont <- list(family = 'alte_haas_groteskbold', color = '#ff9770')
color_palette <- c('#610453', '#1b3496', '#026E5E', '#390561', '#116DAB')

## Fully blank axis used by the bubble charts.
ax <- list(
  title = "",
  zeroline = FALSE,
  showline = FALSE,
  showticklabels = FALSE,
  showgrid = FALSE
)

## Average number of removals by native status (bubble chart).
num_rems_viz <- plot_ly(avg_rems, x = ~native, y = "",
                        type = 'scatter', mode = 'markers',
                        hovertemplate = paste("Average <br>",
                                              "Number of <br>",
                                              "Removals: <br>",
                                              avg_rems$num_rems),
                        size = ~num_rems, color = ~native,
                        sizes = c(117, 143),
                        marker = list(opacity = 0.55, sizemode = 'radius'),
                        colors = color_palette,
                        showlegend = F) %>%
  add_text(text = ~native, textfont = bubblefont, showlegend = F) %>%
  layout(title = 'Native American Children Have a Higher Number of Removals Than Non-Native Children',
         font = titlefont, xaxis = ax, yaxis = ax) %>%
  layout(plot_bgcolor = "rgba(0, 0, 0, 0)",
         paper_bgcolor = "rgba(0, 0, 0, 0)")
num_rems_viz

## Average number of placements by native status (bubble chart).
num_pleps_viz <- plot_ly(avg_num_plep, x = ~native, y = "",
                         type = 'scatter', mode = 'markers',
                         hovertemplate = paste("Average <br>",
                                               "Number of <br>",
                                               "Placements: <br>",
                                               avg_num_plep$num_pleps),
                         size = ~num_pleps, color = ~native,
                         sizes = c(166, 200),
                         marker = list(opacity = 0.55, sizemode = 'radius'),
                         colors = color_palette,
                         showlegend = F) %>%
  add_text(text = ~native, textfont = bubblefont, showlegend = F) %>%
  layout(title = 'Native American Children Have a Higher Number of Placements Than Non-Native Children',
         xaxis = ax, yaxis = ax, font = titlefont) %>%
  layout(plot_bgcolor = "rgba(0, 0, 0, 0)",
         paper_bgcolor = "rgba(0, 0, 0, 0)")
num_pleps_viz

## Percentage of repeaters by native status (three pie charts).
## FIX: `hoverinfo = none` referenced an undefined object `none` and
## would error at runtime; plotly expects the string "none".
repeater_viz <- plot_ly(labels = c("Native", "Non-Native", "Overall"),
                        marker = list(colors = "Set1"))
repeater_viz <- repeater_viz %>%
  add_pie(data = native_rep, labels = ~isRep, values = ~percent,
          name = "Native", text = c("Repeaters", "Non-Repeaters"),
          textfont = bubblefont, marker = list(colors = color_palette),
          title = "Native American", hoverinfo = "none",
          domain = list(x = c(0, 0.4), y = c(0.4, 1)))
repeater_viz <- repeater_viz %>%
  add_pie(data = non_native_rep, labels = ~isRep, values = ~percent,
          text = c("Repeaters", "Non-Repeaters"), textfont = bubblefont,
          marker = list(colors = color_palette),
          title = "Non-Native American", hoverinfo = "none",
          colors = color_palette, name = "Non Native",
          domain = list(x = c(0.6, 1), y = c(0.4, 1)))
repeater_viz <- repeater_viz %>%
  add_pie(data = overall_rep, labels = ~isRep, values = ~percent,
          text = c("Repeaters", "Non-Repeaters"), textfont = bubblefont,
          marker = list(colors = color_palette), title = "Overall",
          colors = color_palette, hoverinfo = "none", name = "Overall",
          domain = list(x = c(0.25, 0.75), y = c(0, 0.6)))
repeater_viz <- repeater_viz %>%
  layout(title = "Native American Children are More Likely To Go Back Into Foster Care",
         font = titlefont,
         showlegend = F,
         grid = list(rows = 2, columns = 2),
         xaxis = list(showgrid = FALSE, zeroline = FALSE,
                      showticklabels = FALSE),
         yaxis = list(showgrid = FALSE, zeroline = FALSE,
                      showticklabels = FALSE)) %>%
  layout(plot_bgcolor = "rgba(0, 0, 0, 0)",
         paper_bgcolor = "rgba(0, 0, 0, 0)")
repeater_viz

## Total stay in foster care by native status (bubble chart).
LifeLOS_viz <- plot_ly(avg_LifeLOS, x = "", y = "", color = ~native,
                       type = 'scatter', mode = 'markers',
                       hovertemplate = paste("Average <br>",
                                             "Stay in <br>",
                                             "Foster Care: <br>",
                                             avg_LifeLOS$LifeLOS, "days"),
                       size = ~LifeLOS, sizes = c(215, 247),
                       marker = list(sizemode = 'radius'),
                       colors = color_palette,
                       showlegend = T) %>%
  layout(title = "Native American Children Spend Longer in Foster Care\n than Non-Native Children",
         xaxis = ax, yaxis = ax, font = titlefont,
         plot_bgcolor = 'transparent', paper_bgcolor = 'transparent') %>%
  layout(plot_bgcolor = "rgba(0, 0, 0, 0)",
         paper_bgcolor = "rgba(0, 0, 0, 0)")
LifeLOS_viz
/native.R
permissive
marinawooden/children-in-foster-care
R
false
false
11,946
r
library(tidyverse) library(data.table) library(plotly) ################################################# # ANALYSIS # ################################################# data <- read.csv("Data/data.csv") native_only <- data %>% filter(AmIAKN == "Yes") non_native <- data %>% filter(AmIAKN == "No") total_kids <- nrow(data) native_kids <- nrow(native_only) non_native_kids <- nrow(non_native) # native placement settings in current FC setting native_placement <- native_only %>% select(CurPlSet) %>% group_by(CurPlSet) %>% mutate(count = n()) %>% distinct(CurPlSet, .keep_all = T) %>% ungroup() %>% mutate(percent = signif((count * 100) / sum(count), digits = 4)) # overall placement settings in current FC setting overall_placement <- data %>% select(CurPlSet) %>% group_by(CurPlSet) %>% mutate(count = n()) %>% distinct(CurPlSet, .keep_all = T) %>% ungroup() %>% mutate(percent = signif((count * 100) / sum(count), digits = 4)) # non-native placement settings in current FC setting non_native_placement <- non_native %>% select(CurPlSet) %>% group_by(CurPlSet) %>% mutate(count = n()) %>% distinct(CurPlSet, .keep_all = T) %>% ungroup() %>% mutate(percent = signif((count * 100) / sum(count), digits = 4)) # native average number of removals native_avg_rems <- native_only %>% select(TotalRem) %>% mutate(avg = signif(sum(TotalRem) / native_kids, digits = 4)) %>% distinct(avg) %>% pull(avg) # overall average number of removals overall_avg_rems <- data %>% select(TotalRem) %>% mutate(TotalRem = ifelse(is.na(TotalRem), 0, TotalRem)) %>% mutate(avg = signif(sum(TotalRem) / n(), digits = 4)) %>% distinct(avg) %>% pull(avg) # non-native average number of removals non_native_avg_rems <- non_native %>% select(TotalRem) %>% mutate(TotalRem = ifelse(is.na(TotalRem), 0, TotalRem)) %>% mutate(avg = signif(sum(TotalRem) / n(), digits = 4)) %>% distinct(avg) %>% pull(avg) avg_rems = data.table( native = c("Native", "Non-Native", "Overall"), num_rems = c(native_avg_rems, non_native_avg_rems, 
overall_avg_rems ) ) # native average number of placements native_avg_num_plep <- native_only %>% select(NumPlep) %>% mutate(avg = signif(sum(NumPlep) / native_kids, digits = 4)) %>% distinct(avg) %>% pull(avg) # overall avg number of placements overall_avg_num_plep <- data %>% select(NumPlep) %>% mutate(avg = signif(sum(NumPlep) / n(), digits = 4)) %>% distinct(avg) %>% pull(avg) # non-native avg number of placements non_native_avg_num_plep <- non_native %>% select(NumPlep) %>% mutate(avg = signif(sum(NumPlep) / n(), digits = 4)) %>% distinct(avg) %>% pull(avg) avg_num_plep = data.table( native = c("Native", "Non-Native", "Overall"), num_pleps = c(native_avg_num_plep, non_native_avg_num_plep, overall_avg_num_plep ) ) # native repeaters vs non repeaters percentages native_rep <- native_only %>% select(TotalRem) %>% mutate(isRep = ifelse(TotalRem > 1, TRUE, FALSE)) %>% group_by(isRep) %>% mutate(count = n()) %>% mutate(percent = signif((count * 100) / native_kids, digits = 4)) %>% distinct(percent) # overall avg repeater percentages overall_rep <- data %>% select(TotalRem) %>% mutate(isRep = ifelse(TotalRem > 1, TRUE, FALSE)) %>% group_by(isRep) %>% mutate(count = n()) %>% ungroup() %>% mutate(percent = signif((count * 100) / n(), digits = 4)) %>% distinct(percent, .keep_all = T) %>% select(isRep, percent) # non-native avg repeater percentages non_native_rep <- non_native %>% select(TotalRem) %>% mutate(isRep = ifelse(TotalRem > 1, TRUE, FALSE)) %>% group_by(isRep) %>% mutate(count = n()) %>% ungroup() %>% mutate(percent = signif((count * 100) / n(), digits = 4)) %>% distinct(percent, .keep_all = T) %>% select(isRep, percent) avg_repeater <- data.frame( native = c(rep("native" , 2) , rep("non-native" , 2) , rep("overall" , 2)), repeater = rep(c(TRUE , FALSE) , 3), percent = c(native_rep$percent, non_native_rep$percent , overall_rep$percent) ) # native average stay in FC native_LifeLOS <- native_only %>% select(LifeLOS) %>% mutate(LifeLOS = ifelse(is.na(LifeLOS), 0, 
LifeLOS)) %>% mutate(LifeLOS = round(sum(LifeLOS) / native_kids)) %>% distinct(LifeLOS) %>% pull(LifeLOS) # overall average stay in FC overall_LifeLOS <- data %>% select(LifeLOS) %>% mutate(LifeLOS = ifelse(is.na(LifeLOS), 0, LifeLOS)) %>% mutate(LifeLOS = round(sum(LifeLOS) / n())) %>% distinct(LifeLOS) %>% pull(LifeLOS) # non-native average stay in FC non_native_LifeLOS <- non_native %>% select(LifeLOS) %>% mutate(LifeLOS = ifelse(is.na(LifeLOS), 0, LifeLOS)) %>% mutate(LifeLOS = round(sum(LifeLOS) / n())) %>% distinct(LifeLOS) %>% pull(LifeLOS) avg_LifeLOS <- data.frame( native = c("Native", "Non-Native"), LifeLOS = c(native_LifeLOS, overall_LifeLOS) ) # Do native children placed with native caretakers have less LifeLOS and # numPleps? by_caretaker <- native_only %>% select(NumPlep, LifeLOS, RF1AMAKN, RF1ASIAN, RF1BLKAA, RF1NHOPI, RF1WHITE, RF1UTOD) %>% mutate(caretaker_native = ifelse(RF1AMAKN == "Yes", "Yes", "No")) %>% mutate(caretaker_native = ifelse(is.na(caretaker_native), "No caretaker", caretaker_native)) %>% group_by(caretaker_native) %>% mutate(Frequency = n(), LifeLOS = sum(LifeLOS) / n(), NumPlep = sum(NumPlep) / n()) %>% distinct(caretaker_native, .keep_all = T) %>% select(NumPlep, LifeLOS, caretaker_native, Frequency) ################################################# # PLOTS # ################################################# # average number of removals by native status bubblefont <- list(size = 15, color = '#ffefc1', family = 'alte_haas_groteskregular') titlefont <- list(family = 'alte_haas_groteskbold', color = '#ff9770') color_palette <- c('#610453', '#1b3496', '#026E5E', '#390561', '#116DAB') ax <- list( title = "", zeroline = FALSE, showline = FALSE, showticklabels = FALSE, showgrid = FALSE ) num_rems_viz <- plot_ly(avg_rems, x = ~native, y = "", type = 'scatter', mode = 'markers', hovertemplate = paste("Average <br>", "Number of <br>", "Removals: <br>", avg_rems$num_rems), size = ~num_rems, color = ~native, sizes = c(117, 143), marker = 
list(opacity = 0.55, sizemode = 'radius' ), colors = color_palette, showlegend = F ) %>% add_text(text = ~native, textfont = bubblefont, showlegend = F, ) %>% layout(title = 'Native American Children Have a Higher Number of Removals Than Non-Native Children', font= titlefont, xaxis = ax, yaxis = ax) %>% layout(plot_bgcolor = "rgba(0, 0, 0, 0)", paper_bgcolor = "rgba(0, 0, 0, 0)") num_rems_viz # average number of placements by native status num_pleps_viz <- plot_ly(avg_num_plep, x = ~native, y = "", type = 'scatter', mode = 'markers', hovertemplate = paste("Average <br>", "Number of <br>", "Placements: <br>", avg_num_plep$num_pleps), size = ~num_pleps, color = ~native, sizes = c(166, 200), marker = list(opacity = 0.55, sizemode = 'radius' ), colors = color_palette, showlegend = F ) %>% add_text(text = ~native, textfont = bubblefont, showlegend = F, ) %>% layout(title = 'Native American Children Have a Higher Number of Placements Than Non-Native Children', xaxis = ax, yaxis = ax, font= titlefont) %>% layout(plot_bgcolor = "rgba(0, 0, 0, 0)", paper_bgcolor = "rgba(0, 0, 0, 0)") num_pleps_viz # percentage of repeaters by native status repeater_viz <- plot_ly(labels = c("Native", "Non-Native", "Overall"), marker = list(colors = "Set1")) repeater_viz <- repeater_viz %>% add_pie(data = native_rep, labels = ~isRep, values = ~percent, name = "Native", text = c("Repeaters", "Non-Repeaters"), textfont = bubblefont, marker = list(colors = color_palette), title = "Native American", hoverinfo = none, domain = list(x = c(0, 0.4), y = c(0.4, 1))) repeater_viz <- repeater_viz %>% add_pie(data = non_native_rep, labels = ~isRep, values = ~percent, text = c("Repeaters", "Non-Repeaters"), textfont = bubblefont, marker = list(colors = color_palette), title = "Non-Native American", hoverinfo = none, colors = color_palette, name = "Non Native", domain = list(x = c(0.6, 1), y = c(0.4, 1))) repeater_viz <- repeater_viz %>% add_pie(data = overall_rep, labels = ~isRep, values = ~percent, text 
= c("Repeaters", "Non-Repeaters"), textfont = bubblefont, marker = list(colors = color_palette), title = "Overall", colors = color_palette, hoverinfo = none, name = "Overall", domain = list(x = c(0.25, 0.75), y = c(0, 0.6))) repeater_viz <- repeater_viz %>% layout( title = "Native American Children are More Likely To Go Back Into Foster Care", font = titlefont, showlegend = F, grid=list(rows=2, columns=2 ), xaxis = list(showgrid = FALSE, zeroline = FALSE, showticklabels = FALSE ), yaxis = list(showgrid = FALSE, zeroline = FALSE, showticklabels = FALSE ) ) %>% layout( plot_bgcolor = "rgba(0, 0, 0, 0)", paper_bgcolor = "rgba(0, 0, 0, 0)" ) repeater_viz # Total stay in FC by native status LifeLOS_viz <- plot_ly(avg_LifeLOS, x = "", y = "", color = ~native, type = 'scatter', mode = 'markers', hovertemplate = paste("Average <br>", "Stay in <br>", "Foster Care: <br>", avg_LifeLOS$LifeLOS, "days"), size = ~LifeLOS, sizes = c(215, 247), marker = list( sizemode = 'radius' ), colors = color_palette, showlegend = T ) %>% layout(title = "Native American Children Spend Longer in Foster Care\n than Non-Native Children", xaxis = ax, yaxis = ax, font = titlefont, plot_bgcolor='transparent', paper_bgcolor='transparent' ) %>% layout(plot_bgcolor = "rgba(0, 0, 0, 0)", paper_bgcolor = "rgba(0, 0, 0, 0)") LifeLOS_viz
#' NMR spectra of urine samples of rats with bariatric surgery. A complete description of the dataset can be found here: https://www.frontiersin.org/articles/10.3389/fmicb.2011.00183
#'
#' The data set contains 59 NMR spectra of length 4582. It includes metadata (425 experimental parameters).
#'
#' @format A data frame with 59 rows, X, metadata and ppm scale
#' \describe{
#'   \item{data}{NMR spectra}
#'   \item{binned.ppm}{a ppm scale}
#' }
#' @source {https://www.frontiersin.org/articles/10.3389/fmicb.2011.00183}
"bariatricRat.binned.5"
/R/dataset_bariatricRat.binned.5.R
no_license
jwist/hastaLaVista
R
false
false
542
r
#' NMR spectra of urine sample of rats with bariatric surgery. A complete description of the dataset can be found here: https://www.frontiersin.org/articles/10.3389/fmicb.2011.00183 #' #' The data set contains 59 nmr spectra of length 4582 It includes metadata (425 exprimental parameters) #' #' @format A data frame with 59 rows, X, metadata and ppm scale #' \describe{ #' \item{data}{NMR spectra} #' \item{binned.ppm}{a ppm scale} #' } #' @source {https://www.frontiersin.org/articles/10.3389/fmicb.2011.00183} "bariatricRat.binned.5"
\name{compute.threshold.AROC.bnp} \alias{compute.threshold.AROC.bnp} %- Also NEED an '\alias' for EACH other topic documented here. \title{ AROC-based threshold values. } \description{ Estimates AROC-based threshold values using the nonparametric Bayesian approach proposed by Inacio de Carvalho and Rodriguez-Alvarez (2018). } \usage{ compute.threshold.AROC.bnp(object, newdata, FPF = 0.5) } %- maybe also 'usage' for other objects documented here. \arguments{ \item{object}{An object of class \code{AROC} as produced by \code{\link{AROC.bnp}}.} \item{newdata}{Data frame with the covariate values at which threshold values are required.} \item{FPF}{Numeric vector with the FPF at which to calculate the AROC-based threshold values. Atomic values are also valid.} } \details{ Estimation of the covariate-adjusted ROC curve (AROC) using the nonparametric Bayesian approach proposed by Inacio de Carvalho and Rodriguez-Alvarez (2018) involves the estimation of the conditional distribution function for the diagnostic test outcome in the healthy population \deqn{F_{\bar{D}}(y|\mathbf{X}_{\bar{D}}) = Pr\{Y_{\bar{D}} \leq y | \mathbf{X}_{\bar{D}}\}.} This function makes use of this estimate in order to calculate AROC-based threshold values. In particular, for a covariate value \eqn{\mathbf{x}} and a FPF = t, the AROC-based threshold value at the \eqn{s}-th posterior sample (\eqn{s = 1,\ldots,S}) is calculated as follows \deqn{c^{(s)}_{\mathbf{x}} = \hat{F}^{-1(s)}_{\bar{D}}(1-t|\mathbf{X}_{\bar{D}} = \mathbf{x}).} from which the posterior mean can be computed \deqn{\hat{c}_{\mathbf{x}} = \frac{1}{S}\sum_{s = 1}^{S}c^{(s)}_{\mathbf{x}}.} } \value{As a result, the function provides a list with the following components: \item{thresholds.est}{A matrix with the posterior mean of the AROC-based threshold values. 
The matrix has as many columns as different covariate vector values, and as many rows as different FPFs.} \item{thresholds.ql}{A matrix with the posterior 2.5\% quantile of the AROC-based threshold values. The matrix has as many columns as different covariate vector values, and as many rows as different FPFs.} \item{thresholds.qh}{A matrix with the posterior 97.5\% quantile of the AROC-based threshold values. The matrix has as many columns as different covariate vector values, and as many rows as different FPFs.} } \references{ Inacio de Carvalho, V., and Rodriguez-Alvarez, M. X. (2018). Bayesian nonparametric inference for the covariate-adjusted ROC curve. arXiv preprint arXiv:1806.00473. } %\author{ %% ~~who you are~~ %} %\note{ %% ~~further notes~~ %} %% ~Make other sections like Warning with \section{Warning }{....} ~ \seealso{ \code{\link{AROC.bnp}} } \examples{ library(AROC) data(psa) # Select the last measurement newpsa <- psa[!duplicated(psa$id, fromLast = TRUE),] # Log-transform the biomarker newpsa$l_marker1 <- log(newpsa$marker1) \donttest{ m0 <- AROC.bnp(formula.healthy = l_marker1 ~ f(age, K = 0), group = "status", tag.healthy = 0, data = newpsa, scale = TRUE, p = seq(0,1,l=101), compute.lpml = TRUE, compute.WAIC = TRUE, a = 2, b = 0.5, L = 10, nsim = 5000, nburn = 1000) # Compute the threshold values FPF = c(0.1, 0.3) newdata <- data.frame(age = seq(52, 80, l = 50)) th_bnp <- compute.threshold.AROC.bnp(m0, newdata, FPF) names(th_bnp) } \dontshow{ m0 <- AROC.bnp(formula.healthy = l_marker1 ~ f(age, K = 0), group = "status", tag.healthy = 0, data = newpsa, scale = TRUE, p = seq(0,1,l=101), compute.lpml = TRUE, compute.WAIC = TRUE, a = 2, b = 0.5, L = 10, nsim = 50, nburn = 10) # Compute the threshold values FPF = c(0.1, 0.3) newdata <- data.frame(age = 52) th_bnp <- compute.threshold.AROC.bnp(m0, newdata, FPF) names(th_bnp) } } % Add one or more standard keywords, see file 'KEYWORDS' in the % R documentation directory. 
%\keyword{ ~kwd1 }% use one of RShowDoc("KEYWORDS") %\keyword{ ~kwd2 }% __ONLY ONE__ keyword per line
/man/compute.threshold.AROC.bnp.Rd
no_license
cran/AROC
R
false
false
3,909
rd
\name{compute.threshold.AROC.bnp} \alias{compute.threshold.AROC.bnp} %- Also NEED an '\alias' for EACH other topic documented here. \title{ AROC-based threshold values. } \description{ Estimates AROC-based threshold values using the nonparametric Bayesian approach proposed by Inacio de Carvalho and Rodriguez-Alvarez (2018). } \usage{ compute.threshold.AROC.bnp(object, newdata, FPF = 0.5) } %- maybe also 'usage' for other objects documented here. \arguments{ \item{object}{An object of class \code{AROC} as produced by \code{\link{AROC.bnp}}.} \item{newdata}{Data frame with the covariate values at which threshold values are required.} \item{FPF}{Numeric vector with the FPF at which to calculate the AROC-based threshold values. Atomic values are also valid.} } \details{ Estimation of the covariate-adjusted ROC curve (AROC) using the nonparametric Bayesian approach proposed by Inacio de Carvalho and Rodriguez-Alvarez (2018) involves the estimation of the conditional distribution function for the diagnostic test outcome in the healthy population \deqn{F_{\bar{D}}(y|\mathbf{X}_{\bar{D}}) = Pr\{Y_{\bar{D}} \leq y | \mathbf{X}_{\bar{D}}\}.} This function makes use of this estimate in order to calculate AROC-based threshold values. In particular, for a covariate value \eqn{\mathbf{x}} and a FPF = t, the AROC-based threshold value at the \eqn{s}-th posterior sample (\eqn{s = 1,\ldots,S}) is calculated as follows \deqn{c^{(s)}_{\mathbf{x}} = \hat{F}^{-1(s)}_{\bar{D}}(1-t|\mathbf{X}_{\bar{D}} = \mathbf{x}).} from which the posterior mean can be computed \deqn{\hat{c}_{\mathbf{x}} = \frac{1}{S}\sum_{s = 1}^{S}c^{(s)}_{\mathbf{x}}.} } \value{As a result, the function provides a list with the following components: \item{thresholds.est}{A matrix with the posterior mean of the AROC-based threshold values. 
The matrix has as many columns as different covariate vector values, and as many rows as different FPFs.} \item{thresholds.ql}{A matrix with the posterior 2.5\% quantile of the AROC-based threshold values. The matrix has as many columns as different covariate vector values, and as many rows as different FPFs.} \item{thresholds.qh}{A matrix with the posterior 97.5\% quantile of the AROC-based threshold values. The matrix has as many columns as different covariate vector values, and as many rows as different FPFs.} } \references{ Inacio de Carvalho, V., and Rodriguez-Alvarez, M. X. (2018). Bayesian nonparametric inference for the covariate-adjusted ROC curve. arXiv preprint arXiv:1806.00473. } %\author{ %% ~~who you are~~ %} %\note{ %% ~~further notes~~ %} %% ~Make other sections like Warning with \section{Warning }{....} ~ \seealso{ \code{\link{AROC.bnp}} } \examples{ library(AROC) data(psa) # Select the last measurement newpsa <- psa[!duplicated(psa$id, fromLast = TRUE),] # Log-transform the biomarker newpsa$l_marker1 <- log(newpsa$marker1) \donttest{ m0 <- AROC.bnp(formula.healthy = l_marker1 ~ f(age, K = 0), group = "status", tag.healthy = 0, data = newpsa, scale = TRUE, p = seq(0,1,l=101), compute.lpml = TRUE, compute.WAIC = TRUE, a = 2, b = 0.5, L = 10, nsim = 5000, nburn = 1000) # Compute the threshold values FPF = c(0.1, 0.3) newdata <- data.frame(age = seq(52, 80, l = 50)) th_bnp <- compute.threshold.AROC.bnp(m0, newdata, FPF) names(th_bnp) } \dontshow{ m0 <- AROC.bnp(formula.healthy = l_marker1 ~ f(age, K = 0), group = "status", tag.healthy = 0, data = newpsa, scale = TRUE, p = seq(0,1,l=101), compute.lpml = TRUE, compute.WAIC = TRUE, a = 2, b = 0.5, L = 10, nsim = 50, nburn = 10) # Compute the threshold values FPF = c(0.1, 0.3) newdata <- data.frame(age = 52) th_bnp <- compute.threshold.AROC.bnp(m0, newdata, FPF) names(th_bnp) } } % Add one or more standard keywords, see file 'KEYWORDS' in the % R documentation directory. 
%\keyword{ ~kwd1 }% use one of RShowDoc("KEYWORDS") %\keyword{ ~kwd2 }% __ONLY ONE__ keyword per line
##
## nsga2.R - Interface to nsga2.c
##
## Authors:
##  Heike Trautmann <trautmann@statistik.uni-dortmund.de>
##  Detlef Steuer <detlef.steuer@hsu-hamburg.de>
##  Olaf Mersmann <olafm@statistik.uni-dortmund.de>
##

## Run the NSGA-II multiobjective evolutionary algorithm (C backend).
##
## Arguments:
##   fn           - objective function returning `odim` values per input
##   idim, odim   - decision / objective space dimensions
##   ...          - further arguments passed on to `fn` and `constraints`
##   constraints  - optional constraint function, NULL for none
##   cdim         - number of constraint values returned by `constraints`
##   lower.bounds, upper.bounds - box constraints on the decision space
##   popsize      - population size; the C code requires a multiple of 4
##   generations  - single count, or sorted vector of checkpoints at which
##                  intermediate populations are returned
##   cprob, cdist - crossover probability / distribution index
##   mprob, mdist - mutation probability / distribution index
##   vectorized   - if TRUE, `fn`/`constraints` receive the whole population
##                  matrix instead of one row at a time
##
## Returns an object of class c("nsga2", "mco"), or an "nsga2.collection"
## when several generation checkpoints were requested.
nsga2 <- function(fn, idim, odim, ...,
                  constraints=NULL, cdim=0,
                  lower.bounds=rep(-Inf, idim),
                  upper.bounds=rep(Inf, idim),
                  popsize=100, generations=100,
                  cprob=0.7, cdist=5,
                  mprob=0.2, mdist=10,
                  vectorized=FALSE) {
  ## Wrap user functions so the C side always evaluates a whole population
  ## matrix at once.
  ff <- if (vectorized) {
    function(x) fn(x, ...)
  } else {
    function(x) apply(x, 1, fn, ...)
  }
  cf <- if (vectorized) {
    function(x) constraints(x, ...)
  } else {
    function(x) apply(x, 1, constraints, ...)
  }

  ## Make sure popsize is a multiple of 4 (required by the C implementation).
  if (popsize %% 4 != 0)
    stop("Population size must be a multiple of 4")

  ## Check bounding box: the C code needs finite bounds, so substitute a
  ## quarter of the largest representable double for any infinities.
  if (any(is.infinite(lower.bounds)) || any(is.infinite(upper.bounds))) {
    warning("While it is possible to optimize an unconstrained problem, it is not recommended. Please consider adding finite upper and lower bounds.")
    sane_lower <- -.Machine$double.xmax / 4
    sane_upper <- .Machine$double.xmax / 4
    lower.bounds <- ifelse(lower.bounds == -Inf, sane_lower,
                           ifelse(lower.bounds == Inf, sane_upper, lower.bounds))
    upper.bounds <- ifelse(upper.bounds == -Inf, sane_lower,
                           ifelse(upper.bounds == Inf, sane_upper, upper.bounds))
  }

  ## Lag generations:
  ## C source expects each element of generations to be the number of
  ## generations to go forward before saving the next result set. This is
  ## unintuitive to specify, so we compute the lagged differences here.
  if (length(generations) > 1)
    generations <- c(generations[1], diff(generations))
  if (!all(generations > 0))
    stop("Cannot go back in time!
Your generations argument must be sorted!")

  ## Set cdim = 0 if no cfn was given:
  if (is.null(constraints))
    cdim <- 0

  res <- .Call(do_nsga2,
               ff, cf, sys.frame(),
               as.integer(odim),
               as.integer(cdim),
               as.integer(idim),
               lower.bounds, upper.bounds,
               as.integer(popsize),
               as.integer(generations),
               cprob, as.integer(cdist),
               mprob, as.integer(mdist))
  if (1 == length(res)) {
    res <- res[[1]]
    names(res) <- c("par", "value", "pareto.optimal")
    class(res) <- c("nsga2", "mco")
  } else {
    ## seq_along() is safe for zero-length results, unlike 1:length(res).
    for (i in seq_along(res)) {
      names(res[[i]]) <- c("par", "value", "pareto.optimal")
      class(res[[i]]) <- c("nsga2", "mco")
    }
    class(res) <- "nsga2.collection"
  }
  return(res)
}

## Plot an NSGA-II result: pareto-optimal points in red (x), dominated
## points in blue (filled circles). 2D gets a staircase front, 3D a
## scatterplot3d view (if available), otherwise a pairs plot.
plot.nsga2 <- function(x, ...) {
  v <- x$value
  o <- x$pareto.optimal
  d <- ncol(v)
  col <- ifelse(o, "red", "blue")
  pch <- ifelse(o, 4, 19)
  if (d <= 2) {
    plot(v, col=col, pch=pch, ...)
    ov <- v[o,]
    ov <- ov[order(ov[,1]),]
    lines(ov, col="red", type="s")
  } else if (d == 3) {
    ## requireNamespace() checks availability without attaching the
    ## package; require() would pollute the user's search path.
    if (requireNamespace("scatterplot3d", quietly = TRUE)) {
      scatterplot3d::scatterplot3d(v, color=ifelse(o, "red", "blue"))
    } else {
      pairs(v, col=col, pch=pch, ...)
    }
  } else {
    pairs(v, col=col, pch=pch, ...)
  }
}

## Plot every result of an nsga2.collection, prompting between pages.
## The original ended with a bare `return;`, which (invisibly) returned
## the `return` primitive itself, and dropped `...`; now `...` is
## forwarded to each plot and the collection is returned invisibly.
plot.nsga2.collection <- function(x, ...) {
  oask <- devAskNewPage(TRUE)
  on.exit(devAskNewPage(oask))
  for (res in x)
    plot(res, ...)
  invisible(x)
}

## Decision-space coordinates of the pareto-optimal points.
## drop = FALSE keeps a matrix even for a single optimal point, making the
## result shape consistent with paretoFront.nsga2.
paretoSet.nsga2 <- function(x, ...)
  x$par[x$pareto.optimal,,drop=FALSE]

## Objective-space values of the pareto-optimal points.
paretoFront.nsga2 <- function(x, ...)
  x$value[x$pareto.optimal,,drop=FALSE]
/mco/R/nsga2.R
no_license
ingted/R-Examples
R
false
false
3,611
r
##
## nsga2.R - Interface to nsga2.c
##
## Authors:
##  Heike Trautmann <trautmann@statistik.uni-dortmund.de>
##  Detlef Steuer <detlef.steuer@hsu-hamburg.de>
##  Olaf Mersmann <olafm@statistik.uni-dortmund.de>
##

## Run the NSGA-II multiobjective evolutionary algorithm (C backend).
##
## Arguments:
##   fn           - objective function returning `odim` values per input
##   idim, odim   - decision / objective space dimensions
##   ...          - further arguments passed on to `fn` and `constraints`
##   constraints  - optional constraint function, NULL for none
##   cdim         - number of constraint values returned by `constraints`
##   lower.bounds, upper.bounds - box constraints on the decision space
##   popsize      - population size; the C code requires a multiple of 4
##   generations  - single count, or sorted vector of checkpoints at which
##                  intermediate populations are returned
##   cprob, cdist - crossover probability / distribution index
##   mprob, mdist - mutation probability / distribution index
##   vectorized   - if TRUE, `fn`/`constraints` receive the whole population
##                  matrix instead of one row at a time
##
## Returns an object of class c("nsga2", "mco"), or an "nsga2.collection"
## when several generation checkpoints were requested.
nsga2 <- function(fn, idim, odim, ...,
                  constraints=NULL, cdim=0,
                  lower.bounds=rep(-Inf, idim),
                  upper.bounds=rep(Inf, idim),
                  popsize=100, generations=100,
                  cprob=0.7, cdist=5,
                  mprob=0.2, mdist=10,
                  vectorized=FALSE) {
  ## Wrap user functions so the C side always evaluates a whole population
  ## matrix at once.
  ff <- if (vectorized) {
    function(x) fn(x, ...)
  } else {
    function(x) apply(x, 1, fn, ...)
  }
  cf <- if (vectorized) {
    function(x) constraints(x, ...)
  } else {
    function(x) apply(x, 1, constraints, ...)
  }

  ## Make sure popsize is a multiple of 4 (required by the C implementation).
  if (popsize %% 4 != 0)
    stop("Population size must be a multiple of 4")

  ## Check bounding box: the C code needs finite bounds, so substitute a
  ## quarter of the largest representable double for any infinities.
  if (any(is.infinite(lower.bounds)) || any(is.infinite(upper.bounds))) {
    warning("While it is possible to optimize an unconstrained problem, it is not recommended. Please consider adding finite upper and lower bounds.")
    sane_lower <- -.Machine$double.xmax / 4
    sane_upper <- .Machine$double.xmax / 4
    lower.bounds <- ifelse(lower.bounds == -Inf, sane_lower,
                           ifelse(lower.bounds == Inf, sane_upper, lower.bounds))
    upper.bounds <- ifelse(upper.bounds == -Inf, sane_lower,
                           ifelse(upper.bounds == Inf, sane_upper, upper.bounds))
  }

  ## Lag generations:
  ## C source expects each element of generations to be the number of
  ## generations to go forward before saving the next result set. This is
  ## unintuitive to specify, so we compute the lagged differences here.
  if (length(generations) > 1)
    generations <- c(generations[1], diff(generations))
  if (!all(generations > 0))
    stop("Cannot go back in time!
Your generations argument must be sorted!")

  ## Set cdim = 0 if no cfn was given:
  if (is.null(constraints))
    cdim <- 0

  res <- .Call(do_nsga2,
               ff, cf, sys.frame(),
               as.integer(odim),
               as.integer(cdim),
               as.integer(idim),
               lower.bounds, upper.bounds,
               as.integer(popsize),
               as.integer(generations),
               cprob, as.integer(cdist),
               mprob, as.integer(mdist))
  if (1 == length(res)) {
    res <- res[[1]]
    names(res) <- c("par", "value", "pareto.optimal")
    class(res) <- c("nsga2", "mco")
  } else {
    ## seq_along() is safe for zero-length results, unlike 1:length(res).
    for (i in seq_along(res)) {
      names(res[[i]]) <- c("par", "value", "pareto.optimal")
      class(res[[i]]) <- c("nsga2", "mco")
    }
    class(res) <- "nsga2.collection"
  }
  return(res)
}

## Plot an NSGA-II result: pareto-optimal points in red (x), dominated
## points in blue (filled circles). 2D gets a staircase front, 3D a
## scatterplot3d view (if available), otherwise a pairs plot.
plot.nsga2 <- function(x, ...) {
  v <- x$value
  o <- x$pareto.optimal
  d <- ncol(v)
  col <- ifelse(o, "red", "blue")
  pch <- ifelse(o, 4, 19)
  if (d <= 2) {
    plot(v, col=col, pch=pch, ...)
    ov <- v[o,]
    ov <- ov[order(ov[,1]),]
    lines(ov, col="red", type="s")
  } else if (d == 3) {
    ## requireNamespace() checks availability without attaching the
    ## package; require() would pollute the user's search path.
    if (requireNamespace("scatterplot3d", quietly = TRUE)) {
      scatterplot3d::scatterplot3d(v, color=ifelse(o, "red", "blue"))
    } else {
      pairs(v, col=col, pch=pch, ...)
    }
  } else {
    pairs(v, col=col, pch=pch, ...)
  }
}

## Plot every result of an nsga2.collection, prompting between pages.
## The original ended with a bare `return;`, which (invisibly) returned
## the `return` primitive itself, and dropped `...`; now `...` is
## forwarded to each plot and the collection is returned invisibly.
plot.nsga2.collection <- function(x, ...) {
  oask <- devAskNewPage(TRUE)
  on.exit(devAskNewPage(oask))
  for (res in x)
    plot(res, ...)
  invisible(x)
}

## Decision-space coordinates of the pareto-optimal points.
## drop = FALSE keeps a matrix even for a single optimal point, making the
## result shape consistent with paretoFront.nsga2.
paretoSet.nsga2 <- function(x, ...)
  x$par[x$pareto.optimal,,drop=FALSE]

## Objective-space values of the pareto-optimal points.
paretoFront.nsga2 <- function(x, ...)
  x$value[x$pareto.optimal,,drop=FALSE]
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/stat-summary.r \name{stat_summary} \alias{stat_summary} \title{Summarise y values at every unique x.} \usage{ stat_summary(mapping = NULL, data = NULL, geom = "pointrange", position = "identity", ...) } \arguments{ \item{mapping}{The aesthetic mapping, usually constructed with \code{\link{aes}} or \code{\link{aes_string}}. Only needs to be set at the layer level if you are overriding the plot defaults.} \item{data}{A layer specific dataset - only needed if you want to override the plot defaults.} \item{geom}{The geometric object to use to display the data} \item{position}{The position adjustment to use for overlapping points on this layer} \item{...}{other arguments passed on to \code{\link{layer}}. This can include aesthetics whose values you want to set, not map. See \code{\link{layer}} for more details.} } \value{ a data.frame with additional columns: \item{fun.data}{Complete summary function. Should take numeric vector as input and return data frame as output} \item{fun.ymin}{ymin summary function (should take numeric vector and return single number)} \item{fun.y}{y summary function (should take numeric vector and return single number)} \item{fun.ymax}{ymax summary function (should take numeric vector and return single number)} } \description{ \code{stat_summary} allows for tremendous flexibility in the specification of summary functions. The summary function can either supply individual summary functions for each of y, ymin and ymax (with \code{fun.y}, \code{fun.ymax}, \code{fun.ymin}), or return a data frame containing any number of aesthetics with \code{fun.data}. All summary functions are called with a single vector of values, \code{x}. } \details{ A simple vector function is easiest to work with as you can return a single number, but is somewhat less flexible. If your summary function operates on a data.frame it should return a data frame with variables that the geom can use. 
} \section{Aesthetics}{ \Sexpr[results=rd,stage=build]{ggplot2.SparkR:::rd_aesthetics("stat", "summary")} } \examples{ \donttest{ # Basic operation on a small dataset d <- ggplot(mtcars, aes(cyl, mpg)) + geom_point() d + stat_summary(fun.data = "mean_cl_boot", colour = "red") p <- ggplot(mtcars, aes(cyl, mpg)) + stat_summary(fun.y = "mean", geom = "point") p # Don't use ylim to zoom into a summary plot - this throws the # data away p + ylim(15, 30) # Instead use coord_cartesian p + coord_cartesian(ylim = c(15, 30)) # You can supply individual functions to summarise the value at # each x: stat_sum_single <- function(fun, geom="point", ...) { stat_summary(fun.y=fun, colour="red", geom=geom, size = 3, ...) } d + stat_sum_single(mean) d + stat_sum_single(mean, geom="line") d + stat_sum_single(median) d + stat_sum_single(sd) d + stat_summary(fun.y = mean, fun.ymin = min, fun.ymax = max, colour = "red") d + aes(colour = factor(vs)) + stat_summary(fun.y = mean, geom="line") # Alternatively, you can supply a function that operates on a data.frame. # A set of useful summary functions is provided from the Hmisc package: stat_sum_df <- function(fun, geom="crossbar", ...) { stat_summary(fun.data=fun, colour="red", geom=geom, width=0.2, ...) } # The crossbar geom needs grouping to be specified when used with # a continuous x axis. 
d + stat_sum_df("mean_cl_boot", mapping = aes(group = cyl)) d + stat_sum_df("mean_sdl", mapping = aes(group = cyl)) d + stat_sum_df("mean_sdl", mult = 1, mapping = aes(group = cyl)) d + stat_sum_df("median_hilow", mapping = aes(group = cyl)) # There are lots of different geoms you can use to display the summaries d + stat_sum_df("mean_cl_normal", mapping = aes(group = cyl)) d + stat_sum_df("mean_cl_normal", geom = "errorbar") d + stat_sum_df("mean_cl_normal", geom = "pointrange") d + stat_sum_df("mean_cl_normal", geom = "smooth") # Summaries are more useful with a bigger data set: mpg2 <- subset(mpg, cyl != 5L) m <- ggplot(mpg2, aes(x=cyl, y=hwy)) + geom_point() + stat_summary(fun.data = "mean_sdl", geom = "linerange", colour = "red", size = 2, mult = 1) + xlab("cyl") m # An example with highly skewed distributions: set.seed(596) mov <- movies[sample(nrow(movies), 1000), ] m2 <- ggplot(mov, aes(x= factor(round(rating)), y=votes)) + geom_point() m2 <- m2 + stat_summary(fun.data = "mean_cl_boot", geom = "crossbar", colour = "red", width = 0.3) + xlab("rating") m2 # Notice how the overplotting skews off visual perception of the mean # supplementing the raw data with summary statistics is _very_ important # Next, we'll look at votes on a log scale. # Transforming the scale means the data are transformed # first, after which statistics are computed: m2 + scale_y_log10() # Transforming the coordinate system occurs after the # statistic has been computed. This means we're calculating the summary on the raw data # and stretching the geoms onto the log scale. Compare the widths of the # standard errors. m2 + coord_trans(y="log10") } } \seealso{ \code{\link{geom_errorbar}}, \code{\link{geom_pointrange}}, \code{\link{geom_linerange}}, \code{\link{geom_crossbar}} for geoms to display summarised data }
/man/stat_summary.Rd
no_license
cattapre/ggplot2.SparkR
R
false
true
5,290
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/stat-summary.r \name{stat_summary} \alias{stat_summary} \title{Summarise y values at every unique x.} \usage{ stat_summary(mapping = NULL, data = NULL, geom = "pointrange", position = "identity", ...) } \arguments{ \item{mapping}{The aesthetic mapping, usually constructed with \code{\link{aes}} or \code{\link{aes_string}}. Only needs to be set at the layer level if you are overriding the plot defaults.} \item{data}{A layer specific dataset - only needed if you want to override the plot defaults.} \item{geom}{The geometric object to use to display the data} \item{position}{The position adjustment to use for overlapping points on this layer} \item{...}{other arguments passed on to \code{\link{layer}}. This can include aesthetics whose values you want to set, not map. See \code{\link{layer}} for more details.} } \value{ a data.frame with additional columns: \item{fun.data}{Complete summary function. Should take numeric vector as input and return data frame as output} \item{fun.ymin}{ymin summary function (should take numeric vector and return single number)} \item{fun.y}{y summary function (should take numeric vector and return single number)} \item{fun.ymax}{ymax summary function (should take numeric vector and return single number)} } \description{ \code{stat_summary} allows for tremendous flexibility in the specification of summary functions. The summary function can either supply individual summary functions for each of y, ymin and ymax (with \code{fun.y}, \code{fun.ymax}, \code{fun.ymin}), or return a data frame containing any number of aesthetics with \code{fun.data}. All summary functions are called with a single vector of values, \code{x}. } \details{ A simple vector function is easiest to work with as you can return a single number, but is somewhat less flexible. If your summary function operates on a data.frame it should return a data frame with variables that the geom can use. 
} \section{Aesthetics}{ \Sexpr[results=rd,stage=build]{ggplot2.SparkR:::rd_aesthetics("stat", "summary")} } \examples{ \donttest{ # Basic operation on a small dataset d <- ggplot(mtcars, aes(cyl, mpg)) + geom_point() d + stat_summary(fun.data = "mean_cl_boot", colour = "red") p <- ggplot(mtcars, aes(cyl, mpg)) + stat_summary(fun.y = "mean", geom = "point") p # Don't use ylim to zoom into a summary plot - this throws the # data away p + ylim(15, 30) # Instead use coord_cartesian p + coord_cartesian(ylim = c(15, 30)) # You can supply individual functions to summarise the value at # each x: stat_sum_single <- function(fun, geom="point", ...) { stat_summary(fun.y=fun, colour="red", geom=geom, size = 3, ...) } d + stat_sum_single(mean) d + stat_sum_single(mean, geom="line") d + stat_sum_single(median) d + stat_sum_single(sd) d + stat_summary(fun.y = mean, fun.ymin = min, fun.ymax = max, colour = "red") d + aes(colour = factor(vs)) + stat_summary(fun.y = mean, geom="line") # Alternatively, you can supply a function that operates on a data.frame. # A set of useful summary functions is provided from the Hmisc package: stat_sum_df <- function(fun, geom="crossbar", ...) { stat_summary(fun.data=fun, colour="red", geom=geom, width=0.2, ...) } # The crossbar geom needs grouping to be specified when used with # a continuous x axis. 
d + stat_sum_df("mean_cl_boot", mapping = aes(group = cyl)) d + stat_sum_df("mean_sdl", mapping = aes(group = cyl)) d + stat_sum_df("mean_sdl", mult = 1, mapping = aes(group = cyl)) d + stat_sum_df("median_hilow", mapping = aes(group = cyl)) # There are lots of different geoms you can use to display the summaries d + stat_sum_df("mean_cl_normal", mapping = aes(group = cyl)) d + stat_sum_df("mean_cl_normal", geom = "errorbar") d + stat_sum_df("mean_cl_normal", geom = "pointrange") d + stat_sum_df("mean_cl_normal", geom = "smooth") # Summaries are more useful with a bigger data set: mpg2 <- subset(mpg, cyl != 5L) m <- ggplot(mpg2, aes(x=cyl, y=hwy)) + geom_point() + stat_summary(fun.data = "mean_sdl", geom = "linerange", colour = "red", size = 2, mult = 1) + xlab("cyl") m # An example with highly skewed distributions: set.seed(596) mov <- movies[sample(nrow(movies), 1000), ] m2 <- ggplot(mov, aes(x= factor(round(rating)), y=votes)) + geom_point() m2 <- m2 + stat_summary(fun.data = "mean_cl_boot", geom = "crossbar", colour = "red", width = 0.3) + xlab("rating") m2 # Notice how the overplotting skews off visual perception of the mean # supplementing the raw data with summary statistics is _very_ important # Next, we'll look at votes on a log scale. # Transforming the scale means the data are transformed # first, after which statistics are computed: m2 + scale_y_log10() # Transforming the coordinate system occurs after the # statistic has been computed. This means we're calculating the summary on the raw data # and stretching the geoms onto the log scale. Compare the widths of the # standard errors. m2 + coord_trans(y="log10") } } \seealso{ \code{\link{geom_errorbar}}, \code{\link{geom_pointrange}}, \code{\link{geom_linerange}}, \code{\link{geom_crossbar}} for geoms to display summarised data }
# Set up the dream differential-expression analysis for CMAP data:
# variance partitioning of expression, then contrast construction for
# dream(); the heavy fitting is run on O2 (see 2-run_dream.R and
# 2-process_dream.R).
library(variancePartition)
library(BiocParallel)

setwd("~/Documents/Batcave/GEO/ccdata/data-raw/")

pdata <- readRDS('cmap_es/pdata.rds')
all_exprs <- readRDS('cmap_es/all_exprs.rds')

# mixed effect model ---
# BUG FIX: was factor(platform) -- `platform` is not defined in this
# script; the column lives in pdata.
pdata$platform <- factor(pdata$platform)
pdata$cell <- factor(pdata$cell)
pdata$batch <- factor(pdata$batch)
pdata$treatment <- paste(pdata$drug,
                         paste0(pdata$molar, 'M'),
                         paste0(pdata$hours, 'h'),
                         sep='_')

# variance partition (treat all as random effects) ---
form <- ~(1|treatment) + (1|cell) + (1|batch) + (1|platform)
param <- SerialParam()
varPart <- fitExtractVarPartModel(all_exprs, form, pdata, BPPARAM = param)
saveRDS(varPart, 'cmap_es/varPart.rds')
plotVarPart(sortCols(varPart))

# remove effect of batch and platform then redo
varPart <- readRDS('cmap_es/varPart.rds')
param <- SerialParam()
residList <- fitVarPartModel(all_exprs, ~ (1|batch) + (1|platform), pdata,
                             BPPARAM = param, fxn=residuals)
residMatrix <- do.call(rbind, residList)

# fit model on residuals
form <- ~ (1|treatment) + (1|cell)
varPartResid <- fitExtractVarPartModel(residMatrix, form, pdata,
                                       BPPARAM = param)
saveRDS(varPartResid, 'cmap_es/varPartResid.rds')
plotVarPart(sortCols(varPartResid))

# differential expression analysis (treatment has to be fixed effect) ----
param <- SerialParam()
form <- ~ 0 + treatment + (1|cell) + (1|batch) + (1|platform)

# exclude treatments with colinearity issues (see below)
# NOTE(review): `maxs` is only computed further down, after the initial
# fit -- this section is intended to be re-run once `maxs` exists.
keep <- row.names(pdata)[!pdata$treatment %in% maxs]
pdata <- pdata[keep, ]
all_exprs <- all_exprs[, keep]

# univariate contrasts (faster to supply)
Linit <- variancePartition:::.getAllUniContrasts(all_exprs, form, pdata,
                                                 return.Linit = TRUE)

# interested contrasts
levels <- unique(pdata$treatment)
cons <- paste0('treatment', levels[!grepl('^ctl_', levels)])
L <- lapply(cons, function(con) {
  # 6h treatments are contrasted against the 6h control, everything else
  # against the 12h control
  ctrl <- ifelse(grepl('_6h$', con),
                 'treatmentctl_0M_6h', 'treatmentctl_0M_12h')
  getContrast(all_exprs, form, pdata, c(con, ctrl), L = Linit)
})
L <- do.call(cbind, L)
colnames(L) <- paste0('L', seq_len(ncol(L)))

# initial fit used to speed up subsequent
system.time(fitInit <- dream(all_exprs, form, pdata, L = L, Linit = Linit,
                             return.fitInit = TRUE, BPPARAM=param))
variancePartition:::checkModelStatus(fitInit, showWarnings=TRUE, dream=TRUE,
                                     colinearityCutoff=0.999)
#    user  system elapsed
# 252.463   0.390     230

# levels of treatment with very high correlation to ctrl/each other cause
# colinearity issues
# re-run above excluding maxs
vcor <- colinearityScore(fitInit)
vcor <- attributes(vcor)[[1]]
diag(vcor) <- 0
maxs <- apply(vcor, 2, max)
maxs <- names(maxs)[maxs > 0.999]
maxs <- setdiff(maxs, 'treatmentctl_0M_6h')
maxs <- gsub('^treatment', '', maxs)

Lret <- variancePartition:::format.contrasts(all_exprs, form, pdata, L,
                                             Linit = Linit)

# save arguments to run as parts
# see 2-run_dream.R and 2-process_dream.R
# ran on O2
dream_args <- list(form = form, pdata = pdata, L = L, Linit = Linit,
                   fitInit = fitInit, Lret = Lret)
dir.create('cmap_es/dream')
dir.create('cmap_es/dream/resLists')
saveRDS(dream_args, 'cmap_es/dream/dream_args.rds')
saveRDS(all_exprs, 'cmap_es/all_exprs.rds')

# save expression values for each gene as separate matrix
rpath <- '/n/scratch2/ap491/ccdata/data-raw/cmap_es/dream/resLists'
for (i in seq_len(nrow(all_exprs))) {
  cat('Working on', i, 'of', nrow(all_exprs), '\n')
  exprs_fpath <- file.path(rpath, paste(i, 'exprs.rds', sep = '_'))
  saveRDS(all_exprs[i,,drop=FALSE], exprs_fpath)
}
/data-raw/cmap_es/2b-setup_dream.R
no_license
alexvpickering/ccdata
R
false
false
3,449
r
# Set up the dream differential-expression analysis for CMAP data:
# variance partitioning of expression, then contrast construction for
# dream(); the heavy fitting is run on O2 (see 2-run_dream.R and
# 2-process_dream.R).
library(variancePartition)
library(BiocParallel)

setwd("~/Documents/Batcave/GEO/ccdata/data-raw/")

pdata <- readRDS('cmap_es/pdata.rds')
all_exprs <- readRDS('cmap_es/all_exprs.rds')

# mixed effect model ---
# BUG FIX: was factor(platform) -- `platform` is not defined in this
# script; the column lives in pdata.
pdata$platform <- factor(pdata$platform)
pdata$cell <- factor(pdata$cell)
pdata$batch <- factor(pdata$batch)
pdata$treatment <- paste(pdata$drug,
                         paste0(pdata$molar, 'M'),
                         paste0(pdata$hours, 'h'),
                         sep='_')

# variance partition (treat all as random effects) ---
form <- ~(1|treatment) + (1|cell) + (1|batch) + (1|platform)
param <- SerialParam()
varPart <- fitExtractVarPartModel(all_exprs, form, pdata, BPPARAM = param)
saveRDS(varPart, 'cmap_es/varPart.rds')
plotVarPart(sortCols(varPart))

# remove effect of batch and platform then redo
varPart <- readRDS('cmap_es/varPart.rds')
param <- SerialParam()
residList <- fitVarPartModel(all_exprs, ~ (1|batch) + (1|platform), pdata,
                             BPPARAM = param, fxn=residuals)
residMatrix <- do.call(rbind, residList)

# fit model on residuals
form <- ~ (1|treatment) + (1|cell)
varPartResid <- fitExtractVarPartModel(residMatrix, form, pdata,
                                       BPPARAM = param)
saveRDS(varPartResid, 'cmap_es/varPartResid.rds')
plotVarPart(sortCols(varPartResid))

# differential expression analysis (treatment has to be fixed effect) ----
param <- SerialParam()
form <- ~ 0 + treatment + (1|cell) + (1|batch) + (1|platform)

# exclude treatments with colinearity issues (see below)
# NOTE(review): `maxs` is only computed further down, after the initial
# fit -- this section is intended to be re-run once `maxs` exists.
keep <- row.names(pdata)[!pdata$treatment %in% maxs]
pdata <- pdata[keep, ]
all_exprs <- all_exprs[, keep]

# univariate contrasts (faster to supply)
Linit <- variancePartition:::.getAllUniContrasts(all_exprs, form, pdata,
                                                 return.Linit = TRUE)

# interested contrasts
levels <- unique(pdata$treatment)
cons <- paste0('treatment', levels[!grepl('^ctl_', levels)])
L <- lapply(cons, function(con) {
  # 6h treatments are contrasted against the 6h control, everything else
  # against the 12h control
  ctrl <- ifelse(grepl('_6h$', con),
                 'treatmentctl_0M_6h', 'treatmentctl_0M_12h')
  getContrast(all_exprs, form, pdata, c(con, ctrl), L = Linit)
})
L <- do.call(cbind, L)
colnames(L) <- paste0('L', seq_len(ncol(L)))

# initial fit used to speed up subsequent
system.time(fitInit <- dream(all_exprs, form, pdata, L = L, Linit = Linit,
                             return.fitInit = TRUE, BPPARAM=param))
variancePartition:::checkModelStatus(fitInit, showWarnings=TRUE, dream=TRUE,
                                     colinearityCutoff=0.999)
#    user  system elapsed
# 252.463   0.390     230

# levels of treatment with very high correlation to ctrl/each other cause
# colinearity issues
# re-run above excluding maxs
vcor <- colinearityScore(fitInit)
vcor <- attributes(vcor)[[1]]
diag(vcor) <- 0
maxs <- apply(vcor, 2, max)
maxs <- names(maxs)[maxs > 0.999]
maxs <- setdiff(maxs, 'treatmentctl_0M_6h')
maxs <- gsub('^treatment', '', maxs)

Lret <- variancePartition:::format.contrasts(all_exprs, form, pdata, L,
                                             Linit = Linit)

# save arguments to run as parts
# see 2-run_dream.R and 2-process_dream.R
# ran on O2
dream_args <- list(form = form, pdata = pdata, L = L, Linit = Linit,
                   fitInit = fitInit, Lret = Lret)
dir.create('cmap_es/dream')
dir.create('cmap_es/dream/resLists')
saveRDS(dream_args, 'cmap_es/dream/dream_args.rds')
saveRDS(all_exprs, 'cmap_es/all_exprs.rds')

# save expression values for each gene as separate matrix
rpath <- '/n/scratch2/ap491/ccdata/data-raw/cmap_es/dream/resLists'
for (i in seq_len(nrow(all_exprs))) {
  cat('Working on', i, 'of', nrow(all_exprs), '\n')
  exprs_fpath <- file.path(rpath, paste(i, 'exprs.rds', sep = '_'))
  saveRDS(all_exprs[i,,drop=FALSE], exprs_fpath)
}
# Assign per-result-type synergy tables (dataSynergyPlayer / dataSynergyTeam)
# into the global environment, optionally nested by season or widened.
.assign.synergy <- function(all_data, nest_data = FALSE, return_wide = FALSE) {
  result_types <- all_data$typeResult %>% unique()
  result_types %>%
    walk(function(result_type) {
      table_name <- glue::glue("dataSynergy{str_to_title(result_type)}") %>%
        as.character()
      df_result <- all_data %>%
        filter(typeResult == result_type) %>%
        select(-typeResult) %>%
        select(slugSeason, everything()) %>%
        arrange(slugSeason) %>%
        unnest() %>%
        distinct()
      if (result_type == "player") {
        # drop team aggregate rows that carry no player id
        df_result <- df_result %>%
          filter(!is.na(idPlayer))
      }
      if (return_wide) {
        df_long <- df_result %>%
          gather_data(
            variable_name = 'item',
            numeric_ids = c("^id", "gp", "numberJersey"),
            use_logical_keys = TRUE,
            use_factor_keys = TRUE,
            unite_columns = NULL,
            separate_columns = NULL,
            use_date_keys = FALSE
          )
        df_result <- df_long %>%
          unite(item, item, categorySynergy, sep = "") %>%
          return_wide()
      }
      if (nest_data) {
        df_result <- df_result %>%
          nest(-c(slugSeason))
      }
      assign(table_name, df_result, envir = .GlobalEnv)
    })
}

# Convert whole-number percentage columns (pctX...) to proportions in [0, 1].
.number_to_pct <- function(data) {
  pct_cols <- data %>%
    select(dplyr::matches("pct[A-Z]")) %>%
    names()
  data %>%
    mutate_at(pct_cols, funs(. %>% as.numeric() / 100))
}

# Lookup table mapping API synergy category slugs to display names
# (memoised so it is built only once).
.dictionary_synergy_categories <- memoise::memoise(function() {
  data_frame(
    nameSynergy = c(
      "Transition", "Isolation", "PRBallHandler", "PRRollman",
      "Postup", "Spotup", "Handoff", "Cut",
      "OffScreen", "OffRebound", "Misc"
    ),
    nameTable = c(
      "Transition", "Isolation", "Pick and Roll Ball Handler",
      "Pick and Roll Rollman", "Post Up", "Spot Up", "Handoff",
      "Cut", "OffScreen", "Off Rebound", "Misc"
    )
  )
})

# Fetch one synergy table from the stats.nba.com endpoint for a single
# (season, result_type, season_type, set_type, category) combination and
# return it as a one-row nested data frame (column dataSynergy holds the
# stats).
.get_synergy_category_data <-
  function(season = 2018,
           result_type = "player",
           season_type = "Regular Season",
           category = "transition",
           set_type = "offensive",
           results = 500,
           return_message = TRUE) {
    if (season < 2016) {
      stop("Synergy data starts for the 2015-16 season")
    }
    categories <- c(
      "Transition", "Isolation", "PRBallHandler", "PRRollman",
      "Postup", "Spotup", "Handoff", "Cut",
      "OffScreen", "OffRebound", "Misc"
    )
    cat_slug <- category %>% str_to_lower()
    wrong_cat <- cat_slug %>%
      str_detect(str_to_lower(categories)) %>%
      sum(na.rm = TRUE) == 0
    if (wrong_cat) {
      stop(glue::glue(
        "Synergy categories can only be:\n{str_c(categories, collapse = '\n')}"
      ))
    }
    slug_season_type <-
      case_when(season_type %>% str_to_lower() %>% str_detect("regular") ~ "REG",
                TRUE ~ "Post")
    slug_season <- generate_season_slug(season = season)
    # the API indexes seasons by their starting year
    season_synergy <- season - 1
    json_url <- glue::glue(
      "https://stats-prod.nba.com/wp-json/statscms/v1/synergy/{result_type}/?category={category}&season={season_synergy}&seasonType={slug_season_type}&names={set_type}&limit={results}"
    ) %>%
      as.character()
    if (return_message) {
      glue::glue(
        "Acquiring {result_type} synergy data for {str_to_lower(set_type)} {str_to_lower(category)} in the {str_to_lower(season_type)} during the {slug_season}"
      ) %>%
        cat(fill = TRUE)
    }
    json <- json_url %>% curl_json_to_vector()
    json_names <- json$results %>% names()
    actual_names <- json_names %>% resolve_nba_names()
    data <- json$results %>%
      as_data_frame() %>%
      purrr::set_names(actual_names) %>%
      mutate(
        categorySynergy = category,
        typeResult = result_type,
        slugSeason = slug_season
      ) %>%
      select(-one_of("yearSeason")) %>%
      dplyr::select(
        typeResult, typeSet, categorySynergy, slugSeason,
        typeSeason, everything()
      ) %>%
      mutate_at("idTeam", funs(. %>% as.numeric()))
    if (result_type %>% str_to_lower() == "player") {
      data <- data %>%
        unite(namePlayer, nameFirst, nameLast, sep = " ") %>%
        mutate(idPlayer = idPlayer %>% as.numeric())
    }
    if (data %>% has_name("tov")) {
      data <- data %>%
        dplyr::rename(pctTOV = tov)
    }
    # everything that is not a known character column is numeric
    num_cols <- data %>%
      select(-dplyr::matches(char_words())) %>%
      names()
    data <- data %>%
      mutate_at(num_cols, funs(. %>% as.character() %>% readr::parse_number()))
    ppp_names <- data %>%
      dplyr::select(dplyr::matches("PPP")) %>%
      names()
    if (ppp_names %>% length() > 0) {
      data <- data %>%
        mutate_at(ppp_names, funs(. %>% as.numeric()))
    }
    data <- data %>% .number_to_pct()
    data %>%
      nest(-c(slugSeason, typeResult, categorySynergy, typeSet),
           .key = dataSynergy)
  }

#' Get Synergy data for specified season
#'
#' Get Synergy data for specified result type,
#' category, season type and set
#'
#' @param seasons vector of seasons from 2016 onward
#' @param result_types result type \itemize{
#' \item team
#' \item player
#' }
#' @param categories vector of synergy categories options include: \itemize{
#' \item Transition
#' \item Isolation
#' \item PRBallHandler
#' \item PRRollman
#' \item Postup
#' \item Spotup
#' \item Handoff
#' \item Cut
#' \item OffScreen
#' \item OffRebound
#' \item Misc
#' }
#' @param season_types type of season play \itemize{
#' \item Playoffs
#' \item Regular Season
#' }
#' @param set_types set type \itemize{
#' \item offensive
#' \item defensive
#' }
#' @param results number of results
#' @param assign_to_environment if \code{TRUE} assigns table to environment
#' @param return_wide if \code{return_wide} returns a spread \code{data_frame}
#' @param nest_data if \code{TRUE} nests the assigned tables by season
#' @param return_message if \code{TRUE} prints an acquisition message
#'
#' @return a \code{data_frame}
#' @export
#' @import dplyr stringr magrittr curl jsonlite readr magrittr purrr tidyr rlang
#' @importFrom glue glue
#' @examples
#' synergy(seasons = 2019, result_types = c("player", "team"),
#'         season_types = c("Regular Season"),
#'         set_types = c("offensive", "defensive"),
#'         categories = c("Transition", "Isolation", "PRBallHandler",
#'                        "PRRollman", "Postup", "Spotup", "Handoff", "Cut",
#'                        "OffScreen", "OffRebound", "Misc"),
#'         results = 500, assign_to_environment = TRUE,
#'         return_wide = FALSE, return_message = TRUE)
synergy <- function(seasons = 2016:2018,
                    result_types = c("player", "team"),
                    season_types = c("Regular Season"),
                    set_types = c("offensive", "defensive"),
                    categories = c("Transition", "Isolation", "PRBallHandler",
                                   "PRRollman", "Postup", "Spotup", "Handoff",
                                   "Cut", "OffScreen", "OffRebound", "Misc"),
                    results = 500,
                    assign_to_environment = TRUE,
                    return_wide = FALSE,
                    nest_data = FALSE,
                    return_message = TRUE) {
  if (seasons %>% purrr::is_null()) {
    stop("please enter season")
  }
  # BUG FIX: the original tested the undefined object `types`; the
  # argument is named `result_types`.
  if (sum(str_to_lower(result_types) %in% c("player", "team"), na.rm = TRUE) == 0) {
    stop("Result type can only be player and/or team")
  }
  # one request per combination of the input parameters
  input_df <- expand.grid(
    season = seasons,
    result_type = result_types,
    season_type = season_types,
    set_type = set_types,
    category = categories,
    stringsAsFactors = FALSE
  ) %>%
    as_data_frame() %>%
    distinct()
  # possibly() turns failed requests into empty data frames instead of
  # aborting the whole acquisition
  .get_synergy_category_data_safe <-
    purrr::possibly(.get_synergy_category_data, data_frame())
  all_data <- seq_len(nrow(input_df)) %>%
    future_map_dfr(function(x) {
      df_row <- input_df %>% slice(x)
      df_row %$%
        .get_synergy_category_data_safe(
          season = season,
          result_type = result_type,
          season_type = season_type,
          category = category,
          set_type = set_type,
          results = results,
          return_message = return_message
        )
    }) %>%
    suppressWarnings()
  if (assign_to_environment) {
    all_data %>%
      .assign.synergy(nest_data = nest_data, return_wide = return_wide)
  }
  all_data
}
/R/synergy.R
no_license
franckess/nbastatR
R
false
false
8,668
r
.assign.synergy <- function(all_data, nest_data = F, return_wide = F) { result_types <- all_data$typeResult %>% unique() result_types %>% walk(function(result_type){ table_name <- glue::glue("dataSynergy{str_to_title(result_type)}") %>% as.character() df_result <- all_data %>% filter(typeResult == result_type) %>% select(-typeResult) %>% select(slugSeason, everything()) %>% arrange(slugSeason) %>% unnest() %>% distinct() if (result_type == "player") { df_result <- df_result %>% filter(!is.na(idPlayer)) } if (return_wide) { df_long <- df_result %>% gather_data( variable_name = 'item', numeric_ids = c("^id", "gp", "numberJersey"), use_logical_keys = TRUE, use_factor_keys = TRUE, unite_columns = NULL, separate_columns = NULL, use_date_keys = FALSE ) df_result <- df_long %>% unite(item, item, categorySynergy, sep = "") %>% return_wide() } if (nest_data) { df_result <- df_result %>% nest(-c(slugSeason)) } assign(table_name, df_result, envir = .GlobalEnv) }) } .number_to_pct <- function(data) { pct_cols <- data %>% select(dplyr::matches("pct[A-Z]")) %>% names() data %>% mutate_at(pct_cols, funs(. 
%>% as.numeric() / 100)) } .dictionary_synergy_categories <- memoise::memoise(function() { data_frame( nameSynergy = c( "Transition", "Isolation", "PRBallHandler", "PRRollman", "Postup", "Spotup", "Handoff", "Cut", "OffScreen", "OffRebound", "Misc" ), nameTable = c( "Transition", "Isolation", "Pick and Roll Ball Handler", "Pick and Roll Rollman", "Post Up", "Spot Up", "Handoff", "Cut", "OffScreen", "Off Rebound", "Misc" ) ) }) .get_synergy_category_data <- function(season = 2018, result_type = "player", season_type = "Regular Season", category = "transition", set_type = "offensive", results = 500, return_message = T ) { if (season < 2016) { stop("Synergy data starts for the 2015-16 season") } categories <- c( "Transition", "Isolation", "PRBallHandler", "PRRollman", "Postup", "Spotup", "Handoff", "Cut", "OffScreen", "OffRebound", "Misc" ) cat_slug <- category %>% str_to_lower() wrong_cat <- cat_slug %>% str_detect(str_to_lower(categories)) %>% sum(na.rm = T) == 0 if (wrong_cat) { stop(glue::glue( "Synergy categories can only be:\n{str_c(categories, collapse = '\n')}" )) } slug_season_type <- case_when(season_type %>% str_to_lower() %>% str_detect("regular") ~ "REG", TRUE ~ "Post") slug_season <- generate_season_slug(season = season) season_synergy <- season - 1 json_url <- glue::glue( "https://stats-prod.nba.com/wp-json/statscms/v1/synergy/{result_type}/?category={category}&season={season_synergy}&seasonType={slug_season_type}&names={set_type}&limit={results}" ) %>% as.character() if (return_message) { glue::glue( "Acquiring {result_type} synergy data for {str_to_lower(set_type)} {str_to_lower(category)} in the {str_to_lower(season_type)} during the {slug_season}" ) %>% cat(fill = T) } json <- json_url %>% curl_json_to_vector() json_names <- json$results %>% names() actual_names <- json_names %>% resolve_nba_names() data <- json$results %>% as_data_frame() %>% purrr::set_names(actual_names) %>% mutate( categorySynergy = category, typeResult = result_type, slugSeason 
= slug_season) %>% select(-one_of("yearSeason")) %>% dplyr::select( typeResult, typeSet, categorySynergy, slugSeason, typeSeason, everything() ) %>% mutate_at("idTeam", funs(. %>% as.numeric())) if (result_type %>% str_to_lower() == "player") { data <- data %>% unite(namePlayer, nameFirst, nameLast, sep = " ") %>% mutate(idPlayer = idPlayer %>% as.numeric()) } if (data %>% has_name("tov")){ data <- data %>% dplyr::rename(pctTOV = tov) } num_cols <- data %>% select(-dplyr::matches(char_words())) %>% names() data <- data %>% mutate_at(num_cols, funs(. %>% as.character() %>% readr::parse_number())) ppp_names <- data %>% dplyr::select(dplyr::matches("PPP")) %>% names() if (ppp_names %>% length() >0) { data <- data %>% mutate_at(ppp_names, funs(. %>% as.numeric())) } data <- data %>% .number_to_pct() data %>% nest(-c(slugSeason, typeResult, categorySynergy, typeSet), .key = dataSynergy) } #' Get Synergy data for specified season #' #' Get Synergy data for specified result type, #' category, season type and set #' #' @param seasons vector of seasons from 2016 onward #' @param result_types result type \itemize{ #' \item team #' \item player #' } #' @param categories vector of synergy categories options include: \itemize{ #' \item Transition #' \item Isolation #' \item PRBallHandler #' \item PRRollman #' \item Postup #' \item Spotup #' \item Handoff #' \item Cut #' \item OffScreen #' \item OffRebound #' \item Misc #' } #' @param season_types type of season play \itemize{ #' \item Playoffs #' \item Regular Season #' } #' @param set_types set type \itemize{ #' \item offensive #' \item defensive #' } #' @param results number of results #' @param assign_to_environment if \code{TRUE} assigns table to environment #' @param return_wide if \code{return_wide} returns a spread \code{data_frame} #' @param return_message #' #' @return a \code{data_frame} #' @export #' @import dplyr stringr magrittr curl jsonlite readr magrittr purrr tidyr rlang #' @importFrom glue glue #' @examples #' 
#' synergy(seasons = 2019, result_types = c("player", "team"),
#'         season_types = c("Regular Season"),
#'         set_types = c("offensive", "defensive"),
#'         categories = c("Transition", "Isolation", "PRBallHandler",
#'                        "PRRollman", "Postup", "Spotup", "Handoff", "Cut",
#'                        "OffScreen", "OffRebound", "Misc"),
#'         results = 500, assign_to_environment = TRUE, return_wide = F,
#'         return_message = TRUE)
synergy <-
  function(seasons = 2016:2018,
           result_types = c("player", "team"),
           season_types = c("Regular Season"),
           set_types = c("offensive", "defensive"),
           categories = c(
             "Transition",
             "Isolation",
             "PRBallHandler",
             "PRRollman",
             "Postup",
             "Spotup",
             "Handoff",
             "Cut",
             "OffScreen",
             "OffRebound",
             "Misc"
           ),
           results = 500,
           assign_to_environment = TRUE,
           return_wide = F,
           nest_data = F,
           return_message = TRUE) {
    if (seasons %>% purrr::is_null()) {
      stop("please enter season")
    }

    # BUG FIX: the original validated the undefined object `types`, which
    # would fail with "object 'types' not found" before any validation could
    # happen; the function argument is named `result_types`.
    if (result_types %>% str_to_lower() %in% c("player", "team") %>% sum(na.rm = T) == 0) {
      stop("Result type can only be player and/or team")
    }

    # One row per (season, result type, season type, set type, category)
    # combination to acquire from the Synergy endpoint.
    input_df <-
      expand.grid(
        season = seasons,
        result_type = result_types,
        season_type = season_types,
        set_type = set_types,
        category = categories,
        stringsAsFactors = F
      ) %>%
      as_data_frame() %>%
      distinct()

    # Wrap the scraper so that a single failed combination yields an empty
    # data frame instead of aborting the whole run.
    .get_synergy_category_data_safe <-
      purrr::possibly(.get_synergy_category_data, data_frame())

    all_data <-
      seq_len(nrow(input_df)) %>%  # seq_len() is safe if input_df is empty
      future_map_dfr(function(x) {
        df_row <- input_df %>% slice(x)
        df_row %$%
          .get_synergy_category_data_safe(
            season = season,
            result_type = result_type,
            season_type = season_type,
            category = category,
            set_type = set_type,
            results = results,
            return_message = return_message
          )
      }) %>%
      suppressWarnings()

    if (assign_to_environment) {
      all_data %>%
        .assign.synergy(nest_data = nest_data, return_wide = return_wide)
    }
    all_data
  }
## Return the name of the hospital in `state` that holds rank `num` for
## 30-day mortality from `outcome` ("heart attack", "heart failure", or
## "pneumonia"). `num` may also be "best" or "worst". A rank beyond the
## number of hospitals in the state yields NA.
rankhospital <- function(state, outcome, num = "best") {
  ## Read outcome data ("Not Available" entries become NA)
  data <- read.csv("outcome-of-care-measures.csv",
                   colClasses = "character",
                   na.strings = "Not Available")

  ## Check that state and outcome are valid
  validNames <- c("heart attack", "heart failure", "pneumonia")
  if (!outcome %in% validNames) {
    stop("invalid outcome")
  }
  validState <- unique(data[, 7])
  if (!state %in% validState) {
    stop("invalid state")
  }

  ## Map the human-readable outcome onto its column name in the CSV
  fullColName <- c(
    "Hospital.30.Day.Death..Mortality..Rates.from.Heart.Attack",
    "Hospital.30.Day.Death..Mortality..Rates.from.Heart.Failure",
    "Hospital.30.Day.Death..Mortality..Rates.from.Pneumonia"
  )
  colName <- fullColName[match(outcome, validNames)]

  ## Keep only this state's rows, then rank by rate ascending; ties are
  ## broken alphabetically by hospital name and NA rates are dropped.
  data.state <- data[data$State == state, ]
  rank.order <- order(as.numeric(data.state[[colName]]),
                      data.state[["Hospital.Name"]],
                      decreasing = FALSE,
                      na.last = NA)
  sorted.data.state <- data.state[rank.order, ]

  ## Translate "best"/"worst" into a numeric rank. Any numeric rank larger
  ## than the number of hospitals (or an unrecognized text value) indexes
  ## past the data frame and therefore returns NA, as intended.
  if (num == "best") {
    num <- 1
  }
  if (num == "worst") {
    num <- nrow(sorted.data.state)
  }

  sorted.data.state[num, "Hospital.Name"]  ## hospital at the requested rank
}

rankhospital("MN", "heart attack", 5000)
/2. Programming R/4/rankhospital.R
no_license
gravialex/Coursera-Data-Scientist
R
false
false
1,539
r
## Return the name of the hospital in `state` that holds rank `num` for
## 30-day mortality from `outcome` ("heart attack", "heart failure", or
## "pneumonia"). `num` may also be "best" or "worst". A rank beyond the
## number of hospitals in the state yields NA.
rankhospital <- function(state, outcome, num = "best") {
  ## Read outcome data ("Not Available" entries become NA)
  data <- read.csv("outcome-of-care-measures.csv",
                   colClasses = "character",
                   na.strings = "Not Available")

  ## Check that state and outcome are valid
  validNames <- c("heart attack", "heart failure", "pneumonia")
  if (!outcome %in% validNames) {
    stop("invalid outcome")
  }
  validState <- unique(data[, 7])
  if (!state %in% validState) {
    stop("invalid state")
  }

  ## Map the human-readable outcome onto its column name in the CSV
  fullColName <- c(
    "Hospital.30.Day.Death..Mortality..Rates.from.Heart.Attack",
    "Hospital.30.Day.Death..Mortality..Rates.from.Heart.Failure",
    "Hospital.30.Day.Death..Mortality..Rates.from.Pneumonia"
  )
  colName <- fullColName[match(outcome, validNames)]

  ## Keep only this state's rows, then rank by rate ascending; ties are
  ## broken alphabetically by hospital name and NA rates are dropped.
  data.state <- data[data$State == state, ]
  rank.order <- order(as.numeric(data.state[[colName]]),
                      data.state[["Hospital.Name"]],
                      decreasing = FALSE,
                      na.last = NA)
  sorted.data.state <- data.state[rank.order, ]

  ## Translate "best"/"worst" into a numeric rank. Any numeric rank larger
  ## than the number of hospitals (or an unrecognized text value) indexes
  ## past the data frame and therefore returns NA, as intended.
  if (num == "best") {
    num <- 1
  }
  if (num == "worst") {
    num <- nrow(sorted.data.state)
  }

  sorted.data.state[num, "Hospital.Name"]  ## hospital at the requested rank
}

rankhospital("MN", "heart attack", 5000)
#' Filter Corpus
#'
#' Removes all occurrences of the given words from each (.txt) file in
#' \code{ipath}. Generally used after some analysis of the summary_corpus
#' results, e.g. to remove the most and least frequent terms. Files are
#' processed in parallel over the number of cores specified, running
#' \code{filter_file} on each file in \code{ipath}.
#'
#' @param words A character vector of all the words to remove.
#' @param ipath A string specifying the path to all the text files to handle.
#' @param ncores A number specifying the number of cores to use.
#' @return (invisibly) A list with one element per file, each the result of
#'   \code{filter_file}.
#'
#' @examples
#' \dontrun{
#' filter_corpus(most_common_terms, "/path/to/corpus/", 10)
#' filter_corpus(stopwords, "/path/to/corpus/", 10)
#' }
filter_corpus = function (words, ipath, ncores) {
  # check if ipath exists
  if (!dir.exists(ipath)) stop("no input directory")

  # check if there are text files in input directory
  # (note: `pattern` is a regex, so ".txt" matches any character before "txt")
  filelist = list.files(path = ipath, pattern=".txt", full.names = TRUE)
  if (length(filelist) < 1 ) stop ("no (.txt) files in directory")

  cluster = makeCluster(ncores)
  # BUG FIX: release the worker processes even if filter_file() errors
  # mid-run; the original leaked the cluster on failure.
  on.exit(stopCluster(cluster), add = TRUE)

  # BUG FIX: parLapply() supplies each element of `filelist` as the FIRST
  # positional argument of FUN, but filter_file()'s first parameter is
  # `words`. The original call parLapply(cluster, filelist, filter_file,
  # words) therefore passed each file path as `words` and the word vector as
  # the file path. Naming `words` makes each path bind to `ifilepath`.
  processed = parLapply(cluster, filelist, filter_file, words = words)

  # BUG FIX: the original computed `processed` but returned the value of
  # stopCluster(); return the per-file results invisibly instead.
  invisible(processed)
}

#' Filter File
#'
#' A wrapper around the rcpp_filter function. Given a file and a list of
#' words, removes each occurrence of those words from that file. Modifies
#' the input file in place, so no output file is specified.
#'
#' @param words A character vector of all the words to remove.
#' @param ifilepath A string specifying the path to the input file.
#' @return ifilepath
#'
#' @examples
#' \dontrun{
#' filter_file(most_common_terms, "/path/to/file.txt")
#' filter_file(stopwords, "/path/to/file.txt")
#' }
filter_file = function (words, ifilepath) {
  res = rcpp_filter(ifilepath, words)
  return (res)
}
/R/filter_corpus.R
no_license
avkoehl/textprocessingDSI
R
false
false
1,836
r
#' Filter Corpus
#'
#' Removes all occurrences of the given words from each (.txt) file in
#' \code{ipath}. Generally used after some analysis of the summary_corpus
#' results, e.g. to remove the most and least frequent terms. Files are
#' processed in parallel over the number of cores specified, running
#' \code{filter_file} on each file in \code{ipath}.
#'
#' @param words A character vector of all the words to remove.
#' @param ipath A string specifying the path to all the text files to handle.
#' @param ncores A number specifying the number of cores to use.
#' @return (invisibly) A list with one element per file, each the result of
#'   \code{filter_file}.
#'
#' @examples
#' \dontrun{
#' filter_corpus(most_common_terms, "/path/to/corpus/", 10)
#' filter_corpus(stopwords, "/path/to/corpus/", 10)
#' }
filter_corpus = function (words, ipath, ncores) {
  # check if ipath exists
  if (!dir.exists(ipath)) stop("no input directory")

  # check if there are text files in input directory
  # (note: `pattern` is a regex, so ".txt" matches any character before "txt")
  filelist = list.files(path = ipath, pattern=".txt", full.names = TRUE)
  if (length(filelist) < 1 ) stop ("no (.txt) files in directory")

  cluster = makeCluster(ncores)
  # BUG FIX: release the worker processes even if filter_file() errors
  # mid-run; the original leaked the cluster on failure.
  on.exit(stopCluster(cluster), add = TRUE)

  # BUG FIX: parLapply() supplies each element of `filelist` as the FIRST
  # positional argument of FUN, but filter_file()'s first parameter is
  # `words`. The original call parLapply(cluster, filelist, filter_file,
  # words) therefore passed each file path as `words` and the word vector as
  # the file path. Naming `words` makes each path bind to `ifilepath`.
  processed = parLapply(cluster, filelist, filter_file, words = words)

  # BUG FIX: the original computed `processed` but returned the value of
  # stopCluster(); return the per-file results invisibly instead.
  invisible(processed)
}

#' Filter File
#'
#' A wrapper around the rcpp_filter function. Given a file and a list of
#' words, removes each occurrence of those words from that file. Modifies
#' the input file in place, so no output file is specified.
#'
#' @param words A character vector of all the words to remove.
#' @param ifilepath A string specifying the path to the input file.
#' @return ifilepath
#'
#' @examples
#' \dontrun{
#' filter_file(most_common_terms, "/path/to/file.txt")
#' filter_file(stopwords, "/path/to/file.txt")
#' }
filter_file = function (words, ifilepath) {
  res = rcpp_filter(ifilepath, words)
  return (res)
}
## Course 3 Project
## Step 1: Download the data files

## Get required packages and set the Directory
## NOTE(review): installing packages on every run is wasteful; consider
## guarding with requireNamespace(). Kept for fidelity to the assignment.
install.packages("data.table")
install.packages("reshape2")
library(data.table)
library(reshape2)
setwd("C:/Giri/RProgramming/Courseera/Course3")
path <- getwd()

## Download the required data for analysis
url <- "https://d396qusza40orc.cloudfront.net/getdata%2Fprojectfiles%2FUCI%20HAR%20Dataset.zip"
f <- "Dataset.zip"
if (!file.exists(path)) {
  dir.create(path)
}
download.file(url, file.path(path, f))

## Unzip the file (shells out to 7-Zip on Windows)
executable <- file.path("C:", "Program Files (x86)", "7-Zip", "7z.exe")
parameters <- "x"
cmd <- paste(paste0("\"", executable, "\""), parameters,
             paste0("\"", file.path(path, f), "\""))
system(cmd)

## review files unzipped
## NOTE(review): the zip usually extracts to "UCI HAR Dataset"; confirm the
## extracted folder really is named "UCIHARDataset" on this machine.
pathIn <- file.path(path, "UCIHARDataset")
list.files(pathIn, recursive = TRUE)

## Step 2: Load the Files in R for analysis
dtSubTrain <- fread(file.path(pathIn, "train", "subject_train.txt"))
## BUG FIX: the original inspected `dtSubjectTrain` / `dtSubjectTest`, which
## are never defined; the objects created above are dtSubTrain / dtSubTest.
names(dtSubTrain)
dtSubTest <- fread(file.path(pathIn, "test", "subject_test.txt"))
names(dtSubTest)
dtActTrain <- fread(file.path(pathIn, "train", "Y_train.txt"))
names(dtActTrain)
dtActTest <- fread(file.path(pathIn, "test", "Y_test.txt"))

## Read a whitespace-delimited file and convert it to a data.table.
fileToDataTable <- function(f) {
  df <- read.table(f)
  dt <- data.table(df)
}
dtTrain <- fileToDataTable(file.path(pathIn, "train", "X_train.txt"))
dtTest <- fileToDataTable(file.path(pathIn, "test", "X_test.txt"))

## Step 3: Merging the data
dtSub <- rbind(dtSubTrain, dtSubTest)
setnames(dtSub, "V1", "subject")
names(dtSub)
dtAct <- rbind(dtActTrain, dtActTest)
setnames(dtAct, "V1", "activityNum")
dt <- rbind(dtTrain, dtTest)
names(dt)
## BUG FIX: the original bound `dtSubject` and `dtActivity`, neither of which
## exists; the merged tables built above are dtSub and dtAct.
dtSubAct <- cbind(dtSub, dtAct)
dt <- cbind(dtSubAct, dt)
setkey(dt, subject, activityNum)
names(dt)

## Step 4: Extract Mean and Standard Deviation features only
dtFeatures <- fread(file.path(pathIn, "features.txt"))
setnames(dtFeatures, names(dtFeatures), c("featureNum", "featureName"))
dtFeatures <- dtFeatures[grepl("mean\\(\\)|std\\(\\)", featureName)]
## Build the "V<n>" column codes matching the measurement columns in dt
dtFeatures$featureCode <- dtFeatures[, paste0("V", featureNum)]
head(dtFeatures)
dtFeatures$featureCode
select <- c(key(dt), dtFeatures$featureCode)
dt <- dt[, select, with = FALSE]

## Step 5: Use descriptive names
dtActivityNames <- fread(file.path(pathIn, "activity_labels.txt"))
setnames(dtActivityNames, names(dtActivityNames),
         c("activityNum", "activityName"))

## Step 6: Label with descriptive activity names
dt <- merge(dt, dtActivityNames, by = "activityNum", all.x = TRUE)
setkey(dt, subject, activityNum, activityName)
## Melt to long form: one row per (subject, activity, feature) measurement
dt <- data.table(melt(dt, key(dt), variable.name = "featureCode"))
head(dt)
dt <- merge(dt, dtFeatures[, list(featureNum, featureCode, featureName)],
            by = "featureCode", all.x = TRUE)
head(dt)
dt$activity <- factor(dt$activityName)
dt$feature <- factor(dt$featureName)
head(dt)

## Helper: logical vector of which feature names match the given regex
grepthis <- function(regex) {
  grepl(regex, dt$feature)
}

## Features with 2 categories (the matrix product maps each feature to the
## index of the category it matches; 0 means neither)
n <- 2
y <- matrix(seq(1, n), nrow = n)
x <- matrix(c(grepthis("^t"), grepthis("^f")), ncol = nrow(y))
dt$featDomain <- factor(x %*% y, labels = c("Time", "Freq"))
x <- matrix(c(grepthis("Acc"), grepthis("Gyro")), ncol = nrow(y))
dt$featInstrument <- factor(x %*% y, labels = c("Accelerometer", "Gyroscope"))
x <- matrix(c(grepthis("BodyAcc"), grepthis("GravityAcc")), ncol = nrow(y))
dt$featAcceleration <- factor(x %*% y, labels = c(NA, "Body", "Gravity"))
x <- matrix(c(grepthis("mean()"), grepthis("std()")), ncol = nrow(y))
dt$featVariable <- factor(x %*% y, labels = c("Mean", "SD"))

## Features with 1 category
dt$featJerk <- factor(grepthis("Jerk"), labels = c(NA, "Jerk"))
dt$featMagnitude <- factor(grepthis("Mag"), labels = c(NA, "Magnitude"))

## Features with 3 categories
n <- 3
y <- matrix(seq(1, n), nrow = n)
x <- matrix(c(grepthis("-X"), grepthis("-Y"), grepthis("-Z")), ncol = nrow(y))
dt$featAxis <- factor(x %*% y, labels = c(NA, "X", "Y", "Z"))
head(dt)

## Sanity check: the derived categorical columns should partition the
## features exactly as the feature factor itself does
r1 <- nrow(dt[, .N, by = c("feature")])
r2 <- nrow(dt[, .N, by = c("featDomain", "featAcceleration", "featInstrument",
                           "featJerk",
                           "featMagnitude", "featVariable", "featAxis")])
r1 == r2

## Step 7: Create a Tidy dataset of per-group counts and averages
setkey(dt, subject, activity, featDomain, featAcceleration, featInstrument,
       featJerk, featMagnitude, featVariable, featAxis)
dtTidy <- dt[, list(count = .N, average = mean(value)), by = key(dt)]
head(dtTidy)
/run_analysis.R
no_license
Giridash/GetCleanData
R
false
false
4,549
r
## Course 3 Project
## Step 1: Download the data files

## Get required packages and set the Directory
## NOTE(review): installing packages on every run is wasteful; consider
## guarding with requireNamespace(). Kept for fidelity to the assignment.
install.packages("data.table")
install.packages("reshape2")
library(data.table)
library(reshape2)
setwd("C:/Giri/RProgramming/Courseera/Course3")
path <- getwd()

## Download the required data for analysis
url <- "https://d396qusza40orc.cloudfront.net/getdata%2Fprojectfiles%2FUCI%20HAR%20Dataset.zip"
f <- "Dataset.zip"
if (!file.exists(path)) {
  dir.create(path)
}
download.file(url, file.path(path, f))

## Unzip the file (shells out to 7-Zip on Windows)
executable <- file.path("C:", "Program Files (x86)", "7-Zip", "7z.exe")
parameters <- "x"
cmd <- paste(paste0("\"", executable, "\""), parameters,
             paste0("\"", file.path(path, f), "\""))
system(cmd)

## review files unzipped
## NOTE(review): the zip usually extracts to "UCI HAR Dataset"; confirm the
## extracted folder really is named "UCIHARDataset" on this machine.
pathIn <- file.path(path, "UCIHARDataset")
list.files(pathIn, recursive = TRUE)

## Step 2: Load the Files in R for analysis
dtSubTrain <- fread(file.path(pathIn, "train", "subject_train.txt"))
## BUG FIX: the original inspected `dtSubjectTrain` / `dtSubjectTest`, which
## are never defined; the objects created above are dtSubTrain / dtSubTest.
names(dtSubTrain)
dtSubTest <- fread(file.path(pathIn, "test", "subject_test.txt"))
names(dtSubTest)
dtActTrain <- fread(file.path(pathIn, "train", "Y_train.txt"))
names(dtActTrain)
dtActTest <- fread(file.path(pathIn, "test", "Y_test.txt"))

## Read a whitespace-delimited file and convert it to a data.table.
fileToDataTable <- function(f) {
  df <- read.table(f)
  dt <- data.table(df)
}
dtTrain <- fileToDataTable(file.path(pathIn, "train", "X_train.txt"))
dtTest <- fileToDataTable(file.path(pathIn, "test", "X_test.txt"))

## Step 3: Merging the data
dtSub <- rbind(dtSubTrain, dtSubTest)
setnames(dtSub, "V1", "subject")
names(dtSub)
dtAct <- rbind(dtActTrain, dtActTest)
setnames(dtAct, "V1", "activityNum")
dt <- rbind(dtTrain, dtTest)
names(dt)
## BUG FIX: the original bound `dtSubject` and `dtActivity`, neither of which
## exists; the merged tables built above are dtSub and dtAct.
dtSubAct <- cbind(dtSub, dtAct)
dt <- cbind(dtSubAct, dt)
setkey(dt, subject, activityNum)
names(dt)

## Step 4: Extract Mean and Standard Deviation features only
dtFeatures <- fread(file.path(pathIn, "features.txt"))
setnames(dtFeatures, names(dtFeatures), c("featureNum", "featureName"))
dtFeatures <- dtFeatures[grepl("mean\\(\\)|std\\(\\)", featureName)]
## Build the "V<n>" column codes matching the measurement columns in dt
dtFeatures$featureCode <- dtFeatures[, paste0("V", featureNum)]
head(dtFeatures)
dtFeatures$featureCode
select <- c(key(dt), dtFeatures$featureCode)
dt <- dt[, select, with = FALSE]

## Step 5: Use descriptive names
dtActivityNames <- fread(file.path(pathIn, "activity_labels.txt"))
setnames(dtActivityNames, names(dtActivityNames),
         c("activityNum", "activityName"))

## Step 6: Label with descriptive activity names
dt <- merge(dt, dtActivityNames, by = "activityNum", all.x = TRUE)
setkey(dt, subject, activityNum, activityName)
## Melt to long form: one row per (subject, activity, feature) measurement
dt <- data.table(melt(dt, key(dt), variable.name = "featureCode"))
head(dt)
dt <- merge(dt, dtFeatures[, list(featureNum, featureCode, featureName)],
            by = "featureCode", all.x = TRUE)
head(dt)
dt$activity <- factor(dt$activityName)
dt$feature <- factor(dt$featureName)
head(dt)

## Helper: logical vector of which feature names match the given regex
grepthis <- function(regex) {
  grepl(regex, dt$feature)
}

## Features with 2 categories (the matrix product maps each feature to the
## index of the category it matches; 0 means neither)
n <- 2
y <- matrix(seq(1, n), nrow = n)
x <- matrix(c(grepthis("^t"), grepthis("^f")), ncol = nrow(y))
dt$featDomain <- factor(x %*% y, labels = c("Time", "Freq"))
x <- matrix(c(grepthis("Acc"), grepthis("Gyro")), ncol = nrow(y))
dt$featInstrument <- factor(x %*% y, labels = c("Accelerometer", "Gyroscope"))
x <- matrix(c(grepthis("BodyAcc"), grepthis("GravityAcc")), ncol = nrow(y))
dt$featAcceleration <- factor(x %*% y, labels = c(NA, "Body", "Gravity"))
x <- matrix(c(grepthis("mean()"), grepthis("std()")), ncol = nrow(y))
dt$featVariable <- factor(x %*% y, labels = c("Mean", "SD"))

## Features with 1 category
dt$featJerk <- factor(grepthis("Jerk"), labels = c(NA, "Jerk"))
dt$featMagnitude <- factor(grepthis("Mag"), labels = c(NA, "Magnitude"))

## Features with 3 categories
n <- 3
y <- matrix(seq(1, n), nrow = n)
x <- matrix(c(grepthis("-X"), grepthis("-Y"), grepthis("-Z")), ncol = nrow(y))
dt$featAxis <- factor(x %*% y, labels = c(NA, "X", "Y", "Z"))
head(dt)

## Sanity check: the derived categorical columns should partition the
## features exactly as the feature factor itself does
r1 <- nrow(dt[, .N, by = c("feature")])
r2 <- nrow(dt[, .N, by = c("featDomain", "featAcceleration", "featInstrument",
                           "featJerk",
                           "featMagnitude", "featVariable", "featAxis")])
r1 == r2

## Step 7: Create a Tidy dataset of per-group counts and averages
setkey(dt, subject, activity, featDomain, featAcceleration, featInstrument,
       featJerk, featMagnitude, featVariable, featAxis)
dtTidy <- dt[, list(count = .N, average = mean(value)), by = key(dt)]
head(dtTidy)
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/rm.R \name{step_rm} \alias{step_rm} \alias{tidy.step_rm} \title{General Variable Filter} \usage{ step_rm(recipe, ..., role = NA, trained = FALSE, removals = NULL, skip = FALSE, id = rand_id("rm")) \method{tidy}{step_rm}(x, ...) } \arguments{ \item{recipe}{A recipe object. The step will be added to the sequence of operations for this recipe.} \item{...}{One or more selector functions to choose which variables that will evaluated by the filtering bake. See \code{\link[=selections]{selections()}} for more details. For the \code{tidy} method, these are not currently used.} \item{role}{Not used by this step since no new variables are created.} \item{trained}{A logical to indicate if the quantities for preprocessing have been estimated.} \item{removals}{A character string that contains the names of columns that should be removed. These values are not determined until \code{\link[=prep.recipe]{prep.recipe()}} is called.} \item{skip}{A logical. Should the step be skipped when the recipe is baked by \code{\link[=bake.recipe]{bake.recipe()}}? While all operations are baked when \code{\link[=prep.recipe]{prep.recipe()}} is run, some operations may not be able to be conducted on new data (e.g. processing the outcome variable(s)). Care should be taken when using \code{skip = TRUE} as it may affect the computations for subsequent operations} \item{id}{A character string that is unique to this step to identify it.} \item{x}{A \code{step_rm} object.} } \value{ An updated version of \code{recipe} with the new step added to the sequence of existing steps (if any). For the \code{tidy} method, a tibble with columns \code{terms} which is the columns that will be removed. } \description{ \code{step_rm} creates a \emph{specification} of a recipe step that will remove variables based on their name, type, or role. 
} \examples{ data(biomass) biomass_tr <- biomass[biomass$dataset == "Training",] biomass_te <- biomass[biomass$dataset == "Testing",] rec <- recipe(HHV ~ carbon + hydrogen + oxygen + nitrogen + sulfur, data = biomass_tr) library(dplyr) smaller_set <- rec \%>\% step_rm(contains("gen")) smaller_set <- prep(smaller_set, training = biomass_tr) filtered_te <- bake(smaller_set, biomass_te) filtered_te tidy(smaller_set, number = 1) } \concept{preprocessing} \concept{variable_filters} \keyword{datagen}
/man/step_rm.Rd
no_license
ewv88/recipes
R
false
true
2,431
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/rm.R \name{step_rm} \alias{step_rm} \alias{tidy.step_rm} \title{General Variable Filter} \usage{ step_rm(recipe, ..., role = NA, trained = FALSE, removals = NULL, skip = FALSE, id = rand_id("rm")) \method{tidy}{step_rm}(x, ...) } \arguments{ \item{recipe}{A recipe object. The step will be added to the sequence of operations for this recipe.} \item{...}{One or more selector functions to choose which variables that will evaluated by the filtering bake. See \code{\link[=selections]{selections()}} for more details. For the \code{tidy} method, these are not currently used.} \item{role}{Not used by this step since no new variables are created.} \item{trained}{A logical to indicate if the quantities for preprocessing have been estimated.} \item{removals}{A character string that contains the names of columns that should be removed. These values are not determined until \code{\link[=prep.recipe]{prep.recipe()}} is called.} \item{skip}{A logical. Should the step be skipped when the recipe is baked by \code{\link[=bake.recipe]{bake.recipe()}}? While all operations are baked when \code{\link[=prep.recipe]{prep.recipe()}} is run, some operations may not be able to be conducted on new data (e.g. processing the outcome variable(s)). Care should be taken when using \code{skip = TRUE} as it may affect the computations for subsequent operations} \item{id}{A character string that is unique to this step to identify it.} \item{x}{A \code{step_rm} object.} } \value{ An updated version of \code{recipe} with the new step added to the sequence of existing steps (if any). For the \code{tidy} method, a tibble with columns \code{terms} which is the columns that will be removed. } \description{ \code{step_rm} creates a \emph{specification} of a recipe step that will remove variables based on their name, type, or role. 
} \examples{ data(biomass) biomass_tr <- biomass[biomass$dataset == "Training",] biomass_te <- biomass[biomass$dataset == "Testing",] rec <- recipe(HHV ~ carbon + hydrogen + oxygen + nitrogen + sulfur, data = biomass_tr) library(dplyr) smaller_set <- rec \%>\% step_rm(contains("gen")) smaller_set <- prep(smaller_set, training = biomass_tr) filtered_te <- bake(smaller_set, biomass_te) filtered_te tidy(smaller_set, number = 1) } \concept{preprocessing} \concept{variable_filters} \keyword{datagen}
# Positions of the three landmarks
land_x <- c(1, 2, 2.5)
land_y <- c(1.5, 1.5, 0.7)

# Situation at t-1: position, landmarks, and distance (red lines) between
# the position and each landmark
plot(0, 0, main = "state t-1", col = "red", type = "p", xlab = "x",
     ylab = "y", xlim = c(-0.5, 3), ylim = c(-0.5, 2), pch = 19, cex = 2,
     cex.main = 3)
lines(c(0, 1), c(0, 1.5), col = "red")
lines(c(0, 2), c(0, 1.5), col = "red")
lines(c(0, 2.5), c(0, 0.7), col = "red")
points(land_x, land_y, col = "blue", pch = 19, cex = 3)

# Prediction step: the position changes (arrow), landmarks stay; no
# distances between position and landmarks are drawn yet
# BUG FIX: the displayed plot title was misspelled "precition step"
plot(c(0, 1), c(0, 0.5), main = "prediction step", col = "red", type = "p",
     xlab = "x", ylab = "y", xlim = c(-0.5, 3), ylim = c(-0.5, 2), pch = 19,
     cex = 2, cex.main = 3)
arrows(0, 0, 1, 0.5, length = 0.2, adj = 1, col = "black", lwd = 3)
points(land_x, land_y, col = "blue", pch = 19, cex = 3)

# Update step: measure distances between the new position and the landmarks;
# these measurements are used for the updates in the algorithm
plot(c(0, 1), c(0, 0.5), main = "update step", col = "red", type = "p",
     xlab = "x", ylab = "y", xlim = c(-0.5, 3), ylim = c(-0.5, 2), pch = 19,
     cex = 2, cex.main = 3)
arrows(0, 0, 1, 0.5, length = 0.2, adj = 1, col = "black", lwd = 3)
lines(c(1, 1), c(0.5, 1.5), col = "red")
lines(c(1, 2), c(0.5, 1.5), col = "red")
lines(c(1, 2.5), c(0.5, 0.7), col = "red")
points(land_x, land_y, col = "blue", pch = 19, cex = 3)
/data-cleaning/plot-second-milestone.R
permissive
YutingYao/DBPro-EKF-SLAM
R
false
false
1,473
r
# Positions of the three landmarks
land_x <- c(1, 2, 2.5)
land_y <- c(1.5, 1.5, 0.7)

# Situation at t-1: position, landmarks, and distance (red lines) between
# the position and each landmark
plot(0, 0, main = "state t-1", col = "red", type = "p", xlab = "x",
     ylab = "y", xlim = c(-0.5, 3), ylim = c(-0.5, 2), pch = 19, cex = 2,
     cex.main = 3)
lines(c(0, 1), c(0, 1.5), col = "red")
lines(c(0, 2), c(0, 1.5), col = "red")
lines(c(0, 2.5), c(0, 0.7), col = "red")
points(land_x, land_y, col = "blue", pch = 19, cex = 3)

# Prediction step: the position changes (arrow), landmarks stay; no
# distances between position and landmarks are drawn yet
# BUG FIX: the displayed plot title was misspelled "precition step"
plot(c(0, 1), c(0, 0.5), main = "prediction step", col = "red", type = "p",
     xlab = "x", ylab = "y", xlim = c(-0.5, 3), ylim = c(-0.5, 2), pch = 19,
     cex = 2, cex.main = 3)
arrows(0, 0, 1, 0.5, length = 0.2, adj = 1, col = "black", lwd = 3)
points(land_x, land_y, col = "blue", pch = 19, cex = 3)

# Update step: measure distances between the new position and the landmarks;
# these measurements are used for the updates in the algorithm
plot(c(0, 1), c(0, 0.5), main = "update step", col = "red", type = "p",
     xlab = "x", ylab = "y", xlim = c(-0.5, 3), ylim = c(-0.5, 2), pch = 19,
     cex = 2, cex.main = 3)
arrows(0, 0, 1, 0.5, length = 0.2, adj = 1, col = "black", lwd = 3)
lines(c(1, 1), c(0.5, 1.5), col = "red")
lines(c(1, 2), c(0.5, 1.5), col = "red")
lines(c(1, 2.5), c(0.5, 0.7), col = "red")
points(land_x, land_y, col = "blue", pch = 19, cex = 3)
library(aroma.affymetrix)

### Name: SnpPlm
### Title: The SnpPlm interface class
### Aliases: SnpPlm
### Keywords: classes

### ** Examples

for (zzz in 0) {

# Verbose logger with timestamped progress output
verbose <- Arguments$getVerbose(-2)
timestampOn(verbose)

# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# Locate an example SNP dataset; with no path configured the demo
# bails out immediately via the enclosing single-pass for loop.
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
path <- NULL
if (is.null(path)) break

if (!exists("ds")) {
  ds <- AffymetrixCelSet$fromFiles(path)
}
print(ds)

# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# Build a couple of probe-level models (PLMs) for the dataset
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
if (!exists("models", mode = "list")) {
  mergeStrands <- TRUE
  models <- list(
    rma = RmaSnpPlm(ds, mergeStrands = mergeStrands),
    mbei = MbeiSnpPlm(ds, mergeStrands = mergeStrands)
#   affine = AffineSnpPlm(ds, background = FALSE, mergeStrands = mergeStrands)
  )
}
print(models)

# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# Fit the same subset of units with every model. Because the unit set
# is identical across models, internal caching reads the probe data
# from disk only once; see the log for reading speed.
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
units <- 56:155
for (model in models) {
  ruler(verbose)
  fit(model, units = units, force = TRUE, verbose = verbose)
}

# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# For each unit, plot the estimated (thetaB, thetaA) pair from every
# model, one colour per model.
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -

# Plot on the log2 scale?
useLog <- TRUE

# Only prompt the user between plots when several units are drawn
opar <- par(ask = (length(units) > 1))

Alab <- expression(theta[A])
Blab <- expression(theta[B])
lim <- if (useLog) c(6, 16) else c(0, 2^15)

for (unit in units) {
  for (ii in seq_along(models)) {
    ces <- getChipEffects(models[[ii]])
    ceUnit <- ces[unit]
    snpName <- names(ceUnit)[1]
    theta <- ceUnit[[1]]
    thetaA <- theta[[1]]$theta
    thetaB <- theta[[2]]$theta
    if (useLog) {
      thetaA <- log(thetaA, base = 2)
      thetaB <- log(thetaB, base = 2)
    }

    # The first model initialises the canvas for this SNP
    if (ii == 1) {
      plot(NA, xlim = lim, ylim = lim, xlab = Blab, ylab = Alab,
           main = snpName)
      abline(a = 0, b = 1, lty = 2)
    }

    # Overlay this model's estimates
    points(thetaB, thetaA, col = ii, pch = 19)
  }
}  # for (unit ...)

# Restore graphical parameter settings
par(opar)

}  # for (zzz in 0)
rm(zzz)
/data/genthat_extracted_code/aroma.affymetrix/examples/SnpPlm.Rd.R
no_license
surayaaramli/typeRrh
R
false
false
2,707
r
library(aroma.affymetrix)

### Name: SnpPlm
### Title: The SnpPlm interface class
### Aliases: SnpPlm
### Keywords: classes

### ** Examples

for (zzz in 0) {

# Verbose logger with timestamped progress output
verbose <- Arguments$getVerbose(-2)
timestampOn(verbose)

# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# Locate an example SNP dataset; with no path configured the demo
# bails out immediately via the enclosing single-pass for loop.
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
path <- NULL
if (is.null(path)) break

if (!exists("ds")) {
  ds <- AffymetrixCelSet$fromFiles(path)
}
print(ds)

# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# Build a couple of probe-level models (PLMs) for the dataset
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
if (!exists("models", mode = "list")) {
  mergeStrands <- TRUE
  models <- list(
    rma = RmaSnpPlm(ds, mergeStrands = mergeStrands),
    mbei = MbeiSnpPlm(ds, mergeStrands = mergeStrands)
#   affine = AffineSnpPlm(ds, background = FALSE, mergeStrands = mergeStrands)
  )
}
print(models)

# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# Fit the same subset of units with every model. Because the unit set
# is identical across models, internal caching reads the probe data
# from disk only once; see the log for reading speed.
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
units <- 56:155
for (model in models) {
  ruler(verbose)
  fit(model, units = units, force = TRUE, verbose = verbose)
}

# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# For each unit, plot the estimated (thetaB, thetaA) pair from every
# model, one colour per model.
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -

# Plot on the log2 scale?
useLog <- TRUE

# Only prompt the user between plots when several units are drawn
opar <- par(ask = (length(units) > 1))

Alab <- expression(theta[A])
Blab <- expression(theta[B])
lim <- if (useLog) c(6, 16) else c(0, 2^15)

for (unit in units) {
  for (ii in seq_along(models)) {
    ces <- getChipEffects(models[[ii]])
    ceUnit <- ces[unit]
    snpName <- names(ceUnit)[1]
    theta <- ceUnit[[1]]
    thetaA <- theta[[1]]$theta
    thetaB <- theta[[2]]$theta
    if (useLog) {
      thetaA <- log(thetaA, base = 2)
      thetaB <- log(thetaB, base = 2)
    }

    # The first model initialises the canvas for this SNP
    if (ii == 1) {
      plot(NA, xlim = lim, ylim = lim, xlab = Blab, ylab = Alab,
           main = snpName)
      abline(a = 0, b = 1, lty = 2)
    }

    # Overlay this model's estimates
    points(thetaB, thetaA, col = ii, pch = 19)
  }
}  # for (unit ...)

# Restore graphical parameter settings
par(opar)

}  # for (zzz in 0)
rm(zzz)
## plot4.R -- draw a 2x2 panel of household power-consumption charts for
## 2007-02-01 and 2007-02-02, then copy the result to plot4.png.

## First get data. Not using nrow and skip as it messes with dataframe column names
df1 <- read.table("household_power_consumption.txt", header = TRUE, sep = ";",
                  na.strings = "?")
## Combine Date+Time into a single timestamp, then keep only the two days
df1$Date <- strptime(paste(df1$Date, df1$Time), "%d/%m/%Y %H:%M:%S")
df <- df1[df1$Date < strptime("2007-02-03", "%Y-%m-%d"), -2]
rm(df1) ## Remove memory intensive df1
df <- df[df$Date >= strptime("2007-02-01", "%Y-%m-%d"), ]

## Now start the plotting of the graphs
## BUG FIX: the original called par(mfrow<-c(2,2)), which assigns c(2,2) to a
## stray global variable `mfrow` and passes the value to par() as an UNNAMED
## argument, so the 2x2 layout was never actually applied. `mfrow` must be
## passed as a named argument.
par(mfrow = c(2, 2)) ## Make 2X2 graphs on screen

## Chart 1: Line Graph between Global Active Power vs Day
plot(df$Date, df$Global_active_power, type = "l", xlab = "",
     ylab = "Global Active Power")

## Chart 2: Line Graph between Voltage vs Day
plot(df$Date, df$Voltage, type = "l", xlab = "datetime", ylab = "Voltage")

## Chart 3: Energy Sub Metering graph vs Day
plot(df$Date, df$Sub_metering_1, type = "n", xlab = "",
     ylab = "Energy sub metering")
lines(df$Date, df$Sub_metering_1, col = "black")
lines(df$Date, df$Sub_metering_2, col = "red")
lines(df$Date, df$Sub_metering_3, col = "blue")
legend("topright", lty = 1, cex = 0.8, bty = "n", y.intersp = 0.2, xjust = 1,
       col = c("black", "red", "blue"),
       legend = c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"))

## Chart 4: Line Graph between Global Reactive Power vs Day
plot(df$Date, df$Global_reactive_power, type = "l", xlab = "datetime",
     ylab = "Global_reactive_power")

## Copy the screen device to a PNG file and close it
dev.copy(png, file = "plot4.png")
dev.off()
/plot4.R
no_license
sharath333/ExData_Plotting1
R
false
false
1,314
r
## First get data. Not using nrow and skip as it messes with dataframe column names df1<-read.table("household_power_consumption.txt",header=T,sep=";",na.strings="?") df1$Date<-strptime(paste(df1$Date,df1$Time),"%d/%m/%Y %H:%M:%S") df<-df1[df1$Date<strptime("2007-02-03","%Y-%m-%d"),-2] rm(df1) ## Remove memory intensive df1 df<-df[df$Date>=strptime("2007-02-01","%Y-%m-%d"),] ## Now start the plotting of the graphs par(mfrow<-c(2,2)) ## Make 2X2 graphs on screen ## Chart 1: Line Graph between Global Active Power vs Day plot(df$Date,df$Global_active_power,type="l",xlab="",ylab="Global Active Power") ## Chart 2: Line Graph between Voltage vs Day plot(df$Date,df$Voltage,type="l",xlab="datetime",ylab="Voltage") ## Chart 3: Energy Sub Metering graph vs Day plot(df$Date,df$Sub_metering_1,type="n",xlab="",ylab="Energy sub metering") lines(df$Date,df$Sub_metering_1,col="black") lines(df$Date,df$Sub_metering_2,col="red") lines(df$Date,df$Sub_metering_3,col="blue") legend("topright",lty=1,cex=0.8,bty="n",y.intersp=0.2,xjust=1,col=c("black","red","blue"),legend=c("Sub_metering_1","Sub_metering_2","Sub_metering_3")) ## Chart 4: Line Graph between Global Reactive Power vs Day plot(df$Date,df$Global_reactive_power,type="l",xlab="datetime",ylab="Global_reactive_power") dev.copy(png,file="plot4.png") dev.off()
# Exercise 1: writing and executing functions # Write a function `AddThree` that adds 3 to an input value AddThree <- function(num){ return(num + 3) } # Create a variable `ten` by passing 7 to your `AddThree` function ten <- AddThree(7) # Write a function `FeetToMeters` that converts from feet to meters FeetToMeters <- function(feet){ return(feet * 0.3048) } # Create a variable `height.in.feet` that is your height in feet height.in.feet <- 5.5 # Create a variable `height.in.meters` by passing `height.in.feet` to your `FeetToMeters` function height.in.meters <- FeetToMeters(height.in.feet)
/exercise-1/exercise.R
permissive
KyleAvalani/ch6-functions
R
false
false
604
r
# Exercise 1: writing and executing functions # Write a function `AddThree` that adds 3 to an input value AddThree <- function(num){ return(num + 3) } # Create a variable `ten` by passing 7 to your `AddThree` function ten <- AddThree(7) # Write a function `FeetToMeters` that converts from feet to meters FeetToMeters <- function(feet){ return(feet * 0.3048) } # Create a variable `height.in.feet` that is your height in feet height.in.feet <- 5.5 # Create a variable `height.in.meters` by passing `height.in.feet` to your `FeetToMeters` function height.in.meters <- FeetToMeters(height.in.feet)
library(gstat) ### Name: ncp.grid ### Title: Grid for the NCP, the Dutch part of the North Sea ### Aliases: ncp.grid ### Keywords: datasets ### ** Examples data(ncp.grid) summary(ncp.grid)
/data/genthat_extracted_code/gstat/examples/ncp.grid.Rd.R
no_license
surayaaramli/typeRrh
R
false
false
196
r
library(gstat) ### Name: ncp.grid ### Title: Grid for the NCP, the Dutch part of the North Sea ### Aliases: ncp.grid ### Keywords: datasets ### ** Examples data(ncp.grid) summary(ncp.grid)
### Numerical gradient==== grad <- function(mod, par, ..., Interval=1e-6) { mat <- matrix(par, nrow=length(par), ncol=length(par)) for (i in 1:ncol(mat)) { mat[i,i] <- par[i] + Interval } df <- vector("numeric", length=length(par)) f_x <- mod(par, ...) for (i in 1:ncol(mat)) { df[i] <- (mod(mat[,i], ...) - f_x) / Interval } df[which(!is.finite(df))] <- 0 return(df) return(df) } steep <- function(mod, par, ..., est, df) { mod(est - {par[1]*df*{(abs(df) > sd(df)) * 1}} - {par[2]*df*{(abs(df) <= sd(df)) * 1}}, ...) } steepA <- function(mod, par, ..., est, df) { mod(est - {par*df}, ...) } Gammas <- function(mod, par, ..., est, df, vi, si, iter, epsilon) { v <- {{par[1] * vi} + {{1 - par[1]} * df }}/{1 - {par[1] ^ iter}} s <- {{par[2] * si} + {{1 - par[2]} * {df*df}}}/{1 - {par[2] ^ iter}} newV <- est - {{par[3]*v}/{epsilon+sqrt(s)}} return(mod(newV, ...)) } sHAR <- function(par, mod, ...) { theta <- rnorm(length(par)) d <- theta / sqrt(sum(theta*theta)) u <- suppressWarnings(optim(.5, steepA, mod=mod, df=d, ..., est=par, method="Nelder-Mead")$par) prop <- par - {u * d} return(list("prop"=prop, "u"=u, "d"=d)) } reltol <- function(x, tol) tol * (abs(x) + tol) ### Adam==== ADAM <- function(fn, startvalue, gr, ..., Interval=1e-6, maxit=100, tol=1e-8, verbose=T) { # Opening message if(verbose==T) { cat("Steepest Adam will run for ", maxit, " iterations at most.\n\n", sep="") startTime = proc.time() } # Initial settings epsilon <- Interval if(verbose==T) { pb <- txtProgressBar(min=0, max=maxit, style=3) } par <- s <- v <- G <- matrix(NA, nrow=maxit, ncol=length(startvalue)) GG <- matrix(NA, nrow=maxit, ncol=3) f <- vector("numeric", length=maxit) convergence = F # First step G[1,] <- gr(fn, startvalue, ..., Interval=Interval) alpha <- suppressWarnings(unlist(optim(0, fn=steepA, mod=fn, df=G[1,], ..., est=startvalue, method="Nelder-Mead")$par)) GG[1,] <- c(.5, .5, alpha) v[1,] <- G[1,]*GG[1,1] s[1,] <- G[1,]*GG[1,2] par[1,] <- startvalue - {alpha*G[1,]} f[1] <- fn(par[1,], 
...) if(verbose==T) { setTxtProgressBar(pb, 1) } # Run ADAM algorithm for(i in 2:maxit) { G[i,] <- gr(fn, par[i-1,], ..., Interval=Interval) GG[i,] <- suppressWarnings(unlist(optim(par=c(0,0,0), fn=Gammas, mod=fn, df=G[i,], ..., est=par[i-1,], vi=v[i-1,], si=s[i-1,], iter=i, epsilon=epsilon)$par)) if (sum(abs(GG[i,])) == 0) { GG[i,] <- suppressWarnings(unlist(ucminf(par=c(0,0,0), fn=Gammas, mod=fn, df=G[i,], ..., est=par[i-1,], vi=v[i-1,], si=s[i-1,], iter=i, epsilon=epsilon)$par)) } gammav <- GG[i,1] gammas <- GG[i,2] alpha <- GG[i,3] v[i,] <- {{gammav * v[i-1,]} + {{1 - gammav} * G[i,] }}/{1 - {gammav^i}} s[i,] <- {{gammas * s[i-1,]} + {{1 - gammas} * {G[i,]*G[i,]}}}/{1 - {gammas^i}} par[i,] <- par[i-1,] - {{alpha*v[i,]}/{epsilon+sqrt(s[i,])}} f[i] <- fn(par[i,], ...) # Check convergence if (reltol(f[i], tol) > abs(f[i] - f[i-1])) { convergence = T if (verbose==T) { setTxtProgressBar(pb, maxit) cat("\nConvergence achieved!") } break } else { if (verbose==T) { setTxtProgressBar(pb, i) } } } if ({convergence==F} & {verbose==T}) { cat("\nConvergence may not have been achieved!") } if (i < maxit) { f <- f[-c({i+1}:maxit)] par <- as.matrix(par[-c({i+1}:maxit),]) G <- G[-c({i+1}:maxit),] s <- s[-c({i+1}:maxit),] v <- v[-c({i+1}:maxit),] GG <- GG[-c({i+1}:maxit),] } if (verbose==T) { close(pb) cat("\n") # Final messages stopTime = proc.time() elapsedTime = stopTime - startTime cat("It took ",round(elapsedTime[3],2), " secs for the run to finish. 
\n", sep="") } # Return results Results <- list("Cost"=f, "Estimates"=par, "Gradients"=G, "DSG"=s, "Momentum"=v, "par"=par[which.min(f),], "value"=f[which.min(f)], "steps"=GG) return(Results) } ### Steepest 2-group Gradient Descent==== STDM <- function(fn, startvalue, gr, ..., Interval=1e-6, maxit=100, tol=1e-8, verbose=T) { # Opening message if(verbose==T) { cat("Steepest 2-group Gradient Descent will run for ", maxit, " iterations at most.\n\n", sep="") startTime = proc.time() } # Initial settings if(verbose==T) { pb <- txtProgressBar(min=0, max=maxit, style=3) } #par <- df <- array(dim = c(maxit,length(startvalue))) par <- df <- matrix(NA, nrow=maxit, ncol=length(startvalue)) f <- vector("numeric", length=maxit) step <- array(dim = c(maxit,2)) convergence = F # First step df[1,] <- gr(fn, startvalue, ..., Interval=Interval) step[1,] <- suppressWarnings(unlist(optim(c(0,0), fn=steep, mod=fn, ..., df=df[1,], est=startvalue, method="Nelder-Mead")$par)) par[1,] <- startvalue - {step[1,1]*df[1,]*{(abs(df[1,]) > sd(df[1,])) * 1}} - {step[1,2]*df[1,]*{(abs(df[1,]) <= sd(df[1,])) * 1}} f[1] <- fn(par[1,], ...) if(verbose==T) { setTxtProgressBar(pb, 1) } # Start estimation for(run in 2:maxit) { # Calculate gradient and estimate step parameter df[run,] <- gr(fn, par[run-1,], ..., Interval=Interval) step[run,] <- suppressWarnings(unlist(optim(step[run-1,], fn=steep, mod=fn, ..., df=df[run,], est=par[run-1,], method="Nelder-Mead")$par)) if (sum(abs(step[run,])) == 0) { step[run,] <- suppressWarnings(unlist(ucminf(par=c(0,0), fn=steep, mod=fn, ..., df=df[run,], est=par[run-1,])$par)) } par[run,] <- par[run-1,] - {step[run,1]*df[run,]*{(abs(df[run,]) > sd(df[run,])) * 1}} - {step[run,2]*df[run,]*{(abs(df[run,]) <= sd(df[run,])) * 1}} f[run] <- fn(par[run,], ...) 
# Check convergence if (reltol(f[run], tol) > abs(f[run] - f[run-1])) { convergence = T if(verbose==T) { setTxtProgressBar(pb, maxit) cat("\nConvergence achieved!") } break } else { if(verbose==T) { setTxtProgressBar(pb, run) } } } if ({convergence==F} && {verbose==T}) { cat("\nConvergence may not have been achieved!") } if (run < maxit) { f <- f[-c({run+1}:maxit)] par <- par[-c({run+1}:maxit),] df <- df[-c({run+1}:maxit),] step <- step[-c({run+1}:maxit),] } if(verbose==T) { close(pb) cat("\n") # Final messages stopTime = proc.time() elapsedTime = stopTime - startTime cat("It took ",round(elapsedTime[3],2)," secs for the run to finish. \n", sep="") } # Return results Results <- list("Cost"=f, "Estimates"=par, "Gradients"=df, "par"=par[which.min(f),], "value"=f[which.min(f)], "steps"=step) return(Results) } ### Steepest Hit-and-Run Gradient Descent==== HRGD <- function(fn, startvalue, gr, ..., Interval=1e-6, maxit=100) { # Opening message cat("Hit-and-Run Gradient Descent will run for ", maxit, " iterations at most.\n\n", sep="") startTime = proc.time() # Initial settings pb <- txtProgressBar(min=0, max=maxit, style=3) par <- df <- array(dim = c(maxit,length(startvalue))) f <- vector("numeric", length=maxit) step <- vector("numeric", length=maxit) convergence = F # First step df[1,] <- gr(fn, startvalue, ..., Interval=Interval) SS <- suppressWarnings(unlist(optim(c(0,0), fn=steep, mod=fn, ..., df=df[1,], est=startvalue, method="Nelder-Mead")$par)) step[1] <- mean(SS) par[1,] <- startvalue - {SS[1]*df[1,]*{(abs(df[1,]) > sd(df[1,])) * 1}} - {SS[2]*df[1,]*{(abs(df[1,]) <= sd(df[1,])) * 1}} f[1] <- fn(par[1,], ...) setTxtProgressBar(pb, 1) # Start estimation for(run in 2:maxit) { # Calculate gradient and estimate step parameter temp <- sHAR(par=par[run-1,], mod=fn, ...) df[run,] <- temp$d step[run] <- temp$u par[run,] <- temp$prop f[run] <- fn(par[run,], ...) 
# Check convergence if ({step[run] - step[run-1]} == 0) { convergence = T setTxtProgressBar(pb, maxit) cat("\nConvergence achieved!") break } else { setTxtProgressBar(pb, run) } } if (convergence == F) { cat("\nConvergence may not have been achieved!") } if (run < maxit) { f <- f[-c({run+1}:maxit)] par <- par[-c({run+1}:maxit),] df <- df[-c({run+1}:maxit),] step <- step[-c({run+1}:maxit)] } close(pb) cat("\n") # Final messages stopTime = proc.time() elapsedTime = stopTime - startTime cat("It took ",round(elapsedTime[3],2)," secs for the run to finish. \n", sep="") # Return results Results <- list("Cost"=f, "Estimates"=par, "Gradients"=df, "par"=par[which.min(f),], "value"=f[which.min(f)], "steps"=step) return(Results) }
/R/algorithms.R
no_license
cran/optimg
R
false
false
10,029
r
### Numerical gradient==== grad <- function(mod, par, ..., Interval=1e-6) { mat <- matrix(par, nrow=length(par), ncol=length(par)) for (i in 1:ncol(mat)) { mat[i,i] <- par[i] + Interval } df <- vector("numeric", length=length(par)) f_x <- mod(par, ...) for (i in 1:ncol(mat)) { df[i] <- (mod(mat[,i], ...) - f_x) / Interval } df[which(!is.finite(df))] <- 0 return(df) return(df) } steep <- function(mod, par, ..., est, df) { mod(est - {par[1]*df*{(abs(df) > sd(df)) * 1}} - {par[2]*df*{(abs(df) <= sd(df)) * 1}}, ...) } steepA <- function(mod, par, ..., est, df) { mod(est - {par*df}, ...) } Gammas <- function(mod, par, ..., est, df, vi, si, iter, epsilon) { v <- {{par[1] * vi} + {{1 - par[1]} * df }}/{1 - {par[1] ^ iter}} s <- {{par[2] * si} + {{1 - par[2]} * {df*df}}}/{1 - {par[2] ^ iter}} newV <- est - {{par[3]*v}/{epsilon+sqrt(s)}} return(mod(newV, ...)) } sHAR <- function(par, mod, ...) { theta <- rnorm(length(par)) d <- theta / sqrt(sum(theta*theta)) u <- suppressWarnings(optim(.5, steepA, mod=mod, df=d, ..., est=par, method="Nelder-Mead")$par) prop <- par - {u * d} return(list("prop"=prop, "u"=u, "d"=d)) } reltol <- function(x, tol) tol * (abs(x) + tol) ### Adam==== ADAM <- function(fn, startvalue, gr, ..., Interval=1e-6, maxit=100, tol=1e-8, verbose=T) { # Opening message if(verbose==T) { cat("Steepest Adam will run for ", maxit, " iterations at most.\n\n", sep="") startTime = proc.time() } # Initial settings epsilon <- Interval if(verbose==T) { pb <- txtProgressBar(min=0, max=maxit, style=3) } par <- s <- v <- G <- matrix(NA, nrow=maxit, ncol=length(startvalue)) GG <- matrix(NA, nrow=maxit, ncol=3) f <- vector("numeric", length=maxit) convergence = F # First step G[1,] <- gr(fn, startvalue, ..., Interval=Interval) alpha <- suppressWarnings(unlist(optim(0, fn=steepA, mod=fn, df=G[1,], ..., est=startvalue, method="Nelder-Mead")$par)) GG[1,] <- c(.5, .5, alpha) v[1,] <- G[1,]*GG[1,1] s[1,] <- G[1,]*GG[1,2] par[1,] <- startvalue - {alpha*G[1,]} f[1] <- fn(par[1,], 
...) if(verbose==T) { setTxtProgressBar(pb, 1) } # Run ADAM algorithm for(i in 2:maxit) { G[i,] <- gr(fn, par[i-1,], ..., Interval=Interval) GG[i,] <- suppressWarnings(unlist(optim(par=c(0,0,0), fn=Gammas, mod=fn, df=G[i,], ..., est=par[i-1,], vi=v[i-1,], si=s[i-1,], iter=i, epsilon=epsilon)$par)) if (sum(abs(GG[i,])) == 0) { GG[i,] <- suppressWarnings(unlist(ucminf(par=c(0,0,0), fn=Gammas, mod=fn, df=G[i,], ..., est=par[i-1,], vi=v[i-1,], si=s[i-1,], iter=i, epsilon=epsilon)$par)) } gammav <- GG[i,1] gammas <- GG[i,2] alpha <- GG[i,3] v[i,] <- {{gammav * v[i-1,]} + {{1 - gammav} * G[i,] }}/{1 - {gammav^i}} s[i,] <- {{gammas * s[i-1,]} + {{1 - gammas} * {G[i,]*G[i,]}}}/{1 - {gammas^i}} par[i,] <- par[i-1,] - {{alpha*v[i,]}/{epsilon+sqrt(s[i,])}} f[i] <- fn(par[i,], ...) # Check convergence if (reltol(f[i], tol) > abs(f[i] - f[i-1])) { convergence = T if (verbose==T) { setTxtProgressBar(pb, maxit) cat("\nConvergence achieved!") } break } else { if (verbose==T) { setTxtProgressBar(pb, i) } } } if ({convergence==F} & {verbose==T}) { cat("\nConvergence may not have been achieved!") } if (i < maxit) { f <- f[-c({i+1}:maxit)] par <- as.matrix(par[-c({i+1}:maxit),]) G <- G[-c({i+1}:maxit),] s <- s[-c({i+1}:maxit),] v <- v[-c({i+1}:maxit),] GG <- GG[-c({i+1}:maxit),] } if (verbose==T) { close(pb) cat("\n") # Final messages stopTime = proc.time() elapsedTime = stopTime - startTime cat("It took ",round(elapsedTime[3],2), " secs for the run to finish. 
\n", sep="") } # Return results Results <- list("Cost"=f, "Estimates"=par, "Gradients"=G, "DSG"=s, "Momentum"=v, "par"=par[which.min(f),], "value"=f[which.min(f)], "steps"=GG) return(Results) } ### Steepest 2-group Gradient Descent==== STDM <- function(fn, startvalue, gr, ..., Interval=1e-6, maxit=100, tol=1e-8, verbose=T) { # Opening message if(verbose==T) { cat("Steepest 2-group Gradient Descent will run for ", maxit, " iterations at most.\n\n", sep="") startTime = proc.time() } # Initial settings if(verbose==T) { pb <- txtProgressBar(min=0, max=maxit, style=3) } #par <- df <- array(dim = c(maxit,length(startvalue))) par <- df <- matrix(NA, nrow=maxit, ncol=length(startvalue)) f <- vector("numeric", length=maxit) step <- array(dim = c(maxit,2)) convergence = F # First step df[1,] <- gr(fn, startvalue, ..., Interval=Interval) step[1,] <- suppressWarnings(unlist(optim(c(0,0), fn=steep, mod=fn, ..., df=df[1,], est=startvalue, method="Nelder-Mead")$par)) par[1,] <- startvalue - {step[1,1]*df[1,]*{(abs(df[1,]) > sd(df[1,])) * 1}} - {step[1,2]*df[1,]*{(abs(df[1,]) <= sd(df[1,])) * 1}} f[1] <- fn(par[1,], ...) if(verbose==T) { setTxtProgressBar(pb, 1) } # Start estimation for(run in 2:maxit) { # Calculate gradient and estimate step parameter df[run,] <- gr(fn, par[run-1,], ..., Interval=Interval) step[run,] <- suppressWarnings(unlist(optim(step[run-1,], fn=steep, mod=fn, ..., df=df[run,], est=par[run-1,], method="Nelder-Mead")$par)) if (sum(abs(step[run,])) == 0) { step[run,] <- suppressWarnings(unlist(ucminf(par=c(0,0), fn=steep, mod=fn, ..., df=df[run,], est=par[run-1,])$par)) } par[run,] <- par[run-1,] - {step[run,1]*df[run,]*{(abs(df[run,]) > sd(df[run,])) * 1}} - {step[run,2]*df[run,]*{(abs(df[run,]) <= sd(df[run,])) * 1}} f[run] <- fn(par[run,], ...) 
# Check convergence if (reltol(f[run], tol) > abs(f[run] - f[run-1])) { convergence = T if(verbose==T) { setTxtProgressBar(pb, maxit) cat("\nConvergence achieved!") } break } else { if(verbose==T) { setTxtProgressBar(pb, run) } } } if ({convergence==F} && {verbose==T}) { cat("\nConvergence may not have been achieved!") } if (run < maxit) { f <- f[-c({run+1}:maxit)] par <- par[-c({run+1}:maxit),] df <- df[-c({run+1}:maxit),] step <- step[-c({run+1}:maxit),] } if(verbose==T) { close(pb) cat("\n") # Final messages stopTime = proc.time() elapsedTime = stopTime - startTime cat("It took ",round(elapsedTime[3],2)," secs for the run to finish. \n", sep="") } # Return results Results <- list("Cost"=f, "Estimates"=par, "Gradients"=df, "par"=par[which.min(f),], "value"=f[which.min(f)], "steps"=step) return(Results) } ### Steepest Hit-and-Run Gradient Descent==== HRGD <- function(fn, startvalue, gr, ..., Interval=1e-6, maxit=100) { # Opening message cat("Hit-and-Run Gradient Descent will run for ", maxit, " iterations at most.\n\n", sep="") startTime = proc.time() # Initial settings pb <- txtProgressBar(min=0, max=maxit, style=3) par <- df <- array(dim = c(maxit,length(startvalue))) f <- vector("numeric", length=maxit) step <- vector("numeric", length=maxit) convergence = F # First step df[1,] <- gr(fn, startvalue, ..., Interval=Interval) SS <- suppressWarnings(unlist(optim(c(0,0), fn=steep, mod=fn, ..., df=df[1,], est=startvalue, method="Nelder-Mead")$par)) step[1] <- mean(SS) par[1,] <- startvalue - {SS[1]*df[1,]*{(abs(df[1,]) > sd(df[1,])) * 1}} - {SS[2]*df[1,]*{(abs(df[1,]) <= sd(df[1,])) * 1}} f[1] <- fn(par[1,], ...) setTxtProgressBar(pb, 1) # Start estimation for(run in 2:maxit) { # Calculate gradient and estimate step parameter temp <- sHAR(par=par[run-1,], mod=fn, ...) df[run,] <- temp$d step[run] <- temp$u par[run,] <- temp$prop f[run] <- fn(par[run,], ...) 
# Check convergence if ({step[run] - step[run-1]} == 0) { convergence = T setTxtProgressBar(pb, maxit) cat("\nConvergence achieved!") break } else { setTxtProgressBar(pb, run) } } if (convergence == F) { cat("\nConvergence may not have been achieved!") } if (run < maxit) { f <- f[-c({run+1}:maxit)] par <- par[-c({run+1}:maxit),] df <- df[-c({run+1}:maxit),] step <- step[-c({run+1}:maxit)] } close(pb) cat("\n") # Final messages stopTime = proc.time() elapsedTime = stopTime - startTime cat("It took ",round(elapsedTime[3],2)," secs for the run to finish. \n", sep="") # Return results Results <- list("Cost"=f, "Estimates"=par, "Gradients"=df, "par"=par[which.min(f),], "value"=f[which.min(f)], "steps"=step) return(Results) }
#' Gets starting state for scenario #' #' @param parameters base model parameters #' @param starting_conditions a list specifying total_prevalence, total_population, cumulative_incidence, and proportion_vaccinated #' @param scenario.specs a list which contains a strains.list which is a list of streans, each specifying #' rel.transmissability. rel.severity, import.rate, frac.prevalence, and frac.cum.inc #' #' @return starting state vector #' @export get_starting_state_from_scenario = function(parameters, starting_conditions, scenario.specs){ with(as.list(c(starting_conditions, scenario.specs)),{ strain_prevalence_frac = get_attribute_from_specs(strains.list, "frac.prevalence") immune_frac = create_immune_distribution(immune.list, strains.list, vaccination.list, cumulative_incidence, proportion_vaccinated) get_starting_state(parameters, starting_conditions, immune_frac, strain_prevalence_frac) }) }
/HutchCOVID/R/get_starting_state_from_scenario.R
no_license
FredHutch/COVID_modeling_schools
R
false
false
1,287
r
#' Gets starting state for scenario #' #' @param parameters base model parameters #' @param starting_conditions a list specifying total_prevalence, total_population, cumulative_incidence, and proportion_vaccinated #' @param scenario.specs a list which contains a strains.list which is a list of streans, each specifying #' rel.transmissability. rel.severity, import.rate, frac.prevalence, and frac.cum.inc #' #' @return starting state vector #' @export get_starting_state_from_scenario = function(parameters, starting_conditions, scenario.specs){ with(as.list(c(starting_conditions, scenario.specs)),{ strain_prevalence_frac = get_attribute_from_specs(strains.list, "frac.prevalence") immune_frac = create_immune_distribution(immune.list, strains.list, vaccination.list, cumulative_incidence, proportion_vaccinated) get_starting_state(parameters, starting_conditions, immune_frac, strain_prevalence_frac) }) }
shinyUI(navbarPage("Exponential Distribution Simulation Navbar!", tabPanel("Plot", sidebarLayout( sidebarPanel( h2('Exponential Distribution Simulation Settings'), sliderInput('lambda', 'Numeric input, labeled lambda', 0.05, min = 0.05, max = 1, step = 0.05), numericInput('popSize', "Population Size",10,min=10,max=100,step=10), numericInput('repTimes', "Repetition Times",100,min=100,max=2000,step=100), checkboxInput("meanLine", "Show the Mean Line", FALSE) ), mainPanel( h2('Distribution of Average'), plotOutput('figure') ) ) ), tabPanel("Supporting Document", h3("This application aims to do simulations on exponential distribution and Central Limit Theorem."), p("1.Users can set the values of lambda,population size(step by 10) and repetition times of exponential simulation(step by 100). The generalized averages of each population are calculated and demonstrated in a histogram."), p("2.Users can choose to add the mean line or not in order to make comparisons between theoretical mean and simulation result.") ) ) )
/ui.r
no_license
richardsun-voyager/ExponentialDistributionApp
R
false
false
1,108
r
shinyUI(navbarPage("Exponential Distribution Simulation Navbar!", tabPanel("Plot", sidebarLayout( sidebarPanel( h2('Exponential Distribution Simulation Settings'), sliderInput('lambda', 'Numeric input, labeled lambda', 0.05, min = 0.05, max = 1, step = 0.05), numericInput('popSize', "Population Size",10,min=10,max=100,step=10), numericInput('repTimes', "Repetition Times",100,min=100,max=2000,step=100), checkboxInput("meanLine", "Show the Mean Line", FALSE) ), mainPanel( h2('Distribution of Average'), plotOutput('figure') ) ) ), tabPanel("Supporting Document", h3("This application aims to do simulations on exponential distribution and Central Limit Theorem."), p("1.Users can set the values of lambda,population size(step by 10) and repetition times of exponential simulation(step by 100). The generalized averages of each population are calculated and demonstrated in a histogram."), p("2.Users can choose to add the mean line or not in order to make comparisons between theoretical mean and simulation result.") ) ) )
## ----setup, include = FALSE---------------------------------------------- knitr::opts_chunk$set( collapse = TRUE, comment = "#>" ) ## ------------------------------------------------------------------------ library(texblocks) library(texPreview) ## ----include=FALSE------------------------------------------------------- tex_opts$set(returnType = 'html') tex_opts$append(list(cleanup='tex')) ## ------------------------------------------------------------------------ x <- as.tb('$\\alpha$') y <- as.tb('aaa') z <- as.tb('bbb') ## ------------------------------------------------------------------------ x1 <- x + y + z x2 <- x1 / x1 # 2x3 object x3 <- x2 / x2 # 4x3 object k1 <- lapply(1:3,as.tb) k2 <- lapply(4:6,as.tb) k <- purrr::reduce(k1,`+`) / purrr::reduce(k2,`+`) title <- c('param',sprintf('col%s',1:5))%>% purrr::map(as.tb)%>% purrr::reduce(`+`) ## ----hline--------------------------------------------------------------- (title / (x2 + x3))%>% hline()%>% tabular()%>% texPreview::tex_preview() ## ----hline2-------------------------------------------------------------- (title / (x2 + x3))%>% hline(lines = c(2,3))%>% tabular()%>% texPreview::tex_preview() ## ------------------------------------------------------------------------ l <- list(c(line=1,i=2,j=3),c(line=2,i=1,j=2),c(line=3,i=2,j=3)) d <- data.frame(line=1:3,i=c(1,2,3),j=c(1,2,3)) ## ----cline--------------------------------------------------------------- purrr::reduce(rep(x1,4),`/`)%>% cline(l)%>% tabular()%>% texPreview::tex_preview() purrr::reduce(rep(x1,4),`/`)%>% cline(d)%>% tabular()%>% texPreview::tex_preview() ## ----lines_pipe---------------------------------------------------------- purrr::reduce(rep(x1,4),`/`)%>% hline(c(0,4))%>% cline(d)%>% tabular()%>% texPreview::tex_preview() ## ----multirow,echo=TRUE,results='asis'----------------------------------- title <- as.tb('param') + multicol('vals',3,'c') (title / (multirow('$\\beta$',2) + k))%>% tabular()%>% texPreview::tex_preview()
/vignettes/aesthetics.R
permissive
yonicd/texblocks
R
false
false
2,051
r
## ----setup, include = FALSE---------------------------------------------- knitr::opts_chunk$set( collapse = TRUE, comment = "#>" ) ## ------------------------------------------------------------------------ library(texblocks) library(texPreview) ## ----include=FALSE------------------------------------------------------- tex_opts$set(returnType = 'html') tex_opts$append(list(cleanup='tex')) ## ------------------------------------------------------------------------ x <- as.tb('$\\alpha$') y <- as.tb('aaa') z <- as.tb('bbb') ## ------------------------------------------------------------------------ x1 <- x + y + z x2 <- x1 / x1 # 2x3 object x3 <- x2 / x2 # 4x3 object k1 <- lapply(1:3,as.tb) k2 <- lapply(4:6,as.tb) k <- purrr::reduce(k1,`+`) / purrr::reduce(k2,`+`) title <- c('param',sprintf('col%s',1:5))%>% purrr::map(as.tb)%>% purrr::reduce(`+`) ## ----hline--------------------------------------------------------------- (title / (x2 + x3))%>% hline()%>% tabular()%>% texPreview::tex_preview() ## ----hline2-------------------------------------------------------------- (title / (x2 + x3))%>% hline(lines = c(2,3))%>% tabular()%>% texPreview::tex_preview() ## ------------------------------------------------------------------------ l <- list(c(line=1,i=2,j=3),c(line=2,i=1,j=2),c(line=3,i=2,j=3)) d <- data.frame(line=1:3,i=c(1,2,3),j=c(1,2,3)) ## ----cline--------------------------------------------------------------- purrr::reduce(rep(x1,4),`/`)%>% cline(l)%>% tabular()%>% texPreview::tex_preview() purrr::reduce(rep(x1,4),`/`)%>% cline(d)%>% tabular()%>% texPreview::tex_preview() ## ----lines_pipe---------------------------------------------------------- purrr::reduce(rep(x1,4),`/`)%>% hline(c(0,4))%>% cline(d)%>% tabular()%>% texPreview::tex_preview() ## ----multirow,echo=TRUE,results='asis'----------------------------------- title <- as.tb('param') + multicol('vals',3,'c') (title / (multirow('$\\beta$',2) + k))%>% tabular()%>% texPreview::tex_preview()
####### task ############ ## data cleaning and exploration ## wilcox testgg library(readr) library(ggplot2) library(tidyverse) library(rcompanion) usertype_revenue <- read_csv("Downloads/R_shiny_app_GoogleTrends/Seleted dataset/Data_Usertype_Revenue.csv", col_types = cols(Revenue = col_character())) View(usertype_revenue) ######## clean the data and remove symbols from the rows and leave only numbers and characters usertype_revenue$Revenue <- substring(usertype_revenue$Revenue, first = 2) # subset characters beginning from 1nd string to take out $ sign View(usertype_revenue) # Dataset$`Ecommerce Conversion Rate` <- substr(Dataset$`Ecommerce Conversion Rate`, 1, nchar(Dataset$`Ecommerce Conversion Rate`) - 1) # retains 1st to 4th characters and leaveout % sign # Dataset$`Bounce Rate` <- substr(Dataset$`Bounce Rate`, 1, nchar(Dataset$`Bounce Rate`) - 1) # retains 1st to 4th characters and leaveout % sign # View(Dataset) ######## #extract first 5 chars char_first5 <- substr(usertype_revenue$`Day Index`, 1, 5) ## use for Day Index col chracters of length 7 # extarct first 6 chars char_first6 <- substr(usertype_revenue$`Day Index`, 1, 6) ## use for Day index col characters of length 8 #extract last 2 chars char_last2 <- substring(usertype_revenue$`Day Index`, first = nchar(usertype_revenue$`Day Index`) - 1) ## concatenate at the end of values in Day Index colume char_last2 ################ Formating Day Index col with 7 characters into Date format #################### filter_char7 <- filter(usertype_revenue, nchar(usertype_revenue$`Day Index`) == 7) ## filter Day Index col with 7 strings filter_char7_first5 <- substr(filter_char7$`Day Index`, 1, 5) ## Extract 1st 5 strings from col filter_char7_last2 <- substring(filter_char7$`Day Index`, first = nchar(filter_char7$`Day Index`) - 1) #Extract last 2 strings of dataset in Day Index colume filter_char7Re <- mutate(filter_char7, fill_char7Date = paste0("0", filter_char7_first5, "20", filter_char7_last2)) #create col and 
concatenate strings into it to form Date colnames(filter_char7Re)[5] <- "Date" ## rename col as Date View(filter_char7Re) View(filter_char7) View(filter_char7_last2) ################ Formating Day Index col with 8 characters into Date format #################### filter_char8 <- filter(usertype_revenue, nchar(usertype_revenue$`Day Index`) == 8) ## filter Day Index col with 8 strings filter_char8_first6 <- substr(filter_char8$`Day Index`, 1, 6) ## Extract 1st 6 strings from col filter_char8_last2 <- substring(filter_char8$`Day Index`, first = nchar(filter_char8$`Day Index`) - 1) #Extract last 2 strings of dataset in Day Index colume filter_char8Re <- mutate(filter_char8, fill_char8Date = paste0(filter_char8_first6, "20", filter_char8_last2)) #create col and concatenate strings into it to form Date colnames(filter_char8Re)[5] <- "Date" ## rename col as Date View(filter_char8) View(filter_char8Re) colnames(filter_char) ################ Formating Day Index col with 10 characters into Date format #################### filter_char10 <- filter(usertype_revenue, nchar(usertype_revenue$`Day Index`) == 10) ## filter Day Index col with 10 strings filter_char10["Date"] <- filter_char10$`Day Index` ## create Date col and assign the values of Day Index col to it ########binding rows of datasets ############################################## GA_userType_Revenue_Dataset <- bind_rows(filter_char7Re, filter_char8Re, filter_char10) ## bind rows of all datasets filtered View(GA_userType_Revenue_Dataset) #### Exporting dataset as csv write.csv(GA_userType_Revenue_Dataset, file = "Users\\lin\\Downloads\\R_shiny_app_GoogleTrends\\GA_userType_Revenue_Dataset.csv") ################## data exploration #################### View(GA_userType_Revenue_Dataset) GA_userType_Revenue_Dataset <- read_csv("GA_userType_Revenue_Dataset.csv", col_types = cols(Date = col_date(format = "%m/%d/%Y"))) #View(GA_userType_Revenue_Dataset) #ggplot(data = GA_userType_Revenue_Dataset, aes(x = Segment, y = 
"Revenue")) + geom_histogram() table(GA_userType_Revenue_Dataset$Segment) data_usertype <- GA_userType_Revenue_Dataset ggplot(data = GA_userType_Revenue_Dataset, aes(x = Segment)) + geom_bar() summary(data_usertype$Revenue) quantile(data_usertype$Transactions) summary(data_usertype$Transactions) var(data_usertype$Transactions) View(data_usertype) ggplot(data_usertype, aes(x = Segment,y = Revenue)) + geom_boxplot() ggplot(data_usertype, aes(x = Segment, y = Transactions)) + geom_boxplot() + coord_flip() ggplot(data_usertype, aes(Transactions)) + geom_histogram() ggplot(data_usertype, aes(Revenue)) + geom_histogram() (transaction_ttest <- t.test(Transactions ~ Segment, data = data_usertype)) ## T test for mean of transactions between (revenue_ttest <- t.test(Revenue ~ Segment, data = data_usertype)) ## wilcon test for testing mean difference for non-normal distribution (transaction_wilcox <- wilcox.test(Transactions ~ Segment, data = data_usertype)) (revenue_wilcox <- wilcox.test(Revenue ~ Segment, data = data_usertype)) plotNormalDensity(data_usertype$Revenue) transformTukey(data_usertype$Revenue) outliers::outlier(data_usertype$Transactions) outliers::outlier(data_usertype$Revenue) outliers::outlier(data_usertype$Transactions, opposite = TRUE) # shapiro.test(data_usertype$Transactions) # shapiro.test(transactions_transform) # leveneTest(y = data_usertype$Transactions, group = data_usertype$Segment, data = data_usertype) # fligner.test(x = data_usertype$Transactions, group = data_usertype$Segment, data = data_usertype ) plotNormalHistogram(data_usertype$Transactions) (transactions_transform <- rcompanion::transformTukey(data_usertype$Transactions)) (transboxplot <- boxplot(data_usertype$Transactions)) (outlierrem <- rm.outlier(data_usertype[,5])) View(outlierrem) boxplot(outlierrem$Revenue) plotDensityHistogram(outlierrem$Revenue)
/Usertype_Revenue_data_wraggling.R
no_license
agbleze/R_shiny_app_GoogleTrends
R
false
false
5,965
r
####### task ############ ## data cleaning and exploration ## wilcox testgg library(readr) library(ggplot2) library(tidyverse) library(rcompanion) usertype_revenue <- read_csv("Downloads/R_shiny_app_GoogleTrends/Seleted dataset/Data_Usertype_Revenue.csv", col_types = cols(Revenue = col_character())) View(usertype_revenue) ######## clean the data and remove symbols from the rows and leave only numbers and characters usertype_revenue$Revenue <- substring(usertype_revenue$Revenue, first = 2) # subset characters beginning from 1nd string to take out $ sign View(usertype_revenue) # Dataset$`Ecommerce Conversion Rate` <- substr(Dataset$`Ecommerce Conversion Rate`, 1, nchar(Dataset$`Ecommerce Conversion Rate`) - 1) # retains 1st to 4th characters and leaveout % sign # Dataset$`Bounce Rate` <- substr(Dataset$`Bounce Rate`, 1, nchar(Dataset$`Bounce Rate`) - 1) # retains 1st to 4th characters and leaveout % sign # View(Dataset) ######## #extract first 5 chars char_first5 <- substr(usertype_revenue$`Day Index`, 1, 5) ## use for Day Index col chracters of length 7 # extarct first 6 chars char_first6 <- substr(usertype_revenue$`Day Index`, 1, 6) ## use for Day index col characters of length 8 #extract last 2 chars char_last2 <- substring(usertype_revenue$`Day Index`, first = nchar(usertype_revenue$`Day Index`) - 1) ## concatenate at the end of values in Day Index colume char_last2 ################ Formating Day Index col with 7 characters into Date format #################### filter_char7 <- filter(usertype_revenue, nchar(usertype_revenue$`Day Index`) == 7) ## filter Day Index col with 7 strings filter_char7_first5 <- substr(filter_char7$`Day Index`, 1, 5) ## Extract 1st 5 strings from col filter_char7_last2 <- substring(filter_char7$`Day Index`, first = nchar(filter_char7$`Day Index`) - 1) #Extract last 2 strings of dataset in Day Index colume filter_char7Re <- mutate(filter_char7, fill_char7Date = paste0("0", filter_char7_first5, "20", filter_char7_last2)) #create col and 
concatenate strings into it to form Date colnames(filter_char7Re)[5] <- "Date" ## rename col as Date View(filter_char7Re) View(filter_char7) View(filter_char7_last2) ################ Formating Day Index col with 8 characters into Date format #################### filter_char8 <- filter(usertype_revenue, nchar(usertype_revenue$`Day Index`) == 8) ## filter Day Index col with 8 strings filter_char8_first6 <- substr(filter_char8$`Day Index`, 1, 6) ## Extract 1st 6 strings from col filter_char8_last2 <- substring(filter_char8$`Day Index`, first = nchar(filter_char8$`Day Index`) - 1) #Extract last 2 strings of dataset in Day Index colume filter_char8Re <- mutate(filter_char8, fill_char8Date = paste0(filter_char8_first6, "20", filter_char8_last2)) #create col and concatenate strings into it to form Date colnames(filter_char8Re)[5] <- "Date" ## rename col as Date View(filter_char8) View(filter_char8Re) colnames(filter_char) ################ Formating Day Index col with 10 characters into Date format #################### filter_char10 <- filter(usertype_revenue, nchar(usertype_revenue$`Day Index`) == 10) ## filter Day Index col with 10 strings filter_char10["Date"] <- filter_char10$`Day Index` ## create Date col and assign the values of Day Index col to it ########binding rows of datasets ############################################## GA_userType_Revenue_Dataset <- bind_rows(filter_char7Re, filter_char8Re, filter_char10) ## bind rows of all datasets filtered View(GA_userType_Revenue_Dataset) #### Exporting dataset as csv write.csv(GA_userType_Revenue_Dataset, file = "Users\\lin\\Downloads\\R_shiny_app_GoogleTrends\\GA_userType_Revenue_Dataset.csv") ################## data exploration #################### View(GA_userType_Revenue_Dataset) GA_userType_Revenue_Dataset <- read_csv("GA_userType_Revenue_Dataset.csv", col_types = cols(Date = col_date(format = "%m/%d/%Y"))) #View(GA_userType_Revenue_Dataset) #ggplot(data = GA_userType_Revenue_Dataset, aes(x = Segment, y = 
"Revenue")) + geom_histogram() table(GA_userType_Revenue_Dataset$Segment) data_usertype <- GA_userType_Revenue_Dataset ggplot(data = GA_userType_Revenue_Dataset, aes(x = Segment)) + geom_bar() summary(data_usertype$Revenue) quantile(data_usertype$Transactions) summary(data_usertype$Transactions) var(data_usertype$Transactions) View(data_usertype) ggplot(data_usertype, aes(x = Segment,y = Revenue)) + geom_boxplot() ggplot(data_usertype, aes(x = Segment, y = Transactions)) + geom_boxplot() + coord_flip() ggplot(data_usertype, aes(Transactions)) + geom_histogram() ggplot(data_usertype, aes(Revenue)) + geom_histogram() (transaction_ttest <- t.test(Transactions ~ Segment, data = data_usertype)) ## T test for mean of transactions between (revenue_ttest <- t.test(Revenue ~ Segment, data = data_usertype)) ## wilcon test for testing mean difference for non-normal distribution (transaction_wilcox <- wilcox.test(Transactions ~ Segment, data = data_usertype)) (revenue_wilcox <- wilcox.test(Revenue ~ Segment, data = data_usertype)) plotNormalDensity(data_usertype$Revenue) transformTukey(data_usertype$Revenue) outliers::outlier(data_usertype$Transactions) outliers::outlier(data_usertype$Revenue) outliers::outlier(data_usertype$Transactions, opposite = TRUE) # shapiro.test(data_usertype$Transactions) # shapiro.test(transactions_transform) # leveneTest(y = data_usertype$Transactions, group = data_usertype$Segment, data = data_usertype) # fligner.test(x = data_usertype$Transactions, group = data_usertype$Segment, data = data_usertype ) plotNormalHistogram(data_usertype$Transactions) (transactions_transform <- rcompanion::transformTukey(data_usertype$Transactions)) (transboxplot <- boxplot(data_usertype$Transactions)) (outlierrem <- rm.outlier(data_usertype[,5])) View(outlierrem) boxplot(outlierrem$Revenue) plotDensityHistogram(outlierrem$Revenue)
# Following two functions will calculate and cache the inverse of a matrix at its first use. When we need it again, it can be looked up in the cache rather than recomputed. ################################################################################# # This function creates a special matrix object that can cache its inverse. makeCacheMatrix <- function(x = matrix()) { m <- NULL # set the value of the matrix set <- function(y) { x <<- y m <<- NULL } # get the value of the matrix get <- function() x # set the value of the inverse setinverse <- function(inverse) m <<- inverse # get the value of the inverse getinverse <- function() m # return the results list(set = set, get = get, setinverse = setinverse, getinverse = getinverse) } ################################################################################# # This function retrieves the inverse of the matrix from the cache. # If not in the cache, computes inverse of the special "matrix" returned by makeCacheMatrix. cacheSolve <- function(x, ...) { m <- x$getinverse() # Return the inverse if available in the cashe if(!is.null(m)) { message("getting cached data") return(m) } # calculate the inverse and cashe it when not there data <- x$get() m <- solve(data, ...) message("getting calculated data") x$setinverse(m) # return the results m }
/cachematrix.R
no_license
jairajsingh/ProgrammingAssignment2
R
false
false
1,611
r
# Following two functions will calculate and cache the inverse of a matrix at its first use. When we need it again, it can be looked up in the cache rather than recomputed. ################################################################################# # This function creates a special matrix object that can cache its inverse. makeCacheMatrix <- function(x = matrix()) { m <- NULL # set the value of the matrix set <- function(y) { x <<- y m <<- NULL } # get the value of the matrix get <- function() x # set the value of the inverse setinverse <- function(inverse) m <<- inverse # get the value of the inverse getinverse <- function() m # return the results list(set = set, get = get, setinverse = setinverse, getinverse = getinverse) } ################################################################################# # This function retrieves the inverse of the matrix from the cache. # If not in the cache, computes inverse of the special "matrix" returned by makeCacheMatrix. cacheSolve <- function(x, ...) { m <- x$getinverse() # Return the inverse if available in the cashe if(!is.null(m)) { message("getting cached data") return(m) } # calculate the inverse and cashe it when not there data <- x$get() m <- solve(data, ...) message("getting calculated data") x$setinverse(m) # return the results m }
#' De-identify data through encrypting text columns, grouping rare values, and, #' aggregating numeric or date columns. #' #' #' @param data #' A data.frame with the data you want to de-identify. #' @param cols_to_encrypt #' A string or vector of strings with the columns that you want to encrypt #' using the `seed_cipher()` function from the `caesar` package. #' @param group_rare_values_cols #' A string or vector of strings with the columns that you want to convert #' rare values (below a certain percent of all values as set in #' `group_rare_values_limit`) into NA or a particular string (or NA) set in #' `group_rare_values_text`. #' @param group_rare_values_limit #' A string or vector of strings (one for each col in `group_rare_values_cols`) #' for what threshold (in percent of all non-NA values) to determine that a value #' is rare enough to change to NA (or the string set in `group_rare_values_text`). #' @param group_rare_values_text #' A string or vector of strings (one for each col in `group_rare_values_cols`) #' for what to rename the values that are determined to be rare enough (based on #' threshold set in `group_rare_values_limit` to rename them. If NULL (default), #' and the vector is strings, replaces them with a string that concatenates all #' of the rare values together (separated by a comma). If NA, replaces them with NA. #' @param date_cols #' A vector of strings with the name of date columns that you want to be aggregated #' to the unit set in the `date_aggreagtion` parameter. If NULL, will use #' all date columns in the data. #' @param date_aggregation #' A string with the time unit to aggregate all Date variables to. #' Can take one of the following: 'week', 'month', 'bimonth', 'quarter', #' 'halfyear', 'year'. #' @param quiet #' A Boolean for whether you want to output a message that tells you which #' columns that you are encrypting and the seed set for each column to #' do the encryption. 
If you don't set the seed yourself, you need these #' seeds to decrypt. #' #' @return #' A data.frame with the selected columns de-identify based on user parameters. #' @examples #' @export deidentify_data <- function(data, date_cols = NULL, date_aggregation = c("week", "month", "bimonth", "quarter", "halfyear", "year"), cols_to_encrypt = NULL, group_rare_values_cols = NULL, group_rare_values_limit = 5, group_rare_values_text = NULL, quiet = FALSE) { # if (!is.null(cols_to_encrypt) & is.null(seeds_for_encryption)) { # seeds_for_encryption <- sample(1e3:1e11, length(cols_to_encrypt)) # } data <- data %>% dplyr::mutate_if(lubridate::is.Date, lubridate::floor_date, unit = date_aggregation) # Looks in each selected columns and replaces all the values that are less # than k% of the non-NA total with a selected replacement text (default # is NA). This is to reduce privacy concerns with rare values (i.e. if # person is only one with X offense, can identify through that). group_rare_values_limit <- rep(group_rare_values_limit, length.out = length(group_rare_values_cols)) if (!is.null(group_rare_values_cols)) { for (i in 1:length(group_rare_values_cols)) { col <- group_rare_values_cols[i] rare_values <- get_rare_values(data[, col], group_rare_values_limit[i]) if (is.null(group_rare_values_text)){ data[, col][data[, col] %in% rare_values] <- paste0(rare_values, collapse = ", ") } else if (is.na(group_rare_values_text)) { data[, col][data[, col] %in% rare_values] <- NA } else { data[, col][data[, col] %in% rare_values] <- group_rare_values_text[i] } } } return(data) } deidentify_date <- function(data, date_aggregation) { data <- lubridate::floor_date(data, date_aggregation) return(data) } deidentify_group <- function(data, group_rare_values_limit, group_rare_values_text) { values_under_k_percent <- get_rare_values(data, group_rare_values_limit) data[data %in% values_under_k_percent] <- group_rare_values_text } get_rare_values <- function(data, k_percent = 5) { numeric_data <- 
is.numeric(data) values_by_percent <- table(data) / length(data[!is.na(data)]) * 100 values_under_k_percent <- names(values_by_percent[values_by_percent < k_percent]) if(numeric_data) { values_under_k_percent <- as.numeric(values_under_k_percent) } # Sorts alphabetically (or smallest to largest if numeric) for easier testing. values_under_k_percent <- sort(values_under_k_percent) # Returns NULL if no responses if (length(values_under_k_percent) == 0) { values_under_k_percent <- NULL } return(values_under_k_percent) } bin_numbers <- function(data, percent_per_group) { if (!is.numeric(percent_per_group) | length(percent_per_group) != 1 | !percent_per_group %in% 1:100) { stop("percent_per_group must be a single integer from 1:100.") } if (!is.numeric(data)) { stop("data must be a vector of numbers.") } # Convert to a proportion percent_per_group <- percent_per_group / 100 }
/R/deidentify.R
permissive
seathebass/deidentify
R
false
false
5,648
r
#' De-identify data through encrypting text columns, grouping rare values, and, #' aggregating numeric or date columns. #' #' #' @param data #' A data.frame with the data you want to de-identify. #' @param cols_to_encrypt #' A string or vector of strings with the columns that you want to encrypt #' using the `seed_cipher()` function from the `caesar` package. #' @param group_rare_values_cols #' A string or vector of strings with the columns that you want to convert #' rare values (below a certain percent of all values as set in #' `group_rare_values_limit`) into NA or a particular string (or NA) set in #' `group_rare_values_text`. #' @param group_rare_values_limit #' A string or vector of strings (one for each col in `group_rare_values_cols`) #' for what threshold (in percent of all non-NA values) to determine that a value #' is rare enough to change to NA (or the string set in `group_rare_values_text`). #' @param group_rare_values_text #' A string or vector of strings (one for each col in `group_rare_values_cols`) #' for what to rename the values that are determined to be rare enough (based on #' threshold set in `group_rare_values_limit` to rename them. If NULL (default), #' and the vector is strings, replaces them with a string that concatenates all #' of the rare values together (separated by a comma). If NA, replaces them with NA. #' @param date_cols #' A vector of strings with the name of date columns that you want to be aggregated #' to the unit set in the `date_aggreagtion` parameter. If NULL, will use #' all date columns in the data. #' @param date_aggregation #' A string with the time unit to aggregate all Date variables to. #' Can take one of the following: 'week', 'month', 'bimonth', 'quarter', #' 'halfyear', 'year'. #' @param quiet #' A Boolean for whether you want to output a message that tells you which #' columns that you are encrypting and the seed set for each column to #' do the encryption. 
If you don't set the seed yourself, you need these #' seeds to decrypt. #' #' @return #' A data.frame with the selected columns de-identify based on user parameters. #' @examples #' @export deidentify_data <- function(data, date_cols = NULL, date_aggregation = c("week", "month", "bimonth", "quarter", "halfyear", "year"), cols_to_encrypt = NULL, group_rare_values_cols = NULL, group_rare_values_limit = 5, group_rare_values_text = NULL, quiet = FALSE) { # if (!is.null(cols_to_encrypt) & is.null(seeds_for_encryption)) { # seeds_for_encryption <- sample(1e3:1e11, length(cols_to_encrypt)) # } data <- data %>% dplyr::mutate_if(lubridate::is.Date, lubridate::floor_date, unit = date_aggregation) # Looks in each selected columns and replaces all the values that are less # than k% of the non-NA total with a selected replacement text (default # is NA). This is to reduce privacy concerns with rare values (i.e. if # person is only one with X offense, can identify through that). group_rare_values_limit <- rep(group_rare_values_limit, length.out = length(group_rare_values_cols)) if (!is.null(group_rare_values_cols)) { for (i in 1:length(group_rare_values_cols)) { col <- group_rare_values_cols[i] rare_values <- get_rare_values(data[, col], group_rare_values_limit[i]) if (is.null(group_rare_values_text)){ data[, col][data[, col] %in% rare_values] <- paste0(rare_values, collapse = ", ") } else if (is.na(group_rare_values_text)) { data[, col][data[, col] %in% rare_values] <- NA } else { data[, col][data[, col] %in% rare_values] <- group_rare_values_text[i] } } } return(data) } deidentify_date <- function(data, date_aggregation) { data <- lubridate::floor_date(data, date_aggregation) return(data) } deidentify_group <- function(data, group_rare_values_limit, group_rare_values_text) { values_under_k_percent <- get_rare_values(data, group_rare_values_limit) data[data %in% values_under_k_percent] <- group_rare_values_text } get_rare_values <- function(data, k_percent = 5) { numeric_data <- 
is.numeric(data) values_by_percent <- table(data) / length(data[!is.na(data)]) * 100 values_under_k_percent <- names(values_by_percent[values_by_percent < k_percent]) if(numeric_data) { values_under_k_percent <- as.numeric(values_under_k_percent) } # Sorts alphabetically (or smallest to largest if numeric) for easier testing. values_under_k_percent <- sort(values_under_k_percent) # Returns NULL if no responses if (length(values_under_k_percent) == 0) { values_under_k_percent <- NULL } return(values_under_k_percent) } bin_numbers <- function(data, percent_per_group) { if (!is.numeric(percent_per_group) | length(percent_per_group) != 1 | !percent_per_group %in% 1:100) { stop("percent_per_group must be a single integer from 1:100.") } if (!is.numeric(data)) { stop("data must be a vector of numbers.") } # Convert to a proportion percent_per_group <- percent_per_group / 100 }
# Plot4 : plot 4 different graphs on 2 rows x 2 column matrix #setup working directory setwd("~/GitHub/ExData_Plotting1/Plot2.R") #Read the data in table format and store it in to dataset variable dataset <- read.table("household_power_consumption.txt",header = TRUE, sep=";",na.strings = "?") # Get the subset from dataset fro dates 1,2 feb 2007 and convert dates to R Date class and time to POSIXct sub_dataset <- subset(dataset, Date %in% c("1/2/2007","2/2/2007")) sub_dataset$Date <- as.Date(sub_dataset$Date, format="%d/%m/%Y") datetime <- paste(as.Date(sub_dataset$Date), sub_dataset$Time) sub_dataset$Datetime <- as.POSIXct(datetime) #save the plot to png format png("plot4.png", width=480, height=480) #Plot 4 graphs in 2 rows and 2 columns par(mfrow=c(2,2), mar=c(4,4,2,1), oma=c(0,0,2,0)) with(sub_dataset, { #plot1 : Global_active_power vs Datetime plot(Global_active_power~Datetime, type="l", ylab="Global Active Power (kilowatts)", xlab="") #plot2:Voltage vs Datetime plot(Voltage~Datetime, type="l", ylab="Voltage (volt)", xlab="") #plot3: submetering vs Dtaetime plot(Sub_metering_1~Datetime, type="l", ylab="Global Active Power (kilowatts)", xlab="") lines(Sub_metering_2~Datetime,col='Red') lines(Sub_metering_3~Datetime,col='Blue') legend("topright", col=c("black", "red", "blue"), lty=1, lwd=2, bty="n", legend=c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3")) #plot4: Global_reactive_power vs Datetime plot(Global_reactive_power~Datetime, type="l", ylab="Global Rective Power (kilowatts)",xlab="") }) #close the png device dev.off()
/Plot4.R
no_license
Ruchipatel91/ExData_Plotting1
R
false
false
1,648
r
# Plot4 : plot 4 different graphs on 2 rows x 2 column matrix #setup working directory setwd("~/GitHub/ExData_Plotting1/Plot2.R") #Read the data in table format and store it in to dataset variable dataset <- read.table("household_power_consumption.txt",header = TRUE, sep=";",na.strings = "?") # Get the subset from dataset fro dates 1,2 feb 2007 and convert dates to R Date class and time to POSIXct sub_dataset <- subset(dataset, Date %in% c("1/2/2007","2/2/2007")) sub_dataset$Date <- as.Date(sub_dataset$Date, format="%d/%m/%Y") datetime <- paste(as.Date(sub_dataset$Date), sub_dataset$Time) sub_dataset$Datetime <- as.POSIXct(datetime) #save the plot to png format png("plot4.png", width=480, height=480) #Plot 4 graphs in 2 rows and 2 columns par(mfrow=c(2,2), mar=c(4,4,2,1), oma=c(0,0,2,0)) with(sub_dataset, { #plot1 : Global_active_power vs Datetime plot(Global_active_power~Datetime, type="l", ylab="Global Active Power (kilowatts)", xlab="") #plot2:Voltage vs Datetime plot(Voltage~Datetime, type="l", ylab="Voltage (volt)", xlab="") #plot3: submetering vs Dtaetime plot(Sub_metering_1~Datetime, type="l", ylab="Global Active Power (kilowatts)", xlab="") lines(Sub_metering_2~Datetime,col='Red') lines(Sub_metering_3~Datetime,col='Blue') legend("topright", col=c("black", "red", "blue"), lty=1, lwd=2, bty="n", legend=c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3")) #plot4: Global_reactive_power vs Datetime plot(Global_reactive_power~Datetime, type="l", ylab="Global Rective Power (kilowatts)",xlab="") }) #close the png device dev.off()
\name{fast.geomorph.compare.multi.evol.rates} \alias{fast.geomorph.compare.multi.evol.rates} \title{ Fast covariance-based implementations of distance-based methods } \description{ The functions \code{fast.geomorph.compare.evol.rates}, \code{fast.geomorph.compare.multi.evol.rates}, \code{fast.geomorph.phylo.integration}, \code{fast.geomorph.procD.pgls} , and \code{fast.geomorph.physignal} are covariance-baesd implementations of the geomorph functions \link[geomorph]{compare.evol.rates}, \link[geomorph]{compare.multi.evol.rates}, \link[geomorph]{phylo.integration}, \link[geomorph]{procD.pgls}, and \link[geomorph]{physignal} using a fast linear-time algorithm. Code is directly modified from the original geomorph code for direct comparison between distance-based and covariance-based methods. } \usage{ fast.geomorph.compare.multi.evol.rates(A, gp, phy, Subset = TRUE, method = "ML", ShowPlot = TRUE, iter = 1000) } \arguments{ \item{A}{ From geomorph: A matrix (n x [p x k]) or 3D array (p x k x n) containing GPA-aligned coordinates for a set of specimens } \item{gp}{ From geomorph: A factor array designating group membership } \item{phy}{ From geomorph: A phylogenetic tree of class phylo } \item{Subset}{ From geomorph: A logical value indicating whether or not the traits are subsets from a single landmark configuration (default is TRUE) } \item{method}{ Maximum likelihood "ML" or restricted maximum likelihood "REML" } \item{ShowPlot}{ From geomorph: A logical value indicating whether or not the plot should be returned } \item{iter}{ From geomorph: Number of iterations for significance testing } } \details{ See \link[geomorph]{compare.multi.evol.rates} } \value{ See \link[geomorph]{compare.multi.evol.rates} } \references{ Goolsby E.W. 2016. Likelihood-Based Parameter Estimation for High-Dimensional Phylogenetic Comparative Models: Overcoming the Limitations of 'Distance-Based' Methods. In review. Adams, D.C. 2014. 
Quantifying and comparing phylogenetic evolutionary rates for shape and other high-dimensional phenotypic data. Syst. Biol. 63:166-177. Denton, J.S.S., and D.C. Adams. 2015. A new phylogenetic test for comparing multiple high-dimensional evolutionary rates suggests interplay of evolutionary rates and modularity in lanternfishes (Myctophiformes; Myctophidae). Evolution. 69: doi:10.1111/evo.12743 } \author{ Eric W. Goolsby } \seealso{ \link[geomorph]{compare.multi.evol.rates} } \examples{ ### NOTE: this example is identical ### to the example code for the ### analogous geomorph function ### for direct comparisons with ### 'fast.geomorph' phylocurve functions require(geomorph) data(plethspecies) Y.gpa<-gpagen(plethspecies$land) #GPA-alignment land.gp<-c("A","A","A","A","A","B","B","B","B","B","B") #mandible and cranium subsets compare.multi.evol.rates(Y.gpa$coords,land.gp,phy=plethspecies$phy,iter=99) fast.geomorph.compare.multi.evol.rates(Y.gpa$coords,land.gp,plethspecies$phy) }
/man/fast.geomorph.compare.multi.evol.rates.Rd
no_license
cran/phylocurve
R
false
false
3,024
rd
\name{fast.geomorph.compare.multi.evol.rates} \alias{fast.geomorph.compare.multi.evol.rates} \title{ Fast covariance-based implementations of distance-based methods } \description{ The functions \code{fast.geomorph.compare.evol.rates}, \code{fast.geomorph.compare.multi.evol.rates}, \code{fast.geomorph.phylo.integration}, \code{fast.geomorph.procD.pgls} , and \code{fast.geomorph.physignal} are covariance-baesd implementations of the geomorph functions \link[geomorph]{compare.evol.rates}, \link[geomorph]{compare.multi.evol.rates}, \link[geomorph]{phylo.integration}, \link[geomorph]{procD.pgls}, and \link[geomorph]{physignal} using a fast linear-time algorithm. Code is directly modified from the original geomorph code for direct comparison between distance-based and covariance-based methods. } \usage{ fast.geomorph.compare.multi.evol.rates(A, gp, phy, Subset = TRUE, method = "ML", ShowPlot = TRUE, iter = 1000) } \arguments{ \item{A}{ From geomorph: A matrix (n x [p x k]) or 3D array (p x k x n) containing GPA-aligned coordinates for a set of specimens } \item{gp}{ From geomorph: A factor array designating group membership } \item{phy}{ From geomorph: A phylogenetic tree of class phylo } \item{Subset}{ From geomorph: A logical value indicating whether or not the traits are subsets from a single landmark configuration (default is TRUE) } \item{method}{ Maximum likelihood "ML" or restricted maximum likelihood "REML" } \item{ShowPlot}{ From geomorph: A logical value indicating whether or not the plot should be returned } \item{iter}{ From geomorph: Number of iterations for significance testing } } \details{ See \link[geomorph]{compare.multi.evol.rates} } \value{ See \link[geomorph]{compare.multi.evol.rates} } \references{ Goolsby E.W. 2016. Likelihood-Based Parameter Estimation for High-Dimensional Phylogenetic Comparative Models: Overcoming the Limitations of 'Distance-Based' Methods. In review. Adams, D.C. 2014. 
Quantifying and comparing phylogenetic evolutionary rates for shape and other high-dimensional phenotypic data. Syst. Biol. 63:166-177. Denton, J.S.S., and D.C. Adams. 2015. A new phylogenetic test for comparing multiple high-dimensional evolutionary rates suggests interplay of evolutionary rates and modularity in lanternfishes (Myctophiformes; Myctophidae). Evolution. 69: doi:10.1111/evo.12743 } \author{ Eric W. Goolsby } \seealso{ \link[geomorph]{compare.multi.evol.rates} } \examples{ ### NOTE: this example is identical ### to the example code for the ### analogous geomorph function ### for direct comparisons with ### 'fast.geomorph' phylocurve functions require(geomorph) data(plethspecies) Y.gpa<-gpagen(plethspecies$land) #GPA-alignment land.gp<-c("A","A","A","A","A","B","B","B","B","B","B") #mandible and cranium subsets compare.multi.evol.rates(Y.gpa$coords,land.gp,phy=plethspecies$phy,iter=99) fast.geomorph.compare.multi.evol.rates(Y.gpa$coords,land.gp,plethspecies$phy) }
\name{head} \alias{head} \title{Obtains the first rows of an H2O parsed data object } \description{ Obtains the first rows of an H2O parsed data object } \usage{ head(x, ...) } %- maybe also 'usage' for other objects documented here. \arguments{ \item{x}{An H2O parsed data object } \item{\dots}{The number of rows to be returned (the default count is 6). } } \value{ The first 6 rows of an H2O parsed data frame, unless the number of rows is otherwise specified to be N, in which case the first N rows are returned. } \examples{ library(h2o) localH2O = h2o.init(ip = "localhost", port = 54321, startH2O = TRUE, silentUpgrade = TRUE, promptUpgrade = FALSE) ausPath = system.file("extdata", "australia.csv", package="h2oRClient") australia.hex = h2o.importFile(localH2O, path = ausPath) head(australia.hex, 10) }
/R/h2oRClient-package/man/head.Rd
permissive
cloudtrends/h2o
R
false
false
820
rd
\name{head} \alias{head} \title{Obtains the first rows of an H2O parsed data object } \description{ Obtains the first rows of an H2O parsed data object } \usage{ head(x, ...) } %- maybe also 'usage' for other objects documented here. \arguments{ \item{x}{An H2O parsed data object } \item{\dots}{The number of rows to be returned (the default count is 6). } } \value{ The first 6 rows of an H2O parsed data frame, unless the number of rows is otherwise specified to be N, in which case the first N rows are returned. } \examples{ library(h2o) localH2O = h2o.init(ip = "localhost", port = 54321, startH2O = TRUE, silentUpgrade = TRUE, promptUpgrade = FALSE) ausPath = system.file("extdata", "australia.csv", package="h2oRClient") australia.hex = h2o.importFile(localH2O, path = ausPath) head(australia.hex, 10) }
#' @include Gap_filling.R
NULL
#' @import progress
#' @import data.table
#' @importFrom tools file_path_sans_ext
#' @importFrom xcms xcmsRaw
#' @importFrom xcms rawEIC
NULL
#' @title Fill MS-gaps using SAX
#'
#' @description Function that fills gaps utilizing SAX (Symbol Aggregate approXimation)
#'
#' @param peak_table A mandatory character vector giving the path to an aligned peak table in csv format or a data.table that is the aligned peak table. See the Details section for more information on how this parameter and the filenames parameter interact.
#' @param mzML_filenames The names of the .mzML files that are referenced in the peak table. See the Details section for more information
#' @param SAXGAP_params A list containing the main parameters for the SAXGAP function. See the Details section for more information
#' @param output_file An optional character vector giving the filepath of where the output table should be stored.
#'
#' @details
#' \subsection{peak_table and mzML_filename:}{
#'
#' The peak_table and mzML_filenames parameter need to have the same underlying names for the storage of the relationship of the intensities and retention times to the files.
#' If, for example, one supplies the file \code{Sample_1.mzML} then the peak_table MUST contain a column named either \code{Intensity_Sample_1} or \code{Intensity_Sample_1.mzML}. The same holds true for
#' retention time, i.e. the table needs to have a column named either \code{RT_Sample_1.mzML} or \code{RT_Sample_1}. The peak table also needs general \code{mz} and \code{RT} columns. All retention times must be provided in seconds!
#'
#' }
#'
#' \subsection{SAXGAP_params:}{
#' The SAXGAP_params parameter must be a named list containing the parameters:
#' \describe{
#' \item{a}{The alphabet size of the SAX conversion and comparison}
#' \item{w}{The word length for peaks}
#' \item{t}{The length of the extracted EICs}
#' \item{pe}{The percentage cutoff for peak removal}
#' \item{dppm}{The instrument-specific allowed ppm error of the peaks}
#' \item{intensity_threshold}{The minimum intensity for peaks}
#' }
#' }
#'
#' @return The function returns the gap filled table with the gap filled peaks stored as a negative value and an added \code{Removal_Candidate} column that signifies whether a peak was likely picked in error.
#' If output_file is set it writes the same table to the path specified by output_file.
#'
#' @export
SAXGAP <- function(peak_table,
                   mzML_filenames,
                   SAXGAP_params = list(a = 6, w = 6, t = 14, pe = 0.9,
                                        intensity_threshold = 1e4, dppm = 5),
                   output_file) {
  # Parameter check and read section

  # Check the peak_table parameter.
  # Is it a filepath? If it is, read the file; otherwise it must already be
  # a data.table.
  if (is.character(peak_table)) {
    if (file.exists(peak_table)) {
      peak_table <- fread(peak_table)
    } else {
      stop(paste("File", peak_table, "does not exist"))
    }
  } else {
    if (!is.data.table(peak_table)) {
      stop("peak_table must be an object of class data.table")
    }
  }

  # Check the mzML_filenames parameter: do all files exist on disk?
  if (is.character(mzML_filenames)) {
    mzML_filenames_exist <- file.exists(mzML_filenames)
    if (!all(mzML_filenames_exist)) {
      not_existing_mzML_files <- mzML_filenames[!mzML_filenames_exist]
      if (length(not_existing_mzML_files) == 1) {
        stop(paste0("File does not exist:\n", not_existing_mzML_files))
      }
      if (length(not_existing_mzML_files) > 1) {
        stop(paste0("Files do not exist:\n",
                    paste(not_existing_mzML_files, collapse = "\n")))
      }
    }
  } else {
    stop("mzML_filenames must be a character vector containg filenames")
  }

  # Check the SAXGAP_params parameter: a list with the proper named elements?
  if (!is.list(SAXGAP_params) ||
      !all(c("a", "w", "t", "pe", "intensity_threshold", "dppm") %in% names(SAXGAP_params))) {
    stop("SAXGAP_params must be a list containing named elements \"a\",\"w\",\"t\",\"pe\",\"intensity_threshold\",\"dppm\"")
  }

  # Check the output_file parameter (if it is supplied):
  # does the folder exist where the table should go?
  if (!missing(output_file)) {
    dirname_output_file <- dirname(output_file)
    if (!dir.exists(dirname_output_file)) {
      stop(paste("The target directory of the output file", dirname_output_file, "does not exist"))
    }
  }

  # Get intensity and rt column names (without file extensions first).
  INT_column_names <- paste0("Intensity_", file_path_sans_ext(basename(mzML_filenames)))
  RT_column_names <- paste0("RT_", file_path_sans_ext(basename(mzML_filenames)))
  # If the column names are not found in the table, check if they are found
  # with the file extension kept. It's easy to check this, so we allow both
  # variants for user convenience.
  if (!any(INT_column_names %in% colnames(peak_table))) {
    INT_column_names <- paste0("Intensity_", basename(mzML_filenames))
  }
  if (!any(RT_column_names %in% colnames(peak_table))) {
    RT_column_names <- paste0("RT_", basename(mzML_filenames))
  }

  # Get intensity table and retention time table.
  INT_Table <- peak_table[, INT_column_names, with = FALSE]
  RT_Table <- peak_table[, RT_column_names, with = FALSE]
  nPeaks <- nrow(INT_Table)

  # Check if the intensity table has a column for every file.
  if (ncol(INT_Table) != length(INT_column_names)) {
    stop("The peak_table does not contain a properly named intensity column for every provided mzML_filename\nThe columns must have the same name as the mzML-files with \"Intensity_\" added in front")
  }
  # Check if the retention time table has a column for every file.
  if (ncol(RT_Table) != length(RT_column_names)) {
    stop("The peak_table does not contain a properly named retention time column for every provided mzML_filename\nThe columns must have the same name as the mzML-files with \"RT_\" added in front")
  }
  if (!all(c("mz", "RT") %in% colnames(peak_table))) {
    stop("The peak_table does not contain mandatory columns with names \"mz\",\"RT\"")
  }
  # Heuristic sanity check: retention times are expected in seconds.
  if (all(peak_table$RT < 60)) {
    warning("All retention times are smaller than 60, please note that retention times must be supplied in seconds")
  }

  # Set up variables for EIC extraction.
  SAX <- list()
  detected_peaks <- list()    # Which peaks have already been found?
  # Which peaks have already been found (but as a logical matrix)?
  detected_peaks_l <- matrix(FALSE, nPeaks, length(mzML_filenames))
  # Much faster with a matrix than with a data.table, so a one time
  # conversion just for this purpose.
  INT_matrix <- as.matrix(INT_Table)
  for (i in seq_len(nPeaks)) {
    SAX[[i]] <- vector()
    detected_peaks_l[i, ] <- INT_matrix[i, ] > 0
    detected_peaks[[i]] <- which(detected_peaks_l[i, ])
  }

  ### Set up the EIC extraction and SAX conversion loop
  # progress_bar stuff
  length_progressbar <- length(mzML_filenames) * nPeaks
  pb <- progress_bar$new(
    format = " [NonTplus] Generating SAX sequences [:bar] :percent ETA: :eta Elapsed: :elapsed ",
    total = length_progressbar, clear = FALSE, width = 80)

  # Variable setup, create new data.tables and EIClengths.
  i <- 0
  EIClengths <- matrix(0, nPeaks, length(mzML_filenames))
  INT_dt_new <- copy(INT_Table)
  RT_dt_new <- data.table(matrix(0, nrow(RT_Table), ncol(RT_Table)))
  colnames(RT_dt_new) <- RT_column_names

  # Calculate the ppm limits of the mz of each peak and get the SAX string
  # for empty EICs.
  mzranges <- t(sapply(peak_table$mz, .get_ppmlimits, SAXGAP_params$dppm))
  empty_SAX <- paste0(rep("a", SAXGAP_params$w), collapse = "")

  # Go through each file.
  for (f in mzML_filenames) {
    # Read the file.
    suppressMessages(XR <- xcmsRaw(f))
    # Go through each peak.
    for (p in seq_len(nPeaks)) {
      # Only update the progress bar every 1000 peaks, or else the updating
      # takes a noticable amount of time.
      if (p %% 1000 == 0) {
        pb$update((i * nPeaks + p) / length_progressbar)
      }
      # Get the mzrange for each peak.
      current_mzrange <- mzranges[p, ]
      # Get the RT - either it is peak-specific or we take the "general"
      # supplied RT.
      if (detected_peaks_l[p, i + 1]) {
        RT <- RT_Table[[i + 1]][p]
      } else {
        RT <- peak_table$RT[p]
      }
      # Retrieve the currently relevant EIC from the file.
      tempEIC <- rawEIC(XR, mzrange = current_mzrange,
                        rtrange = c(RT - 2 * SAXGAP_params$t, RT + 2 * SAXGAP_params$t))
      # Move the retention time window to center around the weighted means of
      # intensities - i.e. "Center the middle of the peak if there is one".
      intensities <- tempEIC$intensity
      retention_t <- XR@scantime[tempEIC$scan]
      # FIX: use the exported which() instead of .Internal(which(...));
      # identical result, and .Internal() must not be called from packages.
      ind <- which(retention_t > RT - SAXGAP_params$t & retention_t < RT + SAXGAP_params$t)
      new_RT <- weighted.mean(retention_t[ind], intensities[ind])
      if (!is.nan(new_RT)) {
        ind2 <- which(retention_t > new_RT - SAXGAP_params$t & retention_t < new_RT + SAXGAP_params$t)
        intensities <- intensities[ind2]
        retention_t <- retention_t[ind2]
      } else {
        # No signal at all in the window: keep the original window.
        new_RT <- 0
        ind <- which(retention_t > RT - SAXGAP_params$t & retention_t < RT + SAXGAP_params$t)
        intensities <- intensities[ind]
      }
      sumint <- sum(intensities)
      # Save the new Intensities and RT in their tables if we assume a peak
      # to be here. If there is a peak, get the new RT with the weighted mean
      # strategy.
      if (detected_peaks_l[p, i + 1]) {
        set(RT_dt_new, p, RT_column_names[i + 1], new_RT)
      } else {
        set(INT_dt_new, p, INT_column_names[i + 1], sumint)
        set(RT_dt_new, p, RT_column_names[i + 1], new_RT)
      }
      # If the intensity is below the threshold, pretend that it is empty,
      # otherwise create a SAX string.
      if (sumint < SAXGAP_params$intensity_threshold) {
        SAX[[p]][i + 1] <- empty_SAX
      } else {
        SAX[[p]][i + 1] <- paste0(.Func.SAX2(intensities, SAXGAP_params$w, SAXGAP_params$a, 0, TRUE), collapse = "")
      }
      # The EIC length is technically constant, but SAX is dependent on the
      # number of data points and in MS experiments the same timerange can
      # have a different number of datapoints at different RTs because scans
      # are not exactly taken at a constant rate but at an only *somewhat*
      # constant rate.
      EIClengths[p, i + 1] <- length(intensities)
    }
    # Clear the memory.
    gc(reset = TRUE, full = TRUE)
    i <- i + 1
  }
  # Finish up the progress bar in case it is not, because we only update it
  # every 1000 steps.
  if (!pb$finished) {
    pb$update(1)
  }

  # Get a list of all SAX strings that belong to a "detected peak".
  valid_SAX <- list()
  for (p in seq_along(SAX)) {
    valid_SAX[[p]] <- SAX[[p]][detected_peaks[[p]]]
  }
  # Reduce the "valid" SAX strings by the accepted percentage.
  # FIX: spell out SAXGAP_params$pe instead of relying on $ partial matching
  # via SAXGAP_params$p (fragile if the list ever gains another "p*" name).
  valid_SAX_table <- sort(table(do.call(c, valid_SAX)), decreasing = TRUE)
  valid_SAX_table_reduced <- valid_SAX_table[1:which(cumsum(valid_SAX_table) / sum(valid_SAX_table) > SAXGAP_params$pe)[1]]
  valid_SAX_sequences <- names(valid_SAX_table_reduced)

  # Setup for finding the consensus sequences.
  consensus_mean <- list()
  consensus_majority <- list()
  length_progressbar <- nPeaks
  pb <- progress_bar$new(
    format = " [NonTplus] Finding consensus sequences [:bar] :percent ETA: :eta Elapsed: :elapsed",
    total = length_progressbar, clear = FALSE, width = 80)
  # Go through each peak.
  for (p in seq_len(nPeaks)) {
    # If more than one peak has been detected, get the consensus.
    if (length(detected_peaks[[p]]) > 1) {
      SAX_correct <- do.call(rbind, strsplit(SAX[[p]][detected_peaks[[p]]], split = ""))
      consensus_mean[[p]] <- pseudo_consensus(SAX_correct)
      consensus_majority[[p]] <- consensus(SAX_correct)
    } else {
      # If only one was detected, it IS the consensus.
      consensus_mean[[p]] <- strsplit(SAX[[p]][detected_peaks[[p]]], split = "")
      consensus_majority[[p]] <- strsplit(SAX[[p]][detected_peaks[[p]]], split = "")
    }
    if (p %% 100 == 0) {
      pb$update(p / length_progressbar)
    }
  }
  # Finish progress bar.
  if (!pb$finished) {
    pb$update(1)
  }

  # Get distance matrix.
  dist_mat <- .Func.matrix2(SAXGAP_params$a)

  # Set up variables for classification.
  classification_function <- consensus_distance_sax
  consensus_fun <- consensus
  mean_s <- vector()
  sd_s <- vector()
  cutoff <- vector()
  A <- vector()
  count <- 0
  classFunc <- median
  res <- list()
  classes <- list()
  single_score_mats <- list()
  length_progressbar <- nrow(INT_Table)
  pb <- progress_bar$new(
    format = " [NonTplus] Adding new peaks [:bar] :percent ETA: :eta Elapsed: :elapsed",
    total = length_progressbar, clear = FALSE, width = 80)

  # Classification loop.
  rempeaks <- rep(FALSE, times = nPeaks)
  Removed_Peaks <- logical(length = nPeaks)
  for (p in seq_len(nPeaks)) {
    # Get the index of the files where the peak has been detected.
    training_index <- detected_peaks[[p]]
    # In case something with the peak picking has gone horribly wrong
    # (sometimes the RT doesn't fit).
    empty_index <- which(SAX[[p]][training_index] == empty_SAX)
    if (length(empty_index) != 0) {
      training_index <- training_index[-empty_index]
    }
    test_index <- which(!detected_peaks_l[p, ])
    if (length(test_index) == 0) {
      next
    }
    # Convert the SAX sequences to a table.
    SAXtable <- do.call(rbind, strsplit(SAX[[p]], split = ""))
    # Differentiate method based on whether the current peak is low-coverage
    # or not.
    if (length(training_index) < 0.05 * length(mzML_filenames)) {
      # Low-coverage: see if the low-coverage SAX sequences are plausibly
      # peaks.
      current_SAX <- SAX[[p]][training_index]
      current_SAX_test <- SAX[[p]][test_index]
      if (!any(current_SAX %in% valid_SAX_sequences)) {
        # If not, remove the peak.
        rempeaks[p] <- TRUE
        count <- count + 1
        Removed_Peaks[p] <- TRUE
        classes[[p]] <- rep(FALSE, length(mzML_filenames))
      } else {
        # If they are plausibly peaks, just look which other peaks match any
        # peak of the valid ones (using the SAX distance function). See if
        # the not yet classified peaks have the same sequence as one of the
        # found peaks.
        SAX_matches <- which(sapply(current_SAX_test, function(SAX_test_sequence) {
          any(sapply(current_SAX, function(SAX_training_sequence) {
            .Func.dist2(strsplit(SAX_test_sequence, split = "")[[1]],
                        strsplit(SAX_training_sequence, split = "")[[1]],
                        dist_mat, n = mean(EIClengths[p, ]))
          }) == 0)
        }))
        classes[[p]] <- rep(FALSE, length(mzML_filenames))
        if (length(SAX_matches) != 0) {
          classes[[p]][test_index][SAX_matches] <- TRUE
        }
      }
    } else {
      # High-coverage: Mintest with a consensus sequence.
      res[[p]] <- classification_function(SAXtable, consensus_fun, mean(EIClengths[p, ]), training_index, dist_mat)
      # Get mean distances and standard deviations.
      single_score_mats[[p]] <- res[[p]][[1]]
      mean_s <- res[[p]][[2]]
      sd_s <- res[[p]][[3]]
      # If the mean is 0.
      if (is.nan(mean_s)) {
        mean_s <- 1
        sd_s <- 1
      }
      if (is.na(sd_s)) {
        sd_s <- 0
      }
      # The cutoff is the maximum distance of one of the training peaks.
      cutoff[p] <- max(single_score_mats[[p]][detected_peaks[[p]]])
      A[p] <- all(single_score_mats[[p]][detected_peaks[[p]]] <= cutoff[p])
      classes[[p]] <- (apply(single_score_mats[[p]], 1, classFunc) <= cutoff[p]) & !detected_peaks_l[p, ]
    }
    # Set the new intensities and retention times in the new tables.
    # Gap-filled intensities are stored negated to mark them as filled.
    for (s in which(classes[[p]])) {
      set(peak_table, p, INT_column_names[s], -INT_dt_new[[s]][p])
      set(peak_table, p, RT_column_names[s], RT_dt_new[[s]][p])
    }
    # FIX: update every 100 peaks; the original `if(p %% 100)` fired on every
    # iteration NOT divisible by 100, the inverse of the intent.
    if (p %% 100 == 0) {
      pb$update(p / nPeaks)
    }
  }
  if (!pb$finished) {
    pb$update(1)
  }

  # Clean up: flag removal candidates and optionally write the result.
  peak_table[, Removal_Candidate := rempeaks]
  if (!missing(output_file)) {
    fwrite(peak_table, file = output_file)
  }
  return(peak_table)
}
/R/Gap_filling.R
no_license
oolonek/SAX_Gap_Filling
R
false
false
17,895
r
#' @include Gap_filling.R
NULL
#' @import progress
#' @import data.table
#' @importFrom tools file_path_sans_ext
#' @importFrom xcms xcmsRaw
#' @importFrom xcms rawEIC
NULL
#' @title Fill MS-gaps using SAX
#'
#' @description Function that fills gaps utilizing SAX (Symbol Aggregate approXimation)
#'
#' @param peak_table A mandatory character vector giving the path to an aligned peak table in csv format or a data.table that is the aligned peak table. See the Details section for more information on how this parameter and the filenames parameter interact.
#' @param mzML_filenames The names of the .mzML files that are referenced in the peak table. See the Details section for more information
#' @param SAXGAP_params A list containing the main parameters for the SAXGAP function. See the Details section for more information
#' @param output_file An optional character vector giving the filepath of where the output table should be stored.
#'
#' @details
#' \subsection{peak_table and mzML_filename:}{
#'
#' The peak_table and mzML_filenames parameter need to have the same underlying names for the storage of the relationship of the intensities and retention times to the files.
#' If, for example, one supplies the file \code{Sample_1.mzML} then the peak_table MUST contain a column named either \code{Intensity_Sample_1} or \code{Intensity_Sample_1.mzML}. The same holds true for
#' retention time, i.e. the table needs to have a column named either \code{RT_Sample_1.mzML} or \code{RT_Sample_1}. The peak table also needs general \code{mz} and \code{RT} columns. All retention times must be provided in seconds!
#'
#' }
#'
#' \subsection{SAXGAP_params:}{
#' The SAXGAP_params parameter must be a named list containing the parameters:
#' \describe{
#' \item{a}{The alphabet size of the SAX conversion and comparison}
#' \item{w}{The word length for peaks}
#' \item{t}{The length of the extracted EICs}
#' \item{pe}{The percentage cutoff for peak removal}
#' \item{dppm}{The instrument-specific allowed ppm error of the peaks}
#' \item{intensity_threshold}{The minimum intensity for peaks}
#' }
#' }
#'
#' @return The function returns the gap filled table with the gap filled peaks stored as a negative value and an added \code{Removal_Candidate} column that signifies whether a peak was likely picked in error.
#' If output_file is set it writes the same table to the path specified by output_file.
#'
#' @export
SAXGAP <- function(peak_table,
                   mzML_filenames,
                   SAXGAP_params = list(a = 6, w = 6, t = 14, pe = 0.9,
                                        intensity_threshold = 1e4, dppm = 5),
                   output_file) {
  # Parameter check and read section

  # Check the peak_table parameter.
  # Is it a filepath? If it is, read the file; otherwise it must already be
  # a data.table.
  if (is.character(peak_table)) {
    if (file.exists(peak_table)) {
      peak_table <- fread(peak_table)
    } else {
      stop(paste("File", peak_table, "does not exist"))
    }
  } else {
    if (!is.data.table(peak_table)) {
      stop("peak_table must be an object of class data.table")
    }
  }

  # Check the mzML_filenames parameter: do all files exist on disk?
  if (is.character(mzML_filenames)) {
    mzML_filenames_exist <- file.exists(mzML_filenames)
    if (!all(mzML_filenames_exist)) {
      not_existing_mzML_files <- mzML_filenames[!mzML_filenames_exist]
      if (length(not_existing_mzML_files) == 1) {
        stop(paste0("File does not exist:\n", not_existing_mzML_files))
      }
      if (length(not_existing_mzML_files) > 1) {
        stop(paste0("Files do not exist:\n",
                    paste(not_existing_mzML_files, collapse = "\n")))
      }
    }
  } else {
    stop("mzML_filenames must be a character vector containg filenames")
  }

  # Check the SAXGAP_params parameter: a list with the proper named elements?
  if (!is.list(SAXGAP_params) ||
      !all(c("a", "w", "t", "pe", "intensity_threshold", "dppm") %in% names(SAXGAP_params))) {
    stop("SAXGAP_params must be a list containing named elements \"a\",\"w\",\"t\",\"pe\",\"intensity_threshold\",\"dppm\"")
  }

  # Check the output_file parameter (if it is supplied):
  # does the folder exist where the table should go?
  if (!missing(output_file)) {
    dirname_output_file <- dirname(output_file)
    if (!dir.exists(dirname_output_file)) {
      stop(paste("The target directory of the output file", dirname_output_file, "does not exist"))
    }
  }

  # Get intensity and rt column names (without file extensions first).
  INT_column_names <- paste0("Intensity_", file_path_sans_ext(basename(mzML_filenames)))
  RT_column_names <- paste0("RT_", file_path_sans_ext(basename(mzML_filenames)))
  # If the column names are not found in the table, check if they are found
  # with the file extension kept. It's easy to check this, so we allow both
  # variants for user convenience.
  if (!any(INT_column_names %in% colnames(peak_table))) {
    INT_column_names <- paste0("Intensity_", basename(mzML_filenames))
  }
  if (!any(RT_column_names %in% colnames(peak_table))) {
    RT_column_names <- paste0("RT_", basename(mzML_filenames))
  }

  # Get intensity table and retention time table.
  INT_Table <- peak_table[, INT_column_names, with = FALSE]
  RT_Table <- peak_table[, RT_column_names, with = FALSE]
  nPeaks <- nrow(INT_Table)

  # Check if the intensity table has a column for every file.
  if (ncol(INT_Table) != length(INT_column_names)) {
    stop("The peak_table does not contain a properly named intensity column for every provided mzML_filename\nThe columns must have the same name as the mzML-files with \"Intensity_\" added in front")
  }
  # Check if the retention time table has a column for every file.
  if (ncol(RT_Table) != length(RT_column_names)) {
    stop("The peak_table does not contain a properly named retention time column for every provided mzML_filename\nThe columns must have the same name as the mzML-files with \"RT_\" added in front")
  }
  if (!all(c("mz", "RT") %in% colnames(peak_table))) {
    stop("The peak_table does not contain mandatory columns with names \"mz\",\"RT\"")
  }
  # Heuristic sanity check: retention times are expected in seconds.
  if (all(peak_table$RT < 60)) {
    warning("All retention times are smaller than 60, please note that retention times must be supplied in seconds")
  }

  # Set up variables for EIC extraction.
  SAX <- list()
  detected_peaks <- list()    # Which peaks have already been found?
  # Which peaks have already been found (but as a logical matrix)?
  detected_peaks_l <- matrix(FALSE, nPeaks, length(mzML_filenames))
  # Much faster with a matrix than with a data.table, so a one time
  # conversion just for this purpose.
  INT_matrix <- as.matrix(INT_Table)
  for (i in seq_len(nPeaks)) {
    SAX[[i]] <- vector()
    detected_peaks_l[i, ] <- INT_matrix[i, ] > 0
    detected_peaks[[i]] <- which(detected_peaks_l[i, ])
  }

  ### Set up the EIC extraction and SAX conversion loop
  # progress_bar stuff
  length_progressbar <- length(mzML_filenames) * nPeaks
  pb <- progress_bar$new(
    format = " [NonTplus] Generating SAX sequences [:bar] :percent ETA: :eta Elapsed: :elapsed ",
    total = length_progressbar, clear = FALSE, width = 80)

  # Variable setup, create new data.tables and EIClengths.
  i <- 0
  EIClengths <- matrix(0, nPeaks, length(mzML_filenames))
  INT_dt_new <- copy(INT_Table)
  RT_dt_new <- data.table(matrix(0, nrow(RT_Table), ncol(RT_Table)))
  colnames(RT_dt_new) <- RT_column_names

  # Calculate the ppm limits of the mz of each peak and get the SAX string
  # for empty EICs.
  mzranges <- t(sapply(peak_table$mz, .get_ppmlimits, SAXGAP_params$dppm))
  empty_SAX <- paste0(rep("a", SAXGAP_params$w), collapse = "")

  # Go through each file.
  for (f in mzML_filenames) {
    # Read the file.
    suppressMessages(XR <- xcmsRaw(f))
    # Go through each peak.
    for (p in seq_len(nPeaks)) {
      # Only update the progress bar every 1000 peaks, or else the updating
      # takes a noticable amount of time.
      if (p %% 1000 == 0) {
        pb$update((i * nPeaks + p) / length_progressbar)
      }
      # Get the mzrange for each peak.
      current_mzrange <- mzranges[p, ]
      # Get the RT - either it is peak-specific or we take the "general"
      # supplied RT.
      if (detected_peaks_l[p, i + 1]) {
        RT <- RT_Table[[i + 1]][p]
      } else {
        RT <- peak_table$RT[p]
      }
      # Retrieve the currently relevant EIC from the file.
      tempEIC <- rawEIC(XR, mzrange = current_mzrange,
                        rtrange = c(RT - 2 * SAXGAP_params$t, RT + 2 * SAXGAP_params$t))
      # Move the retention time window to center around the weighted means of
      # intensities - i.e. "Center the middle of the peak if there is one".
      intensities <- tempEIC$intensity
      retention_t <- XR@scantime[tempEIC$scan]
      # FIX: use the exported which() instead of .Internal(which(...));
      # identical result, and .Internal() must not be called from packages.
      ind <- which(retention_t > RT - SAXGAP_params$t & retention_t < RT + SAXGAP_params$t)
      new_RT <- weighted.mean(retention_t[ind], intensities[ind])
      if (!is.nan(new_RT)) {
        ind2 <- which(retention_t > new_RT - SAXGAP_params$t & retention_t < new_RT + SAXGAP_params$t)
        intensities <- intensities[ind2]
        retention_t <- retention_t[ind2]
      } else {
        # No signal at all in the window: keep the original window.
        new_RT <- 0
        ind <- which(retention_t > RT - SAXGAP_params$t & retention_t < RT + SAXGAP_params$t)
        intensities <- intensities[ind]
      }
      sumint <- sum(intensities)
      # Save the new Intensities and RT in their tables if we assume a peak
      # to be here. If there is a peak, get the new RT with the weighted mean
      # strategy.
      if (detected_peaks_l[p, i + 1]) {
        set(RT_dt_new, p, RT_column_names[i + 1], new_RT)
      } else {
        set(INT_dt_new, p, INT_column_names[i + 1], sumint)
        set(RT_dt_new, p, RT_column_names[i + 1], new_RT)
      }
      # If the intensity is below the threshold, pretend that it is empty,
      # otherwise create a SAX string.
      if (sumint < SAXGAP_params$intensity_threshold) {
        SAX[[p]][i + 1] <- empty_SAX
      } else {
        SAX[[p]][i + 1] <- paste0(.Func.SAX2(intensities, SAXGAP_params$w, SAXGAP_params$a, 0, TRUE), collapse = "")
      }
      # The EIC length is technically constant, but SAX is dependent on the
      # number of data points and in MS experiments the same timerange can
      # have a different number of datapoints at different RTs because scans
      # are not exactly taken at a constant rate but at an only *somewhat*
      # constant rate.
      EIClengths[p, i + 1] <- length(intensities)
    }
    # Clear the memory.
    gc(reset = TRUE, full = TRUE)
    i <- i + 1
  }
  # Finish up the progress bar in case it is not, because we only update it
  # every 1000 steps.
  if (!pb$finished) {
    pb$update(1)
  }

  # Get a list of all SAX strings that belong to a "detected peak".
  valid_SAX <- list()
  for (p in seq_along(SAX)) {
    valid_SAX[[p]] <- SAX[[p]][detected_peaks[[p]]]
  }
  # Reduce the "valid" SAX strings by the accepted percentage.
  # FIX: spell out SAXGAP_params$pe instead of relying on $ partial matching
  # via SAXGAP_params$p (fragile if the list ever gains another "p*" name).
  valid_SAX_table <- sort(table(do.call(c, valid_SAX)), decreasing = TRUE)
  valid_SAX_table_reduced <- valid_SAX_table[1:which(cumsum(valid_SAX_table) / sum(valid_SAX_table) > SAXGAP_params$pe)[1]]
  valid_SAX_sequences <- names(valid_SAX_table_reduced)

  # Setup for finding the consensus sequences.
  consensus_mean <- list()
  consensus_majority <- list()
  length_progressbar <- nPeaks
  pb <- progress_bar$new(
    format = " [NonTplus] Finding consensus sequences [:bar] :percent ETA: :eta Elapsed: :elapsed",
    total = length_progressbar, clear = FALSE, width = 80)
  # Go through each peak.
  for (p in seq_len(nPeaks)) {
    # If more than one peak has been detected, get the consensus.
    if (length(detected_peaks[[p]]) > 1) {
      SAX_correct <- do.call(rbind, strsplit(SAX[[p]][detected_peaks[[p]]], split = ""))
      consensus_mean[[p]] <- pseudo_consensus(SAX_correct)
      consensus_majority[[p]] <- consensus(SAX_correct)
    } else {
      # If only one was detected, it IS the consensus.
      consensus_mean[[p]] <- strsplit(SAX[[p]][detected_peaks[[p]]], split = "")
      consensus_majority[[p]] <- strsplit(SAX[[p]][detected_peaks[[p]]], split = "")
    }
    if (p %% 100 == 0) {
      pb$update(p / length_progressbar)
    }
  }
  # Finish progress bar.
  if (!pb$finished) {
    pb$update(1)
  }

  # Get distance matrix.
  dist_mat <- .Func.matrix2(SAXGAP_params$a)

  # Set up variables for classification.
  classification_function <- consensus_distance_sax
  consensus_fun <- consensus
  mean_s <- vector()
  sd_s <- vector()
  cutoff <- vector()
  A <- vector()
  count <- 0
  classFunc <- median
  res <- list()
  classes <- list()
  single_score_mats <- list()
  length_progressbar <- nrow(INT_Table)
  pb <- progress_bar$new(
    format = " [NonTplus] Adding new peaks [:bar] :percent ETA: :eta Elapsed: :elapsed",
    total = length_progressbar, clear = FALSE, width = 80)

  # Classification loop.
  rempeaks <- rep(FALSE, times = nPeaks)
  Removed_Peaks <- logical(length = nPeaks)
  for (p in seq_len(nPeaks)) {
    # Get the index of the files where the peak has been detected.
    training_index <- detected_peaks[[p]]
    # In case something with the peak picking has gone horribly wrong
    # (sometimes the RT doesn't fit).
    empty_index <- which(SAX[[p]][training_index] == empty_SAX)
    if (length(empty_index) != 0) {
      training_index <- training_index[-empty_index]
    }
    test_index <- which(!detected_peaks_l[p, ])
    if (length(test_index) == 0) {
      next
    }
    # Convert the SAX sequences to a table.
    SAXtable <- do.call(rbind, strsplit(SAX[[p]], split = ""))
    # Differentiate method based on whether the current peak is low-coverage
    # or not.
    if (length(training_index) < 0.05 * length(mzML_filenames)) {
      # Low-coverage: see if the low-coverage SAX sequences are plausibly
      # peaks.
      current_SAX <- SAX[[p]][training_index]
      current_SAX_test <- SAX[[p]][test_index]
      if (!any(current_SAX %in% valid_SAX_sequences)) {
        # If not, remove the peak.
        rempeaks[p] <- TRUE
        count <- count + 1
        Removed_Peaks[p] <- TRUE
        classes[[p]] <- rep(FALSE, length(mzML_filenames))
      } else {
        # If they are plausibly peaks, just look which other peaks match any
        # peak of the valid ones (using the SAX distance function). See if
        # the not yet classified peaks have the same sequence as one of the
        # found peaks.
        SAX_matches <- which(sapply(current_SAX_test, function(SAX_test_sequence) {
          any(sapply(current_SAX, function(SAX_training_sequence) {
            .Func.dist2(strsplit(SAX_test_sequence, split = "")[[1]],
                        strsplit(SAX_training_sequence, split = "")[[1]],
                        dist_mat, n = mean(EIClengths[p, ]))
          }) == 0)
        }))
        classes[[p]] <- rep(FALSE, length(mzML_filenames))
        if (length(SAX_matches) != 0) {
          classes[[p]][test_index][SAX_matches] <- TRUE
        }
      }
    } else {
      # High-coverage: Mintest with a consensus sequence.
      res[[p]] <- classification_function(SAXtable, consensus_fun, mean(EIClengths[p, ]), training_index, dist_mat)
      # Get mean distances and standard deviations.
      single_score_mats[[p]] <- res[[p]][[1]]
      mean_s <- res[[p]][[2]]
      sd_s <- res[[p]][[3]]
      # If the mean is 0.
      if (is.nan(mean_s)) {
        mean_s <- 1
        sd_s <- 1
      }
      if (is.na(sd_s)) {
        sd_s <- 0
      }
      # The cutoff is the maximum distance of one of the training peaks.
      cutoff[p] <- max(single_score_mats[[p]][detected_peaks[[p]]])
      A[p] <- all(single_score_mats[[p]][detected_peaks[[p]]] <= cutoff[p])
      classes[[p]] <- (apply(single_score_mats[[p]], 1, classFunc) <= cutoff[p]) & !detected_peaks_l[p, ]
    }
    # Set the new intensities and retention times in the new tables.
    # Gap-filled intensities are stored negated to mark them as filled.
    for (s in which(classes[[p]])) {
      set(peak_table, p, INT_column_names[s], -INT_dt_new[[s]][p])
      set(peak_table, p, RT_column_names[s], RT_dt_new[[s]][p])
    }
    # FIX: update every 100 peaks; the original `if(p %% 100)` fired on every
    # iteration NOT divisible by 100, the inverse of the intent.
    if (p %% 100 == 0) {
      pb$update(p / nPeaks)
    }
  }
  if (!pb$finished) {
    pb$update(1)
  }

  # Clean up: flag removal candidates and optionally write the result.
  peak_table[, Removal_Candidate := rempeaks]
  if (!missing(output_file)) {
    fwrite(peak_table, file = output_file)
  }
  return(peak_table)
}
# Probit BART for binary outcomes.
#
# Fits a Bayesian Additive Regression Trees model with a probit link by
# delegating the MCMC to the compiled backend "cpbart" (registered elsewhere
# in this package). Beyond the fit, the function post-processes variable
# importance scores (VIP, within-type VIP, posterior inclusion probability,
# Metropolis-ratio based importance), merging the scores of dummified
# categorical predictors back onto their original columns.
#
# Arguments (selection):
#   x.train, y.train  training design matrix and binary response
#   x.test            optional test design matrix (empty matrix = none)
#   sparse/theta/omega/a/b/augment/rho  DART sparsity prior settings
#   numcut/usequants/cont/xinfo         cutpoint construction settings
#   k/power/base/split.prob             tree prior settings; split.prob is
#                                       "polynomial" or "exponential"
#   binaryOffset      probit offset; defaults to qnorm(mean(y.train))
#   ndpost/nskip/keepevery/nkeep*       MCMC chain length and thinning
#   transposed        TRUE when x.train is already p x n (as produced by a
#                     prior bartModelMatrix/t() call); then rm.const, grp,
#                     xnames (and categorical_idx if needed) must be supplied
#
# Returns an object of class 'pbart' (a list) with posterior draws
# (yhat.train/test, prob.train/test), tree draws, and importance summaries.
pbart = function(x.train, y.train, x.test=matrix(0.0,0,0),
                 sparse=FALSE, theta=0, omega=1,
                 a=0.5, b=1, augment=FALSE, rho=NULL,
                 xinfo=matrix(0.0,0,0), numcut=100L, usequants=FALSE,
                 cont=FALSE, rm.const=TRUE, grp = NULL, xnames = NULL,
                 categorical_idx = NULL,
                 k=2.0, power=2.0, base=-1.0, split.prob = "polynomial",
                 binaryOffset=NULL,
                 ntree=50L, ndpost=1000L, nskip=1000L, keepevery=1L,
                 nkeeptrain=ndpost, nkeeptest=ndpost,
                 nkeeptreedraws=ndpost, printevery=100L, transposed=FALSE)
{
    #--------------------------------------------------
    #data
    n = length(y.train)

    # Default probit offset: the z-score of the empirical success rate.
    if(length(binaryOffset)==0) binaryOffset=qnorm(mean(y.train))

    if(!transposed) {
        # Build the (dummified) model matrix and transpose to p x n as the
        # C++ backend expects.
        if(is.null(dim(x.train))) {
            xnames = "X"
        } else {
            xnames = dimnames(x.train)[[2]]  # predictor names before dummification
        }

        temp = bartModelMatrix(x.train, numcut, usequants=usequants,
                               cont=cont, xinfo=xinfo, rm.const=rm.const)
        x.train = t(temp$X)
        numcut = temp$numcut
        xinfo = temp$xinfo
        if(length(x.test)>0) {
            x.test = bartModelMatrix(x.test)
            # Drop the same constant columns that were removed from x.train.
            x.test = t(x.test[ , temp$rm.const])
        }
        rm.const <- temp$rm.const
        grp <- temp$grp
        rm(temp)

        if(length(grp)==0){
            p0 = nrow(x.train)
            grp <- 1:p0
        } else {
            p0 = length(unique(grp))  # number of predictors before dummification
        }
        # Indices of categorical predictors in the original design matrix:
        # any group that maps to more than one (dummy) column.
        categorical_idx = unique(grp)[which(sapply(unique(grp),
                                                   function(s) sum(s==grp)) > 1)]
    } else {
        # Caller claims x.train is already transposed/dummified; validate the
        # bookkeeping vectors that normally come from bartModelMatrix.
        if(any(length(rm.const)==0, length(grp)==0, length(xnames)==0))
            stop('Did not provide rm.const, grp and xnames for x.train after transpose!')
        if(is.logical(rm.const))
            stop('Did not provide rm.const for x.train after transpose!')
        if((length(grp) > length(unique(grp))) & (length(categorical_idx) <= 0))
            stop('Did not provide categorical_idx for x.train that contains categorical predictors!')

        p0 = length(unique(grp))
    }

    if(n!=ncol(x.train))
        stop('The length of y.train and the number of rows in x.train must be identical')

    p = nrow(x.train)   # number of (dummified) predictors
    np = ncol(x.test)   # number of test observations (0 if no test set)
    if(length(rho)==0) rho <- p

    # Resolve the tree-splitting prior. Negative `base` is a sentinel for
    # "use the default of the chosen split.prob".
    if(!(split.prob %in% c("polynomial", "exponential"))) {
        stop("split.prob is either polynomial or exponential.")
    } else {
        if(split.prob == "polynomial") {
            if(base < 0)
                base = 0.95
        }
        if(split.prob == "exponential") {
            power = -1.0  # flag value telling the backend to use exponential splits
            if(base < 0)
                base = 0.5
        }
    }

    #--------------------------------------------------
    #set nkeeps for thinning
    # Each nkeep* must divide ndpost evenly; otherwise fall back to ndpost.
    if((nkeeptrain!=0) & ((ndpost %% nkeeptrain) != 0)) {
        nkeeptrain=ndpost
        cat('*****nkeeptrain set to ndpost\n')
    }
    if((nkeeptest!=0) & ((ndpost %% nkeeptest) != 0)) {
        nkeeptest=ndpost
        cat('*****nkeeptest set to ndpost\n')
    }
    if((nkeeptreedraws!=0) & ((ndpost %% nkeeptreedraws) != 0)) {
        nkeeptreedraws=ndpost
        cat('*****nkeeptreedraws set to ndpost\n')
    }

    #--------------------------------------------------
    ptm <- proc.time()
    #call
    # All heavy lifting (MCMC over trees) happens in the compiled routine.
    res = .Call("cpbart",
                n,  #number of observations in training data
                p,  #dimension of x
                np, #number of observations in test data
                x.train,   #p*n training data x
                y.train,   #n*1 training data y
                x.test,    #p*np test data x
                ntree,
                numcut,
                ndpost*keepevery,
                nskip,
                power,
                base,
                binaryOffset,
                3/(k*sqrt(ntree)),  # tau: prior sd of leaf values
                sparse,
                theta,
                omega,
                grp,
                a,
                b,
                rho,
                augment,
                nkeeptrain,
                nkeeptest,
                nkeeptreedraws,
                printevery,
                xinfo
    )

    res$proc.time <- proc.time()-ptm

    #---------------------------------------------------------
    #returns
    # Shift latent draws by the offset and map through the probit link.
    if(nkeeptrain>0) {
        res$yhat.train = res$yhat.train+binaryOffset
        res$prob.train = pnorm(res$yhat.train)
        res$prob.train.mean <- apply(res$prob.train, 2, mean)
    } else {
        res$yhat.train <- NULL
    }
    if(np>0) {
        res$yhat.test = res$yhat.test+binaryOffset
        res$prob.test = pnorm(res$yhat.test)
        res$prob.test.mean <- apply(res$prob.test, 2, mean)
    } else {
        res$yhat.test <- NULL
    }
    if(nkeeptreedraws>0)
        names(res$treedraws$cutpoints) = dimnames(x.train)[[1]]

    #--------------------------------------------------
    #importance
    if(length(grp) == length(unique(grp))) {
        ## no dummy variables: importance columns map 1:1 to predictors
        dimnames(res$varcount)[[2]] = as.list(dimnames(x.train)[[1]])
        dimnames(res$varprob)[[2]] = as.list(dimnames(x.train)[[1]])

        ## vip: per-draw share of split counts, averaged over draws
        res$vip = colMeans(t(apply(res$varcount, 1, function(s) s/sum(s))))

        ## (marginal) posterior variable inclusion probability
        res$pvip = colMeans(res$varcount > 0)

        ## posterior s_j's (in DART)
        res$varprob.mean <- colMeans(res$varprob)

        ## mi (Metropolis importance)
        mr_vecs = lapply(res$mr_vecs, function(s) lapply(s, function(v) v[-1]))  # remove the meaningless first 0
        res$mr_vecs = mr_vecs
        mrmean = matrix(unlist(lapply(mr_vecs,
                                      function(s) lapply(s, function(v) ifelse(length(v)>0, mean(v), 0.0)))),
                        ncol = p, byrow = TRUE)
        res$mrmean = mrmean
        res$mi = colMeans(t(apply(mrmean, 1, function(s) s/sum(s))))
        names(res$mi) = as.list(dimnames(x.train)[[1]])
        dimnames(res$mrmean)[[2]] = as.list(xnames)
    } else {
        ## merge importance scores for dummy variables
        ## walk the (sorted-by-group) columns; consecutive columns with the
        ## same grp value are dummies of one original predictor and their
        ## scores are summed/concatenated into column j of the merged tables.
        varcount = matrix(NA, nrow = nkeeptreedraws, ncol = p0)
        varprob = matrix(NA, nrow = nkeeptreedraws, ncol = p0)

        mr_vecs = lapply(res$mr_vecs, function(s) list(s[[1]][-1]))

        varcount[, 1] = res$varcount[, 1]
        varprob[, 1] = res$varprob[, 1]

        j = 1
        for (l in 2:p) {
            if (grp[l] == grp[l-1]) {
                # same original predictor: accumulate into current column j
                varcount[, j] = varcount[, j] + res$varcount[, l]
                varprob[, j] = varprob[, j] + res$varprob[, l]
                for (i in 1:nkeeptreedraws) {
                    mr_vecs[[i]][[j]] = c(mr_vecs[[i]][[j]], res$mr_vecs[[i]][[l]][-1])
                }
            } else {
                # new original predictor: start column j+1
                j = j + 1
                varcount[, j] = res$varcount[, l]
                varprob[, j] = res$varprob[, l]
                for (i in 1:nkeeptreedraws) {
                    mr_vecs[[i]][[j]] = res$mr_vecs[[i]][[l]][-1]
                }
            }
        }

        dimnames(varcount)[[2]] = as.list(xnames)
        dimnames(varprob)[[2]] = as.list(xnames)

        res$varcount = varcount
        res$varprob = varprob
        res$mr_vecs = mr_vecs

        ## vip
        res$vip = colMeans(t(apply(varcount, 1, function(s) s/sum(s))))

        ## within-type vip: normalize split counts separately within the
        ## categorical predictors and within the continuous predictors.
        within_type_vip = rep(0, p0)
        for (i in 1:nkeeptreedraws) {
            if (sum(varcount[i, categorical_idx]) != 0) {
                within_type_vip[categorical_idx] = within_type_vip[categorical_idx] +
                    varcount[i, categorical_idx]/sum(varcount[i, categorical_idx])
            }
            if (sum(varcount[i, -categorical_idx]) != 0) {
                within_type_vip[-categorical_idx] = within_type_vip[-categorical_idx] +
                    varcount[i, -categorical_idx]/sum(varcount[i, -categorical_idx])
            }
        }
        res$within_type_vip = within_type_vip / nkeeptreedraws
        names(res$within_type_vip) = xnames

        ## (marginal) posterior variable inclusion probability
        res$pvip = colMeans(varcount > 0)

        ## posterior s_j's (in DART)
        res$varprob.mean <- colMeans(varprob)

        ## mi
        mrmean = matrix(unlist(lapply(mr_vecs,
                                      function(s) lapply(s, function(v) ifelse(length(v)>0, mean(v), 0.0)))),
                        ncol = p0, byrow = TRUE)
        res$mrmean = mrmean
        res$mi = colMeans(t(apply(mrmean, 1, function(s) s/sum(s))))
        dimnames(res$mrmean)[[2]] = as.list(xnames)
        names(res$mi) = as.list(xnames)
    }

    res$rm.const <- rm.const
    res$binaryOffset=binaryOffset
    attr(res, 'class') <- 'pbart'
    return(res)
}
/R/pbart.R
no_license
chujiluo/BNPqte
R
false
false
8,115
r
## pbart: probit BART for a binary outcome y.train, fit via the compiled
## routine "cpbart". Prepares the design matrix (including dummification of
## categorical predictors), runs the MCMC, back-transforms the latent draws
## to probabilities, and computes several variable-importance summaries
## (vip, pvip, within-type vip, Metropolis-ratio based "mi").
##
## Arguments (interface unchanged; defaults as declared):
##   x.train, y.train  - training predictors / binary response.
##   x.test            - optional test predictors (empty matrix = none).
##   sparse, theta, omega, a, b, augment, rho
##                     - DART sparse variable-selection prior settings.
##   xinfo, numcut, usequants, cont, rm.const
##                     - cutpoint construction options (see bartModelMatrix).
##   grp, xnames, categorical_idx
##                     - dummification bookkeeping; computed here unless
##                       transposed=TRUE, in which case the caller must
##                       supply them (validated below).
##   k, power, base, split.prob
##                     - tree prior; split.prob selects polynomial
##                       ("BART"-style) vs exponential depth penalty.
##   binaryOffset      - latent-scale offset; defaults to qnorm(mean(y)).
##   ntree, ndpost, nskip, keepevery, nkeeptrain, nkeeptest,
##   nkeeptreedraws, printevery
##                     - MCMC size / thinning / verbosity controls.
##   transposed        - TRUE when x.train is already p x n (pre-processed).
##
## Returns: a list of class 'pbart' with yhat/prob draws, tree draws,
## variable-importance summaries, rm.const and binaryOffset.
pbart = function(x.train, y.train, x.test=matrix(0.0,0,0),
                 sparse=FALSE, theta=0, omega=1,
                 a=0.5, b=1, augment=FALSE, rho=NULL,
                 xinfo=matrix(0.0,0,0), numcut=100L,
                 usequants=FALSE, cont=FALSE, rm.const=TRUE,
                 grp = NULL, xnames = NULL, categorical_idx = NULL,
                 k=2.0, power=2.0, base=-1.0,
                 split.prob = "polynomial",
                 binaryOffset=NULL,
                 ntree=50L, ndpost=1000L, nskip=1000L,
                 keepevery=1L,
                 nkeeptrain=ndpost, nkeeptest=ndpost,
                 nkeeptreedraws=ndpost,
                 printevery=100L, transposed=FALSE)
{
    #--------------------------------------------------
    #data
    n = length(y.train)

    # Default latent offset: probit of the observed success rate.
    if(length(binaryOffset)==0) binaryOffset=qnorm(mean(y.train))

    if(!transposed) {
        # Raw (n x p) input: build the model matrix, cutpoints and
        # grouping info, then transpose to the p x n layout cpbart expects.
        if(is.null(dim(x.train))) {
            xnames = "X"
        } else {
            xnames = dimnames(x.train)[[2]]  # predictor names before dummification
        }

        temp = bartModelMatrix(x.train, numcut, usequants=usequants,
                               cont=cont, xinfo=xinfo, rm.const=rm.const)
        x.train = t(temp$X)
        numcut = temp$numcut
        xinfo = temp$xinfo
        if(length(x.test)>0) {
            # Apply the same dummification/column removal to x.test.
            x.test = bartModelMatrix(x.test)
            x.test = t(x.test[ , temp$rm.const])
        }
        rm.const <- temp$rm.const
        grp <- temp$grp
        rm(temp)

        if(length(grp)==0){
            # No grouping returned: every column is its own predictor.
            p0 = nrow(x.train)
            grp <- 1:p0
        } else {
            p0 = length(unique(grp))  # number of predictors before dummification
        }
        # A group that appears more than once in grp is a categorical
        # predictor expanded into several dummy columns.
        categorical_idx = unique(grp)[which(sapply(unique(grp),
                                                   function(s) sum(s==grp)) > 1)]
        # indices of categorical predictors in the original design matrix
    } else {
        # Pre-transposed input: the caller must supply the bookkeeping
        # that would otherwise be computed above.
        if(any(length(rm.const)==0, length(grp)==0, length(xnames)==0))
            stop('Did not provide rm.const, grp and xnames for x.train after transpose!')
        if(is.logical(rm.const))
            stop('Did not provide rm.const for x.train after transpose!')
        if((length(grp) > length(unique(grp))) & (length(categorical_idx) <= 0))
            stop('Did not provide categorical_idx for x.train that contains categorical predictors!')
        p0 = length(unique(grp))
    }

    if(n!=ncol(x.train))
        stop('The length of y.train and the number of rows in x.train must be identical')

    p = nrow(x.train)   # number of columns AFTER dummification
    np = ncol(x.test)
    if(length(rho)==0) rho <- p  # DART prior default: rho = p

    # Tree split-probability prior: base < 0 means "use the default base
    # for the chosen family" (0.95 polynomial, 0.5 exponential); the
    # exponential family signals itself to the C++ code via power = -1.
    if(!(split.prob %in% c("polynomial", "exponential"))) {
        stop("split.prob is either polynomial or exponential.")
    } else {
        if(split.prob == "polynomial") {
            if(base < 0)
                base = 0.95
        }
        if(split.prob == "exponential") {
            power = -1.0
            if(base < 0)
                base = 0.5
        }
    }

    #--------------------------------------------------
    #set nkeeps for thinning
    # Each nkeep* must divide ndpost evenly; otherwise fall back to ndpost.
    if((nkeeptrain!=0) & ((ndpost %% nkeeptrain) != 0)) {
        nkeeptrain=ndpost
        cat('*****nkeeptrain set to ndpost\n')
    }
    if((nkeeptest!=0) & ((ndpost %% nkeeptest) != 0)) {
        nkeeptest=ndpost
        cat('*****nkeeptest set to ndpost\n')
    }
    if((nkeeptreedraws!=0) & ((ndpost %% nkeeptreedraws) != 0)) {
        nkeeptreedraws=ndpost
        cat('*****nkeeptreedraws set to ndpost\n')
    }

    #--------------------------------------------------
    ptm <- proc.time()
    #call
    # NOTE: arguments are positional; the order must match the cpbart
    # C++ entry point exactly.
    res = .Call("cpbart",
                n,  #number of observations in training data
                p,  #dimension of x
                np, #number of observations in test data
                x.train,  #p*n training data x
                y.train,  #n*1 training data y
                x.test,   #p*np test data x
                ntree,
                numcut,
                ndpost*keepevery,
                nskip,
                power,
                base,
                binaryOffset,
                3/(k*sqrt(ntree)),  # tau: prior sd of leaf values
                sparse,
                theta,
                omega,
                grp,
                a,
                b,
                rho,
                augment,
                nkeeptrain,
                nkeeptest,
                nkeeptreedraws,
                printevery,
                xinfo
    )

    res$proc.time <- proc.time()-ptm

    #---------------------------------------------------------
    #returns
    # Add the offset back to the latent draws and map through the probit
    # link to obtain success probabilities.
    if(nkeeptrain>0) {
        res$yhat.train = res$yhat.train+binaryOffset
        res$prob.train = pnorm(res$yhat.train)
        res$prob.train.mean <- apply(res$prob.train, 2, mean)
    } else {
        res$yhat.train <- NULL
    }
    if(np>0) {
        res$yhat.test = res$yhat.test+binaryOffset
        res$prob.test = pnorm(res$yhat.test)
        res$prob.test.mean <- apply(res$prob.test, 2, mean)
    } else {
        res$yhat.test <- NULL
    }
    if(nkeeptreedraws>0)
        # rownames of the transposed x.train are the (dummified) column names.
        names(res$treedraws$cutpoints) = dimnames(x.train)[[1]]

    #--------------------------------------------------
    #importance
    if(length(grp) == length(unique(grp))) {
        ## no dummy variables: importance columns map 1-1 to predictors
        dimnames(res$varcount)[[2]] = as.list(dimnames(x.train)[[1]])
        dimnames(res$varprob)[[2]] = as.list(dimnames(x.train)[[1]])

        ## vip: per-draw split-count proportions, averaged over draws
        res$vip = colMeans(t(apply(res$varcount, 1, function(s) s/sum(s))))

        ## (marginal) posterior variable inclusion probability
        res$pvip = colMeans(res$varcount > 0)

        ## posterior s_j's (in DART)
        res$varprob.mean <- colMeans(res$varprob)

        ## mi: mean Metropolis ratio per variable, normalized per draw
        mr_vecs = lapply(res$mr_vecs,
                         function(s) lapply(s, function(v) v[-1]))  # remove the meaningless first 0
        res$mr_vecs = mr_vecs
        mrmean = matrix(unlist(lapply(mr_vecs,
                                      function(s) lapply(s, function(v) ifelse(length(v)>0, mean(v), 0.0)))),
                        ncol = p, byrow = TRUE)
        res$mrmean = mrmean
        res$mi = colMeans(t(apply(mrmean, 1, function(s) s/sum(s))))
        names(res$mi) = as.list(dimnames(x.train)[[1]])
        dimnames(res$mrmean)[[2]] = as.list(xnames)
    } else {
        ## merge importance scores for dummy variables
        ## Columns sharing a grp value are summed back into one score per
        ## original predictor; relies on dummy columns of one predictor
        ## being contiguous in grp.
        varcount = matrix(NA, nrow = nkeeptreedraws, ncol = p0)
        varprob = matrix(NA, nrow = nkeeptreedraws, ncol = p0)
        mr_vecs = lapply(res$mr_vecs, function(s) list(s[[1]][-1]))

        varcount[, 1] = res$varcount[, 1]
        varprob[, 1] = res$varprob[, 1]

        j = 1  # index of the current merged (original) predictor
        for (l in 2:p) {
            if (grp[l] == grp[l-1]) {
                # Same original predictor: accumulate into column j.
                varcount[, j] = varcount[, j] + res$varcount[, l]
                varprob[, j] = varprob[, j] + res$varprob[, l]
                for (i in 1:nkeeptreedraws) {
                    mr_vecs[[i]][[j]] = c(mr_vecs[[i]][[j]], res$mr_vecs[[i]][[l]][-1])
                }
            } else {
                # New original predictor: start the next merged column.
                j = j + 1
                varcount[, j] = res$varcount[, l]
                varprob[, j] = res$varprob[, l]
                for (i in 1:nkeeptreedraws) {
                    mr_vecs[[i]][[j]] = res$mr_vecs[[i]][[l]][-1]
                }
            }
        }

        dimnames(varcount)[[2]] = as.list(xnames)
        dimnames(varprob)[[2]] = as.list(xnames)

        res$varcount = varcount
        res$varprob = varprob
        res$mr_vecs = mr_vecs

        ## vip
        res$vip = colMeans(t(apply(varcount, 1, function(s) s/sum(s))))

        ## within-type vip: normalize split counts separately within
        ## categorical and continuous predictors for each draw, skipping
        ## draws where a type received no splits at all.
        within_type_vip = rep(0, p0)
        for (i in 1:nkeeptreedraws) {
            if (sum(varcount[i, categorical_idx]) != 0) {
                within_type_vip[categorical_idx] = within_type_vip[categorical_idx] +
                    varcount[i, categorical_idx]/sum(varcount[i, categorical_idx])
            }
            if (sum(varcount[i, -categorical_idx]) != 0) {
                within_type_vip[-categorical_idx] = within_type_vip[-categorical_idx] +
                    varcount[i, -categorical_idx]/sum(varcount[i, -categorical_idx])
            }
        }
        res$within_type_vip = within_type_vip / nkeeptreedraws
        names(res$within_type_vip) = xnames

        ## (marginal) posterior variable inclusion probability
        res$pvip = colMeans(varcount > 0)

        ## posterior s_j's (in DART)
        res$varprob.mean <- colMeans(varprob)

        ## mi
        mrmean = matrix(unlist(lapply(mr_vecs,
                                      function(s) lapply(s, function(v) ifelse(length(v)>0, mean(v), 0.0)))),
                        ncol = p0, byrow = TRUE)
        res$mrmean = mrmean
        res$mi = colMeans(t(apply(mrmean, 1, function(s) s/sum(s))))
        dimnames(res$mrmean)[[2]] = as.list(xnames)
        names(res$mi) = as.list(xnames)
    }

    res$rm.const <- rm.const
    res$binaryOffset=binaryOffset
    attr(res, 'class') <- 'pbart'

    return(res)
}
\name{eVenn-package} \alias{eVenn-package} \alias{eVenn} \docType{package} \title{ A Powerful Tool to Quickly Compare Huge Lists and Draw Venn Diagrams } \description{ Compare lists (from 2 to infinite) and plot the results in a Venn diagram if (N<=4) with regulation details. It allows to produce a complete annotated file, merging the annotations of the compared lists. It is also possible to compute an overlaps table to show the overlaps proportions of all the couples of lists and draw proportional Venn diagrams. } \details{ \tabular{ll}{ Package: \tab eVenn\cr Type: \tab Package\cr Version: \tab 2.2.1\cr Date: \tab 2015-08-04\cr License: \tab GPL\cr LazyLoad: \tab yes\cr } } \author{ Author & Maintainer: Nicolas Cagnard <nicolas.cagnard@gmail.com> } \references{ \url{http://blog.mrbioinfo.com/} } \keyword{ package } \examples{ library(eVenn) YNdisplay = TRUE # Allows commentaries and display of the main steps of the process # Matrix of binary data data(Data_Binary_Matrix) evenn(matLists=Data_Binary_Matrix, display=YNdisplay, CompName="Binary_Matrix") # Matrix of folds # data(Data_Matrix_Of_Folds) # evenn(matLists=Data_Matrix_Of_Folds, display=YNdisplay, CompName="Matrix_Of_Folds") # Matrix of ratios # data(Data_Matrix_Of_Ratios) # evenn(matLists=Data_Matrix_Of_Ratios, display=YNdisplay, CompName="Matrix_Of_Ratios") # List of 2, 3 or 4 matrix w/wo modulations and w/wo profils data data(Data_Lists) # evenn(matLists=Data_Lists[c("List_1", "List_2", "List_3", "List_4")], annot=TRUE, # display=YNdisplay, CompName="Lists_4") # evenn(matLists=Data_Lists[c("List_1", "List_2", "List_3", "List_4")], ud=TRUE, # annot=TRUE, display=YNdisplay, CompName="Lists_4_UD") # evenn(matLists=Data_Lists[c("List_1", "List_2", "List_3", "List_4", "DataMoy")], # ud=TRUE, annot=TRUE, Profils=TRUE, display=YNdisplay, CompName="Lists_4_UD_Profils") }
/man/eVenn-package.Rd
no_license
NicolasCagnard/eVenn_v2.2.1
R
false
false
1,896
rd
\name{eVenn-package} \alias{eVenn-package} \alias{eVenn} \docType{package} \title{ A Powerful Tool to Quickly Compare Huge Lists and Draw Venn Diagrams } \description{ Compare lists (from 2 to infinite) and plot the results in a Venn diagram if (N<=4) with regulation details. It allows to produce a complete annotated file, merging the annotations of the compared lists. It is also possible to compute an overlaps table to show the overlaps proportions of all the couples of lists and draw proportional Venn diagrams. } \details{ \tabular{ll}{ Package: \tab eVenn\cr Type: \tab Package\cr Version: \tab 2.2.1\cr Date: \tab 2015-08-04\cr License: \tab GPL\cr LazyLoad: \tab yes\cr } } \author{ Author & Maintainer: Nicolas Cagnard <nicolas.cagnard@gmail.com> } \references{ \url{http://blog.mrbioinfo.com/} } \keyword{ package } \examples{ library(eVenn) YNdisplay = TRUE # Allows commentaries and display of the main steps of the process # Matrix of binary data data(Data_Binary_Matrix) evenn(matLists=Data_Binary_Matrix, display=YNdisplay, CompName="Binary_Matrix") # Matrix of folds # data(Data_Matrix_Of_Folds) # evenn(matLists=Data_Matrix_Of_Folds, display=YNdisplay, CompName="Matrix_Of_Folds") # Matrix of ratios # data(Data_Matrix_Of_Ratios) # evenn(matLists=Data_Matrix_Of_Ratios, display=YNdisplay, CompName="Matrix_Of_Ratios") # List of 2, 3 or 4 matrix w/wo modulations and w/wo profils data data(Data_Lists) # evenn(matLists=Data_Lists[c("List_1", "List_2", "List_3", "List_4")], annot=TRUE, # display=YNdisplay, CompName="Lists_4") # evenn(matLists=Data_Lists[c("List_1", "List_2", "List_3", "List_4")], ud=TRUE, # annot=TRUE, display=YNdisplay, CompName="Lists_4_UD") # evenn(matLists=Data_Lists[c("List_1", "List_2", "List_3", "List_4", "DataMoy")], # ud=TRUE, annot=TRUE, Profils=TRUE, display=YNdisplay, CompName="Lists_4_UD_Profils") }
### iUTAH GAMUT Aquatic Station ### Grab Sample download ### Version 1.1 ### Written by: Erin Fleming Jones, contact at erinfjones3@gmail.com ### Last updated: 4/23/2017 setwd("~/Box Sync/BYU/metagenomics") ####NOTES: Check ARBR gage code ## Load packages ## library("devtools") library("plyr") Sys.timezone() library("WaterML") ## Data server connections ## # services = GetServices() ##Lists URLs of all available datasets Loganserver= 'http://data.iutahepscor.org/LoganRiverWOF/cuahsi_1_1.asmx?WSDL' ##Conects to each web database RBserver= 'http://data.iutahepscor.org/RedButteCreekWOF/cuahsi_1_1.asmx?WSDL' PRserver= 'http://data.iutahepscor.org/ProvoRiverWOF/cuahsi_1_1.asmx?WSDL' variables=GetVariables(RBserver) ##Lists variable codes Logansites= GetSites(Loganserver) ##Lists site codes Provosites= GetSites(PRserver) RBsites= GetSites(RBserver) ### Set Date and times ### ### Logan ctrl F: 24Nov LRStartDate = "2014-11-24" ##yyyy-mm-dd format LREndDate = "2014-11-25" LR_FB_BADateTime= "2014-11-24 13:15:00" ##yyyy-mm-dd hh:mm:ss format, rounded to 0, 15, 30, or 45 minute mark LR_TG_BADateTime = "2014-11-24 11:45:00" LR_WaterLab_AADateTime = "2014-11-24 14:45:00" LR_MainStreet_BADateTime = "2014-11-24 15:00:00" LR_Mendon_AADateTime = "2014-11-24 15:30:00" ### Red Butte ctrl F: 18Nov RBStartDate = "2014-11-18" ##yyyy-mm-dd format RBEndDate = "2014-11-19" RB_KF_BADateTime = "2014-11-18 12:00:00" ##yyyy-mm-dd hh:mm:ss format, rounded to 0, 15, 30, or 45 minute mark RB_ARBR_AADateTime = "2014-11-18 15:00:00" RB_RBG_BADateTime = "2014-11-18 15:15:00" RB_CG_BADateTime = "2014-11-18 16:00:00" RB_FD_AADateTime = "2014-11-18 16:45:00" ### Provo ctrl F: 12Nov PRStartDate = "2014-11-12" ##yyyy-mm-dd format PREndDate ="2014-11-13" PR_ST_BADateTime = "2014-11-12 12:15:00" ##yyyy-mm-dd hh:mm:ss format, rounded to 0, 15, 30, or 45 minute mark PR_BJ_AADateTime = "2014-11-12 14:00:00" ## must be rounded to on the hour e.g. 
14:00:00 PR_LM_BADateTime = "2014-11-12 11:00:00" ## must be rounded to on the hour e.g. 14:00:00 PR_CH_AADateTime = "2014-11-12 13:00:00" ## must be rounded to on the hour e.g. 14:00:00 # Q- PR_BJ, PR_LM, PR_CH only available on the hour (for now) ##### Franklin Basin #### ##Franklin Basin Water Temp## #exo_Temp EXWTempFranklin<- GetValues(Loganserver, siteCode="iutah:LR_FB_BA", variableCode="iutah:WaterTemp_EXO", startDate = LRStartDate, endDate = LREndDate, qcID = "1" ) #Download 24 hrs EXWTempFranklin = subset(EXWTempFranklin[ which(EXWTempFranklin$time == LR_FB_BADateTime),] ) ### Subset 24-hr for single time point EXWTempFranklin$DataValue[EXWTempFranklin$DataValue==-9999.00] <- NA ### Substitute database NA for R NA #Turbidity_Temp TWTempFranklin<- GetValues(Loganserver, siteCode="iutah:LR_FB_BA", variableCode="iutah:WaterTemp_Turb", startDate = LRStartDate, endDate = LREndDate, qcID = "1" ) TWTempFranklin <- subset(TWTempFranklin[ which(TWTempFranklin$time == LR_FB_BADateTime),]) TWTempFranklin$DataValue[TWTempFranklin$DataValue==-9999.00] <- NA ##Franklin Basin pH## pHFranklin<- GetValues(Loganserver, siteCode="iutah:LR_FB_BA", variableCode="iutah:pH", startDate = LRStartDate, endDate = LREndDate, qcID = "1" ) pHFranklin = subset(pHFranklin[ which(pHFranklin$time == LR_FB_BADateTime),] ) pHFranklin$DataValue[pHFranklin$DataValue==-9999.00] <- NA ##Franklin Basin Specific Conductivity## SpConFranklin<- GetValues(Loganserver, siteCode="iutah:LR_FB_BA", variableCode="iutah:SpCond", startDate = LRStartDate, endDate = LREndDate, qcID = "1" ) SpConFranklin = subset(SpConFranklin[ which(SpConFranklin$time == LR_FB_BADateTime),] ) SpConFranklin$DataValue[SpConFranklin$DataValue==-9999.00] <- NA ##Franklin Basin Dissolved Oxygen## ODOFranklin<-GetValues(Loganserver, siteCode="iutah:LR_FB_BA", variableCode="iutah:ODO", startDate = LRStartDate, endDate = LREndDate, qcID = "1" ) ODOFranklin = subset(ODOFranklin[ which(ODOFranklin$time == LR_FB_BADateTime),] ) 
ODOFranklin$DataValue[ODOFranklin$DataValue==-9999.00] <- NA ##Franklin Basin Specific Turbidity## TurbFranklin<- GetValues(Loganserver, siteCode="iutah:LR_FB_BA", variableCode="iutah:TurbAvg", startDate = LRStartDate, endDate = LREndDate, qcID = "1" ) TurbFranklin = subset(TurbFranklin[ which(TurbFranklin$time == LR_FB_BADateTime),] ) TurbFranklin$DataValue[TurbFranklin$DataValue==-9999.00] <- NA ##Franklin Basin Gage## GageFranklin<- GetValues(Loganserver, siteCode="iutah:LR_FB_BA", variableCode="iutah:Stage", startDate = LRStartDate, endDate = LREndDate, qcID = "1" ) GageFranklin = subset(GageFranklin[ which(GageFranklin$time == LR_FB_BADateTime),] ) GageFranklin$DataValue[GageFranklin$DataValue==-9999.00] <- NA ##### Tony Grove #### ##Tony Grove Water Temp## #exo_Temp EXWTempTonyGrove<- GetValues(Loganserver, siteCode="iutah:LR_TG_BA", variableCode="iutah:WaterTemp_EXO", startDate = LRStartDate, endDate = LREndDate, qcID = "1" ) EXWTempTonyGrove = subset(EXWTempTonyGrove[ which(EXWTempTonyGrove$time == LR_TG_BADateTime),] ) EXWTempTonyGrove$DataValue[EXWTempTonyGrove$DataValue==-9999.00] <- NA #Turbidity_Temp TWTempTonyGrove<- GetValues(Loganserver, siteCode="iutah:LR_TG_BA", variableCode="iutah:WaterTemp_Turb", startDate = LRStartDate, endDate = LREndDate, qcID = "1" ) TWTempTonyGrove = subset(TWTempTonyGrove[ which(TWTempTonyGrove$time == LR_TG_BADateTime),] ) TWTempTonyGrove$DataValue[TWTempTonyGrove$DataValue==-9999.00] <- NA ##Tony Grove pH## pHTonyGrove<- GetValues(Loganserver, siteCode="iutah:LR_TG_BA", variableCode="iutah:pH", startDate = LRStartDate, endDate = LREndDate, qcID = "1" ) pHTonyGrove = subset(pHTonyGrove[ which(pHTonyGrove$time == LR_TG_BADateTime),] ) pHTonyGrove$DataValue[pHTonyGrove$DataValue==-9999.00] <- NA ##Tony Grove Specific Conductivity## SpConTonyGrove<- GetValues(Loganserver, siteCode="iutah:LR_TG_BA", variableCode="iutah:SpCond", startDate = LRStartDate, endDate = LREndDate, qcID = "1" ) SpConTonyGrove = subset(SpConTonyGrove[ 
which(SpConTonyGrove$time == LR_TG_BADateTime),] ) SpConTonyGrove$DataValue[SpConTonyGrove$DataValue==-9999.00] <- NA ##Tony Grove Dissolved Oxygen## ODOTonyGrove<- GetValues(Loganserver, siteCode="iutah:LR_TG_BA", variableCode="iutah:ODO", startDate = LRStartDate, endDate = LREndDate, qcID = "1" ) ODOTonyGrove = subset(ODOTonyGrove[ which(ODOTonyGrove$time == LR_TG_BADateTime),] ) ODOTonyGrove$DataValue[ODOTonyGrove$DataValue==-9999.00] <- NA ##Tony Grove Specific Turbidity## TurbTonyGrove<- GetValues(Loganserver, siteCode="iutah:LR_TG_BA", variableCode="iutah:TurbAvg", startDate = LRStartDate, endDate = LREndDate, qcID = "1" ) TurbTonyGrove = subset(TurbTonyGrove[ which(TurbTonyGrove$time == LR_TG_BADateTime),] ) TurbTonyGrove$DataValue[TurbTonyGrove$DataValue==-9999.00] <- NA ##Tony Grove Discharge (Gage)## GageTonyGrove<- GetValues(Loganserver, siteCode="iutah:LR_TG_BA", variableCode="iutah:Stage", startDate = LRStartDate, endDate = LREndDate, qcID = "1" ) GageTonyGrove = subset(GageTonyGrove[ which(GageTonyGrove$time == LR_TG_BADateTime),] ) GageTonyGrove$DataValue[GageTonyGrove$DataValue==-9999.00] <- NA ##### Water Lab advanced #### ##Water Lab Water Temperature## #exo_Temp EXWTempWaterLab<- GetValues(Loganserver, siteCode="iutah:LR_WaterLab_AA", variableCode="iutah:WaterTemp_EXO", startDate = LRStartDate, endDate = LREndDate, qcID = "1" ) EXWTempWaterLab = subset(EXWTempWaterLab[ which(EXWTempWaterLab$time == LR_WaterLab_AADateTime),] ) EXWTempWaterLab$DataValue[EXWTempWaterLab$DataValue==-9999.00] <- NA #Converts -9999.0 to NA #Turbidity_Temp TWTempWaterLab = GetValues(Loganserver, siteCode="iutah:LR_WaterLab_AA", variableCode="iutah:WaterTemp_Turb", startDate = LRStartDate, endDate = LREndDate, qcID = "1" ) TWTempWaterLab = subset(TWTempWaterLab[ which(TWTempWaterLab$time == LR_WaterLab_AADateTime),] ) TWTempWaterLab$DataValue[TWTempWaterLab$DataValue==-9999.00] <- NA ##Water Lab pH## pHWaterLab<- GetValues(Loganserver, siteCode="iutah:LR_WaterLab_AA", 
variableCode="iutah:pH", startDate = LRStartDate, endDate = LREndDate, qcID = "1" ) pHWaterLab = subset(pHWaterLab[ which(pHWaterLab$time == LR_WaterLab_AADateTime),] ) pHWaterLab$DataValue[pHWaterLab$DataValue==-9999.00] <- NA ##Water Lab Specific Conductivity## SpConWaterLab<- GetValues(Loganserver, siteCode="iutah:LR_WaterLab_AA", variableCode="iutah:SpCond", startDate = LRStartDate, endDate = LREndDate, qcID = "1" ) SpConWaterLab = subset(SpConWaterLab[ which(SpConWaterLab$time == LR_WaterLab_AADateTime),] ) SpConWaterLab$DataValue[SpConWaterLab$DataValue==-9999.00] <- NA ##Water Lab Dissolved Oxygen## ODOWaterLab<- GetValues(Loganserver, siteCode="iutah:LR_WaterLab_AA", variableCode="iutah:ODO", startDate = LRStartDate, endDate = LREndDate, qcID = "1" ) ODOWaterLab = subset(ODOWaterLab[ which(ODOWaterLab$time == LR_WaterLab_AADateTime),] ) ODOWaterLab$DataValue[ODOWaterLab$DataValue==-9999.00] <- NA ##Water Lab Specific Tubidity## TurbWaterLab<- GetValues(Loganserver, siteCode="iutah:LR_WaterLab_AA", variableCode="iutah:TurbAvg", startDate = LRStartDate, endDate = LREndDate, qcID = "1" ) TurbWaterLab = subset(TurbWaterLab[ which(TurbWaterLab$time == LR_WaterLab_AADateTime),] ) TurbWaterLab$DataValue[TurbWaterLab$DataValue==-9999.00] <- NA ##Water Lab fDOM## fDOMWaterLab<- GetValues(Loganserver, siteCode="iutah:LR_WaterLab_AA", variableCode="iutah:fDOM", startDate = LRStartDate, endDate = LREndDate, qcID = "1" ) fDOMWaterLab = subset(fDOMWaterLab[ which(fDOMWaterLab$time == LR_WaterLab_AADateTime),] ) fDOMWaterLab$DataValue[fDOMWaterLab$DataValue==-9999.00] <- NA ##Water Lab Chla## chlaWaterLab<- GetValues(Loganserver, siteCode="iutah:LR_WaterLab_AA", variableCode="iutah:chlorophyll", startDate = LRStartDate, endDate = LREndDate, qcID = "1" ) chlaWaterLab = subset(chlaWaterLab[ which(chlaWaterLab$time == LR_WaterLab_AADateTime),] ) chlaWaterLab$DataValue[chlaWaterLab$DataValue==-9999.00] <- NA ##Water Lab Blue Green Algae (BGA)## BGAWaterLab<- 
GetValues(Loganserver, siteCode="iutah:LR_WaterLab_AA", variableCode="iutah:BGA", startDate = LRStartDate, endDate = LREndDate, qcID = "1" ) BGAWaterLab = subset(BGAWaterLab[ which(BGAWaterLab$time == LR_WaterLab_AADateTime),] ) BGAWaterLab$DataValue[BGAWaterLab$DataValue==-9999.00] <- NA # ##Water Lab Nitrate## # NitrateWaterLab<- GetValues(Loganserver, siteCode="iutah:LR_WaterLab_AA", variableCode="iutah:Nitrate-N", startDate = LRStartDate, endDate = LREndDate, qcID = "1" ) # NitrateWaterLab = subset(NitrateWaterLab[ which(NitrateWaterLab$time == LR_WaterLab_AADateTime),] ) # NitrateWaterLab$DataValue[NitrateWaterLab$DataValue==-9999.00] <- NA ## Water Lab Gage height ## GageWaterLab<- GetValues(Loganserver, siteCode="iutah:LR_WaterLab_AA", variableCode="iutah:Stage", startDate = LRStartDate, endDate = LREndDate, qcID = "1" ) GageWaterLab = subset(GageWaterLab[ which(GageWaterLab$time == LR_WaterLab_AADateTime),] ) GageWaterLab$DataValue[GageWaterLab$DataValue==-9999.00] <- NA ##### Main Street #### ##Main Street Water Temp## #exo_Temp EXWTempMainSt<- GetValues(Loganserver, siteCode="iutah:LR_MainStreet_BA", variableCode="iutah:WaterTemp_EXO", startDate = LRStartDate, endDate = LREndDate, qcID = "1" ) EXWTempMainSt = subset(EXWTempMainSt[ which(EXWTempMainSt$time == LR_MainStreet_BADateTime),] ) EXWTempMainSt$DataValue[EXWTempMainSt$DataValue==-9999.00] <- NA #Turbidity_Temp TWTempMainSt<- GetValues(Loganserver, siteCode="iutah:LR_MainStreet_BA", variableCode="iutah:WaterTemp_Turb", startDate = LRStartDate, endDate = LREndDate, qcID = "1" ) TWTempMainSt = subset(TWTempMainSt[ which(TWTempMainSt$time == LR_MainStreet_BADateTime),] ) TWTempMainSt$DataValue[TWTempMainSt$DataValue==-9999.00] <- NA ##Main Street pH## pHMainSt<- GetValues(Loganserver, siteCode="iutah:LR_MainStreet_BA", variableCode="iutah:pH", startDate = LRStartDate, endDate = LREndDate, qcID = "1" ) pHMainSt = subset(pHMainSt[ which(pHMainSt$time == LR_MainStreet_BADateTime),] ) 
pHMainSt$DataValue[pHMainSt$DataValue==-9999.00] <- NA ##Main St Specific Conductivity## SpConMainSt<- GetValues(Loganserver, siteCode="iutah:LR_MainStreet_BA", variableCode="iutah:SpCond", startDate = LRStartDate, endDate = LREndDate, qcID = "1" ) SpConMainSt = subset(SpConMainSt[ which(SpConMainSt$time == LR_MainStreet_BADateTime),] ) SpConMainSt$DataValue[SpConMainSt$DataValue==-9999.00] <- NA ##Main St Dissolved Oxygen## ODOMainSt<- GetValues(Loganserver, siteCode="iutah:LR_MainStreet_BA", variableCode="iutah:ODO", startDate = LRStartDate, endDate = LREndDate, qcID = "1" ) ODOMainSt = subset(ODOMainSt[ which(ODOMainSt$time == LR_MainStreet_BADateTime),] ) ODOMainSt$DataValue[ODOMainSt$DataValue==-9999.00] <- NA ##Main St Specific Turbidity## TurbMainSt<- GetValues(Loganserver, siteCode="iutah:LR_MainStreet_BA", variableCode="iutah:TurbAvg", startDate = LRStartDate, endDate = LREndDate, qcID = "1" ) TurbMainSt = subset(TurbMainSt[ which(TurbMainSt$time == LR_MainStreet_BADateTime),] ) TurbMainSt$DataValue[TurbMainSt$DataValue==-9999.00] <- NA ##Main Street Gage height## GageMainSt<- GetValues(Loganserver, siteCode="iutah:LR_MainStreet_BA", variableCode="iutah:Stage", startDate = LRStartDate, endDate = LREndDate, qcID = "1" ) GageMainSt = subset(GageMainSt[ which(GageMainSt$time == LR_MainStreet_BADateTime),] ) GageMainSt$DataValue[GageMainSt$DataValue==-9999.00] <- NA ##### Mendon Rd advanced #### ##Mendon Rd Water Temp## #exo_Temp EXWTempMendon<- GetValues(Loganserver, siteCode="iutah:LR_Mendon_AA", variableCode="iutah:WaterTemp_EXO", startDate = LRStartDate, endDate = LREndDate, qcID = "0" ) EXWTempMendon = subset(EXWTempMendon[ which(EXWTempMendon$time == LR_Mendon_AADateTime),] ) EXWTempMendon$DataValue[EXWTempMendon$DataValue==-9999.00] <- NA #Turbidity_Temp TWTempMendon<- GetValues(Loganserver, siteCode="iutah:LR_Mendon_AA", variableCode="iutah:WaterTemp_Turb", startDate = LRStartDate, endDate = LREndDate, qcID = "0" ) TWTempMendon = subset(TWTempMendon[ 
which(TWTempMendon$time == LR_Mendon_AADateTime),] ) TWTempMendon$DataValue[TWTempMendon$DataValue==-9999.00] <- NA ##Mendon Rd pH## pHMendon<- GetValues(Loganserver, siteCode="iutah:LR_Mendon_AA", variableCode="iutah:pH", startDate = LRStartDate, endDate = LREndDate, qcID = "0" ) pHMendon = subset(pHMendon[ which(pHMendon$time == LR_Mendon_AADateTime),] ) pHMendon$DataValue[pHMendon$DataValue==-9999.00] <- NA ##Mendon Rd Specific Conductivity## SpConMendon<- GetValues(Loganserver, siteCode="iutah:LR_Mendon_AA", variableCode="iutah:SpCond", startDate = LRStartDate, endDate = LREndDate, qcID = "0" ) SpConMendon = subset(SpConMendon[ which(SpConMendon$time == LR_Mendon_AADateTime),] ) SpConMendon$DataValue[SpConMendon$DataValue==-9999.00] <- NA ##Mendon Rd Dissolved Oxygen## ODOMendon<- GetValues(Loganserver, siteCode="iutah:LR_Mendon_AA", variableCode="iutah:ODO", startDate = LRStartDate, endDate = LREndDate, qcID = "0" ) ODOMendon = subset(ODOMendon[ which(ODOMendon$time == LR_Mendon_AADateTime),] ) ODOMendon$DataValue[ODOMendon$DataValue==-9999.00] <- NA ##Mendon Rd Turbidity## TurbMendon<- GetValues(Loganserver, siteCode="iutah:LR_Mendon_AA", variableCode="iutah:TurbAvg", startDate = LRStartDate, endDate = LREndDate, qcID = "0" ) TurbMendon = subset(TurbMendon[ which(TurbMendon$time == LR_Mendon_AADateTime),] ) TurbMendon$DataValue[TurbMendon$DataValue==-9999.00] <- NA ##Mendon Rd fDOM## fDOMMendon<- GetValues(Loganserver, siteCode="iutah:LR_Mendon_AA", variableCode="iutah:fDOM", startDate = LRStartDate, endDate = LREndDate, qcID = "0" ) fDOMMendon = subset(fDOMMendon[ which(fDOMMendon$time == LR_Mendon_AADateTime),] ) fDOMMendon$DataValue[fDOMMendon$DataValue==-9999.00] <- NA ##Mendon Rd Chla## chlaMendon<- GetValues(Loganserver, siteCode="iutah:LR_Mendon_AA", variableCode="iutah:chlorophyll", startDate = LRStartDate, endDate = LREndDate, qcID = "1" ) chlaMendon = subset(chlaMendon[ which(chlaMendon$time == LR_Mendon_AADateTime),] ) 
chlaMendon$DataValue[chlaMendon$DataValue==-9999.00] <- NA ##Mendon Rd Blue Green Algea (BGA)## BGAMendon<- GetValues(Loganserver, siteCode="iutah:LR_Mendon_AA", variableCode="iutah:BGA", startDate = LRStartDate, endDate = LREndDate, qcID = "1" ) BGAMendon = subset(BGAMendon[ which(BGAMendon$time == LR_Mendon_AADateTime),] ) BGAMendon$DataValue[BGAMendon$DataValue==-9999.00] <- NA # ##Mendon Rd Nitrate## # NitrateMendon<- GetValues(Loganserver, siteCode="iutah:LR_Mendon_AA", variableCode="iutah:Nitrate-N", startDate = LRStartDate, endDate = LREndDate, qcID = "1" ) # NitrateMendon = subset(NitrateMendon[ which(NitrateMendon$time == LR_Mendon_AADateTime),] ) # NitrateMendon$DataValue[NitrateMendon$DataValue==-9999.00] <- NA ##Mendon Rd Gage height## GageMendon<- GetValues(Loganserver, siteCode="iutah:LR_Mendon_AA", variableCode="iutah:Stage", startDate = LRStartDate, endDate = LREndDate, qcID = "1" ) GageMendon = subset(GageMendon[ which(GageMendon$time == LR_Mendon_AADateTime),] ) GageMendon$DataValue[GageMendon$DataValue==-9999.00] <- NA ##### Logan Spreadsheet ##### Franklin24NovGrabSample <- data.frame(DateTime = EXWTempFranklin$time, Site = "LR_FB_BA", Temp = EXWTempFranklin$DataValue, pH = pHFranklin$DataValue, SpCon = SpConFranklin$DataValue, ODO = ODOFranklin$DataValue, Turbidity = TurbFranklin$DataValue, Gage = GageFranklin$DataValue) Franklin24NovGrabSample TonyGrove24NovGrabSample <- data.frame(DateTime = EXWTempTonyGrove$time, Site = "LR_TG_BA", Temp = EXWTempTonyGrove$DataValue, pH = pHTonyGrove$DataValue, SpCon = SpConTonyGrove$DataValue, ODO = ODOTonyGrove$DataValue, Turbidity = TurbTonyGrove$DataValue, Gage = GageTonyGrove$DataValue) TonyGrove24NovGrabSample WaterLab24NovGrabSample <- data.frame(DateTime = EXWTempWaterLab$time, Site = "LR_WaterLab_AA", Temp = EXWTempWaterLab$DataValue, pH = pHWaterLab$DataValue, SpCon = SpConWaterLab$DataValue, ODO = ODOWaterLab$DataValue, Turbidity = TurbWaterLab$DataValue, Gage = GageWaterLab$DataValue, BGA = 
BGAWaterLab$DataValue, chla = chlaWaterLab$DataValue, fDOM = fDOMWaterLab$DataValue) #Nitrate = NitrateWaterLab$DataValue) WaterLab24NovGrabSample MainSt24NovGrabSample <- data.frame(DateTime = EXWTempMainSt$time, Site = "LR_MainStreet_BA", Temp = EXWTempMainSt$DataValue, pH = pHMainSt$DataValue, SpCon = SpConMainSt$DataValue, ODO = ODOMainSt$DataValue, Turbidity = TurbMainSt$DataValue, Gage = GageMainSt$DataValue) MainSt24NovGrabSample Mendon24NovGrabSample <- data.frame(DateTime = EXWTempMendon$time, Site = "LR_Mendon_AA", Temp = EXWTempMendon$DataValue, pH = pHMendon$DataValue, SpCon = SpConMendon$DataValue, ODO = ODOMendon$DataValue, Turbidity = TurbMendon$DataValue, Gage = GageMendon$DataValue, BGA = BGAMendon$DataValue, chla = chlaMendon$DataValue, fDOM = fDOMMendon$DataValue) #Nitrate = NitrateMendon$DataValue) Mendon24NovGrabSample Logan24Nov<-rbind.fill(Franklin24NovGrabSample, TonyGrove24NovGrabSample, WaterLab24NovGrabSample, MainSt24NovGrabSample, Mendon24NovGrabSample ) Logan24Nov ##### Knowlton Fork #### ##Knowlton Fork Water Temp## #exo_Temp EXWTempKnowlton<- GetValues(RBserver, siteCode="iutah:RB_KF_BA", variableCode="iutah:WaterTemp_EXO", startDate = RBStartDate, endDate = RBEndDate, qcID = "1" ) EXWTempKnowlton = subset(EXWTempKnowlton[ which(EXWTempKnowlton$time ==RB_KF_BADateTime),]) EXWTempKnowlton$DataValue[EXWTempKnowlton$DataValue==-9999.00] <- NA #Turbidity_Temp TWTempKnowlton<- GetValues(RBserver, siteCode="iutah:RB_KF_BA", variableCode="iutah:WaterTemp_Turb", startDate = RBStartDate, endDate = RBEndDate, qcID = "1" ) TWTempKnowlton = subset(TWTempKnowlton[ which(TWTempKnowlton$time ==RB_KF_BADateTime),]) TWTempKnowlton$DataValue[TWTempKnowlton$DataValue==-9999.00] <- NA ##Knowlton Fork pH## pHKnowlton<- GetValues(RBserver, siteCode="iutah:RB_KF_BA", variableCode="iutah:pH", startDate = RBStartDate, endDate = RBEndDate, qcID = "1" ) pHKnowlton = subset(pHKnowlton[ which(pHKnowlton$time ==RB_KF_BADateTime),]) 
pHKnowlton$DataValue[pHKnowlton$DataValue==-9999.00] <- NA ##Knowlton Fork Specific Conductivity## SpConKnowlton<- GetValues(RBserver, siteCode="iutah:RB_KF_BA", variableCode="iutah:SpCond", startDate = RBStartDate, endDate = RBEndDate, qcID = "1" ) SpConKnowlton = subset(SpConKnowlton[ which(SpConKnowlton$time ==RB_KF_BADateTime),]) SpConKnowlton$DataValue[SpConKnowlton$DataValue==-9999.00] <- NA ##Knowlton Fork Dissolved Oxygen## ODOKnowlton<- GetValues(RBserver, siteCode="iutah:RB_KF_BA", variableCode="iutah:ODO", startDate = RBStartDate, endDate = RBEndDate, qcID = "1" ) ODOKnowlton = subset(ODOKnowlton[ which(ODOKnowlton$time ==RB_KF_BADateTime),]) ODOKnowlton$DataValue[ODOKnowlton$DataValue==-9999.00] <- NA ##Knowlton Fork Specific Turbidity## TurbKnowlton<- GetValues(RBserver, siteCode="iutah:RB_KF_BA", variableCode="iutah:TurbAvg", startDate = RBStartDate, endDate = RBEndDate, qcID = "1" ) TurbKnowlton = subset(TurbKnowlton[ which(TurbKnowlton$time ==RB_KF_BADateTime),]) TurbKnowlton$DataValue[TurbKnowlton$DataValue==-9999.00] <- NA ##Knowlton Fork Gage## GageKnowlton<- GetValues(RBserver, siteCode="iutah:RB_KF_BA", variableCode="iutah:Stage", startDate = RBStartDate, endDate = RBEndDate, qcID = "1" ) GageKnowlton = subset(GageKnowlton[ which(GageKnowlton$time ==RB_KF_BADateTime),]) GageKnowlton$DataValue[GageKnowlton$DataValue==-9999.00] <- NA ##### Above Red Butte Reservoir advanced #### ##Above RB Res Water Temp## #exo_Temp EXWTempARBR<- GetValues(RBserver, siteCode="iutah:RB_ARBR_AA", variableCode="iutah:WaterTemp_EXO", startDate = RBStartDate, endDate = RBEndDate, qcID = "1" ) EXWTempARBR = subset(EXWTempARBR[ which(EXWTempARBR$time ==RB_ARBR_AADateTime),]) EXWTempARBR$DataValue[EXWTempARBR$DataValue==-9999.00] <- NA #Turbidity_Temp TWTempARBR<- GetValues(RBserver, siteCode="iutah:RB_ARBR_AA", variableCode="iutah:WaterTemp_Turb", startDate = RBStartDate, endDate = RBEndDate, qcID = "1" ) TWTempARBR = subset(TWTempARBR[ which(TWTempARBR$time 
==RB_ARBR_AADateTime),]) TWTempARBR$DataValue[TWTempARBR$DataValue==-9999.00] <- NA ##Above RB Res pH## pHARBR<- GetValues(RBserver, siteCode="iutah:RB_ARBR_AA", variableCode="iutah:pH", startDate = RBStartDate, endDate = RBEndDate, qcID = "1" ) pHARBR = subset(pHARBR[ which(pHARBR$time ==RB_ARBR_AADateTime),]) pHARBR$DataValue[pHARBR$DataValue==-9999.00] <- NA ##Above RB Res Specific Conductivity## SpConARBR<- GetValues(RBserver, siteCode="iutah:RB_ARBR_AA", variableCode="iutah:SpCond", startDate = RBStartDate, endDate = RBEndDate, qcID = "1" ) SpConARBR = subset(SpConARBR[ which(SpConARBR$time ==RB_ARBR_AADateTime),]) SpConARBR$DataValue[SpConARBR$DataValue==-9999.00] <- NA ##Above RB Res Dissolved Oxygen## ODOARBR<- GetValues(RBserver, siteCode="iutah:RB_ARBR_AA", variableCode="iutah:ODO", startDate = RBStartDate, endDate = RBEndDate, qcID = "1" ) ODOARBR = subset(ODOARBR[ which(ODOARBR$time ==RB_ARBR_AADateTime),]) ODOARBR$DataValue[ODOARBR$DataValue==-9999.00] <- NA ##Above RB Res Turbidity## TurbARBR<- GetValues(RBserver, siteCode="iutah:RB_ARBR_AA", variableCode="iutah:TurbAvg", startDate = RBStartDate, endDate = RBEndDate, qcID = "1" ) TurbARBR = subset(TurbARBR[ which(TurbARBR$time ==RB_ARBR_AADateTime),]) TurbARBR$DataValue[TurbARBR$DataValue==-9999.00] <- NA ##Above RB Res fDOM## fDOMARBR<- GetValues(RBserver, siteCode="iutah:RB_ARBR_AA", variableCode="iutah:fDOM", startDate = RBStartDate, endDate = RBEndDate, qcID = "1" ) fDOMARBR = subset(fDOMARBR[ which(fDOMARBR$time ==RB_ARBR_AADateTime),]) fDOMARBR$DataValue[fDOMARBR$DataValue==-9999.00] <- NA ##Above Red Butte Res Chla## chlaARBR<- GetValues(RBserver, siteCode="iutah:RB_ARBR_AA", variableCode="iutah:chlorophyll", startDate = RBStartDate, endDate = RBEndDate, qcID = "1" ) chlaARBR = subset(chlaARBR[ which(chlaARBR$time ==RB_ARBR_AADateTime),]) chlaARBR$DataValue[chlaARBR$DataValue==-9999.00] <- NA ##Above RB Res Blue Green Algea (BGA)## BGAARBR<- GetValues(RBserver, siteCode="iutah:RB_ARBR_AA", 
variableCode="iutah:BGA", startDate = RBStartDate, endDate = RBEndDate, qcID = "1" ) BGAARBR = subset(BGAARBR[ which(BGAARBR$time ==RB_ARBR_AADateTime),]) BGAARBR$DataValue[BGAARBR$DataValue==-9999.00] <- NA # ##Above RB Res Nitrate## # NitrateARBR<- GetValues(RBserver, siteCode="iutah:RB_ARBR_AA", variableCode="iutah:Nitrate-N", startDate = RBStartDate, endDate = RBEndDate, qcID = "1" ) # NitrateARBR = subset(NitrateARBR[ which(NitrateARBR$time ==RB_ARBR_AADateTime),]) # NitrateARBR$DataValue[NitrateARBR$DataValue==-9999.00] <- NA ## Above Red Butte Res Gage height ## GageARBR<- GetValues(RBserver, siteCode="iutah:RB_ARBR_USGS", variableCode="iutah:USGSStage", startDate = RBStartDate, endDate = RBEndDate, qcID = "1" ) GageARBR = subset(GageARBR[ which(GageARBR$time ==RB_ARBR_AADateTime),]) GageARBR$DataValue[GageARBR$DataValue==-9999.00] <- NA ##### Red Butte Gate #### ##Red Butte Gate Water Temperature## #exo_Temp EXWTempRBG<- GetValues(RBserver, siteCode="iutah:RB_RBG_BA", variableCode="iutah:WaterTemp_EXO", startDate = RBStartDate, endDate = RBEndDate, qcID = "1" ) EXWTempRBG = subset(EXWTempRBG[which(EXWTempRBG$time == RB_RBG_BADateTime),]) EXWTempRBG$DataValue[EXWTempRBG$DataValue==-9999.00] <- NA #Converts -9999.0 to NA #Turbidity_Temp TWTempRBG<- GetValues(RBserver, siteCode="iutah:RB_RBG_BA", variableCode="iutah:WaterTemp_Turb", startDate = RBStartDate, endDate = RBEndDate, qcID = "1" ) TWTempRBG = subset(TWTempRBG[which(TWTempRBG$time == RB_RBG_BADateTime),]) TWTempRBG$DataValue[TWTempRBG$DataValue==-9999.00] <- NA ##Red Butte Gate pH## pHRBG<- GetValues(RBserver, siteCode="iutah:RB_RBG_BA", variableCode="iutah:pH", startDate = RBStartDate, endDate = RBEndDate, qcID = "1" ) pHRBG = subset(pHRBG[which(pHRBG$time == RB_RBG_BADateTime),]) pHRBG$DataValue[pHRBG$DataValue==-9999.00] <- NA ##Red Butte Gate Specific Conductivity## SpConRBG<- GetValues(RBserver, siteCode="iutah:RB_RBG_BA", variableCode="iutah:SpCond", startDate = RBStartDate, endDate = RBEndDate, 
qcID = "1" ) SpConRBG = subset(SpConRBG[which(SpConRBG$time == RB_RBG_BADateTime),]) SpConRBG$DataValue[SpConRBG$DataValue==-9999.00] <- NA ##Red Butte Gate Dissolved Oxygen## ODORBG<- GetValues(RBserver, siteCode="iutah:RB_RBG_BA", variableCode="iutah:ODO", startDate = RBStartDate, endDate = RBEndDate, qcID = "1" ) ODORBG = subset(ODORBG[which(ODORBG$time == RB_RBG_BADateTime),]) ODORBG$DataValue[ODORBG$DataValue==-9999.00] <- NA ##Red Butte Gate Specific Tubidity## TurbRBG<- GetValues(RBserver, siteCode="iutah:RB_RBG_BA", variableCode="iutah:TurbAvg", startDate = RBStartDate, endDate = RBEndDate, qcID = "1" ) TurbRBG = subset(TurbRBG[which(TurbRBG$time == RB_RBG_BADateTime),]) TurbRBG$DataValue[TurbRBG$DataValue==-9999.00] <- NA ##Red Butte Gate Gage## GageRBG<- GetValues(RBserver, siteCode="iutah:RB_RBG_BA", variableCode="iutah:Stage", startDate = RBStartDate, endDate = RBEndDate, qcID = "1" ) GageRBG = subset(GageRBG[which(GageRBG$time == RB_RBG_BADateTime),]) GageRBG$DataValue[GageRBG$DataValue==-9999.00] <- NA ##### Cottams Grove #### ##Cottams Grove Water Temp## #exo_Temp EXWTempCGrove<- GetValues(RBserver, siteCode="iutah:RB_CG_BA", variableCode="iutah:WaterTemp_EXO", startDate = RBStartDate, endDate = RBEndDate, qcID = "1" ) EXWTempCGrove= subset(EXWTempCGrove[which(EXWTempCGrove$time == RB_CG_BADateTime),]) EXWTempCGrove$DataValue[EXWTempCGrove$DataValue==-9999.00] <- NA #Turbidity_Temp TWTempCGrove<- GetValues(RBserver, siteCode="iutah:RB_CG_BA", variableCode="iutah:WaterTemp_Turb", startDate = RBStartDate, endDate = RBEndDate, qcID = "1" ) TWTempCGrove= subset(TWTempCGrove[which(TWTempCGrove$time == RB_CG_BADateTime),]) TWTempCGrove$DataValue[TWTempCGrove$DataValue==-9999.00] <- NA ##Cottams Grove pH## pHCGrove<- GetValues(RBserver, siteCode="iutah:RB_CG_BA", variableCode="iutah:pH", startDate = RBStartDate, endDate = RBEndDate, qcID = "1" ) pHCGrove= subset(pHCGrove[which(pHCGrove$time == RB_CG_BADateTime),]) 
pHCGrove$DataValue[pHCGrove$DataValue==-9999.00] <- NA ##Cottams Grove Specific Conductivity## SpConCGrove<- GetValues(RBserver, siteCode="iutah:RB_CG_BA", variableCode="iutah:SpCond", startDate = RBStartDate, endDate = RBEndDate, qcID = "1" ) SpConCGrove= subset(SpConCGrove[which(SpConCGrove$time == RB_CG_BADateTime),]) SpConCGrove$DataValue[SpConCGrove$DataValue==-9999.00] <- NA ##Cottams Grove Dissolved Oxygen## ODOCGrove<- GetValues(RBserver, siteCode="iutah:RB_CG_BA", variableCode="iutah:ODO", startDate = RBStartDate, endDate = RBEndDate, qcID = "1" ) ODOCGrove= subset(ODOCGrove[which(ODOCGrove$time == RB_CG_BADateTime),]) ODOCGrove$DataValue[ODOCGrove$DataValue==-9999.00] <- NA ##Cottams Grove Specific Turbidity## TurbCGrove<- GetValues(RBserver, siteCode="iutah:RB_CG_BA", variableCode="iutah:TurbAvg", startDate = RBStartDate, endDate = RBEndDate, qcID = "1" ) TurbCGrove= subset(TurbCGrove[which(TurbCGrove$time == RB_CG_BADateTime),]) TurbCGrove$DataValue[TurbCGrove$DataValue==-9999.00] <- NA ##Cottam Grove Gage height## GageCGrove<- GetValues(RBserver, siteCode="iutah:RB_CG_BA", variableCode="iutah:Stage", startDate = RBStartDate, endDate = RBEndDate, qcID = "1" ) GageCGrove= subset(GageCGrove[which(GageCGrove$time == RB_CG_BADateTime),]) GageCGrove$DataValue[GageCGrove$DataValue==-9999.00] <- NA ##### Foothill Drive advanced #### ##Foothill Drive Water Temp## #exo_Temp EXWTempFoothill<- GetValues(RBserver, siteCode="iutah:RB_FD_AA", variableCode="iutah:WaterTemp_EXO", startDate = RBStartDate, endDate = RBEndDate, qcID = "1" ) EXWTempFoothill= subset(EXWTempFoothill[which(EXWTempFoothill$time == RB_FD_AADateTime),]) EXWTempFoothill$DataValue[EXWTempFoothill$DataValue==-9999.00] <- NA #Turbidity_Temp TWTempFoothill<- GetValues(RBserver, siteCode="iutah:RB_FD_AA", variableCode="iutah:WaterTemp_Turb", startDate = RBStartDate, endDate = RBEndDate, qcID = "1" ) TWTempFoothill= subset(TWTempFoothill[which(TWTempFoothill$time == RB_FD_AADateTime),]) 
TWTempFoothill$DataValue[TWTempFoothill$DataValue==-9999.00] <- NA ##Foothill Drive pH## pHFoothill<- GetValues(RBserver, siteCode="iutah:RB_FD_AA", variableCode="iutah:pH", startDate = RBStartDate, endDate = RBEndDate, qcID = "1" ) pHFoothill= subset(pHFoothill[which(pHFoothill$time == RB_FD_AADateTime),]) pHFoothill$DataValue[pHFoothill$DataValue==-9999.00] <- NA ##Foothill Drive Specific Conductivity## SpConFoothill<- GetValues(RBserver, siteCode="iutah:RB_FD_AA", variableCode="iutah:SpCond", startDate = RBStartDate, endDate = RBEndDate, qcID = "1" ) SpConFoothill= subset(SpConFoothill[which(SpConFoothill$time == RB_FD_AADateTime),]) SpConFoothill$DataValue[SpConFoothill$DataValue==-9999.00] <- NA ##Foothill Drive Dissolved Oxygen## ODOFoothill<- GetValues(RBserver, siteCode="iutah:RB_FD_AA", variableCode="iutah:ODO", startDate = RBStartDate, endDate = RBEndDate, qcID = "1" ) ODOFoothill= subset(ODOFoothill[which(ODOFoothill$time == RB_FD_AADateTime),]) ODOFoothill$DataValue[ODOFoothill$DataValue==-9999.00] <- NA ##Foothill Drive Specific Turbidity## TurbFoothill<- GetValues(RBserver, siteCode="iutah:RB_FD_AA", variableCode="iutah:TurbAvg", startDate = RBStartDate, endDate = RBEndDate, qcID = "1" ) TurbFoothill= subset(TurbFoothill[which(TurbFoothill$time == RB_FD_AADateTime),]) TurbFoothill$DataValue[TurbFoothill$DataValue==-9999.00] <- NA ##Foothill Drive fDOM## fDOMFoothill<- GetValues(RBserver, siteCode="iutah:RB_FD_AA", variableCode="iutah:fDOM", startDate = RBStartDate, endDate = RBEndDate, qcID = "1" ) fDOMFoothill= subset(fDOMFoothill[which(fDOMFoothill$time == RB_FD_AADateTime),]) fDOMFoothill$DataValue[fDOMFoothill$DataValue==-9999.00] <- NA ##Foothill Drive Chla## chlaFoothill<- GetValues(RBserver, siteCode="iutah:RB_FD_AA", variableCode="iutah:chlorophyll", startDate = RBStartDate, endDate = RBEndDate, qcID = "1" ) chlaFoothill= subset(chlaFoothill[which(chlaFoothill$time == RB_FD_AADateTime),]) 
chlaFoothill$DataValue[chlaFoothill$DataValue==-9999.00] <- NA ##Foothill Drive Blue Green Algae (BGA)## BGAFoothill<- GetValues(RBserver, siteCode="iutah:RB_FD_AA", variableCode="iutah:BGA", startDate = RBStartDate, endDate = RBEndDate, qcID = "1" ) BGAFoothill= subset(BGAFoothill[which(BGAFoothill$time == RB_FD_AADateTime),]) BGAFoothill$DataValue[BGAFoothill$DataValue==-9999.00] <- NA # ##Foothill Drive Nitrate## # NitrateFoothill<- GetValues(RBserver, siteCode="iutah:RB_FD_AA", variableCode="iutah:Nitrate-N", startDate = RBStartDate, endDate = RBEndDate, qcID = "1" ) # NitrateFoothill= subset(NitrateFoothill[which(NitrateFoothill$time == RB_FD_AADateTime),]) # NitrateFoothill$DataValue[NitrateFoothill$DataValue==-9999.00] <- NA ##Foothill Drive Gage height## GageFoothill<- GetValues(RBserver, siteCode="iutah:RB_FD_AA", variableCode="iutah:Stage", startDate = RBStartDate, endDate = RBEndDate, qcID = "1" ) GageFoothill= subset(GageFoothill[which(GageFoothill$time == RB_FD_AADateTime),]) GageFoothill$DataValue[GageFoothill$DataValue==-9999.00] <- NA ##### Red Butte Spreadsheet ##### Knowlton18NovGrabSample <- data.frame(DateTime = EXWTempKnowlton$time, Site = "RB_KF_BA", Temp = EXWTempKnowlton$DataValue, pH = pHKnowlton$DataValue, SpCon = SpConKnowlton$DataValue, ODO = ODOKnowlton$DataValue, Turbidity = TurbKnowlton$DataValue, Gage = GageKnowlton$DataValue) Knowlton18NovGrabSample ARBR18NovGrabSample <- data.frame(DateTime = EXWTempARBR$time, Site = "RB_ARBR_AA", Temp = EXWTempARBR$DataValue, pH = pHARBR$DataValue, SpCon = SpConARBR$DataValue, ODO = ODOARBR$DataValue, Turbidity = TurbARBR$DataValue, #Gage = GageARBR$DataValue, BGA = BGAARBR$DataValue, chla = chlaARBR$DataValue, fDOM = fDOMARBR$DataValue) #Nitrate = NitrateARBR$DataValue) ARBR18NovGrabSample RBG18NovGrabSample <- data.frame(DateTime = EXWTempRBG$time, Site = "RB_RBG_BA", Temp = EXWTempRBG$DataValue, pH = pHRBG$DataValue, SpCon = SpConRBG$DataValue, ODO = ODORBG$DataValue, Turbidity = 
TurbRBG$DataValue, Gage = GageRBG$DataValue) RBG18NovGrabSample CGrove18NovGrabSample <- data.frame(DateTime = EXWTempCGrove$time, Site = "RB_CG_BA", Temp = EXWTempCGrove$DataValue, pH = pHCGrove$DataValue, SpCon = SpConCGrove$DataValue, ODO = ODOCGrove$DataValue, Turbidity = TurbCGrove$DataValue, Gage = GageCGrove$DataValue) CGrove18NovGrabSample Foothill18NovGrabSample <- data.frame(DateTime = EXWTempFoothill$time, Site = "RB_FD_AA", Temp = EXWTempFoothill$DataValue, pH = pHFoothill$DataValue, SpCon = SpConFoothill$DataValue, ODO = ODOFoothill$DataValue, Turbidity = TurbFoothill$DataValue, Gage = GageFoothill$DataValue, BGA = BGAFoothill$DataValue, chla = chlaFoothill$DataValue, fDOM = fDOMFoothill$DataValue) #Nitrate = NitrateFoothill$DataValue) Foothill18NovGrabSample RedButte18Nov<- rbind.fill(Knowlton18NovGrabSample, ARBR18NovGrabSample, RBG18NovGrabSample, CGrove18NovGrabSample, Foothill18NovGrabSample) ##### Soapstone ##### ##Soapstone Water Temp #exo_Temp EXWTempST<- GetValues(PRserver, siteCode="iutah:PR_ST_BA", variableCode="iutah:WaterTemp_EXO", startDate = PRStartDate, endDate = PREndDate, qcID = "1" ) EXWTempST= subset(EXWTempST[which(EXWTempST$time == PR_ST_BADateTime),]) EXWTempST$DataValue[EXWTempST$DataValue==-9999.00] <- NA #Turbidity_Temp TWTempST<- GetValues(PRserver, siteCode="iutah:PR_ST_BA", variableCode="iutah:WaterTemp_Turb", startDate = PRStartDate, endDate = PREndDate, qcID = "1" ) TWTempST= subset(TWTempST[which(TWTempST$time == PR_ST_BADateTime),]) TWTempST$DataValue[TWTempST$DataValue==-9999.00] <- NA ##Soapstone pH## pHST<- GetValues(PRserver, siteCode="iutah:PR_ST_BA", variableCode="iutah:pH", startDate = PRStartDate, endDate = PREndDate, qcID = "1" ) pHST= subset(pHST[which(pHST$time == PR_ST_BADateTime),]) pHST$DataValue[pHST$DataValue==-9999.00] <- NA ##Soapstone Specific Conductivity## SpConST<- GetValues(PRserver, siteCode="iutah:PR_ST_BA", variableCode="iutah:SpCond", startDate = PRStartDate, endDate = PREndDate, qcID = "1" ) 
SpConST= subset(SpConST[which(SpConST$time == PR_ST_BADateTime),]) SpConST$DataValue[SpConST$DataValue==-9999.00] <- NA ##Soapstone Dissolved Oxygen## ODOST<- GetValues(PRserver, siteCode="iutah:PR_ST_BA", variableCode="iutah:ODO", startDate = PRStartDate, endDate = PREndDate, qcID = "1" ) ODOST= subset(ODOST[which(ODOST$time == PR_ST_BADateTime),]) ODOST$DataValue[ODOST$DataValue==-9999.00] <- NA ##Soapstone Specific Turbidity## TurbST<- GetValues(PRserver, siteCode="iutah:PR_ST_BA", variableCode="iutah:TurbAvg", startDate = PRStartDate, endDate = PREndDate, qcID = "1" ) TurbST= subset(TurbST[which(TurbST$time == PR_ST_BADateTime),]) TurbST$DataValue[TurbST$DataValue==-9999.00] <- NA ##Soapstone Stage## StageST<- GetValues(PRserver, siteCode="iutah:PR_ST_BA", variableCode="iutah:Stage", startDate = PRStartDate, endDate = PREndDate, qcID = "1" ) StageST= subset(StageST[which(StageST$time == PR_ST_BADateTime),]) StageST$DataValue[StageST$DataValue==-9999.00] <- NA ##### Below Jordanelle advanced ######### ##Below Jordanelle Temp #exo_Temp EXWTempBJ<- GetValues(PRserver, siteCode="iutah:PR_BJ_AA", variableCode="iutah:WaterTemp_EXO", startDate = PRStartDate, endDate = PREndDate, methodID= "99", qcID = "1" ) EXWTempBJ= subset(EXWTempBJ[which(EXWTempBJ$time == PR_BJ_AADateTime),]) EXWTempBJ$DataValue[EXWTempBJ$DataValue==-9999.00] <- NA #Turbidity_Temp TWTempBJ<- GetValues(PRserver, siteCode="iutah:PR_BJ_AA", variableCode="iutah:WaterTemp_Turb", startDate = PRStartDate, endDate = PREndDate, qcID = "1" ) TWTempBJ= subset(TWTempBJ[which(TWTempBJ$time == PR_BJ_AADateTime),]) TWTempBJ$DataValue[TWTempBJ$DataValue==-9999.00] <- NA ## Below Jordanelle pH ## pHBJ<- GetValues(PRserver, siteCode="iutah:PR_BJ_AA", variableCode="iutah:pH", startDate = PRStartDate, endDate = PREndDate, qcID = "1" ) pHBJ= subset(pHBJ[which(pHBJ$time == PR_BJ_AADateTime),]) pHBJ$DataValue[pHBJ$DataValue==-9999.00] <- NA ##Below Jordanelle Specific Conductivity## SpConBJ<- GetValues(PRserver, 
siteCode="iutah:PR_BJ_AA", variableCode="iutah:SpCond", startDate = PRStartDate, endDate = PREndDate, qcID = "1" ) SpConBJ= subset(SpConBJ[which(SpConBJ$time == PR_BJ_AADateTime),]) SpConBJ$DataValue[SpConBJ$DataValue==-9999.00] <- NA ##Below Jordanelle Dissolved Oxygen## ODOBJ<- GetValues(PRserver, siteCode="iutah:PR_BJ_AA", variableCode="iutah:ODO", startDate = PRStartDate, endDate = PREndDate, qcID = "1" ) ODOBJ= subset(ODOBJ[which(ODOBJ$time == PR_BJ_AADateTime),]) ##Below Jordanelle Turbidity## TurbBJ<- GetValues(PRserver, siteCode="iutah:PR_BJ_AA", variableCode="iutah:TurbAvg", startDate = PRStartDate, endDate = PREndDate, qcID = "1" ) TurbBJ= subset(TurbBJ[which(TurbBJ$time == PR_BJ_AADateTime),]) TurbBJ$DataValue[TurbBJ$DataValue==-9999.00] <- NA ##Below Jordanelle fDOM## fDOMBJ<- GetValues(PRserver, siteCode="iutah:PR_BJ_AA", variableCode="iutah:fDOM", startDate = PRStartDate, endDate = PREndDate, qcID = "1" ) fDOMBJ= subset(fDOMBJ[which(fDOMBJ$time == PR_BJ_AADateTime),]) fDOMBJ$DataValue[fDOMBJ$DataValue==-9999.00] <- NA ##Below Jordanelle Chla## chlaBJ<- GetValues(PRserver, siteCode="iutah:PR_BJ_AA", variableCode="iutah:chlorophyll", startDate = PRStartDate, endDate = PREndDate, qcID = "1" ) chlaBJ= subset(chlaBJ[which(chlaBJ$time == PR_BJ_AADateTime),]) chlaBJ$DataValue[chlaBJ$DataValue==-9999.00] <- NA ##Below Jordanelle Blue Green Algea (BGA)## BGABJ<- GetValues(PRserver, siteCode="iutah:PR_BJ_AA", variableCode="iutah:BGA", startDate = PRStartDate, endDate = PREndDate, qcID = "1" ) BGABJ= subset(BGABJ[which(BGABJ$time == PR_BJ_AADateTime),]) BGABJ$DataValue[BGABJ$DataValue==-9999.00] <- NA # ##Below Jordanelle Nitrate## # NitrateBJ<- GetValues(PRserver, siteCode="iutah:PR_BJ_AA", variableCode="iutah:Nitrate-N", startDate = PRStartDate, endDate = PREndDate, qcID = "1" ) # NitrateBJ= subset(NitrateBJ[which(NitrateBJ$time == PR_BJ_AADateTime),]) # NitrateBJ$DataValue[NitrateBJ$DataValue==-9999.00] <- NA ##Below Jordanelle Discharge (Q)## QBJ<- 
GetValues(PRserver, siteCode="iutah:PR_BJ_CUWCD", variableCode="iutah:CUWCDDischarge", startDate = PRStartDate, endDate = PREndDate, qcID = "1" ) QBJ= subset(QBJ[which(QBJ$time == PR_BJ_AADateTime),]) QBJ$DataValue[QBJ$DataValue==-9999.00] <- NA ##### Lower Midway ####### ##Lower Midway Water Temp #exo_Temp EXWTempLM<- GetValues(PRserver, siteCode="iutah:PR_LM_BA", variableCode="iutah:WaterTemp_EXO", startDate = PRStartDate, endDate = PREndDate, qcID = "1" ) EXWTempLM= subset(EXWTempLM[which(EXWTempLM$time == PR_LM_BADateTime),]) EXWTempLM$DataValue[EXWTempLM$DataValue==-9999.00] <- NA #Converts -9999.0 to NA #Turbidity_Temp TWTempLM<- GetValues(PRserver, siteCode="iutah:PR_LM_BA", variableCode="iutah:WaterTemp_Turb", startDate = PRStartDate, endDate = PREndDate, qcID = "1" ) TWTempLM= subset(TWTempLM[which(TWTempLM$time == PR_LM_BADateTime),]) TWTempLM$DataValue[TWTempLM$DataValue==-9999.00] <- NA ##Lower Midway pH## pHLM<- GetValues(PRserver, siteCode="iutah:PR_LM_BA", variableCode="iutah:pH", startDate = PRStartDate, endDate = PREndDate, qcID = "1" ) pHLM= subset(pHLM[which(pHLM$time == PR_LM_BADateTime),]) pHLM$DataValue[pHLM$DataValue==-9999.00] <- NA ##Lower Midway Specific Conductivity## SpConLM<- GetValues(PRserver, siteCode="iutah:PR_LM_BA", variableCode="iutah:SpCond", startDate = PRStartDate, endDate = PREndDate, qcID = "1" ) SpConLM= subset(SpConLM[which(SpConLM$time == PR_LM_BADateTime),]) SpConLM$DataValue[SpConLM$DataValue==-9999.00] <- NA ##Lower Midway Dissolved Oxygen## ODOLM<- GetValues(PRserver, siteCode="iutah:PR_LM_BA", variableCode="iutah:ODO", startDate = PRStartDate, endDate = PREndDate, qcID = "1" ) ODOLM= subset(ODOLM[which(ODOLM$time == PR_LM_BADateTime),]) ODOLM$DataValue[ODOLM$DataValue==-9999.00] <- NA ##Lower Midway Specific Tubidity## TurbLM<- GetValues(PRserver, siteCode="iutah:PR_LM_BA", variableCode="iutah:TurbAvg", startDate = PRStartDate, endDate = PREndDate, qcID = "1" ) TurbLM= subset(TurbLM[which(TurbLM$time == 
PR_LM_BADateTime),]) TurbLM$DataValue[TurbLM$DataValue==-9999.00] <- NA ##Lower Midway Discharge (Q)## QLM<- GetValues(PRserver, siteCode="iutah:PR_uM_CUWCD", variableCode="iutah:CUWCDDischarge", startDate = PRStartDate, endDate = PREndDate, qcID = "1" ) QLM= subset(QLM[which(QLM$time == PR_LM_BADateTime),]) QLM$DataValue[QLM$DataValue==-9999.00] <- NA ##### Charleston advanced ###### ## Charleston Water Temp #exo_Temp EXWTempCH<- GetValues(PRserver, siteCode="iutah:PR_CH_AA", variableCode="iutah:WaterTemp_EXO", startDate = PRStartDate, endDate = PREndDate, qcID = "1" ) EXWTempCH= subset(EXWTempCH[which(EXWTempCH$time == PR_CH_AADateTime),]) EXWTempCH$DataValue[EXWTempCH$DataValue==-9999.00] <- NA #Turbidity_Temp TWTempCH<- GetValues(PRserver, siteCode="iutah:PR_CH_AA", variableCode="iutah:WaterTemp_Turb", startDate = PRStartDate, endDate = PREndDate, qcID = "1" ) TWTempCH= subset(TWTempCH[which(TWTempCH$time == PR_CH_AADateTime),]) TWTempCH$DataValue[TWTempCH$DataValue==-9999.00] <- NA ##Charleston pH## pHCH<- GetValues(PRserver, siteCode="iutah:PR_CH_AA", variableCode="iutah:pH", startDate = PRStartDate, endDate = PREndDate, qcID = "1" ) pHCH= subset(pHCH[which(pHCH$time == PR_CH_AADateTime),]) pHCH$DataValue[pHCH$DataValue==-9999.00] <- NA ##Charleston Specific Conductivity## SpConCH<- GetValues(PRserver, siteCode="iutah:PR_CH_AA", variableCode="iutah:SpCond", startDate = PRStartDate, endDate = PREndDate, qcID = "1" ) SpConCH= subset(SpConCH[which(SpConCH$time == PR_CH_AADateTime),]) SpConCH$DataValue[SpConCH$DataValue==-9999.00] <- NA ##Charleston Dissolved Oxygen## ODOCH<- GetValues(PRserver, siteCode="iutah:PR_CH_AA", variableCode="iutah:ODO", startDate = PRStartDate, endDate = PREndDate, qcID = "1" ) ODOCH= subset(ODOCH[which(ODOCH$time == PR_CH_AADateTime),]) ODOCH$DataValue[ODOCH$DataValue==-9999.00] <- NA ##Charleston Specific Turbidity## TurbCH<- GetValues(PRserver, siteCode="iutah:PR_CH_AA", variableCode="iutah:TurbAvg", startDate = PRStartDate, endDate 
= PREndDate, qcID = "1" ) TurbCH= subset(TurbCH[which(TurbCH$time == PR_CH_AADateTime),]) TurbCH$DataValue[TurbCH$DataValue==-9999.00] <- NA ##Charleston fDOM## fDOMCH<- GetValues(PRserver, siteCode="iutah:PR_CH_AA", variableCode="iutah:fDOM", startDate = PRStartDate, endDate = PREndDate, qcID = "1" ) fDOMCH= subset(fDOMCH[which(fDOMCH$time == PR_CH_AADateTime),]) fDOMCH$DataValue[fDOMCH$DataValue==-9999.00] <- NA ##Charleston Chla## chlaCH<- GetValues(PRserver, siteCode="iutah:PR_CH_AA", variableCode="iutah:chlorophyll", startDate = PRStartDate, endDate = PREndDate, qcID = "1" ) chlaCH= subset(chlaCH[which(chlaCH$time == PR_CH_AADateTime),]) chlaCH$DataValue[chlaCH$DataValue==-9999.00] <- NA ##Charleston Blue Green Algae (BGA)## BGACH<- GetValues(PRserver, siteCode="iutah:PR_CH_AA", variableCode="iutah:BGA", startDate = PRStartDate, endDate = PREndDate, qcID = "1" ) BGACH= subset(BGACH[which(BGACH$time == PR_CH_AADateTime),]) BGACH$DataValue[BGACH$DataValue==-9999.00] <- NA # ##Charleston Nitrate## # NitrateCH<- GetValues(PRserver, siteCode="iutah:PR_CH_AA", variableCode="iutah:Nitrate-N", startDate = PRStartDate, endDate = PREndDate, qcID = "1" ) # NitrateCH= subset(NitrateCH[which(NitrateCH$time == PR_CH_AADateTime),]) # NitrateCH$DataValue[NitrateCH$DataValue==-9999.00] <- NA ##Charleston Discharge (Q)## QCH<- GetValues(PRserver, siteCode="iutah:PR_CH_CUWCD", variableCode="iutah:CUWCDDischarge", startDate = PRStartDate, endDate = PREndDate, qcID = "1" ) QCH= subset(QCH[which(QCH$time == PR_CH_AADateTime),]) QCH$DataValue[QCH$DataValue==-9999.00] <- NA ##### Provo Spreadsheet ##### Soapstone12NovGrabSample <- data.frame(DateTime = EXWTempST$time, Site = "PR_ST_BA", Temp = EXWTempST$DataValue, pH = pHST$DataValue, SpCon = SpConST$DataValue, ODO = ODOST$DataValue, Turbidity = TurbST$DataValue, Gage = StageST$DataValue) Soapstone12NovGrabSample BelowJordanelle12NovGrabSample <- data.frame(DateTime = EXWTempBJ$time, Site = "PR_BJ_AA", Temp = EXWTempBJ$DataValue, pH 
= pHBJ$DataValue, SpCon = SpConBJ$DataValue, ODO = ODOBJ$DataValue, Turbidity = TurbBJ$DataValue, Gage = QBJ$DataValue, BGA = BGABJ$DataValue, chla = chlaBJ$DataValue, fDOM = fDOMBJ$DataValue) # Nitrate = NitrateBJ$DataValue) BelowJordanelle12NovGrabSample LowerMidway12NovGrabSample <- data.frame(DateTime = EXWTempLM$time, Site = "PR_LM_BA", Temp = EXWTempLM$DataValue, pH = pHLM$DataValue, SpCon = SpConLM$DataValue, ODO = ODOLM$DataValue, Turbidity = TurbLM$DataValue, Gage = QLM$DataValue) LowerMidway12NovGrabSample Charleston12NovGrabSample <- data.frame(DateTime = EXWTempCH$time, Site = "PR_CH_AA", Temp = EXWTempCH$DataValue, pH = pHCH$DataValue, SpCon = SpConCH$DataValue, ODO = ODOCH$DataValue, Turbidity = TurbCH$DataValue, Gage = QCH$DataValue, BGA = BGACH$DataValue, chla = chlaCH$DataValue, fDOM = fDOMCH$DataValue) #Nitrate = NitrateCH$DataValue) Charleston12NovGrabSample Provo12Nov <-rbind.fill(Soapstone12NovGrabSample, BelowJordanelle12NovGrabSample, LowerMidway12NovGrabSample, Charleston12NovGrabSample) ##### All combine and write #### GrabSamples24Nov <- rbind.fill(RedButte18Nov, Logan24Nov, Provo12Nov) write.csv(GrabSamples24Nov, file = "GrabSamples2" )
/GAMUTNov14_nonitrate.R
no_license
erinfjones/GAMUTdownload
R
false
false
51,741
r
### iUTAH GAMUT Aquatic Station ### Grab Sample download ### Version 1.1 ### Written by: Erin Fleming Jones, contact at erinfjones3@gmail.com ### Last updated: 4/23/2017 setwd("~/Box Sync/BYU/metagenomics") ####NOTES: Check ARBR gage code ## Load packages ## library("devtools") library("plyr") Sys.timezone() library("WaterML") ## Data server connections ## # services = GetServices() ##Lists URLs of all available datasets Loganserver= 'http://data.iutahepscor.org/LoganRiverWOF/cuahsi_1_1.asmx?WSDL' ##Conects to each web database RBserver= 'http://data.iutahepscor.org/RedButteCreekWOF/cuahsi_1_1.asmx?WSDL' PRserver= 'http://data.iutahepscor.org/ProvoRiverWOF/cuahsi_1_1.asmx?WSDL' variables=GetVariables(RBserver) ##Lists variable codes Logansites= GetSites(Loganserver) ##Lists site codes Provosites= GetSites(PRserver) RBsites= GetSites(RBserver) ### Set Date and times ### ### Logan ctrl F: 24Nov LRStartDate = "2014-11-24" ##yyyy-mm-dd format LREndDate = "2014-11-25" LR_FB_BADateTime= "2014-11-24 13:15:00" ##yyyy-mm-dd hh:mm:ss format, rounded to 0, 15, 30, or 45 minute mark LR_TG_BADateTime = "2014-11-24 11:45:00" LR_WaterLab_AADateTime = "2014-11-24 14:45:00" LR_MainStreet_BADateTime = "2014-11-24 15:00:00" LR_Mendon_AADateTime = "2014-11-24 15:30:00" ### Red Butte ctrl F: 18Nov RBStartDate = "2014-11-18" ##yyyy-mm-dd format RBEndDate = "2014-11-19" RB_KF_BADateTime = "2014-11-18 12:00:00" ##yyyy-mm-dd hh:mm:ss format, rounded to 0, 15, 30, or 45 minute mark RB_ARBR_AADateTime = "2014-11-18 15:00:00" RB_RBG_BADateTime = "2014-11-18 15:15:00" RB_CG_BADateTime = "2014-11-18 16:00:00" RB_FD_AADateTime = "2014-11-18 16:45:00" ### Provo ctrl F: 12Nov PRStartDate = "2014-11-12" ##yyyy-mm-dd format PREndDate ="2014-11-13" PR_ST_BADateTime = "2014-11-12 12:15:00" ##yyyy-mm-dd hh:mm:ss format, rounded to 0, 15, 30, or 45 minute mark PR_BJ_AADateTime = "2014-11-12 14:00:00" ## must be rounded to on the hour e.g. 
14:00:00 PR_LM_BADateTime = "2014-11-12 11:00:00" ## must be rounded to on the hour e.g. 14:00:00 PR_CH_AADateTime = "2014-11-12 13:00:00" ## must be rounded to on the hour e.g. 14:00:00 # Q- PR_BJ, PR_LM, PR_CH only available on the hour (for now) ##### Franklin Basin #### ##Franklin Basin Water Temp## #exo_Temp EXWTempFranklin<- GetValues(Loganserver, siteCode="iutah:LR_FB_BA", variableCode="iutah:WaterTemp_EXO", startDate = LRStartDate, endDate = LREndDate, qcID = "1" ) #Download 24 hrs EXWTempFranklin = subset(EXWTempFranklin[ which(EXWTempFranklin$time == LR_FB_BADateTime),] ) ### Subset 24-hr for single time point EXWTempFranklin$DataValue[EXWTempFranklin$DataValue==-9999.00] <- NA ### Substitute database NA for R NA #Turbidity_Temp TWTempFranklin<- GetValues(Loganserver, siteCode="iutah:LR_FB_BA", variableCode="iutah:WaterTemp_Turb", startDate = LRStartDate, endDate = LREndDate, qcID = "1" ) TWTempFranklin <- subset(TWTempFranklin[ which(TWTempFranklin$time == LR_FB_BADateTime),]) TWTempFranklin$DataValue[TWTempFranklin$DataValue==-9999.00] <- NA ##Franklin Basin pH## pHFranklin<- GetValues(Loganserver, siteCode="iutah:LR_FB_BA", variableCode="iutah:pH", startDate = LRStartDate, endDate = LREndDate, qcID = "1" ) pHFranklin = subset(pHFranklin[ which(pHFranklin$time == LR_FB_BADateTime),] ) pHFranklin$DataValue[pHFranklin$DataValue==-9999.00] <- NA ##Franklin Basin Specific Conductivity## SpConFranklin<- GetValues(Loganserver, siteCode="iutah:LR_FB_BA", variableCode="iutah:SpCond", startDate = LRStartDate, endDate = LREndDate, qcID = "1" ) SpConFranklin = subset(SpConFranklin[ which(SpConFranklin$time == LR_FB_BADateTime),] ) SpConFranklin$DataValue[SpConFranklin$DataValue==-9999.00] <- NA ##Franklin Basin Dissolved Oxygen## ODOFranklin<-GetValues(Loganserver, siteCode="iutah:LR_FB_BA", variableCode="iutah:ODO", startDate = LRStartDate, endDate = LREndDate, qcID = "1" ) ODOFranklin = subset(ODOFranklin[ which(ODOFranklin$time == LR_FB_BADateTime),] ) 
## Franklin Basin ODO: recode the -9999 missing-data sentinel to NA.
## (The GetValues/subset calls for ODOFranklin appear earlier in the script.)
ODOFranklin$DataValue[ODOFranklin$DataValue==-9999.00] <- NA

## Helper: pull one variable for one site over the sampling window, keep only
## the row(s) whose timestamp equals the grab-sample time, and recode the
## -9999 missing-data sentinel to NA. Extra arguments (e.g. methodID) are
## passed straight through to GetValues(). Returns the filtered data frame.
## (Redefined identically in each section so each section runs on its own.)
fetch_grab_sample <- function(server, site, variable, start_date, end_date,
                              sample_time, qc = "1", ...) {
  vals <- GetValues(server, siteCode = site, variableCode = variable,
                    startDate = start_date, endDate = end_date, qcID = qc, ...)
  vals <- vals[which(vals$time == sample_time), ]
  vals$DataValue[vals$DataValue == -9999.00] <- NA
  vals
}

## Franklin Basin (LR_FB_BA) ----
TurbFranklin <- fetch_grab_sample(Loganserver, "iutah:LR_FB_BA", "iutah:TurbAvg",
                                  LRStartDate, LREndDate, LR_FB_BADateTime)
GageFranklin <- fetch_grab_sample(Loganserver, "iutah:LR_FB_BA", "iutah:Stage",
                                  LRStartDate, LREndDate, LR_FB_BADateTime)

## Tony Grove (LR_TG_BA) ----
EXWTempTonyGrove <- fetch_grab_sample(Loganserver, "iutah:LR_TG_BA",
                                      "iutah:WaterTemp_EXO",
                                      LRStartDate, LREndDate, LR_TG_BADateTime)
TWTempTonyGrove <- fetch_grab_sample(Loganserver, "iutah:LR_TG_BA",
                                     "iutah:WaterTemp_Turb",
                                     LRStartDate, LREndDate, LR_TG_BADateTime)
pHTonyGrove <- fetch_grab_sample(Loganserver, "iutah:LR_TG_BA", "iutah:pH",
                                 LRStartDate, LREndDate, LR_TG_BADateTime)
SpConTonyGrove <- fetch_grab_sample(Loganserver, "iutah:LR_TG_BA", "iutah:SpCond",
                                    LRStartDate, LREndDate, LR_TG_BADateTime)
ODOTonyGrove <- fetch_grab_sample(Loganserver, "iutah:LR_TG_BA", "iutah:ODO",
                                  LRStartDate, LREndDate, LR_TG_BADateTime)
TurbTonyGrove <- fetch_grab_sample(Loganserver, "iutah:LR_TG_BA", "iutah:TurbAvg",
                                   LRStartDate, LREndDate, LR_TG_BADateTime)
GageTonyGrove <- fetch_grab_sample(Loganserver, "iutah:LR_TG_BA", "iutah:Stage",
                                   LRStartDate, LREndDate, LR_TG_BADateTime)

## Water Lab (LR_WaterLab_AA) ----
EXWTempWaterLab <- fetch_grab_sample(Loganserver, "iutah:LR_WaterLab_AA",
                                     "iutah:WaterTemp_EXO", LRStartDate,
                                     LREndDate, LR_WaterLab_AADateTime)
TWTempWaterLab <- fetch_grab_sample(Loganserver, "iutah:LR_WaterLab_AA",
                                    "iutah:WaterTemp_Turb", LRStartDate,
                                    LREndDate, LR_WaterLab_AADateTime)
pHWaterLab <- fetch_grab_sample(Loganserver, "iutah:LR_WaterLab_AA", "iutah:pH",
                                LRStartDate, LREndDate, LR_WaterLab_AADateTime)
SpConWaterLab <- fetch_grab_sample(Loganserver, "iutah:LR_WaterLab_AA",
                                   "iutah:SpCond", LRStartDate, LREndDate,
                                   LR_WaterLab_AADateTime)
ODOWaterLab <- fetch_grab_sample(Loganserver, "iutah:LR_WaterLab_AA",
                                 "iutah:ODO", LRStartDate, LREndDate,
                                 LR_WaterLab_AADateTime)
TurbWaterLab <- fetch_grab_sample(Loganserver, "iutah:LR_WaterLab_AA",
                                  "iutah:TurbAvg", LRStartDate, LREndDate,
                                  LR_WaterLab_AADateTime)
fDOMWaterLab <- fetch_grab_sample(Loganserver, "iutah:LR_WaterLab_AA",
                                  "iutah:fDOM", LRStartDate, LREndDate,
                                  LR_WaterLab_AADateTime)
chlaWaterLab <- fetch_grab_sample(Loganserver, "iutah:LR_WaterLab_AA",
                                  "iutah:chlorophyll", LRStartDate, LREndDate,
                                  LR_WaterLab_AADateTime)
BGAWaterLab <- fetch_grab_sample(Loganserver, "iutah:LR_WaterLab_AA",
                                 "iutah:BGA", LRStartDate, LREndDate,
                                 LR_WaterLab_AADateTime)
## Nitrate pull currently disabled in the original script; kept for reference.
# NitrateWaterLab <- fetch_grab_sample(Loganserver, "iutah:LR_WaterLab_AA",
#                                      "iutah:Nitrate-N", LRStartDate,
#                                      LREndDate, LR_WaterLab_AADateTime)
GageWaterLab <- fetch_grab_sample(Loganserver, "iutah:LR_WaterLab_AA",
                                  "iutah:Stage", LRStartDate, LREndDate,
                                  LR_WaterLab_AADateTime)

## Main Street (LR_MainStreet_BA) ----
EXWTempMainSt <- fetch_grab_sample(Loganserver, "iutah:LR_MainStreet_BA",
                                   "iutah:WaterTemp_EXO", LRStartDate,
                                   LREndDate, LR_MainStreet_BADateTime)
TWTempMainSt <- fetch_grab_sample(Loganserver, "iutah:LR_MainStreet_BA",
                                  "iutah:WaterTemp_Turb", LRStartDate,
                                  LREndDate, LR_MainStreet_BADateTime)
## The next line of the original script repeats the sentinel recode for
## pHMainSt; the helper already did it, so that repeat is a harmless no-op.
pHMainSt <- fetch_grab_sample(Loganserver, "iutah:LR_MainStreet_BA", "iutah:pH",
                              LRStartDate, LREndDate, LR_MainStreet_BADateTime)
## Main Street pH: recode the -9999 sentinel to NA (no-op if already recoded).
pHMainSt$DataValue[pHMainSt$DataValue==-9999.00] <- NA

## Helper: pull one variable for one site over the sampling window, keep only
## the row(s) whose timestamp equals the grab-sample time, and recode the
## -9999 missing-data sentinel to NA. Extra arguments (e.g. methodID) are
## passed straight through to GetValues(). Returns the filtered data frame.
## (Redefined identically in each section so each section runs on its own.)
fetch_grab_sample <- function(server, site, variable, start_date, end_date,
                              sample_time, qc = "1", ...) {
  vals <- GetValues(server, siteCode = site, variableCode = variable,
                    startDate = start_date, endDate = end_date, qcID = qc, ...)
  vals <- vals[which(vals$time == sample_time), ]
  vals$DataValue[vals$DataValue == -9999.00] <- NA
  vals
}

## Main Street (LR_MainStreet_BA), continued ----
SpConMainSt <- fetch_grab_sample(Loganserver, "iutah:LR_MainStreet_BA",
                                 "iutah:SpCond", LRStartDate, LREndDate,
                                 LR_MainStreet_BADateTime)
ODOMainSt <- fetch_grab_sample(Loganserver, "iutah:LR_MainStreet_BA",
                               "iutah:ODO", LRStartDate, LREndDate,
                               LR_MainStreet_BADateTime)
TurbMainSt <- fetch_grab_sample(Loganserver, "iutah:LR_MainStreet_BA",
                                "iutah:TurbAvg", LRStartDate, LREndDate,
                                LR_MainStreet_BADateTime)
GageMainSt <- fetch_grab_sample(Loganserver, "iutah:LR_MainStreet_BA",
                                "iutah:Stage", LRStartDate, LREndDate,
                                LR_MainStreet_BADateTime)

## Mendon Road (LR_Mendon_AA) ----
## NOTE(review): these pulls use qcID "0" (unlike "1" at the other Logan
## sites), while chlaMendon further below switches back to "1" — confirm
## this mixed QC-level usage is intentional.
EXWTempMendon <- fetch_grab_sample(Loganserver, "iutah:LR_Mendon_AA",
                                   "iutah:WaterTemp_EXO", LRStartDate,
                                   LREndDate, LR_Mendon_AADateTime, qc = "0")
TWTempMendon <- fetch_grab_sample(Loganserver, "iutah:LR_Mendon_AA",
                                  "iutah:WaterTemp_Turb", LRStartDate,
                                  LREndDate, LR_Mendon_AADateTime, qc = "0")
pHMendon <- fetch_grab_sample(Loganserver, "iutah:LR_Mendon_AA", "iutah:pH",
                              LRStartDate, LREndDate, LR_Mendon_AADateTime,
                              qc = "0")
SpConMendon <- fetch_grab_sample(Loganserver, "iutah:LR_Mendon_AA",
                                 "iutah:SpCond", LRStartDate, LREndDate,
                                 LR_Mendon_AADateTime, qc = "0")
ODOMendon <- fetch_grab_sample(Loganserver, "iutah:LR_Mendon_AA", "iutah:ODO",
                               LRStartDate, LREndDate, LR_Mendon_AADateTime,
                               qc = "0")
TurbMendon <- fetch_grab_sample(Loganserver, "iutah:LR_Mendon_AA",
                                "iutah:TurbAvg", LRStartDate, LREndDate,
                                LR_Mendon_AADateTime, qc = "0")
fDOMMendon <- fetch_grab_sample(Loganserver, "iutah:LR_Mendon_AA", "iutah:fDOM",
                                LRStartDate, LREndDate, LR_Mendon_AADateTime,
                                qc = "0")
## Chlorophyll uses qcID "1" in the original. The next line of the original
## script repeats the sentinel recode for chlaMendon; that repeat is a no-op.
chlaMendon <- fetch_grab_sample(Loganserver, "iutah:LR_Mendon_AA",
                                "iutah:chlorophyll", LRStartDate, LREndDate,
                                LR_Mendon_AADateTime)
## Mendon chlorophyll: recode the -9999 sentinel (no-op if already recoded).
chlaMendon$DataValue[chlaMendon$DataValue==-9999.00] <- NA

## Helper: pull one variable for one site over the sampling window, keep only
## the row(s) whose timestamp equals the grab-sample time, and recode the
## -9999 missing-data sentinel to NA. Extra arguments (e.g. methodID) are
## passed straight through to GetValues(). Returns the filtered data frame.
## (Redefined identically in each section so each section runs on its own.)
fetch_grab_sample <- function(server, site, variable, start_date, end_date,
                              sample_time, qc = "1", ...) {
  vals <- GetValues(server, siteCode = site, variableCode = variable,
                    startDate = start_date, endDate = end_date, qcID = qc, ...)
  vals <- vals[which(vals$time == sample_time), ]
  vals$DataValue[vals$DataValue == -9999.00] <- NA
  vals
}

## Mendon Road (LR_Mendon_AA), continued ----
BGAMendon <- fetch_grab_sample(Loganserver, "iutah:LR_Mendon_AA", "iutah:BGA",
                               LRStartDate, LREndDate, LR_Mendon_AADateTime)
## Nitrate pull currently disabled in the original script; kept for reference.
# NitrateMendon <- fetch_grab_sample(Loganserver, "iutah:LR_Mendon_AA",
#                                    "iutah:Nitrate-N", LRStartDate, LREndDate,
#                                    LR_Mendon_AADateTime)
GageMendon <- fetch_grab_sample(Loganserver, "iutah:LR_Mendon_AA", "iutah:Stage",
                                LRStartDate, LREndDate, LR_Mendon_AADateTime)

## Logan River grab-sample spreadsheet ----
## One row per site/time; bare names below auto-print for visual inspection.
Franklin24NovGrabSample <- data.frame(DateTime = EXWTempFranklin$time,
                                      Site = "LR_FB_BA",
                                      Temp = EXWTempFranklin$DataValue,
                                      pH = pHFranklin$DataValue,
                                      SpCon = SpConFranklin$DataValue,
                                      ODO = ODOFranklin$DataValue,
                                      Turbidity = TurbFranklin$DataValue,
                                      Gage = GageFranklin$DataValue)
Franklin24NovGrabSample
TonyGrove24NovGrabSample <- data.frame(DateTime = EXWTempTonyGrove$time,
                                       Site = "LR_TG_BA",
                                       Temp = EXWTempTonyGrove$DataValue,
                                       pH = pHTonyGrove$DataValue,
                                       SpCon = SpConTonyGrove$DataValue,
                                       ODO = ODOTonyGrove$DataValue,
                                       Turbidity = TurbTonyGrove$DataValue,
                                       Gage = GageTonyGrove$DataValue)
TonyGrove24NovGrabSample
WaterLab24NovGrabSample <- data.frame(DateTime = EXWTempWaterLab$time,
                                      Site = "LR_WaterLab_AA",
                                      Temp = EXWTempWaterLab$DataValue,
                                      pH = pHWaterLab$DataValue,
                                      SpCon = SpConWaterLab$DataValue,
                                      ODO = ODOWaterLab$DataValue,
                                      Turbidity = TurbWaterLab$DataValue,
                                      Gage = GageWaterLab$DataValue,
                                      BGA = BGAWaterLab$DataValue,
                                      chla = chlaWaterLab$DataValue,
                                      fDOM = fDOMWaterLab$DataValue)
                                      # Nitrate = NitrateWaterLab$DataValue
WaterLab24NovGrabSample
MainSt24NovGrabSample <- data.frame(DateTime = EXWTempMainSt$time,
                                    Site = "LR_MainStreet_BA",
                                    Temp = EXWTempMainSt$DataValue,
                                    pH = pHMainSt$DataValue,
                                    SpCon = SpConMainSt$DataValue,
                                    ODO = ODOMainSt$DataValue,
                                    Turbidity = TurbMainSt$DataValue,
                                    Gage = GageMainSt$DataValue)
MainSt24NovGrabSample
Mendon24NovGrabSample <- data.frame(DateTime = EXWTempMendon$time,
                                    Site = "LR_Mendon_AA",
                                    Temp = EXWTempMendon$DataValue,
                                    pH = pHMendon$DataValue,
                                    SpCon = SpConMendon$DataValue,
                                    ODO = ODOMendon$DataValue,
                                    Turbidity = TurbMendon$DataValue,
                                    Gage = GageMendon$DataValue,
                                    BGA = BGAMendon$DataValue,
                                    chla = chlaMendon$DataValue,
                                    fDOM = fDOMMendon$DataValue)
                                    # Nitrate = NitrateMendon$DataValue
Mendon24NovGrabSample
## rbind.fill (plyr) pads columns missing at a site (BGA/chla/fDOM) with NA.
Logan24Nov <- rbind.fill(Franklin24NovGrabSample, TonyGrove24NovGrabSample,
                         WaterLab24NovGrabSample, MainSt24NovGrabSample,
                         Mendon24NovGrabSample)
Logan24Nov

## Knowlton Fork (RB_KF_BA) ----
EXWTempKnowlton <- fetch_grab_sample(RBserver, "iutah:RB_KF_BA",
                                     "iutah:WaterTemp_EXO", RBStartDate,
                                     RBEndDate, RB_KF_BADateTime)
TWTempKnowlton <- fetch_grab_sample(RBserver, "iutah:RB_KF_BA",
                                    "iutah:WaterTemp_Turb", RBStartDate,
                                    RBEndDate, RB_KF_BADateTime)
## The next line of the original script repeats the sentinel recode for
## pHKnowlton; the helper already did it, so that repeat is a harmless no-op.
pHKnowlton <- fetch_grab_sample(RBserver, "iutah:RB_KF_BA", "iutah:pH",
                                RBStartDate, RBEndDate, RB_KF_BADateTime)
## Knowlton Fork pH: recode the -9999 sentinel (no-op if already recoded).
pHKnowlton$DataValue[pHKnowlton$DataValue==-9999.00] <- NA

## Helper: pull one variable for one site over the sampling window, keep only
## the row(s) whose timestamp equals the grab-sample time, and recode the
## -9999 missing-data sentinel to NA. Extra arguments (e.g. methodID) are
## passed straight through to GetValues(). Returns the filtered data frame.
## (Redefined identically in each section so each section runs on its own.)
fetch_grab_sample <- function(server, site, variable, start_date, end_date,
                              sample_time, qc = "1", ...) {
  vals <- GetValues(server, siteCode = site, variableCode = variable,
                    startDate = start_date, endDate = end_date, qcID = qc, ...)
  vals <- vals[which(vals$time == sample_time), ]
  vals$DataValue[vals$DataValue == -9999.00] <- NA
  vals
}

## Knowlton Fork (RB_KF_BA), continued ----
SpConKnowlton <- fetch_grab_sample(RBserver, "iutah:RB_KF_BA", "iutah:SpCond",
                                   RBStartDate, RBEndDate, RB_KF_BADateTime)
ODOKnowlton <- fetch_grab_sample(RBserver, "iutah:RB_KF_BA", "iutah:ODO",
                                 RBStartDate, RBEndDate, RB_KF_BADateTime)
TurbKnowlton <- fetch_grab_sample(RBserver, "iutah:RB_KF_BA", "iutah:TurbAvg",
                                  RBStartDate, RBEndDate, RB_KF_BADateTime)
GageKnowlton <- fetch_grab_sample(RBserver, "iutah:RB_KF_BA", "iutah:Stage",
                                  RBStartDate, RBEndDate, RB_KF_BADateTime)

## Above Red Butte Reservoir (RB_ARBR_AA) ----
EXWTempARBR <- fetch_grab_sample(RBserver, "iutah:RB_ARBR_AA",
                                 "iutah:WaterTemp_EXO", RBStartDate,
                                 RBEndDate, RB_ARBR_AADateTime)
TWTempARBR <- fetch_grab_sample(RBserver, "iutah:RB_ARBR_AA",
                                "iutah:WaterTemp_Turb", RBStartDate,
                                RBEndDate, RB_ARBR_AADateTime)
pHARBR <- fetch_grab_sample(RBserver, "iutah:RB_ARBR_AA", "iutah:pH",
                            RBStartDate, RBEndDate, RB_ARBR_AADateTime)
SpConARBR <- fetch_grab_sample(RBserver, "iutah:RB_ARBR_AA", "iutah:SpCond",
                               RBStartDate, RBEndDate, RB_ARBR_AADateTime)
ODOARBR <- fetch_grab_sample(RBserver, "iutah:RB_ARBR_AA", "iutah:ODO",
                             RBStartDate, RBEndDate, RB_ARBR_AADateTime)
TurbARBR <- fetch_grab_sample(RBserver, "iutah:RB_ARBR_AA", "iutah:TurbAvg",
                              RBStartDate, RBEndDate, RB_ARBR_AADateTime)
fDOMARBR <- fetch_grab_sample(RBserver, "iutah:RB_ARBR_AA", "iutah:fDOM",
                              RBStartDate, RBEndDate, RB_ARBR_AADateTime)
chlaARBR <- fetch_grab_sample(RBserver, "iutah:RB_ARBR_AA",
                              "iutah:chlorophyll", RBStartDate, RBEndDate,
                              RB_ARBR_AADateTime)
BGAARBR <- fetch_grab_sample(RBserver, "iutah:RB_ARBR_AA", "iutah:BGA",
                             RBStartDate, RBEndDate, RB_ARBR_AADateTime)
## Nitrate pull currently disabled in the original script; kept for reference.
# NitrateARBR <- fetch_grab_sample(RBserver, "iutah:RB_ARBR_AA",
#                                  "iutah:Nitrate-N", RBStartDate, RBEndDate,
#                                  RB_ARBR_AADateTime)
## Gage comes from the co-located USGS site, filtered to the AA sample time.
GageARBR <- fetch_grab_sample(RBserver, "iutah:RB_ARBR_USGS",
                              "iutah:USGSStage", RBStartDate, RBEndDate,
                              RB_ARBR_AADateTime)

## Red Butte Gate (RB_RBG_BA) ----
EXWTempRBG <- fetch_grab_sample(RBserver, "iutah:RB_RBG_BA",
                                "iutah:WaterTemp_EXO", RBStartDate,
                                RBEndDate, RB_RBG_BADateTime)
TWTempRBG <- fetch_grab_sample(RBserver, "iutah:RB_RBG_BA",
                               "iutah:WaterTemp_Turb", RBStartDate,
                               RBEndDate, RB_RBG_BADateTime)
pHRBG <- fetch_grab_sample(RBserver, "iutah:RB_RBG_BA", "iutah:pH",
                           RBStartDate, RBEndDate, RB_RBG_BADateTime)
SpConRBG <- fetch_grab_sample(RBserver, "iutah:RB_RBG_BA", "iutah:SpCond",
                              RBStartDate, RBEndDate, RB_RBG_BADateTime)
ODORBG <- fetch_grab_sample(RBserver, "iutah:RB_RBG_BA", "iutah:ODO",
                            RBStartDate, RBEndDate, RB_RBG_BADateTime)
TurbRBG <- fetch_grab_sample(RBserver, "iutah:RB_RBG_BA", "iutah:TurbAvg",
                             RBStartDate, RBEndDate, RB_RBG_BADateTime)
GageRBG <- fetch_grab_sample(RBserver, "iutah:RB_RBG_BA", "iutah:Stage",
                             RBStartDate, RBEndDate, RB_RBG_BADateTime)

## Cottams Grove (RB_CG_BA) ----
EXWTempCGrove <- fetch_grab_sample(RBserver, "iutah:RB_CG_BA",
                                   "iutah:WaterTemp_EXO", RBStartDate,
                                   RBEndDate, RB_CG_BADateTime)
TWTempCGrove <- fetch_grab_sample(RBserver, "iutah:RB_CG_BA",
                                  "iutah:WaterTemp_Turb", RBStartDate,
                                  RBEndDate, RB_CG_BADateTime)
## The next line of the original script repeats the sentinel recode for
## pHCGrove; the helper already did it, so that repeat is a harmless no-op.
pHCGrove <- fetch_grab_sample(RBserver, "iutah:RB_CG_BA", "iutah:pH",
                              RBStartDate, RBEndDate, RB_CG_BADateTime)
pHCGrove$DataValue[pHCGrove$DataValue==-9999.00] <- NA ##Cottams Grove Specific Conductivity## SpConCGrove<- GetValues(RBserver, siteCode="iutah:RB_CG_BA", variableCode="iutah:SpCond", startDate = RBStartDate, endDate = RBEndDate, qcID = "1" ) SpConCGrove= subset(SpConCGrove[which(SpConCGrove$time == RB_CG_BADateTime),]) SpConCGrove$DataValue[SpConCGrove$DataValue==-9999.00] <- NA ##Cottams Grove Dissolved Oxygen## ODOCGrove<- GetValues(RBserver, siteCode="iutah:RB_CG_BA", variableCode="iutah:ODO", startDate = RBStartDate, endDate = RBEndDate, qcID = "1" ) ODOCGrove= subset(ODOCGrove[which(ODOCGrove$time == RB_CG_BADateTime),]) ODOCGrove$DataValue[ODOCGrove$DataValue==-9999.00] <- NA ##Cottams Grove Specific Turbidity## TurbCGrove<- GetValues(RBserver, siteCode="iutah:RB_CG_BA", variableCode="iutah:TurbAvg", startDate = RBStartDate, endDate = RBEndDate, qcID = "1" ) TurbCGrove= subset(TurbCGrove[which(TurbCGrove$time == RB_CG_BADateTime),]) TurbCGrove$DataValue[TurbCGrove$DataValue==-9999.00] <- NA ##Cottam Grove Gage height## GageCGrove<- GetValues(RBserver, siteCode="iutah:RB_CG_BA", variableCode="iutah:Stage", startDate = RBStartDate, endDate = RBEndDate, qcID = "1" ) GageCGrove= subset(GageCGrove[which(GageCGrove$time == RB_CG_BADateTime),]) GageCGrove$DataValue[GageCGrove$DataValue==-9999.00] <- NA ##### Foothill Drive advanced #### ##Foothill Drive Water Temp## #exo_Temp EXWTempFoothill<- GetValues(RBserver, siteCode="iutah:RB_FD_AA", variableCode="iutah:WaterTemp_EXO", startDate = RBStartDate, endDate = RBEndDate, qcID = "1" ) EXWTempFoothill= subset(EXWTempFoothill[which(EXWTempFoothill$time == RB_FD_AADateTime),]) EXWTempFoothill$DataValue[EXWTempFoothill$DataValue==-9999.00] <- NA #Turbidity_Temp TWTempFoothill<- GetValues(RBserver, siteCode="iutah:RB_FD_AA", variableCode="iutah:WaterTemp_Turb", startDate = RBStartDate, endDate = RBEndDate, qcID = "1" ) TWTempFoothill= subset(TWTempFoothill[which(TWTempFoothill$time == RB_FD_AADateTime),]) 
TWTempFoothill$DataValue[TWTempFoothill$DataValue==-9999.00] <- NA ##Foothill Drive pH## pHFoothill<- GetValues(RBserver, siteCode="iutah:RB_FD_AA", variableCode="iutah:pH", startDate = RBStartDate, endDate = RBEndDate, qcID = "1" ) pHFoothill= subset(pHFoothill[which(pHFoothill$time == RB_FD_AADateTime),]) pHFoothill$DataValue[pHFoothill$DataValue==-9999.00] <- NA ##Foothill Drive Specific Conductivity## SpConFoothill<- GetValues(RBserver, siteCode="iutah:RB_FD_AA", variableCode="iutah:SpCond", startDate = RBStartDate, endDate = RBEndDate, qcID = "1" ) SpConFoothill= subset(SpConFoothill[which(SpConFoothill$time == RB_FD_AADateTime),]) SpConFoothill$DataValue[SpConFoothill$DataValue==-9999.00] <- NA ##Foothill Drive Dissolved Oxygen## ODOFoothill<- GetValues(RBserver, siteCode="iutah:RB_FD_AA", variableCode="iutah:ODO", startDate = RBStartDate, endDate = RBEndDate, qcID = "1" ) ODOFoothill= subset(ODOFoothill[which(ODOFoothill$time == RB_FD_AADateTime),]) ODOFoothill$DataValue[ODOFoothill$DataValue==-9999.00] <- NA ##Foothill Drive Specific Turbidity## TurbFoothill<- GetValues(RBserver, siteCode="iutah:RB_FD_AA", variableCode="iutah:TurbAvg", startDate = RBStartDate, endDate = RBEndDate, qcID = "1" ) TurbFoothill= subset(TurbFoothill[which(TurbFoothill$time == RB_FD_AADateTime),]) TurbFoothill$DataValue[TurbFoothill$DataValue==-9999.00] <- NA ##Foothill Drive fDOM## fDOMFoothill<- GetValues(RBserver, siteCode="iutah:RB_FD_AA", variableCode="iutah:fDOM", startDate = RBStartDate, endDate = RBEndDate, qcID = "1" ) fDOMFoothill= subset(fDOMFoothill[which(fDOMFoothill$time == RB_FD_AADateTime),]) fDOMFoothill$DataValue[fDOMFoothill$DataValue==-9999.00] <- NA ##Foothill Drive Chla## chlaFoothill<- GetValues(RBserver, siteCode="iutah:RB_FD_AA", variableCode="iutah:chlorophyll", startDate = RBStartDate, endDate = RBEndDate, qcID = "1" ) chlaFoothill= subset(chlaFoothill[which(chlaFoothill$time == RB_FD_AADateTime),]) 
## Foothill chlorophyll: recode the -9999 sentinel (no-op if already recoded).
chlaFoothill$DataValue[chlaFoothill$DataValue==-9999.00] <- NA

## Helper: pull one variable for one site over the sampling window, keep only
## the row(s) whose timestamp equals the grab-sample time, and recode the
## -9999 missing-data sentinel to NA. Extra arguments (e.g. methodID) are
## passed straight through to GetValues(). Returns the filtered data frame.
## (Redefined identically in each section so each section runs on its own.)
fetch_grab_sample <- function(server, site, variable, start_date, end_date,
                              sample_time, qc = "1", ...) {
  vals <- GetValues(server, siteCode = site, variableCode = variable,
                    startDate = start_date, endDate = end_date, qcID = qc, ...)
  vals <- vals[which(vals$time == sample_time), ]
  vals$DataValue[vals$DataValue == -9999.00] <- NA
  vals
}

## Foothill Drive (RB_FD_AA), continued ----
BGAFoothill <- fetch_grab_sample(RBserver, "iutah:RB_FD_AA", "iutah:BGA",
                                 RBStartDate, RBEndDate, RB_FD_AADateTime)
## Nitrate pull currently disabled in the original script; kept for reference.
# NitrateFoothill <- fetch_grab_sample(RBserver, "iutah:RB_FD_AA",
#                                      "iutah:Nitrate-N", RBStartDate,
#                                      RBEndDate, RB_FD_AADateTime)
GageFoothill <- fetch_grab_sample(RBserver, "iutah:RB_FD_AA", "iutah:Stage",
                                  RBStartDate, RBEndDate, RB_FD_AADateTime)

## Red Butte grab-sample spreadsheet ----
## One row per site/time; bare names below auto-print for visual inspection.
Knowlton18NovGrabSample <- data.frame(DateTime = EXWTempKnowlton$time,
                                      Site = "RB_KF_BA",
                                      Temp = EXWTempKnowlton$DataValue,
                                      pH = pHKnowlton$DataValue,
                                      SpCon = SpConKnowlton$DataValue,
                                      ODO = ODOKnowlton$DataValue,
                                      Turbidity = TurbKnowlton$DataValue,
                                      Gage = GageKnowlton$DataValue)
Knowlton18NovGrabSample
## Gage is excluded here in the original (USGS stage row count may differ).
ARBR18NovGrabSample <- data.frame(DateTime = EXWTempARBR$time,
                                  Site = "RB_ARBR_AA",
                                  Temp = EXWTempARBR$DataValue,
                                  pH = pHARBR$DataValue,
                                  SpCon = SpConARBR$DataValue,
                                  ODO = ODOARBR$DataValue,
                                  Turbidity = TurbARBR$DataValue,
                                  # Gage = GageARBR$DataValue,
                                  BGA = BGAARBR$DataValue,
                                  chla = chlaARBR$DataValue,
                                  fDOM = fDOMARBR$DataValue)
                                  # Nitrate = NitrateARBR$DataValue
ARBR18NovGrabSample
RBG18NovGrabSample <- data.frame(DateTime = EXWTempRBG$time,
                                 Site = "RB_RBG_BA",
                                 Temp = EXWTempRBG$DataValue,
                                 pH = pHRBG$DataValue,
                                 SpCon = SpConRBG$DataValue,
                                 ODO = ODORBG$DataValue,
                                 Turbidity = TurbRBG$DataValue,
                                 Gage = GageRBG$DataValue)
RBG18NovGrabSample
CGrove18NovGrabSample <- data.frame(DateTime = EXWTempCGrove$time,
                                    Site = "RB_CG_BA",
                                    Temp = EXWTempCGrove$DataValue,
                                    pH = pHCGrove$DataValue,
                                    SpCon = SpConCGrove$DataValue,
                                    ODO = ODOCGrove$DataValue,
                                    Turbidity = TurbCGrove$DataValue,
                                    Gage = GageCGrove$DataValue)
CGrove18NovGrabSample
Foothill18NovGrabSample <- data.frame(DateTime = EXWTempFoothill$time,
                                      Site = "RB_FD_AA",
                                      Temp = EXWTempFoothill$DataValue,
                                      pH = pHFoothill$DataValue,
                                      SpCon = SpConFoothill$DataValue,
                                      ODO = ODOFoothill$DataValue,
                                      Turbidity = TurbFoothill$DataValue,
                                      Gage = GageFoothill$DataValue,
                                      BGA = BGAFoothill$DataValue,
                                      chla = chlaFoothill$DataValue,
                                      fDOM = fDOMFoothill$DataValue)
                                      # Nitrate = NitrateFoothill$DataValue
Foothill18NovGrabSample
## rbind.fill (plyr) pads columns missing at a site (BGA/chla/fDOM) with NA.
RedButte18Nov <- rbind.fill(Knowlton18NovGrabSample, ARBR18NovGrabSample,
                            RBG18NovGrabSample, CGrove18NovGrabSample,
                            Foothill18NovGrabSample)

## Soapstone (PR_ST_BA) ----
EXWTempST <- fetch_grab_sample(PRserver, "iutah:PR_ST_BA",
                               "iutah:WaterTemp_EXO", PRStartDate,
                               PREndDate, PR_ST_BADateTime)
TWTempST <- fetch_grab_sample(PRserver, "iutah:PR_ST_BA",
                              "iutah:WaterTemp_Turb", PRStartDate,
                              PREndDate, PR_ST_BADateTime)
pHST <- fetch_grab_sample(PRserver, "iutah:PR_ST_BA", "iutah:pH",
                          PRStartDate, PREndDate, PR_ST_BADateTime)
## The next lines of the original script repeat the subset and sentinel
## recode for SpConST; both repeats are harmless no-ops on filtered data.
SpConST <- fetch_grab_sample(PRserver, "iutah:PR_ST_BA", "iutah:SpCond",
                             PRStartDate, PREndDate, PR_ST_BADateTime)
## Soapstone (PR_ST_BA), continued: keep the grab-sample timestamp row and
## recode the -9999 missing-data sentinel to NA.
SpConST= subset(SpConST[which(SpConST$time == PR_ST_BADateTime),])
SpConST$DataValue[SpConST$DataValue==-9999.00] <- NA
##Soapstone Dissolved Oxygen##
ODOST<- GetValues(PRserver, siteCode="iutah:PR_ST_BA", variableCode="iutah:ODO",
                  startDate = PRStartDate, endDate = PREndDate, qcID = "1" )
ODOST= subset(ODOST[which(ODOST$time == PR_ST_BADateTime),])
ODOST$DataValue[ODOST$DataValue==-9999.00] <- NA
##Soapstone Specific Turbidity##
TurbST<- GetValues(PRserver, siteCode="iutah:PR_ST_BA",
                   variableCode="iutah:TurbAvg", startDate = PRStartDate,
                   endDate = PREndDate, qcID = "1" )
TurbST= subset(TurbST[which(TurbST$time == PR_ST_BADateTime),])
TurbST$DataValue[TurbST$DataValue==-9999.00] <- NA
##Soapstone Stage##
StageST<- GetValues(PRserver, siteCode="iutah:PR_ST_BA",
                    variableCode="iutah:Stage", startDate = PRStartDate,
                    endDate = PREndDate, qcID = "1" )
StageST= subset(StageST[which(StageST$time == PR_ST_BADateTime),])
StageST$DataValue[StageST$DataValue==-9999.00] <- NA
##### Below Jordanelle advanced #########
##Below Jordanelle Temp
#exo_Temp
## methodID "99" selects a specific sensor deployment for this EXO pull.
EXWTempBJ<- GetValues(PRserver, siteCode="iutah:PR_BJ_AA",
                      variableCode="iutah:WaterTemp_EXO",
                      startDate = PRStartDate, endDate = PREndDate,
                      methodID= "99", qcID = "1" )
EXWTempBJ= subset(EXWTempBJ[which(EXWTempBJ$time == PR_BJ_AADateTime),])
EXWTempBJ$DataValue[EXWTempBJ$DataValue==-9999.00] <- NA
#Turbidity_Temp
TWTempBJ<- GetValues(PRserver, siteCode="iutah:PR_BJ_AA",
                     variableCode="iutah:WaterTemp_Turb",
                     startDate = PRStartDate, endDate = PREndDate, qcID = "1" )
TWTempBJ= subset(TWTempBJ[which(TWTempBJ$time == PR_BJ_AADateTime),])
TWTempBJ$DataValue[TWTempBJ$DataValue==-9999.00] <- NA
## Below Jordanelle pH ##
pHBJ<- GetValues(PRserver, siteCode="iutah:PR_BJ_AA", variableCode="iutah:pH",
                 startDate = PRStartDate, endDate = PREndDate, qcID = "1" )
pHBJ= subset(pHBJ[which(pHBJ$time == PR_BJ_AADateTime),])
pHBJ$DataValue[pHBJ$DataValue==-9999.00] <- NA
##Below Jordanelle Specific Conductivity##
SpConBJ<- GetValues(PRserver, siteCode="iutah:PR_BJ_AA",
                    variableCode="iutah:SpCond", startDate = PRStartDate,
                    endDate = PREndDate, qcID = "1" )
SpConBJ= subset(SpConBJ[which(SpConBJ$time == PR_BJ_AADateTime),])
SpConBJ$DataValue[SpConBJ$DataValue==-9999.00] <- NA
##Below Jordanelle Dissolved Oxygen##
ODOBJ<- GetValues(PRserver, siteCode="iutah:PR_BJ_AA", variableCode="iutah:ODO",
                  startDate = PRStartDate, endDate = PREndDate, qcID = "1" )
ODOBJ= subset(ODOBJ[which(ODOBJ$time == PR_BJ_AADateTime),])
## NOTE(review): unlike every other pull in this script, ODOBJ is never
## recoded from the -9999 sentinel to NA here — likely an omission; confirm.
##Below Jordanelle Turbidity##
TurbBJ<- GetValues(PRserver, siteCode="iutah:PR_BJ_AA",
                   variableCode="iutah:TurbAvg", startDate = PRStartDate,
                   endDate = PREndDate, qcID = "1" )
TurbBJ= subset(TurbBJ[which(TurbBJ$time == PR_BJ_AADateTime),])
TurbBJ$DataValue[TurbBJ$DataValue==-9999.00] <- NA
##Below Jordanelle fDOM##
fDOMBJ<- GetValues(PRserver, siteCode="iutah:PR_BJ_AA",
                   variableCode="iutah:fDOM", startDate = PRStartDate,
                   endDate = PREndDate, qcID = "1" )
fDOMBJ= subset(fDOMBJ[which(fDOMBJ$time == PR_BJ_AADateTime),])
fDOMBJ$DataValue[fDOMBJ$DataValue==-9999.00] <- NA
##Below Jordanelle Chla##
chlaBJ<- GetValues(PRserver, siteCode="iutah:PR_BJ_AA",
                   variableCode="iutah:chlorophyll", startDate = PRStartDate,
                   endDate = PREndDate, qcID = "1" )
chlaBJ= subset(chlaBJ[which(chlaBJ$time == PR_BJ_AADateTime),])
chlaBJ$DataValue[chlaBJ$DataValue==-9999.00] <- NA
##Below Jordanelle Blue Green Algea (BGA)##
BGABJ<- GetValues(PRserver, siteCode="iutah:PR_BJ_AA", variableCode="iutah:BGA",
                  startDate = PRStartDate, endDate = PREndDate, qcID = "1" )
BGABJ= subset(BGABJ[which(BGABJ$time == PR_BJ_AADateTime),])
BGABJ$DataValue[BGABJ$DataValue==-9999.00] <- NA
# ##Below Jordanelle Nitrate##
# NitrateBJ<- GetValues(PRserver, siteCode="iutah:PR_BJ_AA", variableCode="iutah:Nitrate-N", startDate = PRStartDate, endDate = PREndDate, qcID = "1" )
# NitrateBJ= subset(NitrateBJ[which(NitrateBJ$time == PR_BJ_AADateTime),])
# NitrateBJ$DataValue[NitrateBJ$DataValue==-9999.00] <- NA
##Below Jordanelle Discharge (Q)##
## Discharge comes from the CUWCD gauge, filtered to the AA sample time.
QBJ<- GetValues(PRserver, siteCode="iutah:PR_BJ_CUWCD",
                variableCode="iutah:CUWCDDischarge", startDate = PRStartDate,
                endDate = PREndDate, qcID = "1" )
QBJ= subset(QBJ[which(QBJ$time == PR_BJ_AADateTime),])
QBJ$DataValue[QBJ$DataValue==-9999.00] <- NA
##### Lower Midway #######
##Lower Midway Water Temp
#exo_Temp
EXWTempLM<- GetValues(PRserver, siteCode="iutah:PR_LM_BA",
                      variableCode="iutah:WaterTemp_EXO",
                      startDate = PRStartDate, endDate = PREndDate, qcID = "1" )
EXWTempLM= subset(EXWTempLM[which(EXWTempLM$time == PR_LM_BADateTime),])
EXWTempLM$DataValue[EXWTempLM$DataValue==-9999.00] <- NA #Converts -9999.0 to NA
#Turbidity_Temp
TWTempLM<- GetValues(PRserver, siteCode="iutah:PR_LM_BA",
                     variableCode="iutah:WaterTemp_Turb",
                     startDate = PRStartDate, endDate = PREndDate, qcID = "1" )
TWTempLM= subset(TWTempLM[which(TWTempLM$time == PR_LM_BADateTime),])
TWTempLM$DataValue[TWTempLM$DataValue==-9999.00] <- NA
##Lower Midway pH##
pHLM<- GetValues(PRserver, siteCode="iutah:PR_LM_BA", variableCode="iutah:pH",
                 startDate = PRStartDate, endDate = PREndDate, qcID = "1" )
pHLM= subset(pHLM[which(pHLM$time == PR_LM_BADateTime),])
pHLM$DataValue[pHLM$DataValue==-9999.00] <- NA
##Lower Midway Specific Conductivity##
SpConLM<- GetValues(PRserver, siteCode="iutah:PR_LM_BA",
                    variableCode="iutah:SpCond", startDate = PRStartDate,
                    endDate = PREndDate, qcID = "1" )
SpConLM= subset(SpConLM[which(SpConLM$time == PR_LM_BADateTime),])
SpConLM$DataValue[SpConLM$DataValue==-9999.00] <- NA
##Lower Midway Dissolved Oxygen##
ODOLM<- GetValues(PRserver, siteCode="iutah:PR_LM_BA", variableCode="iutah:ODO",
                  startDate = PRStartDate, endDate = PREndDate, qcID = "1" )
ODOLM= subset(ODOLM[which(ODOLM$time == PR_LM_BADateTime),])
ODOLM$DataValue[ODOLM$DataValue==-9999.00] <- NA
##Lower Midway Specific Turbidity##
TurbLM<- GetValues(PRserver, siteCode="iutah:PR_LM_BA",
                   variableCode="iutah:TurbAvg", startDate = PRStartDate,
                   endDate = PREndDate, qcID = "1" )
TurbLM= subset(TurbLM[which(TurbLM$time == PR_LM_BADateTime),])
TurbLM$DataValue[TurbLM$DataValue==-9999.00] <- NA
##Lower Midway Discharge (Q)##
## NOTE(review): site code "PR_uM_CUWCD" has a lowercase "u", unlike every
## other site code in this script — verify it is not a typo for "PR_UM_CUWCD".
QLM<- GetValues(PRserver, siteCode="iutah:PR_uM_CUWCD",
                variableCode="iutah:CUWCDDischarge", startDate = PRStartDate,
                endDate = PREndDate, qcID = "1" )
QLM= subset(QLM[which(QLM$time == PR_LM_BADateTime),])
QLM$DataValue[QLM$DataValue==-9999.00] <- NA
##### Charleston advanced ######
## Charleston Water Temp
#exo_Temp
EXWTempCH<- GetValues(PRserver, siteCode="iutah:PR_CH_AA",
                      variableCode="iutah:WaterTemp_EXO",
                      startDate = PRStartDate, endDate = PREndDate, qcID = "1" )
EXWTempCH= subset(EXWTempCH[which(EXWTempCH$time == PR_CH_AADateTime),])
EXWTempCH$DataValue[EXWTempCH$DataValue==-9999.00] <- NA
#Turbidity_Temp
TWTempCH<- GetValues(PRserver, siteCode="iutah:PR_CH_AA",
                     variableCode="iutah:WaterTemp_Turb",
                     startDate = PRStartDate, endDate = PREndDate, qcID = "1" )
TWTempCH= subset(TWTempCH[which(TWTempCH$time == PR_CH_AADateTime),])
TWTempCH$DataValue[TWTempCH$DataValue==-9999.00] <- NA
##Charleston pH##
pHCH<- GetValues(PRserver, siteCode="iutah:PR_CH_AA", variableCode="iutah:pH",
                 startDate = PRStartDate, endDate = PREndDate, qcID = "1" )
pHCH= subset(pHCH[which(pHCH$time == PR_CH_AADateTime),])
pHCH$DataValue[pHCH$DataValue==-9999.00] <- NA
##Charleston Specific Conductivity##
SpConCH<- GetValues(PRserver, siteCode="iutah:PR_CH_AA",
                    variableCode="iutah:SpCond", startDate = PRStartDate,
                    endDate = PREndDate, qcID = "1" )
SpConCH= subset(SpConCH[which(SpConCH$time == PR_CH_AADateTime),])
SpConCH$DataValue[SpConCH$DataValue==-9999.00] <- NA
##Charleston Dissolved Oxygen##
ODOCH<- GetValues(PRserver, siteCode="iutah:PR_CH_AA", variableCode="iutah:ODO",
                  startDate = PRStartDate, endDate = PREndDate, qcID = "1" )
ODOCH= subset(ODOCH[which(ODOCH$time == PR_CH_AADateTime),])
ODOCH$DataValue[ODOCH$DataValue==-9999.00] <- NA
##Charleston Specific Turbidity##
TurbCH<- GetValues(PRserver, siteCode="iutah:PR_CH_AA",
                   variableCode="iutah:TurbAvg", startDate = PRStartDate, endDate
= PREndDate, qcID = "1" ) TurbCH= subset(TurbCH[which(TurbCH$time == PR_CH_AADateTime),]) TurbCH$DataValue[TurbCH$DataValue==-9999.00] <- NA ##Charleston fDOM## fDOMCH<- GetValues(PRserver, siteCode="iutah:PR_CH_AA", variableCode="iutah:fDOM", startDate = PRStartDate, endDate = PREndDate, qcID = "1" ) fDOMCH= subset(fDOMCH[which(fDOMCH$time == PR_CH_AADateTime),]) fDOMCH$DataValue[fDOMCH$DataValue==-9999.00] <- NA ##Charleston Chla## chlaCH<- GetValues(PRserver, siteCode="iutah:PR_CH_AA", variableCode="iutah:chlorophyll", startDate = PRStartDate, endDate = PREndDate, qcID = "1" ) chlaCH= subset(chlaCH[which(chlaCH$time == PR_CH_AADateTime),]) chlaCH$DataValue[chlaCH$DataValue==-9999.00] <- NA ##Charleston Blue Green Algae (BGA)## BGACH<- GetValues(PRserver, siteCode="iutah:PR_CH_AA", variableCode="iutah:BGA", startDate = PRStartDate, endDate = PREndDate, qcID = "1" ) BGACH= subset(BGACH[which(BGACH$time == PR_CH_AADateTime),]) BGACH$DataValue[BGACH$DataValue==-9999.00] <- NA # ##Charleston Nitrate## # NitrateCH<- GetValues(PRserver, siteCode="iutah:PR_CH_AA", variableCode="iutah:Nitrate-N", startDate = PRStartDate, endDate = PREndDate, qcID = "1" ) # NitrateCH= subset(NitrateCH[which(NitrateCH$time == PR_CH_AADateTime),]) # NitrateCH$DataValue[NitrateCH$DataValue==-9999.00] <- NA ##Charleston Discharge (Q)## QCH<- GetValues(PRserver, siteCode="iutah:PR_CH_CUWCD", variableCode="iutah:CUWCDDischarge", startDate = PRStartDate, endDate = PREndDate, qcID = "1" ) QCH= subset(QCH[which(QCH$time == PR_CH_AADateTime),]) QCH$DataValue[QCH$DataValue==-9999.00] <- NA ##### Provo Spreadsheet ##### Soapstone12NovGrabSample <- data.frame(DateTime = EXWTempST$time, Site = "PR_ST_BA", Temp = EXWTempST$DataValue, pH = pHST$DataValue, SpCon = SpConST$DataValue, ODO = ODOST$DataValue, Turbidity = TurbST$DataValue, Gage = StageST$DataValue) Soapstone12NovGrabSample BelowJordanelle12NovGrabSample <- data.frame(DateTime = EXWTempBJ$time, Site = "PR_BJ_AA", Temp = EXWTempBJ$DataValue, pH 
= pHBJ$DataValue, SpCon = SpConBJ$DataValue, ODO = ODOBJ$DataValue, Turbidity = TurbBJ$DataValue, Gage = QBJ$DataValue, BGA = BGABJ$DataValue, chla = chlaBJ$DataValue, fDOM = fDOMBJ$DataValue) # Nitrate = NitrateBJ$DataValue) BelowJordanelle12NovGrabSample LowerMidway12NovGrabSample <- data.frame(DateTime = EXWTempLM$time, Site = "PR_LM_BA", Temp = EXWTempLM$DataValue, pH = pHLM$DataValue, SpCon = SpConLM$DataValue, ODO = ODOLM$DataValue, Turbidity = TurbLM$DataValue, Gage = QLM$DataValue) LowerMidway12NovGrabSample Charleston12NovGrabSample <- data.frame(DateTime = EXWTempCH$time, Site = "PR_CH_AA", Temp = EXWTempCH$DataValue, pH = pHCH$DataValue, SpCon = SpConCH$DataValue, ODO = ODOCH$DataValue, Turbidity = TurbCH$DataValue, Gage = QCH$DataValue, BGA = BGACH$DataValue, chla = chlaCH$DataValue, fDOM = fDOMCH$DataValue) #Nitrate = NitrateCH$DataValue) Charleston12NovGrabSample Provo12Nov <-rbind.fill(Soapstone12NovGrabSample, BelowJordanelle12NovGrabSample, LowerMidway12NovGrabSample, Charleston12NovGrabSample) ##### All combine and write #### GrabSamples24Nov <- rbind.fill(RedButte18Nov, Logan24Nov, Provo12Nov) write.csv(GrabSamples24Nov, file = "GrabSamples2" )
# ShinyData.R -- build the data sets consumed by the Shiny app:
#   * JoinedData.csv    : segregation-study cities joined with hate-crime counts
#   * NotJoinedData.csv : hate-crime cities absent from the segregation data,
#                         geocoded via uscities.csv and projected for usmap
#   * CollegeCrimes.csv : per-college hate-crime counts
library(readr)
library(dplyr)
library(tidyr)
library(usmap)
library(maptools)
library(ggplot2)
library(stringr)

# Add one row-total column per bias type by summing the 1991-2019 per-year
# columns created by pivot_wider().  num_range() selects exactly the
# "<prefix>_<year>" columns, so the new *Total columns can never feed back
# into the sums, and a year missing from the data is skipped instead of
# crashing the script (the old hand-typed backtick sums errored on any
# missing column).
add_crime_totals <- function(df) {
  df %>%
    mutate(
      DisCrimesTotal      = rowSums(across(num_range("DisabilityCrimes_", 1991:2019))),
      GenderCrimesTotal   = rowSums(across(num_range("GenderCrimes_",     1991:2019))),
      SexCrimesTotal      = rowSums(across(num_range("SexCrimes_",        1991:2019))),
      ReligionCrimesTotal = rowSums(across(num_range("ReligionCrimes_",   1991:2019))),
      RaceCrimesTotal     = rowSums(across(num_range("RaceCrimes_",       1991:2019)))
    )
}

seg_loc <- read_csv("SegregationLocation.csv")
hcrimes <- read_csv("HateCrimes.csv")

hcrimes$State       <- as.factor(hcrimes$State)
hcrimes$Agency      <- as.factor(hcrimes$Agency)
hcrimes$Agency_Type <- as.factor(hcrimes$Agency_Type)

# Derive a City column from the reporting agency, then move it next to the
# leading identifier columns (new column 34 -> position 5).
hcrimes <- hcrimes %>% mutate(City = as.character(Agency))
hcrimes <- hcrimes[, c(1:4, 34, 5:33)]

# Vectorized replacement for the original row-by-row loop:
#   * campus and state police agencies report under their Unit,
#   * other / federal / other-state agencies under "Agency Unit",
#   * everything else keeps the agency name assigned above.
hcrimes <- hcrimes %>%
  mutate(City = case_when(
    Agency_Type %in% c("University or College", "State Police") ~ as.character(Unit),
    Agency_Type %in% c("Other", "Federal", "Other State Agency") ~
      str_trim(str_c(as.character(Agency), Unit, sep = " ")),
    TRUE ~ City
  ))

# Rows whose City could not be resolved -- kept for manual inspection only.
hcnas <- hcrimes[which(is.na(hcrimes$City)), ]
hcrimes$City <- as.factor(hcrimes$City)

# Per-city, per-year counts by bias type, widened to one column per
# type/year combination.
sum_hc <- hcrimes[which(!is.na(hcrimes$City)), ] %>%
  group_by(St, City, Year) %>%
  summarise(
    RaceCrimes       = sum(RaceBias),
    ReligionCrimes   = sum(ReligionBias),
    SexCrimes        = sum(SexBias),
    GenderCrimes     = sum(GenderBias),
    DisabilityCrimes = sum(DisBias),
    .groups = "drop"
  ) %>%
  pivot_wider(
    names_from  = Year,
    values_from = c(RaceCrimes, ReligionCrimes, SexCrimes,
                    GenderCrimes, DisabilityCrimes),
    values_fill = 0
  )
sum_hc[is.na(sum_hc)] <- 0  # belt and braces; values_fill should already cover this

# Segregation cities with their crime counts and per-type totals.
sh <- seg_loc %>%
  left_join(sum_hc, by = c("City" = "City", "State" = "St")) %>%
  add_crime_totals()
write_csv(sh, "JoinedData.csv")

# Crime cities absent from the segregation data: geocode via uscities.csv.
not_joined <- sum_hc %>%
  anti_join(seg_loc, by = c("City" = "City", "St" = "State"))
loc <- read_csv("uscities.csv")
hc_loc <- not_joined %>%
  add_crime_totals() %>%
  left_join(loc, by = c("City" = "city_ascii", "St" = "state_id")) %>%
  # NOTE(review): positional selection (crime columns plus what are
  # presumably the lat/lng columns of uscities.csv) is fragile -- it silently
  # breaks if uscities.csv gains or loses columns.  Verify the indices.
  select(c(1:152, 157, 158))

hc_loc_nas <- hc_loc[is.na(hc_loc$lat), ]   # cities we failed to geocode
hc_loc     <- hc_loc[!is.na(hc_loc$lat), ]

# Project lng/lat into the usmap coordinate system and attach the result.
hc_map_points <- usmap_transform(data.frame(hc_loc$lng, hc_loc$lat))
hc_loc <- hc_loc %>%
  left_join(hc_map_points, by = c("lng" = "hc_loc.lng", "lat" = "hc_loc.lat"))
write_csv(hc_loc, "NotJoinedData.csv")

# Per-college counts: label each campus "Agency Unit" (or just the agency
# name when Unit is missing), then aggregate exactly as above.
colleges <- hcrimes[which(hcrimes$Agency_Type == "University or College"), ]
colleges$College <- as.factor(if_else(
  is.na(colleges$Unit),
  as.character(colleges$Agency),
  str_c(as.character(colleges$Agency), as.character(colleges$Unit), sep = " ")
))

colleges_sum <- colleges %>%
  group_by(St, College, Year) %>%
  summarise(
    RaceCrimes       = sum(RaceBias),
    ReligionCrimes   = sum(ReligionBias),
    SexCrimes        = sum(SexBias),
    GenderCrimes     = sum(GenderBias),
    DisabilityCrimes = sum(DisBias),
    .groups = "drop"
  ) %>%
  pivot_wider(
    names_from  = Year,
    values_from = c(RaceCrimes, ReligionCrimes, SexCrimes,
                    GenderCrimes, DisabilityCrimes),
    values_fill = 0
  ) %>%
  add_crime_totals()
write_csv(colleges_sum, "CollegeCrimes.csv")
/Background_Code/ShinyData.R
no_license
mahatfield/HateCrimes
R
false
false
17,000
r
# ShinyData.R -- build the data sets consumed by the Shiny app:
#   * JoinedData.csv    : segregation-study cities joined with hate-crime counts
#   * NotJoinedData.csv : hate-crime cities absent from the segregation data,
#                         geocoded via uscities.csv and projected for usmap
#   * CollegeCrimes.csv : per-college hate-crime counts
library(readr)
library(dplyr)
library(tidyr)
library(usmap)
library(maptools)
library(ggplot2)
library(stringr)

# Add one row-total column per bias type by summing the 1991-2019 per-year
# columns created by pivot_wider().  num_range() selects exactly the
# "<prefix>_<year>" columns, so the new *Total columns can never feed back
# into the sums, and a year missing from the data is skipped instead of
# crashing the script (the old hand-typed backtick sums errored on any
# missing column).
add_crime_totals <- function(df) {
  df %>%
    mutate(
      DisCrimesTotal      = rowSums(across(num_range("DisabilityCrimes_", 1991:2019))),
      GenderCrimesTotal   = rowSums(across(num_range("GenderCrimes_",     1991:2019))),
      SexCrimesTotal      = rowSums(across(num_range("SexCrimes_",        1991:2019))),
      ReligionCrimesTotal = rowSums(across(num_range("ReligionCrimes_",   1991:2019))),
      RaceCrimesTotal     = rowSums(across(num_range("RaceCrimes_",       1991:2019)))
    )
}

seg_loc <- read_csv("SegregationLocation.csv")
hcrimes <- read_csv("HateCrimes.csv")

hcrimes$State       <- as.factor(hcrimes$State)
hcrimes$Agency      <- as.factor(hcrimes$Agency)
hcrimes$Agency_Type <- as.factor(hcrimes$Agency_Type)

# Derive a City column from the reporting agency, then move it next to the
# leading identifier columns (new column 34 -> position 5).
hcrimes <- hcrimes %>% mutate(City = as.character(Agency))
hcrimes <- hcrimes[, c(1:4, 34, 5:33)]

# Vectorized replacement for the original row-by-row loop:
#   * campus and state police agencies report under their Unit,
#   * other / federal / other-state agencies under "Agency Unit",
#   * everything else keeps the agency name assigned above.
hcrimes <- hcrimes %>%
  mutate(City = case_when(
    Agency_Type %in% c("University or College", "State Police") ~ as.character(Unit),
    Agency_Type %in% c("Other", "Federal", "Other State Agency") ~
      str_trim(str_c(as.character(Agency), Unit, sep = " ")),
    TRUE ~ City
  ))

# Rows whose City could not be resolved -- kept for manual inspection only.
hcnas <- hcrimes[which(is.na(hcrimes$City)), ]
hcrimes$City <- as.factor(hcrimes$City)

# Per-city, per-year counts by bias type, widened to one column per
# type/year combination.
sum_hc <- hcrimes[which(!is.na(hcrimes$City)), ] %>%
  group_by(St, City, Year) %>%
  summarise(
    RaceCrimes       = sum(RaceBias),
    ReligionCrimes   = sum(ReligionBias),
    SexCrimes        = sum(SexBias),
    GenderCrimes     = sum(GenderBias),
    DisabilityCrimes = sum(DisBias),
    .groups = "drop"
  ) %>%
  pivot_wider(
    names_from  = Year,
    values_from = c(RaceCrimes, ReligionCrimes, SexCrimes,
                    GenderCrimes, DisabilityCrimes),
    values_fill = 0
  )
sum_hc[is.na(sum_hc)] <- 0  # belt and braces; values_fill should already cover this

# Segregation cities with their crime counts and per-type totals.
sh <- seg_loc %>%
  left_join(sum_hc, by = c("City" = "City", "State" = "St")) %>%
  add_crime_totals()
write_csv(sh, "JoinedData.csv")

# Crime cities absent from the segregation data: geocode via uscities.csv.
not_joined <- sum_hc %>%
  anti_join(seg_loc, by = c("City" = "City", "St" = "State"))
loc <- read_csv("uscities.csv")
hc_loc <- not_joined %>%
  add_crime_totals() %>%
  left_join(loc, by = c("City" = "city_ascii", "St" = "state_id")) %>%
  # NOTE(review): positional selection (crime columns plus what are
  # presumably the lat/lng columns of uscities.csv) is fragile -- it silently
  # breaks if uscities.csv gains or loses columns.  Verify the indices.
  select(c(1:152, 157, 158))

hc_loc_nas <- hc_loc[is.na(hc_loc$lat), ]   # cities we failed to geocode
hc_loc     <- hc_loc[!is.na(hc_loc$lat), ]

# Project lng/lat into the usmap coordinate system and attach the result.
hc_map_points <- usmap_transform(data.frame(hc_loc$lng, hc_loc$lat))
hc_loc <- hc_loc %>%
  left_join(hc_map_points, by = c("lng" = "hc_loc.lng", "lat" = "hc_loc.lat"))
write_csv(hc_loc, "NotJoinedData.csv")

# Per-college counts: label each campus "Agency Unit" (or just the agency
# name when Unit is missing), then aggregate exactly as above.
colleges <- hcrimes[which(hcrimes$Agency_Type == "University or College"), ]
colleges$College <- as.factor(if_else(
  is.na(colleges$Unit),
  as.character(colleges$Agency),
  str_c(as.character(colleges$Agency), as.character(colleges$Unit), sep = " ")
))

colleges_sum <- colleges %>%
  group_by(St, College, Year) %>%
  summarise(
    RaceCrimes       = sum(RaceBias),
    ReligionCrimes   = sum(ReligionBias),
    SexCrimes        = sum(SexBias),
    GenderCrimes     = sum(GenderBias),
    DisabilityCrimes = sum(DisBias),
    .groups = "drop"
  ) %>%
  pivot_wider(
    names_from  = Year,
    values_from = c(RaceCrimes, ReligionCrimes, SexCrimes,
                    GenderCrimes, DisabilityCrimes),
    values_fill = 0
  ) %>%
  add_crime_totals()
write_csv(colleges_sum, "CollegeCrimes.csv")
# Interactive smoke test for kwb.nextcloud: upload, version, and delete a
# test file on the configured Nextcloud instance.  Run line by line as the
# test user; every call below hits the live server.
#
# NOTE(review): `really` is never defined in this script -- the delete calls
# error unless you first set e.g. `really <- TRUE` in your session.  This
# looks like a deliberate safety latch against accidental deletion; confirm.

# Make also private functions available
kwb.utils::assignPackageObjects("kwb.nextcloud")

# Are we the test user?
nextcloud_user()

# Ok, we seem to be the test user, let's mess around!

# List the files and folders on the main level
list_files()

# Get the file and folder properties
file_info <- list_files(full_info = TRUE)
file_info

# Show only the files
file_info <- exclude_directories(file_info)
file_info

# Relative paths to the files
file_info$file

# Download the files
download_files(file_info$href)

# Create a folder on the cloud
create_folder("testfolder-1")

# Did the folder arrive?
"testfolder-1/" %in% list_files()

# Path to a local temporary file
file <- file.path(tempdir(), "testfile.txt")

# Path to target folder on the cloud
target_path <- "testfolder-1"

# Upload first version of the file
writeLines("Hi folks version 1!", file)
readLines(file)
upload_file(file, target_path)

# Upload second version of the file
writeLines("Hi folks version 2!", file)
readLines(file)
upload_file(file, target_path)

# Upload third version of the file
writeLines("Hi folks version 3!", file)
readLines(file)
upload_file(file, target_path)

# Get the fileid of the uploaded file
file_info <- list_files(target_path, full_info = TRUE)
fileid <- file_info$fileid[file_info$file == "testfile.txt"]
fileid

# Show the versions of the file (identified by fileid)
version_info <- get_version_info(fileid)

# There are two versions (the current version is not shown!)
version_info

# Download all versions (unfortunately named by timestamp)
downloaded_files <- download_files(version_info$href)
downloaded_files

# Read the contents (should be version 1 and 2)
lapply(downloaded_files, readLines)

# Delete a specific version? This seems to be not working
nextcloud_request(version_info$href[1], "DELETE", really = really)

# Delete the file
# I am using the string constant instead of "href" to see what I do!
delete_file_or_folder("testfolder-1/testfile.txt", really = really)

# File deleted? Yes!
/R/.test_put-delete.R
permissive
KWB-R/kwb.nextcloud
R
false
false
2,435
r
# Interactive smoke test for kwb.nextcloud: upload, version, and delete a
# test file on the configured Nextcloud instance.  Run line by line as the
# test user; every call below hits the live server.
#
# NOTE(review): `really` is never defined in this script -- the delete calls
# error unless you first set e.g. `really <- TRUE` in your session.  This
# looks like a deliberate safety latch against accidental deletion; confirm.

# Make also private functions available
kwb.utils::assignPackageObjects("kwb.nextcloud")

# Are we the test user?
nextcloud_user()

# Ok, we seem to be the test user, let's mess around!

# List the files and folders on the main level
list_files()

# Get the file and folder properties
file_info <- list_files(full_info = TRUE)
file_info

# Show only the files
file_info <- exclude_directories(file_info)
file_info

# Relative paths to the files
file_info$file

# Download the files
download_files(file_info$href)

# Create a folder on the cloud
create_folder("testfolder-1")

# Did the folder arrive?
"testfolder-1/" %in% list_files()

# Path to a local temporary file
file <- file.path(tempdir(), "testfile.txt")

# Path to target folder on the cloud
target_path <- "testfolder-1"

# Upload first version of the file
writeLines("Hi folks version 1!", file)
readLines(file)
upload_file(file, target_path)

# Upload second version of the file
writeLines("Hi folks version 2!", file)
readLines(file)
upload_file(file, target_path)

# Upload third version of the file
writeLines("Hi folks version 3!", file)
readLines(file)
upload_file(file, target_path)

# Get the fileid of the uploaded file
file_info <- list_files(target_path, full_info = TRUE)
fileid <- file_info$fileid[file_info$file == "testfile.txt"]
fileid

# Show the versions of the file (identified by fileid)
version_info <- get_version_info(fileid)

# There are two versions (the current version is not shown!)
version_info

# Download all versions (unfortunately named by timestamp)
downloaded_files <- download_files(version_info$href)
downloaded_files

# Read the contents (should be version 1 and 2)
lapply(downloaded_files, readLines)

# Delete a specific version? This seems to be not working
nextcloud_request(version_info$href[1], "DELETE", really = really)

# Delete the file
# I am using the string constant instead of "href" to see what I do!
delete_file_or_folder("testfolder-1/testfile.txt", really = really)

# File deleted? Yes!
list_files("testfolder-1")

# All versions deleted? Yes!!!
get_version_info(fileid) # -> error

# Upload the testfile again
upload_file(file, target_path)

# Check for success
list_files(target_path)

# Now try to delete the whole directory
delete_file_or_folder("testfolder-1", really = really)

# Try to list again
list_files(target_path) # -> not found (any more)!

# List the upper level -> no more testfolder!
list_files()
library(mtk) ### Name: mtkMorrisAnalyser ### Title: The constructor of the class 'mtkMorrisAnalyser' ### Aliases: mtkMorrisAnalyser ### ** Examples ## Sensitivity analysis of the "Ishigami" model with the "Morris" method # Generate the factors data(Ishigami.factors) # Build the processes and workflow: # 1) the design process exp1.designer <- mtkMorrisDesigner( listParameters = list(r=20, type="oat", levels=4, grid.jump=2)) # 2) the simulation process exp1.evaluator <- mtkNativeEvaluator(model="Ishigami") # 3) the analysis process exp1.analyser <- mtkMorrisAnalyser() # 4) the workflow exp1 <- mtkExpWorkflow(expFactors=Ishigami.factors, processesVector = c(design=exp1.designer, evaluate=exp1.evaluator, analyze=exp1.analyser)) # Run the workflow and report the results. run(exp1) print(exp1)
/data/genthat_extracted_code/mtk/examples/mtkMorrisAnalyser.Rd.R
no_license
surayaaramli/typeRrh
R
false
false
862
r
library(mtk) ### Name: mtkMorrisAnalyser ### Title: The constructor of the class 'mtkMorrisAnalyser' ### Aliases: mtkMorrisAnalyser ### ** Examples ## Sensitivity analysis of the "Ishigami" model with the "Morris" method # Generate the factors data(Ishigami.factors) # Build the processes and workflow: # 1) the design process exp1.designer <- mtkMorrisDesigner( listParameters = list(r=20, type="oat", levels=4, grid.jump=2)) # 2) the simulation process exp1.evaluator <- mtkNativeEvaluator(model="Ishigami") # 3) the analysis process exp1.analyser <- mtkMorrisAnalyser() # 4) the workflow exp1 <- mtkExpWorkflow(expFactors=Ishigami.factors, processesVector = c(design=exp1.designer, evaluate=exp1.evaluator, analyze=exp1.analyser)) # Run the workflow and report the results. run(exp1) print(exp1)
## Reading the data file downloaded and extracted from ## https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip power_cons <- read.table("./household_power_consumption.txt", header=TRUE, sep=";", stringsAsFactors=FALSE, na.strings="?") plot_data <- subset(power_cons, power_cons$Date == "1/2/2007" | power_cons$Date == "2/2/2007") # Joining Date and Time columns of data plot_data$Date_Time <- paste(plot_data$Date, plot_data$Time, sep=" ") # Deleting Date and Time columns as we created new joint column plot_data <- subset(plot_data, select=c(3:10)) plot_data$Date_Time <- strptime(plot_data$Date_Time, format="%d/%m/%Y %H:%M:%S") png("Plot3.png", width = 480, height = 480) plot(plot_data$Date_Time, plot_data$Sub_metering_1, type = "l", xlab ="", ylab = "Energy sub metering") lines(plot_data$Date_Time, plot_data$Sub_metering_2, col = "red") lines(plot_data$Date_Time, plot_data$Sub_metering_3, col = "blue") legend("topright",lty=c(1,1,1), col = c("black", "red", "blue"), legend=c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3")) dev.off()
/Plot3.R
no_license
Channabasavaraj/ExData_Plotting1
R
false
false
1,095
r
## Reading the data file downloaded and extracted from ## https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip power_cons <- read.table("./household_power_consumption.txt", header=TRUE, sep=";", stringsAsFactors=FALSE, na.strings="?") plot_data <- subset(power_cons, power_cons$Date == "1/2/2007" | power_cons$Date == "2/2/2007") # Joining Date and Time columns of data plot_data$Date_Time <- paste(plot_data$Date, plot_data$Time, sep=" ") # Deleting Date and Time columns as we created new joint column plot_data <- subset(plot_data, select=c(3:10)) plot_data$Date_Time <- strptime(plot_data$Date_Time, format="%d/%m/%Y %H:%M:%S") png("Plot3.png", width = 480, height = 480) plot(plot_data$Date_Time, plot_data$Sub_metering_1, type = "l", xlab ="", ylab = "Energy sub metering") lines(plot_data$Date_Time, plot_data$Sub_metering_2, col = "red") lines(plot_data$Date_Time, plot_data$Sub_metering_3, col = "blue") legend("topright",lty=c(1,1,1), col = c("black", "red", "blue"), legend=c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3")) dev.off()
#Set working directory dirPath<-"/Users/MaxTan/Documents/CU_16spring/EDAV/assign1" setwd(dirPath) filename<-"Survey+Response.xlsx" source("tidydata.R") df<-tidydata(filename) source("GenderProgramPlot.R") GenderProgramPlot(df) source("TextEditorProgramPlot.R") TextEditorProgramPlot(df) source("ToolScoreProgramPlot.R") ToolScoreProgramPlot(df) source("SkillScoreProgramPlot.R") SkillScoreProgramPlot(df) source("SkillScorePieChart.R") SkillScorePieChart(df)
/Main.R
no_license
MakarAl/EDAV_Project_Class
R
false
false
468
r
#Set working directory dirPath<-"/Users/MaxTan/Documents/CU_16spring/EDAV/assign1" setwd(dirPath) filename<-"Survey+Response.xlsx" source("tidydata.R") df<-tidydata(filename) source("GenderProgramPlot.R") GenderProgramPlot(df) source("TextEditorProgramPlot.R") TextEditorProgramPlot(df) source("ToolScoreProgramPlot.R") ToolScoreProgramPlot(df) source("SkillScoreProgramPlot.R") SkillScoreProgramPlot(df) source("SkillScorePieChart.R") SkillScorePieChart(df)
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/frauddetector_operations.R \name{frauddetector_create_variable} \alias{frauddetector_create_variable} \title{Creates a variable} \usage{ frauddetector_create_variable( name, dataType, dataSource, defaultValue, description = NULL, variableType = NULL, tags = NULL ) } \arguments{ \item{name}{[required] The name of the variable.} \item{dataType}{[required] The data type of the variable.} \item{dataSource}{[required] The source of the data.} \item{defaultValue}{[required] The default value for the variable when no value is received.} \item{description}{The description.} \item{variableType}{The variable type. For more information see \href{https://docs.aws.amazon.com/frauddetector/latest/ug/create-a-variable.html#variable-types}{Variable types}. Valid Values: \code{AUTH_CODE | AVS | BILLING_ADDRESS_L1 | BILLING_ADDRESS_L2 | BILLING_CITY | BILLING_COUNTRY | BILLING_NAME | BILLING_PHONE | BILLING_STATE | BILLING_ZIP | CARD_BIN | CATEGORICAL | CURRENCY_CODE | EMAIL_ADDRESS | FINGERPRINT | FRAUD_LABEL | FREE_FORM_TEXT | IP_ADDRESS | NUMERIC | ORDER_ID | PAYMENT_TYPE | PHONE_NUMBER | PRICE | PRODUCT_CATEGORY | SHIPPING_ADDRESS_L1 | SHIPPING_ADDRESS_L2 | SHIPPING_CITY | SHIPPING_COUNTRY | SHIPPING_NAME | SHIPPING_PHONE | SHIPPING_STATE | SHIPPING_ZIP | USERAGENT}} \item{tags}{A collection of key and value pairs.} } \description{ Creates a variable. See \url{https://www.paws-r-sdk.com/docs/frauddetector_create_variable/} for full documentation. } \keyword{internal}
/cran/paws.machine.learning/man/frauddetector_create_variable.Rd
permissive
paws-r/paws
R
false
true
1,578
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/frauddetector_operations.R \name{frauddetector_create_variable} \alias{frauddetector_create_variable} \title{Creates a variable} \usage{ frauddetector_create_variable( name, dataType, dataSource, defaultValue, description = NULL, variableType = NULL, tags = NULL ) } \arguments{ \item{name}{[required] The name of the variable.} \item{dataType}{[required] The data type of the variable.} \item{dataSource}{[required] The source of the data.} \item{defaultValue}{[required] The default value for the variable when no value is received.} \item{description}{The description.} \item{variableType}{The variable type. For more information see \href{https://docs.aws.amazon.com/frauddetector/latest/ug/create-a-variable.html#variable-types}{Variable types}. Valid Values: \code{AUTH_CODE | AVS | BILLING_ADDRESS_L1 | BILLING_ADDRESS_L2 | BILLING_CITY | BILLING_COUNTRY | BILLING_NAME | BILLING_PHONE | BILLING_STATE | BILLING_ZIP | CARD_BIN | CATEGORICAL | CURRENCY_CODE | EMAIL_ADDRESS | FINGERPRINT | FRAUD_LABEL | FREE_FORM_TEXT | IP_ADDRESS | NUMERIC | ORDER_ID | PAYMENT_TYPE | PHONE_NUMBER | PRICE | PRODUCT_CATEGORY | SHIPPING_ADDRESS_L1 | SHIPPING_ADDRESS_L2 | SHIPPING_CITY | SHIPPING_COUNTRY | SHIPPING_NAME | SHIPPING_PHONE | SHIPPING_STATE | SHIPPING_ZIP | USERAGENT}} \item{tags}{A collection of key and value pairs.} } \description{ Creates a variable. See \url{https://www.paws-r-sdk.com/docs/frauddetector_create_variable/} for full documentation. } \keyword{internal}
library(dplyr) library(ggplot2) library(gridExtra) library(ggpubr) library(data.table) library(reshape2) library(knitr) library(cowplot) mutate_TB<-function(df){ df$disease<-paste(df$TB, df$SM, sep=" ") df$disease<-factor(df$disease, levels=c("HC X", "HC SM", "LTBI X", "LTBI SM", "TB X", "TB SM")) df$TB<-gsub("LTBI", "QFT+", df$TB) df<-filter(df, SM!="N", TB!="HC") df$SM<-factor(df$SM, levels=c("X","SM")) df$TB<-factor(df$TB, levels=c("QFT+","TB")) df } filter_tb<-function(df){ df$SM<-gsub("SM", "SM+", df$SM) df$SM<-gsub("X", "SM-", df$SM) df<-filter(df, SM!="N", TB=="TB") df$SM<-factor(df$SM, levels=c("SM-","SM+")) df$TB<-factor(df$TB, levels=c("TB")) df$disease<-paste(df$TB, df$SM, sep=" ") df$disease<-factor(df$disease, levels=c("TB SM-", "TB SM+")) names(df)<-gsub("Tbet", "T-bet", names(df)) df } filter_ltbi<-function(df){ df$SM<-gsub("SM", "SM+", df$SM) df$SM<-gsub("X", "SM-", df$SM) df<-filter(df, SM!="N", TB=="LTBI") df$SM<-factor(df$SM, levels=c("SM-","SM+")) df$disease<-paste(df$TB, df$SM, sep=" ") df$disease<-factor(df$disease, levels=c("LTBI SM-", "LTBI SM+")) names(df)<-gsub("Tbet", "T-bet", names(df)) df } filter_hc<-function(df){ df$SM<-gsub("SM", "SM+", df$SM) df$SM<-gsub("X", "SM-", df$SM) df<-filter(df, SM!="N", TB=="HC") df$SM<-factor(df$SM, levels=c("SM-","SM+")) df$disease<-paste(df$TB, df$SM, sep=" ") df$disease<-factor(df$disease, levels=c("HC SM-", "HC SM+")) names(df)<-gsub("Tbet", "T-bet", names(df)) df } plot_filter<-function(datatable){ library(dplyr) df<-filter(datatable, TB!="N") df$TB<-factor(df$TB, levels=c("HC","LTBI","TB")) df$SM<-gsub("SM", "SM+", df$SM) df$SM<-gsub("X", "SM-", df$SM) df$SM<-factor(df$SM, levels=c("SM-","SM+")) df$disease<-paste(df$TB, df$SM, sep=" ") df$disease<-factor(df$disease, levels=c("HC SM-", "HC SM+", "LTBI SM-", "LTBI SM+", "TB SM-", "TB SM+")) names(df)<-gsub("Tbet", "T-bet", names(df)) df } filter_all<-function(datatable){ library(dplyr) df<-datatable df$TB<-factor(df$TB, levels=c("N","HC","LTBI","TB")) 
df$SM<-gsub("SM", "SM+", df$SM) df$SM<-gsub("X", "SM-", df$SM) df$SM<-factor(df$SM, levels=c("N","SM-","SM+")) df$disease<-paste(df$TB, df$SM, sep=" ") df$disease<-gsub("N N", "Naive", df$disease) df$disease<-factor(df$disease, levels=c("Naive","HC SM-", "HC SM+", "LTBI SM-", "LTBI SM+", "TB SM-", "TB SM+")) names(df)<-gsub("Tbet", "T-bet", names(df)) df } plot_compass<-function(DF, yval){ library(dplyr) DF$disease<-paste(DF$TB, DF$SM, sep=" ") DF$disease<-factor(DF$disease, levels=c("HC X", "HC SM", "LTBI X", "LTBI SM", "TB X", "TB SM")) g<-ggplot(DF, aes(x=disease, y=DF[,yval])) g+ geom_boxplot(aes(fill=TB, alpha=SM), size=1, position=position_dodge(width = 1), outlier.shape=NA)+ geom_jitter(width=.1,height=0, shape=16,size=2)+ scale_alpha_manual(values=c(0.5,1))+ scale_fill_manual(values = c("#1a9850" , "#2166ac", "#b2182b"))+ theme_classic()+ theme(legend.position="none")+ theme(text = element_text(size=20), axis.text.x = element_text(angle=90, vjust=0.6)) + theme(plot.title = element_text(hjust = 0.5)) + stat_compare_means(comparisons = my_comparisons, p.adjust="bonferroni") } plot_with_all_stats<-function(DF, yval){ DF<-dplyr::filter(DF, TB != "N") DF$disease<-paste(DF$TB, DF$SM, sep=" ") DF$disease<-factor(DF$disease, levels=c("HC X", "HC SM", "LTBI X", "LTBI SM", "TB X", "TB SM")) my_comparisons <- list(c("HC SM", "HC X"),c("LTBI SM", "LTBI X"), c("TB SM", "TB X"), c("TB SM", "LTBI SM"), c("TB X", "LTBI X"), c("HC SM", "LTBI SM"), c("HC X", "LTBI X")) g<-ggplot(DF, aes(x=disease, y=DF[,yval])) g+ geom_boxplot(aes(fill=TB, alpha=SM), size=1, position=position_dodge(width = 1), outlier.shape=NA)+ geom_jitter(width=.1,height=0, shape=16,size=2)+ scale_alpha_manual(values=c(0.5,1))+ scale_fill_manual(values = c("#1a9850" , "#2166ac", "#b2182b"))+ theme_classic()+ theme(legend.position="none")+ theme(text = element_text(size=16)) + theme(plot.subtitle = element_text(hjust = 0.5)) + stat_compare_means(comparisons = my_comparisons, p.adjust="bonferroni") } 
plot_3<-function(DF, yval){ DF<-dplyr::filter(DF, TB != "N") my_comparisons <- list(c("HC","LTBI"), c("TB", "LTBI"), c("HC", "TB")) g<-ggplot(DF, aes(x=TB, y=DF[,yval])) g+ geom_boxplot(aes(fill=TB), size=1, position=position_dodge(width = 1), outlier.shape=NA)+ geom_jitter(width=.1,height=0, shape=16,size=2)+ scale_alpha_manual(values=c(0.5,1))+ scale_fill_manual(values = c("#1a9850" , "#2166ac", "#b2182b"))+ theme_classic()+ theme(legend.position="none")+ theme(text = element_text(size=16)) + theme(plot.subtitle = element_text(hjust = 0.5)) + stat_compare_means(comparisons = my_comparisons, p.adjust="bonferroni") } plot_3<-function(DF, yval, xval){ my_comparisons <- list(c("N", "HC"), c("N", "LTBI"), c("N", "TB"), c("HC","LTBI"), c("TB", "LTBI"), c("HC", "TB")) g<-ggplot(DF, aes(x=DF[,xval], y=DF[,yval])) g+ geom_boxplot(aes(fill=DF[,xval]), size=1, position=position_dodge(width = 1), outlier.shape=NA)+ geom_jitter(width=.1,height=0, shape=16,size=2)+ scale_alpha_manual(values=c(0.5,1))+ scale_fill_manual(values =c("N" = "#da70d6","HC" = "#00AFBB", "LTBI" = "#E7B800","TB"="#FC4E07", "X" = "#00AFBB", "SM" = "#E7B800"))+ theme_classic()+ theme(legend.position="none")+ theme(text = element_text(size=16)) + theme(plot.subtitle = element_text(hjust = 0.5)) + stat_compare_means(comparisons = my_comparisons) } plot_cell<-function(DF, yval){ my_comparisons <- list(c("CD4","CD8"), c("CD8", "GD"), c("CD4", "GD")) g<-ggplot(DF, aes(x=celltype, y=DF[,yval])) g+ geom_boxplot(aes(fill=TB), size=1, position=position_dodge(width = 1), outlier.shape=NA)+ geom_jitter(width=.1,height=0, shape=16,size=2)+ scale_fill_manual(values =c("N" = "#da70d6","HC" = "#00AFBB", "LTBI" = "#E7B800","TB"="#FC4E07"))+ theme_classic()+ theme(legend.position="none")+ theme(text = element_text(size=16)) + theme(plot.subtitle = element_text(hjust = 0.5)) + stat_compare_means(comparisons = my_comparisons, label="p.signif") } triple_boolean_plot<-function(datatable){ library(dplyr) DF<-datatable 
Boolean<-dplyr::select(DF, Donor, TB, SM, Stim, G_4_13_T, G_4_x_T, G_x_13_T, G_4_13_x, x_4_13_T, G_4_x_x, G_x_13_x, x_4_x_T, x_x_13_T, G_x_x_T , G_x_x_x, x_x_x_T, x_4_13_x, x_4_x_x, x_x_13_x)%>% dplyr::filter(TB!="N") melt<-melt(Boolean,id.vars=c("Donor","TB","SM", "Stim")) data1<-filter(melt, variable %in% c("G_4_13_T", "G_4_x_T", "G_x_13_T", "G_4_13_x", "x_4_13_T","G_4_x_x", "G_x_13_x","x_4_x_T","x_x_13_T")) data2<-filter(melt, variable %in% c("G_x_x_T" , "G_x_x_x","x_x_x_T")) data3<-filter(melt, variable %in% c("x_4_13_x","x_4_x_x", "x_x_13_x")) g1<-ggplot(data1, aes(x=variable, y=value, fill=TB, alpha=SM))+ geom_boxplot(size=1, position=position_dodge(width = 1)) + scale_fill_manual(values =c("#1a9850" , "#2166ac", "#b2182b"))+ scale_alpha_manual(values=c(0.5,1))+ theme_classic()+ theme(legend.position="none")+ theme(strip.text.y = element_blank())+ theme(text = element_text(size=20), axis.text.x = element_blank())+ labs(y="Cytokine+ CD4+ Cells (%)", x="", title="TH1/2")+ theme(plot.title = element_text(hjust = 0.5)) + facet_grid(TB~., scale="fixed")+ stat_compare_means(label = "p.signif", p.adjust.method = "fdr", hide.ns = TRUE, size = 8, method="wilcox.test", paired = FALSE) g2<-ggplot(data2, aes(x=variable, y=value, fill=TB, alpha=SM))+ geom_boxplot(size=1, position=position_dodge(width = 1)) + scale_fill_manual(values =c("#1a9850" , "#2166ac", "#b2182b"))+ scale_alpha_manual(values=c(0.5,1))+ theme_classic()+ theme(legend.position="none")+ theme(strip.text.y = element_blank())+ theme(text = element_text(size=20), axis.text.x = element_blank()) + labs(y="", x="", title="TH1")+ theme(plot.title = element_text(hjust = 0.5)) + facet_grid(TB~., scale="fixed")+ stat_compare_means(label = "p.signif", p.adjust.method = "fdr", hide.ns = TRUE, size = 8, method="wilcox.test", paired = FALSE) g3<-ggplot(data3, aes(x=variable, y=value, fill=TB, alpha=SM))+ geom_boxplot(size=1, position=position_dodge(width = 1)) + scale_fill_manual(values =c("#1a9850" , "#2166ac", 
"#b2182b"))+ scale_alpha_manual(values=c(0.5,1))+ theme_classic()+ theme(legend.position="none")+ theme(text = element_text(size=16), axis.text.x = element_blank()) + labs(y="", x="", title="TH2")+ theme(plot.title = element_text(hjust = 0.5)) + facet_grid(TB~., scale="fixed")+ stat_compare_means(label = "p.signif", p.adjust.method = "fdr", hide.ns = TRUE, size = 8, method="wilcox.test", paired = FALSE) plot<-grid.arrange(g1, g2, g3, ncol=3, widths=c(8,4,4), top=text_grob("Cytokine Production", size=24)) } plot_6<-function(data, xval, yval){ ggplot(data, aes(x=data[,xval], y=data[,yval]))+ geom_boxplot(size=1, position=position_dodge(width = 1), outlier.shape = NA, aes(fill=TB, alpha=SM)) + geom_jitter(width=.1,height=0, shape=16,size=2)+ scale_fill_manual(values =c("#1a9850" , "#2166ac", "#b2182b"))+ scale_alpha_manual(values=c(0.5,1))+ scale_x_discrete(labels=c("SM-", "SM+"))+ theme_classic()+ theme(legend.position="none")+ theme(axis.text.x = element_text(angle=90, vjust=0.6)) + theme(text = element_text(size=12), axis.text.x = element_text(angle=90, vjust=0.6)) + facet_grid(~TB, scale="free")+ stat_compare_means(label = "p.format", p.adjust.method = "fdr", hide.ns = TRUE, size = 4, method="wilcox.test", paired = FALSE, label.y= (max(data[,yval])*1.1)) } plot_2<-function(data, xval, yval){ my_comparisons <- list(c("LTBI SM", "LTBI X"), c("TB SM", "TB X"), c("TB SM", "LTBI SM"), c("TB X", "LTBI X")) ggplot(data, aes(x=data[,xval], y=data[,yval]))+ geom_boxplot(size=1, position=position_dodge(width = 1), outlier.shape = NA, aes(fill=TB, alpha=SM)) + geom_jitter(width=.1,height=0, shape=16,size=2)+ scale_fill_manual(values =c("#2166ac", "#b2182b"))+ scale_alpha_manual(values=c(0.5,1))+ scale_x_discrete(labels=c("QFT+SM-", "QFT+SM+", "TB SM-","TB SM+"))+ theme_classic()+ theme(legend.position="none")+ theme(plot.title = element_text(hjust = 0.5))+ theme(text = element_text(size=14)) + stat_compare_means(comparisons=my_comparisons, aes(label=..p.adj..), label = 
"p.format", p.adjust.method = "fdr", hide.ns = TRUE, size = 5, method="wilcox.test", paired = FALSE, na.rm=TRUE)+ labs(title="", x="", y=yval)} plot_compare<-function(data, xval, yval){ ##Compares Total responses to Mtb-specific responses not paired ##requires melted data where xval == ggplot(data, aes(x=data[,xval], y=data[,yval]))+ geom_boxplot(size=1, position=position_dodge(width = 1), outlier.shape = NA, aes(fill=TB, alpha=SM)) + geom_jitter(width=.1,height=0, shape=16,size=2)+ scale_fill_manual(values =c("#2166ac", "#b2182b"))+ scale_alpha_manual(values=c(0.5,1))+ theme_classic()+ theme(legend.position="none")+ theme(plot.title = element_text(hjust = 0.5))+ theme(text = element_text(size=20), axis.text.x = element_text(angle=30, vjust=0.6)) + theme(strip.text.x = element_blank())+ facet_grid(~TB+SM, scale="free")+ stat_compare_means(label = "p.format", p.adjust.method = "fdr", hide.ns = TRUE, size = 6, method="wilcox.test", paired = FALSE, label.y = 1.1*(max(data[,yval], na.rm = TRUE)))+ labs(y=paste(yval, "+ CD4+ (%)"), x="") } plot_pbmc<-function(data, yval){ data[,yval]<-as.numeric(data[,yval]) ggplot(data, aes(x=SM, y=data[,yval]))+ geom_boxplot(size=1, position=position_dodge(width = 1), outlier.shape = NA, aes(fill=TB, alpha=SM)) + geom_jitter(width=.1,height=0, shape=16,size=2)+ scale_alpha_manual(values=c(0.5,0.5,1))+ scale_fill_manual(values = c("#e0e0e0", "#1a9850" , "#2166ac", "#b2182b"))+ theme_classic()+ theme(legend.position="none")+ theme(text = element_text(size=14)) + facet_grid(~TB, scales="free")+ labs(x="", y="") } plot_pbmc.pairs<-function(data){ ggplot(data, aes(x=variable, y=value, color=TB, alpha=SM))+ geom_point(shape=16,size=2)+ geom_line(aes(group=Donor))+ scale_alpha_manual(values=c(0.5,0.5,1))+ scale_color_manual(values = c("#e0e0e0", "#1a9850" , "#2166ac", "#b2182b"))+ theme_classic()+ theme(legend.position="none")+ scale_x_discrete(labels=c("Pre", "Post"))+ theme(text = element_text(size=20)) + facet_grid(SM~TB, scales="free")+ 
stat_compare_means(label = "p.format", p.adjust.method = "fdr", hide.ns = TRUE, size = 6, method="wilcox.test", paired = TRUE, label.y = 1.1*(max(data$value, na.rm = TRUE)))+ labs(x="", y="") } plot_stim<-function(data, yval){ ggplot(data, aes(y=data[,yval], x=Stim, col=TB))+ geom_point(size=3, alpha=0.5)+geom_line(aes(group=Donor))+ scale_color_manual(values =c("#2166ac", "#b2182b"))+ theme_bw()+ theme(legend.position="none")+ theme(text = element_text(size=20)) + facet_grid(SM~TB)+ stat_compare_means(comparisons = list(c("PMA", "PEP"), c("PMA","WCL")) , na.rm=TRUE, size=4, label.y.npc = .9)+ labs(y= yval) } baseplot<-function(data, xval, yval, colors){ ggplot(data, aes(data[,xval], data[,yval], fill=SM))+ geom_boxplot(size=1, position=position_dodge(width = 1), outlier.shape = NA)+ scale_fill_manual(values = colors)+ theme_classic()+ theme(legend.position="none")+ theme(panel.border = element_rect(fill = NA, colour = "black"))+ theme(text = element_text(size=20), axis.text.x = element_text(size = 16), strip.text = element_text(size = 20))+ theme(plot.title = element_text(hjust = 0.5, size=20)) + labs(x="", y="") } booleanplot<-function(data, xval, yval, colors){ ggplot(data, aes(data[,xval], data[,yval], fill=SM))+ geom_boxplot(size=1, position=position_dodge(width = 1), outlier.shape = NA)+ scale_fill_manual(values = colors)+ theme_classic()+ theme(legend.position="none")+ theme(text = element_text(size=20), axis.text.x = element_text(size=20), strip.text = element_text(size = 20))+ theme(plot.title = element_text(hjust = 0.5, size=20)) + stat_compare_means(method="wilcox.test", label="p.signif", hide.ns = TRUE, size=10)+ labs(x="", y="") } booleanplot2<-function(data, xval, yval, colors){ ggplot(data, aes(data[,xval], data[,yval], fill=disease))+ geom_boxplot(size=1, position=position_dodge(width = 1), outlier.shape = NA)+ scale_fill_manual(values = colors)+ theme_classic()+ theme(legend.position="none")+ theme(text = element_text(size=20), axis.text.x = 
element_text(size=20), strip.text = element_text(size = 20))+ theme(plot.title = element_text(hjust = 0.5, size=20)) + labs(x="", y="") } allplot<-function(data, xval, yval, colors){ ggplot(data, aes(data[,xval], data[,yval], fill=disease))+ geom_boxplot(size=1, position=position_dodge2(width=0.2, padding = 0.2, preserve = "single"), outlier.shape = NA)+ scale_fill_manual(values = colors)+ theme_classic()+ theme(legend.position="none") + theme(panel.border = element_rect(fill = NA, colour = "black"))+ facet_wrap(~variable)+ theme(text = element_text(size=20), axis.text.x = element_text(size = 16), strip.text = element_text(size = 20))+ theme(plot.title = element_text(hjust = 0.5, size=20))+ labs(x="", y="") } cyt_groups <- as_labeller(c("IFNg" = "IFNγ", "TNFa" = "TNFα", "IL4" = "IL-4", "IL13" = "IL-13")) all_comps<-list(c("HC SM+", "HC SM-"),c("LTBI SM+", "LTBI SM-"), c("TB SM+", "TB SM-")) healthy.cols<- c("SM+" = "#1a9850", "SM-" ="#91cf60") ltbi.cols<- c("SM+" = "#2166ac", "SM-" = "#85ABD1") tb.cols<- c("SM+" = "#b2182b", "SM-"="#E59EA6") all.cols<- c("Naive" = "#C8D0D8","HC SM+" = "#1a9850", "HC SM-" ="#91cf60", "LTBI SM+" = "#2166ac", "LTBI SM-" = "#85ABD1", "TB SM+" = "#b2182b", "TB SM-"="#E59EA6")
/plot_functions.R
no_license
tarynam/random-code
R
false
false
18,018
r
library(dplyr) library(ggplot2) library(gridExtra) library(ggpubr) library(data.table) library(reshape2) library(knitr) library(cowplot) mutate_TB<-function(df){ df$disease<-paste(df$TB, df$SM, sep=" ") df$disease<-factor(df$disease, levels=c("HC X", "HC SM", "LTBI X", "LTBI SM", "TB X", "TB SM")) df$TB<-gsub("LTBI", "QFT+", df$TB) df<-filter(df, SM!="N", TB!="HC") df$SM<-factor(df$SM, levels=c("X","SM")) df$TB<-factor(df$TB, levels=c("QFT+","TB")) df } filter_tb<-function(df){ df$SM<-gsub("SM", "SM+", df$SM) df$SM<-gsub("X", "SM-", df$SM) df<-filter(df, SM!="N", TB=="TB") df$SM<-factor(df$SM, levels=c("SM-","SM+")) df$TB<-factor(df$TB, levels=c("TB")) df$disease<-paste(df$TB, df$SM, sep=" ") df$disease<-factor(df$disease, levels=c("TB SM-", "TB SM+")) names(df)<-gsub("Tbet", "T-bet", names(df)) df } filter_ltbi<-function(df){ df$SM<-gsub("SM", "SM+", df$SM) df$SM<-gsub("X", "SM-", df$SM) df<-filter(df, SM!="N", TB=="LTBI") df$SM<-factor(df$SM, levels=c("SM-","SM+")) df$disease<-paste(df$TB, df$SM, sep=" ") df$disease<-factor(df$disease, levels=c("LTBI SM-", "LTBI SM+")) names(df)<-gsub("Tbet", "T-bet", names(df)) df } filter_hc<-function(df){ df$SM<-gsub("SM", "SM+", df$SM) df$SM<-gsub("X", "SM-", df$SM) df<-filter(df, SM!="N", TB=="HC") df$SM<-factor(df$SM, levels=c("SM-","SM+")) df$disease<-paste(df$TB, df$SM, sep=" ") df$disease<-factor(df$disease, levels=c("HC SM-", "HC SM+")) names(df)<-gsub("Tbet", "T-bet", names(df)) df } plot_filter<-function(datatable){ library(dplyr) df<-filter(datatable, TB!="N") df$TB<-factor(df$TB, levels=c("HC","LTBI","TB")) df$SM<-gsub("SM", "SM+", df$SM) df$SM<-gsub("X", "SM-", df$SM) df$SM<-factor(df$SM, levels=c("SM-","SM+")) df$disease<-paste(df$TB, df$SM, sep=" ") df$disease<-factor(df$disease, levels=c("HC SM-", "HC SM+", "LTBI SM-", "LTBI SM+", "TB SM-", "TB SM+")) names(df)<-gsub("Tbet", "T-bet", names(df)) df } filter_all<-function(datatable){ library(dplyr) df<-datatable df$TB<-factor(df$TB, levels=c("N","HC","LTBI","TB")) 
df$SM<-gsub("SM", "SM+", df$SM) df$SM<-gsub("X", "SM-", df$SM) df$SM<-factor(df$SM, levels=c("N","SM-","SM+")) df$disease<-paste(df$TB, df$SM, sep=" ") df$disease<-gsub("N N", "Naive", df$disease) df$disease<-factor(df$disease, levels=c("Naive","HC SM-", "HC SM+", "LTBI SM-", "LTBI SM+", "TB SM-", "TB SM+")) names(df)<-gsub("Tbet", "T-bet", names(df)) df } plot_compass<-function(DF, yval){ library(dplyr) DF$disease<-paste(DF$TB, DF$SM, sep=" ") DF$disease<-factor(DF$disease, levels=c("HC X", "HC SM", "LTBI X", "LTBI SM", "TB X", "TB SM")) g<-ggplot(DF, aes(x=disease, y=DF[,yval])) g+ geom_boxplot(aes(fill=TB, alpha=SM), size=1, position=position_dodge(width = 1), outlier.shape=NA)+ geom_jitter(width=.1,height=0, shape=16,size=2)+ scale_alpha_manual(values=c(0.5,1))+ scale_fill_manual(values = c("#1a9850" , "#2166ac", "#b2182b"))+ theme_classic()+ theme(legend.position="none")+ theme(text = element_text(size=20), axis.text.x = element_text(angle=90, vjust=0.6)) + theme(plot.title = element_text(hjust = 0.5)) + stat_compare_means(comparisons = my_comparisons, p.adjust="bonferroni") } plot_with_all_stats<-function(DF, yval){ DF<-dplyr::filter(DF, TB != "N") DF$disease<-paste(DF$TB, DF$SM, sep=" ") DF$disease<-factor(DF$disease, levels=c("HC X", "HC SM", "LTBI X", "LTBI SM", "TB X", "TB SM")) my_comparisons <- list(c("HC SM", "HC X"),c("LTBI SM", "LTBI X"), c("TB SM", "TB X"), c("TB SM", "LTBI SM"), c("TB X", "LTBI X"), c("HC SM", "LTBI SM"), c("HC X", "LTBI X")) g<-ggplot(DF, aes(x=disease, y=DF[,yval])) g+ geom_boxplot(aes(fill=TB, alpha=SM), size=1, position=position_dodge(width = 1), outlier.shape=NA)+ geom_jitter(width=.1,height=0, shape=16,size=2)+ scale_alpha_manual(values=c(0.5,1))+ scale_fill_manual(values = c("#1a9850" , "#2166ac", "#b2182b"))+ theme_classic()+ theme(legend.position="none")+ theme(text = element_text(size=16)) + theme(plot.subtitle = element_text(hjust = 0.5)) + stat_compare_means(comparisons = my_comparisons, p.adjust="bonferroni") } 
plot_3<-function(DF, yval){ DF<-dplyr::filter(DF, TB != "N") my_comparisons <- list(c("HC","LTBI"), c("TB", "LTBI"), c("HC", "TB")) g<-ggplot(DF, aes(x=TB, y=DF[,yval])) g+ geom_boxplot(aes(fill=TB), size=1, position=position_dodge(width = 1), outlier.shape=NA)+ geom_jitter(width=.1,height=0, shape=16,size=2)+ scale_alpha_manual(values=c(0.5,1))+ scale_fill_manual(values = c("#1a9850" , "#2166ac", "#b2182b"))+ theme_classic()+ theme(legend.position="none")+ theme(text = element_text(size=16)) + theme(plot.subtitle = element_text(hjust = 0.5)) + stat_compare_means(comparisons = my_comparisons, p.adjust="bonferroni") } plot_3<-function(DF, yval, xval){ my_comparisons <- list(c("N", "HC"), c("N", "LTBI"), c("N", "TB"), c("HC","LTBI"), c("TB", "LTBI"), c("HC", "TB")) g<-ggplot(DF, aes(x=DF[,xval], y=DF[,yval])) g+ geom_boxplot(aes(fill=DF[,xval]), size=1, position=position_dodge(width = 1), outlier.shape=NA)+ geom_jitter(width=.1,height=0, shape=16,size=2)+ scale_alpha_manual(values=c(0.5,1))+ scale_fill_manual(values =c("N" = "#da70d6","HC" = "#00AFBB", "LTBI" = "#E7B800","TB"="#FC4E07", "X" = "#00AFBB", "SM" = "#E7B800"))+ theme_classic()+ theme(legend.position="none")+ theme(text = element_text(size=16)) + theme(plot.subtitle = element_text(hjust = 0.5)) + stat_compare_means(comparisons = my_comparisons) } plot_cell<-function(DF, yval){ my_comparisons <- list(c("CD4","CD8"), c("CD8", "GD"), c("CD4", "GD")) g<-ggplot(DF, aes(x=celltype, y=DF[,yval])) g+ geom_boxplot(aes(fill=TB), size=1, position=position_dodge(width = 1), outlier.shape=NA)+ geom_jitter(width=.1,height=0, shape=16,size=2)+ scale_fill_manual(values =c("N" = "#da70d6","HC" = "#00AFBB", "LTBI" = "#E7B800","TB"="#FC4E07"))+ theme_classic()+ theme(legend.position="none")+ theme(text = element_text(size=16)) + theme(plot.subtitle = element_text(hjust = 0.5)) + stat_compare_means(comparisons = my_comparisons, label="p.signif") } triple_boolean_plot<-function(datatable){ library(dplyr) DF<-datatable 
Boolean<-dplyr::select(DF, Donor, TB, SM, Stim, G_4_13_T, G_4_x_T, G_x_13_T, G_4_13_x, x_4_13_T, G_4_x_x, G_x_13_x, x_4_x_T, x_x_13_T, G_x_x_T , G_x_x_x, x_x_x_T, x_4_13_x, x_4_x_x, x_x_13_x)%>% dplyr::filter(TB!="N") melt<-melt(Boolean,id.vars=c("Donor","TB","SM", "Stim")) data1<-filter(melt, variable %in% c("G_4_13_T", "G_4_x_T", "G_x_13_T", "G_4_13_x", "x_4_13_T","G_4_x_x", "G_x_13_x","x_4_x_T","x_x_13_T")) data2<-filter(melt, variable %in% c("G_x_x_T" , "G_x_x_x","x_x_x_T")) data3<-filter(melt, variable %in% c("x_4_13_x","x_4_x_x", "x_x_13_x")) g1<-ggplot(data1, aes(x=variable, y=value, fill=TB, alpha=SM))+ geom_boxplot(size=1, position=position_dodge(width = 1)) + scale_fill_manual(values =c("#1a9850" , "#2166ac", "#b2182b"))+ scale_alpha_manual(values=c(0.5,1))+ theme_classic()+ theme(legend.position="none")+ theme(strip.text.y = element_blank())+ theme(text = element_text(size=20), axis.text.x = element_blank())+ labs(y="Cytokine+ CD4+ Cells (%)", x="", title="TH1/2")+ theme(plot.title = element_text(hjust = 0.5)) + facet_grid(TB~., scale="fixed")+ stat_compare_means(label = "p.signif", p.adjust.method = "fdr", hide.ns = TRUE, size = 8, method="wilcox.test", paired = FALSE) g2<-ggplot(data2, aes(x=variable, y=value, fill=TB, alpha=SM))+ geom_boxplot(size=1, position=position_dodge(width = 1)) + scale_fill_manual(values =c("#1a9850" , "#2166ac", "#b2182b"))+ scale_alpha_manual(values=c(0.5,1))+ theme_classic()+ theme(legend.position="none")+ theme(strip.text.y = element_blank())+ theme(text = element_text(size=20), axis.text.x = element_blank()) + labs(y="", x="", title="TH1")+ theme(plot.title = element_text(hjust = 0.5)) + facet_grid(TB~., scale="fixed")+ stat_compare_means(label = "p.signif", p.adjust.method = "fdr", hide.ns = TRUE, size = 8, method="wilcox.test", paired = FALSE) g3<-ggplot(data3, aes(x=variable, y=value, fill=TB, alpha=SM))+ geom_boxplot(size=1, position=position_dodge(width = 1)) + scale_fill_manual(values =c("#1a9850" , "#2166ac", 
"#b2182b"))+ scale_alpha_manual(values=c(0.5,1))+ theme_classic()+ theme(legend.position="none")+ theme(text = element_text(size=16), axis.text.x = element_blank()) + labs(y="", x="", title="TH2")+ theme(plot.title = element_text(hjust = 0.5)) + facet_grid(TB~., scale="fixed")+ stat_compare_means(label = "p.signif", p.adjust.method = "fdr", hide.ns = TRUE, size = 8, method="wilcox.test", paired = FALSE) plot<-grid.arrange(g1, g2, g3, ncol=3, widths=c(8,4,4), top=text_grob("Cytokine Production", size=24)) } plot_6<-function(data, xval, yval){ ggplot(data, aes(x=data[,xval], y=data[,yval]))+ geom_boxplot(size=1, position=position_dodge(width = 1), outlier.shape = NA, aes(fill=TB, alpha=SM)) + geom_jitter(width=.1,height=0, shape=16,size=2)+ scale_fill_manual(values =c("#1a9850" , "#2166ac", "#b2182b"))+ scale_alpha_manual(values=c(0.5,1))+ scale_x_discrete(labels=c("SM-", "SM+"))+ theme_classic()+ theme(legend.position="none")+ theme(axis.text.x = element_text(angle=90, vjust=0.6)) + theme(text = element_text(size=12), axis.text.x = element_text(angle=90, vjust=0.6)) + facet_grid(~TB, scale="free")+ stat_compare_means(label = "p.format", p.adjust.method = "fdr", hide.ns = TRUE, size = 4, method="wilcox.test", paired = FALSE, label.y= (max(data[,yval])*1.1)) } plot_2<-function(data, xval, yval){ my_comparisons <- list(c("LTBI SM", "LTBI X"), c("TB SM", "TB X"), c("TB SM", "LTBI SM"), c("TB X", "LTBI X")) ggplot(data, aes(x=data[,xval], y=data[,yval]))+ geom_boxplot(size=1, position=position_dodge(width = 1), outlier.shape = NA, aes(fill=TB, alpha=SM)) + geom_jitter(width=.1,height=0, shape=16,size=2)+ scale_fill_manual(values =c("#2166ac", "#b2182b"))+ scale_alpha_manual(values=c(0.5,1))+ scale_x_discrete(labels=c("QFT+SM-", "QFT+SM+", "TB SM-","TB SM+"))+ theme_classic()+ theme(legend.position="none")+ theme(plot.title = element_text(hjust = 0.5))+ theme(text = element_text(size=14)) + stat_compare_means(comparisons=my_comparisons, aes(label=..p.adj..), label = 
"p.format", p.adjust.method = "fdr", hide.ns = TRUE, size = 5, method="wilcox.test", paired = FALSE, na.rm=TRUE)+ labs(title="", x="", y=yval)} plot_compare<-function(data, xval, yval){ ##Compares Total responses to Mtb-specific responses not paired ##requires melted data where xval == ggplot(data, aes(x=data[,xval], y=data[,yval]))+ geom_boxplot(size=1, position=position_dodge(width = 1), outlier.shape = NA, aes(fill=TB, alpha=SM)) + geom_jitter(width=.1,height=0, shape=16,size=2)+ scale_fill_manual(values =c("#2166ac", "#b2182b"))+ scale_alpha_manual(values=c(0.5,1))+ theme_classic()+ theme(legend.position="none")+ theme(plot.title = element_text(hjust = 0.5))+ theme(text = element_text(size=20), axis.text.x = element_text(angle=30, vjust=0.6)) + theme(strip.text.x = element_blank())+ facet_grid(~TB+SM, scale="free")+ stat_compare_means(label = "p.format", p.adjust.method = "fdr", hide.ns = TRUE, size = 6, method="wilcox.test", paired = FALSE, label.y = 1.1*(max(data[,yval], na.rm = TRUE)))+ labs(y=paste(yval, "+ CD4+ (%)"), x="") } plot_pbmc<-function(data, yval){ data[,yval]<-as.numeric(data[,yval]) ggplot(data, aes(x=SM, y=data[,yval]))+ geom_boxplot(size=1, position=position_dodge(width = 1), outlier.shape = NA, aes(fill=TB, alpha=SM)) + geom_jitter(width=.1,height=0, shape=16,size=2)+ scale_alpha_manual(values=c(0.5,0.5,1))+ scale_fill_manual(values = c("#e0e0e0", "#1a9850" , "#2166ac", "#b2182b"))+ theme_classic()+ theme(legend.position="none")+ theme(text = element_text(size=14)) + facet_grid(~TB, scales="free")+ labs(x="", y="") } plot_pbmc.pairs<-function(data){ ggplot(data, aes(x=variable, y=value, color=TB, alpha=SM))+ geom_point(shape=16,size=2)+ geom_line(aes(group=Donor))+ scale_alpha_manual(values=c(0.5,0.5,1))+ scale_color_manual(values = c("#e0e0e0", "#1a9850" , "#2166ac", "#b2182b"))+ theme_classic()+ theme(legend.position="none")+ scale_x_discrete(labels=c("Pre", "Post"))+ theme(text = element_text(size=20)) + facet_grid(SM~TB, scales="free")+ 
stat_compare_means(label = "p.format", p.adjust.method = "fdr", hide.ns = TRUE, size = 6, method="wilcox.test", paired = TRUE, label.y = 1.1*(max(data$value, na.rm = TRUE)))+ labs(x="", y="") } plot_stim<-function(data, yval){ ggplot(data, aes(y=data[,yval], x=Stim, col=TB))+ geom_point(size=3, alpha=0.5)+geom_line(aes(group=Donor))+ scale_color_manual(values =c("#2166ac", "#b2182b"))+ theme_bw()+ theme(legend.position="none")+ theme(text = element_text(size=20)) + facet_grid(SM~TB)+ stat_compare_means(comparisons = list(c("PMA", "PEP"), c("PMA","WCL")) , na.rm=TRUE, size=4, label.y.npc = .9)+ labs(y= yval) } baseplot<-function(data, xval, yval, colors){ ggplot(data, aes(data[,xval], data[,yval], fill=SM))+ geom_boxplot(size=1, position=position_dodge(width = 1), outlier.shape = NA)+ scale_fill_manual(values = colors)+ theme_classic()+ theme(legend.position="none")+ theme(panel.border = element_rect(fill = NA, colour = "black"))+ theme(text = element_text(size=20), axis.text.x = element_text(size = 16), strip.text = element_text(size = 20))+ theme(plot.title = element_text(hjust = 0.5, size=20)) + labs(x="", y="") } booleanplot<-function(data, xval, yval, colors){ ggplot(data, aes(data[,xval], data[,yval], fill=SM))+ geom_boxplot(size=1, position=position_dodge(width = 1), outlier.shape = NA)+ scale_fill_manual(values = colors)+ theme_classic()+ theme(legend.position="none")+ theme(text = element_text(size=20), axis.text.x = element_text(size=20), strip.text = element_text(size = 20))+ theme(plot.title = element_text(hjust = 0.5, size=20)) + stat_compare_means(method="wilcox.test", label="p.signif", hide.ns = TRUE, size=10)+ labs(x="", y="") } booleanplot2<-function(data, xval, yval, colors){ ggplot(data, aes(data[,xval], data[,yval], fill=disease))+ geom_boxplot(size=1, position=position_dodge(width = 1), outlier.shape = NA)+ scale_fill_manual(values = colors)+ theme_classic()+ theme(legend.position="none")+ theme(text = element_text(size=20), axis.text.x = 
element_text(size=20), strip.text = element_text(size = 20))+ theme(plot.title = element_text(hjust = 0.5, size=20)) + labs(x="", y="") } allplot<-function(data, xval, yval, colors){ ggplot(data, aes(data[,xval], data[,yval], fill=disease))+ geom_boxplot(size=1, position=position_dodge2(width=0.2, padding = 0.2, preserve = "single"), outlier.shape = NA)+ scale_fill_manual(values = colors)+ theme_classic()+ theme(legend.position="none") + theme(panel.border = element_rect(fill = NA, colour = "black"))+ facet_wrap(~variable)+ theme(text = element_text(size=20), axis.text.x = element_text(size = 16), strip.text = element_text(size = 20))+ theme(plot.title = element_text(hjust = 0.5, size=20))+ labs(x="", y="") } cyt_groups <- as_labeller(c("IFNg" = "IFNγ", "TNFa" = "TNFα", "IL4" = "IL-4", "IL13" = "IL-13")) all_comps<-list(c("HC SM+", "HC SM-"),c("LTBI SM+", "LTBI SM-"), c("TB SM+", "TB SM-")) healthy.cols<- c("SM+" = "#1a9850", "SM-" ="#91cf60") ltbi.cols<- c("SM+" = "#2166ac", "SM-" = "#85ABD1") tb.cols<- c("SM+" = "#b2182b", "SM-"="#E59EA6") all.cols<- c("Naive" = "#C8D0D8","HC SM+" = "#1a9850", "HC SM-" ="#91cf60", "LTBI SM+" = "#2166ac", "LTBI SM-" = "#85ABD1", "TB SM+" = "#b2182b", "TB SM-"="#E59EA6")
# Load the shared HTML styles and each tab definition, then build the page.
tabpath <- "./Tabs_ui"

source(file.path(tabpath, "html_styles_ui.R"))
source(file.path(tabpath, "Tab01_ui.R"))

# Assemble the top-level Shiny UI from the components the sourced files define.
ui <- fluidPage(
  html_styles,
  Tab01
)
/learn/Learn_ui.R
no_license
TBrach/shiny_apps
R
false
false
198
r
# Load the shared HTML styles and each tab definition, then build the page.
tabpath <- "./Tabs_ui"

source(file.path(tabpath, "html_styles_ui.R"))
source(file.path(tabpath, "Tab01_ui.R"))

# Assemble the top-level Shiny UI from the components the sourced files define.
ui <- fluidPage(
  html_styles,
  Tab01
)
# Packages ----
library(bsts)
library(chron)
library(tidyverse)
library(data.table)
library(dplyr)
library(forecast)
library(tseries)
library(rjags)
library(coda)
library(plotrix)

# Datasets ----
load("../../Dataset_pronti/weather_temp.RData")
load("../../Dataset_pronti/train.RData")
key <- read.table('../../Data/key.csv', sep = ',', header = TRUE)

# Helper functions for selecting the time series ----

# Sales series for one (store, item) pair, keeping only dates strictly before
# last_date. Returns columns 1 (date) and 4 (units) of the matching rows.
unit_series <- function(data, store, item, last_date = '14/12/19', all.dates = FALSE) {
  data <- data[which(data$store_nbr == store & data$item_nbr == item &
                       data$date < chron(date = last_date, format = c(date = 'd/m/y'))),
               c(1, 4)]
  if (all.dates) {
    # To be decided in the future:
    # I would like to add NAs for test dates and 0 for Christmas.
  }
  return(data)
}

# Weather series for one station, keeping only dates strictly before last_date
# and only the requested feature columns.
weather_series <- function(data, station, features, last_date = '14/12/19', all.dates = FALSE) {
  data <- data[which(data$station_nbr == station &
                       data$date < chron(date = last_date, format = c(date = 'd/m/y'))),
               c('date', features)]
  if (all.dates) {
    # To be decided in the future.
  }
  return(data)
}

# Preliminary steps ----
# We try first with item 44 in store 30.
last_date <- '01/01/13'
item <- 44
store <- 30 # 4
niter <- 2000
station <- key$station_nbr[which(key$store_nbr == store)]

items_dataset <- unit_series(train, item = item, last_date = last_date, store = store)
regressors_dataset <- weather_series(data = weather, station = station,
                                     features = c('preciptotal', 'snowfall', 'resultspeed', 'rain', 'snow'),
                                     last_date = last_date)
working_dataset <- merge(y = items_dataset, x = regressors_dataset, by = 'date', all.x = TRUE)

plot(working_dataset$date, working_dataset$units, col = 'green4', type = 'l')
hist(working_dataset$units)
boxplot(working_dataset$units)

# Fixing NAs in the regressors ----
# working_dataset$tavg[which(is.na(working_dataset$tavg))]=(working_dataset$tmax[which(is.na(working_dataset$tavg))]+working_dataset$tmin[which(is.na(working_dataset$tavg))])/2
working_dataset$units[which(working_dataset$date == '25/12/12')] <- 0
working_dataset$units[which(working_dataset$date == '25/12/13')] # =0 (assignment intentionally disabled)
sum(is.na(working_dataset$units))

# working_dataset$tavg=c(0,diff(working_dataset$tavg)) # taking the difference in avg(temp)
summary(working_dataset)
working_dataset$preciptotal[which(is.na(working_dataset$preciptotal))] <- 0 # because rain == 0
working_dataset$snowfall[which(is.na(working_dataset$snowfall))] <- 0       # because snow == 0
summary(working_dataset)

# Lead the snow regressors by one day (data.table::shift with n = -1),
# then zero-fill the trailing NA introduced by the shift.
working_dataset$snow_tomorrow <- shift(working_dataset$snow, n = -1)
working_dataset$snowfall_tomorrow <- shift(working_dataset$snowfall, n = -1)
working_dataset$snow_tomorrow[which(is.na(working_dataset$snow_tomorrow))] <- 0
working_dataset$snowfall_tomorrow[which(is.na(working_dataset$snowfall_tomorrow))] <- 0

# working_dataset$units=as.numeric(scale(working_dataset$units))
# Renamed from `mean` so the sample mean does not shadow base::mean.
units_mean <- mean(working_dataset$units)
working_dataset$units <- working_dataset$units - mean(working_dataset$units) # detrend

# Holidays
july4 <- FixedDateHoliday("July4", "July", 4)
christmas <- FixedDateHoliday("Christmas", "Dec", 25)

# ACF / PACF and stationarity test ----
plot.ts(working_dataset$units)
adf.test(working_dataset$units, alternative = "stationary", k = 0) # stationary

# Let's try only with an AR model.
acf(working_dataset$units)
pacf(working_dataset$units) # suggests AR(6)

# JAGS ----
data <- list(y = working_dataset$units[7:dim(working_dataset)[1]],
             x = working_dataset$units[1:(dim(working_dataset)[1])],
             n = dim(working_dataset)[1],
             m = 6)

modelRegress <- jags.model("AR_model_long.bug", data = data, n.adapt = 1000, n.chains = 1)
update(modelRegress, n.iter = 19000) # burn-in
variable.names <- c("a", "b", "tau") # c("b","tau") # c("a","b", "tau")
n.iter <- 50000
thin <- 10
outputRegress <- coda.samples(model = modelRegress, variable.names = variable.names,
                              n.iter = n.iter, thin = thin)

data.out <- as.matrix(outputRegress)
data.out <- data.frame(data.out)
# summary(data.out)
head(data.out)

par(mfrow = c(1, 1))
acf(data.out[, 'a'], lwd = 3, col = "red3", main = "autocorrelation of a0")
par(mfrow = c(1, 2))
acf(data.out[, 'b.1.'], lwd = 3, col = "red3", main = "autocorrelation of b1")
acf(data.out[, 'b.2.'], lwd = 3, col = "red3", main = "autocorrelation of b2")
acf(data.out[, 'b.3.'], lwd = 3, col = "red3", main = "autocorrelation of b3")
acf(data.out[, 'b.4.'], lwd = 3, col = "red3", main = "autocorrelation of b4")
par(mfrow = c(1, 1))
acf(data.out[, 'tau'], lwd = 3, col = "red3", main = "autocorrelation of tau")

# Column layout of data.out: 1 = a (intercept), 2:7 = b1..b6 (AR coefficients), 8 = tau.
# alfa.post <- data.out[,1]; beta.post <- data.out[,2:5]; sigma.post <- data.out[,6]
alfa.post <- data.out[, 1]
beta.post <- data.out[, 2:7]
sigma.post <- data.out[, 8]

# Posterior means of the parameters
alfa.bayes <- mean(alfa.post)
alfa.bayes
beta.bayes <- colMeans(beta.post)
beta.bayes
sigma.bayes <- mean(sigma.post)
sigma.bayes

### ALFA
## Representation of the posterior chain of a0
chain <- alfa.post
layout(matrix(c(1, 2, 3, 3), 2, 2, byrow = TRUE))
plot(chain, type = "l", main = "Trace plot of a0")
acf(chain, lwd = 3, col = "red3", main = "autocorrelation of a0")
hist(chain, nclass = "fd", freq = FALSE, main = "Posterior of a0", col = "gray")
lines(density(chain), col = "blue", lwd = 2)
abline(v = quantile(chain, prob = c(0.025)), col = "red", lty = 2, lwd = 2)
abline(v = quantile(chain, prob = c(0.5)), col = "red", lty = 1, lwd = 2)
abline(v = quantile(chain, prob = c(0.975)), col = "red", lty = 2, lwd = 2)

### BETA
## Representation of the posterior chain of b1
# FIX: was `chain <- beta.post` (the whole 6-column data frame), which plotted
# every coefficient at once instead of the b1 chain.
chain <- beta.post[, 1]
layout(matrix(c(1, 2, 3, 3), 2, 2, byrow = TRUE))
plot(chain, type = "l", main = "Trace plot of b1")
acf(chain, lwd = 3, col = "red3", main = "autocorrelation of b1")
hist(chain, nclass = "fd", freq = FALSE, main = "Posterior of b1", col = "gray")
lines(density(chain), col = "blue", lwd = 2)
abline(v = quantile(chain, prob = c(0.025)), col = "red", lty = 2, lwd = 2)
abline(v = quantile(chain, prob = c(0.5)), col = "red", lty = 1, lwd = 2)
abline(v = quantile(chain, prob = c(0.975)), col = "red", lty = 2, lwd = 2)

## Representation of the posterior chain of b2
chain <- beta.post[, 2]
layout(matrix(c(1, 2, 3, 3), 2, 2, byrow = TRUE))
plot(chain, type = "l", main = "Trace plot of b2")
acf(chain, lwd = 3, col = "red3", main = "autocorrelation of b2")
hist(chain, nclass = "fd", freq = FALSE, main = "Posterior of b2", col = "gray")
lines(density(chain), col = "blue", lwd = 2)
abline(v = quantile(chain, prob = c(0.025)), col = "red", lty = 2, lwd = 2)
abline(v = quantile(chain, prob = c(0.5)), col = "red", lty = 1, lwd = 2)
abline(v = quantile(chain, prob = c(0.975)), col = "red", lty = 2, lwd = 2)

## Representation of the posterior chain of b3
chain <- beta.post[, 3]
layout(matrix(c(1, 2, 3, 3), 2, 2, byrow = TRUE))
plot(chain, type = "l", main = "Trace plot of b3")
acf(chain, lwd = 3, col = "red3", main = "autocorrelation of b3")
hist(chain, nclass = "fd", freq = FALSE, main = "Posterior of b3", col = "gray")
lines(density(chain), col = "blue", lwd = 2)
abline(v = quantile(chain, prob = c(0.025)), col = "red", lty = 2, lwd = 2)
abline(v = quantile(chain, prob = c(0.5)), col = "red", lty = 1, lwd = 2)
abline(v = quantile(chain, prob = c(0.975)), col = "red", lty = 2, lwd = 2)

## Representation of the posterior chain of b4
chain <- beta.post[, 4]
layout(matrix(c(1, 2, 3, 3), 2, 2, byrow = TRUE))
plot(chain, type = "l", main = "Trace plot of b4")
acf(chain, lwd = 3, col = "red3", main = "autocorrelation of b4")
hist(chain, nclass = "fd", freq = FALSE, main = "Posterior of b4", col = "gray")
lines(density(chain), col = "blue", lwd = 2)
abline(v = quantile(chain, prob = c(0.025)), col = "red", lty = 2, lwd = 2)
abline(v = quantile(chain, prob = c(0.5)), col = "red", lty = 1, lwd = 2)
abline(v = quantile(chain, prob = c(0.975)), col = "red", lty = 2, lwd = 2)

## Representation of the posterior chain of sigma
chain <- sigma.post
layout(matrix(c(1, 2, 3, 3), 2, 2, byrow = TRUE))
plot(chain, type = "l", main = "Trace plot of sigma")
acf(chain, lwd = 3, col = "red3", main = "autocorrelation of sigma")
hist(chain, nclass = "fd", freq = FALSE, main = "Posterior of sigma", col = "gray")
lines(density(chain), col = "blue", lwd = 2)
abline(v = quantile(chain, prob = c(0.025)), col = "red", lty = 2, lwd = 2)
abline(v = quantile(chain, prob = c(0.5)), col = "red", lty = 1, lwd = 2)
abline(v = quantile(chain, prob = c(0.975)), col = "red", lty = 2, lwd = 2)

par(mfrow = c(1, 1))

### We first compare the observed data distribution with the posterior
### predictive distribution of Y (one-step-ahead; the first 6 values are observed).
n_obs <- dim(working_dataset)[1]
ypred <- rep(0, n_obs)
ypred[1:6] <- working_dataset$units[1:6]
for (i in 7:n_obs) {
  # mu <- alfa.bayes + beta.bayes[1] * working_dataset$units[i-1]
  mu <- alfa.bayes +
    beta.bayes[1] * working_dataset$units[i - 1] +
    beta.bayes[2] * working_dataset$units[i - 2] +
    beta.bayes[3] * working_dataset$units[i - 3] +
    beta.bayes[4] * working_dataset$units[i - 4] +
    beta.bayes[5] * working_dataset$units[i - 5] +
    beta.bayes[6] * working_dataset$units[i - 6]
  # NOTE(review): sd is fixed at 10 rather than derived from tau/sigma.bayes — confirm.
  ypred[i] <- rnorm(1, mu, 10)
}

par(mfrow = c(1, 1))
plot(working_dataset$units, col = "darkgreen", type = "l", lty = 1, lwd = 1)
lines(ypred, col = "red", type = "l", lty = 1, lwd = 1)

error <- working_dataset$units - ypred
hist(error, nclass = "fd", freq = FALSE, main = "ERROR", col = "gray", xlab = "mean(y)")
lines(density(error), col = "blue", lwd = 2)
abline(v = quantile(error, prob = c(0.025)), col = "red", lty = 2, lwd = 2)
abline(v = quantile(error, prob = c(0.5)), col = "red", lty = 1, lwd = 2)
abline(v = quantile(error, prob = c(0.975)), col = "red", lty = 2, lwd = 2)

# Shift the predictions back by one position and re-plot
# (generalized from the hard-coded 365/366 indices; equivalent when n_obs == 366).
ypred[2:(n_obs - 1)] <- ypred[3:n_obs]
ypred[n_obs] <- working_dataset$units[n_obs]

par(mfrow = c(1, 1))
plot(working_dataset$units, col = "darkgreen", type = "l", lty = 1, lwd = 1)
lines(ypred, col = "red", type = "l", lty = 1, lwd = 1)

# error=working_dataset$units-ypred
# hist(error,nclass="fd",freq=F,main="ERROR",col="gray", xlab = "mean(y)")
# lines(density(error),col="blue",lwd=2)
# abline(v=quantile(error,prob=c(0.025)),col="red",lty=2,lwd=2)
# abline(v=quantile(error,prob=c(0.5)),col="red",lty=1,lwd=2)
# abline(v=quantile(error,prob=c(0.975)),col="red",lty=2,lwd=2)
/03. Univariate models/AR with JAGS/AR_model_long.R
no_license
fmacca/bayesian-sales-in-stormy-weather
R
false
false
9,287
r
# Packages ----
library(bsts)
library(chron)
library(tidyverse)
library(data.table)
library(dplyr)
library(forecast)
library(tseries)
library(rjags)
library(coda)
library(plotrix)

# Datasets ----
load("../../Dataset_pronti/weather_temp.RData")
load("../../Dataset_pronti/train.RData")
key <- read.table('../../Data/key.csv', sep = ',', header = TRUE)

# Helper functions for selecting the time series ----

# Sales series for one (store, item) pair, keeping only dates strictly before
# last_date. Returns columns 1 (date) and 4 (units) of the matching rows.
unit_series <- function(data, store, item, last_date = '14/12/19', all.dates = FALSE) {
  data <- data[which(data$store_nbr == store & data$item_nbr == item &
                       data$date < chron(date = last_date, format = c(date = 'd/m/y'))),
               c(1, 4)]
  if (all.dates) {
    # To be decided in the future:
    # I would like to add NAs for test dates and 0 for Christmas.
  }
  return(data)
}

# Weather series for one station, keeping only dates strictly before last_date
# and only the requested feature columns.
weather_series <- function(data, station, features, last_date = '14/12/19', all.dates = FALSE) {
  data <- data[which(data$station_nbr == station &
                       data$date < chron(date = last_date, format = c(date = 'd/m/y'))),
               c('date', features)]
  if (all.dates) {
    # To be decided in the future.
  }
  return(data)
}

# Preliminary steps ----
# We try first with item 44 in store 30.
last_date <- '01/01/13'
item <- 44
store <- 30 # 4
niter <- 2000
station <- key$station_nbr[which(key$store_nbr == store)]

items_dataset <- unit_series(train, item = item, last_date = last_date, store = store)
regressors_dataset <- weather_series(data = weather, station = station,
                                     features = c('preciptotal', 'snowfall', 'resultspeed', 'rain', 'snow'),
                                     last_date = last_date)
working_dataset <- merge(y = items_dataset, x = regressors_dataset, by = 'date', all.x = TRUE)

plot(working_dataset$date, working_dataset$units, col = 'green4', type = 'l')
hist(working_dataset$units)
boxplot(working_dataset$units)

# Fixing NAs in the regressors ----
# working_dataset$tavg[which(is.na(working_dataset$tavg))]=(working_dataset$tmax[which(is.na(working_dataset$tavg))]+working_dataset$tmin[which(is.na(working_dataset$tavg))])/2
working_dataset$units[which(working_dataset$date == '25/12/12')] <- 0
working_dataset$units[which(working_dataset$date == '25/12/13')] # =0 (assignment intentionally disabled)
sum(is.na(working_dataset$units))

# working_dataset$tavg=c(0,diff(working_dataset$tavg)) # taking the difference in avg(temp)
summary(working_dataset)
working_dataset$preciptotal[which(is.na(working_dataset$preciptotal))] <- 0 # because rain == 0
working_dataset$snowfall[which(is.na(working_dataset$snowfall))] <- 0       # because snow == 0
summary(working_dataset)

# Lead the snow regressors by one day (data.table::shift with n = -1),
# then zero-fill the trailing NA introduced by the shift.
working_dataset$snow_tomorrow <- shift(working_dataset$snow, n = -1)
working_dataset$snowfall_tomorrow <- shift(working_dataset$snowfall, n = -1)
working_dataset$snow_tomorrow[which(is.na(working_dataset$snow_tomorrow))] <- 0
working_dataset$snowfall_tomorrow[which(is.na(working_dataset$snowfall_tomorrow))] <- 0

# working_dataset$units=as.numeric(scale(working_dataset$units))
# Renamed from `mean` so the sample mean does not shadow base::mean.
units_mean <- mean(working_dataset$units)
working_dataset$units <- working_dataset$units - mean(working_dataset$units) # detrend

# Holidays
july4 <- FixedDateHoliday("July4", "July", 4)
christmas <- FixedDateHoliday("Christmas", "Dec", 25)

# ACF / PACF and stationarity test ----
plot.ts(working_dataset$units)
adf.test(working_dataset$units, alternative = "stationary", k = 0) # stationary

# Let's try only with an AR model.
acf(working_dataset$units)
pacf(working_dataset$units) # suggests AR(6)

# JAGS ----
data <- list(y = working_dataset$units[7:dim(working_dataset)[1]],
             x = working_dataset$units[1:(dim(working_dataset)[1])],
             n = dim(working_dataset)[1],
             m = 6)

modelRegress <- jags.model("AR_model_long.bug", data = data, n.adapt = 1000, n.chains = 1)
update(modelRegress, n.iter = 19000) # burn-in
variable.names <- c("a", "b", "tau") # c("b","tau") # c("a","b", "tau")
n.iter <- 50000
thin <- 10
outputRegress <- coda.samples(model = modelRegress, variable.names = variable.names,
                              n.iter = n.iter, thin = thin)

data.out <- as.matrix(outputRegress)
data.out <- data.frame(data.out)
# summary(data.out)
head(data.out)

par(mfrow = c(1, 1))
acf(data.out[, 'a'], lwd = 3, col = "red3", main = "autocorrelation of a0")
par(mfrow = c(1, 2))
acf(data.out[, 'b.1.'], lwd = 3, col = "red3", main = "autocorrelation of b1")
acf(data.out[, 'b.2.'], lwd = 3, col = "red3", main = "autocorrelation of b2")
acf(data.out[, 'b.3.'], lwd = 3, col = "red3", main = "autocorrelation of b3")
acf(data.out[, 'b.4.'], lwd = 3, col = "red3", main = "autocorrelation of b4")
par(mfrow = c(1, 1))
acf(data.out[, 'tau'], lwd = 3, col = "red3", main = "autocorrelation of tau")

# Column layout of data.out: 1 = a (intercept), 2:7 = b1..b6 (AR coefficients), 8 = tau.
# alfa.post <- data.out[,1]; beta.post <- data.out[,2:5]; sigma.post <- data.out[,6]
alfa.post <- data.out[, 1]
beta.post <- data.out[, 2:7]
sigma.post <- data.out[, 8]

# Posterior means of the parameters
alfa.bayes <- mean(alfa.post)
alfa.bayes
beta.bayes <- colMeans(beta.post)
beta.bayes
sigma.bayes <- mean(sigma.post)
sigma.bayes

### ALFA
## Representation of the posterior chain of a0
chain <- alfa.post
layout(matrix(c(1, 2, 3, 3), 2, 2, byrow = TRUE))
plot(chain, type = "l", main = "Trace plot of a0")
acf(chain, lwd = 3, col = "red3", main = "autocorrelation of a0")
hist(chain, nclass = "fd", freq = FALSE, main = "Posterior of a0", col = "gray")
lines(density(chain), col = "blue", lwd = 2)
abline(v = quantile(chain, prob = c(0.025)), col = "red", lty = 2, lwd = 2)
abline(v = quantile(chain, prob = c(0.5)), col = "red", lty = 1, lwd = 2)
abline(v = quantile(chain, prob = c(0.975)), col = "red", lty = 2, lwd = 2)

### BETA
## Representation of the posterior chain of b1
# FIX: was `chain <- beta.post` (the whole 6-column data frame), which plotted
# every coefficient at once instead of the b1 chain.
chain <- beta.post[, 1]
layout(matrix(c(1, 2, 3, 3), 2, 2, byrow = TRUE))
plot(chain, type = "l", main = "Trace plot of b1")
acf(chain, lwd = 3, col = "red3", main = "autocorrelation of b1")
hist(chain, nclass = "fd", freq = FALSE, main = "Posterior of b1", col = "gray")
lines(density(chain), col = "blue", lwd = 2)
abline(v = quantile(chain, prob = c(0.025)), col = "red", lty = 2, lwd = 2)
abline(v = quantile(chain, prob = c(0.5)), col = "red", lty = 1, lwd = 2)
abline(v = quantile(chain, prob = c(0.975)), col = "red", lty = 2, lwd = 2)

## Representation of the posterior chain of b2
chain <- beta.post[, 2]
layout(matrix(c(1, 2, 3, 3), 2, 2, byrow = TRUE))
plot(chain, type = "l", main = "Trace plot of b2")
acf(chain, lwd = 3, col = "red3", main = "autocorrelation of b2")
hist(chain, nclass = "fd", freq = FALSE, main = "Posterior of b2", col = "gray")
lines(density(chain), col = "blue", lwd = 2)
abline(v = quantile(chain, prob = c(0.025)), col = "red", lty = 2, lwd = 2)
abline(v = quantile(chain, prob = c(0.5)), col = "red", lty = 1, lwd = 2)
abline(v = quantile(chain, prob = c(0.975)), col = "red", lty = 2, lwd = 2)

## Representation of the posterior chain of b3
chain <- beta.post[, 3]
layout(matrix(c(1, 2, 3, 3), 2, 2, byrow = TRUE))
plot(chain, type = "l", main = "Trace plot of b3")
acf(chain, lwd = 3, col = "red3", main = "autocorrelation of b3")
hist(chain, nclass = "fd", freq = FALSE, main = "Posterior of b3", col = "gray")
lines(density(chain), col = "blue", lwd = 2)
abline(v = quantile(chain, prob = c(0.025)), col = "red", lty = 2, lwd = 2)
abline(v = quantile(chain, prob = c(0.5)), col = "red", lty = 1, lwd = 2)
abline(v = quantile(chain, prob = c(0.975)), col = "red", lty = 2, lwd = 2)

## Representation of the posterior chain of b4
chain <- beta.post[, 4]
layout(matrix(c(1, 2, 3, 3), 2, 2, byrow = TRUE))
plot(chain, type = "l", main = "Trace plot of b4")
acf(chain, lwd = 3, col = "red3", main = "autocorrelation of b4")
hist(chain, nclass = "fd", freq = FALSE, main = "Posterior of b4", col = "gray")
lines(density(chain), col = "blue", lwd = 2)
abline(v = quantile(chain, prob = c(0.025)), col = "red", lty = 2, lwd = 2)
abline(v = quantile(chain, prob = c(0.5)), col = "red", lty = 1, lwd = 2)
abline(v = quantile(chain, prob = c(0.975)), col = "red", lty = 2, lwd = 2)

## Representation of the posterior chain of sigma
chain <- sigma.post
layout(matrix(c(1, 2, 3, 3), 2, 2, byrow = TRUE))
plot(chain, type = "l", main = "Trace plot of sigma")
acf(chain, lwd = 3, col = "red3", main = "autocorrelation of sigma")
hist(chain, nclass = "fd", freq = FALSE, main = "Posterior of sigma", col = "gray")
lines(density(chain), col = "blue", lwd = 2)
abline(v = quantile(chain, prob = c(0.025)), col = "red", lty = 2, lwd = 2)
abline(v = quantile(chain, prob = c(0.5)), col = "red", lty = 1, lwd = 2)
abline(v = quantile(chain, prob = c(0.975)), col = "red", lty = 2, lwd = 2)

par(mfrow = c(1, 1))

### We first compare the observed data distribution with the posterior
### predictive distribution of Y (one-step-ahead; the first 6 values are observed).
n_obs <- dim(working_dataset)[1]
ypred <- rep(0, n_obs)
ypred[1:6] <- working_dataset$units[1:6]
for (i in 7:n_obs) {
  # mu <- alfa.bayes + beta.bayes[1] * working_dataset$units[i-1]
  mu <- alfa.bayes +
    beta.bayes[1] * working_dataset$units[i - 1] +
    beta.bayes[2] * working_dataset$units[i - 2] +
    beta.bayes[3] * working_dataset$units[i - 3] +
    beta.bayes[4] * working_dataset$units[i - 4] +
    beta.bayes[5] * working_dataset$units[i - 5] +
    beta.bayes[6] * working_dataset$units[i - 6]
  # NOTE(review): sd is fixed at 10 rather than derived from tau/sigma.bayes — confirm.
  ypred[i] <- rnorm(1, mu, 10)
}

par(mfrow = c(1, 1))
plot(working_dataset$units, col = "darkgreen", type = "l", lty = 1, lwd = 1)
lines(ypred, col = "red", type = "l", lty = 1, lwd = 1)

error <- working_dataset$units - ypred
hist(error, nclass = "fd", freq = FALSE, main = "ERROR", col = "gray", xlab = "mean(y)")
lines(density(error), col = "blue", lwd = 2)
abline(v = quantile(error, prob = c(0.025)), col = "red", lty = 2, lwd = 2)
abline(v = quantile(error, prob = c(0.5)), col = "red", lty = 1, lwd = 2)
abline(v = quantile(error, prob = c(0.975)), col = "red", lty = 2, lwd = 2)

# Shift the predictions back by one position and re-plot
# (generalized from the hard-coded 365/366 indices; equivalent when n_obs == 366).
ypred[2:(n_obs - 1)] <- ypred[3:n_obs]
ypred[n_obs] <- working_dataset$units[n_obs]

par(mfrow = c(1, 1))
plot(working_dataset$units, col = "darkgreen", type = "l", lty = 1, lwd = 1)
lines(ypred, col = "red", type = "l", lty = 1, lwd = 1)

# error=working_dataset$units-ypred
# hist(error,nclass="fd",freq=F,main="ERROR",col="gray", xlab = "mean(y)")
# lines(density(error),col="blue",lwd=2)
# abline(v=quantile(error,prob=c(0.025)),col="red",lty=2,lwd=2)
# abline(v=quantile(error,prob=c(0.5)),col="red",lty=1,lwd=2)
# abline(v=quantile(error,prob=c(0.975)),col="red",lty=2,lwd=2)
library(data.table) # kept: fread() is a faster drop-in alternative to read.table here

# Draw the Global Active Power time-series line plot for 1-2 Feb 2007 and
# save a copy of the screen device to "plot2.png".
# No arguments; reads ./data/household_power_consumption.txt
# ("?"/"NA"/"N/A"/empty are treated as missing).
plot2 <- function() {
  powercon <- read.table("./data/household_power_consumption.txt",
                         header = TRUE, sep = ";",
                         stringsAsFactors = FALSE, # was F; use the full literal
                         na.strings = c("?", "NA", "N/A", ""))

  # Keep only the two days of interest (Date is stored as a d/m/Y string).
  powercon.sub <- subset(powercon, Date == "1/2/2007" | Date == "2/2/2007")

  # Build a proper date-time column from the separate Date and Time fields.
  powercon.sub$DateTime <- strptime(paste(powercon.sub$Date, powercon.sub$Time),
                                    format = "%d/%m/%Y %H:%M:%S")
  powercon.sub$Global_active_power <- as.numeric(powercon.sub$Global_active_power)

  # Line plot (empty frame + lines collapsed to type = "l"; the original's
  # subset(powercon.sub) wrapper was a no-op and has been removed).
  with(powercon.sub,
       plot(DateTime, Global_active_power,
            xlab = "Date",
            ylab = "Global Active Power (kilowatts)",
            type = "l"))

  # Copy the on-screen plot to a PNG file, then close the copy device.
  dev.copy(png, file = "plot2.png")
  dev.off()
}
/plort2.R
no_license
kennethchung/HopkinsDataScience
R
false
false
994
r
library(data.table) # kept: fread() is a faster drop-in alternative to read.table here

# Draw the Global Active Power time-series line plot for 1-2 Feb 2007 and
# save a copy of the screen device to "plot2.png".
# No arguments; reads ./data/household_power_consumption.txt
# ("?"/"NA"/"N/A"/empty are treated as missing).
plot2 <- function() {
  powercon <- read.table("./data/household_power_consumption.txt",
                         header = TRUE, sep = ";",
                         stringsAsFactors = FALSE, # was F; use the full literal
                         na.strings = c("?", "NA", "N/A", ""))

  # Keep only the two days of interest (Date is stored as a d/m/Y string).
  powercon.sub <- subset(powercon, Date == "1/2/2007" | Date == "2/2/2007")

  # Build a proper date-time column from the separate Date and Time fields.
  powercon.sub$DateTime <- strptime(paste(powercon.sub$Date, powercon.sub$Time),
                                    format = "%d/%m/%Y %H:%M:%S")
  powercon.sub$Global_active_power <- as.numeric(powercon.sub$Global_active_power)

  # Line plot (empty frame + lines collapsed to type = "l"; the original's
  # subset(powercon.sub) wrapper was a no-op and has been removed).
  with(powercon.sub,
       plot(DateTime, Global_active_power,
            xlab = "Date",
            ylab = "Global Active Power (kilowatts)",
            type = "l"))

  # Copy the on-screen plot to a PNG file, then close the copy device.
  dev.copy(png, file = "plot2.png")
  dev.off()
}