blob_id
stringlengths
40
40
directory_id
stringlengths
40
40
path
stringlengths
2
327
content_id
stringlengths
40
40
detected_licenses
listlengths
0
91
license_type
stringclasses
2 values
repo_name
stringlengths
5
134
snapshot_id
stringlengths
40
40
revision_id
stringlengths
40
40
branch_name
stringclasses
46 values
visit_date
timestamp[us]date
2016-08-02 22:44:29
2023-09-06 08:39:28
revision_date
timestamp[us]date
1977-08-08 00:00:00
2023-09-05 12:13:49
committer_date
timestamp[us]date
1977-08-08 00:00:00
2023-09-05 12:13:49
github_id
int64
19.4k
671M
star_events_count
int64
0
40k
fork_events_count
int64
0
32.4k
gha_license_id
stringclasses
14 values
gha_event_created_at
timestamp[us]date
2012-06-21 16:39:19
2023-09-14 21:52:42
gha_created_at
timestamp[us]date
2008-05-25 01:21:32
2023-06-28 13:19:12
gha_language
stringclasses
60 values
src_encoding
stringclasses
24 values
language
stringclasses
1 value
is_vendor
bool
2 classes
is_generated
bool
2 classes
length_bytes
int64
7
9.18M
extension
stringclasses
20 values
filename
stringlengths
1
141
content
stringlengths
7
9.18M
abf6d00230576a6fb48cefae615f1cb51334efb9
d86dc6266f527a14c8705a775267e26490b32708
/man/multipleGroupDEgenes.Rd
a0f11d05483dd4e01be7cfa3b16fac53758e8ac3
[ "MIT" ]
permissive
Tong-Chen/YSX
a3c221ea891d9ac5136e35a231606440484e981c
4bb1d21f3d4fa5703937207fce4301ad5ac3aa16
refs/heads/master
2021-06-26T03:19:10.277663
2021-01-12T13:14:15
2021-01-12T13:14:15
172,993,901
7
8
null
null
null
null
UTF-8
R
false
true
1,365
rd
multipleGroupDEgenes.Rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/transcriptome.R \name{multipleGroupDEgenes} \alias{multipleGroupDEgenes} \title{DE genes analysis for multiple groups.} \usage{ multipleGroupDEgenes( dds, comparePairFile = NULL, design = "conditions", padj = 0.05, log2FC = 1, dropCol = c("lfcSE", "stat"), output_prefix = "ehbio", ... ) } \arguments{ \item{dds}{\code{\link{DESeq}} function returned object.} \item{comparePairFile}{A file containing sample groups for comparing. Optional. If not given, the function will use \code{colData} information in \code{dds} and perform group compare for all possible combinations.\preformatted{groupA groupB groupA groupC groupC groupB }} \item{design}{The group column name. Default "conditions".} \item{padj}{Multiple-test corrected p-value. Default 0.05.} \item{log2FC}{Log2 transformed fold change. Default 1.} \item{dropCol}{Columns to drop in final output. Default \code{c("lfcSE", "stat")}. Other options \code{"ID", "baseMean", "log2FoldChange", "lfcSE", "stat", "pvalue", "padj"}. This has no specific usages except make the table clearer.} \item{output_prefix}{A string as prefix of output files.} \item{...}{Additional parameters given to \code{\link{ggsave}}.} } \description{ DE genes analysis for multiple groups. } \examples{ multipleGroupDEgenes(dds) }
dd66c3d6d294b7aac7896c676e94c36083d61a90
78fd77926f0f663b5da2bb5b5c167ad7c0855ab0
/run_analysis.R
76f2cd439a61a90dd98990c79a4b24eef9d7223a
[]
no_license
bencollins21/Getting-and-cleaning-data-course-project
4492885609614467d4ae0524be0a09b9bfcff618
f21082fd50252f57a3c72039f2c4566d7a5118b1
refs/heads/master
2020-12-04T05:44:36.388802
2020-01-03T18:01:56
2020-01-03T18:01:56
231,638,183
0
0
null
null
null
null
UTF-8
R
false
false
6,405
r
run_analysis.R
## Script: run_analysis.R
## Course: Getting and Cleaning Data
## Author: Ben Collins

## This script fulfills the requirements of the Getting and Cleaning Data
## course project:
##
## 1. Merges the training and the test sets to create one data set.
## 2. Extracts only the measurements on the mean and standard deviation for
##    each measurement.
## 3. Uses descriptive activity names to name the activities in the data set.
## 4. Appropriately labels the data set with descriptive variable names.
## 5. From the data set in step 4, creates a second, independent tidy data set
##    with the average of each variable for each activity and each subject.

## NOTE: This script uses a mixture of base R and tidyverse functions.
library(tidyverse)

## ___________________________________________________________________________
## Start of script
## ___________________________________________________________________________

## Download the data to the working directory and release it from the zip.
data_url <- "https://d396qusza40orc.cloudfront.net/getdata%2Fprojectfiles%2FUCI%20HAR%20Dataset.zip"
data_file <- "UCI HAR Dataset.zip"
download.file(data_url, data_file, method = "curl", quiet = TRUE)
unzip(data_file, setTimes = TRUE)

## Load the names of the features.
features <- read.table(file = "UCI HAR Dataset/features.txt",
                       col.names = c("column_number", "feature_name"),
                       colClasses = c("integer", "character"))

## Flag the features required for the second requirement (mean and standard
## deviation measurements) and build syntactically valid names for them.
features <- features %>%
  ## Indicator for mean()/std() measurements only.
  mutate(is_needed = str_detect(feature_name, "-(mean|std)\\(\\)")) %>%
  ## Syntactically valid, unique names for the features.
  mutate(feature_column = make.names(feature_name, unique = TRUE)) %>%
  ## Collapse the runs of dots introduced by make.names() into underscores.
  mutate(feature_column = str_replace_all(feature_column, "\\.+", "_"))

## Load the activity labels.
activity_labels <- read.table(file = "UCI HAR Dataset/activity_labels.txt",
                              col.names = c("id", "activity"),
                              colClasses = c("integer", "factor"))

## Change activity labels to lower case.
## (Bug fix: the original referenced a non-existent `activity_labels2`
## object, which would have stopped the script here.)
activity_labels$activity <- tolower(activity_labels$activity)

## Read the test or training data files from the given directory ("train" or
## "test") and return a labelled data frame.
read_from_directory <- function(directory) {
  directory_path <- file.path("UCI HAR Dataset", directory)

  ## Fail early if an incorrect directory was supplied.
  if (!dir.exists(directory_path)) {
    stop("Directory not found")
  }

  ## Read the measurements and rename the columns to match the features,
  ## which follows the fourth requirement.
  data_set <- read.table(file = file.path(directory_path,
                                          str_c("X_", directory, ".txt")),
                         col.names = features$feature_column)

  ## Keep only the mean/std columns flagged in `features` (second
  ## requirement).
  data_set <- data_set %>%
    select(one_of(features %>% filter(is_needed) %>% pull(feature_column)))

  ## Read in the activity ids.
  activity <- read.table(file = file.path(directory_path,
                                          str_c("y_", directory, ".txt")),
                         col.names = "id",
                         colClasses = "numeric")

  ## Attach the descriptive activity labels (third requirement) and put the
  ## description at the start of the data set.
  activity <- left_join(activity, activity_labels, by = "id")
  data_set <- cbind(activity = activity[, 2], data_set)

  ## Read in the subject ids and put them at the start of the data set.
  subject <- read.table(file = file.path(directory_path,
                                         str_c("subject_", directory, ".txt")),
                        col.names = "subject",
                        colClasses = "integer")
  data_set <- cbind(subject, data_set)

  ## Tag each row with its origin (test or train) before returning.
  data_set %>%
    mutate(data_set = directory) %>%
    select(data_set, everything())
}

## Load both the training and test data and merge them (first requirement).
training_data <- read_from_directory("train")
test_data <- read_from_directory("test")
data <- rbind(training_data, test_data)

## We have now successfully loaded and merged the data.

## To fulfil the fifth requirement we need to create another independent,
## tidy data set. pivot_longer() reduces the number of columns and lets us
## summarise the average of each variable per activity and subject.
tidy_data <- pivot_longer(data,
                          cols = -c(data_set, subject, activity),
                          names_to = c("sensor", "measure", "dimension"),
                          names_sep = "_")

## Change the characters to factors to aid rearrangement.
tidy_data <- mutate_if(tidy_data, is.character, as_factor)

## Rearrange the data to make it easier to read.
tidy_data <- arrange(tidy_data, data_set, subject, activity,
                     sensor, measure, dimension)

## Save the data in the working directory.
write.table(tidy_data, file = "tidy_data.txt", row.names = FALSE)

## Tidy the data up further for submission, including defining the average
## column (mean of each variable per subject/activity combination).
submit_data <- tidy_data %>%
  group_by(subject, activity, sensor, measure, dimension) %>%
  summarise(mean = mean(value))

## Save the submission data in the working directory.
write.table(submit_data, file = "submit_data.txt", row.names = FALSE)

## ___________________________________________________________________________
## End of script
## ___________________________________________________________________________
e986b92309d9c8f42e0a6c29081581f27aad3736
254e5f76e62f641ed5402c645bf9b56ccfa6b374
/R/wbccPredictor.R
418e320262cba360d97eeb43fc511f3c6d60100e
[]
no_license
molepi/DNAmArray
cbd5ab8d583283e2aa8b567866d408f450e6dfdf
f7ca1f7ca567bce749529d94e49e457b57287c1f
refs/heads/master
2022-09-10T20:03:11.303962
2022-08-24T07:21:14
2022-08-24T07:21:14
80,819,365
8
3
null
2022-08-24T07:21:15
2017-02-03T10:29:00
R
UTF-8
R
false
false
6,757
r
wbccPredictor.R
##' Train the cell-type composition predictor using partial least squares
##' regression on methylation values
##'
##' @title Train a predictor for cell-type composition
##' @param data Matrix with DNA methylation beta-values (features x samples)
##' @param covar Matrix of covariates to correct for in the model
##' @param cellPer Matrix of cell percentages on which the predictor will be trained
##' @param model Formula (default: cellPer ~ covar + data)
##' @param ncomp Number of PLS components to train (default: 50)
##' @param keep.model Logical specifying whether to return the model (default: FALSE)
##' @param ... Additional parameters for plsr see ?plsr
##' @return Prediction model PLSR object
##' @author mvaniterson
##' @export
##' @importFrom pls plsr
train_wbcc <- function(data, covar, cellPer, model = formula(cellPer ~ covar + data),
                       ncomp = 50, keep.model = FALSE, ...) {

    if (is.null(data) || is.null(covar) || is.null(cellPer))
        stop("Data, covariates, and cell percentages must be provided.")

    if (ncol(data) != nrow(covar) || nrow(covar) != nrow(cellPer))
        stop("Data column number must match covariate and cell percentages row number.")

    ## all.equal() returns a character vector when the inputs differ, so it
    ## must be wrapped in isTRUE() before logical negation (the original
    ## `!all.equal(...)` errored with "invalid argument type" on mismatch).
    if (!isTRUE(all.equal(colnames(data), rownames(covar))) ||
        !isTRUE(all.equal(rownames(covar), rownames(cellPer))))
        stop("Data column names must match covariate and cell percentages row names.")

    ## Align the covariates with the data columns.
    matchID <- match(rownames(covar), colnames(data))
    if (any(is.na(matchID)))
        stop("Data column names must match covariate row names.")
    covar <- covar[matchID, ]

    ## Align the cell percentages with the data columns. (Bug fix: the
    ## original recomputed the match from the already-reordered covariates,
    ## which made the reordering of cellPer a no-op.)
    matchID <- match(rownames(cellPer), colnames(data))
    if (any(is.na(matchID)))
        stop("Data column names must match cell percentage row names.")
    cellPer <- cellPer[matchID, ]

    if (anyNA(data) || anyNA(covar) || anyNA(cellPer))
        stop("Missing values are not allowed when training the predictor.")

    ## Model training using PLSR.
    predictor <- plsr(model, ncomp = ncomp,
                      data = list(cellPer = cellPer, covar = covar, data = t(data)),
                      ...)

    ## Drop the (potentially very large) model frame unless requested.
    if (!keep.model) {
        predictor$model <- NULL
    }

    invisible(predictor)
}

##' Predict cell percentages based on a matrix or mvr-based model
##'
##' @title Predict cell-type composition from methylation values
##' @param pred A matrix or mvr predictor, trained using train_wbcc
##' @param data Matrix of DNA methylation beta values
##' @param covar Matrix of covariates to correct for (must match those in predictor)
##' @param transformation Transformation to apply to predicted values (default: no transformation)
##' @param ncomp Optimal number of components
##' @param impute Whether to median-impute missing values (default: TRUE)
##' @param ... Additional parameters for plsr see ?plsr
##' @return Predicted cell percentages (returned invisibly)
##' @author mvaniterson, lsinke
##' @export
##' @import pls
##' @importFrom stats coef median formula predict
predict_wbcc <- function(pred, data, covar, transformation = function(x) x,
                         ncomp = NULL, impute = TRUE, ...) {

    if (is.null(data) || is.null(covar))               # Check data and covariates provided
        stop("Both data and covariates must be provided.")

    if (ncol(data) != nrow(covar))                     # Check dimensions
        stop("Data column number must match covariate row number.")

    if (!isTRUE(all.equal(colnames(data), rownames(covar))))  # Check same names
        stop("Data column names must match covariate row names.")

    matchID <- match(rownames(covar), colnames(data))
    if (any(is.na(matchID)))
        stop("Data column names must match covariate row names.")
    covar <- covar[matchID, ]

    ## Use inherits()/is.matrix() rather than class(pred) == "...": since
    ## R 4.0 a matrix has class c("matrix", "array"), so the original
    ## comparison yielded a length-2 logical and failed inside if().
    if (inherits(pred, "mvr"))
        names <- dimnames(coef(pred))[[1]]
    else if (is.matrix(pred))
        names <- rownames(pred)
    else
        stop(paste("This function is not designed for a", class(pred)[1],
                   "class of predictor."))

    # covaNames <- gsub("covar", "", grep("covar", names, value=TRUE))
    # dataNames <- gsub("data", "", grep("data", names, value=TRUE))
    #
    # matchID <- match(covaNames, colnames(covar))
    # if(any(is.na(matchID)))
    #     stop("Covariates in the same do not match those in the predictor.")
    # covar <- covar[ , matchID]

    if (any(is.na(covar)) && !impute) {
        stop("Missing values are not allowed in the covariates if imputation is not specified.")
    } else if (any(is.na(covar)) && impute) {
        message("There are ", sum(is.na(covar)), " NA's in the covariate matrix. ",
                "These will be median imputed.")
        covar <- apply(covar, 2, function(x) {
            x[is.na(x)] <- median(x, na.rm = TRUE)
            x
        })
    }

    # matchID <- match(dataNames, rownames(data))
    # if(any(is.na(matchID)))
    #     warning("Row names of the sample do not match those of the predictor.")
    # data <- data[matchID, ]

    if (any(is.na(data)) && !impute) {
        stop("Missing values are not allowed in the data if imputation is not specified")
    } else if (any(is.na(data)) && impute) {
        message("There are ", sum(is.na(data)), " NA's in the data matrix. ",
                "These will be median imputed.")
        ## Impute only the missing entries with the row (probe) median.
        ## (Bug fix: the original assigned the row medians back over the whole
        ## rows, overwriting every observed value in any row containing an NA.)
        for (i in which(apply(data, 1, anyNA))) {
            row_median <- median(data[i, ], na.rm = TRUE)
            data[i, is.na(data[i, ])] <- row_median
        }
        ## Rows that were entirely NA fall back to the global median.
        data[is.na(data)] <- median(data, na.rm = TRUE)
    }

    # Prediction: dispatch on the predictor type.
    if (inherits(pred, "mvr")) {
        predicted <- predict(pred,
                             newdata = list(covar = covar, data = t(data)),
                             ncomp = ncomp, ...)
        predicted <- predicted[, , 1]
    } else if (is.matrix(pred)) {
        ## Linear predictor: intercept + covariates + methylation values.
        predicted <- cbind(1, covar, t(data)) %*% pred
    }

    invisible(transformation(predicted))
}

##' Plots to validate a predictor trained using the train_wbcc function
##'
##' @title Validation plots
##' @param measured Measured cell percentages
##' @param predicted Cell percentages predicted by the model
##' @param ... Additional parameters (currently unused)
##' @return Correlation plots for measured and predicted cell percentages
##' @author ljsinke
##' @export
##' @importFrom ggplot2 ggplot aes geom_point geom_smooth facet_wrap
##' @importFrom reshape2 melt
##' @importFrom stats cor
plot_wbcc <- function(measured, predicted, ...)
{
    ## Annotate each cell type with its measured/predicted correlation.
    corrMat <- round(cor(measured, predicted), 4)
    for (k in seq_len(ncol(predicted))) {
        colnames(predicted)[k] <- paste0(colnames(predicted)[k],
                                         " (correlation: ", corrMat[k, k], ")")
    }

    ## Melt both matrices to long form; column 3 holds the values and
    ## column 2 the cell-type label.
    predicted <- melt(predicted)
    measured <- melt(measured)
    ggFrame <- data.frame(predicted = predicted[, 3],
                          measured = measured[, 3],
                          type = predicted[, 2])

    ggplot(data = ggFrame, mapping = aes(x = measured, y = predicted)) +
        geom_point(shape = 1) +
        geom_smooth(method = 'lm', formula = y ~ x, se = FALSE,
                    color = "#D67D87", size = 0.5) +
        facet_wrap(~type, scales = "free")
}
9e06a0a18c58fe11c45e8567738f12ceb53f28ac
a83946130e2f439f8b44e36ac6919cd94bda56f2
/folders/anRpackage/man/scaleBar.Rd
617837a1efda81e727e676b3c1e22c7e9b60b0f3
[ "MIT" ]
permissive
bwtian/Rtemplate
c44088eaf542558f44b5662586856a9f7c0b476f
275539f9d0f3699715fa03a86e6268898704b707
refs/heads/master
2021-01-10T22:02:09.226342
2014-11-26T01:26:03
2014-11-26T01:26:03
24,021,959
1
0
null
null
null
null
UTF-8
R
false
false
4,866
rd
scaleBar.Rd
\name{scaleBar} \alias{scaleBar} %- Also NEED an '\alias' for EACH other topic documented here. \title{ %% ~~function to do ... ~~ } \description{ %% ~~ A concise (1-5 lines) description of what the function does. ~~ } \usage{ scaleBar(lon, lat, distanceLon, distanceLat, distanceLegend, distanceUnit, dist.unit = "km", rec.fill = "white", rec.colour = "black", rec2.fill = "black", rec2.colour = "black", legend.colour = "black", legend.size = 3, orientation = TRUE, arrow.length = 500, arrow.distance = 300, arrow.colour = "black", arrow.size = 1, arrow.North.size = 6, arrow.North.color = "black") } %- maybe also 'usage' for other objects documented here. \arguments{ \item{lon}{ %% ~~Describe \code{lon} here~~ } \item{lat}{ %% ~~Describe \code{lat} here~~ } \item{distanceLon}{ %% ~~Describe \code{distanceLon} here~~ } \item{distanceLat}{ %% ~~Describe \code{distanceLat} here~~ } \item{distanceLegend}{ %% ~~Describe \code{distanceLegend} here~~ } \item{distanceUnit}{ %% ~~Describe \code{distanceUnit} here~~ } \item{dist.unit}{ %% ~~Describe \code{dist.unit} here~~ } \item{rec.fill}{ %% ~~Describe \code{rec.fill} here~~ } \item{rec.colour}{ %% ~~Describe \code{rec.colour} here~~ } \item{rec2.fill}{ %% ~~Describe \code{rec2.fill} here~~ } \item{rec2.colour}{ %% ~~Describe \code{rec2.colour} here~~ } \item{legend.colour}{ %% ~~Describe \code{legend.colour} here~~ } \item{legend.size}{ %% ~~Describe \code{legend.size} here~~ } \item{orientation}{ %% ~~Describe \code{orientation} here~~ } \item{arrow.length}{ %% ~~Describe \code{arrow.length} here~~ } \item{arrow.distance}{ %% ~~Describe \code{arrow.distance} here~~ } \item{arrow.colour}{ %% ~~Describe \code{arrow.colour} here~~ } \item{arrow.size}{ %% ~~Describe \code{arrow.size} here~~ } \item{arrow.North.size}{ %% ~~Describe \code{arrow.North.size} here~~ } \item{arrow.North.color}{ %% ~~Describe \code{arrow.North.color} here~~ } } \details{ %% ~~ If necessary, more details than the description above ~~ } \value{ %% 
~Describe the value returned %% If it is a LIST, use %% \item{comp1 }{Description of 'comp1'} %% \item{comp2 }{Description of 'comp2'} %% ... } \references{ %% ~put references to the literature/web site here ~ } \author{ %% ~~who you are~~ } \note{ %% ~~further notes~~ } %% ~Make other sections like Warning with \section{Warning }{....} ~ \seealso{ %% ~~objects to See Also as \code{\link{help}}, ~~~ } \examples{ ##---- Should be DIRECTLY executable !! ---- ##-- ==> Define data, use random, ##-- or do help(data=index) for the standard data sets. ## The function is currently defined as function (lon, lat, distanceLon, distanceLat, distanceLegend, distanceUnit, dist.unit = "km", rec.fill = "white", rec.colour = "black", rec2.fill = "black", rec2.colour = "black", legend.colour = "black", legend.size = 3, orientation = TRUE, arrow.length = 500, arrow.distance = 300, arrow.colour = "black", arrow.size = 1, arrow.North.size = 6, arrow.North.color = "black") { laScaleBar <- createScaleBar(lon = lon, lat = lat, distanceLon = distanceLon, distanceLat = distanceLat, distanceLegend = distanceLegend, dist.unit = dist.unit) rectangle1 <- geom_polygon(data = laScaleBar$rectangle, aes(x = lon, y = lat), fill = rec.fill, colour = rec.colour) rectangle2 <- geom_polygon(data = laScaleBar$rectangle2, aes(x = lon, y = lat), fill = rec2.fill, colour = rec2.colour) t <- laScaleBar$legend[, "text"] scaleBarLegend <- annotate("text", label = c(t[-length(t)], paste(t[length(t)], dist.unit, sep = " ")), x = laScaleBar$legend[, "long"], y = laScaleBar$legend[, "lat"], size = legend.size, colour = legend.colour, family = "Times") kmLegend <- annotate("text", label = dist.unit, x = lon, y = lat, size = legend.size, colour = legend.colour, family = "Times") res <- list(rectangle1, rectangle2, scaleBarLegend) if (orientation) { coordsArrow <- createOrientationArrow(scaleBar = laScaleBar, length = arrow.length, distance = arrow.distance, dist.unit = dist.unit) arrow <- list(geom_segment(data = 
coordsArrow$res, aes(x = x, y = y, xend = xend, yend = yend), colour = arrow.colour, size = arrow.size), annotate("text", label = "N", x = coordsArrow$coordsN[1, "x"], y = coordsArrow$coordsN[1, "y"], size = arrow.North.size, colour = arrow.North.color, family = "Times")) res <- c(res, arrow) } return(res) } } % Add one or more standard keywords, see file 'KEYWORDS' in the % R documentation directory. \keyword{ ~kwd1 } \keyword{ ~kwd2 }% __ONLY ONE__ keyword per line
dd5028fadcdfe2fdf870936775c6b9bfe4c1b43c
2a2646e70f5888309470ec1a2cf8f4a18f2a8780
/R/json.R
7422a2b242a3c456c8b49b9b96a9379559ac4622
[ "Apache-2.0" ]
permissive
stencila/rasta
174512f7cef34b5ff23156f73d6d2a4a39146d8d
4fa308b6e5fd8e49b1b9053a8afcb71d5af71d6b
refs/heads/master
2021-10-29T08:25:02.078134
2021-10-11T22:00:23
2021-10-11T22:00:23
169,194,351
3
0
null
null
null
null
UTF-8
R
false
false
2,237
r
json.R
#' Functions for JSON handling
#'
#' These functions simply provide a consistent means for calling `jsonlite`
#' functions `toJSON` and `fromJSON` from `json-rpc` and `logger`.
#'
#' @name json
#' @rdname json
NULL

#' @include util.R
library(stencilaschema)

#' Declare that a node is scalar
#'
#' So that the object is "unboxed" when serialized to JSON.
#' Prepends the class "scalar" to the object's class vector; NULL is
#' returned unchanged.
#'
#' @param object The object to mark as a scalar
as_scalar <- function(object) {
  if (!is.null(object)) {
    class(object) <- c("scalar", class(object))
  }
  object
}

# Serialize an object to a JSON string.
#
# @param object The node/object to serialize
# @param pretty Whether to pretty-print the JSON (default FALSE)
# @return A plain character scalar of JSON
to_json <- function(object, pretty = FALSE) {
  # Unbox scalar properties of Stencila nodes.
  # This is necessary because otherwise numbers, strings etc get
  # stringified as arrays of numbers, strings etc
  # It is not possible to use `auto_unbox` in `jsonlite::toJSON`
  # because that risks unboxing things that should not be e.g.
  # `DatatableColumn$values` of length 1.
  #
  # `transform` here is the recursive node walker (presumably from util.R /
  # stencilaschema, not base::transform) — it applies the function to every
  # node in the tree.
  node <- transform(object, function(node) {
    # First class entry; "scalar" when tagged by as_scalar() above.
    cls <- utils::head(class(node), n = 1)
    if (
      !is.null(cls) &&
        cls == "scalar" &&
        !is.null(node) &&
        is.atomic(node) &&
        # NOTE(review): for a plain atomic vector dim() is NULL, so
        # length(dim(node)) is 0, not 1 — this condition only fires when an
        # explicit 1-d dim attribute has been set upstream. Confirm this is
        # intended; `length(node) == 1` may have been meant.
        length(dim(node)) == 1
    )
      jsonlite::unbox(node)
    else
      node
  })
  as.character(
    # jsonlite warnings, like this one
    # https://github.com/jeroen/jsonlite/blob/c9c22efdaeed089d503c7d85863cc050ee4d833a/R/asJSON.list.R#L41
    # cause issues (due to error handling elsewhere?) so we suppress them for now.
    suppressWarnings(
      jsonlite::toJSON(
        node,
        pretty = pretty,
        force = TRUE,
        null = "null",
        na = "null",
        Date = "ISO8601"
      )
    )
  )
}

# Parse a JSON string into a Stencila node tree.
#
# @param json A JSON character string
# @return The parsed object, passed through to_node() below
from_json <- function(json) {
  object <- jsonlite::fromJSON(json, simplifyDataFrame = FALSE)
  to_node(object)
}

# Call Stencila node constructors with parsed JSON objects
# to (a) ensure scalar properties are
# marked as such (for when they are sent back to JSON; see above), and
# (b) to check that they conform to the constructors
#
# Recurses into lists; when a list carries a `type` entry naming a function
# in scope (looked up via get0), that constructor is called with the
# remaining entries as arguments. `map` is presumably the list-mapping
# helper from util.R.
to_node <- function(node) {
  if (is.list(node)) {
    if (!is.null(node$type)) {
      func <- get0(node$type)
      if (!is.null(func)) {
        args <- map(node, to_node)
        # Drop the type tag; it is not a constructor argument.
        args["type"] <- NULL
        return(do.call(func, args))
      }
    }
    return(map(node, to_node))
  }
  node
}
43027f56f0ce4d847b0874e84affb1f54f035904
5b649005f242829df1ea2893b956684e5d7b837c
/man/evaluate_clonealign.Rd
f047da2058aa88c40b4f318a5a4af84f60051ef7
[ "Apache-2.0" ]
permissive
nceglia/clonealign
11a76695d0560e719fdd669164ae4de34904777f
773e59e5a64e5b42a94ef8703c999e5ba62e7e27
refs/heads/master
2020-05-03T05:15:03.063684
2019-03-29T18:54:49
2019-03-29T18:54:49
178,442,850
0
0
null
2019-03-29T16:44:32
2019-03-29T16:44:32
null
UTF-8
R
false
true
4,021
rd
evaluate_clonealign.Rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/clonealign.R \name{evaluate_clonealign} \alias{evaluate_clonealign} \title{Evaluate the quality of a clonealign fit} \usage{ evaluate_clonealign(gene_expression_data, copy_number_data, clonealign_fit, prop_holdout = 0.2, n_samples = 2, s = NULL, ...) } \arguments{ \item{gene_expression_data}{Either a \code{SingleCellExperiment} or matrix of counts, same as input to \code{clonealign}} \item{copy_number_data}{A gene-by-clone matrix of copy numbers, the same as the input to \code{clonealign}} \item{clonealign_fit}{The fit returned by a call to \code{clonealign()} on the full geneset} \item{prop_holdout}{The proportion of genes to hold out as a \emph{test} set for predicting gene expression} \item{n_samples}{The number of random permutations to establish a null distribution of predictive performance} \item{s}{Vector of cell size factors - defaults to the total counts per cell} \item{...}{Additional arguments to pass to the \code{clonealign} call} } \value{ A list object describing the evaluation results. See \code{details} } \description{ Use mean squared error of predicting the expression of held-out genes to evaluate the quality of a clonealign fit. } \details{ This evaluation function is built around the idea of how good predicted expression is under the model given the inferred (assigned) clones compared to how well you could predict expression given a random clonal assignment. \strong{Evaluations performed} \enumerate{ \item On the \emph{full} dataset, the mean square error is compared to the randomized error, and a message printed about the ratio of the two errors and the proportion of time the observed error was less than the error under a null distribution. 
If either the error under the null is smaller than the observed, or is smaller some percentage of time, then the fit has real problems and should be abandoned as it suggests the model is stuck in a local maxima (which could happen if the proposed clones aren't actually present in the RNA-seq). \item A certain proportion of genes (as decided by the \code{prop_holdout} parameter) are held out as a \emph{test} set, and the clonealign fit is re-performed on the \code{1 - prop_holdout} proportion of genes. The function will then print an agreement table of clone assignments between the full table and the reduced table. If these vastly disagree for only a small change in gene set (ie \code{prop_holdout} is around 0.1 or 0.2), then the fit may be unreliable as it is sensitive to the genes inputted. \item The same metrics from (1) are then printed where the evaluations are performed on the heldout set. Again, if the observed mean squared error given the clonealign fit isn't less than the null mean squared error a large proportion of the time, the fit may be unreliable. 
} \strong{Object returned} Everything computed above is returned in a list object with the following entries: \itemize{ \item full_clonealign_fit - the original clonealign fit on the full gene set that was passed in as the \code{clonealign_fit} argument \item full_observed_mse - the observed mean square error using the full gene set \item full_null_mse - A vector of mean square error under the randomized (permuted) distribution \item reduced_clonealign_fit - a clonealign fit on only the reduced (train) set of genes \item heldout_genes - the names of the genes held out (test set) for evaluating predictive performance out-of-sample \item kept_names - the names of retained genes as part of the reduced (train) set \item heldout_observed_mse - the observed mean square error on the heldout (test) set of genes \item heldout_null_mse - a vector of mean square errors under a null distribution of randomly permuting the clones } } \examples{ library(SingleCellExperiment) data(example_clonealign_fit) data(example_sce) copy_number_data <- rowData(example_sce)[,c("A", "B", "C")] evaluate_clonealign(example_sce, copy_number_data, example_clonealign_fit) }
e993858892e92ae19f3c29911bc792cba57015d4
1a08f81a8ebee2753b42333e77735f9416f4c396
/R/network.R
695481c82785d72b66f3d7dbb77402187255d602
[]
no_license
gaospecial/biblioreport
1404645207c667e71c0bd11c36af2fe9120ff124
ab407fad6223c8ab8c5eecd80d21bb7c0471d082
refs/heads/master
2023-09-05T19:31:05.171230
2021-11-09T11:40:29
2021-11-09T11:40:29
334,099,411
1
0
null
null
null
null
UTF-8
R
false
false
10,980
r
network.R
## Functions for (bibliometric) network analysis.

## Simplify a network:
## cap the number of nodes and drop edges below a weight threshold.

#' Simplified network
#'
#' Build a simplified bibliometric network from a bibliometrix data frame and
#' render it as an interactive visNetwork widget, or return the underlying
#' igraph object when \code{graph = TRUE}.
#'
#' @param M is a bibliometrixDB object
#' @param from start year
#' @param to stop year
#' @param nNode maximum number of nodes allowed
#' @param edge_weight_cutoff edge weight threshold (edges below this value are removed)
#' @param analysis type of analysis (passed to \code{biblioNetwork})
#' @param network type of network (passed to \code{biblioNetwork})
#' @param field column name of \code{M} from which network nodes are drawn
#' @param remove_keyword a regular expression; matching nodes are removed
#' @param ... passed on to \code{biblioNetwork}
#'
#' @return visNetwork object (or an igraph object when \code{graph = TRUE})
#' @export
#'
#' @name simplified_network
#'
#' @examples
#' library(bibliometrix)
#' data("garfield")
#' author_network(garfield)
simplified_network <- function(M,
                               from = NULL,
                               to = NULL,
                               nNode = 30,
                               remove_keyword = NULL,
                               edge_weight_cutoff = 1,
                               analysis,
                               network,
                               field,
                               delete_isolate = TRUE,
                               graph = FALSE,
                               ...){
  if (!field %in% colnames(M)) stop(paste0("M doesn't have ", field))
  require(bibliometrix)
  require(tibble)
  require(dplyr)
  require(igraph)
  require(RColorBrewer)
  require(visNetwork)

  ## Restrict records to the requested publication-year (PY) window,
  ## defaulting to the full range present in M.
  M$PY <- as.numeric(M$PY)
  PY_from <- min(M$PY, na.rm = TRUE)
  PY_to <- max(M$PY, na.rm = TRUE)
  if (is.null(from)) from <- PY_from
  if (is.null(to)) to <- PY_to
  if (from > to) stop(paste0("from is bigger than to."))
  m <- M %>% filter(PY>=from, PY <= to)

  ## Build the raw network matrix for the chosen analysis/network type.
  net_mat <- biblioNetwork(m, analysis = analysis,
                           network = network, sep = ";", ...)

  if(is.na(field)) stop("must specify Field tag (ID, AU, etc).")

  ## Rank candidate nodes by their record counts; keep at most nNode of the
  ## most frequent ones (optionally dropping those matching remove_keyword).
  members <- unlist(strsplit(m[,field], split = ";")) %>%
    trimws() %>%
    table() %>%
    sort(decreasing = T) %>%
    enframe(name = "field",value = "nRecord")
  if (!is.null(remove_keyword)){
    members <- members %>%
      dplyr::filter(!stringr::str_detect(field, remove_keyword))
  }
  idx <- rownames(net_mat) %in% head(members$field,nNode)
  net_mat_s <- net_mat[idx,idx]

  ## Convert the matrix into an undirected weighted graph, prune weak edges,
  ## collapse multi-edges/loops and (optionally) drop isolated nodes.
  net <- graph.adjacency(net_mat_s,weighted = TRUE, mode = "undirected")
  g <- net
  vertex.attributes(g)$size <- degree(g)
  g <- delete.edges(g,E(g)[edge.attributes(g)$weight < edge_weight_cutoff])
  g <- igraph::simplify(g)
  if (delete_isolate) g <- bibliometrix:::delete.isolates(g)
  if(graph == TRUE) return(g)

  ## Clustering result: Louvain communities, used to colour the nodes.
  member <- membership(cluster_louvain(g)) %>%
    enframe(name = "id", value = "cluster")
  color <- colorRampPalette(brewer.pal(8,"Paired"))(length(unique(member$cluster)))
  names(color) <- unique(member$cluster)
  member$color <- color[member$cluster]

  ## Assemble the interactive visNetwork widget.
  visData <- toVisNetworkData(g)
  visData$nodes <- visData$nodes %>%
    left_join(degree(g) %>% enframe(name = "id")) %>%
    left_join(member)
  visData$edges$value <- visData$edges$weight
  visNetwork(visData$nodes, visData$edges,physics=FALSE) %>%
    visLayout(randomSeed = 20200721) %>%
    visOptions(manipulation = FALSE,
               highlightNearest = list(enabled = TRUE, degree = 1,
                                       hover = TRUE))
}

#' Country collaboration network (thin wrapper over simplified_network).
#' @export
country_network <- function(M,
                            analysis = "collaboration",
                            network = "countries",
                            field = "AU_CO_NR",
                            edge_weight_cutoff = 0,
                            nNode = 20,
                            graph = FALSE,
                            ...){
  simplified_network(M, analysis = analysis, network = network,
                     field = field, nNode = nNode,
                     edge_weight_cutoff = edge_weight_cutoff,
                     graph = graph, ...)
}

#' Simplified author collaboration network
#'
#' @inheritParams simplified_network
#'
#' @return visNetwork object (or an igraph object when \code{graph = TRUE})
#' @export
#'
#' @examples
#' @name simplified_network
author_network <- function(M,
                           analysis = "collaboration",
                           network = "authors",
                           field = "AU",
                           edge_weight_cutoff = 5,
                           nNode = 200,
                           graph = FALSE,
                           ...){
  simplified_network(M, analysis = analysis, network = network,
                     field = field, nNode = nNode,
                     edge_weight_cutoff = edge_weight_cutoff,
                     graph = graph, ...)
}

#' University collaboration network
#'
#' @inheritParams simplified_network
#'
#' @return visNetwork object (or an igraph object when \code{graph = TRUE})
#' @export
#'
#' @examples
#' @name simplified_network
university_network <- function(M,
                               analysis = "collaboration",
                               network = "universities",
                               field = "AU_UN_NR",
                               edge_weight_cutoff = 10,
                               nNode = 30,
                               graph = FALSE,
                               ...){
  simplified_network(M, analysis = analysis, network = network,
                     field = field, nNode = nNode,
                     edge_weight_cutoff = edge_weight_cutoff,
                     graph = graph, ...)
}

#' Keyword co-occurrence network
#'
#' @inheritParams simplified_network
#'
#' @return visNetwork object (or an igraph object when \code{graph = TRUE})
#' @export
#'
#' @examples
#' @name simplified_network
keyword_network <- function(M,
                            nNode = 100,
                            edge_weight_cutoff = 3,
                            field = "ID",
                            analysis = "co-occurrences",
                            network = "keywords",
                            graph = FALSE,
                            ...){
  simplified_network(M=M, nNode=nNode, field = field,
                     edge_weight_cutoff = edge_weight_cutoff,
                     analysis=analysis, network = network,
                     graph = graph, ...)
}

## Network helper functions.

#' Rescale a numeric vector to the [0, 1] range.
#' @export
range01 <- function(x){(x-min(x))/(max(x)-min(x))}

#' Modify a graph object
#'
#' Add a PageRank score to each vertex.
#'
#' @param g an igraph object
#'
#' @return a new igraph object with the extra vertex attribute
#' @export
#'
#' @name graph_add_node
#'
#' @examples
graph_add_node_pagerank <- function(g){
  V(g)$pagerank <- page.rank(g)[["vector"]]
  return(g)
}

#' Add a degree attribute to each vertex.
#' @inheritParams graph_add_node_pagerank
#' @name graph_add_node
#' @export
graph_add_node_degree <- function(g){
  V(g)$degree <- degree(g)
  return(g)
}

#' Add node attributes
#' @export
graph_add_node_attr <- function(g, data, id = "id", cols = colnames(data)){
  # Join attributes from `data` onto the graph vertices by node id:
  # `id` is the column of `data` holding the node ids, `cols` are the
  # columns of `data` to copy onto the vertices.
  # TODO: should existing attributes be skipped or overwritten?
  g.id <- names(V(g))
  data <- as.data.frame(data)
  rownames(data) <- data[,id]
  cols <- cols[!cols %in% id]
  for (i in 1:length(cols)){
    vertex_attr(g, name = cols[[i]]) <- data[g.id, cols[[i]]]
  }
  return(g)
}

#' Set node size from a vertex attribute (optionally rescaled to [0, 1]).
#' @export
graph_set_node_size <- function(g, by = "degree", scale01 = TRUE,
                                max_size = 10){
  value <- vertex_attr(g, name = by)
  if (isTRUE(scale01)){
    value <- range01(value)
  }
  # +1 keeps every node visible even when its scaled value is 0.
  size <- (value * max_size) + 1
  V(g)$size <- size
  return(g)
}

#' @export
graph_set_node_color <- function(g, by = "year", decreasing = FALSE,
                                 scale01 = FALSE, palette_name = "YlOrRd"){
  ## Set the node colours for the graph.
  ## By default colour by year; any other node attribute may be used.
  value <- vertex_attr(g, name = by)
  if (isTRUE(scale01)){
    value <- range01(value)
  }
  ## Build a colour ramp with one colour per distinct value (capped at 100).
  uniq_value <- sort(unique(value),decreasing = decreasing)
  my_palette <- brewer.pal(n=7,name = palette_name)
  nColor <- 100
  if (length(uniq_value) < 100 ) nColor <- length(uniq_value)
  colors <- colorRampPalette(my_palette)(nColor)
  names(colors) <- uniq_value
  V(g)$color <- colors[as.character(value)]
  return(g)
}

#' Subgraph of the topN vertices (by `by`) within each PY slice.
#' NOTE(review): the `ratio` parameter is accepted but never used, and the
#' grouping is hard-coded to the PY column rather than `slice` — confirm.
#' @export
graph_subgraph <- function(g, by = "degree", slice = "PY", topN = 10,
                           ratio = 0.1){
  if( !by %in% vertex_attr_names(g)) stop(by, " is not a graph attribute.\n")
  if( !slice %in% vertex_attr_names(g)) stop(slice, " is not a graph attribute.\n")
  data <- visNetwork::toVisNetworkData(g)
  nodes <- data$nodes %>%
    group_by(PY) %>%
    arrange(desc(degree)) %>%
    filter(row_number() <= topN)
  induced.subgraph(g, vids = nodes$id)
}

#' Render an igraph object as a visNetwork widget with an igraph layout.
#' @export
vis_histNet <- function(g, node.title = "title", node.size = "size",
                        node.color = "color", edge.color = "color",
                        layout = "layout_with_fr"){
  data <- toVisNetworkData(g)
  visNetwork(nodes = data$nodes, edges = data$edges) %>%
    visIgraphLayout(physics = FALSE, layout = layout) %>%
    visNodes(size = node.size, color = node.color, title=node.title) %>%
    visEdges(color = edge.color) %>%
    visOptions(highlightNearest = list(enabled=TRUE,hover=FALSE)) %>%
    visExport()
}

#' Two term network
#'
#' Plot a directed two-term network with ggraph, mapping edge aesthetics to
#' edge weight and node aesthetics to degree by default.
#'
#' @param graph igraph object
#'
#' @return ggplot
#'
#' @export
#'
#' @examples
two_term_network = function(graph,
                            graph_layout = "nicely",
                            edge_alpha = "weight",
                            edge_width = "weight",
                            edge_color = "weight",
                            node_color = "degree",
                            node_size = "degree",
                            node_label = "name"){
  graph %>%
    graph_add_node_degree() %>%
    ggraph(layout = graph_layout) +
    geom_edge_link(aes_string(edge_alpha = edge_alpha,
                              edge_width = edge_width,
                              edge_color = edge_color),
                   arrow = arrow(length = unit(3,"mm"), type = "closed"),
                   start_cap = circle(3, "mm"),
                   end_cap = circle(3,"mm"),
                   show.legend = F) +
    geom_node_label(aes_string(label = node_label,color=node_color,size=node_size),
                    label.size = NA,
                    alpha = 2/3,
                    show.legend = FALSE) +
    scale_size(range = c(3,6)) +
    theme_graph(base_family = "sans")
}

# Build an igraph-style one-line summary title for a graph object
# (e.g. "IGRAPH abc1234 U-W- 10 20 -- ").
igraph_title = function(object){
  if (!is_igraph(object)) {
    stop("Not a graph object")
  }
  title <- paste("IGRAPH",
                 substr(graph_id(object), 1, 7),
                 paste0(c("U", "D")[is_directed(object) + 1],
                        c("-", "N")[is_named(object) + 1],
                        c("-", "W")[is_weighted(object) + 1],
                        c("-", "B")[is_bipartite(object) + 1]),
                 vcount(object),
                 ecount(object),
                 "-- ")
  return(title)
}
9fa4805776758a30b6f8a35e642ecc52bab02dc1
7e8aa90cd9737ed979d1dac9350f1362bcfe4a96
/code/test_project1.R
0b30856e29c73a718c371a9bb8ae80707b959d24
[]
no_license
JXU0728/STAT-628-module-1
1f869e14a776246dd9009f86af12003143b7ff0e
cb628e13194ab9d7973ece7317ee691fff50da66
refs/heads/master
2021-05-04T15:18:27.731263
2018-02-05T16:08:13
2018-02-05T16:08:13
120,225,060
0
0
null
null
null
null
UTF-8
R
false
false
6,188
r
test_project1.R
## Body-fat regression analysis (STAT 628 module 1).
## Reads BodyFat.csv, fits linear models for BODYFAT, removes influential
## observations one at a time, runs stepwise selection, and compares models
## by MSE -- on the full data and on subgroups split at age 45.
library(ggplot2)
library(caret)
library(car)
library(rattle)
library(LEAP)
library(dplyr)

body_fit <- read.csv("BodyFat.csv", header = TRUE)

## Exploratory plots.
## BUG FIX: the original referenced an undefined object `body` (which is
## base R's body() function, so `body$BODYFAT` would error); `body_fit`
## is clearly what was meant, here and throughout.
qplot(DENSITY, BODYFAT, data = body_fit)
scatterplotMatrix(body_fit[, c(-1, -2, -3)])
plot(body_fit$BODYFAT)

### DEPENDENT VARIABLE: body fat
summary(body_fit)
plot(body_fit$BODYFAT)

## Full model and influence diagnostics; drop suspicious points one by one.
model <- lm(BODYFAT ~ ., data = body_fit)
par(mfrow = c(2, 2))
plot(model)
layout(1)
plot(model, which = 4)

model_try <- lm(BODYFAT ~ ., data = body_fit[c(-182), ]) ## bodyfat = 0
par(mfrow = c(2, 2))
plot(model_try)

model2 <- lm(BODYFAT ~ ., data = body_fit[c(-42), ]) ## wrist so slim
par(mfrow = c(2, 2))
plot(model2)
layout(1)
plot(model2, which = 4)
influencePlot(model2)

model3 <- lm(BODYFAT ~ ., data = body_fit[c(-42, -39), ]) ## wrist, weight
par(mfrow = c(2, 2))
plot(model3)
layout(1)
plot(model3, which = 4)
influencePlot(model3)

model4 <- lm(BODYFAT ~ ., data = body_fit[c(-42, -39, -221), ]) ## wrist, weight, seems normal
par(mfrow = c(2, 2))
plot(model4)
layout(1)
plot(model4, which = 4)
influencePlot(model4)

model5 <- lm(BODYFAT ~ ., data = body_fit[c(-42, -39, -221, -86), ]) ## wrist, weight, seems normal, ankle
par(mfrow = c(2, 2))
plot(model5)
layout(1)
plot(model5, which = 4)
influencePlot(model5) ## point 41, no reason to take out

#### Stepwise model selection on the cleaned full data
## (spelled "backward" explicitly instead of relying on partial matching of "back")
model5_back <- step(model5, direction = "backward")
model5_null <- lm(BODYFAT ~ 1, data = body_fit[c(-42, -39, -221, -86), ])
model5_for <- step(model5_null, scope = list(lower = model5_null, upper = model5),
                   direction = "forward")

## Diagnostics for the backward-selected model
qqPlot(model5_back, labels = body_fit$IDNO, simulate = TRUE) # normality
durbinWatsonTest(model5_back)   ## independence of residuals
crPlots(model5_back)            ## linearity test
ncvTest(model5_back)            ## homoscedasticity test
spreadLevelPlot(model5_back)    ## suggested transform; no need for transform
influencePlot(model5_back)      ## no point seems influential

## Diagnostics for the forward-selected model
qqPlot(model5_for, labels = body_fit$IDNO, simulate = TRUE) ## normality good
durbinWatsonTest(model5_for)    ## residuals may have some autocorrelation
crPlots(model5_for)             ## linearity seems good
ncvTest(model5_for)             ## homoscedasticity is good
spreadLevelPlot(model5_for)     ## no need for transform
influencePlot(model5_for)

#### MSE for forward and backward
sum(model5_back$residuals^2) / length(model5_back$residuals)
sum(model5_for$residuals^2) / length(model5_for$residuals)
### backward selection has better MSE but forward has a good VIF
## Final decision: model5_for

###############################################################
### Subgroup: age <= 45 (drop IDNO and AGE columns from the predictors)
body_45 <- body_fit[body_fit$AGE <= 45, ]
model45_1 <- lm(BODYFAT ~ ., data = body_45[, c(-1, -3)])
par(mfrow = c(2, 2))
plot(model45_1)
layout(1)
plot(model45_1, which = 4)

model45_2 <- lm(BODYFAT ~ ., data = body_45[body_45$IDNO != 42, c(-1, -3)])
par(mfrow = c(2, 2))
plot(model45_2)
layout(1)
plot(model45_2, which = 4)

model45_3 <- lm(BODYFAT ~ .,
                data = body_45[(body_45$IDNO != 42 & body_45$IDNO != 31), c(-1, -3)]) # point 163
par(mfrow = c(2, 2))
plot(model45_3)
layout(1)
plot(model45_3, which = 4)
influencePlot(model45_3) ### 163: short strong man

model45_4 <- lm(BODYFAT ~ .,
                data = body_45[(body_45$IDNO != 42 & body_45$IDNO != 31 & body_45$IDNO != 163), c(-1, -3)])
outlierTest(model45_4)
influencePlot(model45_4)

model45_back <- step(model45_4, direction = "backward", k = 2)
model45_null <- lm(BODYFAT ~ 1,
                   data = body_45[(body_45$IDNO != 42 & body_45$IDNO != 31 & body_45$IDNO != 163), ])
model45_for <- step(model45_null, scope = list(lower = model45_null, upper = model45_4),
                    direction = "forward")
model45_both <- step(model45_null, scope = list(lower = model45_null, upper = model45_4),
                     direction = "both")

sum(model45_back$residuals^2) / length(model45_back$residuals)
sum(model45_for$residuals^2) / length(model45_for$residuals)

qqPlot(model45_back)           ## normality good
ncvTest(model45_back)          ## homoskedasticity good
durbinWatsonTest(model45_back) ## independence good
crPlots(model45_back)          ## linearity good

#### Subgroup: age > 45
body45_up <- body_fit[body_fit$AGE > 45, ]
model45_up1 <- lm(BODYFAT ~ ., data = body45_up[, c(-1, -3)])
par(mfrow = c(2, 2))
plot(model45_up1)
layout(1)
plot(model45_up1, which = 4)

model45_up2 <- lm(BODYFAT ~ ., data = body45_up[body45_up$IDNO != 39, c(-1, -3)])
par(mfrow = c(2, 2))
plot(model45_up2)
layout(1)
plot(model45_up2, which = 4)

model45_up3 <- lm(BODYFAT ~ .,
                  data = body45_up[(body45_up$IDNO != 39 & body45_up$IDNO != 86), c(-1, -3)])
par(mfrow = c(2, 2))
plot(model45_up3)
layout(1)
plot(model45_up3, which = 4)
influencePlot(model45_up3)

model45_up4 <- lm(BODYFAT ~ .,
                  data = body45_up[(body45_up$IDNO != 39 & body45_up$IDNO != 86 & body45_up$IDNO != 221), c(-1, -3)])
qqPlot(model45_up4, labels = body_fit$IDNO, simulate = TRUE) ## normality good
crPlots(model45_up4)          ## linearity
ncvTest(model45_up4)          ## homoscedasticity
durbinWatsonTest(model45_up4) ## independence

# model45_up3_221 <- lm(BODYFAT ~ .,
#   data = body45_up[(body45_up$IDNO != 39 & body45_up$IDNO != 86 & body45_up$IDNO != 221), c(-1, -3, -4)])

#### Stepwise selection
model45_up_back <- step(model45_up4, direction = "backward")
## BUG FIX: the original filter read `body45_up != 221` (missing `$IDNO`),
## which compared every cell of the data frame against 221.
model45_up3_null <- lm(BODYFAT ~ 1,
                       data = body45_up[(body45_up$IDNO != 39 & body45_up$IDNO != 86 & body45_up$IDNO != 221), c(-1, -3)])
model45_up_for <- step(model45_up3_null,
                       scope = list(lower = model45_up3_null, upper = model45_up4),
                       direction = "forward")

sum(model45_up_back$residuals^2) / length(model45_up_back$residuals)
sum(model45_up_for$residuals^2) / length(model45_up_for$residuals)

#### MSE comparison: subgroup models vs. the full-data model restricted to each subgroup
mse_age45_back <- sum(model45_back$residuals^2) / length(model45_back$residuals^2)
mse_age45_for <- sum(model45_for$residuals^2) / length(model45_for$residuals)
mse_full45_for <- sum(model5_for$residuals[body_fit$AGE <= 45]^2) / 137
mse_full45_back <- sum(model5_back$residuals[body_fit$AGE <= 45]^2) / 137
mse_age45up_back <- sum(model45_up_back$residuals^2) / length(model45_up_back$residuals)
mse_full45up_back <- sum(model5_back$residuals[body_fit$AGE > 45]^2, na.rm = TRUE) / 111
7e14c206b0498c0c0415c6a97fecf93fd635dce4
4e6b07f669f14a8aa4de162f0251f8cf7d3bce38
/man/susie_auto.Rd
28c994ca9abfdf886dedfca0b94e35755c3085cc
[]
no_license
jhmarcus/susieR
0e48816b787dfdff49ba030359b084ff2063198b
2f1e015af4bdb1b59874f6f172aa5a9b2d8faace
refs/heads/master
2020-03-16T23:39:00.341229
2018-05-10T20:59:21
2018-05-10T20:59:21
null
0
0
null
null
null
null
UTF-8
R
false
true
1,002
rd
susie_auto.Rd
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/susie_auto.R
\name{susie_auto}
\alias{susie_auto}
\title{An attempt to automate reliable running of susie even on hard problems}
\usage{
susie_auto(X, Y, L_init = 1, L_max = 512, verbose = FALSE, init_tol = 1)
}
\arguments{
\item{X}{an n by p matrix of covariates}

\item{Y}{an n vector}

\item{L_init}{the initial value of L to consider}

\item{init_tol}{the tolerance to pass to susie during early runs (set big to run faster)}

\item{L_max}{the maximum value of L to consider}
}
\description{
An attempt to automate reliable running of susie even on hard problems
}
\details{
Currently runs a 3-stage strategy for each L: first fit susie with very small residual error,
then estimate residual error, then estimate prior variance. If the last step estimates
some prior variances to be 0 then stop. Otherwise double L and repeat.
Initial runs are done with lax tolerance (init_tol); the final run is done with the default tolerance.
}
9d467ff82800db24b3d2e66269c55090f9b78825
cb56b3fb311c79a9f0fef6b7815b9ba934f16f50
/R/util.R
38a9978196c300047f519b721e9ad8d3f3bd175d
[]
no_license
yutannihilation/Redashr
c3d49d346eeb3b3db9c1c2f8421e65af0c5ddcc0
71b525872c0c7b7f7d7cf8f3eaee8ad4d869b89e
refs/heads/master
2021-07-11T18:17:55.915287
2017-10-16T04:46:45
2017-10-16T04:46:45
106,435,124
3
3
null
2017-10-16T04:46:46
2017-10-10T15:23:16
R
UTF-8
R
false
false
2,578
r
util.R
# Utils

# Low-level request helper for the Redash REST API.
# Sends an HTTP request with the `Authorization: Key <api_key>` header,
# fails on any HTTP error status, and returns the parsed response body.
# `...` is passed on to httr::VERB() (e.g. `body`, `encode`); unless
# `verbose = TRUE`, messages emitted while parsing the response are hidden.
redash_request <- function(verb, url, api_key, ..., verbose = FALSE) {
  res <- httr::VERB(
    verb = verb,
    url = url,
    config = httr::add_headers(Authorization = glue::glue("Key {api_key}")),
    ...
  )
  httr::stop_for_status(res)
  if (verbose) {
    httr::content(res)
  } else {
    suppressMessages(httr::content(res))
  }
}

# List the data sources configured on the Redash server, as a list named by
# each source's `name` field.
get_data_sources <- function(base_url, api_key, ..., verbose = FALSE) {
  url <- glue::glue("{base_url}/api/data_sources")
  result <- redash_request("GET", url, api_key, ..., verbose = verbose)
  # result contains NULL, which makes bind_rows() to fail
  setNames(result, purrr::map_chr(result, "name"))
}

# Submit a query for execution; the server responds with either cached
# results or a job descriptor to poll via get_job_status().
post_query <- function(base_url, api_key, query, query_id, data_source_id, ...,
                       verbose = FALSE) {
  url <- glue::glue("{base_url}/api/query_results")
  redash_request(
    "POST",
    url,
    api_key,
    body = list(
      query = query,
      query_id = query_id,
      data_source_id = data_source_id
    ),
    encode = "json",
    ...,
    verbose = verbose
  )
}

# Server-side error messages that should be treated as an empty-but-successful
# result rather than a failure.
IGNORE_ERRORS <- c(
  "Query completed but it returned no data."
)

# Poll a query job and return its `job` object.
# Redash job status 4 means failure; a failure whose message is listed in
# IGNORE_ERRORS is rewritten as success (status 3) with `no_result = TRUE`.
get_job_status <- function(base_url, api_key, job_id, ..., verbose = FALSE) {
  url <- glue::glue("{base_url}/api/jobs/{job_id}")
  # Bug fix: `verbose` was accepted but never forwarded to redash_request().
  result <- redash_request("GET", url, api_key, ..., verbose = verbose)
  if (result$job$status == 4L) {
    if (result$job$error %in% IGNORE_ERRORS) {
      # treat the job as success
      result$job$status <- 3L
      result$job$no_result <- TRUE
    } else {
      # Bug fix: `call. = FALSE` was previously passed to glue::glue()
      # (where it merely defined an unused interpolation variable) instead
      # of to stop(), so errors still carried the call in their message.
      stop(glue::glue("Query failed: {result$job$error}"), call. = FALSE)
    }
  }
  result$job
}

# Download the result of a finished query as CSV (parsed by httr::content).
get_result <- function(base_url, api_key, query_id, query_result_id, ...,
                       verbose = FALSE) {
  url <- glue::glue("{base_url}/api/queries/{query_id}/results/{query_result_id}.csv")
  redash_request("GET", url, api_key, ..., verbose = verbose)
}

# List the data source types supported by the Redash server.
# S3 generic dispatching on the first argument: either a RedashConnection
# object or a base URL string followed by the API key.
#' @export
get_supported_data_sources <- function(...)
  UseMethod("get_supported_data_sources")

#' @export
get_supported_data_sources.RedashConnection <- function(conn, ..., verbose = FALSE) {
  get_supported_data_sources(conn@base_url, conn@api_key, ..., verbose = verbose)
}

#' @export
get_supported_data_sources.default <- function(base_url, api_key, ..., verbose = FALSE) {
  url <- glue::glue("{base_url}/api/data_sources/types")
  res <- redash_request("GET", url, api_key, ..., verbose = verbose)
  data.frame(
    name = vapply(res, getElement, name = "name", FUN.VALUE = character(1L)),
    type = vapply(res, getElement, name = "type", FUN.VALUE = character(1L)),
    stringsAsFactors = FALSE
  )
}

# Null-coalescing operator: return `y` when `x` is NULL, otherwise `x`.
`%||%` <- function (x, y) if (is.null(x)) y else x
a3c0fec25823e8b1fa34b2c3b9d5f87431557e23
b9b158361d14bdf8a9b6965ad07a5119164cfcde
/cachematrix.R
4cfe02ff724adcfd03dbaeb7807b9c0528b0e017
[]
no_license
dhrubaraj/MyProgrammingAssignment2
b2fa1ae787af26423d3c1ecfaaf795c9b2210ae0
e94d4e609797c50af51365c5ae48805418ccb2e0
refs/heads/master
2021-08-28T12:04:04.101571
2017-12-12T06:07:17
2017-12-12T06:07:17
113,951,796
0
0
null
null
null
null
UTF-8
R
false
false
1,132
r
cachematrix.R
# makeCacheMatrix wraps a matrix together with a cache for its inverse.
# The returned list exposes four closures sharing the same environment:
#   set / get             -- store / retrieve the matrix
#   setinverse/getinverse -- store / retrieve the cached inverse
# Replacing the matrix via set() invalidates any cached inverse.
makeCacheMatrix <- function(x = matrix()) {
  cached_inverse <- NULL
  list(
    set = function(y) {
      x <<- y
      cached_inverse <<- NULL  # matrix changed, so the old inverse is stale
    },
    get = function() x,
    setinverse = function(inverse) cached_inverse <<- inverse,
    getinverse = function() cached_inverse
  )
}

# cacheSolve returns the inverse of the matrix held in a makeCacheMatrix
# object. On the first call the inverse is computed with solve() and stored;
# subsequent calls return the cached value (announced via a message) instead
# of recomputing, saving processing time. The matrix is assumed invertible.
cacheSolve <- function(x, ...) {
  cached <- x$getinverse()
  if (!is.null(cached)) {
    message("getting data from cached.")
    return(cached)
  }
  fresh <- solve(x$get())
  x$setinverse(fresh)
  fresh
}
6abd3b8d630852a916059b6a21163571f8679654
ba7ddc3dd7e622a0341c38aa4dae5873cefb45d1
/plot_h_up_backlog.R
5b6c56973c54355897c71292e0776d9091d693e0
[]
no_license
paulnikolaus/StatNC-fBm-Extension
7003784958a77e7953e78ded433defdece02a55c
661801e578455532e51a989fbacae6d29cf885af
refs/heads/master
2021-06-13T01:44:51.739924
2021-03-20T14:36:41
2021-03-20T14:36:41
146,993,500
1
0
null
null
null
null
UTF-8
R
false
false
2,844
r
plot_h_up_backlog.R
##### plot_h_up_development.R #####
## Computes and plots the StatNC backlog bound as a function of the
## estimated Hurst parameter, compared against the SNC bound at the true
## Hurst value. Depends on backlog_bound() from Bound.R.
library("reshape2") # melt
library("ggplot2")
source("Bound.R")

# Show development of the backlog bound for different H_ups
#' @param true_hurst Value of the true hurst parameter
#' @return data frame of mean of h_up for different sample sizes
#'   (written to results/backlog_development_h_true=<h>.csv; the first row
#'   holds the SNC bound at the true Hurst value, the rest StatNC bounds)
backlog_development <- function(true_hurst = 0.7) {
  # Hurst estimates produced by a previous run (second CSV column).
  sample_h_ups <- read.csv(
    file = paste0("results/h_up_development_h_true=", true_hurst, ".csv"),
    header = T
  )
  h_ups <- sample_h_ups[, 2]
  backlog_bound_stat <- rep(0.0, length(h_ups))
  # StatNC bound for each estimated Hurst parameter.
  for (i in 1:length(h_ups)) {
    backlog_bound_stat[i] <- backlog_bound(
      time_n = 200, std_dev = 1.0, hurst = h_ups[i], arrival_rate = 10**(-2),
      server_rate = 1.5 * 10**(-2), epsilon = 1 / 500, splits = 10,
      conflevel = 0.999, use_stat_nc = TRUE
    )
  }
  # Prepend the SNC bound evaluated at the true Hurst parameter.
  h_ups <- c(true_hurst, h_ups)
  backlog_bound_stat <- c(backlog_bound(
    time_n = 200, std_dev = 1.0, hurst = true_hurst, arrival_rate = 10**(-2),
    server_rate = 1.5 * 10**(-2), epsilon = 1 / 500, splits = 10,
    conflevel = 0.999, use_stat_nc = FALSE
  ), backlog_bound_stat)
  backlog_bounds <- as.data.frame(cbind(h_ups, backlog_bound_stat))
  write.csv(backlog_bounds,
            file = paste0("results/backlog_development_h_true=", true_hurst, ".csv"),
            row.names = FALSE
  )
}

# Read the CSV written by backlog_development() and build the ggplot:
# StatNC bound as a line over estimated Hurst values (x-axis reversed),
# with a horizontal line marking the SNC bound from the first CSV row.
plot_backlog_develop <- function(true_hurst = 0.7) {
  backlog_df <- read.csv(file = paste0(
    "results/backlog_development_h_true=", true_hurst, ".csv"
  ))
  true_backlog <- backlog_df[1, 2]   # SNC bound (first row)
  backlog_df <- backlog_df[-1, ]     # remaining rows: StatNC bounds
  colnames(backlog_df) <- c(
    "H_up",
    "BacklogBound"
  )
  # Long format for ggplot; only one series ("BacklogBound") here.
  long_df <- melt(backlog_df,
                  id = "H_up", variable.name = "type",
                  value.name = "BacklogBound"
  )
  p <- ggplot(long_df, aes(
    x = H_up, y = BacklogBound,
    group = type
  )) +
    geom_line(aes(color = type, linetype = type), size = 0.8) +
    geom_point(aes(color = type, shape = type), size = 2.8) +
    scale_x_reverse() +
    scale_linetype_manual(values = "dotdash") +
    scale_color_manual(values = "blue") +
    scale_shape_manual(values = 20) +
    # ylim(0.67, 0.8) +
    geom_hline(yintercept = true_backlog, linetype = "solid") +
    geom_label(aes(
      x = 0.76, y = mean(backlog_df[, 2]),
      label = "StatNC Backlog Bound"
    ),
    fill = "white", size = 5
    ) +
    geom_label(aes(
      x = 0.75, y = true_backlog * 0.95,
      label = "SNC Backlog Bound"
    ),
    fill = "white", size = 5
    ) +
    theme_bw(base_size = 19) +
    theme(legend.position = "none") +
    xlab("Estimated Hurst Parameter") +
    ylab("Backlog Bound") +
    theme(legend.title = element_blank())
  return(p)
}

# print(backlog_development(true_hurst = 0.7))
## NOTE(review): ggsave() is called *before* the plot below is printed, so it
## saves ggplot2's last_plot() from the current session, and dev.off() then
## closes whatever device print() opened -- confirm the intended call order.
ggsave("results/backlog_development.pdf",
       width = 8, height = 5,
       device = cairo_pdf
)
print(plot_backlog_develop(true_hurst = 0.7))
dev.off()
db38335594b6aae77c27d7b712247f162f672d60
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
/data/genthat_extracted_code/ElemStatLearn/examples/orange4.test.Rd.R
4a7d3b0f5b1d0619436b0b83d7a86976e8130fca
[]
no_license
surayaaramli/typeRrh
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
66e6996f31961bc8b9aafe1a6a6098327b66bf71
refs/heads/master
2023-05-05T04:05:31.617869
2019-04-25T22:10:06
2019-04-25T22:10:06
null
0
0
null
null
null
null
UTF-8
R
false
false
169
r
orange4.test.Rd.R
## Auto-extracted example for the `orange4.test` data set shipped with the
## ElemStatLearn package (Elements of Statistical Learning companion data).
library(ElemStatLearn)

### Name: orange4.test
### Title: Simulated Orange Data
### Aliases: orange4.test
### Keywords: datasets

### ** Examples

# Display the structure of the simulated orange test data set.
str(orange4.test)
601fdc19070751b79871367dacf5842e35eeddbb
29e424922b3d0e510736f71314795500e8e99341
/05_simulation_5_20180625.R
d5c170ce35c0444b53f8d91fc5b98d4cb6051a8a
[]
no_license
holgersr/Bayesian-inference-and-simulation-of-Elo-scores-in-analysis-of-social-behaviour
f9e31801820658500b0f679d16ec40c3991c1b9b
2a0c67188c8c1be98b7ad65de83590cbd9150022
refs/heads/master
2021-09-19T09:20:20.299115
2018-06-25T06:41:06
2018-06-25T06:41:06
95,112,337
2
0
null
null
null
null
UTF-8
R
false
false
7,767
r
05_simulation_5_20180625.R
## #############################################################
## Simulation for an unbalanced sample in which the probability
## of being observed is proportional to the underlying strength.
## Fits a Bayesian Elo model with Stan, summarises posterior Elo
## trajectories, and plots trajectories plus rank-recovery error.
## NOTE(review): `today`, simulate_unbalanced_data(), pAfun() and the
## attached packages (viridis, rstan) are assumed to be provided by a
## previously-sourced setup script -- confirm before running standalone.
## #############################################################
set.seed(1604)
## True latent strengths; period 2 swaps neighbouring pairs so the
## hierarchy changes halfway through the interaction sequence.
starting_scores1 <- seq(-6, 6, length = 10)
starting_scores2 <- starting_scores1[c(2, 1, 4, 3, 6, 5, 8, 7, 10, 9)]
## Colour palettes (with / without transparency) for the 10 individuals.
cols <- viridis(n = length(starting_scores1), option = "A", begin = 0, end = 0.8, alpha = 0.5)
cols_without_alpha <- viridis(n = length(starting_scores1), option = "A", begin = 0, end = 0.8)
## MCMC settings.
chains <- 1; thin <- 1; iter <- 1500; warmup <- 500
iter_used <- iter - warmup
period_length <- 2000
repeats <- 2
m_list <- X_sim_list <- vector("list", repeats)
set.seed(1604)
for (rep_index in 1:repeats) {
  ## Simulate two periods of interactions (second with swapped strengths)
  ## and stack them into one interaction matrix (+1 winner, -1 loser).
  X_sim1 <- simulate_unbalanced_data(strength = starting_scores1, n_interaction = period_length)
  X_sim2 <- simulate_unbalanced_data(strength = starting_scores2, n_interaction = period_length)
  X_sim <- rbind(X_sim1, X_sim2)
  X_sim_list[[rep_index]] <- X_sim
  ## Winner (Ai) and loser (Bi) column index per interaction row.
  Ai <- apply(X_sim, MAR = 1, FUN = function(x){which(x == 1)})
  Bi <- apply(X_sim, MAR = 1, FUN = function(x){which(x == -1)})
  fit_dat <- list(N = nrow(X_sim), K = ncol(X_sim), Ai = Ai, Bi = Bi,
                  y = rep(1, nrow(X_sim)), diff_f = 1, presence = 1 + 0 * X_sim)
  ## Comment out following command to save time (after first time having saved the results):
  m_list[[rep_index]] <- stan(file = 'elo_score_USE_THIS.stan', data = fit_dat,
                              iter = iter*thin, chains = chains, thin = thin,
                              warmup = warmup*thin, control = list(adapt_delta = 0.95))
}
save(m_list, file = paste0("sim5_m_list", today, ".RData"))
## This will save time:
load(file = paste0("sim5_m_list", today, ".RData"))

## Plot results:

## Compute the Elo trajectory and win probabilities implied by one posterior
## draw. `EloStart_logk` holds the starting scores with log(k) appended as
## its last element; `X` is the interaction matrix; `presence` flags who is
## present at each interaction. Uses pAfun() (defined elsewhere).
Elo_pA <- function(EloStart_logk, X, show_k = FALSE, presence){
  EloStart <- EloStart_logk[-length(EloStart_logk)]
  k <- exp(EloStart_logk[length(EloStart_logk)])
  if (show_k) {
    ## Overwrite the same console line with the current k value.
    cat(paste0(round(k, 3), paste(rep(" ", 20), collapse = " ")))
    cat("\r")
  }
  EloNow <- EloStart
  Elo <- matrix(nrow = nrow(X), ncol = length(EloStart), 0)
  colnames(Elo) <- colnames(X)
  pA <- rep(0, nrow(X))
  for (i in 1:nrow(X)) {
    A <- which(X[i, ] == 1)
    B <- which(X[i, ] == -1)
    ## Centre the scores on the individuals present at this interaction.
    EloNow <- EloNow - mean(EloNow[which(presence[i, ] == 1)])
    pA[i] <- pAfun(EloA = EloNow[A], EloB = EloNow[B])
    ## Standard Elo update: transfer (1 - pA) * k from loser to winner.
    toAdd <- (1 - pA[i]) * k
    EloNow[A] <- EloNow[A] + toAdd
    EloNow[B] <- EloNow[B] - toAdd
    Elo[i, ] <- EloNow
  }
  return(list(pA = pA, Elo = Elo))
}

## Comment out (until including 'save(...)'-commands to save time (after first time having saved the results)):
## Posterior 2.5% / 50% / 97.5% quantiles of every individual's Elo trajectory.
lower1_list <- upper1_list <- med_list <- vector("list", repeats)
for (rep_index in 1:repeats) {
  X_sim <- X_sim_list[[rep_index]]
  draws_starting_scores <- extract(m_list[[rep_index]])[[1]]
  draws_k <- extract(m_list[[rep_index]])[[2]]
  lower1_list[[rep_index]] <- upper1_list[[rep_index]] <- med_list[[rep_index]] <- vector("list", ncol(X_sim))
  for (j in 1:ncol(X_sim)) {
    cat(paste0(j, ":\n"))
    ## One full Elo trajectory matrix per posterior draw.
    elo_list <- vector("list", nrow(draws_starting_scores))
    for (i in 1:nrow(draws_starting_scores)) {
      elo_list[[i]] <- Elo_pA(EloStart_logk = c(draws_starting_scores[i, ], log(draws_k[i])),
                              X = X_sim, presence = 1 + 0*X_sim)$Elo
      ## progress tracker:
      cat(".")
      if ((i %% 80) == 0) {
        cat(".\n")
      }
    }
    cat("\n")
    ## Collect individual j's trajectory across all draws, one column per draw.
    aux <- NULL
    for (i in 1:length(elo_list)) {
      aux <- cbind(aux, elo_list[[i]][, j])
    }
    aux <- as.matrix(apply(aux, MAR = 1, FUN = quantile, probs = c(0.025, 0.5, 0.975)))
    ## NOTE(review): aux rows are (2.5%, 50%, 97.5%); "upper1" receives the
    ## 2.5% row and "lower1" the 97.5% row. The inversion is used
    ## consistently in the plotting below, but worth confirming.
    med_list[[rep_index]][[j]] <- aux[2, ]
    upper1_list[[rep_index]][[j]] <- aux[1, ]
    lower1_list[[rep_index]][[j]] <- aux[3, ]
  }
  cat("\n")
}
save(med_list, file = paste0("sim5_med_list", today, ".RData"))
save(upper1_list, file = paste0("sim5_upper1_list", today, ".RData"))
save(lower1_list, file = paste0("sim5_lower1_list", today, ".RData"))
## This will save time:
# load(file = paste0("sim5_med_list", today, ".RData"))
# load(file = paste0("sim5_upper1_list", today, ".RData"))
# load(file = paste0("sim5_lower1_list", today, ".RData"))

## 2x2 panel figure: Elo trajectories (top) and rank error (bottom),
## one column per simulation replicate.
pdf(paste0("simulation5_", today, ".pdf"), height = 5, width = 7)
layout(mat = matrix(nrow = 2, ncol = 2, c(1, 3, 2, 4), byrow = T),
       heights = c(0.65, 0.35), width = c(0.5, 0.5))
par(mar = c(4, 4, 1, 1))
for (rep_index in 1:repeats) {
  ## NOTE(review): `elo_list` here is leftover state from the last iteration
  ## of the summary loop above (last replicate, last individual); only its
  ## column 1 length is used to set up an empty canvas, so this appears
  ## harmless, but `elo_sim` is not per-replicate data -- confirm.
  elo_sim <- elo_list[[rep_index]]
  X_sim <- X_sim_list[[rep_index]]
  range_elo <- range(c(unlist(lower1_list[[rep_index]])), c(unlist(upper1_list[[rep_index]])))
  ## Empty canvas; trajectories and credible ribbons are drawn below.
  plot(elo_sim[, 1], type = "n", bty = "n", xlab = "", ylab = "", yaxt = "n",
       ylim = range_elo, xaxt = "n", xlim = c(-100, 4100))
  draws_k <- extract(m_list[[rep_index]])[[2]]
  main_title <- paste("k: ", round(mean(draws_k), 2),
                      ", with 95% CI: [", round(quantile(draws_k, probs = 0.025), 2),
                      ", ", round(quantile(draws_k, probs = 0.975), 2), "]", sep = "")
  title(main = main_title)
  for (i in 1:ncol(X_sim)) {
    lines(1:4000, med_list[[rep_index]][[i]], col = 1, lwd = 0.5, lty = 1)
  }
  mtext(side = 1, text = "Interaction index", line = 2, cex = 1)
  mtext(side = 2, text = "Elo-rating", line = 2, cex = 1)
  axis(1, lab = FALSE, at = seq(0, 4000, by = 1000))
  axis(2, lab = FALSE, at = seq(-5, 5, by = 5))
  mtext(side = 1, at = seq(0, 4000, by = 1000), text = seq(0, 4000, by = 1000), cex = 1, line = 0.7)
  mtext(side = 2, at = seq(-5, 5, by = 5), text = seq(-5, 5, by = 5), cex = 1, line = 0.7, las = 2)
  ## Dashed vertical line at the period switch (interaction 2000/2001).
  lines(c(2000.5, 2000.5), range(starting_scores2), lty = 2)
  ## True strengths (dots) plus individual letter labels at both ends.
  points(rep(1, 10), starting_scores1, col = cols, pch = 16)
  points(rep(-139, 10), starting_scores1, col = cols_without_alpha, pch = rev(LETTERS[1:10])[order(starting_scores1)])
  points(rep(4000, 10), starting_scores2, col = cols, pch = 16)
  points(rep(4140, 10), starting_scores1, col = cols_without_alpha[order(starting_scores2)], pch = rev(LETTERS[1:10])[order(starting_scores2)])
  pres_here <- 1:4000
  ## 95% credible ribbon plus posterior median line per individual.
  for (j in 1:10) {
    polygon(c(pres_here, rev(pres_here)),
            c(lower1_list[[rep_index]][[j]], rev(upper1_list[[rep_index]][[j]])),
            col = cols[j], border = NA)
    lines(pres_here, med_list[[rep_index]][[j]], col = cols_without_alpha[j], lwd = 1, lty = 1)
  }
  ## Calculate recovery of true underlying 'hierarchy':
  elo_2nd_period <- est_hierarchy <- error <- error_corrected_for_sd <- elo_sim[1:4000, ]
  ## Period 1 truth (interactions 1..2000).
  truth <- order(starting_scores1)
  for (i in 1:2000) {
    est_hierarchy[i, ] <- order(elo_2nd_period[i, ])
    error[i, ] <- error_corrected_for_sd[i, ] <- est_hierarchy[i, ] - truth
    ## "Corrected" error ignores rank mistakes smaller than 3 positions.
    error_corrected_for_sd[i, which(abs(error[i, ]) < 3)] <- 0
  }
  ## Period 2 truth (interactions 2001..4000, swapped hierarchy).
  truth <- order(starting_scores2)
  for (i in 2001:4000) {
    est_hierarchy[i, ] <- order(elo_2nd_period[i, ])
    error[i, ] <- error_corrected_for_sd[i, ] <- est_hierarchy[i, ] - truth
    error_corrected_for_sd[i, which(abs(error[i, ]) < 3)] <- 0
  }
  sum_abs_error <- apply(error, MAR = 1, FUN = function(x){sum(abs(x))})
  sum_abs_error_corrected_for_sd <- apply(error_corrected_for_sd, MAR = 1, FUN = function(x){sum(abs(x))})
  ## Mean absolute rank error over time (step plot, lower panel).
  plot(1:4000, sum_abs_error/length(truth), type = "s", bty = "n", ylim = c(0, 1.2),
       xlab = "Interaction index", ylab = "Ranks MAE", yaxt = "n", lwd = 1)
  axis(2, las = 2)
}
dev.off()
## Windows-only: open the generated PDF.
shell.exec(paste0("simulation5_", today, ".pdf"))
7aa97b44bae2258c01a4aa1dab98d5d6993be9ea
400b384715f5f02ef43118f792d9eb73de314b2b
/man/min_dist.Rd
fa79752135a4296fc3d5c6c36f8587c841d2de85
[]
no_license
yassato/rNeighborQTL
8d250721fee767f96dd115324321a1c18ba67e1c
f3c54151f794fa0c9c213c9f0834b8e51aa4d6a4
refs/heads/master
2023-04-28T02:43:01.742030
2021-05-11T08:06:03
2021-05-11T08:06:03
253,141,548
0
0
null
null
null
null
UTF-8
R
false
true
899
rd
min_dist.Rd
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/min_dist.R
\name{min_dist}
\alias{min_dist}
\title{Calculating the minimum distance}
\usage{
min_dist(smap, grouping = rep(1, nrow(smap)))
}
\arguments{
\item{smap}{A matrix showing a spatial map. The first and second column include spatial points along an x-axis and y-axis, respectively.}

\item{grouping}{An integer vector assigning each individual to a group. This argument can be useful when a "smap" contains different experimental replicates. The default setting means that all individuals belong to a single group.}
}
\value{
Return a scalar of the minimum Euclidean distance that allows all individuals to have at least one neighbor.
}
\description{
A function to calculate a Euclidean distance including at least one neighbor for all individuals.
}
\author{
Yasuhiro Sato (\email{sato.yasuhiro.36c@kyoto-u.jp})
}
5332b9ef4ccc900c07000cbf718a6085db7f39c9
2d9379912deaa5a698a2dea503dcd0e21b3e0086
/First_Course.R
5d475053ebc61d14ef2307e9bb38d5a353bdd4c9
[]
no_license
pipinho13/R_Specialization
ad4045b35fcc41dde9a503b9985cd452524c4b67
8f0fb5823880dd6d8b9d838494d63f9932e33492
refs/heads/master
2021-01-12T05:33:57.857817
2017-02-12T21:21:23
2017-02-12T21:21:23
77,130,599
0
0
null
null
null
null
UTF-8
R
false
false
15,713
r
First_Course.R
datafile <- file.path(datapath, "urban.csv.gz") urban<-read_csv(datafile) urban<-read_csv(datafile, col_types='cccdc') urban<-read_csv(datafile, col_types='cccd-') urban<-read_csv(datafile, col_types='cccd-', n_max=100) object.size(plants) wc_4 <- worldcup %>% select(Time, Passes, Tackles, Saves) %>% summarize(Time = mean(Time), Passes = mean(Passes), Tackles = mean(Tackles), Saves = mean(Saves)) %>% gather(var, mean) %>%mutate(mean=round(mean,1)) # | In this question, you will again continue to build on the data cleaning you started in the last two questions. I opened a new R script for you # | with the previous steps completed. As a next step, you now need to create a new column called agecat that divides a person's age into three broad # | categories (Under 15, 15 to 50, Over 50). To do this, add a function from `dplyr` or `tidyr` to the pipe chain in the script. Your goal is to # | re-create the example output shown in the comments of the script. When you are ready to submit your script, save the script and type `submit()`, # | or type `reset()` to reset the script to its original state. 
titanic_4 <- titanic %>% select(Survived, Pclass, Age, Sex) %>% filter(!is.na(Age)) %>% mutate(agecat = cut(Age, breaks = c(0, 14.99, 50, 150), include.lowest = TRUE, labels = c("Under 15", "15 to 50", "Over 50"))) %>%group_by(Pclass, agecat, Sex)%>% summarise(N=n(), survivors=sum(Survived==1))%>%mutate(perc_survived=100*survivors/N)%>%ungroup() ####################### ### Regular Expressions ####################### paste("Square", "Circle", "Triangle") paste("Square", "Circle", "Triangle", sep = "+") paste0("Square", "Circle", "Triangle") shapes <- c("Square", "Circle", "Triangle") paste("My favorite shape is a", shapes) two_cities <- c("best", "worst") paste("It was the", two_cities, "of times.") # You can also collapse all of the elements of a # vector of strings into a single string by specifying the collapse argument: paste(shapes, collapse = " ") nchar("Supercalifragilisticexpialidocious") cases <- c("CAPS", "low", "Title") tolower(cases) toupper(cases) regular_expression <- "a" string_to_search <- "Maryland" grepl(regular_expression, string_to_search) regular_expression <- "u" string_to_search <- "Maryland" grepl(regular_expression, string_to_search) grepl("land", "Maryland") grepl("ryla", "Maryland") grepl("Marly", "Maryland") grepl("dany", "Maryland") head(state.name) # "." # The first metacharacter that we’ll discuss is ".". The # metacharacter that only consists of a period represents any character other than a new line # (we’ll discuss new lines soon). Let’s take a look at some examples using the peroid regex: grepl(".", "Maryland") #[1] TRUE grepl(".", "*&2[0+,%<@#~|}") #[1] TRUE grepl(".", "") #[1] FALSE grepl("a.b", c("aaa", "aab", "abb", "acadb")) # [1] FALSE TRUE TRUE TRUE # You can specify a regular expression that contains a certain number of characters or # metacharacters using the enumeration metacharacters. 
# The + metacharacter indicates that one or more of the preceding expression should be present # The * indicates that zero or more of the preceding expression is present. Let’s take a look at some examples using these metacharacters: # Does "Maryland" contain one or more of "a" ? grepl("a+", "Maryland") ##[1] TRUE # Does "Maryland" contain one or more of "x" ? grepl("x+", "Maryland") #[1] FALSE # Does "Maryland" contain zero or more of "x" ? grepl("x*", "Maryland") #[1] TRUE # You can also specify exact numbers of expressions using curly brackets {}. For example # "a{5}" specifies “a exactly five times,” # "a{2,5}" specifies “a between 2 and 5 times,” and # "a{2,}" specifies “a at least 2 times.” Let’s take a look at some examples: # Does "Mississippi" contain exactly 2 adjacent "s" ? grepl("s{2}", "Mississippi") #[1] TRUE # This is equivalent to the expression above: grepl("ss", "Mississippi") #[1] TRUE # Does "Mississippi" contain between 2 and 3 adjacent "s" ? grepl("s{2,3}", "Mississippi") #[1] TRUE # Does "Mississippi" contain between 2 and 3 adjacent "i" ? grepl("i{2,3}", "Mississippi") #[1] FALSE # Does "Mississippi" contain between 2 adjacent "iss" ? grepl("(iss){2}", "Mississippi") #[1] TRUE # Does "Mississippi" contain between 2 adjacent "ss" ? grepl("(ss){2}", "Mississippi") #[1] FALSE # Does "Mississippi" contain the pattern of an "i" followed by # 2 of any character, with that pattern repeated three times adjacently? grepl("(i.{2}){3}", "Mississippi") #[1] TRUE # In the last three examples I used parentheses () to create a capturing group. A capturing # group allows you to use quantifiers on other regular expressions. In the last example I first # created the regex "i.{2}" which matches i followed by any two characters (“iss” or “ipp”). # I then used a capture group to to wrap that regex, and to specify exactly three adjacent # occurrences of that regex. 
# You can specify sets of characters with regular expressions, some of which come built in, # but you can build your own character sets too. First we’ll discuss the built in character sets: # words ("\\w"), digits ("\\d"), and whitespace characters ("\\s"). Words specify any letter, # digit, or a underscore, digits specify the digits 0 through 9, and whitespace specifies line # breaks, tabs, or spaces. Each of these character sets have their own compliments: not words # ("\\W"), not digits ("\\D"), and not whitespace characters ("\\S"). Each specifies all of the # characters not included in their corresponding character sets. grepl("\\w", "abcdefghijklmnopqrstuvwxyz0123456789") #[1] TRUE grepl("\\d", "0123456789") #[1] TRUE # "\n" this regex for a new line and "\t" is the regex for a tab grepl("\\s", "\n\t ") #[1] TRUE grepl("\\d", "abcdefghijklmnopqrstuvwxyz") #[1] FALSE grepl("\\D", "abcdefghijklmnopqrstuvwxyz") #[1] TRUE grepl("\\w", "\n\t ") #[1] FALSE # You can also specify specific character sets using straight brackets []. For example a character # set of just the vowels would look like: "[aeiou]". You can find the complement to a specific # character by putting a carrot ˆ after the first bracket. For example "[ˆaeiou]" matches all # characters except the lowercase vowels. You can also specify ranges of characters using a # hyphen - inside of the brackets. For example "[a-m]" matches all of the lowercase characters # between a and m, while "[5-8]" matches any digit between 5 and 8 inclusive. Let’s take a look # at some examples using custom character sets: grepl("[aeiou]", "rhythms") #[1] FALSE grepl("[^aeiou]", "rhythms") #[1] TRUE grepl("[a-m]", "xyz") #[1] FALSE grepl("[a-m]", "ABC") #[1] FALSE grepl("[a-mA-M]", "ABC") #[1] TRUE # Putting two # backslashes before a punctuation mark that is also a metacharacter indicates that you are # looking for the symbol and not the metacharacter meaning. For example "\\." 
indicates # you are trying to match a period in a string. Let’s take a look at a few examples: grepl("\\+", "tragedy + time = humor") #[1] TRUE grepl("\\.", "http://www.jhsph.edu/") #[1] TRUE # There are also metacharacters for matching the beginning and the end of a string which # are "ˆ" and "$" respectively. Let’s take a look at a few examples: grepl("^a", c("bab", "aab")) #[1] FALSE TRUE grepl("b$", c("bab", "aab")) #[1] TRUE TRUE grepl("^[ab]+$", c("bab", "aab", "abc")) #[1] TRUE TRUE FALSE # The last metacharacter we’ll discuss is the OR metacharacter ("|"). The OR metacharacter # matches either the regex on the left or the regex on the right side of this character. A few # examples: grepl("a|b", c("abc", "bcd", "cde")) #[1] TRUE TRUE FALSE grepl("North|South", c("South Dakota", "North Carolina", "West Virginia")) #[1] TRUE TRUE FALSE start_end_vowel <- "^[AEIOU]{1}.+[aeiou]{1}$" vowel_state_lgl <- grepl(start_end_vowel, state.name) head(vowel_state_lgl) #[1] TRUE TRUE TRUE FALSE FALSE FALSE state.name[vowel_state_lgl] #[1] "Alabama" "Alaska" "Arizona" "Idaho" "Indiana" "Iowa" #[7] "Ohio" "Oklahoma" # Metacharacter Meaning # . Any Character # \w A Word # \W Not a Word # \d A Digit # \D Not a Digit # \s Whitespace # \S Not Whitespace # [xyz] A Set of Characters # [ˆxyz] Negation of Set # [a-z] A Range of Characters # ˆ Beginning of String # $ End of String # \n Newline # + One or More of Previous # * Zero or More of Previous # ? Zero or One of Previous # Either the Previous or the Following # {5} Exactly 5 of Previous # {2, 5} Between 2 and 5 or Previous # {2, } More than 2 of Previous grepl("[Ii]", c("Hawaii", "Illinois", "Kentucky")) # [1] TRUE TRUE FALSE grep("[Ii]", c("Hawaii", "Illinois", "Kentucky")) # [1] 1 2 # The sub() function takes as arguments a regex, a “replacement,” and a vector of strings. This # function will replace the first instance of that regex found in each string. 
sub("[Ii]", "1", c("Hawaii", "Illinois", "Kentucky")) #[1] "Hawa1i" "1llinois" "Kentucky" # The gsub() function is nearly the same as sub() except it will replace every instance of the # regex that is matched in each string. gsub("[Ii]", "1", c("Hawaii", "Illinois", "Kentucky")) #[1] "Hawa11" "1ll1no1s" "Kentucky" # The strsplit() function will split up strings according to the provided regex. If strsplit() is # provided with a vector of strings it will return a list of string vectors. two_s <- state.name[grep("ss", state.name)] two_s [1] "Massachusetts" "Mississippi" "Missouri" "Tennessee" strsplit(two_s, "ss") [[1]] [1] "Ma" "achusetts" [[2]] [1] "Mi" "i" "ippi" [[3]] [1] "Mi" "ouri" [[4]] [1] "Tenne" "ee" # The "stringr" Package # The str_extract() function returns the sub-string of a string that matches the providied # regular expression. library(stringr) state_tbl <- paste(state.name, state.area, state.abb) head(state_tbl) # [1] "Alabama 51609 AL" "Alaska 589757 AK" "Arizona 113909 AZ" # [4] "Arkansas 53104 AR" "California 158693 CA" "Colorado 104247 CO" str_extract(state_tbl, "[0-9]+") # [1] "51609" "589757" "113909" "53104" "158693" "104247" "5009" # [8] "2057" "58560" "58876" "6450" "83557" "56400" "36291" # [15] "56290" "82264" "40395" "48523" "33215" "10577" "8257" # [22] "58216" "84068" "47716" "69686" "147138" "77227" "110540" # [29] "9304" "7836" "121666" "49576" "52586" "70665" "41222" # [36] "69919" "96981" "45333" "1214" "31055" "77047" "42244" # [43] "267339" "84916" "9609" "40815" "68192" "24181" "56154" # [50] "97914" # The str_order() function returns a numeric vector that corresponds to the alphabetical # order of the strings in the provided vector. 
head(state.name) # [1] "Alabama" "Alaska" "Arizona" "Arkansas" "California" # [6] "Colorado" str_order(state.name) # [1] 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 # [24] 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 # [47] 47 48 49 50 head(state.abb) # [1] "AL" "AK" "AZ" "AR" "CA" "CO" str_order(state.abb) # [1] 2 1 4 3 5 6 7 8 9 10 11 15 12 13 14 16 17 18 21 20 19 22 23 # [24] 25 24 26 33 34 27 29 30 31 28 32 35 36 37 38 39 40 41 42 43 44 46 45 # [47] 47 49 48 50 # The str_pad() function pads strings with other characters which is often useful when the # string is going to be eventually printed for a person to read. str_pad("Thai", width = 8, side = "left", pad = "-") #[1] "----Thai" str_pad("Thai", width = 8, side = "right", pad = "-") #[1] "Thai----" str_pad("Thai", width = 8, side = "both", pad = "-") #[1] "--Thai--" # The str_to_title() function acts just like tolower() and toupper() except it puts strings into # Title Case. cases <- c("CAPS", "low", "Title") str_to_title(cases) # [1] "Caps" "Low" "Title" # The str_trim() function deletes whitespace from both sides of a string. to_trim <- c(" space", "the ", " final frontier ") str_trim(to_trim) #[1] "space" "the" "final frontier" # The str_wrap() function inserts newlines in strings so that when the string is printed each # line’s length is limited. pasted_states <- paste(state.name[1:20], collapse = " ") cat(str_wrap(pasted_states, width = 80)) # Alabama Alaska Arizona Arkansas California Colorado Connecticut Delaware Florida # Georgia Hawaii Idaho Illinois Indiana Iowa Kansas Kentucky Louisiana Maine # Maryland cat(str_wrap(pasted_states, width = 30)) # Alabama Alaska Arizona # Arkansas California Colorado # Connecticut Delaware Florida # Georgia Hawaii Idaho Illinois # Indiana Iowa Kansas Kentucky # Louisiana Maine Maryland # The word() function allows you to index each word in a string as if it were a vector. 
a_tale <- "It was the best of times it was the worst of times it was the age of wisdom it was the ag\ e of foolishness" word(a_tale, 2) #[1] "was" word(a_tale, end = 3) #[1] "It was the" word(a_tale, start = 11, end = 15) #[1] "of times it was the" ###http://tidytextmining.com/ ### Memory library(pryr) mem_used() #72 MB # First, you might consider removing a few very large objects in your workspace. You can see # the memory usage of objects in your workspace by calling the object_size() function. # The object_size() function will print the number of bytes (or kilobytes, or megabytes) that a # given object is using in your R session. If you want see what the memory usage of the largest # 5 objects in your workspace is, you can use the following code. library(magrittr) sapply(ls(), function(x) object.size(get(x))) %>% sort %>% tail(5) # worldcup denver check_tracks ext_tracks miami # 61424 222768 239848 1842472 13121608 mem_change(rm(check_tracks, denver, b)) # The .Machine object in R (found in the base package) can give you specific details about how # your computer/operation system stores different types of data. 
# Inspect how this machine stores numeric types (largest integer, double
# precision, etc.).
str(.Machine)

# DBI quick reference -------------------------------------------------------
# Task                                                     DBI function
# Create a new driver object for a database instance       dbDriver
# Connect to database instance                             dbConnect
# Find available tables in a connected database instance   dbListTables
# Find available fields within a table                     dbListFields
# Query a connected database instance                      dbSendQuery
# Pull a data frame into R from a query result             dbFetch
# Jointly query and pull data from a database instance     dbGetQuery
# Close result set from a query                            dbClearResult
# Write a new table in a database instance                 dbWriteTable
# Remove a table from a database instance                  dbRemoveTable
# Disconnect from a database instance                      dbDisconnect

# WEEK 4 quiz ---------------------------------------------------------------

## Q1 -- mean Bromine PM2.5 LC in Wisconsin
# BUG FIX: the original read the data into `df2` but then piped `df`,
# which is undefined at this point; the data is now read into `df`.
df <- read_csv("daily_SPEC_2014.csv.bz2")
df %>%
  filter(`State Name` == "Wisconsin" &
           `Parameter Name` == "Bromine PM2.5 LC") %>%
  summarize(avg = mean(`Arithmetic Mean`, na.rm = TRUE))

## Q2 -- highest average level by parameter / state / site / date
df %>%
  group_by(`Parameter Name`, `State Name`, `Site Num`, `Date Local`) %>%
  summarize(avg = mean(`Arithmetic Mean`, na.rm = TRUE)) %>%
  ungroup() %>%
  arrange(desc(avg))

## Q3 -- site with the highest average Sulfate PM2.5 LC
df %>%
  filter(`Parameter Name` == "Sulfate PM2.5 LC") %>%
  group_by(`Site Num`, `County Code`, `State Code`) %>%
  summarise(avg = mean(`Arithmetic Mean`)) %>%
  ungroup() %>%
  arrange(desc(avg))

## Q4 -- difference in average EC PM2.5 LC TOR between California and Arizona
# NOTE(review): with two groups, diff(avg) has length 1 and is recycled by
# mutate(); `d` (avg - lag(avg)) is the intended state-to-state difference.
df %>%
  filter(`Parameter Name` == "EC PM2.5 LC TOR" &
           `State Name` %in% c("California", "Arizona")) %>%
  group_by(`State Name`) %>%
  summarise(avg = mean(`Arithmetic Mean`)) %>%
  ungroup() %>%
  arrange(desc(avg)) %>%
  mutate(ddd = lag(avg), d = avg - ddd, d2 = diff(avg))

## Q5 -- median OC PM2.5 LC TOR west of longitude -100
df %>%
  filter(`Parameter Name` == "OC PM2.5 LC TOR" & Longitude < -100) %>%
  summarise(median_value = median(`Arithmetic Mean`))

## Q6 -- number of unique residential suburban monitoring site numbers
df <- read_excel("aqs_sites.xlsx")
df %>%
  filter(`Land Use` == "RESIDENTIAL" & `Location Setting` == "SUBURBAN") %>%
  select(`Site Number`) %>%
  summarize(length(unique(`Site Number`)), n = n())
7e6b9e03c30157799e3f86175b5d01a1f23e2e2b
b64fdb45e11c5d8717727b89d1361cbfd3943f1e
/man/rel.Rd
9cfdbd688cba1a7a96c6d9e3eb99a998e60918b8
[]
no_license
garrettgman/ggplyr
4c11194432d96557a02cab69bb579f791e4105d5
34a2861f875a9d30949a6fed737b40b742d1fba8
refs/heads/master
2016-09-06T02:47:39.162635
2012-06-21T16:33:04
2012-06-21T16:33:04
4,385,779
21
5
null
2012-06-21T16:39:19
2012-05-20T16:24:04
R
UTF-8
R
false
false
674
rd
rel.Rd
\name{rel} \alias{rel} \title{Make a rel class object} \usage{ rel(x) } \arguments{ \item{x}{numeric the proportion of the relative width or height to use as the final width or height} } \value{ an object of class rel } \description{ rel class objects are used to specify the width and height of glyphs in \code{\link{glyph}} calls. The numeric component of the rel object specifies the proportion of the relative width or height to use for the final width or height. The relative width or height of a glyph is calculated at the time a plot is built for rendering. It depends on the number of glyphs in a plot and their placement within the plot. }
b508a3203865efd7a0b8a14000e258ce11dea245
a260ca9899db02c781baaf1e6bf80d335920f417
/server.R
317617a0a24ca02463cdaf8c181b251f186d0b05
[]
no_license
AlexisSem/DataProducts-Course-Project
7176972dd51fb24909f78fa4b31a7cd3895b204e
19d8f9c9d119cbecf6eae4412c99d357e503e203
refs/heads/master
2020-03-30T06:11:47.119127
2018-09-29T08:48:36
2018-09-29T08:48:36
150,844,467
0
0
null
null
null
null
UTF-8
R
false
false
1,334
r
server.R
#
# This is the server logic of a Shiny web application. You can run the
# application by clicking 'Run App' above.
#
# Given a starting city and a travel speed, it plots the travel time from
# that city to every other city in the built-in `eurodist` distance matrix.
#
# Find out more about building applications with Shiny here:
#
#    http://shiny.rstudio.com/
#

library(shiny)
library(reshape2)

# Reshape the symmetric distance matrix into a long data frame with one
# row per (origin, destination) pair and a `value` distance column.
citiesDistances <- melt(as.matrix(eurodist),
                        varnames = c("origin", "destination"))
citiesDistances$origin <- as.character(citiesDistances$origin)
citiesDistances$destination <- as.character(citiesDistances$destination)

# Define server logic required to calculate time needed and display the plot
shinyServer(function(input, output) {

  # Destination/distance pairs from the selected start city.
  # FIX: defined at server top level instead of inside renderPlot(),
  # where the original recreated the reactive on every render.
  timeByCities <- reactive({
    keep <- which(citiesDistances$origin == input$startCityInput &
                    citiesDistances$destination != input$startCityInput)
    citiesDistances[keep, ][, 2:3]
  })

  output$timePlot <- renderPlot({
    timeByCitiesOrdered <- timeByCities()[order(timeByCities()$value), ]

    # Wide left margin so the horizontal city labels fit.
    par(mar = c(5, 8, 5, 3))
    # FIX: removed the stray trailing comma the original left after
    # `las = 1`, which passed an empty argument into barplot().
    barplot(height = timeByCitiesOrdered$value / input$speedInput,
            names.arg = timeByCitiesOrdered$destination,
            horiz = TRUE,
            xlab = "Time Needed in hours",
            main = "Time to destination by cities",
            las = 1)
  })
})
b6dd75768777ace2d319c0917eb257420e7da46c
b75d2bf6815b9a7d7a12c5059d97d875c62998e7
/bin2dec_script.R
943d00ae58a19cfaf07c258b904bf5a3fa46c9eb
[]
no_license
AdrianBach/Uncertainty-workshop-Trondheim
4c23e5a47ffb66c3ab8edb5f9abbe2efaa369f9e
def318743d83742535bc51412e1b16318ef847b5
refs/heads/master
2022-02-07T06:19:18.143732
2019-08-05T16:07:16
2019-08-05T16:07:16
171,331,573
0
0
null
null
null
null
UTF-8
R
false
false
1,479
r
bin2dec_script.R
# Adrian Bach
# NINA uncertainty workshop - build a barplot of frequency of use of the
# different types of uncertainty representations in Conservation Policy
# Documents.
#
# Script to build the binary code associated with each observation and
# convert it into decimal to plot.

## Clear list
# NOTE(review): rm(list = ls()) and setwd() in scripts are discouraged;
# kept here to preserve the original script's behavior.
rm(list = ls())

## set work directory
setwd("/home/adrian/Documents/GitKraken/Workshop task")
getwd()

## packages

## personal functions

# Convert a binary vector into its decimal value.
# The first element is the least significant bit (weight 2^0), matching
# the original accumulation loop's convention.
#
# @param x numeric/integer vector of 0s and 1s
# @return a single number, the decimal value of x
bin2dec <- function(x) {
  # Vectorized replacement for the original element-by-element loop.
  sum(2^(seq_along(x) - 1) * x)
}

#### collide the dichotomies into a string, convert into a decimal,
#### and write them into the table ####

## import data set
d <- read.csv("uncert_repres_review1.csv", h = TRUE)

## loop
# column where the dichotomy assessment starts
st <- 6
# and ends
nd <- st + 5

# browse lines
for (i in seq_len(nrow(d))) {
  # the dichotomy columns of this row, as a plain 0/1 vector
  # (replaces the fragile bin_strg[j - (nd - st)] indexing of the original)
  bin_strg <- unlist(d[i, st:nd], use.names = FALSE)

  # binary code as a string, e.g. "010011"
  bin <- paste(bin_strg, collapse = "")

  # assign the binary string to the first new column
  d[i, nd + 1] <- bin

  # convert to decimal using the personal function and assign it to the
  # second new column
  d[i, nd + 2] <- bin2dec(bin_strg)
}

## column values as factors
for (i in 2:ncol(d)) {
  d[, i] <- as.factor(d[, i])
}

# export the new table
write.csv(d, file = "uncert_repres_review1_Rtreated.csv", na = "")
1cd52a975765f5364f690e7ca8cc53de6197e141
9e87d7b269fbd2adb83ca0b2302378b23cf47fd8
/init.R
fe8363ab4b0560da6e630612676155e55544498f
[]
no_license
alhostat/perinataldep2019
6d58d98481f84cb5be1ef889c414e982fb46db42
39aa44158b601bd7c846ce12ff72b308698e701f
refs/heads/master
2020-06-29T07:57:06.078272
2019-09-01T01:16:33
2019-09-01T01:16:33
200,479,916
0
0
null
null
null
null
UTF-8
R
false
false
482
r
init.R
library(tidyverse)
library(magrittr)
library(broom)

# DATA -------------------------------------------------------------------------
# Load the effect-size table and split it into one data frame per outcome type.
perinatal_data <- read.csv("Data/EffectSizes.csv")

depression <- perinatal_data %>% filter(type == "Depression")
anxiety    <- perinatal_data %>% filter(type == "Anxiety")
worry      <- perinatal_data %>% filter(type == "Worry")

# SOURCES ----------------------------------------------------------------------
# source("R/effect_sizes.R")
source("R/frequentist.R")
9d6d05ed1b6c98886dfda04b728fb316b5f4af06
2e73452c271ebbfd3e55e661fe887c3b30d75625
/cachematrix.R
3cef1b55403a346487472aa57a627a4747e63174
[]
no_license
OrwahAdayleh/ProgrammingAssignment2
63acd6ee4627b8012ff013065e1e4a165ed2fd5b
4ad68249e8bdbb102ff7105b882a075b8ea7f160
refs/heads/master
2022-12-16T04:17:36.451204
2020-09-09T05:38:31
2020-09-09T05:38:31
293,724,154
0
0
null
2020-09-08T06:42:19
2020-09-08T06:42:18
null
UTF-8
R
false
false
784
r
cachematrix.R
## Functions that wrap a matrix together with a cache for its inverse, so the
## (potentially expensive) inversion is only computed once.

# NOTE(review): MASS is not actually used below (base solve() computes the
# inverse); the library() call is kept from the original script.
library(MASS)

## makeCacheMatrix: build a special "matrix" object that stores a matrix and
## caches its inverse.
##
## Returns a list of four accessor functions:
##   set(y)             -- replace the stored matrix and invalidate the cache
##   get()              -- return the stored matrix
##   SetTheInverse(inv) -- store a computed inverse in the cache
##   GetTheInverse()    -- return the cached inverse (NULL if not yet computed)
makeCacheMatrix <- function(x = matrix()) {
  cachedInverse <- NULL

  # BUG FIX: the original defined this closure as `func1` but exported it in
  # the result list as `list(set = set, ...)`, so `set` was an undefined
  # object and calling makeCacheMatrix() failed.
  set <- function(y) {
    x <<- y
    cachedInverse <<- NULL  # new matrix, so any cached inverse is stale
  }
  get <- function() x
  SetTheInverse <- function(inverse) cachedInverse <<- inverse
  GetTheInverse <- function() cachedInverse

  list(set = set,
       get = get,
       SetTheInverse = SetTheInverse,
       GetTheInverse = GetTheInverse)
}

## cacheSolve: return the inverse of the special "matrix" made by
## makeCacheMatrix, computing it with solve() on the first call and serving
## the cached copy (with a message) on subsequent calls.
##
## @param x   object returned by makeCacheMatrix
## @param ... further arguments passed on to solve()
cacheSolve <- function(x, ...) {
  ## Return a matrix that is the inverse of 'x'
  inverse <- x$GetTheInverse()
  if (!is.null(inverse)) {
    message("getting cached data")
    return(inverse)
  }
  inverse <- solve(x$get(), ...)
  x$SetTheInverse(inverse)
  inverse
}
a80af381c99ac82e34c633efd9db06f0c0cf4788
e4602c3639d68b9d15c0a585723ca2d597875b18
/R/auditCommonFunctions.R
4a62c5b5a1d415e25c2897c47ddf68fc857b5ca2
[]
no_license
TimKDJ/jaspAudit
ba26e3b72b820eb7564445e16f04eca3d78377e3
de3d92f9a88f082ff75b61032e1a1c60421891b0
refs/heads/master
2022-12-07T12:00:37.331627
2020-08-18T07:46:07
2020-08-19T11:42:52
288,144,681
0
0
null
2020-08-17T09:59:49
2020-08-17T09:59:48
null
UTF-8
R
false
false
169,282
r
auditCommonFunctions.R
# # Copyright (C) 2013-2018 University of Amsterdam # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 2 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # # When making changes to this file always mention @koenderks as a # reviewer in the Pull Request ################################################################################ ################## The Audit Workflow ########################################## ################################################################################ .auditWorkflow <- function(options, jaspResults, type){ ### PROCEDURE STAGE ### .auditProcedureStage(options, jaspResults) ### PLANNING STAGE ### .auditPlanningStage(options, jaspResults, type, workflow = TRUE) ready <- .auditReadyForNextStage(options, jaspResults, stage = "planning") if(!ready) return() # Stop if "To Selection" is not pressed ### SELECTION STAGE ### .auditSelectionStage(options, jaspResults, workflow = TRUE) ### EXECUTION STAGE ### .auditExecutionStage(options, jaspResults) ready <- .auditReadyForNextStage(options, jaspResults, stage = "execution") if(!ready) return() # Stop if "To Evaluation" is not pressed ### EVALUATION STAGE ### .auditEvaluationStage(options, jaspResults, type, workflow = TRUE) ### CONCLUSION STAGE ### .auditConclusionStage(options, jaspResults) } ################################################################################ ################## The Separate Stages of the Audit Workflow ################### 
################################################################################ ##################################### ######### PROCEDURE STAGE ########### ##################################### .auditProcedureStage <- function(options, jaspResults){ # Extract the record number and book value columns dataset <- .auditReadDataset(options, jaspResults, stage = "procedure") # Check for errors due to incompatible options (variables) .auditErrorCheckInputOptions(options, dataset, analysisContainer = NULL, stage = "procedure") # Deduct the nessecary values from the input options planningOptions <- .auditInputOptions(options, dataset, jaspResults, stage = "planning", rawData = TRUE) # Create the procedure paragraph .auditExplanatoryText(options, planningOptions, stageContainer = NULL, stageState = NULL, jaspResults, stage = "procedure", positionInContainer = 1) # Create the audit risk model paragraph .auditRiskModelParagraph(options, jaspResults, position = 2) # --- TABLES .auditCreateTableNumber(jaspResults) # Initialize table numbers # Create a table containing descriptive statistics for the book values .auditBookValueDescriptiveTable(options, planningOptions, jaspResults, positionInContainer = 2) # --- PLOTS .auditCreateFigureNumber(jaspResults) # Initialize figure numbers # Create a plot of the population book values (if the user wants it) .auditBookValueDistributionPlot(dataset, options, planningOptions, jaspResults, positionInContainer = 3) } ##################################### ######### PLANNING STAGE ############ ##################################### .auditPlanningStage <- function(options, jaspResults, type, workflow){ if(workflow){ # Deduct the nessecary values from the input options planningOptions <- .auditInputOptions(options, dataset = NULL, jaspResults, stage = "planning", rawData = TRUE) } else if(!workflow){ .auditCreateTableNumber(jaspResults) # Initialize table numbers .auditCreateFigureNumber(jaspResults) # Initialize figure numbers # Deduct the 
nessecary values from the input options planningOptions <- .auditInputOptions(options, dataset = NULL, jaspResults, stage = "planning", rawData = FALSE) # Create the procedure paragraph .auditExplanatoryText(options, planningOptions, stageContainer = NULL, stageState = NULL, jaspResults, stage = "procedure", positionInContainer = 1) # Create the audit risk model paragraph .auditRiskModelParagraph(options, jaspResults, position = 2) } # Check if the options have valid values for running the analysis ready <- .auditReadyForAnalysis(options, planningOptions, stage = "planning") # Create the container that holds the planning output planningContainer <- .auditAnalysisContainer(jaspResults, stage = "planning", position = 3) # Perfrom early error checks .auditErrorCheckInputOptions(options, dataset = NULL, planningContainer, stage = "planning", type = NULL, ready, planningOptions) # Get the planning state if it exists, otherwise make one planningState <- .auditPlanningState(options, planningOptions, planningContainer, ready, type) # Create explanatory text for the planning .auditExplanatoryText(options, planningOptions, planningContainer, planningState, jaspResults, stage = "planning", positionInContainer = 1, type) # --- TABLES # Create the summary table .auditPlanningSummaryTable(options, planningOptions, planningState, planningContainer, jaspResults, ready, type, positionInContainer = 2) if(type == "bayesian"){ # Create the implicit sample table .auditImplicitSampleTable(options, planningState, planningContainer, jaspResults, ready, positionInContainer = 3) # Cerate the prior and posterior statistics table .auditPriorAndExpectedPosteriorStatisticsTable(options, planningState, planningContainer, jaspResults, ready, positionInContainer = 4) } # --- PLOTS # Create the sample size comparison plot .sampleSizeComparisonPlot(options, planningOptions, planningState, planningContainer, jaspResults, ready, type, positionInContainer = 5) if(type == "frequentist"){ # Create the 
implied sampling distribution plot .samplingDistributionPlot(options, planningOptions, planningState, planningContainer, jaspResults, ready, positionInContainer = 7) } else if(type == "bayesian"){ # Create the prior and expected posterior plot .auditPlanningPlotPrior(options, planningOptions, planningState, planningContainer, jaspResults, ready, positionInContainer = 7) } } ##################################### ######### SELECTION STAGE ########### ##################################### .auditSelectionStage <- function(options, jaspResults, workflow){ if(workflow){ # Create the container that holds the selection output selectionContainer <- .auditAnalysisContainer(jaspResults, stage = "selection-workflow", position = 4) # Read in additional variables dataset <- .auditAddSelectionColumns(options, jaspResults) # Import options and results from the planning stage selectionOptions <- .auditInputOptions(options, dataset, jaspResults, stage = "planning", rawData = TRUE) planningContainer <- jaspResults[["planningContainer"]] planningState <- planningContainer[["planningState"]]$object error <- .auditErrorCheckInputOptions(options, dataset, selectionContainer, stage = "selection-workflow") if(error) return() # Quit on errors if(is.null(planningState)) return() # Quit if no planning was done # Perform the sampling selectionState <- .auditSelectionState(dataset, options, planningState, selectionContainer) } else if(!workflow){ .auditCreateFigureNumber(jaspResults) # Initialize figure numbers .auditCreateTableNumber(jaspResults) # Initialize table numbers # Create a custom container for the selection analysis selectionContainer <- .auditAnalysisContainer(jaspResults, stage = "selection", position = 1) # Read in the relevant variables from the data set dataset <- .auditReadDataset(options, jaspResults, stage = "selection") # Check for errors due to incompatible options error <- .auditErrorCheckInputOptions(options, dataset, selectionContainer, stage = "selection") if(error) 
return() # Quit on errors options[["materiality"]] <- ifelse(options[["selectionType"]] == "musSampling", yes = "materialityAbsolute", no = "materialityRelative") # Deduce relevant quantities from input options selectionOptions <- .auditInputOptions(options, dataset, jaspResults, stage = "selection") # Create a planning state planningState <- .auditBackwardsState(options, stage = "selection") # Perform error checks .auditErrorCheckInputOptions(options, dataset, analysisContainer = NULL, stage = "procedure") # Perform the sampling selectionState <- .auditSampling(dataset, options, planningState, selectionContainer) # Add the sample indicator to the data .auditAddSelectionIndicator(options, selectionOptions, selectionState, jaspResults) } # Create explanatory text for the selection .auditExplanatoryText(options, selectionOptions, selectionContainer, selectionState, jaspResults, stage = "selection", positionInContainer = 1, prevState = planningState) # --- TABLES # Create a table containing information about the selection process .auditSelectionSummaryTable(options, selectionOptions, planningState, selectionState, selectionContainer, jaspResults, positionInContainer = 2) # Create a table containing descriptive statistics of the sample .auditSelectionDescriptivesTable(options, selectionState, selectionContainer, jaspResults, positionInContainer = 3) # Create a table displaying the selection .auditSelectionSampleTable(options, selectionState, selectionContainer, jaspResults, positionInContainer = 4) # --- PLOTS if(!workflow){ # Create a collection of plots comparing the population to the sample values .auditSelectionHistograms(options, dataset, selectionState, selectionContainer, jaspResults, positionInContainer = 5) } } ##################################### ######### EXECUTION STAGE ########### ##################################### .auditExecutionStage <- function(options, jaspResults){ if(options[["pasteVariables"]]){ # Add the two computed colums to the data set 
planningOptions <- .auditInputOptions(options, dataset = NULL, jaspResults, stage = "planning", rawData = TRUE) selectionState <- .auditSelectionState(dataset, options, jaspResults[["planningState"]], jaspResults[["selectionContainer"]]) selectionState <- data.frame(selectionState) dataset <- .readDataSetToEnd(columns.as.numeric = options[["recordNumberVariable"]]) sampleFilter <- rep(0, planningOptions[["populationSize"]]) rowNumber <- selectionState[["rowNumber"]] sampleFilter[rowNumber] <- selectionState[["count"]] sampleFilter <- as.numeric(sampleFilter) auditDataVariable <- rep(NA, planningOptions[["populationSize"]]) auditDataVariable[options[["performAudit"]][[1]]$rowIndices] <- options[["performAudit"]][[1]]$values if(is.null(jaspResults[["sampleFilter"]])) jaspResults[["sampleFilter"]] <- createJaspColumn(columnName = options[["sampleFilter"]], dependencies = "sampleFilter") if(is.null(jaspResults[["variableName"]])) jaspResults[["variableName"]] <- createJaspColumn(columnName = options[["variableName"]], dependencies = "variableName") jaspResults[["sampleFilter"]]$setScale(sampleFilter) jaspResults[["variableName"]]$setScale(auditDataVariable) } } ##################################### ######### EVALUATION STAGE ########## ##################################### .auditEvaluationStage <- function(options, jaspResults, type, workflow){ if(workflow){ # Create the container that holds the selection output evaluationContainer <- .auditAnalysisContainer(jaspResults, stage = "evaluation-workflow", position = 5) # Read in additional variables dataset <- .auditAddEvaluationColumns(options, jaspResults) # See if analysis can be run ready <- options[["auditResult"]] != "" # Extract only the sample if(ready) sample <- subset(dataset, dataset[, .v(options[["sampleFilter"]])] != 0) # Import options and results from the planning and selection stages evaluationOptions <- .auditInputOptions(options, dataset, jaspResults, stage = "planning", rawData = TRUE) planningContainer 
<- jaspResults[["planningContainer"]] planningState <- planningContainer[["planningState"]]$object selectionContainer <- jaspResults[["selectionContainer"]] selectionState <- selectionContainer[["selectionState"]]$object if(is.null(selectionState)) return() # Perform the evaluation evaluationState <- .auditEvaluationState(options, evaluationOptions, sample, evaluationContainer, type) # Create explanatory text for the evaluation .auditExplanatoryTextEvaluation(options, evaluationOptions, planningState, selectionState, evaluationContainer, type, positionInContainer = 1) } else if(!workflow){ .auditCreateTableNumber(jaspResults) # Initialize table numbers .auditCreateFigureNumber(jaspResults) # Initialize figure numbers # Create an empty container for the evaluation analysis evaluationContainer <- .auditAnalysisContainer(jaspResults, stage = "evaluation", position = 1) # Read in the relevant variables from the data set sample <- .auditReadDataset(options, jaspResults, stage = "evaluation") # Check for errors due to incompatible options error <- .auditErrorCheckInputOptions(options, sample, evaluationContainer, stage = "evaluation", type) if(error) return() # Deduce relevant quantities from input options evaluationOptions <- .auditInputOptions(options, sample, jaspResults, stage = "evaluation") # Create the evaluation state that holds the results evaluationState <- .auditEvaluationAnalysisState(options, sample, evaluationOptions, evaluationContainer, type) # Backwards create a planningstate and a selectionstate planningState <- .auditBackwardsPlanningState(options, sample, evaluationOptions, type) selectionState <- .auditBackwardsState(options, stage = "evaluation") # Create explanatory text for the evaluation .auditExplanatoryTextEvaluation(options, evaluationOptions, planningState, selectionState, evaluationContainer, type, positionInContainer = 1) } # --- TABLES # Create a table containing information about the evaluation process 
.auditEvaluationSummaryTable(options, evaluationOptions, evaluationState, evaluationContainer, jaspResults, type, positionInContainer = 2) if(type == "bayesian"){ # Create a table containing information regarding the prior and posterior .auditPriorAndPosteriorStatisticsTable(options, evaluationOptions, evaluationState, evaluationContainer, jaspResults, positionInContainer = 3) } # --- PLOTS if(type == "bayesian"){ # Create a plot containing the prior and posterior distribution .auditEvaluationPriorAndPosterior(options, evaluationOptions, planningState, evaluationState, evaluationContainer, jaspResults, positionInContainer = 4) } # Create a plot containing evaluation information .auditEvaluationInformationPlot(options, evaluationOptions, evaluationState, evaluationContainer, jaspResults, type, positionInContainer = 6) # Create a plot containing the correlation between the book and audit values if(options[["variableType"]] == "variableTypeAuditValues") .auditCorrelationPlot(options, evaluationOptions, sample, evaluationContainer, jaspResults, positionInContainer = 8) } ##################################### ######### CONCLUSION STAGE ########## ##################################### .auditConclusionStage <- function(options, jaspResults){ if(!is.null(jaspResults[["conclusionContainer"]]) || options[["auditResult"]] == "") return() .auditExplanatoryText(options, stageOptions = NULL, stageContainer = NULL, stageState = NULL, jaspResults, stage = "conclusion", positionInContainer = 1) } ################################################################################ ################## Common functions for figure and table numbers ############### ################################################################################ .auditCreateFigureNumber <- function(jaspResults){ # Initialize figure numbers jaspResults[["figNumber"]] <- createJaspState(0) } .auditCreateTableNumber <- function(jaspResults){ # Initialize table numbers jaspResults[["tabNumber"]] <- 
createJaspState(0) } .updateTabNumber <- function(jaspResults){ # Update table numbers + 1 currentNumber <- jaspResults[["tabNumber"]]$object jaspResults[["tabNumber"]] <- createJaspState(currentNumber + 1) } .updateFigNumber <- function(jaspResults){ # Update figure numbers + 1 currentNumber <- jaspResults[["figNumber"]]$object jaspResults[["figNumber"]] <- createJaspState(currentNumber + 1) } ################################################################################ ################## Common functions for reading data and options ############### ################################################################################ .auditReadVariableFromOptions <- function(options, varType){ if(varType == "recordNumber"){ # Read in the record ID's recordNumberVariable <- options[["recordNumberVariable"]] if(recordNumberVariable == "") recordNumberVariable <- NULL return(recordNumberVariable) } else if(varType == "monetary"){ # Read in the book values monetaryVariable <- options[["monetaryVariable"]] if(monetaryVariable == "") monetaryVariable <- NULL return(monetaryVariable) } else if(varType == "auditResult"){ # Read in the audit result auditResult <- options[["auditResult"]] if(auditResult == "") auditResult <- NULL return(auditResult) } else if(varType == "sampleCounter"){ # Read in the sample counter sampleCounter <- options[["sampleCounter"]] if(sampleCounter == "") sampleCounter <- NULL return(sampleCounter) } else if(varType == "ranking"){ # Read in the ranking variable rankingVariable <- options[["rankingVariable"]] if(rankingVariable == "") rankingVariable <- NULL return(rankingVariable) } else if(varType == "additional"){ # Read in additional variables additionalVariables <- unlist(options[["additionalVariables"]]) return(additionalVariables) } } .auditReadDataset <- function(options, jaspResults, stage){ if(stage == "procedure"){ recordNumberVariable <- .auditReadVariableFromOptions(options, varType = "recordNumber") monetaryVariable <- 
.auditReadVariableFromOptions(options, varType = "monetary") analysisOptions <- list() if(!is.null(recordNumberVariable)){ dataset <- .readDataSetToEnd(columns.as.numeric = recordNumberVariable) analysisOptions[["populationSize"]] <- nrow(dataset) analysisOptions[["uniqueN"]] <- length(unique(dataset[, .v(options[["recordNumberVariable"]])])) if(!is.null(monetaryVariable)){ variables <- c(recordNumberVariable, monetaryVariable) dataset <- .readDataSetToEnd(columns.as.numeric = variables) monetaryColumn <- dataset[, .v(monetaryVariable)] analysisOptions[["populationValue"]] <- sum(monetaryColumn, na.rm = TRUE) analysisOptions[["absPopulationValue"]] <- sum(abs(monetaryColumn), na.rm = TRUE) analysisOptions[["meanValue"]] <- mean(monetaryColumn, na.rm = TRUE) analysisOptions[["sigmaValue"]] <- sd(monetaryColumn, na.rm = TRUE) analysisOptions[["quantileValue"]] <- as.numeric(quantile(monetaryColumn, probs = c(0.25, 0.50, 0.75), na.rm = TRUE)) analysisOptions[["ready"]] <- TRUE } else { analysisOptions[["populationValue"]] <- 0.01 analysisOptions[["ready"]] <- ifelse(options[["materiality"]] == "materialityRelative", yes = TRUE, no = FALSE) } } else { dataset <- NULL analysisOptions[["populationSize"]] <- 0 analysisOptions[["uniqueN"]] <- 0 analysisOptions[["populationValue"]] <- 0.01 analysisOptions[["ready"]] <- FALSE } materiality <- ifelse(options[["materiality"]] == "materialityRelative", yes = options[["materialityPercentage"]], no = options[["materialityValue"]]) if(materiality == 0) analysisOptions[["ready"]] <- FALSE jaspResults[["procedureOptions"]] <- createJaspState(analysisOptions) jaspResults[["procedureOptions"]]$dependOn(c("recordNumberVariable", "monetaryVariable", "materiality", "materialityPercentage", "materialityValue")) return(dataset) } else if(stage == "selection"){ recordNumberVariable <- .auditReadVariableFromOptions(options, varType = "recordNumber") monetaryVariable <- .auditReadVariableFromOptions(options, varType = "monetary") 
rankingVariable <- .auditReadVariableFromOptions(options, varType = "ranking") additionalVariables <- .auditReadVariableFromOptions(options, varType = "additional") variables <- c(recordNumberVariable, monetaryVariable, rankingVariable, additionalVariables) } else if(stage == "evaluation"){ recordNumberVariable <- .auditReadVariableFromOptions(options, varType = "recordNumber") monetaryVariable <- .auditReadVariableFromOptions(options, varType = "monetary") auditResult <- .auditReadVariableFromOptions(options, varType = "auditResult") sampleCounter <- .auditReadVariableFromOptions(options, varType = "sampleCounter") variables <- c(recordNumberVariable, monetaryVariable, auditResult, sampleCounter) } if(!is.null(variables)){ dataset <- .readDataSetToEnd(columns.as.numeric = variables) if(stage == "evaluation" && !is.null(sampleCounter)) # Apply sample filter dataset <- subset(dataset, dataset[, .v(options[["sampleCounter"]])] > 0) return(dataset) } else { return(NULL) } } .auditInputOptions <- function(options, dataset, jaspResults, stage, rawData = FALSE){ inputOptions <- list() if(stage == "planning"){ inputOptions[["valuta"]] <- base::switch(options[["valuta"]], "euroValuta" = "\u20AC", "dollarValuta" = "\u0024", "otherValuta" = options[["otherValutaName"]]) inputOptions[["confidence"]] <- options[["confidence"]] inputOptions[["confidenceLabel"]] <- paste0(round(options[["confidence"]] * 100, 2), "%") if(!rawData){ inputOptions[["populationSize"]] <- options[["populationSize"]] inputOptions[["populationValue"]] <- ifelse(options[["populationValue"]] == 0, yes = 0.01, no = options[["populationValue"]]) } else { procedureOptions <- jaspResults[["procedureOptions"]]$object inputOptions[["populationSize"]] <- procedureOptions[["populationSize"]] inputOptions[["populationValue"]] <- procedureOptions[["populationValue"]] } inputOptions[["absRel"]] <- ifelse(options[["materiality"]] == "materialityRelative", yes = gettext("<b>percentage</b>"), no = 
gettext("<b>amount</b>")) inputOptions[["materiality"]] <- ifelse(options[["materiality"]] == "materialityRelative", yes = options[["materialityPercentage"]], no = options[["materialityValue"]] / inputOptions[["populationValue"]]) inputOptions[["materialityLabel"]] <- ifelse(options[["materiality"]] == "materialityRelative", yes = paste0(round(inputOptions[["materiality"]] * 100, 2), "%"), no = paste(inputOptions[["valuta"]], format(options[["materialityValue"]], scientific = FALSE))) inputOptions[["expectedErrors"]] <- ifelse(options[["expectedErrors"]] == "expectedRelative", yes = options[["expectedPercentage"]], no = options[["expectedNumber"]] / inputOptions[["populationValue"]]) inputOptions[["expectedErrorsLabel"]] <- ifelse(options[["expectedErrors"]] == "expectedRelative", yes = paste0(round(inputOptions[["expectedErrors"]] * 100, 2), "%"), no = paste(inputOptions[["valuta"]], options[["expectedNumber"]])) inputOptions[["likelihood"]] <- base::switch(options[["planningModel"]], "Poisson" = "poisson", "binomial" = "binomial", "hypergeometric" = "hypergeometric") } else if(stage == "selection"){ inputOptions[["valuta"]] <- "$" # Hard coded inputOptions[["populationSize"]] <- ifelse(is.null(dataset), yes = 0, no = nrow(dataset)) if(options[["monetaryVariable"]] != "") inputOptions[["populationValue"]] <- sum(dataset[, .v(options[["monetaryVariable"]])]) } else if(stage == "evaluation"){ confidence <- options[["confidence"]] confidenceLabel <- paste0(round(options[["confidence"]] * 100, 2), "%") populationSize <- options[["populationSize"]] populationValue <- ifelse(options[["populationValue"]] == 0, yes = 0.01, no = options[["populationValue"]]) materiality <- ifelse(options[["materiality"]] == "materialityRelative", yes = options[["materialityPercentage"]], no = options[["materialityValue"]] / populationValue) materialityLabel <- ifelse(options[["materiality"]] == "materialityRelative", yes = paste0(round(materiality * 100, 2), "%"), no = paste("$", 
format(options[["materialityValue"]], scientific = FALSE))) expectedErrors <- ifelse(options[["expectedErrors"]] == "expectedRelative", yes = options[["expectedPercentage"]], no = options[["expectedNumber"]] / populationValue) likelihood <- base::switch(options[["estimator"]], "betaBound" = "binomial", "gammaBound" = "poisson", "betabinomialBound" = "hypergeometric") inputOptions[["materiality"]] <- materiality inputOptions[["materialityLabel"]] <- materialityLabel inputOptions[["populationSize"]] <- populationSize inputOptions[["populationValue"]] <- populationValue inputOptions[["valuta"]] <- "$" inputOptions[["confidence"]] <- confidence inputOptions[["confidenceLabel"]] <- confidenceLabel inputOptions[["expectedErrors"]] <- expectedErrors inputOptions[["likelihood"]] <- likelihood } return(inputOptions) } .auditBackwardsState <- function(options, stage){ if(stage == "selection"){ state <- list("sampleSize" = options[["sampleSize"]]) } else if(stage == "evaluation"){ state <- data.frame(count = 1) } return(state) } ################################################################################ ################## Common functions for containers ############################# ################################################################################ .auditAnalysisContainer <- function(jaspResults, stage, position = 1){ if(stage == "procedure"){ if(!is.null(jaspResults[["procedureContainer"]])) return(jaspResults[["procedureContainer"]]) analysisContainer <- createJaspContainer(title = gettext("<u>Procedure</u>")) analysisContainer$position <- position analysisContainer$dependOn(options = c("explanatoryText", "confidence", "materiality", "materialityValue", "materialityPercentage", "valuta", "otherValutaName", "monetaryVariable", "recordNumberVariable")) jaspResults[["procedureContainer"]] <- analysisContainer } else if(stage == "planning"){ if(!is.null(jaspResults[["planningContainer"]])) return(jaspResults[["planningContainer"]]) analysisContainer <- 
createJaspContainer(title = gettext("<u>Planning</u>")) analysisContainer$position <- position analysisContainer$dependOn(options = c("IR", "irCustom", "CR", "crCustom", "confidence", "populationSize", "populationValue", "materiality", "materialityPercentage", "materialityValue", "expectedPercentage", "expectedErrors", "expectedNumber", "recordNumberVariable", "monetaryVariable", "valuta", "otherValutaName")) jaspResults[["planningContainer"]] <- analysisContainer } else if(stage == "selection"){ if(!is.null(jaspResults[["selectionContainer"]])) return(jaspResults[["selectionContainer"]]) analysisContainer <- createJaspContainer(title = "") analysisContainer$position <- position analysisContainer$dependOn(options = c("recordNumberVariable", "monetaryVariable", "additionalVariables", "rankingVariable", "selectionMethod", "selectionType", "seed", "intervalStartingPoint", "sampleSize")) jaspResults[["selectionContainer"]] <- analysisContainer } else if(stage == "selection-workflow"){ planningContainer <- jaspResults[["planningContainer"]] planningState <- planningContainer[["planningState"]]$object if(!is.null(jaspResults[["selectionContainer"]])){ return(jaspResults[["selectionContainer"]]) } else if(!is.null(planningState)){ analysisContainer <- createJaspContainer(title = gettext("<u>Selection</u>")) analysisContainer$position <- position analysisContainer$dependOn(optionsFromObject = planningContainer, options = c("samplingChecked", "selectionMethod", "selectionType", "seed", "intervalStartingPoint", "additionalVariables", "rankingVariable", "valuta", "otherValutaName")) jaspResults[["selectionContainer"]] <- analysisContainer } } else if(stage == "evaluation"){ if(!is.null(jaspResults[["evaluationContainer"]])) return(jaspResults[["evaluationContainer"]]) analysisContainer <- createJaspContainer(title = "") analysisContainer$position <- position analysisContainer$dependOn(options = c("recordNumberVariable", "monetaryVariable", "auditResult", "sampleCounter", 
"variableType", "confidence", "populationSize", "populationValue", "IR", "irCustom", "CR", "crCustom", "expectedErrors", "expectedPercentage", "expectedNumber", "materiality", "materialityPercentage", "materialityValue", "useSumStats", "nSumStats", "kSumStats", "estimator", "estimator2", "areaUnderPosterior", "stringerBoundLtaAdjustment")) jaspResults[["evaluationContainer"]] <- analysisContainer } else if(stage == "evaluation-workflow"){ selectionContainer <- jaspResults[["selectionContainer"]] selectionState <- selectionContainer[["selectionState"]]$object if(!is.null(jaspResults[["evaluationContainer"]])){ return(jaspResults[["evaluationContainer"]]) } else if(!is.null(selectionState)){ analysisContainer <- createJaspContainer(title = gettext("<u>Evaluation</u>")) analysisContainer$position <- position analysisContainer$dependOn(options = c("evaluationChecked", "auditResult", "mostLikelyError", "estimator", "performAudit", "stringerBoundLtaAdjustment", "areaUnderPosterior")) jaspResults[["evaluationContainer"]] <- analysisContainer } } return(analysisContainer) } ################################################################################ ################## Common functions for error checks ########################### ################################################################################ .auditErrorCheckInputOptions <- function(options, dataset, analysisContainer, stage, type = NULL, ready = NULL, analysisOptions = NULL){ if(stage == "procedure"){ variables <- NULL if(options[["recordNumberVariable"]] != "") variables <- c(variables, options[["recordNumberVariable"]]) if(options[["monetaryVariable"]] != "") variables <- c(variables, options[["monetaryVariable"]]) if (length(variables) == 0) return() N <- nrow(dataset) # Check for infinity, zero variance, and any missing observations .hasErrors(dataset, type = c("infinity", "variance", "observations"), all.target = variables, message = "short", observations.amount = paste0("< ", N), 
exitAnalysisIfErrors = TRUE) } else if(stage == "planning"){ if(ready){ if(options[["materiality"]] == "materialityAbsolute" && options[["materialityValue"]] >= analysisOptions[["populationValue"]]){ # Error if the value of the performance materiality exceeds the total population value analysisContainer$setError(gettext("Analysis not possible: Your materiality is higher than, or equal to the total value of the observations.")) return(TRUE) } expTMP <- ifelse(options[['expectedErrors']] == "expectedRelative", yes = options[["expectedPercentage"]], no = options[["expectedNumber"]] / analysisOptions[["populationValue"]]) if(expTMP >= analysisOptions[["materiality"]]){ # Error if the expected errors exceed the performance materiality analysisContainer$setError(gettext("Analysis not possible: Your expected errors are higher than materiality.")) return(TRUE) } if(.auditCalculateDetectionRisk(options) >= 1){ # Error if the detection risk of the analysis is higher than one analysisContainer$setError(gettext("The detection risk is equal to or higher than 100%. Please re-specify your custom values for the Inherent risk and/or Control risk, or the confidence.")) return(TRUE) } } # No error in the planning options return(FALSE) } else if(stage == "selection"){ if(!is.null(dataset) && options[["sampleSize"]] >= nrow(dataset)){ # Error if the sample size is larger than the population size. analysisContainer[["errorMessage"]] <- createJaspTable(gettext("Selection summary")) analysisContainer$setError(gettext("Your sample size is larger than (or equal to) your population size. Cannot take a sample larger than the population.")) return(TRUE) } else if(!is.null(dataset) && options[["sampleSize"]] == 1){ # Error if the sample size is 1. 
analysisContainer[["errorMessage"]] <- createJaspTable(gettext("Selection summary")) analysisContainer$setError(gettext("Your sample size must be larger than 1.")) return(TRUE) } else if(options[["recordNumberVariable"]] != "" && !is.null(dataset) && nrow(dataset) != length(unique(dataset[, .v(options[["recordNumberVariable"]])]))){ # Error if the record ID's are not unique analysisContainer[["errorMessage"]] <- createJaspTable(gettext("Selection summary")) analysisContainer$setError(gettext("Your must specify unique record ID's. The row numbers of the data set are sufficient.")) return(TRUE) } else { # No error in the selection options return(FALSE) } } else if(stage == "selection-workflow") { if(options[["recordNumberVariable"]] != "" && !is.null(dataset) && nrow(dataset) != length(unique(dataset[, .v(options[["recordNumberVariable"]])]))){ # Error if the record ID's are not unique analysisContainer[["errorMessage"]] <- createJaspTable(gettext("Selection summary")) analysisContainer$setError(gettext("Your must specify unique record ID's. The row numbers of the data set are sufficient.")) return(TRUE) } else { # No error in the selection options return(FALSE) } } else if(stage == "evaluation"){ if(options[["variableType"]] == "variableTypeCorrect" && !options[["useSumStats"]] && options[["auditResult"]] != "" && !all(unique(dataset[, .v(options[["auditResult"]])]) %in% c(0, 1))){ # Error if the audit result does not contain only zero's and one's. analysisContainer[["errorMessage"]] <- createJaspTable(gettext("Evaluation summary")) analysisContainer$setError(gettext("Your audit result does not contain only 0's (correct) and 1's (incorrect).")) return(TRUE) } else if(type == "frequentist" && options[["variableType"]] == "variableTypeCorrect" && options[["estimator2"]] == "hyperBound" && options[["populationSize"]] == 0){ # Error if the population size is not defined when the hypergeometric bound is used. 
analysisContainer[["errorMessage"]] <- createJaspTable(gettext("Evaluation summary")) analysisContainer$setError(gettext("The hypergeometric confidence bound requires that you specify the population size.")) return(TRUE) } else if((!options[["useSumStats"]] && !is.null(dataset) && options[["populationSize"]] < nrow(dataset)) || (options[["useSumStats"]] && options[["populationSize"]] < options[["nSumStats"]])){ # Error if the sample size is larger than the population size. analysisContainer[["errorMessage"]] <- createJaspTable(gettext("Evaluation summary")) analysisContainer$setError(gettext("Your sample size is larger than (or equal to) your population size. Please adjust your population size accordingly.")) return(TRUE) } else if(options[["estimator"]] %in% c("directBound", "differenceBound", "ratioBound", "regressionBound") && (options[["populationValue"]] == 0 || options[["populationSize"]] == 0)){ # Error if the population size or the population value are zero when using direct, difference, ratio, or regression. analysisContainer[["errorMessage"]] <- createJaspTable(gettext("Evaluation summary")) analysisContainer$setError(gettext("The direct, difference, ratio, and regression confidence bound require that you specify the population size and the population value.")) return(TRUE) } else if(!options[["useSumStats"]] && options[["recordNumberVariable"]] != "" && !is.null(dataset) && nrow(dataset) != length(unique(dataset[, .v(options[["recordNumberVariable"]])]))){ # Error if the record ID's are not unique analysisContainer[["errorMessage"]] <- createJaspTable(gettext("Selection summary")) analysisContainer$setError(gettext("Your must specify unique record ID's. 
The row numbers of the data set are sufficient.")) return(TRUE) } else if(.auditCalculateDetectionRisk(options) >= 1){ # Error if the detection risk of the analysis is higher than one analysisContainer[["errorMessage"]] <- createJaspTable(gettext("Evaluation summary")) analysisContainer$setError(gettext("The detection risk is equal to or higher than 100%. Please re-specify your values for the Inherent risk and/or Control risk, or the confidence.")) return(TRUE) } else { # No error in the evaluation options return(FALSE) } } } .auditReadyForNextStage <- function(options, jaspResults, stage){ if(stage == "planning"){ # Check whether the "To selection" button is pressed and no error occurred in the previous stage ready <- options[["samplingChecked"]] && !jaspResults[["planningContainer"]]$getError() } else if(stage == "selection"){ } else if(stage == "execution"){ # Check whether the "To evaluation" button is pressed and no error occurred in the previous stage ready <- options[["evaluationChecked"]] && !jaspResults[["planningContainer"]]$getError() && !jaspResults[["selectionContainer"]]$getError() } else if(stage == "evaluation"){ } return(ready) } .auditReadyForAnalysis <- function(options, planningOptions, stage){ if(stage == "planning"){ if(options[["materiality"]] == "materialityAbsolute"){ ready <- options[["materialityValue"]] != 0 && planningOptions[["populationSize"]] != 0 && planningOptions[["populationValue"]] != 0 && planningOptions[["populationValue"]] != 0.01 } else { ready <- options[["materialityPercentage"]] != 0 && planningOptions[["populationSize"]] != 0 } } return(ready) } ################################################################################ ################## Common functions for the explanatory text ################### ################################################################################ .auditExplanatoryText <- function(options, stageOptions, stageContainer, stageState, jaspResults, stage, positionInContainer, type = NULL, 
prevState = NULL){ if(options[["explanatoryText"]]){ if(stage == "procedure"){ procedureContainer <- .auditAnalysisContainer(jaspResults, stage = "procedure", position = 1) procedureText <- gettextf("The objective of this substantive testing procedure is to determine with a specified confidence <b>(%1$s)</b> whether the %2$s of misstatement in the target population is lower than the specified materiality of <b>%3$s</b>.", stageOptions[["confidenceLabel"]], stageOptions[["absRel"]], stageOptions[["materialityLabel"]]) procedureContainer[["procedureParagraph"]] <- createJaspHtml(procedureText, "p") procedureContainer[["procedureParagraph"]]$position <- positionInContainer } else if(stage == "planning") { if(is.null(stageContainer[["planningParagraph"]]) && !stageContainer$getError()){ if(type == "frequentist"){ stageContainer[["planningParagraph"]] <- createJaspHtml(gettextf("The most likely error in the data was expected to be <b>%1$s</b>. The sample size that is required for a materiality of <b>%2$s</b>, assuming the sample contains <b>%3$s</b> full errors, is <b>%4$s</b>. This sample size is based on the <b>%5$s</b> distribution, the inherent risk <b>(%6$s)</b>, the control risk <b>(%7$s)</b> and the expected errors. Consequently, if the sum of errors from the audited observations remains below <b>%8$s</b>, the maximum misstatement is estimated to be below materiality.", stageOptions[["expectedErrorsLabel"]], stageOptions[["materialityLabel"]], stageState[["expectedSampleError"]], stageState[["sampleSize"]], options[["planningModel"]], options[["IR"]], options[["CR"]], stageOptions[["expectedErrorsLabel"]]), "p") } else if(type == "bayesian"){ distribution <- base::switch(stageOptions[["likelihood"]], "poisson" = "gamma", "binomial" = "beta", "hypergeometric" = "beta-binomial") stageContainer[["planningParagraph"]] <- createJaspHtml(gettextf("The most likely error in the data was expected to be <b>%1$s</b>. 
The sample size that is required for a materiality of <b>%2$s</b>, assuming the sample contains <b>%3$s</b> full errors, is <b>%4$s</b>. This sample size is based on the <b>%5$s</b> distribution, the inherent risk <b>(%6$s)</b>, the control risk <b>(%7$s)</b> and the expected errors. The information in this prior distribution states that there is a <b>%8$s%%</b> prior probability that the population misstatement is lower than materiality. Consequently, if the sum of errors from the audited observations remains below <b>%9$s</b> the maximum misstatement is estimated to be below materiality.", stageOptions[["expectedErrorsLabel"]], stageOptions[["materialityLabel"]], stageState[["expectedSampleError"]], stageState[["sampleSize"]], distribution, options[["IR"]], options[["CR"]], round(pbeta(stageState[["materiality"]], stageState[["prior"]]$aPrior, stageState[["prior"]]$bPrior) * 100, 2), stageOptions[["expectedErrorsLabel"]]), "p") } stageContainer[["planningParagraph"]]$position <- positionInContainer stageContainer[["planningParagraph"]]$dependOn(options = "explanatoryText") } } else if(stage == "selection"){ samplingLabel <- base::switch(options[["selectionMethod"]], "randomSampling" = gettext("random"), "systematicSampling" = gettext("fixed interval"), "cellSampling" = gettext("cell")) if(!is.null(stageState) && !is.null(stageState[["musFailed"]])){ # MUS has failed for some reason, fall back to record sampling message <- gettextf("From the population of <b>%1$s</b> observations, <b>%2$s</b> observations were selected using a <b>%3$s record sampling</b> method. 
<br><b>Warning:</b> A monetary unit sampling method was tried but failed.", stageOptions[["populationSize"]], prevState[["sampleSize"]], samplingLabel) } else { samplingLabel <- base::switch(options[["selectionType"]], "recordSampling" = gettextf("%1$s record sampling", samplingLabel), "musSampling" = gettextf("%1$s monetary unit sampling", samplingLabel)) message <- gettextf("From the population of <b>%1$s</b> observations, <b>%2$s</b> observations were selected using a <b>%3$s</b> method.", stageOptions[["populationSize"]], prevState[["sampleSize"]], samplingLabel) } if(!is.null(stageState) && sum(stageState[["count"]]) > nrow(stageState)){ message <- gettextf("%1$s <b>Note:</b> The selected subset (%2$s) is smaller than the planned sample size (%3$s), as observations are selected multiple times due to their high value. These observations (%4$s) are counted multiple times in the evaluation.", message, nrow(stageState), prevState[["sampleSize"]], prevState[["sampleSize"]] - nrow(stageState)) } stageContainer[["samplingParagraph"]] <- createJaspHtml(message, "p") stageContainer[["samplingParagraph"]]$position <- positionInContainer stageContainer[["samplingParagraph"]]$dependOn(options = "explanatoryText") } else if(stage == "conclusion"){ # Import options and results from the planning and selection stages planningOptions <- .auditInputOptions(options, dataset = NULL, jaspResults, stage = "planning", rawData = TRUE) # Import result of analysis from jaspResults evaluationContainer <- jaspResults[["evaluationContainer"]] evaluationState <- evaluationContainer[["evaluationState"]]$object if(is.null(evaluationState)) return() # Create a container for the conclusion conclusionContainer <- createJaspContainer(title = gettext("<u>Conclusion</u>")) conclusionContainer$position <- 5 conclusionContainer$dependOn(optionsFromObject = evaluationContainer) conclusionContainer$dependOn(options = "explanatoryText") # Produce relevant terms conditional on the analysis result 
conclusion <- evaluationState[["conclusion"]] if(conclusion == "Approve population"){ aboveBelow <- gettext("below") lowerHigher <- gettext("lower") } else { aboveBelow <- gettext("above") lowerHigher <- gettext("higher") } message <- gettextf("The objective of this substantive testing procedure was to determine with <b>%1$s</b> confidence whether the population misstatement is lower than materiality, in this case <b>%2$s</b>. For the current data, the <b>%3$s</b> confidence bound is <b>%4$s</b> materiality. The conclusion on the basis of these results is that, with <b>%5$s</b> confidence, the population misstatement is <b>%6$s</b> than materiality.", planningOptions[["confidenceLabel"]], planningOptions[["materialityLabel"]], planningOptions[["confidenceLabel"]], aboveBelow, planningOptions[["confidenceLabel"]], lowerHigher) conclusionContainer[["conclusionParagraph"]] <- createJaspHtml(message, "p") conclusionContainer[["conclusionParagraph"]]$position <- 1 conclusionContainer[["conclusionParagraph"]]$dependOn(optionsFromObject = conclusionContainer) # Finsh conclusion jaspResults[["conclusionContainer"]] <- conclusionContainer } } } ################################################################################ ################## Common functions for the procedure stage #################### ################################################################################ .auditBookValueDescriptiveTable <- function(options, planningOptions, jaspResults, positionInContainer){ procedureContainer <- .auditAnalysisContainer(jaspResults, stage = "procedure", position = 1) if(!options[["bookValueDescriptives"]] || options[["monetaryVariable"]] == "") return() .updateTabNumber(jaspResults) if(is.null(procedureContainer[["bookValueDescriptives"]])){ tableTitle <- gettextf("<b>Table %1$i.</b> Book Value Descriptive Statistics", jaspResults[["tabNumber"]]$object) descriptiveTable <- createJaspTable(tableTitle) descriptiveTable$position <- positionInContainer 
descriptiveTable$dependOn(options = c("bookValueDescriptives", "sampleDescriptives", "displaySample", "samplingChecked", "evaluationChecked")) descriptiveTable$addColumnInfo(name = 'populationSize', title = gettext("Population size"), type = 'string') descriptiveTable$addColumnInfo(name = 'populationValue', title = gettext("Total value"), type = 'string') descriptiveTable$addColumnInfo(name = 'absValue', title = gettext("Absolute value"), type = 'string') descriptiveTable$addColumnInfo(name = 'meanValue', title = gettext("Mean"), type = 'string') descriptiveTable$addColumnInfo(name = 'sigmaValue', title = gettext("Std. deviation"), type = 'string') descriptiveTable$addColumnInfo(name = 'q1', title = gettext("25th"), type = 'string', overtitle = "Percentile") descriptiveTable$addColumnInfo(name = 'q2', title = gettext("50th"), type = 'string', overtitle = gettext("Percentile")) descriptiveTable$addColumnInfo(name = 'q3', title = gettext("75th"), type = 'string', overtitle = gettext("Percentile")) procedureContainer[["bookValueDescriptives"]] <- descriptiveTable if(options[["monetaryVariable"]] == "" || options[["recordNumberVariable"]] == "") return() procedureOptions <- jaspResults[["procedureOptions"]]$object valuta <- planningOptions[["valuta"]] row <- data.frame(populationSize = procedureOptions[["populationSize"]], populationValue = paste(valuta, round(procedureOptions[["populationValue"]], 2)), absValue = paste(valuta, round(procedureOptions[["absPopulationValue"]], 2)), meanValue = paste(valuta, round(procedureOptions[["meanValue"]], 2)), sigmaValue = paste(valuta, round(procedureOptions[["sigmaValue"]], 2)), q1 = paste(valuta, round(procedureOptions[["quantileValue"]][1], 2)), q2 = paste(valuta, round(procedureOptions[["quantileValue"]][2], 2)), q3 = paste(valuta, round(procedureOptions[["quantileValue"]][3], 2))) descriptiveTable$addRows(row) } } .auditBookValueDistributionPlot <- function(dataset, options, planningOptions, jaspResults, 
positionInContainer){ procedureContainer <- .auditAnalysisContainer(jaspResults, stage = "procedure", position = 1) if(!options[["bookValueDistribution"]] || options[["monetaryVariable"]] == "") return() .updateFigNumber(jaspResults) if(is.null(procedureContainer[["bookValueDistribution"]])){ bookValuePlot <- createJaspPlot(plot = NULL, title = gettext("Book Value Distribution"), width = 600, height = 300) bookValuePlot$position <- positionInContainer bookValuePlot$dependOn(options = c("bookValueDistribution", "valuta")) procedureContainer[["bookValueDistribution"]] <- bookValuePlot if(options[["recordNumberVariable"]] == "") return() procedureOptions <- jaspResults[["procedureOptions"]]$object bookValue <- dataset[, .v(options[["monetaryVariable"]])] mean <- procedureOptions[["meanValue"]] stdev <- procedureOptions[["sigmaValue"]] quantiles <- procedureOptions[["quantileValue"]] legendData <- data.frame(x = c(0, 0, 0), y = c(0, 0, 0), l = c("1", "2", "3")) p <- .auditBarPlot(column = bookValue, variableName = options[["monetaryVariable"]], valuta = planningOptions[["valuta"]]) p <- p + ggplot2::geom_point(mapping = ggplot2::aes(x = quantiles[1], y = 0), shape = 21, fill = "orange", stroke = 2, size = 2) + ggplot2::geom_point(mapping = ggplot2::aes(x = quantiles[2], y = 0), shape = 21, fill = "orange", stroke = 2, size = 2) + ggplot2::geom_point(mapping = ggplot2::aes(x = quantiles[3], y = 0), shape = 21, fill = "orange", stroke = 2, size = 2) + ggplot2::geom_point(mapping = ggplot2::aes(x = mean, y = 0), shape = 21, fill = "red", stroke = 2, size = 4) + ggplot2::geom_point(mapping = ggplot2::aes(x = mean + stdev, y = 0), shape = 21, fill = "dodgerblue1", stroke = 2, size = 3) + ggplot2::geom_point(mapping = ggplot2::aes(x = mean - stdev, y = 0), shape = 21, fill = "dodgerblue1", stroke = 2, size = 3) + ggplot2::geom_point(data = legendData, mapping = ggplot2::aes(x = x, y = y, shape = l), size = 0, color = rgb(0, 1, 0, 0)) + ggplot2::scale_shape_manual(name = "", 
values = c(21, 21, 21), labels = c(gettext("Mean"), gettextf("Mean %1$s sd", "\u00B1"), gettext("Quartile"))) + ggplot2::guides(shape = ggplot2::guide_legend(override.aes = list(size = c(4, 3, 2), shape = c(21, 21, 21), fill = c("red", "dodgerblue1", "orange"), stroke = 2, color = "black")), order = 1) p <- jaspGraphs::themeJasp(p, legend.position = "top") + ggplot2::theme(legend.text = ggplot2::element_text(margin = ggplot2::margin(l = -10, r = 50)), panel.grid.major.y = ggplot2::element_line(color = "#cbcbcb", size = 0.5)) bookValuePlot$plotObject <- p } if(options[["explanatoryText"]]){ bookValuePlotText <- createJaspHtml(gettextf("<b>Figure %1$i.</b> The distribution of book values in the population. The red and blue dots respectively represent the mean and the values exactly one standard deviation from the mean. The orange dots represent the 25th, 50th (median) and 75th percentile of the book values.", jaspResults[["figNumber"]]$object), "p") bookValuePlotText$position <- positionInContainer + 1 bookValuePlotText$dependOn(optionsFromObject = procedureContainer[["bookValueDistribution"]]) bookValuePlotText$dependOn(options = "explanatoryText") procedureContainer[["bookValuePlotText"]] <- bookValuePlotText } } .auditBarPlot <- function(column, variableName, valuta){ h <- hist(column, plot = FALSE) yBreaks <- jaspGraphs::getPrettyAxisBreaks(c(0, h$counts), min.n = 4) xBreaks <- jaspGraphs::getPrettyAxisBreaks(c(column, h$breaks), min.n = 4) p <- ggplot2::ggplot(data = data.frame(column), mapping = ggplot2::aes(x = column, y = ..count..)) + ggplot2::scale_x_continuous(name = gettextf("Book values (%1$s)", valuta), breaks = xBreaks, limits = range(xBreaks)) + ggplot2::scale_y_continuous(name = gettext("Frequency"), breaks = yBreaks, limits = c(0, max(yBreaks))) + ggplot2::geom_histogram(binwidth = (h$breaks[2] - h$breaks[1]), fill = "grey", col = "black", size = .7, center = ((h$breaks[2] - h$breaks[1])/2)) p <- jaspGraphs::themeJasp(p, axisTickWidth = .7, bty = 
list(type = "n", ldwX = .7, lwdY = 1))
  # NOTE(review): "ldwX" looks like a typo for "lwdX" -- confirm against the
  # bty argument of jaspGraphs::themeJasp before changing.
  return(p)
}

################################################################################
################## Common functions for the Audit Risk Model ###################
################################################################################

# Computes the detection risk (DR) from the Audit Risk Model AR = IR x CR x DR,
# i.e. DR = (1 - confidence) / IR / CR. The labels "High"/"Medium"/"Low" map to
# the probabilities 1 / 0.60 / 0.50; "Custom" reads the user-supplied value.
.auditCalculateDetectionRisk <- function(options){
  # Inherent risk (IR) as a probability.
  inherentRisk <- base::switch(options[["IR"]], "High" = 1, "Medium" = 0.60, "Low" = 0.50, "Custom" = options[["irCustom"]])
  # Control risk (CR) as a probability.
  controlRisk <- base::switch(options[["CR"]], "High" = 1, "Medium" = 0.60, "Low" = 0.50, "Custom" = options[["crCustom"]])
  # Audit risk (1 - confidence) divided by the product of IR and CR.
  detectionRisk <- (1 - options[["confidence"]]) / inherentRisk / controlRisk
  return(detectionRisk)
}

# Renders the "Audit Risk Model" section: a formula line showing
# AR = IR x CR x DR with the percentages filled in, plus (optionally) an
# explanatory paragraph. Output is placed in jaspResults[["ARMcontainer"]].
.auditRiskModelParagraph <- function(options, jaspResults, position){
  # Container already built on a previous run; nothing to do.
  if(!is.null(jaspResults[["ARMcontainer"]]))
    return()
  ARMcontainer <- createJaspContainer(title = gettext("<u>Audit Risk Model</u>"))
  ARMcontainer$position <- position
  ARMcontainer$dependOn(options = c("confidence", "IR", "irCustom", "CR", "crCustom", "materiality", "materialityPercentage", "materialityValue", "explanatoryText", "valuta", "otherValutaName"))
  jaspResults[["ARMcontainer"]] <- ARMcontainer
  auditRisk <- 1 - options[["confidence"]]
  # Translate the inherent-risk label to a probability ("Custom" = user value).
  if(options[["IR"]] != "Custom"){
    inherentRisk <- base::switch(options[["IR"]], "Low" = 0.50, "Medium" = 0.60, "High" = 1)
  } else {
    inherentRisk <- options[["irCustom"]]
  }
  # Same translation for the control-risk label.
  if(options[["CR"]] != "Custom"){
    controlRisk <- base::switch(options[["CR"]], "Low" = 0.50, "Medium" = 0.60, "High" = 1)
  } else {
    controlRisk <- options[["crCustom"]]
  }
  # With explanatory text enabled, the labels include the risk category name.
  if(options[["explanatoryText"]]){
    irLabel <- paste0(options[["IR"]], " = ", round(inherentRisk * 100, 2))
    crLabel <- paste0(options[["CR"]], " = ", round(controlRisk * 100, 2))
  } else {
    irLabel <- round(inherentRisk * 100, 2)
    crLabel <- round(controlRisk * 100, 2)
  }
  detectionRisk <- auditRisk / inherentRisk / controlRisk
  textARM <- gettextf("Audit risk (%1$s%%) = Inherent risk (%2$s%%) x Control risk (%3$s%%) x Detection risk (%4$s%%)",
round(auditRisk * 100, 2), irLabel, crLabel, round(detectionRisk * 100, 2)) ARMcontainer[["ARMformula"]] <- createJaspHtml(textARM, "h3", "21cm") ARMcontainer[["ARMformula"]]$position <- 2 if(options[["explanatoryText"]]){ irLabel <- paste0(options[["IR"]], " (", round(inherentRisk * 100, 2), "%)") crLabel <- paste0(options[["CR"]], " (", round(controlRisk * 100, 2), "%)") auditRiskLabel <- paste0(round(auditRisk * 100, 2), "%") dectectionRiskLabel <- paste0(round(detectionRisk * 100, 2), "%") message <- gettextf("Prior to the substantive testing phase, the inherent risk was determined to be <b>%1$s</b>. The internal control risk was determined to be <b>%2$s</b>. According to the Audit Risk Model, the required detection risk to maintain an audit risk of <b>%3$s</b> should be <b>%4$s</b>.", irLabel, crLabel, auditRiskLabel, dectectionRiskLabel) if(options[["IR"]] == "Custom" || options[["CR"]] == "Custom"){ message <- gettextf("%1$s The translation of High, Medium and Low to probabilities is done according custom preferences</b>.", message) } else { message <- gettextf("%1$s The translation of High, Medium and Low to probabilities is done according to <b>IODAD (2007)</b>.", message) } ARMcontainer[["AuditRiskModelParagraph"]] <- createJaspHtml(message, "p") ARMcontainer[["AuditRiskModelParagraph"]]$position <- 1 } } ################################################################################ ################## Common functions for the planning stage ##################### ################################################################################ .auditPlanningState <- function(options, planningOptions, planningContainer, ready, type){ if(!is.null(planningContainer[["planningState"]])){ return(planningContainer[["planningState"]]$object) } else if(ready && !planningContainer$getError()){ auditRisk <- 1 - options[["confidence"]] if(options[["IR"]] != "Custom"){ inherentRisk <- base::switch(options[["IR"]], "Low" = 0.50, "Medium" = 0.60, "High" = 1) } else { 
inherentRisk <- options[["irCustom"]] }
  # Control risk as a probability; "Custom" uses the user-supplied value.
  if(options[["CR"]] != "Custom"){
    controlRisk <- base::switch(options[["CR"]], "Low" = 0.50, "Medium" = 0.60, "High" = 1)
  } else {
    controlRisk <- options[["crCustom"]]
  }
  # Audit Risk Model: DR = AR / (IR x CR).
  detectionRisk <- auditRisk / inherentRisk / controlRisk
  if(detectionRisk >= 1){
    # FIX: this message was previously passed through gettextf(), whose sprintf()
    # step errors on the bare "%." in "100%." (a literal percent needs "%%").
    # There are no substitutions here, so plain gettext() is the correct call.
    planningContainer$setError(gettext("The detection risk is equal to or higher than 100%. Please re-specify your custom values for the Inherent risk and/or Control risk."))
    return()
  }
  if(type == "frequentist"){
    # Classical planning: compensate for IR/CR by lowering confidence to 1 - DR.
    adjustedConfidence <- 1 - detectionRisk
    result <- try({
      jfa::planning(materiality = planningOptions[["materiality"]], confidence = adjustedConfidence, expectedError = planningOptions[["expectedErrors"]], likelihood = planningOptions[["likelihood"]], N = planningOptions[["populationSize"]])
    })
  } else if(type == "bayesian"){
    # Bayesian planning: IR/CR enter through the prior instead of the confidence.
    result <- try({
      prior <- jfa::auditPrior(materiality = planningOptions[["materiality"]], confidence = planningOptions[["confidence"]], expectedError = planningOptions[["expectedErrors"]], likelihood = planningOptions[["likelihood"]], N = planningOptions[["populationSize"]], ir = inherentRisk, cr = controlRisk)
      jfa::planning(materiality = planningOptions[["materiality"]], confidence = planningOptions[["confidence"]], expectedError = planningOptions[["expectedErrors"]], N = planningOptions[["populationSize"]], prior = prior)
    })
  }
  if(isTryError(result)){
    # jfa caps the sample-size search; translate its message into a friendlier one.
    if(jaspBase:::.extractErrorMessage(result) == "Sample size could not be calculated, please increase the maxSize argument"){
      planningContainer$setError(gettext("The resulting sample size exceeds 5000."))
      return()
    }
    planningContainer$setError(gettextf("An error occurred: %1$s", jaspBase:::.extractErrorMessage(result)))
    return()
  }
  if(result[["sampleSize"]] > planningOptions[["populationSize"]]){
    planningContainer$setError(gettext("The resulting sample size is larger than the population size."))
    return()
  }
  planningContainer[["planningState"]] <- createJaspState(result)
planningContainer[["planningState"]]$dependOn(options = c("planningModel")) return(result) } else { bPrior <- ifelse(options[["planningModel"]] == "Poisson", yes = 0, no = 1) noResults <- list(sampleSize = 0, materiality = planningOptions[["materiality"]], N = planningOptions[["populationSize"]], expectedSampleError = 0, prior = list(aPrior = 1, bPrior = bPrior, nPrior = 0, kPrior = 0)) return(noResults) } } .auditPlanningSummaryTable <- function(options, planningOptions, planningState, planningContainer, jaspResults, ready, type, positionInContainer){ .updateTabNumber(jaspResults) if(!is.null(planningContainer[["summaryTable"]])) return() tableTitle <- gettextf("<b>Table %1$i.</b> Planning Summary", jaspResults[["tabNumber"]]$object) summaryTable <- createJaspTable(tableTitle) summaryTable$position <- positionInContainer summaryTable$dependOn(options = c("bookValueDescriptives", "sampleDescriptives", "displaySample", "samplingChecked", "evaluationChecked", "planningModel", "expectedEvidenceRatio", "expectedBayesFactor")) summaryTable$addColumnInfo(name = 'materiality', title = gettext("Materiality"), type = 'string') summaryTable$addColumnInfo(name = 'IR', title = gettext("Inherent risk"), type = 'string') summaryTable$addColumnInfo(name = 'CR', title = gettext("Control risk"), type = 'string') summaryTable$addColumnInfo(name = 'DR', title = gettext("Detection risk"), type = 'string') summaryTable$addColumnInfo(name = 'k', title = gettext("Expected errors"), type = 'string') summaryTable$addColumnInfo(name = 'n', title = gettext("Required sample size"), type = 'string') if(type == "bayesian" && options[["expectedEvidenceRatio"]]){ summaryTable$addColumnInfo(name = 'expectedEvidenceRatio', title = gettext("Expected evidence ratio"), type = 'string') } if(type == "bayesian" && options[["expectedBayesFactor"]]){ summaryTable$addColumnInfo(name = 'expectedBayesFactor', title = gettextf("Expected %1$s", "BF\u208B\u208A"), type = 'string') } 
planningContainer[["summaryTable"]] <- summaryTable auditRisk <- 1 - options[["confidence"]] if(options[["IR"]] != "Custom"){ inherentRisk <- base::switch(options[["IR"]], "Low" = 0.50, "Medium" = 0.60, "High" = 1) } else { inherentRisk <- options[["irCustom"]] } if(options[["CR"]] != "Custom"){ controlRisk <- base::switch(options[["CR"]], "Low" = 0.50, "Medium" = 0.60, "High" = 1) } else { controlRisk <- options[["crCustom"]] } detectionRisk <- auditRisk / inherentRisk / controlRisk if(!ready || planningContainer$getError()){ if(type == "frequentist"){ message <- base::switch(options[["planningModel"]], "Poisson" = gettext("The required sample size is based on the <b>Poisson</b> distribution."), "binomial" = gettext("The required sample size is based on the <b>binomial</b> distribution."), "hypergeometric" = gettext("The required sample size is based on the <b>hypergeometric</b> distribution.")) } else if(type == "bayesian"){ message <- base::switch(options[["planningModel"]], "Poisson" = gettext("The required sample size is based on the <b>gamma</b> distribution."), "binomial" = gettext("The required sample size is based on the <b>beta</b> distribution."), "hypergeometric" = gettextf("The required sample size is based on the <b>beta-binomial</b> distribution (N = %1$s).", options[["populationSize"]])) } summaryTable$addFootnote(message) row <- data.frame(materiality = planningOptions[["materialityLabel"]], IR = paste0(round(inherentRisk * 100, 2), "%"), CR = paste0(round(controlRisk * 100, 2), "%"), DR = paste0(round(detectionRisk * 100, 2), "%"), k = ".", n = ".") if(type == "bayesian" && options[["expectedEvidenceRatio"]]) row <- cbind(row, expectedEvidenceRatio = ".") if(type == "bayesian" && options[["expectedBayesFactor"]]) row <- cbind(row, expectedBayesFactor = ".") summaryTable$addRows(row) summaryTable$addFootnote(message = gettext("Either the materiality, the population size, or the population value is defined as zero."), symbol = gettext("<b>Analysis 
not ready.</b>")) return() } if(type == "frequentist"){ message <- base::switch(options[["planningModel"]], "Poisson" = gettextf("The required sample size is based on the <b>Poisson</b> distribution <i>(%1$s = %2$s)</i>.", "\u03BB", round( planningState[["materiality"]] * planningState[["sampleSize"]], 4)), "binomial" = gettextf("The required sample size is based on the <b>binomial</b> distribution <i>(p = %1$s)</i>", round(planningState[["materiality"]], 2)), "hypergeometric" = gettextf("The required sample size is based on the <b>hypergeometric</b> distribution <i>(N = %1$s, K = %2$s)</i>.", planningState[["N"]], ceiling( planningState[["N"]] * planningState[["materiality"]] ))) } else if(type == "bayesian"){ message <- base::switch(options[["planningModel"]], "Poisson" = gettextf("The required sample size is based on the <b>gamma</b> distribution <i>(%1$s = %2$s, %3$s = %4$s)</i>", "\u03B1", planningState[["prior"]]$aPrior, "\u03B2", planningState[["prior"]]$bPrior), "binomial" = gettextf("The required sample size is based on the <b>beta</b> distribution <i>(%1$s = %2$s, %3$s = %4$s)</i>.", "\u03B1", planningState[["prior"]]$aPrior, "\u03B2", planningState[["prior"]]$bPrior), "hypergeometric" = gettextf("The required sample size is based on the <b>beta-binomial</b> distribution <i>(N = %1$s, %2$s = %3$s, %4$s = %5$s)</i>.", planningState[["N"]] - planningState[["sampleSize"]] + planningState[["expectedSampleError"]], "\u03B1", planningState[["prior"]]$aPrior, "\u03B2", planningState[["prior"]]$bPrior)) } summaryTable$addFootnote(message) k <- base::switch(options[["expectedErrors"]], "expectedRelative" = planningState[["expectedSampleError"]], "expectedAbsolute" = paste( planningOptions[["valuta"]], options[["expectedNumber"]])) n <- planningState[["sampleSize"]] row <- data.frame(materiality = planningOptions[["materialityLabel"]], IR = paste0(round(inherentRisk * 100, 2), "%"), CR = paste0(round(controlRisk * 100, 2), "%"), DR = paste0(round(detectionRisk * 
100, 2), "%"), k = k, n = n) if(type == "bayesian" && (options[["expectedEvidenceRatio"]] || options[["expectedBayesFactor"]])){ expResult <- .auditExpectedEvidenceRatio(planningState) if(options[["expectedEvidenceRatio"]]){ expectedEvidenceRatio <- round(expResult[["posteriorEvidenceRatio"]], 2) row <- cbind(row, expectedEvidenceRatio = expectedEvidenceRatio) } if(options[["expectedBayesFactor"]]){ expectedBayesFactor <- round(expResult[["expectedShift"]], 2) row <- cbind(row, expectedBayesFactor = expectedBayesFactor) } } summaryTable$addRows(row) } .sampleSizeComparisonPlot <- function(options, planningOptions, planningState, planningContainer, jaspResults, ready, type, positionInContainer){ if(!options[["decisionPlot"]]) return() .updateFigNumber(jaspResults) if(is.null(planningContainer[["decisionPlot"]])){ decisionPlot <- createJaspPlot(plot = NULL, title = gettext("Sample Size Comparison"), width = 600, height = 300) decisionPlot$position <- positionInContainer decisionPlot$dependOn(options = c("decisionPlot")) planningContainer[["decisionPlot"]] <- decisionPlot if(!ready || planningContainer$getError()) return() auditRisk <- 1 - options[["confidence"]] if(options[["IR"]] != "Custom"){ inherentRisk <- base::switch(options[["IR"]], "Low" = 0.50, "Medium" = 0.60, "High" = 1) } else { inherentRisk <- options[["irCustom"]] } if(options[["CR"]] != "Custom"){ controlRisk <- base::switch(options[["CR"]], "Low" = 0.50, "Medium" = 0.60, "High" = 1) } else { controlRisk <- options[["crCustom"]] } detectionRisk <- auditRisk / inherentRisk / controlRisk if(type == "frequentist"){ adjustedConfidence <- 1 - detectionRisk startProgressbar(3) n1 <- jfa::planning(materiality = planningOptions[["materiality"]], confidence = adjustedConfidence, expectedError = planningOptions[["expectedErrors"]], likelihood = "binomial", N = planningOptions[["populationSize"]]) progressbarTick() n2 <- jfa::planning(materiality = planningOptions[["materiality"]], confidence = 
adjustedConfidence, expectedError = planningOptions[["expectedErrors"]], likelihood = "poisson", N = planningOptions[["populationSize"]])
      progressbarTick()
      n3 <- jfa::planning(materiality = planningOptions[["materiality"]], confidence = adjustedConfidence, expectedError = planningOptions[["expectedErrors"]], likelihood = "hypergeometric", N = planningOptions[["populationSize"]])
      progressbarTick()
      # Collect the three sample sizes and the expected errors within them.
      n <- c(n1$sampleSize, n2$sampleSize, n3$sampleSize)
      k <- c(n1$expectedSampleError, n2$expectedSampleError, n3$expectedSampleError)
      d <- data.frame(y = c(n, k), dist = rep(c("Binomial", "Poisson", "Hypergeometric"), 2), nature = rep(c(gettext("Expected error-free"), gettext("Expected errors")), each = 3))
      # FIX: since R 4.0, data.frame() no longer converts strings to factors,
      # so the old levels(d$dist)[c(2, 3, 1)] reordering operated on NULL
      # levels. Spell the intended display order out explicitly instead.
      d$dist <- factor(x = d$dist, levels = c("Hypergeometric", "Poisson", "Binomial"))
      d$nature <- factor(d$nature, levels = c(gettext("Expected error-free"), gettext("Expected errors")))
    } else if(type == "bayesian"){
      startProgressbar(3)
      # One prior + planning run per likelihood (binomial, poisson, hypergeometric).
      p1 <- jfa::auditPrior(materiality = planningOptions[["materiality"]], confidence = planningOptions[["confidence"]], expectedError = planningOptions[["expectedErrors"]], likelihood = "binomial", N = planningOptions[["populationSize"]], ir = inherentRisk, cr = controlRisk)
      n1 <- jfa::planning(materiality = planningOptions[["materiality"]], confidence = planningOptions[["confidence"]], expectedError = planningOptions[["expectedErrors"]], N = planningOptions[["populationSize"]], prior = p1)
      progressbarTick()
      p2 <- jfa::auditPrior(materiality = planningOptions[["materiality"]], confidence = planningOptions[["confidence"]], expectedError = planningOptions[["expectedErrors"]], likelihood = "poisson", N = planningOptions[["populationSize"]], ir = inherentRisk, cr = controlRisk)
      n2 <- jfa::planning(materiality = planningOptions[["materiality"]], confidence = planningOptions[["confidence"]], expectedError = planningOptions[["expectedErrors"]], N = planningOptions[["populationSize"]], prior = p2)
      progressbarTick()
      p3 <- jfa::auditPrior(materiality = planningOptions[["materiality"]], confidence = planningOptions[["confidence"]], expectedError = planningOptions[["expectedErrors"]], likelihood = "hypergeometric", N = planningOptions[["populationSize"]], ir = inherentRisk, cr = controlRisk)
      n3 <- jfa::planning(materiality = planningOptions[["materiality"]], confidence = planningOptions[["confidence"]], expectedError = planningOptions[["expectedErrors"]], N = planningOptions[["populationSize"]], prior = p3)
      progressbarTick()
      n <- c(n1$sampleSize, n2$sampleSize, n3$sampleSize)
      k <- c(n1$expectedSampleError, n2$expectedSampleError, n3$expectedSampleError)
      d <- data.frame(y = c(n, k), dist = rep(c("Beta", "Gamma", "Beta-binomial"), 2), nature = rep(c(gettext("Expected error-free"), gettext("Expected errors")), each = 3))
      # FIX: same R >= 4.0 stringsAsFactors issue as in the frequentist branch.
      d$dist <- factor(x = d$dist, levels = c("Beta-binomial", "Gamma", "Beta"))
      d$nature <- factor(x = d$nature, levels = c(gettext("Expected error-free"), gettext("Expected errors")))
    }
    # Axis breaks/limits leave ~20% headroom above the largest sample size.
    yBreaks <- jaspGraphs::getPrettyAxisBreaks(0:(ceiling(1.1 * max(n))), min.n = 4)
    yLimits <- c(0, ceiling(1.2 * max(n)))
    myTheme <- ggplot2::theme(axis.ticks.x = ggplot2::element_blank(), axis.ticks.y = ggplot2::element_blank(), axis.text.y = ggplot2::element_text(hjust = 0), panel.grid.major.x = ggplot2::element_line(color = "#cbcbcb", size = 0.5), legend.text = ggplot2::element_text(margin = ggplot2::margin(l = 0, r = 30)))
    # Horizontal stacked bars: red = expected errors, green = error-free part,
    # annotated with the numeric values next to each bar.
    p <- ggplot2::ggplot(data = d, mapping = ggplot2::aes(x = dist, y = y, fill = nature)) + ggplot2::geom_bar(stat = "identity", col = "black", size = 1) + ggplot2::scale_y_continuous(breaks = yBreaks, limits = yLimits) + ggplot2::coord_flip() + ggplot2::annotate("text", y = k, x = c(3, 2, 1), label = k, size = 6, vjust = 0.5, hjust = -0.3) + ggplot2::annotate("text", y = n, x = c(3, 2, 1), label = n, size = 6, vjust = 0.5, hjust = -0.5) + ggplot2::xlab("") + ggplot2::ylab(gettext("Required sample size")) + ggplot2::labs(fill = "") + ggplot2::scale_fill_manual(values=c("#7FE58B", "#FF6666"), guide = ggplot2::guide_legend(reverse = TRUE))
    p <- jaspGraphs::themeJasp(p, sides = "", legend.position
= "top") + myTheme decisionPlot$plotObject <- p } if(options[["explanatoryText"]] && ready){ decisionPlotText <- createJaspHtml(gettextf("<b>Figure %1$i.</b> Sample size comparison for the current options. The bars represent the sample size that is required under different planning distributions. The number of expected errors in the selection is colored in red and the number of expected error-free observations is colored in green.", jaspResults[["figNumber"]]$object), "p") decisionPlotText$position <- positionInContainer + 1 decisionPlotText$dependOn(optionsFromObject = planningContainer[["decisionPlot"]]) decisionPlotText$dependOn(options = "explanatoryText") planningContainer[["decisionPlotText"]] <- decisionPlotText } } .samplingDistributionPlot <- function(options, planningOptions, planningState, planningContainer, jaspResults, ready, positionInContainer){ if(!options[["samplingDistribution"]]) return() .updateFigNumber(jaspResults) if(is.null(planningContainer[["samplingDistribution"]])){ likelihood <- base::switch(options[["planningModel"]], "Poisson" = "Poisson", "binomial" = "Binomial", "hypergeometric" = "Hypergeometric") plotTitle <- gettextf("Implied %1$s Sampling Distribution", likelihood) samplingDistribution <- createJaspPlot(plot = NULL, title = plotTitle, width = 600, height = 300) samplingDistribution$position <- positionInContainer samplingDistribution$dependOn(options = c("planningModel", "samplingDistribution")) planningContainer[["samplingDistribution"]] <- samplingDistribution if(!ready || planningContainer$getError()) return() xVals <- 0:planningState[["sampleSize"]] limx <- planningState[["sampleSize"]] + 1 if(limx > 31) limx <- 31 xVals <- xVals[1:limx] if(planningState[["likelihood"]] == "poisson"){ dErrorFree <- stats::dpois(x = xVals, lambda = planningState[["materiality"]] * planningState[["sampleSize"]]) dError <- stats::dpois(x = 0:planningState[["expectedSampleError"]], lambda = planningState[["materiality"]] * 
planningState[["sampleSize"]]) } else if(planningState[["likelihood"]] == "binomial"){ dErrorFree <- stats::dbinom(x = xVals, size = planningState[["sampleSize"]], prob = planningState[["materiality"]]) dError <- stats::dbinom(x = 0:planningState[["expectedSampleError"]], size = planningState[["sampleSize"]], prob = planningState[["materiality"]]) } else if(planningState[["likelihood"]] == "hypergeometric"){ dErrorFree <- stats::dhyper(x = xVals, m = planningState[["populationK"]], n = planningState[["N"]] - planningState[["populationK"]], k = planningState[["sampleSize"]]) dError <- stats::dhyper(x = 0:planningState[["expectedSampleError"]], m = planningState[["populationK"]], n = planningState[["N"]] - planningState[["populationK"]], k = planningState[["sampleSize"]]) } dataErrorFree <- data.frame(x = xVals, y = dErrorFree) dataError <- data.frame(x = 0:planningState[["expectedSampleError"]], y = dError) dataLegend <- data.frame(x = c(0, 0), y = c(0, 0), type = c(gettext("Expected error-free"), gettext("Expected errors"))) dataLegend$type <- factor(x = dataLegend[["type"]], levels = levels(dataLegend[["type"]])[c(2,1)]) xTicks <- jaspGraphs::getPrettyAxisBreaks(c(0, xVals)) yTicks <- jaspGraphs::getPrettyAxisBreaks(c(0, dataErrorFree[["y"]])) myLegend <- ggplot2::guide_legend(override.aes=list(size = 12, shape = 22, fill = c("#FF6666", "#7FE58B"), stroke = 1.5, color = "black")) myTheme <- ggplot2::theme(panel.grid.major.y = ggplot2::element_line(color = "#cbcbcb", size = 0.5), legend.text = ggplot2::element_text(margin = ggplot2::margin(l = 0, r = 30))) p <- ggplot2::ggplot(data = dataLegend, mapping = ggplot2::aes(x = x, y = y, fill = type)) + ggplot2::geom_point(shape = 2, alpha = 0) + ggplot2::scale_x_continuous(name = gettext("Errors"), labels = xTicks, breaks = xTicks) + ggplot2::scale_y_continuous(name = gettext("Probability"), labels = yTicks, breaks = yTicks, limits = range(yTicks)) + ggplot2::geom_bar(data = dataErrorFree, mapping = ggplot2::aes(x = x, 
y = y), stat = "identity", fill = "#7FE58B", size = 0.5, color = "black") + ggplot2::geom_bar(data = dataError, mapping = ggplot2::aes(x = x, y = y), stat = "identity", fill = "#FF6666", size = 0.5, color = "black") + ggplot2::geom_point(data = dataLegend, mapping = ggplot2::aes(x = x, y = y, fill = type), size = 0) + ggplot2::scale_fill_manual(values=c("#7FE58B", "#FF6666"), guide = ggplot2::guide_legend(reverse = TRUE)) + ggplot2::labs(fill = "") + ggplot2::guides(fill = myLegend) p <- jaspGraphs::themeJasp(p, legend.position = "top") + myTheme samplingDistribution$plotObject <- p } if(options[["explanatoryText"]] && ready){ samplingDistributionText <- createJaspHtml(gettextf("<b>Figure %1$i.</b> The implied <b>%2$s</b> sampling distribution. The number of expected errors in the selection is colored in red and the number of expected error-free observations is colored in green. The total probability of the errors does not exceed the detection risk as specified through the audit risk model.", jaspResults[["figNumber"]]$object, options[["planningModel"]]), "p") samplingDistributionText$position <- positionInContainer + 1 samplingDistributionText$dependOn(optionsFromObject = planningContainer[["samplingDistribution"]]) samplingDistributionText$dependOn(options = "explanatoryText") planningContainer[["samplingDistributionText"]] <- samplingDistributionText } } ################################################################################ ################## Common functions for the selection stage #################### ################################################################################ .auditAddSelectionColumns <- function(options, jaspResults){ dataset <- .auditReadDataset(options, jaspResults, stage = "procedure") rankingVariable <- options[["rankingVariable"]] if(rankingVariable == "") rankingVariable <- NULL additionalVariables <- unlist(options[["additionalVariables"]]) variables <- c(rankingVariable, additionalVariables) if(!is.null(variables)){ 
additionalColumns <- .readDataSetToEnd(columns.as.numeric = variables)
    dataset <- cbind(dataset, additionalColumns)
    return(dataset)
  } else {
    return(dataset)
  }
}

# Writes the sample-indicator column back to the JASP data set: a vector of
# length equal to the population size holding, for each population row, how
# often that row was selected (0 for rows outside the sample).
.auditAddSelectionIndicator <- function(options, planningOptions, selectionState, jaspResults){
  if(!options[["addSampleIndicator"]] || options[["sampleIndicatorColumn"]] == "")
    return()
  if(is.null(jaspResults[["sampleIndicatorColumn"]])){
    # Start from all zeros, then fill in the selection counts at the sampled rows.
    sampleIndicatorColumn <- numeric(length = planningOptions[["populationSize"]])
    sampleRowNumbers <- selectionState[["rowNumber"]]
    sampleCounts <- selectionState[["count"]]
    sampleIndicatorColumn[sampleRowNumbers] <- sampleCounts
    jaspResults[["sampleIndicatorColumn"]] <- createJaspColumn(columnName = options[["sampleIndicatorColumn"]])
    jaspResults[["sampleIndicatorColumn"]]$dependOn(options = c("recordNumberVariable", "monetaryVariable", "additionalVariables", "rankingVariable", "selectionMethod", "selectionType", "seed", "intervalStartingPoint", "sampleSize", "addSampleIndicator", "sampleIndicatorColumn"))
    jaspResults[["sampleIndicatorColumn"]]$setNominal(sampleIndicatorColumn)
  }
}

# Returns the cached selection (sample) or computes it via .auditSampling().
# When monetary-unit sampling (MUS) fails, falls back to record sampling and
# tags the result with musFailed = TRUE so downstream tables can report the
# fallback. On a hard failure the error is set on the selection container.
.auditSelectionState <- function(dataset, options, planningState, selectionContainer){
  if(!is.null(selectionContainer[["selectionState"]])){
    # Cached result from a previous run.
    return(selectionContainer[["selectionState"]]$object)
  } else if(!is.null(planningState)){
    result <- try({
      .auditSampling(dataset, options, planningState, selectionContainer)
    })
    if(isTryError(result)){
      if(options[["selectionType"]] == "musSampling"){
        # MUS has failed for some reason, fall back to record sampling
        result <- try({
          .auditSampling(dataset, options, planningState, selectionContainer, unitsExtra = "records")
        })
      }
      if(isTryError(result)){
        # Fallback also failed (or selection type was not MUS): surface the error.
        selectionContainer$setError(gettextf("An error occurred: %1$s", jaspBase:::.extractErrorMessage(result)))
        return()
      } else {
        # MUS has failed for some reason, return an indication for this
        result[["musFailed"]] <- TRUE
      }
    }
    selectionContainer[["selectionState"]] <- createJaspState(result)
    return(result)
  }
}
.auditSampling <- function(dataset, options, planningState, selectionContainer, unitsExtra = NULL){
  # Draws the planned sample from the population via jfa::sampling().
  #
  # dataset:            the population data (already read and augmented).
  # options:            analysis options (selection type/method, seed, variables).
  # planningState:      planning result; supplies the required sample size.
  # selectionContainer: output container; receives errors raised here.
  # unitsExtra:         optional override of the sampling units; used by the
  #                     caller to fall back to record sampling when MUS fails.
  # Returns a data.frame with the selected rows, or NULL when nothing can be
  # sampled (zero sample size or no data).
  if(!is.null(unitsExtra)){
    units <- unitsExtra
  } else {
    units <- base::switch(options[["selectionType"]], "recordSampling" = "records", "musSampling" = "mus")
  }
  algorithm <- base::switch(options[["selectionMethod"]], "randomSampling" = "random", "cellSampling" = "cell", "systematicSampling" = "interval")
  # Optionally order the population by the ranking variable before selection.
  if(options[["rankingVariable"]] != ""){
    rankingColumn <- dataset[, .v(options[["rankingVariable"]])]
    dataset <- dataset[order(rankingColumn), ]
  }
  if(options[["monetaryVariable"]] != ""){
    bookValues <- .v(options[["monetaryVariable"]])
  } else {
    bookValues <- NULL
  }
  if(planningState[["sampleSize"]] == 0 || is.null(dataset))
    return()
  # For interval (systematic) selection the starting point must lie inside the
  # first interval; the interval is counted in records or in monetary units.
  if(units == "records" && algorithm == "interval"){
    interval <- ceiling(nrow(dataset) / planningState[["sampleSize"]])
    if(options[["seed"]] > interval){
      selectionContainer$setError(gettext("Your specified starting point lies outside the selection interval."))
      return()
    }
  } else if (units == "mus" && algorithm == "interval"){
    interval <- ceiling(sum(dataset[, bookValues]) / planningState[["sampleSize"]])
    if(options[["seed"]] > interval){
      # FIX: this message was a bare string; wrap it in gettext() so it is
      # translatable, consistent with the record-sampling branch above.
      selectionContainer$setError(gettext("Your specified starting point lies outside the selection interval."))
      return()
    }
  }
  # The seed option doubles as the starting point for interval selection.
  sample <- jfa::sampling(population = dataset, sampleSize = planningState[["sampleSize"]], algorithm = algorithm, units = units, seed = options[["seed"]], ordered = FALSE, bookValues = bookValues, intervalStartingPoint = options[["seed"]])
  sample <- data.frame(sample[["sample"]])
  # First two columns (row number and count) must be numeric for later use.
  sample[, 1:2] <- apply(X = sample[, 1:2], MARGIN = 2, as.numeric)
  return(sample)
}

# Builds the "Selection Summary" table for the selection stage.
.auditSelectionSummaryTable <- function(options, planningOptions, planningState, selectionState, selectionContainer, jaspResults, positionInContainer){
  .updateTabNumber(jaspResults)
  if(!is.null(selectionContainer[["selectionInformationTable"]]))
    return()
  tableTitle <- gettextf("<b>Table %1$i.</b> Selection Summary",
jaspResults[["tabNumber"]]$object) selectionInformationTable <- createJaspTable(tableTitle) selectionInformationTable$position <- positionInContainer selectionInformationTable$dependOn(options = c("bookValueDescriptives", "sampleDescriptives", "displaySample", "samplingChecked", "evaluationChecked")) selectionInformationTable$addColumnInfo(name = "size", title = gettext("Selection size"), type = "integer") if(options[["materiality"]] == "materialityAbsolute"){ selectionInformationTable$addColumnInfo(name = "value", title = gettext("Selection value"), type = "string") selectionInformationTable$addColumnInfo(name = "percentage", title = gettextf("%% of population value"), type = "string") } else { selectionInformationTable$addColumnInfo(name = "percentage", title = gettextf("%% of total observations"), type = "string") } if(options[["selectionMethod"]] != "randomSampling") selectionInformationTable$addColumnInfo(name = "interval", title ="Interval", type = "string") if(options[["selectionMethod"]] != "systematicSampling"){ message <- gettextf("The sample is drawn with <i>seed %1$s</i>.", options[["seed"]]) } else { message <- gettextf("Unit %1$s is selected from each interval.", options[["seed"]]) } selectionInformationTable$addFootnote(message) selectionContainer[["selectionInformationTable"]] <- selectionInformationTable if(is.null(selectionState)) return() if(options[["selectionType"]] == "recordSampling" || !is.null(selectionState[["musFailed"]])){ interval <- ceiling(planningOptions[["populationSize"]] / planningState[["sampleSize"]]) } else { interval <- ceiling(planningOptions[["populationValue"]] / planningState[["sampleSize"]]) } sampleSize <- sum(selectionState[["count"]]) if(options[["materiality"]] == "materialityAbsolute"){ value <- ceiling(sum(abs(selectionState[, .v(options[["monetaryVariable"]])]))) percentage <- paste0(round(value / planningOptions[["populationValue"]] * 100, 2), "%") row <- data.frame("size" = sampleSize, "value" = 
paste(planningOptions[["valuta"]], value), "percentage" = percentage) } else { percentage <- paste0(round(sampleSize / planningOptions[["populationSize"]] * 100, 2), "%") row <- data.frame("size" = sampleSize, "percentage" = percentage) } if(options[["selectionMethod"]] != "randomSampling"){ if(options[["selectionType"]] == "musSampling" && is.null(selectionState[["musFailed"]])){ row <- cbind(row, interval = paste(planningOptions[["valuta"]], interval)) } else { row <- cbind(row, interval = interval) } } selectionInformationTable$addRows(row) } .auditSelectionSampleTable <- function(options, selectionState, selectionContainer, jaspResults, positionInContainer){ if(!options[["displaySample"]]) return() .updateTabNumber(jaspResults) if(is.null(selectionContainer[["selectionSampleTable"]])){ tableTitle <- gettextf("<b>Table %1$i.</b> Selected Observations", jaspResults[["tabNumber"]]$object) sampleTable <- createJaspTable(tableTitle) sampleTable$position <- positionInContainer sampleTable$dependOn(options = c("bookValueDescriptives", "sampleDescriptives", "displaySample", "samplingChecked", "evaluationChecked")) recordNumberVariable <- .auditReadVariableFromOptions(options, varType = "recordNumber") monetaryVariable <- .auditReadVariableFromOptions(options, varType = "monetary") rankingVariable <- .auditReadVariableFromOptions(options, varType = "ranking") additionalVariables <- .auditReadVariableFromOptions(options, varType = "additional") columnNames <- c("Row number", "Count", recordNumberVariable, monetaryVariable, rankingVariable, additionalVariables) for(i in columnNames){ sampleTable$addColumnInfo(name = i, type = "string", title = i) } selectionContainer[["sampleTable"]] <- sampleTable if(is.null(selectionState) || selectionContainer$getError()) return() dat <- as.data.frame(selectionState) colnames(dat) <- columnNames sampleTable$setData(dat) } } .auditSelectionDescriptivesTable <- function(options, selectionState, selectionContainer, jaspResults, 
                                             positionInContainer){
  if(!options[["sampleDescriptives"]]) return()
  .updateTabNumber(jaspResults)
  if(is.null(selectionContainer[["sampleDescriptivesTable"]])){
    # Collect the variables to describe; empty option strings mean "not assigned"
    # and are mapped to NULL so c() silently drops them below.
    recordVariable <- options[["recordNumberVariable"]]
    if(recordVariable == "") recordVariable <- NULL
    rankingVariable <- options[["rankingVariable"]]
    if(rankingVariable == "") rankingVariable <- NULL
    monetaryVariable <- unlist(options[["monetaryVariable"]])
    if(monetaryVariable == "") monetaryVariable <- NULL
    variables <- unlist(options[["additionalVariables"]])
    all.variables <- c(rankingVariable, monetaryVariable, variables)
    tableTitle <- gettextf("<b>Table %1$i.</b> Selection Descriptive Statistics", jaspResults[["tabNumber"]]$object)
    sampleDescriptivesTable <- createJaspTable(tableTitle)
    # Transposed layout: one row per variable, one column per statistic.
    sampleDescriptivesTable$transpose <- TRUE
    sampleDescriptivesTable$position <- positionInContainer
    sampleDescriptivesTable$dependOn(options = c("sampleDescriptives", "mean", "sd", "var", "range", "min", "max", "median", "bookValueDescriptives", "sampleDescriptives", "displaySample", "samplingChecked", "evaluationChecked"))
    sampleDescriptivesTable$addColumnInfo(name = "name", type = "string", title = "")
    sampleDescriptivesTable$addColumnInfo(name = gettext("Valid cases"), type = "integer")
    # Each statistic column is only added when the corresponding option is checked.
    if (options[["mean"]]) sampleDescriptivesTable$addColumnInfo(name = "Mean", type = "number", title = gettext("Mean"))
    if (options[["median"]]) sampleDescriptivesTable$addColumnInfo(name = "Median", type = "number", title = gettext("Median"))
    if (options[["sd"]]) sampleDescriptivesTable$addColumnInfo(name = "Std. Deviation", type = "number", title = gettext("Std. Deviation"))
    if (options[["var"]]) sampleDescriptivesTable$addColumnInfo(name = "Variance", type = "number", title = gettext("Variance"))
    if (options[["range"]]) sampleDescriptivesTable$addColumnInfo(name = "Range", type = "number", title = gettext("Range"))
    if (options[["min"]]) sampleDescriptivesTable$addColumnInfo(name = "Minimum", type = "number", title = gettext("Minimum"))
    if (options[["max"]]) sampleDescriptivesTable$addColumnInfo(name = "Maximum", type = "number", title = gettext("Maximum"))
    selectionContainer[["sampleDescriptivesTable"]] <- sampleDescriptivesTable
    if(is.null(selectionState) || selectionContainer$getError()) return()
    for (variable in all.variables) {
      column <- selectionState[[ .v(variable) ]]
      row <- list()
      row[["name"]] <- variable
      # NOTE(review): length() also counts NA entries, so "Valid cases" includes
      # missing values — confirm whether sum(!is.na(column)) was intended.
      row[["Valid cases"]] <- base::length(column)
      # Numeric statistics are skipped for factor columns.
      if(!is.factor(column)) {
        if(options[["mean"]]) row[["Mean"]] <- base::mean(column, na.rm = TRUE)
        if(options[["sd"]]) row[["Std. Deviation"]] <- stats::sd(column, na.rm = TRUE)
        if(options[["var"]]) row[["Variance"]] <- stats::var(column, na.rm = TRUE)
        if(options[["median"]]) row[["Median"]] <- stats::median(column, na.rm = TRUE)
        if(options[["range"]]) row[["Range"]] <- base::abs(base::range(column, na.rm = TRUE)[1] - base::range(column, na.rm = TRUE)[2])
        if(options[["min"]]) row[["Minimum"]] <- base::min(column, na.rm = TRUE)
        if(options[["max"]]) row[["Maximum"]] <- base::max(column, na.rm = TRUE)
      }
      sampleDescriptivesTable$addRows(row)
    }
  }
}

# Creates overlayed population-vs-sample histograms for every numeric variable
# in the selection (book values first, then any additional variables).
.auditSelectionHistograms <- function(options, dataset, selectionState, selectionContainer, jaspResults, positionInContainer){
  if (!is.null(selectionContainer[["plotHistograms"]]) || !options[["plotHistograms"]]) return()
  .updateFigNumber(jaspResults)
  plotHistograms <- createJaspContainer(gettext("Population and sample histograms"))
  plotHistograms$dependOn(options = c("recordNumberVariable", "monetaryVariable", "additionalVariables", "rankingVariable", "selectionMethod", "selectionType", "seed", "intervalStartingPoint",
"sampleSize", "plotHistograms")) plotHistograms$position <- positionInContainer selectionContainer[["plotHistograms"]] <- plotHistograms if(options[["recordNumberVariable"]] == "" || options[["monetaryVariable"]] == "" || options[["sampleSize"]] == 0) return() variables <- colnames(selectionState)[-(1:3)] for(i in 1:length(variables)){ if(i == 1){ popData <- dataset[, .v(options[["monetaryVariable"]])] } else { popData <- dataset[, variables[i]] } sampleData <- selectionState[, variables[i]] if(!is.numeric(popData) || !is.numeric(sampleData)){ next } plotData <- data.frame(x = c(popData, sampleData), type = c(rep("Population", length(popData)), rep("Sample", length(sampleData)))) xBreaks <- jaspGraphs::getPrettyAxisBreaks(popData, min.n = 4) yBreaks <- jaspGraphs::getPrettyAxisBreaks(hist(popData, plot = FALSE, breaks = 50)$counts, min.n = 4) p <- ggplot2::ggplot(data = plotData, mapping = ggplot2::aes(x = x, fill = type)) + ggplot2::geom_histogram(bins = 50, col = "black", position = "identity") + ggplot2::labs(fill = "") + ggplot2::scale_x_continuous(name = "Value", breaks = xBreaks, limits = c(min(xBreaks), max(xBreaks))) + ggplot2::scale_y_continuous(name = "Frequency", breaks = yBreaks, limits = c(min(yBreaks), max(yBreaks))) + ggplot2::scale_fill_manual(values = c("#0063B2FF", "#9CC3D5FF")) p <- jaspGraphs::themeJasp(p, legend.position = "top") if(i == 1){ plotHistograms[[variables[i]]] <- createJaspPlot(plot = p, title = "Book values", height = 300, width = 500) plotHistograms[[variables[i]]]$dependOn(optionContainsValue = list("monetaryVariable" = variables[i])) } else{ plotHistograms[[variables[i]]] <- createJaspPlot(plot = p, title = options[["additionalVariables"]][i - 1], height = 300, width = 500) plotHistograms[[variables[i]]]$dependOn(optionContainsValue = list("additionalVariables" = variables[i])) } } explanatoryText <- TRUE # Will be added as an option later if(explanatoryText){ histogramPlotText <- createJaspHtml(gettextf("<b>Figure %1$i.</b> The 
distributions of numeric variables in the population compared to the distributions in the sample.", jaspResults[["figNumber"]]$object), "p") histogramPlotText$position <- positionInContainer + 1 histogramPlotText$dependOn(optionsFromObject = selectionContainer[["plotHistograms"]]) histogramPlotText$dependOn(options = "explanatoryText") selectionContainer[["histogramPlotText"]] <- histogramPlotText } } ################################################################################ ################## Common functions for the evaluation ######################### ################################################################################ .auditAddEvaluationColumns <- function(options, jaspResults){ dataset <- .auditAddSelectionColumns(options, jaspResults) sampleFilter <- options[["sampleFilter"]] auditResult <- options[["auditResult"]] variables <- c(sampleFilter, auditResult) if(!("" %in% variables)){ additionalColumns <- .readDataSetToEnd(columns.as.numeric = variables) dataset <- cbind(dataset, additionalColumns) return(dataset) } else { return(dataset) } } .auditEvaluationState <- function(options, planningOptions, sample, evaluationContainer, type){ if(options[["auditResult"]] == "") return() if(!is.null(evaluationContainer[["evaluationState"]])){ return(evaluationContainer[["evaluationState"]]$object) } else { auditRisk <- 1 - options[["confidence"]] if(options[["IR"]] != "Custom"){ inherentRisk <- base::switch(options[["IR"]], "Low" = 0.50, "Medium" = 0.60, "High" = 1) } else { inherentRisk <- options[["irCustom"]] } if(options[["CR"]] != "Custom"){ controlRisk <- base::switch(options[["CR"]], "Low" = 0.50, "Medium" = 0.60, "High" = 1) } else { controlRisk <- options[["crCustom"]] } detectionRisk <- auditRisk / inherentRisk / controlRisk if(type == "frequentist"){ confidence <- 1 - detectionRisk prior <- NULL } else if(type == "bayesian"){ prior <- jfa::auditPrior(materiality = planningOptions[["materiality"]], confidence = 
                                planningOptions[["confidence"]], expectedError = planningOptions[["expectedErrors"]], likelihood = planningOptions[["likelihood"]], N = planningOptions[["populationSize"]], ir = inherentRisk, cr = controlRisk)
      confidence <- options[["confidence"]]
    }
    # Select evaluation method
    if(options[["variableType"]] == "variableTypeCorrect"){
      # Attribute evaluation: audit result is a 0/1 correct/incorrect column.
      method <- options[["planningModel"]]
      if(method == "Poisson") method <- "poisson"
      nSumstats <- nrow(sample)
      kSumstats <- length(which(sample[, .v(options[["auditResult"]])] == 1))
      result <- try({
        # call jfa evaluation
        jfa::evaluation(sample = sample, confidence = confidence, nSumstats = nSumstats, kSumstats = kSumstats, method = method, materiality = planningOptions[["materiality"]], N = planningOptions[["populationSize"]], prior = prior)
      })
    } else if(options[["variableType"]] == "variableTypeAuditValues"){
      # Monetary evaluation: map the UI estimator option to a jfa method name.
      method <- base::switch(options[["estimator"]], "stringerBound" = "stringer", "regressionBound" = "regression", "directBound" = "direct", "differenceBound" = "difference", "ratioBound" = "quotient", "coxAndSnellBound" = "coxsnell", "betaBound" = "binomial", "gammaBound" = "poisson", "betabinomialBound" = "hypergeometric")
      if(method == "stringer" && options[["stringerBoundLtaAdjustment"]]) method <- "stringer-lta"
      # Adjust the confidence since jfa only returns a confidence interval
      if(method %in% c("direct", "difference", "quotient", "regression")){
        confidence <- confidence + ((1 - confidence) / 2)
      }
      # Bayesian regression is not implemented in jfa R package
      if(type == "bayesian" && method == "regression"){
        result <- try({
          .auditBayesianRegression(sample, confidence, options, planningOptions)
        })
      } else {
        result <- try({
          # call jfa evaluation
          jfa::evaluation(sample = sample, confidence = confidence, bookValues = .v(options[["monetaryVariable"]]), auditValues = .v(options[["auditResult"]]), method = method, materiality = planningOptions[["materiality"]], N = planningOptions[["populationSize"]], populationBookValue = planningOptions[["populationValue"]], prior = prior)
      })
      }
    }
    if(isTryError(result)){
      evaluationContainer$setError(paste0("An error occurred: ", jaspBase:::.extractErrorMessage(result)))
      return()
    }
    # Interval-based estimators return a lower bound on the population value;
    # convert it to an upper bound on the misstatement fraction.
    # NOTE(review): unlike .auditEvaluationAnalysisState, this check is not
    # also conditioned on variableType == "variableTypeAuditValues" — confirm
    # the estimator option cannot hold these values in the attribute route.
    if(options[["estimator"]] %in% c("directBound", "differenceBound", "ratioBound", "regressionBound")){
      result[["confBound"]] <- (planningOptions[["populationValue"]] - result[["lowerBound"]]) / planningOptions[["populationValue"]]
      if(result[["confBound"]] < planningOptions[["materiality"]]){
        result[["conclusion"]] <- "Approve population"
      } else {
        result[["conclusion"]] <- "Do not approve population"
      }
    }
    evaluationContainer[["evaluationState"]] <- createJaspState(result)
    return(result)
  }
}

# Writes the explanatory HTML paragraph summarising the evaluation outcome
# (sample size, errors found, and the resulting confidence/credible bound).
.auditExplanatoryTextEvaluation <- function(options, planningOptions, planningState, selectionState, evaluationContainer, type, positionInContainer = 1){
  if(options[["explanatoryText"]]){
    # Determine whether all inputs needed for a real evaluation are present.
    # NOTE(review): if none of the three conditions below holds, `ready` is
    # never assigned and if(ready) errors — confirm the option combinations
    # guarantee at least one branch runs.
    if(options[["variableType"]] == "variableTypeCorrect" && !options[["useSumStats"]]) ready <- options[["auditResult"]] != "" && options[["recordNumberVariable"]] != "" && planningOptions[["materiality"]] != 0
    if(options[["variableType"]] == "variableTypeAuditValues" && !options[["useSumStats"]]) ready <- options[["auditResult"]] != "" && options[["recordNumberVariable"]] != "" && options[["monetaryVariable"]] != "" && planningOptions[["materiality"]] != 0
    if(options[["variableType"]] == "variableTypeCorrect" && options[["useSumStats"]]) ready <- options[["nSumStats"]] > 0 && planningOptions[["materiality"]] != 0
    if(ready){
      evaluationState <- evaluationContainer[["evaluationState"]]$object
      if(options[["estimator"]] %in% c("directBound", "differenceBound", "ratioBound", "regressionBound")){
        # Interval estimators: derive the misstatement bound from the lower
        # bound on the population value.
        confidenceBound <- (planningOptions[["populationValue"]] - evaluationState[["lowerBound"]]) / planningOptions[["populationValue"]]
      } else {
        confidenceBound <- evaluationState[["confBound"]]
      }
      errorLabel <- evaluationState[["k"]]
      if(options[["materiality"]] == "materialityRelative"){
        boundLabel <-
                      paste0(round(confidenceBound * 100, 2), "%")
      } else if(options[["materiality"]] == "materialityAbsolute"){
        # Absolute materiality: express the bound as a monetary amount.
        boundLabel <- paste(planningOptions[["valuta"]], round(confidenceBound * planningOptions[["populationValue"]], 2))
      }
    } else {
      # Inputs incomplete: show placeholders in the paragraph.
      boundLabel <- "..."
      errorLabel <- "..."
    }
    # When observations were selected more than once, show the planned size
    # plus a breakdown of distinct rows and multiply-selected rows.
    if(sum(selectionState[["count"]]) > nrow(selectionState)){
      sampleSizeMessage <- paste0(planningState[["sampleSize"]], " (", nrow(selectionState), " + ", length(which(selectionState[["count"]] != 1)), ")")
    } else {
      sampleSizeMessage <- planningState[["sampleSize"]]
    }
    # Frequentist and Bayesian bounds carry different interpretations.
    if(type == "frequentist"){
      additionalMessage <- gettext("probability that, when one would repeatedly sample from this population, the maximum misstatement is calculated to be lower than")
    } else if(type == "bayesian"){
      additionalMessage <- gettext("probability that the maximum misstatement is lower than")
    }
    message <- gettextf("The selection consisted of <b>%1$s</b> observations, of which <b>%2$s</b> were found to contain an error. The knowledge from these data, combined with the risk assessments results in an <b>%3$s</b> upper confidence bound of <b>%4$s</b>. The cumulative knowledge states that there is a <b>%5$s</b> %6$s <b>%7$s</b>.", sampleSizeMessage, errorLabel, planningOptions[["confidenceLabel"]], boundLabel, planningOptions[["confidenceLabel"]], additionalMessage, boundLabel)
    evaluationContainer[["evaluationParagraph"]] <- createJaspHtml(message, "p")
    evaluationContainer[["evaluationParagraph"]]$position <- positionInContainer
    evaluationContainer[["evaluationParagraph"]]$dependOn(options = "explanatoryText")
  }
}

# Reconstructs a minimal planning state for the stand-alone evaluation analysis:
# Bayesian route recomputes the jfa planning object (for the prior), the
# frequentist route only needs the realised sample size.
.auditBackwardsPlanningState <- function(options, dataset, evaluationOptions, type){
  # Proceed only when materiality is set and the variables required by the
  # chosen variable type (or the summary-statistics inputs) are available.
  if(evaluationOptions[["materiality"]] != 0 && ((options[["variableType"]] == "variableTypeAuditValues" && options[["recordNumberVariable"]] != "" && options[["monetaryVariable"]] != "" && options[["auditResult"]] != "") || (options[["variableType"]] == "variableTypeCorrect" && options[["recordNumberVariable"]] != "" && options[["auditResult"]] != "") || (options[["variableType"]] == "variableTypeCorrect" && options[["useSumStats"]] && options[["nSumStats"]] > 0))){
    if(type == "bayesian"){
      # Translate the categorical risk assessments into numeric risks.
      if(options[["IR"]] != "Custom"){
        inherentRisk <- base::switch(options[["IR"]], "Low" = 0.50, "Medium" = 0.60, "High" = 1)
      } else {
        inherentRisk <- options[["irCustom"]]
      }
      if(options[["CR"]] != "Custom"){
        controlRisk <- base::switch(options[["CR"]], "Low" = 0.50, "Medium" = 0.60, "High" = 1)
      } else {
        controlRisk <- options[["crCustom"]]
      }
      p <- jfa::auditPrior(materiality = evaluationOptions[["materiality"]], confidence = evaluationOptions[["confidence"]], expectedError = evaluationOptions[["expectedErrors"]], likelihood = evaluationOptions[["likelihood"]], N = evaluationOptions[["populationSize"]], ir = inherentRisk, cr = controlRisk)
      planningState <- jfa::planning(materiality = evaluationOptions[["materiality"]], confidence = evaluationOptions[["confidence"]], expectedError = evaluationOptions[["expectedErrors"]], N = evaluationOptions[["populationSize"]], prior = p)
      # Override the planned size with the size that was actually audited.
      if(options[["useSumStats"]]){
        planningState[["sampleSize"]] <-
                                        options[["nSumStats"]]
      } else {
        planningState[["sampleSize"]] <- nrow(dataset)
      }
      return(planningState)
    } else if(type == "frequentist"){
      # Frequentist route: only the realised sample size is needed.
      planningState <- list()
      if(options[["useSumStats"]]){
        planningState[["sampleSize"]] <- options[["nSumStats"]]
      } else {
        planningState[["sampleSize"]] <- nrow(dataset)
      }
      return(planningState)
    }
  } else {
    # Inputs incomplete: placeholder state so downstream text can render "...".
    planningState <- list()
    planningState[["sampleSize"]] <- "..."
    return(planningState)
  }
}

# Computes (and caches) the evaluation result for the stand-alone evaluation
# analysis, supporting both raw data and summary-statistics input.
.auditEvaluationAnalysisState <- function(options, sample, planningOptions, evaluationContainer, type){
  # Bail out when the variables required by the chosen variable type are missing.
  if(options[["variableType"]] == "variableTypeCorrect" && !options[["useSumStats"]] && (options[["auditResult"]] == "" || options[["recordNumberVariable"]] == "")){
    return()
  } else if(options[["variableType"]] == "variableTypeAuditValues" && !options[["useSumStats"]] && (options[["auditResult"]] == "" || options[["recordNumberVariable"]] == "" || options[["monetaryVariable"]] == "")){
    return()
  }
  if(planningOptions[["materiality"]] == 0) return()
  if(options[["useSumStats"]] && options[["nSumStats"]] == 0) return()
  # Reuse a previously computed state when available.
  if(!is.null(evaluationContainer[["evaluationState"]])){
    return(evaluationContainer[["evaluationState"]]$object)
  } else {
    # Audit risk model: translate risk assessments into numeric risks.
    auditRisk <- 1 - options[["confidence"]]
    if(options[["IR"]] != "Custom"){
      inherentRisk <- base::switch(options[["IR"]], "Low" = 0.50, "Medium" = 0.60, "High" = 1)
    } else {
      inherentRisk <- options[["irCustom"]]
    }
    if(options[["CR"]] != "Custom"){
      controlRisk <- base::switch(options[["CR"]], "Low" = 0.50, "Medium" = 0.60, "High" = 1)
    } else {
      controlRisk <- options[["crCustom"]]
    }
    if(type == "frequentist"){
      # Frequentist: risk model adjusts the confidence level; no prior.
      detectionRisk <- (1 - options[["confidence"]]) / inherentRisk / controlRisk
      confidence <- 1 - detectionRisk
      prior <- FALSE
    } else if(type == "bayesian"){
      # Bayesian: risk model is absorbed into the jfa prior distribution.
      confidence <- options[["confidence"]]
      prior <- jfa::auditPrior(materiality = planningOptions[["materiality"]], confidence = confidence, expectedError = planningOptions[["expectedErrors"]], likelihood = planningOptions[["likelihood"]], N = planningOptions[["populationSize"]], ir = inherentRisk, cr = controlRisk)
    }
    # Select evaluation method
    if(options[["variableType"]] == "variableTypeCorrect"){
      # Attribute evaluation: estimator option differs per framework.
      if(type == "frequentist"){
        method <- base::switch(options[["estimator2"]], "binomialBound" = "binomial", "poissonBound" = "poisson", "hyperBound" = "hypergeometric")
      } else if(type == "bayesian"){
        method <- base::switch(options[["estimator"]], "betaBound" = "binomial", "gammaBound" = "poisson", "betabinomialBound" = "hypergeometric")
      }
      # Derive n and k from the sample, or take them from the summary stats UI.
      if(!options[["useSumStats"]]){
        nSumstats <- nrow(sample)
        kSumstats <- length(which(sample[, .v(options[["auditResult"]])] == 1))
      } else {
        nSumstats <- options[["nSumStats"]]
        kSumstats <- options[["kSumStats"]]
      }
      result <- try({
        # call jfa evaluation
        jfa::evaluation(sample = sample, confidence = confidence, nSumstats = nSumstats, kSumstats = kSumstats, method = method, materiality = planningOptions[["materiality"]], N = planningOptions[["populationSize"]], prior = prior)
      })
    } else if(options[["variableType"]] == "variableTypeAuditValues"){
      # Monetary evaluation: map the UI estimator option to a jfa method name.
      method <- base::switch(options[["estimator"]], "stringerBound" = "stringer", "regressionBound" = "regression", "directBound" = "direct", "differenceBound" = "difference", "ratioBound" = "quotient", "coxAndSnellBound" = "coxsnell", "betaBound" = "binomial", "gammaBound" = "poisson", "betabinomialBound" = "hypergeometric")
      if(method == "stringer" && options[["stringerBoundLtaAdjustment"]]) method <- "stringer-lta"
      # Adjust the confidence since jfa only returns a confidence interval
      if(method %in% c("direct", "difference", "quotient", "regression")){
        confidence <- confidence + ((1 - confidence) / 2)
      }
      # Bayesian regression is not implemented in jfa R package
      if(type == "bayesian" && method == "regression"){
        result <- try({
          .auditBayesianRegression(sample, confidence, options, planningOptions)
        })
      } else {
        result <- try({
          # call jfa evaluation
          jfa::evaluation(sample = sample, confidence = confidence, bookValues = .v(options[["monetaryVariable"]]),
                          auditValues = .v(options[["auditResult"]]), method = method, materiality = planningOptions[["materiality"]], N = planningOptions[["populationSize"]], populationBookValue = planningOptions[["populationValue"]], prior = prior)
        })
      }
    }
    if(isTryError(result)){
      evaluationContainer$setError(paste0("An error occurred: ", jaspBase:::.extractErrorMessage(result)))
      return()
    }
    # Interval-based estimators return a lower bound on the population value;
    # convert it to an upper bound on the misstatement fraction and attach a
    # conclusion relative to materiality.
    if(options[["variableType"]] == "variableTypeAuditValues" && options[["estimator"]] %in% c("directBound", "differenceBound", "ratioBound", "regressionBound")){
      result[["confBound"]] <- (planningOptions[["populationValue"]] - result[["lowerBound"]]) / planningOptions[["populationValue"]]
      if(result[["confBound"]] < planningOptions[["materiality"]]){
        result[["conclusion"]] <- "Approve population"
      } else {
        result[["conclusion"]] <- "Do not approve population"
      }
    }
    evaluationContainer[["evaluationState"]] <- createJaspState(result)
    return(result)
  }
}

# Builds the "Evaluation Summary" table: materiality, sample size, errors,
# tainting, the confidence/credible bound (or interval), and optional MLE,
# evidence ratio and Bayes factor columns.
.auditEvaluationSummaryTable <- function(options, planningOptions, evaluationState, evaluationContainer, jaspResults, type, positionInContainer){
  .updateTabNumber(jaspResults)
  if(!is.null(evaluationContainer[["evaluationTable"]])) return()
  tableTitle <- gettextf("<b>Table %1$i.</b> Evaluation Summary", jaspResults[["tabNumber"]]$object)
  evaluationTable <- createJaspTable(tableTitle)
  evaluationTable$position <- positionInContainer
  evaluationTable$dependOn(options = c("bookValueDescriptives", "sampleDescriptives", "displaySample", "samplingChecked", "evaluationChecked", "auditResult", "evidenceRatio", "bayesFactor", "valuta", "otherValutaName", "mostLikelyError", "IR", "irCustom", "CR", "crCustom"))
  evaluationTable$addColumnInfo(name = 'materiality', title = gettext("Materiality"), type = 'string')
  evaluationTable$addColumnInfo(name = 'sampleSize', title = gettext("Sample size"), type = 'string')
  evaluationTable$addColumnInfo(name = 'fullErrors', title = gettext("Errors"), type = 'string')
  evaluationTable$addColumnInfo(name = 'totalTaint', title = gettext("Total tainting"), type = 'string')
  if(options[["mostLikelyError"]]) evaluationTable$addColumnInfo(name = 'mle', title = gettext("MLE"), type = 'string')
  if(type == "frequentist"){
    # Frequentist: bound column title carries the risk-model-adjusted confidence.
    auditRisk <- 1 - options[["confidence"]]
    if(options[["IR"]] != "Custom"){
      inherentRisk <- base::switch(options[["IR"]], "Low" = 0.50, "Medium" = 0.60, "High" = 1)
    } else {
      inherentRisk <- options[["irCustom"]]
    }
    if(options[["CR"]] != "Custom"){
      controlRisk <- base::switch(options[["CR"]], "Low" = 0.50, "Medium" = 0.60, "High" = 1)
    } else {
      controlRisk <- options[["crCustom"]]
    }
    detectionRisk <- auditRisk / inherentRisk / controlRisk
    boundTitle <- gettextf("%1$s%% Confidence bound", round((1 - detectionRisk) * 100, 2))
    evaluationTable$addColumnInfo(name = 'bound', title = boundTitle, type = 'string')
    if(options[["monetaryVariable"]] != "") evaluationTable$addColumnInfo(name = 'projm', title = gettext("Maximum Misstatement"), type = 'string')
  } else if(type == "bayesian"){
    # Bayesian: either a one-sided credible bound or a two-sided interval.
    if(options[["areaUnderPosterior"]] == "displayCredibleBound"){
      boundTitle <- paste0(options[["confidence"]] * 100,"% Credible bound")
      evaluationTable$addColumnInfo(name = 'bound', title = boundTitle, type = 'string')
      if(options[["monetaryVariable"]] != "") evaluationTable$addColumnInfo(name = 'projm', title = gettext("Maximum Misstatement"), type = 'string')
    } else if (options[["areaUnderPosterior"]] == "displayCredibleInterval"){
      boundTitle <- paste0(options[["confidence"]] * 100,"% Credible interval")
      evaluationTable$addColumnInfo(name = 'lowerBound', title = gettext("Lower"), overtitle = boundTitle, type = 'string')
      evaluationTable$addColumnInfo(name = 'upperBound', title = gettext("Upper"), overtitle = boundTitle, type = 'string')
      if(options[["monetaryVariable"]] != ""){
        evaluationTable$addColumnInfo(name = 'lowerProjm', title = gettext("Lower"), overtitle = gettext("Maximum Misstatement"), type = 'string')
        evaluationTable$addColumnInfo(name = 'upperProjm', title = gettext("Upper"), overtitle = gettext("Maximum Misstatement"), type = 'string')
      }
    }
  }
  if(type == "bayesian" && options[["evidenceRatio"]]) evaluationTable$addColumnInfo(name = 'evidenceRatio', title = gettext("Evidence ratio"), type = 'string')
  if(type == "bayesian" && options[["bayesFactor"]]) evaluationTable$addColumnInfo(name = 'bayesFactor', title = gettextf("BF%1$s", "\u208B\u208A"), type = 'string')
  # The footnote explaining which method produced the bound depends on the
  # estimator option; the frequentist attribute analysis uses estimator2.
  criterion <- options[["estimator"]]
  if(!options[["workflow"]] && options[["variableType"]] == "variableTypeCorrect" && type == "frequentist") criterion <- options[["estimator2"]]
  message <- base::switch(criterion,
    "poissonBound" = gettext("The confidence bound is calculated according to the <b>Poisson</b> distributon."),
    "binomialBound" = gettext("The confidence bound is calculated according to the <b>binomial</b> distributon."),
    "hyperBound" = gettext("The confidence bound is calculated according to the <b>hypergeometric</b> distribution."),
    "stringerBound" = gettext("The confidence bound is calculated according to the <b>Stringer</b> method."),
    "regressionBound" = gettext("The confidence bound is calculated according to the <b>regression</b> method."),
    "directBound" = gettext("The confidence bound is calculated according to the <b>direct</b> method."),
    "differenceBound" = gettext("The confidence bound is calculated according to the <b>difference</b> method."),
    "ratioBound" = gettext("The confidence bound is calculated according to the <b>ratio</b> method."),
    "betaBound" = gettext("The credible bound is calculated according to the <b>beta</b> distribution and requires the assumption that the sample taints are interchangeable."),
    "gammaBound" = gettext("The credible bound is calculated according to the <b>gamma</b> distribution and requires the assumption that the sample taints are interchangeable."),
    "betabinomialBound" = gettext("The credible bound is calculated according to the <b>beta-binomial</b> distribution and requires the assumption that the sample taints are interchangeable."),
    "coxAndSnellBound" =
      gettext("The credible bound is calculated according to the <b>Cox and Snell</b> method and requires the assumption that the population taints are uniformly distributed."))
  if(options[["estimator"]] == "stringerBound" && options[["stringerBoundLtaAdjustment"]] && options[["variableType"]] == "variableTypeAuditValues") message <- gettext("The confidence bound is calculated according to the <b>Stringer</b> method with <b>LTA adjustment</b>.")
  evaluationTable$addFootnote(message)
  evaluationContainer[["evaluationTable"]] <- evaluationTable
  # No evaluation result yet: explain why and leave the table empty.
  if(is.null(evaluationState) || (options[["auditResult"]] == "" && !options[["useSumStats"]])){
    if(options[["workflow"]]){
      evaluationTable$addFootnote(message = gettext("The audit result column is empty."), symbol = gettext("<b>Analysis not ready.</b>"))
    } else {
      evaluationTable$addFootnote(message = gettext("Either the materiality, the population size, or the population value is defined as zero, or one of the required variables is missing."), symbol = gettext("<b>Analysis not ready.</b>"))
    }
    return()
  }
  taintLabel <- round(evaluationState[["t"]], 2)
  if(type == "bayesian" && options[["areaUnderPosterior"]] == "displayCredibleInterval"){
    # Two-sided display: report the lower and upper credible limits.
    credibleInterval <- .auditCalculateCredibleInterval(evaluationState)
    lowerBound <- credibleInterval[["lowerBound"]]
    upperBound <- credibleInterval[["upperBound"]]
    LowerBoundLabel <- paste0(round(lowerBound * 100, 3), "%")
    UpperBoundLabel <- paste0(round(upperBound * 100, 3), "%")
    row <- data.frame(materiality = planningOptions[["materialityLabel"]], sampleSize = evaluationState[["n"]], fullErrors = evaluationState[["k"]], totalTaint = taintLabel, lowerBound = LowerBoundLabel, upperBound = UpperBoundLabel)
  } else {
    # One-sided display: report the upper bound only.
    boundLabel <- paste0(round(evaluationState[["confBound"]] * 100, 3), "%")
    row <- data.frame(materiality = planningOptions[["materialityLabel"]], sampleSize = evaluationState[["n"]], fullErrors = evaluationState[["k"]], totalTaint = taintLabel, bound = boundLabel)
  }
  if(options[["mostLikelyError"]]){
    # Most likely error: point estimate of the population misstatement.
    if(options[["variableType"]] == "variableTypeAuditValues" && options[["estimator"]] %in% c("directBound", "differenceBound", "ratioBound", "regressionBound")){
      mle <- (planningOptions[["populationValue"]] - evaluationState[["pointEstimate"]]) / planningOptions[["populationValue"]]
    } else {
      if(type == "frequentist"){
        mle <- evaluationState[["t"]] / evaluationState[["n"]]
      } else if(type == "bayesian"){
        if(evaluationState[["t"]] == 0 && evaluationState[["kPrior"]] == 0){
          mle <- 0
        } else {
          # Posterior mode of the taint parameter per likelihood family
          # (prior parameters kPrior/nPrior folded into the mode formulas).
          if(evaluationState[["method"]] == "binomial") mle <- (1 + evaluationState[["kPrior"]] + evaluationState[["t"]] - 1) / (1 + evaluationState[["kPrior"]] + evaluationState[["t"]] + 1 + evaluationState[["nPrior"]] + evaluationState[["n"]] - evaluationState[["t"]] - 2)
          if(evaluationState[["method"]] == "poisson") mle <- (1 + evaluationState[["kPrior"]] + evaluationState[["t"]] - 1) / (evaluationState[["nPrior"]] + evaluationState[["n"]])
          if(evaluationState[["method"]] == "hypergeometric") mle <- (1 + evaluationState[["kPrior"]] + evaluationState[["t"]] - 1) / (1 + evaluationState[["kPrior"]] + evaluationState[["t"]] + 1 + evaluationState[["nPrior"]] + evaluationState[["n"]] - evaluationState[["t"]] - 2)
          if(evaluationState[["method"]] == "coxsnell") mle <- evaluationState[["multiplicationFactor"]] * ( (evaluationState[["df1"]] - 2) / evaluationState[["df1"]] ) * ( evaluationState[["df2"]] / (evaluationState[["df2"]] + 2) )
        }
      }
    }
    if(options[["materiality"]] == "materialityRelative"){
      mleLabel <- paste0(round(mle * 100, 3), "%")
    } else if(options[["materiality"]] == "materialityAbsolute"){
      mleLabel <- paste(planningOptions[["valuta"]], round(mle * planningOptions[["populationValue"]], 3))
    }
    row <- cbind(row, mle = mleLabel)
  }
  if(options[["monetaryVariable"]] != ""){
    # Projected maximum misstatement in monetary units.
    if(type == "bayesian" && options[["areaUnderPosterior"]] == "displayCredibleInterval"){
      lowerProjm <- round(lowerBound * planningOptions[["populationValue"]], 2)
      upperProjm <- round(upperBound * planningOptions[["populationValue"]], 2)
      # NOTE(review): "lowerProjmLabl" looks like a typo for "lowerProjmLabel";
      # it is internally consistent here so behavior is unaffected.
      lowerProjmLabl <- paste(planningOptions[["valuta"]], lowerProjm)
      upperProjmLabel <- paste(planningOptions[["valuta"]], upperProjm)
      row <- cbind(row, lowerProjm = lowerProjmLabl, upperProjm = upperProjmLabel)
    } else {
      projm <- round(evaluationState[["confBound"]] * planningOptions[["populationValue"]], 2)
      projmLabel <- paste(planningOptions[["valuta"]], projm)
      row <- cbind(row, projm = projmLabel)
    }
  }
  if(type == "bayesian" && (options[["evidenceRatio"]] || options[["bayesFactor"]])){
    expResult <- .auditEvidenceRatio(planningOptions, evaluationState)
    if(options[["evidenceRatio"]]){
      evidenceRatio <- round(expResult[["posteriorEvidenceRatio"]], 2)
      row <- cbind(row, evidenceRatio = evidenceRatio)
    }
    if(options[["bayesFactor"]]){
      bayesFactor <- round(expResult[["shift"]], 2)
      row <- cbind(row, bayesFactor = bayesFactor)
    }
  }
  evaluationTable$addRows(row)
  # Warn when the population value is effectively unset (0 or the 0.01
  # placeholder), since the monetary projection is then meaningless.
  if(options[["monetaryVariable"]] != "" && (planningOptions[["populationValue"]] == 0 || planningOptions[["populationValue"]] == 0.01)) evaluationTable$addFootnote(message = gettext("You must specify the population value to see the maximum misstatement."), symbol = " \u26A0", colNames = 'projm')
}

# Draws the "Evaluation Information" bar chart comparing materiality, the
# maximum (bound) error, and the most likely error.
.auditEvaluationInformationPlot <- function(options, planningOptions, evaluationState, evaluationContainer, jaspResults, type, positionInContainer = 3){
  if(!options[["evaluationInformation"]]) return()
  .updateFigNumber(jaspResults)
  if(is.null(evaluationContainer[["evaluationInformation"]])){
    evaluationInformation <- createJaspPlot(plot = NULL, title = gettext("Evaluation Information"), width = 600, height = 300)
    evaluationInformation$position <- positionInContainer
    evaluationInformation$dependOn(options = "evaluationInformation")
    evaluationContainer[["evaluationInformation"]] <- evaluationInformation
    # Leave the placeholder plot empty when required inputs are missing.
    if(((options[["auditResult"]] == "" || options[["recordNumberVariable"]] == "") && !options[["useSumStats"]]) || (options[["useSumStats"]] && options[["nSumStats"]] == 0) ||
       planningOptions[["materiality"]] == 0 || evaluationContainer$getError()) return()
    materiality <- evaluationState[["materiality"]]
    bound <- evaluationState[["confBound"]]
    # Most likely error: same per-method computation as in the summary table.
    if(options[["variableType"]] == "variableTypeAuditValues" && options[["estimator"]] %in% c("directBound", "differenceBound", "ratioBound", "regressionBound")){
      mle <- (planningOptions[["populationValue"]] - evaluationState[["pointEstimate"]]) / planningOptions[["populationValue"]]
    } else {
      if(type == "frequentist"){
        mle <- evaluationState[["t"]] / evaluationState[["n"]]
      } else if(type == "bayesian"){
        if(evaluationState[["t"]] == 0 && evaluationState[["kPrior"]] == 0){
          mle <- 0
        } else {
          # Posterior mode of the taint parameter per likelihood family.
          if(evaluationState[["method"]] == "binomial") mle <- (1 + evaluationState[["kPrior"]] + evaluationState[["t"]] - 1) / (1 + evaluationState[["kPrior"]] + evaluationState[["t"]] + 1 + evaluationState[["nPrior"]] + evaluationState[["n"]] - evaluationState[["t"]] - 2)
          if(evaluationState[["method"]] == "poisson") mle <- (1 + evaluationState[["kPrior"]] + evaluationState[["t"]] - 1) / (evaluationState[["nPrior"]] + evaluationState[["n"]])
          if(evaluationState[["method"]] == "hypergeometric") mle <- (1 + evaluationState[["kPrior"]] + evaluationState[["t"]] - 1) / (1 + evaluationState[["kPrior"]] + evaluationState[["t"]] + 1 + evaluationState[["nPrior"]] + evaluationState[["n"]] - evaluationState[["t"]] - 2)
          if(evaluationState[["method"]] == "coxsnell") mle <- evaluationState[["multiplicationFactor"]] * ( (evaluationState[["df1"]] - 2) / evaluationState[["df1"]] ) * ( evaluationState[["df2"]] / (evaluationState[["df2"]] + 2) )
        }
      }
    }
    # Bars are drawn bottom-to-top, hence the rev() on labels and values.
    label <- rev(c(gettext("Materiality"), gettext("Maximum error"), gettext("Most likely error")))
    values <- rev(c(materiality, bound, mle))
    if(options[["variableType"]] == "variableTypeAuditValues" && options[["materiality"]] == "materialityAbsolute") values <- values * planningOptions[["populationValue"]]
    # Bound bar turns green when it is below materiality, red otherwise.
    boundColor <- ifelse(bound < materiality, yes = rgb(0, 1, .7, 1), no = rgb(1, 0, 0, 1))
    fillUp <- rev(c("#1380A1", boundColor, "#1380A1"))
    yBreaks <- jaspGraphs::getPrettyAxisBreaks(c(0, values), min.n = 4)
    if(options[["variableType"]] == "variableTypeAuditValues" && options[["materiality"]] == "materialityAbsolute"){
      x.labels <- format(jaspGraphs::getPrettyAxisBreaks( seq(0, 1.1 * max(values), length.out = 100), min.n = 4), scientific = FALSE)
      values.labels <- paste(planningOptions[["valuta"]], ceiling(values))
    } else {
      x.labels <- paste0(round( jaspGraphs::getPrettyAxisBreaks(seq(0, 1.1 * max(values), length.out = 100), min.n = 4) * 100, 4), "%")
      values.labels <- paste0(round(values * 100, 2), "%")
    }
    plotData <- data.frame(x = label, y = values)
    plotData$x <- factor(plotData$x, levels = plotData$x)
    yLimits <- c(0, 1.1 * max(values))
    yBreaks <- jaspGraphs::getPrettyAxisBreaks(seq(0, 1.1 * max(values), length.out = 100), min.n = 4)
    if(mle < 0 || bound < 0){
      # Here we adjust the axes if the mle turns out to be negative
      yBreaks <- jaspGraphs::getPrettyAxisBreaks(seq(min(values), 1.1 * max(values), length.out = 100), min.n = 4)
      x.labels <- format(jaspGraphs::getPrettyAxisBreaks(seq(min(values), 1.1 * max(values), length.out = 100), min.n = 4), scientific = FALSE)
      yLimits <- c(min(values), 1.1 * max(values))
    }
    p <- ggplot2::ggplot(data = plotData, mapping = ggplot2::aes(x = x, y = y)) +
      ggplot2::geom_bar(stat = "identity", col = "black", size = 1, fill = fillUp) +
      ggplot2::coord_flip() +
      ggplot2::xlab(NULL) +
      ggplot2::ylab(NULL) +
      ggplot2::annotate("text", y = values, x = c(1, 2, 3), label = values.labels, size = 6, vjust = 0.5, hjust = -0.3) +
      ggplot2::scale_y_continuous(breaks = yBreaks, limits = yLimits, labels = x.labels)
    myTheme <- ggplot2::theme(axis.ticks.x = ggplot2::element_blank(), axis.ticks.y = ggplot2::element_blank(), axis.text.y = ggplot2::element_text(hjust = 0), panel.grid.major.x = ggplot2::element_line(color = "#cbcbcb", size = 0.5))
    p <- jaspGraphs::themeJasp(p, sides = "") + myTheme
    evaluationInformation$plotObject <- p
  }
  if(options[["explanatoryText"]]){
    evaluationInformationText <- createJaspHtml(gettextf("<b>Figure %1$i.</b> Evaluation information for the current annotated selection. The materiality is compared with the maximum misstatement and the most likely error. The most likely error (MLE) is an estimate of the true misstatement in the population. The maximum error is an estimate of the maximum error in the population.", jaspResults[["figNumber"]]$object), "p")
    evaluationInformationText$position <- positionInContainer + 1
    evaluationInformationText$dependOn(optionsFromObject = evaluationContainer[["evaluationInformation"]])
    evaluationInformationText$dependOn(options = "explanatoryText")
    evaluationContainer[["evaluationInformationText"]] <- evaluationInformationText
  }
}

# Builds a polynomial prediction curve from a fitted lm and either returns the
# predicted y values (line = FALSE) or adds the curve to an existing ggplot.
.auditCorrelationPlotAddLine <- function(fit, plot = NULL, line = FALSE, xMin, xMax, lwd) {
  # create function formula
  # NOTE(review): building the polynomial as a text formula and running it
  # through eval(parse(...)) is fragile; predict(fit, newdata = ...) would
  # compute the same values without string evaluation.
  f <- vector("character", 0)
  for (i in seq_along(coef(fit))) {
    if (i == 1) {
      temp <- paste(coef(fit)[[i]])
      f <- paste(f, temp, sep="")
    }
    if (i > 1) {
      # Each higher-order term is appended as "(coef)*x^(i-1)".
      temp <- paste("(", coef(fit)[[i]], ")*", "x^", i-1, sep="")
      f <- paste(f, temp, sep="+")
    }
  }
  x <- seq(xMin, xMax, length.out = 100)
  predY <- eval(parse(text=f))
  if (line == FALSE) {
    return(predY)
  }
  if (line) {
    plot <- plot + ggplot2::geom_line(data = data.frame(x, predY), mapping = ggplot2::aes(x = x, y = predY), size=lwd, lty = 1)
    return(plot)
  }
}

# Draws the book-value vs audit-value scatterplot with a regression line and
# the Pearson correlation; mismatching observations are highlighted in red.
.auditCorrelationPlot <- function(options, planningOptions, sample, evaluationContainer, jaspResults, positionInContainer){
  if(!options[["correlationPlot"]]) return()
  .updateFigNumber(jaspResults)
  if(is.null(evaluationContainer[["correlationPlot"]])){
    correlationPlot <- createJaspPlot(plot = NULL, title = gettext("Correlation Plot"), width = 500, height = 400)
    correlationPlot$position <- positionInContainer
    correlationPlot$dependOn(options = c("correlationPlot", "valuta"))
    evaluationContainer[["correlationPlot"]] <- correlationPlot
    if(options[["auditResult"]] == "" || evaluationContainer$getError()) return()
    plotData <- data.frame(x = sample[,.v(options[["monetaryVariable"]])], y = sample[,.v(options[["auditResult"]])])
    plotData <- na.omit(plotData)
    corResult <- cor(x = plotData[["x"]], y = plotData[["y"]], method = "pearson")
    xVar <- plotData[["x"]]
    yVar <- plotData[["y"]]
    # Single linear fit; the list structure allows model selection later.
    fit <- vector("list", 1)
    fit[[1]] <- lm(y ~ poly(x, 1, raw = TRUE), data = plotData)
    bestModel <- 1 # which.min(Bic)
    # format x labels
    xlow <- min(pretty(xVar))
    xhigh <- max(pretty(xVar))
    xticks <- pretty(c(xlow, xhigh))
    xLabs <- vector("character", length(xticks))
    xLabs <- format(xticks, digits = 3, scientific = FALSE)
    # Format y labels
    yticks <- xticks
    yLabs <- vector("character", length(yticks))
    yLabs <- format(yticks, digits = 3, scientific = FALSE)
    corResult <- round(corResult, 3)
    # Observations whose audit value differs from the book value are red.
    cols <- rep("gray", nrow(plotData))
    cols[which(plotData$x != plotData$y)] <- rgb(0.9, 0, 0, 1)
    p <- ggplot2::ggplot(data = plotData, mapping = ggplot2::aes(x = x, y = y)) +
      ggplot2::scale_x_continuous(name = gettextf("Book values (%1$s)", planningOptions[["valuta"]]), breaks = xticks, labels = xLabs) +
      ggplot2::scale_y_continuous(name = gettextf("Audit values (%1$s)", planningOptions[["valuta"]]), breaks = yticks, labels = yLabs) +
      jaspGraphs::geom_point(size = 3, fill = cols)
    p <- .auditCorrelationPlotAddLine(fit = fit[[bestModel]], plot = p, line = TRUE, xMin= xticks[1], xMax = xticks[length(xticks)], lwd = 1)
    p <- p + ggplot2::annotate("text", x = xticks[1], y = (yticks[length(yticks)] - ((yticks[length(yticks)] - yticks[length(yticks) - 1]) / 2)), label = paste0("italic(r) == ", corResult), size = 8, parse = TRUE, hjust = -0.5, vjust = 0.5)
    myTheme <- ggplot2::theme(panel.grid.major.x = ggplot2::element_line(color = "#cbcbcb", size = 0.5), panel.grid.major.y = ggplot2::element_line(color = "#cbcbcb", size = 0.5))
    p <- jaspGraphs::themeJasp(p) + myTheme
    correlationPlot$plotObject <- p
  }
  if(options[["explanatoryText"]]){
    correLationPlotText <- createJaspHtml(gettextf("<b>Figure %1$i.</b> Scatterplot of the
book values in the selection and their audit values. Red dots indicate observations that did not match their original book value. If these red dots lie in the bottom part of the graph, the book values are overstated. If these red dots lie in the upper part of the graph, they are understated. The value <i>r</i> is the Pearson correlation coefficient of the book values and the audit values, an indicator of the strength of the linear relationship between the two variables.", jaspResults[["figNumber"]]$object), "p") correLationPlotText$position <- positionInContainer + 1 correLationPlotText$dependOn(optionsFromObject = evaluationContainer[["correlationPlot"]]) correLationPlotText$dependOn(options = "explanatoryText") evaluationContainer[["correLationPlotText"]] <- correLationPlotText } } ################################################################################ ################## End functions ############################################### ################################################################################
1d8e57d0ed8d72d3370b876f6ee09c337b4edaa3
6aa2d006ebc00e835470ea883725a30e9e4c17ec
/run_analysis.R
f339d3e4206309e4745259e605157a4821406258
[]
no_license
yashkhaitan99/Getting-and-Cleaning-Data-Assignment
b93e36984db5885b89fbca5b2f7abaf06d839340
c6be7de9b0ba034de8a03f926ba9e6867a3b4084
refs/heads/master
2022-11-19T16:09:16.232704
2020-07-23T08:57:11
2020-07-23T08:57:11
281,896,654
0
1
null
null
null
null
UTF-8
R
false
false
1,718
r
run_analysis.R
## Getting & Cleaning Data course project: build a tidy summary of the
## UCI HAR dataset (mean of each mean/std measurement per activity/subject).
library(dplyr)

# NOTE(review): hard-coded working directory kept from the original script;
# consider parameterizing the dataset path instead.
setwd("C:/Users/DEL/Downloads")

# Test data: measurements, activity codes and subject ids.
Test_X   <- read.table("./UCI HAR Dataset/test/X_test.txt")
Test_Y   <- read.table("./UCI HAR Dataset/test/Y_test.txt")
Text_sub <- read.table("./UCI HAR Dataset/test/subject_test.txt")

# Train data.
Train_X   <- read.table("./UCI HAR Dataset/train/X_train.txt")
Train_Y   <- read.table("./UCI HAR Dataset/train/Y_train.txt")
Train_sub <- read.table("./UCI HAR Dataset/train/subject_train.txt")

# Feature (column) descriptions.
features.list <- read.table("./UCI HAR Dataset/features.txt")

# Activity labels.
activity_label <- read.table("./UCI HAR Dataset/activity_labels.txt")

# 1. Merge the training and the test sets to create one data set.
Total_x   <- rbind(Test_X, Train_X)
Total_Y   <- rbind(Test_Y, Train_Y)
Total_sub <- rbind(Text_sub, Train_sub)

# 2. Extract only the measurements on the mean and standard deviation
#    for each measurement.
req_col <- grep("mean|std", features.list$V2)
req_X   <- Total_x[, req_col]

# 3./4. Use descriptive activity names and label the data set with
#       descriptive variable names.
colnames(Total_Y)  <- "activity"
Total_Y$activity   <- factor(Total_Y$activity, labels = activity_label[, 2])
colnames(Total_sub) <- "Subject"
Total_sub$Subject   <- as.factor(Total_sub$Subject)
colnames(req_X) <- features.list[req_col, 2]

# 5. From the data set in step 4, create a second, independent tidy data set
#    with the average of each variable for each activity and each subject.
total     <- cbind(Total_Y, Total_sub, req_X)
total_grp <- group_by(total, activity, Subject)
# Sanity checks (the original called these BEFORE total_grp existed).
class(total_grp)
str(total_grp)

# BUG FIX: the original used the defunct summarize_each(funs(mean)) and then
# called read.table() -- a reader -- to save the result; use write.table().
total_mean <- summarise(total_grp, across(everything(), mean), .groups = "drop")
write.table(total_mean, "./UCI HAR Dataset/final dataset.txt", row.names = FALSE)
b501f11f9684dc4acbfdd86934d496110543269f
1fdd33233f2416a926b903e5494d278ebb208a37
/plot1.R
09969b3c23cd675e8e1994b43cf8b184fd3d4fcf
[]
no_license
boizette/ExData_Plotting1
032aed7ea0ee22e3fe6b9c531486c67459609fad
0906feaaea37928186a5ab8a07d0842928238ee0
refs/heads/master
2021-05-09T13:51:15.143171
2018-02-02T14:36:06
2018-02-02T14:36:06
119,046,718
0
0
null
2018-01-26T12:00:17
2018-01-26T12:00:16
null
ISO-8859-1
R
false
false
1,272
r
plot1.R
plot1 <- function() {
  ## Household power consumption, figure 1: histogram of Global Active Power
  ## for 2007-02-01 and 2007-02-02, saved to plot1.png (480 x 480).
  ## install.packages("data.table")
  library(data.table)

  ## Download and unpack the raw data.
  f_url <- "https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip"
  temp <- tempfile()
  download.file(f_url, temp)
  unzip(temp, "household_power_consumption.txt")
  ColNames = c("Date", "Time", "Global_active_power", "Global_reactive_power",
               "Voltage", "Global_intensity", "Sub_metering_1",
               "Sub_metering_2", "Sub_metering_3")
  fic <- read.table("household_power_consumption.txt",
                    stringsAsFactors = FALSE, header = TRUE,
                    col.names = ColNames, sep = ";")

  ## Keep only the rows for the first two days of February 2007.
  gic <- fic[between(as.Date(fic$Date, "%d/%m/%Y"),
                     "2007-02-01", "2007-02-02", incbounds = TRUE), ]

  ## The raw column is character; coerce to numeric for plotting.
  G_A_POW <- as.numeric(gic$Global_active_power)

  ## Draw the histogram.
  ## BUG FIX: plot labels read "Globale Active Power" (typo) in the original.
  par(mfrow = c(1, 1), mar = c(4, 4, 1, 1))
  hist(G_A_POW, main = "Global Active Power",
       xlab = "Global Active Power (kilowatts)", yaxt = "n", col = "red")

  ## Custom y axis with labels drawn outside the plot region.
  ytick <- seq(0, 1200, by = 200)
  axis(side = 2, at = ytick, labels = FALSE)
  ## BUG FIX: the original passed xpd=TRUE twice to text().
  text(par("usr")[1], ytick, labels = ytick, pos = 2, xpd = TRUE, col = "black")

  ## Save the plot to plot1.png.
  dev.copy(png, file = "plot1.png", width = 480, height = 480)
  dev.off()
}
36ea754c05dac35e54bc34812e4cf22f1dd3022f
2d45d4f3d5dbc688c36c9f6493ccca1d3ec303dc
/src/inactive/explore_phistats.R
0825026f46e45a5a07c5ecbaf6520f15c6ae26ab
[]
no_license
TomHarrop/stacks-asw
182aae84eb4b619e6bde285b55e30d4c91cbd232
8a4f8d79a64b4ce0f27385596489cc876e15bde2
refs/heads/master
2021-05-06T09:05:21.766140
2020-08-04T02:15:33
2020-08-04T02:15:33
114,046,714
2
1
null
2020-05-22T05:39:54
2017-12-12T22:40:38
R
UTF-8
R
false
false
3,544
r
explore_phistats.R
## Exploratory plots of per-locus phi_st statistics (Stacks `populations`
## output) laid out against a reference genome index (.fai).
library(data.table)
library(ggplot2)

## Inputs: samtools faidx index (contig name + length) and the Stacks
## phistats table (first 3 rows are comment lines, hence skip = 3).
fai_file <- "output/005_ref/ref.fasta.fai"
phistats_file <- "output/070_populations/ns/populations.phistats.tsv"

phistats <- fread(phistats_file, skip = 3)
fai_names <- c("Chr", "chr_length")
fai <- fread(fai_file, select = 1:2, col.names = fai_names)

## How many contig labels to draw on the x axis.
n_to_label <- 20

# chromosome coordinates: lay contigs end-to-end (longest first) on one
# cumulative axis and record each contig's start/end/label position
chr_coords <- copy(fai)
setorder(chr_coords, -chr_length, Chr)
chr_coords[, chr_end := cumsum(chr_length)]
chr_coords[, chr_start := chr_end - chr_length + 1]
# NOTE(review): mean(c(chr_start, chr_end)) is already an absolute midpoint,
# so adding chr_start on top of it looks like it double-counts the offset --
# confirm the intended label position.
chr_coords[, lab_pos := chr_start + round(mean(c(chr_start, chr_end)), 0), by = Chr]
# choose ~n_to_label evenly spaced axis positions and label the contig
# whose lab_pos is nearest to each; all other contigs get an empty label
pos_to_label = chr_coords[, seq(1, max(chr_end), length.out = n_to_label)]
label_positions <- sapply(pos_to_label,
                          function(x) chr_coords[, .I[which.min(abs(x - lab_pos))]])
chr_coords[label_positions, x_lab := Chr]
chr_coords[is.na(x_lab), x_lab := ""]

# homemade manhattan plot: convert per-contig BP to the cumulative coordinate
phistats_with_len <- merge(phistats, chr_coords, by = "Chr")
setorder(phistats_with_len, -chr_length, Chr, BP)
phistats_with_len[, bp_coord := BP + chr_start - 1]

# pick out the outliers (loci above the 99th percentile of phi_st)
q99 <- phistats_with_len[, quantile(`phi_st`, 0.99)]
phistats_with_len[`phi_st` > q99, outlier := TRUE]
phistats_with_len[outlier == TRUE, point_colour := Chr]
phistats_with_len[is.na(outlier), point_colour := NA]

# order the contigs (natural sort so e.g. "chr2" sorts before "chr10")
phistats_with_len[
    , point_colour := factor(point_colour,
                             levels = unique(gtools::mixedsort(point_colour,
                                                               na.last = TRUE)))]

# Manhattan-style plot: plain points for non-outliers, outliers coloured by
# contig, horizontal line at the 99th-percentile threshold.
ggplot() +
    theme_minimal() +
    theme(axis.text.x = element_text(angle = 30, hjust = 1, vjust = 1),
          axis.ticks.x = element_blank(),
          axis.ticks.length.x = unit(0, "mm"),
          panel.grid.major.x = element_blank(),
          panel.grid.minor.x = element_blank()) +
    scale_x_continuous(breaks = chr_coords[, lab_pos],
                       labels = chr_coords[, x_lab]) +
    scale_colour_viridis_d() +
    geom_hline(yintercept = q99) +
    geom_point(mapping = aes(x = bp_coord, y = `phi_st`),
               data = phistats_with_len[is.na(point_colour)]) +
    geom_point(mapping = aes(x = bp_coord, y = `phi_st`, colour = point_colour),
               data = phistats_with_len[!is.na(point_colour)])

# same layout for the kernel-smoothed D_est column
d99 <- phistats_with_len[, quantile(`Smoothed D_est`, 0.99)]
ggplot(phistats_with_len, aes(x = bp_coord, y = `Smoothed D_est`)) +
    theme(axis.text.x = element_text(angle = 30, hjust = 1, vjust = 1),
          axis.ticks.x = element_blank(),
          axis.ticks.length.x = unit(0, "mm")) +
    scale_x_continuous(breaks = chr_coords[, lab_pos],
                       labels = chr_coords[, x_lab]) +
    geom_hline(yintercept = d99) +
    geom_point()

# distance between markers: lag of the cumulative coordinate
phistats_with_len[, prev_loc := c(0, bp_coord[-.N])]
phistats_with_len[, distance_from_prev := bp_coord - prev_loc]
phistats_with_len[, summary(distance_from_prev)]

# phi_st along a single contig (the third contig in the .fai)
plot_contig <- fai[3, Chr]
x_lim <- fai[Chr == plot_contig, c(0, chr_length)]
plot_dt <- phistats[Chr == plot_contig]
ggplot(plot_dt, aes(x = BP, y = phi_st)) +
    xlim(x_lim) +
    geom_path()

# per-contig summary: mean phi_st (column V1) and marker count (N)
phi_with_len <- merge(phistats, fai, by = "Chr")
x <- phi_with_len[, .(mean(phi_st), .N), by = .(Chr, chr_length)]
x[, distance_bw_markers := chr_length / N]
ggplot(x, aes(y = distance_bw_markers, x = Chr)) + geom_point()
# NOTE(review): x has columns Chr, chr_length, V1, N -- there is no V2, so
# the next two plots likely error when printed (V1 or chr_length intended?).
ggplot(x, aes(y = N, x = V2)) + geom_point()
ggplot(x[N>5], aes(y = distance_bw_markers, x = V2)) + geom_point()
x[N>5]
33c1092762bb71678fdeedeb6815ec4a05e4862e
3fbb10838b85c77c0d358fbcf011ef557c1c9d6b
/MechaCarChallenge.R
76106b7305f00217ef3fe39d7a767e583cb686d5
[]
no_license
PGL50/MechaCar_Statistical_Analysis
efef728111bf522823e74a2d012130c12bd87c49
8dd7a011e3a97fef8b042cbc4ad0ac1581fbc087
refs/heads/main
2023-08-14T07:57:48.065032
2021-10-10T12:11:24
2021-10-10T12:11:24
413,221,241
0
0
null
null
null
null
UTF-8
R
false
false
2,440
r
MechaCarChallenge.R
## MechaCar statistical analysis: MPG regression (Deliverable 1),
## suspension-coil PSI summaries (Deliverable 2) and t-tests (Deliverable 3).
library(dplyr)
library(ggplot2)

## ---------- Deliverable 1 ----------
# Interactive file picker kept for reference:
# file1 <- file.choose()
# mecha_table <- read.csv(file = file1, check.names = FALSE, stringsAsFactors = FALSE)
mecha_table <- read.csv(file = "MechaCar_mpg.csv",
                        check.names = FALSE,
                        stringsAsFactors = FALSE)

# Multiple linear regression of mpg on all vehicle predictors,
# followed by its summary statistics.
lm(mpg ~ vehicle_length + vehicle_weight + spoiler_angle +
     ground_clearance + AWD,
   data = mecha_table)
summary(lm(mpg ~ vehicle_length + vehicle_weight + spoiler_angle +
             ground_clearance + AWD,
           data = mecha_table))

# Normality checks: density plot and Shapiro-Wilk test for each of the
# two predictors examined below.
ggplot(mecha_table, aes(x = vehicle_length)) +
  geom_density() +
  ggtitle("Vehicle Length")
shapiro.test(mecha_table$vehicle_length)

ggplot(mecha_table, aes(x = ground_clearance)) +
  geom_density() +
  ggtitle("Ground Clearance")
shapiro.test(mecha_table$ground_clearance)

# Scatterplots of mpg against each of those predictors.
ggplot(mecha_table, aes(x = vehicle_length, y = mpg)) +
  ggtitle("MPG by Vehicle Length") +
  geom_point(size = 2)
ggplot(mecha_table, aes(x = ground_clearance, y = mpg)) +
  ggtitle("MPG by Ground Clearance") +
  geom_point(size = 2)

## ---------- Deliverable 2 ----------
coil_table <- read.csv(file = "Suspension_Coil.csv",
                       check.names = FALSE,
                       stringsAsFactors = FALSE)

# PSI summary statistics for the whole population and per manufacturing lot.
total_summary <- coil_table %>%
  summarize(Mean = mean(PSI),
            Median = median(PSI),
            Variance = var(PSI),
            SD = sd(PSI))
lot_summary <- coil_table %>%
  group_by(Manufacturing_Lot) %>%
  summarize(Mean = mean(PSI),
            Median = median(PSI),
            Variance = var(PSI),
            SD = sd(PSI))

# Boxplot of PSI by manufacturing lot.
plt <- ggplot(coil_table, aes(x = Manufacturing_Lot, y = PSI))
plt +
  geom_boxplot() +
  theme(axis.text.x = element_text(hjust = 1))

## ---------- Deliverable 3 ----------
# One-sample t-test on all coils; H0: mean PSI = 1500.
res <- t.test(coil_table$PSI, alternative = "two.sided", mu = 1500)
res

# The same one-sample t-test run separately for each lot.
lot1 <- subset(coil_table, Manufacturing_Lot == "Lot1")
lot2 <- subset(coil_table, Manufacturing_Lot == "Lot2")
lot3 <- subset(coil_table, Manufacturing_Lot == "Lot3")

res1 <- t.test(lot1$PSI, alternative = "two.sided", mu = 1500)
res1
res2 <- t.test(lot2$PSI, alternative = "two.sided", mu = 1500)
res2
res3 <- t.test(lot3$PSI, alternative = "two.sided", mu = 1500)
res3
cd92f95a3a7bb08f3f82a9e3fcd6ced473a1094b
e60d4eef1860c4dc76c245668c168b2127758c0c
/cachematrix.R
2b8266d4de9f0c1bf394cd773aa87be41c4ffbd4
[]
no_license
supwar/ProgrammingAssignment2
0063432c4fd3f8edf217b140f3ae8ab804695f60
f0213dc5157f2edcd3f34ee96e92ac223ce5271f
refs/heads/master
2021-01-22T11:24:03.571105
2015-06-20T00:00:50
2015-06-20T00:00:50
37,750,561
0
0
null
2015-06-19T23:24:47
2015-06-19T23:24:46
null
UTF-8
R
false
false
1,630
r
cachematrix.R
## The following functions implement a caching object that holds both a
## matrix and its inverse, plus a solver that computes the inverse only
## once and serves the cached copy afterwards. As of yet, there has been
## no work to ensure that the matrix is actually invertible.

## Create a list of closures to get/set a matrix and its cached inverse.
## Example: cached <- makeCacheMatrix(matrix(c(1, 2, 7, 8), 2, 2, byrow = TRUE))
makeCacheMatrix <- function(x = matrix()) {
    inv <- NULL
    set <- function(y) {
        x <<- y
        # BUG FIX: must assign with <<- into the enclosing environment.
        # The original `inv = NULL` only created a local binding, so a
        # stale inverse survived a call to set() and cacheSolve() kept
        # returning the inverse of the OLD matrix.
        inv <<- NULL
    }
    get <- function() x
    setInverse <- function(inverse) inv <<- inverse
    getInverse <- function() inv
    list(set = set,
         get = get,
         setInverse = setInverse,
         getInverse = getInverse)
}

## Return the inverse of the matrix held in an object created by
## makeCacheMatrix(). If the inverse has already been calculated, the
## cached value is returned instead of recomputing it.
## Example: cacheSolve(cached)
cacheSolve <- function(x, ...) {
    inv <- x$getInverse()
    if (!is.null(inv)) {
        message("getting cached data")
        return(inv)
    }
    data <- x$get()
    inv <- solve(data, ...)
    x$setInverse(inv)
    inv
}
3d5f320806b0a71fdd15ca80a24420a41ba198e0
c6cb7d70e0771c124475eae37b7bfed1d3462f7f
/man/ReadGeoClass.Rd
ef753adb6d78f9f140acdf8e27af785418ef7339
[]
no_license
pslota/HYPEtools
6430997c9b7abd7e0b27c9ea197bcc749b5fcb04
5bb0c6c31641aeaec6cb1a23185bc61de6cab8bc
refs/heads/master
2021-07-24T06:33:48.648824
2017-11-05T17:28:11
2017-11-05T17:28:11
null
0
0
null
null
null
null
UTF-8
R
false
true
1,298
rd
ReadGeoClass.Rd
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/functioncollection_import.R
\name{ReadGeoClass}
\alias{ReadGeoClass}
\title{Read a 'GeoClass.txt' File}
\usage{
ReadGeoClass(filename = "GeoClass.txt", headrow)
}
\arguments{
\item{filename}{Path to and file name of the GeoClass file to import. Windows users: Note that Paths are separated by '/', not '\\'.}

\item{headrow}{Row number with header information. HYPE allows an unlimited number of comment rows above the actual file content, and the last comment row usually contains some header information. See Details.}
}
\value{
\code{ReadGeoClass} returns a data frame with added attribute 'comment'.
}
\description{
This is a convenience wrapper function to import a GeoClass file as data frame into R. GeoClass files contain definitions of SLC (\bold{S}oil and \bold{L}and use \bold{C}lass) classes.
}
\details{
\code{ReadGeoClass} is a convenience wrapper function of \code{\link{read.table}}, with treatment of leading comment rows and a column header. Comment rows are imported as strings in \code{attribute} 'comment'. HYPE column headers are converted during import to eliminate invalid characters (e.g. '-') and saved to \code{attribute} 'header'.
}
\examples{
\dontrun{ReadGeoClass("Geoclass.txt")}
}
a6fb122d4bde80f041d3e087532d4f958f02916c
1b274c4a9fc98a7f2e25b46c8ac213341a01f059
/skrypty/modele/svm_rad_ens2.R
101dc5b5087dfe9f24c71ca461e6e088d5be7b5b
[]
no_license
kubasmolik/magisterka
d8d3451db31a100df52f5e51fb7b099347e1d387
161f7976ad1c66b1d2a2ac1c2f6f981515c81646
refs/heads/master
2021-05-04T01:43:38.673922
2018-02-05T21:59:01
2018-02-05T21:59:01
120,362,429
0
0
null
null
null
null
UTF-8
R
false
false
3,018
r
svm_rad_ens2.R
#### ENSEMBLER 1 ####
#### svm_rad_ens2 ####
## Trains an RBF-kernel SVM (caret "svmRadial") on the WoE-encoded ensemble
## features, evaluates it on the train/test sets and bootstrap samples,
## renders an HTML report and saves the model plus its predictions.
## NOTE(review): this script is NOT standalone -- it relies on objects
## defined elsewhere in the project (train_clean, test, zmienne_ens2_woe,
## ctrl_svm, bootstrap, ocena_train, ocena_test, wyniki_bootstrap).

df <- train_clean %>% select(., default, one_of(zmienne_ens2_woe))

# estimate the RBF kernel width (sigma) directly from the data
sigma_est <- kernlab::sigest(default~., data = df)
sigma_est[[1]]

# tuning grid: the estimated sigma crossed with a range of cost values
parametry <- expand.grid(
    sigma = sigma_est[[1]],
    C = c(2^(seq(-1, 4)))
    # C = c(4)
)

# train in parallel on all but one core
cl <- makePSOCKcluster(detectCores() - 1, outfile = "")
registerDoParallel(cl)

set.seed(1993)
svm_rad_ens2 <- caret::train(default ~ .,
                             data = df,
                             method = "svmRadial",
                             metric = "AUPRC",
                             trControl = ctrl_svm,
                             tuneGrid = parametry
                             #tuneLength = 10
)

stopCluster(cl)

# inspect the tuning results and the selected hyperparameters
svm_rad_ens2
svm_rad_ens2$results
plot(svm_rad_ens2)
svm_rad_ens2$results[which.max(svm_rad_ens2$results$AUPRC), ]
svm_rad_ens2$bestTune

# evaluation on the training set
train_eval <- ocena_train(svm_rad_ens2)
train_eval$metryki
train_eval$parametry

# evaluation on the test set
df <- test %>% select(., default, one_of(zmienne_ens2_woe))

test_eval <- ocena_test(zb_testowy = df,
                        model = svm_rad_ens2,
                        nazwa_modelu = "svm_rad_ens2")
test_eval$ocena_1
test_eval$ocena_2
test_eval$gestosc
test_eval$krzywa_roc
test_eval$krzywa_pr

# evaluation on the bootstrap samples
boot_eval <- wyniki_bootstrap(df = df,
                              model = svm_rad_ens2,
                              proby_boot = bootstrap,
                              nazwa_modelu = "svm_rad_ens2")
boot_eval$wyniki_df
boot_eval$wykres_boot

# save the evaluation data used by the report
save(train_eval, test_eval, boot_eval,
     file = "./dane/raporty modeli/model.RData")

# render the model report
rmarkdown::render("./skrypty/raport_modelu.Rmd",
                  output_file = "svm_rad_ens2.html",
                  output_dir = "./dane/raporty modeli",
                  params = list(dynamictitle = "Raport dla modelu svm_rad_ens2",
                                dynamicdate = Sys.Date())
)

svm_rad_ens2_eval_boot <- boot_eval$wyniki_df

# generate class-probability predictions on the test set
#df <- test %>% select(., default, one_of(zm_numeric), ends_with("woe"))
svm_rad_ens2_prognoza <- predict(svm_rad_ens2, df, type = "prob")
svm_rad_ens2_prognoza$obs <- df$default
# classify as "default" when its predicted probability is at least 0.5,
# and make "default" the reference level
svm_rad_ens2_prognoza$pred <- forcats::as_factor(
    ifelse(svm_rad_ens2_prognoza$default >= 0.5, "default", "no_default")
)
svm_rad_ens2_prognoza$pred <- relevel(svm_rad_ens2_prognoza$pred, ref = "default")

# extract the model's resampled predictions on the training set
svm_rad_ens2_train <- svm_rad_ens2$pred %>%
    dplyr::select(., rowIndex, default) %>%
    dplyr::rename( svm_rad_ens2 = default)

# save the fitted model
save(svm_rad_ens2, file = "./dane/modele/svm_rad_ens2.RData")

# save the training-set predictions
save(svm_rad_ens2_train, file = "./dane/modele/svm_rad_ens2_train.RData")

# save the test-set predictions and the bootstrap evaluation
save(svm_rad_ens2_eval_boot, svm_rad_ens2_prognoza,
     file = "./dane/modele/svm_rad_ens2_ocena.RData")

# clean up the environment
rm(svm_rad_ens2, svm_rad_ens2_eval_boot, svm_rad_ens2_prognoza,
   train_eval, test_eval, boot_eval, cl, df)
e2f3f7aa1d6dec1e77c7da5a3d23040bb5c5bc8e
ff2b55f75a9802b8de761eee6a353f9b8058b08c
/R/data-ADPIRA.R
4bc2db64f444bfa3282fb9591f22585f32e67ec0
[]
no_license
FCACollin/rpack_pira
426d344de5eb426240608aba725a8c328b8779e1
852db8a6175fe2126202b3316ba576d47ecf163c
refs/heads/master
2023-03-08T15:58:50.987587
2021-02-20T18:22:15
2021-02-20T18:22:15
287,980,600
0
1
null
null
null
null
UTF-8
R
false
false
308
r
data-ADPIRA.R
#' ADPIRA
#'
#' Analysis Dataset for the study of Progression Independent of Relapse Activity
#' in the Polish RRMS patient population.
#'
#' @source UMB.
#' @format A dataset whose columns carry a \code{"label"} attribute and which
#'   carries a dataset-level \code{"metadata"} attribute (see the examples);
#'   exact column structure still to be documented -- TODO confirm.
#' @examples
#' attr(ADPIRA, "metadata")
#' sapply(ADPIRA, attr, which = "label")
#' head(ADPIRA)
#'
"ADPIRA"
79049c5ad00cab56f718b712052a5b0000485cec
ec4d63a43632045cd00f05861c9dc01bfc03f4d5
/EnzymeAssay/man/enzyme_assay.calc_slope.Rd
a6b2f8f2c56fafeb7262e59d30b1954d61427675
[]
no_license
alisandra/enzyme_assay
9cfec8e9af9d728a0551e285cec9d468169d34c1
4ebf526525fbb75106c45ad63bd0014caf1c05e8
refs/heads/master
2021-01-01T19:30:33.441556
2015-04-07T21:02:49
2015-04-07T21:02:49
33,488,409
0
0
null
null
null
null
UTF-8
R
false
false
514
rd
enzyme_assay.calc_slope.Rd
% Generated by roxygen2 (4.1.0.9001): do not edit by hand % Please edit documentation in R/enzyme_assayClass.R \name{enzyme_assay.calc_slope} \alias{enzyme_assay.calc_slope} \title{Getting Slope from Range} \usage{ enzyme_assay.calc_slope(object) } \arguments{ \item{object}{An object of the Enzyme_Assay class} } \description{ This internal function calculates the slopes associated with curated ranges } \examples{ ea <- enzyme_assay(enzyme_assay.data, enzyme_assay.time) eaCurated <- enzyme_assay.curate(ea) }
209e546fd99c16bf77ee1814e5ae035434827331
dbfa1ca787588ef4748e98623918ec2d6cea3b4f
/Week 2/OutliersInBaseVsggplot.R
bc28f2aaaf6236e955bcc8492f97f05daa8e21c9
[]
no_license
saurish/EDA-Course-Code
c7ccc70cbf8ed9912f2836031698467962f81cf0
d089c62b290bf6e826be9859f52a6bc52df31991
refs/heads/master
2020-04-07T14:45:23.100269
2019-05-12T06:26:25
2019-05-12T06:26:25
158,459,881
0
0
null
null
null
null
UTF-8
R
false
false
195
r
OutliersInBaseVsggplot.R
## Demonstrates how ggplot2 handles an extreme outlier compared to base
## graphics: base plot() with a fixed ylim clips the spike, while the
## ggplot version shows the full data range.
# BUG FIX: ggplot()/aes()/geom_line() were used without loading ggplot2.
library(ggplot2)

testdat <- data.frame(x = 1:100, y = rnorm(100))
testdat[50, 2] <- 100 ## Outlier!

# Base-graphics version (the ylim of c(-3, 3) hides the outlier):
# plot(testdat$x, testdat$y, type = "l", ylim = c(-3, 3))

g <- ggplot(testdat, aes(x = x, y = y))
g + geom_line()
4915aae02474661cb18d60af77eb8d451c2d28c8
e2582f3945f61a570200925be1ee43a68b834d73
/r_scripts/gpomo_all_var_observed.R
90a0873a6ab3bda07b06b26167ec62284f0c9add
[]
no_license
anastasiabzv/L-ODEfind
9dfb2660796b53e471593b35c8661ddd8c364d76
4be0573370727228b6c6115308b6d9169cb89f6f
refs/heads/master
2023-03-31T20:31:43.582165
2020-12-22T13:30:39
2020-12-22T13:30:39
null
0
0
null
null
null
null
UTF-8
R
false
false
3,834
r
gpomo_all_var_observed.R
require("GPoM") mse <- function(x, y){ means = colMeans(y) sds = apply(y,2,sd) xnorm = scale(x, center = means, scale = sds) ynorm = scale(y, center = means, scale = sds) return(mean((xnorm-ynorm)^2)) } fit_gpomo <- function(time, ladata, max_time_derivative, poly_degree, steps){ # output: coefficients of best gpomo model as the minimum mse metric in train prediction series. n_vars = dim(ladata)[2] out <- gPoMo(data =ladata, tin =time, dMax =poly_degree, nS=rep(max_time_derivative, n_vars), show =0, IstepMin =10, IstepMax =steps, nPmin=0, nPmax=Inf, method ='rk4') # calculate mse for all the tested models loss <- list() for(model_name in names(out$stockoutreg)){ model_data = out$stockoutreg[[model_name]][,-1] data_posta = ladata # TODO: filtrar con el tiempo y no quedarse con los priemros como ahora if (any(is.na(model_data)) || dim(model_data)[1]<dim(data_posta)[1]){ loss[[model_name]] <- Inf }else{ colnames(model_data) = colnames(data_posta) model_data = model_data[seq(length(time)),] loss[[model_name]] <- mse(model_data, data_posta) } } # return the coefficients of the model with the minimum value of mse # -> matrix coefs <- out$models[[names(loss)[which.min(loss)]]] colnames(coefs) <- rev(poLabs(nVar=max_time_derivative*n_vars, dMax = 1)[-1]) rownames(coefs) <- poLabs(nVar=max_time_derivative*n_vars, dMax = poly_degree) return(coefs) } experiment <- function(read_data_folder, write_data_folder, steps_list, max_time_derivative, poly_degree){ print(read_data_folder) dir.create(file.path(write_data_folder), showWarnings = TRUE) # read data fit_time_list <- list() for(filename in list.files(path=read_data_folder, full.names=FALSE, recursive=FALSE)){ if (grepl('solution', filename, fixed = TRUE)){ print(filename) ladata <- read.csv(paste(read_data_folder, filename, sep='/')) time <- ladata[,1] ladata <- ladata[,-1] #fit data and time it for(steps in steps_list){ print(paste('Doing steps:', steps)) t0 <- Sys.time() coefs <- fit_gpomo(ladata=ladata, time=time, 
max_time_derivative=max_time_derivative, poly_degree=poly_degree, steps=steps) initcond = unlist(strsplit(unlist(strsplit(filename, "init_cond_", fixed = TRUE))[2], ".csv", fixed=TRUE))[1] params = unlist(strsplit(unlist(strsplit(filename, "params_", fixed = TRUE))[2], "_", fixed=TRUE))[1] out_filename <- paste0('solution-gpomo_coefs-vars_x_y_z-dmax_',max_time_derivative,'-poly_',poly_degree,'-params_',params,'-init_cond_',initcond,'-steps_',steps,'.csv') fit_time_list[[out_filename]] <- difftime(Sys.time(), t0, units = "secs") print(paste('Time:',fit_time_list[[out_filename]])) write.csv(fit_time_list, paste0(write_data_folder, 'times.csv')) print('Saving coefs.') write.csv(coefs, paste0(write_data_folder, out_filename)) } } } } args <- commandArgs(trailingOnly = TRUE) print(args) experiment( read_data_folder = args[1], write_data_folder =args[2], #steps_list=c(40,640,1280,5120,10240), steps_list = c(args[3]), max_time_derivative=1, poly_degree=2 )
22336dc32ef60a442fb03e7d6f80004040fb6db0
c15d71d1683916a47c1e9a6509fe26525df22753
/R/queryCounts.R
e4e54d2746e27d53e0aa7645afde303590597031
[ "MIT" ]
permissive
LZhang98/nsSNPHotspotViewer
07574a48f579d2030721673b8fc2dfe6e8b11700
1f61b3beab5d002ac12f56ef9ef1e181c6b1adc5
refs/heads/master
2023-01-29T19:35:55.921753
2020-12-10T04:15:17
2020-12-10T04:15:17
307,894,766
0
0
null
null
null
null
UTF-8
R
false
false
2,015
r
queryCounts.R
#' Query dataset for top genes
#'
#' Filter and sort a SNP-counts data frame (as produced by
#' \code{getSNPCounts}) according to user-defined thresholds.
#'
#' @param countsData A dataframe. Output of \code{getSNPCounts}.
#' @param minLength Minimum gene length to keep. Default 0 (removes nothing).
#' @param minCounts Minimum number of observed SNPs to keep. Default 0
#'   (removes nothing).
#' @param sortBy Sorting mode for the output:
#' \itemize{
#'   \item \code{counts} - The default. Sort by decreasing SNP counts.
#'   \item \code{length} - Sort by decreasing gene length.
#'   \item \code{none} - Return the subset unsorted.
#' }
#' Any other value falls back to the default with a warning.
#'
#' @return The subset of \code{countsData} satisfying the thresholds,
#'   sorted as requested.
#'
#' @examples
#' countsData <- getSNPCounts(snpPositions)
#' topHits1 <- queryCounts(countsData, 10000, 150)
#' topHits2 <- queryCounts(countsData, 10000, 150, sortBy="length")
#' topHits3 <- queryCounts(countsData, 10000, 150, sortBy="asdf")
#'
#' @export
queryCounts <- function(countsData, minLength=0, minCounts=0, sortBy="counts") {

    # Guard against an unrecognised sort mode: warn and fall back to default.
    if (!(sortBy %in% c("counts", "length", "none"))) {
        sortBy <- "counts"
        warning("Invalid sortBy argument. Default used.")
    }

    # Keep only rows passing both thresholds.
    keep <- countsData$length >= minLength & countsData$counts >= minCounts
    hits <- countsData[keep, , ]

    # Dispatch on the (now validated) sort mode.
    switch(sortBy,
           none   = hits,
           counts = hits[order(hits$counts, decreasing = TRUE), , ],
           length = hits[order(hits$length, decreasing = TRUE), , ])
}

# [END]
be33553572c9389210e7ad8af4140d6c2f45d4a5
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
/data/genthat_extracted_code/blackbox/examples/calcGCV.Rd.R
17154868345b1533421e48c3e18f867fa2192d01
[]
no_license
surayaaramli/typeRrh
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
66e6996f31961bc8b9aafe1a6a6098327b66bf71
refs/heads/master
2023-05-05T04:05:31.617869
2019-04-25T22:10:06
2019-04-25T22:10:06
null
0
0
null
null
null
null
UTF-8
R
false
false
208
r
calcGCV.Rd.R
## Auto-extracted example script for blackbox::calcGCV (from the package's
## Rd examples); the actual worked example lives on the main doc page.
library(blackbox)

### Name: calcGCV
### Title: Estimate smoothing parameters by generalized cross-validation
###   (GCV)
### Aliases: calcGCV

### ** Examples

# see example on main doc page (?blackbox)
d1a7b81d16d91865fe95b0b2a7e7599535efb574
e1f093f20200ed2bd820d4ee0884c87c73e41d66
/R/mantel.R
c3dea508f6ad6dcfb987c42a0fdc2e37bed0e58e
[]
no_license
cran/ecodist
8431a5659f02211c3131e282fbd2c90765285aa0
a34b199c4d70d5ee21e2d6abbd54d2a9729d7dd0
refs/heads/master
2022-05-13T06:14:42.563254
2022-05-05T05:50:08
2022-05-05T05:50:08
17,695,709
0
1
null
null
null
null
UTF-8
R
false
false
8,728
r
mantel.R
mantel <- function(formula = formula(data), data, nperm = 1000, mrank = FALSE, nboot = 500, pboot = 0.9, cboot = 0.95) { # Mantel test # Written by Sarah C. Goslee # 27 June 2000 # Updated 5 April 2001 # # formula is y ~ x + n1 + n2 + n3 + ... # NOT y ~ x | n1 + n2 + n3 + ... # The | means something else in S-Plus formulas. # # Uses C for permutation and bootstrap routines. # # This version calculates partial coefficients by permuting the y matrix. # # Will calculate the simple correlation or n-th order partial correlation # between two distance matrices in either of two ways: Pearson (mrank=FALSE) # or Spearman (mrank=TRUE) # # A permutation test is used to calculate the significance of r. # The permutation test was designed to be relatively fast, but because of the # way this was done, there is a possibility of repeating permutations of # 1/n! where the distance matrix is n by n. In particular, for small matrices # n < 8 or so, it may be better to enumerate the permutations. # # # As an added bonus, this function offers the option of calculating # bootstrapped confidence limits for the correlation coefficient. # nboot is the number of iterations. # pboot is the level to resample at. # cboot is the desired confidence limit. # NOTE: This is not bootstrapping with replacement. That doesn't make # much sense for dissimilarities because of the possibility of duplicates. # The dissimilarity between a sample and itself is always zero. # # mantel returns a five-element list: # mantelr is the correlation. # pval1 is the one-sided p-value (null hypothesis r <= 0) (0 if nperm == 0). # pval2 is the one-sided p-value (null hypothesis r >= 0) (0 if nperm == 0). # pval3 is the two-sided p-value (null hypothesis r = 0) (0 if nperm == 0). # llim is the lower confidence limit. # ulim is the upper confidence limit. # # requires mantel.c (Included in ecodist.c.) 
# # Stuff R needs to be able to use a formula m <- match.call(expand.dots = FALSE) m2 <- match(c("formula", "data"), names(m), nomatch=0) m <- m[c(1, m2)] m[[1]] <- as.name("model.frame") m <- eval(m, parent.frame()) m <- as.matrix(m) # End of R stuff. m is now the data for the Mantel test as # columns y, x, n1, n2, n3, ... # Determine the size of the matrices & do some error checking. n <- (1 + sqrt(1 + 8 * nrow(m)))/2 if(abs(n - round(n)) > 0.0000001) stop("Matrix not square.\n") n <- round(n) if(ncol(m) < 2) stop("Not enough data. \n") # If there are only x and y, then use the data as is. if(dim(m)[[2]] == 2) { ymat <- as.vector(m[, 1]) xmat <- as.vector(m[, 2]) if(mrank) { ymat <- rank(ymat) xmat <- rank(xmat) } ycor <- ymat xcor <- xmat } else { ymat <- as.vector(m[, 1]) omat <- m[, -1] if(mrank) { ymat <- rank(ymat) omat <- apply(omat, 2, rank) } omat <- cbind(rep(1, length(ymat)), omat) xmat <- as.vector(omat[, 2]) omat <- omat[, -2] omat <- as.matrix(omat) ycor <- lm.fit(omat, ymat)$residuals xcor <- lm.fit(omat, xmat)$residuals } mantelr <- cor(xcor, ycor) # Convert matrices to column order for compatibility with C routines. xmat <- full(xmat) ymat <- full(ymat) xmat <- xmat[col(xmat) > row(xmat)] ymat <- ymat[col(ymat) > row(ymat)] if(dim(m)[[2]] > 2) { for(i in 2:dim(omat)[[2]]) { curcoll <- omat[, i] curcoll <- full(curcoll) curcoll <- curcoll[col(curcoll) > row(curcoll)] omat[, i] <- curcoll } } # If using a permutation test, start here: if(nperm > 0) { # Set up the arrays needed. zstats <- numeric(nperm) tmat <- matrix(0, n, n) rarray <- rep(0, n) if(dim(m)[[2]] == 2) { # Standardize the columns of the matrices so # that z = r and we can do 2-tailed tests. 
ncor <- length(xmat) w1 <- sum(xmat)/ncor w2 <- sum(xmat^2) w2 <- sqrt(w2/ncor - w1^2) xmat <- (xmat - w1)/w2 w1 <- sum(ymat)/ncor w2 <- sum(ymat^2) w2 <- sqrt(w2/ncor - w1^2) ymat <- (ymat - w1)/w2 cresults <- .C("permute", as.double(xmat), as.double(ymat), as.integer(n), as.integer(length(xmat)), as.integer(nperm), zstats = as.double(zstats), as.double(as.vector(tmat)), as.integer(rarray), PACKAGE = "ecodist") } else { tomat <- t(omat) hmat <- solve(tomat %*% omat) hmat <- hmat %*% tomat bmat <- rep(0, ncol(omat)) xcor <- as.vector(lm.fit(omat, xmat)$residuals) ycor <- as.vector(lm.fit(omat, ymat)$residuals) # Standardize the columns of the matrices so # that z = r and we can do 2-tailed tests. ncor <- length(xcor) w1 <- sum(xcor)/ncor w2 <- sum(xcor^2) w2 <- sqrt(w2/ncor - w1^2) xcor <- (xcor - w1)/w2 w1 <- sum(ycor)/ncor w2 <- sum(ycor^2) w2 <- sqrt(w2/ncor - w1^2) ycor <- (ycor - w1)/w2 cresults <- .C("permpart", as.double(as.vector(hmat)), bmat = as.double(bmat), as.double(as.vector(omat)), as.double(ymat), as.double(xcor), ycor = as.double(ycor), as.integer(n), as.integer(length(bmat)), as.integer(length(xmat)), as.integer(nperm), zstats = as.double(zstats), as.double(as.vector(tmat)), as.integer(rarray), PACKAGE = "ecodist") } zstats <- cresults$zstats # Calculate the p-values. pval1 <- length(zstats[zstats >= zstats[1]])/nperm pval2 <- length(zstats[zstats <= zstats[1]])/nperm pval3 <- length(zstats[abs(zstats) >= abs(zstats[1])])/nperm } else { pval1 <- 0 pval2 <- 0 pval3 <- 0 } # If not using a permutation test, return 0 for the p-values. 
if(nboot > 0) { if(dim(m)[[2]] == 2) { ycor <- ymat xcor <- xmat } else { xcor <- as.vector(lm.fit(omat, xmat)$residuals) ycor <- as.vector(lm.fit(omat, ymat)$residuals) } bootcor <- numeric(nboot) rarray <- numeric(n) rmat <- numeric(length(xcor)) xdif <- numeric(length(xcor)) ydif <- numeric(length(xcor)) cresults <- .C("bootstrap", as.double(xcor), as.double(ycor), as.integer(n), as.integer(length(xcor)), as.integer(nboot), as.double(pboot), bootcor = as.double(bootcor), as.integer(rarray), as.integer(rmat), as.double(xdif), as.double(ydif), PACKAGE = "ecodist") bootcor <- cresults$bootcor bootcor <- sort(bootcor) pval <- (1 - cboot)/2 llim <- quantile(bootcor, pval) ulim <- quantile(bootcor, 1 - pval) } else { llim <- 0 ulim <- 0 } c(mantelr = mantelr, pval1 = pval1, pval2 = pval2, pval3 = pval3, llim = llim, ulim = ulim) }
4a6f70eb9c8bf64ae5c8ce679c1dd4dc19e5eeda
71fea57eff7e645225e1a4b0b19fc6c75b940e20
/R/explore.R
ba0746f95d2c7a38290759670b83784e41ac2c84
[]
no_license
anhqle/authoritarian_violence
5ed8260082beabc4bd5d2a5b4fa184b736be3c11
639e493002d5d454d403fede99316d972ed51a67
refs/heads/master
2021-05-28T09:38:20.826409
2015-03-09T01:07:27
2015-03-09T01:07:27
null
0
0
null
null
null
null
UTF-8
R
false
false
738
r
explore.R
# ---- Set up workspace ---- rm(list=ls()) # Load external functions source("./R/functions.R") # Load packages packs <- c("lme4", "nlme", "plyr", "dplyr") f_install_and_load(packs) # Load data load('./data/data_final.RData') #---- Missingness ---- # Count tmp <- d_merged %>% group_by(country, year) %>% summarize(count = n()) tmp2 <- d_merged_full %>% group_by(country, year) %>% summarize(count = n()) names(d_merged) # Missing data for each country year tmp <- ddply(d_merged_full, c("country", "year"), colwise(function(x) as.numeric(any(is.na(x))))) # Missing data for each country tmp2 <- ddply(tmp[ ,-2], c("country"), colwise(function(x) sum(x))) #---- Summary statistics ---- unique(d_merged$source_sector_name)
c30e1fd3da63beb72008b21ac56c759eee1bfd44
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
/data/genthat_extracted_code/phytools/examples/nodeHeights.Rd.R
1ac0091b578d844b932d533fc03b4f1981e2e97c
[]
no_license
surayaaramli/typeRrh
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
66e6996f31961bc8b9aafe1a6a6098327b66bf71
refs/heads/master
2023-05-05T04:05:31.617869
2019-04-25T22:10:06
2019-04-25T22:10:06
null
0
0
null
null
null
null
UTF-8
R
false
false
252
r
nodeHeights.Rd.R
library(phytools) ### Name: nodeHeights ### Title: Compute the heights above the root of each node ### Aliases: nodeHeights nodeheight ### Keywords: phylogenetics utilities comparative method ### ** Examples tree<-rtree(10) X<-nodeHeights(tree)
4455e5e31662662f167c28d0ef335ebe59d4107a
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
/data/genthat_extracted_code/modTempEff/vignettes/modTempEff.R
4dff543ad70cbc3757a32fb140744bd2ea2e5773
[]
no_license
surayaaramli/typeRrh
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
66e6996f31961bc8b9aafe1a6a6098327b66bf71
refs/heads/master
2023-05-05T04:05:31.617869
2019-04-25T22:10:06
2019-04-25T22:10:06
null
0
0
null
null
null
null
UTF-8
R
false
false
4,891
r
modTempEff.R
### R code from vignette source 'modTempEff.Rnw' ################################################### ### code chunk number 1: preliminaries ################################################### library("modTempEff") options(prompt = "R> ", continue = "+ ") ################################################### ### code chunk number 2: loadlib ################################################### library("modTempEff") ################################################### ### code chunk number 3: data ################################################### data("dataDeathTemp") head(dataDeathTemp) ################################################### ### code chunk number 4: modTempEff.Rnw:323-326 (eval = FALSE) ################################################### ## layout(matrix(c(1,1,2), ncol = 3)) ## with(dataDeathTemp, plot(dec1, xlab="day", ylab="no. of deaths")) ## with(dataDeathTemp, plot(mtemp, dec1, xlab="temperature", ylab="no. of deaths")) ################################################### ### code chunk number 5: descrPlots ################################################### layout(matrix(c(1,1,2), ncol = 3)) with(dataDeathTemp, plot(dec1, xlab="day", ylab="no. of deaths")) with(dataDeathTemp, plot(mtemp, dec1, xlab="temperature", ylab="no. of deaths")) ################################################### ### code chunk number 6: modTempEff.Rnw:352-354 ################################################### o <- tempeff(dec1 ~ day + factor(dweek) + factor(year) + factor(month) + csdl(mtemp, psi = 20, L = c(60, 60)), data = dataDeathTemp, fcontrol = fit.control(display = TRUE)) ################################################### ### code chunk number 7: modTempEff.Rnw:362-363 ################################################### o.noRidge <- update(o, .~. 
- day - factor(year) - factor(month) + seas(day, 30), fcontrol = fit.control(display = FALSE)) ################################################### ### code chunk number 8: modTempEff.Rnw:377-380 ################################################### o o.noRidge ################################################### ### code chunk number 9: modTempEff.Rnw:400-401 ################################################### o.Ridge.l <- update(o.noRidge, ridge = list(cold = "l", heat = "l")) ################################################### ### code chunk number 10: modTempEff.Rnw:404-405 ################################################### formula(o.Ridge.l) ################################################### ### code chunk number 11: modTempEff.Rnw:420-422 ################################################### o.Ridge.l4 <- update(o.noRidge, ridge = list(cold = "l^4", heat = "l^4")) o.Ridge.l4 ################################################### ### code chunk number 12: modTempEff.Rnw:428-429 ################################################### anova(o.noRidge, o.Ridge.l, o.Ridge.l4, test = "Cp") ################################################### ### code chunk number 13: modTempEff.Rnw:441-442 ################################################### summary(o.Ridge.l4) ################################################### ### code chunk number 14: modTempEff.Rnw:467-468 ################################################### coef(o.Ridge.l4,L=7) ################################################### ### code chunk number 15: DLcurve ################################################### par(mfcol = c(2, 3)) plot(o.noRidge, new = FALSE) plot(o.Ridge.l, new = FALSE) plot(o.Ridge.l4, new = FALSE) ################################################### ### code chunk number 16: modTempEff.Rnw:504-508 (eval = FALSE) ################################################### ## par(mfcol = c(2, 3)) ## plot(o.noRidge, new = FALSE) ## plot(o.Ridge.l, new = FALSE) ## plot(o.Ridge.l4, new = FALSE) 
################################################### ### code chunk number 17: modTempEff.Rnw:523-525 ################################################### o2<-tempeff(dec1 ~ day + factor(dweek) + factor(year) + factor(month) + csdl(mtemp, psi = c(10, 20), L = c(60, 60)), data = dataDeathTemp, fcontrol = fit.control(display = TRUE)) ################################################### ### code chunk number 18: modTempEff.Rnw:536-537 ################################################### summary(o2) ################################################### ### code chunk number 19: modTempEff.Rnw:545-548 ################################################### o2.Ridge.l4 <- update(o.Ridge.l4, psi = c(10, 20), fcontrol = fit.control(it.max = 30)) o2.Ridge.l4 ################################################### ### code chunk number 20: modTempEff.Rnw:556-557 ################################################### anova(o.Ridge.l4, o2.Ridge.l4, test="BIC")
7a875432417d1358066d6531c270783f5c732231
488854749b8d6c1e5f1db64dd6c1656aedb6dcbd
/man/SAXMethods.Rd
f1df1b73e236d150496e0e0fa6a6a0891be1aec4
[]
no_license
cran/XML
cd6e3c4d0a0875804f040865b96a98aca4c73dbc
44649fca9d41fdea20fc2f573cb516f2b12c897e
refs/heads/master
2023-04-06T18:52:11.013175
2023-03-19T10:04:35
2023-03-19T10:04:35
17,722,082
4
3
null
null
null
null
UTF-8
R
false
false
2,363
rd
SAXMethods.Rd
\name{startElement.SAX} \alias{startElement.SAX} \alias{endElement.SAX} \alias{text.SAX} \alias{comment.SAX} \alias{processingInstruction.SAX} \alias{entityDeclaration.SAX} \alias{.InitSAXMethods} \alias{text.SAX,ANY,SAXState-method} \alias{comment.SAX,ANY,SAXState-method} \alias{endElement.SAX,ANY,SAXState-method} \alias{startElement.SAX,ANY,ANY,SAXState-method} \alias{processingInstruction.SAX,ANY,ANY,SAXState-method} \alias{entityDeclaration.SAX,ANY,ANY,ANY,ANY,ANY,SAXState-method} \title{Generic Methods for SAX callbacks} \description{ This is a collection of generic functions for which one can write methods so that they are called in repsonse to different SAX events. The idea is that one defines methods for different classes of the \code{.state} argument and dispatch to different methods based on that argument. The functions represent the different SAX events. } \usage{ startElement.SAX(name, atts, .state = NULL) endElement.SAX(name, .state = NULL) comment.SAX(content, .state = NULL) processingInstruction.SAX(target, content, .state = NULL) text.SAX(content, .state = NULL) entityDeclaration.SAX(name, base, sysId, publicId, notationName, .state = NULL) .InitSAXMethods(where = "package:XML") } %- maybe also `usage' for other objects documented here. \arguments{ \item{name}{the name of the XML element or entity being declared} \item{atts}{named character vector of XML attributes} \item{content}{the value/string in the processing instruction or comment} \item{target}{the target of the processing instruction, e.g. the R in \code{<?R....>}} \item{base}{x} \item{sysId}{the system identifier for this entity} \item{publicId}{the public identifier for the entity} \item{notationName}{name of the notation specification} \item{.state}{the state object on which the user-defined methods should dispatch.} \item{where}{the package in which the class and method definitions should be defined. 
This is almost always unspecified.} } \value{ Each method should return the (potentially modified) state value. } \references{\url{https://www.w3.org/XML/}, \url{http://www.xmlsoft.org}} \author{Duncan Temple Lang} \note{ This no longer requires the Expat XML parser to be installed. Instead, we use libxml's SAX parser.} \seealso{ \code{\link{xmlEventParse}} } %\examples{} \keyword{file}
4f96ed2088fe58ae47fe1d285e6d2c2b010aadd1
af395fc19b90fbfd7d382ccf4bc6dbd1b26fb209
/0901.R
db809f4a8f38e539c09a20137dca7f93f378f499
[]
no_license
Thughandling/R_studio
ec8f6395fe60330a022dea39ccf1e822ede68488
d46dac5d6b71d8a7583957173904f77f991f1565
refs/heads/main
2023-09-04T01:28:08.305840
2021-09-15T06:20:52
2021-09-15T06:20:52
405,446,824
0
0
null
null
null
null
UTF-8
R
false
false
1,969
r
0901.R
# 1 score <- c(80,60,70,50,90) print(score) # 2 mean(score) # 3 mean_score = mean(score) print(mean_score) # 4 fruits = data.frame('제품' = c('사과','딸기','수박'), '가격' = c(1800,1500,3000), '판매량' = c(24,38,13)) fruits # 5 mean(fruits$가격) mean(fruits$판매량) # 6 library(ggplot2) mpg #cty 도시연비 hwy 고속도로 연비 # Q1 mpg_ <- data.frame(mpg) mpg_ # Q2 mpg_ <- rename(mpg_,'city'='cty','highway'='hwy') mpg_ # Q3 # mpg$hwy <- ifelse(mpg$hwy < 12 | mpg$hwy > 37, NA, mpg$hwy) # exam %>% filter(math > 50) # mtcars %>% filter(disp > 400)$mpg four <- mpg_ %>% filter(displ <= 4) four <- mean(four$highway) five <- mpg_ %>% filter(displ >= 5) five <- mean(five$highway) high_displ <- ifelse(four > five, '4 이하','5 이상') high_displ print(four) print(five) # Q5 audi <- mpg_ %>% filter(manufacturer == 'audi') %>% select(city) audi <- mean(audi) toyota <- mpg_ %>% filter(mpg_$manufacturer == 'toyota') toyota <- mean(toyota$city) high_city <- ifelse(audi > toyota,'audi','toyota' ) high_city print(audi) print(toyota) # Q6 suv <- mpg_ %>% filter(class=='suv') suv <- mean(suv$city) compact <- mpg_ %>% filter(class=='compact') compact <- mean(compact$city) class_city <- ifelse(suv > compact,'suv','compact' ) class_city print(suv) print(compact) # 7 ss <- read.csv("samsung.csv") ss <- ss %>% filter(Date > '2021-01-01') ss$Date #write.csv(ss,'2021_samsung.csv') # 1) str(ss[c("Open","High", "Low", "Close", "Adj.Close", "Volume")]) # 2) summary(ss[c("Open","High", "Low", "Close", "Adj.Close", "Volume")]) # 3) normalization <- function(v){ (v - min(v)) / (max(v) - min(v)) } normalization(ss[c("Open","High", "Low", "Close", "Adj.Close", "Volume")]) Standardization <-function(v){ (v-mean(v))/sd(v) } Standardization(ss$Open) Standardization(ss$High) Standardization(ss$Low) Standardization(ss$Close) Standardization(ss$Adj.Close) Standardization(ss$Volume) # 4)
1b13e56c8080ba4e9825dfca648ff0bf44a8ddeb
fc49ca75553cdec61effbd727bac54a005dbcdaf
/R/oSCR.parfit.R
88bad2b12b03d1603ccb79ccaa56a07e9208c349
[]
no_license
cgguarderas/oSCR
da67fae5fb145025337c3b957fac4cfc1eee4b3d
fa009162cecb7497e793e5895e2171ca59e26dd4
refs/heads/master
2021-01-23T17:35:46.741331
2017-08-10T13:11:34
2017-08-10T13:11:34
null
0
0
null
null
null
null
UTF-8
R
false
false
1,027
r
oSCR.parfit.R
oSCR.parfit <- function(mods, data, ncores=2){ library(doParallel) sf<- data$sf ss<- data$ss cs<- data$cs wrapperX<- function(model.number, mods, sf, ss, cs=NULL){ mod <- list(mods[[1]][[model.number]], #density mods[[2]][[model.number]], #detection mods[[3]][[model.number]], #sigma mods[[4]][[model.number]]) #asu fm <- oSCR.fit(scrFrame=sf, ssDF=ss, costDF=cs, model=mod, trimS=2.5, se=TRUE, distmet="euc",sexmod="constant") save(fm, file=paste("model",model.number,".RData",sep="")) return(fm) } nmods<- nrow(mods) cl<-makeCluster(ncores) registerDoParallel(cl) out <-foreach(i=1:nmods) %dopar% { library(oSCR) tmp<- wrapperX(i, mods, data$sf, data$ssDF) return(tmp) } stopCluster(cl) save("out",file="models.RData") tmp<- list() for(i in 1:length(out)){ tmp[[i]]<- out[[i]] tmp[[i]]$call$model <- list("list",paste(mods[i,1]),paste(mods[i,2]),paste(mods[i,3]),paste(mods[i,4])) } out<- fitList.oSCR(tmp,rename=TRUE ) return(out) }
ddff4c3a6814fbd8590ace9fb311063602a7d1c7
1779ec29e1eea8bb271d2596ee249171180efa49
/test.R
9839269990ce97870276064079ab83bbd0966cc1
[]
no_license
Olanrewajubaby/PBD_FOO
d4fe281c57f1bffc2b159f9ba73b6897c94d4c32
8ba2ee816f81db4c6cec0d6594b0d08fcf806ced
refs/heads/master
2020-04-05T13:07:13.656715
2017-07-30T22:21:46
2017-07-30T22:21:46
95,099,769
0
0
null
null
null
null
UTF-8
R
false
false
5,540
r
test.R
R version 3.2.5 (2016-04-14) -- "Very, Very Secure Dishes" Copyright (C) 2016 The R Foundation for Statistical Computing Platform: x86_64-w64-mingw32/x64 (64-bit) R is free software and comes with ABSOLUTELY NO WARRANTY. You are welcome to redistribute it under certain conditions. Type 'license()' or 'licence()' for distribution details. R is a collaborative project with many contributors. Type 'contributors()' for more information and 'citation()' on how to cite R or R packages in publications. Type 'demo()' for some demos, 'help()' for on-line help, or 'help.start()' for an HTML browser interface to help. Type 'q()' to quit R. > #---------------------------------------------- > # 1- ADDITION > #----------------------------------------------- > > add <- function (first,second) { + return (first + second) + } > first <- 5 > > second <- 6 > add(first, second) [1] 11 > #----------------------------------------------- > # 2- SUBTRACTION > #----------------------------------------------- > subtract <- function (first,second) { + return (first - second) + } > first <- 10 > > second <- 2 > subtract(first,second) [1] 8 > > #----------------------------------------------- > # 3- MULTIPLICATION > #----------------------------------------------- > multiply <- function (first,second){ + return (first * second) + } > first <- 10 > second <- 2 > multiply(first,second) [1] 20 > > #----------------------------------------------- > # 4- DIVISION > #----------------------------------------------- > > divide <- function (first,second){ + if (second == 0){ + return ('inf') + } + else{ + return (first / second) + } + } > first <- 4 > second <- 0 > divide(first,second) [1] "inf" > first <- 200 > second <- 13 > divide(first,second) [1] 15.38462 > > #----------------------------------------------- > # 5- EXPONENT > #----------------------------------------------- > exponent <- function (first,second){ + + if (first != 0 && second ==1){ + return (first) + } + else if (first == 0 && second 
>0){ + return ("0") + } + else if (first != 0 && second ==0){ + return ("1") + } + else if (first != 0 && second <0){ + return ("For negative exponents, take the reciprocal of the base (flip it); change the negative exponent to a positive exponent and solve") + } + else + return (first ** second) + } > first <- 3 > second <- 5 > exponent(first,second) [1] 243 > > first <- 4 > second <- 1 > exponent(first,second) [1] 4 > > first <- 0 > second <- 0 > exponent(first,second) [1] 1 > > first <- 4 > second<- 0 > exponent(first,second) [1] "1" > > first <- 5 > second <- -4 > exponent(first,second) [1] "For negative exponents, take the reciprocal of the base (flip it); change the negative exponent to a positive exponent and solve" > > #----------------------------------------------- > # 6- SQUARE > #----------------------------------------------- > square <- function (first,second){ + if (first == 0){ + return (0) + } + else if (second == 2){ + return (first ** second) + } + } > first <- 0 > second<- 2 > square(first,second) [1] 0 > > > first <- 2 > second <- 2 > square(first,second) [1] 4 > > first <- 9 > second <- 2 > square(first,second) [1] 81 > > #----------------------------------------------- > # 7- SQUARE ROOT > #----------------------------------------------- > squareRoot <- function (n){ + if (n > 0){ + return (n ** 0.5) + } + else if (n == 0){ + return (0) + } + else if (n < 0){ + return ("inf") + + } + } > n <- 25 > squareRoot(n) [1] 5 > > > n <- -10 > squareRoot(n) [1] "inf" > > #----------------------------------------------- > # 8- CUBE > #----------------------------------------------- > > cube <- function (first,second){ + if (first == 0){ + return (0) + } + else if (second == 3){ + return (first ** second) + } + } > first <- 0 > second <- 10 > cube(first,second) [1] 0 > > first <- 10 > second <- 3 > cube(first,second) [1] 1000 > > #------------------------------------------------- > # 9- COSINE > #----------------------------------------------- > cosine 
<- function (first) { + second=math.cos(4) + return (second) + } > first <- 2 > second <- 4 > cos(second) [1] -0.6536436 > > > #----------------------------------------------- > # 10- MODULO > #----------------------------------------------- > modulo <- function(first, second){ + return (first %% second) + } > first<- 5 > second <- 2 > modulo(first, second) [1] 1 > > first<- 4 > second <- 6 > modulo(first, second) [1] 4
95cf6666fafee4944e4b8f5ecb6b977815f0154a
a50e07477af54821cb133805c0e8674eb7969a65
/man/estimateSTR.Rd
1dd6192f6152a763181ea04349046574eaae7343
[]
no_license
bocinsky/soilDB
ac118e09082563795ff45228bcbe5b9368541c42
12819708abfe9ef1daf0a70a78b9aaa33aedca9b
refs/heads/master
2020-06-06T00:39:10.068856
2019-06-18T18:39:14
2019-06-18T18:39:14
192,589,760
1
0
null
2019-06-18T18:05:19
2019-06-18T18:05:18
null
UTF-8
R
false
false
1,449
rd
estimateSTR.Rd
\name{estimateSTR} \alias{estimateSTR} \title{Estimate Soil Temperature Regime} \description{Estimate soil temperature regime (STR) based on mean annual soil temperature (MAST), mean summer temperature (MSST), mean winter soil temperature (MWST), presence of O horizons, saturated conditions, and presence of permafrost. Several assumptions are made when O horizon or saturation are undefined.} \usage{ estimateSTR(mast, mean.summer, mean.winter, O.hz = NA, saturated = NA, permafrost = FALSE) } \arguments{ \item{mast}{vector of mean annual soil temperature (deg C)} \item{mean.summer}{vector of mean summer soil temperature (deg C)} \item{mean.winter}{vector of mean winter soil temperature (deg C)} \item{O.hz}{logical vector of O horizon presence / absense} \item{saturated}{logical vector of seasonal saturation} \item{permafrost}{logical vector of permafrost presence / absense} } \details{Pending. \href{http://ncss-tech.github.io/AQP/soilDB/STR-eval.html}{Related tutorial}. } \value{Vector of soil temperature regimes.} \references{ Soil Survey Staff. 2015. Illustrated guide to soil taxonomy. U.S. Department of Agriculture, Natural Resources Conservation Service, National Soil Survey Center, Lincoln, Nebraska. } \author{D.E. Beaudette} \seealso{ \code{\link{STRplot}} } \examples{ # simple example estimateSTR(mast=17, mean.summer = 22, mean.winter = 12) } \keyword{ manip }% use one of RShowDoc("KEYWORDS")
6b826dd0299b4dc6b11496ab886cafc381e464f4
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
/data/genthat_extracted_code/parma/vignettes/Portfolio_Optimization_in_parma.R
bc62ef5649ff160fd75b1b5eb0214020b941e0fe
[]
no_license
surayaaramli/typeRrh
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
66e6996f31961bc8b9aafe1a6a6098327b66bf71
refs/heads/master
2023-05-05T04:05:31.617869
2019-04-25T22:10:06
2019-04-25T22:10:06
null
0
0
null
null
null
null
UTF-8
R
false
false
91
r
Portfolio_Optimization_in_parma.R
### R code from vignette source 'Portfolio_Optimization_in_parma.Rnw' ### Encoding: UTF-8
4677e62b668d92c3efd84622705f25a6270f3062
2bec5a52ce1fb3266e72f8fbeb5226b025584a16
/immer/R/immer_matrix2.R
cede307e23c039bf9cbccfa8b98af108c18fc8cb
[]
no_license
akhikolla/InformationHouse
4e45b11df18dee47519e917fcf0a869a77661fce
c0daab1e3f2827fd08aa5c31127fadae3f001948
refs/heads/master
2023-02-12T19:00:20.752555
2020-12-31T20:59:23
2020-12-31T20:59:23
325,589,503
9
2
null
null
null
null
UTF-8
R
false
false
156
r
immer_matrix2.R
## File Name: immer_matrix2.R ## File Version: 0.01 immer_matrix2 <- function(x, nrow) { y <- TAM::tam_matrix2(x=x, nrow=nrow) return(y) }
76008fd7c9ead2fc017c02c1142aed467decec00
33bb983cc20cff0a5bfc8ef342addd678274b061
/diagnostic_aderma/diagnostic_aderma.R
fbd82f9381989874f5dfb175ce8e4749d162e36b
[]
no_license
Gottavianoni/R
00c65142fd29e62cc010147b9089eaecd85f0ea9
6918a4dec29faa442567f7ce271c38d001b9a2af
refs/heads/master
2021-01-22T20:35:18.829590
2017-04-05T15:17:39
2017-04-05T15:17:39
85,334,011
0
0
null
null
null
null
UTF-8
R
false
false
2,150
r
diagnostic_aderma.R
# aderma_diag$nomproduit <- ifelse(regexpr("dermalibour",aderma_diag$nomproduit,fixed=T) != -1,"dermalibour",aderma_diag$nomproduit) aderma_diag$nomproduit <- ifelse(regexpr("epitheliale",aderma_diag$nomproduit,fixed=T) != -1,"epitheliale",aderma_diag$nomproduit) aderma_diag$nomproduit <- ifelse(regexpr("rheacalm",aderma_diag$nomproduit,fixed=T) != -1,"rheacalm",aderma_diag$nomproduit) aderma_diag$nomproduit <- ifelse(regexpr("sensiphas",aderma_diag$nomproduit,fixed=T) != -1,"sensiphase",aderma_diag$nomproduit) aderma_diag$nomproduit <- ifelse(regexpr("exomega",aderma_diag$nomproduit,fixed=T) != -1,"xeramega",aderma_diag$nomproduit) ind_vir <- which(nchar(aderma_diag$nomproduit) > 12) aderma_diag <- aderma_diag[-ind_vir,] aderma_diag$brucha <- ifelse(regexpr("brule+|chaud+",aderma_diag$com,perl=T) != -1,1,0) aderma_diag$demang <- ifelse(regexpr("demang",aderma_diag$com,perl=T) != -1,1,0) aderma_diag$pictir <- ifelse(regexpr("picot+|tirail+",aderma_diag$com,perl=T) != -1,1,0) aderma_diag$rouge <- ifelse(regexpr("rouge",aderma_diag$com,perl=T) != -1,1,0) aderma_diag$seche <- ifelse(regexpr("Seche",aderma_diag$peau,perl=T) != -1,1,0) aderma_diag$douleur <- ifelse(regexpr("douleur",aderma_diag$com,perl=T) != -1,0,1) for(c in names(aderma_diag)) aderma_diag[,c] <- ifelse(is.na(aderma_diag[,c]),0,aderma_diag[,c]) res$verif_rdf <- ifelse(res[,1] == res[,2],"OK","KO") res$verif_svm <- ifelse(res[,1] == res[,3],"OK","KO") i <- 3 pred_nnet$predi <- "" for (i in 1:length(pred_nnet[,1])) { pred_nnet[i,"predi"] <- names(pred_nnet)[which(pred_nnet[i,] == max(pred_nnet[i,1], pred_nnet[i,2],pred_nnet[i,3], pred_nnet[i,4], pred_nnet[i,5]))] } pred_rpart$predi <- "" for (i in 1:length(pred_rpart[,1])) { pred_rpart[i,"predi"] <- names(pred_rpart)[which(pred_rpart[i,] == max(pred_rpart[i,1], pred_rpart[i,2],pred_rpart[i,3], pred_rpart[i,4], pred_rpart[i,5]))] } res$verif_rdf <- ifelse(res[,1] == res[,2],"OK","KO") res$verif_svm <- ifelse(res[,1] == res[,3],"OK","KO") res$verif_nnet <- 
ifelse(res[,1] == res[,4],"OK","KO") res$verif_rpart <- ifelse(res[,1] == res[,5],"OK","KO")
3d63ec362f9e34da844d350b7741d75c3725cfb3
7880f40b9ce793f47e7b552c9abc379c5ca3c7fd
/R/inits.R
752ad6815f1c8a2d5a72a43a610d5133c189c5a2
[]
no_license
chjackson/disbayes
1e62eb6a2f549f558c1ca1556b451eadfe929d25
1600b36d580fcf2c97fd0ffeb6be3cc115f1b472
refs/heads/master
2023-03-16T21:15:52.431687
2023-03-07T11:51:05
2023-03-07T11:51:05
138,712,654
8
2
null
2023-09-09T13:25:28
2018-06-26T09:05:08
C++
UTF-8
R
false
false
9,194
r
inits.R
## mdat <- remission, eqage, const_rem ## min <- cf_init init_rates <- function(dat, mdata, idata,...){ inc_init <- init_rate("inc", dat) rem_init <- init_rate("rem", dat, agg=mdata$const_rem) optu <- fit_unsmoothed_opt(dat, inc_init=inc_init, rem_init=rem_init, mdata=mdata, idata=idata) optdf <- tidy_disbayes_opt(optu, .disbayes_vars) cf <- optdf$mode[optdf$var=="cf"] inc <- optdf$mode[optdf$var=="inc"] rem <- optdf$mode[optdf$var=="rem"] eqage <- mdata$eqage for (i in 1:eqage){ cf[i] <- cf[eqage+1] inc[i] <- inc[eqage+1] rem[i] <- rem[eqage+1] } init_eqage_hi <- 90 for (i in init_eqage_hi:dat$nage){ cf[i] <- cf[init_eqage_hi-1] inc[i] <- inc[init_eqage_hi-1] rem[i] <- rem[init_eqage_hi-1] } if (mdata$const_rem) rem=rem_init list(cf=cf, inc=inc, rem=rem, optu=optu) } init_rate <- function(rate, dat, agg=FALSE){ default <- 0.001 nums <- dat[[sprintf("%s_num",rate)]] denoms <- dat[[sprintf("%s_denom",rate)]] if (agg) { nums <- sum(nums); denoms <- sum(denoms) } rcrude <- nums / denoms rcrude[is.na(rcrude)] <- default rcrude[is.nan(rcrude)] <- default rcrude <- pmax(default, rcrude) rcrude } fit_unsmoothed_opt <- function(dat, inc_init=NULL, rem_init=NULL, mdata, idata){ if (is.null(inc_init)) inc_init <- init_rate("inc", dat) if (is.null(rem_init)) rem_init <- init_rate("rem", dat, mdata$const_rem) nage <- dat$nage Xdummy <- matrix(0, nrow=nage, ncol=2) datstanu <- c(dat, mdata) ## quantities in the data that are different for the purpose of this training model data_fixed <- list(smooth_cf = 0, smooth_inc = 0, smooth_rem = 0, const_cf = 0, trend = 0, nyr=1, nbias=1, bias = 1, incdata_ind = 1, prevdata_ind = 1, increasing_cf=0, K=2, X=Xdummy, inc_trend = array(1, dim=c(nage,1)), cf_trend = array(1, dim=c(nage,1)), scf_isfixed=0, sinc_isfixed=0, srem_isfixed=0, lambda_cf_fixed=0, lambda_inc_fixed=0, lambda_rem_fixed=0) for (i in names(data_fixed)) datstanu[[i]] <- data_fixed[[i]] initu <- list(cf_par = rep(idata$cf_init,nage), rem_par = if (mdata$remission) 
as.array(rem_init) else numeric(), inc_par = inc_init, prevzero = if (mdata$prev_zero) as.array(max(dat$prev_num[1],1)/max(dat$prev_denom[1],2)) else numeric()) opt <- rstan::optimizing(stanmodels$disbayes, data = datstanu, init = initu, hessian = FALSE) class(opt) <- "disopt" opt } ## Obtains spline basis terms, and initial values for their coefficients given ## estimates of incidence or CF from unsmoothed model. init_smooth <- function(y, eqage, eqagehi, s_opts=NULL){ x <- NULL # define unbound variables to satisfy R CMD check nage <- length(y) age <- agecons <- 1:nage agecons[1:eqage] <- eqage if (!is.null(eqagehi) && eqagehi < nage) agecons[eqagehi:nage] <- eqagehi sm <- mgcv::smoothCon(s(x), data=data.frame(x=agecons), diagonal.penalty=TRUE)[[1]] X <- sm$X beta <- coef(lm(y ~ X - 1)) list(X=X, beta=beta) } ## Form constant initial value list to supply to Stan initlist_const <- function(initrates, cf_smooth, inc_smooth, remission, rem_smooth, eqage, smooth_inc, smooth_cf, const_cf, increasing_cf, smooth_rem, const_rem, nbias, scf_isfixed, sinc_isfixed, srem_isfixed){ lam_init <- laminc_init <- lamrem_init <- 0.5 beta_init <- cf_smooth$beta betainc_init <- inc_smooth$beta betarem_init <- rem_smooth$beta list(inc_par = if (smooth_inc) numeric() else initrates$inc, rem_par = if (remission && !smooth_rem) as.array(initrates$rem) else numeric(), beta = if (smooth_cf && !const_cf) beta_init else numeric(), lambda_cf = if (smooth_cf && !scf_isfixed) as.array(lam_init) else numeric(), beta_inc = if (smooth_inc) betainc_init else numeric(), lambda_inc = if (smooth_inc && !sinc_isfixed) as.array(laminc_init) else numeric(), beta_rem = if (smooth_rem) betarem_init else numeric(), lambda_rem = if (smooth_rem && !srem_isfixed) as.array(lamrem_init) else numeric(), cfbase = if (const_cf | increasing_cf) as.array(initrates$cf[eqage]) else numeric(), bias_loghr = if (nbias > 1) as.array(rnorm(1)) else numeric() ) } ## Form initial value list random generating function to 
supply to Stan initlist_random <- function(nage, initrates, cf_smooth, inc_smooth, remission, rem_smooth, eqage, smooth_inc, smooth_cf, const_cf, increasing_cf, smooth_rem, const_rem, nbias, scf_isfixed, sinc_isfixed, srem_isfixed){ lam_init <- laminc_init <- lamrem_init <- 0.5 beta_init <- cf_smooth$beta betainc_init <- inc_smooth$beta betarem_init <- rem_smooth$beta inits <- function(){ list( inc_par = rlnorm(nage*(1-smooth_inc), meanlog=log(initrates$inc), sdlog=initrates$inc/10), rem_par = as.array(rlnorm(remission*(1-smooth_rem)*(nage*(1 - const_rem) + 1*const_rem), meanlog=log(initrates$rem), sdlog=initrates$rem/10)), beta = if (smooth_cf) rnorm(length(beta_init)*(1 - const_cf), mean=beta_init, sd=abs(beta_init)/10) else numeric(), lambda_cf = as.array(rlnorm(length(lam_init)*smooth_cf*(1-scf_isfixed), meanlog=log(lam_init), sdlog=lam_init/10)), beta_inc = if (smooth_inc) rnorm(length(betainc_init), mean=betainc_init, sd=abs(betainc_init)/10) else numeric(), lambda_inc = as.array(rlnorm(length(laminc_init)*smooth_inc*(1-sinc_isfixed), meanlog=log(laminc_init), sdlog=laminc_init/10)), beta_rem = if (remission && !const_rem) rnorm(length(betarem_init)*smooth_rem, mean=betarem_init, sd=abs(betarem_init)/10) else numeric(), lambda_rem = as.array(rlnorm(length(lamrem_init)*smooth_rem*(1-srem_isfixed), meanlog=log(lamrem_init), sdlog=lamrem_init/10)), cfbase = if (const_cf | increasing_cf) as.array(initrates$cf[eqage]) else numeric(), bias_loghr = if (nbias > 1) as.array(rnorm(1)) else numeric() ) } inits } fit_unsmoothed <- function(dat, inc_init=NULL, rem_init=NULL, mdata, idata, method = "mcmc", iter = 1000, stan_control = NULL, ...){ if (is.null(inc_init)) inc_init <- init_rate("inc", dat) if (is.null(rem_init)) rem_init <- init_rate("rem", dat) nage <- dat$nage Xdummy <- matrix(0, nrow=nage, ncol=2) datstanu <- c(dat, mdata) ## quantities in the data that are different for the purpose of the unsmoothed model ## everything else is passed from dat (observed 
data) or mdata (model spec) data_fixed <- list(smooth_cf = 0, smooth_inc = 0, const_cf = 0, trend = 0, nyr=1, nbias=1, bias = 1, incdata_ind = 1, prevdata_ind = 1, increasing_cf=0, K=2, X=Xdummy, inc_trend = array(1, dim=c(nage,1)), cf_trend = array(1, dim=c(nage,1)), scf_isfixed=0, sinc_isfixed=0, srem_isfixed=0, lambda_cf_fixed=0, lambda_inc_fixed=0, lambda_rem_fixed=0) for (i in names(data_fixed)) datstanu[[i]] <- data_fixed[[i]] initu <- function(){ list(cf_par = rnorm(nage, mean=idata$cf_init, sd=idata$cf_init/10), rem_par = rnorm(mdata$remission*(1 - mdata$smooth_rem)*(nage*(1 - mdata$const_rem) + 1*mdata$const_rem), mean=rem_init, sd=rem_init/10), inc_par = rnorm(nage, mean=inc_init, sd=inc_init/10)) } if (method=="mcmc") { fitu <- rstan::sampling(stanmodels$disbayes, data = datstanu, init = initu, include = FALSE, pars=c("beta","lambda"), iter = iter, control = stan_control, ...) } else { fitu <- rstan::vb(stanmodels$disbayes, data = datstanu, init = initu, include = FALSE, pars=c("beta","lambda")) } fitu }
a60ade0cfde1cad694f419f4c474b2f0396f511b
63c6d111652afe309a8692aa744546a7550ffe7c
/msis510/text mining/text_mining.R
6135ffe5d5a96ba54887af2686b2c9c1277549c4
[]
no_license
yanmo96/UW
65cb2aea1fa995d6b4b2c0a6ec1ee05063474038
a3ac9c7f9d41e1c830873cb54597fa392e0854c4
refs/heads/main
2023-02-03T05:06:31.920247
2020-12-22T19:59:35
2020-12-22T19:59:35
320,718,194
0
0
null
null
null
null
UTF-8
R
false
false
2,371
r
text_mining.R
# Text-mining walkthrough: corpus preprocessing, term weighting (TF and
# TF-IDF), a logistic-regression text classifier on review text, and a
# word cloud.
# install.packages(c("tm", "SnowballC", "caret", "wordcloud"))
library(tm)
library(SnowballC)  # provides the stemmer used by stemDocument()

# Clean and stem a corpus: lower-case, strip numbers/punctuation/stopwords/
# extra whitespace, then stem.
# FIX: plain base functions such as tolower must be wrapped in
# content_transformer() so tm_map() keeps returning proper text documents;
# passing tolower directly breaks the corpus for DocumentTermMatrix() in
# tm >= 0.6 (see ?tm_map / ?content_transformer).
preprocess_corpus <- function(corp) {
  corp <- tm_map(corp, content_transformer(tolower))
  corp <- tm_map(corp, removeNumbers)          # digits carry no signal here
  corp <- tm_map(corp, removePunctuation)
  corp <- tm_map(corp, removeWords, stopwords("english"))
  corp <- tm_map(corp, stripWhitespace)
  tm_map(corp, stemDocument)                   # reduce words to their stems
}

# ---- demo on four toy sentences ---------------------------------------------
text <- c("this is the first sentence!!",
          "this is a second Sentence :)",
          "the third sentence, is here",
          "forth of all 4 sentences")

# convert documents into a corpus
corp <- Corpus(VectorSource(text))
inspect(corp)
corp <- preprocess_corpus(corp)
inspect(corp)

# Document-term matrix based on raw term frequency
dtm <- DocumentTermMatrix(corp)
inspect(dtm)

# TF-IDF weighting down-weights terms common to all documents
tfidf <- weightTfIdf(dtm)
inspect(tfidf)

# ==============================================================================
# practice: predict "Recommended" from free-text reviews
reviews <- read.csv("reviews.csv")
View(reviews)

# 4th column is assumed to hold the review text -- confirm against the CSV
corp <- Corpus(VectorSource(reviews[, 4]))
inspect(corp)
corp <- preprocess_corpus(corp)

dtm <- DocumentTermMatrix(corp)
inspect(dtm)

tfidf <- weightTfIdf(dtm)
inspect(tfidf)

# Drop terms absent from more than 93% of documents to keep the matrix small
tfidf <- removeSparseTerms(tfidf, 0.93)
inspect(tfidf)

review.df <- data.frame(as.matrix(tfidf), Recommended = reviews$Recommended)

# 60/40 train/validation split (seeded for reproducibility)
set.seed(1)
train.index <- sample(1:nrow(review.df), nrow(review.df) * 0.6)
train.df <- review.df[train.index, ]
valid.df <- review.df[-train.index, ]

# Logistic regression on the TF-IDF features
logit.reg <- glm(Recommended ~ ., data = train.df, family = "binomial")
summary(logit.reg)

logit.reg.pred <- predict(logit.reg, valid.df, type = "response")
pred <- ifelse(logit.reg.pred > 0.5, 1, 0)    # 0.5 probability cut-off

library(caret)
confusionMatrix(factor(pred),
                factor(valid.df$Recommended), positive = "1")

# install.packages("wordcloud")
library(wordcloud)
m <- as.matrix(tfidf)
v <- sort(colSums(m), decreasing = TRUE)      # total TF-IDF weight per term
importance <- data.frame(word = names(v), tfidf = v)
wordcloud(importance$word, importance$tfidf, random.order = FALSE,
          max.words = 100, colors = brewer.pal(8, "Dark2"))
d2ff97e7c1665da50f801e0e16106d9606726b01
a1cc534128010f158bf71cd268217b479f834727
/man/transform_scores.Rd
d19d849cc4961678d8dce37cd1acccbedba898af
[]
no_license
dcosme/qualtrics
66555e58cc122633f4c3d1320f57a8a621d6da69
4d260fc79ee50b86a7f2d08240a55edd9b794528
refs/heads/master
2021-06-20T05:09:56.500409
2018-05-08T17:22:38
2018-05-08T17:22:38
134,196,406
2
1
null
2018-05-20T23:52:58
2018-05-20T23:52:58
null
UTF-8
R
false
true
402
rd
transform_scores.Rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/questionnaire_tools.R \name{transform_scores} \alias{transform_scores} \title{transform scores} \usage{ transform_scores(item_values, transformation, min = NA, max = NA) } \arguments{ \item{item_values}{item_values} \item{transformation}{transformation} \item{min}{min} \item{max}{max} } \description{ transform scores }
f6b46e9e7b7c6f37f8026307e7c0e59758f62de7
6d8aaf52204210d9ef16085235fa971427f2e471
/gene_category_specific_coverage.R
992330cbae2f3d26d767046d710d7206ea8fcf27
[]
no_license
Callum-Rakhit/panel_validation
945acce8af983bbd408313f85e3be06c2765b773
59a803b269b1a5b34b9c680ae78959043620e0f3
refs/heads/master
2022-03-05T22:55:28.646715
2022-02-02T15:40:06
2022-02-02T15:40:06
204,648,131
0
0
null
null
null
null
UTF-8
R
false
false
7,551
r
gene_category_specific_coverage.R
# TODO(Callum)
#  - Take input directly from snappy
#  - Automate file import and naming (no hardcoding)

##### Load/Install relevant packages #####
# Also need libssl-dev and libxml2-dev on Ubuntu 18.04
# Install any packages that are missing, then attach the whole vector.
GetPackages <- function(required.packages) {
  packages.not.installed <-
    required.packages[!(required.packages %in% installed.packages()[, "Package"])]
  if (length(packages.not.installed)) {
    install.packages(packages.not.installed, dependencies = T)
  }
  suppressMessages(lapply(required.packages, require, character.only = T))
}

GetPackages(c("ggplot2", "reshape2", "wesanderson", "tidyverse", "scales",
              "doParallel", "devtools", "dplyr", "gtable", "grid",
              "gridExtra", "data.table"))

##### Load relevant amplicon/coverage/sample data, one file per run ##########
amplicon_coverage_filenames <- Sys.glob(paths = "/mnt/shared_data/work/three_runs_together/*amplicon_coverage")
amplicon_coverage_sampleIDs <- basename(amplicon_coverage_filenames)
amplicon_coverage_list <- lapply(amplicon_coverage_filenames,
                                 function(i) { read.table(file = i, header = T) })
amplicon_coverage_melted <- do.call(rbind, amplicon_coverage_list)
# FIX: the original used rep(..., each = <vector>), but "each" must be a
# scalar (R only uses its first element, with a warning), which mislabels
# rows whenever the per-file row counts differ. "times" takes one count per
# sample and repeats each ID by its own file's row count.
amplicon_coverage_melted$id <- factor(rep(amplicon_coverage_sampleIDs,
                                          times = sapply(amplicon_coverage_list, nrow)))
# Per-run mean coverage, merged back onto every amplicon row
amplicon_coverage_average_per_run <- setDT(amplicon_coverage_melted)[
  , .(mean_coverage = mean(BARCODE_COUNT)), by = id]
amplicon_coverage_melted$mean_coverage <-
  amplicon_coverage_average_per_run$mean_coverage[match(
    amplicon_coverage_melted$id, amplicon_coverage_average_per_run$id)]

##### Subsets for specific genes/categories ##################################
CEBPA_amplicon_coverage_melted <-
  amplicon_coverage_melted[grep("^CEBPA", amplicon_coverage_melted$PRIMER), ]

Adult_Solid_Gene_List <- c(
  "^BRAF", "^BRCA1", "^BRCA2", "^CDKN2A", "^EGFR", "^HRAS", "^KIT", "^KRAS",
  "^MET", "^MLH1", "^NRAS", "^PDGFRA", "^SMARCA4", "^TP53")

KRAS_Gene_List <- c("^KRAS")

# FIX: commas were missing between most of these entries, which made the
# whole file fail to parse; restored here. NOTE(review): unlike the lists
# above these names are not anchored with "^" -- confirm whether substring
# matching is intended before using this list for subsetting.
Neurological_Gene_List <- c(
  "BRAF", "IDH1", "IDH2", "ATRX", "H3F3A", "HIST1H3B", "TERT", "EGFRvIII",
  "HIST2H3C", "MYC", "NF1", "MYCN", "C19MC", "YAP1", "MGMT", "PDGFRA",
  "PTEN", "HIST1H3C", "H3F3B", "VHL", "TP53", "MGMT", "TSC1", "TSC2",
  "FGFR1", "DICER1", "CDKN2A", "RB1", "TERT")

Adult_Solid_amplicon_coverage_melted <- amplicon_coverage_melted[(
  grep(paste(Adult_Solid_Gene_List, collapse = "|"),
       amplicon_coverage_melted$PRIMER))]

# Gene symbol from the primer name ("GENE_xxx" -> "GENE"); created *before*
# plotting because Deviation.from.Av colours points by this column (the
# original only added it after the plots, so those calls errored).
Adult_Solid_amplicon_coverage_melted$Common_Gene_Name <-
  gsub("_.*", "", Adult_Solid_amplicon_coverage_melted$PRIMER)
CEBPA_amplicon_coverage_melted$Common_Gene_Name <-
  gsub("_.*", "", CEBPA_amplicon_coverage_melted$PRIMER)

# Pick colours
colour_palette <- rep(x = wesanderson::wes_palettes$Darjeeling1, times = 10)

# Boxplots of per-amplicon coverage for each run, ordered by coverage, with a
# line tracing each run's mean coverage. Arguments after the first are bare
# column names captured by ggplot's non-standard evaluation.
RunCov.vs.AvCov <- function(dataframe, id, BARCODE_COUNT, mean_coverage) {
  ggplot(dataframe) +
    geom_boxplot(aes(reorder(x = id, X = BARCODE_COUNT), y = BARCODE_COUNT,
                     color = "#000000"), coef = 6) +
    geom_line(aes(reorder(x = id, X = BARCODE_COUNT), y = mean_coverage,
                  group = 1)) +
    # scale_fill_manual(values = colour_palette) +
    # scale_y_log10(limits = c(1, 10000)) +
    xlab("Various different runs") +
    ylab("Coverage (average coverage for all amplicons is the line)") +
    ggtitle("Coverage per run vs average coverage") +
    theme(
      # Legends off
      legend.position = "none",
      # Remove panel border
      panel.border = element_blank(),
      # Remove panel grid lines and x-axis decoration
      panel.grid.major.x = element_blank(),
      axis.text.x = element_blank(),
      axis.ticks.x = element_blank(),
      # explicitly set the horizontal lines (or they will disappear too)
      panel.grid.major.y = element_line(size = .25, color = "black"),
      panel.grid.minor = element_blank(),
      # Remove panel background
      panel.background = element_blank())
}

# Per-amplicon deviation from the run's mean coverage, coloured by gene.
Deviation.from.Av <- function(dataframe, PRIMER, BARCODE_COUNT, mean_coverage,
                              Common_Gene_Name) {
  ggplot(dataframe) +
    geom_boxplot(aes(x = PRIMER, y = (BARCODE_COUNT - mean_coverage),
                     color = Common_Gene_Name), coef = 6) +
    scale_fill_manual(values = colour_palette) +
    xlab("Sample ID") +
    theme(
      panel.border = element_blank(),
      panel.grid.major.x = element_blank(),
      axis.ticks.x = element_blank(),
      # explicitly set the horizontal lines (or they will disappear too)
      panel.grid.major.y = element_line(size = .25, color = "black"),
      panel.grid.minor = element_blank(),
      panel.background = element_blank())
}

# Create and save the plots (Common_Gene_Name is now supplied everywhere --
# the original omitted it from several calls, which errored at render time).
output <- RunCov.vs.AvCov(CEBPA_amplicon_coverage_melted, id, BARCODE_COUNT,
                          mean_coverage)
ggsave("~/Desktop/CEPRA_1.pdf", output, width = 16 * 1.25, height = 9 * 1.25)
output <- Deviation.from.Av(CEBPA_amplicon_coverage_melted, PRIMER,
                            BARCODE_COUNT, mean_coverage, Common_Gene_Name)
ggsave("~/Desktop/CEPRA_2.pdf", output, width = 16 * 1.25, height = 9 * 1.25)
output <- RunCov.vs.AvCov(Adult_Solid_amplicon_coverage_melted, id,
                          BARCODE_COUNT, mean_coverage)
ggsave("~/Desktop/AS_1.pdf", output, width = 16 * 1.25, height = 9 * 1.25)
output <- Deviation.from.Av(Adult_Solid_amplicon_coverage_melted, PRIMER,
                            BARCODE_COUNT, mean_coverage, Common_Gene_Name)
ggsave("~/Desktop/AS_2.pdf", output, width = 16 * 1.25, height = 9 * 1.25)

Deviation.from.Av(Adult_Solid_amplicon_coverage_melted, PRIMER, BARCODE_COUNT,
                  mean_coverage, Common_Gene_Name)

# Ratio of each amplicon's coverage to its run's mean coverage
Adult_Solid_amplicon_coverage_melted$diff <-
  (Adult_Solid_amplicon_coverage_melted$BARCODE_COUNT /
     Adult_Solid_amplicon_coverage_melted$mean_coverage)
CEBPA_amplicon_coverage_melted$diff <-
  (CEBPA_amplicon_coverage_melted$BARCODE_COUNT /
     CEBPA_amplicon_coverage_melted$mean_coverage)
boxplot(Adult_Solid_amplicon_coverage_melted$diff)
mean(Adult_Solid_amplicon_coverage_melted$diff)
mean(CEBPA_amplicon_coverage_melted$diff)

##### Read in low coverage (<200) regions from snappy ########################
low_coverage_filenames <- Sys.glob(paths = "/mnt/shared_data/work/three_runs_together/*low_coverage")
# Extract all low coverage samples
low_coverage_regions <- lapply(low_coverage_filenames,
                               function(i) { read.table(i, header = T) })
# Identify low coverage regions found in all samples
common_low_cov_primers <- Reduce(
  f = intersect,
  x = lapply(low_coverage_regions, '[[', 4)  # 4th column = low-coverage amplicon names
)
common_low_cov_primers <- as.data.frame(common_low_cov_primers)
common_low_cov_primers <- common_low_cov_primers[
  !grepl("^chr", common_low_cov_primers$common_low_cov_primers), ]
View(common_low_cov_primers)

# FIX: the original passed the KRAS_Gene_List character vector straight into
# Deviation.from.Av, which expects a coverage data frame; build the KRAS
# subset first, then plot it.
KRAS_amplicon_coverage_melted <- amplicon_coverage_melted[(
  grep(paste(KRAS_Gene_List, collapse = "|"),
       amplicon_coverage_melted$PRIMER))]
KRAS_amplicon_coverage_melted$Common_Gene_Name <-
  gsub("_.*", "", KRAS_amplicon_coverage_melted$PRIMER)
Deviation.from.Av(KRAS_amplicon_coverage_melted, PRIMER, BARCODE_COUNT,
                  mean_coverage, Common_Gene_Name)
8895b4c6cf1cd423a737a861035a5298c27b94ae
fbcc13c2996838c30992397b145e3a3266c2f96a
/man/game_status-set.Rd
c085470f4350c8e1990b48de670aff59097aee79
[ "MIT" ]
permissive
msaltieri/alicetwist
768f36f0e3f7413070bf2fa0043f8561db274047
0550446feded5a2b8fc3a30d3a3339264d3a2388
refs/heads/master
2021-01-19T00:24:06.225177
2017-04-10T14:33:02
2017-04-10T14:33:02
87,167,107
0
0
null
null
null
null
UTF-8
R
false
true
466
rd
game_status-set.Rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/game-attribs.R \name{game_status<-} \alias{game_status<-} \title{Set the game status} \usage{ game_status(game, player) <- value } \arguments{ \item{game}{An \code{alicetwist} game object} \item{player}{The player whose hands are to be modified} \item{value}{A vector containing the new number of fingers to assign to each hand} } \description{ Set the game status } \keyword{internal}
0384c935721dbc3aafb1bb15b4a3a1df46ba9831
bf6cff3125023e0bede2db32a79542a7a76d2530
/R/fitting_functions_multiple_genes.R
3e4d06031b5728b35ccabcd9ee6dc7e1c05d6244
[ "MIT" ]
permissive
markrobinsonuzh/regsplice
15d655f3ee1eb496b2ce92f66a3728cced438057
db0014e78af3fd6f16647a04d2ab91921dbd4395
refs/heads/master
2021-01-13T08:22:54.432366
2016-10-04T18:44:12
2016-10-04T18:44:12
69,993,689
0
0
null
2016-10-04T18:25:45
2016-10-04T18:25:44
null
UTF-8
R
false
false
8,297
r
fitting_functions_multiple_genes.R
#' @include class_RegspliceData.R class_RegspliceResults.R
NULL

#' Fit models.
#'
#' Model fitting functions for \code{regsplice} package.
#'
#' There are three model fitting functions:
#'
#' \code{fit_reg_multiple} fits regularized (lasso) models containing an optimal subset
#' of exon:condition interaction terms for each gene. The model fitting procedure
#' penalizes the interaction terms only, so that the main effect terms for exons and
#' samples are always included. This ensures that the null model is nested, allowing
#' likelihood ratio tests to be calculated.
#'
#' \code{fit_null_multiple} fits the null models, which do not contain any interaction
#' terms.
#'
#' \code{fit_full_multiple} fits full models, which contain all exon:condition
#' interaction terms for each gene.
#'
#' See \code{\link{create_design_matrix}} for more details about the terms in each model.
#'
#' The fitting functions fit models for all genes in the data set. The functions are
#' parallelized using \code{BiocParallel::bplapply} for faster runtime. For
#' \code{fit_reg_multiple}, the default number of processor cores is 8, or the maximum
#' available if less than 8. For \code{fit_null_multiple} and \code{fit_full_multiple},
#' the default is one core, since these functions are already extremely fast for most
#' data sets.
#'
#' A random seed can be provided with the \code{seed} argument, to generate reproducible
#' results.
#'
#' If the \code{data} object does not contain a weights matrix, all exon bins are
#' weighted equally.
#'
#' Previous step: Initialize \code{\linkS4class{RegspliceResults}} object with
#' \code{\link{initialize_results}}.
#' Next step: Calculate likelihood ratio tests with \code{\link{LR_tests}}.
#'
#'
#' @param results \code{\linkS4class{RegspliceResults}} object, which will be used to
#'   store results. Initialized using the constructor function \code{RegspliceResults()}.
#'   See \code{\linkS4class{RegspliceResults}} for details.
#' @param data \code{\linkS4class{RegspliceData}} object. In the case of RNA-seq read
#'   count data, this has been pre-transformed with \code{\link{run_voom}}. Contains
#'   \code{counts} and \code{weights} data matrices, and vector of experimental
#'   conditions for each biological sample stored in \code{colData}. See
#'   \code{\linkS4class{RegspliceData}} for details.
#' @param alpha Elastic net parameter \code{alpha} for \code{glmnet} model fitting
#'   functions. Must be between 0 (ridge regression) and 1 (lasso). Default is 1 (lasso).
#'   See \code{glmnet} documentation for more details.
#' @param lambda_choice Parameter to select which optimal \code{lambda} value to choose
#'   from the \code{cv.glmnet} cross validation fit. Choices are "lambda.min" (model with
#'   minimum cross-validated error) and "lambda.1se" (most regularized model with
#'   cross-validated error within one standard error of minimum). Default is
#'   "lambda.min". See \code{glmnet} documentation for more details.
#' @param n_cores Number of cores for parallel evaluation. For \code{fit_reg_multiple},
#'   the default is 8, or the maximum available if less than 8. For
#'   \code{fit_full_multiple} and \code{fit_null_multiple}, the default is 1, since these
#'   functions are already very fast.
#' @param seed Random seed (integer). Default is NULL. Provide an integer value to set
#'   the random seed for reproducible results.
#' @param progress_bar Whether to display progress bar (\code{fit_reg_multiple} only).
#'   Default is TRUE.
#' @param ... Other arguments to pass to \code{cv.glmnet}, \code{glmnet}, or \code{glm}.
#'
#'
#' @return Returns a \code{\linkS4class{RegspliceResults}} object containing the fitted
#'   model objects, deviance of fitted models, and degrees of freedom of fitted models.
#'   See \code{\linkS4class{RegspliceResults}} for details.
#'
#'
#' @seealso \code{\link{create_design_matrix}} \code{\link{RegspliceResults}}
#'   \code{\link{initialize_results}} \code{\link{LR_tests}}
#'
#' @seealso \code{\link[glmnet]{glmnet}} \code{\link[glmnet]{cv.glmnet}}
#'   \code{\link[stats]{glm}}
#'
#' @importFrom BiocParallel multicoreWorkers MulticoreParam bplapply
#'
#' @export
#'
#' @examples
#' file_counts <- system.file("extdata/vignette_counts.txt", package = "regsplice")
#' data <- read.table(file_counts, header = TRUE, sep = "\t", stringsAsFactors = FALSE)
#' head(data)
#'
#' counts <- data[, 2:7]
#' tbl_exons <- table(sapply(strsplit(data$exon, ":"), function(s) s[[1]]))
#' gene_IDs <- names(tbl_exons)
#' n_exons <- unname(tbl_exons)
#' condition <- rep(c("untreated", "treated"), each = 3)
#'
#' Y <- RegspliceData(counts, gene_IDs, n_exons, condition)
#'
#' Y <- filter_zeros(Y)
#' Y <- filter_low_counts(Y)
#' Y <- run_normalization(Y)
#' Y <- run_voom(Y)
#'
#' res <- initialize_results(Y)
#'
#' res <- fit_reg_multiple(res, Y, n_cores = 1)
#' res <- fit_null_multiple(res, Y)
#' res <- fit_full_multiple(res, Y)
#'
fit_reg_multiple <- function(results, data, alpha = 1,
                             lambda_choice = c("lambda.min", "lambda.1se"),
                             n_cores = NULL, seed = NULL,
                             progress_bar = TRUE, ...) {
  lambda_choice <- match.arg(lambda_choice)
  # unique gene IDs, in the (sorted) order produced by table(); the sanity
  # check inside FUN relies on results@gene_IDs being stored in this same
  # order by initialize_results()
  gene_IDs <- names(table(rowData(data)$gene_IDs))
  n_genes <- length(gene_IDs)
  # per-gene worker: subset the data object to one gene and fit its
  # regularized model; dispatched across cores by bplapply() below
  FUN <- function(i) {
    gene_ID_i <- gene_IDs[i]
    if (gene_ID_i != results@gene_IDs[i]) stop("gene IDs do not match")
    # suppressMessages: the "[" method for RegspliceData emits messages
    data_i <- suppressMessages(data[gene_ID_i, ])
    fit_reg_single(data = data_i, alpha = alpha, lambda_choice = lambda_choice, ...)
  }
  message("Fitting regularized (lasso) models...")
  if (is.null(n_cores)) n_cores <- min(BiocParallel::multicoreWorkers(), 8)
  BPPARAM <- BiocParallel::MulticoreParam(workers = n_cores, RNGseed = seed, progressbar = progress_bar)
  # setting seed with BiocParallel when using glmnet doesn't work if using only one core;
  # use set.seed() instead
  if (n_cores == 1 & !is.null(seed)) set.seed(seed)
  res <- BiocParallel::bplapply(seq_len(n_genes), FUN = FUN, BPPARAM = BPPARAM)
  # collapse the per-gene result lists into per-slot vectors/lists
  fit_collapse <- lapply(res, "[[", "fit")
  dev_collapse <- sapply(res, "[[", "dev")
  df_collapse <- sapply(res, "[[", "df")
  # store in the corresponding S4 slots of the results object
  results@fit_reg_models <- fit_collapse
  results@fit_reg_dev <- dev_collapse
  results@fit_reg_df <- df_collapse
  results
}


#' @rdname fit_reg_multiple
#' @export
#'
fit_null_multiple <- function(results, data, n_cores = 1, seed = NULL, ...) {
  gene_IDs <- names(table(rowData(data)$gene_IDs))
  n_genes <- length(gene_IDs)
  # per-gene worker (same structure as in fit_reg_multiple, but fits the
  # null model with no interaction terms)
  FUN <- function(i) {
    gene_ID_i <- gene_IDs[i]
    if (gene_ID_i != results@gene_IDs[i]) stop("gene IDs do not match")
    data_i <- suppressMessages(data[gene_ID_i, ])
    fit_null_single(data_i, ...)
  }
  message("Fitting null models...")
  BPPARAM <- BiocParallel::MulticoreParam(workers = n_cores, RNGseed = seed)
  res <- BiocParallel::bplapply(seq_len(n_genes), FUN = FUN, BPPARAM = BPPARAM)
  # collapse the per-gene result lists into per-slot vectors/lists
  fit_collapse <- lapply(res, "[[", "fit")
  dev_collapse <- sapply(res, "[[", "dev")
  df_collapse <- sapply(res, "[[", "df")
  results@fit_null_models <- fit_collapse
  results@fit_null_dev <- dev_collapse
  results@fit_null_df <- df_collapse
  results
}


#' @rdname fit_reg_multiple
#' @export
#'
fit_full_multiple <- function(results, data, n_cores = 1, seed = NULL, ...) {
  gene_IDs <- names(table(rowData(data)$gene_IDs))
  n_genes <- length(gene_IDs)
  # per-gene worker (fits the full model with all interaction terms)
  FUN <- function(i) {
    gene_ID_i <- gene_IDs[i]
    if (gene_ID_i != results@gene_IDs[i]) stop("gene IDs do not match")
    data_i <- suppressMessages(data[gene_ID_i, ])
    fit_full_single(data_i, ...)
  }
  message("Fitting full models...")
  BPPARAM <- BiocParallel::MulticoreParam(workers = n_cores, RNGseed = seed)
  res <- BiocParallel::bplapply(seq_len(n_genes), FUN = FUN, BPPARAM = BPPARAM)
  # collapse the per-gene result lists into per-slot vectors/lists
  fit_collapse <- lapply(res, "[[", "fit")
  dev_collapse <- sapply(res, "[[", "dev")
  df_collapse <- sapply(res, "[[", "df")
  results@fit_full_models <- fit_collapse
  results@fit_full_dev <- dev_collapse
  results@fit_full_df <- df_collapse
  results
}
8d21fcd38663d3c05b33cbee6d2d492ff9982f60
6cd38c6b6d5490fa44ef25ac8604b16c12c0d8e7
/radar_chart_PBDE_scripts.r
59c4804b2f5f6f215edfea28c4aec7910130c50e
[]
no_license
maopeng2018/scripts
26d71e19cf6c83207a98c2795a3269c5ea796737
87dce6e0963a9a6aa8c3ba243c4dd9243716adbd
refs/heads/master
2020-03-31T09:17:28.763605
2018-10-08T14:25:37
2018-10-08T14:25:37
152,089,734
0
0
null
null
null
null
UTF-8
R
false
false
5,312
r
radar_chart_PBDE_scripts.r
# Radar (spider) charts of substrate categories among differentially
# expressed A. niger CAZyme genes (PBDE = plant-biomass-degrading enzymes).
# Inputs: an RPKM table of significant genes and a CAZyme substrate
# annotation table, joined on gene ID.
# NOTE(review): hard-coded Windows working directory and column indices;
# adapt before running elsewhere.
setwd("D:\\Projects\\creA_project\\analysis\\visualize\\CAZyme")
FPKMstatis <- read.table("RPKMs_significant_genes_fold2p001.txt",sep="\t",head=T, na.strings="NA", dec=".", strip.white=TRUE)
cazyme=read.table("A_niger_CAZymes_substrate_Paul.txt",head=T,sep="\t", na.strings="NA", dec=".", strip.white=TRUE)
combine <- merge(FPKMstatis,cazyme,by.x="JGI_id",by.y="Gene_ID") ;
data=combine
# Mean RPKM per condition over replicate columns. Column ranges are
# hard-coded to the input table layout; "m" prefix appears to denote the
# mutant strain and W/SB two substrates/time courses -- confirm against the
# original sample sheet.
Preculture=apply(data[,3:4],1,mean);
mPreculture=apply(data[,5:6],1,mean);
W_4h=apply(data[,7:8],1,mean);
mW_4h=apply(data[,9:11],1,mean);
W_24h=apply(data[,12:14],1,mean);
mW_24h=apply(data[,15:17],1,mean);
W_48h=apply(data[,18:20],1,mean);
mW_48h=apply(data[,21:22],1,mean);
SB_4h=apply(data[,23:25],1,mean);
mSB_4h=apply(data[,26:28],1,mean);
SB_24h=apply(data[,29:31],1,mean);
mSB_24h=apply(data[,32:34],1,mean);
SB_48h=apply(data[,35:36],1,mean);
mSB_48h=apply(data[,37:39],1,mean);
# Matrix of per-condition means, one row per gene
## m=cbind(Preculture,mPreculture,C_4h,C_24h,C_48h,mC_4h,mC_24h,mC_48h,S_4h,S_24h,S_48h,mS_4h,mS_24h,mS_48h,SB_4h,SB_24h,SB_48h,mSB_4h,mSB_24h,mSB_48h,W_4h,W_24h,W_48h,mW_4h,mW_24h,mW_48h)
m=cbind(Preculture,mPreculture,W_4h,mW_4h,W_24h,mW_24h,W_48h,mW_48h,SB_4h,mSB_4h,SB_24h,mSB_24h,SB_48h,mSB_48h)
rownames(m)=combine$JGI_id
# Keep only genes whose maximum mean RPKM across conditions exceeds 30
m_max <- (apply(m, 1, max))
m_max=unname(unlist(m_max))
## m2=m[m_max>20,]
m2=m[m_max>30,]
FPKMmax30IDs=rownames(m2)
row.names(combine)=combine$JGI_id
combine2=combine[FPKMmax30IDs,]
# Drop genes annotated with substrate "no" (i.e. not PBDE-related)
select=(grep("\\bno\\b",combine2$Substrate_Paul))
PBDE=combine2[-select,]
write.table(PBDE,file="RPKMs_significant_PBDE_maxFPKM30.txt",sep="\t", col.names = NA)
# Columns 78:84 hold the per-condition differential-expression direction
# calls ("Greater_in_m..." etc.) and column 92 the substrate category --
# hard-coded positions, verify against the written table above.
subPBDE=PBDE[,c(78:84,92)]
print(colnames(subPBDE))
# For each condition, count substrate categories among genes expressed
# higher in the mutant ("Greater_in_m")
mPreculture=sort(table(subPBDE[grep("\\bGreater_in_m",subPBDE[,1]),8]))
mW4h=sort(table(subPBDE[grep("\\bGreater_in_m",subPBDE[,2]),8]))
mW24h=sort(table(subPBDE[grep("\\bGreater_in_m",subPBDE[,3]),8]))
mW48h=sort(table(subPBDE[grep("\\bGreater_in_m",subPBDE[,4]),8]))
mSB4h=sort(table(subPBDE[grep("\\bGreater_in_m",subPBDE[,5]),8]))
mSB24h=sort(table(subPBDE[grep("\\bGreater_in_m",subPBDE[,6]),8]))
mSB48h=sort(table(subPBDE[grep("\\bGreater_in_m",subPBDE[,7]),8]))
# Two-column (category, count) tables per condition, to be merged on "terms"
table1=cbind(terms=names(mPreculture), count1=unname(unlist(mPreculture)))
table2=cbind(terms=names(mW4h), count1=unname(unlist(mW4h)))
table3=cbind(terms=names(mW24h), count1=unname(unlist(mW24h)))
table4=cbind(terms=names(mW48h), count1=unname(unlist(mW48h)))
table5=cbind(terms=names(mSB4h), count1=unname(unlist(mSB4h)))
table6=cbind(terms=names(mSB24h), count1=unname(unlist(mSB24h)))
table7=cbind(terms=names(mSB48h), count1=unname(unlist(mSB48h)))
# W time course (4h/24h/48h): merge counts, coerce to numeric matrix, keep
# categories with at least one non-zero count. NB merge() keeps only
# categories present in every table (inner join).
compare=merge(table2,table3,by="terms")
compare=merge(compare,table4,by="terms")
compare2=compare[,2:4]
compare2=as.matrix(compare2)
compare2=apply(compare2, 2, as.numeric)
row.names(compare2)=compare[,1]
compareW=compare2[rowSums(compare2)>0,]
# Same for the SB time course
compare=merge(table5,table6,by="terms")
compare=merge(compare,table7,by="terms")
compare2=compare[,2:4]
compare2=as.matrix(compare2)
compare2=apply(compare2, 2, as.numeric)
row.names(compare2)=compare[,1]
compareSB=compare2[rowSums(compare2)>0,]
#### Radar plot of the W time course
library(fmsb)
# fmsb::radarchart expects rows = series and the first two rows to give the
# per-axis max and min, hence the transpose and the rbind(fb, ...) below
compareP=t(compareW)
data=as.data.frame(compareP)
fb=data.frame(max=rep(max(data),ncol(data)),min=rep(min(data),ncol(data)))
fb=t(fb)
colnames(fb)=colnames(data)
data=rbind(fb,data)
colors_border=c( rgb(0.2,0.5,0.5,0.9), rgb(0.8,0.2,0.5,0.9) , rgb(0.7,0.5,0.1,0.9) )
colors_in=c( rgb(0.2,0.5,0.5,0.4), rgb(0.8,0.2,0.5,0.4) , rgb(0.7,0.5,0.1,0.4) )
radarchart( data , axistype=1 ,
    #custom polygon
    pcol=colors_border , pfcol=colors_in , plwd=4 , plty=1,
    #custom the grid
    cglcol="grey", cglty=1, axislabcol="grey", caxislabels=seq(0,20,5), cglwd=0.8,
    #custom labels
    vlcex=0.8
    )
# rows 1:2 are the max/min rows added above, so drop them from the legend
legend(x=0.7, y=1, legend = rownames(data[-c(1,2),]), bty = "n", pch=20 , col=colors_in , text.col = "grey", cex=1.2, pt.cex=3)
# Combined chart: preculture + all W and SB time points (7 series)
compare=merge(table1,table2,by="terms")
compare=merge(compare,table3,by="terms")
compare=merge(compare,table4,by="terms")
compare=merge(compare,table5,by="terms")
compare=merge(compare,table6,by="terms")
compare=merge(compare,table7,by="terms")
compare2=compare[,2:8]
compare2=as.matrix(compare2)
compare2=apply(compare2, 2, as.numeric)
row.names(compare2)=compare[,1]
compareWSB=compare2[rowSums(compare2)>0,]
#### Radar plot of the combined W + SB series
library(fmsb)
compareP=t(compareWSB)
data=as.data.frame(compareP)
fb=data.frame(max=rep(max(data),ncol(data)),min=rep(min(data),ncol(data)))
fb=t(fb)
colnames(fb)=colnames(data)
data=rbind(fb,data)
# grey = preculture, orange = W series, green = SB series
colors_border=c( rgb(0.5,0.5,0.5,0.7),rgb(1,0.54,0,0.7),rgb(1,0.54,0,0.7), rgb(1,0.54,0,0.7), rgb(0,0.4,0,0.7),rgb(0,0.4,0,0.7),rgb(0,0.4,0,0.7) )
## colors_in=c( rgb(0.5,0.5,0.5,0.4),rgb(1,0.54,0,0.4), rgb(1,0.54,0,0.4), rgb(0,0.4,0,0.4),rgb(0,0.4,0,0.4),rgb(0,0.4,0,0.4) )
# alpha = 0: polygons are outlines only, no fill
colors_in=c( rgb(0.5,0.5,0.5,0),rgb(1,0.54,0,0),rgb(1,0.54,0,0), rgb(1,0.54,0,0), rgb(0,0.4,0,0),rgb(0,0.4,0,0),rgb(0,0.4,0,0) )
radarchart( data , axistype=1 ,
    #custom polygon
    pcol=colors_border , pfcol=colors_in , plwd=2 , plty=c(1,1:3,1:3),
    #custom the grid
    cglcol="grey", cglty=1, axislabcol="grey", caxislabels=seq(0,20,5), cglwd=0.8,
    #custom labels
    vlcex=0.8
    )
legend(x=1, y=1, legend = rownames(data[-c(1,2),]), col=colors_border, text.col = "black", cex=1.2, lwd=2, lty=c(1,1:3,1:3))
17a0bf9454673ce8f64964095d0434ee7b628369
2363bb535bd8d73fbeaed0195dfe9abf3c0e7b77
/script_observation.R
444c3c8a38cf25138262d74112497e8323f7f01a
[ "Apache-2.0" ]
permissive
OHDSI/ETL---Korean-NEDIS
7bd5713f7ee35a445d813c3ef0ffa6511fee4efd
b3bbfb04e17ce9e3eed8f3dc1b11869944362336
refs/heads/master
2020-03-20T10:19:50.090905
2019-06-10T08:12:29
2019-06-10T08:12:29
137,366,496
1
0
Apache-2.0
2019-02-16T04:36:25
2018-06-14T14:07:20
null
UTF-8
R
false
false
1,967
r
script_observation.R
# Observation severity
# Build the OMOP CDM OBSERVATION table from NEDIS triage records.
# Reads the global EMIHPTMI data frame (NEDIS visit-level source table);
# emits one observation row per visit that has a numeric KTAS severity
# level (ptmikts1).
observation_ETL <- function() {
  tz <- Sys.timezone()

  # Keep only rows whose KTAS level is numeric: non-numeric codes coerce to
  # NA via as.numeric() and are filtered out.
  KTAS_observation <- EMIHPTMI %>%
    select(EMIHPTMI_seq, ptmiidno, ptmiktdt, ptmikttm, ptmikts1, ptmikidn) %>%
    filter(!is.na(as.numeric(ptmikts1)))

  observation <- data.frame(
    person_id = KTAS_observation$ptmiidno,
    observation_concept_id = 37019008,      # Emergency Severity Index: 37019008
    observation_date = KTAS_observation$ptmiktdt,
    # KTAS date + "HHMM" time combined into a POSIXct in the system zone
    observation_datetime = as.POSIXct(
      paste(KTAS_observation$ptmiktdt, KTAS_observation$ptmikttm),
      format = '%Y-%m-%d %H%M', tz = tz),
    observation_type_concept_id = 38000280, # Observation recorded from EHR: 38000280
    value_as_number = KTAS_observation$ptmikts1,
    value_as_string = NA,
    value_as_concept_id = 0,
    qualifier_concept_id = NA,
    unit_concept_id = 0,
    provider_id = KTAS_observation$ptmikidn,
    visit_occurrence_id = KTAS_observation$EMIHPTMI_seq,
    visit_detail_id = 0,
    observation_source_value = KTAS_observation$ptmikts1,
    observation_source_concept_id = 0,
    # FIX: was misspelled "unit_soure_value"; the OMOP CDM field is
    # unit_source_value. Severity has no unit, hence 0.
    unit_source_value = 0,
    qualifier_source_value = NA
  )

  # Surrogate primary key; seq_len() is the length-safe idiom (also correct
  # for an empty result, unlike seq(nrow(...))).
  observation_id <- seq_len(nrow(observation))
  observation <- data.frame(observation_id, observation)
  return(observation)
}
b2a2a7671af522cafee9b6ea94388e4059364e60
ff342d1c9481e23f1790c3cea299cd20248faba5
/man/tune_accessor.Rd
8f3bf7239b78bc1cbf0d1c4d38b58bf27762fe55
[ "MIT" ]
permissive
rorynolan/tune
95ff4347d3a392ffdfacaa486628877a2305cb3b
fc40e3033ff926be09ffbbec2192fbd0c69af650
refs/heads/master
2023-01-18T18:35:32.964619
2020-11-17T15:36:46
2020-11-17T15:36:46
262,448,449
0
0
NOASSERTION
2020-05-08T23:24:17
2020-05-08T23:24:16
null
UTF-8
R
false
true
1,152
rd
tune_accessor.Rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/utils.R \name{.get_tune_parameters} \alias{.get_tune_parameters} \alias{.get_tune_parameter_names} \alias{.get_tune_metrics} \alias{.get_tune_metric_names} \alias{.get_tune_outcome_names} \alias{.get_tune_workflow} \title{Various accessor functions} \usage{ .get_tune_parameters(x) .get_tune_parameter_names(x) .get_tune_metrics(x) .get_tune_metric_names(x) .get_tune_outcome_names(x) .get_tune_workflow(x) } \arguments{ \item{x}{An object of class \code{tune_result}.} } \value{ \itemize{ \item \code{.get_tune_parameters()} returns a \code{dials} \code{parameter} object or a tibble. \item \code{.get_tune_parameter_names()}, \code{.get_tune_metric_names()}, and \code{.get_tune_outcome_names()} return a character string. \item \code{.get_tune_metrics()} returns a metric set or NULL. \item \code{.get_tune_workflow()} returns the workflow used to fit the resamples (if \code{save_workflow} was set to \code{TRUE} during fitting) or NULL. } } \description{ These functions return different attributes from objects with class \code{tune_result}. } \keyword{internal}
7f3200c0ecf5707651ea06d0c16c77c660038caa
e7cc8f75121b87e9d1617d507a2a38caf25015a0
/package/clinDataReview/tests/testthat/test_transformData.R
eabef271a4f0b82af844ea559d8b133d38b351a2
[]
no_license
ClinicoPath/clinDataReview
1fa16e25a47341c117b4f98fd73f2c94788cf9d4
dda13974128136992e2953866ac7f370a021b85f
refs/heads/master
2023-06-23T06:05:20.955803
2021-07-14T07:44:19
2021-07-14T07:44:19
null
0
0
null
null
null
null
UTF-8
R
false
false
3,063
r
test_transformData.R
# Unit tests for clinDataReview::transformData() (testthat suite).
# Covers: invalid transformation specifications, the pivot_wider path with
# and without variable labels, and a list of several chained transformations.
context("Test 'transformData' function")

test_that("Test errors of 'transformData' function", {

  # No 'transformations' argument at all.
  # NOTE(review): 'data' here resolves to the base utils::data function
  # (no local object named data exists), which still triggers the expected
  # error -- presumably an input-validation failure; confirm intent.
  expect_error(transformData(data))

  # A NULL transformation type is rejected
  transformation <- list(type = NULL)
  expect_error(transformData(data, transformations = transformation))

  # An unknown transformation type is rejected
  transformation <- list(type = "transform")
  expect_error(transformData(data, transformations = transformation))

  # pivot_wider without its required varsID/varPivot parameters is rejected
  transformation <- list(type = "pivot_wider")
  expect_error(transformData(data, transformations = transformation))

  # pivot_wider with varsID but still missing varPivot is rejected
  transformation <- list(type = "pivot_wider", varsID = "TRTP")
  expect_error(transformData(data, transformations = transformation))

})

test_that("Test 'transformData' function", {

  # Long-format toy data: 10 subjects x 2 parameters
  testData <- data.frame(
    USUBJID = rep(1 : 10, each = 2),
    PARAM = rep(c("param1", "param2"), length.out = 20),
    VALUE = seq(1, 10, length.out = 20)
  )

  # Pivot to wide: one row per subject, one column per PARAM level
  transformation <- list(
    type = "pivot_wider",
    varsID = "USUBJID",
    varPivot = "PARAM",
    varsValue = "VALUE"
  )
  dataTransform <- transformData(data = testData, transformations = transformation)
  expect_is(dataTransform, "data.frame")
  # One row per subject after pivoting
  expect_identical(dataTransform$USUBJID, 1 : 10)

  # Without explicit labels, a 'labelVars' attribute is still attached,
  # covering the two pivoted value columns
  expect_is(attributes(dataTransform), "list")
  attribDataTransf <- attr(dataTransform, "labelVars")
  expect_is(attribDataTransf, "character")
  expect_length(attribDataTransf, 2)

  # Test message: verbose = TRUE should report the transformation
  expect_message(
    transformData(
      data = testData,
      transformations = transformation,
      verbose = TRUE
    )
  )

  # Test labels: user-supplied variable labels are propagated and extended
  # to the new pivoted columns (3 input labels -> 5 output labels)
  labels <- setNames(
    c("Subject ID", "Parameter", "Value"),
    nm = colnames(testData)
  )
  dataTransform <- transformData(
    data = testData,
    transformations = transformation,
    labelVars = labels
  )
  attribDataTransf <- attr(dataTransform, "labelVars")
  expect_is(attribDataTransf, "character")
  expect_length(attribDataTransf, 5)

})

test_that("Test nested list for 'transformData'", {

  # Data with two alternative pivot variables for chained transformations
  testData <- data.frame(
    USUBJID = rep(1 : 10, each = 4),
    PARAMGR1 = rep(c("param1", "param2"), length.out = 20),
    PARAMGR2 = rep(c("param3", "param4"), length.out = 20),
    VALUEGR1 = seq(1, 10, length.out = 20)
  )

  # Two pivot_wider steps applied in sequence
  transformation <- list(
    list(
      type = "pivot_wider",
      varsID = "USUBJID",
      varPivot = "PARAMGR1",
      varsValue = "VALUEGR1"
    ),
    list(
      type = "pivot_wider",
      varsID = "USUBJID",
      varPivot = "PARAMGR2",
      varsValue = "VALUEGR1"
    )
  )
  # Chaining these transformations is expected to warn...
  expect_warning(transformData(data = testData, transformations = transformation))
  # ...but still return a data.frame
  dataTransform <- transformData(data = testData, transformations = transformation)
  expect_is(dataTransform, "data.frame")
})
447c85bc28a381f33a647e053cfeb709d003c5bb
717cff43cbac7aa6b6b49829ebd07a958dae48a9
/curso_ML/arvore_decisao_credit_data.R
7e564a47bfd2d7ec872ccbe4dd2e6bdcb8ba14a7
[]
no_license
henrikots/dados-credito-curso
0ad6dae6cdcadb50340e150ee3c048be431ffe8f
843eeff3ad785db546732c185316940f2b294db9
refs/heads/master
2020-06-03T17:24:08.995375
2019-07-03T00:16:52
2019-07-03T00:16:52
191,666,309
0
0
null
null
null
null
UTF-8
R
false
false
1,331
r
arvore_decisao_credit_data.R
# Decision-tree classifier on the credit-data set: clean the data, split it
# into train/test (75/25), fit an rpart tree and evaluate with a confusion
# matrix (~93.6% accuracy reported by the original author).
library(rpart)
library(caTools)

base <- read.csv("credit-data.csv")
base$clientid <- NULL   # drop the ID column; it carries no signal

# Mean age over the valid (positive) entries, used to impute bad values.
# (Previously hard-coded as 40.92 -- the rounded value of this very mean,
# which was computed and then discarded.)
media_age <- mean(base$age[base$age > 0], na.rm = TRUE)

# Replace inconsistent (negative) ages with the mean of the valid ages
base$age <- ifelse(base$age < 0, media_age, base$age)
# Fill missing ages with the column mean
base$age <- ifelse(is.na(base$age), mean(base$age, na.rm = TRUE), base$age)

# Normalization:  x = (x - min(x)) / (max(x) - min(x))
# Standardization (used here, recommended as it handles outliers better):
#                 x = (x - mean(x)) / sd(x)
base[, 1:3] <- scale(base[, 1:3])

# Encode the 'default' target as a factor for classification
base$default <- factor(base$default, levels = c(0, 1), labels = c(0, 1))

# Stratified 75/25 train/test split
divisao <- sample.split(base$default, SplitRatio = 0.75)
base_treinamento <- subset(base, divisao == TRUE)
base_teste <- subset(base, divisao == FALSE)

# Fit the decision tree and predict classes on the held-out set.
# (The original called predict() twice, the first result being immediately
# overwritten; only the type = "class" prediction is kept.)
classificador_ad_credit_data <- rpart(formula = default ~ ., base_treinamento)
previsoes_ad_credit_data <- predict(classificador_ad_credit_data,
                                    newdata = base_teste[-4],
                                    type = "class")

# Confusion matrix comparing observed vs predicted classes (93.6% accuracy)
matriz_confusao <- table(base_teste[, 4], previsoes_ad_credit_data)

library(caret)
confusionMatrix(matriz_confusao)
49404f425c358c86f5d693aaf82e19999ded4a78
755def7e1f9ec6c9b2f3082bc9b81e924370ca37
/getting_started.R
47aa62b36182fb5d2763de98dc54ede6854af985
[]
no_license
agupta54/data-analysis-for-life-sciences
6fbf9f2618c460a5d7b062479864be218a3c062d
d7269ae4a91025189a88dc8b86d2aa87a7f0a81f
refs/heads/master
2020-03-25T21:58:44.763138
2018-08-27T13:08:28
2018-08-27T13:08:28
144,198,721
0
0
null
null
null
null
UTF-8
R
false
false
1,114
r
getting_started.R
# Course script ("Data Analysis for the Life Sciences" getting-started
# exercises): basic data-frame indexing, dplyr verbs, and a floating-point
# precision demonstration. The bare strings "1.", "2.", ... are exercise
# markers that simply echo in an interactive session.
dat <- read.csv("femaleMiceWeights.csv")

### Exercises ###
"1."
dat$Bodyweight
"2."
dat[12,2]
"3."
dat$Bodyweight[11]
"4."
length(dat$Bodyweight)
"5."
# View() opens the data viewer (interactive sessions only)
View(dat)
"6."
?sample
# Fixed seed so the random row draw from rows 13..24 is reproducible
set.seed(1)
dat$Bodyweight[sample(13:24,1)]
### ### ###

# dplyr basics: filter rows, select columns, and pipe the steps together
dat <- read.csv("femaleMiceWeights.csv")
head(dat)
library(dplyr)
chow <- filter(dat, Diet=="chow")
head(chow)
chowVals <- select(chow, Bodyweight)
head(chowVals)
chowVals <- filter(dat, Diet=="chow") %>% select(Bodyweight)
class(dat)
class(chowVals)
# unlist collapses the one-column data.frame into a plain numeric vector
chowVals <- filter(dat, Diet=="chow") %>% select(Bodyweight) %>% unlist
class(chowVals)

### Exercises ###
"1."
dat <- read.csv("msleep.csv")
"2."
nrow(filter(dat, order=="Primates"))
"3."
class(filter(dat, order=="Primates"))
"4."
class(filter(dat, order=="Primates") %>% select(sleep_total))
"5."
mean(filter(dat, order=="Primates") %>% select(sleep_total) %>% unlist)
"6."
?summarise
# Per-order mean sleep and group sizes
dat %>% group_by(order) %>% summarise(mean = mean(sleep_total), n=n())
### ### ###
###
# Floating-point demonstration: partial sums of 3/10 + 3/100 + ... approach
# 1/3; the residual shrinks until machine precision is reached
x <- 1:5
n <- 1000
x <- 1:n
S <- sum(x)
onethird <- function(n) sum( 3/10^c(1:n))
1/3 - onethird(4)
1/3 - onethird(10)
1/3 - onethird(16)
914ab147f51dfecb0e12d96402b8a2a52e261716
7fc3d62d0dff3542a33cae700d0cc7e790302026
/R/GetSeasonDate.R
f8031032106ca14ecb4c850656eb0b21680f3535
[]
no_license
d-farnham/ORB_Paper
c21e1fb2010f280af918917be9854aa09a51d117
bd7871fb2c43afce4d939310a1bcbffd19beef1d
refs/heads/master
2021-03-27T14:58:57.576475
2018-03-19T04:20:45
2018-03-19T04:20:45
94,268,439
0
0
null
null
null
null
UTF-8
R
false
false
440
r
GetSeasonDate.R
#' Map a date to its meteorological season label.
#'
#' @param date A Date (or anything lubridate::month() accepts).
#' @param method Season scheme: "DJF" (Dec-Feb winter, default) or "JFM"
#'   (calendar-quarter seasons).
#' @return Character season label(s), same length as \code{date}.
GetSeasonDate <- function(date, method = 'DJF'){
  # lubridate is accessed via ::, so requireNamespace() (load without
  # attaching) replaces the previous require() call, which attached the
  # package and silently returned FALSE when it was missing.
  if (!requireNamespace("lubridate", quietly = TRUE))
    stop("package 'lubridate' is required", call. = FALSE)
  month <- lubridate::month(date)
  GetSeasonMonth(month, method)
}

#' Map a month number (1-12) to its season label.
#'
#' @param month Integer month(s) in 1..12.
#' @param method "DJF" or "JFM" (see GetSeasonDate).
#' @return Character season label(s), same length as \code{month}.
GetSeasonMonth <- function(month, method = 'DJF'){
  # An invalid method previously fell through and crashed later with the
  # obscure "object 'seasons' not found"; match.arg() reports it up front.
  method <- match.arg(method, c('DJF', 'JFM'))
  # One label per calendar month, Jan..Dec
  if (method == 'DJF') {
    seasons <- c(rep('DJF', 2), rep('MAM', 3), rep('JJA', 3), rep('SON', 3), 'DJF')
  } else {
    seasons <- c(rep('JFM', 3), rep('AMJ', 3), rep('JAS', 3), rep('OND', 3))
  }
  seasons[month]
}
f0e974e1394fd2f6a7c9bf92a7f5887e58077a1c
473a7fa8482db09d6d0b49c2f3c4730fdb64bbf7
/SecondPlot.R
e008a60ac5d8f3390802237912b8feb7fa8c8945
[]
no_license
shabnamh/DataIncubatorChallenge
8d3c8425fe3f8a7056cdb1be5156014754e4ba05
c49aec0e5d6d72ab856dc977305d912b1ced2ae4
refs/heads/master
2020-03-14T13:15:17.576065
2018-04-30T19:34:00
2018-04-30T19:34:00
131,629,143
0
0
null
null
null
null
UTF-8
R
false
false
1,756
r
SecondPlot.R
# Question 3 - Second Plot
# Percentage of delayed arrivals per month at the busiest US airports
# (over 100,000 domestic flights in 2017), shown as a stacked-by-month
# bar chart per airport.
library(dplyr)
library(ggplot2)

# Read the twelve monthly files and bind them into one data frame.
# (Replaces twelve copy-pasted read.csv() calls and a 12-argument rbind();
# do.call(rbind, ...) produces the identical combined data frame.)
month_files <- paste0(c("Jan", "Feb", "Mar", "Apr", "May", "Jun",
                        "Jul", "Aug", "Sep", "Oct", "Nov", "Dec"), ".csv")
dat <- do.call(rbind, lapply(month_files, read.csv))

# Airports meeting the criterion: more than 100,000 arriving flights in 2017
FreqAt <- dat[ dat$DEST %in% names(which(table(dat$DEST)>100000)), ]

# Total flights per destination airport (denominator of the percentage)
perc1 <- FreqAt %>% group_by(DEST) %>% summarise(count = n())

# Delayed arrivals only (positive arrival delay), counted per airport/month
FreqAt2 <- FreqAt[FreqAt$ARR_DELAY>0,]
FreqAt3 <- FreqAt2 %>% group_by(DEST, MONTH) %>% summarise(count = n())

# Join delayed counts (count.x) with totals (count.y) and compute the
# monthly delay percentage per airport
final <- merge(FreqAt3, perc1, by = "DEST")
final <- final %>% arrange(DEST, MONTH)
final <- mutate(final, percentage = (count.x / count.y)*100)

# The result shows 19 airports. Each bar stacks the twelve monthly
# percentage-delay segments for one airport.
b <- ggplot(final, aes(x= DEST, y= percentage, fill= percentage, order = -percentage)) +
  geom_bar(stat = "identity" ) +
  ggtitle("Arrival Delays of Domestic Flights at 19 Busiest Airports in 2017 ") +
  xlab("Airport") + ylab("Percentage") +
  ylim (c(0,100)) +
  scale_fill_gradient(low="#38A7F1", high="#0E2D48")
7a1195f0ba47ba2a51373758f0c6c5c1f0e5ff54
a7e65db3007641a3aa1565f8272463339ef5e84a
/Model_Prod_4_01.R
3b9abc7d01a299c0828faeac7b8f4004635087ba
[]
no_license
marcelomedre/B2WLabs
514281cd8f9853e632993128b32c32563d2878d6
a5e28f102433ae7538a1f8e06b4aa8174fa7b98d
refs/heads/master
2020-05-21T06:09:28.893973
2017-03-16T17:53:04
2017-03-16T17:53:04
84,585,851
0
0
null
null
null
null
IBM852
R
false
false
5,674
r
Model_Prod_4_01.R
# Demand-forecasting script for product P4 (B2W Labs pricing challenge):
# fits a log-linear GLM baseline and then a caret-tuned single-hidden-layer
# neural network (nnet) on weekday/month sales data, comparing both fits.
#
# NOTE(review): setwd() to an absolute personal path and rm(list = ls())
# at the top of a script are anti-patterns (non-portable, destructive to
# the caller's workspace); kept unchanged here, but consider removing.
setwd("C:/Users/Marcelo/Desktop/Data Science/B2W Labs/B2WLabs/")
rm(list = ls())
# B2W LABs | Pricing Challenge
# Deliverables:
# 1) Models for Demand Forecasting: The main objective is to create a model to predict the quantity sold for
# each product given a prescribed price. Along with the statistical model, we need metrics, relationships and
# descriptions of these data in order to understand the sales behavior. What does the data tell us? How are
# the different data sources related? Is there a particular competitor that seems more important?
#
library(ggplot2)
library(caTools)
library(caret)

# ---- Load and arrange the P4 sales data --------------------------------
P4_sales_by_weekday_month <- read.csv("P4_sales_by_weekday_month.csv", header = TRUE, stringsAsFactors = FALSE)
P4_sales_by_weekday_month$PROD_ID <- NULL
P4_sales_by_weekday_month$REVENUE <- NULL
P4_sales <- P4_sales_by_weekday_month[,c(1,2,4,3)] # QTY_ORDER in the last column

# Exploratory plot: quantity sold across the 51 weekday/month combinations
# (axis labels are in Portuguese: "weekdays across the months" /
# "quantity sold, product 4")
ggplot(P4_sales, aes(x = 1:51, y = QTY_ORDER))+
  geom_point(data = P4_sales_by_weekday_month, aes(group = day, color = factor(month)), size = 3)+
  geom_smooth()+
  geom_line(data = P4_sales_by_weekday_month, aes(group = month, color = factor(month)))+
  xlab("Dias da Semana ao longo dos meses")+
  ylab("Quantidade vendida Produto 4")

# ---- Baseline: Gaussian GLM on log(quantity) ---------------------------
plot(P4_sales$Price, log(P4_sales$QTY_ORDER))
regP4 <- glm(log(QTY_ORDER) ~ ., family = gaussian, data = P4_sales)
summary(regP4)
mean(regP4$residuals)
hist(regP4$residuals)

# In-sample fit; exp() maps predictions back to the quantity scale
fit <- predict(regP4, newdata = P4_sales, type = "response")
plot(P4_sales$Price, P4_sales$QTY_ORDER)
points(P4_sales$Price, exp(fit), col = "red")

# Pseudo-R^2: regress observed log-quantity on the fitted values
rdois_glm <- lm(log(P4_sales$QTY_ORDER) ~ fit)
summary(rdois_glm)$r.squared #R^2 = 0.618

# Observed vs predicted quantities against price
ggplot()+
  geom_point(data = P4_sales, aes(x = Price, y = QTY_ORDER, color = "black"), size = 4)+
  geom_point(aes(x = P4_sales$Price, y = exp(fit), color = "red"))+
  xlab("Prešo")+
  ylab("Quantidade P4")+
  scale_colour_manual(name = " Quantidades P4", values =c('black'='black','red'='red'), labels = c("Observada","Prevista"))

# Keep the GLM predictions alongside the data for the final comparison plot
P4_sales_GLM <- P4_sales
P4_sales_GLM$GLM <- exp(fit)
ggplot()+
  geom_point(data = P4_sales_GLM, aes(x = 1:51, y = QTY_ORDER, group = day, color = factor(month)), size = 3)+
  geom_point(data = P4_sales_GLM, aes(x = 1:51, y = GLM), size = 2, color = "red")+
  xlab("Dias da Semana ao longo dos meses")+
  ylab("Quantidade vendida Produto 4")

# ---- Prepare data and formula for the neural network -------------------
# NOTE: QTY_ORDER is log-transformed IN PLACE from here on; all later RMSE
# and R^2 values for the NN are therefore on the log scale
P4_sales$QTY_ORDER <- log(P4_sales$QTY_ORDER)
names(P4_sales)
# Build "QTY_ORDER ~ <all other columns>" programmatically (column 4 is
# the response)
feats <- names(P4_sales)[-(4)]
f <- paste(feats, collapse = " + ")
f <- as.formula(paste("QTY_ORDER ~", f))
f
#*******************************************************************************
# MLP Model
library(nnet)
# Split fulldata into train and test
set.seed(1234)
split = sample.split(P4_sales$Price, SplitRatio = 0.80)
train_nn <- subset(P4_sales, split == TRUE)
val_nn <- subset(P4_sales, split == FALSE)
val_resp <- val_nn$QTY_ORDER # saving test results
val_nn$QTY_ORDER <- NULL

# Tuning grid (weight decay x hidden-layer size) and repeated 10-fold CV;
# linout = 1 makes nnet do regression rather than classification
my.grid <- expand.grid(.decay = c(0.1, 0.05, 0.01), .size = c(3, 4, 5, 6, 7, 8, 9))
P4.fit <- train(f, data = train_nn,
                method = "nnet", maxit = 7000,
                preProcess = c("center", "scale"),
                tuneGrid = my.grid,
                trControl = trainControl (method = "repeatedcv", number = 10, repeats = 10, returnResamp = "final"),
                trace = F, linout = 1)
model_p4 <- P4.fit$bestTune
model_p4

# ---- Evaluate on the held-out validation split -------------------------
predicted <- predict(P4.fit, newdata = val_nn)
p4fit.rmse <- sqrt(mean((predicted - val_resp)^2))
p4fit.rmse
results_P4.fit <- as.data.frame(cbind(val_nn, val_resp, predicted))
ggplot()+
  geom_point(data = results_P4.fit, aes(x = Price, y = val_resp, color = "black"), size = 4)+
  geom_point(data = results_P4.fit, aes(x = Price, y = predicted, color = "red"))+
  xlab("Prešo")+
  ylab("Quantidade P4")+
  scale_colour_manual(name = " Quantidades P4", values =c('black'='black','red'='red'), labels = c("Observada","Prevista"))
# r2 validation sample
rdois_P4.fit <- lm(val_resp ~ predicted, data = results_P4.fit)
summary(rdois_P4.fit)$r.squared
mean(rdois_P4.fit$residuals)
#R^2 = 0.772.
# ---- Evaluate on the full dataset --------------------------------------
# full dataset
predicted_full <- predict(P4.fit, newdata = P4_sales)
full_p4fit.rmse <- sqrt(mean((predicted_full - P4_sales$QTY_ORDER)^2))
full_p4fit.rmse
full_results_P4.fit <- as.data.frame(cbind(P4_sales, predicted_full))
ggplot()+
  geom_point(data = full_results_P4.fit, aes(x = Price, y = QTY_ORDER, color = "black"), size = 4)+
  geom_point(data = full_results_P4.fit, aes(x = Price, y = predicted_full, color = "red"))+
  xlab("Prešo")+
  ylab("Quantidade P4")+
  scale_colour_manual(name = " Quantidades P4", values =c('black'='black','red'='red'), labels = c("Observada","Prevista"))
# r2 validation sample
full_rdois_P4.fit <- lm(QTY_ORDER ~ predicted_full, data = full_results_P4.fit)
summary(full_rdois_P4.fit)$r.squared
mean(full_rdois_P4.fit$residuals)

# Back-transform to the original quantity scale for the comparison plot
full_results_P4.fit$QTY_ORDER <- exp(full_results_P4.fit$QTY_ORDER)
full_results_P4.fit$predicted_full <- exp(full_results_P4.fit$predicted_full)

# Final comparison: observed (colored by month), NN predictions (red) and
# GLM predictions (black) across the 51 weekday/month combinations
ggplot()+
  geom_point(data = full_results_P4.fit, aes(x = 1:51, y = QTY_ORDER, group = day, color = factor(month)), size = 3)+
  geom_point(data = full_results_P4.fit, aes(x = 1:51, y = predicted_full), size = 3, color = "red")+
  geom_point(data = P4_sales_GLM, aes(x = 1:51, y = GLM), size = 3, color = "black")+
  xlab("Dias da Semana ao longo dos meses")+
  ylab("Quantidade vendida Produto 4")
86e9ab9f98e695abb898458bedff89edf14013d3
8bb792d0bd1d40e29da8383769a212d5bce054bb
/tests/testthat.R
86f18c5c51b4a1c3179d5ab388ea0229f9cda478
[ "MIT" ]
permissive
Covid19R/covid19Rdata
e49dc68d6cdcc1ef9a054682770dfecd2ab1f40d
96cf7bbee8a01ab8ceed1426af11b7e7b0fa8da2
refs/heads/master
2023-02-20T19:37:26.089220
2021-01-24T17:02:50
2021-01-24T17:02:50
251,089,377
2
1
NOASSERTION
2020-05-03T22:47:27
2020-03-29T17:21:20
R
UTF-8
R
false
false
68
r
testthat.R
# Standard testthat entry point: run by R CMD check to execute all tests
# under tests/testthat/ for the covid19Rdata package.
library(testthat)
library(covid19Rdata)

test_check("covid19Rdata")
29ef62fe1a9fa6ef4444cffcd169b9084a634974
6b35d0783e6d5dd59e89216218e8d1feb25d281b
/Lecture8/Lect8c.R
3674dc6c6509279880543dee47cf57cc215e97a8
[]
no_license
commfish/uw-fish559
c11a56e6085e0263b541b96620e5f169a4420e03
3fccd3547f97a59d5241a234aa1bc2869f8cd5eb
refs/heads/master
2020-04-02T07:56:43.880613
2018-11-13T22:53:43
2018-11-13T22:53:43
154,221,956
1
1
null
null
null
null
UTF-8
R
false
false
8,177
r
Lect8c.R
# ============================================================================
# Powell's direction-set minimization with Brent line searches -- an R port
# of the classic Numerical Recipes routines POWELL, LINMIN, BRENT and MNBRAK.
# The original's debug print() calls are retained so the console trace is
# unchanged. Fixes in this revision:
#   * Brent: 'W <= X' (a comparison whose result was discarded) corrected to
#     the assignment 'W <- X' required by the NR bookkeeping.
#   * Powell: 'IBig' / 'IBIG' case mismatch unified (the initializer was
#     never the variable actually used).
#   * MnBrak: 'FVecs' is now initialized; one branch appended to it while it
#     was undefined, which would have errored.
# ============================================================================

# Transfer-of-sign helper (FORTRAN SIGN intrinsic): |a| with the sign of b.
# Note: b == 0 yields -|a|, matching the original implementation.
Sign <- function(a, b) {
  if (b > 0) return(abs(a)) else return(-1 * abs(a))
}

# ============================================================================
# Driver: minimize FUNK starting from Xinit (length Ndim) with Powell's
# method; optionally plot the function-value trace and the search path
# (the plotting code assumes Ndim == 2 and the demo objective g below).
Fit <- function(Xinit, Ndim, FUNK, doGraph) {
  # Convergence tolerance on the fractional decrease of the function value
  Ftol <- 0.00000001

  # Initial direction set: the coordinate axes (identity matrix)
  XI <- matrix(0, Ndim, Ndim)
  for (I in seq_len(Ndim)) XI[I, I] <- 1

  ZZ <- Powell(Xinit, XI, Ndim, FUNK, Ftol)
  print(c("final", ZZ))

  if (doGraph) {
    par(mfrow = c(2, 2))

    # Function value vs. function-call number
    Nfunc <- length(ZZ$TraceY)
    X <- seq(1, Nfunc, 1)
    Y <- ZZ$TraceY
    plot(X, Y, xlab = "Function call", ylab = "Function value",
         type = 'b', pch = 16, lwd = 2)

    # Search path in 2-d parameter space, with circles of radius 1..3
    # centred on the demo objective's optimum (0, 1)
    X <- NULL; Y <- NULL
    Npnt <- length(ZZ$TraceX) / 2
    for (I in seq_len(Npnt)) {
      X <- c(X, ZZ$TraceX[(I - 1) * 2 + 1])
      Y <- c(Y, ZZ$TraceX[(I - 1) * 2 + 2])
    }
    plot(X, Y, xlim = c(-2, 3), ylim = c(-2, 4), type = 'n', lwd = 2)
    lines(c(0, 0), c(1, 1), lwd = 10, pch = 16)   # marks the optimum (0, 1)
    lines(X, Y, col = 5, lty = 1, lwd = 2, type = 'b', pch = 16)
    TT <- seq(0, 2 * pi, length = 100)
    for (I in 1:3) {
      XX <- I * sin(TT)
      YY <- 1 + I * cos(TT)
      lines(XX, YY, lty = 1, lwd = 2)
    }
  }
}

# ============================================================================
# Powell's method proper: repeated line minimizations along the direction
# set XI (N x N), with the NR rule for replacing the direction of largest
# decrease by the average direction moved. Returns the minimizer X, the
# value Fret, and traces of all function values (TraceY) and points (TraceX).
Powell <- function(X, XI, N, FUNK, Ftol) {
  XIT <- rep(0, N)

  # Preliminaries
  PT <- rep(0, N)
  Fret <- FUNK(X)
  PT <- X
  FVecs <- X
  FVals <- Fret

  for (Iter in 1:200) {
    FP <- Fret
    IBIG <- 0    # BUGFIX: was 'IBig', but the loop below assigns 'IBIG'
    Del <- 0

    # Line-minimize along each direction; remember which direction gave
    # the largest decrease
    for (I in seq_len(N)) {
      XIT <- XI[, I]
      FPTT <- Fret
      ZZ <- LinMin(X, XIT, N, FUNK)
      FVals <- c(FVals, ZZ$FVals)
      FVecs <- c(FVecs, ZZ$X)
      Fret <- ZZ$Fret; X <- ZZ$X; XIT <- ZZ$XIT
      if (abs(FPTT - Fret) > Del) { Del <- abs(FPTT - Fret); IBIG <- I }
    }

    # Converged? (fractional decrease below Ftol)
    if (2.0 * abs(FP - Fret) <= Ftol * (abs(FP) + abs(Fret))) break

    # Construct the extrapolated point and the average direction moved
    PTT <- 2 * X - PT
    XIT <- X - PT
    PT <- X
    FPTT <- FUNK(PTT)
    FVals <- c(FVals, FPTT)
    FVecs <- c(FVecs, PTT)
    if (FPTT < FP) {
      # NR criterion for adopting the average direction into the set
      # (renamed from 'T' to avoid masking the TRUE literal)
      Tcrit <- 2 * (FP - 2 * Fret + FPTT) * (FP - Fret - Del)^2 - Del * (FP - FPTT)^2
      if (Tcrit < 0) {
        ZZ <- LinMin(X, XIT, N, FUNK)
        FVals <- c(FVals, ZZ$FVals)
        FVecs <- c(FVecs, ZZ$X)
        Fret <- ZZ$Fret; X <- ZZ$X; XIT <- ZZ$XIT
        XI[, IBIG] <- XIT
      }
    }
  }

  ZZ <- NULL
  ZZ$X <- X
  ZZ$Fret <- FUNK(X)
  ZZ$TraceY <- FVals
  ZZ$TraceX <- FVecs
  return(ZZ)
}

# ============================================================================
# Line minimization of FUNK from X along direction XI: bracket the minimum
# with MnBrak, refine with Brent, then advance X to the minimum found.
LinMin <- function(X, XI, N, FUNK) {
  TOL <- 1.0E-4
  Mout <- MnBrak(0, 1, F1DIM, FUNK, X, XI)
  Bout <- Brent(Mout$AX, Mout$BX, Mout$CX, F1DIM, FUNK, TOL, X, XI)
  Xmin <- Bout$X
  Fret <- Bout$FX
  XI <- Xmin * XI          # actual displacement along the search direction
  X <- X + XI
  ZZ <- NULL
  ZZ$Fret <- FUNK(X)
  ZZ$XIT <- XI
  ZZ$X <- X
  ZZ$FVals <- c(Mout$FVals, Bout$FVals)
  ZZ$FVecs <- Bout$FVecs
  return(ZZ)
}

# ============================================================================
# Restrict FUNK to the line Pcom + X * XIcom and evaluate at scalar X.
F1DIM <- function(X, Pcom, XIcom, FUNK) {
  XT <- Pcom + X * XIcom
  ZZ <- FUNK(XT)
  return(ZZ)
}

# ============================================================================
# Brent's 1-d minimization on the bracket (AX, BX, CX), combining
# golden-section steps with parabolic interpolation through the three
# best points X, W, V. Returns the abscissa X, value FX and traces.
Brent <- function(AX, BX, CX, F1DIM, FUNK, TOL, Pcom, XIcom) {
  CGold <- 0.3819660; Zeps <- 1.0E-10  # golden complement; absolute tolerance

  FVals <- NULL   # trace of function values
  FVecs <- NULL   # trace of evaluation abscissae

  # Initializations: [A, B] is the current bracket, X the best point so far
  A <- min(AX, CX)
  B <- max(AX, CX)
  V <- BX; W <- V; X <- V; E <- 0
  FX <- F1DIM(X, Pcom, XIcom, FUNK)
  FV <- FX; FW <- FX
  FVecs <- c(FVecs, X)
  FVals <- c(FVals, FX)
  print(c("start", FV))

  for (Iter in 1:100) {
    XM <- 0.5 * (A + B)
    TOL1 <- TOL * abs(X) + Zeps
    TOL2 <- 2.0 * TOL1

    # Done when the bracket is small enough around X
    if (abs(X - XM) <= (TOL2 - 0.5 * (B - A))) break

    # Trial parabolic fit through X, W, V
    Ok <- TRUE
    if (abs(E) > TOL1) {
      R <- (X - W) * (FX - FW); Q <- (X - V) * (FX - FW); P <- (X - V) * Q - (X - W) * R
      Q <- 2.0 * (Q - R)
      if (Q > 0) P <- -1 * P
      Q <- abs(Q)
      Etemp <- E; E <- D
      Ok <- TRUE
      if (abs(P) < abs(0.5 * Q * Etemp) && (P > Q * (A - X)) && P <= Q * (B - X)) {
        # Parabolic step accepted
        D <- P / Q; U <- X + D
        if (U - A < TOL2 || B - U < TOL2) D <- Sign(TOL1, XM - X)
        Ok <- FALSE
      }
    }

    # Otherwise: golden-section step into the larger segment
    if (Ok) {
      if (X >= XM) E <- A - X else E <- B - X
      D <- CGold * E
    }

    # Take the step (never shorter than TOL1) and evaluate
    if (abs(D) >= TOL1) U <- X + D else U <- X + Sign(TOL1, D)
    FU <- F1DIM(U, Pcom, XIcom, FUNK)
    FVecs <- c(FVecs, U)
    FVals <- c(FVals, FU)

    # Housekeeping: shrink [A, B] and rotate the three best points.
    if (FU <= FX) {
      if (U >= X) A <- X else B <- X
      # BUGFIX: original had 'W <= X' (a discarded comparison); the NR
      # update is V<-W; FV<-FW; W<-X; FW<-FX; X<-U; FX<-FU
      V <- W; FV <- FW; W <- X; FW <- FX; X <- U; FX <- FU
    } else {
      if (U < X) A <- U else B <- U
      if (FU <= FW || W == X) {
        V <- W; FV <- FW; W <- U; FW <- FU
      } else {
        if (FU <= FV || V == X || V == W) { V <- U; FV <- FU }
      }
    }
  }

  ZZ <- NULL
  ZZ$X <- X
  ZZ$FX <- FX
  ZZ$FVals <- FVals
  ZZ$FVecs <- FVecs
  return(ZZ)
}

# ============================================================================
# Bracket a minimum of F1DIM(., Pcom, XIcom, FUNK): starting from AX, BX,
# search downhill by golden-ratio magnification and parabolic extrapolation
# until AX, BX, CX satisfy F(BX) <= min(F(AX), F(CX)).
MnBrak <- function(AX, BX, F1DIM, FUNK, Pcom, XIcom) {
  # Fixed parameters: default magnification, max parabolic step, div guard
  Gold <- 1.618034; Glimit <- 100; Tiny <- 1.0E-20

  FVals <- NULL
  FVecs <- NULL   # BUGFIX: was never initialized; one branch appends to it

  # Order A, B so the search runs DOWNHILL from A to B; first guess for C
  FA <- F1DIM(AX, Pcom, XIcom, FUNK)
  FB <- F1DIM(BX, Pcom, XIcom, FUNK)
  FVals <- c(FVals, FA, FB)
  if (FB > FA) {
    Dum <- AX; AX <- BX; BX <- Dum
    Dum <- FB; FB <- FA; FA <- Dum
  }
  CX <- BX + Gold * (BX - AX)
  FC <- F1DIM(CX, Pcom, XIcom, FUNK)
  FVals <- c(FVals, FC)

  while (FB >= FC) {
    print("continue")
    # Parabolic extrapolation through A, B, C; U is the turning point
    R <- (BX - AX) * (FB - FC)
    Q <- (BX - CX) * (FB - FA)
    U <- BX - ((BX - CX) * Q - (BX - AX) * R) / (2.0 * Sign(max(abs(Q - R), Tiny), Q - R))
    Ulim <- BX + Glimit * (CX - BX)

    # Try the various possibilities
    if ((BX - U) * (U - CX) > 0) {
      # U lies between B and C
      print("here1")
      FU <- F1DIM(U, Pcom, XIcom, FUNK)
      FVals <- c(FVals, FU)
      if (FU < FC) {
        # Minimum between B and C: done
        AX <- BX; FA <- FB; BX <- U; FB <- FU; print("beaking1"); break
      } else if (FU > FB) {
        # Minimum between A and U: done
        CX <- U; FC <- FU; print("beaking2"); break
      }
      print("no break")
      # Parabolic fit was of no use: default magnification
      U <- CX + Gold * (CX - BX)
      FU <- F1DIM(U, Pcom, XIcom, FUNK)
      FVals <- c(FVals, FU)
    } else {
      # Parabolic fit between C and its allowed limit
      if ((CX - U) * (U - Ulim) > 0) {
        FU <- F1DIM(U, Pcom, XIcom, FUNK)
        FVals <- c(FVals, FU)
        if (FU < FC) {
          BX <- CX; CX <- U; U <- CX + Gold * (CX - BX)
          FB <- FC; FC <- FU
          FU <- F1DIM(U, Pcom, XIcom, FUNK)
          FVals <- c(FVals, FU)
        }
      } else {
        print("here3")
        if ((U - Ulim) * (Ulim - CX) >= 0) {
          # Cap the parabolic step at its maximum allowed value
          U <- Ulim
          FU <- F1DIM(U, Pcom, XIcom, FUNK)
          FVecs <- c(FVecs, U)
          FVals <- c(FVals, FU)
        } else {
          U <- CX + Gold * (CX - BX)
          FU <- F1DIM(U, Pcom, XIcom, FUNK)
          FVals <- c(FVals, FU)
        }
      }
    }

    # Eliminate oldest point and continue
    print("elim")
    AX <- BX; BX <- CX; CX <- U
    FA <- FB; FB <- FC; FC <- FU
  }
  print("done")

  ZZ <- NULL
  ZZ$AX <- AX; ZZ$BX <- BX; ZZ$CX <- CX
  ZZ$FA <- FA; ZZ$FB <- FB; ZZ$FC <- FC
  ZZ$FVals <- FVals
  return(ZZ)
}

# ============================================================================
# Demo objective: f(x) = 1 + x1^2 + (x2 - 1)^2, minimized at (0, 1), f = 1.
g <- function(x) {
  return(1 + x[1] * x[1] + (x[2] - 1) * (x[2] - 1))
}
#g <- function(x) { return(1+x*x) }

test <- function() {
  X <- c(2, 2)
  Fit(X, 2, g, TRUE)
}

test()
421cab97b815e1c51005a50f2fa4f9883e11a301
ca96aa69a485886e69efa306a21d06d8769cc6d1
/man/get_prism_annual.Rd
4219ecc983ccb6525386b4c90a7fc9ecff7a5a95
[]
no_license
yangxhcaf/prism
46b14662b57d1651e41e85b4d9843bfb21c0480f
55895dcf2360a777e204c8d6ccb0dd1a196d3f5a
refs/heads/master
2020-06-04T04:15:40.348812
2018-12-10T23:03:03
2018-12-10T23:03:03
null
0
0
null
null
null
null
UTF-8
R
false
true
1,560
rd
get_prism_annual.Rd
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/get_prism_annual.R
\name{get_prism_annual}
\alias{get_prism_annual}
\title{Download annual daily averages}
\usage{
get_prism_annual(type, years = NULL, keepZip = TRUE)
}
\arguments{
\item{type}{The type of data to download, must be "ppt", "tmean", "tmin", "tmax", or "all", which downloads "ppt", "tmin", and "tmax". Note that tmean == mean(tmin, tmax).}

\item{years}{a valid numeric year, or vector of years, to download data for. If no month is specified, year averages for that year will be downloaded.}

\item{keepZip}{if \code{TRUE}, leave the downloaded zip files in your 'prism.path', if \code{FALSE}, they will be deleted.}
}
\description{
Download annual daily average data from the prism project at 4km grid cell resolution for precipitation, mean, min and max temperature
}
\details{
Data is available from 1891 until 2014, however you have to download all data for years prior to 1981. Therefore if you enter a vector of years that bounds 1981, you will automatically download all data for all years in the vector. If \code{type = "all"} is specified, it will override any months entered and download all data. Data will be downloaded for all months in all the years in the vectors supplied. You must make sure that you have set up a valid download directory. This must be set as \code{options(prism.path = "YOURPATH")}.
}
\examples{
\dontrun{
### Get the annual average data for 1990 to 2000
get_prism_annual(type="tmean", years = 1990:2000, keepZip=FALSE)
}
}
8443ab1732d77564244e4756937773df4a4a2082
865001d77275f63f3a75ea1dde05fe2387de1b88
/R/corr.R
a7dab07b5112001f02b86b065709647924149a9c
[]
no_license
thiyangt/tsdataleaks
1141658da0594386df8b62a0b41ee17bfb1efd6c
ecd0c5893639d2e1ac00750d1b72b2ad784f5972
refs/heads/master
2023-04-04T00:05:37.738186
2021-04-13T00:52:48
2021-04-13T00:52:48
282,431,352
3
1
null
2021-03-16T10:13:37
2020-07-25T11:36:09
R
UTF-8
R
false
false
1,217
r
corr.R
#' Correlation calculation based on rolling window with overlapping observations.
#'
#' @param x time series
#' @param y subsection of the time series to map
#' @param cutoff benchmark value for corr, default 1
#' @param boost Logical value indicating whether to boost performance by using
#'   RCpp. For small datasets setting boost=TRUE would not be efficient.
#' @importFrom slider slide_dbl
#' @importFrom tibble tibble
#' @return A \code{tibble} with columns \code{start} and \code{end}: the index
#'   ranges of the windows of \code{x} whose (rounded) Pearson correlation with
#'   \code{y} is at least \code{cutoff}; a single row of \code{NA}s when no
#'   window qualifies.
#' @examples
#' x <- rnorm(15)
#' y <- x[6:10]
#' x <- c(x, y)
#' ts.match(x, y, 1)
#' z <- rnorm(5)
#' ts.match(x, z)
#' @export
ts.match <- function(x, y, cutoff = 1, boost = TRUE) {
  # Every window of x compared against y has length(y) observations.
  slide.size <- length(y)
  # Rolling-correlation callback for the slider (pure R) path; the argument
  # is named 'w' so it does not shadow the outer 'x'.
  fn <- function(w) { stats::cor(w, y) }
  if (boost) {
    # Rcpp implementation of the rolling correlation (corw, defined in the
    # package's compiled code).
    match.index <- round(corw(x, y), 4)
  } else {
    match.index <- round(slider::slide_dbl(x, fn, .before = slide.size - 1L,
                                           .complete = TRUE), 4)
  }
  # Windows whose rounded correlation reaches the cutoff; each end index
  # maps back to a start index slide.size - 1 positions earlier.
  index.cutoff.end <- which(match.index >= cutoff)
  index.cutoff.start <- index.cutoff.end - (slide.size - 1L)
  if (length(index.cutoff.end) == 0) {
    tibble::tibble(start = NA, end = NA)
  } else {
    tibble::tibble(start = index.cutoff.start, end = index.cutoff.end)
  }
}
23cbe6e68bb5e55dfec800fcbf69a08a62e99451
4755ffa39c7be31111e91301c336b86142f19dbb
/cachematrix.R
1f3a1db8ade69499c44e4631c0b7c2c623bc2da6
[]
no_license
MarkVHoward/ProgrammingAssignment2-1
b406e011f92af6d48c7c5dbc06018c416ebf6a51
296f191cd9472889c9e9c97c4e04c601c809060d
refs/heads/master
2021-01-18T11:30:10.677799
2015-08-19T08:12:14
2015-08-19T08:12:14
40,948,120
0
0
null
2015-08-18T03:14:47
2015-08-18T03:14:47
null
UTF-8
R
false
false
1,229
r
cachematrix.R
## Caching wrapper around matrix inversion.
##
## makeCacheMatrix() wraps a matrix together with a memoized inverse:
## the returned list exposes set/get for the matrix itself and
## setsolve/getsolve for the cached inverse. Calling set() invalidates
## any previously cached inverse.
##
## cacheSolve() returns the inverse of such a wrapped matrix, computing
## it with solve() on the first call and serving the cached copy (with a
## message) on subsequent calls.
## Written By: Mark Howard
## Date: 19 Aug 2015

## Construct the caching matrix object
makeCacheMatrix <- function(x = matrix()) {
  cached_inverse <- NULL
  set <- function(y) {
    # Replace the stored matrix and drop the now-stale inverse
    x <<- y
    cached_inverse <<- NULL
  }
  get <- function() {
    x
  }
  setsolve <- function(solve) {
    cached_inverse <<- solve
  }
  getsolve <- function() {
    cached_inverse
  }
  list(set = set,
       get = get,
       setsolve = setsolve,
       getsolve = getsolve)
}

## Return a matrix that is the inverse of 'x' (an object produced by
## makeCacheMatrix), reusing the cached result when one is available.
cacheSolve <- function(x, ...) {
  inv <- x$getsolve()
  if (is.null(inv)) {
    # Cache miss: compute, store, and return the inverse
    inv <- solve(x$get(), ...)
    x$setsolve(inv)
    inv
  } else {
    message("getting cached data")
    inv
  }
}
bc148ed3ce42f21e2b02d0d290b9a2ab88c07c23
29585dff702209dd446c0ab52ceea046c58e384e
/softclassval/R/performance.R
8d39f4605862c5f575e5895c9c861e6c8787e368
[]
no_license
ingted/R-Examples
825440ce468ce608c4d73e2af4c0a0213b81c0fe
d0917dbaf698cb8bc0789db0c3ab07453016eab9
refs/heads/master
2020-04-14T12:29:22.336088
2016-07-21T14:01:14
2016-07-21T14:01:14
null
0
0
null
null
null
null
UTF-8
R
false
false
14,578
r
performance.R
##' Input checks and reference preparation for performance calculation ##' ##' Checks whether \code{r} and \code{p} are valid reference and predictions. If \code{p} is a ##' multiple of \code{r}, recycles \code{r} to the size and shape of \code{p}. If \code{r} has ##' additional length 1 dimensions (usually because dimensions were dropped from \code{p}), it is ##' shortend to the shape of \code{p}. ##' ##' In addition, any \code{NA}s in \code{p} are transferred to \code{r} so that these samples are ##' excluded from counting in \code{\link{nsamples}}. ##' ##' \code{checkrp} is automatically called by the performance functions, but doing so beforehand and ##' then setting \code{.checked = TRUE} can save time when several performance measures are to be ##' calculated on the same results. ##' @param r reference ##' @param p prediction ##' @return \code{r}, possibly recycled to length of \code{p} or with dimensions shortened to \code{p}. ##' @author Claudia Beleites ##' @export ##' @examples ##' ref <- softclassval:::ref ##' ref ##' ##' pred <- softclassval:::pred ##' pred ##' ##' ref <- checkrp (r = ref, p = pred) ##' sens (r = ref, p = pred, .checked = TRUE) checkrp <- function (r, p){ if (is.null (dr <- dim (r))) dr <- length (r) if (is.null (dp <- dim (p))) dp <- length (p) recycle <- prod (dp) > prod (dr) if (prod (dr) > prod (dp)) stop ("r must not be larger than p") dp <- dp [seq_along (dr)] if (dr [1] != dp [1]) stop ("numer of samples (nrow) of reference and prediction must be the same.") if (! is.na (dr [2]) && dr [2] != dp [2]) stop ("p and r do not have the same number of columns (classes).") ## check class names if possible if (!is.null (colnames (r)) & ! 
is.null (colnames (p))){ reorder <- match (colnames (r), colnames (p)) if (any (is.na (reorder))) warning ("colnames of r (", paste (colnames (r)), ") and p (", paste (colnames (p)), ") do not match.") else if (any (reorder != seq_len (dp [2]))) warning ("Columns of r seem not to be in the same order as colnames of p.\n", "To reorder them first, consider\n", deparse (substitute (p)), " <- slice (", deparse (substitute (p)), ", j = c (", paste (reorder, collapse = ", "), "))") } if (any (is.na (dp)) || any (dr != dp)) { # NA: p is shorter than r equaldims <- seq_len (min (which (is.na (dp) | dr != dp)) - 1) # first equal dims ## thereafter only length 1 dimensions are allowed if (any (dr [- equaldims] != 1)) stop ("Dimension mismatch between r and p.") ## if p is shorter than r: shorten r if (any (is.na (dp))){ a <- attributes (r) a$dim <- a$dim [equaldims] a$dimnames <- a$dimnames [equaldims] mostattributes (r) <- a } } ## recycle if necessary if (recycle) { a <- attributes (r) r <- rep (r, length.out = length (p)) mostattributes (r) <- attributes (p) dimnames (r) [seq_along (a$dimnames)] <- a$dimnames } ## make sure p == NA => r == NA is.na (r) <- is.na (p) r } .test (checkrp) <- function (){ checkEquals (checkrp (ref, pred ), ref ) checkEquals (checkrp (ref.array, pred.array ), ref.array) checkEquals (checkrp (ref , pred.array ), ref.array, msg = "recycling r") checkEquals (checkrp (ref.array [,,1, drop = FALSE], pred), ref , msg = "shortening r") checkException (checkrp (ref.array, pred ) , msg = "length (dim (r)) > length (dim (p))") checkException (checkrp (1 : 2, 1 ) , msg = "nrow (r) != nrow (p)") checkException (checkrp (ref, pred [, 1 : 2] ) , msg = "ncol (r) != ncol (p)") tmp <- ref.array dim (tmp) <- c (dim(ref.array) [1 : 2], 1, dim (ref.array) [3]) checkException (checkrp (tmp, pred.array ) , msg = "Dimension mismatch") ## check NAs are transferred correctly to reference tmp <- pred.array nas <- sample (length (pred.array), 10) tmp [nas] <- NA 
checkEquals (which (is.na (checkrp (ref, tmp ))), sort (nas)) ## warnings for colnames mismatches warnlevel <- options ()$warn options (warn = -1) checkEquals (checkrp (r = ref, p = ref [, 3 : 1] ), ref) options (warn = 2) checkException (checkrp (ref, pred )) checkException (checkrp (r = ref, p = ref [, 3 : 1] )) options (warn = warnlevel) } ##' Performance calculation for soft classification ##' ##' These performance measures can be used with prediction and reference being continuous class ##' memberships in [0, 1]. ##' ##' The rows of \code{r} and \code{p} are considered the samples, columns will usually hold the ##' classes, and further dimensions are preserved but ignored. ##' ##' \code{r} must have the same number of rows and columns as \code{p}, all other dimensions may be ##' filled by recycling. ##' ##' \code{spec}, \code{ppv}, and \code{npv} use the symmetry between the performance measures as ##' described in the article and call \code{sens}. ##' ##' @rdname performance ##' @param r vector, matrix, or array with reference. ##' @param p vector, matrix, or array with predictions ##' @param groups grouping variable for the averaging by \code{\link[base]{rowsum}}. If \code{NULL}, ##' all samples (rows) are averaged. ##' @param operator the \code{\link[softclassval]{operators}} to be used ##' @param drop should the results possibly be returned as vector instead of 1d array? (Note that ##' levels of \code{groups} are never dropped, you need to do that e.g. by ##' \code{\link[base]{factor}}.) ##' @param .checked for internal use: the inputs are guaranteed to be of same size and shape. 
If ##' \code{TRUE}, \code{confusion} omits input checking ##' @return numeric of size (ngroups x \code{dim (p) [-1]}) with the respective performance measure ##' @author Claudia Beleites ##' @seealso Operators: \code{\link{prd}} ##' ##' For the complete confusion matrix, \code{\link{confmat}} ##' @references see the literature in \code{citation ("softclassval")} ##' @export ##' @include softclassval.R ##' @examples ##' ##' ref <- softclassval:::ref ##' ref ##' ##' pred <- softclassval:::pred ##' pred ##' ##' ## Single elements or diagonal of confusion matrix ##' confusion (r = ref, p = pred) confusion <- function (r = stop ("missing reference"), p = stop ("missing prediction"), groups = NULL, operator = "prd", drop = FALSE, .checked = FALSE){ operator <- match.fun (operator) if (! .checked) r <- checkrp (r, p) res <- operator (r = r, p = p) res <- groupsum (res, group = groups, dim = 1, reorder = FALSE, na.rm = TRUE) drop1d (res, drop = drop) } ## testing by .test (sens) ##' Calculate the soft confusion matrix ##' ##' @rdname performance ##' @export ##' @examples ##' ##' ## complete confusion matrix ##' cm <- confmat (r = softclassval:::ref, p = pred) [1,,] ##' cm ##' ##' ## Sensitivity-Specificity matrix: ##' cm / rowSums (cm) ##' ##' ## Matrix with predictive values: ##' cm / rep (colSums (cm), each = nrow (cm)) confmat <- function (r = stop ("missing reference"), p = stop ("missing prediction"), ...){ rx <- slice (r, j = rep (seq_len (ncol (r)), ncol (p)), drop = FALSE) colnames (rx) <- NULL px <- slice (p, j = rep (seq_len (ncol (p)), each = ncol (r)), drop = FALSE) colnames (px) <- NULL cm <- confusion (r = rx, p = px, ...) 
d <- dim (cm) dim (cm) <- c (d [1], ncol (r), ncol (p), d [- (1 : 2)]) dn <- dimnames (p) if (is.null (dn)) dn <- rep (list (NULL), ndim (p)) dn <- c (list (rownames (cm)), list (r = colnames (r)), dn [- 1L]) names (dn) [3L] <- "p" dimnames (cm) <- dn cm } .test (confmat) <- function (){ cm <- confmat (r = ref, p = pred)[1,,] warn <- options(warn = -1)$warn on.exit (options (warn = warn)) for (r in colnames (ref)) for (p in colnames (pred)) checkEqualsNumeric (cm [r, p], confusion (r = ref [, r], p = pred [, p])) options (warn = warn) ## one sample only checkEquals (confmat (r = ref[1,,drop = FALSE], p = pred[1,,drop = FALSE])[1,,], structure(c(1, 0, 0, 0, 0, 0, 0, 0, 0), .Dim = c(3L, 3L), .Dimnames = structure(list(r = c("A", "B", "C"), p = c("a", "b", "c")), .Names = c("r", "p"))) ) } ##' @param eps limit below which denominator is considered 0 ##' @param op.dev does the operator measure deviation? ##' @param op.postproc if a post-processing function is needed after averaging, it can be given ##' here. See the example. ##' @rdname performance ##' @export ##' @examples ##' ##' ## sensitivities ##' sens (r = ref, p = pred) sens <- function (r = stop ("missing reference"), p = stop ("missing prediction"), groups = NULL, operator = "prd", op.dev = dev (match.fun (operator)), op.postproc = postproc (match.fun (operator)), eps = 1e-8, drop = FALSE, .checked = FALSE){ force (op.dev) force (op.postproc) if (! (isTRUE (op.dev) | isTRUE (! op.dev))) stop ("op.dev must either be TRUE or FALSE.") if (!is.null (op.postproc)) POSTFUN <- match.fun (op.postproc) if (!.checked) r <- checkrp (r, p) # do the input checks. res <- confusion (r = r, p = p, groups = groups, operator = operator, drop = FALSE, .checked = TRUE) nsmpl <- nsamples (r = r, groups = groups, operator = operator) if (any (nsmpl < res)) warning ("denominator < enumerator.") nsmpl [nsmpl < eps] <- NA res <- res / nsmpl if (! is.null (op.postproc)) # e.g. 
root for wRMSE res <- POSTFUN (res) if (op.dev) # for wMAE, wMSE, wRMSE, and the like res <- 1 - res res } .test (sens) <- function (){ ops <- c ("strong", "weak", "prd", "and", "wMAE", "wMSE", "wRMSE") ## shape & names for (o in ops){ ## vector tmp <- sens (r = v, p = v, operator = o) checkEquals (dim (tmp), 1L) checkTrue (is.null (dimnames (tmp))[[1]]) checkTrue (is.null (names (tmp))) ## matrix tmp <- sens (r = v [1 : 4], p = m, operator = o) checkEquals (dim (tmp), c(1L, ncol (m)), msg = "matrix") checkEquals (dimnames (tmp), list (NULL, colnames (m)), msg = "matrix") checkTrue (is.null (names (tmp)), msg = "matrix") ## array tmp <- sens (r = rep (v [1 : 5], 2), p = pred.array, operator = o) checkEquals (dim (tmp), c (1, dim (pred.array) [-1]), msg = "array") checkEquals (dimnames (tmp), c (list (NULL), dimnames (pred.array) [-1]), msg = "array") checkTrue (is.null (names (tmp)), msg = "array") } checkEqualsNumeric (sens (r = ref.array, p = pred.array), c (0.6, 0.32, NA, 0.85, 0.4, NA)) checkEqualsNumeric (sens (r = ref.array, p = pred.array, operator = "weak"), c (0.675, 0.5, NA, 1, 1, NA)) checkEqualsNumeric (sens (r = ref.array, p = pred.array, operator = "strong"), c (0.55, 0.2, NA, 0.75, 0, NA)) checkEqualsNumeric (sens (r = ref.array, p = pred.array, operator = "prd"), c (0.6, 0.32, NA, 0.85, 0.4, NA)) checkEqualsNumeric (sens (r = ref.array, p = pred.array, operator = "and"), c (0.2, NA, NA, 1, NA, NA)) checkEqualsNumeric (sens (r = ref.array, p = pred.array, operator = "wMAE"), c (0.66, 0.68, NA, 1, 1, NA)) checkEqualsNumeric (sens (r = ref.array, p = pred.array, operator = "wMSE"), c (0.788, 0.86, NA, 1, 1, NA)) checkEqualsNumeric (sens (r = ref.array, p = pred.array, operator = "wRMSE"), c (1 - sqrt (1.696/8), 1 - sqrt (.28/2), NA, 1, 1, NA)) checkEqualsNumeric (sens (r = ref.array, p = pred.array, groups = ref.groups), c (0.6, 0.6, NA, 0.32, NA, NA, 1, 0.6, NA, 0.4, NA, NA)) checkEqualsNumeric (sens (r = ref.array, p = pred.array, groups = 
ref.groups, operator = "weak"), c (0.6, 0.8, NA, 0.5, NA, NA, 1, 1, NA, 1, NA, NA)) checkEqualsNumeric (sens (r = ref.array, p = pred.array, groups = ref.groups, operator = "strong"), c (0.6, 1.4/3, NA, 0.2, NA, NA, 1, 1/3, NA, 0, NA, NA)) checkEqualsNumeric (sens (r = ref.array, p = pred.array, groups = ref.groups, operator = "prd"), c (0.6, 0.6, NA, 0.32, NA, NA, 1, 0.6, NA, 0.4, NA, NA)) checkEqualsNumeric (sens (r = ref.array, p = pred.array, groups = ref.groups, operator = "and"), c (0.2, NA, NA, NA, NA, NA, 1, NA, NA, NA, NA, NA)) checkEqualsNumeric (sens (r = ref.array, p = pred.array, groups = ref.groups, operator = "wMAE"), c (0.6, 0.76, NA, 0.68, NA, NA, 1, 1, NA, 1, NA, NA)) checkEqualsNumeric (sens (r = ref.array, p = pred.array, groups = ref.groups, operator = "wMSE"), c (0.728, 0.888, NA, 0.86, NA, NA, 1, 1, NA, 1, NA, NA)) checkEqualsNumeric (sens (r = ref.array, p = pred.array, groups = ref.groups, operator = "wRMSE"), 1 - sqrt (1 - c (0.728, 0.888, NA, 0.86, NA, NA, 1, 1, NA, 1, NA, NA))) } ##' @param ... handed to \code{sens} ##' @rdname performance ##' @export ##' @examples ##' ##' ## specificities ##' spec (r = ref, p = pred) spec <- function (r = stop ("missing reference"), p = stop ("missing prediction"), ...){ sens (r = 1 - r, p = 1 - p, ...) } ##' @rdname performance ##' @export ##' @examples ##' ##' ## predictive values ##' ppv (r = ref, p = pred) ppv <- function (r = stop ("missing reference"), p = stop ("missing prediction"), ..., .checked = FALSE){ if (! .checked) r <- checkrp (r, p) sens (r = p, p = r, ..., .checked = TRUE) } .test (ppv) <- function (){ checkEquals (ppv (r = ref, p = pred.array), sens (r = pred.array, p = ref.array)) } ##' @rdname performance ##' @export ##' @examples ##' npv (r = ref, p = pred) npv <- function (r = stop ("missing reference"), p = stop ("missing prediction"), ..., .checked = FALSE){ if (! 
.checked) r <- checkrp (r, p) sens (r = 1 - p, p = 1 - r, ..., .checked = TRUE) } .test (npv) <- function (){ checkEquals (npv (r = ref, p = pred.array), sens (r = 1 - pred.array, p = 1 - ref.array)) }
147dabc23ebeb6aee28745149e66b1037d3da8d1
9729b52a87cf9387e7f1b97905a470e94a8c41a7
/R/StaticApi.R
ef417d7ae9ecf8fef38ebf5a322c5f655d2d0c84
[ "MIT" ]
permissive
terminological/roogledocs
b3e210953fe0fbdb1c9e2d2b18652eb51cd126aa
e3f333f425b198e37dec60d98f7562849fc47ccd
refs/heads/master
2023-06-10T13:47:15.619743
2023-05-26T14:44:50
2023-05-26T14:44:50
475,030,092
3
0
null
null
null
null
UTF-8
R
false
false
6,747
r
StaticApi.R
# Generated by r6-generator-maven-plugin: do not edit by hand # This is a collection of the static methods described in the Java API # and serves as an alternative R centric entry point of the roogledocs generated R library. # Version: 0.2.0 # Generated: 2022-11-21T13:35:06.572 # Contact: rob.challen@bristol.ac.uk # RoogleDocs class static methods ---- #' reauth: Re-authenticate roogledocs library #' #' Re-authenticate the service deleting the existing OAuth tokens may be #' helpful if there is some problem. #' #' Generally this is only be needed #' if #' application permission updates are needed in which case the #' directory can be manually deleted anyway, #' or if you want to switch #' google user without using a different tokenDirectory. #' @param tokenDirectory the place to store authentication tokens. This should #' not be checked into version control. - (defaulting to #' `.tokenDirectory()`) - (java expects a String) #' @return R6 RoogleDocs object: #' a new RoogleDocs instance without an active document #' @export reauth = function(tokenDirectory=.tokenDirectory()) { # get the API singleton J = JavaApi$get() # execute the R6 function call with the same parameters out = J$RoogleDocs$reauth(tokenDirectory) if(is.null(out)) return(invisible(out)) return(out) } #' docById: Get a document by id or sharing link. #' #' no description #' @param shareUrlOrDocId the url from clicking a share button in google docs or #' an id from searchForDocuments() method - (java expects a String) #' @param tokenDirectory the place to store authentication tokens. This should #' not be checked into version control. - (defaulting to #' `.tokenDirectory()`) - (java expects a String) #' @param disabled a flag to switch roogledocs off (on a document by document #' basis, for testing or development. 
This can be set globally with #' `options('roogledocs.disabled'=TRUE)` - (defaulting to #' `getOption('roogledocs.disabled',FALSE)`) - (java expects a boolean) #' @return R6 RoogleDocs object: #' itself - a fluent method #' @export doc_by_id = function(shareUrlOrDocId, tokenDirectory=.tokenDirectory(), disabled=getOption('roogledocs.disabled',FALSE)) { # get the API singleton J = JavaApi$get() # execute the R6 function call with the same parameters out = J$RoogleDocs$docById(shareUrlOrDocId, tokenDirectory, disabled) if(is.null(out)) return(invisible(out)) return(out) } #' docByName: Get a document by name or create a blank document if missing. #' #' no description #' @param title a document title. If there is an exact match in google drive #' then that document will be used - (java expects a String) #' @param tokenDirectory the place to store authentication tokens. This should #' not be checked into version control. - (defaulting to #' `.tokenDirectory()`) - (java expects a String) #' @param disabled a flag to switch roogledocs off (on a document by document #' basis, for testing or development. This can be set globally with #' `options('roogledocs.disabled'=TRUE)` - (defaulting to #' `getOption('roogledocs.disabled',FALSE)`) - (java expects a boolean) #' @return R6 RoogleDocs object: #' itself - a fluent method #' @export doc_by_name = function(title, tokenDirectory=.tokenDirectory(), disabled=getOption('roogledocs.disabled',FALSE)) { # get the API singleton J = JavaApi$get() # execute the R6 function call with the same parameters out = J$RoogleDocs$docByName(title, tokenDirectory, disabled) if(is.null(out)) return(invisible(out)) return(out) } #' docFromTemplate: Get a document by name or create one from a template if missing. #' #' no description #' @param title a document title. If there is an exact match in google drive #' then that document will be used #' otherwise a new one will be created. 
- (java expects a String) #' @param templateUri the share link (or document id) of a template google #' document - (java expects a String) #' @param tokenDirectory the place to store authentication tokens. This should #' not be checked into version control. - (defaulting to #' `.tokenDirectory()`) - (java expects a String) #' @param disabled a flag to switch roogledocs off (on a document by document #' basis, for testing or development. This can be set globally with #' `options('roogledocs.disabled'=TRUE)` - (defaulting to #' `getOption('roogledocs.disabled',FALSE)`) - (java expects a boolean) #' @return R6 RoogleDocs object: #' itself - a fluent method #' @export doc_from_template = function(title, templateUri, tokenDirectory=.tokenDirectory(), disabled=getOption('roogledocs.disabled',FALSE)) { # get the API singleton J = JavaApi$get() # execute the R6 function call with the same parameters out = J$RoogleDocs$docFromTemplate(title, templateUri, tokenDirectory, disabled) if(is.null(out)) return(invisible(out)) return(out) } #' searchForDocuments: Search for documents with the given title #' #' no description #' @param titleMatch a string to be searched for as an approximate match. All #' results will be retrieved with document ids. - (java expects a String) #' @param tokenDirectory the place to store authentication tokens. This should #' not be checked into version control. - (defaulting to #' `.tokenDirectory()`) - (java expects a String) #' @return RDataframe: #' a dataframe containing id and name columns #' @export search_for_documents = function(titleMatch, tokenDirectory=.tokenDirectory()) { # get the API singleton J = JavaApi$get() # execute the R6 function call with the same parameters out = J$RoogleDocs$searchForDocuments(titleMatch, tokenDirectory) if(is.null(out)) return(invisible(out)) return(out) } #' deleteDocument: Deletes a google document by name. #' #' no description #' @param docName - the name of a document to delete. 
must be an exact and #' unique match. - (java expects a String) #' @param areYouSure - a boolean check. - (defaulting to #' `utils::askYesNo(paste0('Are you sure ...`) - (java expects a boolean) #' @param tokenDirectory - (defaulting to `.tokenDirectory()`) - (java expects a String) #' @param disabled - (defaulting to `getOption('roogledocs.disabled',FALSE)`) - (java expects a boolean) #' @return void: #' nothing, called for side efffects #' @export delete_document = function(docName, areYouSure=utils::askYesNo(paste0('Are you sure you want to delete ',docName),FALSE), tokenDirectory=.tokenDirectory(), disabled=getOption('roogledocs.disabled',FALSE)) { # get the API singleton J = JavaApi$get() # execute the R6 function call with the same parameters out = J$RoogleDocs$deleteDocument(docName, areYouSure, tokenDirectory, disabled) if(is.null(out)) return(invisible(out)) return(out) }
1f36acf8ba5b9b9531526c1bc4dec33b850a592b
793429873754b0f7b44ec4f04e7f877976cb87cf
/manipulation/map/retired/1-encode-multistate-mmse.R
546df09aa6b3f0e67d85ba30c6904a79e795e616
[]
no_license
IALSA/ialsa-2018-amsterdam
c11d16451a1fd4b6d7f703371a73aba5febd76e1
7af21117757da67337ab54a95a04a092a249a51c
refs/heads/master
2021-09-04T22:10:53.310639
2018-01-22T15:41:29
2018-01-22T15:41:29
108,875,067
0
0
null
null
null
null
UTF-8
R
false
false
19,045
r
1-encode-multistate-mmse.R
# knitr::stitch_rmd(script="./manipulation/rename-classify.R", output="./manipulation/rename-classify.md") #These first few lines run only when the file is run in RStudio, !!NOT when an Rmd/Rnw file calls it!! rm(list=ls(all=TRUE)) #Clear the variables from previous runs. # ---- load-sources ------------------------------------------------------------ # Call `base::source()` on any repo file that defines functions needed below. Ideally, no real operations are performed. source("./scripts/functions-common.R") # used in multiple reports # source("./scripts/graph-presets.R") # fonts, colors, themes # source("./scripts/general-graphs.R") # source("./scripts/specific-graphs.R") # ---- load-packages ----------------------------------------------------------- # Attach these packages so their functions don't need to be qualified: http://r-pkgs.had.co.nz/namespace.html#search-path library(magrittr) #Pipes # Verify these packages are available on the machine, but their functions need to be qualified: http://r-pkgs.had.co.nz/namespace.html#search-path requireNamespace("ggplot2", quietly=TRUE) requireNamespace("dplyr", quietly=TRUE) #Avoid attaching dplyr, b/c its function names conflict with a lot of packages (esp base, stats, and plyr). 
requireNamespace("testit", quietly=TRUE) # requireNamespace("plyr", quietly=TRUE) # ---- declare-globals --------------------------------------------------------- path_input <- "./data/unshared/derived/0-dto.rds" path_output <- "./data/unshared/derived/1-dto.rds" options( origin="1970-01-01" ) # ---- load-data --------------------------------------------------------------- # load the product of 0-ellis-island.R, a list object containing data and metad dto <- readRDS(path_input) # ---- inspect-data ------------------------------------------------------------- names(dto) # 1st element - unit(person) level data # dplyr::tbl_df(dto[["unitData"]]) # 2nd element - meta data, info about variables # dto[["metaData"]] # ---- tweak-data -------------------------------------------------------------- ds <- dto[["unitData"]] # table(ds$fu_year, ds$dementia) ds <- ds %>% dplyr::mutate( wave = fu_year # the function to examine temporal structure depend on dataset to have a variable "wave" ) %>% dplyr::rename( physact = phys5itemsum ) # some predictors needs to be transformed into time-invariate # we will follow the convention of computing the median value across lifespan # instead of assigned the value at baseline (but this is somewhat arbitrary) # ---- force-to-static-height --------------------------- ds %>% view_temporal_pattern("htm", 2) # with seed ds %>% temporal_pattern("htm") # random every time ds %>% over_waves("htm"); # 2, 4, 6 # check that values are the same across waves ds %>% dplyr::group_by(id) %>% dplyr::summarize(unique = length(unique(htm))) %>% dplyr::arrange(desc(unique)) # unique > 1 indicates change over wave # grab the value for the first wave and forces it to all waves ds <- ds %>% dplyr::group_by(id) %>% # compute median height across lifespan dplyr::mutate( htm_bl = dplyr::first(htm), # htm_med = median(htm, na.rm =T) # computes the median height across lifespan ) %>% dplyr::ungroup() # examine the difference ds %>% view_temporal_pattern("htm_med", 2) 
ds %>% view_temporal_pattern("htm", 2) # ---- force-to-static-bmi --------------------------- ds %>% view_temporal_pattern("bmi", 2) # with seed ds %>% temporal_pattern("bmi") # random every time ds %>% over_waves("bmi"); # 2, 4, 6 # check that values are the same across waves ds %>% dplyr::group_by(id) %>% dplyr::summarize(unique = length(unique(bmi))) %>% dplyr::arrange(desc(unique)) # unique > 1 indicates change over wave # grab the value for the first wave and forces it to all waves ds <- ds %>% dplyr::group_by(id) %>% # compute median height across lifespan dplyr::mutate( bmi_bl = dplyr::first(bmi), # bmi_med = median(bmi, na.rm =T) # computes the median height across lifespan ) %>% dplyr::ungroup() # examine the difference ds %>% view_temporal_pattern("bmi_med", 2) ds %>% view_temporal_pattern("bmi", 2) # ---- force-to-static-gait --------------------------- ds %>% view_temporal_pattern("gait", 2) # with seed ds %>% temporal_pattern("gait") # random every time ds %>% over_waves("gait"); # 2, 4, 6 # check that values are the same across waves ds %>% dplyr::group_by(id) %>% dplyr::summarize(unique = length(unique(gait))) %>% dplyr::arrange(desc(unique)) # unique > 1 indicates change over wave # grab the value for the first wave and forces it to all waves ds <- ds %>% dplyr::group_by(id) %>% # compute median height across lifespan dplyr::mutate( gait_bl = dplyr::first(gait), # gait_med = median(gait, na.rm =T) # computes the median height across lifespan ) %>% dplyr::ungroup() # examine the difference ds %>% view_temporal_pattern("gait_med", 2) ds %>% view_temporal_pattern("gait", 2) # ---- force-to-static-grip --------------------------- ds %>% view_temporal_pattern("grip", 2) # with seed ds %>% temporal_pattern("grip") # random every time ds %>% over_waves("grip"); # 2, 4, 6 # check that values are the same across waves ds %>% dplyr::group_by(id) %>% dplyr::summarize(unique = length(unique(grip))) %>% dplyr::arrange(desc(unique)) # unique > 1 indicates change 
over wave # grab the value for the first wave and forces it to all waves ds <- ds %>% dplyr::group_by(id) %>% # compute median height across lifespan dplyr::mutate( grip_bl = dplyr::first(grip), # grip_med = median(grip, na.rm =T) # computes the median height across lifespan ) %>% dplyr::ungroup() # examine the difference ds %>% view_temporal_pattern("grip_med", 2) ds %>% view_temporal_pattern("grip", 2) # ---- force-to-static-physact --------------------------- ds %>% view_temporal_pattern("physact", 2) # with seed ds %>% temporal_pattern("physact") # random every time ds %>% over_waves("physact"); # 2, 4, 6 # check that values are the same across waves ds %>% dplyr::group_by(id) %>% dplyr::summarize(unique = length(unique(physact))) %>% dplyr::arrange(desc(unique)) # unique > 1 indicates change over wave # grab the value for the first wave and forces it to all waves ds <- ds %>% dplyr::group_by(id) %>% # compute median height across lifespan dplyr::mutate( physact_bl = dplyr::first(physact), # physact_med = median(physact, na.rm =T) # computes the median height across lifespan ) %>% dplyr::ungroup() # examine the difference ds %>% view_temporal_pattern("physact_med", 2) ds %>% view_temporal_pattern("physact", 2) # ---- describe-before-encoding -------------------------------------------------------------- # if died==1, all subsequent focal_outcome==DEAD. # during debuggin/testing use only a few ids, for manipulation use all set.seed(43) ids <- sample(unique(ds$id),3) # randomly select a few ids # custom select a few ids that give different pattern of data. 
To be used for testing ids <- c(33027) #,33027, 50101073, 6804844, 83001827 , 56751351, 13485298, 30597867) # ---- into-long-format ------------------------- ds_long <- ds %>% # dplyr::filter(id %in% ids) %>% # turn this off when using the entire sample dplyr::mutate( age_at_bl = as.numeric(age_bl), age_at_death = as.numeric(age_death), male = as.logical(ifelse(!is.na(msex), msex=="1", NA_integer_)), edu = as.numeric(educ) ) %>% dplyr::select_(.dots = c( "id" # personal identifier ,"male" # gender ,"edu" # years of education ,"age_bl" # age at baseline ,"age_at_death" # age at death ,"died" # death indicator ,"birth_year" # year of birth ,"htm_med" # height in meters, median across observed across lifespan ,"bmi_med" # Body Mass Index, median across observed across lifespan ,"physact_med" # Physical activity, median across observed across lifespan # time-invariant above ,"fu_year" # Follow-up year --- --- --- --- --- --- --- --- --- --- # time-variant below ,"date_at_visit" # perturbed date of visit ,"age_at_visit" # age at cycle - fractional ,"mmse" # mini mental state exam (max =30) ,"cogn_global" # global cognition ,"dementia" # dementia diagnosis (?) 
,"gait" # Gait Speed in minutes per second (min/sec) ,"grip" # Extremity strengtg in pounds (lbs) ,"htm" # height in meters ,"bmi" # Body Mass Index in kilograms per meter squared (kg/msq) ,"physact" # Physical activity (sum of 5 items) ) ) # save to disk for direct examination # write.csv(d,"./data/shared/musti-state-dementia.csv") # inspect crated data object ds_long %>% dplyr::filter(id %in% ids) %>% print() # ---- attrition-effect ------------------------------ t <- table(ds_long[,"fu_year"], ds_long[,"died"]); t[t==0]<-".";t # ----- mmmse-trajectories ---------------------- # raw_smooth_lines(ds_long, "mmse") # ---- encode-missing-states --------------------------- # create an object ds_miss from ds_long # x <- c(NA, 5, NA, 7) determine_censor <- function(x, is_right_censored){ ifelse(is_right_censored, -2, ifelse(is.na(x), -1, x) ) } # new way # # create file with missing information # make_ds_miss <- function( # d, # variable # ){ # d_long <- d %>% # dplyr::mutate_( # "target" = variable # ) %>% # as.data.frame() # # (N <- length(unique(d_long$id))) # sample size # subjects <- as.numeric(unique(d_long$id)) # list the ids # # for(i in 1:N){ # # for(i in unique(ds$id)){ # use this line for testing # # Get the individual data: # (dta.i <- d_long[d_long$id==subjects[i],]) # select a single individual # # (dta.i <- d_long[d_long$id==6804844,]) # select a single individual # use this line for testing # (dta.i <- as.data.frame(dta.i %>% dplyr::arrange(-age_at_visit))) # enforce sorting # (dta.i$missed_last_wave <- (cumsum(!is.na(dta.i$target))==0L)) # is the last obs missing? # (dta.i$presumed_alive <- is.na(any(dta.i$age_at_death))) # can we presume subject alive? # (dta.i$right_censored <- dta.i$missed_last_wave & dta.i$presumed_alive) # right-censored? 
# # dta.i$target_recoded <- determine_censor(dta.i$target, dta.i$right_censored) # use when tracing # (dta.i$target <- determine_censor(dta.i$target, dta.i$right_censored)) # replace in reality # (dta.i <- as.data.frame(dta.i %>% dplyr::arrange(age_at_visit))) # (dta.i <- dta.i %>% dplyr::select(-missed_last_wave, -right_censored )) # # Rebuild the data: # if(i==1){d_miss <- dta.i}else{d_miss <- rbind(d_miss,dta.i)} # } # # this part is not finished yet, need to make replacing the old variable # d_miss <- d_miss %>% # # drop original variable # dplyr::mutate( # mmse = target # ) %>% # dplyr::select(-target) # # return(d_miss) # } # # usage # ds_miss <- ds_long %>% make_ds_miss(variable = "mmse") # old way (N <- length(unique(ds_long$id))) # sample size subjects <- as.numeric(unique(ds_long$id)) # list the ids # ds_long_temp <- ds_long # i <- 5; for(i in 1:N){ # for(i in unique(ds$id)){ # use this line for testing # Get the individual data: # ds_long <- ds_long_temp %>% # dplyr::select(id, fu_year, age_at_visit,died, age_death, mmse) %>% # as.data.frame() (dta.i <- ds_long[ds_long$id==subjects[i],]) # select a single individual # (dta.i <- ds_long[ds_long$id==6804844,]) # select a single individual # use this line for testing (dta.i <- as.data.frame(dta.i %>% dplyr::arrange(-age_at_visit))) # enforce sorting (dta.i$missed_last_wave <- (cumsum(!is.na(dta.i$mmse))==0L)) # is the last obs missing? (dta.i$presumed_alive <- is.na(any(dta.i$age_at_death))) # can we presume subject alive? # (dta.i$presumed_alive <- is.na(any(dta.i$age_death))) # can we presume subject alive? (dta.i$right_censored <- dta.i$missed_last_wave & dta.i$presumed_alive) # right-censored? 
# dta.i$mmse_recoded <- determine_censor(dta.i$mmse, dta.i$right_censored) # use when tracing (dta.i$mmse <- determine_censor(dta.i$mmse, dta.i$right_censored)) # replace in reality (dta.i <- as.data.frame(dta.i %>% dplyr::arrange(age_at_visit))) (dta.i <- dta.i %>% dplyr::select(-missed_last_wave, -right_censored )) # Rebuild the data: if(i==1){ds_miss <- dta.i}else{ds_miss <- rbind(ds_miss,dta.i)} } # inspect crated data object ds_miss %>% dplyr::filter(id %in% ids) %>% print() # ds_long %>% dplyr::glimpse() # ---- encode-multi-states ------------------------------ encode_multistates <- function( d, # data frame in long format outcome_name, # measure to compute live states age_name, # age at each wave age_death_name, # age of death dead_state_value # value to represent dead state ){ # declare arguments for debugging # d = ds_miss # outcome_name = "mmse";age_name = "age_at_visit";age_death_name = "age_death";dead_state_value = 4 (subjects <- sort(unique(d$id))) # list subject ids (N <- length(subjects)) # count subject ids d[,"raw_outcome"] <- d[,outcome_name] # create a copy # standardize names colnames(d)[colnames(d)==outcome_name] <- "state" # ELECT requires this name colnames(d)[colnames(d)==age_name] <- "age" # ELECT requires this name # for(i in unique(ds$id)){ # use this line for testing for(i in 1:N){ # Get the individual data: i = 1 (dta.i <- d[d$id==subjects[i],]) # (dta.i <- ds_long[ds_long$id==6804844,]) # select a single individual # use this line for testing # Encode live states dta.i$state <- ifelse( dta.i$state > 26, 1, ifelse( # healthy dta.i$state <= 26 & dta.i$state >= 23, 2, ifelse( # mild CI dta.i$state < 23 & dta.i$state >= 0, 3, dta.i$state))) # mod-sever CI # Is there a death? 
If so, add a record: (death <- !is.na(dta.i[,age_death_name][1])) if(death){ (record <- dta.i[1,]) (record$state <- dead_state_value) (record$age <- dta.i[,age_death_name][1]) (ddta.i <- rbind(dta.i,record)) }else{ddta.i <- dta.i} # Rebuild the data: if(i==1){dta1 <- ddta.i}else{dta1 <- rbind(dta1,ddta.i)} } dta1[,age_death_name] <- NULL colnames(dta1)[colnames(dta1)=="raw_outcome"] <- outcome_name dta1[dta1$state == dead_state_value,outcome_name] <- NA_real_ dta1[dta1$state == dead_state_value,"fu_year"] <- NA_real_ return(dta1) } ds_ms <- encode_multistates( d = ds_miss, outcome_name = "mmse", age_name = "age_at_visit", age_death_name = "age_at_death", dead_state_value = 4 ) # set.seed(NULL) # ids <- sample(unique(ds$id),1) ids <- 50107169 ds_ms %>% dplyr::filter(id %in% ids) %>% print() # ---- correct-values-at-death ----------------------- correct_values_at_death <- function( ds, # data frame in long format with multistates encoded outcome_name, # measure to correct value in dead_state_value # value that represents dead state ){ ds[ds$state == dead_state_value, outcome_name] <- NA_real_ return(ds) } # manually correct values for data_at_visit ds_ms[ds_ms$state == 4, "date_at_visit"] <- NA # because of date format # automatically correct values for time-variant measures ds_ms <- ds_ms %>% correct_values_at_death("date_at_visit",4) ds_ms <- ds_ms %>% correct_values_at_death("dementia",4) ds_ms <- ds_ms %>% correct_values_at_death("cogn_global",4) ds_ms <- ds_ms %>% correct_values_at_death("gait",4) ds_ms <- ds_ms %>% correct_values_at_death("grip",4) ds_ms <- ds_ms %>% correct_values_at_death("htm",4) ds_ms <- ds_ms %>% correct_values_at_death("bmi",4) # ds_ms <- ds_ms %>% correct_values_at_death("income_40",4) # ds_ms <- ds_ms %>% correct_values_at_death("cogact_old",4) # ds_ms <- ds_ms %>% correct_values_at_death("socact_old",4) # ds_ms <- ds_ms %>% correct_values_at_death("soc_net",4) # ds_ms <- ds_ms %>% correct_values_at_death("social_isolation",4) # TODO: 
automate this step ds_ms %>% dplyr::filter(id %in% ids) %>% print() # ---- add-firstobs-flag ----------------------------- (N <- length(unique(ds_ms$id))) subjects <- as.numeric(unique(ds_ms$id)) # Add first observation indicator # this creates a new dummy variable "firstobs" with 1 for the first wave cat("\nFirst observation indicator is added.\n") offset <- rep(NA,N) for(i in 1:N){offset[i] <- min(which(ds_ms$id==subjects[i]))} firstobs <- rep(0,nrow(ds_ms)) firstobs[offset] <- 1 ds_ms <- cbind(ds_ms ,firstobs=firstobs) print(head(ds_ms)) # ---- inspect-created-multistates ---------------------------------- # compare before and after ms encoding view_id <- function(ds1,ds2,id){ cat("Data set A:","\n") print(ds1[ds1$id==id,]) cat("\nData set B","\n") print(ds2[ds2$id==id,]) } # view a random person for sporadic inspections set.seed(39) ids <- sample(unique(ds_miss$id),1) # ids <- 68914513 view_id(ds_long, ds_miss, ids) view_id(ds_miss, ds_ms, ids) view_id(ds_long, ds_ms, ids) # ----- transitions-matrix ----------------------------- # simple frequencies of states table(ds_ms$state) # examine transition matrix # msm::statetable.msm(state,id,ds_ms) knitr::kable(msm::statetable.msm(state,id,ds_ms)) # TODO: examine transition cases for missing states (-2 and -1) # ---- save-to-disk ------------------------------------------------------------ # Save as a compress, binary R dataset. It's no longer readable with a text editor, but it saves metadata (eg, factor information). 
# at this point there exist two relevant data sets: # ds_long - subset of variables focal to the project # ds_miss - missing states are encoded # ds_ms - multi states are encoded # it is useful to have access to all three while understanding/verifying encodings names(dto) # dto[["ms_mmse"]][["long"]] <- ds_long dto[["ms_mmse"]][["missing"]] <- ds_miss dto[["ms_mmse"]][["multi"]] <- ds_ms names(dto$ms_mmse) saveRDS(dto, file=path_output, compress="xz") # ---- object-verification ------------------------------------------------ # the production of the dto object is now complete # we verify its structure and content: dto <- readRDS(path_output) pryr::object_size(dto) names(dto) names(dto$ms_mmse) # 1st element - unit(person) level data, all variables available from the source dplyr::tbl_df(dto[["unitData"]]) # 2nd element - meta data, info about variables dto[["metaData"]] %>% tibble::as_tibble() # 3rd element - data for MMSE outcome names(dto[["ms_mmse"]]) # dto$ms_mmse[["ds_long"]] # subset of variables focal to the project # dto$ms_mmse[["ds_miss"]] # missing states are encoded # dto$ms_mmse[["ds_ms"]] # multi states are encoded
1f066c612396cc67d67d7b978fff723455a342d6
557ca5e102157cf9c0817aa6d8a6c1595b544047
/tests/testthat/test-transformations.R
a6ad4689417aa404e0e27ab27b1685bd7a42d128
[]
no_license
cran/lotri
1360e7f3fa981b896cd5402df61edbb283c74fe9
8330bd5e94d74f8398104692a941b377d13daf42
refs/heads/master
2023-04-06T00:52:24.154740
2023-03-20T17:20:12
2023-03-20T17:20:12
186,655,112
0
0
null
null
null
null
UTF-8
R
false
false
2,090
r
test-transformations.R
test_that("transformations", {
  # Nesting order of cor()/sd() must not matter.
  expect_equal(lotri(s1 + s2 + s3 ~ cor(sd(1, 0.25, 4, 0.90, 0.50, 9))),
               lotri(s1 + s2 + s3 ~ sd(cor(1, 0.25, 4, 0.90, 0.50, 9))))
  # cor+sd specification expands to the expected covariance entries.
  expect_equal(lotri(s1 + s2 + s3 ~ cor(sd(1, 0.25, 4, 0.90, 0.50, 9))),
               lotri(s1 + s2 + s3 ~ c(1, 1, 16, 8.1, 18, 81)))
  # Plain cor() with variances on the diagonal.
  expect_equal(lotri(s1 + s2 + s3 ~ cor(1, 0.25, 4, 0.90, 0.50, 9)),
               lotri(s1 + s2 + s3 ~ c(1, 0.5, 4.0, 2.7, 3.0, 9.0)))
  # Invalid transformation mixtures must error.
  expect_error(lotri(s1 + s2 + s3 ~ cor(sd(1, 2, 4, 0.90, 0.50, 9))))
  expect_error(lotri(s1 + s2 + s3 ~ sd(var(1, 0.5, 4, 0.90, 0.50, 9))))
  # var() keeps the stated variances on the diagonal.
  expect_equal(diag(lotri(s1 + s2 + s3 ~ var(1, 0.5, 4, 0.90, 0.50, 9))),
               c(s1 = 1, s2 = 4, s3 = 9))
  ## Cholesky: chol(...) must reproduce L %*% t(L).
  chol_factor <- matrix(c(2.2, 0.4, 0, 1.6), 2, 2)
  expected <- chol_factor %*% t(chol_factor)
  actual <- lotri(s1 + s2 ~ chol(2.2, 0.4, 1.6))
  dimnames(expected) <- dimnames(actual)
  expect_equal(expected, actual)
  # chol() cannot be combined with the other transformations.
  expect_error(lotri(s1 + s2 ~ sd(chol(2.2, 0.4, 1.6))))
  expect_error(lotri(s1 + s2 ~ sd(var(2.2, 0.4, 1.6))))
  expect_error(lotri(s1 + s2 ~ cov(cor(2.2, 0.4, 1.6))))
  expect_error(lotri(s1 + s2 ~ cor(cov(2.2, 0.4, 1.6))))
})
711937c9d6f0861386102afd6e6a69d4eee9621d
7cd8b7c0cd60c18c366d8d6c027df8077c81789e
/Schedules/Original/Regional/_EDP/extract.R
592a6e4dbff0b1ca00ac4cc7c6de9165cba8e144
[]
no_license
tyler-richardett/virginia_competitive_youth_soccer
5c6c99378c888b4f03a2794b9c49a59924d32a9f
022ad240bbe29ff7677e09efdb5438677d291911
refs/heads/master
2021-04-12T11:59:01.294354
2018-07-11T01:45:28
2018-07-11T01:45:28
126,563,080
2
0
null
null
null
null
UTF-8
R
false
false
3,589
r
extract.R
# Scrape EDP league schedules from saved HTML pages in the working directory,
# producing team, field, and schedule tables for two row classes
# ("gameS" and "games2", i.e. the two alternating table-row colours).
library(dplyr)
library(rvest)

# All saved HTML files: drop scripts (*.R) and previous outputs (*.csv).
filenames <- list.files()
filenames <- filenames[-grep("R$", filenames)]
filenames <- filenames[-grep("csv$", filenames)]

## Used this to add class names to the table rows.
## for (i in 1:length(filenames)) {
##     tmp <- readLines(filenames[i])
##     tmp <- gsub('tr bgcolor="#f5f5f5"', 'tr bgcolor="#f5f5f5" class="gameS"', tmp)
##     tmp <- gsub('tr bgcolor="#ffffff"', 'tr bgcolor="#ffffff" class="games2"', tmp)
##     writeLines(tmp, filenames[i])
## }

# Pass 1: rows tagged class="gameS".
# File names encode gender (1st char B/G) and age group (chars 3-5).
schedule <- data.frame()
for (i in 1:length(filenames)) {
    tmp.home <- read_html(filenames[i]) %>%
        html_nodes("tr.gameS td:nth-child(3)") %>%
        html_text(trim = TRUE)
    tmp.away <- read_html(filenames[i]) %>%
        html_nodes("tr.gameS td:nth-child(5)") %>%
        html_text(trim = TRUE)
    tmp.fields <- read_html(filenames[i]) %>%
        html_nodes("tr.gameS td:nth-child(7)") %>%
        html_text(trim = TRUE)

    tmp <- data.frame(Home = tmp.home, Away = tmp.away, Field = tmp.fields, stringsAsFactors = FALSE)
    tmp$Age.Group <- rep(substring(filenames[i], 3, 5), nrow(tmp))
    tmp$Gender <- rep(ifelse(substring(filenames[i], 1, 1) == "B", "Boys", "Girls"), nrow(tmp))

    schedule <- rbind(schedule, tmp)
}

# Disambiguate team names by appending age group, gender, and league.
schedule <- schedule %>%
    mutate(Home = paste(Home, Age.Group, Gender, "EDP"),
           Away = paste(Away, Age.Group, Gender, "EDP"))

## Save full schedule.
## write.csv(schedule, "EDP_Full.csv", row.names = FALSE, na = "")

teams <- schedule %>%
    select(Home, Age.Group, Gender) %>%
    arrange(Home) %>%
    distinct()

fields <- schedule %>%
    filter(Field != "Unassigned" & Field != "") %>%
    select(Field) %>%
    arrange(Field) %>%
    distinct()

schedule <- schedule %>%
    filter(Field != "Unassigned" & Field != "") %>%
    select(Home, Away, Field)

## Save teams, fields, and schedule.
## write.csv(teams, "EDP_Teams.csv", row.names = FALSE, na = "")
## write.csv(fields, "EDP_Fields.csv", row.names = FALSE, na = "")
## write.csv(schedule, "EDP_Schedule.csv", row.names = FALSE, na = "")

#####

# Pass 2: same extraction for rows tagged class="games2".
# NOTE(review): this duplicates pass 1 almost verbatim; a shared helper
# parameterized by the row class would remove the duplication.
schedule2 <- data.frame()
for (i in 1:length(filenames)) {
    tmp.home <- read_html(filenames[i]) %>%
        html_nodes("tr.games2 td:nth-child(3)") %>%
        html_text(trim = TRUE)
    tmp.away <- read_html(filenames[i]) %>%
        html_nodes("tr.games2 td:nth-child(5)") %>%
        html_text(trim = TRUE)
    tmp.fields <- read_html(filenames[i]) %>%
        html_nodes("tr.games2 td:nth-child(7)") %>%
        html_text(trim = TRUE)

    tmp <- data.frame(Home = tmp.home, Away = tmp.away, Field = tmp.fields, stringsAsFactors = FALSE)
    tmp$Age.Group <- rep(substring(filenames[i], 3, 5), nrow(tmp))
    tmp$Gender <- rep(ifelse(substring(filenames[i], 1, 1) == "B", "Boys", "Girls"), nrow(tmp))

    schedule2 <- rbind(schedule2, tmp)
}

schedule2 <- schedule2 %>%
    mutate(Home = paste(Home, Age.Group, Gender, "EDP"),
           Away = paste(Away, Age.Group, Gender, "EDP"))

## Save full schedule.
## write.csv(schedule2, "EDP_Full2.csv", row.names = FALSE, na = "")

teams2 <- schedule2 %>%
    select(Home, Age.Group, Gender) %>%
    arrange(Home) %>%
    distinct()

fields2 <- schedule2 %>%
    filter(Field != "Unassigned" & Field != "" & Field != "TBD TBD") %>%
    select(Field) %>%
    arrange(Field) %>%
    distinct()

schedule2 <- schedule2 %>%
    filter(Field != "Unassigned" & Field != "" & Field != "TBD TBD") %>%
    select(Home, Away, Field)

## Save teams, fields, and schedule.
## (fixed: these previously referenced teams/fields/schedule from pass 1)
## write.csv(teams2, "EDP_Teams2.csv", row.names = FALSE, na = "")
## write.csv(fields2, "EDP_Fields2.csv", row.names = FALSE, na = "")
## write.csv(schedule2, "EDP_Schedule2.csv", row.names = FALSE, na = "")
4168f0f9c810f3caa177448e1dec8d7d3cc65d2c
6cfdbcb855132784afafe626f3735b62de76d3db
/R/delKwv.R
e00a5a8141146e7560f8c534c261109f88e02be0
[]
no_license
cran/FITSio
5c6f18fef0bb127ceedcbb2177ad0da8a0f61b27
70ec50e8b4cac12b280725d628493b6d867261b1
refs/heads/master
2021-07-11T17:58:25.587739
2021-04-03T18:10:02
2021-04-03T18:10:02
17,679,149
2
1
null
null
null
null
UTF-8
R
false
false
584
r
delKwv.R
delKwv <- function(keyw, headerName) {
    # Delete a 'keyword = value /note' card image from a FITS header.
    #
    # A keyword card is identified by the keyword name padded to 8
    # characters followed by '=' in column 9; matching is case-insensitive.
    # Errors if no card matches.
    #
    # For multi-card select, use grep, e.g. idx <- grep('^TEST[7-9] ', header)
    #
    # A. Harris 2012.10.13

    # Build the canonical 9-character prefix to search for.
    target <- toupper(sprintf('%-8s=', substr(keyw, 1, 8)))
    hits <- which(substr(toupper(headerName), 1, 9) == target)
    if (length(hits) == 0) stop('*** No such keyword to delete ***')

    # Return the header with the matching card image(s) removed.
    headerName[-hits]
}
a937c927eef4dc1929d969f4638b4889a67d3e4a
60e613aa8e6ac746abc0540c657d92c52006e085
/server.R
5b1111d094d7513d1846bd154b030ebd8cf3ec3d
[]
no_license
elombardi-cleve/intRo
730ce57f827b959f0b4165532d51e07249cf45ff
991c3267c625e6d92ab8d2ec2c6563f6e775271a
refs/heads/master
2021-01-24T02:52:52.520195
2015-03-21T15:00:12
2015-03-21T15:00:12
null
0
0
null
null
null
null
UTF-8
R
false
false
2,282
r
server.R
###
### Shiny Server definition
###
shinyServer(function(input, output, session) {

  ## Module info
  module_info <- read.table("modules/modules.txt", header = TRUE, sep=",")

  ## Get directory ready for code printing
  # Per-session scratch dir; stale code_* files are removed on startup.
  userdir <- file.path(tempdir(), tempfile())
  dir.create(userdir, recursive = TRUE)
  sapply(file.path(userdir, dir(userdir)[grep("code_", dir(userdir))]), file.remove)

  ## Maximum file upload size = 10MB
  options(shiny.maxRequestSize=10*1024^2)

  ## Reactive values
  values <- reactiveValues(mydat = NULL, mydat_rand = NULL)

  #cat(paste(readLines("global.R"), collapse = "\n"), file = "code_global.R")
  # Seed the generated script with a remote bootstrap.
  # NOTE(review): eval(parse(text = getURL(...))) executes remote code from
  # GitHub at run time — a supply-chain risk if that URL is compromised.
  cat("library(RCurl)\n", file = file.path(userdir, "code_All.R"))
  cat("eval(parse(text = getURL('https://raw.githubusercontent.com/gammarama/intRo/master/global.R')))",
      file = file.path(userdir, "code_All.R"), append = TRUE)

  ## Modules
  # Each module contributes five files; source all of them into this scope.
  types <- c("helper.R", "static.R", "observe.R", "reactive.R", "output.R")
  modules_tosource <- file.path("modules",
                                apply(expand.grid(module_info$module, types), 1, paste, collapse = "/"))

  ## Source the modules
  for (mod in modules_tosource) {
    source(mod, local = TRUE)
  }

  ## Check for file update every 500 ms
  code <- reactiveFileReader(500, session, file.path(userdir, "code_All.R"), clean_readlines)

  ## Code Viewing
  observe({
    updateAceEditor(session, "myEditor", value=paste(code(), collapse="\n"))
  })

  ## Printing
  # Render the generated script to HTML when the print button is clicked;
  # hide the code chunks unless the code toggle is on.
  observe({
    # NOTE(review): bare `return` (no parentheses) does not exit the observer;
    # it evaluates to the `return` function itself. Likely intended `return()`.
    if(is.null(input)) return
    if(length(input$print_clicked) > 0) {
      file <- NULL
      if(input$print_clicked) {
        oldwd <- getwd()
        if(!input$code_clicked) {
          file <- render(file.path(userdir, "code_All.R"),
                         html_document(css = file.path(oldwd, "www/hide_code.css")),
                         output_dir = file.path(oldwd, "www"))
        } else {
          file <- render(file.path(userdir, "code_All.R"),
                         output_dir = file.path(oldwd, "www"))
        }
        session$sendCustomMessage(type = "renderFinished",
                                  paste(readLines(file), collapse="\n"))
      }
    }
  })
})
65abb91589590e39c2f28b63368e9557d71e28d8
602a082d7001f49a40c75f809211b80f9ba7e989
/code/vax_plots.R
d47bd8157fc8b714b0d5acb7d697e9e9f31a4570
[ "MIT" ]
permissive
ideogen/COVID-19-Danmark
f7fe8bac1f78f019db564f98ddebb5f29076d834
d17e907e50f4ab8904f1d9f33b13632e8075adf6
refs/heads/master
2023-02-19T19:17:38.886897
2021-01-22T13:30:08
2021-01-22T13:30:08
null
0
0
null
null
null
null
UTF-8
R
false
false
2,699
r
vax_plots.R
# Parse SSI's daily vaccination PDF and produce two national plots:
# (1) started vaccinations by age group and sex, (2) cumulative counts.
# Assumes today/today_string, standard_theme, my_date_labels and the
# pdftools/tidyverse/magrittr toolchain are loaded by the calling script.
vax <- pdf_text(paste0("../data/Vax_data/Vaxdata_", today_string, ".pdf")) %>%
  read_lines()

# "Tabel 4": counts by age group; data rows start 4 lines below the 2nd hit.
tabel_4 <- which(str_detect(vax, "Tabel 4"))[2]
age_vax <- vax[(tabel_4 + 4):(tabel_4 + 13)]
#age_vax_colnames <- vax[(tabel_4 + 1)]

# Split the fixed-width PDF text rows on single spaces.
age_vax %<>%
  str_squish() %>%
  strsplit(split = " ")

# age_vax_colnames %<>%
#   str_squish() %>%
#   strsplit(split = " ")

age_vax_df <- data.frame(matrix(unlist(age_vax), nrow=length(age_vax), byrow=T))
#colnames(age_vax_df) <- c(unlist(age_vax_colnames)[1:3], "Total")

# Danish thousands separator "." is stripped before numeric conversion.
age_vax_df %>%
  as_tibble %>%
  set_colnames(c("Aldersgruppe", "Female_start", "Female_done", "Male_start", "Male_done", "Total")) %>%
  mutate_all(str_replace_all, "\\.", "") %>%
  mutate(across(-Aldersgruppe, as.double)) %>%
  select(-Total) %>%
  pivot_longer(-Aldersgruppe, names_to = "sex", values_to = "value") %>%
  filter(sex %in% c("Female_start", "Male_start")) %>%
  ggplot() +
  geom_bar(aes(Aldersgruppe, value, fill = sex), stat = "identity", position = "dodge") +
  labs(y = "Antal",
       title = "Antal påbegyndt COVID-19 vaccinerede per køn og alder",
       caption = "Kristoffer T. Bæk, covid19danmark.dk, datakilde: SSI",
       subtitle = paste0("Til og med ", str_to_lower(strftime(as.Date(today)-1, "%e. %b %Y")))) +
  scale_fill_manual(name = "", labels = c("Kvinder", "Mænd"), values=c("#11999e", "#30e3ca")) +
  standard_theme

ggsave("../figures/ntl_vax_age.png", width = 18, height = 10, units = "cm", dpi = 300)

# "Tabel 2": one row per day since the vaccination start (2020-12-27).
tabel_2 <- max(which(str_detect(vax, "Tabel 2")))
days_since_start <- as.integer(as.Date(today) - as.Date("2020-12-27"))
time_vax <- vax[(tabel_2 + 5):(tabel_2 + 4 + days_since_start)]

time_vax %<>%
  str_squish() %>%
  strsplit(split = " ")

time_vax_df <- data.frame(matrix(unlist(time_vax), nrow=length(time_vax), byrow=T))

# Columns X1/X3/X6 hold date, cumulative started, cumulative finished;
# "." thousands separators dropped, "," decimal commas converted to ".".
time_vax_df %>%
  as_tibble %>%
  select(X1, X3, X6) %>%
  set_colnames(c("Date", "Begun", "Done")) %>%
  mutate_all(str_replace_all, "\\.", "") %>%
  mutate_all(str_replace_all, "\\,", ".") %>%
  mutate(across(c(Begun, Done), as.double)) %>%
  mutate(Date = dmy(Date)) %>%
  pivot_longer(-Date, names_to = "variable", values_to = "value") %>%
  ggplot() +
  geom_line(aes(Date, value, color = variable), size = 2) +
  scale_x_date(labels = my_date_labels, date_breaks = "1 week") +
  scale_y_continuous(limits = c(0, NA), labels = scales::number) +
  scale_color_manual(name = "", labels = c("Påbegyndt", "Færdigvaccineret"), values=c("#11999e", "#30e3ca")) +
  labs(y = "Antal",
       title = "Kumuleret antal COVID-19 vaccinerede",
       caption = "Kristoffer T. Bæk, covid19danmark.dk, datakilde: SSI") +
  standard_theme

ggsave("../figures/ntl_vax_cum.png", width = 18, height = 10, units = "cm", dpi = 300)
491b11ff407cadfcda008dbbb8fb860429421eb8
5aa62329c37dd3623203a1f177c3b047043dfe3b
/R/plot.loc_score.weight_mesh_val.R
50dbf64afc5d1a1bf65cc64cd583ea64b4ace96c
[ "MIT" ]
permissive
mrmtshmp/geospat
d6d4c42702e53c1be5b5180c3c8aecf9949c0aeb
e53a7b4b89c27c9c129afa057f8f92a543369e16
refs/heads/master
2021-07-17T20:34:24.783044
2020-10-05T08:31:49
2020-10-05T08:31:49
218,003,862
0
0
null
null
null
null
UTF-8
R
false
false
7,091
r
plot.loc_score.weight_mesh_val.R
#' Make histogram of diatance to pharmacy and each pharmacy's scores.
#'
#' Reads a saved location-score object and regional shape files, draws the
#' pharmacy scores on a map atlas (one page overall plus one page per colour
#' bucket), then histograms of the scores, all into a single PDF.
#'
#' @import sp
#' @import sf
#' @import ggplot2
#' @importFrom raster shapefile
#' @importFrom plyr ldply
#' @importFrom dplyr select
#' @importFrom dplyr left_join
#' @importFrom plyr .
#' @importFrom plyr ddply
#' @importFrom tibble rownames_to_column
#' @importFrom tidyr gather
#' @importFrom dplyr arrange
#' @importFrom dplyr mutate
#' @importFrom magrittr %>%
#' @importFrom leaflet colorQuantile
#'
#' @param fn.RData.loc_score Path to an RData file that defines `test`, a
#'   3-element list (see body). Default "../Data/test_HmMs_2025.RData".
#' @param rbPal Colour palette function; built via leaflet::colorQuantile
#'   when NULL.
#' @param vec.prob_q Quantile probabilities for the palette; defaults to
#'   c(0.0, 0.3, 0.8, 0.9, 1.0) when NULL.
#' @param dir.Data Directory containing the shape files. Default "../Data".
#' @param weight.var_name Weight-variable names to plot.
#' @param fn.Shape.GovRegion A character vector of (a) file name(s) (with ".shp") as the background.
#' @param fn.ShapeP.SchoolRegion School-region point shape file.
#' @param fn.Shape.SchoolRegion School-region polygon shape file.
#' @param prefix.pdf_output Prefix of the output PDF file name.
#'
#' @export
plot.wSDG <- function(
  fn.RData.loc_score = "../Data/test_HmMs_2025.RData",
  rbPal = NULL,
  vec.prob_q = NULL,
  dir.Data = "../Data",
  weight.var_name = c("score.merged_PTD_2025","score.merged_PTE_2025"),
  fn.Shape.GovRegion = c(
    '/190706/N03-190101_30_GML/N03-19_30_190101.shp',
    '/190706/N03-190101_24_GML/N03-19_24_190101.shp',
    '/190706/N03-190101_27_GML/N03-19_27_190101.shp',
    '/190706/N03-190101_29_GML/N03-19_29_190101.shp'
  ),
  fn.ShapeP.SchoolRegion = "/190626/A32-16_30_GML/shape/A32P-16_30.shp",
  fn.Shape.SchoolRegion = "/190706/A32-13_30/shp/A32-13_30.shp",
  prefix.pdf_output = "location_score.weight"
){

  # Data Loading -----------------------------------------------------------------
  # NOTE(review): load() must create `test` in this scope — TODO confirm the
  # RData contents.
  load(
    file = fn.RData.loc_score
  )

  long.df.res.distm.rank_1 <- test[[1]]
  long.long.df.res.distm.rank_1.merge_mesh_on_pharm <- test[[2]]
  rank.restrict <- test[[3]]

  Shape.SchoolRegion <-
    shapefile(
      sprintf(
        "%s/%s",
        dir.Data,
        fn.Shape.SchoolRegion
      )
    )

  print('Read (a) shape file(s) (goverment region)')

  # Read every government-region shape file and rbind them into one object.
  for(i in 1:length(fn.Shape.GovRegion)){
    Shape_i.GovRegion <-
      shapefile(
        sprintf(
          "%s/%s",
          dir.Data,
          fn.Shape.GovRegion[i]
        )
      )
    if(i==1){
      Shape.GovRegion <- Shape_i.GovRegion
    }else{
      Shape.GovRegion <- Shape.GovRegion %>% rbind(Shape_i.GovRegion)
    }
  }

  # Shape files (school region) -------------------------------------------------------------------
  ShapeP.SchoolRegion <-
    shapefile(
      sprintf(
        "%s/%s",
        dir.Data,
        fn.ShapeP.SchoolRegion
      )
    )

  # print("Loading shape (a) file(s).")
  # for(i in 1:length(fn.mesh.popEst)){
  #   i.Shape.mesh.pop_Est <-
  #     rgdal::readOGR(
  #       sprintf(
  #         "%s",
  #         fn.mesh.popEst[i]
  #       )
  #     )
  #   if(i==1){
  #     Shape.mesh.pop_Est <- i.Shape.mesh.pop_Est
  #   }else{
  #     Shape.mesh.pop_Est <- rbind(
  #       Shape.mesh.pop_Est,
  #       i.Shape.mesh.pop_Est
  #     )
  #   }
  # }

  # NOTE(review): hard-coded path inside the function; `=` used where `<-`
  # is conventional for assignment.
  fn.SpatialPointDataFrame = '../Output/DataForMap.Wakayama_v01.RData'
  # NOTE(review): expected to create `sptsDataframe` (and presumably
  # `spts_CommuCareCentr`, used below) — TODO confirm.
  load(
    file = fn.SpatialPointDataFrame
  )

  # Plot scores on atlas. ---------------------------------------------------

  # Attach the per-pharmacy scores to the spatial points' attribute table.
  sptsDataframe_data <-
    sptsDataframe@data %>%
    dplyr::mutate(
      Ph.ID = sprintf(
        "%s_%s",
        ID.pref, ID
      )
    ) %>%
    dplyr::left_join(
      long.long.df.res.distm.rank_1.merge_mesh_on_pharm
    )

  test.sptsDataframe <- sptsDataframe

  # Keep only the requested weight variables and sum the score per pharmacy.
  test.sptsDataframe@data <-
    sptsDataframe_data %>%
    dplyr::filter(
      weight %in% weight.var_name
    ) %>%
    ddply(
      .(
        ID.pref,
        ID,
        Numb.FullTime,
        Numb.PartTime,
        Ph.ID,
        weight
      ),
      function(D){
        val = sum(D$val)
        names(val) <- "val"
        return(val)
      }
    )

  print(unique(test.sptsDataframe@data$ID.pref))

  # Default palette: quantile colouring of the summed scores.
  if(is.null(rbPal)){
    if(is.null(vec.prob_q)){vec.prob_q <- c(0.0, 0.3, 0.8, 0.9,1.0)}
    rbPal <- leaflet::colorQuantile(
      palette = "RdYlBu",
      domain = test.sptsDataframe@data$val,
      probs = vec.prob_q,
      reverse = TRUE
    )
  }

  long.long.df.res.distm.rank_1.merge_mesh_on_pharm$Col <-
    rbPal(
      long.long.df.res.distm.rank_1.merge_mesh_on_pharm$val
    )

  test.sptsDataframe@data$Col <-
    rbPal(
      test.sptsDataframe@data$val
    )

  Col.uni <- unique(
    test.sptsDataframe@data$Col
  )

  # NOTE(review): `alg.score` is neither an argument nor defined locally; it
  # must exist in the calling environment — TODO confirm.
  print(
    sprintf(
      "Output File is: %s",
      sprintf(
        "%s_%s.rank_%s.algscore_%s.pdf",
        prefix.pdf_output,
        paste(
          unique(
            test.sptsDataframe@data$weight
          ),collapse = "_"
        ),
        rank.restrict,
        alg.score
      )
    )
  )

  print(unique(test.sptsDataframe@data$Col))

  # NOTE(review): the printed name above uses collapse = "_" but the pdf()
  # call below uses sep = " & ", so the announced and actual file names can
  # differ when several weights are given.
  pdf(
    sprintf(
      "%s_%s.rank_%s.algscore_%s.pdf",
      prefix.pdf_output,
      paste(
        unique(
          test.sptsDataframe@data$weight
        ),sep = " & "),
      rank.restrict,
      alg.score
    ),
    width = 20, height = 20
  )

  # Page 1: all pharmacies on the regional background.
  plot(Shape.GovRegion, col='white', lwd=0.05)
  plot(Shape.SchoolRegion, col='ivory', lwd=0.05, add=TRUE)
  graphics::points(
    spts_CommuCareCentr,
    col='black',
    pch='@',
    cex=0.5
  )
  points(
    test.sptsDataframe,
    col= test.sptsDataframe@data$Col,
    pch=2,
    cex=0.4
  )

  # One additional page per colour bucket.
  for(i in 1:length(Col.uni)){
    plot(Shape.GovRegion, col='white', lwd=0.05)
    plot(Shape.SchoolRegion, col='ivory', lwd=0.05, add=TRUE)
    graphics::points(
      spts_CommuCareCentr,
      col='black',
      pch='@',
      cex=0.5
    )
    graphics::points(
      test.sptsDataframe[test.sptsDataframe@data$Col==Col.uni[i],],
      col= test.sptsDataframe@data$Col[
        test.sptsDataframe@data$Col==Col.uni[i]
      ],
      pch=2,
      cex=1
    )
  }

  # Histogram ---------------------------------------------------------------
  # Scores for the selected weights, faceted by prefecture (log10 x-axis).
  ggdata <-
    long.long.df.res.distm.rank_1.merge_mesh_on_pharm %>%
    dplyr::filter(weight %in% weight.var_name) %>%
    mutate(
      ID.pref = gsub('(.+)_(.+)', '\\1', Ph.ID)
    ) %>%
    ggplot(
      aes(x=val)
    )

  plot(
    ggdata +
      geom_histogram(aes(fill=Col)) +
      scale_fill_identity(guide = "legend") +
      scale_x_continuous(trans = 'log10') +
      facet_grid(ID.pref~., scales = 'free_y') +
      ggtitle(label = paste(weight.var_name, sep = " & ")) +
      theme_bw()
  )

  # Same histogram over all weights merged.
  ggdata <-
    long.long.df.res.distm.rank_1.merge_mesh_on_pharm %>%
    #    dplyr::filter(weight==weight.var_name) %>%
    mutate(
      ID.pref = gsub('(.+)_(.+)', '\\1', Ph.ID)
    ) %>%
    ggplot(
      aes(x=val)
    )

  plot(
    ggdata +
      geom_histogram(aes(fill=Col)) +
      scale_fill_identity(guide = "legend") +
      scale_x_continuous(trans = 'log10') +
      facet_grid(ID.pref~., scales = 'free_y') +
      ggtitle(label = "Merged") +
      theme_bw()
  )

  dev.off()

  # Echo back the inputs (including the palette actually used).
  return(list(
    fn.RData.loc_score,weight.var_name,rbPal, vec.prob_q)
  )
}
be8ba8c76116794673fbfc0a4d3acab4949d11f9
bd23162e4b8c3c779557160a774bffb765adce86
/shiny/ui.R
ce59873063dc3f2d8e6b6f59c217fa4dd58aed9c
[ "MIT" ]
permissive
ktmud/github-life
a8ab2ee91c85c2a62a348f6764742dcf1b00c338
421e46f9832879bb8c81d8731d3524ef20fc3065
refs/heads/master
2021-01-19T22:24:48.671526
2017-11-11T18:50:26
2017-11-11T18:50:26
88,812,727
3
0
null
null
null
null
UTF-8
R
false
false
3,523
r
ui.R
# shinydashboard UI for the "GitHub Life" app: sidebar navigation, a
# repository-explorer tab, and static overview/people/org tabs.

# == Sidebar ----------------------
sidebar <- dashboardSidebar(
  sidebarMenu(
    menuItem(
      "Overview",
      icon = icon("globe"),
      tabName = "overview"
    ),
    menuItem(
      selected = TRUE,
      "Explore a repository",
      icon = icon("book"),
      tabName = "single-repo"
    ),
    menuItem(
      "Repository Groups",
      icon = icon("git"),
      tabName = "repo"
    ),
    menuItem("People", icon = icon("users"), tabName = "usr"),
    menuItem("Organizations", icon = icon("sitemap"), tabName = "org")
  )
)

# === Main body ----------
# Pre-render the overview markdown to an HTML fragment at UI build time.
library(rmarkdown)
render("www/overview.Rmd", html_fragment(), quiet = TRUE)

repo_tab <- fluidRow(
  column(
    width = 12,
    h2("Explore different repository groups"),
    p("A few fun facts about our top repositories.")
  )
)

# Repository explorer: selectize picker + meta/detail boxes + two plotly
# panels (activity timeline, issue-event breakdown).
single_repo_tab <- div(
  fluidRow(
    column(
      width = 6,
      class = "col-lg-4",
      box(
        width = NULL,
        selectizeInput(
          "repo", NULL, NULL,
          selected = "twbs/bootstrap",
          options = list(
            maxOptions = 100,
            valueField = 'repo',
            labelField = 'repo',
            create = FALSE,
            searchField = c("name", "repo", "description"),
            # Custom item rendering is supplied as raw JS from disk.
            render = I(read_file("www/selectize_render.js")),
            placeholder = "Pick a repository..."
          )
        ),
        uiOutput("repo_meta")
      )
    ),
    column(
      width = 6,
      class = "col-lg-8",
      box(
        width = NULL,
        uiOutput("repo_detail")
      )
    )
  ),
  fluidRow(
    column(
      width = 12,
      box(
        width = NULL,
        id = "repo-activity",
        title = div(
          "Activity timeline of",
          htmlOutput("repo_fullname", container = strong)
        ),
        plotlyOutput("repo_timeline", height = "350px"),
        div(
          class = "info",
          "Number of commits from top contributors and number of new issues reported and new stargazers added each week.",
          tags$br(),
          "Number of stargazers are hidden by default, and is scaled with",
          tags$code("n * mean(issues) / mean(stars)"),
          ". The data sometimes is incomplete because GitHub only returns 40,000 records at most."
        )
      )
    ),
    column(
      width = 12,
      box(
        width = NULL,
        id = "repo-issue-events",
        title = div(
          "Issue events breakdown"
        ),
        plotlyOutput("repo_issue_events", height = "350px"),
        div(
          class = "info",
          'Issue events break down by',
          tags$a(
            href = "https://developer.github.com/v3/issues/events/#events-1",
            target = "_blank",
            "event types"
          ),
          ". Showing up to only 40,000 events."
        )
      )
    )
  )
)

body <- dashboardBody(
  tags$head(
    tags$link(rel = "stylesheet", type = "text/css", href = "style.css")
  ),
  shinyjs::useShinyjs(),
  tabItems(
    tabItem(
      tabName = "overview",
      div(class = "readable", HTML(read_file("www/overview.html")))
    ),
    tabItem(tabName = "repo", repo_tab),
    tabItem(tabName = "single-repo", single_repo_tab),
    tabItem(tabName = "usr", h2("People")),
    tabItem(tabName = "org", h2("Organizations") )
  ),
  # Static fragments and client-side JS appended after the tab content.
  HTML(read_file("www/disqus.html")),
  tags$script(read_file("www/app.js"))
)

shiny_ui <- dashboardPage(
  title = "Github Life",
  dashboardHeader(title = div(
    icon("github-alt"),
    "GitHub Life"
  )),
  sidebar,
  body
)
569d78769d03e477e165e6ed9fb4955c9b911454
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
/data/genthat_extracted_code/fuzzr/vignettes/fuzzr.R
6db63d671fd2011c5b95c6f31c78850497307fd7
[]
no_license
surayaaramli/typeRrh
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
66e6996f31961bc8b9aafe1a6a6098327b66bf71
refs/heads/master
2023-05-05T04:05:31.617869
2019-04-25T22:10:06
2019-04-25T22:10:06
null
0
0
null
null
null
null
UTF-8
R
false
false
1,855
r
fuzzr.R
# Extracted (purled) R code from the fuzzr package vignette: demonstrates
# fuzz-testing a small function's arguments with fuzz_function()/p_fuzz_function().

## ------------------------------------------------------------------------
# Toy function under test: repeat x n times, joined by delim.
my_function <- function(x, n, delim = " - ") {
  paste(rep(x, n), collapse = delim)
}
my_function("fuzz", 7)

## ---- results = "asis"---------------------------------------------------
library(fuzzr)

# Note that, while we are specifically fuzz testing the 'n' argument, we still
# need to provide an 'x' argument to pass along to my_function(). We do not have
# to supply a delimiter, as my_function() declares a default value for this
# argument.
my_fuzz_results <- fuzz_function(my_function, "n", x = 1:3, tests = test_all())

# Produce a data frame summary of the results
fuzz_df <- as.data.frame(my_fuzz_results)
knitr::kable(fuzz_df)

## ------------------------------------------------------------------------
# Inspect individual fuzz results: the call made and the value returned.
fuzz_call(my_fuzz_results, n = "dbl_single")
fuzz_value(my_fuzz_results, n = "dbl_single")

fuzz_call(my_fuzz_results, n = "date_single")
# Hm, dates can be coerced into very large integers. Let's see how long this
# result is.
nchar(fuzz_value(my_fuzz_results, n = "date_single"))
# Oh dear.

## ---- results = "asis"---------------------------------------------------
# Harden the function with an input assertion, then re-fuzz.
my_function_2 <- function(x, n, delim = " - ") {
  assertthat::assert_that(assertthat::is.count(n))
  paste(rep(x, n), collapse = delim)
}

# We will abbreviate this check by only testing against double and date vectors
fuzz_df_2 <- as.data.frame(
  fuzz_function(my_function_2, "n", x = "fuzz", tests = c(test_dbl(), test_date())))
knitr::kable(fuzz_df_2)

## ---- results = 'asis'---------------------------------------------------
# Fuzz several arguments at once with p_fuzz_function().
p_args <- list(
  x = list(
    simple_char = "test",
    numbers = 1:3
  ),
  n = test_all(),
  delim = test_all())

pr <- p_fuzz_function(my_function_2, p_args)
prdf <- as.data.frame(pr)
knitr::kable(head(prdf))
bddda7f64b92049fd6c990acea360ddcf90e2f2b
a7aff7f5192b861e715482bfed80bd91711f69c9
/tests/testthat/test_3_AOItransitions.R
121e3db501a3164659d63614f204a98c6dea6223
[]
no_license
cran/eyeRead
47699e1338fe956c4d7bb265291d0e8b69e7b166
834b380b715dcea97746f88b14903d4d4dbcc2b3
refs/heads/master
2022-12-03T19:18:26.352237
2020-08-07T12:00:02
2020-08-07T12:00:02
286,204,707
0
0
null
null
null
null
UTF-8
R
false
false
560
r
test_3_AOItransitions.R
context( "AOItransitions" ) test_that( "specFile is found", { expect_true( T ) } ) test_that( "Function returns the correct error when AOI is contains less than two elements", { expect_error( AOItransitions( AOI = c(1)), regexp = some_errors$AOI_short ) expect_error( AOItransitions( AOI = character()), regexp = some_errors$AOI_short ) } ) test_that( "Function returns the correct results", { results <- AOItransitions( some_Data$single_AOI_col$AOI ) expect_equal( results, some_results$transitions ) } )
6166046a728a0a4eaa0aaf95eae2c28bd96d0bb8
6f4abe6d0fc4a57a70ca7ea4bf1c494c8c1cff1f
/cachematrix.R
7e690ddf1d45f79a27ca9eb2872e08969af7298d
[]
no_license
nik3011/ProgrammingAssignment2
8c3f04ace7cd17f8137a04c95a9e8a49eb01d92d
d8b2ba26fcc0f5fbd96e924a4da1d7a6272fba0a
refs/heads/master
2021-01-11T07:10:20.217645
2016-10-31T12:30:19
2016-10-31T12:30:19
72,410,996
0
0
null
2016-10-31T06:58:07
2016-10-31T06:58:04
null
UTF-8
R
false
false
637
r
cachematrix.R
## makeCacheMatrix builds a "cache matrix" object: a list of closures that
## store a matrix and memoize its inverse. cacheSolve computes the inverse,
## returning the cached copy when one is already available.

## Create a cache-matrix object wrapping `x`.
## Returns a list with set/get (matrix) and setzerse/getzerse (inverse).
makeCacheMatrix <- function(x = matrix()) {
  ## BUG FIX: the original used `z <<- NULL`, which superassigns to the
  ## *global* environment instead of creating a local cache slot. All
  ## cache-matrix objects then shared one global `z`, so one object's
  ## cached inverse could be returned for a different matrix. A plain
  ## local assignment gives each object its own cache.
  z <- NULL
  set <- function(y){
    x <<- y
    z <<- NULL          # invalidate the cached inverse on update
  }
  get <- function() x
  setzerse <- function(zerse) z <<- zerse
  getzerse <- function() z
  list(set=set,get=get,setzerse=setzerse,getzerse=getzerse)
}

## Return the inverse of the matrix held in cache-matrix `x`, computing and
## caching it on first use. Extra arguments are forwarded to solve().
cacheSolve <- function(x, ...) {
  z <- x$getzerse()
  if(!is.null(z)) {
    message("getting cached data.")
    return(z)
  }
  data <- x$get()
  z <- solve(data, ...)  # forward ... to solve() (original dropped them)
  x$setzerse(z)
  z
}
29080ee48d6ea548359c4a448517f9add1d399ca
cab57cb34db27f86f8b3615c4d79da588f8b8ce7
/tmp-tests/add-columns.R
a67dd090a6c5b10b9ab57b25b713770f3281bb49
[]
no_license
privefl/bigstatsr
ed943f3e444732d551a2dea23379876b6c4185dd
778ffd44b2bda0715bd345511c3379f4c0b78b26
refs/heads/master
2022-11-06T10:10:09.883359
2022-10-14T07:28:28
2022-10-14T07:28:28
68,272,648
160
36
null
2022-04-12T10:55:03
2016-09-15T06:50:50
R
UTF-8
R
false
false
311
r
add-columns.R
# Scratch benchmark: compare creating a 5000x5000 FBM (file-backed matrix,
# bigstatsr) in one go versus growing a 1-column FBM with addColumns() and
# re-opening the enlarged backing file.
N <- M <- 5000
system.time({
  X <- FBM(N, M)                 # allocate full backing file up front
})
file.size(X$backingfile)

system.time({
  # file.create(tmp <- tempfile())
  X2 <- FBM(N, 1)                # start with a single column
  # Append M - 1 columns of 8-byte (double) elements to the backing file.
  addColumns(X2$backingfile, N, M - 1, 8)
  # Re-map the grown file as an N x M FBM without recreating the .bk file.
  X2 <- FBM(N, M, backingfile = sub("\\.bk$", "", X2$backingfile),
            create_bk = FALSE)
})
file.size(X2$backingfile)
c8ece7df287dfbc3fac9595cdc5db7a1c863c107
b0df31777c389b953bf9bb3e9371ec48893b039f
/Computer Adaptive Testing.R
c2563f01fe942ef413be892aee4c94ca0a104f36
[]
no_license
meyerjoe-R/Spoketh
bd1f15f61b418c64c369b71556540dde730b1396
5940476c29ac2625bf7a9e47c1635417df089da2
refs/heads/main
2023-06-28T06:56:15.137647
2021-07-26T17:45:32
2021-07-26T17:45:32
323,737,355
0
0
null
null
null
null
UTF-8
R
false
false
2,391
r
Computer Adaptive Testing.R
# Demo of computerized adaptive testing with the catR package:
# single simulated CAT runs, multi-respondent simulations, and a
# polytomous (graded response model) example.
# NOTE(review): install.packages() inside a script and require() for loading
# are anti-patterns; prefer installing once and library(catR).
install.packages("catR")
require(catR)

#load data
data(tcals)
?tcals

#create a bank, take out content balancing info
bank <- as.matrix(tcals[,1:4])

#how should it start?
start <- list(nrItems = 1, theta = 0)

#create item select parameters, maximum likelihood and maximum fisher
test <- list(method = "ML", itemSelect = "MFI")

#EAP for final, Expected a posteriori (EAP) estimator
final <- list(method = "EAP")

#stop at precision of .3
stop <- list(rule = c("precision", "length"), thr = c(0.3, 50))

# One simulated respondent with true theta = 0.
res <- randomCAT(0, bank, start = start, test = test, stop = stop,
                 final = final)
res
plot(res)

#simulation, with 10 respondents, create a list
thetas <- rnorm(10)

#dichotomous model, thetas are -4 to 4 in simulation
ressim <- simulateRespondents(thetas = thetas, itemBank = bank, model = NULL,
                              start = start, stop = stop, final = final)
ressim
plot(ressim)
#Accuracy shows us how good it is
#test length
#conditional test length: theta and length
#examine mean standard error with theta
#exposure rate shows us how items are shown across people, and if it reached the limit
#rmse for between true and estimated ability levels

#create another response matrix
stop2 <- list(rule = "classification", thr = 1.0, alpha = 0.05)

#another example with different stopping parameters
ressim2 <- simulateRespondents(thetas = thetas, itemBank = bank, model = NULL,
                               start = start, stop = stop2)
ressim2
plot(ressim2)

#polytomous
#create some more items, 100, maximum number of response categories is 3
#model is graded response
m.GRM <- genPolyMatrix(items = 100, nrCat = 5, model = "GRM")

#create a matrix
m.GRM <- as.matrix(m.GRM)

# CAT options
#thetas, start select is maximum fisher information
start3 <- list(theta = c(-1, 0), startSelect = "MFI")

#method is maximum likelihood, select is maximum expected information
test3 <- list(method = "ML", itemSelect = "MEI")

#stop based on precision
stop3 <- list(rule = "precision", thr = 0.4)

#EAP, predicted distribution of scores based on previous information
final3 <- list(method = "EAP")

# CAT test
res3 <- randomCAT(0, m.GRM, model = "GRM", start = start3,
                  test = test3, stop = stop3, final = final3)
res3

citation()
install.packages("ggfortify")
require(ggfortify)
citation("ggfortify")
c278264e9aaaba24bd556628e5dc23dae945e2ce
af312ff271bd46826cecf652ea5a48610c1781d5
/posterior_output.R
0930a183f6668fe92572962cfce4dc4573e835d4
[]
no_license
daniel-trejobanos/AsynchronousGibbs
49bcc85eeb04996ba18a99179079661915ef326b
558d64df24ee8702328032eca45f405678ae2493
refs/heads/master
2020-05-21T07:30:09.474315
2019-08-08T08:37:50
2019-08-08T08:37:50
185,963,022
0
0
null
null
null
null
UTF-8
R
false
false
20,721
r
posterior_output.R
## TODO : Add the level of parallelism as a grouping variable and add this facet to the plots ##' Posterior analysis code ##' First we select the data path, lets start with a single data set size data.path <- "/scratch/temporary/dtrejoba/AsynchronousGibbs/data/2k5k" ##' We read the short runs only files.path <- list.files(data.path, pattern ="s_.*_short", full.names = T) matrices <- list.files(data.path, pattern = "s_.*_short") require(coda) require(tidyverse) ##' We still havent had the time to properly implement simulation based callibration #source('./sbc.R') ##' We save ta data frame with the file paths and descriptors of the experiment chains <- data.frame(directory = files.path, matrices = matrices) %>% separate( "matrices", into = c("matrix","sparsity","method","chain","chain-type"), sep="_") %>% separate( "matrix", into = c("matrix", "phi"), sep = "phi") %>% separate( "matrix", into = c("type","variables"), sep ="1k") %>% mutate_at(vars(directory),as.character) %>% as_tibble() ##' function to extract a family of parameters from a mcmc chains param.family <- function( chain.matrix, family){ # if(is.character(family)) as.mcmc(select(chain.matrix,matches(family))) # else # stop("error, family has to be character") } ##' we create a path to save the figures or tempoary files from the posterior summary pipeline posterior.path <- paste(data.path,"/post",sep ="") dir.create(posterior.path) ##' Function to estimate the gelman R for each of the parameters in the parameter family chains.gelman <- function(mcmc.chains,variable.family, multivariate = F){ mcmc.chains %>% group_by(type,phi,method) %>% do({ function(y){ tmp <- coda::gelman.diag( mcmc.list(lapply(y, function(x){ param.family(data.table::fread(x),variable.family) })), autoburnin =T, multivariate = multivariate) tmp.df <- as.data.frame(tmp$psrf) tmp.df$var <- rownames(tmp$psrf) tmp.df$"Upper C.I." 
<- NULL tmp.df %>% spread(var, "Point est.") } } (.$directory) ) } ##' we compute the effective sample size for each #tmp<-chains.gelman(chains,"sigma|mu") ##' function to compute the effective sample size for the chains in the mcmcm.chains tibble for all the parameters of the given family chains.effective <- function(mcmc.chains,variable.family, burnin){ tmp <- lapply(mcmc.chains$directory, FUN = function(x) { coda::effectiveSize(param.family(data.table::fread(x), variable.family)[burnin:3999, ])}) as_tibble(cbind(chains,do.call(rbind,tmp))) } ##' function to compute the average effective sample size over chains in the same experiment avg.effective <- function(mcmc.chains, variable.family,burnin){ chains.effective(mcmc.chains, variable.family,burnin) %>% group_by(type,phi,method,sparsity) %>% summarise_if(is.numeric , c(mean,sd)) } #tmp <- avg.effective(chains, "sigma|mu",2000) ##' Here we want to read the chains and thin them and compute the parameters summaries thin.and.summ <- function(mcmc.chains,variable.family,length.out=1001,burnin){ mcmc.chains %>% group_by(type,phi,method,sparsity) %>% do({ function(y){ tmp <- lapply(y, function(x){ param.family(data.table::fread(x),variable.family)[burnin:3999,] }) tmp <- lapply(tmp,function(z){ z[ceiling(seq(from = 1, to =nrow(z), length.out = length.out )),] } ) as_tibble(t(colMeans(as.matrix(do.call(rbind,tmp))))) } } (.$directory) ) } #thin.and.summ(chains,"sigma|mu",500,2000) ##' we compute the variance explained and its statistics for all the chains in the data frame variance.explained <- function(mcmc.chains, length.out=1001,burnin){ mcmc.chains %>% group_by(type,phi,method,sparsity) %>% do({ function(y){ tmp <- lapply(y, function(x){ param.family(data.table::fread(x),"sigma")[burnin:3999,] }) Tmp <- lapply(tmp,function(z){ z[ceiling(seq(from = 1, to =nrow(z), length.out = length.out )),] } ) tmp <- lapply(tmp, function(w) { w[,'sigmaG[1]']/(rowSums(w)) }) summary.tmp <- do.call(rbind,tmp) tibble(Min = 
min(summary.tmp), "0.25.q" = quantile(summary.tmp,0.25), Median = median(summary.tmp), Mean = mean(summary.tmp), "0.75.q" =quantile(summary.tmp,0.75), Max = max(summary.tmp)) } } (.$directory) ) } #variance.explained(chains,500,2000) ##' we compute the number of markers in the model markers.in.model <- function(mcmc.chains,length.out,burnin){ mcmc.chains %>% group_by(type,phi,method,sparsity) %>% do({ function(y){ tmp <- lapply(y, function(x){ param.family(data.table::fread(x),"comp")[burnin:3999,] }) tmp <- lapply(tmp,function(z){ z[ceiling(seq(from = 1, to =nrow(z), length.out = length.out )),] }) tmp <- lapply(tmp,function(z){ ifelse(z > 0, 1, 0) } ) #data.frame(mean.m.i.m =mean(rowSums(do.call(rbind,tmp)))) summary.tmp <- rowSums(do.call(rbind,tmp)) tibble(Min = min(summary.tmp), "0.25.q" = quantile(summary.tmp,0.25), Median = median(summary.tmp), Mean = mean(summary.tmp), "0.75.q" =quantile(summary.tmp,0.75), Max = max(summary.tmp)) } } (.$directory) ) } #markers.in.model(chains, 500,2000) ##' this functions yields a tibble with markers as columns and the PIP as values pip <- function(mcmc.chains, length.out,burnin){ mcmc.chains %>% group_by(type,phi,method,sparsity) %>% do({ function(y){ tmp <- lapply(y, function(x){ param.family(data.table::fread(x),"comp")[burnin:3999,] }) tmp <- lapply(tmp,function(z){ z[ceiling(seq(from = 1, to =nrow(z), length.out = length.out )),] }) tmp <- lapply(tmp,function(z){ ifelse(z > 0, 1, 0) } ) #data.frame(mean.m.i.m =mean(rowSums(do.call(rbind,tmp)))) tmp <- do.call(rbind,tmp) as_tibble(data.frame(t(colSums(tmp)/nrow(tmp)))) } } (.$directory) ) } #pip(chains,500,2000) true.values <- function() { linear.models <- list.files(data.path,pattern="_linear",full.names =T) model.list <- data.frame(linear.models= sapply(linear.models,FUN =basename)) %>% separate("linear.models",sep= "_", into=c("matrix","linear","model")) model.list$linear <- NULL model.list$model <- NULL model.list$directory <- rownames(model.list) 
rownames(model.list) <- NULL model.types <- as_tibble(model.list) %>% separate( "matrix", into = c("matrix", "phi"), sep = "phi") %>% separate( "matrix", into = c("type","variables"), sep ="1k") %>% mutate_at(vars(directory),as.character) as_tibble(cbind( model.types ,do.call(rbind,lapply(linear.models,function(x){ load(x) beta.colnames <- sprintf("b[%s]",1:ncol(b)) tmp.b <- b colnames(tmp.b) <- beta.colnames as_tibble(cbind (data.frame(sparsity =c("1s","2s","3s") ,var.eps = var.e, var.g = var.g, var.y =var.y, VE = var.g/var.y), as.data.frame(tmp.b))) } ) ) ) ) } ##' table with the ve and the summary statistics of the thinned and post burn in samples VE_summary <- function(mcmc.chains){ true.values() %>% select(type,phi,sparsity,VE) %>% inner_join(variance.explained(mcmc.chains,500,2000),By=c("type","phi","sparsity")) } #VE_summary(chains) ##' this dplyr query gets the true betas in tall format true.betas <- true.values() %>% select(type, phi, sparsity, var.y ,contains('b[')) %>% gather("coefficient","value", -type, -phi , -sparsity, -var.y) %>% mutate(coefficient = str_extract(coefficient,"[0-9]{1,5}")) chains.betas <- thin.and.summ(chains,'beta',500,2000) %>% gather("coefficient","estimate", -type, -phi, -method, -sparsity ) %>% mutate(coefficient = str_extract(coefficient, "[0-9]{1,5}")) #' here we compute the RMSE for the betas true.betas %>% inner_join(chains.betas) %>% group_by(type,phi,method,sparsity) %>% summarise(RMSE = sqrt(mean((value-var.y*estimate)^2 ))) %>% #here we do the TP, FP , TN, FN pip.summ <- function(true.b, chains.b, threshold){ true.b %>% inner_join(chains.b) %>% group_by(type, phi, method,sparsity) %>% summarise(TP = sum(ifelse(pip >= threshold & value ==1,1,0 )), FP= sum(ifelse(pip >= threshold & value ==0,1,0)), TN = sum(ifelse(pip <= threshold & value ==0,1,0)), FN = sum(ifelse(pip <= threshold & value ==1,1,0))) %>% mutate(thres = threshold) } auc <- function(x,y){ from <- min(x, na.rm=TRUE) to <- max(x, na.rm=TRUE) values <- 
approx(x, y, xout = sort(unique(c(from, to, x[x > from & x < to])))) res <- 0.5 * sum(diff(values$x) * (values$y[-1] + values$y[-length(values$y)])) res } PRcurve<- function(true.b, chains.b){ thresholds <- seq(0,1,by = 0.01) pr <- bind_rows(lapply(thresholds, function(x)pip.summ(true.b, chains.b, x))) pr <- pr %>% mutate(precision = TP/(TP+FP), recall = TP/(TP+FN), no_skill = ifelse(sparsity =="1s",0.1,ifelse(sparsity=="2s",0.5,0.9)) ) %>% unite( "experiment",c(type,phi)) %>% na.omit() p <- pr %>% ggplot()+ geom_line(aes(x = recall, y= precision, color = experiment, linetype = method )) + theme_bw()+ scale_colour_grey() + facet_grid(rows = vars(sparsity) )+ geom_hline(aes(yintercept = no_skill)) list(plot = p, table = pr, auc = auc(pr$recall, pr$precision)) } get.PRcurve <- function(true.b, chains.pip) { true.betas.bool <- true.b %>% mutate(value = ifelse(value !=0 , 1,0)) chains.betas.bool <- chains.pip %>% gather("coefficient","pip",-type, -phi, -method, -sparsity) %>% mutate(coefficient = str_extract(coefficient,"[0-9]{1,5}")) PRcurve(true.betas.bool,chains.betas.bool) } ##' we plot the PR curves faceted by the sparsity level tmp <- get.PRcurve(true.betas, pip(chains,500,2000)) #here we are going to plot the Gelman Rhat for the betas tmp <- chains.gelman("beta") #note, very expensive operation gather.tmp<- grep("beta",names(tmp),value=T) tmp2 <- tmp %>% gather(coefficient,Rhat, gather.tmp) %>% unite("experiment",c(type,phi,method)) #tmp2 <- tmp2 %>% filter(experiment == "RAR_0.5_async") p <- ggplot() + geom_histogram(tmp2,mapping =aes(x=Rhat)) +facet_grid(rows=vars(experiment))+ theme_bw() ggsave("testplot2.pdf",p) ### here we work with the short chains to plot the convergence time short.files <- list.files(data.path,pattern = "short",full.names = T) chains.short <- data.frame(directory = short.files, matrices = matrices) %>% separate( "matrices", into = c("matrix","method","chain"), sep="_") %>% separate( "matrix", into = c("matrix", "phi"), sep = "phi") %>% 
separate( "matrix", into = c("type","variables"), sep ="1k") %>% mutate_at(vars(directory),as.character) variance.explained.trace <- function(length.out=1001){ chains.short %>% group_by(type,phi,method) %>% do({ function(y){ tmp <- lapply(y, function(x){ param.family(data.table::fread(x),"sigma") }) #no need to thin for short chains #tmp <- lapply(tmp,function(z){ # z[ceiling(seq(from = 1, to =nrow(z), length.out = length.out )),] # } #) tmp <- lapply(tmp, function(w) { w[,'sigmaG[1]']/(rowSums(w)) }) tmp <- do.call(rbind,tmp) n.chains <- nrow(tmp) colnames(tmp) <- 1:ncol(tmp) as_tibble(tmp) %>% mutate(chains= 1:n.chains) %>% gather("iteration","VE", -chains) } } (.$directory) ) } VE.trace <- variance.explained.trace() VE.trace <- VE.trace %>% unite(type,c(type,phi)) %>% type_convert(cols(iteration ="i")) p <- VE.trace %>% filter(type == "RAR_0.8") %>% ggplot() + geom_path(aes(x=iteration,y=VE,group =chains)) + facet_grid(rows = vars(chains),cols =vars(method)) + theme_bw() ggsave("testplot4.pdf",p) trace.async <- VE.trace %>% filter(type == "RAR_0.8", method == "async") %>% group_by(iteration)%>% summarise(mean_VE = mean(VE)) %>% mutate(method = "async") trace.sync <- VE.trace %>% filter(type == "RAR_0.8", method == "sync") %>% group_by(iteration)%>% summarise(mean_VE = mean(VE)) %>% mutate(method = "sync") p <- bind_rows(trace.async, trace.sync) %>% ggplot() + geom_path(aes(x=iteration,y=mean_VE)) + facet_grid(rows =vars(method)) + theme_bw() ggsave("testplot5.pdf",p) ############################################################################### ## ## SPEED FILES CODE ############################################################################## speed.files.path <- "/scratch/temporary/dtrejoba/AsynchronousGibbs" speed.files <- list.files(speed.files.path,pattern ="speed",full.names =T) figure.path <- "./PenReg_DiscPrior_Resid_Async/figures/" get.times <- function(x){ sync <- system(paste( c("grep typeppbayes -A1022", x),collapse =" "),intern =T) fils <- sync 
async <- system(paste( c("grep typeasyncppbayes -A1022", x),collapse =" "),intern =T) sync <- grep( "Start reading |iteration", sync,value=T) async <- grep( "Start reading |iteration", async,value=T) sync <- grep("iteration",sync,value=T) async <- grep("iteration",async,value=T) sync <- lapply(sync,function(x){strsplit(x,split= ":")[[1]][2] }) async <- lapply(async,function(x){strsplit(x,split= ":")[[1]][2]}) sync <- lapply(sync,function(x){ k <- str_extract_all(x, "\\([^()]+\\)")[[1]] # Get the parenthesis and what is inside k <- substring(k, 2, nchar(k)-2) }) async <- lapply(async,function(x){ k <- str_extract_all(x, "\\([^()]+\\)")[[1]] # Get the parenthesis and what is inside k <- substring(k, 2, nchar(k)-2) }) covar <- unlist(lapply(grep("Start reading",fils,value=T),function(x) {rep(strsplit(basename(x),split='X')[[1]][1],1000)})) pheno <- unlist(lapply(grep("mcmc-samples",fils, value=T),function(x) {rep(str_extract(x , "_[1-3]s_" ) ,1000) })) if(!is.null(covar)) tibble(iter.times_sync = as.numeric(unlist(sync)), iter.times_async = as.numeric(unlist(async)), experiment= basename(x) , covariance = covar, phenotype = pheno) else NA } execution.times <- lapply(speed.files,get.times) execution.times[[length(execution.times)]]<- NULL all.times <- do.call(rbind,execution.times) all.times <- all.times %>% mutate(covariance = str_sub(covariance,1,4)) all.times <- all.times %>% mutate(covariance = ifelse(covariance == "RAR1", "RAR",covariance)) all.times <- all.times %>% mutate(covariance = ifelse(covariance == "RAR2", "RAR",covariance)) all.times <- all.times %>% separate(experiment,into = c("size", "parallelism"), sep = "speed" ) all.times <- all.times %>% na.omit() mean.times <- all.times %>% group_by(size, parallelism,covariance, phenotype) %>% summarise(mean_sync= mean(iter.times_sync), mean_async = mean(iter.times_async)) speed.ups <- mean.times %>% mutate(speed_up = mean_sync / mean_async) single.threaded <- all.times %>% group_by(size,covariance,phenotype) %>% 
summarise(single.thread = mean(iter.times_sync[parallelism =="_1_1_1_1"])) single.speedup <- mean.times %>% inner_join(single.threaded) %>% mutate(sync = ( single.thread /mean_sync) ,async = (single.thread/mean_async )) %>% gather(type, speed.up, c(sync,async)) %>% ungroup() %>% mutate( parallelism = fct_recode(parallelism, "1" = "_1_1_1_1", "2" = "_2_2_2_2", "4" = "_4_4_4_4", "8" = "_8_8_8_8", "16" = "_16_16_16_16", "32" = "_32_32_32_32")) %>% mutate(parallelism = as.numeric(as.character(parallelism)))%>% mutate(size = fct_recode(size, "1"="1k1k_", "2"="1k5k_", "3"="2k5k_", "4"="10k50k_")) %>% mutate_at(vars(size), funs(factor(., levels=unique(.)))) single.speedup$size <- factor(single.speedup$size, levels = c("1","2","3","4")) #this is the good plot ! s.labels <- c("10% non-zero", "50% non-zero", "90% non-zero") names(s.labels) <- c("_1s_","_2s_","_3s_") size.labels <- c("1e3x1e3","1e3x5e3","2e3x5e3","1e4x5e4") names(size.labels) <- c("1","2","3","4") t.q <-single.speedup %>% ggplot(aes(x = parallelism, y = speed.up, colour =covariance, shape =type)) + geom_hline(yintercept = 1) + scale_shape_manual(values= c(3,4)) + geom_line(aes(linetype = type)) + scale_color_grey() + facet_grid(rows =vars(size),cols = vars(phenotype), labeller= labeller(phenotype=s.labels, size = size.labels))+ theme_bw() + theme(panel.border = element_blank(), panel.grid.major = element_blank(), panel.grid.minor = element_blank(), axis.line = element_line(colour = "black")) + xlab("Threads")+ ylab(expression("Speedup(",frac("m.i.t. parallel"," m.i.t. single thread"),")"))) ggsave(paste0(figure.path,"single_speedup_par.pdf"),t.q)
7d7f6a238cca3f377775ae5f3c74e91f03acb765
5ab2f745bef6ed1c6bb7f8bd08a94296cb611833
/man/FindHighVar.Rd
d5fdaa0005a2d55b340491cfa8d033e1d867ca92
[ "MIT" ]
permissive
XQBai/CCNMF
f447426c6269921d110c7bd247a170ff4a9e5d88
ac503d7dac205d4a275a4b809ddc22fb2aef258b
refs/heads/master
2023-05-26T01:19:26.462686
2023-05-24T19:17:44
2023-05-24T19:17:44
237,326,870
10
2
null
null
null
null
UTF-8
R
false
true
293
rd
FindHighVar.Rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/analysis.R \name{FindHighVar} \alias{FindHighVar} \title{Find high variable genes for each cluster by vst} \usage{ FindHighVar(Data, label, number) } \description{ Find high variable genes for each cluster by vst }
9893af4233fa09a172cb0d5290a4075e406af7ad
c418d9cf880f0d72aff9f8e6379841a6075a7350
/man/print.qic.select.Rd
45fdafebffbe5e072fb9a533840a5403c7424af3
[]
no_license
cran/rqPen
a214037b20cdbedd6b4ecd806b9865b7adea56be
af9b426431d8cdc8d4661fc94d3316547383e83f
refs/heads/master
2023-07-22T11:29:00.428914
2023-07-17T15:10:02
2023-07-17T17:31:56
25,911,914
2
3
null
null
null
null
UTF-8
R
false
true
472
rd
print.qic.select.Rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/mainFunctions.R \name{print.qic.select} \alias{print.qic.select} \title{Print a qic.select object} \usage{ \method{print}{qic.select}(x, ...) } \arguments{ \item{x}{qic.select object} \item{...}{optional arguments} } \value{ Prints the coefficients of the qic.select object } \description{ Print a qic.select object } \author{ Ben Sherwood, \email{ben.sherwood@ku.edu} }
eaaecddae3025b8ba691cd313f820f0d8fafe11d
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
/data/genthat_extracted_code/tmcn/examples/toPinyin.Rd.R
88c748e485f0698858b8924fdabc55fae4b1a4a0
[]
no_license
surayaaramli/typeRrh
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
66e6996f31961bc8b9aafe1a6a6098327b66bf71
refs/heads/master
2023-05-05T04:05:31.617869
2019-04-25T22:10:06
2019-04-25T22:10:06
null
0
0
null
null
null
null
UTF-8
R
false
false
213
r
toPinyin.Rd.R
# Auto-extracted example for tmcn::toPinyin
# (converts a Chinese text to pinyin format; keyword: string).
library(tmcn)

# Example usage: transliterate a sample sentence.
sample_text <- "the quick red fox jumps over the lazy brown dog"
toPinyin(sample_text)
54e2d3a8607b789afe4d13133a3e075f8f592387
8a5453a31e02c4c519025b1800bf56fcb3cfa981
/PromoterPrimerDesign/scripts/ampliconInfo4PCRcheck.R
8adc1ee808b8815a95925c2d9192835ec4d91eb2
[]
no_license
jsemple19/dSMF_AmpliconPrimerDesign
635ea8d8f55347bc80101e32a894bac0d918f58b
c9d3dd10c520b4d71a5c9098bda4560e697c3e82
refs/heads/master
2023-01-02T15:40:36.364548
2020-10-21T10:20:22
2020-10-21T10:20:22
null
0
0
null
null
null
null
UTF-8
R
false
false
3,516
r
ampliconInfo4PCRcheck.R
## Script to get the list of amplicons and their lengths, and print them in the
## order they appear on a gel, to help with checking amplicon PCR results.

# Chromosome X primer set: keep only rows flagged as finally selected.
chosenX <- read.csv("./XdcPrimers/DM_DE_promoter_test_primers_WS250_subset.csv",
                    header = TRUE, stringsAsFactors = FALSE)
chosenX <- chosenX[chosenX$finalSelected == "y", ]

library(GenomicFeatures)

# Build a TxDb from the WS250 annotation and look up the strand of each
# selected gene, matched by WormBase gene id.
txdb <- makeTxDbFromGFF("../../../GenomeVer/annotations/c_elegans.PRJNA13758.WS250.annotations.gff3")
genes <- genes(txdb)
genes <- genes[mcols(genes)$gene_id %in% chosenX$Gene_WB_ID, ]
i <- match(chosenX$Gene_WB_ID, mcols(genes)$gene_id)
chosenX$strand <- as.vector(strand(genes[i, ]))
names(chosenX)[6] <- "orientation"

# Autosomal primer set: same filtering and strand lookup.
finalChosenA <- read.csv("./AdcPrimers/DM_DE_promoter_test_primers_WS250_withRanks_subset.csv",
                         header = TRUE, stringsAsFactors = FALSE)
finalChosenA <- finalChosenA[finalChosenA$finalSelected == "y", ]
names(finalChosenA)[6] <- "NameFromBEDfile"
names(finalChosenA)[7] <- "Amplicon"
genes <- genes(txdb)
genes <- genes[mcols(genes)$gene_id %in% finalChosenA$Gene_WB_ID, ]
i <- match(finalChosenA$Gene_WB_ID, mcols(genes)$gene_id)
finalChosenA$strand <- as.vector(strand(genes[i, ]))

# Combine both primer sets, tagging each row with its chromosome class.
primerData <- rbind(
  cbind("chr" = rep("chrX", 48), "strand" = chosenX$strand,
        chosenX[, 4:19]),
  cbind("chr" = rep("Autosomes", 48), "strand" = finalChosenA$strand,
        finalChosenA[, c(6, 7, 5, 8:20)])
)

#### Get length of amplicons to check PCR results.
# One row per primer (forward then reverse), laid out on a 96-well plate:
# rows A-H x columns 1-12.
AmpliconLengths <- data.frame(
  "primerNames" = c(paste0("X", 1:48, "_f"), paste0("A", 1:48, "_f"),
                    paste0("X", 1:48, "_r"), paste0("A", 1:48, "_r")),
  "seq" = c(primerData$Fwseq, primerData$Rvseq),
  "amplicon" = c(primerData$Amplicon, primerData$Amplicon),
  "rowNum" = rep(rep(c("A", "B", "C", "D", "E", "F", "G", "H"), each = 12), 2),
  "colNum" = rep(rep(1:12, 8), 2)
)

# Amplicon coordinates look like "chr:start-end"; width = end - start.
# vapply (rather than sapply) guarantees a character vector even for
# degenerate input.
startEnd <- vapply(strsplit(as.character(AmpliconLengths$amplicon), ":",
                            fixed = TRUE),
                   `[[`, character(1), 2)
start <- as.numeric(vapply(strsplit(startEnd, "-", fixed = TRUE),
                           `[[`, character(1), 1))
end <- as.numeric(vapply(strsplit(startEnd, "-", fixed = TRUE),
                         `[[`, character(1), 2))
AmpliconLengths["width"] <- end - start
# Keep only the first 96 rows (the forward primers; one full plate).
AmpliconLengths <- AmpliconLengths[c(1:96), ]
AmpliconLengths

# Interleave two vectors element-by-element: v1[1], v2[1], v1[2], v2[2], ...
interleave <- function(v1, v2) {
  ord1 <- 2 * seq_along(v1) - 1
  ord2 <- 2 * seq_along(v2)
  c(v1, v2)[order(c(ord1, ord2))]
}

# Shared worker for the two printColPair* variants below: interleave the
# amplicon widths of two plate columns, with wells ordered by plate row
# either ascending or descending.  Returns a named numeric vector
# (names = plate row letters, repeated per column pair).
.colPairWidths <- function(data, col1, col2, decreasing) {
  r1 <- data[data$colNum == col1, ]
  r2 <- data[data$colNum == col2, ]
  ord1 <- order(r1$rowNum, decreasing = decreasing)
  ord2 <- order(r2$rowNum, decreasing = decreasing)
  out <- interleave(r1$width[ord1], r2$width[ord2])
  names(out) <- rep(r1$rowNum[ord1], each = 2)
  out
}

# Widths for a column pair in plate-loading order (rows H..A, descending).
printColPair_plate <- function(data, col1, col2) {
  .colPairWidths(data, col1, col2, decreasing = TRUE)
}

printColPair_plate(AmpliconLengths, 1, 2)
printColPair_plate(AmpliconLengths, 3, 4)
printColPair_plate(AmpliconLengths, 5, 6)
printColPair_plate(AmpliconLengths, 7, 8)
printColPair_plate(AmpliconLengths, 9, 10)
printColPair_plate(AmpliconLengths, 11, 12)

# Widths for a column pair in gel order (rows A..H, ascending).
printColPair <- function(data, col1, col2) {
  .colPairWidths(data, col1, col2, decreasing = FALSE)
}

printColPair(AmpliconLengths, 1, 2)
printColPair(AmpliconLengths, 3, 4)
printColPair(AmpliconLengths, 5, 6)
printColPair(AmpliconLengths, 7, 8)
printColPair(AmpliconLengths, 9, 10)
printColPair(AmpliconLengths, 11, 12)
b060e606793d7ceb7de2718e498d284e9a2ce8d2
8c9c011750934e22946b97efdb25368358768677
/Google-read-csv/Google-read-csv.R
2a54d47d09c83f9152898387afcad2f049b38e8c
[]
no_license
ccowens/OddsAndEnds
b4e3fdaab3e688163303284587e5c6c1dd44d215
79f3c0b0a340c1d96e84d6262b34af6d47e60eeb
refs/heads/master
2021-01-22T03:06:17.024635
2015-07-12T04:26:32
2015-07-12T04:26:32
38,942,747
0
0
null
null
null
null
UTF-8
R
false
false
1,493
r
Google-read-csv.R
GoogleReadCsv <- function(sheet.ID, ...) {
  # Read a public Google spreadsheet into a data frame, using the trick of
  # appending export?format=csv to the sheet URL.
  # Based on discussion in:
  # http://stackoverflow.com/questions/22873602/importing-data-into-r-from-google-spreadsheet
  #
  # sheet.ID: the document id copied from the browser URL bar. For example:
  #   1CO8L7ly0U1CO8L7ly0U
  # out of:
  #   https://docs.google.com/spreadsheets/d/1CO8L7ly0U1CO8L7ly0U/edit#g
  # The sheet has to be public (at least "anyone with the link").
  # ...: passed through to read.csv() (e.g. stringsAsFactors, as.is,
  #   colClasses; see ?read.csv).
  # Returns: a data.frame with the sheet contents.

  # Combine the pieces to make the export URL.
  file.URL <- paste0("https://docs.google.com/spreadsheets/d/", sheet.ID,
                     "/export?format=csv")

  # getURL from the RCurl package can read an HTTPS page.  Fail loudly if the
  # package is missing: bare require() returns FALSE instead of erroring.
  if (!requireNamespace("RCurl", quietly = TRUE)) {
    stop("GoogleReadCsv() needs the 'RCurl' package; please install it.",
         call. = FALSE)
  }
  sheet <- RCurl::getURL(file.URL, ssl.verifypeer = FALSE)

  # If the response starts with HTML markup we did not get CSV back.
  # (The original check compared against the literal "<!DOCTYPE>", which never
  # matches real doctypes such as "<!DOCTYPE html>" and was case-sensitive.)
  if (grepl("^\\s*<(!doctype|html)", sheet, ignore.case = TRUE)) {
    stop("The file text looks like the beginning of an HTML file, not a CSV file. You may have the wrong ID for the sheet, or this may not be a public link.")
  }

  # textConnection lets read.csv parse the in-memory string as if it were a
  # CSV file on disk; close the connection even if read.csv errors.
  con <- textConnection(sheet)
  on.exit(close(con), add = TRUE)
  read.csv(con, ...)
}
6779d96b3c57125a22057f244ca15350fe646164
1115ed2b4ab8073a9fb7937387ff2758258ed4dd
/UnivariateMethods/Ankeny_validate.R
ffbabf9ddc0779414d0abdbeb960fb7885fe5978
[]
no_license
chxyself25/Change-pointDetection
e343bc8cf47e141f579ab6f6b136257087f72a72
4a1b1b60074efb887d1f71c2b83ca3630fc9e409
refs/heads/master
2022-12-03T10:29:51.145653
2020-08-08T21:07:10
2020-08-08T21:07:10
285,974,409
0
0
null
null
null
null
UTF-8
R
false
false
2,463
r
Ankeny_validate.R
# Run change-point estimation on the Ankeny validation sample and save results.

# Source every R script (*.R/*.r/*.S/*.s/*.Q/*.q) found in `path`,
# optionally echoing each file name as it is loaded.
sourceDir <- function(path, trace = TRUE, ...) {
  for (nm in list.files(path, pattern = "[.][RrSsQq]$")) {
    if (trace) cat(nm, ":")
    source(file.path(path, nm), ...)
    if (trace) cat("\n")
  }
}

library(doParallel)
# Cluster-specific library location for user-installed packages.
library(signal, lib.loc = "/home/xchang/R/x86_64-pc-linux-gnu-library/3.6/")
library(dplyr, lib.loc = "/home/xchang/R/x86_64-pc-linux-gnu-library/3.6/")
library(fdapace, lib.loc = "/home/xchang/R/x86_64-pc-linux-gnu-library/3.6/")
library(Rcpp, lib.loc = "/home/xchang/R/x86_64-pc-linux-gnu-library/3.6/")
sourceDir("./Funcs/", trace = FALSE)
sourceDir("../fdapace/R/", trace = FALSE)
registerDoParallel(cores = 25)

# Pre-computed null distribution (ECDF) of the Kd statistic.
Kd_dist <- readRDS("../Kd_simulation/Kd_ecdf_25.rds")

names.used <- c('pID', 'doy', 'year', 'B1', 'B2', 'B3', 'B4', 'B5', 'B7')
FVEs <- c(55:99, 99.9)
methods <- c('IME', 'ISE', 'CSE')
#bands <- c("b2", "b3", "b4", "b5", "b7")
bands <- c("ndvi", "mndwi", "b7")

## Read (or build and cache) the subset of data matching the reference points.
if (!file.exists("Ankeny_validate_data.rds")) {
  ref <- read.csv("Ankeney_ValSmp_interpreted.csv")
  ids <- ref$pointID
  # Collect matching rows from each of the 14 segment files, then bind once
  # (avoids the quadratic rbind-in-a-loop pattern of the original).
  dat <- do.call(rbind, lapply(1:14, function(seg) {
    akn.seg <- readRDS(paste0("./Ankeny", seg, ".rds"))
    subset(akn.seg, pID %in% ids)
  }))
  saveRDS(dat, file = "Ankeny_validate_data.rds")
} else {
  dat <- readRDS("Ankeny_validate_data.rds")
  # NOTE(review): the point-id column elsewhere is 'pID' (see names.used), so
  # dat$pointID may be NULL here -- confirm against the cached file's columns.
  ids <- unique(dat$pointID)
}

cat("doing estimation by regression method", "\n")
regres <- URegEstAll(dat, var.names = names.used, bands, year.range = NULL,
                     save.results = TRUE, use.results = FALSE,
                     dir = "./Ankeny_validation/")
saveRDS(regres, file = "./Ankeny_validation/ankeny_validate_reg3.rds")

cat("doing estimation by IME method", "\n")
# First pass at FVE = 55 with save.results = TRUE caches intermediates that
# the sweep below re-reads (use.results = TRUE).  Note c(NULL, NULL) is just
# NULL; kept verbatim for call compatibility.
res.seg <- UfchangepointAll(dat, var.names = names.used, bands,
                            Kdist = Kd_dist, detect.method = NULL,
                            est.method = 'IME', year.range = NULL,
                            FVE = c(55, 55), K = c(NULL, NULL), alpha = 0.05,
                            save.results = TRUE, use.results = TRUE,
                            dir = "./Ankeny_validation/")

# Sweep every method/FVE combination; accumulate results in a list and bind
# once at the end instead of growing `res` with rbind inside the loop.
res.list <- list()
for (m in methods) {
  for (f in FVEs) {
    res.seg <- UfchangepointAll(dat, var.names = names.used,
                                bands = c('ndvi', 'mndwi', 'b7'),
                                Kdist = Kd_dist, detect.method = NULL,
                                est.method = m, year.range = NULL,
                                FVE = c(f, f), K = c(NULL, NULL), alpha = 0.05,
                                save.results = FALSE, use.results = TRUE,
                                dir = "./Ankeny_validation/")
    res.seg$method <- m
    res.seg$fve <- f
    res.list[[length(res.list) + 1]] <- res.seg
  }
}
res <- do.call(rbind, res.list)
saveRDS(res, file = "./Ankeny_validation/ankeny_validate_fchange3.rds")
19994fb35ca7b906f8b7a35ff9af296fe73b732d
7b2cacf99fe488c001d09b6a51eac439bdfa5272
/analysis/gene_scoring/ensemble_classifier.R
564e91d0c62af864b14719077b48bb67aa469ba0
[ "LicenseRef-scancode-unknown-license-reference", "MIT" ]
permissive
talkowski-lab/rCNV2
d4fc066478db96322b7aa062f4ece268098b9de3
7e97d4c1562372a6edd7f67cdf36d4167da216f8
refs/heads/master
2023-04-11T08:48:40.884027
2023-01-25T15:59:13
2023-01-25T15:59:13
178,399,375
14
4
MIT
2022-03-16T16:42:46
2019-03-29T12:13:54
R
UTF-8
R
false
false
5,453
r
ensemble_classifier.R
#!/usr/bin/env Rscript

######################
#    rCNV Project    #
######################

# Copyright (c) 2020 Ryan L. Collins and the Talkowski Laboratory
# Distributed under terms of the MIT License (see LICENSE)
# Contact: Ryan L. Collins <rlcollins@g.harvard.edu>

# Ensemble classification of gene scores

options(stringsAsFactors = FALSE, scipen = 1000)


#################
### FUNCTIONS ###
#################

# Load a single gene score stats .tsv.
# path: tsv with gene name in column 1 and two numeric score columns.
# prefix: optional model name prepended ("prefix.") to the score columns.
# Returns a data.frame: gene + two numeric columns.
load.scores.single <- function(path, prefix = NULL) {
  x <- read.table(path, header = TRUE, sep = "\t", comment.char = "")[, 1:3]
  colnames(x)[1] <- "gene"
  if (!is.null(prefix)) {
    colnames(x)[-1] <- paste(prefix, colnames(x)[-1], sep = ".")
  }
  x[, 2:3] <- apply(x[, 2:3], 2, as.numeric)
  return(x)
}

# Load all gene score stats listed (one path per line) in scores_file.in.
# Model names are taken from the second-to-last dot-delimited token of each
# path.  Returns list(scores = merged data.frame keyed on gene,
#                     models = character vector of model names).
load.scores <- function(scores_file.in) {
  sl <- read.table(scores_file.in)[, 1]
  models <- as.vector(sapply(sl, function(path) {
    parts <- unlist(strsplit(path, split = ".", fixed = TRUE))
    as.character(parts[length(parts) - 1])
  }))
  scores <- load.scores.single(sl[1], models[1])
  # BUG FIX: the original unconditional `for(i in 2:length(sl))` runs
  # backwards (2, 1) when only one score file is listed.
  if (length(sl) > 1) {
    for (i in 2:length(sl)) {
      scores <- merge(scores, load.scores.single(sl[i], models[i]),
                      by = "gene", all = TRUE, sort = FALSE)
    }
  }
  return(list("scores" = scores, "models" = models))
}

# Compute an ROC curve for one score column.
# stats: data.frame with a `gene` column and the score column named by `score`.
# truth.genes / neg.genes: character vectors of positive / negative genes.
# steps: score cutoffs to evaluate; eq: "gt" keeps score > cutoff, else < .
# Returns list(roc.res = per-cutoff data.frame,
#              roc.opt = c(cutoff, accuracy) at the point closest to (0, 1)).
roc <- function(stats, score, truth.genes, neg.genes,
                steps = seq(1, 0, -0.001), eq = "gt") {
  x <- data.frame("score" = stats[, which(colnames(stats) == score)],
                  "true" = stats$gene %in% truth.genes,
                  "neg" = stats$gene %in% neg.genes)
  roc_res <- as.data.frame(t(sapply(steps, function(k) {
    if (eq == "gt") {
      idxs <- which(x$score > k)
    } else {
      idxs <- which(x$score < k)
    }
    n.true <- length(which(x$true[idxs]))
    ftrue <- n.true / length(which(x$true))
    n.neg <- length(which(x$neg[idxs]))
    fneg <- n.neg / length(which(x$neg))
    fother <- length(which(!x$true[idxs])) / length(which(!x$true))
    fall <- length(idxs) / nrow(x)
    acc <- n.true / (n.true + n.neg)
    # Euclidean distance to the ideal ROC corner (fneg = 0, ftrue = 1).
    d.opt <- sqrt((0 - fneg)^2 + (1 - ftrue)^2)
    return(c(k, fall, fother, ftrue, fneg, acc, d.opt))
  })))
  colnames(roc_res) <- c("cutoff", "frac_all", "frac_other",
                         "frac_true", "frac_neg", "accuracy", "d.opt")
  roc.opt.idx <- head(which(roc_res$d.opt == min(roc_res$d.opt, na.rm = TRUE)), 1)
  return(list("roc.res" = roc_res,
              "roc.opt" = c("cutoff" = roc_res$cutoff[roc.opt.idx],
                            "accuracy" = roc_res$accuracy[roc.opt.idx])))
}

# Calculate per-model weights as each model's ROC-optimal accuracy,
# normalized to sum to 1.  eq = "lt" because lower BFDP is better.
get.weights <- function(scores, models, pos_genes, neg_genes,
                        eval.metric = "pred_bfdp", eq = "lt") {
  weights <- sapply(models, function(model) {
    roc.res <- roc(scores, score = paste(model, eval.metric, sep = "."),
                   pos_genes, neg_genes, eq = eq)
    as.numeric(roc.res$roc.opt[2])
  })
  weights <- weights / sum(weights)
  names(weights) <- models
  return(weights)
}

# Weighted average of per-model BFDPs for the ensemble classifier.
get.ensemble.bfdps <- function(scores, models, weights) {
  weighted.bfdps <- sapply(models, function(model) {
    w <- weights[which(names(weights) == model)]
    bfdps <- scores[, which(colnames(scores) == paste(model, "pred_bfdp", sep = "."))]
    w * bfdps
  })
  apply(weighted.bfdps, 1, sum)
}

# Scale ensemble BFDPs to [0, 1] scores: upper-tail normal CDF of the
# z-scored BFDPs, shifted/scaled so the range is exactly [0, 1].
get.ensemble.scores <- function(scores) {
  cdfs <- as.vector(pnorm(scale(scores$ensemble.pred_bfdp, center = TRUE, scale = TRUE),
                          lower.tail = FALSE))
  cdfs <- cdfs - min(cdfs, na.rm = TRUE)
  cdfs / max(cdfs, na.rm = TRUE)
}


#####################
### RSCRIPT BLOCK ###
#####################
require(optparse, quietly = TRUE)
require(flux, quietly = TRUE)
require(Hmisc, quietly = TRUE)

# List of command-line options
option_list <- list()

# Get command-line arguments & options
args <- parse_args(OptionParser(usage = "%prog scores.tsv truth.genes false.genes out.tsv",
                                option_list = option_list),
                   positional_arguments = TRUE)
opts <- args$options

# Checks for appropriate positional arguments
if (length(args$args) != 4) {
  stop("Four positional arguments: scores.tsv, truth.genes, false.genes, and out.tsv\n")
}

# Writes args & opts to vars
scores_file.in <- args$args[1]
pos_genes.in <- args$args[2]
neg_genes.in <- args$args[3]
outfile <- args$args[4]

# Read gene scores
scores <- load.scores(scores_file.in)
models <- scores$models
scores <- scores$scores

# Read truth sets
pos_genes <- read.table(pos_genes.in)[, 1]
neg_genes <- read.table(neg_genes.in)[, 1]

# Determine weights for each model
weights <- get.weights(scores, models, pos_genes, neg_genes)

# Compute ensemble BFDPs, normalized scores, and percentile
scores$ensemble.pred_bfdp <- get.ensemble.bfdps(scores, models, weights)
scores$ensemble.score <- get.ensemble.scores(scores)
# BUG FIX: order() returns a sorting permutation, not per-gene ranks, so the
# original assigned genes the wrong percentiles; rank()/n is the percentile.
scores$ensemble.quantile <- 100 * rank(scores$ensemble.pred_bfdp) / nrow(scores)

# Reformat output file and write out
out.df <- data.frame("gene" = scores$gene,
                     "pred_bfdp" = scores$ensemble.pred_bfdp,
                     "score" = scores$ensemble.score,
                     "quantile" = scores$ensemble.quantile)
write.table(out.df, outfile, col.names = TRUE, row.names = FALSE,
            sep = "\t", quote = FALSE)
36c81017f06aefe255c25283347ff68a20dad0cb
9b3bf1dd0178c131b80d944cdaff5c33d1c62bd5
/ModelSelection.R
b877c33401caf377e353ff5f0bffd713f08a2797
[]
no_license
himankjn/Regression-Models
7f7265f980f457a0a4df659d2e43bfdbd6049b26
74f09bdc960e4f6c015dbf412c5a9120e1d52580
refs/heads/master
2022-02-23T10:17:39.463472
2019-10-09T14:03:45
2019-10-09T14:03:45
196,245,456
0
0
null
null
null
null
UTF-8
R
false
false
1,247
r
ModelSelection.R
# Model selection notes ----
# In multiple linear regression, choosing the model is a key question, e.g.:
# a model with two categories, same slope but different intercepts (plain
# multiple regression) vs. a model with different slopes AND intercepts
# (with interaction).  Which variables to include?
# - Including unnecessary (correlated) variables inflates variance — check
#   with vif() from the car package.
# - Excluding necessary variables introduces bias.

data(swiss)
library(car)

# Fit the full model and inspect variance inflation factors.
fit <- lm(Fertility ~ ., data = swiss)
vif(fit)
# A VIF of ~2.28 for Agriculture means its inclusion inflates the variance
# 2.28x relative to it being uncorrelated with the other predictors.  Note
# Infant.Mortality has VIF near 1, being mostly uncorrelated with the rest.
# Prefer variables with low variance inflation factors (VIF).

# Compare nested models and test them jointly with anova().
fit1 <- lm(Fertility ~ Agriculture, data = swiss)
fit2 <- lm(Fertility ~ Agriculture + Education + Examination, data = swiss)
fit3 <- lm(Fertility ~ ., data = swiss)
anova(fit1, fit2, fit3)
# The Df column shows variables added at each step; Pr(>F) indicates whether
# the added variables are significant, hence important for the model.
# Use anova() between nested models to assess significance.
05f95b1a3b10c26f90fc4f2b0705bda9575c07a5
6e3c81e90730c199a0536854374010f3527bc292
/analysis_scripts/variable_maps.R
139047fda92e3ce7643ae7628598c77b633be9a4
[]
no_license
camposfa/plhdbR
0d01acc33a5d4878c9ddf3d2857396d837dc8fca
4472c22d45835dde05debf4c0659c21a19c3bdcd
refs/heads/master
2020-05-17T23:51:35.521989
2017-09-12T13:26:02
2017-09-12T13:26:02
32,407,178
2
0
null
null
null
null
UTF-8
R
false
false
1,225
r
variable_maps.R
# Lookup tables mapping internal codes to display labels, plus the combined
# ggplot2 labeller built from them.

# Climate-variable codes (long form) -> display labels.
var_map <- setNames(
  c("Rainfall", "Temperature", "ENSO Index", "Drought Index",
    "IOD Index", "IOD Index (spc)", "ENSO Index (spc)"),
  c("rainfall", "mean_temp", "index_nino3.4", "spei_03",
    "dmi", "dmi_spc", "enso_spc")
)

# Alternative climate-variable codes -> display labels.
var_map2 <- setNames(
  c("Rainfall", "Temperature", "ENSO Index", "IOD Index"),
  c("rain_monthly_mm", "tavg_anomaly", "nino3.4", "dmi")
)

# Model-term codes -> display labels.
term_map <- setNames(
  c("Juveniles vs. Adults", "Newborns vs. Adults", "Climate Variable",
    "Juvelines:Climate", "Newborns:Climate"),
  c("age_classjuvenile", "age_classnewborn", "value",
    "value:age_classjuvenile", "value:age_classnewborn")
)

# Study-site codes -> study-species labels.
site_map <- setNames(
  c("Sifaka", "Capuchin", "Muriqui", "Blue Monkey",
    "Baboon", "Chimpanzee", "Gorilla"),
  c("beza", "ssr", "rppn-fma", "kakamega",
    "amboseli", "gombe", "karisoke")
)

# Seasonal-quarter codes -> display labels.
quarter_map <- setNames(
  c("Calendar Y", "Coldest Q", "Driest Q", "Warmest Q", "Wettest Q"),
  c("annual", "coldest_q", "driest_q", "warmest_q", "wettest_q")
)

# Age-class codes -> display labels.
age_map <- setNames(
  c("Infant", "Juvenile", "Adult", "Age Classes Combined"),
  c("newborn", "juvenile", "adult", "combined")
)

# Combined labeller covering every faceting variable used in the plots.
global_labeller <- labeller(
  age_class = age_map,
  var = var_map,
  term = term_map,
  site = site_map,
  grp = site_map,
  quarter = quarter_map
)
3a5cb82d4424318080d2738e20c3623080686de3
e8305a8f19a31ea5def1ebcbddeb3a600769399a
/scripts/0_occurrences.R
03abca58755a8c90e812526d9cb013187dd3bbbf
[]
no_license
AndreaSanchezTapia/sanchez-tapia_ffgc_2020
db6f5541ab236ba4e61fae021db36aa98261626a
f9752177bdd60576ff88f9c1c8f630d9389ddcc4
refs/heads/master
2022-12-23T23:04:04.682201
2020-10-06T01:56:47
2020-10-06T01:56:47
254,691,753
0
0
null
null
null
null
UTF-8
R
false
false
5,861
r
0_occurrences.R
# reads community matrix com.ord <- read.table("./data/comord.txt", header = T, row.names = 1) # list of names and taxonomy nomes <- read.table("./data/nomes.txt", header = T, sep = "\t") # not all names collected are in the plots nomes.filt <- nomes[nomes$cod %in% names(com.ord), ] # creates genera genera <- strsplit(as.character(nomes.filt$nome), split = " ") #creates a data.frame with genera and specific epithet nomes.filt$gen <- t(data.frame(strsplit(as.character(nomes.filt$nome), " "))[1, ])[, "1"] nomes.filt$epiteto <- t(data.frame(strsplit(as.character(nomes.filt$nome), " "))[2, ])[, "2"] head(nomes.filt) # occurrence download library(rgbif) library(dplyr) nombres <- droplevels(nomes.filt$nome) #This will take a while and might change with time---- #for each name for (i in seq_along(nombres)) { print(nombres[i]) #suggest a key key <- name_suggest(q = nombres[i], rank = 'species')$key print(key) if (!is.null(key)) { occs <- list() for (k in 1:length(key)) { occs[[k]] <- occ_search( taxonKey = key[k], limit = 100000, hasCoordinate = TRUE, basisOfRecord = "PRESERVED_SPECIMEN", hasGeospatialIssue = F, return = 'data', fields = "minimal" ) } print(lapply(occs,dim)) if (any(!is.null(lapply(occs,dim)))) { dim.null <- lapply(occs, function(x) {!is.null(dim(x))}) occs.f <- subset(occs, dim.null == T) occs.f <- bind_rows(occs.f) occs.f <- occs.f[!duplicated(occs.f[,c(1,3,4)]),] print(dim(occs.f)) write.csv( x = data.frame(occs.f), file = paste0("./results/geo/data/", nombres[i], ".csv") ) } } else { cat(paste("No key found for", nombres[i], "\n")) } cat(paste(nombres[i], "DONE", "\n")) } #some manual searches due to name errors which(nombres == "Schinus terebinthifolius") levels(nombres)[102] <- "Schinus terebinthifolia" which(nombres == "Xylosma glaberrima") levels(nombres)[121] <- "Xylosma glaberrimum" nombres[121] which(nombres == "Maytenus samydaeformis") levels(nombres) ##name corrections nomes.filt$nome <- nombres nomes.filt$epiteto <- 
t(data.frame(strsplit(as.character(nomes.filt$nome), " "))[2, ])[, "2"] View(nomes.filt)#good ## makes the maps to calculate AOO library(maps) library(adehabitatHR) library(data.table) data(worldMapEnv) ## data cleaning nomes.filt$nome <- droplevels(nomes.filt$nome) # downloads template worldclim rasters library(raster) tmean <- getData('worldclim', var = 'tmean', res = 2.5) #1 removes NAs (points outside the rasters)---- for (i in seq_along(nombres)) { (sp <- nombres[i]) if (file.exists(paste0("./results/geo/data/", sp, ".csv"))) { a <- read.csv(paste0("./results/geo/data/", sp, ".csv"), header = TRUE, row.names = 1) coord <- a[,c("decimalLongitude", "decimalLatitude")] b <- raster::extract(tmean, coord) c <- cbind(a, b) d <- c[complete.cases(b),] write.csv( x = d, file = paste0("./results/geo/clean/", nombres[i], ".csv") ) } } # creates a data.frame with number of records, convex hull areas areas2 <- nomes.filt areas2$N <- 0 areas2$polygon <- 0 areas2$polygon90 <- 0 head(areas2) for (i in seq_along(nombres)) { (sp <- areas2$nome[i]) if (file.exists(paste0("./results/geo/clean/", sp, ".csv"))) { a <- read.csv(paste0("./results/geo/clean/", sp, ".csv"), header = TRUE, row.names = 1) # maps map('world') #equador abline(h = 0, lty = 2) axis(1, las = 1) axis(2, las = 1) box() #occurrence points points(a$decimalLongitude, a$decimalLatitude, cex = 1, pch = 21, col = "red") mtext(text = nomes.filt$nome[i], side = 3, font = 3, cex = 1) # already complete cases #coords coord <- cbind(a$decimalLongitude, a$decimalLatitude) areas2$N[i] <- nrow(coord) if (nrow(coord) < 5) { cat(paste("not enough records for",sp)) } else { mcp.sp <- mcp(SpatialPoints(coord), percent = 100) mcp.sp95 <- mcp(SpatialPoints(coord), percent = 95) mcp.sp90 <- mcp(SpatialPoints(coord), percent = 90) plot(mcp.sp90, add = T, border = "red", lwd = 2) areas2$polygon[i] <- mcp.sp$area areas2$polygon90[i] <- mcp.sp90$area } print(areas2[i,]) } else { cat(paste("no occurrences were found for " , sp)) } } 
write.table(areas2, "./data/areas2.txt") #######so far only polygons but we will use AOO instead######---- #count pixels----- write.csv(nomes.filt,"./data/nomes_clean.csv") nomes.filt <- read.csv("./data/nomes_clean.csv", row.names = 1) areas2hd <- read.table("./data/areas2.txt") tibble::glimpse(areas2hd) nombres <- areas2hd$nome setdiff(areas2hd$nome, nombres) # tmean is already in disk tmean <- getData('worldclim', var = 'tmean', res = 2.5) # reads each clean occ table pixel.sum <- list() for (i in seq_along(areas2hd$nome)) { (sp <- areas2hd$nome[i]) if (file.exists(paste0("./results/geo/clean/", sp, ".csv"))) { a <- read.csv(paste0("./results/geo/clean/", sp, ".csv"), header = TRUE, row.names = 1) coord <- cbind(a$decimalLongitude, a$decimalLatitude) #rasterizes the presence points and counts pixels presences <- raster::rasterize(coord, tmean[[1]], field = 1) pa <- getValues(presences) pixel.sum[[i]] <- data.frame(nome = sp, pixelsum = sum(pa, na.rm = T)) } else { pixel.sum[[i]] <- data.frame(nome = sp, pixelsum = 0) } } pixel.sum <- data.table::rbindlist(pixel.sum) #joins all the area-related variables and saves a dataframe that will be used later to calculate geographical area CWM--- areas_tudo <- dplyr::left_join(areas2hd, pixel.sum, by = "nome") write.csv(areas_tudo, file = "./data/areas_all.csv")
b4f65ea942e3e361cc07e9365390e2965dcdca81
5e1e1088cd7876fa4c49448e8df4ea6dac14940e
/build_helper.R
042c008c9d69bc55093e2bcb95e7b8ccd01dd50b
[]
no_license
gkoumasd/shinySatRDay
982a59562e7d73e6a4e9076bf5d9870f641000ff
8d9bbb4b840f0d7137c7a464f6a56ab44ff1bfbc
refs/heads/master
2021-01-18T06:32:12.910273
2016-09-03T05:42:15
2016-09-03T05:42:15
null
0
0
null
null
null
null
UTF-8
R
false
false
225
r
build_helper.R
# package build helper mypath <- # path where your files are stored #mypath <- "C:\\Users\\kross-smith\\Documents\\02_projects\\Training\\satRday\\shinyGadget" devtools::build(mypath) devtools::install(mypath)
c3cd35c5e914dd5a218aa823b24087e1dd6d664a
a4b5e0805f0dfd69654fa3a74668d575c7e9aab8
/LoadMethylation.R
f7073edcd0f64533b9330b7fd993b925044395a7
[]
no_license
peng-gang/Methylation
10fc18a65f7e86d726a0373dba3f6814e5526dd4
51036d2c26f6a7b29a2e1a9232f58cb97e44ef54
refs/heads/master
2021-05-17T16:02:32.584684
2020-04-14T19:52:33
2020-04-14T19:52:33
250,860,292
0
0
null
null
null
null
UTF-8
R
false
false
1,176
r
LoadMethylation.R
# read raw methylation data library(minfi) library(IlluminaHumanMethylationEPICanno.ilm10b4.hg19) load("data/target.RData") #read Raw data loadData <- function(target, out_dir){ # read raw data rgSet <- read.metharray.exp(targets = target) # change sample name from sentrix information to real sample name (patient id - time) old_name <- sampleNames(rgSet) if(sum(old_name != paste(target$Sentrix_ID, target$Sentrix_Position, sep="_"))!=0){ print("please check Sentrix id and Sentrix position in target file") return() } sampleNames(rgSet) <- target$Sample_Name detP <- detectionP(rgSet) pdf(file.path(out_dir, "detection.pdf")) barplot(colMeans(detP)) dev.off() qcReport(rgSet, sampNames=target$Sample_Name, sampGroups=target$Sample_Group, pdf = file.path(out_dir, "qcReport.pdf")) MSet <- preprocessRaw(rgSet) RSet <- ratioConvert(MSet, what = "both", keepCN = TRUE) GRset <- mapToGenome(RSet) predictedSex <- getSex(GRset) rlt <- list( rgSet = rgSet, predictedSex = predictedSex, pvProb = detP ) saveRDS(rlt, file.path(out_dir, "rawData.rds")) } loadData(target, "rlt/QC/")
86b18b9999278706f76c491b6e03b4d46667abac
d65d6b0d817882f198f08ebb87308bfe85e4d850
/tests/testthat/test-basic.R
2c59baafd7f87394225ac52856a295beb4eef315
[]
no_license
rdpeng/filehashsqlite
28d239efd6cf2a4a33adb119c3c20a1b3c02d361
98284b31f919239c6a2cedf08c6d4a132afd7e57
refs/heads/master
2022-05-15T06:06:48.871023
2022-05-11T16:44:46
2022-05-11T16:44:46
509,720
8
3
null
null
null
null
UTF-8
R
false
false
1,284
r
test-basic.R
context("Basic Tests") test_that("DB Creation", { dbCreate("test1", "SQLite") db <- dbInit("test1", "SQLite") expect_s4_class(db, "filehashSQLite") dbUnlink(db) }) test_that("Insert object", { dbCreate("test2", "SQLite") db <- dbInit("test2", "SQLite") set.seed(234) val <- rnorm(1000) dbInsert(db, "a", val) x <- dbFetch(db, "a") expect_identical(x, val) dbUnlink(db) }) test_that("Multi-Fetch objects", { dbCreate("test3", "SQLite") db <- dbInit("test3", "SQLite") set.seed(234) val <- rnorm(1000) dbInsert(db, "a", val) val2 <- runif(10) dbInsert(db, "b", val2) obj <- list(a = val, b = val2) m <- dbMultiFetch(db, c("a", "b")) expect_identical(m, obj) dbUnlink(db) }) test_that("Delete objects", { dbCreate("test4", "SQLite") db <- dbInit("test4", "SQLite") set.seed(234) val <- rnorm(1000) dbInsert(db, "a", val) val2 <- runif(10) dbInsert(db, "b", val2) dbDelete(db, "a") expect_identical(dbList(db), "b") expect_false(dbExists(db, "a")) expect_true(dbExists(db, "b")) dbUnlink(db) })
9def092f1ddddc76eb57b17c5a0432beb5ca6fbb
c2b9863434f1ab0893126f463248a91028eea763
/R-plots/PlotGenderRatios.R
bfba59d88ff8c65cbe444cfdcaade5e75e18bc16
[]
no_license
emmenru/women-in-smc
b38afc49e25db1d8a78d387b76423802c1dbee28
f75d93ca19497a4cc49d43386fe246747076327d
refs/heads/master
2022-12-21T23:06:49.571700
2021-09-16T15:00:45
2021-09-16T15:00:45
84,766,100
1
0
null
2022-12-09T05:23:20
2017-03-12T23:51:27
TeX
UTF-8
R
false
false
3,079
r
PlotGenderRatios.R
# plotting library library(ggplot2) library(plotly) library(dplyr) library(tidyr) library(lattice) data_stats_corrected=0 data_stats_corrected <- read.csv("~/kurser/creative_coding/women-in-smc/output_data/ICMC_Stats_Corrected.csv") # select only percentage columns myvars <- c("Year", "Female", "Male", "Unknown") data = 0 data <- data_stats_corrected[myvars] # skip last rows data = data[1:40,] # percentages data=cbind(data[1],data[c(2,4)]*100) summary(data$Female) p <- plot_ly(data, x = data$Year, y = data$Female, name = 'trace 0', type = 'scatter', mode = 'lines') %>% add_trace(x = data$Year, y = data$Unknown, name = 'trace 1', mode = 'lines+markers') data_long = 0 data_long <- gather(data, Category, Percentage, Female:Unknown, factor_key=TRUE) colors = c("midnightblue", "royalblue") # NIME & SMC # set image to 900 x 500 barchart(Percentage~Year,data=data_long,groups=Category, auto.key=list(space='right'), scales=list(x=list(rot=90,cex=1.0)), ylab=list("Percentage (%)", cex=1.5), xlab=list("Year",cex=1.5), main=list("NIME 2001-2016",cex=1.5), col=c("midnightblue", "royalblue"),par.settings=list(superpose.polygon=list(col=colors))) # ICMC # 1100 x 500 barchart(Percentage~Year,data=data_long,groups=Category, auto.key=list(space='right'), scales=list(x=list(rot=90,cex=1.0)), ylab=list("Percentage", cex=2.0), xlab=list("Year",cex=2.0), main=list("ICMC 1975-2016",cex=2.0), col=c("midnightblue", "royalblue"),par.settings=list(superpose.polygon=list(col=colors))) # Summarized results from years 2004-2016 SMC<-read.csv("~/kurser/creative_coding/women-in-smc/output_data/SMC_Stats_Corrected.csv") NIME<-read.csv("~/kurser/creative_coding/women-in-smc/output_data/NIME_Stats_Corrected.csv") ICMC<-read.csv("~/kurser/creative_coding/women-in-smc/output_data/ICMC_Stats_Corrected.csv") countvars <- c("Year", "FemaleCount", "MaleCount", "UnknownCount", "TotNames") SMC=SMC[countvars][1:13,] NIME=NIME[countvars][4:16,] ICMC=ICMC[countvars][28:40,] colnames(SMC)<- 
c("Year","FemaleCountSMC","MaleCount","UnknownCountSMC","TotNamesSMC") colnames(NIME)<- c("Year","FemaleCountNIME","MaleCount","UnknownCountNIME","TotNamesNIME") colnames(ICMC)<- c("Year","FemaleCountICMC","MaleCount","UnknownCountICMC","TotNamesICMC") allData=0 allData<-cbind(SMC,NIME,ICMC) allData$totNames=allData$TotNamesSMC+allData$TotNamesNIME+allData$TotNamesICMC allData$female=allData$FemaleCountSMC+allData$FemaleCountNIME+allData$FemaleCountICMC allData$femalePercentage=allData$female/allData$totNames summary(allData$femalePercentage) keepvars=c("Year","femalePercentage") allData=allData[keepvars] allData$femalePercentage=100*(allData$femalePercentage) # Summarized data for SMC, NIME, ICMC # 900 x 400 barchart(femalePercentage~Year,data=allData,auto.key=list(space='right'), scales=list(x=list(rot=90,cex=1.0)), ylab=list("Percentage Female (%)", cex=1.5), xlab=list("Year",cex=1.5), main=list("ICMC, SMC & NIME 2001-2016",cex=1.5), col=c("midnightblue")) # 2016 #29 + 43 + 31 = 103 female #231 + 256 + 236 = 723 names
ed9a1ff9b9287f1695b32e735eb896c96be92100
5e15ffd3f55a5139ad5c91ae597de8782b791845
/old files/climate_community_relationship_MAP.R
3d72b215b5e2a45eda6313e81f6508d1942f64f2
[]
no_license
SallyKoerner/DomDiv
a0015c253634a20fa96de4d3094e8cd6babba57c
5361d07690e3e88ea5562d48cecdbad2b19ca4b3
refs/heads/master
2022-01-28T12:15:26.641008
2022-01-21T20:44:33
2022-01-21T20:44:33
161,543,407
0
0
null
null
null
null
UTF-8
R
false
false
10,797
r
climate_community_relationship_MAP.R
setwd('C:\\Users\\lapie\\Dropbox (Smithsonian)\\working groups\\DomDiv_Workshop\\Dominance_Diversity') setwd('C:\\Users\\mavolio2\\Dropbox\\DomDiv_Workshop\\Dominance_Diversity') library(grid) library(knitr) library(kableExtra) library(lme4) library(tidyverse) ###ggplot theme set theme_set(theme_bw()) theme_update(axis.title.x=element_text(size=20, vjust=-0.35), axis.text.x=element_text(size=16), axis.title.y=element_text(size=20, angle=90, vjust=0.5), axis.text.y=element_text(size=16), plot.title = element_text(size=24, vjust=2), panel.grid.major=element_blank(), panel.grid.minor=element_blank(), legend.title=element_blank(), legend.text=element_text(size=20)) #import community metrics - site level (single RAC for a site) commSite <- read.csv('community_metrics_single_climate_Dec2018.csv')%>% group_by(block_trt)%>% mutate(Evar_scale=scale(Evar), richness_scale=scale(richness), BP_scale=scale(BP_D))%>% ungroup()%>% filter(block_trt!='China') #only 4 datapoints #figure out climate mean, midpoint, min, max #function for geometric mean gm_mean = function(x, na.rm=TRUE){ exp(sum(log(x[x > 0]), na.rm=na.rm) / length(x)) } climate <- commSite%>% group_by(block_trt)%>% summarise(min_MAP=min(bio12), max_MAP=max(bio12), mean_MAP=mean(bio12), geo_mean_MAP=gm_mean(bio12))%>% ungroup()%>% mutate(midpoint_MAP=(min_MAP+max_MAP)/2) ###models for each variable #richness models richnessModels <- commSite%>% group_by(block_trt)%>% do(model = lm(richness_scale ~ bio12, data = .))%>% mutate(R2=summary(model)$r.squared, pval=summary(model)$coefficients[2,4], slope=summary(model)$coefficients[2], slope_err=summary(model)$coefficients[2,2], f=summary(model)$fstatistic[1], df_num=summary(model)$fstatistic[2], df_den=summary(model)$fstatistic[3])%>% left_join(climate)%>% mutate(slope_sig=ifelse(pval>0.05, 0, slope)) richnessModelTable <- richnessModels%>% select(block_trt, f, df_num, df_den, pval, R2, slope)%>% rename(Block=block_trt) kable(richnessModelTable, 'html')%>% cat(., file = 
"richnessModelTableMAP.html") # #quadratic model - AIC=529.1192 # summary(quadraticRichnessModel <- lmer(richness_scale~poly(bio12,2) + (1|block_trt), commSite)) # AIC(quadraticRichnessModel) # #linear model - AIC=549.3088 # summary(linearRichnessModel <- lmer(richness_scale~bio12 + (1|block_trt), commSite)) # AIC(linearRichnessModel) #evenness models evarModels <- commSite%>% group_by(block_trt)%>% do(model = lm(Evar_scale ~ bio12, data = .))%>% mutate(R2=summary(model)$r.squared, pval=summary(model)$coefficients[2,4], slope=summary(model)$coefficients[2], slope_err=summary(model)$coefficients[2,2], f=summary(model)$fstatistic[1], df_num=summary(model)$fstatistic[2], df_den=summary(model)$fstatistic[3])%>% left_join(climate) evarModelTable <- evarModels%>% select(block_trt, f, df_num, df_den, pval, R2, slope)%>% rename(Block=block_trt) kable(evarModelTable, 'html')%>% cat(., file = "evarModelTableMAP.html") #dominance models domModels <- commSite%>% group_by(block_trt)%>% do(model = lm(BP_scale ~ bio12, data = .))%>% mutate(R2=summary(model)$r.squared, pval=summary(model)$coefficients[2,4], slope=summary(model)$coefficients[2], slope_err=summary(model)$coefficients[2,2], f=summary(model)$fstatistic[1], df_num=summary(model)$fstatistic[2], df_den=summary(model)$fstatistic[3])%>% left_join(climate) domModelTable <- domModels%>% select(block_trt, f, df_num, df_den, pval, R2, slope)%>% rename(Block=block_trt) kable(domModelTable, 'html')%>% cat(., file = "domModelTableMAP.html") #quadratic model - AIC=543.5141 summary(quadraticDominanceModel <- lmer(BP_scale~poly(bio12,2) + (1|block_trt), commSite)) AIC(quadraticDominanceModel) #linear model - AIC=563.8917 summary(linearDominanceModel <- lmer(BP_scale~bio12 + (1|block_trt), commSite)) AIC(linearDominanceModel) #compare richness and evenness compareEvenModels <- commSite%>% group_by(block_trt)%>% do(model = lm(Evar_scale ~ richness_scale, data = .))%>% mutate(R2=summary(model)$r.squared, 
pval=summary(model)$coefficients[2,4], slope=summary(model)$coefficients[2], slope_err=summary(model)$coefficients[2,2], f=summary(model)$fstatistic[1], df_num=summary(model)$fstatistic[2], df_den=summary(model)$fstatistic[3])%>% left_join(climate) #compare richness and dominance compareDomModels <- commSite%>% group_by(block_trt)%>% do(model = lm(BP_scale ~ richness_scale, data = .))%>% mutate(R2=summary(model)$r.squared, pval=summary(model)$coefficients[2,4], slope=summary(model)$coefficients[2], slope_err=summary(model)$coefficients[2,2], f=summary(model)$fstatistic[1], df_num=summary(model)$fstatistic[2], df_den=summary(model)$fstatistic[3])%>% left_join(climate) ###FIGURES! #model slopes vs aridity (comparing across blocks) #richness richnessAllFig <- ggplot(data=commSite, aes(x=bio12, y=richness_scale, color=block_trt)) + xlab('MAP') + ylab('Scaled Richness') + geom_smooth(data=subset(commSite, block_trt=='India'|block_trt=='NAmerica'|block_trt=='SAfrica'|block_trt=='Tibet_ungrazed'), method='lm', se=F) + geom_smooth(data=subset(commSite, block_trt=='Brazil'|block_trt=='China2'|block_trt=='Kenya'|block_trt=='SAmerica_ungrazed'), method='lm', linetype='dashed', se=F) + # geom_smooth(data=commSite, method = "lm", formula = y ~ x + I(x^2), color='black', size=2) + geom_point(size=5) + theme(legend.position='none') summary(lm(slope~geo_mean_MAP, data=richnessModels)) richnessSlopeFig <- ggplot(data=richnessModels, aes(x=geo_mean_MAP, y=slope, color=block_trt)) + geom_point(size=5) + geom_errorbarh(aes(xmin=min_MAP, xmax=max_MAP)) + geom_errorbar(aes(ymin=slope-slope_err, ymax=slope+slope_err)) + xlab('MAP') + ylab('Slope of Richness v MAP') + geom_hline(yintercept=0) + # geom_smooth(method='lm', size=2, color='black') + annotate("text", x=1500, y=0.01, label = "R2=0.394,\np=0.096", size=8) #richness figure pushViewport(viewport(layout=grid.layout(1,2))) print(richnessAllFig, vp=viewport(layout.pos.row=1, layout.pos.col=1)) print(richnessSlopeFig, 
vp=viewport(layout.pos.row=1, layout.pos.col=2)) #export at 1800x800 #Evar evennessAllFig <- ggplot(data=commSite, aes(x=bio12, y=Evar_scale, color=block_trt)) + geom_point(size=5) + xlab('MAP') + ylab('Scaled Evar') + geom_smooth(data=subset(commSite, block_trt=='India'|block_trt=='Kenya'), method='lm', se=F) + geom_smooth(data=subset(commSite, block_trt=='Brazil'|block_trt=='China2'|block_trt=='India'|block_trt=='NAmerica'|block_trt=='SAfrica'|block_trt=='SAmerica_ungrazed'|block_trt=='Tibet_ungrazed'), method='lm', linetype='dashed', se=F) + theme(legend.position='none') summary(lm(slope~geo_mean_MAP, data=evarModels)) evennessSlopeFig <- ggplot(data=evarModels, aes(x=geo_mean_MAP, y=slope, color=block_trt)) + geom_point(size=5) + geom_errorbarh(aes(xmin=min_MAP, xmax=max_MAP)) + geom_errorbar(aes(ymin=slope-slope_err, ymax=slope+slope_err)) + xlab('MAP') + ylab('Slope of Evar v MAP') + geom_hline(yintercept=0) + # geom_smooth(method='lm', size=2, color='black') + annotate("text", x=1500, y=-0.004, label = "R2=120,\np=0.402", size=8) #Evar figure pushViewport(viewport(layout=grid.layout(1,2))) print(evennessAllFig, vp=viewport(layout.pos.row=1, layout.pos.col=1)) print(evennessSlopeFig, vp=viewport(layout.pos.row=1, layout.pos.col=2)) #export at 1800x800 #dominance dominanceAllFig <- ggplot(data=commSite, aes(x=bio12, y=BP_scale, color=block_trt)) + xlab('MAP') + ylab('Scaled Dominance') + geom_smooth(data=subset(commSite, block_trt=='India'|block_trt=='SAmerica_ungrazed'|block_trt=='Tibet_ungrazed'), method='lm', se=F) + geom_smooth(data=subset(commSite, block_trt=='Brazil'|block_trt=='Kenya'|block_trt=='NAmerica'|block_trt=='SAfrica'|block_trt=='China2'), method='lm', linetype='dashed', se=F) + geom_point(size=5) + theme(legend.position='none') summary(lm(slope~geo_mean_MAP, data=domModels)) dominanceSlopeFig <- ggplot(data=domModels, aes(x=geo_mean_MAP, y=slope, color=block_trt)) + geom_point(size=5) + geom_errorbarh(aes(xmin=min_MAP, xmax=max_MAP)) + 
geom_errorbar(aes(ymin=slope-slope_err, ymax=slope+slope_err)) + xlab('MAP') + ylab('Slope of Dominance v MAP') + geom_hline(yintercept=0) + # geom_smooth(method='lm', size=2, color='black') + annotate("text", x=1500, y=-0.004, label = "R2=0.294,\np=0.165", size=8) #dominance figure pushViewport(viewport(layout=grid.layout(1,2))) print(dominanceAllFig, vp=viewport(layout.pos.row=1, layout.pos.col=1)) print(dominanceSlopeFig, vp=viewport(layout.pos.row=1, layout.pos.col=2)) #export at 1800x800 #Evar v richness compareAllFig <- ggplot(data=commSite, aes(x=richness_scale, y=Evar_scale, color=block_trt)) + geom_point() + xlab('Scaled Richness') + ylab('Scaled Evar') + geom_smooth(data=subset(commSite, block_trt=='India'|block_trt=='Brazil'|block_trt=='NAmerica'), method='lm', se=F) + theme(legend.position='none') + facet_wrap(~block_trt) summary(lm(slope~geo_mean_MAP, data=compareEvenModels)) compareSlopeFig <- ggplot(data=compareEvenModels, aes(x=geo_mean_MAP, y=slope, color=block_trt)) + geom_point(size=5) + geom_errorbarh(aes(xmin=min_MAP, xmax=max_MAP)) + geom_errorbar(aes(ymin=slope-slope_err, ymax=slope+slope_err)) + xlab('MAP') + ylab('Slope of Evar v Richness') + geom_hline(yintercept=0) + annotate("text", x=1500, y=-0.5, label = "R2=0.303,\np=0.158", size=8) #comparison figure pushViewport(viewport(layout=grid.layout(1,2))) print(compareAllFig, vp=viewport(layout.pos.row=1, layout.pos.col=1)) print(compareSlopeFig, vp=viewport(layout.pos.row=1, layout.pos.col=2)) #export at 1800x800 #dominance v richness compareAllFig <- ggplot(data=commSite, aes(x=richness_scale, y=BP_scale, color=block_trt)) + geom_point() + xlab('Scaled Richness') + ylab('Scaled Dominance') + geom_smooth(data=subset(commSite, block_trt=='India'|block_trt=='Kenya'|block_trt=='Tibet_ungrazed'), method='lm', se=F) + theme(legend.position='none') + facet_wrap(~block_trt) summary(lm(slope~geo_mean_MAP, data=compareDomModels)) compareSlopeFig <- ggplot(data=compareDomModels, aes(x=geo_mean_MAP, 
y=slope, color=block_trt)) + geom_point(size=5) + geom_errorbarh(aes(xmin=min_MAP, xmax=max_MAP)) + geom_errorbar(aes(ymin=slope-slope_err, ymax=slope+slope_err)) + xlab('MAP') + ylab('Slope of Dominance v Richness') + geom_hline(yintercept=0) + annotate("text", x=1000, y=-0.75, label = "R2=0.051,\np=0.589", size=8) #comparison figure pushViewport(viewport(layout=grid.layout(1,2))) print(compareAllFig, vp=viewport(layout.pos.row=1, layout.pos.col=1)) print(compareSlopeFig, vp=viewport(layout.pos.row=1, layout.pos.col=2)) #export at 1800x800
eb4c19343c72bf47fcb7d362ed9bca33919cb07a
e9eb22e63999ff29edcecac8b4f5521266e73f4b
/r-hta-2020/speed-tests-fun.R
a3fb050fb9c7a63ce906a3f1b2fddbc6a697e06b
[]
no_license
hesim-dev/hesim-presentations
315342a76b995ebe709ada506e4ca47c587a22f9
4ecd20d214194e58d31681e0336604aa7dd9d83f
refs/heads/master
2021-07-03T12:17:11.773073
2020-10-13T16:08:01
2020-10-13T16:08:01
186,916,093
0
1
null
null
null
null
UTF-8
R
false
false
17,956
r
speed-tests-fun.R
# Run heemod ------------------------------------------------------------------- run_heemod <- function(n_samples) { ptm <- proc.time() # Define parameters param <- define_parameters( age_init = 60, sex = 0, ## age increases with cycles age = age_init + markov_cycle, ## operative mortality rates omrPTHR = .02, omrRTHR = .02, ## re-revision mortality rate rrr = .04, ## parameters for calculating primary revision rate cons = -5.49094, ageC = -.0367, maleC = .768536, lambda = exp(cons + ageC * age_init + maleC * sex), log_gamma = 0.3740968, gamma = exp(log_gamma), log_rrNP1 = -1.344473, rrNP1 = exp(log_rrNP1), ## revision probability of primary procedure standardRR = 1 - exp(lambda * ((markov_cycle - 1) ^ gamma - markov_cycle ^ gamma)), np1RR = 1 - exp(lambda * rrNP1 * ((markov_cycle - 1) ^ gamma - markov_cycle ^ gamma)), ## age-related mortality rate sex_cat = ifelse(sex == 0, "FMLE", "MLE"), mr = get_who_mr(age, sex_cat, country = "GBR", local = TRUE), ## state values u_SuccessP = .85, u_RevisionTHR = .30, u_SuccessR = .75, c_RevisionTHR = 5294 ) # Define transitions --------------------------------------------------------- mat_standard <- define_transition( state_names = c( "PrimaryTHR", "SuccessP", "RevisionTHR", "SuccessR", "Death" ), 0, C, 0, 0, omrPTHR, 0, C, standardRR, 0, mr, 0, 0, 0, C, omrRTHR+mr, 0, 0, rrr, C, mr, 0, 0, 0, 0, 1 ) mat_standard mat_np1 <- define_transition( state_names = c( "PrimaryTHR", "SuccessP", "RevisionTHR", "SuccessR", "Death" ), 0, C, 0, 0, omrPTHR, 0, C, np1RR, 0, mr, 0, 0, 0, C, omrRTHR+mr, 0, 0, rrr, C, mr, 0, 0, 0, 0, 1 ) mat_np1 # Define strategies ---------------------------------------------------------- strat_standard <- define_strategy( transition = mat_standard, PrimaryTHR = define_state( utility = 0, cost = 0 ), SuccessP = define_state( utility = discount(u_SuccessP, .015), cost = 0 ), RevisionTHR = define_state( utility = discount(u_RevisionTHR, .015), cost = discount(c_RevisionTHR, .06) ), SuccessR = define_state( 
utility = discount(u_SuccessR, .015), cost = 0 ), Death = define_state( utility = 0, cost = 0 ), starting_values = define_starting_values( cost = 394 ) ) strat_np1 <- define_strategy( transition = mat_np1, PrimaryTHR = define_state( utility = 0, cost = 0 ), SuccessP = define_state( utility = discount(u_SuccessP, .015), cost = 0 ), RevisionTHR = define_state( utility = discount(u_RevisionTHR, .015), cost = discount(c_RevisionTHR, .06) ), SuccessR = define_state( utility = discount(u_SuccessR, .015), cost = 0 ), Death = define_state( utility = 0, cost = 0 ), starting_values = define_starting_values( cost = 579 ) ) # Run deterministic model ---------------------------------------------------- res_mod <- run_model( standard = strat_standard, np1 = strat_np1, parameters = param, cycles = 60, cost = cost, effect = utility, init = c(1L, 0, 0, 0, 0) ) # Run PSA -------------------------------------------------------------------- rr_coef <- c(0.3740968, -5.490935, -0.0367022, 0.768536, -1.344474) names(rr_coef) <- c("lngamma", "cons", "age", "male", "np1") rr_vcov <- matrix( c(0.0474501^2, -0.005691, 0.000000028, 0.0000051, 0.000259, -0.005691, 0.207892^2, -0.000783, -0.007247, -0.000642, 0.000000028, -0.000783, 0.0052112^2, 0.000033, -0.000111, 0.0000051, -0.007247, 0.000033, 0.109066^2, 0.000184, 0.000259, -0.000642, -0.000111, 0.000184, 0.3825815^2), ncol = 5, nrow = 5, byrow = TRUE ) rsp <- define_psa( omrPTHR ~ beta(shape1 = 2, shape2 = 98), omrRTHR ~ beta(shape1 = 2, shape2 = 98), rrr ~ beta(shape1 = 4, shape2 = 96), u_SuccessP ~ beta(shape1 = .85, shape2 = .03), u_RevisionTHR ~ beta(shape1 = .30, shape2 = .03), u_SuccessR ~ beta(shape1 = .75, shape2 = .04), c_RevisionTHR ~ gamma(mean = 5294, sd = sqrt(1487)), log_gamma ~ normal(0.3740968, 0.002251512), cons ~ normal(-5.49094, 0.04321908), ageC ~ normal(-.0367, 0.00002715661), maleC ~ normal(.768536, 0.01189539), log_rrNP1 ~ normal(-1.3444740, 0.1463686) ) pm <- run_psa( model = res_mod, psa = rsp, N = n_samples ) # 
Return --------------------------------------------------------------------- run_time <- proc.time() - ptm print(run_time) return(pm) } # Run hesim (IPS) -------------------------------------------------------------- run_hesim_indiv <- function(n_samples) { ptm <- proc.time() sim <- 2 # Model setup ## Treatment strategies strategies <- data.table( strategy_id = 1:2, strategy_name = c("Standard prosthesis", "New prosthesis") ) n_strategies <- nrow(strategies) ## Patients n_patients <- 1000 patients <- data.table( patient_id = 1:n_patients, gender = "Female", age = 60 ) ## Health states states <- data.table( state_id = 1:4, state_name = c("PrimaryTHR", "SuccessP", "Revision", "SuccessR") ) # Non-death health states n_states <- nrow(states) ## Transitions tmat <- rbind(c(NA, 1, NA, NA, 2), c(NA, NA, 3, NA, 4), c(NA, NA, NA, 5, 6), c(NA, NA, 7, NA, 8), c(NA, NA, NA, NA, NA)) colnames(tmat) <- rownames(tmat) <- c(states$state_name, "Death") ## "hesim data" hesim_dat <- hesim_data(strategies = strategies, patients = patients, states = states) # Parameters ## Transitions ### Estimates from literature #### Revision risk rr_coef <- c(0.3740968, -5.490935, -0.0367022, 0.768536, -1.344474) names(rr_coef) <- c("lngamma", "cons", "age", "male", "np1") rr_vcov <- matrix( c(0.0474501^2, -0.005691, 0.000000028, 0.0000051, 0.000259, -0.005691, 0.207892^2, -0.000783, -0.007247, -0.000642, 0.000000028, -0.000783, 0.0052112^2, 0.000033, -0.000111, 0.0000051, -0.007247, 0.000033, 0.109066^2, 0.000184, 0.000259, -0.000642, -0.000111, 0.000184, 0.3825815^2), ncol = 5, nrow = 5, byrow = TRUE ) rownames(rr_vcov) <- colnames(rr_vcov) <- names(rr_coef) #### Storing the parameters params <- list( #### Transtion 1 ttrrPTHR = 2, # Time to recovery rate implies mean time of 1/2 years #### Transition 2 omrPTHR_shape1 = 2, # 2 out of 100 patients receiving primary THR died omrPTHR_shape2 = 98, #### Transition 3 rr_coef = rr_coef, rr_vcov = rr_vcov, #### Transition 4 mr = c(.0067, .0193, .0535, 
.1548), #### Transition 5 ttrRTHR = 1, # There is no rate, the time is fixed #### Transition 6: omrRTHR + mr #### Transition 7 omrRTHR_shape1 = 4, # 4 out of 100 patients with a successful revision needed another procedure omrRTHR_shape2 = 96 #### Transition 8: same as transition 4 ) ### Multi-state model matrixv <- function(v, n = NULL){ if (length(v) == 1) v <- rep(v, n_samples) m <- matrix(v) colnames(m) <- "cons" return(m) } prob_to_rate <- function(p, t = 1){ (-log(1 - p))/t } transmod_coef_def <- define_rng({ omrPTHR <- prob_to_rate(beta_rng(shape1 = omrPTHR_shape1, shape2 = omrPTHR_shape2)) mr <- fixed(mr) mr_omrPTHR <- omrPTHR + mr rr <- multi_normal_rng(mu = rr_coef, Sigma = rr_vcov) rrr <- prob_to_rate(beta_rng(shape1 = 4, shape2 = 96)) list( log_omrPTHR = matrixv(log(omrPTHR)), log_mr = lapply(as.list(log(mr)), matrixv), log_ttrrPTHR = matrixv(log(ttrrPTHR)), log_mr_omrPTHR = lapply(as.list(log(mr_omrPTHR)), matrixv), rr_shape = matrixv(rr$lngamma), rr_scale = as.matrix(rr[, -1,]), log_rrr = matrixv(log(rrr)) ) }, n = n_samples, prob_to_rate = prob_to_rate, matrixv = matrixv) transmod_coef <- eval_rng(transmod_coef_def, params = params) transmod_params <- params_surv_list( # 1. Primary THR:Successful primary (1:2) params_surv(coefs = list(rate = transmod_coef$log_ttrrPTHR), dist = "fixed"), # 2. Primary THR:Death (1:5) params_surv(coefs = list(rate = transmod_coef$log_omrPTHR), dist = "exp"), # 3. Successful primary:Revision THR (2:3) params_surv(coefs = list(shape = transmod_coef$rr_shape, scale = transmod_coef$rr_scale), dist = "weibullPH"), # 4. Successful primary:Death (2:5) params_surv(coefs = transmod_coef$log_mr, aux = list(time = c(0, 5, 15, 25)), dist = "pwexp"), # 5. Revision THR:Successful revision (3:4) params_surv(coefs = list(est = matrixv(params$ttrRTHR)), dist = "fixed"), # 6. Revision THR:Death (3:5) params_surv(coefs = transmod_coef$log_mr_omrPTHR, aux = list(time = c(0, 5, 15, 25)), dist = "pwexp"), # 7. 
Successful revision:Revision THR (4:3) params_surv(coefs = list(rate = transmod_coef$log_rrr), dist = "exp"), # 8. Successful revision:Death (4:5) params_surv(coefs = transmod_coef$log_mr, aux = list(time = c(0, 5, 15, 25)), dist = "pwexp") ) ## Utility and costs utility_tbl <- stateval_tbl( data.table(state_id = states$state_id, mean = c(0, .85, .3, .75), se = c(0, .03, .03, .04)), dist = "beta", hesim_data = hesim_dat ) drugcost_tbl <- stateval_tbl( data.table(strategy_id = rep(strategies$strategy_id, each = n_states), state_id = rep(states$state_id, times = n_strategies), est = c(394, 0, 0, 0, 579, 0, 0, 0)), dist = "fixed", hesim_data = hesim_dat ) medcost_tbl <- stateval_tbl( data.table(state_id = states$state_id, mean = c(0, 0, 5294, 0), se = c(0, 0, 1487, 0)), dist = "gamma", hesim_data = hesim_dat ) # Simulation ## Construct model ### Transition model transmod_data <- expand(hesim_dat, by = c("strategies", "patients")) transmod_data[, cons := 1] transmod_data[, male := ifelse(gender == "Male", 1, 0)] transmod_data[, np1 := ifelse(strategy_name == "New prosthesis", 1, 0)] transmod <- create_IndivCtstmTrans(transmod_params, input_data = transmod_data, trans_mat = tmat, clock = "forward", start_age = patients$age) ### Utility and cost models utilitymod <- create_StateVals(utility_tbl, n = transmod_coef_def$n) drugcostmod <- create_StateVals(drugcost_tbl, n = transmod_coef_def$n, method = "starting") medcostmod <- create_StateVals(medcost_tbl, n = transmod_coef_def$n) costmods <- list(Drug = drugcostmod, Medical = medcostmod) ### Economic model econmod <- IndivCtstm$new(trans_model = transmod, utility_model = utilitymod, cost_models = costmods) ## Simulate outcomes econmod$sim_disease(max_t = 60, max_age = 120) econmod$sim_qalys(dr = .015) econmod$sim_costs(dr = .06) ce_sim <- econmod$summarize() # Return run_time <- proc.time() - ptm print(run_time) return(ce_sim) } # Run hesim (cohort) ----------------------------------------------------------- 
# ---------------------------------------------------------------------------
# run_hesim_cohort: benchmark hesim's cohort discrete-time state transition
# model (cDTSTM) and return the summarized cost-effectiveness results.
#
# Structure (as visible in the code below): 5 health states (primary THR,
# successful primary, revision THR, successful revision, death), 2 strategies
# (standard vs. new prosthesis), one 60-year-old female patient profile, a
# 60-cycle horizon, and `n_samples` probabilistic sensitivity analysis draws.
#
# Args:
#   n_samples : number of PSA samples passed to define_rng().
# Returns:
#   The object produced by econmod$summarize() (QALYs discounted at 1.5%,
#   costs at 6%); elapsed run time is printed as a side effect.
# NOTE(review): assumes hesim and data.table are attached by the surrounding
# script -- confirm before sourcing this function in isolation.
# ---------------------------------------------------------------------------
run_hesim_cohort <- function(n_samples) {
  ptm <- proc.time()  # start wall-clock timer for the benchmark

  # Model setup: strategy x patient grid consumed by hesim
  strategies <- data.table(strategy_id = 1:2,
                           strategy_name = c("Standard prosthesis", "New prosthesis"))
  patients <- data.table(patient_id = 1, sex = "Female", age = 60)
  hesim_dat <- hesim_data(strategies = strategies, patients = patients)

  # Parameters
  ## Estimates from literature
  ### Mortality: annual rates by age band and sex (age_lower <= age < age_upper)
  mort_tbl <- rbind(
    c(35, 45, .00151, .00099),
    c(45, 55, .00393, .0026),
    c(55, 65, .0109, .0067),
    c(65, 75, .0316, .0193),
    c(75, 85, .0801, .0535),
    c(85, Inf, .1879, .1548)
  )
  colnames(mort_tbl) <- c("age_lower", "age_upper", "male", "female")
  mort_tbl <- data.frame(mort_tbl)

  ### Revision risk regression
  #### Coefficients: log shape ("lngamma") plus linear predictors of log scale
  rr_coef <- c(0.3740968, -5.490935, -0.0367022, 0.768536, -1.344474)
  names(rr_coef) <- c("lngamma", "cons", "age", "male", "np1")
  #### Variance-covariance matrix (diagonal entries are squared std. errors)
  rr_vcov <- matrix(
    c(0.0474501^2, -0.005691, 0.000000028, 0.0000051, 0.000259,
      -0.005691, 0.207892^2, -0.000783, -0.007247, -0.000642,
      0.000000028, -0.000783, 0.0052112^2, 0.000033, -0.000111,
      0.0000051, -0.007247, 0.000033, 0.109066^2, 0.000184,
      0.000259, -0.000642, -0.000111, 0.000184, 0.3825815^2),
    ncol = 5, nrow = 5, byrow = TRUE
  )
  rownames(rr_vcov) <- colnames(rr_vcov) <- names(rr_coef)

  #### Combine all parameters consumed by define_rng()/define_tparams() below
  params <- list(
    # Transition probabilities
    ## Operative mortality following primary THR (beta prior: 2 deaths / 100)
    omrPTHR_shape1 = 2, omrPTHR_shape2 = 98,
    ## Revision rate for prosthesis
    rr_coef = rr_coef,
    rr_vcov = rr_vcov,
    ## Mortality_rates
    mr = mort_tbl,
    ## Operative mortality following revision THR
    omrRTHR_shape1 = 2, omrRTHR_shape2 = 98,
    ## re-revision rate (beta prior: 4 / 100)
    rrr_shape1 = 4, rrr_shape2 = 96,
    # Utility (mean/SE by state)
    u_mean = c(PrimaryTHR = 0, SuccessP = .85, Revision = .30, SuccessR = .75),
    u_se = c(PrimaryTHR = 0, SuccessP = .03, Revision = .03, SuccessR = .04),
    # Costs: medical cost only accrues in the revision state
    c_med_mean = c(PrimaryTHR = 0, SuccessP = 0, Revision = 5294, SuccessR = 0),
    c_med_se = c(PrimaryTHR = 0, SuccessP = 0, Revision = 1487, SuccessR = 0),
    c_Standard = 394,
    c_NP1 = 579
  )

  ### Random number generation: one PSA draw per sample for every parameter;
  ### mortality rates are held fixed (no sampling uncertainty)
  rng_def <- define_rng({
    list(
      omrPTHR = beta_rng(shape1 = omrPTHR_shape1, shape2 = omrPTHR_shape2),
      rr_coef = multi_normal_rng(mu = rr_coef, Sigma = rr_vcov),
      mr_male = fixed(mr$male, names = mr$age_lower),
      mr_female = fixed(mr$female, names = mr$age_lower),
      omrRTHR = beta_rng(shape1 = omrRTHR_shape1, shape2 = omrRTHR_shape2),
      rrr = beta_rng(shape1 = rrr_shape1, shape2 = rrr_shape2),
      u = beta_rng(mean = u_mean, sd = u_se),
      c_med = gamma_rng(mean = c_med_mean, sd = c_med_se),
      c_Standard = c_Standard,
      c_NP1 = c_NP1
    )
  }, n = n_samples)

  ### Transformed parameters (transition probability matrix), per cycle 1..60
  transitions_def <- define_tparams({
    #### Regression for revision risk
    male <- ifelse(sex == "Female", 0, 1)
    np1 <- ifelse(strategy_name == "Standard prosthesis", 0, 1)
    scale <- exp(rr_coef$cons + rr_coef$age * age + rr_coef$male * male + rr_coef$np1 * np1)
    shape <- exp(rr_coef$lngamma)
    # Conditional probability of revision during cycle `time` given survival
    # to its start (Weibull-form hazard on the cycle interval)
    rr <- 1 - exp(scale * ((time - 1)^shape - time^shape))

    #### Mortality rate: select the age-band rate for the patient's current age
    age_new <- age + time
    mr <- mr_female[["35"]] * (sex == "Female" & age_new >= 35 & age_new < 45) +
      mr_female[["45"]] * (sex == "Female" & age_new >= 45 & age_new < 55) +
      mr_female[["55"]] * (sex == "Female" & age_new >= 55 & age_new < 65) +
      mr_female[["65"]] * (sex == "Female" & age_new >= 65 & age_new < 75) +
      mr_female[["75"]] * (sex == "Female" & age_new >= 75 & age_new < 85) +
      mr_female[["85"]] * (sex == "Female" & age_new >= 85) +
      mr_male[["35"]] * (sex == "Male" & age_new >= 35 & age_new < 45) +
      mr_male[["45"]] * (sex == "Male" & age_new >= 45 & age_new < 55) +
      mr_male[["55"]] * (sex == "Male" & age_new >= 55 & age_new < 65) +
      mr_male[["65"]] * (sex == "Male" & age_new >= 65 & age_new < 75) +
      mr_male[["75"]] * (sex == "Male" & age_new >= 75 & age_new < 85) +
      mr_male[["85"]] * (sex == "Male" & age_new >= 85)

    # 5x5 row-wise transition matrix; C is hesim's complement so rows sum to 1.
    # Last row: death is absorbing.
    list(
      tpmatrix = tpmatrix(
        0, C, 0, 0, omrPTHR,
        0, C, rr, 0, mr,
        0, 0, 0, C, omrRTHR + mr,
        0, 0, rrr, C, mr,
        0, 0, 0, 0, 1)
    )
  }, times = 1:60)

  ### State values: utility plus prosthesis (strategy-dependent) and medical
  ### costs attached to each health state
  statevals_def <- define_tparams({
    c_prosthesis <- ifelse(strategy_name == "Standard prosthesis",
                           c_Standard,
                           c_NP1)
    list(
      utility = u,
      costs = list(
        prosthesis = c_prosthesis,
        medical = c_med
      )
    )
  })

  # Simulation
  ## Construct model
  mod_def <- define_model(tparams_def = list(transitions_def, statevals_def),
                          rng_def = rng_def, params = params)
  # Prosthesis cost is a one-off on state entry ("starting"); medical cost
  # accrues with time in state ("wlos" = weighted length of stay)
  cost_args <- list(
    prosthesis = list(method = "starting"),
    medical = list(method = "wlos")
  )
  input_data <- expand(hesim_dat, by = c("strategies", "patients"))
  econmod <- create_CohortDtstm(mod_def, input_data, cost_args = cost_args)

  ## Simulate outcomes over 60 cycles, discounting QALYs at 1.5%, costs at 6%
  econmod$sim_stateprobs(n_cycles = 60)
  econmod$sim_qalys(dr = .015, integrate_method = "riemann_right")
  econmod$sim_costs(dr = .06, integrate_method = "riemann_right")
  ce_sim <- econmod$summarize()

  # Return
  run_time <- proc.time() - ptm
  print(run_time)
  return(ce_sim)
}
a777effcafd4ac523771b83873238fd050840414
a6fac5b0295e76c2ae12002a7847097174244f7d
/Functions_NCOV.R
05534b2eceb4da63a061fef55da8d276f93aee0d
[]
no_license
kaiyuanmifen/COVID19_prediction_USA
377894228f0a70f6b79b77f922a7b2167e2dfee3
83a39bb9f13bdef303bc08786a626904e08b6436
refs/heads/main
2023-01-03T23:15:16.349142
2020-11-03T21:58:00
2020-11-03T21:58:00
309,776,734
0
0
null
null
null
null
UTF-8
R
false
false
42,366
r
Functions_NCOV.R
packages <- c("ggplot2", "dplyr", "reshape","ppcor","glmnet","MLmetrics") if (length(setdiff(packages, rownames(installed.packages()))) > 0) { install.packages(setdiff(packages, rownames(installed.packages()))) } # Hierachical clusting choosing the best number of K HierachiClustering=function(AllProvinces,GroundTruthName,ClusteringMethod){ print(paste("clustering method:",ClusteringMethod)) #Method 1: hierachiical clustering by grouth trueth if(ClusteringMethod=="GroundTruthCorrelation"){ AllDates=AllProvinces$Date AllDates=unique(AllDates[AllDates>FirstDate_use_data & AllDates <=First_index_date]) EachProvince=NULL AllNames=NULL for (Location in unique(AllProvinces$Location)){ Vec=AllProvinces[AllProvinces$Location==Location,c(GroundTruthName,"Date" )] if(nrow(Vec)>0){ Vec=Vec[match(AllDates,Vec$Date),GroundTruthName] EachProvince=cbind(EachProvince,Vec) AllNames=c(AllNames,Location) } } EachProvince[is.na(EachProvince)]=0 colnames(EachProvince)=AllNames EachProvince=as.data.frame(EachProvince) EachProvince=EachProvince+rnorm(n =length(EachProvince) ,sd = 0.01,mean = 0) CorMat=cor(EachProvince,method = "spearman") dissimilarity=as.dist(1-CorMat) #clusters clusters=hclust(dissimilarity,method = "average") #plot(clusters) #calculated CH indexs GetCH=function(K,EachProvince){ clusterCut <- cutree(clusters, K) Vec_each_cluster=list() W=0 for (Cluster in unique(clusterCut)){ Vec=EachProvince[,colnames(EachProvince)%in%names(clusterCut)[clusterCut==Cluster],drop=F] Vec_each_cluster[[length(Vec_each_cluster)+1]]=Vec #caculate W W=W+sum((Vec-apply(Vec,MARGIN = 1,mean))^2) } X_ave_all=apply(EachProvince,MARGIN = 1,mean) X_k=lapply(Vec_each_cluster,function(x){apply(x,MARGIN = 1,mean)}) N_k=unlist(lapply(Vec_each_cluster,function(x){ncol(x)})) B=sum(N_k*unlist(lapply(X_k,function(x){sum((x-X_ave_all)^2)}))) N= ncol(EachProvince) CH=(B/(K-1))/(W/(N-K)) return(CH) } AllCH=NULL AllKs=2:8 for (K in AllKs){ AllCH=c(AllCH,GetCH(K,EachProvince)) } K_max=AllKs[which.max(AllCH)] 
clusterCut <- cutree(clusters, K_max) } #Method 2: by region if(ClusteringMethod=="ByRegion"){ StatesToregion=read.csv("states_to_region.csv") StatesToregion$State=paste0("US-",tolower(StatesToregion$State)) LocationNames=unique(AllProvinces$Location) StatesToregion=StatesToregion[StatesToregion$State%in%LocationNames,] StatesToregion$clusterCut= as.integer(as.factor(StatesToregion$Region)) clusterCut =StatesToregion$clusterCut names(clusterCut)=StatesToregion$State } #Method 3 : by politic party if(ClusteringMethod=="ByParty"){ StatesToparty=read.csv("state_to_party.csv") names(StatesToparty)[1]="State" StatesToparty$State=paste0("US-",tolower(StatesToparty$State)) LocationNames=unique(AllProvinces$Location) StatesToparty=StatesToparty[StatesToparty$State%in%LocationNames,] StatesToparty$clusterCut= as.integer(as.factor(StatesToparty$X2016.presidential)) clusterCut =StatesToparty$clusterCut names(clusterCut)=StatesToparty$State } #method 4: by starting date if(ClusteringMethod=="StartingDate"){ # if(GroundTruthName== "covidtracking_new_deaths"){ # Threshold=50 # } # if(GroundTruthName== "JHK_New_confirmed" ){ # Threshold=1000 # } LocationNames=unique(AllProvinces$Location) Threshold=500 Locations=NULL StartingDate=NULL for (Place in LocationNames){ SDate=AllProvinces$Date[AllProvinces$Location==Place][AllProvinces[AllProvinces$Location==Place,"JHK_New_confirmed"]>=Threshold][1] Locations=c(Locations,Place) StartingDate=c(StartingDate,as.character(SDate)) print(paste(Place ,SDate)) } Vec=data.frame(Location=Locations,Date=as.character(StartingDate)) Vec$clusterCut=as.integer(as.factor(unlist(lapply(Vec$Date,function(x){strsplit(x,split = "[-]")[[1]][2]})))) Vec$clusterCut[is.na(Vec$clusterCut)]=max(Vec$clusterCut,na.rm = T)+1 clusterCut =Vec$clusterCut names(clusterCut)=Vec$Location } return(clusterCut) } #data aggregation AggregateByDate=function(X,Aggregation,Information_to_include){ print("Doing aggregation") #Aggregation= number of dayes to Aggregation #Only 
aggregate specific columns X$Date=as.Date(as.character(X$Date),origin="1970-01-01") DatesLeft=seq(from = min(X$Date),to = max(X$Date),by = Aggregation) DatesLeft=DatesLeft[2:length(DatesLeft)]#skip the first aggregation date XVec=X[X$Date%in%DatesLeft,] Vec_infor=lapply(Information_to_include,FUN = function(x){Vec=strsplit(x,split ="_")[[1]];Vec=paste(Vec[1:(length(Vec)-2)],collapse = "_")}) Vec_infor=unique(unlist(Vec_infor))#reprocess the code a littble bit TargetColumns=colnames(X)[colnames(X)%in%Vec_infor] for (Date in DatesLeft){ XVec[XVec$Date==Date,TargetColumns]=apply(X[X$Date%in%c(Date-c(0:(Aggregation-1))),TargetColumns],2,sum) } #XVec[,c("Date","Location",GroundTruthName)] return(XVec) } #normalize data. DataNormalization=function(X,Y,OutlierDataPointRemoval,GroundTruthName,daysahead,Aggregation){ X_original=X Y_original=Y #X=X+rnorm(n=length(X),mean = 0,sd = 0.01) #Y=Y+rnorm(n=length(Y),mean = 0,sd = 0.01) #remove outliers if(OutlierDataPointRemoval==TRUE){ print("Ding outlier removal") VecSmooth=X[,GroundTruthName] for (i in 2:(length(VecSmooth)-1)){ if((VecSmooth[i]>sum(X[,GroundTruthName][i-1],X[,GroundTruthName][i+1],na.rm = T))&(sum(!is.na(X[,GroundTruthName][i-1]),!is.na(X[,GroundTruthName][i+1]))==2)){ VecSmooth[i]=sum(X[,GroundTruthName][i-1]+X[,GroundTruthName][i+1],na.rm = T)/2 } } X[,GroundTruthName]=VecSmooth Target=X[,GroundTruthName] Vec=Target[match(X$Date+daysahead*Aggregation,X$Date)] Y=data.frame(Y=Vec,Date=X$Date,daysahead=daysahead*Aggregation) Y$Date=as.Date(Y$Date,origin="1970-01-01") } if(OutlierDataPointRemoval==FALSE){ print("not doing outlier removal") } #normalization print("doning normalizaiton") #Normalize X for (i in 1:ncol(X)){ if (class(X[,i])== "numeric"|class(X[,i])== "integer"){ for (location in unique((X$Location)) ){ Vec=X[X$Location==location,i] Vec=Vec[!is.na(Vec)] Mu=mean(Vec,rm.na=T) Sigma=sd(Vec,na.rm = T) if(Sigma!=0){ X[X$Location==location,i]=(X[X$Location==location,i]-Mu)/Sigma } if(Sigma==0){#sum times 
Sigma==0 X[X$Location==location,i]=0 } } } } #Normalize Y Y$Location=X$Location Y$Mu=NA Y$Sigma=NA for (location in unique((Y$Location)) ){#Y has the same location as X Vec=Y$Y[Y$Location==location] Vec=Vec[!is.na(Vec)] Mu=mean(Vec) Sigma=sd(Vec) if(Sigma!=0){ Y$Y[Y$Location==location]=(Y$Y[Y$Location==location]-Mu)/Sigma } if(Sigma==0){#sum times Sigma==0 Y$Y[Y$Location==location]=0 } Y$Mu[Y$Location==location]=Mu Y$Sigma[Y$Location==location]=Sigma } return(list(X=X,Y=Y, X_original=X_original, Y_original=Y_original)) } #coordinate decent algorithm for optimization #function to get input, output predictions Make_prediction=function(X_train,Y_train,X_test,alpha=1,FeatureSelection=F,GroundTruthName,Augmentation=T){ # # # for (i in 1:ncol(X_train)){ # X_train[,i]=as.numeric(X_train[,i]) # } # #bootstraping if (Augmentation==T){ print("with augementation") Indexes=sample(1:length(Y_train),replace = T,100*length(Y_train)) X_train_vec= X_train[Indexes,] Y_train_vec=Y_train[Indexes] #sometimes there are several continous days without data change X_train_vec= X_train_vec+rnorm(mean = 0,sd = 0.01,n=length( X_train_vec)) Y_train_vec= Y_train_vec+rnorm(mean = 0,sd = 0.01,n=length(Y_train_vec)) } if (Augmentation==F){ print("without augementation") X_train_vec= X_train Y_train_vec=Y_train } #feature selection if (FeatureSelection==T){ print("doning feature selection") library("ppcor") cvec=cbind(as.data.frame( Y_train_vec),X_train_vec) cvec[is.na(cvec)]=0 VecPCor=pcor(cvec) VecCor=cor(X_train_vec,Y_train_vec) #if(sum(VecPCor$p.value[,1]<0.01)>2){ # # Features=names(cbind(as.data.frame(Y_train),X_train)) # # Features=Features[abs(VecPCor$estimate[,1])>=sort(abs(VecPCor$estimate[,1]),decreasing = T)[10]] # # # Features=Features[2:length(Features)] Features=rownames(VecCor)[VecCor[,1]>=sort(VecCor[,1],decreasing = T)[10]] Features=c(Features,paste0(GroundTruthName,"_Lag_0")) #} # if(sum(VecPCor$p.value[,1]<0.01)<=2){ # #if no feature has significant Pvalue , pick the top 10 with 
largest abs. correlation # Features=names(cbind(as.data.frame(Y_train),X_train)) # #Features=Features[abs(VecPCor$estimate[,1])>abs(VecPCor$estimate[,1])[order(abs(VecPCor$estimate[,1]),decreasing = T)==10]] # Features=Features[abs(VecPCor$estimate[,1])>sort(abs(VecPCor$estimate[,1]),decreasing = T)[12]] # # Features=Features[2:length(Features)] # # } X_train_vec=X_train_vec[,colnames(X_train_vec)%in%Features,drop=F] X_test_vec=X_test[,colnames( X_test)%in%Features,drop=F] print(paste("number of features selected: ",ncol(X_test_vec))) } if (FeatureSelection==F){ X_test_vec=X_test } print(paste(" Number of input features:",ncol(X_train_vec))) print(paste(" input features:",names(X_train_vec),collapse = " ")) #simple LM methods # Data=as.data.frame(X_train) # Data$Y_train=Y_train # fit = lm(Y_train~.,data=Data,na.action = "na.exclude") # # print(summary(fit)) # # Prediction1=predict(fit,X_test) #fix lambda # # fit = glmnet( as.matrix(X_train), Y_train, alpha = alpha,lambda = 0.1) # Prediction1=predict(fit, newx = as.matrix(X_test), type = "response") # #manualy validation and select lambda without validation # print("doing manual lambda selections") # # fit = glmnet( as.matrix(X_train), Y_train, alpha = alpha, nlambda = 20) # AllLambda=fit$lambda # Lambda= AllLambda[which(fit$dev.ratio>=0.8*max(fit$dev.ratio))[1]] # # Prediction1=predict(fit, newx = as.matrix(X_test), type = "response", s =Lambda) # # #manualy validation and select lambda using validation # # print("doing manual lambda selections") # X_val=X_train[floor(0.75*nrow(X_train)):nrow(X_train),] # X_train_vec=X_train[1:floor(0.75*nrow(X_train)),] # Y_val=Y_train[floor(0.75*nrow(X_train)):nrow(X_train)] # Y_train_vec=Y_train[1:floor(0.75*nrow(X_train))] # # fit = glmnet( as.matrix(X_train_vec), Y_train_vec, alpha = alpha, nlambda = 20) # AllLambda=fit$lambda # # RMSE=NULL # for (Vec in AllLambda){ # RMSE=c(RMSE,sqrt(mean((Y_val-predict(fit, newx = as.matrix(X_val), type = "response", s =Vec ))^2))) # } # 
Lambda=AllLambda[which.min(RMSE)] # # # Prediction1=predict(fit, newx = as.matrix(X_test), type = "response", s =Lambda) # # #cross validation library(glmnet) #linear regression cross validation version method="lambda.1se" fit = cv.glmnet(x=as.matrix(X_train_vec), y= Y_train_vec, alpha=alpha) #lambda=c(10^(seq(from = -6,to = 1,by = 0.1) print(coef(fit, s = method)) fit$lambda.min fit$lambda.1se Prediction1=predict(fit,newx=as.matrix(X_test_vec),s=c(method)) VaraibleImportance=as.data.frame(as.matrix(coef(fit, s = method))) #as.data.frame(print(coef(fit, s = method))) # # #RF version # library("randomForest") # X_train=as.data.frame(X_train) # X_train$Y_train=Y_train # #fit <-rfcv(trainx = X_train,trainy = Y_train, cv.fold=5, scale="log", step=0.5) # # fit <- randomForest(Y_train~.,data=X_train,ntree=5000) # Prediction1=predict(fit,X_test) # VaraibleImportance=importance(fit) return(list(Predictions=c(Prediction1), Models=list(fit), VaraibleImportance=VaraibleImportance))} #function to run models Run_models=function(DataForRegression,Lags=1,daysahead=1,Information_to_include, GroundTruthName="New_confirmed",First_index_date=NULL, UseR0_as_predictor=T,Normalization=TRUE,FeatureSelection=F,Aggregation=1,Binary=F, Clustering=T,Augmentation=T,OutlierDataPointRemoval=TRUE, IncludeMechanisticPrediction_ahead_of_time=TRUE, MechanisticPredictionsAheadofTime=MechanisticPredictionsAheadofTime, Mechanistic_prediction_to_use=Mechanistic_prediction_to_use, ClusteringMethod=ClusteringMethod){ DataForRegression$Date=as.Date(as.character(DataForRegression$Date),origin="1970-01-01") AllX=NULL AllY=NULL All_Y_original=NULL All_X_original=NULL #process data for (Location in unique(DataForRegression$Location)){ X=DataForRegression[DataForRegression$Location==Location,] #aggregation if(Aggregation!=1){#aggregate by date to enhance signal Aggregated=AggregateByDate(X,Aggregation,Information_to_include) X=Aggregated } #include all possible lags if(Lags>0){ X_vec=X Names=names(X) for (Lag 
in 0:Lags){ #lag in by data point Vec=X Vec=Vec[match(X$Date-Aggregation*Lag,Vec$Date),] X_vec=cbind(X_vec,Vec) Names=c(Names,paste0(names(X),'_Lag_',Lag)) } names(X_vec)=Names X=X_vec } #days_ahead AllTarges=NULL Target=X[,GroundTruthName] Vec=Target[match(X$Date+daysahead*Aggregation,X$Date)] Y=data.frame(Y=Vec,Date=X$Date,daysahead=daysahead*Aggregation) Y$Date=as.Date(Y$Date,origin="1970-01-01") if (Normalization==TRUE){ Output=DataNormalization(X,Y,OutlierDataPointRemoval,GroundTruthName,daysahead,Aggregation) X=Output$X Y=Output$Y Y_original=Output$Y_original X_original=Output$X_original } # #deal with NA data in X by persistency # for (i in 1:ncol(X)){ # X[,i][which(is.na(X[,i]))]=X[,i][min(which(!is.na(X[,i])))]# NA = the first next item that is not NA # } # AllX=rbind(AllX,X) AllY=rbind(AllY,Y) All_Y_original=rbind(All_Y_original,Y_original) All_X_original=rbind(All_X_original,X_original) } X=AllX #X=unique(X) Y=AllY # Y=unique(Y) Y_original=All_Y_original X_original=All_X_original #remove all records in X with NA, NA are allowed in Y as making prediction ahead of time if(sum(is.na(X))>0){ Y=Y[-unique(which(is.na(X),arr.ind = T)[,1]),] Y_original=Y_original[-unique(which(is.na(X),arr.ind = T)[,1]),] X_original=X_original[-unique(which(is.na(X),arr.ind = T)[,1]),] X=X[-unique(which(is.na(X),arr.ind = T)[,1]),] } Y$Date==X$Date #Y_original$Date==X$Date if(is.null(First_index_date)){ First_index_date=min(X$Date) } #X[,c("Date","Location",GroundTruthName)] #aggregation and normalization for mechanistic predictions ahead of time if(Aggregation!=1){ print("aggregation mechanistic predication") Agrgregated_Mechanistic_predeiction=NULL for (Location in unique(Mechanistic_prediction_to_use$Location)){ Vec=Mechanistic_prediction_to_use[Mechanistic_prediction_to_use$Location==Location,] Vec2=Vec for (i in Aggregation:nrow(Vec)){ Vec[i,MechanisticPredictionsAheadofTime]=apply(Vec2[(i-Aggregation+1):i,MechanisticPredictionsAheadofTime],MARGIN = 2,sum) } 
Agrgregated_Mechanistic_predeiction=rbind( Agrgregated_Mechanistic_predeiction,Vec) } Mechanistic_prediction_to_use=Agrgregated_Mechanistic_predeiction } if (Normalization==TRUE){ print("normalizing mechanistic predication") for (Location in unique(Mechanistic_prediction_to_use$Location)){ #data nomarlization by Z score for(Feature in MechanisticPredictionsAheadofTime){ SD=sd(Mechanistic_prediction_to_use[Mechanistic_prediction_to_use$Location==Location,Feature]) Mu=mean(Mechanistic_prediction_to_use[Mechanistic_prediction_to_use$Location==Location,Feature]) Mechanistic_prediction_to_use[Mechanistic_prediction_to_use$Location==Location,Feature]=(Mechanistic_prediction_to_use[Mechanistic_prediction_to_use$Location==Location,Feature]-Mu)/SD } } } Lags=Lags*Aggregation#to make days conssistent daysahead=daysahead*Aggregation X$Date=as.Date(X$Date,origin="1970-01-01") IndexDates=X$Date #IndexDates=IndexDates[IndexDates>(min(X$Date)+2*Lags)] IndexDates=IndexDates[IndexDates>=First_index_date]#some dates do not exist in report IndexDates=unique(IndexDates) #vector tos save AllDates=NULL AllPredictions=NULL AllModels=list() Y_train_max_all=list() Vec_Predictions=NULL ALlDaysForward=NULL All_Y_test=NULL AllLocations=NULL AllVaraibleImportance=list() #bianry (if the number goes up or down) if(Binary==T){ BinaryVec=(Y$Y-c(NA,Y$Y[1:(nrow(Y)-1)]))>0 Y$Binary=BinaryVec } for (IndexDate in IndexDates){ IndexDate=as.Date(IndexDate,origin="1970-01-01") print(IndexDate) ImportantVecForIndexDate=list() #now do the clustering if (Clustering){ clusterCut=HierachiClustering(DataForRegression[DataForRegression$Date<=IndexDate,],GroundTruthName = GroundTruthName,ClusteringMethod=ClusteringMethod) X$Cluster=clusterCut[match(X$Location,names(clusterCut))] Y$Cluster=clusterCut[match(Y$Location,names(clusterCut))] print(paste("number of clusters:",length(unique(clusterCut)))) } if (!Clustering){ clusterCut=as.integer(unique(as.factor(X$Location))) 
names(clusterCut)=as.character(unique(as.factor(X$Location))) X$Cluster=as.integer(as.factor(X$Location)) Y$Cluster=as.integer(as.factor(Y$Location)) print(paste("number of clusters:",length(unique(Y$Cluster)))) } VecVI=list() for (CLuster in unique(clusterCut)){ if(Binary==FALSE){ X_train=X[(X$Date<=IndexDate-daysahead)&(X$Cluster==CLuster),] X_train=X_train[,colnames(X)[colnames(X)%in%Information_to_include],drop=FALSE] class(X_train) names(X_train) Locations_train=X$Location[(X$Date<=IndexDate-daysahead)&(X$Cluster==CLuster)] Date_train=X$Date[(X$Date<=IndexDate-daysahead)&(X$Cluster==CLuster)] Y_train=data.frame(Y=Y[(Y$Date<=(IndexDate-daysahead))&(Y$Cluster==CLuster),"Y"])#not including dates X_train=X_train[!is.na(Y_train$Y),,drop=FALSE]#some data points does not have the next day Locations_train=Locations_train[!is.na(Y_train$Y)] Y_train=Y_train[!is.na(Y_train$Y),]#some data points does not have the next day X_test=X[(X$Date==IndexDate)&(X$Cluster==CLuster),which(colnames(X)%in%Information_to_include),drop=FALSE] Y_test=data.frame(Y=Y[(Y$Date==IndexDate)&(Y$Cluster==CLuster),"Y"]) Locations_test=X$Location[(X$Date==IndexDate)&(X$Cluster==CLuster)] #Including mechanistic prediction ahead of time #Only use predictions made on or before the index date if(IncludeMechanisticPrediction_ahead_of_time){ DateToUse=max(Mechanistic_prediction_to_use$DateMakingPrediction[Mechanistic_prediction_to_use$DateMakingPrediction<=IndexDate]) Vec_Mechanistic_prediction_to_use=Mechanistic_prediction_to_use[Mechanistic_prediction_to_use$DateMakingPrediction==DateToUse,] MechanisticPrediction_train=Vec_Mechanistic_prediction_to_use[match(interaction(Locations_train,Date_train+daysahead),interaction(Vec_Mechanistic_prediction_to_use[,c("Location","date")])),MechanisticPredictionsAheadofTime] names(MechanisticPrediction_train)=paste0(names(MechanisticPrediction_train),"_On_date_of_prediction") X_train=cbind(X_train,MechanisticPrediction_train) tail(names(X_train)) dim(X_train) 
class(X_train) MechanisticPrediction_test=Vec_Mechanistic_prediction_to_use[match(interaction(Locations_test,IndexDate+daysahead),interaction(Vec_Mechanistic_prediction_to_use[,c("Location","date")])),MechanisticPredictionsAheadofTime] names(MechanisticPrediction_test)=paste0(names(MechanisticPrediction_test),"_On_date_of_prediction") X_test=cbind(X_test,MechanisticPrediction_test) tail(names(X_test)) dim(X_test) } #Y_test_original=Y_original[Y$Date==IndexDate,"Y"] #Locations=Locations[!is.na(Y_test$Y)] #X_test=X_test[!is.na(Y_test$Y),] #Y_test=Y_test[!is.na(Y_test$Y),] if (UseR0_as_predictor==TRUE){ R0_median=median(Get_R0(X)$AllR0) R0_mean=mean(Get_R0(X)$AllR0) if(!is.na(R0_median)){ X_train$R0=rep(R0_median,nrow(X_train)) X_test$R0=R0_median } else {print("not enough data to calculate R0")} } #print(summary(X_train)) #X_train=as.matrix(X_train) Y_train=as.data.frame(Y_train) Vec_predict=Make_prediction(X_train,Y_train[,1],X_test,alpha=1, FeatureSelection=FeatureSelection,GroundTruthName=GroundTruthName, Augmentation=Augmentation) Predictions=Vec_predict$Predictions Y_test=Y_test VaraibleImportance=Vec_predict$VaraibleImportance names(VaraibleImportance)="Weight" VaraibleImportance$cluster=CLuster VaraibleImportance$IndexDate=IndexDate for (x in Locations_test){ VaraibleImportance$feature=rownames(VaraibleImportance) VaraibleImportance$Location=x ImportantVecForIndexDate[[length(ImportantVecForIndexDate)+1]]=VaraibleImportance } if (Normalization==TRUE){#scale back Mu=Y$Mu[(Y$Date==IndexDate)&(X$Cluster==CLuster)] Sigma=Y$Sigma[(Y$Date==IndexDate)&(X$Cluster==CLuster)] Predictions=Vec_predict$Predictions*Sigma+Mu#return the value back to origina range Y_test=Y_test*Sigma+Mu } } # #Do binary classficiiation # if(Binary==TRUE){ # X_train=X[(X$Date<=IndexDate-daysahead),] # X_train=X_train[,colnames(X)[colnames(X)%in%Information_to_include],drop=FALSE] # class(X_train) # names(X_train) # # Locations_train=X$Location[(X$Date<=IndexDate-daysahead)] # # 
Y_train=data.frame(Y=Y[(Y$Date<=(IndexDate-daysahead)),"Binary"])#not including dates # # X_train=X_train[!is.na(Y_train$Y),,drop=FALSE]#some data points does not have the next day # Y_train=Y_train[!is.na(Y_train$Y),,drop=FALSE]#some data points does not have the next day # # X_test=X[X$Date==IndexDate,which(colnames(X)%in%Information_to_include),drop=FALSE] # Y_test=data.frame(Y=Y[Y$Date==IndexDate,"Binary",drop=FALSE]) # # Y_test_original=Y_original[Y$Date==IndexDate,"Y"] # Locations=X$Location[X$Date==IndexDate] # #Locations=Locations[!is.na(Y_test$Y)] # #X_test=X_test[!is.na(Y_test$Y),] # #Y_test=Y_test[!is.na(Y_test$Y),] # # if (UseR0_as_predictor==TRUE){ # R0_median=median(Get_R0(X)$AllR0) # R0_mean=mean(Get_R0(X)$AllR0) # if(!is.na(R0_median)){ # X_train$R0=rep(R0_median,nrow(X_train)) # X_test$R0=R0_median # } else {print("not enough data to calculate R0")} # } # print(paste(" Number of input features:",ncol(X_train))) # print(paste(" input features:",names(X_train),collapse = " ")) # #print(summary(X_train)) # #X_train=as.matrix(X_train) # # if(length(unique(Y_train$Y))>1){ # Y_train=as.data.frame(Y_train) # Vec_predict=Make_prediction_binary(X_train,Y_train[,1],X_test,alpha=1, # FeatureSelection=FeatureSelection,GroundTruthName=GroundTruthName) # # Predictions=Vec_predict$Predictions # } else {Predictions=Y_train$Y[length(Y_train$Y)]} # # # Y_test=Y_test # } #save AllPredictions=c(AllPredictions,Predictions) ALlDaysForward=c(ALlDaysForward,rep(daysahead,length(Predictions))) AllModels[[length(AllModels)+1]]=Vec_predict$Models #Y_train_max_all[[length(Y_train_max_all)+1]]=Y_train_max AllDates=c(AllDates,rep(as.character(as.Date(IndexDate,origin = "1970-01-01")),length(Predictions))) All_Y_test=c(All_Y_test,Y_test) AllLocations=c(AllLocations,Locations_test) VecVI[[length(VecVI)+1]]=as.matrix(VaraibleImportance) } AllVaraibleImportance[[length(AllVaraibleImportance)+1]]=ImportantVecForIndexDate } names(AllVaraibleImportance)=IndexDates 
Results=data.frame(Dates=as.Date(AllDates), Predictions=AllPredictions, DaysAhead=ALlDaysForward, Aggregation=Aggregation, Y_test=unlist(All_Y_test), Location=AllLocations) #names(AllModels)=paste0(Results$Dates,"_",Results$DaysAhead) return(list(Results=Results,X=X_original,Y=Y_original, X_train=X_train,VaraibleImportance=AllVaraibleImportance, LastModel=Vec_predict$Models)) } #Generated plots Get_plots=function(Results,X,GroundTruthName="New_suspected"){ library(ggplot2) #Look at performance #pdf(paste0('figures/',i,"_plot.pdf")) AllPlots=list() for (IndexDate in unique(Results$Dates)){ VecPlot=data.frame(New_count=X[,GroundTruthName][X$Date<=IndexDate], Date=X$Date[X$Date<=IndexDate]) VecPlot$Type="Observed" VecPlot$Date=as.Date(as.character(VecPlot$Date),origin="1970-01-01") VecPlot_0=data.frame(New_count=X[,GroundTruthName], Date=X$Date) VecPlot_0$Type="Ground_truth(unseen)" VecPlot_0$Date=as.Date(as.character(VecPlot_0$Date),origin="1970-01-01") VecPlot2= data.frame(New_count=Results$Predictions[Results$Dates==IndexDate], Date=c(Results$Dates[Results$Dates==IndexDate]+Results$DaysAhead[Results$Dates==IndexDate])) VecPlot2$Type="Model_Predicts" VecPlot2$Date=as.Date(as.character(VecPlot2$Date),origin="1970-01-01") #calculate R0 based prediction CumuSum_Count=sum(VecPlot$New_count[VecPlot$Date<=IndexDate]) CumuSum_Count_t_minus_1=sum(VecPlot$New_count[VecPlot$Date<=(IndexDate-5)]) #R0_upper=3.9 R0_median=2.2 #R0_lower=1.4 Serial_period=5 Y_t=VecPlot$New_count[VecPlot$Date==IndexDate] Increased_cumulative=c(R0_median)*(CumuSum_Count-CumuSum_Count_t_minus_1) Y_R0=(2*Increased_cumulative/Serial_period)-Y_t RO_predict=data.frame(New_count=Y_R0,Date=IndexDate+Serial_period) RO_predict$Type="R0_Predicts" print(paste("R0_count",RO_predict$New_count)) RO_predict$Date=as.Date(RO_predict$Date,origin = "1970-01-01") #RO_predict=rbind(RO_predict,RO_predict) # # names(R0_predict)=c("Date","New_suspected_count") # R0_predict$Type="R0_predict" # 
R0_predict=R0_predict[,c("New_suspected_count","Date","Type")] # # VecPlot=data.frame(Date=c(VecPlot$Date,VecPlot2$Date,RO_predict$Date,VecPlot_0$Date), New_count=c(VecPlot$New_count,VecPlot2$New_count,RO_predict$New_count,VecPlot_0$New_count), Type=c(VecPlot$Type,VecPlot2$Type,RO_predict$Type,VecPlot_0$Type)) VecPlot$Date=as.Date(VecPlot$Date,origin="1970-01-01") # # VecPlot$Date=as.Date(VecPlot$Date) # VecPlot=VecPlot[!is.na(VecPlot$New_suspected_count),] # VecPlot$Type=as.factor(VecPlot$Type) # VecPlot$Date=as.Date(as.character(VecPlot$Date)) # class(VecPlot$Date AllPlots[[length(AllPlots)+1]]=ggplot(data=VecPlot,aes(x=Date, y=New_count,colour=Type)) + geom_line(aes(linetype=Type)) + scale_linetype_manual(values=c("dashed", "solid","solid","solid"))+ ggtitle(paste("prediction made \n on date: ",as.character(Results$Dates[i])))+ ylim(0,max(VecPlot$New_count))+ xlim(min(VecPlot$Date),max(VecPlot$Date)+7)+ xlab('Dates') + ylab('New count')+ theme_bw() + geom_point(data=RO_predict,aes(x=Date,y=New_count), colour="blue")+ scale_color_manual(values=c('black','red','black','blue'))+ geom_vline(xintercept=c(min(VecPlot2$Date)-1), linetype="dotted",color='blue')+ geom_vline(xintercept=c(min(VecPlot2$Date)), linetype="dotted",color='brown')+ geom_vline(xintercept=c(max(VecPlot2$Date)), linetype="dotted",color='red') } return(AllPlots) } #Get performace Get_performance=function(Results,X,TargetColumn="New_suspected"){ #install.packages("MLmetrics") library("MLmetrics") Dates=Results$Dates[Results$DaysAhead==1]+1 Predictions=Results$Predictions[Results$DaysAhead==1] Observed=X[,TargetColumn][match(Dates,X$Date)] BaseLine=X[,TargetColumn][match(Dates-1,X$Date)] Vec=data.frame(Dates=Dates,Predictions=Predictions, Observed=Observed,BaseLine=BaseLine ) Vec=Vec[(!is.na(Vec$Observed))&((!is.na(Vec$Predictions))),] RMSE=mean(sqrt((Vec$Predictions-Vec$Observed)^2),na.rm = T) Cor=cor(Vec$Predictions,Vec$Observed) Mape=MAPE(Vec$Predictions,Vec$Observed) 
# --- Tail of the enclosing performance function (its header precedes this chunk;
# --- kept verbatim so the function still closes correctly). Computes the
# --- persistence-baseline error metrics on the matched prediction frame `Vec`.
# NOTE(review): mean(sqrt((a-b)^2)) equals mean(|a-b|), i.e. the mean ABSOLUTE
# error, not RMSE (which would be sqrt(mean((a-b)^2))). The same formula is used
# consistently everywhere in this file under the name "RMSE"; kept as-is so the
# reported numbers stay reproducible -- confirm intent before correcting.
Baseline_RMSE=mean(sqrt((Vec$BaseLine-Vec$Observed)^2))
Baseline_Cor=cor(Vec$BaseLine,Vec$Observed)
Baseline_Mape=MAPE(Vec$BaseLine,Vec$Observed)
Output=data.frame(RMSE=RMSE,Cor=Cor,Mape=Mape,
                  Baseline_RMSE=Baseline_RMSE,Baseline_Cor=Baseline_Cor,
                  Baseline_Mape=Baseline_Mape)
return(Output)
}

# Per-location continuous-forecast evaluation.
#
# Args:
#   Prediction: data.frame with (at least) columns Predictions, Location, Dates.
#   Y_test:     data.frame with (at least) columns Y, Location, Date.
#   Print:      if TRUE, draw an observed-vs-predicted line plot per location.
#
# Returns a list:
#   Performance: one row per location with RMSE / Cor / Mape
#                (see NOTE above: "RMSE" here is actually MAE).
#   Prediction:  long data.frame of matched (Predicted, Observed,
#                PredictionDate, Location) rows.
Get_Performance_values=function(Prediction,Y_test,Print=TRUE){
  library("MLmetrics")  # provides MAPE()
  AllRMSE=NULL
  AllCor=NULL
  ALLMape=NULL
  AllPredction=NULL
  # Align Prediction rows to Y_test row order by (Location, Date) key, then
  # drop unmatched rows from both frames. The NA mask is computed once on the
  # reordered Prediction and applied to Y_test BEFORE Prediction is filtered,
  # so both subsets use the same mask -- do not reorder these three lines.
  Prediction=Prediction[match(as.character(interaction(Y_test$Location,Y_test$Date)),
                              as.character(interaction(Prediction$Location,Prediction$Dates))),]
  Y_test=Y_test[!is.na(Prediction$Dates),]
  Prediction=Prediction[!is.na(Prediction$Dates),]
  # Metrics computed independently for each location.
  for (each_location in unique(Y_test$Location)){
    Vec=data.frame(Predictions=Prediction$Predictions[Prediction$Location==each_location],
                   Observed=Y_test$Y[Prediction$Location==each_location],
                   Date=Y_test$Date[Prediction$Location==each_location])
    # Keep only rows where both prediction and observation are available.
    Vec=Vec[(!is.na(Vec$Predictions))&(!is.na(Vec$Observed)),]
    RMSE=mean(sqrt((Vec$Predictions-Vec$Observed)^2),na.rm = TRUE)  # NOTE(review): MAE, see header
    Cor=cor(Vec$Predictions,Vec$Observed)
    Mape=MAPE(Vec$Predictions,Vec$Observed)
    AllRMSE=c(AllRMSE,RMSE)
    AllCor=c(AllCor,Cor)
    ALLMape=c(ALLMape,Mape)
    if(Print==TRUE){
      # Observed series in black, model prediction overlaid in red.
      plot(Vec$Observed~as.Date(Vec$Date),type='l',
           ylim=c(0,max(c(Vec$Observed,Vec$Predictions),na.rm = TRUE)),
           main=each_location,xlab="dates",ylab="Counts")
      lines(Vec$Predictions~as.Date(Vec$Date),col='red')
    }
    Vec$Location=each_location
    names(Vec)=c("Predicted","Observed","PredictionDate","Location")
    AllPredction=rbind(AllPredction,Vec)
  }
  # BUG FIX: original used unique(Results$Location), but `Results` is not
  # defined in this function's scope (it would capture a global, or error).
  # The loop above iterates unique(Y_test$Location), so that is the vector
  # whose order matches the accumulated metric vectors.
  Performance=data.frame(Locations=unique(Y_test$Location),
                         RMSE=AllRMSE,Cor=AllCor,Mape=ALLMape)
  return(list(Performance=Performance,
              Prediction=AllPredction))
}

# Per-province evaluation of a daysahead-step forecast against the observed
# ground truth AND a persistence baseline (value observed daysahead*Aggregation
# days earlier).
#
# Args:
#   Results:         data.frame with Predictions, Y_test, Location, Dates
#                    (Dates = date the forecast was issued).
#   X:               ground-truth data.frame with Location, Date and a column
#                    named by GroundTruthName.
#   daysahead:       forecast horizon in aggregation units (default 1).
#   Aggregation:     days per aggregation unit (default 1).
#   Print:           if TRUE, plot truth (black), model (red), baseline (brown).
#   GroundTruthName: name of the ground-truth column in X.
#
# Returns a list:
#   Performance: per-location model and baseline RMSE / Cor / Mape
#                (see NOTE at top: "RMSE" here is actually MAE).
#   Prediction:  long data.frame of matched rows with renamed columns.
Get_Performance_each_province=function(Results,X,daysahead=1,Aggregation=1,Print=TRUE,GroundTruthName){
  Results=Results[!is.na(Results$Y_test),]
  Results$Location=as.character(Results$Location)
  library("MLmetrics")  # provides MAPE()
  AllRMSE=NULL
  AllCor=NULL
  ALLMape=NULL
  AllBaseline_RMSE=NULL
  AllBaseline_Cor=NULL
  ALLBaseline_Mape=NULL
  AllPredction=NULL
  # Normalise both date columns to Date class (inputs may arrive numeric).
  Results$Dates=as.Date(Results$Dates,origin="1970-01-01")
  X$Date=as.Date(X$Date,origin="1970-01-01")
  for (each_location in unique(Results$Location)){
    Predictions=Results$Predictions[Results$Location==each_location]
    # Target date T + delta*T for each forecast.
    # NOTE(review): `Results$Date` relies on R's `$` partial matching to hit
    # the `Dates` column (no exact `Date` column is visible here) -- kept
    # as-is in case a `Date` column exists upstream; confirm and normalise.
    Dates=Results$Date[Results$Location==each_location]+daysahead*Aggregation
    Observed=Results$Y_test[Results$Location==each_location]
    X_vec=X[X$Location==each_location,]
    # Persistence baseline: ground truth observed at forecast-issue time.
    BaseLine=X_vec[match(Dates-(daysahead*Aggregation),X_vec$Date),GroundTruthName]
    Vec=data.frame(Dates=Dates,Predictions=Predictions,
                   Observed=Observed,BaseLine=BaseLine)
    Vec=Vec[(!is.na(Vec$Observed))&((!is.na(Vec$Predictions))),]
    RMSE=mean(sqrt((Vec$Predictions-Vec$Observed)^2),na.rm = TRUE)  # NOTE(review): MAE, see header
    Cor=cor(Vec$Predictions,Vec$Observed)
    Mape=MAPE(Vec$Predictions,Vec$Observed)
    Baseline_RMSE=mean(sqrt((Vec$BaseLine-Vec$Observed)^2))
    Baseline_Cor=cor(Vec$BaseLine,Vec$Observed)
    Baseline_Mape=MAPE(Vec$BaseLine,Vec$Observed)
    AllRMSE=c(AllRMSE,RMSE)
    AllCor=c(AllCor,Cor)
    ALLMape=c(ALLMape,Mape)
    AllBaseline_RMSE=c(AllBaseline_RMSE,Baseline_RMSE)
    AllBaseline_Cor=c(AllBaseline_Cor,Baseline_Cor)
    ALLBaseline_Mape=c(ALLBaseline_Mape,Baseline_Mape)
    if(Print==TRUE){
      # Full ground-truth series, with model (red) and baseline (brown dashed).
      plot(X_vec[,GroundTruthName]~X_vec$Date,type='l',
           ylim=c(0,max(c(Vec$Observed,Vec$Predictions))),
           main=each_location,xlab="dates",ylab="Counts")
      lines(Vec$Predictions~Vec$Date,col='red')
      lines(Vec$BaseLine~Vec$Date,col='brown',lty=2)
    }
    Vec$Location=each_location
    names(Vec)=c("Date_T_plus_deltaT",'ModelPrediction',
                 "obsered_Y_test","Persistence_baseline",
                 "Location")
    AllPredction=rbind(AllPredction,Vec)
  }
  Performance=data.frame(Locations=unique(Results$Location),
                         RMSE=AllRMSE,Cor=AllCor,Mape=ALLMape,
                         Baseline_RMSE=AllBaseline_RMSE,Baseline_Cor=AllBaseline_Cor,
                         Baseline_Mape=ALLBaseline_Mape)
  return(list(Performance=Performance,
              Prediction=AllPredction))
}

# Estimate the reproduction number R0 from a daily new-case series and use the
# median estimate to extrapolate the next serial period of new counts.
#
# Args:
#   X: data.frame with columns New_confirmed (daily new cases) and Date.
#
# Returns a list:
#   AllR0:      vector of all per-day R0 estimates (one per candidate serial
#               period 5/6/7 days), with NA/Inf removed.
#   RO_predict: data.frame (New_count, Date, Type) linearly interpolating from
#               today's count to the R0-implied count one median serial period
#               ahead.
Get_R0=function(X){
  # Candidate serial intervals (days); R0 is estimated under each.
  Serial_periods=c(5,6,7)
  CumuSum_Count=cumsum(X$New_confirmed)
  CumuSum_Count=data.frame(CumuSum_Count=CumuSum_Count,date=X$Date)
  # R0 estimate per day i: (cases accrued in the last serial period) /
  # (cases accrued in the serial period before that).
  VecR0=NULL
  for (Serial_period in Serial_periods){
    # NOTE(review): this descending loop assumes
    # nrow(CumuSum_Count) >= 2*Serial_period+1; with a shorter series the
    # `:` sequence runs UPWARD and indexes the wrong rows -- guard upstream.
    for (i in nrow(CumuSum_Count):(2*Serial_period+1)){
      Vec1=CumuSum_Count$CumuSum_Count[i]-CumuSum_Count$CumuSum_Count[CumuSum_Count$date==(CumuSum_Count$date[i]-Serial_period)]
      Vec2=CumuSum_Count$CumuSum_Count[CumuSum_Count$date==(CumuSum_Count$date[i]-Serial_period)]-CumuSum_Count$CumuSum_Count[CumuSum_Count$date==(CumuSum_Count$date[i]-2*Serial_period)]
      VecR0=c(VecR0,Vec1/Vec2)
    }
  }
  # Drop undefined ratios (0/0 -> NA, x/0 -> Inf) before summarising.
  VecR0=VecR0[!is.na(VecR0)]
  VecR0=VecR0[!is.infinite(VecR0)]
  R0_median=median(VecR0)
  AllR0=VecR0
  # Extrapolate: project cumulative growth over one median serial period,
  # then linearly interpolate daily new counts from today's value Y_t up to
  # the implied endpoint Y_R0.
  Y_t=X$New_confirmed[which.max(X$Date)]
  Increased_cumulative=c(R0_median)*(CumuSum_Count$CumuSum_Count[which.max(CumuSum_Count$date)]-CumuSum_Count$CumuSum_Count[nrow(CumuSum_Count)-median(Serial_periods)])
  Y_R0=(2*Increased_cumulative/median(Serial_periods))-Y_t
  Gradient=(Y_R0-Y_t)/median(Serial_periods)
  MiddleValues=(1:(median(Serial_periods)-1))*Gradient+Y_t
  RO_predict=data.frame(New_count=c(MiddleValues,Y_R0),
                        Date=max(CumuSum_Count$date)+median(Serial_periods)-c((median(Serial_periods)-1):0))
  RO_predict$Type="R0_Predicts"
  RO_predict$Date=as.Date(RO_predict$Date,origin = "1970-01-01")
  return(list(AllR0=AllR0,RO_predict=RO_predict))
}

# 2x2 confusion matrix and summary metrics for binary increase/decrease labels.
#
# Args:
#   Y_test: observed labels, coded 1 (increase) / 0 (decrease).
#   Pred:   predicted labels, same coding and length.
#
# Returns a list with confusionMatrix (rows = predicted, cols = real),
# Precision, Recall and Accuracy. Precision/Recall are NaN when the
# corresponding matrix row/column sums to zero.
Get_confusion=function(Y_test,Pred){
  confusionMatrix=matrix(data = 0,nrow = 2,ncol = 2)
  rownames(confusionMatrix)=c("Predicted increase","Predicted decrease")
  colnames(confusionMatrix)=c("Real increase","Real decrease")
  confusionMatrix[[1,1]]=sum(Y_test==1&Pred==1)  # true positives
  confusionMatrix[[1,2]]=sum(Y_test==0&Pred==1)  # false positives
  confusionMatrix[[2,1]]=sum(Y_test==1&Pred==0)  # false negatives
  confusionMatrix[[2,2]]=sum(Y_test==0&Pred==0)  # true negatives
  Precision=confusionMatrix[1,1]/sum(confusionMatrix[1,])
  Recall=confusionMatrix[1,1]/sum(confusionMatrix[,1])
  # Denominator is the full label length; NA comparisons above count as FALSE.
  Accuracy=sum(Y_test==Pred,na.rm = TRUE)/(length(Y_test))
  return(list(confusionMatrix=confusionMatrix,
              Precision=Precision,
              Recall=Recall,
              Accuracy=Accuracy))
}

# Per-location binary (increase/decrease) evaluation of model predictions
# against a persistence baseline ("tomorrow moves the same way as yesterday").
#
# NOTE(review): the defaults `Aggregation = Aggregation` and
# `daysahead = daysahead` are self-referential -- evaluating either default
# raises "promise already under evaluation", so both arguments are effectively
# required. `daysahead` is in fact never used in the body (the per-row
# Results$DaysAhead is used instead). Signature kept byte-identical for
# caller compatibility; confirm and clean up at the call sites.
#
# Args:
#   Results:         data.frame with Predictions (binary), Y_test (binary),
#                    Location, Dates, DaysAhead, Aggregation columns.
#   X:               ground-truth data.frame with Location, Date and a column
#                    named by GroundTruthName.
#   GroundTruthName: name of the ground-truth count column in X.
#
# Returns a list of per-location confusion matrices and precision / recall /
# accuracy vectors for both the model and the persistence baseline, plus the
# location vector giving the order of all the above.
GetPerformance_binary=function(Results,X,Aggregation = Aggregation,daysahead=daysahead,GroundTruthName){
  AllConfusionModel=list()
  AllPrecision_Model=NULL
  AllRecall_Model=NULL
  AllAccuracy_Model=NULL
  AllConfusionBaseline=list()
  AllPrecision_baseline=NULL
  AllRecall_baseline=NULL
  AllAccuracy_baseline=NULL
  for (Location in unique(as.character(Results$Location))){
    Location_Results=Results[Results$Location==Location,]
    Location_x=X[X$Location==Location,]
    # Day-over-day change of the ground truth, as a logical "did it increase".
    Increase=Location_x[,GroundTruthName]-Location_x[match(Location_x$Date-1,Location_x$Date),GroundTruthName]
    Increase=Increase>0
    # Persistence baseline: the increase/decrease observed one aggregation
    # step before each forecast's target date.
    Baseline_pred=Increase[match(Location_Results$Dates+Location_Results$DaysAhead*Location_Results$Aggregation-1*Aggregation,Location_x$Date)]
    Model_pred=as.numeric(as.character(Location_Results$Predictions))
    Y_test=Location_Results$Y_test
    # Apply the same NA mask to all three vectors (mask Y_test LAST).
    Baseline_pred=Baseline_pred[!is.na(Y_test)]
    Model_pred=Model_pred[!is.na(Y_test)]
    Y_test=Y_test[!is.na(Y_test)]
    ModelOutput=Get_confusion(Y_test,Model_pred)
    BaselineOutput=Get_confusion(Y_test, Baseline_pred)
    AllConfusionModel[[length(AllConfusionModel)+1]]=ModelOutput$confusionMatrix
    AllPrecision_Model=c(AllPrecision_Model,ModelOutput$Precision)
    AllRecall_Model=c(AllRecall_Model,ModelOutput$Recall)
    AllAccuracy_Model=c(AllAccuracy_Model,ModelOutput$Accuracy)
    AllConfusionBaseline[[length(AllConfusionBaseline)+1]]=BaselineOutput$confusionMatrix
    AllPrecision_baseline=c(AllPrecision_baseline,BaselineOutput$Precision)
    AllRecall_baseline=c(AllRecall_baseline,BaselineOutput$Recall)
    AllAccuracy_baseline=c(AllAccuracy_baseline,BaselineOutput$Accuracy)
  }
  return(list(AllConfusionModel=AllConfusionModel,
              AllPrecision_Model=AllPrecision_Model,
              AllRecall_Model=AllRecall_Model,
              AllAccuracy_Model=AllAccuracy_Model,
              AllConfusionBaseline=AllConfusionBaseline,
              AllPrecision_baseline=AllPrecision_baseline,
              AllRecall_baseline=AllRecall_baseline,
              AllAccuracy_baseline=AllAccuracy_baseline,
              Location= unique(as.character(Results$Location)))
  )
}