column         type            stats
content        large_string    lengths 0 to 6.46M
path           large_string    lengths 3 to 331
license_type   large_string    2 values
repo_name      large_string    lengths 5 to 125
language       large_string    1 value
is_vendor      bool            2 classes
is_generated   bool            2 classes
length_bytes   int64           4 to 6.46M
extension      large_string    75 values
text           string          lengths 0 to 6.46M
library(wrapr)

### Name: lambda
### Title: Build an anonymous function.
### Aliases: lambda

### ** Examples

# lambda-syntax: lambda(arg [, arg]*, body [, env=env])

# also works with lambda character as function name
# print(intToUtf8(0x03BB))

# example: square numbers
sapply(1:4, lambda(x, x^2))

# example more than one argument
f <- lambda(x, y, x+y)
f(2,4)

# brace interface syntax
f <- x := { x^2 }
f(5)

# formula interface syntax: [~arg|arg(~arg)+] := { body }
f <- x~y := { x + 3 * y }
f(5, 47)

path: /data/genthat_extracted_code/wrapr/examples/lambda.Rd.R
license_type: no_license
repo_name: surayaaramli/typeRrh
language: R
is_vendor: false
is_generated: false
length_bytes: 513
extension: r

#-------------------------------------------------------------
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
#-------------------------------------------------------------

args <- commandArgs(TRUE)
options(digits=22)
library("Matrix")

X <- readMM(paste(args[1], "X.mtx", sep=""))
v <- readMM(paste(args[1], "v.mtx", sep=""))
w <- readMM(paste(args[1], "w.mtx", sep=""))

R = (t(X) %*% (w*(X %*% v)));

writeMM(as(R, "CsparseMatrix"), paste(args[2], "R", sep=""));

path: /src/test/scripts/functions/binary/matrix/MapMultChainWeights.R
license_type: permissive
repo_name: stc-tester/incubator-systemml
language: R
is_vendor: false
is_generated: false
length_bytes: 1250
extension: r

#' List software creators
#'
#' @param json
#'     (list) Software metadata in JSON format
#'
#' @return
#'     (list) Software creators
#'
#' @examples
#' \dontrun{
#' json <- get_json('http://imcr.ontosoft.org/repository/software')
#' list_creators(json)
#' }
#'
list_creators <- function(json){
  return(
    lapply(
      seq_along(json),
      function(x){
        json[[x]]$value[['http://ontosoft.org/software#hasCreator']]$label
      }
    )
  )
}

path: /R/plot_authors.R
license_type: permissive
repo_name: IMCR-Hackathon/toolkit
language: R
is_vendor: false
is_generated: false
length_bytes: 453
extension: r

library(xlsx)  # assumed: write.xlsx below is not attached anywhere in the original script

# duration for data from read.asset
dur.f <- function(data) {
  # data length
  l.d <- dim(data)[1]
  # truncated times (original Polish comment: "obciete czasy")
  temp1 <- data[2:l.d, 1]
  temp0 <- data[1:(l.d - 1), 1]
  # result
  as.numeric(difftime(temp1, temp0, units = 'secs'))
}

# creating the list of durations
duration <- list()
for (i in 1:8) duration[[i]] <- dur.f(data.0[[i]])
names(duration) <- names(data.0)

# descriptive statistics
desc.dur <- function(dur) {
  c('number' = length(dur), summary(dur))
}

descriptive.0 <- matrix(NA, 8, 7)
for (i in 1:8) descriptive.0[i,] <- desc.dur(duration[[i]])
rownames(descriptive.0) <- names(data.0)
colnames(descriptive.0) <- names(desc.dur(duration[[1]]))

# save
setwd('./tables')
write.xlsx(descriptive.0, 'descriptive_0.xlsx', sheetName = '0 min')
setwd('./..')

path: /R code/92_tables.R
license_type: no_license
repo_name: amachno/epps
language: R
is_vendor: false
is_generated: false
length_bytes: 773
extension: r

%% BEGIN doc for: lpx_set_prob_name
\name{lpx_set_prob_name}
\alias{lpx_set_prob_name}
\title{Low-level interface to the GLPK function}
\description{
This function is a low-level interface to the GNU Linear Programming
Kit (GLPK) function \code{lpx_set_prob_name}. The parameter names,
associated types, and return values are identical to the GLPK function.
Please see the GLPK documentation for more info.
}
\usage{
lpx_set_prob_name(lp, name)
}
\arguments{
\item{lp}{see GLPK doc}
\item{name}{see GLPK doc}
}
\details{
This function is the interface to the R-glpk C function
\code{R_lpx_set_prob_name} which converts R objects and calls
GLPK \code{lpx_set_prob_name}.
}
\references{
The GNU GLPK home page at http://www.gnu.org/software/glpk/glpk.html
}
\author{
Lopaka Lee <rclee@usgs.gov> (R-GLPK)

Andrew Makhorin <mao@gnu.org> (GLPK)
}
\note{
The generation of this function code, documentation, and R-API code
has largely been automated. Therefore, not all functions have been
fully tested. Beware and please report any bugs or inconsistencies.

The full documentation for GLPK has not been converted to Rd format --
it is available in the GLPK source distribution.

Inquiries regarding this interface (R-GLPK) should NOT be sent
to GNU GLPK mailing lists.
}
%\seealso{
%}
%\examples{
%}
\keyword{optimize}
%% END doc for: lpx_set_prob_name

path: /man/lpx_set_prob_name.Rd
license_type: no_license
repo_name: cran/glpk
language: R
is_vendor: false
is_generated: false
length_bytes: 1461
extension: rd

library("seqinr")
library(reshape2)
library(ggplot2)

no.diff.list <- c("?", "-", "N", "n")

get.diff <- function(p){
  algn <- read.fasta(file = p)
  n <- names(algn)
  seq1 <- as.character(get(n[1], algn))
  seq2 <- as.character(get(n[2], algn))
  seq3 <- as.character(get(n[3], algn))
  l1 <- sapply(c(1:length(seq1)), function(x){
    if(seq1[x] != seq2[x]){
      if(!(seq1[x] %in% no.diff.list && seq2[x] %in% no.diff.list))
        c(seq1[x], seq2[x])
    }
  })
  l2 <- sapply(c(1:length(seq1)), function(x){
    if(seq2[x] != seq3[x]){
      if(!(seq2[x] %in% no.diff.list && seq3[x] %in% no.diff.list))
        c(seq2[x], seq3[x])
    }
  })
  l3 <- sapply(c(1:length(seq1)), function(x){
    if(seq3[x] != seq1[x]){
      if(!(seq3[x] %in% no.diff.list && seq1[x] %in% no.diff.list))
        c(seq3[x], seq1[x])
    }
  })
  l <- c(length(Filter(Negate(is.null), l1)),
         length(Filter(Negate(is.null), l2)),
         length(Filter(Negate(is.null), l3)))
  return(l)
}

files <- list.files(path="../data/alignments/", pattern="*.fasta", full.names=T, recursive=FALSE)
diff <- lapply(files, function(x){ get.diff(x) })
diff <- data.frame(diff)
colnames(diff) <- sapply(files, function(x){
  y <- strsplit(strsplit(x, "/")[[1]][5], "\\.")[[1]][1]
  paste(paste(unlist(strsplit(y, "_t")), collapse = " - Thres: "), "%")
})
rownames(diff) <- c("Geneious vs iVar", "iVar vs Reference", "Geneious vs Reference")

pdf("../plots/consensus.pdf")
ggplot(melt(t(diff)), aes(y=Var1, x=Var2)) +
  geom_tile(aes(fill = value), color="#000000") +
  geom_text(aes(y=Var1, x = Var2, label = value)) +
  scale_fill_gradient(low="#FFFFFF", high = "#FF0000") +
  theme_classic(base_size=18) +
  # fixed: original used `axis.line <- element_blank()`, which assigns a variable
  # instead of setting the theme element
  theme(axis.line = element_blank(),
        axis.text.x = element_text(angle = 90, hjust = 1)) +
  xlab("Nucleotide Differences") +
  ylab("Consensus Sequences") +
  ggtitle("Validation of consensus calling")
dev.off()

path: /scripts/compare_consensus.R
license_type: no_license
repo_name: gkarthik/ivar-validation
language: R
is_vendor: false
is_generated: false
length_bytes: 1975
extension: r

% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/isNanScalarOrNull.R
\name{isNanScalarOrNull}
\alias{isNanScalarOrNull}
\title{Wrapper for the checkarg function, using specific parameter settings.}
\usage{
isNanScalarOrNull(argument, default = NULL, stopIfNot = FALSE,
  message = NULL, argumentName = NULL)
}
\arguments{
\item{argument}{See checkarg function.}

\item{default}{See checkarg function.}

\item{stopIfNot}{See checkarg function.}

\item{message}{See checkarg function.}

\item{argumentName}{See checkarg function.}
}
\value{
See checkarg function.
}
\description{
This function can be used in 3 ways:\enumerate{
\item Return TRUE or FALSE depending on whether the argument checks are passed.
This is suitable e.g. for if statements that take further action if the
argument does not pass the checks.\cr
\item Throw an exception if the argument does not pass the checks. This is
suitable e.g. when no further action needs to be taken other than throwing an
exception if the argument does not pass the checks.\cr
\item Same as (2) but by supplying a default value, a default can be assigned
in a single statement, when the argument is NULL. The checks are still
performed on the returned value, and an exception is thrown when not passed.\cr
}
}
\details{
Actual call to checkarg: checkarg(argument, "N", default = default,
stopIfNot = stopIfNot, nullAllowed = TRUE, n = 1, zeroAllowed = FALSE,
negativeAllowed = FALSE, positiveAllowed = FALSE, nonIntegerAllowed = TRUE,
naAllowed = FALSE, nanAllowed = TRUE, infAllowed = FALSE, message = message,
argumentName = argumentName)
}
\examples{
isNanScalarOrNull(NaN) # returns TRUE (argument is valid)
isNanScalarOrNull("X") # returns FALSE (argument is invalid)
#isNanScalarOrNull("X", stopIfNot = TRUE) # throws exception with message defined by message and argumentName parameters
}

path: /man/isNanScalarOrNull.Rd
license_type: no_license
repo_name: cran/checkarg
language: R
is_vendor: false
is_generated: true
length_bytes: 1972
extension: rd

testlist <- list(
  a = 0L,
  b = 0L,
  x = c(720693L, -1073741825L, 2815L, 901722879L, -212L, 1987517265L,
        0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L,
        0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L,
        0L, 0L, 0L, 0L, 0L)
)
result <- do.call(grattan:::anyOutside, testlist)
str(result)

path: /grattan/inst/testfiles/anyOutside/libFuzzer_anyOutside/anyOutside_valgrind_files/1610055856-test.R
license_type: no_license
repo_name: akhikolla/updated-only-Issues
language: R
is_vendor: false
is_generated: false
length_bytes: 313
extension: r

#' Plot mean Cropper values and pointer years
#'
#' @description The function creates a bar plot of mean Cropper values from a \code{list} of the type as produced by \code{\link{pointer.norm}} and highlights years identified as pointer years.
#'
#' @usage norm.plot(list.name, start.yr = NULL, end.yr = NULL,
#'           sd.disp = FALSE, x.tick.major = 10, x.tick.minor = 5)
#'
#' @param list.name a \code{list} as produced by \code{\link{pointer.norm}}
#' @param start.yr an \code{integer} specifying the first year to be plotted. Defaults to the first year included in the \code{out} component of the \code{list} if \code{\var{start.yr}} is \code{NULL}.
#' @param end.yr an \code{integer} specifying the last year to be plotted. Defaults to the last year included in the \code{out} component of the \code{list} if \code{\var{end.yr}} is \code{NULL}.
#' @param sd.disp a \code{logical} specifying whether error bars (stdev) should be displayed. Defaults to FALSE.
#' @param x.tick.major an \code{integer} controlling the major x-axis tick labels. Defaults to 10 years.
#' @param x.tick.minor an \code{integer} controlling the minor x-axis ticks. Defaults to 5 years.
#'
#' @details The function makes a plot showing mean Cropper values; pointer years are indicated with dark-gray bars. If event years were defined using \code{method.thresh "Neuwirth"} (\code{\link{pointer.norm}}), different tones of gray indicate weak, strong and extreme pointer years, based on the most common event year class. Error bars can be set.
#'
#' @return
#' Bar plot.
#'
#' @author Marieke van der Maaten-Theunissen and Ernst van der Maaten.
#'
#' @examples ## Plot mean Cropper values and pointer years (method "Cropper")
#' data(s033)
#' py_c <- pointer.norm(s033, window = 5, method.thresh = "Cropper",
#'                      series.thresh = 75)
#' norm.plot(py_c, start.yr = 1950, end.yr = NULL,
#'           sd.disp = FALSE, x.tick.major = 10, x.tick.minor = 5)
#'
#' ## Plot mean Cropper values and pointer years (method "Neuwirth")
#' data(s033)
#' py_n <- pointer.norm(s033, window = 5, method.thresh = "Neuwirth",
#'                      series.thresh = 75)
#' norm.plot(py_n, start.yr = 1950, end.yr = NULL,
#'           sd.disp = FALSE, x.tick.major = 10, x.tick.minor = 5)
#'
#' @import ggplot2
#' @importFrom plyr round_any
#'
#' @export norm.plot
#'
norm.plot <- function(list.name, start.yr = NULL, end.yr = NULL, sd.disp = FALSE,
                      x.tick.major = 10, x.tick.minor = 5)
{
  stopifnot(is.list(list.name))
  if(class(list.name)[1] != "pointer.norm") {
    stop("'list.name' is no list output of function pointer.norm")
  }
  if(is.data.frame(list.name$out) == FALSE) {
    stop("'list.name' is no list output of function pointer.norm")
  }
  if("Cvalues_mean" %in% colnames(list.name$out) == FALSE) {
    stop("'list.name' is no list output of function pointer.norm")
  }
  if(nrow(list.name$out) < 2) {
    stop("'list.name'$out contains < 2 years and is not displayed")
  }
  if(!is.null(start.yr) && start.yr < min(list.name$out[, "year"])) {
    stop("'start.yr' is out of bounds. By default (start.yr = NULL) the first year is displayed")
  }
  if(!is.null(end.yr) && end.yr > max(list.name$out[, "year"])) {
    stop("'end.yr' is out of bounds. By default (end.yr = NULL) the last year is displayed")
  }
  if(x.tick.minor > x.tick.major) {
    stop("'x.tick.minor' should be smaller than 'x.tick.major'")
  }

  start.yr2 <- ifelse(length(start.yr) != 0, start.yr, min(list.name$out[, "year"]))
  end.yr2 <- ifelse(length(end.yr) != 0, end.yr, max(list.name$out[, "year"]))
  start.yr3 <- round_any(start.yr2, 10, f = floor)
  end.yr3 <- round_any(end.yr2, 5, f = ceiling)
  data2 <- list.name$out[which(list.name$out[, "year"] == start.yr2):which(list.name$out[, "year"] == end.yr2), ]
  data3 <- as.data.frame(data2)
  year <- nature <- Cvalues_mean <- Cvalues_sd <- int.class <- NULL
  limits <- aes(ymax = Cvalues_mean + Cvalues_sd, ymin = Cvalues_mean - Cvalues_sd)

  if(colnames(data3)[3] == "perc.pos") {
    nat.levels <- c(-1, 0, 1)
    fill.levels <- c("#636363", "#f0f0f0", "#636363")
    if(sd.disp) {
      pl <- ggplot(data3, aes(x = year, y = Cvalues_mean, fill = factor(nature)))
      pl + geom_bar(stat = "identity", position = "identity", colour = "black") +
        scale_fill_manual(limits = nat.levels, values = fill.levels) +
        guides(fill = FALSE) +
        scale_x_continuous(breaks = seq(start.yr3, end.yr3, x.tick.major),
                           minor_breaks = seq(start.yr3, end.yr3, x.tick.minor),
                           limits = c(start.yr3 - 1, end.yr3 + 1)) +
        ylab("mean Cropper value") + theme_bw() +
        geom_errorbar(limits, width = 0.25, colour = "gray60")
    }
    else {
      pl <- ggplot(data3, aes(x = year, y = Cvalues_mean, fill = factor(nature)))
      pl + geom_bar(stat = "identity", position = "identity", colour = "black") +
        scale_fill_manual(limits = nat.levels, values = fill.levels) +
        guides(fill = FALSE) +
        scale_x_continuous(breaks = seq(start.yr3, end.yr3, x.tick.major),
                           minor_breaks = seq(start.yr3, end.yr3, x.tick.minor),
                           limits = c(start.yr3 - 1, end.yr3 + 1)) +
        ylab("mean Cropper value") + theme_bw()
    }
  }
  else {
    data3[, 12] <- ifelse(data2[, "nature"] == (-1),
                          max.col(data2[, c(1, 2, 5, 4, 3, 6:11)][, 6:8], ties.method = "first"),
                          ifelse(data2[, "nature"] == 1,
                                 max.col(data2[, c(1, 2, 5, 4, 3, 6:11)][, 3:5], ties.method = "first"), 0))
    data3[, 12] <- ifelse(data2[, "nature"] == (-1), paste("-", data3[, 12], sep = ''), data3[, 12])
    colnames(data3)[12] <- "int.class"
    int.levels <- c(-3, -2, -1, 0, 1, 2, 3)
    fill.levels <- c("black", "#636363", "#bdbdbd", "#f0f0f0", "#bdbdbd", "#636363", "black")
    if(sd.disp) {
      pl <- ggplot(data3, aes(x = year, y = Cvalues_mean, fill = factor(int.class)))
      pl + geom_bar(stat = "identity", position = "identity", colour = "black") +
        scale_fill_manual(limits = int.levels, values = fill.levels) +
        guides(fill = FALSE) +
        scale_x_continuous(breaks = seq(start.yr3, end.yr3, x.tick.major),
                           minor_breaks = seq(start.yr3, end.yr3, x.tick.minor),
                           limits = c(start.yr3 - 1, end.yr3 + 1)) +
        ylab("mean Cropper value") + theme_bw() +
        geom_errorbar(limits, width = 0.25, colour = "gray60")
    }
    else {
      pl <- ggplot(data3, aes(x = year, y = Cvalues_mean, fill = factor(int.class)))
      pl + geom_bar(stat = "identity", position = "identity", colour = "black") +
        scale_fill_manual(limits = int.levels, values = fill.levels) +
        guides(fill = FALSE) +
        scale_x_continuous(breaks = seq(start.yr3, end.yr3, x.tick.major),
                           minor_breaks = seq(start.yr3, end.yr3, x.tick.minor),
                           limits = c(start.yr3 - 1, end.yr3 + 1)) +
        ylab("mean Cropper value") + theme_bw()
    }
  }
}

path: /pointRes/R/norm.plot.R
license_type: no_license
repo_name: ingted/R-Examples
language: R
is_vendor: false
is_generated: false
length_bytes: 7137
extension: r

#### load packages
require(ggplot2)
require(GGally)
require(reshape2)
require(lme4)
require(compiler)
require(parallel)
require(boot)
require(caret)
require(ROCR)
require(xlsx)
require(data.table)

modeldata = read.csv('G:/Kang Zhou/Desktop/Rail defect model/result/random forests/data 01mile model.csv')
dat1 = modeldata[, c(1:7, 9:10, 12:14, 16:18, 21:23, 25, 27:28, 31:32, 34:41)]
dat1 = within(dat1, {
  Prefix = factor(Prefix)
  Year = factor(Year)
  Track_Type = factor(Track_Type)
})

# min-max scale the numeric predictors
maxs = apply(dat1[, c(8:30)], 2, max)
mins = apply(dat1[, c(8:30)], 2, min)
scaled = as.data.frame(scale(dat1[, c(8:30)], center = mins, scale = maxs - mins))
data.use = cbind(dat1[, c(1:7, 31)], scaled)
n = names(data.use)[c(9:14, 16:31)]

# 5-fold cross-validation with a mixed-effects logistic model (random intercept by Prefix)
K.fold = caret::createFolds(data.use$Defect_Not, k = 5, list = TRUE, returnTrain = T)
train_1 = data.use[K.fold[[1]], ]
test_1 = data.use[-K.fold[[1]], ]
m.1 = glmer(Defect_Not ~ Ballast_last + Ballast_last2 + Car_Pass_last + Car_Pass_last2 + Curve_Degree_D +
              Curve_Tangent_H + Curve_Tangent_T + All_Defect_last + All_Defect_last2 + Geo_Defects_combined +
              Grade_Percent + Grinding_last + Grinding_last2 + Rail_Age + Rail_Quality_N + Rail_Size_lbs_yard +
              Speed_mph + Traf_Den_current_MGT + Traf_Den_last2_MGT + Traf_Den_last_MGT + Turnout + VTI_last +
              (1 | Prefix),
            data = train_1, family = binomial(link = logit),
            control = glmerControl(optimizer = "bobyqa", optCtrl = list(maxfun = 5e5)), nAGQ = 1)

for (i in 2:5) {
  int.train = data.use[K.fold[[i]], ]
  code.tr = paste("train_", i, sep = "")
  assign(code.tr, int.train)
  int.test = data.use[-K.fold[[i]], ]
  code.te = paste("test_", i, sep = "")
  assign(code.te, int.test)
  mm = glmer(Defect_Not ~ Ballast_last + Ballast_last2 + Car_Pass_last + Car_Pass_last2 + Curve_Degree_D +
               Curve_Tangent_H + Curve_Tangent_T + All_Defect_last + All_Defect_last2 + Geo_Defects_combined +
               Grade_Percent + Grinding_last + Grinding_last2 + Rail_Age + Rail_Quality_N + Rail_Size_lbs_yard +
               Speed_mph + Traf_Den_current_MGT + Traf_Den_last2_MGT + Traf_Den_last_MGT + Turnout + VTI_last +
               (1 | Prefix),
             data = int.train, family = binomial(link = logit),
             control = glmerControl(optimizer = "bobyqa", optCtrl = list(maxfun = 5e5)), nAGQ = 1)
  code.model = paste("mixed", i, sep = "")
  assign(code.model, mm)
}

fitted.prob1 = predict(m.1, test_1, type = 'response')
fitted.prob2 = predict(mixed2, test_2, type = 'response')
fitted.prob3 = predict(mixed3, test_3, type = 'response')
fitted.prob4 = predict(mixed4, test_4, type = 'response')
fitted.prob5 = predict(mixed5, test_5, type = 'response')
test_1['Fit.prob'] = fitted.prob1
test_2['Fit.prob'] = fitted.prob2
test_3['Fit.prob'] = fitted.prob3
test_4['Fit.prob'] = fitted.prob4
test_5['Fit.prob'] = fitted.prob5

result = data.frame("Defect" = integer(), "Nondefect" = integer(),
                    "Ratio" = numeric(), "AUC" = numeric(), "Gen.sens" = numeric(),
                    "Gen.spec" = numeric(), "Gen.prec" = numeric(),
                    "Gen.cutoff" = numeric(), "F1.sens" = numeric(), "F1.spec" = numeric(),
                    "F1.prec" = numeric(), "F1.cutoff" = numeric(),
                    "Geometry.sens" = numeric(), "Geometry.spec" = numeric(),
                    "Geometry.prec" = numeric(), "Geometry.cutoff" = numeric())

Test.whole = rbind(test_1, test_2, test_3, test_4, test_5)
pred.basic = prediction(Test.whole['Fit.prob'], Test.whole['Defect_Not'])
perf = performance(pred.basic, 'tpr', 'fpr')
auc = performance(pred.basic, measure = 'auc')
opt.cut = function(perf, pred.basic) {
  cut.ind = mapply(FUN = function(x, y, p) {
    d = (x - 0)^2 + (y - 1)^2
    ind = which(d == min(d))
    c(sensitivity = y[[ind]], specificity = 1 - x[[ind]], cutoff = p[[ind]], ind = ind)
  }, perf@x.values, perf@y.values, pred.basic@cutoffs)
}
prec = performance(pred.basic, 'prec')
gen.re = opt.cut(perf, pred.basic)
result[1, 1] = min(table(Test.whole$Defect_Not))
result[1, 2] = max(table(Test.whole$Defect_Not))
result[1, 3] = result[1, 2] / result[1, 1]
result[1, 4] = auc@y.values
result[1, 5] = gen.re[1, 1]
result[1, 6] = gen.re[2, 1]
result[1, 8] = gen.re[3, 1]
result[1, 7] = unlist(prec@y.values)[gen.re[4, 1]]

sen = unlist(perf@y.values)
spec = 1 - unlist(perf@x.values)
geometry.index = which.max(sqrt(sen * spec))
precision = unlist(prec@y.values)
geometry.cutoff = unlist(pred.basic@cutoffs)
result[1, 13] = sen[geometry.index]
result[1, 14] = spec[geometry.index]
result[1, 15] = precision[geometry.index]
result[1, 16] = geometry.cutoff[geometry.index]

F1.perf = performance(pred.basic, 'ppv', 'tpr')
tpr = unlist(F1.perf@x.values)
F1.precision = unlist(F1.perf@y.values)
F1.ind = which.max(tpr * F1.precision / (tpr + F1.precision))
specificity = performance(pred.basic, 'spec')
F1.cutoff = unlist(pred.basic@cutoffs)
result[1, 9] = tpr[F1.ind]
result[1, 10] = unlist(specificity@y.values)[F1.ind]
result[1, 11] = F1.precision[F1.ind]
result[1, 12] = F1.cutoff[F1.ind]

write.xlsx(result, file = 'Mixed effect for 01 mile data criteria.xlsx', sheetName = 'criteria')
coef.1 = coef(summary(m.1))
coef.2 = coef(summary(mixed2))
coef.3 = coef(summary(mixed3))
coef.4 = coef(summary(mixed4))
coef.5 = coef(summary(mixed5))
write.xlsx(coef.1, file = 'Mixed effect for 01 mile data criteria.xlsx', sheetName = 'fold 1 coef', append = T)
write.xlsx(coef.2, file = 'Mixed effect for 01 mile data criteria.xlsx', sheetName = 'fold 2 coef', append = T)
write.xlsx(coef.3, file = 'Mixed effect for 01 mile data criteria.xlsx', sheetName = 'fold 3 coef', append = T)
write.xlsx(coef.4, file = 'Mixed effect for 01 mile data criteria.xlsx', sheetName = 'fold 4 coef', append = T)
write.xlsx(coef.5, file = 'Mixed effect for 01 mile data criteria.xlsx', sheetName = 'fold 5 coef', append = T)

Test.whole['Length'] = 0.1
test.tabledata = as.data.table(Test.whole)
check.prefix = test.tabledata[, lapply(.(Defect_Not, Fit.prob, Length), function(x) sum(x, na.rm = T)), by = 'Prefix']
check.div = test.tabledata[, lapply(.(Defect_Not, Fit.prob, Length), function(x) sum(x, na.rm = T)), by = 'Division,Year']
check.prefix.y = test.tabledata[, lapply(.(Defect_Not, Fit.prob, Length), function(x) sum(x, na.rm = T)), by = 'Prefix,Year']
write.xlsx(check.prefix, file = 'Mixed effect for 01 mile data criteria.xlsx', sheetName = 'prefix validation', append = T)
write.xlsx(check.div, file = 'Mixed effect for 01 mile data criteria.xlsx', sheetName = 'division validation', append = T)
write.xlsx(check.prefix.y, file = 'Mixed effect for 01 mile data criteria.xlsx', sheetName = 'prefix-year validation', append = T)

path: /mixed effect for data 01 mile data.R
license_type: no_license
repo_name: vincent-kangzhou/Defect-Detection
language: R
is_vendor: false
is_generated: false
length_bytes: 6337
extension: r

% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/load-value.R
\name{load_value}
\alias{load_value}
\title{Load table}
\usage{
load_value(rowname, colname = "estimate", folders = get_folders(),
  name = "estimates", type = "results", digits = 6)
}
\arguments{
\item{rowname}{string of name of row containing value}

\item{colname}{string of name of column containing value}

\item{folders}{string of folders containing table - separate folders with '/'}

\item{name}{the name of the table (csv extension is not required)}

\item{type}{the table type i.e. results (default) or analysis etc.}

\item{digits}{count of number of significant digits}
}
\value{
a data.frame of the table
}
\description{
Reads in csv file from current table folder.
}

path: /man/load_value.Rd
license_type: no_license
repo_name: poissonconsulting/poiscon
language: R
is_vendor: false
is_generated: true
length_bytes: 772
extension: rd

# Intrinio API
#
# Welcome to the Intrinio API! Through our Financial Data Marketplace, we offer a wide selection
# of financial data feed APIs sourced by our own proprietary processes as well as from many data
# vendors. For a complete API request / response reference please view the
# [Intrinio API documentation](https://docs.intrinio.com/documentation/api_v2). If you need
# additional help in using the API, please visit the [Intrinio website](https://intrinio.com)
# and click on the chat icon in the lower right corner.
#
# OpenAPI spec version: 2.45.0
#
# Generated by: https://github.com/swagger-api/swagger-codegen.git

#' VolumePriceTrendTechnicalValue Class
#'
#' @field date_time
#' @field vpt
#'
#' @importFrom R6 R6Class
#' @importFrom jsonlite fromJSON toJSON
#' @export
VolumePriceTrendTechnicalValue <- R6::R6Class(
  'VolumePriceTrendTechnicalValue',
  public = list(
    `date_time` = NA,
    `vpt` = NA,
    initialize = function(`date_time`, `vpt`){
      if (!missing(`date_time`)) {
        self$`date_time` <- `date_time`
      }
      if (!missing(`vpt`)) {
        self$`vpt` <- `vpt`
      }
    },
    toJSON = function() {
      VolumePriceTrendTechnicalValueObject <- list()
      if (!is.null(self$`date_time`)) {
        # If the object is an empty list or a list of R6 Objects
        if (is.list(self$`date_time`) && ((length(self$`date_time`) == 0) || ((length(self$`date_time`) != 0 && R6::is.R6(self$`date_time`[[1]]))))) {
          VolumePriceTrendTechnicalValueObject[['date_time']] <- lapply(self$`date_time`, function(x) x$toJSON())
        } else {
          VolumePriceTrendTechnicalValueObject[['date_time']] <- jsonlite::toJSON(self$`date_time`, auto_unbox = TRUE)
        }
      }
      if (!is.null(self$`vpt`)) {
        # If the object is an empty list or a list of R6 Objects
        if (is.list(self$`vpt`) && ((length(self$`vpt`) == 0) || ((length(self$`vpt`) != 0 && R6::is.R6(self$`vpt`[[1]]))))) {
          VolumePriceTrendTechnicalValueObject[['vpt']] <- lapply(self$`vpt`, function(x) x$toJSON())
        } else {
          VolumePriceTrendTechnicalValueObject[['vpt']] <- jsonlite::toJSON(self$`vpt`, auto_unbox = TRUE)
        }
      }
      VolumePriceTrendTechnicalValueObject
    },
    fromJSON = function(VolumePriceTrendTechnicalValueJson) {
      VolumePriceTrendTechnicalValueObject <- jsonlite::fromJSON(VolumePriceTrendTechnicalValueJson)
      if (!is.null(VolumePriceTrendTechnicalValueObject$`date_time`)) {
        self$`date_time` <- VolumePriceTrendTechnicalValueObject$`date_time`
      }
      if (!is.null(VolumePriceTrendTechnicalValueObject$`vpt`)) {
        self$`vpt` <- VolumePriceTrendTechnicalValueObject$`vpt`
      }
    },
    toJSONString = function() {
      jsonlite::toJSON(self$toJSON(), auto_unbox = TRUE, pretty = TRUE)
    },
    fromJSONString = function(VolumePriceTrendTechnicalValueJson) {
      VolumePriceTrendTechnicalValueObject <- jsonlite::fromJSON(VolumePriceTrendTechnicalValueJson, simplifyDataFrame = FALSE)
      self$setFromList(VolumePriceTrendTechnicalValueObject)
    },
    setFromList = function(listObject) {
      if (!is.null(listObject$`date_time`)) {
        self$`date_time` <- as.POSIXct(listObject$`date_time`, tz = "GMT", "%Y-%m-%dT%H:%M:%OS")
      } else {
        self$`date_time` <- NA
      }
      if (!is.null(listObject$`vpt`)) {
        self$`vpt` <- listObject$`vpt`
      } else {
        self$`vpt` <- NA
      }
    },
    getAsList = function() {
      listObject = list()
      listObject[["date_time"]] <- self$`date_time`
      listObject[["vpt"]] <- self$`vpt`
      return(listObject)
    }
  )
)

path: /R/VolumePriceTrendTechnicalValue.r
license_type: no_license
repo_name: intrinio/r-sdk
language: R
is_vendor: false
is_generated: false
length_bytes: 3679
extension: r

#' Relocate a column to a new position
#'
#' @description
#' Move a column or columns to a new position
#'
#' @param .df A data.frame or data.table
#' @param ... A selection of columns to move. `tidyselect` compatible.
#' @param .before Column to move selection before
#' @param .after Column to move selection after
#'
#' @export
#'
#' @examples
#' df <- data.table(
#'   a = 1:3,
#'   b = 1:3,
#'   c = c("a", "a", "b"),
#'   d = c("a", "a", "b")
#' )
#'
#' df %>%
#'   relocate(c, .before = b)
#'
#' df %>%
#'   relocate(a, b, .after = c)
#'
#' df %>%
#'   relocate(where(is.numeric), .after = c)
relocate <- function(.df, ..., .before = NULL, .after = NULL) {
  .df <- .df_as_tidytable(.df)

  order <- eval_relocate(
    expr(c(...)),
    .df,
    before = enquo(.before),
    after = enquo(.after),
    before_arg = ".before",
    after_arg = ".after"
  )

  names <- names(order)
  out <- df_col_order(.df, order)
  df_set_names(out, names)
}

#' @export
#' @keywords internal
#' @inherit relocate
relocate. <- function(.df, ..., .before = NULL, .after = NULL) {
  deprecate_dot_fun()
  relocate(.df, ..., .before = {{ .before }}, .after = {{ .after }})
}

path: /R/relocate.R
license_type: no_license
repo_name: cran/tidytable
language: R
is_vendor: false
is_generated: false
length_bytes: 1167
extension: r

#! /jgi/tools/bin/Rscript

instructions <- "
Creates a table with motif locations

Parameters:
  reference_file:  FASTA reference file
  motif_file:      motifs.csv output file from MotifMaker
  outfile:         name of output file

Creates a table that looks like this:

  strand  start  motif  onTarget  seqid
       +   1626   GATC        On      1
       +   1774   GATC        On      1
       +   1924   GATC        On      1
       +   2094   GATC        On      1
       +   2117   GATC        On      1
"

makeAnnotationTable <- function(reference_file, motif_file, outfile) {
  mt = read.csv(motif_file)
  motifs <- as.character(mt$motifString)
  # centerPos in motifs.csv is zero-indexed, genomeAnnotation function expects 1-indexed
  positions <- mt$centerPos + 1
  dna_seq <- read.DNAStringSet(reference_file)
  genomeAnnotations <- genomeAnnotation(dna_seq, motifs, positions)
  table(genomeAnnotations$motif)
  write.table(genomeAnnotations, file = outfile, sep = "\t", row.names = FALSE, quote = FALSE)
}

args <- commandArgs(TRUE)
if(length(args) == 3) {
  source('scripts.R')
  reference_file <- args[1]
  motif_file <- args[2]
  outfile <- args[3]
  makeAnnotationTable(reference_file, motif_file, outfile)
  q(save = "no", status = 0)
}

write("USAGE: Rscript annotate_motifs.R <reference_file> <motif_file> <outfile>", stdout())
write(instructions, stdout())

path: /annotate_motifs.R
license_type: no_license
repo_name: mlbendall/motif-finding
language: R
is_vendor: false
is_generated: false
length_bytes: 1358
extension: r

% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/bijectors.R
\name{tfb_gumbel}
\alias{tfb_gumbel}
\title{Compute \code{Y = g(X) = exp(-exp(-(X - loc) / scale))}.}
\usage{
tfb_gumbel(loc = 0, scale = 1, validate_args = FALSE, name = "gumbel")
}
\arguments{
\item{loc}{Float-like Tensor that is the same dtype and is broadcastable with scale.
This is loc in \code{Y = g(X) = exp(-exp(-(X - loc) / scale))}.}

\item{scale}{Positive Float-like Tensor that is the same dtype and is broadcastable
with loc. This is scale in \code{Y = g(X) = exp(-exp(-(X - loc) / scale))}.}

\item{validate_args}{Logical, default FALSE. Whether to validate input with asserts.
If validate_args is FALSE, and the inputs are invalid, correct behavior is not guaranteed.}

\item{name}{name prefixed to Ops created by this class.}
}
\description{
This bijector maps inputs from \code{[-inf, inf]} to \code{[0, 1]}. The inverse of
the bijector applied to a uniform random variable \code{X ~ U(0, 1)} gives back a
random variable with the
\href{https://en.wikipedia.org/wiki/Gumbel_distribution}{Gumbel distribution}:
}
\details{
\code{Y ~ Gumbel(loc, scale)}

\code{pdf(y; loc, scale) = exp(-( (y - loc) / scale + exp(- (y - loc) / scale) ) ) / scale}
}
\seealso{
Other bijectors: \code{\link{masked_autoregressive_default_template}},
\code{\link{masked_dense}}, \code{\link{real_nvp_default_template}},
\code{\link{tfb_absolute_value}}, \code{\link{tfb_affine_linear_operator}},
\code{\link{tfb_affine_scalar}}, \code{\link{tfb_affine}},
\code{\link{tfb_batch_normalization}}, \code{\link{tfb_blockwise}},
\code{\link{tfb_chain}}, \code{\link{tfb_cholesky_outer_product}},
\code{\link{tfb_cholesky_to_inv_cholesky}},
\code{\link{tfb_discrete_cosine_transform}}, \code{\link{tfb_expm1}},
\code{\link{tfb_exp}}, \code{\link{tfb_fill_triangular}},
\code{\link{tfb_identity}}, \code{\link{tfb_inline}},
\code{\link{tfb_invert}}, \code{\link{tfb_kumaraswamy}},
\code{\link{tfb_masked_autoregressive_flow}},
\code{\link{tfb_matrix_inverse_tri_l}}, \code{\link{tfb_matvec_lu}},
\code{\link{tfb_normal_cdf}}, \code{\link{tfb_ordered}},
\code{\link{tfb_permute}}, \code{\link{tfb_power_transform}},
\code{\link{tfb_real_nvp}}, \code{\link{tfb_reciprocal}},
\code{\link{tfb_reshape}}, \code{\link{tfb_scale_tri_l}},
\code{\link{tfb_sigmoid}}, \code{\link{tfb_sinh_arcsinh}},
\code{\link{tfb_softmax_centered}}, \code{\link{tfb_softplus}},
\code{\link{tfb_softsign}}, \code{\link{tfb_square}},
\code{\link{tfb_tanh}}, \code{\link{tfb_transform_diagonal}},
\code{\link{tfb_transpose}}, \code{\link{tfb_weibull}}
}
\concept{bijectors}
/man/tfb_gumbel.Rd
permissive
kevinykuo/tfprobability-1
R
false
true
2,685
rd
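A minimal usage sketch for the bijector documented above, assuming the tfprobability package and a working TensorFlow backend are installed (the parameter and input values are illustrative only):

library(tfprobability)
b <- tfb_gumbel(loc = 0, scale = 1)
y <- tfb_forward(b, c(-1, 0, 1))  # maps the real line into (0, 1)
x <- tfb_inverse(b, y)            # recovers the original values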
library(gMCP)

### Name: bonferroni.test
### Title: Weighted Bonferroni-test
### Aliases: bonferroni.test

### ** Examples

bonferroni.test(pvalues=c(0.1,0.2,0.05), weights=c(0.5,0.5,0))
bonferroni.test(pvalues=c(0.1,0.2,0.05), weights=c(0.5,0.5,0), adjPValues=FALSE)
/data/genthat_extracted_code/gMCP/examples/bonferroni.test.Rd.R
no_license
surayaaramli/typeRrh
R
false
false
274
r
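As a sanity check on the example above, the weighted Bonferroni adjustment can be reproduced by hand. Here the adjusted p-value for hypothesis i is taken to be min(1, p_i / w_i), the textbook definition; this is an assumption about gMCP's internal convention, which may differ for zero weights:

p <- c(0.1, 0.2, 0.05)
w <- c(0.5, 0.5, 0)
pmin(1, ifelse(w > 0, p / w, Inf))  # 0.2 0.4 1.0 - the zero-weight hypothesis can never be rejected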
require(shiny)
require(ggplot2)
require(MASS)

ui <- fluidPage(
  titlePanel("Boston"),
  sidebarLayout(
    sidebarPanel(
      h4("Feature Fit on 'medv'"),
      checkboxGroupInput(inputId = "features", "'lstat' fitted by default",
                         choices = c("crim", "zn", "indus", "chas", "nox", "rm",
                                     "age", "dis", "rad", "tax", "ptratio", "black"),
                         selected = c("crim", "zn", "indus", "chas", "nox", "rm",
                                      "age", "dis", "rad", "tax", "ptratio", "black"))
    ),
    mainPanel(
      plotOutput("plot"),
      verbatimTextOutput("summary")
    )
  )
)

server <- function(input, output){
  output$plot <- renderPlot({
    default = "medv~lstat"
    def_append = ""
    for (i in input$features) { def_append = paste(def_append, "+", i, sep = "") }
    formula = paste(default, def_append, sep = "")
    fit = lm(formula, Boston)
    p <- ggplot(fit, aes(.fitted, .resid))
    p + geom_point() +
      stat_smooth(method = "loess") +
      geom_hline(yintercept = 0, col = "red", linetype = "dashed") +
      xlab("Fitted values") + ylab("Residuals") +
      ggtitle("Residual vs Fitted Plot")
  })
  output$summary <- renderPrint({
    default = "medv~lstat"
    def_append = ""
    for (i in input$features) { def_append = paste(def_append, "+", i, sep = "") }
    formula = paste(default, def_append, sep = "")
    ols <- lm(formula, Boston)
    print(summary(ols))
  })
}

shinyApp(ui = ui, server = server)
/ch03_Linear_Regression/shiny/boston_resid/App.R
no_license
GucciTheCarpenter/ISLR_labs
R
false
false
2,581
r
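One design note on the server above: the model formula is assembled twice, once in each output. A sketch (untested) of how a shared reactive would remove the duplication, using only inputs the app already defines:

formula_str <- reactive({
  paste0("medv~lstat", paste0("+", input$features, collapse = ""))
})
# then inside renderPlot / renderPrint:  fit <- lm(formula_str(), Boston)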
library(cmaes)
library(quantmod)
library(zoo)
library(lubridate)
library(dplyr)
library(parallel)
library(doParallel)
#library(progress)
#library(ggplot2)
library(nloptr)
#library(optimx)
#library(latticeExtra)
library(tseries)

# get data
filename <- "SP500.csv"
folder <- "./data/SP500_WINDOWS/"
filepath <- paste("./data/", filename, sep="")
filesname <- substr(filepath, nchar("./data/")+1, nchar(filepath)-4)

ticker <- read.csv(filepath)
ticker$Date <- as.Date(ticker$Date, format = "%Y-%m-%d")
#plot(ticker$Date, ticker$Adj.Close, type="l")
ticker <- ticker[,c(1,6)]
ticker$t <- decimal_date(ticker$Date)
names(ticker) <- c("Date", "Close", "t")
ticker$Close <- na_if(ticker$Close, "null")
ticker <- na.omit(ticker)
ticker$Close <- as.numeric(ticker$Close)
ticker <- ticker[15000:23395,]

# LPPL model value at each observation time
lppl_est <- function(data, tc, m, w, a, b, c1, c2){
  dif_time = abs(tc - data$t)
  est = a + dif_time^m*(b + ((c1 * cos(w * log(dif_time))) + (c2 * sin(w * log(dif_time)))))
  return(est)
}

# Solve the normal equations for the linear parameters A, B, C1, C2
matrix_eq <- function(data, tc, m, w){
  ti <- abs(tc - data$t)
  fi <- ti ** m                      # B
  gi <- ti ** m * cos(w * log(ti))   # C1
  hi <- ti ** m * sin(w * log(ti))   # C2
  yi <- log(data$Close)
  MAT <- matrix(c(length(ti), sum(fi),    sum(gi),    sum(hi),
                  sum(fi),    sum(fi**2), sum(fi*gi), sum(fi*hi),
                  sum(gi),    sum(fi*gi), sum(gi**2), sum(gi*hi),
                  sum(hi),    sum(fi*hi), sum(gi*hi), sum(hi**2)), ncol=4, nrow=4)
  YY <- matrix(c(sum(yi), sum(yi*fi), sum(yi*gi), sum(yi*hi)), ncol=1)
  coef <- solve(MAT, YY)
  #reg <- coef(lm(logP ~ Xm + Xm.cos + Xm.sin, data=data))
  return(coef)
}

funz_obj <- function(x, data){
  tc = x[1]
  m = x[2]
  w = x[3]
  lin_par <- matrix_eq(data, tc, m, w)
  #c = (lin_par[3] ** 2 + lin_par[4] ** 2) ** 0.5
  # Residual sum of squares
  delta <- log(data$Close) - lppl_est(data, tc, m, w, lin_par[1], lin_par[2], lin_par[3], lin_par[4])
  RSS <- sum(delta^2)
  return(RSS)
}

fitter <- function(data, plot=FALSE){
  ticker <- data
  last_row <- tail(ticker, 1)
  first_row <- head(ticker, 1)
  dt <- last_row$t - first_row$t
  start_search <- c(runif(1, max(ticker$t)-0.2*dt, max(ticker$t)+0.2*dt),
                    runif(1, 0.01, 1.99),
                    runif(1, 1, 50))
  upper <- c(max(ticker$t)+0.2*dt, 2, 50)
  lower <- c(max(ticker$t)-0.2*dt, 0.01, 1)

  # if(type=="L-BFGS-B"){
  #   test <- optim(start_search, funz_obj, lower=lower, upper=upper, method="L-BFGS-B", data=ticker)
  # }
  #
  # if(type=="CMAES"){
  #   nbre_generation <- 100
  #   vec_control <- data.frame(maxit = c(nbre_generation))
  #   test <- cmaes::cma_es(start_search, funz_obj, ticker,
  #                         lower=c(max(ticker$t)-0.2*dt, 0.01, 1),
  #                         upper=c(max(ticker$t)+0.2*dt, 1, 50), control=vec_control)
  # }
  #
  # if(type=="contr"){
  #   test <- crs2lm(start_search, funz_obj, lower=lower, upper=upper, data=ticker)
  # }
  #
  # if(type=="isres"){ # second BEST
  #   test <- isres(start_search, funz_obj, lower=lower, upper=upper, data=ticker)
  # }
  #
  #if(type=="mlsl"){ # BEST OVERALL

  test <- mlsl(start_search, funz_obj, lower=lower, upper=upper, local.method = "LBFGS", data=ticker)

  #}
  #
  # if(type=="nelder"){
  #   test <- neldermead(start_search, funz_obj, lower=lower, upper=upper, data=ticker)
  # }

  linear_param <- matrix_eq(ticker, test$par[1], test$par[2], test$par[3])
  fitted <- lppl_est(ticker, test$par[1], test$par[2], test$par[3],
                     linear_param[1], linear_param[2], linear_param[3], linear_param[4])

  if(plot==TRUE){
    plot(log(ticker$Close), type="l", col="red")
    lines(fitted, col="blue")
  }

  # Unit-root test on the residuals
  residual <- log(ticker$Close) - fitted
  test.resid <- suppressWarnings(tseries::kpss.test(residual)[1])  # residual stationarity test
  rownames(test.resid) <- c()

  results <- data.frame(first_row$Date,
                        last_row$Date,
                        last_row$Close,
                        as.integer(dt/(1/365)),
                        exp(max(fitted)),
                        test$par[1] - last_row$t,
                        as.integer((test$par[1]-last_row$t)/(1/365)),
                        test$par[2],      # m
                        test$par[3],      # w
                        test$par[1],      # tc
                        linear_param[1],  # A
                        linear_param[2],  # B
                        linear_param[3],  # C1
                        linear_param[4],  # C2
                        (test$par[3]/(2*pi))*log(abs((test$par[1])/(test$par[1]-last_row$t))),
                        #(test$par[3]/(2))*log(abs((test$par[1]-first_row$t)/(last_row$t-first_row$t))), # number of oscillations
                        (test$par[2]*abs(linear_param[2]))/(test$par[3]*abs((linear_param[3]^2+linear_param[4]^2)^0.5)),
                        #*abs(linear_param[3]/(cos(atan(linear_param[4]/linear_param[3]))))
                        abs((log(last_row$Close)-fitted[length(fitted)])/fitted[length(fitted)]),
                        last_row$t - 0.05*dt,
                        last_row$t + 0.1*dt,
                        - linear_param[2] * test$par[2] - abs((linear_param[3]^2+linear_param[4]^2)^0.5)*sqrt(test$par[2]^2+test$par[3]^2), # fantazzini
                        test.resid,
                        sum(residual^2)
                        )
  names(results) <- c("start_date","end_date","last_price","dt","LPPL_max","tc-end.t",
                      "day_to_tc","m","w","tc","A","B","C1","C2","oscill","damp","rel_err",
                      "dt_filter_low","dt_filter_high","hazard","test.resid","resid")
  rownames(results) <- c()
  return(results)
}

# Script that runs the fit over all the time windows
compute_conf <- function(data, clusters=20, size=10, diff=1, save=FALSE){
  ticker <- data
  conf_ind <- data.frame(P.SS_EW=rep(0,nrow(ticker)),
                         P.SS_EF=rep(0,nrow(ticker)),
                         P.S_EW=rep(0,nrow(ticker)),
                         P.S_EF=rep(0,nrow(ticker)),
                         P.M_EW=rep(0,nrow(ticker)),
                         P.M_EF=rep(0,nrow(ticker)),
                         P.L_EW=rep(0,nrow(ticker)),
                         P.L_EF=rep(0,nrow(ticker)),
                         N.SS_EW=rep(0,nrow(ticker)),
                         N.SS_EF=rep(0,nrow(ticker)),
                         N.S_EW=rep(0,nrow(ticker)),
                         N.S_EF=rep(0,nrow(ticker)),
                         N.M_EW=rep(0,nrow(ticker)),
                         N.M_EF=rep(0,nrow(ticker)),
                         N.L_EW=rep(0,nrow(ticker)),
                         N.L_EF=rep(0,nrow(ticker)),  #19
                         P.SS_tc=rep(0,nrow(ticker)), #20
                         P.S_tc=rep(0,nrow(ticker)),
                         P.M_tc=rep(0,nrow(ticker)),
                         P.L_tc=rep(0,nrow(ticker)),
                         N.SS_tc=rep(0,nrow(ticker)),
                         N.S_tc=rep(0,nrow(ticker)),
                         N.M_tc=rep(0,nrow(ticker)),
                         N.L_tc=rep(0,nrow(ticker))   #27
                         )
  ticker <- cbind(ticker, conf_ind)

  cl <- parallel::makeForkCluster(clusters)
  doParallel::registerDoParallel(cl)

  for(j in diff:(size+diff)){
    sub_ticker <- ticker[seq(nrow(ticker)-1350-j, nrow(ticker)-j), 1:3]

    df_result <- foreach (i = seq(1,1437,1), .combine = rbind) %dopar% {
      #from <- from_base+i
      # if (as.POSIXlt(from)$wday != 0 & as.POSIXlt(from)$wday != 6) {
      #rTicker <- base::subset(sub_ticker, sub_ticker$Date >= from & sub_ticker$Date <= to_base)
      r.ticker <- sub_ticker[i:nrow(sub_ticker),]
      result <- NULL
      attempt <- 3
      while(is.null(result) && attempt <= 4){
        attempt <- attempt + 1
        try(
          result <- fitter(r.ticker),
          silent=TRUE
        )
      }
      return(result)
    }

    # Keep only the interval lengths of interest: dt between 40 and 1460
    df_result <- as_tibble(df_result) %>% filter(dt >= 40 & dt<=1460)

    # Save the result for each single t2 to csv
    nome <- paste("df_result", "_", j, ".csv", sep="")
    write.csv(df_result, paste(folder, nome, sep=""))

    #
    # COMPUTE INDICATOR
    #####
    #
    # ( SS_EW ) SUPER SHORT SCALE (SS) _ EARLY WARNING __ 183 to 40
    # P.SS_EW <- nrow(as_tibble(df_result) %>%
    #   filter(m >= 0.01 & m <= 1.2 & w >=2 & w <= 25
    #          & tc <= dt_filter_high & tc >= dt_filter_low
    #          & oscill >= 2.5 & damp >=0.8
    #          & rel_err >=0 & rel_err <=0.05
    #          & dt >= 40 & dt<=183
    #          & B<0 & test.resid<0.463 & hazard>0))
    #
    # ticker[ticker$Date==sub_ticker$Date[nrow(sub_ticker)],4] <- round(P.SS_EW/nrow(as_tibble(df_result) %>%
    #   filter(dt >= 40 & dt<=183 & B<0
    #          #& hazard>0
    #          )),digits=5)
    #
    # N.SS_EW <- nrow(as_tibble(df_result) %>%
    #   filter(m >= 0.01 & m <= 1.2 & w >=2 & w <= 25
    #          & tc <= dt_filter_high & tc >= dt_filter_low
    #          & oscill >= 2.5 & damp >=0.8
    #          & rel_err >=0 & rel_err <=0.05
    #          & dt >= 40 & dt<=183
    #          & B>0 & test.resid<0.463 & hazard<0))
    #
    # ticker[ticker$Date==sub_ticker$Date[nrow(sub_ticker)],12] <- round(N.SS_EW/nrow(as_tibble(df_result) %>%
    #   filter(dt >= 40 & dt<=183 & B>0
    #          #& hazard<0
    #          )),digits=5)
    #
    # ( SS_EF ) SUPER SHORT SCALE (SS) _ END FLAG ___ 183 to 40
    # P.SS_EF <- nrow(as_tibble(df_result) %>%
    #   filter(m >= 0.01 & m <= 0.99 & w >=2 & w <= 25
    #          & tc <= dt_filter_high & tc >= dt_filter_low
    #          & oscill >= 2.5 & damp >=1
    #          & rel_err >=0 & rel_err <=0.2
    #          & dt >= 40 & dt<=183
    #          & B<0 & test.resid<0.463 & hazard>0))
    #
    # ticker[ticker$Date==sub_ticker$Date[nrow(sub_ticker)],5] <- round(P.SS_EF/nrow(as_tibble(df_result) %>%
    #   filter(dt >= 40 & dt<=183 & B<0
    #          #& hazard>0
    #          )),digits=5)
    #
    # median critical time
    # ticker[ticker$Date==sub_ticker$Date[nrow(sub_ticker)],20] <- median(unlist((as_tibble(df_result) %>%
    #   filter(#m >= 0.01 & m <= 0.99 & w >=2 & w <= 25
    #          #& tc <= dt_filter_high & tc >= dt_filter_low
    #          #& oscill >= 2.5 & damp >=1
    #          #& rel_err >=0 & rel_err <=0.2
    #          dt >= 40 & dt<=183 & B<0
    #          #& test.resid<0.463 & hazard>0
    #          ))[,10]))
    #
    # N.SS_EF <- nrow(as_tibble(df_result) %>%
    #   filter(m >= 0.01 & m <= 0.99 & w >=2 & w <= 25
    #          & tc <= dt_filter_high & tc >= dt_filter_low
    #          & oscill >= 2.5 & damp >=1
    #          & rel_err >=0 & rel_err <=0.2
    #          & dt >= 40 & dt<=183 & B>0
    #          & test.resid<0.463 & hazard<0))
    #
    # ticker[ticker$Date==sub_ticker$Date[nrow(sub_ticker)],13] <- round(N.SS_EF/nrow(as_tibble(df_result) %>%
    #   filter(dt >= 40 & dt<=183 & B>0
    #          #& hazard<0
    #          )),digits=5)
    #
    # ticker[ticker$Date==sub_ticker$Date[nrow(sub_ticker)],24] <- median(unlist((as_tibble(df_result) %>%
    #   filter(#m >= 0.01 & m <= 0.99 & w >=2 & w <= 25
    #          #& tc <= t2+0.1*(t2-t1)
    #          #& oscill >= 2.5 & damp >=1
    #          #& rel_err >=0 & rel_err <=0.2
    #          dt >= 40 & dt<=183 & B>0 & hazard<0))[,10]))
    #
    # ( S_EW ) SHORT SCALE -- EARLY WARNING 360 to 40
    # P.S_EW <- nrow(as_tibble(df_result) %>%
    #   filter(m >= 0.01 & m <= 1.2 & w >=2 & w <= 25
    #          & tc <= dt_filter_high & tc >= dt_filter_low
    #          & oscill >= 2.5 & damp >=0.8
    #          & rel_err >=0 & rel_err <=0.05
    #          & dt >= 40 & dt<=360 & B<0
    #          & test.resid<0.463 & hazard>0))
    #
    # ticker[ticker$Date==sub_ticker$Date[nrow(sub_ticker)],6] <- round(P.S_EW/nrow(as_tibble(df_result) %>%
    #   filter(dt >= 40 & dt<=360 & B<0
    #          #& hazard>0
    #          )),digits=5)
    #
    # N.S_EW <- nrow(as_tibble(df_result) %>%
    #   filter(m >= 0.01 & m <= 1.2 & w >=2 & w <= 25
    #          & tc <= dt_filter_high & tc >= dt_filter_low
    #          & oscill >= 2.5 & damp >=0.8
    #          & rel_err >=0 & rel_err <=0.05
    #          & dt >= 40 & dt<=360 & B>0
    #          & test.resid<0.463 & hazard<0))
    #
    # ticker[ticker$Date==sub_ticker$Date[nrow(sub_ticker)],14] <- round(N.S_EW/nrow(as_tibble(df_result) %>%
    #   filter(dt >= 40 & dt<=360 & B>0
    #          #& hazard<0
    #          )),digits=5)
    #
    # ( S_EF ) SHORT SCALE (S) _ END FLAG ___ 360 to 40
    # P.S_EF <- nrow(as_tibble(df_result) %>%
    #   filter(m >= 0.01 & m <= 0.99 & w >=2 & w <= 25
    #          & tc <= dt_filter_high & tc >= dt_filter_low
    #          & oscill >= 2.5 & damp >=1
    #          & rel_err >=0 & rel_err <=0.2
    #          & dt >= 40 & dt<=360 & B<0
    #          & test.resid<0.463 & hazard>0))
    #
    # ticker[ticker$Date==sub_ticker$Date[nrow(sub_ticker)],7] <- round(P.S_EF/nrow(as_tibble(df_result) %>%
    #   filter(dt >= 40 & dt<=360 & B<0
    #          #& hazard>0
    #          )),digits=5)
    #
    # ticker[ticker$Date==sub_ticker$Date[nrow(sub_ticker)],21] <- median(unlist((as_tibble(df_result) %>%
    #   filter(#m >= 0.01 & m <= 0.99 & w >=2 & w <= 25
    #          #& tc <= t2+0.1*(t2-t1)
    #          #& oscill >= 2.5 & damp >=1
    #          #& rel_err >=0 & rel_err <=0.2
    #          dt >= 40 & dt<=360 & B<0 & hazard>0))[,10]))
    #
    # N.S_EF <- nrow(as_tibble(df_result) %>%
    #   filter(m >= 0.01 & m <= 0.99 & w >=2 & w <= 25
    #          & tc <= dt_filter_high & tc >= dt_filter_low
    #          & oscill >= 2.5 & damp >=1
    #          & rel_err >=0 & rel_err <=0.2
    #          & dt >= 40 & dt<=360 & B>0
    #          & test.resid<0.463 & hazard<0))
    #
    # ticker[ticker$Date==sub_ticker$Date[nrow(sub_ticker)],15] <- round(N.S_EF/nrow(as_tibble(df_result) %>%
    #   filter(dt >= 40 & dt<=360 & B>0
    #          #& hazard<0
    #          )),digits=5)
    #
    # ticker[ticker$Date==sub_ticker$Date[nrow(sub_ticker)],25] <- median(unlist((as_tibble(df_result) %>%
    #   filter(#m >= 0.01 & m <= 0.99 & w >=2 & w <= 25
    #          #& tc <= t2+0.1*(t2-t1)
    #          #& oscill >= 2.5 & damp >=1
    #          #& rel_err >=0 & rel_err <=0.2
    #          dt >= 40 & dt<=360 & B>0 & hazard <0))[,10]))
    #
    # ( M_EW ) MEDIUM SCALE -- EARLY WARNING 365 to 730
    # P.M_EW <- nrow(as_tibble(df_result) %>%
    #   filter(m >= 0.01 & m <= 1.2 & w >=2 & w <= 25
    #          & tc <= dt_filter_high & tc >= dt_filter_low
    #          & oscill >= 2.5 & damp >=0.8
    #          & rel_err >=0 & rel_err <=0.05
    #          & dt >= 365 & dt<=730 & B<0
    #          & test.resid<0.463 & hazard>0))
    #
    # ticker[ticker$Date==sub_ticker$Date[nrow(sub_ticker)],8] <- round(P.M_EW/nrow(as_tibble(df_result) %>%
    #   filter(dt >= 365 & dt<=730 & B<0
    #          #& hazard>0
    #          )),digits=5)
    #
    # N.M_EW <- nrow(as_tibble(df_result) %>%
    #   filter(m >= 0.01 & m <= 1.2 & w >=2 & w <= 25
    #          & tc <= dt_filter_high & tc >= dt_filter_low
    #          & oscill >= 2.5 & damp >=0.8
    #          & rel_err >=0 & rel_err <=0.05
    #          & dt >= 365 & dt<=730 & B>0
    #          & test.resid<0.463 & hazard<0))
    #
    # ticker[ticker$Date==sub_ticker$Date[nrow(sub_ticker)],16] <- round(N.M_EW/nrow(as_tibble(df_result) %>%
    #   filter(dt >= 365 & dt<=730 & B>0
    #          #& hazard<0
    #          )),digits=5)
    #
    # ( M_EF ) MEDIUM SCALE _ END FLAG ___ 365 to 730
    # P.M_EF <- nrow(as_tibble(df_result) %>%
    #   filter(m >= 0.01 & m <= 0.99 & w >=2 & w <= 25
    #          & tc <= dt_filter_high & tc >= dt_filter_low
    #          & oscill >= 2.5 & damp >=1
    #          & rel_err >=0 & rel_err <=0.2
    #          & dt >= 365 & dt<=730 & B<0
    #          & test.resid<0.463 & hazard>0))
    #
    # ticker[ticker$Date==sub_ticker$Date[nrow(sub_ticker)],9] <- round(P.M_EF/nrow(as_tibble(df_result) %>%
    #   filter(dt >= 365 & dt<=730 & B<0
    #          #& hazard>0
    #          )),digits = 5)
    #
    # ticker[ticker$Date==sub_ticker$Date[nrow(sub_ticker)],22] <- median(unlist((as_tibble(df_result) %>%
    #   filter(#m >= 0.01 & m <= 0.99 & w >=2 & w <= 25
    #          #& tc <= t2+0.1*(t2-t1)
    #          #& oscill >= 2.5 & damp >=1
    #          #& rel_err >=0 & rel_err <=0.2
    #          dt >= 365 & dt<=730 & B<0 & hazard>0))[,10]))
    #
    # N.M_EF <- nrow(as_tibble(df_result) %>%
    #   filter(m >= 0.01 & m <= 0.99 & w >=2 & w <= 25
    #          & tc <= dt_filter_high & tc >= dt_filter_low
    #          & oscill >= 2.5 & damp >=1
    #          & rel_err >=0 & rel_err <=0.2
    #          & dt >= 365 & dt<=730 & B>0
    #          & test.resid<0.463 & hazard<0))
    #
    # ticker[ticker$Date==sub_ticker$Date[nrow(sub_ticker)],17] <- round(N.M_EF/nrow(as_tibble(df_result) %>%
    #   filter(dt >= 365 & dt<=730 & B>0
    #          #& hazard<0
    #          )),digits = 5)
    #
    # ticker[ticker$Date==sub_ticker$Date[nrow(sub_ticker)],26] <- median(unlist((as_tibble(df_result) %>%
    #   filter(#m >= 0.01 & m <= 0.99 & w >=2 & w <= 25
    #          #& tc <= t2+0.1*(t2-t1)
    #          #& oscill >= 2.5 & damp >=1
    #          #& rel_err >=0 & rel_err <=0.2
    #          dt >= 365 & dt<=730 & B>0 & hazard<0))[,10]))
    #
    # ( L_EW ) LONG SCALE -- EARLY WARNING 1460 to 730
    # P.L_EW <- nrow(as_tibble(df_result) %>%
    #   filter(m >= 0.01 & m <= 1.2 & w >=2 & w <= 25
    #          & tc <= dt_filter_high & tc >= dt_filter_low
    #          & oscill >= 2.5 & damp >=0.8
    #          & rel_err >=0 & rel_err <=0.05
    #          & dt >= 730 & dt<=1460 & B<0
    #          & test.resid<0.463 & hazard>0))
    #
    # ticker[ticker$Date==sub_ticker$Date[nrow(sub_ticker)],10] <- round(P.L_EW/nrow(as_tibble(df_result) %>%
    #   filter(dt >= 730 & dt<=1460 & B<0
    #          #& hazard>0
    #          )),digits=5)
    #
    # N.L_EW <- nrow(as_tibble(df_result) %>%
    #   filter(m >= 0.01 & m <= 1.2 & w >=2 & w <= 25
    #          & tc <= dt_filter_high & tc >= dt_filter_low
    #          & oscill >= 2.5 & damp >=0.8
    #          & rel_err >=0 & rel_err <=0.05
    #          & dt >= 730 & dt<=1460 & B>0
    #          & test.resid<0.463 & hazard<0))
    #
    # ticker[ticker$Date==sub_ticker$Date[nrow(sub_ticker)],18] <- round(N.L_EW/nrow(as_tibble(df_result) %>%
    #   filter(dt >= 730 & dt<=1460 & B>0
    #          #& hazard<0
    #          )),digits=5)
    #
    # ( L_EF ) LONG SCALE _ END FLAG ___ 1460 to 730
    # P.L_EF <- nrow(as_tibble(df_result) %>%
    #   filter(m >= 0.01 & m <= 0.99 & w >=2 & w <= 25
    #          & tc <= dt_filter_high & tc >= dt_filter_low
    #          & oscill >= 2.5 & damp >=1
    #          & rel_err >=0 & rel_err <=0.2
    #          & dt >= 730 & dt<=1460 & B<0
    #          & test.resid<0.463 & hazard>0))
    #
    # ticker[ticker$Date==sub_ticker$Date[nrow(sub_ticker)],11] <- round(P.L_EF/nrow(as_tibble(df_result) %>%
    #   filter(dt >= 730 & dt<=1460 & B<0
    #          #& hazard>0
    #          )),digits=5)
    #
    # ticker[ticker$Date==sub_ticker$Date[nrow(sub_ticker)],23] <- median(unlist((as_tibble(df_result) %>%
    #   filter(#m >= 0.01 & m <= 0.99 & w >=2 & w <= 25
    #          #& tc <= t2+0.1*(t2-t1)
    #          #& oscill >= 2.5 & damp >=1
    #          #& rel_err >=0 & rel_err <=0.2
    #          dt >= 730 & dt<=1460 & B<0 & hazard>0))[,10]))
    #
    # N.L_EF <- nrow(as_tibble(df_result) %>%
    #   filter(m >= 0.01 & m <= 0.99 & w >=2 & w <= 25
    #          & tc <= dt_filter_high & tc >= dt_filter_low
    #          & oscill >= 2.5 & damp >=1
    #          & rel_err >=0 & rel_err <=0.2
    #          & dt >= 730 & dt<=1460 & B>0
    #          & test.resid<0.463 & hazard<0))
    #
    # ticker[ticker$Date==sub_ticker$Date[nrow(sub_ticker)],19] <- round(N.L_EF/nrow(as_tibble(df_result) %>%
    #   filter(dt >= 730 & dt<=1460 & B>0
    #          #& hazard<0
    #          )),digits=5)
    #
    # ticker[ticker$Date==sub_ticker$Date[nrow(sub_ticker)],27] <- median(unlist((as_tibble(df_result) %>%
    #   filter(#m >= 0.01 & m <= 0.99 & w >=2 & w <= 25
    #          #& tc <= t2+0.1*(t2-t1)
    #          #& oscill >= 2.5 & damp >=1
    #          #& rel_err >=0 & rel_err <=0.2
    #          dt >= 730 & dt<=1460 & B>0 & hazard<0))[,10]))
    #
    ######
  }

  parallel::stopCluster(cl)

  if(save==TRUE){
    write.csv(ticker, paste(folder, filesname, "_", "ANALYSIS", ".csv", sep=""))
  }
  return(ticker)
}

compute_conf(ticker, size=6, diff=214, clusters = 36, save=FALSE)
/CINECA/fitting_2.R
no_license
sabato96/LPPLS-APPLIANCE
R
false
false
34,515
r
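For reference, the log-periodic power-law form fitted above can be evaluated on its own. This standalone sketch uses made-up parameter values purely to visualize the shape of the curve near a critical time tc:

lppl <- function(t, tc, m, w, a, b, c1, c2) {
  dif <- abs(tc - t)
  a + dif^m * (b + c1 * cos(w * log(dif)) + c2 * sin(w * log(dif)))
}
curve(lppl(x, tc = 2009, m = 0.5, w = 8, a = 7, b = -0.5, c1 = 0.05, c2 = 0.05),
      from = 2005, to = 2008.9, n = 2000, xlab = "t", ylab = "log-price")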
VideoName <- readline("What data did you analyse?")
fact.t1 <- as.factor(t1$A)

# Write one CSV per factor level. Note: the original called paste() with its
# default sep = " ", which put spaces inside the output path; paste0() is
# needed to build a valid file name.
savefile <- sapply(levels(fact.t1), function(x)
  write.table(t1[x == fact.t1, 3:4],
              paste0("d:/Karen's/PhD/R program/Processed_data/trackdata/",
                     VideoName, "/", x, ".csv"),
              sep = ";", row.names = F),
  simplify = TRUE, USE.NAMES = TRUE)
/scripts that do not actuially work/sub3.r
no_license
kaye11/Some-R-scripts
R
false
false
304
r
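The path bug in the script above comes down to paste()'s default separator; a quick illustration:

paste("d:/trackdata/", "video1", "/", "A", ".csv")   # "d:/trackdata/ video1 / A .csv" - broken path
paste0("d:/trackdata/", "video1", "/", "A", ".csv")  # "d:/trackdata/video1/A.csv"    - valid path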
library(tm)

setwd("C:/Users/Quan Nguyen/OneDrive/Work/Self-Studies/DataScience-Coursera/DS_Specialization_JohnsHopkinsUni/TheDataScientistsToolbox/datasciencecoursera/DS_Capstone/App/")

bigram <- readRDS(file="final_bigram.Rda")
trigram <- readRDS(file="final_trigram.Rda")
fourgram <- readRDS(file="final_fourgram.Rda")

nextWordPredictor <- function(inputTxt) {
  if(nchar(inputTxt) > 0) {
    # clean input
    inputTxt <- tolower(inputTxt)
    inputTxt <- removeNumbers(inputTxt)
    inputTxt <- removePunctuation(inputTxt)
    inputTxt <- stripWhitespace(inputTxt)

    # split into words
    inputList <- unlist(strsplit(inputTxt, " "))
    print(inputList)
    numWords <- length(inputList)
    print(numWords)

    runBigram <- function(words){
      bigram[bigram$terms$one == words,]$terms$two
    }
    runTrigram <- function(words){
      trigram[trigram$terms$one == words[1] & trigram$terms$two == words[2],]$terms$three
    }
    runFourgram <- function(words) {
      fourgram[fourgram$terms$one == words[1] &
               fourgram$terms$two == words[2] &
               fourgram$terms$three == words[3],]$terms$four
    }

    if(numWords == 1) {
      #print("running bigram")
      predList <- runBigram(inputList[1])
    } else if (numWords == 2) {
      #print("running trigram")
      word1 <- inputList[1]
      word2 <- inputList[2]
      predList <- runTrigram(c(word1, word2))
      if(length(predList) == 0){
        #print("Trigram failed running bigram")
        predList <- runBigram(word2)
      }
    } else {
      #print("running fourgram")
      word1 <- inputList[numWords-2]
      word2 <- inputList[numWords-1]
      word3 <- inputList[numWords]
      predList <- runFourgram(c(word1, word2, word3))
      if(length(predList) == 0){
        #print("fourgram failed running trigram")
        predList <- runTrigram(c(word2, word3))
      }
      if(length(predList) == 0){
        #print("trigram failed running bigram")
        predList <- runBigram(word3)
      }
    }

    # Return top n predictors
    n <- 4
    tp <- length(predList)
    if(tp >= n){
      predList <- predList[1:n]
    }
    as.character(predList)
  } else {
    ""
  }
}
/Data_Science_Capstone_Project/Prediction.R
no_license
quannguyen201294/datasciencecoursera
R
false
false
2,292
r
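The lookup helpers above assume each n-gram table is a data frame whose `terms` column is itself a data frame of word columns. The real .Rda files are not shown, so this mock (an assumption about their shape) only illustrates how the subsetting in runBigram() resolves:

bigram <- data.frame(terms = I(data.frame(one = c("of", "of", "in"),
                                          two = c("the", "a", "the"))))
bigram[bigram$terms$one == "of", ]$terms$two  # "the" "a"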
#' Assert that packages are loaded
#'
#' This function can be useful in writing package functions that use
#' functions from packages that you "suggest". It asserts that these packages
#' are available, and throws an informative error for those packages
#' that are not.
#'
#' @param packages `character` vector of package names to check
#' @param ... `character` package names to check
#'
#' @return `logical` indicating success
#' @examples
#' \dontrun{
#'   # put packages in a character vector
#'   assert_packages(c("base", "utils"))
#'
#'   # or by themselves
#'   assert_packages("base", "utils")
#' }
#' @seealso [R Packages book](http://r-pkgs.had.co.nz/description.html#dependencies)
#' @keywords internal
#'
assert_packages <- function(packages, ...) {

  packages <- c(packages, ...)

  is_missing <- vapply(packages, function(x) {!requireNamespace(x, quietly = TRUE)}, TRUE)

  missing_pkgs <- packages[is_missing]
  quote_missing_pkgs <- vapply(missing_pkgs, function(x) {paste0('"', x, '"')}, "")

  assertthat::assert_that(
    identical(length(missing_pkgs), 0L),
    msg = paste(
      "Package(s):",
      paste(quote_missing_pkgs, collapse = ", "),
      "needed for this function to work. Please install.",
      sep = " "
    )
  )
}
/R/utils-package.R
no_license
sa-lee/vegawidget
R
false
false
1,278
r
#
# This is the server logic of a Shiny web application. You can run the
# application by clicking 'Run App' above.
#
# Find out more about building applications with Shiny here:
#
#    http://shiny.rstudio.com/
#

library(shiny)
library(package = "qrcode")

# Define server logic to preview the uploaded image and render its QR code
shinyServer(function(input, output, session) {

  output$file_image <- renderImage({
    # input$file1 will be NULL initially. After the user selects
    # and uploads a file, head of that data file by default,
    # or all rows if selected, will be shown.
    req(input$file1)

    # show the image
    list(
      src = input$file1$datapath,
      width = "100%"
    )
  }, deleteFile = FALSE)

  output$file_qr <- renderImage({
    req(input$file1)

    md5 <- tools::md5sum(files = input$file1$datapath)

    # Read plot2's width and height. These are reactive values, so this
    # expression will re-run whenever these values change.
    width  <- session$clientData$output_file_qr_width
    height <- session$clientData$output_file_qr_height

    # A temp file to save the output.
    outfile <- tempfile(fileext = '.png')

    # Save picture to the store
    file.copy(
      from = input$file1$datapath,
      to = paste0("../store/", md5, ".jpg"),
      overwrite = TRUE
    )

    png(outfile, width = width, height = height)
    qrcode::qrcode_gen(dataString = md5)
    dev.off()

    # Return a list containing the filename
    list(src = outfile,
         width = width,
         height = height,
         alt = md5)
  }, deleteFile = TRUE)
})
/app/server.R
no_license
odeleongt/photo-qr-store
R
false
false
1,841
r
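The QR step in the server above can also be exercised outside Shiny. A minimal sketch, assuming a qrcode package version that still exports qrcode_gen() (newer releases renamed the generator) and a hypothetical input file:

library(qrcode)
md5 <- tools::md5sum(files = "some_photo.jpg")  # hypothetical upload
png("qr.png", width = 300, height = 300)
qrcode::qrcode_gen(dataString = unname(md5))
dev.off()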
# Dynamically exported, see zzz.R
# taken from broom::: to make augment.eglm work
has_rownames <- function(df) {
  if (tibble::is_tibble(df)) {
    return(FALSE)
  }
  any(rownames(df) != as.character(1:nrow(df)))
}
/R/99-broom-has-rownames.R
permissive
nfultz/eflm
R
false
false
217
r
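A quick illustration of the three branches of the check (the last line assumes the tibble package is installed):

has_rownames(data.frame(x = 1:3))        # FALSE - default "1".."3" row names
has_rownames(mtcars)                     # TRUE  - row names carry the car models
has_rownames(tibble::as_tibble(mtcars))  # FALSE - tibbles never keep row names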
library("aroma.core");
verbose <- Arguments$getVerbose(-8, timestamp=TRUE);

ar <- AromaRepository(verbose=TRUE);

verbose && enter(verbose, "Downloading annotation data");

chipType <- "Mapping250K_Nsp";
verbose && cat(verbose, "Chip type: ", chipType);

pathname <- downloadACS(ar, chipType, tags=".*");
verbose && cat(verbose, "ACS: ", pathname);

pathname <- downloadCDF(ar, chipType);
verbose && cat(verbose, "CDF: ", pathname);

pathname <- downloadUGP(ar, chipType, tags=c("na31", ".*"));
verbose && cat(verbose, "UGP: ", pathname);

pathname <- downloadUFL(ar, chipType, tags=c("na31", ".*"));
verbose && cat(verbose, "UFL: ", pathname);

verbose && exit(verbose);
/aroma.affymetrix/inst/testScripts/complete/dataSets/GSE34754/01a.downloadAnnotationData.R
no_license
ingted/R-Examples
R
false
false
694
r
#!/share/apps/R-3.2.2_gcc/bin/Rscript

library(chron)
library(data.table)
library(reshape2)
library(dplyr)
library(foreach)
library(iterators)

source_path = "/home/hnoorazar/cleaner_codes/core.R"
source(source_path)

raw_data_dir = "/data/hydro/users/Hossein/codling_moth_new/local/raw/"
write_path = "/data/hydro/users/Hossein/codling_moth_new/local/processed/future_CM/"
param_dir = "/home/hnoorazar/cleaner_codes/parameters/"

file_prefix = "data_"
ClimateGroup = list("Historical", "2040's", "2060's", "2080's")
cellByCounty = data.table(read.csv(paste0(param_dir, "CropParamCRB.csv")))

args = commandArgs(trailingOnly=TRUE)
category = args[1]

for(version in c('rcp45')) {
  files = list.files(paste0(raw_data_dir, category, "/", version, "/"))
  files = files[151:180]
  for(file in files) {
    location = gsub("data_", "", file)
    if(category == "historical") {
      start_year = 1979
      end_year = 2015
      filename = paste0(category, "/", file_prefix, location)
    } else {
      start_year = 2006
      end_year = 2099
      filename = paste0(category, "/", version, "/", file_prefix, location)
    }
    temp <- prepareData_CM(filename = filename,
                           input_folder = raw_data_dir,
                           param_dir = param_dir,
                           cod_moth_param_name = "CodlingMothparameters.txt",
                           start_year = start_year,
                           end_year = end_year,
                           lower=10, upper=31.11)
    temp_data <- data.table()
    if(category == "historical") {
      temp$ClimateGroup[temp$year >= 1979 & temp$year <= 2015] <- "Historical"
      temp_data <- rbind(temp_data, temp[temp$year >= 1979 & temp$year <= 2015, ])
    } else {
      temp$ClimateGroup[temp$year > 2025 & temp$year <= 2055] <- "2040's"
      temp_data <- rbind(temp_data, temp[temp$year > 2025 & temp$year <= 2055, ])
      temp$ClimateGroup[temp$year > 2045 & temp$year <= 2075] <- "2060's"
      temp_data <- rbind(temp_data, temp[temp$year > 2045 & temp$year <= 2075, ])
      temp$ClimateGroup[temp$year > 2065 & temp$year <= 2095] <- "2080's"
      temp_data <- rbind(temp_data, temp[temp$year > 2065 & temp$year <= 2095, ])
    }
    loc = tstrsplit(location, "_")
    options(digits=9)
    temp_data$latitude <- as.numeric(unlist(loc[1]))
    temp_data$longitude <- as.numeric(unlist(loc[2]))
    temp_data$County <- as.character(unique(cellByCounty[lat == temp_data$latitude[1] &
                                                         long == temp_data$longitude[1], countyname]))
    temp_data$ClimateScenario <- category
    if(category != "historical") {
      write_dir = paste0(write_path, category, "/", version)
      dir.create(file.path(write_dir), recursive = TRUE)
      write.table(temp_data, file = paste0(write_dir, "/CM_", location),
                  sep = ",", row.names = FALSE, col.names = TRUE)
    } else {
      write_dir = paste0(write_path, category, "/")
      dir.create(file.path(write_dir), recursive = TRUE)
      write.table(temp_data, file = paste0(write_dir, "/CM_", location),
                  sep = ",", row.names = FALSE, col.names = TRUE)
    }
  }
}
/codling_moth/code/drivers/local_future/LF_CM_45_S6.R
permissive
HNoorazar/Ag
R
false
false
3,270
r
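One detail worth noting in the driver above: each raw file is named data_<lat>_<long>, so after stripping the prefix, tstrsplit() on "_" recovers both coordinates. A standalone illustration with a made-up grid cell:

location <- gsub("data_", "", "data_46.59375_-119.46875")
loc <- data.table::tstrsplit(location, "_")
as.numeric(unlist(loc[1]))  # 46.59375   (latitude)
as.numeric(unlist(loc[2]))  # -119.46875 (longitude)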
loadSeuratObject = function(filename) {
  require(Seurat)
  sobj = readRDS(filename)
  return(sobj)
}

saveSeuratObject = function(sobj, path) {
  require(Seurat)
  saveRDS(sobj, file=path)
}

runSeurat = function(data, batch, hvg=2000) {
  require(Seurat)
  batch_list = SplitObject(data, split.by = batch)
  anchors = FindIntegrationAnchors(
    object.list = batch_list,
    anchor.features = hvg,
    scale = T,
    l2.norm = T,
    dims = 1:30,
    k.anchor = 5,
    k.filter = 200,
    k.score = 30,
    max.features = 200,
    eps = 0)
  integrated = IntegrateData(
    anchorset = anchors,
    new.assay.name = "integrated",
    features = NULL,
    features.to.integrate = NULL,
    dims = 1:30,
    k.weight = 100,
    weight.reduction = NULL,
    sd.weight = 1,
    sample.tree = NULL,
    preserve.order = F,
    do.cpp = T,
    eps = 0,
    verbose = T)
  return(integrated)
}

func_profiler = function(expr, chunksize=20000, filename='timing.out', prof.interval=0.02) {
  Rprof(filename, memory.profiling=T, interval=prof.interval)
  res = expr
  Rprof(NULL)
  t = summaryRprof(filename, chunksize=chunksize, memory="both")$sampling.time
  mem = max(summaryRprof(filename, chunksize=chunksize, memory="both")$by.total$mem.total)
  return(list(results=res, time=t, memory=mem))
}

# Example call:
# sobj = loadSeuratObject('small_test.RDS')
# out = func_profiler(runSeurat(sobj, "batch"))
# out$results is results
# out$time is timing
# out$memory is memory use

preP <- function(so, vars.to.regress=NULL, verbose=TRUE, n.pcs=100) {
  if (verbose) {
    message("Running Seurat v3 workflow")
  }
  so <- Seurat::FindVariableFeatures(object = so, verbose = verbose)
  so <- Seurat::ScaleData(object = so, verbose = verbose)
  so <- Seurat::RunPCA(object = so, npcs = n.pcs, verbose = verbose)
  return(so)
}

runConos = function(sobj, batch) {
  require(conos)
  require(Seurat)
  #sobj <- loadSeuratObject(data)
  batch_list <- SplitObject(sobj, split.by=batch)
  pp <- lapply(batch_list, preP)
  con <- Conos$new(pp)
  con$buildGraph(space="genes")
  con$findCommunities()
  con$embedGraph(method="UMAP")
  #metadata <- data.frame(Cluster=con$clusters$leiden$groups)
  return(con)
}

saveConos = function(con, outdir) {
  dir.create(outdir)
  saveConosForScanPy(con, output.path=outdir, pseudo.pca=TRUE, pca=TRUE, verbose=TRUE)
}

runHarm = function(sobj, batch) {
  require(harmony)
  require(Seurat)
  sobj <- ScaleData(sobj)
  sobj <- RunPCA(sobj, features=rownames(sobj@assays$RNA))
  sobj <- RunHarmony(sobj, batch)
  sobj@reductions['X_emb'] <- sobj@reductions$harmony
  #harmonyEmb <- HarmonyMatrix(pca, method, batch, do_pca=F)
  return(sobj)
}

runLiger = function(sobj, batch, hvg, k=20, res=0.4, small.clust.thresh=20) {
  require(liger)
  require(Seurat)
  # Only counts is converted to the liger object. To pass our own normalized
  # data, store it in the "counts" slot.
  sobj@assays$RNA@counts = sobj@assays$RNA@data
  # Create Liger object
  lobj = seuratToLiger(sobj, combined.seurat=T, meta.var=batch, renormalize=F, remove.missing=F)
  # We only pass normalized data, so store it as such
  lobj@norm.data <- lobj@raw.data
  # Assign hvgs
  lobj@var.genes <- hvg
  lobj <- scaleNotCenter(lobj, remove.missing=F)  # Can't do our own scaling atm
  # Use the tutorial's coarse k suggestion of 20.
  lobj <- optimizeALS(lobj, k=k, thresh=5e-5, nrep=3)
  lobj <- quantileAlignSNF(lobj, resolution=res, small.clust.thresh=small.clust.thresh)
  # Store embedding in the initial Seurat object.
  # Code taken from the ligerToSeurat() function from LIGER.
  inmf.obj <- new(
    Class = "DimReduc",
    feature.loadings = t(lobj@W),
    cell.embeddings = lobj@H.norm,
    key = "X_emb"
  )
  sobj@reductions['X_emb'] <- inmf.obj
  return(sobj)
}

runFastMNN = function(sobj, batch) {
  require(batchelor)
  expr <- sobj@assays$RNA@data
  sce <- fastMNN(expr, batch = sobj@meta.data[[batch]])
  sobj@assays$RNA <- CreateAssayObject(assay(sce, "reconstructed"))
  sobj@reductions['X_emb'] <- CreateDimReducObject(reducedDim(sce, "corrected"), key='fastmnn_')
  return(sobj)
}
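# A runnable version of the informal "Example call" above; 'small_test.RDS' is
# a placeholder file name and "batch" is assumed to be a column of the
# object's meta.data. Note that because R evaluates arguments lazily, the
# runSeurat() call only executes inside func_profiler's Rprof() window, which
# is what makes this wrapper pattern work.
sobj <- loadSeuratObject("small_test.RDS")
out <- func_profiler(runSeurat(sobj, "batch"), filename = "seurat_timing.out")
integrated <- out$results                        # the integrated Seurat object
cat("time (s):", out$time, "- peak memory (MB):", out$memory, "\n")
saveSeuratObject(integrated, "integrated.RDS")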
/R/integration.R
permissive
DandanHan91054/scib
R
false
false
4355
r
#' theme_Publication for plots
#' @param base_size size of the font for the plot, default 15
#' @param base_family font family to be used, default "Helvetica"
#' Publication-quality images for summary plots
# Requires ggplot2 (theme, element_*), ggthemes (theme_foundation) and grid (unit).
theme_Publication <- function(base_size=15, base_family="Helvetica") {
  (theme_foundation(base_size=base_size, base_family=base_family)
   + theme(plot.title = element_text(face = "bold", size = 15, hjust = 0.5),
           text = element_text(family="Helvetica"),
           panel.background = element_rect(colour = NA),
           plot.background = element_rect(colour = NA),
           panel.border = element_rect(colour = NA),
           axis.title = element_text(face = "bold", size = 12),
           axis.title.y = element_text(angle=90, vjust=2, size=12),
           axis.title.x = element_text(size=12),
           axis.text.x = element_text(size=12, color="black", face="bold", hjust = 1),
           axis.text.y = element_text(size=12, color="black", face="bold", hjust = 1),
           axis.line = element_line(colour="black", size=1),
           axis.ticks = element_line(),
           panel.grid.major = element_line(colour="#f0f0f0"),
           panel.grid.minor = element_blank(),
           legend.text = element_text(size=12),
           legend.key = element_rect(colour = NA),
           legend.position = "right",
           legend.direction = "vertical",
           legend.key.size = unit(0.5, "cm"),
           legend.spacing = unit(1, "mm"),
           legend.title = element_text(family="Helvetica", face="italic", size=rel(0.7)),
           plot.margin = unit(c(10,10,1,10), "mm"),
           strip.background = element_rect(colour="#f0f0f0", fill="#f0f0f0"),
           strip.text = element_text(face="bold")
     ))
}
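# A minimal sketch applying the theme after sourcing the function above; the
# plot and data here are invented for illustration and assume ggplot2 and
# ggthemes are installed.
library(ggplot2)
library(ggthemes)   # provides theme_foundation()
p <- ggplot(mtcars, aes(x = wt, y = mpg)) +
  geom_point() +
  labs(title = "Fuel economy vs. weight") +
  theme_Publication()
# ggsave("mpg_publication.pdf", p, width = 6, height = 4)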
/R/theme_publication.R
permissive
jharenza/annoFuse
R
false
false
1759
r
## ADD ISO BIO -- relation id filtering
"IPA.sampler.Add.Iso.Bio.relID" <- function(P, Add, Iso, Bio, Int, rel.id,
                                            no.its = 1100, burn = 100,
                                            delta.add = 0.5, delta.iso = 0.5, delta.bio = 1,
                                            allsamp = F, ratio.toll = 0.8,
                                            allsampcomp = NULL, v = TRUE, IT = 500) {
  counter <- 0
  ind.rem <- lapply(rel.id, function(x) {
    counter <<- counter + 1
    checking.rel.id(rel.id, counter)
  })
  Nc <- nrow(Add)
  M <- nrow(P)
  if (is.null(allsampcomp)) {
    sampcomp <- apply(P, 1, multsample)
    allsampcomp <- matrix(0, no.its, M)
    olds <- 0
  } else {
    olds <- nrow(allsampcomp)
    sampcomp <- allsampcomp[olds, ]
    allsampcomp <- rbind(allsampcomp, matrix(0, no.its, M))
  }
  pot.bio <- apply(Bio[sampcomp, ], 2, sum)
  for (it in 1:no.its) {
    ordine <- sample(M)  # randomising the order used to check all assignments
    for (thism in ordine) {
      # counting adducts
      p.add <- colSums(matrix(Add[sampcomp[-ind.rem[[thism]]], ], ncol = Nc))
      ### counting isotopes
      tmp <- matrix(Iso[sampcomp[-ind.rem[[thism]]], ], ncol = Nc) * (Int[thism]/Int[-ind.rem[[thism]]])
      ind.ones <- which((tmp >= ratio.toll) & (tmp <= (1/ratio.toll)))
      tmp[ind.ones] <- 1
      tmp[tmp != 1] <- 0
      p.iso <- colSums(tmp)
      ## counting biotransformations
      p.bio <- pot.bio - colSums(matrix(Bio[sampcomp[thism], ], ncol = Nc))
      ## normalising with deltas
      p.add <- (p.add + delta.add)/sum(p.add + delta.add)
      p.iso <- (p.iso + delta.iso)/sum(p.iso + delta.iso)
      p.bio <- (p.bio + delta.bio)/sum(p.bio + delta.bio)
      ## merging scores
      po <- p.add * p.iso * p.bio * P[thism, ]
      po <- po/sum(po)
      oldval <- sampcomp[thism]
      sampcomp[thism] <- multsample(po)
      if (oldval != sampcomp[thism]) {
        pot.bio <- pot.bio - Bio[, oldval] + Bio[, sampcomp[thism]]
      }
    }
    allsampcomp[it + olds, ] <- sampcomp
    if (v) {
      if (it %% IT == 0) {
        # Print a progress message on the screen
        cat(paste0(round((it * 100)/no.its, 1), "%", "\n"))
      }
    }
  }
  post <- t(apply(allsampcomp, 2, compute.post, burn = burn, no.its = no.its + olds, Nc = Nc))
  if (allsamp) {
    out <- list(Post = post, allsampcomp = allsampcomp)
    return(out)
  } else {
    return(post)
  }
}
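# A minimal call sketch; multsample(), checking.rel.id() and compute.post()
# are helper functions defined elsewhere in this package, and the toy inputs
# below (M = 3 measured features, Nc = 4 candidate compounds) are invented
# purely to illustrate the expected shapes.
M <- 3; Nc <- 4
P      <- matrix(1/Nc, M, Nc)   # prior assignment probabilities (rows sum to 1)
Add    <- matrix(0, Nc, Nc)     # adduct connectivity between candidates
Iso    <- matrix(0, Nc, Nc)     # expected isotope-pattern intensity ratios
Bio    <- matrix(0, Nc, Nc)     # biotransformation connectivity
Int    <- c(100, 50, 10)        # measured intensities, one per feature
rel.id <- c(1, 1, 2)            # features sharing a relation id are excluded
                                # from each other's evidence counts
post <- IPA.sampler.Add.Iso.Bio.relID(P, Add, Iso, Bio, Int, rel.id,
                                      no.its = 200, burn = 50)
# post is an M x Nc matrix of posterior assignment probabilities.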
/R/IPA_sampler_ISO_ADD_BIO_relID.R
no_license
francescodc87/IPA
R
false
false
2643
r
library(dplyr)
library(MBQN)
library(matrixTests)
library(limma)
library(qvalue)
library(genefilter)
library(samr)
# BiocManager::install("ROTS")
library(ROTS)
library(parallel)
library(DescTools)
library(matrixcalc)
library(psych)
library(doParallel)
library(stats)
library(pcaMethods)
library(foreach)
library(rlist)
library(matrixStats)

#################################################################################
# NORMALIZATION
# "unnormalized", "TRQN", "QN", "median"
getNormalizedDf <- function(modus, df) {
  if (modus == "unnormalized"){
    df.model <- df
  } else if (modus == "TRQN") {
    mtx <- as.matrix(df)
    df.trqn <- mbqn(mtx, FUN = mean)
    row.names(df.trqn) <- row.names(df)
    df.model <- as.data.frame(df.trqn)
  } else if (modus == "QN"){
    mtx <- as.matrix(df)
    df.qn <- mbqn(mtx, FUN = NULL)
    row.names(df.qn) <- row.names(df)
    df.model <- as.data.frame(df.qn)
  } else if (modus == "median"){
    mtx <- as.matrix(df)
    df.median <- limma::normalizeMedianValues(mtx)
    df.model <- as.data.frame(df.median)
  } else {
    print("Undefined modus")
  }
  return(df.model)
}

#################################################################################
# SPARSITY REDUCTION
getSparsityReducedDf <- function(modus, df) {
  if (modus == "NoSR"){
    imp.df <- df
  } else if (modus == "SR66") {
    # Filtering 66%
    imp.df <- df[which(rowMeans(!is.na(df)) > 0.66), ]
  } else if (modus == "SR90") {
    # Filtering 90%
    imp.df <- df[which(rowMeans(!is.na(df)) > 0.9), ]
  } else {
    print("Undefined modus")
  }
  return(imp.df)
}

#################################################################################
# STATISTICAL TESTS
multiModel <- function(x, group.size, modelType){
  # print(x)
  tmp <- data.frame(x2=unlist(as.vector(x)),
                    y=as.factor(c(rep("X25", group.size), rep("X12", group.size))))
  res <- tryCatch({
    if (modelType == "glm") {
      # res <- as.vector(coef(summary(glm(x2 ~ y, family = Gamma(link = "identity"), data = tmp)))[,"Pr(>|t|)"][2])
      res <- as.vector(coef(summary(glm(x2 ~ y, family = Gamma(link = "log"), data = tmp)))[,"Pr(>|t|)"][2]) # (link = "identity") was wrongly added
      # res <- as.vector(coef(summary(glm(x2 ~ y, family = "Gamma", data = tmp)))[,"Pr(>|t|)"][2]) # (link = "identity") was wrongly added
    }
    # if (modelType == "lm") {
    #   res <- as.vector(coef(summary(lm(x2 ~ y, data=tmp)))[,"Pr(>|t|)"][2])
    # } else if (modelType == "glm") {
    #   res <- as.vector(coef(summary(glm(x2 ~ y, family = Gamma(link = "identity"), data = tmp)))[,"Pr(>|t|)"][2])
    # } else if (modelType == "lasso") {
    #   res <- as.vector(coef(summary(lasso2::l1ce(x2 ~ y, data=tmp)))[,"Pr(>|Z|)"][2])
    # }
  # }, warning = function(warning_condition) {
  #   message(warning_condition)
  }, error = function(error_condition) {
    # message(error_condition)
    res <- NA
  })
  return(res)
}

statsLstToDf <- function(stats.lst){
  stats.lst[sapply(stats.lst, is.null)] <- NA
  stats.lst[sapply(stats.lst, is.nan)] <- NA
  # print(stats.lst)
  if(is.vector(stats.lst) & !is.list(stats.lst)) {
    stats.df <- data.frame(Protein=names(stats.lst), pValue=stats.lst)
  } else {
    stats.df <- data.frame(Protein=names(stats.lst),
                           pValue=do.call(rbind.data.frame, stats.lst)[,1])
  }
  return(stats.df)
}

getStatTestResultDf <- function(modus, df) {
  group.size <- ncol(df)/2
  groups <- c(rep("X25", group.size), rep("X12", group.size))
  seed <- NA
  if (modus == "ttestVarEqual") {
    output <- matrixTests::row_t_equalvar(df[, 1:group.size], df[, (group.size+1):(2*group.size)],
                                          alternative = "two.sided", mu = 0, conf.level = 0.95)
    stats.df <- data.frame(Protein = row.names(df), pValue = output$pvalue)
  } else if (modus == "ttestVarNotEqual") {
    output <- matrixTests::row_t_welch(df[, 1:group.size], df[, (group.size+1):(2*group.size)],
                                       alternative = "two.sided", mu = 0, conf.level = 0.95)
    stats.df <- data.frame(Protein = row.names(df), pValue = output$pvalue)
  } else if (modus == "Wilcoxon") {
    output <- matrixTests::row_wilcoxon_twosample(df[, 1:group.size], df[, (group.size+1):(2*group.size)],
                                                  alternative = "two.sided", mu = 0,
                                                  exact = NA, correct = TRUE)
    stats.df <- data.frame(Protein = row.names(df), pValue = output$pvalue)
  # } else if (modus == "RankProduct") {
  #   seed <- sample(1:1000000000, 1)
  #   groups <- c(rep(0, group.size), rep(1, group.size))
  #   output <- RankProd::RankProducts(df, groups, rand=seed)
  #   stats.df <- data.frame(Protein = row.names(df), pValue = apply(output$pval,1,min))
  } else if (modus == "limma") {
    # limma
    # library(limma)
    design <- model.matrix(~groups)
    fit <- lmFit(df, design)
    fit2 <- eBayes(fit)
    output <- topTable(fit2, coef = 2, number = nrow(df), sort.by = "none", adjust.method="BH")
    stats.df <- data.frame(Protein = row.names(df), pValue = output$P.Value)
  # } else if (modus == "ttest") {
  #   pVals <- rowttests(data.matrix(df), as.factor(groups), na.rm = T)$p.value
  #   stats.df <- data.frame(Protein = row.names(df), pValue = pVals)
  } else if (modus == "SAM") {
    # SAM
    seed <- sample(1:1000000000, 1)
    set.seed(seed)
    groups <- c(rep(1, group.size), rep(2, group.size))
    data <- list(x=as.matrix(df), y=groups, geneid=as.character(1:nrow(df)),
                 genenames=row.names(df), logged2=TRUE)
    invisible(capture.output(samr.obj <- samr(data, resp.type="Two class unpaired",
                                              nperms=250, random.seed = seed)))
    pv = data.frame(samr.pvalues.from.perms(samr.obj$tt, samr.obj$ttstar))
    stats.df <- data.frame(Protein = row.names(pv), pValue = pv[,1])
  } else if (modus == "ROTS") {
    # ROTS
    groups <- c(rep(0, group.size), rep(1, group.size))
    bool25 <- rowSums(!is.na(df[,1:group.size])) > 1
    bool12 <- rowSums(!is.na(df[,(group.size+1):ncol(df)])) > 1
    boolTooManyNAs <- which(bool25 & bool12)
    seed <- sample(1:1000000000, 1)
    # results <- ROTS(data = df[boolTooManyNAs,], groups = groups, B = 100, K = 500, seed = 1234)
    results <- ROTS(data = df[boolTooManyNAs,], groups = groups, B = 100, K = 500,
                    verbose = FALSE, seed=seed)
    results.df <- data.frame(results$logfc, results$pvalue)
    stats.df <- data.frame(Protein = row.names(results.df), pValue = results.df$results.pvalue)
    stats.df <- rbind(stats.df, data.frame(Protein = row.names(df[-boolTooManyNAs,]),
                                           pValue = rep(NA, nrow(df[-boolTooManyNAs,]))))
    stats.df <- stats.df[order(match(stats.df$Protein, row.names(df))),]
    row.names(stats.df) <- NULL
  } else if (modus == "GLMgamma") {
    # GLM-Gamma
    seed <- sample(1:1000000000, 1)
    set.seed(seed)
    exp.df <- 2^df # Gamma regression needs to be done on non-log-transformed data
    stats.lst <- apply(exp.df, 1, multiModel, group.size=group.size, modelType="glm")
    stats.df <- statsLstToDf(stats.lst)
  } else {
    print("Undefined modus")
  }
  return(list(stats.df, seed))
}

#################################################################################
# DATA CHARACTERISTICS

# Kolmogorov-Smirnov test.
# Returns the proportion of samples whose KS test against all samples combined
# comes out significant.
kolSmirTestSignProp <- function(mtx) {
  pvals.mtx <- apply(mtx, 2, function(x) stats::ks.test(x, mtx)$p.value)
  cnt <- sum(pvals.mtx < 0.05)
  signProportion <- cnt/length(pvals.mtx)
  return(signProportion)
}

# calc_ functions are from the radiomics package
calc_energy <- function(data){
  #TODO: Add dim check for 2D vs 3D
  return(sum(as.numeric(data)*as.numeric(data), na.rm=TRUE))
}

#' @describeIn first_order_features Entropy
#' @param base The base for which the logarithm is calculated
#' @param nbins The number of bins the histogram is discretized into
calc_entropy <- function(data, base=2, nbins=length(unique(c(data)))){
  # Break data into a hist
  im_range <- range(data, na.rm=TRUE)
  cuts <- table(cut(data, seq(im_range[1], im_range[2], by=diff(im_range)/nbins),
                    include.lowest=TRUE))/length(data[!is.na(data)])
  # Logs cannot take 0 values, so let = 0 if no value
  entropy_vals <- vapply(cuts, function(data) ifelse(data != 0, data*logb(data, base=base), 0),
                         FUN.VALUE = 1)
  return(-1*sum(entropy_vals))
}

calc_kurtosis <- function(data){
  n <- length(data[!is.na(data)])
  data <- data - mean(data, na.rm=TRUE)
  r <- n * sum(data^4, na.rm=TRUE) / (sum(data^2, na.rm=TRUE)^2)
  return(r * (1 - 1/n)^2 - 3)
}

calc_meanDeviation <- function(data){
  scale <- 1/prod(dim(data))
  mu <- mean(data, na.rm=TRUE)
  return(scale * sum(abs(data - mu), na.rm=TRUE))
}

calc_skewness <- function(data){
  data <- data[!is.na(data)]
  return(sum((data - mean(data))^3)/(length(data) * sd(data)^3))
}

calc_uniformity <- function(data, nbins=length(unique(c(data)))){
  # Break data into a hist
  data <- data[!is.na(data)]
  im_range <- range(data, na.rm=TRUE)
  cuts <- table(cut(data, seq(im_range[1], im_range[2], by=diff(im_range)/nbins),
                    include.lowest=TRUE))/length(data)
  function_vals <- vapply(cuts, function(data) data^2, FUN.VALUE = 1)
  return(sum(function_vals))
}

calc_variance <- function(data) var(c(data), na.rm=TRUE)

calc_RMS <- function(data) sqrt(mean(data^2, na.rm=TRUE))

getMoreCharacteristics <- function(mtx, withNAs=TRUE){
  KS.SignProp <- kolSmirTestSignProp(mtx)
  entropy <- calc_entropy(mtx)
  kurtosis <- calc_kurtosis(mtx)
  meanDeviation <- calc_meanDeviation(mtx)
  skewness <- calc_skewness(mtx)
  uniformity <- calc_uniformity(mtx)
  variance <- calc_variance(mtx)
  RMS <- calc_RMS(mtx)
  group.size <- ncol(mtx)/2
  var.groups.ratio <- median(matrixStats::rowVars(mtx[, 1:group.size], na.rm = TRUE) /
                             matrixStats::rowVars(mtx[, (group.size+1):ncol(mtx)], na.rm = TRUE),
                             na.rm = TRUE)
  if (withNAs){
    resultvec <- c(KS.SignProp = KS.SignProp,
                   entropy = entropy,
                   kurtosis = kurtosis,
                   meanDeviation = meanDeviation,
                   skewness = skewness,
                   uniformity = uniformity,
                   variance = variance,
                   RMS = RMS,
                   var.groups.ratio = var.groups.ratio)
  } else {
    t.mtx <- t(mtx)
    t.mtx <- t.mtx[, which(apply(t.mtx, 2, var) != 0)] # Remove zero-variance columns
    pca <- stats::prcomp(t.mtx, scale.=T)
    eigs <- pca$sdev^2
    prctPC1 <- eigs[1]/sum(eigs)
    prctPC2 <- eigs[2]/sum(eigs)
    elongation <- sqrt(eigs[2] / eigs[1])          # elongation
    flatness <- sqrt(eigs[length(eigs)]/eigs[1])   # flatness
    resultvec <- c(KS.SignProp = KS.SignProp,
                   entropy = entropy,
                   kurtosis = kurtosis,
                   meanDeviation = meanDeviation,
                   skewness = skewness,
                   uniformity = uniformity,
                   variance = variance,
                   RMS = RMS,
                   var.groups.ratio = var.groups.ratio,
                   prctPC1 = prctPC1,
                   prctPC2 = prctPC2,
                   elongation = elongation,
                   flatness = flatness)
  }
  return(resultvec)
}

runBPTest <- function(x, group.size){
  tmp <- data.frame(x2=unlist(as.vector(x)),
                    y=as.factor(c(rep("X25", group.size), rep("X12", group.size))))
  m <- stats::lm(x2 ~ y, data=tmp)
  bp.res <- lmtest::bptest(m, studentize = TRUE)
  bp.res[["p.value"]]
}

getDataCharacteristics <- function(df) {
  mtx <- as.matrix(df)
  medianSampleVariance <- median(apply(df, 2, var, na.rm=TRUE))
  medianProteinVariance <- median(unname(apply(df, 1, var, na.rm=TRUE)), na.rm = TRUE)
  # KS.SignProp <- kolSmirTestSignProp(as.matrix(df))
  percNATotal <- mean(is.na(df)) * 100
  percOfRowsWithNAs <- sum(apply(df, 1, anyNA))/nrow(df) * 100
  # percNATotal2 <- mean(is.na(mtx)) * 100
  characts.wNAs <- getMoreCharacteristics(mtx, withNAs=TRUE)
  # names(characts.wNAs) <- c("entropy.wNAs", "kurtosis.wNAs", "meanDeviation.wNAs",
  #                           "skewness.wNAs", "uniformity.wNAs", "variance.wNAs",
  #                           "RMS.wNAs", "var.groups.ratio.wNAs")
  #                           # "prctPC1.wNAs", "elongation.wNAs", "flatness.wNAs")
  names(characts.wNAs) <- paste0(names(characts.wNAs), ".wNAs")
  mtx <- mtx[rowSums(is.na(mtx)) == 0, ]
  nProteins.woNAs <- nrow(mtx) # number of proteins with no NAs
  characts.woNAs <- getMoreCharacteristics(mtx, withNAs=FALSE)
  # names(characts.woNAs) <- c("entropy.woNAs", "kurtosis.woNAs", "meanDeviation.woNAs",
  #                            "skewness.woNAs", "uniformity.woNAs", "variance.woNAs",
  #                            "RMS.woNAs", "var.groups.ratio.woNAs", "prctPC1.woNAs",
  #                            "elongation.woNAs", "flatness.woNAs")
  names(characts.woNAs) <- paste0(names(characts.woNAs), ".woNAs")
  group.size <- ncol(mtx)/2
  BPTest.lst <- apply(mtx, 1, runBPTest, group.size=group.size)
  # heterosc.woNAs <- sum(BPTest.lst < 0.05) / length(BPTest.lst)
  qobj <- qvalue::qvalue(p = BPTest.lst)
  heterosc.oneMinuspi0 <- 1 - qobj$pi0
  datacharacts <- c(medianSampleVariance = medianSampleVariance,
                    medianProteinVariance = medianProteinVariance,
                    percNATotal = percNATotal,
                    percOfRowsWithNAs = percOfRowsWithNAs,
                    characts.wNAs,
                    characts.woNAs,
                    heterosc.oneMinuspi0 = heterosc.oneMinuspi0,
                    nProteins.woNAs = nProteins.woNAs)
}

getNumberOfProteins <- function(nEcoli.pre, nHuman.pre, stats.df, combinedProteinNames,
                                intersectProteinNames, DiaWorkflowProteinNames) {
  nEcoli <- nrow(stats.df[which(grepl("ECOLI", stats.df$Protein)),])
  nHuman <- nrow(stats.df[which(grepl("HUMAN", stats.df$Protein)),])
  nEcoli.comb <- length(combinedProteinNames[grepl("_ECOLI", combinedProteinNames)])
  nHuman.comb <- length(combinedProteinNames[grepl("_HUMAN", combinedProteinNames)])
  nEcoli.intersect <- length(intersectProteinNames[grepl("_ECOLI", intersectProteinNames)])
  nHuman.intersect <- length(intersectProteinNames[grepl("_HUMAN", intersectProteinNames)])
  nEcoli.diaWorkflow <- length(DiaWorkflowProteinNames[grepl("_ECOLI", DiaWorkflowProteinNames)])
  nHuman.diaWorkflow <- length(DiaWorkflowProteinNames[grepl("_HUMAN", DiaWorkflowProteinNames)])
  c(nEcoli.pre=nEcoli.pre, nHuman.pre=nHuman.pre, nEcoli=nEcoli, nHuman=nHuman,
    nEcoli.comb = nEcoli.comb, nHuman.comb = nHuman.comb,
    nEcoli.intersect = nEcoli.intersect, nHuman.intersect = nHuman.intersect,
    nEcoli.diaWorkflow = nEcoli.diaWorkflow, nHuman.diaWorkflow = nHuman.diaWorkflow)
}

#################################################################################
# pAUC calculation
library(pROC)
library(rlist)
library(stats)
library(foreach)
library(qvalue)
library(parallel)
library(doParallel)

# Function adapted from R package pROC version 1.17.0.1
auc.roc <- function(specSensDf,
                    # Partial auc definition
                    partial.auc=FALSE, # FALSE (consider total area) or numeric of length 2: boundaries of the AUC to consider, between 0 and 1, or 0 and 100 if percent is TRUE
                    partial.auc.focus=c("specificity", "sensitivity"), # if partial.auc is not FALSE: do the boundaries refer to specificity or sensitivity?
                    partial.auc.correct=FALSE,
                    allow.invalid.partial.auc.correct = FALSE,
                    percent = FALSE,
                    ... # unused; required to allow roc to pass arguments to plot or ci.
                    ) {
  if (!identical(partial.auc, FALSE)) {
    partial.auc.focus <- match.arg(partial.auc.focus)
  }
  # Validate partial.auc
  if (!identical(partial.auc, FALSE) & !(is.numeric(partial.auc) && length(partial.auc)==2))
    stop("partial.auc must be either FALSE or a numeric vector of length 2")
  # Ensure partial.auc is sorted with partial.auc[1] >= partial.auc[2]
  partial.auc <- sort(partial.auc, decreasing=TRUE)
  # Get and sort the sensitivities and specificities
  specSensDf <- specSensDf[with(specSensDf, order(Specificity)), ]
  se <- specSensDf$Sensitivity
  sp <- specSensDf$Specificity
  # Full area if partial.auc is FALSE
  if (identical(partial.auc, FALSE)) {
    # Branch inherited from pROC; with a plain data frame input, `roc` here
    # resolves to the pROC function, so the binormal-smoothing case is never taken.
    if (methods::is(roc, "smooth.roc") && !is.null(roc$smoothing.args) &&
        roc$smoothing.args$method == "binormal") {
      coefs <- coefficients(roc$model)
      auc <- unname(pnorm(coefs[1] / sqrt(1+coefs[2]^2)) * ifelse(percent, 100^2, 1))
    } else {
      diffs.x <- sp[-1] - sp[-length(sp)]
      means.vert <- (se[-1] + se[-length(se)])/2
      auc <- sum(means.vert * diffs.x)
    }
  } else {
    # Partial area
    if (partial.auc.focus == "sensitivity") {
      # if we focus on SE, just swap and invert x and y and the computations for SP will work
      x <- rev(se)
      y <- rev(sp)
    } else {
      x <- sp
      y <- se
    }
    # find the SEs and SPs in the interval
    x.inc <- x[x <= partial.auc[1] & x >= partial.auc[2]]
    y.inc <- y[x <= partial.auc[1] & x >= partial.auc[2]]
    # compute the AUC strictly in the interval
    diffs.x <- x.inc[-1] - x.inc[-length(x.inc)]
    means.vert <- (y.inc[-1] + y.inc[-length(y.inc)])/2
    auc <- sum(means.vert * diffs.x)
    # add the borders:
    if (length(x.inc) == 0) {
      # special case: the whole AUC is between 2 se/sp points. Need to interpolate from both
      diff.horiz <- partial.auc[1] - partial.auc[2]
      # determine indices
      idx.hi <- match(FALSE, x < partial.auc[1])
      idx.lo <- idx.hi - 1
      # proportions
      proportion.hi <- (x[idx.hi] - partial.auc[1]) / (x[idx.hi] - x[idx.lo])
      proportion.lo <- (partial.auc[2] - x[idx.lo]) / (x[idx.hi] - x[idx.lo])
      # interpolated y's
      y.hi <- y[idx.hi] + proportion.hi * (y[idx.lo] - y[idx.hi])
      y.lo <- y[idx.lo] - proportion.lo * (y[idx.lo] - y[idx.hi])
      # compute AUC
      mean.vert <- (y.hi + y.lo)/2
      auc <- mean.vert*diff.horiz
    } else {
      # if the upper limit is not exactly present in SPs, interpolate
      if (!(partial.auc[1] %in% x.inc)) {
        # find the limit indices
        idx.out <- match(FALSE, x < partial.auc[1])
        idx.in <- idx.out - 1
        # interpolate y
        proportion <- (partial.auc[1] - x[idx.out]) / (x[idx.in] - x[idx.out])
        y.interpolated <- y[idx.out] + proportion * (y[idx.in] - y[idx.out])
        # add to AUC
        auc <- auc + (partial.auc[1] - x[idx.in]) * (y[idx.in] + y.interpolated)/2
      }
      if (!(partial.auc[2] %in% x.inc)) {
        # if the lower limit is not exactly present in SPs, interpolate
        # find the limit indices in and out
        # idx.out <- length(x) - match(TRUE, rev(x) < partial.auc[2]) + 1
        idx.out <- match(TRUE, x > partial.auc[2]) - 1
        idx.in <- idx.out + 1
        # interpolate y
        proportion <- (x[idx.in] - partial.auc[2]) / (x[idx.in] - x[idx.out])
        y.interpolated <- y[idx.in] + proportion * (y[idx.out] - y[idx.in])
        # add to AUC
        auc <- auc + (x[idx.in] - partial.auc[2]) * (y[idx.in] + y.interpolated)/2
      }
    }
  }
  # In percent, we have 100*100 = 10,000 as maximum area, so we need to divide by a factor 100
  if (percent) auc <- auc/100
  # Correction according to McClish DC, 1989
  if (all(!identical(partial.auc, FALSE), partial.auc.correct)) { # only for pAUC
    min <- pROC:::roc.utils.min.partial.auc(partial.auc, percent)
    max <- pROC:::roc.utils.max.partial.auc(partial.auc, percent)
    # The correction is defined only when auc >= min
    if (!allow.invalid.partial.auc.correct && auc < min) {
      warning("Partial AUC correction not defined for ROC curves below the diagonal.")
      auc <- NA
    } else if (percent) {
      auc <- (100+((auc-min)*100/(max-min)))/2 # McClish formula adapted for %
    } else {
      auc <- (1+((auc-min)/(max-min)))/2 # original formula by McClish
    }
  }
  return(auc)
}

getSensAtpVal005 <- function(df2, nEcoli.pre=NA, nHuman.pre=NA){
  if (is.na(nEcoli.pre)){
    totalEcoli <- nrow(df2[which(grepl("ECOLI", row.names(df2))),])
    totalHuman <- nrow(df2[which(grepl("HUMAN", row.names(df2))),])
  } else {
    totalEcoli <- nEcoli.pre
    totalHuman <- nHuman.pre
  }
  TP <- nrow(df2[(df2$pValue < 0.05) & grepl("ECOLI", row.names(df2)),])
  sens <- TP/totalEcoli
  return(sens)
}

getpValCurvedf <- function(stats.df, nEcoli.pre=NA, nHuman.pre=NA) {
  if (is.na(nEcoli.pre)){
    totalEcoli <- nrow(stats.df[which(grepl("ECOLI", row.names(stats.df))),])
    totalHuman <- nrow(stats.df[which(grepl("HUMAN", row.names(stats.df))),])
  } else {
    totalEcoli <- nEcoli.pre
    totalHuman <- nHuman.pre
  }
  # p.curve.lst <- lapply(unique(c(0, sort(stats.df$pValue)[c(TRUE, rep(FALSE, times=9))], 1)), function(pvalue, stats.df){
  p.curve.lst <- lapply(unique(c(0, sort(stats.df$pValue), 1)), function(pvalue, stats.df){
    TP <- nrow(stats.df[(stats.df$pValue <= pvalue) & grepl("ECOLI", row.names(stats.df)),])
    FP <- nrow(stats.df[(stats.df$pValue <= pvalue) & grepl("HUMAN", row.names(stats.df)),])
    oneMinusSpec <- FP/totalHuman
    sens <- TP/totalEcoli
    list(oneMinusSpec, sens)
  }, stats.df=stats.df)
  p.curve.df <- data.frame(matrix(unlist(p.curve.lst), nrow = length(p.curve.lst), byrow = T))
  colnames(p.curve.df) <- c("1-Specificity", "Sensitivity")
  if (nrow(p.curve.df[(p.curve.df$`1-Specificity` == 0 & p.curve.df$Sensitivity == 0) == TRUE, ]) == 0) {
    p.curve.df <- rbind(c(0,0), p.curve.df)
    colnames(p.curve.df) <- c("1-Specificity", "Sensitivity")
  }
  p.curve.df <- p.curve.df[complete.cases(p.curve.df), ]
  return(p.curve.df)
}

getRegulatedProp <- function(pValueVec) {
  if (any(pValueVec > 1, na.rm = TRUE)){
    # E.g. in the case of SAM there are sometimes p-values above 1
    warning(paste0(sum(pValueVec > 1), " p-values are above 1. They are set to 1."))
    pValueVec[pValueVec > 1] <- 1
  }
  # Remove missing values
  pValueVec[is.nan(pValueVec)] <- NA
  pValueVec[is.infinite(pValueVec)] <- NA
  pValueVec <- pValueVec[!is.na(pValueVec)] # "missing or infinite values in inputs are not allowed"
  result <- NA
  try({
    qobj.comb <- qvalue::qvalue(p = pValueVec)
    result <- 1 - qobj.comb$pi0
  }, silent = TRUE)
  result
}

getPartialAUCs <- function(SensSpecDF) {
  partial.aucs <- c(.8, .9, .95)
  partial.auc.corrects <- c(FALSE) # c(TRUE, FALSE)
  auc.Settings <- data.frame(expand.grid(partial.auc=partial.aucs,
                                         partial.auc.correct=partial.auc.corrects))
  aucs.results <- apply(auc.Settings, 1, function(row) {
    # print(row["partial.auc"])
    partial.auc <- unlist(unname(row["partial.auc"]))
    if (partial.auc == 0){
      SensSpecDF2 <- rbind(SensSpecDF, c(1, max(SensSpecDF$Sensitivity), 0))
      auc <- auc.roc(SensSpecDF, partial.auc=FALSE, partial.auc.focus="specificity",
                     partial.auc.correct=as.logical(row["partial.auc.correct"]))
    } else {
      auc <- auc.roc(SensSpecDF, partial.auc=c(1, partial.auc), partial.auc.focus="specificity",
                     partial.auc.correct=as.logical(row["partial.auc.correct"]))
    }
    if (length(auc) == 0) auc <- NA # length 0 can happen if 1-specificity for pAUC can't be reached, e.g. due to too few proteins being left after sparsity reduction
    auc
  })
  names(aucs.results) <- paste0("p.pauc_", auc.Settings$partial.auc, "_correct",
                                as.logical(auc.Settings$partial.auc.correct))
  aucs.results
}

getPartialAUCsResults <- function(stats.df, nEcoli, nHuman) {
  sensAtpVal005 <- getSensAtpVal005(stats.df, nEcoli.pre=nEcoli, nHuman.pre=nHuman)
  p.roc.df <- getpValCurvedf(stats.df, nEcoli.pre=nEcoli, nHuman.pre=nHuman)
  p.roc.df$Specificity <- 1 - p.roc.df$`1-Specificity`
  aucs.results <- getPartialAUCs(p.roc.df)
  aucs.results <- c(aucs.results, sensAtpVal005=sensAtpVal005)
  aucs.results
}

getStatsProteinNames <- function(combIntersect, proteinNames, stats.df, dia=NULL) {
  # if (combIntersect == "combined"){
  #   proteinNames <- readRDS("combinedProteinNames.rds")
  # } else if (combIntersect == "intersect"){
  #   proteinNames <- readRDS("intersectProteinNames.rds")
  # } else if (combIntersect == "diaWorkflow"){
  #   proteinNames <- readRDS(paste0(dia, "_ProteinNames.rds"))
  # }
  nEcoli <- length(proteinNames[grepl("_ECOLI", proteinNames)])
  nHuman <- length(proteinNames[grepl("_HUMAN", proteinNames)])
  if (combIntersect %in% c("combined", "intersect")){
    intersectBool <- apply(stats.df, 1, function(x) {
      length(intersect(unlist(base::strsplit(x[1], ";")), proteinNames)) > 0
    })
    stats.df.protNames <- stats.df[intersectBool, ]
  } else if (combIntersect == "diaWorkflow"){
    stats.df.protNames <- stats.df
  }
  list(stats.df.protNames=stats.df.protNames, nEcoli=nEcoli, nHuman=nHuman)
}

getRegValsPropAndPAucs <- function(combIntersect=c("combined", "intersect", "diaWorkflow"),
                                   proteinNames, stats.df, dia) {
  row.names(stats.df) <- stats.df$Protein
  stats.protNames <- getStatsProteinNames(combIntersect = combIntersect,
                                          proteinNames=proteinNames, stats.df, dia)
  regpValsProp <- getRegulatedProp(pValueVec=stats.protNames[["stats.df.protNames"]]$pValue)
  stats.protNames[["stats.df.protNames"]]$pValue[is.na(stats.protNames[["stats.df.protNames"]]$pValue)] <- 1 # Replace NAs with pValue of 1
  paucs <- getPartialAUCsResults(stats.df = stats.protNames[["stats.df.protNames"]],
                                 nEcoli = stats.protNames[["nEcoli"]],
                                 nHuman = stats.protNames[["nHuman"]])
  res <- c(paucs, regpValsProp=regpValsProp)
  names(res) <- paste0(names(res), ".", combIntersect)
  return(res)
}

#################################################################################
# RMSE
rmse <- function(actual, predicted) {
  sqrt(mean((actual - predicted)^2, na.rm = TRUE))
}

getRMSE <- function(stats.df) {
  stats.dfEcoli <- stats.df[grepl("ECOLI", stats.df$Protein), ]
  stats.dfHuman <- stats.df[grepl("HUMAN", stats.df$Protein), ]
  Ecoli <- rmse(actual=stats.dfEcoli$log2FC, predicted=stats.dfEcoli$log2FCPredicted)
  Human <- rmse(actual=stats.dfHuman$log2FC, predicted=stats.dfHuman$log2FCPredicted)
  HumanAndEcoli <- rmse(actual=stats.df$log2FC, predicted=stats.df$log2FCPredicted)
  list(Ecoli, Human, HumanAndEcoli)
}

getRMSEResults <- function(combIntersect=c("intersect", "diaWorkflow"), proteinNames, stats.df, dia) {
  stats.protNames <- getStatsProteinNames(combIntersect = combIntersect,
                                          proteinNames = proteinNames, stats.df, dia)
  stats.df2 <- stats.protNames[["stats.df.protNames"]]
  RMSEs <- getRMSE(stats.df2)
  RMSEEcoli <- RMSEs[[1]]
  RMSEHuman <- RMSEs[[2]]
  RMSEHumanAndEcoli <- RMSEs[[3]]
  res <- c(RMSEEcoli=RMSEEcoli, RMSEHuman=RMSEHuman, RMSEHumanAndEcoli=RMSEHumanAndEcoli)
  names(res) <- paste0(names(res), ".", combIntersect)
  return(res)
}

#################################################################################
runAnalysisForEachBootstrapSample <- function(bootstrap.dataset, indices, diaWorkflowResults.selected,
                                              dia, normalization, sparsityReduction, statTest,
                                              combinedProteinNames, intersectProteinNames,
                                              DiaWorkflowProteinNames) {
  df <- diaWorkflowResults.selected[, unlist(indices[bootstrap.dataset])]
  # df <- repList[[bootstrap.dataset]]
  # Remove empty rows
  df <- df[rowSums(is.na(df)) != ncol(df), ]
  nEcoli.pre <- sum(grepl("ECOLI", row.names(df)))
  nHuman.pre <- sum(grepl("HUMAN", row.names(df)))
  group.size <- ncol(df)/2
  data.characts <- getDataCharacteristics(df)
  sparsRed.runtime <- system.time({
    # Sparsity Reduction
    df.sr <- getSparsityReducedDf(sparsityReduction, df)
  })
  normalization.runtime <- system.time({
    # Normalization
    df <- getNormalizedDf(normalization, df.sr)
  })
  log2FC.df <- data.frame(Protein=row.names(df),
                          log2FC=rowMeans(df[, (group.size+1):ncol(df)], na.rm = TRUE) -
                                 rowMeans(df[, 1:group.size], na.rm = TRUE))
  statTest.runtime <- system.time({
    # print("Stats")
    stats.df.lst <- getStatTestResultDf(statTest, df)
    stats.df <- stats.df.lst[[1]]
    seed.stat <- stats.df.lst[[2]]
  })
  sparsRed.runtime <- sparsRed.runtime[["user.self"]]
  normalization.runtime <- normalization.runtime[["user.self"]]
  statTest.runtime <- statTest.runtime[["user.self"]]
  stats.df <- merge(stats.df, log2FC.df, by=c("Protein"), all.x=FALSE)
  stats.df$log2FCPredicted <- NA
  stats.df[grepl("ECOLI", stats.df$Protein), ]$log2FCPredicted <- log2(0.24038462/0.11076923) # 1.11778738, was previously wrongly assumed to be log2((1/12)/(1/25))=1.058894
  stats.df[grepl("HUMAN", stats.df$Protein), ]$log2FCPredicted <- log2(1)
  if (sum(is.nan(stats.df$pValue)) > 0) stats.df[is.nan(stats.df$pValue),]$pValue <- NA
  if (sum(is.nan(stats.df$log2FC)) > 0) stats.df[is.nan(stats.df$log2FC),]$log2FC <- NA
  numberOfProteins <- getNumberOfProteins(nEcoli.pre, nHuman.pre, stats.df, combinedProteinNames,
                                          intersectProteinNames, DiaWorkflowProteinNames)
  regValsPropAndPAucs.comb <- getRegValsPropAndPAucs(combIntersect = "combined",
                                                     proteinNames = combinedProteinNames,
                                                     stats.df = stats.df, dia=NULL)
  regValsPropAndPAucs.intersect <- getRegValsPropAndPAucs(combIntersect = "intersect",
                                                          proteinNames = intersectProteinNames,
                                                          stats.df = stats.df, dia=NULL)
  regValsPropAndPAucs.diaWorkflow <- getRegValsPropAndPAucs(combIntersect = "diaWorkflow",
                                                            proteinNames = DiaWorkflowProteinNames,
                                                            stats.df = stats.df, dia=dia)
  RMSE.intersect <- getRMSEResults(combIntersect="intersect", proteinNames = intersectProteinNames,
                                   stats.df, dia=NULL)
  RMSE.diaWorkflow <- getRMSEResults(combIntersect="diaWorkflow", proteinNames = DiaWorkflowProteinNames,
                                     stats.df, dia=dia)
  summary <- rlist::list.flatten(list(bootstrap.dataset = bootstrap.dataset,
                                      dia = dia,
                                      normalization = normalization,
                                      sparsityReduction = sparsityReduction,
                                      statTest = statTest,
                                      groupSize=group.size,
                                      as.list(data.characts),
                                      nAllProteins=nrow(stats.df),
                                      as.list(numberOfProteins),
                                      sparsRed.runtime = sparsRed.runtime,
                                      normalization.runtime = normalization.runtime,
                                      statTest.runtime = statTest.runtime,
                                      seed.stat=seed.stat,
                                      as.list(regValsPropAndPAucs.comb),
                                      as.list(regValsPropAndPAucs.intersect),
                                      as.list(regValsPropAndPAucs.diaWorkflow),
                                      as.list(RMSE.intersect),
                                      as.list(RMSE.diaWorkflow)))
  return(list(stats.df, summary))
}

################################################################################
# batch <- 1
# max 32 at batch.size 20, max 64 at batch.size 10, max 128 at batch.size 5
run <- function(batch, batch.size) {
  # GET ALL PARAMETER COMBINATIONS
  dias <- sort(c("DIANN_DIANN_AI", "DIANN_DIANN_AI_GPF", "DIANN_MaxQuant", "DIANN_MSFragger",
                 "DIANN_PROSIT_EDIA_GPF", "OSW_DIANN_AI_GPF", "OSW_MaxQuant", "OSW_MSFragger",
                 "Skyline_DIANN_AI_GPF", "Skyline_MaxQuant", "Skyline_MSFragger",
                 "Skyline_PROSIT_EDIA_GPF", "Spectronaut_DIANN_AI_GPF", "Spectronaut_DirectDIA",
                 "Spectronaut_MaxQuant", "Spectronaut_MSFragger", "Spectronaut_PROSIT_EDIA_GPF"))
  normalizations <- c("unnormalized", "TRQN", "QN", "median")
  sparsityReductions <- c("NoSR", "SR66", "SR90")
  statTests <- c("ttestVarEqual", "ttestVarNotEqual", "GLMgamma", "limma", "Wilcoxon", "SAM", "ROTS")
  combs <- expand.grid(statTests, sparsityReductions, normalizations, dias)
  colnames(combs) <- c("statTest", "sparsityReduction", "normalization", "dia")
  if (batch*batch.size > nrow(combs)){
    combs <- combs[(((batch-1)*batch.size)+1):nrow(combs),]
  } else {
    combs <- combs[(((batch-1)*batch.size)+1):(batch*batch.size),]
  }
  combs.lst <- as.list(as.data.frame(t(combs)))
  combinedProteinNames <- readRDS("combinedProteinNames.rds")
  intersectProteinNames <- readRDS("intersectProteinNames.rds")
  # load("/Users/admin/Desktop/PhD/202110_dia-benchmarking_rerun/DIAsoftwareOutputProteinLevel_1to12And1to25Only_wideFormat_withBootstrapIndicesAndIntersectAndCombinedProteinNames.RData")
  # saveRDS(diaWorkflowResults, file = "diaWorkflowResults.rds")
  indices <- readRDS("indices.rds")
  diaWorkflowResults <- readRDS("diaWorkflowResults.rds")
  # result.list <- foreach(i = seq_along(combs.lst)) %dopar% {
  # result.list <- foreach(i = seq_along(combs.lst)) %do% {
  # NOTE: with the loop over combs.lst commented out, only the first combination
  # of this batch is processed, i.e. the function is effectively run with batch.size = 1.
  vector <- combs.lst[[1]]
  statTest <- as.character(vector[1])
  sparsityReduction <- as.character(vector[2])
  normalization <- as.character(vector[3])
  dia <- as.character(vector[4])
  print(statTest)
  print(sparsityReduction)
  print(normalization)
  print(dia)
  # DiaWorkflowProteinNames <- readRDS(paste0(dia, "_ProteinNames.rds"))
  # print("---Loading Bootstrap datasets...")
  # repList <- readRDS(paste0(dia, ".rds"))
  print("--------------------------")
  # subresult.list <- lapply(seq_along(repList), runAnalysisForEachBootstrapSample, repList=repList, dia=dia,
  #                          normalization=normalization, sparsityReduction=sparsityReduction, statTest=statTest,
  #                          combinedProteinNames=combinedProteinNames, intersectProteinNames=intersectProteinNames,
  #                          DiaWorkflowProteinNames=DiaWorkflowProteinNames)
  diaWorkflowResults.selected <- diaWorkflowResults[[dia]]
  DiaWorkflowProteinNames <- row.names(diaWorkflowResults.selected)
  # result.list <- lapply(seq_along(indices), runAnalysisForEachBootstrapSample, indices=indices,
  #                       diaWorkflowResults.selected=diaWorkflowResults.selected, dia=dia,
  #                       normalization=normalization, sparsityReduction=sparsityReduction, statTest=statTest,
  #                       combinedProteinNames=combinedProteinNames, intersectProteinNames=intersectProteinNames,
  #                       DiaWorkflowProteinNames=DiaWorkflowProteinNames)
  #
  procs <- as.numeric(Sys.getenv("SLURM_NTASKS"))
  registerDoParallel(cores=procs)
  result.list <- foreach(i = seq_along(indices)) %dopar% {
    runAnalysisForEachBootstrapSample(i, indices=indices,
                                      diaWorkflowResults.selected=diaWorkflowResults.selected, dia=dia,
                                      normalization=normalization, sparsityReduction=sparsityReduction,
                                      statTest=statTest,
                                      combinedProteinNames=combinedProteinNames,
                                      intersectProteinNames=intersectProteinNames,
                                      DiaWorkflowProteinNames=DiaWorkflowProteinNames)
  }
  # }
  # result.list2 <- unlist(result.list, recursive = FALSE)
  result.df <- lapply(result.list, function(x) x[[2]])
  result.df <- do.call(rbind.data.frame, result.df)
  # colnames(result.df) <- c("bootstrap.dataset", "dia", "normalization", "sparsityReduction", "statTest",
  #                          "groupSize", "nAllProteins", "nEcoliProteins", "nHumanProteins",
  #                          "nEcoliProteins.pre", "nHumanProteins.pre", "medianSampleVariance",
  #                          "medianProteinVariance", "KS.SignProp", "percNATotal", "percOfRowsWithNAs",
  #                          "sparsRed.runtime", "normalization.runtime", "statTest.runtime", "seed.stat")
  # colnames(result.df) <- names(subresult.list[[1]][[2]])
  if (batch.size == 1){
    session <- sessionInfo()
    sink(paste0("sessionInfo_", batch, "_", batch.size, "_", dia, "_", normalization, "_",
                sparsityReduction, "_", statTest, ".txt"))
    print(session)
    sink()
    write.csv(result.df, file=paste0("benchmark_results_", batch, "_", batch.size, "_", dia, "_",
                                     normalization, "_", sparsityReduction, "_", statTest, ".csv"),
              row.names = FALSE)
    save(result.list, file = paste0("resultlist_", batch, "_", batch.size, "_", dia, "_",
                                    normalization, "_", sparsityReduction, "_", statTest, ".Rdata"))
  } else {
    session <- sessionInfo()
    sink(paste0("sessionInfo_", batch, "_", batch.size, ".txt"))
    print(session)
    sink()
    write.csv(result.df, file=paste0("benchmark_results_", batch, "_", batch.size, ".csv"),
              row.names = FALSE)
    save(result.list, file = paste0("resultlist_", batch, "_", batch.size, ".Rdata"))
  }
}
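# A minimal launch sketch, assuming the .rds inputs the script expects
# (indices.rds, diaWorkflowResults.rds, combinedProteinNames.rds,
# intersectProteinNames.rds) sit in the working directory. SLURM_NTASKS
# controls the worker count; setting it manually is only needed outside a
# SLURM allocation.
# Sys.setenv(SLURM_NTASKS = "4")
batch <- 1
batch.size <- 1   # selects one of the 7 * 3 * 4 * 17 = 1428 parameter combinations per call
run(batch, batch.size)
# Writes benchmark_results_*.csv, resultlist_*.Rdata and sessionInfo_*.txt
# into the working directory.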
/R/benchmark_analysis.R
permissive
kreutz-lab/dia-benchmarking
R
false
false
36560
r
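# A toy check of the adapted auc.roc() on a hand-made three-point ROC curve;
# the numbers are invented, and library(pROC) is assumed to be attached as in
# the script above. Tracing the function, the full trapezoidal area comes out
# to 0.85, and the uncorrected partial area for specificity in [0.9, 1] to
# 0.04 (out of a possible 0.1).
specSensDf <- data.frame(Specificity = c(1, 0.9, 0),
                         Sensitivity = c(0, 0.8, 1))
auc.roc(specSensDf)                                  # full AUC: 0.85
auc.roc(specSensDf, partial.auc = c(1, 0.9),
        partial.auc.focus = "specificity")           # pAUC: 0.04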
NA } else if (percent) { auc <- (100+((auc-min)*100/(max-min)))/2 # McClish formula adapted for % } else { auc <- (1+((auc-min)/(max-min)))/2 # original formula by McClish } } return(auc) } getSensAtpVal005 <- function(df2, nEcoli.pre=NA, nHuman.pre=NA){ if (is.na(nEcoli.pre)){ totalEcoli <- nrow(df2[which(grepl("ECOLI", row.names(df2))),]) totalHuman <- nrow(df2[which(grepl("HUMAN", row.names(df2))),]) } else { totalEcoli <- nEcoli.pre totalHuman <- nHuman.pre } TP <- nrow(df2[(df2$pValue<0.05) & grepl("ECOLI", row.names(df2)),]) sens <- TP/totalEcoli return(sens) } getpValCurvedf <- function(stats.df, nEcoli.pre=NA, nHuman.pre=NA) { if (is.na(nEcoli.pre)){ totalEcoli <- nrow(stats.df[which(grepl("ECOLI", row.names(stats.df))),]) totalHuman <- nrow(stats.df[which(grepl("HUMAN", row.names(stats.df))),]) } else { totalEcoli <- nEcoli.pre totalHuman <- nHuman.pre } #p.curve.lst <- lapply(unique(c(0, sort(stats.df$pValue)[c(TRUE, rep(FALSE, times=9))], 1)), function(pvalue, stats.df){ p.curve.lst <- lapply(unique(c(0, sort(stats.df$pValue), 1)), function(pvalue, stats.df){ TP <- nrow(stats.df[(stats.df$pValue<=pvalue) & grepl("ECOLI", row.names(stats.df)),]) FP <- nrow(stats.df[(stats.df$pValue<=pvalue) & grepl("HUMAN", row.names(stats.df)),]) oneMinusSpec <- FP/totalHuman sens <- TP/totalEcoli list(oneMinusSpec, sens) }, stats.df=stats.df) p.curve.df <- data.frame(matrix(unlist(p.curve.lst), nrow = length(p.curve.lst), byrow = T)) colnames(p.curve.df) <- c("1-Specificity", "Sensitivity") if (nrow(p.curve.df[(p.curve.df$`1-Specificity` == 0 & p.curve.df$Sensitivity == 0) == TRUE, ]) == 0) { p.curve.df <- rbind(c(0,0), p.curve.df) colnames(p.curve.df) <- c("1-Specificity", "Sensitivity") } p.curve.df <- p.curve.df[complete.cases(p.curve.df), ] return(p.curve.df) } getRegulatedProp <- function(pValueVec) { if (any(pValueVec > 1, na.rm = TRUE)){ # E.g. 
in the case of SAM there are sometimes p values above 1
    warning(paste0(sum(pValueVec > 1), " p-values are above 1. They are set to 1."))
    pValueVec[pValueVec > 1] <- 1
  }
  # Remove missing values
  pValueVec[is.nan(pValueVec)] <- NA
  pValueVec[is.infinite(pValueVec)] <- NA
  pValueVec <- pValueVec[!is.na(pValueVec)] # "missing or infinite values in inputs are not allowed"
  result <- NA
  try({
    qobj.comb <- qvalue::qvalue(p = pValueVec)
    result <- 1 - qobj.comb$pi0
  }, silent = TRUE)
  result
}

getPartialAUCs <- function(SensSpecDF) {
  partial.aucs <- c(.8, .9, .95)
  partial.auc.corrects <- c(FALSE) # c(TRUE, FALSE)
  auc.Settings <- data.frame(expand.grid(partial.auc=partial.aucs, partial.auc.correct=partial.auc.corrects))
  aucs.results <- apply(auc.Settings, 1, function(row) {
    # print(row["partial.auc"])
    partial.auc <- unlist(unname(row["partial.auc"]))
    if (partial.auc == 0){
      SensSpecDF2 <- rbind(SensSpecDF, c(1, max(SensSpecDF$Sensitivity),0))
      auc <- auc.roc(SensSpecDF2, partial.auc=FALSE, partial.auc.focus="specificity", partial.auc.correct=as.logical(row["partial.auc.correct"])) # use the augmented curve including the end point at specificity 0
    } else {
      auc <- auc.roc(SensSpecDF, partial.auc=c(1, partial.auc), partial.auc.focus="specificity", partial.auc.correct=as.logical(row["partial.auc.correct"]))
    }
    if (length(auc) == 0) auc <- NA # length 0 can happen if 1-specificity for pAUC can't be reached, e.g. due to too few proteins being left after sparsity reduction
    auc
  })
  names(aucs.results) <- paste0("p.pauc_", auc.Settings$partial.auc, "_correct", as.logical(auc.Settings$partial.auc.correct))
  aucs.results
}

getPartialAUCsResults <- function(stats.df, nEcoli, nHuman) {
  sensAtpVal005 <- getSensAtpVal005(stats.df, nEcoli.pre=nEcoli, nHuman.pre=nHuman)
  p.roc.df <- getpValCurvedf(stats.df, nEcoli.pre=nEcoli, nHuman.pre=nHuman)
  p.roc.df$Specificity <- 1-p.roc.df$`1-Specificity`
  aucs.results <- getPartialAUCs(p.roc.df)
  aucs.results <- c(aucs.results, sensAtpVal005=sensAtpVal005)
  aucs.results
}

getStatsProteinNames <- function(combIntersect, proteinNames, stats.df, dia=NULL) {
  # if (combIntersect =="combined"){
  #   proteinNames <- readRDS("combinedProteinNames.rds")
  # } else if (combIntersect =="intersect"){
  #   proteinNames <- readRDS("intersectProteinNames.rds")
  # } else if (combIntersect =="diaWorkflow"){
  #   proteinNames <- readRDS(paste0(dia, "_ProteinNames.rds"))
  # }
  nEcoli <- length(proteinNames[grepl("_ECOLI", proteinNames)])
  nHuman <- length(proteinNames[grepl("_HUMAN", proteinNames)])
  if (combIntersect %in% c("combined", "intersect")){
    intersectBool <- apply(stats.df, 1, function(x) {
      length(intersect(unlist(base::strsplit(x[1], ";")), proteinNames))>0
    })
    stats.df.protNames <- stats.df[intersectBool, ]
  } else if (combIntersect =="diaWorkflow"){
    stats.df.protNames <- stats.df
  }
  list(stats.df.protNames=stats.df.protNames, nEcoli=nEcoli, nHuman=nHuman)
}

getRegValsPropAndPAucs <- function(combIntersect=c("combined", "intersect", "diaWorkflow"), proteinNames, stats.df, dia) {
  row.names(stats.df) <- stats.df$Protein
  stats.protNames <- getStatsProteinNames(combIntersect = combIntersect, proteinNames=proteinNames, stats.df, dia)
  regpValsProp <- getRegulatedProp(pValueVec=stats.protNames[["stats.df.protNames"]]$pValue)
  stats.protNames[["stats.df.protNames"]]$pValue[is.na(stats.protNames[["stats.df.protNames"]]$pValue)] <- 1 # Replace NAs with pValue of 1
  paucs <- getPartialAUCsResults(stats.df = stats.protNames[["stats.df.protNames"]], nEcoli = stats.protNames[["nEcoli"]], nHuman = stats.protNames[["nHuman"]])
  res <- c(paucs, regpValsProp=regpValsProp)
  names(res) <- paste0(names(res), 
".", combIntersect) return(res) } ################################################################################# # RMSE rmse <- function(actual, predicted) { sqrt(mean((actual - predicted)^2,na.rm = TRUE)) } getRMSE <- function(stats.df) { stats.dfEcoli <- stats.df[grepl("ECOLI", stats.df$Protein), ] stats.dfHuman <- stats.df[grepl("HUMAN", stats.df$Protein), ] Ecoli <- rmse(actual=stats.dfEcoli$log2FC, predicted=stats.dfEcoli$log2FCPredicted) Human <- rmse(actual=stats.dfHuman$log2FC, predicted=stats.dfHuman$log2FCPredicted) HumanAndEcoli <- rmse(actual=stats.df$log2FC, predicted=stats.df$log2FCPredicted) list(Ecoli, Human, HumanAndEcoli) } getRMSEResults <- function(combIntersect=c("intersect", "diaWorkflow"), proteinNames, stats.df, dia) { stats.protNames <- getStatsProteinNames(combIntersect = combIntersect, proteinNames = proteinNames, stats.df, dia) stats.df2 <- stats.protNames[["stats.df.protNames"]] RMSEs <- getRMSE(stats.df2) RMSEEcoli <- RMSEs[[1]] RMSEHuman <- RMSEs[[2]] RMSEHumanAndEcoli <- RMSEs[[3]] res <- c(RMSEEcoli=RMSEEcoli, RMSEHuman=RMSEHuman, RMSEHumanAndEcoli=RMSEHumanAndEcoli) names(res) <- paste0(names(res), ".", combIntersect) return(res) } ################################################################################# runAnalysisForEachBootstrapSample <- function(bootstrap.dataset, indices, diaWorkflowResults.selected, dia, normalization, sparsityReduction, statTest, combinedProteinNames, intersectProteinNames, DiaWorkflowProteinNames) { df <- diaWorkflowResults.selected[, unlist(indices[bootstrap.dataset])] #df <- repList[[bootstrap.dataset]] # Remove empty rows df <- df[rowSums(is.na(df)) != ncol(df), ] nEcoli.pre <- sum(grepl("ECOLI", row.names(df))) nHuman.pre <- sum(grepl("HUMAN", row.names(df))) group.size <- ncol(df)/2 data.characts <- getDataCharacteristics(df) sparsRed.runtime <- system.time({ # Sparsity Reduction df.sr <- getSparsityReducedDf(sparsityReduction, df) }) normalization.runtime <- system.time({ # Normalization df <- getNormalizedDf(normalization, df.sr) }) log2FC.df <- data.frame(Protein=row.names(df), log2FC=rowMeans(df[, (group.size+1):ncol(df)], na.rm = TRUE)-rowMeans(df[, 1:group.size], na.rm = TRUE)) statTest.runtime <- system.time({ # print("Stats") stats.df.lst <- getStatTestResultDf(statTest, df) stats.df <- stats.df.lst[[1]] seed.stat <- stats.df.lst[[2]] }) sparsRed.runtime <- sparsRed.runtime[["user.self"]] normalization.runtime <- normalization.runtime[["user.self"]] statTest.runtime <- statTest.runtime[["user.self"]] stats.df <- merge(stats.df, log2FC.df, by=c("Protein"), all.x=FALSE) stats.df$log2FCPredicted <- NA stats.df[grepl("ECOLI", stats.df$Protein), ]$log2FCPredicted <- log2(0.24038462/0.11076923) # 1.11778738, was previously wrongly assumed to be log2((1/12)/(1/25))=1.058894 stats.df[grepl("HUMAN", stats.df$Protein), ]$log2FCPredicted <- log2(1) if (sum(is.nan(stats.df$pValue)) > 0) stats.df[is.nan(stats.df$pValue),]$pValue <- NA if (sum(is.nan(stats.df$log2FC)) > 0) stats.df[is.nan(stats.df$log2FC),]$log2FC <- NA numberOfProteins <- getNumberOfProteins(nEcoli.pre, nHuman.pre, stats.df, combinedProteinNames, intersectProteinNames, DiaWorkflowProteinNames) regValsPropAndPAucs.comb <- getRegValsPropAndPAucs(combIntersect = "combined", proteinNames = combinedProteinNames, stats.df = stats.df, dia=NULL) regValsPropAndPAucs.intersect <- getRegValsPropAndPAucs(combIntersect = "intersect", proteinNames = intersectProteinNames, stats.df = stats.df, dia=NULL) regValsPropAndPAucs.diaWorkflow <- 
getRegValsPropAndPAucs(combIntersect = "diaWorkflow", proteinNames = DiaWorkflowProteinNames, stats.df = stats.df, dia=dia) RMSE.intersect <- getRMSEResults(combIntersect="intersect", proteinNames = intersectProteinNames, stats.df, dia=NULL) RMSE.diaWorkflow <- getRMSEResults(combIntersect="diaWorkflow", proteinNames = DiaWorkflowProteinNames, stats.df, dia=dia) summary <- rlist::list.flatten(list(bootstrap.dataset = bootstrap.dataset, dia = dia, normalization = normalization, sparsityReduction = sparsityReduction, statTest = statTest, groupSize=group.size, as.list(data.characts), nAllProteins=nrow(stats.df), as.list(numberOfProteins), sparsRed.runtime = sparsRed.runtime, normalization.runtime = normalization.runtime, statTest.runtime = statTest.runtime, seed.stat=seed.stat, as.list(regValsPropAndPAucs.comb), as.list(regValsPropAndPAucs.intersect), as.list(regValsPropAndPAucs.diaWorkflow), as.list(RMSE.intersect), as.list(RMSE.diaWorkflow))) return(list(stats.df, summary)) } ################################################################################ # batch <- 1 # max 32 bei batch.size 20, max 64 bei batch.size 10, max 128 bei batch.size 5 run <- function(batch, batch.size) { # GET ALL PARAMETER COMBINATIONS dias <- sort(c("DIANN_DIANN_AI", "DIANN_DIANN_AI_GPF", "DIANN_MaxQuant", "DIANN_MSFragger", "DIANN_PROSIT_EDIA_GPF", "OSW_DIANN_AI_GPF", "OSW_MaxQuant", "OSW_MSFragger", "Skyline_DIANN_AI_GPF", "Skyline_MaxQuant", "Skyline_MSFragger", "Skyline_PROSIT_EDIA_GPF", "Spectronaut_DIANN_AI_GPF", "Spectronaut_DirectDIA", "Spectronaut_MaxQuant", "Spectronaut_MSFragger", "Spectronaut_PROSIT_EDIA_GPF")) normalizations <- c("unnormalized", "TRQN", "QN", "median") sparsityReductions <- c("NoSR", "SR66", "SR90") statTests <- c("ttestVarEqual", "ttestVarNotEqual", "GLMgamma", "limma", "Wilcoxon", "SAM", "ROTS") combs <- expand.grid(statTests, sparsityReductions, normalizations, dias) colnames(combs) <- c("statTest", "sparsityReduction", "normalization", "dia") if (batch*batch.size > nrow(combs)){ combs <- combs[(((batch-1)*batch.size)+1):nrow(combs),] } else{ combs <- combs[(((batch-1)*batch.size)+1):(batch*batch.size),] } combs.lst <- as.list(as.data.frame(t(combs))) combinedProteinNames <- readRDS("combinedProteinNames.rds") intersectProteinNames <- readRDS("intersectProteinNames.rds") # load("/Users/admin/Desktop/PhD/202110_dia-benchmarking_rerun/DIAsoftwareOutputProteinLevel_1to12And1to25Only_wideFormat_withBootstrapIndicesAndIntersectAndCombinedProteinNames.RData") # saveRDS(diaWorkflowResults, file = "diaWorkflowResults.rds") indices <- readRDS("indices.rds") diaWorkflowResults <- readRDS("diaWorkflowResults.rds") #result.list <- foreach(i = seq_along(combs.lst)) %dopar% { #result.list <- foreach(i = seq_along(combs.lst)) %do% { vector <- combs.lst[[1]] statTest <- as.character(vector[1]) sparsityReduction <- as.character(vector[2]) normalization <- as.character(vector[3]) dia <- as.character(vector[4]) print(statTest) print(sparsityReduction) print(normalization) print(dia) #DiaWorkflowProteinNames <- readRDS(paste0(dia, "_ProteinNames.rds")) # print("---Loading Bootstrap datasets...") # repList <- readRDS(paste0(dia, ".rds")) print("--------------------------") # subresult.list <- lapply(seq_along(repList), runAnalysisForEachBootstrapSample, repList=repList, dia=dia, # normalization=normalization, sparsityReduction=sparsityReduction, statTest=statTest, # combinedProteinNames=combinedProteinNames, intersectProteinNames=intersectProteinNames, 
DiaWorkflowProteinNames=DiaWorkflowProteinNames) diaWorkflowResults.selected <- diaWorkflowResults[[dia]] DiaWorkflowProteinNames <- row.names(diaWorkflowResults.selected) # result.list <- lapply(seq_along(indices), runAnalysisForEachBootstrapSample, indices=indices, diaWorkflowResults.selected=diaWorkflowResults.selected, dia=dia, # normalization=normalization, sparsityReduction=sparsityReduction, statTest=statTest, # combinedProteinNames=combinedProteinNames, intersectProteinNames=intersectProteinNames, DiaWorkflowProteinNames=DiaWorkflowProteinNames) # procs <- as.numeric(Sys.getenv("SLURM_NTASKS")) registerDoParallel(cores=procs) result.list <- foreach(i = seq_along(indices)) %dopar% { runAnalysisForEachBootstrapSample(i, indices=indices, diaWorkflowResults.selected=diaWorkflowResults.selected, dia=dia, normalization=normalization, sparsityReduction=sparsityReduction, statTest=statTest, combinedProteinNames=combinedProteinNames, intersectProteinNames=intersectProteinNames, DiaWorkflowProteinNames=DiaWorkflowProteinNames) } #} #result.list2 <- unlist(result.list, recursive = FALSE) result.df <- lapply(result.list, function(x) x[[2]]) result.df <- do.call(rbind.data.frame, result.df) # colnames(result.df) <- c("bootstrap.dataset", "dia", "normalization", "sparsityReduction", "statTest", "groupSize", "nAllProteins", # "nEcoliProteins", "nHumanProteins", "nEcoliProteins.pre", "nHumanProteins.pre", # "medianSampleVariance", "medianProteinVariance", "KS.SignProp", "percNATotal", "percOfRowsWithNAs", # "sparsRed.runtime" , "normalization.runtime", "statTest.runtime", # "seed.stat") #colnames(result.df) <- names(subresult.list[[1]][[2]]) if (batch.size == 1){ session <- sessionInfo() sink(paste0("sessionInfo_", batch, "_",batch.size, "_", dia, "_", normalization, "_", sparsityReduction, "_", statTest,".txt")) print(session) sink() write.csv(result.df, file=paste0("benchmark_results_", batch, "_", batch.size, "_", dia, "_", normalization, "_", sparsityReduction, "_", statTest,".csv"), row.names = FALSE) save(result.list, file = paste0("resultlist_", batch, "_", batch.size, "_", dia, "_", normalization, "_", sparsityReduction, "_", statTest, ".Rdata")) } else { session <- sessionInfo() sink(paste0("sessionInfo_", batch, "_",batch.size, ".txt")) print(session) sink() write.csv(result.df, file=paste0("benchmark_results_", batch, "_", batch.size, ".csv"), row.names = FALSE) save(result.list, file = paste0("resultlist_", batch, "_", batch.size, ".Rdata")) } }
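# Usage sketch (an illustration, not part of the original pipeline): with the
# 17 DIA workflows x 4 normalizations x 3 sparsity reductions x 7 statistical
# tests defined above, expand.grid() yields 1428 parameter combinations, so
# batch.size = 20 implies batch numbers 1 through 72. The .rds inputs
# (indices.rds, diaWorkflowResults.rds, the protein-name files) must sit in
# the working directory.
# run(batch = 1, batch.size = 20)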
## test Function FindCorValueByGenes
x1 <- FindCorValueByGenes(inputCell, referencePanel)
x1$singleAbove0$`Fetal__Stomach_5_Pit Progenitor`
x1$singleAbove0_df
colSums(x1$singleAbove0_df)
#### test Function CorValueByGenes_V3
inputCells = GSE97693_Tang_TPM_cells[,1:20]
# drop very important
referencePanel = referPanel_of_mix
i=1
x <- CorValueByGenes_V3(inputCells, referencePanel)
x[[1]]
colSums(x[[1]])
table(colSums(x[[1]]) > 0.6)
names(x[[2]])
summary(colSums(x[[2]]))
cor(inputCell, referencePanel)
# verify test
inputCell = GSE97693_Tang_TPM_cells[,2,drop = F] # drop = F to keep rownames
referencePanel = referPanel_of_mix[1:10]
geneList <- intersect(rownames(inputCell),rownames(referencePanel))
inputCell <- inputCell[geneList,,drop=F]
referencePanel <- referencePanel[geneList,]
nonZer0Index <- inputCell > 0 # single cell > 0 genes
#nonZeroInputCell <- inputCell[nonZer0Index,,]
#nonZeroReference <- referencePanel[,i][nonZer0Index]
geneList.2 <- geneList[nonZer0Index]
inputCell[geneList.2,, drop = F]
# calculate corValueGenes
cor(inputCell[geneList.2,, drop = F], referencePanel[geneList.2,])
/Rs_corGene_analysis/Step3_verify_the_functions.R
no_license
haojiang9999/HCA_script
R
false
false
1,132
r
## test Function FindCorValueByGenes
x1 <- FindCorValueByGenes(inputCell, referencePanel)
x1$singleAbove0$`Fetal__Stomach_5_Pit Progenitor`
x1$singleAbove0_df
colSums(x1$singleAbove0_df)
#### test Function CorValueByGenes_V3
inputCells = GSE97693_Tang_TPM_cells[,1:20]
# drop very important
referencePanel = referPanel_of_mix
i=1
x <- CorValueByGenes_V3(inputCells, referencePanel)
x[[1]]
colSums(x[[1]])
table(colSums(x[[1]]) > 0.6)
names(x[[2]])
summary(colSums(x[[2]]))
cor(inputCell, referencePanel)
# verify test
inputCell = GSE97693_Tang_TPM_cells[,2,drop = F] # drop = F to keep rownames
referencePanel = referPanel_of_mix[1:10]
geneList <- intersect(rownames(inputCell),rownames(referencePanel))
inputCell <- inputCell[geneList,,drop=F]
referencePanel <- referencePanel[geneList,]
nonZer0Index <- inputCell > 0 # single cell > 0 genes
#nonZeroInputCell <- inputCell[nonZer0Index,,]
#nonZeroReference <- referencePanel[,i][nonZer0Index]
geneList.2 <- geneList[nonZer0Index]
inputCell[geneList.2,, drop = F]
# calculate corValueGenes
cor(inputCell[geneList.2,, drop = F], referencePanel[geneList.2,])
layout(matrix(c(1,2,3,4), 2, 2, byrow = TRUE)) hist(data$child.no, breaks=7, main='Children Born to Beneficiaries', col = "lightgrey", ylim = c(0, 140), xlab = NULL, ylab = "No. of Beneficiaries") hist(data$child.before.cct, breaks=7, main='Children Born before CCT', col="lightgrey", ylim = c(0, 140), ylab = NULL, xlab = NULL) hist(data$anc.before.cct, breaks=7, col="lightgrey", ylab = NULL, xlab = NULL, main= 'ANC before CCT', ylim = c(0, 140)) hist(data$delivered.hf, breaks=7, col="lightgrey", main='Children Delivered in HF', xlab= "No. of Children", ylim = c(0, 140), ylab = NULL)
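# Note (a small addition): layout() persists for later plots on the same device;
# reset with par(mfrow = c(1, 1)) once the 2x2 panel is no longer wanted.
# par(mfrow = c(1, 1))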
/scripts/multiplot.R
no_license
DevSolutionsLtd/cct
R
false
false
711
r
layout(matrix(c(1,2,3,4), 2, 2, byrow = TRUE)) hist(data$child.no, breaks=7, main='Children Born to Beneficiaries', col = "lightgrey", ylim = c(0, 140), xlab = NULL, ylab = "No. of Beneficiaries") hist(data$child.before.cct, breaks=7, main='Children Born before CCT', col="lightgrey", ylim = c(0, 140), ylab = NULL, xlab = NULL) hist(data$anc.before.cct, breaks=7, col="lightgrey", ylab = NULL, xlab = NULL, main= 'ANC before CCT', ylim = c(0, 140)) hist(data$delivered.hf, breaks=7, col="lightgrey", main='Children Delivered in HF', xlab= "No. of Children", ylim = c(0, 140), ylab = NULL)
#' turnoverSimple
#'
#' @param wts numeric vector of current portfolio weights
#' @param wtsInit numeric vector of initial weights; defaults to equal weights
#' @param digits optional number of digits to round the turnover to
#'
#' @return a list with one element, \code{turnover}
#' @export
#'
#' @examples
turnoverSimple = function(wts,wtsInit = NULL,digits = NULL)
{
  if(is.null(wtsInit))
  {  n = length(wts)
  wtsInit = rep(1/n,n)}
  to = sum(abs(wts-wtsInit))
  if(is.null(digits))
  {list(turnover = to)} else
  {to = round(to,digits)
    list(turnover = to)}
}
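# Worked example (a sketch; not from the original file): with the default equal
# initial weights, turnoverSimple(c(0.5, 0.3, 0.2)) computes
# |0.5 - 1/3| + |0.3 - 1/3| + |0.2 - 1/3| = 1/3.
# turnoverSimple(c(0.5, 0.3, 0.2), digits = 4)  # $turnover = 0.3333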
/R/turnoverSimple.R
permissive
kecoli/PCRM
R
false
false
368
r
#' turnoverSimple
#'
#' @param wts numeric vector of current portfolio weights
#' @param wtsInit numeric vector of initial weights; defaults to equal weights
#' @param digits optional number of digits to round the turnover to
#'
#' @return a list with one element, \code{turnover}
#' @export
#'
#' @examples
turnoverSimple = function(wts,wtsInit = NULL,digits = NULL)
{
  if(is.null(wtsInit))
  {  n = length(wts)
  wtsInit = rep(1/n,n)}
  to = sum(abs(wts-wtsInit))
  if(is.null(digits))
  {list(turnover = to)} else
  {to = round(to,digits)
    list(turnover = to)}
}
plot_prev2 <- function(fit, ..., ylim=NULL, xlim=c(1980, max(as.integer(dimnames(fit$prev)[[1]]))), col="blue", main="", ylab="prevalence"){ if(is.null(ylim)) ylim <- c(0, 1.1*max(fit$prev[,"upper"])) xx <- as.integer(dimnames(fit$prev)[[1]]) plot(xx, fit$prev[,"mean"], type="n", ylim=ylim, xlim=xlim, ylab=ylab, xlab="", yaxt="n", xaxt="n", main=main) axis(1, labels=TRUE) axis(2, labels=TRUE) dots <- list(...) dots <- dots[!sapply(dots, is.null)] for(ii in seq_along(dots)) cred.region(xx, dots[[ii]]$prev[,c("lower", "upper")], col=transp(col[1+ii], 0.3)) cred.region(xx, fit$prev[,c("lower", "upper")], col=transp(col[1], 0.3)) for(ii in seq_along(dots)) lines(xx, dots[[ii]]$prev[,"mean"], col=col[1+ii], lwd=1.5) lines(xx, fit$prev[,"mean"], col=col[1], lwd=1.5) } plot_incid2 <- function(fit, ..., ylim=NULL, xlim=c(1980, max(as.integer(dimnames(fit$incid)[[1]]))), col="blue", main="", ylab="incidence rate"){ if(is.null(ylim)) ylim <- c(0, 1.1*max(fit$incid[,"upper"])) xx <- as.integer(dimnames(fit$incid)[[1]]) plot(xx, fit$incid[,"mean"], type="n", ylim=ylim, xlim=xlim, ylab=ylab, xlab="", yaxt="n", xaxt="n", main=main) axis(1, labels=TRUE) axis(2, labels=TRUE) dots <- list(...) dots <- dots[!sapply(dots, is.null)] for(ii in seq_along(dots)) cred.region(xx, dots[[ii]]$incid[,c("lower", "upper")], col=transp(col[1+ii], 0.3)) cred.region(xx, fit$incid[,c("lower", "upper")], col=transp(col[1], 0.3)) for(ii in seq_along(dots)) lines(xx, dots[[ii]]$incid[,"mean"], col=col[1+ii], lwd=1.5) lines(xx, fit$incid[,"mean"], col=col[1], lwd=1.5) } plot_log_transmrate <- function(fit, ..., ylim=NULL, xlim=c(1980, max(as.integer(dimnames(fit$transmrate)[[1]]))), col="blue", main="", ylab="log transmission rate"){ if(is.null(ylim)) ylim <- c(min(log(fit$transmrate[fit$transmrate[,"lower"] > 0, "lower"]))-0.2, max(log(fit$transmrate[,"upper"])) + 0.2) xx <- as.integer(dimnames(fit$transmrate)[[1]]) plot(xx, fit$transmrate[,"mean"], type="n", ylim=ylim, xlim=xlim, ylab=ylab, xlab="", yaxt="n", xaxt="n", main=main) axis(1, labels=TRUE) axis(2, labels=TRUE) dots <- list(...) dots <- dots[!sapply(dots, is.null)] for(ii in seq_along(dots)) cred.region(xx, log(dots[[ii]]$transmrate[,c("lower", "upper")]), col=transp(col[1+ii], 0.3)) cred.region(xx, log(fit$transmrate[,c("lower", "upper")]), col=transp(col[1], 0.3)) for(ii in seq_along(dots)) lines(xx, log(dots[[ii]]$transmrate[,"mean"]), col=col[1+ii], lwd=1.5) lines(xx, log(fit$transmrate[,"mean"]), col=col[1], lwd=1.5) } plot_incidsexratio <- function(fit, ..., ylim=NULL, xlim=c(1999, max(as.integer(dimnames(fit$incidsexratio)[[1]]))), col="blue", main="", ylab="F:M incidence ratio"){ if(is.null(ylim)) ylim <- c(0, max(2.5, 1.1*max(fit$incidsexratio[,"upper"]))) xx <- as.integer(dimnames(fit$incidsexratio)[[1]]) plot(xx, fit$incidsexratio[,"mean"], type="n", ylim=ylim, xlim=xlim, ylab=ylab, xlab="", yaxt="n", xaxt="n", main=main) axis(1, labels=TRUE) axis(2, labels=TRUE) dots <- list(...) dots <- dots[!sapply(dots, is.null)] for(ii in seq_along(dots)) cred.region(xx, dots[[ii]]$incidsexratio[,c("lower", "upper")], col=transp(col[1+ii], 0.3)) cred.region(xx, fit$incidsexratio[,c("lower", "upper")], col=transp(col[1], 0.3)) for(ii in seq_along(dots)) lines(xx, dots[[ii]]$incidsexratio[,"mean"], col=col[1+ii], lwd=1.5) lines(xx, fit$incidsexratio[,"mean"], col=col[1], lwd=1.5) } plot_pregprev <- function(fit, ..., likdat=NULL, ylim=NULL, xlim=c(1988, max(as.integer(dimnames(fit$pregprev)[[1]]))), col="blue", main="", ylab="Preg. prevalence"){ dots <- list(...) 
dots <- dots[!sapply(dots, is.null)]

  if(is.null(ylim)){
    maxest <- max(fit$pregprev[,"upper"])
    if(!is.null(likdat) && !is.null(likdat$anclik.dat) && length(likdat$anclik.dat$W.lst))
      maxdata <- max(pnorm(unlist(likdat$anclik.dat$W.lst)))
    else
      maxdata <- 0
    ylim <- c(0, 1.1*max(maxest, min(maxdata, 2*maxest)))
  }

  xx <- as.integer(dimnames(fit$pregprev)[[1]]) # x-axis years taken from the pregprev estimates being plotted
  plot(xx, fit$pregprev[,"mean"], type="n", ylim=ylim, xlim=xlim, ylab=ylab, xlab="", yaxt="n", xaxt="n", main=main)
  axis(1, labels=TRUE)
  axis(2, labels=TRUE)
  ##
  if(!is.null(likdat)){
    if(!is.null(likdat$anclik.dat) && length(likdat$anclik.dat$W.lst)){
      with(likdat$anclik.dat, mapply(function(idx, W) points(idx+1970-1, pnorm(W), col=adjustcolor("grey", 0.5), pch=15), anc.idx.lst, W.lst))
      with(likdat$anclik.dat, mapply(function(idx, W) lines(idx+1970-1, pnorm(W), col=adjustcolor("grey", 0.5)), anc.idx.lst, W.lst))
    }
  }
  ##
  for(ii in seq_along(dots))
    cred.region(xx, dots[[ii]]$pregprev[,c("lower", "upper")], col=transp(col[1+ii], 0.3))
  cred.region(xx, fit$pregprev[,c("lower", "upper")], col=transp(col[1], 0.3))
  for(ii in seq_along(dots))
    lines(xx, dots[[ii]]$pregprev[,"mean"], col=col[1+ii], lwd=1.5)
  lines(xx, fit$pregprev[,"mean"], col=col[1], lwd=1.5)
  ##
  if(!is.null(likdat$ancrtcens.dat) && length(likdat$ancrtcens.dat$W.ancrt)){
    with(likdat$ancrtcens.dat, segments(year, y0=pnorm(qnorm(prev) - qnorm(0.975)*sqrt(v.ancrt)),
                                        y1=pnorm(qnorm(prev) + qnorm(0.975)*sqrt(v.ancrt))))
    with(likdat$ancrtcens.dat, points(year, prev, pch=15))
  }
}

plot_artcov15plus <- function(fit, ..., ylim=NULL, xlim=c(2003, max(as.integer(dimnames(fit$artcov15plus)[[1]]))), col="blue", main="", ylab="ART coverage"){

  if(is.null(ylim))
    ylim <- c(0, 1)

  xx <- as.integer(dimnames(fit$artcov15plus)[[1]])
  plot(xx, fit$artcov15plus[,"mean"], type="n", ylim=ylim, xlim=xlim, ylab=ylab, xlab="", yaxt="n", xaxt="n", main=main)
  axis(1, labels=TRUE)
  axis(2, labels=TRUE)

  dots <- list(...)
dots <- dots[!sapply(dots, is.null)] for(ii in seq_along(dots)) cred.region(xx, dots[[ii]]$artcov15plus[,c("lower", "upper")], col=transp(col[1+ii], 0.3)) cred.region(xx, fit$artcov15plus[,c("lower", "upper")], col=transp(col[1], 0.3)) for(ii in seq_along(dots)) lines(xx, dots[[ii]]$artcov15plus[,"mean"], col=col[1+ii], lwd=1.5) lines(xx, fit$artcov15plus[,"mean"], col=col[1], lwd=1.5) } plot_compare_ageprev2 <- function(fit, fit2=NULL, fit3=NULL, specres=NULL, likdat=NULL, ylim=NULL, col=c("navy", "darkred", "forestgreen"), exact_ci=TRUE){ if(is.null(ylim)){ if(!is.null(likdat)) maxdata <- likdat$hhsage.dat$prev else maxdata <- 0 ylim <- c(0, 0.05*ceiling(max(c(fit$ageprevdat$upper, 1.3*maxdata))/0.05)) } #### survprev <- fit$ageprevdat if(!is.null(likdat)){ survprev <- merge(likdat$hhsage.dat, fit$ageprevdat, by=c("year", "survyear", "sex", "agegr"), all.x=TRUE) if(exact_ci) survprev[c("ci_l", "ci_u")] <- with(survprev, binom::binom.exact(x_eff, n_eff))[c("lower", "upper")] } survprev$survyear <- with(survprev, factor(survyear, levels(survyear)[order(as.integer(substr(levels(survyear), 1, 4)))])) survprev <- split(survprev, factor(survprev$survyear)) ## if(!is.null(fit2)) survprev2 <- split(fit2$ageprevdat, factor(fit2$ageprevdat$survyear)) if(!is.null(fit3)) survprev3 <- split(fit3$ageprevdat, factor(fit3$ageprevdat$survyear)) ## par(mfrow=c(4,2), mar=c(2, 3, 2, 1), tcl=-0.25, mgp=c(2, 0.5, 0), las=1, cex=1) for(isurv in names(survprev)) for(isex in c("male", "female")){ sp <- subset(survprev[[isurv]], sex==isex & as.integer(agegr) %in% 3:11) if(!is.null(fit2)) sp2 <- subset(survprev2[[isurv]], sex==isex & as.integer(agegr) %in% 3:11) if(!is.null(fit3)) sp3 <- subset(survprev3[[isurv]], sex==isex & as.integer(agegr) %in% 3:11) ## xx <- as.integer(sp$agegr) main <- if(!is.null(sp$eppregion)) paste0(sp$country[1], " ", gsub("(\\w)(\\w*)", "\\U\\1\\L\\2", sp$eppregion[1], perl=TRUE), " ", survprev[[isurv]]$survyear[1], ", ", isex) else paste0(sp$country[1], " ", survprev[[isurv]]$survyear[1], ", ", isex) plot(xx+0.5, sp$prev, type="n", xlim=c(4, 12), ylim=ylim, xaxt="n", main=main, xlab="", ylab="") axis(1, xx+0.5, sp$agegr) ## rect(xx+0.05, sp$lower, xx+0.95, sp$upper, col=transp(col[1]), border=NA) segments(xx+0.05, sp$mean, xx+0.95, col=col[1], lwd=2) ## if(!is.null(fit2)){ rect(xx+0.05, sp2$lower, xx+0.95, sp2$upper, col=transp(col[2]), border=NA) segments(xx+0.05, sp2$mean, xx+0.95, col=col[2], lwd=2) } if(!is.null(fit3)){ rect(xx+0.05, sp3$lower, xx+0.95, sp3$upper, col=transp(col[3]), border=NA) segments(xx+0.05, sp3$mean, xx+0.95, col=col[3], lwd=2) } ## if(!is.null(specres)){ csex <- sub("(\\b[a-z]{1})", "\\U\\1" , isex, perl=TRUE) stryear <- as.character(survprev[[isurv]]$year[1]) specres.prev <- tapply(specres$hivpop[as.character(15:54), csex, stryear], rep(3:10, each=5), sum) / tapply(specres$totpop[as.character(15:54), csex, stryear], rep(3:10, each=5), sum) segments(4:11+0.1, specres.prev, 4:11+0.9, lty=3, col="grey10", lwd=2) } if(exists("prev", sp)){ points(xx+0.5, sp$prev, pch=19) segments(x0=xx+0.5, y0=sp$ci_l, y1=sp$ci_u) } } ## return(invisible()) }
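## Usage sketch (assumptions, not from the original file: the `fit` objects carry
## matrices with "mean"/"lower"/"upper" columns and calendar years as dimnames,
## as the accessors above imply):
# plot_prev2(fit, fit2, col = c("blue", "darkred"), main = "Prevalence, model A vs B")
# plot_artcov15plus(fit)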
/R/plot2.R
no_license
mrc-ide/eppasm
R
false
false
9,515
r
plot_prev2 <- function(fit, ..., ylim=NULL, xlim=c(1980, max(as.integer(dimnames(fit$prev)[[1]]))), col="blue", main="", ylab="prevalence"){ if(is.null(ylim)) ylim <- c(0, 1.1*max(fit$prev[,"upper"])) xx <- as.integer(dimnames(fit$prev)[[1]]) plot(xx, fit$prev[,"mean"], type="n", ylim=ylim, xlim=xlim, ylab=ylab, xlab="", yaxt="n", xaxt="n", main=main) axis(1, labels=TRUE) axis(2, labels=TRUE) dots <- list(...) dots <- dots[!sapply(dots, is.null)] for(ii in seq_along(dots)) cred.region(xx, dots[[ii]]$prev[,c("lower", "upper")], col=transp(col[1+ii], 0.3)) cred.region(xx, fit$prev[,c("lower", "upper")], col=transp(col[1], 0.3)) for(ii in seq_along(dots)) lines(xx, dots[[ii]]$prev[,"mean"], col=col[1+ii], lwd=1.5) lines(xx, fit$prev[,"mean"], col=col[1], lwd=1.5) } plot_incid2 <- function(fit, ..., ylim=NULL, xlim=c(1980, max(as.integer(dimnames(fit$incid)[[1]]))), col="blue", main="", ylab="incidence rate"){ if(is.null(ylim)) ylim <- c(0, 1.1*max(fit$incid[,"upper"])) xx <- as.integer(dimnames(fit$incid)[[1]]) plot(xx, fit$incid[,"mean"], type="n", ylim=ylim, xlim=xlim, ylab=ylab, xlab="", yaxt="n", xaxt="n", main=main) axis(1, labels=TRUE) axis(2, labels=TRUE) dots <- list(...) dots <- dots[!sapply(dots, is.null)] for(ii in seq_along(dots)) cred.region(xx, dots[[ii]]$incid[,c("lower", "upper")], col=transp(col[1+ii], 0.3)) cred.region(xx, fit$incid[,c("lower", "upper")], col=transp(col[1], 0.3)) for(ii in seq_along(dots)) lines(xx, dots[[ii]]$incid[,"mean"], col=col[1+ii], lwd=1.5) lines(xx, fit$incid[,"mean"], col=col[1], lwd=1.5) } plot_log_transmrate <- function(fit, ..., ylim=NULL, xlim=c(1980, max(as.integer(dimnames(fit$transmrate)[[1]]))), col="blue", main="", ylab="log transmission rate"){ if(is.null(ylim)) ylim <- c(min(log(fit$transmrate[fit$transmrate[,"lower"] > 0, "lower"]))-0.2, max(log(fit$transmrate[,"upper"])) + 0.2) xx <- as.integer(dimnames(fit$transmrate)[[1]]) plot(xx, fit$transmrate[,"mean"], type="n", ylim=ylim, xlim=xlim, ylab=ylab, xlab="", yaxt="n", xaxt="n", main=main) axis(1, labels=TRUE) axis(2, labels=TRUE) dots <- list(...) dots <- dots[!sapply(dots, is.null)] for(ii in seq_along(dots)) cred.region(xx, log(dots[[ii]]$transmrate[,c("lower", "upper")]), col=transp(col[1+ii], 0.3)) cred.region(xx, log(fit$transmrate[,c("lower", "upper")]), col=transp(col[1], 0.3)) for(ii in seq_along(dots)) lines(xx, log(dots[[ii]]$transmrate[,"mean"]), col=col[1+ii], lwd=1.5) lines(xx, log(fit$transmrate[,"mean"]), col=col[1], lwd=1.5) } plot_incidsexratio <- function(fit, ..., ylim=NULL, xlim=c(1999, max(as.integer(dimnames(fit$incidsexratio)[[1]]))), col="blue", main="", ylab="F:M incidence ratio"){ if(is.null(ylim)) ylim <- c(0, max(2.5, 1.1*max(fit$incidsexratio[,"upper"]))) xx <- as.integer(dimnames(fit$incidsexratio)[[1]]) plot(xx, fit$incidsexratio[,"mean"], type="n", ylim=ylim, xlim=xlim, ylab=ylab, xlab="", yaxt="n", xaxt="n", main=main) axis(1, labels=TRUE) axis(2, labels=TRUE) dots <- list(...) dots <- dots[!sapply(dots, is.null)] for(ii in seq_along(dots)) cred.region(xx, dots[[ii]]$incidsexratio[,c("lower", "upper")], col=transp(col[1+ii], 0.3)) cred.region(xx, fit$incidsexratio[,c("lower", "upper")], col=transp(col[1], 0.3)) for(ii in seq_along(dots)) lines(xx, dots[[ii]]$incidsexratio[,"mean"], col=col[1+ii], lwd=1.5) lines(xx, fit$incidsexratio[,"mean"], col=col[1], lwd=1.5) } plot_pregprev <- function(fit, ..., likdat=NULL, ylim=NULL, xlim=c(1988, max(as.integer(dimnames(fit$pregprev)[[1]]))), col="blue", main="", ylab="Preg. prevalence"){ dots <- list(...) 
dots <- dots[!sapply(dots, is.null)]

  if(is.null(ylim)){
    maxest <- max(fit$pregprev[,"upper"])
    if(!is.null(likdat) && !is.null(likdat$anclik.dat) && length(likdat$anclik.dat$W.lst))
      maxdata <- max(pnorm(unlist(likdat$anclik.dat$W.lst)))
    else
      maxdata <- 0
    ylim <- c(0, 1.1*max(maxest, min(maxdata, 2*maxest)))
  }

  xx <- as.integer(dimnames(fit$pregprev)[[1]]) # x-axis years taken from the pregprev estimates being plotted
  plot(xx, fit$pregprev[,"mean"], type="n", ylim=ylim, xlim=xlim, ylab=ylab, xlab="", yaxt="n", xaxt="n", main=main)
  axis(1, labels=TRUE)
  axis(2, labels=TRUE)
  ##
  if(!is.null(likdat)){
    if(!is.null(likdat$anclik.dat) && length(likdat$anclik.dat$W.lst)){
      with(likdat$anclik.dat, mapply(function(idx, W) points(idx+1970-1, pnorm(W), col=adjustcolor("grey", 0.5), pch=15), anc.idx.lst, W.lst))
      with(likdat$anclik.dat, mapply(function(idx, W) lines(idx+1970-1, pnorm(W), col=adjustcolor("grey", 0.5)), anc.idx.lst, W.lst))
    }
  }
  ##
  for(ii in seq_along(dots))
    cred.region(xx, dots[[ii]]$pregprev[,c("lower", "upper")], col=transp(col[1+ii], 0.3))
  cred.region(xx, fit$pregprev[,c("lower", "upper")], col=transp(col[1], 0.3))
  for(ii in seq_along(dots))
    lines(xx, dots[[ii]]$pregprev[,"mean"], col=col[1+ii], lwd=1.5)
  lines(xx, fit$pregprev[,"mean"], col=col[1], lwd=1.5)
  ##
  if(!is.null(likdat$ancrtcens.dat) && length(likdat$ancrtcens.dat$W.ancrt)){
    with(likdat$ancrtcens.dat, segments(year, y0=pnorm(qnorm(prev) - qnorm(0.975)*sqrt(v.ancrt)),
                                        y1=pnorm(qnorm(prev) + qnorm(0.975)*sqrt(v.ancrt))))
    with(likdat$ancrtcens.dat, points(year, prev, pch=15))
  }
}

plot_artcov15plus <- function(fit, ..., ylim=NULL, xlim=c(2003, max(as.integer(dimnames(fit$artcov15plus)[[1]]))), col="blue", main="", ylab="ART coverage"){

  if(is.null(ylim))
    ylim <- c(0, 1)

  xx <- as.integer(dimnames(fit$artcov15plus)[[1]])
  plot(xx, fit$artcov15plus[,"mean"], type="n", ylim=ylim, xlim=xlim, ylab=ylab, xlab="", yaxt="n", xaxt="n", main=main)
  axis(1, labels=TRUE)
  axis(2, labels=TRUE)

  dots <- list(...)
dots <- dots[!sapply(dots, is.null)] for(ii in seq_along(dots)) cred.region(xx, dots[[ii]]$artcov15plus[,c("lower", "upper")], col=transp(col[1+ii], 0.3)) cred.region(xx, fit$artcov15plus[,c("lower", "upper")], col=transp(col[1], 0.3)) for(ii in seq_along(dots)) lines(xx, dots[[ii]]$artcov15plus[,"mean"], col=col[1+ii], lwd=1.5) lines(xx, fit$artcov15plus[,"mean"], col=col[1], lwd=1.5) } plot_compare_ageprev2 <- function(fit, fit2=NULL, fit3=NULL, specres=NULL, likdat=NULL, ylim=NULL, col=c("navy", "darkred", "forestgreen"), exact_ci=TRUE){ if(is.null(ylim)){ if(!is.null(likdat)) maxdata <- likdat$hhsage.dat$prev else maxdata <- 0 ylim <- c(0, 0.05*ceiling(max(c(fit$ageprevdat$upper, 1.3*maxdata))/0.05)) } #### survprev <- fit$ageprevdat if(!is.null(likdat)){ survprev <- merge(likdat$hhsage.dat, fit$ageprevdat, by=c("year", "survyear", "sex", "agegr"), all.x=TRUE) if(exact_ci) survprev[c("ci_l", "ci_u")] <- with(survprev, binom::binom.exact(x_eff, n_eff))[c("lower", "upper")] } survprev$survyear <- with(survprev, factor(survyear, levels(survyear)[order(as.integer(substr(levels(survyear), 1, 4)))])) survprev <- split(survprev, factor(survprev$survyear)) ## if(!is.null(fit2)) survprev2 <- split(fit2$ageprevdat, factor(fit2$ageprevdat$survyear)) if(!is.null(fit3)) survprev3 <- split(fit3$ageprevdat, factor(fit3$ageprevdat$survyear)) ## par(mfrow=c(4,2), mar=c(2, 3, 2, 1), tcl=-0.25, mgp=c(2, 0.5, 0), las=1, cex=1) for(isurv in names(survprev)) for(isex in c("male", "female")){ sp <- subset(survprev[[isurv]], sex==isex & as.integer(agegr) %in% 3:11) if(!is.null(fit2)) sp2 <- subset(survprev2[[isurv]], sex==isex & as.integer(agegr) %in% 3:11) if(!is.null(fit3)) sp3 <- subset(survprev3[[isurv]], sex==isex & as.integer(agegr) %in% 3:11) ## xx <- as.integer(sp$agegr) main <- if(!is.null(sp$eppregion)) paste0(sp$country[1], " ", gsub("(\\w)(\\w*)", "\\U\\1\\L\\2", sp$eppregion[1], perl=TRUE), " ", survprev[[isurv]]$survyear[1], ", ", isex) else paste0(sp$country[1], " ", survprev[[isurv]]$survyear[1], ", ", isex) plot(xx+0.5, sp$prev, type="n", xlim=c(4, 12), ylim=ylim, xaxt="n", main=main, xlab="", ylab="") axis(1, xx+0.5, sp$agegr) ## rect(xx+0.05, sp$lower, xx+0.95, sp$upper, col=transp(col[1]), border=NA) segments(xx+0.05, sp$mean, xx+0.95, col=col[1], lwd=2) ## if(!is.null(fit2)){ rect(xx+0.05, sp2$lower, xx+0.95, sp2$upper, col=transp(col[2]), border=NA) segments(xx+0.05, sp2$mean, xx+0.95, col=col[2], lwd=2) } if(!is.null(fit3)){ rect(xx+0.05, sp3$lower, xx+0.95, sp3$upper, col=transp(col[3]), border=NA) segments(xx+0.05, sp3$mean, xx+0.95, col=col[3], lwd=2) } ## if(!is.null(specres)){ csex <- sub("(\\b[a-z]{1})", "\\U\\1" , isex, perl=TRUE) stryear <- as.character(survprev[[isurv]]$year[1]) specres.prev <- tapply(specres$hivpop[as.character(15:54), csex, stryear], rep(3:10, each=5), sum) / tapply(specres$totpop[as.character(15:54), csex, stryear], rep(3:10, each=5), sum) segments(4:11+0.1, specres.prev, 4:11+0.9, lty=3, col="grey10", lwd=2) } if(exists("prev", sp)){ points(xx+0.5, sp$prev, pch=19) segments(x0=xx+0.5, y0=sp$ci_l, y1=sp$ci_u) } } ## return(invisible()) }
structure(list(url = "https://play.dhis2.org/2.33.4/api/organisationUnitGroups.json?paging=false&filter=name:in:[District,CHC]&fields=id,name", status_code = 200L, headers = structure(list(server = "nginx/1.17.9", date = "Fri, 12 Jun 2020 14:42:18 GMT", `content-type` = "application/json;charset=UTF-8", `transfer-encoding` = "chunked", connection = "keep-alive", `cache-control` = "no-cache, private", `x-xss-protection` = "1; mode=block", `x-frame-options` = "SAMEORIGIN", `x-content-type-options` = "nosniff", etag = "W/\"096a2c6193c780186074ae1fb4b55665c\"", `strict-transport-security` = "max-age=15768000", `content-encoding` = "gzip"), class = c("insensitive", "list")), all_headers = list(list(status = 302L, version = "HTTP/1.1", headers = structure(list(server = "nginx/1.17.9", date = "Fri, 12 Jun 2020 14:42:18 GMT", `content-type` = "text/html", `content-length` = "145", connection = "keep-alive", location = "https://play.dhis2.org/2.33.4/api/organisationUnitGroups.json?paging=false&filter=name:in:[District,CHC]&fields=id,name", `strict-transport-security` = "max-age=15768000"), class = c("insensitive", "list"))), list(status = 200L, version = "HTTP/1.1", headers = structure(list(server = "nginx/1.17.9", date = "Fri, 12 Jun 2020 14:42:18 GMT", `content-type` = "application/json;charset=UTF-8", `transfer-encoding` = "chunked", connection = "keep-alive", `cache-control` = "no-cache, private", `x-xss-protection` = "1; mode=block", `x-frame-options` = "SAMEORIGIN", `x-content-type-options` = "nosniff", etag = "W/\"096a2c6193c780186074ae1fb4b55665c\"", `strict-transport-security` = "max-age=15768000", `content-encoding` = "gzip"), class = c("insensitive", "list")))), cookies = structure(list(domain = "#HttpOnly_play.dhis2.org", flag = FALSE, path = "/2.33.4", secure = TRUE, expiration = structure(Inf, class = c("POSIXct", "POSIXt")), name = "JSESSIONID", value = "REDACTED"), row.names = c(NA, -1L), class = "data.frame"), content = charToRaw("{\"organisationUnitGroups\":[{\"name\":\"CHC\",\"id\":\"CXw2yu5fodb\"},{\"name\":\"District\",\"id\":\"w1Atoz18PCL\"}]}"), date = structure(1591972938, class = c("POSIXct", "POSIXt" ), tzone = "GMT"), times = c(redirect = 0.12244, namelookup = 4.5e-05, connect = 4.7e-05, pretransfer = 0.000115, starttransfer = 0.050596, total = 0.173102)), class = "response")
/tests/testthat/play.dhis2.org/2.33/api/organisationUnitGroups.json-918aba.R
permissive
pepfar-datim/datimutils
R
false
false
2,607
r
structure(list(url = "https://play.dhis2.org/2.33.4/api/organisationUnitGroups.json?paging=false&filter=name:in:[District,CHC]&fields=id,name", status_code = 200L, headers = structure(list(server = "nginx/1.17.9", date = "Fri, 12 Jun 2020 14:42:18 GMT", `content-type` = "application/json;charset=UTF-8", `transfer-encoding` = "chunked", connection = "keep-alive", `cache-control` = "no-cache, private", `x-xss-protection` = "1; mode=block", `x-frame-options` = "SAMEORIGIN", `x-content-type-options` = "nosniff", etag = "W/\"096a2c6193c780186074ae1fb4b55665c\"", `strict-transport-security` = "max-age=15768000", `content-encoding` = "gzip"), class = c("insensitive", "list")), all_headers = list(list(status = 302L, version = "HTTP/1.1", headers = structure(list(server = "nginx/1.17.9", date = "Fri, 12 Jun 2020 14:42:18 GMT", `content-type` = "text/html", `content-length` = "145", connection = "keep-alive", location = "https://play.dhis2.org/2.33.4/api/organisationUnitGroups.json?paging=false&filter=name:in:[District,CHC]&fields=id,name", `strict-transport-security` = "max-age=15768000"), class = c("insensitive", "list"))), list(status = 200L, version = "HTTP/1.1", headers = structure(list(server = "nginx/1.17.9", date = "Fri, 12 Jun 2020 14:42:18 GMT", `content-type` = "application/json;charset=UTF-8", `transfer-encoding` = "chunked", connection = "keep-alive", `cache-control` = "no-cache, private", `x-xss-protection` = "1; mode=block", `x-frame-options` = "SAMEORIGIN", `x-content-type-options` = "nosniff", etag = "W/\"096a2c6193c780186074ae1fb4b55665c\"", `strict-transport-security` = "max-age=15768000", `content-encoding` = "gzip"), class = c("insensitive", "list")))), cookies = structure(list(domain = "#HttpOnly_play.dhis2.org", flag = FALSE, path = "/2.33.4", secure = TRUE, expiration = structure(Inf, class = c("POSIXct", "POSIXt")), name = "JSESSIONID", value = "REDACTED"), row.names = c(NA, -1L), class = "data.frame"), content = charToRaw("{\"organisationUnitGroups\":[{\"name\":\"CHC\",\"id\":\"CXw2yu5fodb\"},{\"name\":\"District\",\"id\":\"w1Atoz18PCL\"}]}"), date = structure(1591972938, class = c("POSIXct", "POSIXt" ), tzone = "GMT"), times = c(redirect = 0.12244, namelookup = 4.5e-05, connect = 4.7e-05, pretransfer = 0.000115, starttransfer = 0.050596, total = 0.173102)), class = "response")
#' A Place Holder for the hdf5r::H5File R6 Class #' #' @export H5File <- list( new = function(...) { print(sys.calls()) stop("You have installed a fake version of the 'hdf5r' package. This dummy version of that package was created in order to install Seurat (>= 2.3.2) on systems where 'hdf5r' fails to install. If you need Seurat::Read10X_h5(), then you need to install the real 'hdf5r' package.") } )
/R/H5File.R
no_license
Bionett/fake-hdf5r
R
false
false
417
r
#' A Place Holder for the hdf5r::H5File R6 Class #' #' @export H5File <- list( new = function(...) { print(sys.calls()) stop("You have installed a fake version of the 'hdf5r' package. This dummy version of that package was created in order to install Seurat (>= 2.3.2) on systems where 'hdf5r' fails to install. If you need Seurat::Read10X_h5(), then you need to install the real 'hdf5r' package.") } )
library("rpart") library("rpart.plot") #dados <- iris #modelo <- rpart(Species ~ ., data = dados) #colclasses ~ todas #res <- predict(teste, dados, type = "class") # efetua o teste cogu <- read.csv(file = "cogumelos.csv", header = TRUE, sep = ",") seq <- sample(1:nrow(cogu), as.integer(nrow(cogu)*0.3)) seq2 <- sample(1:nrow(cogu), nrow(cogu)-as.integer(nrow(cogu)*0.3)) treino <- dados[seq2, ] teste <- dados[seq, ] modelo <- rpart(CLASSES ~ ., data = cogu) result <- predict(modelo, teste, type = "class") mconfusao <- table(teste$CLASSES, result)
/projetos_R/arvoreDecisao.r
no_license
fndcaique/inteligencia_artificial
R
false
false
554
r
library("rpart") library("rpart.plot") #dados <- iris #modelo <- rpart(Species ~ ., data = dados) #colclasses ~ todas #res <- predict(teste, dados, type = "class") # efetua o teste cogu <- read.csv(file = "cogumelos.csv", header = TRUE, sep = ",") seq <- sample(1:nrow(cogu), as.integer(nrow(cogu)*0.3)) seq2 <- sample(1:nrow(cogu), nrow(cogu)-as.integer(nrow(cogu)*0.3)) treino <- dados[seq2, ] teste <- dados[seq, ] modelo <- rpart(CLASSES ~ ., data = cogu) result <- predict(modelo, teste, type = "class") mconfusao <- table(teste$CLASSES, result)
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/pustejovsky.R \name{dicho.d.r} \alias{dicho.d.r} \title{cohen's d from dichotomizing/extreme group design to r} \usage{ dicho.d.r(d, p1, p2, n1, n2, dir) } \arguments{ \item{d}{cohen's d} \item{p1}{cutoff percentile for group 1 (sample-based, please see other formulas for population-based in pustejovsky, 2014)} \item{p2}{cutoff percentile for group 2 (sample-based, please see other formulas for population-based in pustejovsky, 2014)} \item{n1}{cell size of group 1} \item{n2}{cell size of group 2} \item{dir}{if cohen's d is in absolute value, provide the empirical direction:\cr +1 for positive associations (e.g., group 1 - group 2 >= 0) and -1 for negative associations (e.g., group 1 - group 2 < 0)} } \description{ cohen's d from dichotomizing/extreme group design to correlation coefficient\cr see \href{https://psycnet.apa.org/buy/2013-34335-001}{pustejovsky, 2014. psychological methods.} \cr returns extreme group r (referred to as r (subscript eg) in pustejovsky paper) } \examples{ dicho.d.r(.80, 1/3, 1/3, 15, 15, -1) }
/man/dicho.d.r.Rd
no_license
phoebehlam/michaela
R
false
true
1,120
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/pustejovsky.R \name{dicho.d.r} \alias{dicho.d.r} \title{cohen's d from dichotomizing/extreme group design to r} \usage{ dicho.d.r(d, p1, p2, n1, n2, dir) } \arguments{ \item{d}{cohen's d} \item{p1}{cutoff percentile for group 1 (sample-based, please see other formulas for population-based in pustejovsky, 2014)} \item{p2}{cutoff percentile for group 2 (sample-based, please see other formulas for population-based in pustejovsky, 2014)} \item{n1}{cell size of group 1} \item{n2}{cell size of group 2} \item{dir}{if cohen's d is in absolute value, provide the empirical direction:\cr +1 for positive associations (e.g., group 1 - group 2 >= 0) and -1 for negative associations (e.g., group 1 - group 2 < 0)} } \description{ cohen's d from dichotomizing/extreme group design to correlation coefficient\cr see \href{https://psycnet.apa.org/buy/2013-34335-001}{pustejovsky, 2014. psychological methods.} \cr returns extreme group r (referred to as r (subscript eg) in pustejovsky paper) } \examples{ dicho.d.r(.80, 1/3, 1/3, 15, 15, -1) }
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/directors.R \name{guess_init_k} \alias{guess_init_k} \title{Guess initial \code{k} based on threshold and \code{p}} \usage{ guess_init_k(.partition_step) } \arguments{ \item{.partition_step}{a \code{partition_step} object} } \value{ an integer } \description{ Guess initial \code{k} based on threshold and \code{p} } \keyword{internal}
/man/guess_init_k.Rd
permissive
USCbiostats/partition
R
false
true
414
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/directors.R \name{guess_init_k} \alias{guess_init_k} \title{Guess initial \code{k} based on threshold and \code{p}} \usage{ guess_init_k(.partition_step) } \arguments{ \item{.partition_step}{a \code{partition_step} object} } \value{ an integer } \description{ Guess initial \code{k} based on threshold and \code{p} } \keyword{internal}
library(shiny)

# BMI = kg / m^2; weight is entered in pounds and height in inches, using the
# approximate conversions 1 lb ~ 0.45 kg and 1 in ~ 0.025 m
BMI<-function(weight,height) {(weight*0.45)/((height*0.025)^2)}

# standard BMI categories applied to the computed value
diagnostic_f<-function(weight,height){
  BMI_value<-(weight*0.45)/((height*0.025)^2)
  ifelse(BMI_value<18.5,"underweight",ifelse(BMI_value<25,"normal weight",ifelse(BMI_value<30,"overweight","obesity")))
}

shinyServer(
  function(input, output) {
    output$inputweightvalue <- renderPrint({input$weight})
    output$inputheightvalue <- renderPrint({input$height})
    output$estimation <- renderPrint({BMI(input$weight,input$height)})
    output$diagnostic <- renderPrint({diagnostic_f(input$weight,input$height)})
  }
)
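# Worked example (illustration): BMI(150, 68) = (150*0.45) / ((68*0.025)^2)
#                              = 67.5 / 2.89 ~ 23.4  -> "normal weight"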
/server.R
no_license
ambika03maruthi/DevelopingDataProducts
R
false
false
611
r
library(shiny)

# BMI = kg / m^2; weight is entered in pounds and height in inches, using the
# approximate conversions 1 lb ~ 0.45 kg and 1 in ~ 0.025 m
BMI<-function(weight,height) {(weight*0.45)/((height*0.025)^2)}

# standard BMI categories applied to the computed value
diagnostic_f<-function(weight,height){
  BMI_value<-(weight*0.45)/((height*0.025)^2)
  ifelse(BMI_value<18.5,"underweight",ifelse(BMI_value<25,"normal weight",ifelse(BMI_value<30,"overweight","obesity")))
}

shinyServer(
  function(input, output) {
    output$inputweightvalue <- renderPrint({input$weight})
    output$inputheightvalue <- renderPrint({input$height})
    output$estimation <- renderPrint({BMI(input$weight,input$height)})
    output$diagnostic <- renderPrint({diagnostic_f(input$weight,input$height)})
  }
)
###################################################
### Stepwat: CSV reading and sorting ###
### Author: Trace E Martyn ###
### Date: 12.14.2015 ###
###################################################
###################################################
## Read in all the files and make a mega database ##
## Sort the output for each year and each Rgroup ##
## based on the GCM- or Site-specific output and ##
## identify the max, min and median values. ##
###################################################
####################################################
## Must open, save, and close all csv files ####
## in Excel before running code ####
####################################################
######################################################################
#### Must open and save all CSVs in Excel until problem is solved ####
######################################################################
#### assemble all of the bmassavg....csv's into a file structure ####
#### that has a single folder for each site, labeled 'Site.1', ####
#### for each site in the run. Each folder contains all the bmass ####
#### csv's labeled as the wrapper labeled them. This will need ####
#### to be adjusted for output that includes disturbance and ####
#### grazing because of the difference in the naming structure ####
######################################################################
######################################################################

SITE<-1:10
RCP<-c("RCP45","RCP85")
YEARS<-c("50years","90years")
GCM<-c("ACCESS1-0","CanESM2","CESM1-CAM5","CMCC-CM","CNRM-CM5","CSIRO-Mk3-6-0","HadGEM2-ES","IPSL-CM5A-MR","MIROC5","NorESM1-M","Current")

# create empty dataframe
STEPWAT.300<-data.frame()

for (s in SITE) { # for each site
  setwd(paste0("C:/Users/litak/Dropbox/STEPWAT.Output/Site.",s,"/"))
  for (g in GCM) { # for each GCM
    if (g=="Current") { # special case for Current
      name<-paste("bmassavg.Site",s,g,sep=".")
      df<-read.csv(paste0(name,".csv"))
      df$Site<-s
      df$Period<-0
      df$RCP<-0
      df$GCM<-g
      assign(name,df)
      STEPWAT.300<-rbind(STEPWAT.300,df)} else {
        for (r in RCP) { # for each RCP
          for (y in YEARS) { # for each period 2060 (50) or 2100 (90)
            name<-paste("bmassavg.Site",s,g,y,r,sep=".")
            df<-read.csv(paste0(name,".csv"))
            df$Site<-s
            df$Period<-y
            df$RCP<-r
            df$GCM<-g
            assign(name,df)
            STEPWAT.300<-rbind(STEPWAT.300,df)}}}}
  # save R.Data image for each site - just in case of failing code
  save.image(paste("Site",s,"RData",sep="."))}

# set where you want large csv to be exported
setwd("C:/Users/litak/Dropbox/Dropbox")
#write csv
write.csv(STEPWAT.300,"StepWat.300yrs.AllSites.csv")

########### GCM Variability ####################################
########## make a new dataframe of median max and min values ###
Stepwat.DF<-read.csv("StepWat.300yrs.AllSites.csv")

GCM<-unique(Stepwat.DF$GCM)
GCM<-unique(GCM[1:10])
Rgroup<-c("sagebrush","a.cool.forb","a.warm.forb","p.cool.forb","p.warm.forb","a.cool.grass","a.warm.grass","p.cool.grass","p.warm.grass","shrub")
Site<-unique(Stepwat.DF$Site)
Period<-unique(Stepwat.DF$Period)
Period<-Period[1:2]
RCP<-unique(Stepwat.DF$RCP)
RCP<-RCP[1:2]
Year<-unique(Stepwat.DF$Year)

Stepwat.Sorted<-data.frame(ID=1:12001)
c<-1
for (p in Period) { # for each period 2060 or 2100
  print(p)
  for (s in RCP) { # for each RCP
    print(s)
    for (y in Year) { # for each year
      for (r in Rgroup) { # for each group
        DF.new<-data.frame(ID=1:10)
        b<-1
        for( g in GCM) { #for each GCM
          yearly.r<-Stepwat.DF[which(Stepwat.DF$GCM==g & Stepwat.DF$Year ==y &
                                       Stepwat.DF$RCP==s & Stepwat.DF$Period==p),r]
          m.yearly.r<-mean(yearly.r,na.rm=T)
          DF.new$Year[b]<-y
          DF.new$RGroup[b]<-r
          DF.new$GCM[b]<-g
          DF.new$Bmass[b]<-m.yearly.r
          b<-b+1
        }
        DF.temp<-DF.new[order(DF.new$Bmass),]
        Stepwat.Sorted$RCP[c]<-s
        Stepwat.Sorted$Period[c]<-p
        Stepwat.Sorted$Year[c]<-y
        Stepwat.Sorted$RGroup[c]<-r
        Stepwat.Sorted$GCM.min[c]<-DF.temp$GCM[1]
        Stepwat.Sorted$Bmass.min[c]<-DF.temp$Bmass[1]
        Stepwat.Sorted$GCM.max[c]<-DF.temp$GCM[10]
        Stepwat.Sorted$Bmass.max[c]<-DF.temp$Bmass[10]
        Stepwat.Sorted$GCM.med1[c]<-DF.temp$GCM[5]
        Stepwat.Sorted$Bmass.med1[c]<-DF.temp$Bmass[5]
        Stepwat.Sorted$GCM.med2[c]<-DF.temp$GCM[6]
        Stepwat.Sorted$Bmass.med2[c]<-DF.temp$Bmass[6]
        c<-c+1
      }
    }
  }
}

#reorder to make it easier to make figures
Stepwat.Sorted<-Stepwat.Sorted[order(Stepwat.Sorted$RCP,Stepwat.Sorted$Period,Stepwat.Sorted$RGroup,Stepwat.Sorted$Year),]
#write csv
write.csv(Stepwat.Sorted,"Stepwat.DF.Sorted.csv")

############# Current GCM ####################################
Stepwat.DF<-read.csv("StepWat.300yrs.AllSites.csv")

GCM<-"Current"
Rgroup<-c("sagebrush","a.cool.forb","a.warm.forb","p.cool.forb","p.warm.forb","a.cool.grass","a.warm.grass","p.cool.grass","p.warm.grass","shrub")
Site<-unique(Stepwat.DF$Site)
Period<-unique(Stepwat.DF$Period)
Period<-0
RCP<-unique(Stepwat.DF$RCP)
RCP<-0
Year<-unique(Stepwat.DF$Year)

Stepwat.Sorted.Current<-data.frame(ID=1:3000) # 1 period x 1 RCP x 300 years x 10 groups
c<-1
for (p in Period) { # for each period (only 0 = current here)
  print(p)
  for (s in RCP) { # for each RCP (only 0 = current here)
    print(s)
    for (y in Year) { # for each year
      for (r in Rgroup) { # for each group
        DF.new<-data.frame(ID=seq_along(GCM)) # size to the number of GCMs (here 1) so the indices below stay valid
        b<-1
        for (g in GCM) { # for each GCM
          yearly.r<-Stepwat.DF[which(Stepwat.DF$GCM==g & Stepwat.DF$Year==y &
                                       Stepwat.DF$RCP==s & Stepwat.DF$Period==p),r]
          m.yearly.r<-mean(yearly.r,na.rm=T)
          DF.new$Year[b]<-y
          DF.new$RGroup[b]<-r
          DF.new$GCM[b]<-g
          DF.new$Bmass[b]<-m.yearly.r
          b<-b+1
        }
        DF.temp<-DF.new[order(DF.new$Bmass),]
        n<-nrow(DF.temp) # with a single GCM, min = max = median; the general indices keep this consistent with the section above
        Stepwat.Sorted.Current$RCP[c]<-s
        Stepwat.Sorted.Current$Period[c]<-p
        Stepwat.Sorted.Current$Year[c]<-y
        Stepwat.Sorted.Current$RGroup[c]<-r
        Stepwat.Sorted.Current$GCM.min[c]<-DF.temp$GCM[1]
        Stepwat.Sorted.Current$Bmass.min[c]<-DF.temp$Bmass[1]
        Stepwat.Sorted.Current$GCM.max[c]<-DF.temp$GCM[n]
        Stepwat.Sorted.Current$Bmass.max[c]<-DF.temp$Bmass[n]
        Stepwat.Sorted.Current$GCM.med1[c]<-DF.temp$GCM[ceiling(n/2)]
        Stepwat.Sorted.Current$Bmass.med1[c]<-DF.temp$Bmass[ceiling(n/2)]
        Stepwat.Sorted.Current$GCM.med2[c]<-DF.temp$GCM[min(n, floor(n/2)+1)]
        Stepwat.Sorted.Current$Bmass.med2[c]<-DF.temp$Bmass[min(n, floor(n/2)+1)]
        c<-c+1
      }
    }
  }
}

#reorder to make it easier to make figures
Stepwat.Sorted.Current<-Stepwat.Sorted.Current[order(Stepwat.Sorted.Current$RGroup,Stepwat.Sorted.Current$Year),]
#write csv
write.csv(Stepwat.Sorted.Current,"Stepwat.DF.Sorted.Current.csv")

################ Site Variability ###############################
Stepwat.DF<-read.csv("StepWat.300yrs.AllSites.csv")
########## make a new dataframe of median max and min values ###
GCM<-unique(Stepwat.DF$GCM)
GCM<-unique(GCM[1:10])
Rgroup<-c("sagebrush","a.cool.forb","a.warm.forb","p.cool.forb","p.warm.forb","a.cool.grass","a.warm.grass","p.cool.grass","p.warm.grass","shrub")
Site<-1:10
Period<-unique(Stepwat.DF$Period)
Period<-Period[1:2]
RCP<-unique(Stepwat.DF$RCP)
RCP<-RCP[1:2]
Year<-1:300

Stepwat.Sorted.Site<-data.frame(ID=1:12000)
c<-1
for (p in Period) { # for each period 2060 (50) or 2100 (90)
  print(p)
  for (s in RCP) { # for each RCP
    print(s)
    for (y in Year) { # for each year
      for (r in Rgroup) { # for each group
        DF.new<-data.frame(ID=1:10)
        b<-1
        for (i in Site) { # for each site
          yearly.r<-Stepwat.DF[which(Stepwat.DF$Site==i & Stepwat.DF$Year==y &
                                       Stepwat.DF$RCP==s & Stepwat.DF$Period==p),r]
          m.yearly.r<-mean(yearly.r)
          DF.new$Year[b]<-y
          DF.new$RGroup[b]<-r
          DF.new$Site[b]<-i
          DF.new$Bmass[b]<-m.yearly.r
          b<-b+1
        }
        DF.temp<-DF.new[order(DF.new$Bmass),]
        Stepwat.Sorted.Site$RCP[c]<-s
        Stepwat.Sorted.Site$Period[c]<-p
        Stepwat.Sorted.Site$Year[c]<-y
        Stepwat.Sorted.Site$RGroup[c]<-r
        Stepwat.Sorted.Site$Site.min[c]<-DF.temp$Site[1]
        Stepwat.Sorted.Site$Bmass.min[c]<-DF.temp$Bmass[1]
        Stepwat.Sorted.Site$Site.max[c]<-DF.temp$Site[10]
        Stepwat.Sorted.Site$Bmass.max[c]<-DF.temp$Bmass[10]
        Stepwat.Sorted.Site$Site.med1[c]<-DF.temp$Site[5]
        Stepwat.Sorted.Site$Bmass.med1[c]<-DF.temp$Bmass[5]
        Stepwat.Sorted.Site$Site.med2[c]<-DF.temp$Site[6]
        Stepwat.Sorted.Site$Bmass.med2[c]<-DF.temp$Bmass[6]
        c<-c+1
      }
    }
  }
}

#reorder to make it easier to make figures
Stepwat.Sorted.Site<-Stepwat.Sorted.Site[order(Stepwat.Sorted.Site$RCP,Stepwat.Sorted.Site$Period,Stepwat.Sorted.Site$RGroup,Stepwat.Sorted.Site$Year),]
#write csv
write.csv(Stepwat.Sorted.Site,"Stepwat.DF.Sorted.Site.csv")

######## Current Site Variability #######################
########## make a new dataframe of median max and min values ###
GCM<-"Current"
Rgroup<-c("sagebrush","a.cool.forb","a.warm.forb","p.cool.forb","p.warm.forb","a.cool.grass","a.warm.grass","p.cool.grass","p.warm.grass","shrub")
Site<-c(1,3)
Period<-0
RCP<-0
Year<-1:300

Stepwat.Sorted.Site.Current<-data.frame(ID=1:3000) # 1 period x 1 RCP x 300 years x 10 groups
c<-1
for (p in Period) { # for each period (only 0 = current here)
  print(p)
  for (s in RCP) { # for each RCP (only 0 = current here)
    print(s)
    for (y in Year) { # for each year
      for (r in Rgroup) { # for each group
        DF.new<-data.frame(ID=seq_along(Site)) # size to the number of sites (here 2) so the indices below stay valid
        b<-1
        for (i in Site) { # for each site
          yearly.r<-Stepwat.DF[which(Stepwat.DF$Site==i & Stepwat.DF$Year==y &
                                       Stepwat.DF$RCP==s & Stepwat.DF$Period==p),r]
          m.yearly.r<-mean(yearly.r)
          DF.new$Year[b]<-y
          DF.new$RGroup[b]<-r
          DF.new$Site[b]<-i
          DF.new$Bmass[b]<-m.yearly.r
          b<-b+1
        }
        DF.temp<-DF.new[order(DF.new$Bmass),]
        n<-nrow(DF.temp) # with two sites: min = row 1, max = row 2, and both "medians" collapse onto these
        Stepwat.Sorted.Site.Current$RCP[c]<-s
        Stepwat.Sorted.Site.Current$Period[c]<-p
        Stepwat.Sorted.Site.Current$Year[c]<-y
        Stepwat.Sorted.Site.Current$RGroup[c]<-r
        Stepwat.Sorted.Site.Current$Site.min[c]<-DF.temp$Site[1]
        Stepwat.Sorted.Site.Current$Bmass.min[c]<-DF.temp$Bmass[1]
        Stepwat.Sorted.Site.Current$Site.max[c]<-DF.temp$Site[n]
        Stepwat.Sorted.Site.Current$Bmass.max[c]<-DF.temp$Bmass[n]
        Stepwat.Sorted.Site.Current$Site.med1[c]<-DF.temp$Site[ceiling(n/2)]
        Stepwat.Sorted.Site.Current$Bmass.med1[c]<-DF.temp$Bmass[ceiling(n/2)]
        Stepwat.Sorted.Site.Current$Site.med2[c]<-DF.temp$Site[min(n, floor(n/2)+1)]
        Stepwat.Sorted.Site.Current$Bmass.med2[c]<-DF.temp$Bmass[min(n, floor(n/2)+1)]
        c<-c+1
      }
    }
  }
}

#reorder to make it easier to make figures
Stepwat.Sorted.Site.Current<-Stepwat.Sorted.Site.Current[order(Stepwat.Sorted.Site.Current$RGroup,Stepwat.Sorted.Site.Current$Year),]
#write csv
write.csv(Stepwat.Sorted.Site.Current,"Stepwat.DF.Sorted.Site.Current.csv")
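# The four extraction loops above repeat the same rank-and-pick pattern. A
# small helper in this spirit (the name and return format are illustrative,
# not from the original script) captures it once and stays valid for any
# group size:
rank_summary <- function(bmass, labels) {
  ord <- order(bmass) # ascending by biomass
  n <- length(ord)
  list(label.min = labels[ord[1]], bmass.min = bmass[ord[1]],
       label.max = labels[ord[n]], bmass.max = bmass[ord[n]],
       label.med1 = labels[ord[ceiling(n/2)]], bmass.med1 = bmass[ord[ceiling(n/2)]],
       label.med2 = labels[ord[min(n, floor(n/2) + 1)]], bmass.med2 = bmass[ord[min(n, floor(n/2) + 1)]])
}
# e.g. rank_summary(c(3.2, 1.1, 2.5), c("GCM-A", "GCM-B", "GCM-C"))
# reproduces the [1]/[10]/[5]/[6] picks above when given 10 values.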
/CSV.read_SORTING.STEPWAT.R
no_license
DrylandEcology/StepWat_R_Wrapper
R
false
false
11,172
r
library(FreeSortR) ### Name: SortingPartition ### Title: Creates an object of class 'SortingPartition' ### Aliases: SortingPartition ### ** Examples data(AromaSort) Aroma<-SortingPartition(AromaSort) show(Aroma)
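# AromaSort (shipped with FreeSortR) holds free-sorting data: one column per
# subject with integers coding the groups each subject formed over the aroma
# stimuli. SortingPartition() wraps this as a SortingPartition object and
# show() prints a summary of the partitions -- the orientation described here
# is our reading of the example, so check ?AromaSort before relying on it.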
/data/genthat_extracted_code/FreeSortR/examples/SortingPartition.Rd.R
no_license
surayaaramli/typeRrh
R
false
false
225
r
library(plyr)

#Setting appropriate data path
setwd("C:/Users/Cristina/Documents/DataScience/Module 3/UCI HAR Dataset")
path<-getwd()

#Reading Files
#Activity Files
ActTrain<-read.table(file.path(path,"train", "y_train.txt"), header=FALSE)
ActTest<-read.table(file.path(path,"test", "y_test.txt"), header=FALSE)
ActNames<-read.table(file.path(path, "activity_labels.txt"), header=FALSE)
#Subject Files
SubTrain<-read.table(file.path(path,"train", "subject_train.txt"), header=FALSE)
SubTest<-read.table(file.path(path,"test", "subject_test.txt"), header=FALSE)
#Features Files
FeatTrain<-read.table(file.path(path,"train", "X_train.txt"), header=FALSE)
FeatTest<-read.table(file.path(path,"test", "X_test.txt"), header=FALSE)
FeatNames<-read.table(file.path(path, "features.txt"), header=FALSE)

#Merging the train and test data sets
activitySet<-rbind(ActTrain, ActTest)
subjectSet<-rbind(SubTrain, SubTest)
featureSet<-rbind(FeatTrain, FeatTest)

#Rename variables
names(activitySet)<-c("Activity")
names(subjectSet)<-c("Subject")
names(featureSet)<-FeatNames$V2

#Merge all
dataset<-cbind(featureSet, subjectSet, activitySet)

#Extract mean and standard deviation measurements
extract<-FeatNames$V2[grep("mean\\(\\)|std\\(\\)", FeatNames$V2)]
ext_names<-as.character(extract)
subData<-subset(dataset, select=c(ext_names, "Subject", "Activity"))

#Uses descriptive activity names to name the activities in the data set
subData$Activity <- ActNames[subData$Activity, 2]
subData$Activity <- factor(subData$Activity)

#Appropriately labels the data set with descriptive variable names.
names(subData)<-gsub("^t", "time", names(subData))
names(subData)<-gsub("^f", "freq", names(subData))
names(subData)<-gsub("Acc", "Accelerometer", names(subData))
names(subData)<-gsub("Gyro", "Gyroscope", names(subData))
names(subData)<-gsub("Mag", "Magnitude", names(subData))
names(subData)<-gsub("BodyBody", "Body", names(subData))

#Creates a second, independent tidy data set with the average of each variable for
#each activity and each subject.
tidy<-aggregate(.~Subject+Activity, subData, mean)
tidy<-tidy[order(tidy$Subject, tidy$Activity),]
write.table(tidy, file="TidyDataSet.txt", row.names=FALSE)
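#Optional sanity check (the expected dimensions assume the standard UCI HAR
#release: 30 subjects, 6 activities, 66 mean()/std() features):
check <- read.table("TidyDataSet.txt", header = TRUE)
dim(check) # expect c(180, 68): 30 subjects x 6 activities; 66 features + Subject + Activity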
/run_analysis.R
no_license
acgadala/GettingCleaningData
R
false
false
2,151
r
###################################### # # # Visualizing host species phylogeny # # and creating distance matrix # # # ###################################### setwd("C:/Users/Brody/Desktop/Murre spp. Population Genomics and Conservation/BIOL812 - Bioinformatics/Endophytes") install.packages("ape") library(ape) tree = read.tree(text = "((((((((Cladophora)Cladophoraceae)Cladophorales)Ulvophyceae)Chlorophyta,((((((((((Grimmia_pilifera)Grimmia)Grimmiaceae)Grimmiales)Dicranidae,((((Bryum)Bryaceae)Bryales)Bryanae,((((Cratoneuron_filicinum)Cratoneuron)Amblystegiaceae,((Pylaisia_polyantha)Pylaisia)Hypnaceae)Hypnales)Hypnanae)Bryidae)Bryopsida)Bryophytina)Bryophyta,((((((((((((((Oryza)Oryzinae)Oryzeae)Oryzoideae)BOP_clade,(((((Zea)Tripsacinae)Andropogoneae)Andropogonodae)Panicoideae)PACMAD_clade)Poaceae)Poales)commelinids,(((Aloe)Asphodeloideae)Asphodelaceae,((((Phalaenopsis)Aeridinae)Vandeae)Epidendroideae)Orchidaceae)Asparagales)Petrosaviidae)Liliopsida,((((((((((Espeletia)Millerieae)Heliantheae_alliance)Asteroideae)Asteraceae)Asterales)campanulids,((((((((Rothmannia_macrophylla)Rothmannia)Gardenieae)Gardenieae_-_Pavetteae_clade)Gardenieae_complex)Ixoroideae)Rubiaceae)Gentianales,((((((Solanum_lycopersicum)Lycopersicon)Solanum)Solaneae)Solanoideae)Solanaceae)Solanales)lamiids)asterids,(((((Microdesmis_caseariifolia)Microdesmis)Pandaceae)Malpighiales)fabids,((((Vitis_vinifera)Vitis)Vitaceae)Vitales)rosids_incertae_sedis,((((Santiria_apiculata)Santiria)Burseraceae)Sapindales,((((Brassica_napus,Brassica_oleracea)Brassica)Brassiceae)Brassicaceae)Brassicales)malvids)rosids,(((Paeonia_suffruticosa,Paeonia_rockii)Paeonia)Paeoniaceae)Saxifragales)Pentapetalae)Gunneridae)eudicotyledons)Mesangiospermae)Magnoliophyta,((((((Pinus_pinaster)Pinus,(Pinus_flexilis)Strobus)Pinus)Pinaceae)Pinales)Pinidae)Acrogymnospermae)Spermatophyta)Euphyllophyta)Tracheophyta)Embryophyta)Streptophytina)Streptophyta)Viridiplantae)Eukaryota)cellular_organisms);") plot(tree, type='cladogram',edge.width=1,cex=1, edge.color='black') # export the plot as a PDF p.dist = cophenetic(tree) View(p.dist) write.csv(p.dist,file="dist_matrix.csv")
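# Note: the Newick string above has no branch lengths, which cophenetic()
# needs to compute patristic distances; if dist.nodes() complains, one option
# (an assumption about intent, not stated above) is to assign lengths first:
# tree <- ape::compute.brlen(tree, method = "Grafen")

# One way to fulfil the "export the plot as a PDF" comment above (the file
# name is illustrative):
pdf("host_phylogeny_cladogram.pdf", width = 8, height = 12)
plot(tree, type = 'cladogram', edge.width = 1, cex = 1, edge.color = 'black')
dev.off()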
/tree.R
no_license
KatherineDuchesneau/Endophyte_project
R
false
false
2,243
r
# Simulations to ask how many mosquitoes would need to be sampled
# to allow a given difference in age structure between two populations
# (e.g. pre- and post-intervention) to be detected, when age-class is
# inferred by the MIRS-CNN method. The effect of enriching the training
# set of lab-reared mosquitoes using increasing numbers of mosquitoes reared with
# environmental variation (EV) is assessed.
# (Assumptions adapted from Fig 4 of: http://dx.doi.org/10.12688/wellcomeopenres.15201.2)

# load packages
library(parallel)
library(scales)
library(RColorBrewer)

# clear memory
rm(list = ls())

# get date
date.today <- Sys.Date()

# load "EV variation" (TRUE) or "sampling variation" (FALSE) matrices
# selecting "EV <- TRUE" will run the simulations to generate Fig 4b,c,d
# and Table S4
# selecting "EV <- FALSE" will run the simulations to generate Fig S2
EV <- TRUE

# assumptions:
# Simulate a population with survival rate of 0.91 (gambiae) or 0.82 (arabiensis)
s <- c(gambiae = 0.91, arabiensis = 0.82)[1]
p <- 1 - s # daily death probability before intervention

# increase in death rate due to two interventions
# LLIN: 4-fold increase in death rate, starting on day 4 of life
# toxic sugar bait (ATSB): 2-fold increase in death rate, from day 2
# (values set in intervention.tab below)
intervention.tab <- data.frame(effect = c(1, 4, 2), day.active = c(2, 4, 2))
#intervention.tab <- data.frame(effect = c(1, 2, 2), day.active = c(2, 4, 2))
rownames(intervention.tab) <- c("Control", # natural mortality
                                "LLIN",    # long-lasting insecticide nets (LLIN)
                                "ATSB")    # attractive toxic sugar baits (ATSB)
intervention.tab

# maximum lifespan of mosquitoes
n.day <- 20
day <- 1:n.day

# age-classes: 1-4, 5-10, 11+
age.cut <- c(min(day) - 0.5, 4.5, 10.5, max(day) + 0.5)
age.bin <- lapply(2:length(age.cut), function(i) {
  day[sapply(day, function(x) all((x < age.cut[c(i-1, i)]) == c(FALSE, TRUE)))]
})
names(age.bin) <- apply(sapply(age.bin, range), 2, paste, collapse = "-")
age.bin.num <- 1:length(age.bin)

# age structure in the 3 groups
ageprob.list <- lapply(1:nrow(intervention.tab), function(i) {
  death.prob <- c(0, rep(p, intervention.tab$day.active[i] - 2),
                  rep(p * intervention.tab$effect[i], n.day - intervention.tab$day.active[i] + 1))
  ageprob <- cumprod(1 - death.prob)
  ageprob <- ageprob/sum(ageprob)
  names(ageprob) <- day
  ageprob
})
names(ageprob.list) <- rownames(intervention.tab)

# convert per-day structure to binned structure
ageprob.bin.list <- lapply(1:nrow(intervention.tab), function(i) {
  sapply(age.bin, function(x) sum(ageprob.list[[i]][as.character(x)]))
})
names(ageprob.bin.list) <- rownames(intervention.tab)

# make plot comparing age structures
cols <- c("black", "blue", "red")
old.par <- par(mar = c(5.1, 4.1, 4.1, 4.1))
ylim <- c(0, ceiling(10 * max(unlist(ageprob.list)))/10)
plot(day, ageprob.list[[1]], type = "n", ylim = ylim,
     xlab = "Mosquito age (days)", ylab = "Proportion in population")
lapply(1:length(ageprob.list), function(i) {
  points(day, ageprob.list[[i]], type = "b", pch = 16, col = cols[i])
})

# add age class proportions
bin.scale <- max(unlist(ageprob.bin.list))/max(unlist(ageprob.list))
lapply(names(age.bin), function(x) {
  lapply(1:nrow(intervention.tab), function(i) {
    lines(cbind(age.bin[[x]], ageprob.bin.list[[i]][x]/bin.scale),
          col = alpha(cols[i], 0.4), lwd = 4)
  })
})
axis(4, at = pretty(0:1)/bin.scale, labels = pretty(0:1))
mtext("Proportion in population (binned)", side = 4, line = 2.5)

# add legend and title
legend("topright", legend = rownames(intervention.tab), pch = 16, col = cols,
       lty = 1, bty = "n")
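# A second legend call at the same position deliberately overprints the
# first, layering translucent thick-line keys (the binned bands) over the
# point/line keys -- presumably to build a composite legend.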
legend("topright", lwd = 4, legend = rownames(intervention.tab), col = alpha(cols, 0.4), lty = 1, bty = "n") par(old.par) # make bar chart nice.cols <- c("#e0f3db", "#a8ddb5", "#43a2ca") ageprob.bin.tab <- do.call("cbind", ageprob.bin.list) pdf(paste0("agestructure.barchart.", date.today, ".pdf"), height = 5/2.54, width = 6/2.54, pointsize = 10) old.par <- par(mar = c(2.1, 3.1, 0.6, 0.1)) rownames(ageprob.bin.tab) <- paste0(rownames(ageprob.bin.tab), "d") barplot(ageprob.bin.tab, beside = TRUE, legend.text = TRUE, args.legend = list(x = "topleft", bty = "n", x.intersp = 0.2, inset = -0.03), ylab = "", xlab = "", axes = FALSE, ylim = c(0, 1.1 * max(ageprob.bin.tab)), col = nice.cols, padj = -1) mtext("Proportion", side = 2, line = 2) rownames(ageprob.bin.tab) <- names(ageprob.bin.list[[1]]) axis(2, at = pretty(c(ageprob.bin.tab)), padj = 0.7) par(old.par) dev.off() # read in confusion matrices if(EV) { mat.tab <- read.csv("Confusion_Matrices/confusion_matrices_all_2020-01-22.csv", header = FALSE) } else { mat.tab <- read.csv("Confusion_Matrices/confusion_matrices_0_05_2020-01-22.csv", header = FALSE) #for(j in 1:ncol(mat.tab)) mat.tab[, j] <- mean(mat.tab[, j]) } mat.names <- paste0("r", rep(1:3, each = 3), "c", rep(1:3, 3)) names(mat.tab) <- mat.names dim(mat.tab) if(EV) { mat.tab$n.tcv <- c(0, 162, 324, 486, 654, 815, 973, 1131, 1294, 1452) } else { mat.tab$n.tcv <- 1:nrow(mat.tab) } # confusion matrices explained: # each row of mat.tab # contains 9 values from 3x3 confusion matrix # which defines the accuracy of the MIRS-CNN method in inferring # the age of a mosquito. # the final column of mat.tab gives the number of mosquitoes from # the environmental variation (EV) data set that were added to the training data # to improve the training of the the CNN (convolutional neural network). # for example, row 6 of mat.tab mat.tab[6, ] # gives the confusion matrix where 815 EV mosquitoes were used, # and the value 815. turn these back into a confusion matrix: matrix(unlist(mat.tab[6, mat.names]), ncol = 3, byrow = TRUE) # the rows represent true age classes. the columns give the probability # that a mosquito of that age class will be assigned to each of the three # age classes. e.g. 
# the probability of a mosquito in the first age class (1-4 days)
# being correctly assigned to that age class by the MIRS-CNN method is:
matrix(unlist(mat.tab[6, mat.names]), ncol = 3, byrow = TRUE)[1, 1]

# make table of assumption choices (scenarios to simulate)
# try all combinations of
#   enrichment (degree of enrichment of the training data with EV)
#   sample size (n wild mosquitoes per intervention group)
assumptions <- expand.grid(
  mat.row = 1:nrow(mat.tab), # which confusion matrix to use
  n = c(20, 50, 100, 150, 200, 250, 300), # sample size from each population
  nsim = 10000, # n data sets to simulate per scenario
  stringsAsFactors = FALSE)

# set random seeds
RNGkind("L'Ecuyer-CMRG")
global.rand.seed <- 782120569
# https://www.random.org/integers/?num=1&min=0&max=1000000000&col=1&base=10&format=html&rnd=new
# Random Integer Generator
# Here are your random numbers:
# 782120569
# Timestamp: 2020-02-25 13:07:09 UTC
set.seed(global.rand.seed)
assumptions$global.rand.seed <- global.rand.seed
assumptions$rand.seed <- sample(1e9, nrow(assumptions))

# simulate populations
start.time <- Sys.time()
simres.tab <- sapply(1:nrow(assumptions), function(j) { # loop over scenarios

  set.seed(assumptions$rand.seed[j])
  mc.reset.stream()
  simres.list <- mclapply(1:assumptions$nsim[j], # analyse nsim simulated data sets
                          function(i) {

    # simulate data with true age in days
    n <- assumptions$n[j]
    dat <- do.call("rbind", lapply(1:nrow(intervention.tab), function(k) {
      data.frame(intervention = rownames(intervention.tab)[k],
                 age = c(day %*% rmultinom(n, 1, ageprob.list[[k]])))
    }))

    # bin true age in age classes
    dat$age.cat <- as.numeric(cut(dat$age, age.cut, labels = names(age.bin)))

    # apply confusion matrix to give estimated age class
    mat <- matrix(unlist(mat.tab[assumptions$mat.row[j], mat.names]), ncol = 3, byrow = TRUE)
    dimnames(mat) <- list(names(age.bin), names(age.bin))
    # check rows sum to 1
    rowSums(mat)

    # "estimate" age class by drawing from a multinomial distribution
    dat$age.cat.est <- sapply(dat$age.cat, function(a) age.bin.num %*% rmultinom(1, 1, mat[a, ]))

    # test both interventions against the control population
    # using the Wilcoxon-Mann-Whitney test and chi-squared test
    # (only the chi-squared test was used, ultimately, as this test had greater power)
    out.list <- lapply(2:nrow(intervention.tab), function(h) {

      dat.test <- droplevels(dat[dat$intervention %in% rownames(intervention.tab)[c(1, h)], ])

      # do Wilcoxon-Mann-Whitney test to compare age distributions
      table(dat.test$age.cat, dat.test$intervention)
      wil.pow <- wilcox.test(age.cat ~ intervention, data = dat.test)$p.value < 0.05
      wil.pow.est <- wilcox.test(age.cat.est ~ intervention, data = dat.test)$p.value < 0.05
      if(is.na(wil.pow)) wil.pow <- 0
      if(is.na(wil.pow.est)) wil.pow.est <- 0

      # do chi-squared test to compare age distributions
      xtab <- table(factor(dat.test$age.cat.est, 1:3), dat.test$intervention)
      chi.pow <- chisq.test(table(dat.test$age.cat, dat.test$intervention))$p.value < 0.05
      chi.pow.est <- chisq.test(xtab[rowSums(xtab) > 0, ])$p.value < 0.05

      # export test results
      out <- c(wil.pow = wil.pow, wil.pow.est = wil.pow.est,
               chi.pow = chi.pow, chi.pow.est = chi.pow.est,
               prop.control = prop.table(xtab, 2)[, rownames(intervention.tab)[1]],
               prop.intervention = prop.table(xtab, 2)[, rownames(intervention.tab)[h]])
      names(out) <- paste(names(out), rownames(intervention.tab)[h], sep = ".")
      out
    })
    unlist(out.list)
  }, mc.cores = detectCores() - 1)
  print(paste0(round(100*j/nrow(assumptions)), "% complete"))

  # bind results together as a table
  simres <-
do.call("rbind.data.frame", simres.list) dim(simres) names(simres) <- names(simres.list[[1]]) # take mean across all nsim simulations, giving power estimates for each scenario apply(simres, 2, mean) }) # bind assumptions table to results out <- cbind(assumptions, mat.tab[assumptions$mat.row, ], t(simres.tab)) out[, grep("prop\\.", names(out))] <- round(out[, grep("prop\\.", names(out))], 3) out$mat.row <- NULL # compare wilcox and chi-squared results plot(chi.pow.est.LLIN ~ wil.pow.est.LLIN, data = out, xlab = "Wilcoxon", ylab = "Chisq") points(chi.pow.est.ATSB ~ wil.pow.est.ATSB, data = out, col = "red") abline(0, 1) legend("topleft", legend = c("LLIN", "ATSB"), col = 1:2, pch = 1) # plot power against sample size broken down by enrichment level lapply(2:nrow(intervention.tab), function(i) { gp <- rownames(intervention.tab)[i] form <- formula(paste0("wil.pow.est.", gp, " ~ n")) form2 <- formula(paste0("wil.pow.", gp, " ~ n")) ntcv.lev <- unique(out$n.tcv) ntcv.col <- brewer.pal(length(ntcv.lev), "RdYlBu") if(!EV) ntcv.col <- rep(ntcv.col[2], length(ntcv.lev)) names(ntcv.col) <- ntcv.lev powercurve.file <- paste0("agestructure.powercurve.", ifelse(EV, "", "var."), names(s), ".", gp, ".", date.today, ".pdf") pdf(powercurve.file, height = 7/2.54, width = 8/2.54, pointsize = 10) old.par <- par(mar = c(2.6, 2.6, 0.6, 0.2)) plot(form, data = out, ylim = 0:1, xlim = c(min(out$n), max(out$n) * 1.20^(!EV - 1)), type = "n", ylab = "", xlab = "", axes = FALSE) mtext("N per population", 1, line = 1.5) mtext("Power", 2, line = 1.5) tcl <- -0.3 axis(2, padj = 0.9, tcl = tcl) axis(1, at = unique(out$n), padj = -0.9, tcl = tcl, gap.axis = 0.25) box() lapply(ntcv.lev, function(ntcv) { points(form, data = out[out$n.tcv == ntcv, ], type = "b", pch = 21, bg = ntcv.col[as.character(ntcv)]) }) max.power <- tapply(out[, paste0("wil.pow.", gp)], out$n, mean) if(EV) { lines(as.numeric(names(max.power)), max.power, lty = 3) temp <- legend("bottomright", bty = "n", legend = rep(" ", length(ntcv.lev)), pch = 21, text.width = max(strwidth(ntcv.lev)), xjust = 1, yjust = 1, pt.bg = rev(ntcv.col), x.intersp = 0.4, inset = -0.01) text(temp$rect$left + temp$rect$w, temp$text$y, rev(ntcv.lev), pos = 2) } #title(rownames(intervention.tab)[i]) par(old.par) dev.off() }) if(!EV) { head(out) sapply(out[out$n == 20, grep("wil.pow.est", names(out), value = TRUE)], sd) / sapply(out[out$n == 20, grep("wil.pow.est", names(out), value = TRUE)], function(x) { pwr <- mean(x) sqrt(pwr * (1-pwr) / unique(out$nsim)) }) } # write results to csv out.file <- paste0("agestructure.power.", ifelse(EV, "", "var."), names(s), ".", date.today, ".csv") write.csv(out, out.file, row.names = FALSE) print(Sys.time() - start.time) # post-formatting of results for Table S4 # formatting numbers # this function is better than round because it doesn't strip off trailing zeroes library(gdata) my.format<- function(x,ndp=0,na.string="") { out<- format(round(x,ndp),ns=ndp,scientific=FALSE,just='none') out[grep("NA",out)]<-na.string trim(out) } if(EV) { TableS4 <- read.csv(out.file)[, c("n", "n.tcv", "wil.pow.est.LLIN", "wil.pow.est.ATSB")] TableS4$wil.pow.est.LLIN <- paste0(my.format(TableS4$wil.pow.est.LLIN * 100, 1), "%") TableS4$wil.pow.est.ATSB <- paste0(my.format(TableS4$wil.pow.est.ATSB * 100, 1), "%") write.csv(TableS4, "TableS4.csv", row.names = FALSE) }
/code/Power analyses/MIRS_AgeStructure_PowerSim_v05.R
permissive
SimonAB/DL-MIRS_Siria_et_al
R
false
false
14,437
r
#' @export
insert_query <- function(conn, ems_name, db_id, data_file=NULL) {
  obj <- list()
  class(obj) <- 'InsertQuery'

  # Instantiating other objects
  obj$connection <- conn
  obj$ems <- ems(conn)
  obj$ems_id <- get_id(obj$ems, ems_name)
  obj$db_id <- db_id

  # object data
  obj$create <- list(createColumns=list())
  obj <- reset(obj)
  return(obj)
}

#' @export
insert_row.InsertQuery <- function(qry, row) {
  #
  # Inputs:
  #    row (list): A list of values to input, where the key is the fieldId and the value is the value to input.
  #                e.g. row <- list(fieldId1 = value1, fieldId2 = value2, ..., fieldIdN = valueN)

  curr_length <- length(qry$create$createColumns) # Get length of createColumns field
  next_entry <- curr_length + 1 # Find next entry, which is where we want to put our next row
  qry$create$createColumns[[next_entry]] <- list() # Add an empty list at next_entry
  i <- 1
  # Loop over all key:value pairs in row.
  for (field_id in names(row)){
    # Add all keys and values as {fieldId = key, value = value}
    qry$create$createColumns[[next_entry]][[i]] <- list(fieldId = field_id, value = row[[field_id]])
    i <- i + 1
  }
  qry
}

#' @export
insert_data_frame.InsertQuery <- function(qry, df, schema_map=NULL) {
  #
  # Inputs:
  #    df (data.frame): A DataFrame of values to input, where the columns are the fieldIds and the entries are values to input.
  #    schema_map (list): A mapping of named dataframe columns to field ids, e.g. list('column1' = '[-hub][schema]')

  # if schema_map is not null, we want to translate column names to fields in EMS, so we need to make sure all of the dataframe
  # columns are also in schema_map, which maps df column names to schemas in EMS
  if (!is.null(schema_map)){
    for (col in colnames(df)){
      if (!(col %in% names(schema_map))){
        cat(sprintf("Column: '%s' found in df, but not in mapper. Please only pass in columns which should be updated in the target table and for which a schema mapping exists in the supplied mapper list.", col))
        stop("Not all columns in df were found in schema_map.")
      }
    }
  }

  # Loop over the rows of df, adding one createColumns entry per row.
  for (i in 1:nrow(df)){
    row <- as.list(df[i,])
    curr_length <- length(qry$create$createColumns) # Get length of createColumns field
    next_entry <- curr_length + 1 # Find next entry, which is where we want to put our next row
    qry$create$createColumns[[next_entry]] <- list() # Add an empty list at next_entry
    j <- 1
    for (name in names(row)){
      # Add all keys and values as {fieldId = key, value = value}
      # Use schema_map to translate dataframe column names into a schema, if schema_map is not null
      if (!is.null(schema_map)){
        qry$create$createColumns[[next_entry]][[j]] <- list(fieldId = schema_map[[name]], value = row[[name]])
      } else {
        # if schema_map is NULL, assume the column names are already field ids
        qry$create$createColumns[[next_entry]][[j]] <- list(fieldId = name, value = row[[name]])
      }
      j <- j + 1
    }
  }
  qry
}

#' @export
run.InsertQuery <- function(qry) {
  cat("Sending a regular query to EMS ...")
  r <- request(qry$connection, rtype = "POST",
               uri_keys = c('database', 'create'),
               uri_args = c(qry$ems_id, qry$db_id),
               jsondata = qry$create)
  cat("Done.\n")
  if (httr::status_code(r) == 200){
    n_rows <- length(qry$create$createColumns)
    if (httr::content(r)$rowsAdded == n_rows){
      cat('Successfully added all rows.\n')
      return(TRUE)
    }
  } else if (httr::status_code(r) %in% c(400, 401, 503)) {
    cat('Failed to add rows.\n')
    content <- httr::content(r)
    cat(sprintf('message: %s\n', content$message))
    cat(sprintf('messageDetail: %s\n', content$messageDetail))
    cat(sprintf('unexpected: %s\n', content$unexpected))
    return(FALSE)
  } else {
    cat("An unknown error occurred.\n")
    return(FALSE)
  }
}

#' @export
reset.InsertQuery <- function(qry) {
  qry$create <- list(
    createColumns = list()
  )
  qry
}

#' @export
json_str.InsertQuery <- function(qry) {
  jsonlite::toJSON(qry$create, auto_unbox = T, pretty = T)
}
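# Illustrative usage sketch (the connection setup, field ids, and database id
# below are placeholders/assumptions, not taken from this file; insert_row()
# and run() dispatch via the package's S3 generics):
# conn <- connect(usr, pwd)
# qry  <- insert_query(conn, ems_name = "ems-demo", db_id = "[ems-core][entity-type][foo]")
# qry  <- insert_row(qry, list("[-hub-][field][bar]" = 1))
# run(qry)  # POSTs qry$create to the 'create' endpoint and reports success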
/R/db_create.R
permissive
ge-flight-analytics/Rems
R
false
false
4,489
r
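A minimal usage sketch for the insert API defined in R/db_create.R above. The connection helper, EMS name, database id, and field ids are hypothetical placeholders rather than values shipped with the Rems package, and insert_data_frame / run are assumed to be exported S3 generics dispatching to the methods shown.

conn <- connect("user", "password")                  # assumed Rems connection helper
qry <- insert_query(conn, "ems-name-here", db_id = "my-database-id")

df <- data.frame(temp = c(21.5, 22.0), site = c("A", "B"))
schema_map <- list(temp = "[-hub-][temp-field]",     # hypothetical field ids
                   site = "[-hub-][site-field]")

qry <- insert_data_frame(qry, df, schema_map = schema_map)
run(qry)  # POSTs qry$create to the database 'create' endpoint; TRUE if all rows were added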
#######################################
# COVID-19 estimated infections
# correcting for incomplete testing and
# imperfect test accuracy

# Plot of COVID-19 cumulative number of infections
# distribution from simulation
#######################################
rm(list=ls())

source(paste0(here::here(), "/0-config.R"))

tmpshot <- fileSnapshot(paste0(results_path, "bias-corrected-distributions/state/"))
latest = rownames(tmpshot$info[which.max(tmpshot$info$mtime),])

dist = readRDS(paste0(results_path, "/bias-corrected-distributions/state/", latest))
data = readRDS(paste0(results_path, "covid_usa_state_adjusted.RDS"))

state_abbrev <- read_csv(state_abbrev_path) %>%
  rename(state = Abbreviation,
         statename = State)

#--------------------------------------
# process state distributions
#--------------------------------------
state_abbrev <- read_csv(state_abbrev_path)

state_case_dist_list = list()
N_list = list()

for(i in 1:ncol(dist)){
  state_case_dist_list[[i]] = dist[, i]$exp_cases
  N_list[[i]] = dist[, i]$N
}

names(state_case_dist_list) = colnames(dist)
names(N_list) = colnames(dist)

state_case_dist = as.data.frame(bind_rows(state_case_dist_list))
N_df = as.data.frame(bind_rows(N_list))

state_case_distl = melt(state_case_dist) %>%
  rename(state = variable, exp_cases = value)
N_dfl = melt(N_df) %>%
  rename(state = variable, N = value)
N_dfl = N_dfl[!duplicated(N_dfl),]

plotdf = left_join(state_case_distl, N_dfl, by = "state") %>%
  mutate(exp_perpop = exp_cases / N * 1000) %>%
  group_by(state) %>%
  mutate(
    med = quantile(exp_cases, prob = 0.5),
    lb = quantile(exp_cases, prob = 0.025),
    ub = quantile(exp_cases, prob = 0.975))

plotdf = plotdf %>%
  left_join(state_abbrev, by = c("state" = "Abbreviation")) %>%
  rename("statename" = "state")

plotdf$statename = factor(plotdf$statename)
plotdf$statename_f = fct_reorder(plotdf$statename, plotdf$med)

plotbreaks = c(0, 1000, 10000, 100000, 1000000)

# plot = ggplot(plotdf, aes(y = exp_cases, x = statename_f)) +
#   geom_boxplot(aes(fill = log10(med)),
#                outlier.stroke = 0.01, lwd = 0.2) +
#   scale_y_log10(breaks = plotbreaks,
#                 labels = format(plotbreaks, scientific = F, big.mark = ",")) +
#   scale_fill_viridis("log10(median)", begin = 0.3, end = 0.95, direction = -1, option = "A") +
#   ylab("Distribution of estimated COVID-19 infections") +
#   xlab("") +
#   coord_flip() +
#   theme_bw() +
#   theme(legend.position="none")
# plot

plotdf %>%
  group_by(statename_f) %>%
  summarise(med = median(exp_cases),
            q025 = quantile(x = exp_cases, probs = 0.025),
            q975 = quantile(x = exp_cases, probs = 0.975),
            q25 = quantile(x = exp_cases, probs = 0.25),
            q75 = quantile(x = exp_cases, probs = 0.75),
            name = unique(State)) -> plotdf_summary

plotdf_summary$name = factor(plotdf_summary$name)
plotdf_summary$name = fct_reorder(plotdf_summary$name, plotdf_summary$med)

plot <- ggplot(data = plotdf_summary) +
  geom_boxplot(
    aes(
      x = name,
      middle = med,
      ymin = q025,
      ymax = q975,
      lower = q25,
      upper = q75,
      fill = log10(med)
    ),
    outlier.shape = NA, stat = "identity"
  ) +
  scale_y_log10(breaks = plotbreaks,
                labels = format(plotbreaks, scientific = F, big.mark = ",")) +
  scale_fill_viridis("log10(median)", begin = 0.3, end = 0.95, direction = -1, option = "A") +
  ylab("Distribution of estimated COVID-19 infections") +
  xlab("") +
  coord_flip() +
  theme_bw() +
  theme(axis.title = element_text(size = rel(1.15)),
        axis.text = element_text(size = rel(1.10)),
        legend.position = "none")

ggsave(plot, filename = paste0(plot_path, "fig-state-cases-distribution.png"),
       width = 10, height = 8)
/covid19-infections-NatureComms/3-figure-table-scripts/5-fig-density-state.R
no_license
meahmadi/covid-death-per-incidence
R
false
false
3,809
r
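A self-contained sketch of the stat = "identity" boxplot idiom used in the script above, where whisker and hinge positions come from precomputed quantiles instead of letting ggplot2 compute them; the toy data is illustrative only.

library(ggplot2)
library(dplyr)

set.seed(1)
toy <- data.frame(group = rep(c("A", "B"), each = 100),
                  value = c(rlnorm(100, 3), rlnorm(100, 4)))

summ <- toy %>%
  group_by(group) %>%
  summarise(med  = median(value),
            q025 = quantile(value, 0.025), q975 = quantile(value, 0.975),
            q25  = quantile(value, 0.25),  q75  = quantile(value, 0.75))

ggplot(summ) +
  geom_boxplot(aes(x = group, middle = med, ymin = q025, ymax = q975,
                   lower = q25, upper = q75),
               stat = "identity") +  # draw boxes directly from the summary columns
  coord_flip()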
% Generated by roxygen2 (4.1.0): do not edit by hand
% Please edit documentation in R/group_by_xdf.R
\name{groups.RxFileData}
\alias{groups.RxFileData}
\alias{groups.grouped_tbl_xdf}
\title{Get the groups for a file data source, or a tbl wrapping an Xdf file}
\usage{
\method{groups}{RxFileData}(x)

\method{groups}{grouped_tbl_xdf}(x)
}
\arguments{
\item{x}{A tbl for an Xdf data source; or a raw file data source.}
}
\value{
If \code{x} is a grouped tbl, a character vector giving the grouping
variable names; otherwise, \code{NULL}.
}
\description{
Get the groups for a file data source, or a tbl wrapping an Xdf file
}
/man/groups.Rd
no_license
esparza83/dplyrXdf
R
false
false
624
rd
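A hedged usage sketch for the methods documented above. It assumes the dplyrXdf package is loaded alongside RevoScaleR and that an Xdf file exists at the placeholder path; the grouping variable is illustrative.

library(dplyrXdf)               # assumed available; depends on RevoScaleR

xdf <- RxXdfData("flights.xdf") # placeholder path to an existing Xdf file

grouped <- group_by(xdf, year)  # yields a grouped_tbl_xdf
groups(grouped)                 # character vector of grouping variables: "year"
groups(xdf)                     # NULL for an ungrouped file data source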
rm(list=ls())
####################################################################################################
setwd("/gpfs/group/kzk10/default/private/hydrocalib/SGrove/famos/Official_Fast/")

library(snow); library(Rmpi); library(doParallel); library(foreach)

ens <- nprocs <- 39
mp_type = "MPI" # PSOCK or MPI
cl <- parallel::makeCluster(spec = nprocs, type = "MPI")
doParallel::registerDoParallel(cl)

source("run/mcmc_source_Tr.R")
source("run/Initialize.R")

outputMat <- foreach::foreach(jobNum = 1:ens) %dopar% {
  source("run/mcmc_source_Tr.R")
  source("run/rWrapper.R")
  load("output/mhParameters_0.RData")
  inputDir <- "/gpfs/group/kzk10/default/private/hydrocalib/SGrove/famos/Official_Fast/input"
  outputDir <- '/gpfs/group/kzk10/default/private/hydrocalib/SGrove/famos/Official_Fast/output'
  jobPar <- parMat[jobNum,]
  modelEval(par = jobPar, j = jobNum, inputDir = inputDir, outputDir = outputDir)
}

# rm(list=ls())
# setwd("/gpfs/group/kzk10/default/private/hydrocalib/SGrove/famos/Official_Fast/")
# source("run/mcmc_source_Tr.R")
# source("run/rWrapper.R")
# load("output/mhParameters_0.RData")
# inputDir <- "/gpfs/group/kzk10/default/private/hydrocalib/SGrove/famos/Official_Fast/input"
# outputDir <- '/gpfs/group/kzk10/default/private/hydrocalib/SGrove/famos/Official_Fast/output'
# load("input/obsData.RData")
# ens = 390
# outputMat <- matrix(nrow = ens, ncol = 19)
# for(j in 1:ens){
#   print(j)
#   for(i in 1:5){
#     if(i == 1){ # Read model output - First
#       output <- readOutput(j = j, interval = i, dir = outputDir)
#     } else {    # Read model output - Next
#       output <- c(output, readOutput(j = j, interval = i, dir = outputDir))
#     }
#   }
#   outputMat[j,] <- output
# }

save(outputMat, file="output/B_preCalibrationResults.RData")
/Official_Fast/run/precalibration/genInitialSamples.R
no_license
benee55/SharmaEtAl22
R
false
false
1,757
r
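A portable, minimal sketch of the foreach/doParallel pattern used in the script above, with a PSOCK cluster so it runs without an MPI installation; the body is a toy stand-in for modelEval().

library(doParallel)
library(foreach)

cl <- parallel::makeCluster(2, type = "PSOCK")  # PSOCK works on any machine
doParallel::registerDoParallel(cl)

# Each iteration executes on a worker; results are combined with rbind.
out <- foreach(jobNum = 1:4, .combine = rbind) %dopar% {
  c(job = jobNum, result = sqrt(jobNum))        # toy stand-in for modelEval()
}

parallel::stopCluster(cl)
out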
library(data.table)
library(xgboost)

#----------------------------------------------------------------#
# Data loading
#----------------------------------------------------------------#

# Train - test data
path.code = "C:/documents/xq.do/Desktop/Kaggle/Zillow_House_Price_Data/"
train = setDT(readRDS(paste0(path.code, "train.RDS")))
test = setDT(readRDS(paste0(path.code, "test.RDS")))

train[, transactiondate := NULL]
test[, transactiondate := NULL]

# Zoning
zonier = fread("C:/documents/xq.do/Desktop/Kaggle/Zillow_House_Price/Excel files/Zonier_XGB.csv")
zonier = subset(zonier, select = c(zip, beta_combination))

train = merge(train, zonier, by.x = "tract.number", by.y = "zip", all.x = T)
test = merge(test, zonier, by.x = "tract.number", by.y = "zip", all.x = T)

train[, beta_combination := ifelse(is.na(beta_combination),
                                   median(beta_combination, na.rm = T),
                                   as.numeric(as.character(beta_combination)))]
test[, beta_combination := ifelse(is.na(beta_combination),
                                  median(beta_combination, na.rm = T),
                                  as.numeric(as.character(beta_combination)))]

# regrouping
train[, beta_combination := ifelse(beta_combination == 1, 2,
                                   ifelse(beta_combination == 5, 4, beta_combination))]
# regrouping
test[, beta_combination := ifelse(beta_combination == 1, 2,
                                  ifelse(beta_combination == 5, 4, beta_combination))]

train[, tract.number := NULL]
test[, tract.number := NULL]

train = train[abs(train$logerror) < 0.2, ]

y.train = train$logerror
x.train = subset(train, select = -c(logerror, parcelid))
x.train = as.matrix(x.train)

y.test = test$logerror
x.test = subset(test, select = -c(logerror, parcelid))
x.test = as.matrix(x.test)

set.seed(1024)
y_mean = mean(y.train)

param <- list(objective = "reg:linear",
              eval_metric = "mae",
              eta = 0.037,
              max_depth = 5,
              subsample = 0.8,
              colsample_bytree = 0.5,
              min_child_weight = 4,
              maximize = FALSE,
              lambda = 0.8,
              alpha = 0.4,
              base_score = y_mean,
              silent = 0)

xgb_cv <- xgb.cv(data = x.train,
                 label = y.train,
                 params = param,
                 nrounds = 400,
                 prediction = TRUE,
                 maximize = FALSE,
                 nfold = 5,
                 print_every_n = 5)

plot(xgb_cv$evaluation_log$train_mae_mean, type = "l")
lines(xgb_cv$evaluation_log$test_mae_mean, type = "l", col = "red")

print(xgb_cv$evaluation_log[which.min(xgb_cv$evaluation_log$test_mae_mean)]) # 0.0402418
round = xgb_cv$evaluation_log[which.min(xgb_cv$evaluation_log$test_mae_mean)]$iter

dtrain = xgb.DMatrix(as.matrix(x.train), label = y.train)
model = xgb.train(param, data = dtrain, nrounds = round)

importance <- xgb.importance(colnames(dtrain), model = model)
xgb.plot.importance(importance, rel_to_first = TRUE, xlab = "Relative importance")

# training mae
xgb.pred = predict(model, new = x.train)
mean(abs(xgb.pred - y.train))

# testing mae
xgb.pred = predict(model, new = x.test)
mean(abs(xgb.pred - y.test))
/Code/7_Xgboost/7_XGB_with_zoning.R
no_license
metariat/Zillow_House_Price
R
false
false
3,412
r
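A small sketch of letting xgboost choose the round count via early stopping, rather than re-reading the CV log by hand as the script above does; the data is synthetic so the snippet is self-contained, and the hyperparameter values are illustrative.

library(xgboost)

set.seed(1024)
x <- matrix(rnorm(2000), ncol = 10)
y <- 0.5 * x[, 1] + rnorm(200, sd = 0.1)

cv <- xgb.cv(params = list(objective = "reg:linear", eval_metric = "mae", eta = 0.1),
             data = x, label = y,
             nrounds = 500, nfold = 5,
             early_stopping_rounds = 20,  # stop once held-out MAE stops improving
             verbose = 0)

cv$best_iteration  # use as nrounds in the final xgb.train() fit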
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/hash_sentiment_jockers_rinker.R
\docType{data}
\name{hash_sentiment_jockers_rinker}
\alias{hash_sentiment_jockers_rinker}
\title{Combined Jockers & Rinker Polarity Lookup Table}
\format{A data frame with 11,710 rows and 2 variables}
\usage{
data(hash_sentiment_jockers_rinker)
}
\description{
A \pkg{data.table} dataset containing a combined and augmented version of
Jockers (2017) & Rinker's augmented Hu & Liu (2004) positive/negative word
list as sentiment lookup values.
}
\details{
\itemize{
  \item x. Words
  \item y. Sentiment
}
}
\references{
Jockers, M. L. (2017). Syuzhet: Extract sentiment and plot arcs from text.
Retrieved from https://github.com/mjockers/syuzhet \cr \cr
Hu, M., & Liu, B. (2004). Mining and summarizing customer reviews.
Proceedings of the ACM SIGKDD International Conference on Knowledge
Discovery & Data Mining (KDD-2004). Seattle, Washington.
}
\keyword{datasets}
/trinker-lexicon-4c5e22b/man/hash_sentiment_jockers_rinker.Rd
no_license
pratyushaj/abusive-language-online
R
false
true
978
rd
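A brief usage sketch for the lookup table documented above, assuming the lexicon package is installed; per the \details section, column x holds words and column y their polarity scores.

library(lexicon)

data(hash_sentiment_jockers_rinker)
head(hash_sentiment_jockers_rinker)  # x = word, y = sentiment value

# data.table-style filtering for a few words of interest
hash_sentiment_jockers_rinker[x %in% c("good", "bad", "terrible")]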
% Generated by roxygen2 (4.1.0): do not edit by hand
% Please edit documentation in R/csv.R
\name{getenumlist}
\alias{getenumlist}
\title{return a list matching vcdb ordering and length with requested object}
\usage{
getenumlist(veris, enum)
}
\arguments{
\item{veris}{a verisr object}

\item{enum}{the field to count}
}
\description{
This will iterate through the veris object and return a list of matches.
This is intended to maintain the original indexes of the veris object so
further manipulation can be done.
}
\details{
Note: Can do a special "industryN" request and it will chop off the
industry at the N value, or return a same-length vector of zeros if it
isn't long enough.
}
/man/getenumlist.Rd
no_license
sajidrahman/verisr
R
false
false
678
rd
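A hedged sketch of calling the function documented above. It assumes a verisr object built from a directory of VERIS JSON incidents (json2veris() is the usual loader); the directory path and enumeration name are placeholders.

library(verisr)

veris <- json2veris("vcdb/")  # placeholder directory of VCDB JSON files

# One entry per incident, preserving VCDB ordering and length
actions <- getenumlist(veris, "action.hacking.variety")
length(actions)               # equals the number of incidents in 'veris'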
library(rtracklayer)
library(dplyr)

## Read the refGene table for hg19
ref <- read.table("raw_data/refGene.txt", header = F, stringsAsFactors = F)
names(ref) <- c("bin","id","chr","strand","txn.start","txn.end","cds.start","cds.end",
                "n.exons","exon.starts","exon.ends","score","symbol","x","xx","exon.frame")

# Remove non-standard chromosomes
ref <- ref %>% filter(!grepl("_", ref$chr))

# Select TSS locations for plus and minus-strand genes
tss.p <- ref %>% filter(strand == "+") %>% select(chr, txn.start, id, score, strand)
tss.m <- ref %>% filter(strand == "-") %>% select(chr, txn.end, id, score, strand)

# Make regions 500 bp around tss in bed-like format
tss.p.500 <- data.frame(chr = tss.p$chr,
                        start = tss.p$txn.start - 500,
                        end = tss.p$txn.start - 500 + 1000,
                        name = tss.p$id,
                        score = tss.p$score,
                        strand = tss.p$strand)
tss.m.500 <- data.frame(chr = tss.m$chr,
                        start = tss.m$txn.end - 500,
                        end = tss.m$txn.end - 500 + 1000,
                        name = tss.m$id,
                        score = tss.m$score,
                        strand = tss.m$strand)

# Convert to GRanges format for comparison to G4 data
tss.p.gr <- GRanges(seqnames = tss.p.500$chr,
                    ranges = IRanges(start = tss.p.500$start, end = tss.p.500$end),
                    strand = tss.p.500$strand,
                    mcols = data.frame(name = tss.p.500$name))
tss.m.gr <- GRanges(seqnames = tss.m.500$chr,
                    ranges = IRanges(start = tss.m.500$start, end = tss.m.500$end),
                    strand = tss.m.500$strand,
                    mcols = data.frame(name = tss.m.500$name))

# Read in the G4 data and overlap with tss regions to get nt and ts g4s
g4.tss.p.nt <- import(con = "raw_data/g4-12_plus.bw", format = "bigWig", which = tss.p.gr)
g4.tss.p.ts <- import(con = "raw_data/g4-12_minus.bw", format = "bigWig", which = tss.p.gr)
g4.tss.m.nt <- import(con = "raw_data/g4-12_minus.bw", format = "bigWig", which = tss.m.gr)
g4.tss.m.ts <- import(con = "raw_data/g4-12_plus.bw", format = "bigWig", which = tss.m.gr)

# Build overlap matrices
ol.tss.p.nt <- as.data.frame(findOverlaps(tss.p.gr, g4.tss.p.nt))
ol.tss.p.ts <- as.data.frame(findOverlaps(tss.p.gr, g4.tss.p.ts))
ol.tss.m.nt <- as.data.frame(findOverlaps(tss.m.gr, g4.tss.m.nt))
ol.tss.m.ts <- as.data.frame(findOverlaps(tss.m.gr, g4.tss.m.ts))
/build_database.R
no_license
hypercompetent/quadrupexeR
R
false
false
2,413
r
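A self-contained sketch of the findOverlaps() step used above, on toy ranges, showing the Hits-to-data.frame shape the script relies on.

suppressPackageStartupMessages(library(GenomicRanges))

query   <- GRanges("chr1", IRanges(start = c(100, 500), width = 50))  # e.g. TSS windows
subject <- GRanges("chr1", IRanges(start = c(120, 900), width = 10))  # e.g. G4 peaks

hits <- findOverlaps(query, subject)
as.data.frame(hits)  # queryHits / subjectHits index pairs, one row per overlap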
## install.packages(c("plyr", "dplyr", "reshape2"))
library(plyr)
library(dplyr)
library(reshape2)

## read meta data
features <- read.table("data/features.txt")
activityLabels <- read.table("data/activity_labels.txt")
names(activityLabels) <- c("activity", "activitylong")

## read test data
xtest <- read.table("data/test/X_test.txt")
ytest <- read.table("data/test/y_test.txt")
subjectTest <- read.table("data/test/subject_test.txt")

## read train data
xtrain <- read.table("data/train/X_train.txt")
ytrain <- read.table("data/train/y_train.txt")
subjectTrain <- read.table("data/train/subject_train.txt")

## consolidate test and train data
testData <- cbind(subjectTest, ytest, xtest)
trainData <- cbind(subjectTrain, ytrain, xtrain)

## add headers
names(testData) <- c("subject", "activity", as.character(features[,2]))
names(trainData) <- c("subject", "activity", as.character(features[,2]))

## combine data
combinedData <- rbind(testData, trainData)

## keep subject, activity, and all mean/std columns
names <- c(1:2, grep("mean|std", features[,2]) + 2)
meanStdData <- combinedData[, names]

## add activity names
meanStdData <- join(meanStdData, activityLabels, by = "activity")
meanStdData$activity <- meanStdData$activitylong
meanStdData <- select(meanStdData, -activitylong)

## label the data set with descriptive variable names
newNames <- names(meanStdData)
newNames <- sub("^t", "time", newNames)
newNames <- sub("^f", "frequency", newNames)
newNames <- sub("Acc", "Accelerometer", newNames)
newNames <- sub("Gyro", "Gyroscope", newNames)
newNames <- sub("Freq", "Frequency", newNames)
newNames <- sub("Mag", "Magnitude", newNames)
newNames <- sub("-mean", "Mean", newNames)
newNames <- sub("-std", "StdDev", newNames)
newNames <- sub("-", "", newNames)
names(meanStdData) <- newNames

## melt the data and dcast to create the summary
melt <- melt(meanStdData, id = c("subject", "activity"),
             measure.vars = names(meanStdData)[3:length(names(meanStdData))])
summary <- dcast(melt, subject + activity ~ variable, mean)

## write the summary to disk (txt and csv)
write.table(summary, "summary.txt", row.names = FALSE)
write.csv(summary, "summary.csv", row.names = FALSE)
/run_analysis.R
no_license
rrj221/getting_cleaning_data_project
R
false
false
2,120
r
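A compact, self-contained sketch of the melt/dcast summarisation idiom used in the script above, on toy data.

library(reshape2)

df <- data.frame(subject  = c(1, 1, 2, 2),
                 activity = c("WALKING", "WALKING", "SITTING", "SITTING"),
                 m1 = c(0.1, 0.3, 0.5, 0.7),
                 m2 = c(1.0, 2.0, 3.0, 4.0))

long <- melt(df, id = c("subject", "activity"))   # one row per (id, measure)
dcast(long, subject + activity ~ variable, mean)  # mean of each measure per group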
assert_raw <- function(x, name = deparse(substitute(x))) {
  if (!is.raw(x)) {
    stop(sprintf("%s must be raw", name), call. = FALSE)
  }
}
/R/util_assert.R
no_license
richfitz/crater
R
false
false
140
r
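A quick demonstration of the assertion helper above; the deparse(substitute(x)) default makes the error message name the caller's variable.

payload <- as.raw(c(0x01, 0x02))
assert_raw(payload)   # passes silently

bad <- "not raw"
try(assert_raw(bad))  # Error: bad must be raw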
#' X11 colors and hexadecimal color values
#'
#' Create a data frame containing information on X11 colors and their
#' corresponding hexadecimal color values.
#' @export
x11_hex <- function() {

  x11_name <- c(
    'aliceblue', 'antiquewhite', 'antiquewhite1', 'antiquewhite2', 'antiquewhite3', 'antiquewhite4',
    'aquamarine', 'aquamarine1', 'aquamarine2', 'aquamarine3', 'aquamarine4',
    'azure', 'azure1', 'azure2', 'azure3', 'azure4', 'beige',
    'bisque', 'bisque1', 'bisque2', 'bisque3', 'bisque4', 'black', 'blanchedalmond',
    'blue', 'blue1', 'blue2', 'blue3', 'blue4', 'blueviolet',
    'brown', 'brown1', 'brown2', 'brown3', 'brown4',
    'burlywood', 'burlywood1', 'burlywood2', 'burlywood3', 'burlywood4',
    'cadetblue', 'cadetblue1', 'cadetblue2', 'cadetblue3', 'cadetblue4',
    'chartreuse', 'chartreuse1', 'chartreuse2', 'chartreuse3', 'chartreuse4',
    'chocolate', 'chocolate1', 'chocolate2', 'chocolate3', 'chocolate4',
    'coral', 'coral1', 'coral2', 'coral3', 'coral4', 'cornflowerblue',
    'cornsilk', 'cornsilk1', 'cornsilk2', 'cornsilk3', 'cornsilk4', 'crimson',
    'cyan', 'cyan1', 'cyan2', 'cyan3', 'cyan4',
    'darkgoldenrod', 'darkgoldenrod1', 'darkgoldenrod2', 'darkgoldenrod3', 'darkgoldenrod4',
    'darkgreen', 'darkkhaki',
    'darkolivegreen', 'darkolivegreen1', 'darkolivegreen2', 'darkolivegreen3', 'darkolivegreen4',
    'darkorange', 'darkorange1', 'darkorange2', 'darkorange3', 'darkorange4',
    'darkorchid', 'darkorchid1', 'darkorchid2', 'darkorchid3', 'darkorchid4', 'darksalmon',
    'darkseagreen', 'darkseagreen1', 'darkseagreen2', 'darkseagreen3', 'darkseagreen4',
    'darkslateblue', 'darkslategray', 'darkslategray1', 'darkslategray2', 'darkslategray3', 'darkslategray4', 'darkslategrey',
    'darkturquoise', 'darkviolet',
    'deeppink', 'deeppink1', 'deeppink2', 'deeppink3', 'deeppink4',
    'deepskyblue', 'deepskyblue1', 'deepskyblue2', 'deepskyblue3', 'deepskyblue4',
    'dimgray', 'dimgrey',
    'dodgerblue', 'dodgerblue1', 'dodgerblue2', 'dodgerblue3', 'dodgerblue4',
    'firebrick', 'firebrick1', 'firebrick2', 'firebrick3', 'firebrick4',
    'floralwhite', 'forestgreen', 'gainsboro', 'ghostwhite',
    'gold', 'gold1', 'gold2', 'gold3', 'gold4',
    'goldenrod', 'goldenrod1', 'goldenrod2', 'goldenrod3', 'goldenrod4',
    'gray', 'gray0', 'gray1', 'gray10', 'gray100', 'gray11', 'gray12', 'gray13', 'gray14', 'gray15', 'gray16', 'gray17', 'gray18', 'gray19',
    'gray2', 'gray20', 'gray21', 'gray22', 'gray23', 'gray24', 'gray25', 'gray26', 'gray27', 'gray28', 'gray29',
    'gray3', 'gray30', 'gray31', 'gray32', 'gray33', 'gray34', 'gray35', 'gray36', 'gray37', 'gray38', 'gray39',
    'gray4', 'gray40', 'gray41', 'gray42', 'gray43', 'gray44', 'gray45', 'gray46', 'gray47', 'gray48', 'gray49',
    'gray5', 'gray50', 'gray51', 'gray52', 'gray53', 'gray54', 'gray55', 'gray56', 'gray57', 'gray58', 'gray59',
    'gray6', 'gray60', 'gray61', 'gray62', 'gray63', 'gray64', 'gray65', 'gray66', 'gray67', 'gray68', 'gray69',
    'gray7', 'gray70', 'gray71', 'gray72', 'gray73', 'gray74', 'gray75', 'gray76', 'gray77', 'gray78', 'gray79',
    'gray8', 'gray80', 'gray81', 'gray82', 'gray83', 'gray84', 'gray85', 'gray86', 'gray87', 'gray88', 'gray89',
    'gray9', 'gray90', 'gray91', 'gray92', 'gray93', 'gray94', 'gray95', 'gray96', 'gray97', 'gray98', 'gray99',
    'green', 'green1', 'green2', 'green3', 'green4', 'greenyellow',
    'grey', 'grey0', 'grey1', 'grey10', 'grey100', 'grey11', 'grey12', 'grey13', 'grey14', 'grey15', 'grey16', 'grey17', 'grey18', 'grey19',
    'grey2', 'grey20', 'grey21', 'grey22', 'grey23', 'grey24', 'grey25', 'grey26', 'grey27', 'grey28', 'grey29',
    'grey3', 'grey30', 'grey31', 'grey32', 'grey33',
'grey34', 'grey35', 'grey36', 'grey37', 'grey38', 'grey39', 'grey4', 'grey40', 'grey41', 'grey42', 'grey43', 'grey44', 'grey45', 'grey46', 'grey47', 'grey48', 'grey49', 'grey5', 'grey50', 'grey51', 'grey52', 'grey53', 'grey54', 'grey55', 'grey56', 'grey57', 'grey58', 'grey59', 'grey6', 'grey60', 'grey61', 'grey62', 'grey63', 'grey64', 'grey65', 'grey66', 'grey67', 'grey68', 'grey69', 'grey7', 'grey70', 'grey71', 'grey72', 'grey73', 'grey74', 'grey75', 'grey76', 'grey77', 'grey78', 'grey79', 'grey8', 'grey80', 'grey81', 'grey82', 'grey83', 'grey84', 'grey85', 'grey86', 'grey87', 'grey88', 'grey89', 'grey9', 'grey90', 'grey91', 'grey92', 'grey93', 'grey94', 'grey95', 'grey96', 'grey97', 'grey98', 'grey99', 'honeydew', 'honeydew1', 'honeydew2', 'honeydew3', 'honeydew4', 'hotpink', 'hotpink1', 'hotpink2', 'hotpink3', 'hotpink4', 'indianred', 'indianred1', 'indianred2', 'indianred3', 'indianred4', 'indigo', 'invis', 'ivory', 'ivory1', 'ivory2', 'ivory3', 'ivory4', 'khaki', 'khaki1', 'khaki2', 'khaki3', 'khaki4', 'lavender', 'lavenderblush', 'lavenderblush1', 'lavenderblush2', 'lavenderblush3', 'lavenderblush4', 'lawngreen', 'lemonchiffon', 'lemonchiffon1', 'lemonchiffon2', 'lemonchiffon3', 'lemonchiffon4', 'lightblue', 'lightblue1', 'lightblue2', 'lightblue3', 'lightblue4', 'lightcoral', 'lightcyan', 'lightcyan1', 'lightcyan2', 'lightcyan3', 'lightcyan4', 'lightgoldenrod', 'lightgoldenrod1', 'lightgoldenrod2', 'lightgoldenrod3', 'lightgoldenrod4', 'lightgoldenrodyellow', 'lightgray', 'lightgrey', 'lightpink', 'lightpink1', 'lightpink2', 'lightpink3', 'lightpink4', 'lightsalmon', 'lightsalmon1', 'lightsalmon2', 'lightsalmon3', 'lightsalmon4', 'lightseagreen', 'lightskyblue', 'lightskyblue1', 'lightskyblue2', 'lightskyblue3', 'lightskyblue4', 'lightslateblue', 'lightslategray', 'lightslategrey', 'lightsteelblue', 'lightsteelblue1', 'lightsteelblue2', 'lightsteelblue3', 'lightsteelblue4', 'lightyellow', 'lightyellow1', 'lightyellow2', 'lightyellow3', 'lightyellow4', 'limegreen', 'linen', 'magenta', 'magenta1', 'magenta2', 'magenta3', 'magenta4', 'maroon', 'maroon1', 'maroon2', 'maroon3', 'maroon4', 'mediumaquamarine', 'mediumblue', 'mediumorchid', 'mediumorchid1', 'mediumorchid2', 'mediumorchid3', 'mediumorchid4', 'mediumpurple', 'mediumpurple1', 'mediumpurple2', 'mediumpurple3', 'mediumpurple4', 'mediumseagreen', 'mediumslateblue', 'mediumspringgreen', 'mediumturquoise', 'mediumvioletred', 'midnightblue', 'mintcream', 'mistyrose', 'mistyrose1', 'mistyrose2', 'mistyrose3', 'mistyrose4', 'moccasin', 'navajowhite', 'navajowhite1', 'navajowhite2', 'navajowhite3', 'navajowhite4', 'navy', 'navyblue', 'none', 'oldlace', 'olivedrab', 'olivedrab1', 'olivedrab2', 'olivedrab3', 'olivedrab4', 'orange', 'orange1', 'orange2', 'orange3', 'orange4', 'orangered', 'orangered1', 'orangered2', 'orangered3', 'orangered4', 'orchid', 'orchid1', 'orchid2', 'orchid3', 'orchid4', 'palegoldenrod', 'palegreen', 'palegreen1', 'palegreen2', 'palegreen3', 'palegreen4', 'paleturquoise', 'paleturquoise1', 'paleturquoise2', 'paleturquoise3', 'paleturquoise4', 'palevioletred', 'palevioletred1', 'palevioletred2', 'palevioletred3', 'palevioletred4', 'papayawhip', 'peachpuff', 'peachpuff1', 'peachpuff2', 'peachpuff3', 'peachpuff4', 'peru', 'pink', 'pink1', 'pink2', 'pink3', 'pink4', 'plum', 'plum1', 'plum2', 'plum3', 'plum4', 'powderblue', 'purple', 'purple1', 'purple2', 'purple3', 'purple4', 'red', 'red1', 'red2', 'red3', 'red4', 'rosybrown', 'rosybrown1', 'rosybrown2', 'rosybrown3', 'rosybrown4', 'royalblue', 'royalblue1', 
    'royalblue2', 'royalblue3', 'royalblue4', 'saddlebrown',
    'salmon', 'salmon1', 'salmon2', 'salmon3', 'salmon4', 'sandybrown',
    'seagreen', 'seagreen1', 'seagreen2', 'seagreen3', 'seagreen4',
    'seashell', 'seashell1', 'seashell2', 'seashell3', 'seashell4',
    'sienna', 'sienna1', 'sienna2', 'sienna3', 'sienna4',
    'skyblue', 'skyblue1', 'skyblue2', 'skyblue3', 'skyblue4',
    'slateblue', 'slateblue1', 'slateblue2', 'slateblue3', 'slateblue4',
    'slategray', 'slategray1', 'slategray2', 'slategray3', 'slategray4', 'slategrey',
    'snow', 'snow1', 'snow2', 'snow3', 'snow4',
    'springgreen', 'springgreen1', 'springgreen2', 'springgreen3', 'springgreen4',
    'steelblue', 'steelblue1', 'steelblue2', 'steelblue3', 'steelblue4',
    'tan', 'tan1', 'tan2', 'tan3', 'tan4',
    'thistle', 'thistle1', 'thistle2', 'thistle3', 'thistle4',
    'tomato', 'tomato1', 'tomato2', 'tomato3', 'tomato4', 'transparent',
    'turquoise', 'turquoise1', 'turquoise2', 'turquoise3', 'turquoise4',
    'violet', 'violetred', 'violetred1', 'violetred2', 'violetred3', 'violetred4',
    'wheat', 'wheat1', 'wheat2', 'wheat3', 'wheat4', 'white', 'whitesmoke',
    'yellow', 'yellow1', 'yellow2', 'yellow3', 'yellow4', 'yellowgreen')

  hex <- c(
    '#f0f8ff', '#faebd7', '#ffefdb', '#eedfcc', '#cdc0b0', '#8b8378',
    '#7fffd4', '#7fffd4', '#76eec6', '#66cdaa', '#458b74',
    '#f0ffff', '#f0ffff', '#e0eeee', '#c1cdcd', '#838b8b', '#f5f5dc',
    '#ffe4c4', '#ffe4c4', '#eed5b7', '#cdb79e', '#8b7d6b', '#000000', '#ffebcd',
    '#0000ff', '#0000ff', '#0000ee', '#0000cd', '#00008b', '#8a2be2',
    '#a52a2a', '#ff4040', '#ee3b3b', '#cd3333', '#8b2323',
    '#deb887', '#ffd39b', '#eec591', '#cdaa7d', '#8b7355',
    '#5f9ea0', '#98f5ff', '#8ee5ee', '#7ac5cd', '#53868b',
    '#7fff00', '#7fff00', '#76ee00', '#66cd00', '#458b00',
    '#d2691e', '#ff7f24', '#ee7621', '#cd661d', '#8b4513',
    '#ff7f50', '#ff7256', '#ee6a50', '#cd5b45', '#8b3e2f', '#6495ed',
    '#fff8dc', '#fff8dc', '#eee8cd', '#cdc8b1', '#8b8878', '#dc143c',
    '#00ffff', '#00ffff', '#00eeee', '#00cdcd', '#008b8b',
    '#b8860b', '#ffb90f', '#eead0e', '#cd950c', '#8b6508',
    '#006400', '#bdb76b',
    '#556b2f', '#caff70', '#bcee68', '#a2cd5a', '#6e8b3d',
    '#ff8c00', '#ff7f00', '#ee7600', '#cd6600', '#8b4500',
    '#9932cc', '#bf3eff', '#b23aee', '#9a32cd', '#68228b', '#e9967a',
    '#8fbc8f', '#c1ffc1', '#b4eeb4', '#9bcd9b', '#698b69',
    '#483d8b', '#2f4f4f', '#97ffff', '#8deeee', '#79cdcd', '#528b8b', '#2f4f4f',
    '#00ced1', '#9400d3',
    '#ff1493', '#ff1493', '#ee1289', '#cd1076', '#8b0a50',
    '#00bfff', '#00bfff', '#00b2ee', '#009acd', '#00688b',
    '#696969', '#696969',
    '#1e90ff', '#1e90ff', '#1c86ee', '#1874cd', '#104e8b',
    '#b22222', '#ff3030', '#ee2c2c', '#cd2626', '#8b1a1a',
    '#fffaf0', '#228b22', '#dcdcdc', '#f8f8ff',
    '#ffd700', '#ffd700', '#eec900', '#cdad00', '#8b7500',
    '#daa520', '#ffc125', '#eeb422', '#cd9b1d', '#8b6914',
    '#c0c0c0', '#000000', '#030303', '#1a1a1a', '#ffffff', '#1c1c1c', '#1f1f1f', '#212121', '#242424', '#262626', '#292929', '#2b2b2b', '#2e2e2e', '#303030',
    '#050505', '#333333', '#363636', '#383838', '#3b3b3b', '#3d3d3d', '#404040', '#424242', '#454545', '#474747', '#4a4a4a',
    '#080808', '#4d4d4d', '#4f4f4f', '#525252', '#545454', '#575757', '#595959', '#5c5c5c', '#5e5e5e', '#616161', '#636363',
    '#0a0a0a', '#666666', '#696969', '#6b6b6b', '#6e6e6e', '#707070', '#737373', '#757575', '#787878', '#7a7a7a', '#7d7d7d',
    '#0d0d0d', '#7f7f7f', '#828282', '#858585', '#878787', '#8a8a8a', '#8c8c8c', '#8f8f8f', '#919191', '#949494', '#969696',
    '#0f0f0f', '#999999', '#9c9c9c', '#9e9e9e', '#a1a1a1', '#a3a3a3', '#a6a6a6', '#a8a8a8', '#ababab', '#adadad', '#b0b0b0',
    '#121212', '#b3b3b3',
'#b5b5b5', '#b8b8b8', '#bababa', '#bdbdbd', '#bfbfbf', '#c2c2c2', '#c4c4c4', '#c7c7c7', '#c9c9c9', '#141414', '#cccccc', '#cfcfcf', '#d1d1d1', '#d4d4d4', '#d6d6d6', '#d9d9d9', '#dbdbdb', '#dedede', '#e0e0e0', '#e3e3e3', '#171717', '#e5e5e5', '#e8e8e8', '#ebebeb', '#ededed', '#f0f0f0', '#f2f2f2', '#f5f5f5', '#f7f7f7', '#fafafa', '#fcfcfc', '#00ff00', '#00ff00', '#00ee00', '#00cd00', '#008b00', '#adff2f', '#c0c0c0', '#000000', '#030303', '#1a1a1a', '#ffffff', '#1c1c1c', '#1f1f1f', '#212121', '#242424', '#262626', '#292929', '#2b2b2b', '#2e2e2e', '#303030', '#050505', '#333333', '#363636', '#383838', '#3b3b3b', '#3d3d3d', '#404040', '#424242', '#454545', '#474747', '#4a4a4a', '#080808', '#4d4d4d', '#4f4f4f', '#525252', '#545454', '#575757', '#595959', '#5c5c5c', '#5e5e5e', '#616161', '#636363', '#0a0a0a', '#666666', '#696969', '#6b6b6b', '#6e6e6e', '#707070', '#737373', '#757575', '#787878', '#7a7a7a', '#7d7d7d', '#0d0d0d', '#7f7f7f', '#828282', '#858585', '#878787', '#8a8a8a', '#8c8c8c', '#8f8f8f', '#919191', '#949494', '#969696', '#0f0f0f', '#999999', '#9c9c9c', '#9e9e9e', '#a1a1a1', '#a3a3a3', '#a6a6a6', '#a8a8a8', '#ababab', '#adadad', '#b0b0b0', '#121212', '#b3b3b3', '#b5b5b5', '#b8b8b8', '#bababa', '#bdbdbd', '#bfbfbf', '#c2c2c2', '#c4c4c4', '#c7c7c7', '#c9c9c9', '#141414', '#cccccc', '#cfcfcf', '#d1d1d1', '#d4d4d4', '#d6d6d6', '#d9d9d9', '#dbdbdb', '#dedede', '#e0e0e0', '#e3e3e3', '#171717', '#e5e5e5', '#e8e8e8', '#ebebeb', '#ededed', '#f0f0f0', '#f2f2f2', '#f5f5f5', '#f7f7f7', '#fafafa', '#fcfcfc', '#f0fff0', '#f0fff0', '#e0eee0', '#c1cdc1', '#838b83', '#ff69b4', '#ff6eb4', '#ee6aa7', '#cd6090', '#8b3a62', '#cd5c5c', '#ff6a6a', '#ee6363', '#cd5555', '#8b3a3a', '#4b0082', '#fffffe', '#fffff0', '#fffff0', '#eeeee0', '#cdcdc1', '#8b8b83', '#f0e68c', '#fff68f', '#eee685', '#cdc673', '#8b864e', '#e6e6fa', '#fff0f5', '#fff0f5', '#eee0e5', '#cdc1c5', '#8b8386', '#7cfc00', '#fffacd', '#fffacd', '#eee9bf', '#cdc9a5', '#8b8970', '#add8e6', '#bfefff', '#b2dfee', '#9ac0cd', '#68838b', '#f08080', '#e0ffff', '#e0ffff', '#d1eeee', '#b4cdcd', '#7a8b8b', '#eedd82', '#ffec8b', '#eedc82', '#cdbe70', '#8b814c', '#fafad2', '#d3d3d3', '#d3d3d3', '#ffb6c1', '#ffaeb9', '#eea2ad', '#cd8c95', '#8b5f65', '#ffa07a', '#ffa07a', '#ee9572', '#cd8162', '#8b5742', '#20b2aa', '#87cefa', '#b0e2ff', '#a4d3ee', '#8db6cd', '#607b8b', '#8470ff', '#778899', '#778899', '#b0c4de', '#cae1ff', '#bcd2ee', '#a2b5cd', '#6e7b8b', '#ffffe0', '#ffffe0', '#eeeed1', '#cdcdb4', '#8b8b7a', '#32cd32', '#faf0e6', '#ff00ff', '#ff00ff', '#ee00ee', '#cd00cd', '#8b008b', '#b03060', '#ff34b3', '#ee30a7', '#cd2990', '#8b1c62', '#66cdaa', '#0000cd', '#ba55d3', '#e066ff', '#d15fee', '#b452cd', '#7a378b', '#9370db', '#ab82ff', '#9f79ee', '#8968cd', '#5d478b', '#3cb371', '#7b68ee', '#00fa9a', '#48d1cc', '#c71585', '#191970', '#f5fffa', '#ffe4e1', '#ffe4e1', '#eed5d2', '#cdb7b5', '#8b7d7b', '#ffe4b5', '#ffdead', '#ffdead', '#eecfa1', '#cdb38b', '#8b795e', '#000080', '#000080', '#fffffe', '#fdf5e6', '#6b8e23', '#c0ff3e', '#b3ee3a', '#9acd32', '#698b22', '#ffa500', '#ffa500', '#ee9a00', '#cd8500', '#8b5a00', '#ff4500', '#ff4500', '#ee4000', '#cd3700', '#8b2500', '#da70d6', '#ff83fa', '#ee7ae9', '#cd69c9', '#8b4789', '#eee8aa', '#98fb98', '#9aff9a', '#90ee90', '#7ccd7c', '#548b54', '#afeeee', '#bbffff', '#aeeeee', '#96cdcd', '#668b8b', '#db7093', '#ff82ab', '#ee799f', '#cd6889', '#8b475d', '#ffefd5', '#ffdab9', '#ffdab9', '#eecbad', '#cdaf95', '#8b7765', '#cd853f', '#ffc0cb', '#ffb5c5', '#eea9b8', '#cd919e', '#8b636c', '#dda0dd', '#ffbbff', '#eeaeee', 
    '#cd96cd', '#8b668b', '#b0e0e6',
    '#a020f0', '#9b30ff', '#912cee', '#7d26cd', '#551a8b',
    '#ff0000', '#ff0000', '#ee0000', '#cd0000', '#8b0000',
    '#bc8f8f', '#ffc1c1', '#eeb4b4', '#cd9b9b', '#8b6969',
    '#4169e1', '#4876ff', '#436eee', '#3a5fcd', '#27408b', '#8b4513',
    '#fa8072', '#ff8c69', '#ee8262', '#cd7054', '#8b4c39', '#f4a460',
    '#2e8b57', '#54ff9f', '#4eee94', '#43cd80', '#2e8b57',
    '#fff5ee', '#fff5ee', '#eee5de', '#cdc5bf', '#8b8682',
    '#a0522d', '#ff8247', '#ee7942', '#cd6839', '#8b4726',
    '#87ceeb', '#87ceff', '#7ec0ee', '#6ca6cd', '#4a708b',
    '#6a5acd', '#836fff', '#7a67ee', '#6959cd', '#473c8b',
    '#708090', '#c6e2ff', '#b9d3ee', '#9fb6cd', '#6c7b8b', '#708090',
    '#fffafa', '#fffafa', '#eee9e9', '#cdc9c9', '#8b8989',
    '#00ff7f', '#00ff7f', '#00ee76', '#00cd66', '#008b45',
    '#4682b4', '#63b8ff', '#5cacee', '#4f94cd', '#36648b',
    '#d2b48c', '#ffa54f', '#ee9a49', '#cd853f', '#8b5a2b',
    '#d8bfd8', '#ffe1ff', '#eed2ee', '#cdb5cd', '#8b7b8b',
    '#ff6347', '#ff6347', '#ee5c42', '#cd4f39', '#8b3626', '#fffffe',
    '#40e0d0', '#00f5ff', '#00e5ee', '#00c5cd', '#00868b',
    '#ee82ee', '#d02090', '#ff3e96', '#ee3a8c', '#cd3278', '#8b2252',
    '#f5deb3', '#ffe7ba', '#eed8ae', '#cdba96', '#8b7e66',
    '#ffffff', '#f5f5f5',
    '#ffff00', '#ffff00', '#eeee00', '#cdcd00', '#8b8b00', '#9acd32')

  data.frame(
    x11_name,
    hex,
    stringsAsFactors = FALSE)
}
/R/x11_hex.R
permissive
vnijs/DiagrammeR
R
false
false
16,730
r
## Hallmann 2017
setwd("~/Documents/Uni/Umwi/M.sc./Repro")
load("full_basic")

library(R2jags)
library(RCurl) # used for loading data from github
library(ggplot2)
library(dplyr)
library(tidyr)
library(purrr)

# ---- Load ----
# Read the data
url_1 <- getURL("https://raw.githubusercontent.com/leonthoma/Repro/master/Hallmann_s1_data.csv")
url_2 <- getURL("https://raw.githubusercontent.com/leonthoma/Repro/master/Hallmann_s2_data.csv")

data <- read.csv(text = url_1, header = TRUE, sep = ",", stringsAsFactors = T)
model.frame <- read.csv(text = url_2, header = TRUE, sep = ",", stringsAsFactors = T)

# summary(data)
# summary(model.frame)

# ---- Original basic model ----
jagsdataBasic <- list(
  m_bio = data$biomass,
  tau1 = with(model.frame, tapply(1:nrow(model.frame), potID, min)),
  tau2 = with(model.frame, tapply(1:nrow(model.frame), potID, max)),
  plot = as.numeric(model.frame$plot),
  # map each daily record to the location type of its pot; indexed via
  # model.frame so that loctype has length ndaily, as the JAGS model expects
  loctype = as.numeric(data$location.type[match(model.frame$potID, data$potID)]),
  daynr = as.numeric((model.frame$daynr - mean(data$mean.daynr)) / sd(data$mean.daynr)),
  daynr2 = as.numeric((model.frame$daynr - mean(data$mean.daynr)) / sd(data$mean.daynr))^2,
  year = model.frame$year - 1988,
  ndaily = nrow(model.frame),
  n = nrow(data),
  nrandom = max(as.numeric(model.frame$plot))
)

parametersBasic <- c("g_intcp", "log.lambda", "b", "c", "eps", "sdhat", "sd.re")

oldjagsmodBasic <- jags(jagsdataBasic, inits = NULL, parametersBasic,
                        "BasicModel.jag", n.iter = 12000, n.burnin = 2000,
                        n.chains = 3, n.thin = 10)

# Get mean parameter values from posterior
parms_mean <- oldjagsmodBasic$BUGSoutput$mean

# 2.5 % credible interval
parms_mean_lo <- list("b" = as.numeric(oldjagsmodBasic$BUGSoutput$summary[1:3, 3]),
                      "c" = as.numeric(oldjagsmodBasic$BUGSoutput$summary[4:7, 3]),
                      "deviance" = oldjagsmodBasic$BUGSoutput$summary[8, 3],
                      "eps" = as.numeric(oldjagsmodBasic$BUGSoutput$summary[9:71, 3]),
                      "g_intcp" = oldjagsmodBasic$BUGSoutput$summary[72, 3],
                      "log.lambda" = oldjagsmodBasic$BUGSoutput$summary[73, 3],
                      "sd.re" = oldjagsmodBasic$BUGSoutput$summary[74, 3],
                      "sdhat" = oldjagsmodBasic$BUGSoutput$summary[75, 3])

# 97.5 % credible interval
parms_mean_hi <- list("b" = as.numeric(oldjagsmodBasic$BUGSoutput$summary[1:3, 7]),
                      "c" = as.numeric(oldjagsmodBasic$BUGSoutput$summary[4:7, 7]),
                      "deviance" = oldjagsmodBasic$BUGSoutput$summary[8, 7],
                      "eps" = as.numeric(oldjagsmodBasic$BUGSoutput$summary[9:71, 7]),
                      "g_intcp" = oldjagsmodBasic$BUGSoutput$summary[72, 7],
                      "log.lambda" = oldjagsmodBasic$BUGSoutput$summary[73, 7],
                      "sd.re" = oldjagsmodBasic$BUGSoutput$summary[74, 7],
                      "sdhat" = oldjagsmodBasic$BUGSoutput$summary[75, 7])

# ---- Re-Analysis ----
# ---- Data wrangling ----
# Filter biomass data (only the first data set of every plot)
py <- select(data, c(plot, year)) %>%
  group_by(plot) %>%
  distinct(plot, year) %>%
  arrange(plot, year) # overview of plot by year

new_plots <- group_by(py, plot) %>%
  slice(1) # get first sampling year of each plot

# Create new dfs
new_data <- semi_join(data, new_plots, by = c("plot", "year"))
new_model.frame <- semi_join(model.frame, new_plots, by = c("plot", "year"))

# ---- Basic Model ----
newjagsdataBasic <- list(
  m_bio = new_data$biomass,
  tau1 = with(new_model.frame, tapply(1:nrow(new_model.frame), potID, min)),
  tau2 = with(new_model.frame, tapply(1:nrow(new_model.frame), potID, max)),
  plot = as.numeric(new_model.frame$plot),
  loctype = as.numeric(new_data$location.type[match(new_model.frame$potID, new_data$potID)]),
  daynr = as.numeric((new_model.frame$daynr - mean(new_data$mean.daynr)) / sd(new_data$mean.daynr)),
  daynr2 = as.numeric((new_model.frame$daynr - mean(new_data$mean.daynr)) / sd(new_data$mean.daynr))^2,
  year = new_model.frame$year - 1988,
  ndaily = nrow(new_model.frame),
  n = nrow(new_data),
  nrandom = max(as.numeric(new_model.frame$plot))
)

# Jags model
## NEEDS TO RUN ONCE TO CREATE .JAGS FILE
## SET WD TO APPROPRIATE DIRECTORY
{
# sink("BasicModel.jag")
# cat("model{
#   ## Likelihood function for the latent expected daily biomass
#   for (i in 1:n) {
#     m_bio[i] ~ dnorm(sum(z[tau1[i]:tau2[i]]), sig_sq[i])
#     sig_sq[i] <- 1/Var[i]
#     Var[i] <- sum(vr[tau1[i]:tau2[i]])
#   }
#
#   ## Likelihood function for muHat, its dependent function and variance
#   for (i in 1:ndaily) {
#     z[i] <- exp(y[i])
#     y[i] <- g_intcp + log.lambda * year[i] + c[1] * daynr[i] + c[2] * daynr2[i] +
#       c[3] * daynr[i] * year[i] + c[4] * daynr2[i] * year[i] + b[loctype[i]] +
#       eps[plot[i]]
#     vr[i] <- exp(2 * y[i] + lvar) * (exp(lvar) - 1)
#   }
#
#   ## Priors
#   g_intcp ~ dnorm(0, .01)
#   log.lambda ~ dnorm(0, .01)
#   b[1] <- 0
#   for( i in 2:3) {b[i] ~ dnorm(0, .01)}
#   for( i in 1:4) {c[i] ~ dnorm(0, .01)}
#   sdhat ~ dunif(0, 5)
#   lvar <- pow(sdhat, 2)
#   for (i in 1:nrandom) {
#     eps[i] ~ dnorm(0, tau.re)
#   }
#   tau.re <- pow(sd.re, -2)
#   sd.re ~ dunif(0, 1)
# }
# ")
# sink(NULL)
}

# Run the model
jagsmodBasic <- jags(newjagsdataBasic, inits = NULL, parametersBasic,
                     "BasicModel.jag", n.iter = 12000, n.burnin = 2000,
                     n.chains = 3, n.thin = 10)
jagsmodBasic

# ---- Diagnostics ----
# (a hedged convergence-check sketch is appended at the end of this script)

# ---- Calculate predicted values ----
# Get mean parameter values from posterior
new_parms_mean <- jagsmodBasic$BUGSoutput$mean

# 2.5 % credible interval
new_parms_mean_lo <- list("b" = as.numeric(jagsmodBasic$BUGSoutput$summary[1:3, 3]),
                          "c" = as.numeric(jagsmodBasic$BUGSoutput$summary[4:7, 3]),
                          "deviance" = jagsmodBasic$BUGSoutput$summary[8, 3],
                          "eps" = as.numeric(jagsmodBasic$BUGSoutput$summary[9:71, 3]),
                          "g_intcp" = jagsmodBasic$BUGSoutput$summary[72, 3],
                          "log.lambda" = jagsmodBasic$BUGSoutput$summary[73, 3],
                          "sd.re" = jagsmodBasic$BUGSoutput$summary[74, 3],
                          "sdhat" = jagsmodBasic$BUGSoutput$summary[75, 3])

# 97.5 % credible interval
new_parms_mean_hi <- list("b" = as.numeric(jagsmodBasic$BUGSoutput$summary[1:3, 7]),
                          "c" = as.numeric(jagsmodBasic$BUGSoutput$summary[4:7, 7]),
                          "deviance" = jagsmodBasic$BUGSoutput$summary[8, 7],
                          "eps" = as.numeric(jagsmodBasic$BUGSoutput$summary[9:71, 7]),
                          "g_intcp" = jagsmodBasic$BUGSoutput$summary[72, 7],
                          "log.lambda" = jagsmodBasic$BUGSoutput$summary[73, 7],
                          "sd.re" = jagsmodBasic$BUGSoutput$summary[74, 7],
                          "sdhat" = jagsmodBasic$BUGSoutput$summary[75, 7])

# Set tau & intervals of days, original data
t_1 <- as.vector(jagsdataBasic$tau1[1:nrow(data)])
t_2 <- as.vector(jagsdataBasic$tau2[1:nrow(data)])
ints <- map(1:nrow(data), ~ seq(t_1[.x], t_2[.x]))

# Set year intervals, original data
t_1_y <- with(data, tapply(1:nrow(data), year, min))
t_2_y <- with(data, tapply(1:nrow(data), year, max))
ints_y <- map(1:length(t_1_y), ~ seq(t_1_y[.x], t_2_y[.x]))

# Set tau & intervals of days
new_t_1 <- as.vector(newjagsdataBasic$tau1[1:nrow(new_data)])
new_t_2 <- as.vector(newjagsdataBasic$tau2[1:nrow(new_data)])
new_ints <- map(1:nrow(new_data), ~ seq(new_t_1[.x], new_t_2[.x]))

# Set year intervals
new_t_1_y <- with(new_data, tapply(1:nrow(new_data), year, min))
new_t_2_y <- with(new_data, tapply(1:nrow(new_data), year, max))
new_ints_y <- map(1:length(new_t_1_y), ~ seq(new_t_1_y[.x], new_t_2_y[.x]))

# Helper functions to calculate predicted biomass
y <- function(x, model, cred) {
  data <- switch(model, new = newjagsdataBasic, old = jagsdataBasic)
  if (model == "new") {
    parms_mean <- switch(cred, base = new_parms_mean, low = new_parms_mean_lo,
                         high = new_parms_mean_hi)
  } else {
    parms_mean <- switch(cred, base = parms_mean, low = parms_mean_lo,
                         high = parms_mean_hi)
  }
  parms_mean$g_intcp + parms_mean$log.lambda * data$year[x] +
    parms_mean$c[1] * data$daynr[x] + parms_mean$c[2] * data$daynr2[x] +
    parms_mean$c[3] * data$daynr[x] * data$year[x] +
    parms_mean$c[4] * data$daynr2[x] * data$year[x] +
    parms_mean$b[data$loctype[x]] + parms_mean$eps[data$plot[x]]
}

z <- function(x, ...) exp(y(x, ...))

# Calculate predicted biomass
# Old model
z_val <- unlist(map(1:nrow(data), ~ mean(z(unlist(ints[.x]), "old", "base"))))
z_val_y <- unlist(map(1:length(ints_y), ~ mean(z_val[unlist(ints_y[.x])])))

# 2.5 % credible interval
z_val_lo <- unlist(map(1:nrow(data), ~ mean(z(unlist(ints[.x]), "old", "low"))))
z_val_y_lo <- unlist(map(1:length(ints_y), ~ mean(z_val_lo[unlist(ints_y[.x])])))

# 97.5 % credible interval
z_val_hi <- unlist(map(1:nrow(data), ~ mean(z(unlist(ints[.x]), "old", "high"))))
z_val_y_hi <- unlist(map(1:length(ints_y), ~ mean(z_val_hi[unlist(ints_y[.x])])))

# New model
new_z_val <- unlist(map(1:nrow(new_data), ~ mean(z(unlist(new_ints[.x]), "new", "base"))))
new_z_val_y <- unlist(map(1:length(new_ints_y), ~ mean(new_z_val[unlist(new_ints_y[.x])])))

# 2.5 % credible interval
new_z_val_lo <- unlist(map(1:nrow(new_data), ~ mean(z(unlist(new_ints[.x]), "new", "low"))))
new_z_val_y_lo <- unlist(map(1:length(new_ints_y), ~ mean(new_z_val_lo[unlist(new_ints_y[.x])])))

# 97.5 % credible interval
new_z_val_hi <- unlist(map(1:nrow(new_data), ~ mean(z(unlist(new_ints[.x]), "new", "high"))))
new_z_val_y_hi <- unlist(map(1:length(new_ints_y), ~ mean(new_z_val_hi[unlist(new_ints_y[.x])])))

# Create initial dfs
m_bio_df <- data.frame("year" = 1:26)     # original
new_m_bio_df <- data.frame("year" = 1:26) # new

# Fill dfs; set missing years to previous val
fill <- function(model, cred) {
  data <- c(rep(NA, 26))
  if (model == "new") {
    z_val_y <- switch(cred, base = new_z_val_y, low = new_z_val_y_lo,
                      high = new_z_val_y_hi)
  } else {
    z_val_y <- switch(cred, base = z_val_y, low = z_val_y_lo, high = z_val_y_hi)
  }

  # Fill vals of years with data present
  data[1:7] <- z_val_y[1:7]
  data[9] <- z_val_y[8]
  data[11:13] <- z_val_y[9:11]
  data[15:26] <- z_val_y[12:23]

  # Fill vals of years with data absent
  data[8] <- data[7]
  data[10] <- data[9]
  data[14] <- data[13]

  return(data)
} # helper function

new_m_bio_df$m_bio <- fill("new", "base")
new_m_bio_df$m_bio_lo <- fill("new", "low")
new_m_bio_df$m_bio_hi <- fill("new", "high")

m_bio_df$m_bio <- fill("old", "base")
m_bio_df$m_bio_lo <- fill("old", "low")
m_bio_df$m_bio_hi <- fill("old", "high")

# ---- Visualization ----
# Custom palette to match colors from paper
my_pal <- c("#f46d43", "#fdae61", "#fee090", "#ffffbf", "#e0f3f8", "#abd9e9",
            "#74add1", "#4575b4")

## Recreate Fig. 2a
tot_bm_data <- mutate(new_data, bm_p_day = biomass / (to.daynr - from.daynr)) %>%
  select(year, bm_p_day)

ggplot(tot_bm_data, aes(x = factor(year, levels = seq(1989, 2014)),
                        y = bm_p_day, fill = year)) +
  geom_boxplot(outlier.alpha = .4, outlier.shape = 1) +
  scale_fill_gradient2(low = "#4575b4", mid = "#fee090", high = "#f46d43",
                       na.value = "grey50", midpoint = 2005, guide = "none") +
  geom_abline(intercept = parms_mean$g_intcp, slope = parms_mean$log.lambda) +
  geom_line(aes(y = m_bio, x = year), data = m_bio_df, color = "grey",
            inherit.aes = F) +
  geom_line(aes(y = m_bio_lo, x = year), data = m_bio_df, color = "grey",
            linetype = 2, inherit.aes = F) +
  geom_line(aes(y = m_bio_hi, x = year), data = m_bio_df, color = "grey",
            linetype = 2, inherit.aes = F) +
  geom_line(aes(y = m_bio, x = year), data = new_m_bio_df, color = "red",
            inherit.aes = F) +
  geom_line(aes(y = m_bio_lo, x = year), data = new_m_bio_df, color = "red",
            linetype = 2, inherit.aes = F) +
  geom_line(aes(y = m_bio_hi, x = year), data = new_m_bio_df, color = "red",
            linetype = 2, inherit.aes = F) +
  #geom_jitter(width = .1, alpha = .2) + # optional: datapoints
  scale_x_discrete(breaks = seq(1990, 2015, by = 5), drop = F) +
  scale_y_continuous(trans = "log",
                     breaks = c(0.01, .02, .05, 0.1, 0.2, 0.5, 1, 2, 5, 10, 20, 50)) +
  theme_classic() +
  labs(y = "Biomass [g/d]", x = "Year")

## Recreate Fig. 2b
# Season from 1st of April to 30th of October; i.e. day 91 to 303
s_bm_data <- filter(data, from.daynr >= 91 & to.daynr <= 303) %>%
  mutate(bm_p_day = biomass / (to.daynr - from.daynr)) %>%
  select(year, bm_p_day, mean.daynr)

ggplot(s_bm_data) +
  geom_point(aes(x = mean.daynr, y = bm_p_day, color = year)) +
  scale_color_gradient2(low = "#4575b4", mid = "#fee090", high = "#f46d43",
                        na.value = "grey50", midpoint = 2005, guide = "none") +
  theme_classic() +
  labs(y = "Biomass [g/d]", x = "Day of year")
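# --- Appended convergence-check sketch (not part of the original script) ---
# The "Diagnostics" section above was left empty; this is a minimal check,
# assuming the usual R2jags conventions (an "Rhat" column in
# BUGSoutput$summary and a traceplot() method for rjags objects).
rhats <- jagsmodBasic$BUGSoutput$summary[, "Rhat"]
summary(rhats)            # values close to 1 indicate well-mixed chains
names(which(rhats > 1.1)) # parameters whose chains may not have converged
# traceplot(jagsmodBasic) # optional visual check of the chains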
/hallmann_repro_leon_final.R
no_license
leonthoma/Repro
R
false
false
13604
r
library(testthat)
library(FarsKarol)
library(dplyr)
library(readr)
library(tidyr)
library(magrittr)
library(graphics)
library(maps)

test_check("FarsKarol")

test_that("Make filename test1", {
  expect_equal(make_filename(2013), "accident_2013.csv.bz2")
})

test_that("Make filename test2", {
  expect_equal(make_filename(2014), "accident_2014.csv.bz2")
})
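# A hedged extra check, not in the original suite: the canonical FARS
# make_filename() coerces its argument with as.integer() before sprintf(),
# so a string year should yield the same name; this assumes FarsKarol
# follows that convention.
test_that("Make filename coerces a string year", {
  expect_equal(make_filename("2015"), "accident_2015.csv.bz2")
})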
/tests/testthat.R
permissive
Karol-sandoval/FarsKarol
R
false
false
381
r
# Created on
# Course work:
# @author:
# Source:

val1 = c(7, 9.5, 11, 4, 8.5)
val2 = c(23, 6, 7.8, 4, 12)
val3 = c(4, 4.5, 6, 7.3, 3)

jpeg(file = "sample_multiple_lines.jpg")

plot(val1, type = 'o', col = "red", xlab = "month", ylab = "rainfall",
     main = "rainfall chart")
lines(val2, type = 'o', col = "blue")
lines(val3, type = 'o', col = "green")

dev.off() # close the jpeg device so the file is actually written
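# A hedged extension, not in the original script: the chart above gives no
# way to tell the three series apart, so this variant writes a second file
# with a base-graphics legend (the series labels are made up for
# illustration, and ylim is widened so all three series fit in the frame).
jpeg(file = "sample_multiple_lines_legend.jpg")
plot(val1, type = 'o', col = "red", xlab = "month", ylab = "rainfall",
     main = "rainfall chart", ylim = range(c(val1, val2, val3)))
lines(val2, type = 'o', col = "blue")
lines(val3, type = 'o', col = "green")
legend("topright", legend = c("series 1", "series 2", "series 3"),
       col = c("red", "blue", "green"), lty = 1, pch = 1)
dev.off()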
/chaaya/multiple_line_graph.r
no_license
tactlabs/r-samples
R
false
false
350
r
## Tests for split select weights
library(rangerts)

context("ranger_splitweights")

## Tests
test_that("split select weights work", {
  expect_silent(ranger(Species ~ ., iris, num.trees = 5,
                       split.select.weights = c(0.1, 0.2, 0.3, 0.4)))
  expect_error(ranger(Species ~ ., iris, num.trees = 5,
                      split.select.weights = c(0.1, 0.2, 0.3)))
})

test_that("Tree-wise split select weights work", {
  num.trees <- 5
  weights <- replicate(num.trees, runif(ncol(iris) - 1), simplify = FALSE)
  expect_silent(ranger(Species ~ ., iris, num.trees = num.trees,
                       split.select.weights = weights))

  weights <- replicate(num.trees + 1, runif(ncol(iris) - 1), simplify = FALSE)
  expect_error(ranger(Species ~ ., iris, num.trees = num.trees,
                      split.select.weights = weights))
})

test_that("always split variables work", {
  expect_silent(ranger(Species ~ ., iris, num.trees = 10,
                       always.split.variables = c("Petal.Length", "Petal.Width"),
                       mtry = 2))
  expect_silent(ranger(dependent.variable.name = "Species", data = iris,
                       num.trees = 10,
                       always.split.variables = c("Petal.Length", "Petal.Width"),
                       mtry = 2))
})
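# A hedged addition, not in the original file: ranger documents
# split.select.weights as values in [0, 1] where a zero weight excludes a
# variable, so concentrating all weight on one covariate (with mtry = 1)
# should still run; this assumes rangerts keeps ranger's behaviour here.
test_that("degenerate split select weights run", {
  expect_silent(ranger(Species ~ ., iris, num.trees = 5, mtry = 1,
                       split.select.weights = c(1, 0, 0, 0)))
})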
/rangerts/tests/testthat/test_splitweights.R
no_license
BenjaminGoehry/BlocRF
R
false
false
1136
r
#' Generate Operating Characteristics to Find the Optimal Biological Dose
#'
#' This function generates operating characteristics to find the optimal biological dose (OBD).
#'
#' @param toxicity.low The upper boundary for the low toxicity interval.
#' @param toxicity.moderate The upper boundary for the moderate toxicity interval.
#' @param toxicity.high The upper boundary for the high toxicity interval.
#' @param efficacy.low The upper boundary for the low efficacy interval.
#' @param efficacy.moderate The upper boundary for the moderate efficacy interval.
#' @param efficacy.high The upper boundary for the high efficacy interval.
#' @param target.toxicity The target DLT rate.
#' @param target.efficacy The target efficacy rate.
#' @param ncohort The total number of cohorts.
#' @param cohortsize The number of patients in the cohort.
#' @param n.early The early stopping parameter. If the number of patients treated at
#'                the current dose reaches \code{n.early}, then we stop the trial
#'                and select the optimal biological dose (OBD) based on the observed data.
#'                The default value is 100.
#' @param startdose The starting dose level.
#' @param p.true A vector containing the true toxicity probabilities of the
#'               investigational dose levels.
#' @param q.true A vector containing the true efficacy probabilities of the
#'               investigational dose levels.
#' @param ntrial The total number of trials to be simulated.
#' @param seed The random seed for simulation.
#' @param p1 The cutoff lower limit for safety utility function 1, described in the Details section.
#' @param p2 The cutoff upper limit for safety utility function 1, described in the Details section.
#' @param q1 The cutoff lower limit for efficacy utility function 1, described in the Details section.
#' @param q2 The cutoff upper limit for efficacy utility function 1, described in the Details section.
#' @param cutoff.eli.toxicity The cutoff value to eliminate a dose with unacceptable high toxicity for safety. The default value is 0.95.
#' @param cutoff.eli.efficacy The cutoff value for the futility rule, the acceptable lowest efficacy. The default value is 0.30.
#' @param w1.toxicity The weight for toxicity utility functions 2 and 3, described in the Details section.
#' @param w2.toxicity The weight for toxicity utility function 3, described in the Details section.
#' @param indicator The indicator cutoff value for utility function 3, described in the Details section.
#'
#' @details Large trials are simulated to characterize the operating characteristics of the Keyboard design under the prespecified true toxicity probabilities and true efficacy probabilities of the investigational doses. The dose assignment follows the rules described in the function \code{get.decision.obd.kb()}.
#'
#' The following stopping rules are built in the Keyboard design:
#' \enumerate{
#' \item Stop the trial if the lowest dose is eliminated from the trial due to unacceptably high toxicity.
#' \item Stop the trial if the number of patients treated at the current dose exceeds \code{n.early}.
#' }
#'
#' @return \code{get.oc.obd.kb()} returns the operating characteristics of the Keyboard design as a list, including:\cr
#' \enumerate{
#' \item the selection percentage at each dose level using utility function 1 (\code{$selpercent1}), \cr
#' \item the selection percentage at each dose level using utility function 2 (\code{$selpercent2}), \cr
#' \item the selection percentage at each dose level using utility function 3 (\code{$selpercent3}), \cr
#' \item the number of patients treated at each dose level (\code{$npatients}), \cr
#' \item the number of dose-limiting toxicities (DLTs) observed at each dose level (\code{$ntox}), \cr
#' \item the number of responses observed at each dose level (\code{$neff}), \cr
#' \item the average number of DLTs (\code{$totaltox}), \cr
#' \item the average number of responses (\code{$totaleff}), \cr
#' \item the average number of patients (\code{$totaln}), \cr
#' \item the percentage of early stopping without selecting the OBD using utility function 1 (\code{$percentstop1}), \cr
#' \item the percentage of early stopping without selecting the OBD using utility function 2 (\code{$percentstop2}), \cr
#' \item the percentage of early stopping without selecting the OBD using utility function 3 (\code{$percentstop3}), \cr
#' \item data.frame (\code{$simu.setup}) containing simulation parameters, such as target, p.true, etc.
#' }
#'
#' @author Hongying Sun, Li Tang, and Haitao Pan
#' @examples
#' \donttest{
#' toxicity.low <- 0.15
#' toxicity.moderate <- 0.25
#' toxicity.high <- 0.35
#' efficacy.low <- 0.25
#' efficacy.moderate <- 0.45
#' efficacy.high <- 0.65
#' target.toxicity <- 0.30
#' target.efficacy <- 0.40
#' p.true <- c(0.08, 0.30, 0.60, 0.80)
#' q.true <- c(0.25, 0.40, 0.25, 0.50)
#' oc.obd.kb <- get.oc.obd.kb(toxicity.low = toxicity.low,
#'                            toxicity.moderate = toxicity.moderate,
#'                            toxicity.high = toxicity.high,
#'                            efficacy.low = efficacy.low,
#'                            efficacy.moderate = efficacy.moderate,
#'                            efficacy.high = efficacy.high,
#'                            target.toxicity = target.toxicity,
#'                            target.efficacy = target.efficacy,
#'                            p.true = p.true, q.true = q.true)
#' oc.obd.kb
#' summary.kb(oc.obd.kb)
#' plot.kb(oc.obd.kb)
#' plot.kb(oc.obd.kb$selpercent1)
#' plot.kb(oc.obd.kb$selpercent2)
#' plot.kb(oc.obd.kb$selpercent3)
#' plot.kb(oc.obd.kb$npatients)
#' plot.kb(oc.obd.kb$ntox)
#' plot.kb(oc.obd.kb$neff)
#' }
#' @family single-agent phase I/II functions
#'
#' @references
#' Li DH, Whitmore JB, Guo W, Ji Y. Toxicity and efficacy probability interval design for phase I adoptive cell therapy dose-finding clinical trials.
#' \emph{Clinical Cancer Research}. 2017; 23:13-20.
#' https://clincancerres.aacrjournals.org/content/23/1/13.long
#'
#' Liu S, Johnson VE. A robust Bayesian dose-finding design for phase I/II clinical trials. \emph{Biostatistics}. 2016; 17(2):249-63.
#' https://academic.oup.com/biostatistics/article/17/2/249/1744018
#'
#' Zhou Y, Lee JJ, Yuan Y. A utility-based Bayesian optimal interval (U-BOIN) phase I/II design to identify the optimal biological dose for targeted and immune therapies. \emph{Statistics in Medicine}. 2019; 38:S5299-5316.
#' https://onlinelibrary.wiley.com/doi/epdf/10.1002/sim.8361
#' @export
get.oc.obd.kb <- function(toxicity.low, toxicity.moderate, toxicity.high,
                          efficacy.low, efficacy.moderate, efficacy.high,
                          target.toxicity, target.efficacy, ncohort = 10,
                          cohortsize = 3, n.early = 100, startdose = 1,
                          p.true, q.true, ntrial = 1000, seed = 6,
                          p1 = 0.15, p2 = 0.40, q1 = 0.3, q2 = 0.6,
                          cutoff.eli.toxicity = 0.95, cutoff.eli.efficacy = 0.3,
                          w1.toxicity = 0.33, w2.toxicity = 1.09,
                          indicator = target.toxicity){
  set.seed(seed)
  ndose = length(p.true);
  npts = ncohort * cohortsize;
  # toxicity outcome matrix
  Y = matrix(rep(0, ndose*ntrial), ncol = ndose);
  # efficacy outcome matrix
  E = matrix(rep(0, ndose*ntrial), ncol = ndose);
  # matrix to store the total number of patients
  N = matrix(rep(0, ndose*ntrial), ncol = ndose);
  # vectors to store the selected dose level
  # dselect = rep(0, ntrial)
  dselect1 = rep(0, ntrial)
  dselect2 = rep(0, ntrial)
  dselect3 = rep(0, ntrial)
  dearlystop = rep(0, ntrial)

  # get decision table
  decision.matrix.output <- get.decision.obd.kb(toxicity.low = toxicity.low,
                                                toxicity.moderate = toxicity.moderate,
                                                toxicity.high = toxicity.high,
                                                efficacy.low = efficacy.low,
                                                efficacy.moderate = efficacy.moderate,
                                                efficacy.high = efficacy.high,
                                                target.toxicity = target.toxicity,
                                                target.efficacy = target.efficacy,
                                                cohortsize = cohortsize,
                                                ncohort = ncohort)$decision.matrix

  # this function outputs the decision given the dose-finding table, the number
  # of total patients, the number of patients who experienced toxicity, and the
  # number of responses
  decision.finding <- function(out.matrix, n, t, r){
    rowindex <- which(out.matrix$N == n & out.matrix$T == t & out.matrix$R == r)
    decision <- out.matrix$Decision[rowindex]
    decision <- as.character(decision)
    return(decision)
  }

  # simulation trials
  for (trial in 1:ntrial){
    # the number of patients who experienced toxicity at each level
    y <- rep(0, ndose);
    # the number of patients who experienced response at each level
    e <- rep(0, ndose);
    # the number of total patients treated at each level
    n <- rep(0, ndose);
    # an indicator to check whether the trial terminates early
    d = startdose;
    earlystop = 0;
    # an indicator to check whether the doses are eliminated
    elimi = rep(0, ndose);

    for (i in 1:ncohort){
      y[d] = y[d] + sum(runif(cohortsize) < p.true[d]);
      e[d] = e[d] + sum(runif(cohortsize) < q.true[d]);
      n[d] = n[d] + cohortsize;
      if (n[d] >= n.early) break;

      # get dose decision from the decision matrix
      decision.result = decision.finding(decision.matrix.output, n[d], y[d], e[d])

      # early stop rules
      if (n[d] >= 3){
        if (1 - pbeta(target.toxicity, y[d]+1, n[d]-y[d]+1) > cutoff.eli.toxicity){
          elimi[d:ndose] = 1;
          if (d == 1) { earlystop = 1; break; }
        }
      }
      if (n[d] >= 3){
        if (1 - pbeta(target.efficacy, e[d]+1, n[d]-e[d]+1) < cutoff.eli.efficacy){
          elimi[d] = 1;
        }
      }

      if (!is.null(decision.result)){
        # dose transition
        if (decision.result == "E" && d != ndose) {
          if (elimi[d+1] == 0) { d = d+1; }
        } else if (decision.result == "D" && d != 1) {
          if (elimi[d-1] == 0) { d = d-1; }
        } else if (decision.result == "S") {
          d = d;
        } else if (decision.result == "DUT" && d != 1){
          if (elimi[d-1] == 0) { d = d-1; }
          elimi[d:ndose] = 1;
        } else if (decision.result == "DUE" && d != 1){
          if (elimi[d-1] == 0) { d = d-1; }
          elimi[d] = 1;
        } else if (decision.result == "EUE" && d != ndose){
          if (elimi[d+1] == 0) { d = d+1; }
          elimi[d] = 1;
        } else {
          d = d;
        }
      }
    }

    Y[trial, ] = y;
    E[trial, ] = e;
    N[trial, ] = n;

    if (earlystop == 1){
      dselect1[trial] = 99;
      dselect2[trial] = 99;
      dselect3[trial] = 99;
      dearlystop[trial] = 199;
    } else {
      dselect1[trial] = select.obd.kb(target.toxicity = target.toxicity,
                                      target.efficacy = target.efficacy,
                                      npts = n, ntox = y, neff = e)$obd1
      dselect2[trial] = select.obd.kb(target.toxicity = target.toxicity,
                                      target.efficacy = target.efficacy,
                                      npts = n, ntox = y, neff = e)$obd2
      dselect3[trial] = select.obd.kb(target.toxicity = target.toxicity,
                                      target.efficacy = target.efficacy,
                                      npts = n, ntox = y, neff = e)$obd3
    }
    # use the select.obd.kb function to pick the OBD from the observed data
  }

  # output results
  selpercent1 = rep(0, ndose);
  selpercent2 = rep(0, ndose);
  selpercent3 = rep(0, ndose);
  nptsdose = apply(N, 2, mean);
  ntoxdose = apply(Y, 2, mean);
  neffdose = apply(E, 2, mean);

  for (i in 1:ndose){
    selpercent1[i] = sum(dselect1 == i)/ntrial*100;
    selpercent2[i] = sum(dselect2 == i)/ntrial*100;
    selpercent3[i] = sum(dselect3 == i)/ntrial*100;
  }

  out = list(name = "get.oc.obd.kb", ## to identify object for summary.kb() function.
             selpercent1 = selpercent1, selpercent2 = selpercent2,
             selpercent3 = selpercent3, npatients = nptsdose,
             ntox = ntoxdose, neff = neffdose,
             totaltox = sum(Y)/ntrial, totaleff = sum(E)/ntrial,
             totaln = sum(N)/ntrial,
             earlystop = sum(dearlystop == 199)/ntrial*100,
             percentstop1 = sum(dselect1 == 99)/ntrial*100,
             percentstop2 = sum(dselect2 == 99)/ntrial*100,
             percentstop3 = sum(dselect3 == 99)/ntrial*100,
             simu.setup = data.frame(target.toxicity = target.toxicity,
                                     target.efficacy = target.efficacy,
                                     p.true = p.true, q.true = q.true,
                                     ncohort = ncohort, cohortsize = cohortsize,
                                     startdose = startdose, ntrial = ntrial,
                                     dose = 1:ndose));
  return(out)
}
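# A hedged usage sketch appended for illustration (kept commented out so the
# package file stays side-effect free); the scenario values mirror the roxygen
# example above, with ntrial shrunk to a hypothetical quick smoke-run:
# oc <- get.oc.obd.kb(toxicity.low = 0.15, toxicity.moderate = 0.25,
#                     toxicity.high = 0.35, efficacy.low = 0.25,
#                     efficacy.moderate = 0.45, efficacy.high = 0.65,
#                     target.toxicity = 0.30, target.efficacy = 0.40,
#                     p.true = c(0.08, 0.30, 0.60, 0.80),
#                     q.true = c(0.25, 0.40, 0.25, 0.50), ntrial = 10)
# oc$selpercent1 # selection percentages under utility function 1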
/R/get.oc.obd.kb.R
no_license
hongyingsun1101/KEYBOARDR
R
false
false
12623
r
% Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/flashmatrix.R
\name{fm.basic.op}
\alias{fm.basic.op}
\alias{fm.get.basic.op}
\alias{fm.get.basic.uop}
\alias{fm.init.basic.op}
\title{The basic operators supported by FlashMatrix.}
\usage{
fm.get.basic.op(name)

fm.get.basic.uop(name)

fm.init.basic.op()
}
\arguments{
\item{name}{the name of the basic operator.}
}
\value{
a reference to the specified basic operator.
}
\description{
The basic operators are mainly used by the FlashMatrix functions that accept
operators as arguments. Such functions include `fm.mapply', `fm.inner.prod',
etc.
}
\details{
`fm.get.basic.op' gets the predefined basic binary operator specified by a
user. The supported basic binary operators are:
\itemize{
\item{"add" or "+"}{compute addition;}
\item{"sub" or "-"}{compute subtraction;}
\item{"mul" or "*"}{compute multiplication;}
\item{"div" or "/"}{compute division;}
\item{"min" and "max"}{compute minimum and maximum, respectively;}
\item{"pow"}{compute exponentiation;}
\item{"eq" or "=="}{compute equality;}
\item{"gt" or ">"}{compute greater than;}
\item{"ge" or ">="}{compute greater than or equal to;}
\item{"lt" or "<"}{compute less than;}
\item{"le" or "<="}{compute less than or equal to.}
}

`fm.get.basic.uop' gets the predefined basic unary operator specified by a
user. The supported basic unary operators are:
\itemize{
\item{"neg"}{compute negation;}
\item{"sqrt"}{compute square root;}
\item{"abs"}{compute absolute value;}
\item{"not"}{compute logical NOT;}
\item{"ceil" and "floor"}{compute a ceiling and a floor, respectively;}
\item{"log", "log2" and "log10"}{compute log with different bases;}
\item{"round"}{round a number;}
\item{"as.int" and "as.numeric"}{cast a number to an integer and a numeric
value, respectively.}
}

`fm.init.basic.op' initializes the following basic operators.
\itemize{
\item{`fm.bo.add'}{the predefined basic binary operator for addition.}
\item{`fm.bo.sub'}{the predefined basic binary operator for subtraction.}
\item{`fm.bo.mul'}{the predefined basic binary operator for multiplication.}
\item{`fm.bo.div'}{the predefined basic binary operator for division.}
\item{`fm.bo.min'}{the predefined basic binary operator for computing minimum.}
\item{`fm.bo.max'}{the predefined basic binary operator for computing maximum.}
\item{`fm.bo.pow'}{the predefined basic binary operator for computing exponentiation.}
\item{`fm.bo.eq', `fm.bo.gt', `fm.bo.ge', `fm.bo.lt' and `fm.bo.le'}
{the predefined basic logical operators to compare two elements: ==, >, >=, <, <=.}
\item{`fm.buo.neg'}{the predefined basic unary operator for negation.}
\item{`fm.buo.sqrt'}{the predefined basic unary operator for square root.}
\item{`fm.buo.abs'}{the predefined basic unary operator for absolute value.}
\item{`fm.buo.not'}{the predefined logical NOT operator.}
\item{`fm.buo.ceil'}{the predefined basic unary operator of computing a ceiling of a number.}
\item{`fm.buo.floor'}{the predefined basic unary operator of computing a floor of a number.}
\item{`fm.buo.log', `fm.buo.log2' and `fm.buo.log10'}{the predefined basic unary operators of computing log with different bases.}
\item{`fm.buo.round'}{the predefined basic unary operator of rounding a value.}
\item{`fm.buo.as.int'}{the predefined basic unary operator of casting a numeric value to an integer.}
\item{`fm.buo.as.numeric'}{the predefined basic unary operator of casting an integer to a numeric value.}
}
}
\author{
Da Zheng <dzheng5@jhu.edu>
}
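% The example section below is an added sketch, not from the original Rd: it
% only exercises the three documented entry points; how the returned operator
% references are then consumed (e.g. by fm.mapply) is assumed, not shown.
\examples{
\dontrun{
## initialize the predefined operators, then fetch references by name
fm.init.basic.op()
add.op <- fm.get.basic.op("add")
neg.op <- fm.get.basic.uop("neg")
}
}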
/Rpkg/man/fm.basic.op.Rd
permissive
zheng-da/FlashX
R
false
false
3,507
rd
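# A minimal usage sketch for the FlashMatrix operator API documented in the Rd
# record above. Only fm.init.basic.op(), fm.get.basic.op(), fm.get.basic.uop(),
# and the fm.mapply name come from the Rd itself; the argument order of
# fm.mapply and the fm.conv.R2FM conversion helper are assumptions and may
# differ in the real FlashX API.
fm.init.basic.op()                      # set up the fm.bo.* / fm.buo.* operators
add <- fm.get.basic.op("+")             # same operator object as fm.bo.add
neg <- fm.get.basic.uop("neg")          # unary negation

m <- fm.conv.R2FM(matrix(1:6, 2, 3))    # hypothetical R-matrix -> FlashMatrix helper
s <- fm.mapply(m, m, add)               # elementwise m + m via the operator object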
inputPath <- commandArgs()[5]
outputPath <- commandArgs()[6]

# Input is a tab-separated file with four values per bin; the fourth value of
# each bin holds the raw coverage rate.
tmp <- scan(inputPath, sep="\t")
num <- length(tmp) / 4
input <- matrix(tmp, 4, num)

rate <- input[4,]
rate <- rate / mean(rate, na.rm=TRUE)  # normalize to mean coverage
na_idx <- ( is.na(rate) )
rate[na_idx] <- 0

bins <- c(1,2)
pdf(file=outputPath, height=480/72, width=480/72)
plot(rate, type = "l", xlim=c(0,100), ylim=c(0,3), xlab="GC%", ylab="", axes=F)
par(new=T)
axis(1, at = c(0,25,50,75,100), labels = c(0,25,50,75,100), las = 0, lwd.ticks=1)
axis(2, at = c(0,1,2,3), labels = c(0,1,2,3), las = 0, lwd.ticks=1)
mtext("Normalized coverage", side=2, line=3)
# grey reference lines at GC = 50% and at normalized coverage 1 and 2
segments(50,-100,50,3, col=rgb(0.7,0.7,0.7), lwd=1)
segments(-10,bins,120,bins, col=rgb(0.7,0.7,0.7), lwd=1)
dev.off()
/toil_cnacs/data/cnacs/subscript_target/plotGC_wga.R
permissive
papaemmelab/toil_cnacs
R
false
false
719
r
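# A hedged note on the plotGC_wga.R record above: it indexes commandArgs()
# without trailingOnly = TRUE, so positions 5 and 6 only line up with the input
# and output paths for one particular way of invoking R; verify with
# print(commandArgs()) before relying on fixed positions. A position-independent
# variant (assuming the two user arguments come last on the command line):
args <- commandArgs(trailingOnly = TRUE)
inputPath  <- args[1]
outputPath <- args[2]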
# In R, a matrix is a vector that has two additional attributes:
# * Number of rows
# * Number of columns
#
# As with vectors, every element of a matrix must be of the same _mode_;
# either purely numeric, or purely text, etc.
#
# Creating a matrix from a vector
# ===============
#
# One way to create a matrix is to begin with a specific vector that holds the values.
# When we specify the number of rows and columns in the desired matrix,
# R can create a matrix structure to hold them.

m <- matrix( c(1,2,3,4), nrow=2, ncol=2)
m
attributes(m)
dim(m)
class(m)

# Note that the vector is broken into `ncol` number of _columns_,
# each of size `nrow`. The values from the vector `c(1,2,3,4)` are placed in columns.
# This is called the _column-major order_.
#
# We can instead force a _row-major order_ by setting the `byrow` parameter to `TRUE`.

matrix( c(1,2,3,4), nrow=2, ncol=2, byrow=TRUE)

# If we specify only `nrow` or only `ncol`, the unspecified one will be determined
# using the length of the vector.

matrix( 1:6, nrow=2 )
matrix( 1:6, ncol=3 )

# If the specified matrix sizes are not compatible with the vector's length,
# the vector is _recycled_ until it fills the matrix.

matrix( 1:5, nrow=2, ncol=3)

# The same recycling is done also when one of the shape parameters is omitted.

matrix( 1:5, nrow=2 )

# Accessing matrix elements
# ======
# Once we have data stored in a matrix, we may want to access its elements, rows, or columns.

# Accessing individual elements
# ----------------
# The element in the `r`-th row and the `c`-th column of a matrix `m` can be accessed
# with the `m[r,c]` notation.

m <- matrix(1:6, nrow=2)
m
m[1,1]
m[2,3]

# Row and column access
# ----------
# We may instead want to access the `r`-th row in its entirety.
# Then, we use the `m[r,]` notation. Similarly, `m[,c]` gives all entries in column `c`.

m <- matrix(1:6, nrow=2)
m
m[1,] # first row, all columns
m[,1] # first column, all rows

# Accessing ranges of rows/columns
# --
# You may have noticed that the notation to access elements is similar
# between vectors and matrices. As with vectors, we can provide a vector of indices
# to specify rows and columns.

m <- matrix( 1:12, nrow=3 )
m

# Select rows 1 and 2, all columns:
m[1:2,]

# Select rows 1 and 2, second column only.
m[1:2, 2]

# Select rows 1 and 2, and columns 1, 4 and 3, in that order.
m[1:2, c(1,4,3)]

# Excluding some rows and columns
# ---
# As seen in the context of vectors, negative indices can be used to
# get a new matrix with some rows/columns removed.

m <- matrix( 1:12, nrow=3 )
m

# Remove 3rd row.
m[-3,]

# Remove 2nd column
m[,-2]

# Remove 1st row and 3rd column
m[-1,-3]

# Remove columns from 1 to 2.
m[,-1:-2]

# Setting and getting row and column names
# ==
# As with vectors, we can provide names to the rows and to the columns of a matrix.

m <- matrix( 1:6, nrow=2)
m

# The functions `rownames()` and `colnames()` are used to set the names
# for rows and columns, respectively.

rownames(m) <- c("row I", "row II")
colnames(m) <- c("col a", "col b", "col c")
m

# When called without an assignment, they return the existing names.

rownames(m)
colnames(m)

# These names provide an alternative method to access matrix elements.

m["row I", "col b"]
m["row I",]
m[,"col a"]

# Create a matrix by setting individual elements
# =============
# Sometimes we may not have all the data at hand at once. It is possible to start
# with an empty matrix, and fill it up element-by-element.

m <- matrix(nrow=2, ncol=2)
m[1,1] <- 1
m[2,1] <- 2
m[1,2] <- 3
m[2,2] <- 4
m

# Create a matrix by combining columns or rows
# =========
# When we have several different vectors, we can combine them in
# _columns_ using `cbind()`, or by _rows_ using `rbind()`.

cbind( c(1,2), c(3,4) )
rbind( c(1,2), c(3,4) )

# Add a row or a column to an existing matrix
# ===
# The functions `cbind()` and `rbind()` can also be used to extend an existing matrix.

m <- matrix( 1:4, nrow = 2)
m

# Add a new column at the end of the matrix.
cbind(m, c(10,11))

# Add a new column at the beginning of the matrix.
cbind(c(10,11), m)

# Add a new row at the end of the matrix
rbind(m, c(10,11))

# Add a new row at the beginning of the matrix.
rbind(c(10,11), m)

# Insert a row or a column into a matrix
# ===============
# Another application of `cbind()` and `rbind()` is inserting
# columns and rows into existing matrices. As with vectors,
# such insertion is not done on the original matrix.
# We generate a new matrix using existing rows/columns,
# combine them with `rbind()`/`cbind()`, and reassign to the variable.

m <- matrix( 1:9, nrow=3, ncol=3)
m

# Insert a row between second and third rows.
rbind(m[1:2,], c(-1, -2, -3), m[3,])

# Insert a column between first and second columns
cbind( m[,1], c(-4,-5,-6), m[,2:3] )

# Assign new values to submatrices
# ==
# A matrix can be changed in-place by selecting a submatrix
# using index notation, and assigning a new matrix to it.

m <- matrix( 1:9, nrow=3 )
m
m[ c(1,2), c(2,3) ] <- matrix(c(20,21,22,23))
m

# Removing rows and columns
# ====
# To remove some selected rows or columns, we just use the index notation to
# specify the rows and columns we want to keep,
# and assign the result to the variable's name.

m <- matrix( 1:9, nrow=3 )
m

# Remove 2nd row.
m <- m[c(1,3),]
m

# Remove 1st column.
m <- m[, c(2,3)]
m

# Note: after the removals above, m is 2x2, so an index from the original 3x3
# matrix such as m[c(1,3), c(2,3)] would now be out of bounds.

# Filtering on matrices
# ========

m <- matrix( c(2,9,4,7,5,3,6,1,8) , nrow=3 )
m
m >= 5
m[m>=5]
m[m[,1]>=5]
m[ m< 5] <- 0
m

# Matrix recycling
# ==========
# Remember that when two vectors of different lengths are combined in an operation,
# the shorter one is _recycled_ (i.e., elements repeated until the desired length).

c(1,1,1,1,1) + c(1,2,3) # converted to c(1,1,1,1,1) + c(1,2,3,1,2)

# The same procedure also applies to matrices.

m1 <- matrix(1:9, nrow=3)
m2 <- matrix( c(1,2,3), nrow=3,ncol=3)
m2
m1 + m2

# Matrix operations
# =========

# transpose
# ----------
m <- matrix(1:4, nrow=2)
m
t(m)

# elementwise product
# --
m
m * m

# matrix multiplication
# --
m
m %*% m

# multiply by a scalar
# --
m
3 * m

# matrix addition
# --
m
m + m

# Functions on matrices
# ==============

m <- matrix( 1:9, nrow=3 )
m
rowSums(m)
rowMeans(m)
colSums(m)
colMeans(m)

# The apply() function
# ------
# The `apply()` function and its relatives are quite common in R programming.
# Here, we provide a function to apply to rows or columns, and the resulting vector
# of numbers is returned.

m <- matrix( 1:9, nrow=3)
m
apply(m, 1, mean) # same as rowMeans()
apply(m, 2, mean) # same as colMeans()

# We can also use `apply()` with user-defined functions.

alt_inverse_sum <- function(x) { return(sum(c(1,-1)/x)) }

m <- matrix(1:12, nrow=3)
m
apply(m,1,alt_inverse_sum)
apply(m,2,alt_inverse_sum)
/Lectures/Lecture 6 - Matrices.R
no_license
senanarci/CMPE140
R
false
false
6,840
r
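# The matrix tutorial above mentions apply()'s "relatives" without showing one;
# a minimal sketch of sapply(), the vector-oriented counterpart (values are
# illustrative only):
sapply(1:4, function(x) x^2)   # applies the function elementwise, returns c(1, 4, 9, 16)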
testlist <- list(A = structure(c(2.02410200510026e-79, 0, 0, 0), .Dim = c(2L, 2L)),
                 B = structure(0, .Dim = c(1L, 1L)))
result <- do.call(multivariance:::match_rows, testlist)
str(result)
/multivariance/inst/testfiles/match_rows/AFL_match_rows/match_rows_valgrind_files/1613112779-test.R
no_license
akhikolla/updatedatatype-list3
R
false
false
187
r
#:# libraries
library(digest)
library(mlr)
library(OpenML)
library(farff)

#:# config
set.seed(1)

#:# data
dataset <- getOMLDataSet(data.name = "churn")
head(dataset$data)

#:# preprocessing
head(dataset$data)

#:# model
task = makeClassifTask(id = "task", data = dataset$data, target = "class")
lrn = makeLearner("classif.ranger",
                  par.vals = list(num.trees = 500, num.random.splits = 1L),
                  predict.type = "prob")

#:# hash
#:# bc69e734f17db34a6aa14082d8fe8d9e
hash <- digest(list(task, lrn))
hash

#:# audit
cv <- makeResampleDesc("CV", iters = 5)
r <- mlr::resample(lrn, task, cv, measures = list(acc, auc, tnr, tpr, ppv, f1))
ACC <- r$aggr  # aggregated performance over folds (all requested measures, not only accuracy)
ACC

#:# session info
sink(paste0("sessionInfo.txt"))
sessionInfo()
sink()
/models/openml_churn/classification_class/bc69e734f17db34a6aa14082d8fe8d9e/code.R
no_license
lukaszbrzozowski/CaseStudies2019S
R
false
false
718
r
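# A hedged note on the mlr/ranger record above: in ranger, num.random.splits
# only takes effect when splitrule = "extratrees", and some mlr versions warn
# that the parameter's requirement is unmet otherwise. A sketch of an explicit
# learner definition (behavior is version-dependent):
lrn_et <- makeLearner("classif.ranger",
                      par.vals = list(num.trees = 500,
                                      splitrule = "extratrees",
                                      num.random.splits = 1L),
                      predict.type = "prob")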
### Practical 1 ###

#install.packages("reshape")
library(ggplot2)
library(plyr)
library(rms)
library(MASS)
library(reshape)
library(lmtest)

# You might need to set your working directory here
smart <- readRDS(file= "SMARTs_P1.rds")

class(smart) # This tells us the class/data type of the object smart (hopefully returning data.frame)
dim(smart)
sapply(smart,class) # This tells us the class of each variable in the smart data, eg, categorical='factor' or continuous='numeric'

# Albumin is not a factor variable, so we need to convert it and label the levels.
smart$albumin <- as.factor(smart$albumin)
levels(smart$albumin)
smart$albumin <- revalue(smart$albumin, c("1"="No", "2"="Low", "3"="High"))
levels(smart$albumin)

# There are 2 variables for systolic blood pressure (SBP) because it can either be
# measured by hand or automatically. If we check, we can see there is a lot of
# missing data in both of the SBP variables.
table(is.na(smart$SYSTBP)==FALSE)
table(is.na(smart$SYSTH)==FALSE)
table(is.na(smart$SYSTH)==FALSE | is.na(smart$SYSTBP)==FALSE)

smart$SBP <- ifelse(is.na(smart$SYSTBP)==FALSE, smart$SYSTBP, smart$SYSTH)
table(is.na(smart$SBP))

## Part 2 Exploratory analysis

# Overview of the data
attach(smart)
dim(smart)
summary(smart)
sapply(smart,class)
describe(smart)

# Summarise outcome
table(outcome)
round(prop.table(table(outcome))*100,0)

# Summarise categorical variables by outcome
table(SEX,outcome) # Produces a table of counts
round(prop.table(table(SEX,outcome),2)*100,0) # Produces a table of percentages
round(prop.table(table(outcome,SEX),2)*100,0) # Produces a table of percentages
table(SMOKING,outcome)
round(prop.table(table(SMOKING,outcome),2)*100,0)
round(prop.table(table(outcome,SMOKING),2)*100,0)

# Look at distributions of continuous variables
summary(AGE)
ggplot(smart,aes(x=AGE))+geom_histogram()+facet_grid(~outcome)+theme_light()+
  labs(title = "Histogram of age split by outcome")
ggplot(smart, aes(AGE, fill = outcome)) + geom_histogram()
ggplot(smart, aes(AGE, fill = as.factor(outcome))) + geom_density(alpha = 0.2)

# Question - Above we look into age, can you do the same for BMI?
# What is the range of BMI values?
# Plot a BMI histogram
# Plot the BMI density by outcome

## Part 3 Univariate logistic regression with categorical variables

# Let's start by producing a univariate logistic model with gender as the independent variable.
detach(smart)

# Recode SEX so that the baseline is male
smart$SEX2 = relevel(smart$SEX, ref="Male")
attach(smart)

# logistic regression model including sex as the only independent variable
sex_mod <- glm(outcome~SEX2,family="binomial")
summary(sex_mod)

beta <- round(exp(sex_mod$coef),3) # extract the model coefficients from the model
b <- round(exp(confint.default(sex_mod)),3) # Compute the confidence intervals for the model coefficients
d <- cbind(beta,b)
d

# Can you interpret the output?

# By modifying the code above, try fitting a logistic regression model to the SMART data
# with albumin as the predictor.
# Q. Interpret the output. Is albumin a significant predictor? What level of albumin is
# associated with the lowest risk?

## Part 4 Univariate logistic regression with continuous variables

# Now let's try with a continuous variable (AGE).

# Assuming age is linear
age_mod <- glm(outcome~AGE,family="binomial")
summary(age_mod)
exp(confint.default((age_mod))) # if you want confidence intervals for the model coefficients

lp40 <- predict(age_mod,data.frame(AGE=40))
risk40 <- exp(lp40)/(1+exp(lp40))*100
lp80 <- predict(age_mod,data.frame(AGE=80))
risk80 <- exp(lp80)/(1+exp(lp80))*100
matrix(c("risk40","risk80", risk40, risk80),ncol=2,byrow=F)

risk40a <- predict(age_mod,data.frame(AGE=40), type="response")*100 # 'type="response"' gives the predicted probabilities, so multiply by 100 to get the risk as a percentage
risk40b <- 1/(1+exp(-lp40))*100 # alternative calculation
c(risk40a, risk40b)

## Part 5 Modelling a continuous variable using splines

age3_spline <- rcs(AGE,3)
age4_spline <- rcs(AGE,4)
age5_spline <- rcs(AGE,5)

lp_age1 <- predict(age_mod)
age3_mod <- glm(outcome~age3_spline,family="binomial")
age3_mod
lp_age3 <- predict(age3_mod)
age4_mod <- glm(outcome~age4_spline,family="binomial")
age4_mod
lp_age4 <- predict(age4_mod)
age5_mod <- glm(outcome~age5_spline,family="binomial")
age5_mod
lp_age5 <- predict(age5_mod)

data_part6 <- data.frame(AGE,lp_age1,lp_age3,lp_age4,lp_age5)
data_part6_m <- melt(data_part6,id.vars='AGE')
plot_part6 <- ggplot(data_part6_m,aes(AGE,value,colour=variable))+geom_line()+
  scale_colour_manual(labels=c("linear","3 knots","4 knots","5 knots"),
                      values=c("gray","green","red","blue"))+theme_bw()
plot_part6 + labs(x="Age (years)",y="Linear Predictor (log odds)",color="") +
  theme(legend.position=c(0.2,0.8))

age_spline_check <- matrix(c(AIC(age_mod), BIC(age_mod),
                             AIC(age3_mod), BIC(age3_mod),
                             AIC(age4_mod), BIC(age4_mod),
                             AIC(age5_mod), BIC(age5_mod)), ncol=2, byrow=TRUE)
colnames(age_spline_check) <- c("AIC", "BIC")
rownames(age_spline_check) <- c("age_mod","age3_mod", "age4_mod", "age5_mod")
age_spline_check

## Part 6 Building a multivariable model

detach(smart)
smart <- subset(smart, select = c(outcome, SEX, AGE, SBP, alcohol, CHOLO, BMIO,
                                  DIABETES, CARDIAC, SMOKING, AAA))
smart <- na.omit(smart)
age3_spline <- rcs(smart$AGE,3)
attach(smart)

k10 <- qchisq(0.10,1,lower.tail=FALSE) # this gives the change in AIC we consider to be significant in our stepwise selection

# Forward selection (by AIC)
empty_mod_2 <- glm(outcome~1,family="binomial")
forward_mod_2 <- stepAIC(empty_mod_2,k=k10,
                         scope=list(upper=~SEX+age3_spline + SBP + alcohol + CHOLO + BMIO +
                                      DIABETES + CARDIAC + SMOKING + AAA, lower=~1),
                         direction="forward",trace=TRUE)

# Backward selection (by AIC)
full_mod_2 <- glm(outcome~SEX+age3_spline + SBP + alcohol + CHOLO + BMIO + DIABETES +
                    CARDIAC + SMOKING + AAA,family="binomial")
backward_mod_2 <- stepAIC(full_mod_2,k=k10,
                          scope=list(upper=~SEX+age3_spline + SBP + alcohol + CHOLO + BMIO +
                                       DIABETES + CARDIAC + SMOKING + AAA, lower=~1),
                          direction="backward",trace=TRUE)
forward_mod_2
backward_mod_2

# Backward selection (by AIC) forcing SEX to be included in the model
backward_mod_2sex <- stepAIC(full_mod_2,k=k10,
                             scope=list(upper=~SEX+age3_spline + SBP + alcohol + CHOLO + BMIO +
                                          DIABETES + CARDIAC + SMOKING + AAA, lower=~SEX),
                             direction="backward",trace=TRUE)
backward_mod_2sex
summary(backward_mod_2sex)
/Practical 1 rstudio.R
no_license
David-A-Jenkins/HDRUK-Turing-immersion-week
R
false
false
6,785
r
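# The practical above loads lmtest but never uses it; a minimal sketch of a
# likelihood-ratio test comparing the nested age models (the linear model is
# nested in the 3-knot spline model):
lrtest(age_mod, age3_mod)   # a small p-value would favor the nonlinear (spline) fit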
#############################################
# Bollinger Bands indicator and signal
# calculating 3 bands (upper, lower, middle),
# observing which band the price is closest to,
# and the deviation from this band
#############################################
BBands = function(dataset1, ten, period){
  for(i in 1:nrow(dataset1)){
    total = 0                 # renamed from `sum` to avoid shadowing base::sum
    closeValues = vector()
    for(j in period:1){
      total = total + dataset1[i, paste0("Close-", j)]
      closeValues = c(closeValues, dataset1[i, paste0("Close-", j)])
    }
    middleBand = total / period
    upperBand = middleBand + (sd(closeValues) * 2)
    lowerBand = middleBand - (sd(closeValues) * 2)
    diffM = dataset1[i, "Close"] - middleBand
    diffU = dataset1[i, "Close"] - upperBand
    diffL = dataset1[i, "Close"] - lowerBand
    # Thresholds: the original code compared a price *difference* (diffL, diffU)
    # to an absolute band level, which mixes units. The comparisons below test
    # whether the close sits within half a standard deviation of the band,
    # which is the reading that matches the header comment (an assumption).
    if(abs(diffM) < abs(diffU)){
      if(abs(diffM) < abs(diffL)){
        dataset1[i, paste0("SignalBBands", ten)] = "silent"
      } else {
        if(dataset1[i, "Close"] < (lowerBand + sd(closeValues)/2)){
          dataset1[i, paste0("SignalBBands", ten)] = "rise"
        } else {
          dataset1[i, paste0("SignalBBands", ten)] = "silent"
        }
      }
    } else if(abs(diffU) < abs(diffL)){
      if(dataset1[i, "Close"] > (upperBand - sd(closeValues)/2)){
        dataset1[i, paste0("SignalBBands", ten)] = "fall"
      } else {
        dataset1[i, paste0("SignalBBands", ten)] = "silent"
      }
    } else {
      if(dataset1[i, "Close"] < (lowerBand + sd(closeValues)/2)){
        dataset1[i, paste0("SignalBBands", ten)] = "rise"
      } else {
        dataset1[i, paste0("SignalBBands", ten)] = "silent"
      }
    }
  }
  return(dataset1)
}
/TestingAndOptimalization/indicators/BBands.R
no_license
LukasDombrovsky/StockPredict
R
false
false
1,569
r
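# A hedged usage sketch for BBands() above: it assumes columns named "Close"
# and lagged "Close-1" ... "Close-<period>"; the toy prices below are made up.
set.seed(1)
px <- 100 + cumsum(rnorm(8))
df <- data.frame(Close = px)
for (j in 1:5) df[[paste0("Close-", j)]] <- c(rep(NA, j), head(px, -j))
df <- df[complete.cases(df), ]           # keep rows with a full lag window
df <- BBands(df, ten = "5", period = 5)  # adds a SignalBBands5 column
df$SignalBBands5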
library(shiny)
library(shinydashboard)
library(shinyWidgets)
library(shinyFeedback)

groups <- c(
  "Group 1" = "group1",
  "Group 2" = "group2",
  "Group 3" = "group3",
  "Group 4" = "group4",
  "Group 5" = "group5"
)

agegroups <- c(
  "< 1 year old" = "g1",
  "1 - 5 years old" = "g2",
  "5 - 10 years old" = "g3",
  "10 - 15 years old" = "g4",
  "15 - 20 years old" = "g5",
  "> 20 years old" = "g6"
)

# Define the dashboard UI
header <- dashboardHeader(
  title = "Bayesian Stock Management",
  titleWidth = 350,
  dropdownMenuOutput("notificationsMenu")
)

sidebar <- dashboardSidebar(
  sidebarMenuOutput("sidebarmenu"),
  sliderInput("conf",
              tags$p("Confidence level", style = "font-size: 24px; font-family: Times New Roman"),
              value = 80, min = 0, max = 100)
)

body <- dashboardBody(
  tags$img(src = 'logo.jpg', width = '100', height = '75',
           style = 'position:absolute; right:2px; bottom:2px;'),
  useShinyFeedback(),
  setBackgroundColor(
    color = "LightSteelBlue",
    gradient = c("linear", "radial"),
    direction = c("bottom", "top", "right", "left"),
    shinydashboard = TRUE
  ),
  tags$head(
    tags$link(rel = "stylesheet", type = "text/css", href = "fonts.css"),
    # Note: the "componenets" spelling of the output ID is kept so it still
    # matches the server code and the textOutput() further down.
    tags$style("#required_componenets{font-size: 42px; font-family: Times New Roman;}
               #text1{font-size: 24px; font-family: Times New Roman;}")
  ),
  tabItems(
    tabItem(
      tabName = "landing",
      fluidRow(
        box(
          title = tags$p("Group of components", style = "font-size: 42px; font-family: Times New Roman"),
          solidHeader = TRUE, collapsible = TRUE, width = 8, collapsed = FALSE,
          background = "light-blue",
          selectInput("group",
                      label = tags$p("Select a group of components", style = "font-size: 32px; font-family: Times New Roman"),
                      choices = groups)
        ),
        box(
          title = tags$p("Age category", style = "font-size: 42px; font-family: Times New Roman"),
          solidHeader = TRUE, collapsible = TRUE, width = 8, collapsed = FALSE,
          background = "light-blue",
          selectInput("agegroup",
                      label = tags$p("Select an age category", style = "font-size: 32px; font-family: Times New Roman"),
                      choices = agegroups),
          actionButton(
            inputId = "confirm",
            label = "Next",
            icon = icon("ok-circle", lib = "glyphicon")  # "ok-circle" is a glyphicon name, not font-awesome
          )
        )
      )
    ),
    tabItem(
      tabName = "dashboard",
      fluidRow(
        column(
          width = 4,
          box(
            title = tags$p("Current values", style = "font-size: 42px; font-family: Times New Roman"),
            solidHeader = TRUE, collapsible = TRUE, width = NULL, collapsed = FALSE,
            background = "light-blue",
            numericInput("n_tilde",
                         tags$p("Number of components", style = "font-size: 32px; font-family: Times New Roman"),
                         10, min = 1, max = 100),
            numericInput("n_stock",
                         tags$p("Number of components in stock", style = "font-size: 32px; font-family: Times New Roman"),
                         0, min = 1, max = 100)
          )
        ),
        column(
          width = 8,
          box(
            title = tags$p("Requirements:", style = "font-size: 42px; font-family: Times New Roman; font-style: italic;"),
            solidHeader = TRUE, collapsible = FALSE, width = NULL, collapsed = FALSE,
            background = "maroon",
            textOutput("required_componenets")
          ),
          box(
            title = tags$p("Summary:", style = "font-size: 42px; font-family: Times New Roman;"),
            solidHeader = TRUE, collapsible = TRUE, width = NULL, collapsed = TRUE,
            background = "light-blue",
            plotOutput("PosteriorPred"),
            textOutput("text1")
          )
        )
      )
    ),
    tabItem(
      tabName = "data",
      fluidRow(
        column(
          width = 7,
          box(
            title = tags$p("Prior information", style = "font-size: 42px; font-family: Times New Roman"),
            solidHeader = TRUE, collapsible = TRUE, width = NULL, collapsed = FALSE,
            background = "light-blue",
            numericInput("mean",
                         tags$p("Out of 20 components how many of them will be broken?", style = "font-size: 32px; font-family: Times New Roman"),
                         3, min = 0, max = 20),
            numericInput("var",
                         tags$p("The above number can vary by +/- :", style = "font-size: 32px; font-family: Times New Roman"),
                         1, min = 0, max = 20)
          )
        ),
        column(
          width = 5,
          box(
            title = tags$p("Prior information from survey", style = "font-size: 42px; font-family: Times New Roman"),
            solidHeader = TRUE, collapsible = TRUE, width = NULL, collapsed = FALSE,
            background = "light-blue",
            fileInput("file1",
                      tags$p("Choose CSV file", style = "font-size: 32px; font-family: Times New Roman"),
                      accept = c("text/csv", "text/comma-separated-values,text/plain", ".csv"))
          )
        )
      )
    ),
    tabItem(
      tabName = "historical",
      fluidRow(column(
        width = 8,
        box(
          title = tags$p("Historical values", style = "font-size: 42px; font-family: Times New Roman"),
          solidHeader = TRUE, collapsible = TRUE, width = NULL, collapsed = FALSE,
          background = "light-blue",
          numericInput("n",
                       tags$p("Total number of components", style = "font-size: 32px; font-family: Times New Roman"),
                       30, min = 1, max = 100),
          numericInput("y",
                       tags$p("Number of broken components", style = "font-size: 32px; font-family: Times New Roman"),
                       3, min = 1, max = 100)
        )
      ))
    )
  )
)

ui <- dashboardPage(header = header, sidebar = sidebar, body = body)
/ui.R
permissive
GrzegorzMika/Bayesian-Stock-Management
R
false
false
5,561
r
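# A hedged sketch of the server side the shinydashboard UI above expects;
# the output IDs are taken from the UI, the rendered values are placeholders.
server <- function(input, output, session) {
  output$required_componenets <- renderText({   # ID spelling matches the UI
    paste("Order at least", max(input$n_tilde - input$n_stock, 0), "components")
  })
  output$PosteriorPred <- renderPlot(hist(rbeta(1000, 2, 8)))   # placeholder plot
  output$text1         <- renderText("Posterior summary goes here.")
  output$sidebarmenu   <- renderMenu(sidebarMenu(
    menuItem("Start",      tabName = "landing"),
    menuItem("Dashboard",  tabName = "dashboard"),
    menuItem("Data",       tabName = "data"),
    menuItem("Historical", tabName = "historical")
  ))
}
# shinyApp(ui, server)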
### Quiz 2

setwd("C:/Users/jeffthatcher/Cloud Drive/RRepos/GetCleanData")

## Question 1
library(httr)
require(httpuv)
require(jsonlite)

# 1. Find OAuth settings for github:
#    http://developer.github.com/v3/oauth/
oauth_endpoints("github")

# 2. Register an application at https://github.com/settings/applications
#    Insert your values below - if secret is omitted, it will look it up in
#    the GITHUB_CONSUMER_SECRET environmental variable.
#
#    Use http://localhost:1410 as the callback url
myapp <- oauth_app("quiz2", "663754c307533a825c19",
                   secret="8795f9a4fe93b512520f407a027b1fc1c66e96bf")

# 3. Get OAuth credentials
github_token <- oauth2.0_token(oauth_endpoints("github"), myapp)

# 4. Use API
req <- GET("https://api.github.com/users/jtleek/repos", config(token = github_token))
stop_for_status(req)
output <- content(req)
list(output[[4]]$name, output[[4]]$created_at)

## Question 2
setwd("C:/Users/jeffthatcher/Cloud Drive/RRepos/GetCleanData")
if(!file.exists("quizData")) {
  dir.create("quizData")
}
fileUrl <- "https://d396qusza40orc.cloudfront.net/getdata%2Fdata%2Fss06pid.csv"
download.file(fileUrl, destfile = "./quizData/acs.csv") # curl method is necessary for Mac users getting data from https
list.files("./data") # sort of like the ls() command, shows the files in the directory "./data"
dateDownloaded <- date()
dateDownloaded

options(sqldf.driver = "SQLite") # as per FAQ #7 force SQLite
options(gsubfn.engine = "R") # as per FAQ #5 use R code rather than tcltk
library(RMySQL)
library(sqldf)
library(data.table) # needed for data.table() below

acs <- data.table(read.csv("./quizData/acs.csv"))
query1 <- sqldf("select pwgtp1 from acs where AGEP < 50")

## Question 3
DTq <- unique(acs$AGEP)
sqlq <- sqldf("select distinct AGEP from acs")
x <- DTq == sqlq
sum(x)

## Question 4
library(XML)
connection <- url("http://biostat.jhsph.edu/~jleek/contact.html")
htmlCode <- readLines(connection)
close(connection)
c(nchar(htmlCode[10]), nchar(htmlCode[20]), nchar(htmlCode[30]), nchar(htmlCode[100]))

## Question 5
library(XML)
connection <- url("https://d396qusza40orc.cloudfront.net/getdata%2Fwksst8110.for")
htmlCode2 <- readLines(connection)
close(connection)
DT <- data.table(htmlCode2)

url <- "https://d396qusza40orc.cloudfront.net/getdata%2Fwksst8110.for"
lines <- readLines(url, n=10) # peek at the first lines to work out the column widths
w <- c(1, 9, 5, 4, 1, 3, 5, 4, 1, 3, 5, 4, 1, 3, 5, 4, 1, 3)
colNames <- c("filler", "week", "filler", "sstNino12", "filler", "sstaNino12",
              "filler", "sstNino3", "filler", "sstaNino3", "filler", "sstNino34",
              "filler", "sstaNino34", "filler", "sstNino4", "filler", "sstaNino4")
d <- read.fwf(url, w, header=FALSE, skip=4, col.names=colNames)
d <- d[, grep("^[^filler]", names(d))] # keeps columns whose name does not start with one of f/i/l/e/r, dropping the filler columns
sum(d[, 4])
/Quiz_2.R
no_license
nasim751/GetAndCleanData
R
false
false
2,672
r
#' Works with data from Peru's banks. Created by BEST
#'
#' @name SBSR
#' @docType package
NULL

#' Dataset for working with data on the financial indicators of local banks.
#'
#' Dataset for working with data on the financial indicators of local banks.
#'
#' @docType data
#' @keywords datasets
#' @name bancos
#' @usage data(bancos)
#' @format Dataset for working with data on the financial indicators of local banks.
#'
NULL
/R/SBSR.R
permissive
cRistiancec/SBSR
R
false
false
473
r
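# A minimal sketch of loading the dataset documented above; the data(bancos)
# call comes from the @usage field, and the columns are not documented, so
# only generic inspection is shown (assumes the SBSR package is installed):
library(SBSR)
data(bancos)
str(bancos)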
#' Add Correlates of War direct contiguity information to a dyad-year or state-year data frame
#'
#' @description \code{add_contiguity()} allows you to add Correlates of War contiguity
#' data to a dyad-year or state-year data frame.
#'
#' @return \code{add_contiguity()} takes a dyad-year data frame and adds information
#' about the contiguity relationship based on the "master records" for the
#' Correlates of War direct contiguity data (v. 3.2). If the data are dyad-year, the function
#' returns the lowest contiguity type observed in the dyad-year (if contiguity is observed at all).
#' If the data are state-year, the function returns the total number of land and sea borders
#' calculated from these master records.
#'
#' @details The contiguity codes in the dyad-year data range from 1 to 6. 1 = direct land contiguity. 2 =
#' separated by 12 miles of water or fewer (a la Stannis Baratheon). 3 = separated by
#' 24 miles of water or fewer (but more than 12 miles). 4 = separated by 150 miles
#' of water or fewer (but more than 24 miles). 5 = separated by 400 miles of water
#' or fewer (but more than 150 miles). 6 = separated by more than 400 miles of water (i.e.
#' not contiguous).
#'
#' For additional clarity, the "master records" produce duplicates for cases when
#' the contiguity relationship changed in a given year. This function returns the
#' *minimum* contiguity relationship observed in that given year. There should be no
#' duplicates in the returned output.
#'
#' @author Steven V. Miller
#'
#' @param data a dyad-year data frame (either "directed" or "non-directed") or a state-year data frame
#'
#' @references Stinnett, Douglas M., Jaroslav Tir, Philip Schafer, Paul F. Diehl, and Charles Gochman
#' (2002). "The Correlates of War Project Direct Contiguity Data, Version 3." Conflict
#' Management and Peace Science 19 (2):58-66.
#'
#' @examples
#'
#' \donttest{
#' # just call `library(tidyverse)` at the top of your script
#' library(magrittr)
#'
#' cow_ddy %>% add_contiguity()
#'
#' create_stateyears() %>% add_contiguity()
#' }
#'
#' @importFrom rlang .data
#' @importFrom rlang .env

add_contiguity <- function(data) {
  if (length(attributes(data)$ps_data_type) > 0 && attributes(data)$ps_data_type == "dyad_year") {
    if (!all(i <- c("ccode1", "ccode2") %in% colnames(data))) {
      stop("add_contiguity() merges on two Correlates of War codes (ccode1, ccode2), which your data don't have right now. Make sure to run create_dyadyears() at the top of the pipe. You'll want the default option, which returns Correlates of War codes.")
    } else {
      cow_contdir %>%
        mutate(styear = as.numeric(str_sub(.data$begin, 1, 4)),
               endyear = as.numeric(str_sub(.data$end, 1, 4))) %>%
        rowwise() %>%
        mutate(year = list(seq(.data$styear, .data$endyear))) %>%
        unnest(.data$year) %>%
        select(.data$ccode1, .data$ccode2, .data$conttype, .data$year) %>%
        group_by(.data$ccode1, .data$ccode2, .data$year) %>%
        filter(.data$conttype == min(.data$conttype)) %>%
        ungroup() -> contdir_years

      data %>%
        left_join(., contdir_years) %>%
        mutate(conttype = case_when(is.na(.data$conttype) ~ 0,
                                    TRUE ~ .data$conttype)) -> data
      return(data)
    }
  } else if (length(attributes(data)$ps_data_type) > 0 && attributes(data)$ps_data_type == "state_year") {
    if (!all(i <- c("ccode") %in% colnames(data))) {
      stop("add_contiguity() merges on the Correlates of War code (ccode), which your data don't have right now. Make sure to run create_stateyears() at the top of the pipe. You'll want the default option, which returns Correlates of War codes.")
    } else {
      cow_contdir %>%
        mutate(styear = as.numeric(str_sub(.data$begin, 1, 4)),
               endyear = as.numeric(str_sub(.data$end, 1, 4))) %>%
        rowwise() %>%
        mutate(year = list(seq(.data$styear, .data$endyear))) %>%
        unnest(.data$year) %>%
        select(.data$ccode1, .data$ccode2, .data$conttype, .data$year) %>%
        mutate(land = ifelse(.data$conttype == 1, 1, 0),
               sea = ifelse(.data$conttype > 1, 1, 0)) %>%
        group_by(.data$ccode1, .data$year) %>%
        summarize(land = sum(.data$land),
                  sea = sum(.data$sea)) %>%
        rename(ccode = .data$ccode1) %>%
        left_join(data, .) %>%
        mutate_at(vars("land","sea"), ~ifelse(is.na(.), 0, .)) -> data
      return(data)
    }
  } else {
    stop("add_contiguity() requires a data/tibble with attributes$ps_data_type of state_year or dyad_year. Try running create_dyadyears() or create_stateyears() at the start of the pipe.")
  }
}
/R/add_contiguity.R
no_license
Louis8102/peacesciencer
R
false
false
4,620
r
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/model2netcdf.JULES.R
\name{model2netcdf.JULES}
\alias{model2netcdf.JULES}
\title{Code to convert JULES output into netCDF format}
\usage{
model2netcdf.JULES(outdir)
}
\arguments{
\item{outdir}{Location of model output}
}
\description{
Convert JULES model output into the PEcAn standard
}
\author{
Michael Dietze
}
/models/jules/man/model2netcdf.JULES.Rd
permissive
Viskari/pecan
R
false
true
386
rd
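The usage section above documents a single-argument call; a hypothetical invocation (the directory path is invented for illustration):

model2netcdf.JULES(outdir = "/data/jules/run_001")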
# This is for Coursera - Week 3 - Course Project
# Assignment to get and clean data collected on 30 subjects doing 6 different activities
# Of those 30, 70% were selected for training (the "train" files) and 30% as test (the "test" files)
# Refer to the README.txt for further explanation.

# First part - Reading all files into R

# reading the test data
xtest <- read.table("test/X_test.txt")
ytest <- read.table("test/y_test.txt")
subjecttest <- read.table("test/subject_test.txt")

# reading the train data
xtrain <- read.table("train/X_train.txt")
ytrain <- read.table("train/y_train.txt")
subjecttrain <- read.table("train/subject_train.txt")

# reading identifiers for later use
columnnames <- read.table("features.txt")       ## This will be used to rename the column names
activities <- read.table("activity_labels.txt") ## This will be used to rename the Activity from numbers

# bind the datasets together
xdata <- rbind(xtest, xtrain)
ydata <- rbind(ytest, ytrain)
subjectdata <- rbind(subjecttest, subjecttrain)

# Second part - extract only mean and std from datasets

# look for the words mean() or std() in columnnames
meanstd <- grep("-(mean|std)\\(\\)", columnnames$V2)

# subset the dataset by mean and std
xdata <- xdata[, meanstd]

# Third part - Use descriptive names for activities

# rename the activity number with the associated activity description
ydata[, 1] <- activities[ydata[, 1], 2]

# Fourth part - Label the variable names appropriately

# rename ydata and subjectdata with a single name each - rename xdata with columnnames
names(subjectdata) <- "subject"
names(ydata) <- "activity"
names(xdata) <- columnnames[meanstd, 2]

# Put them all together nicely :)
finaloutput1 <- cbind(subjectdata, ydata, xdata)
/run_analysis.R
no_license
problemsny/Coursera-Week3-CourseProject
R
false
false
1,749
r
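The grep() pattern in the script above keeps only the mean() and std() features while excluding variants such as meanFreq(); a quick illustration with feature names in the style of features.txt:

feats <- c("tBodyAcc-mean()-X", "fBodyAcc-meanFreq()-X", "tBodyAcc-std()-Y")
grep("-(mean|std)\\(\\)", feats, value = TRUE)
#> [1] "tBodyAcc-mean()-X" "tBodyAcc-std()-Y"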
skip_on_cran()

test_that("tbl_summary", {
  expect_error(tbl_summary(trial) %>% as_flex_table(), NA)
  expect_warning(tbl_summary(trial) %>% as_flex_table(), NA)

  expect_error(
    tbl_summary(trial[c("trt", "age")]) %>%
      modify_table_styling(columns = label,
                           footnote = "test footnote",
                           rows = variable == "age") %>%
      as_flex_table(),
    NA
  )
})

test_that("tbl_summary", {
  expect_error(tbl_summary(trial) %>% as_flex_table(return_calls = TRUE), NA)
  expect_warning(tbl_summary(trial) %>% as_flex_table(return_calls = TRUE), NA)
})

test_that("tbl_regression", {
  expect_error(lm(marker ~ age, trial) %>% tbl_regression() %>% as_flex_table(), NA)
  expect_warning(lm(marker ~ age, trial) %>% tbl_regression() %>% as_flex_table(), NA)
})

test_that("tbl_uvregression", {
  expect_error(trial %>% tbl_uvregression(method = lm, y = age) %>% as_flex_table(), NA)
  expect_warning(trial %>% tbl_uvregression(method = lm, y = age) %>% as_flex_table(), NA)
})

test_that("tbl_survfit", {
  library(survival)
  fit1 <- survfit(Surv(ttdeath, death) ~ trt, trial)

  expect_error(tbl_survfit(fit1, times = c(12, 24), label = "{time} Months") %>% as_flex_table(), NA)
  expect_warning(tbl_survfit(fit1, times = c(12, 24), label = "{time} Months") %>% as_flex_table(), NA)
})

test_that("tbl_merge/tbl_stack", {
  t1 <- glm(response ~ trt + grade + age, trial, family = binomial) %>%
    tbl_regression(exponentiate = TRUE)
  t2 <- coxph(Surv(ttdeath, death) ~ trt + grade + age, trial) %>%
    tbl_regression(exponentiate = TRUE)

  tbl_merge_ex1 <- tbl_merge(
    tbls = list(t1, t2),
    tab_spanner = c("**Tumor Response**", "**Time to Death**")
  )
  expect_error(as_flex_table(tbl_merge_ex1), NA)
  expect_warning(as_flex_table(tbl_merge_ex1), NA)

  tbl_stack_ex1 <- tbl_stack(
    tbls = list(t1, t2),
    group_header = c("**Tumor Response**", "**Time to Death**")
  )
  expect_error(as_flex_table(tbl_stack_ex1), NA)
  expect_warning(as_flex_table(tbl_stack_ex1), NA)
})
/tests/testthat/test-as_flex_table.R
permissive
mtysar/gtsummary
R
false
false
2,021
r
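A sketch of the downstream step these conversions enable (save_as_docx() is from the flextable package; the output file name is invented):

library(gtsummary)
library(flextable)

tbl_summary(trial) %>%
  as_flex_table() %>%
  save_as_docx(path = "summary_table.docx")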
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/formula.R
\name{combine_terms}
\alias{combine_terms}
\title{Combine model term sub-matrices}
\usage{
combine_terms(x)
}
\description{
Combine model term sub-matrices
}
/man/combine_terms.Rd
no_license
sakrejda/hierarchy
R
false
true
246
rd
/Data_prep.R
no_license
KasperEinarson/Statistical-analysis-and-deep-learning-methods-for-pattern-exploration-in-pharmacokinetic-profiles
R
false
false
12,846
r
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/servicemanagement_objects.R
\name{ConfigChange}
\alias{ConfigChange}
\title{ConfigChange Object}
\usage{
ConfigChange(newValue = NULL, changeType = NULL, element = NULL,
  oldValue = NULL, advices = NULL)
}
\arguments{
\item{newValue}{Value of the changed object in the new Service configuration}

\item{changeType}{The type for this change, either ADDED, REMOVED, or MODIFIED}

\item{element}{Object hierarchy path to the change, with levels separated by a '.'}

\item{oldValue}{Value of the changed object in the old Service configuration}

\item{advices}{Collection of advice provided for this change, useful for determining the possible impact of the change}
}
\value{
ConfigChange object
}
\description{
ConfigChange Object
}
\details{
Autogenerated via \code{\link[googleAuthR]{gar_create_api_objects}}

Output generated from semantically comparing two versions of a service
configuration. Includes detailed information about a field that has changed,
with applicable advice about potential consequences for the change, such as
backwards-incompatibility.
}
/googleservicemanagementv1.auto/man/ConfigChange.Rd
permissive
GVersteeg/autoGoogleAPI
R
false
true
1,106
rd
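Given the constructor signature documented above, a hedged illustration of building the object (every field value below is invented):

cc <- ConfigChange(
  element    = "logging.producer_destinations[0].logs",
  changeType = "MODIFIED",
  oldValue   = "activity_log",
  newValue   = "activity_history_log"
)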
#' #############################################################################
#'
#' Serial Tempering Diagnostics
#'
#' Note: First, load the RData generated by serial tempering runs
#'
#' See also:
#'   lda_fgs_st_hs_synth_h25.R --- uses a synthetic dataset
#'   lda_fgs_st_hs_synth_h22.R --- uses a synthetic dataset
#'   lda_fgs_st_hs_C1.R --- uses a real dataset
#'
#' #############################################################################

library(ldamcmc);
library(mcmcse);

setwd(data.dir);

############################ Visualize \hat{M} and \tilde{M} ###################

plot_meshgrid(model$m.hat, x.axis2, y.axis2, "alpha", "eta", "Estimate of m(h)");
plot_meshgrid(model$m.tilde, x.axis2, y.axis2, "alpha", "eta", "Estimate of m(h)");

############################ top.n \hat{M} values ###################

top.n <- 10
si <- sort(model$m.hat, decreasing=T, index.return=T) # "x" "ix"
msv <- rbind(h.grid[, si$ix[1:top.n]], model$m.hat[si$ix[1:top.n]])
rownames(msv) <- c("alpha", "eta", "B(h)-est")
print(msv, digits=3)

############################ top.n \tilde{M} values ###################

top.n <- 10
si <- sort(model$m.tilde, decreasing=T, index.return=T) # "x" "ix"
msv <- rbind(h.grid[, si$ix[1:top.n]], model$m.tilde[si$ix[1:top.n]])
rownames(msv) <- c("alpha", "eta", "B(h)-est")
print(msv, digits=3)

############################ Euclidean distance from \hat{\hat{h}} to the truth

# this only works with synthetic data
sqrt(sum((h.grid[, si$ix[1]] - c(gen.alpha, gen.eta))^2))

############################ Subgrid Occupancies ###########################

m <- 1e+2;
occu.fn <- paste(fn.prefix, "-itr", tuning.iter, "-occu", sep = "")
plot_meshgrid(model$st.grid.occupancies[, tuning.iter]/m, x.axis, y.axis,
              "\nalpha", "\neta", "\nOccupancies", "", occu.fn, "antiquewhite");

############################ Subgrid Occupancies for an Iteration #########

titer <- 2
plot_meshgrid(model$st.grid.zetas[, titer], x.axis, y.axis, "alpha", "eta", "zetas")

si <- sort(model$st.grid.zetas[, titer], decreasing=T, index.return=T) # "x" "ix"
sv <- rbind(st.grid[, si$ix[1:20]], model$st.grid.zetas[si$ix[1:20], titer])
rownames(sv) <- c("alpha", "eta", "hat{B}(h)")
print(sv, digits=3) # print best B(h) values and h values

hist(model$st.grid.occupancies[, titer], breaks=15)
model$st.grid.occupancies[gen.st.grid.index, titer] # occupancy for the true h
model$st.grid.occupancies
/demo/lda_fgs_st_hs_diagnostics.R
permissive
clintpgeorge/ldamcmc
R
false
false
2,490
r
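The diagnostics above assume objects such as model, h.grid, st.grid, and the axis vectors are already in the workspace; a minimal sketch of the loading step the header refers to (the file name is hypothetical):

load(file.path(data.dir, "lda-fgs-st-C1.RData"))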
#########################################################################################
# Prepared for Gabor's Data Analysis
#
# Data Analysis for Business, Economics, and Policy
# by Gabor Bekes and Gabor Kezdi
# Cambridge University Press 2021
#
# gabors-data-analysis.com
#
# License: Free to share, modify and use for educational purposes.
# Not to be used for commercial purposes.

# CHAPTER 18
# CH18B Forecasting a home price index
# case-schiller-la dataset
# version 0.91 2020-01-08
#########################################################################################


###########
#
# Clear memory
rm(list=ls())

# Descriptive statistics and regressions
library(tidyverse)
library(fpp3)
library(cowplot)

# set data dir, data used
source("set-data-directory.R")  # data_dir must be first defined
# option A: open material as project
# option B: set working directory for da_case_studies
#           example: setwd("C:/Users/bekes.gabor/Documents/github/da_case_studies/")

# load theme and functions
source("ch00-tech-prep/theme_bg.R")
source("ch00-tech-prep/da_helper_functions.R")

data_in <- paste(data_dir, "case-shiller-la", "clean/", sep = "/")
use_case_dir <- "ch18-case-shiller-la/"

data_out <- use_case_dir
output <- paste0(use_case_dir, "output/")
create_output_if_doesnt_exist(output)


#############################
# RMSE functions
#############################

get_RMSE_from_model <- function(m, resid_col_name = ".resid", groupby = c(".id", ".model")){
  m %>%
    residuals() %>%
    as_tibble() %>%
    group_by_at(groupby) %>%
    summarise(RMSE = mean(get(resid_col_name)**2, na.rm = TRUE)**(1/2))
}

get_MSE_from_forecast <- function(forecast, groupby = c(".id", ".model")){
  forecast %>%
    as_tibble() %>%
    group_by_at(groupby) %>%
    summarise(MSE = mean(e^2)) %>%
    ungroup()
}


#############################
# DATA PREP
#############################

# load raw data
data <- read_csv(paste0(data_in, "homeprices-data-2000-2018.csv"))

# 18 years data
# 1 year holdout
# 4 years of test
# 13 years of train (rolling window)

#data <- data %>% mutate(date = yearmonth(date))

# pick if seasonal or non seasonal version used, will be cut later
# here we pick pn, not seasonally adjusted
data <- data %>% mutate(date = yearmonth(date))
data <- data %>% mutate(
  p = pn,
  u = us,
  emp = emps
)

data <- data %>%
  mutate(
    dp = difference(p, lag=1, order_by = date),
    p_lag = lag(p),
    lnp = log(p),
    dlnp = difference(lnp, lag=1, order_by = date),
    lnp_lag = lag(lnp),
    dlnp_lag = lag(dlnp),
    du = difference(u, lag=1, order_by = date),
    lnemp = log(emp),
    dlnemp = difference(lnemp, lag=1, order_by = date)
  ) %>%
  mutate(
    trend = 1:nrow(data),
    month = as.factor(month(date))
  )

data <- data %>% as_tsibble(index=date)

# now save the workfile with data from 2000 through 2018
data %>% write_rds(paste(data_in, "case-shiller-workfile-2000-2018.rds", sep=""))

# and now create and save the workfile with data from 2000 through 2017
data <- data %>% filter(year <= 2017)
data %>% write_rds(paste(data_in, "case-shiller-workfile-2000-2017.rds", sep=""))


#############################
# EXPLORE
#############################

data <- read_rds(paste(data_in, "case-shiller-workfile-2000-2017.rds", sep=""))

# Last year of data
data_holdout <- data %>% slice((n()-11):n())

# Rest of data for work set
data_work <- data %>% slice(1:(n()-12))

# Prepare for cross-validation, define size of train
train_length = 13
data_tr <- data_work %>%
  slice(1:(n()-12)) %>%  # last year of training data not used in any fold as training
  slide_tsibble(.size = train_length*12, .step = 12)
data_cv_test <- data_work %>%
  slice(train_length*12+1:n()) %>%
  slide_tsibble(.size = 12, .step = 12) %>%
  select(trend, month)


#############################
# GRAPH 18.8

# Plot price index
price_index_plot <- ggplot(data = data, aes(x = as.Date(date), y = p))+
  geom_line_da() +
  ylab("Case-shiller Price index") +
  xlab("Date (month)") +
  scale_y_continuous(limits = c(50,300), breaks = seq(50,300,50)) +
  scale_x_date(expand = c(0.01, 0.01),
               breaks = as.Date(c("2000-01-01", "2003-01-01", "2006-01-01", "2009-01-01",
                                  "2012-01-01", "2015-01-01", "2018-01-01")),
               labels = date_format("%b%Y")) +
  theme_bg()
price_index_plot
#save_fig("cs_tseries_p_R", output, "small")
save_fig("ch18-figure-8-cs-tseries-p", output, "small")

# additional graphs, not in textbook

# Plot first difference of price index
dp_plot <- ggplot(data = data, aes(x = as.Date(date), y = dp))+
  geom_line_da() +
  ylab("First difference of price index") +
  xlab("Date (month)") +
  scale_y_continuous(limits = c(-10,8), breaks = seq(-10,8,2)) +
  scale_x_date(expand = c(0.01, 0.01),
               breaks = as.Date(c("2000-01-01", "2003-01-01", "2006-01-01", "2009-01-01",
                                  "2012-01-01", "2015-01-01", "2018-01-01")),
               labels = date_format("%b%Y")) +
  theme_bg()
dp_plot

# Plot log difference of price index
dlnp_plot <- ggplot(data = data, aes(x = as.Date(date), y = dlnp))+
  geom_line_da() +
  ylab("Log first difference of price index") +
  xlab("Date (month)") +
  scale_y_continuous(limits = c(-0.04,0.04), breaks = seq(-0.04,0.04,0.01)) +
  scale_x_date(date_breaks="2 years", labels = date_format("%b%Y")) +
  theme_bg()
dlnp_plot


#############################
# GRAPH 18.10
#############################

# Plot employment
emp_plot <- ggplot(data = data, aes(x = as.Date(date), y = emp))+
  geom_line_da() +
  ylab("Employment (in thousands)") +
  xlab("Date (month)") +
  # scale_y_continuous(limits = c(10000,18000), breaks = seq(10000,18000,2000)) +
  scale_x_date(expand = c(0.01, 0.01),
               breaks = as.Date(c("2000-01-01", "2003-01-01", "2006-01-01", "2009-01-01",
                                  "2012-01-01", "2015-01-01", "2018-01-01")),
               labels = date_format("%b%Y")) +
  theme_bg()
emp_plot
save_fig("ch18-figure-10c-cs-tseries-emp", output, "small")

# Plot log diff employment
ldemp_plot <- ggplot(data = data, aes(x = as.Date(date), y = dlnemp))+
  geom_line_da() +
  ylab("Log change in employment") +
  xlab("Date (month)") +
  scale_x_date(expand = c(0.01, 0.01),
               breaks = as.Date(c("2000-01-01", "2003-01-01", "2006-01-01", "2009-01-01",
                                  "2012-01-01", "2015-01-01", "2018-01-01")),
               labels = date_format("%b%Y")) +
  theme_bg()
ldemp_plot
save_fig("ch18-figure-10d-cs-tseries-dlnemp", output, "small")

# Plot unemployment rate
u_plot <- ggplot(data = data, aes(x = as.Date(date), y = u))+
  geom_line_da() +
  ylab("Unemployment rate (percent)") +
  xlab("Date (month)") +
  # scale_y_continuous(limits = c(10000,18000), breaks = seq(10000,18000,2000)) +
  scale_x_date(expand = c(0.01, 0.01),
               breaks = as.Date(c("2000-01-01", "2003-01-01", "2006-01-01", "2009-01-01",
                                  "2012-01-01", "2015-01-01", "2018-01-01")),
               labels = date_format("%b%Y")) +
  theme_bg()
u_plot
save_fig("ch18-figure-10a-cs-tseries-u", output, "small")

# Plot diff unemployment
du_plot <- ggplot(data = data, aes(x = as.Date(date), y = du))+
  geom_line_da() +
  ylab("Change in unemployment rate") +
  xlab("Date (month)") +
  scale_x_date(expand = c(0.01, 0.01),
               breaks = as.Date(c("2000-01-01", "2003-01-01", "2006-01-01", "2009-01-01",
                                  "2012-01-01", "2015-01-01", "2018-01-01")),
               labels = date_format("%b%Y")) +
  theme_bg()
du_plot
save_fig("ch18-figure-10b-cs-tseries-du", output, "small")


##########################################################
# Create work set and holdout set
##########################################################

# Last year of data
data_holdout <- data %>% slice((n()-11):n())

# Rest of data for work set
data_work <- data %>% slice(1:(n()-12))

# Prepare for cross-validation, define size of train
train_length = 13
data_tr <- data_work %>%
  slice(1:(n()-12)) %>%  # last year of training data not used in any fold as training
  slide_tsibble(.size = train_length*12, .step = 12)
data_cv_test <- data_work %>%
  slice(train_length*12+1:n()) %>%
  slide_tsibble(.size = 12, .step = 12) %>%
  select(trend, month)


#############################################
# Use tseries of price index only
# Fit models with months, trend, ARIMA
#############################################

# To cross-validate auto.arima,
# step 1: run it and find ARIMA specification on the whole train data, p,q chosen by BIC
#         note, need to add PDQ(0,0,0) to models
#         in order to shut down the fancy seasonality-fitting part of auto ARIMA
# step 2: use the selected model as a candidate

# M1 p ~ month + trend, without any ARIMA
m1_formula <- "p ~ month + trend"
m1 <- TSLM(as.formula(m1_formula))

# M2 p ~ auto ARIMA
m2_pre <- data_work %>% model(auto_arima = ARIMA(p ~ PDQ(0,0,0)))
p2_auto <- m2_pre$auto_arima[[1]]$fit$spec$p
q2_auto <- m2_pre$auto_arima[[1]]$fit$spec$q
d2_auto <- m2_pre$auto_arima[[1]]$fit$spec$d
m2_formula <- paste0("p ~ pdq(", paste(p2_auto, d2_auto, q2_auto, sep=","), ") + PDQ(0,0,0)")
m2 <- ARIMA(as.formula(m2_formula))

# M3 p ~ auto ARIMA + month
m3_pre <- data_work %>% model(auto_arima = ARIMA(p ~ month + PDQ(0,0,0)))
p3_auto <- m3_pre$auto_arima[[1]]$fit$spec$p
q3_auto <- m3_pre$auto_arima[[1]]$fit$spec$q
d3_auto <- m3_pre$auto_arima[[1]]$fit$spec$d
m3_formula <- paste0("p ~ pdq(", paste(p3_auto, d3_auto, q3_auto, sep=","), ") + PDQ(0,0,0) + month")
m3 <- ARIMA(as.formula(m3_formula))

# M4 p ~ auto ARIMA + month + trend
m4_pre <- data_work %>% model(auto_arima = ARIMA(p ~ month + trend + PDQ(0,0,0)))
p4_auto <- m4_pre$auto_arima[[1]]$fit$spec$p
q4_auto <- m4_pre$auto_arima[[1]]$fit$spec$q
d4_auto <- m4_pre$auto_arima[[1]]$fit$spec$d
m4_formula <- paste0("p ~ pdq(", paste(p4_auto, d4_auto, q4_auto, sep=","), ") + PDQ(0,0,0) + month + trend")
m4 <- ARIMA(as.formula(m4_formula))

# M5 dp ~ month + trend, without any ARIMA
m5_formula <- "dp ~ month + trend"
m5 <- TSLM(as.formula(m5_formula))

# M6 lnp ~ auto ARIMA + month
m6_pre <- data_work %>% model(auto_arima = ARIMA(lnp ~ month + PDQ(0,0,0)))
p6_auto <- m6_pre$auto_arima[[1]]$fit$spec$p
q6_auto <- m6_pre$auto_arima[[1]]$fit$spec$q
d6_auto <- m6_pre$auto_arima[[1]]$fit$spec$d
m6_formula <- paste0("lnp ~ month + pdq(", paste(p6_auto, d6_auto, q6_auto, sep=","), ") + PDQ(0,0,0)")
m6 <- ARIMA(as.formula(m6_formula))


###########################################
# create forecasts and cross-validate

# cross-validating M1-M4 with p on left-hand-side
models_1_4 <- data_tr %>%
  model(m1 = m1,
        m2 = m2,
        m3 = m3,
        m4 = m4
  )
rmse_train_1_4 <- models_1_4 %>% get_RMSE_from_model()

forecast_1_4 <- models_1_4 %>%
  forecast(new_data = data_cv_test) %>%
  as_tsibble() %>%
  dplyr::rename(p_pred = .mean) %>%
  select(.id, .model, date, p_pred) %>%
  left_join(data[, c("date","p")]) %>%
  group_by(.id, .model) %>%
  mutate(e = p - p_pred) %>%
  ungroup()

# Compute MSE for folds
summary_1_4 <- forecast_1_4 %>% get_MSE_from_forecast()

# cross-validating M5 with dp on left-hand-side
model_5 <- data_tr %>% model(m5 = m5)
rmse_train_dp <- model_5 %>% get_RMSE_from_model()

forecast_5 <- model_5 %>%
  forecast(new_data = data_cv_test) %>%
  as_tsibble() %>%
  dplyr::rename(dp_pred = .mean) %>%
  select(.id, .model, date, dp_pred) %>%
  left_join(data[, c("date","p","p_lag")]) %>%
  group_by(.id, .model) %>%
  mutate(p_pred = cumsum(dp_pred) + p_lag[1]) %>%
  mutate(e = p - p_pred) %>%
  ungroup()

# Compute MSE for folds
summary_5 <- forecast_5 %>% get_MSE_from_forecast()

# cross-validating M6 with lnp on left-hand-side
model_6 <- data_tr %>% model(m6 = m6)
rmse_train_6 <- model_6 %>% get_RMSE_from_model()

forecast_6 <- model_6 %>%
  forecast(new_data = data_cv_test) %>%
  as_tsibble() %>%
  dplyr::rename(lnp_pred = .mean) %>%
  select(.id, .model, date, lnp_pred) %>%
  left_join(data[, c("date","p")]) %>%
  left_join(rmse_train_6) %>%
  group_by(.id, .model) %>%
  mutate(p_pred = exp(lnp_pred)*exp((RMSE**2)/2)) %>%
  mutate(e = p - p_pred) %>%
  ungroup()

# Compute MSE for folds
summary_6 <- forecast_6 %>% get_MSE_from_forecast()
summary_6


######################################
# Table 18.2
# average cv RMSE for models 1-6
######################################

summary_folds <- bind_rows(list(summary_1_4, summary_5, summary_6)) %>%
  spread(.id, MSE) %>%
  as.data.frame()
colnames(summary_folds) <- c("Model", paste0("Fold ", colnames(summary_folds)[-1]))

summary_final <- bind_rows(list(summary_1_4, summary_5, summary_6)) %>%
  group_by(.model) %>%
  dplyr::summarise(CV_RMSE = sum(MSE/4)**0.5) %>%
  as.data.frame()
model_formulas <- summary_final %>%
  dplyr::pull(.model) %>%
  paste0("_formula") %>%
  sapply(FUN = get)
colnames(summary_final) <- c("Model", "CV RMSE")

summary_table_18_2 <- summary_final %>%
  add_column("Model def" = model_formulas, .before = "CV RMSE")
summary_table_18_2


############################################
# VAR

# Comment: In the textbook, Table 18.3 has VAR RMSE values for the model without seasonality.
# It’s noted at \url{https://gabors-data-analysis.com/errata/#part-iii}
# Without seasonality, we have: RMSE (average) = 8.0. With seasonality, we have: RMSE (average) = 4.5.
# In R we could not figure out how to add seasonality. Let us know if you solved it...
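
# Note on the retransformation in forecast_6 above: if ln(p) is approximately
# normal with mean mu and sd sigma, then E[p] = exp(mu + sigma^2/2), not
# exp(mu); plugging the training RMSE in for sigma yields the exp((RMSE**2)/2)
# factor used there. A tiny numeric illustration (both values below are made up):
lnp_pred_ex <- 5.5
sigma_hat_ex <- 0.02
exp(lnp_pred_ex) * exp(sigma_hat_ex^2 / 2)  # bias-corrected level forecast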
var_formula <- "vars(dp, du, dlnemp) ~ AR(1)"
var <- VAR(as.formula(var_formula))

# create forecast and cross-validate
var_data <- data_tr %>%
  filter(!is.na(dp)) %>%  # need to exclude first row
  model(var = var)
rmse_train_var <- var_data %>% get_RMSE_from_model(resid_col_name = "dp")

forecast_var <- var_data %>%
  forecast(h = 12) %>%
  as_tsibble() %>%
  dplyr::rename(dp_pred = .mean_dp) %>%
  select(.id, .model, date, dp_pred) %>%
  left_join(data[, c("date","p","p_lag")]) %>%
  group_by(.id, .model) %>%
  mutate(p_pred = cumsum(dp_pred) + p_lag[1]) %>%
  mutate(e = p - p_pred) %>%
  ungroup()

# Compute MSE for folds
summary_var <- forecast_var %>% get_MSE_from_forecast()


##########################################
# TABLE 18.3
# rmse by folds + cv rmse, for all 7 models
##########################################

summary_folds <- bind_rows(list(summary_1_4, summary_5, summary_6, summary_var)) %>%
  spread(.id, MSE) %>%
  as.data.frame()
colnames(summary_folds) <- c("Model", paste0("Fold ", colnames(summary_folds)[-1]))

# Table 18.3 RMSE by folds
summary_rmse_folds <- summary_folds %>% mutate_at(vars(-Model), sqrt)
summary_rmse_folds

# Table 18.3 last column: cv average RMSE
# create average MSE across folds and take square root
summary_cvavg <- bind_rows(list(summary_1_4, summary_5, summary_6, summary_var)) %>%
  group_by(.model) %>%
  dplyr::summarise(CV_RMSE = sum(MSE/4)**0.5) %>%
  as.data.frame()
model_formulas <- summary_cvavg %>%
  dplyr::pull(.model) %>%
  paste0("_formula") %>%
  sapply(FUN = get)
colnames(summary_cvavg) <- c("Model", "CV RMSE")

summary_table_18_3_lastcol <- summary_cvavg %>%
  add_column("Model def" = model_formulas, .before = "CV RMSE")
summary_table_18_3_lastcol


###########################x
# predict for holdout
###########################x

conf_level <- 80
conf_level_chr <- paste0(as.character(conf_level), "%")

# best model is M4
bestm <- "m4"

# re-estimate best models on full work set
model_best <- data_work %>% model(best = get(bestm))
rmse_train_best <- model_best %>% get_RMSE_from_model(groupby = c(".model"))

forecast_holdout_best <- model_best %>%
  forecast(new_data = select(data_holdout, trend, month)) %>%
  hilo(level = c(conf_level)) %>%
  as_tsibble() %>%
  rename(p_pred = .mean) %>%
  select(.model, date, p_pred, conf_level_chr) %>%
  unpack_hilo(conf_level_chr) %>%
  left_join(data_holdout[, c("date","p")]) %>%
  mutate(e = p - p_pred) %>%
  ungroup()

summary_holdout_best <- forecast_holdout_best %>%
  get_MSE_from_forecast(groupby = c(".model"))
summary_holdout_best


#############################
# GRAPHS
#############################

# graph actual vs prediction from best arima
data_plot <- data %>%
  left_join(forecast_holdout_best) %>%
  filter(year(date) >= 2015)

pred_p_plot <- ggplot(data = data_plot, aes(x = as.Date(date), y = p))+
  geom_line(size = 0.8, aes(color = "Actual")) +
  geom_line(aes(x = as.Date(date), y = p_pred, color = "Prediction "), size = 1) +
  #annotate("text", x = yearmonth("2017-08"), y = 257, label = "Prediction ", size=2.5, vjust = 2, color = color[2])+
  #annotate("text", x = yearmonth("2017-03"), y = 258, label = "Actual", size=2.5, hjust = 1.5, color = color[1])+
  ylab("Case-Shiller Home Price Index") +
  xlab("Date (month)") +
  scale_color_manual(name="", values=c(color[1], color[2])) +
  scale_x_date(date_breaks="1 years", labels = date_format("%b%Y")) +
  theme_bg()+
  theme(legend.position=c(0.7,0.1),
        legend.direction = "horizontal",
        legend.text = element_text(size = 4),
        legend.key.width = unit(.8, "cm"),
        legend.key.height = unit(.2, "cm")) +
  guides(linetype = guide_legend(override.aes = list(size = 0.6)))
pred_p_plot
save_fig("ch18-figure-9a-pred-p-mp", output, "small")

# with uncertainty fan
conf_level_lower <- paste0(conf_level_chr, "_lower")
conf_level_upper <- paste0(conf_level_chr, "_upper")

pred_p_mp_fan_R <- ggplot(data = data_plot, aes(x = as.Date(date), y = p))+
  geom_line(size = 0.8, aes(color = "Actual")) +
  geom_line(aes(x = as.Date(date), y = p_pred, color = "Prediction "), size = 1) +
  geom_ribbon(aes(ymin = get(conf_level_lower), ymax = get(conf_level_upper)), alpha=0.2, bg=color[2]) +
  ylab("Case-Shiller Price index") +
  xlab("Date (month)") +
  scale_color_manual(name="", values=c(color[1], color[2])) +
  scale_x_date(date_breaks="1 years", labels = date_format("%b%Y")) +
  theme_bg()+
  theme(legend.position=c(0.7,0.1),
        legend.direction = "horizontal",
        legend.text = element_text(size = 4),
        legend.key.width = unit(.8, "cm"),
        legend.key.height = unit(.2, "cm")) +
  guides(linetype = guide_legend(override.aes = list(size = 0.6)))
pred_p_mp_fan_R
save_fig("ch18-figure-9b-pred-p-mp-fan", output, "small")


###########################
# EXTERNAL VALIDITY
# do the prediction for an extra year
###########################

data <- read_rds(paste(data_in, "case-shiller-workfile-2000-2018.rds", sep=""))

# Last year of data
data_holdout <- data %>% slice((n()-11):n())

# Rest of data for work set
data_work <- data %>% slice(1:(n()-12))

###########################x
# predict for holdout
###########################x

# best model is M4
bestm <- "m4"

# re-estimate best model on full work set
model_best <- data_work %>% model(best = get(bestm))
rmse_train_best <- model_best %>% get_RMSE_from_model(groupby = c(".model"))

forecast_holdout_best <- model_best %>%
  forecast(new_data = select(data_holdout, trend, month)) %>%
  as_tsibble() %>%
  rename(p_pred = .mean) %>%
  select(.model, date, p_pred) %>%
  left_join(data_holdout[, c("date","p")]) %>%
  mutate(e = p - p_pred) %>%
  ungroup()

summary_holdout_best <- forecast_holdout_best %>%
  get_MSE_from_forecast(groupby = c(".model"))
summary_holdout_best


#############################
# GRAPH 18.11
# 2015-18, actual vs prediction from best arima

bestm <- "m4"

# re-estimate best models on full train set
model_best <- data_work %>% model(best = get(bestm))
rmse_train_best <- model_best %>% get_RMSE_from_model(groupby = c(".model"))

forecast_holdout_best <- model_best %>%
  forecast(new_data = select(data_holdout, trend, month)) %>%
  hilo(level = c(conf_level)) %>%
  as_tsibble() %>%
  rename(p_pred = .mean) %>%
  select(.model, date, p_pred, conf_level_chr) %>%
  unpack_hilo(conf_level_chr) %>%
  left_join(data_holdout[, c("date","p")]) %>%
  mutate(e = p - p_pred) %>%
  ungroup()

summary_holdout_best <- forecast_holdout_best %>%
  get_MSE_from_forecast(groupby = c(".model"))

# graph actual vs prediction from best arima
data_plot <- data %>%
  left_join(forecast_holdout_best) %>%
  filter(year(date) >= 2015)

# with uncertainty fan
pred_p_mp_fan2018_R <- ggplot(data = data_plot, aes(x = as.Date(date), y = p))+
  geom_line(size = 0.8, aes(color = "Actual")) +
  geom_line(aes(x = as.Date(date), y = p_pred, color = "Prediction "), size = 1) +
  geom_ribbon(aes(ymin = get(conf_level_lower), ymax = get(conf_level_upper)), alpha=0.2, bg=color[2]) +
  ylab("Case-Shiller Price index") +
  xlab("Date (month)") +
  scale_color_manual(name="", values=c(color[1], color[2])) +
  scale_x_date(date_breaks="1 years", labels = date_format("%b%Y")) +
  theme_bg()+
  theme(legend.position=c(0.7,0.1),
        legend.direction = "horizontal",
        legend.text = element_text(size = 4),
        legend.key.width = unit(.8, "cm"),
        legend.key.height = unit(.2, "cm")) +
  guides(linetype = guide_legend(override.aes = list(size = 0.6)))
pred_p_mp_fan2018_R
save_fig("ch18-figure-11-pred-p-mp-fan2018", output, "small")
/ch18-case-shiller-la/ch18-ts-pred-homeprices.R
no_license
LIKE4986/da_case_studies
R
false
false
20,794
r
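The rolling-window cross-validation in the script above is driven by tsibble::slide_tsibble(); a toy illustration of how it expands a series into overlapping folds keyed by .id:

library(tsibble)
library(dplyr)

toy <- tsibble(idx = 1:6, x = letters[1:6], index = idx)
toy %>% slide_tsibble(.size = 3, .step = 1)  # four folds of three rows each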
\name{key_merge}
\alias{key_merge}
\title{Merge Demographic Information with Person/Text Transcript}
\usage{
key_merge(transcript.df, key.df, common.column = NULL,
  defualt.arrange = TRUE)
}
\arguments{
  \item{transcript.df}{The text/person transcript dataframe.}

  \item{key.df}{The demographic dataframe.}

  \item{common.column}{The column(s) shared by \code{transcript.df} and
  \code{key.df}.  If \code{NULL}, the function defaults to using any columns
  with the same name.}

  \item{defualt.arrange}{logical.  If \code{TRUE}, arranges the columns with
  text to the far right.}
}
\value{
Outputs a merged transcript dataframe with demographic
information.
}
\description{
Wrapper function (\code{\link[base]{merge}}) for merging
demographic information with a person/text transcript.
}
\examples{
#First view transcript dataframe and demographics dataframe.
ltruncdf(list(raj, raj.demographics), 10, 50)
merged.raj <- key_merge(raj, raj.demographics)
htruncdf(merged.raj, 10, 40)
}
\seealso{
\code{\link[base]{merge}}
}
\keyword{demographic}
\keyword{merge}
/man/key_merge.Rd
no_license
trinker/qdap2
R
false
false
1,067
rd
# This function takes an input n and simulates a single blow at n candles,
# returning how many candles that blow extinguishes
blow_n_candles_once = function(n) {
  # returns a random number between 1 and n inclusive
  return(sample(1:n, 1))
}

# This function takes an input n and returns the number of attempts to blow out n candles
# assuming that each blow extinguishes between 1 and all of the remaining candles
blow_out_candles = function(n) {
  # a is the number of attempts, and we start with 0 attempts
  a = 0
  # while we still have candles left, we keep on blowing out candles
  # each time we blow out a number of candles, we subtract them from the count
  while (n > 0) {
    n = n - blow_n_candles_once(n)
    # each time we blow, the number of attempts increases by 1
    a = a + 1
  }
  # we return the number of attempts
  return(a)
}

# This function takes inputs num_candles and num_repetitions
# it runs the simulation of blowing out num_candles candles num_repetitions times
blow_and_plot = function(num_candles, num_repetitions) {
  # we repeat blowing out candles the specified number of times and note down attempt counts
  all_experiments = replicate(num_repetitions, blow_out_candles(num_candles))
  # we count the frequency of each number of attempts
  all_experiments_table = table(all_experiments)
  # we plot the frequency count and return it
  barplot(all_experiments_table)
  return(all_experiments_table)
}

# This function takes no inputs and plots the average number of attempts needed to
# blow out from 1 to 100 candles, averaging over 1,000 simulations
blow_1_to_100_candles = function() {
  # n is the number of candles, and we start with 1
  n = 1
  # we keep our averages in a numeric list with 100 values
  avgs = numeric(100)
  # we simulate all candle counts from 1 to 100
  while (n < 101) {
    # we set each slot in the list to the average number of attempts needed to blow out
    # its index-number of candles over 1,000 simulations
    avgs[n] = mean(replicate(1000, blow_out_candles(n)))
    # we move on to the next candle count
    n = n + 1
  }
  # we plot the averages and return them
  barplot(avgs)
  return(avgs)
}
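
# Example usage (a sketch; the seed value is an arbitrary choice for reproducibility)
set.seed(42)
blow_out_candles(10)                                     # attempts needed for 10 candles
blow_and_plot(num_candles = 10, num_repetitions = 1000)  # distribution of attempt counts
# blow_1_to_100_candles()   # uncomment to run; 100 x 1,000 simulations is slow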
/candles-ke.R
no_license
annieke/r-experiments
R
false
false
2,155
r
library(readr)
library(countrycode)

data_csv <- as.data.frame(read_csv("C:/Users/Maxime Lacroix/Downloads/data_csv.csv"))
head(data_csv)

data_csv$continent <- factor(countrycode(sourcevar = data_csv[, "Country Code"],
                            origin = "iso3c",
                            destination = "continent"))

data_csv$Value <- data_csv$Value / 100
data_csv$Decennie <- ifelse(data_csv$Year<2000,1990,
                            ifelse(data_csv$Year<2010,2000,2010))

dat_prop <- data_csv[!is.na(data_csv$continent),]
dat_prop <- dat_prop[dat_prop$Value>0,]

hist(dat_prop$Value,main="Histogram of the proportions of women in parliament",
     freq=TRUE, xlab="Proportion", ylab="Count")
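
# Optional follow-up (a sketch using only base R): mean proportion of women
# in parliament by continent, computed on the filtered data above
tapply(dat_prop$Value, dat_prop$continent, mean, na.rm = TRUE)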
/Présentation_1/code_base.R
no_license
maximeglacroix/consul_pres
R
false
false
721
r
#' Validating new dataset
#'
#' @param dataset An expression dataset to validate. Genes in rows and samples in
#' columns. Gene names should be in 'symbol' format. It can be ExpressionSet,
#' SummarizedExperiment, RangedSummarizedExperiment, or matrix.
#' @param avgLoading A matrix with genes by RAVs.
#' @param method A character string indicating which correlation coefficient is
#' to be computed. One of "pearson" (default), "kendall", or "spearman": can be abbreviated.
#' @param scale Default is \code{FALSE}. If it is set to \code{TRUE}, the dataset will
#' be row-normalized by the \link{rowNorm} function.
#'
#' @return A matrix of correlation coefficients (Pearson by default, set through the
#' \code{method} argument) between RAVs (rows) and the top 8 PCs from the dataset (columns)
#'
.loadingCor <- function(dataset, avgLoading, method = "pearson", scale = FALSE) {

    if (is(dataset, "ExpressionSet")) {
        dat <- Biobase::exprs(dataset)
    } else if (is(dataset,"SummarizedExperiment")) {
        dat <- SummarizedExperiment::assay(dataset)
    } else if (is(dataset,"matrix")) {
        dat <- dataset
    } else {
        stop("'dataset' should be one of the following objects: ExpressionSet,
             SummarizedExperiment, and matrix.")
    }

    if (isTRUE(scale)) {dat <- rowNorm(dat)}   # row normalization
    dat <- dat[apply(dat, 1, function (x) {!any(is.na(x) | (x==Inf) | (x==-Inf))}),]

    gene_common <- intersect(rownames(avgLoading), rownames(dat))
    prcomRes <- stats::prcomp(t(dat[gene_common,]))   # centered, but not scaled by default
    loadings <- prcomRes$rotation[, 1:8]
    loading_cor <- abs(stats::cor(avgLoading[gene_common,], loadings[gene_common,],
                                  use = "pairwise.complete.obs", method = method))
    return(loading_cor)
}

#' Validate new datasets
#'
#' @param dataset Single or a named list of SummarizedExperiment (RangedSummarizedExperiment,
#' ExpressionSet or matrix) object(s). Gene names should be in 'symbol' format. Currently,
#' each dataset should have at least 8 samples.
#' @param RAVmodel PCAGenomicSignatures object. You can also provide the signature model matrix directly.
#' @param method A character string indicating which correlation coefficient is
#' to be computed. One of "pearson" (default), "kendall", or "spearman": can be abbreviated.
#' @param maxFrom Select whether to display the maximum value from the dataset's PCs or avgLoadings.
#' Under the default (\code{maxFrom="PC"}), the maximum correlation coefficient from the
#' top 8 PCs for each avgLoading will be selected as an output. If you choose (\code{maxFrom="avgLoading"}),
#' the avgLoading with the maximum correlation coefficient with each PC will be in the output.
#' @param level Output format of the validated result. Two options are available: \code{c("max", "all")}.
#' Default is "max", which outputs the matrix containing only the maximum coefficient.
#' To get the coefficients of all 8 PCs, set this argument to "all". \code{level = "all"}
#' can be used only for one dataset.
#' @param scale Default is \code{FALSE}. If it is set to \code{TRUE}, the dataset will
#' be row-normalized by the \link{rowNorm} function.
#'
#' @return A data frame containing the maximum Pearson correlation coefficient between
#' the top 8 PCs of the dataset and the pre-calculated average loadings (in rows) of the training
#' datasets (\code{score} column). It also contains other metadata associated with
#' each RAV: \code{PC} for the one of the top 8 PCs of the dataset that results
#' in the given \code{score}, \code{sw} for the average silhouette width of the RAV,
#' \code{cl_size} for the size of each RAV.
#'
#' If the input for the \code{dataset} argument is a list of different datasets, each row
#' of the output represents a new dataset for test, and each column represents
#' clusters from training datasets. If \code{level = "all"}, a list containing the matrices
#' of the Pearson correlation coefficients between all top 8 PCs of the datasets and
#' avgLoading.
#'
#' @examples
#' data(miniRAVmodel)
#' library(bcellViper)
#' data(bcellViper)
#' validate(dset, miniRAVmodel)
#' validate(dset, miniRAVmodel, maxFrom = "avgLoading")
#'
#' @export
validate <- function(dataset, RAVmodel, method = "pearson",
                     maxFrom = "PC", level = "max", scale = FALSE) {

    if (!is.list(dataset)) {
        if (ncol(dataset) < 8) {stop("Provide a study with at least 8 samples.")}
    }
    if (is.list(dataset)) {
        if (any(sapply(dataset, ncol) < 8)) {stop("Provide a study with at least 8 samples.")}
        if (level == "all") {stop("'level = \"all\"' is not available for a list of datasets.")}
    }

    sw <- silhouetteWidth(RAVmodel)
    cl_size <- S4Vectors::metadata(RAVmodel)$size

    if (is(RAVmodel,"GenomicSignatures")) {
        avgLoading <- SummarizedExperiment::assay(RAVmodel)
    } else {avgLoading <- RAVmodel}

    # The maximum correlation coefficient among PCs
    if (maxFrom == "PC") {
        # For a single dataset
        if (!is.list(dataset)) {
            x <- .loadingCor(dataset, avgLoading, method, scale)
            if (level == "max") {
                z <- apply(x, 1, max) %>% as.data.frame   # rowMax
                z$PC <- apply(x, 1, which.max)
                colnames(z)[1] <- "score"
                z$sw <- sw   # Silhouette width
                z$cl_size <- cl_size   # Cluster size
                z$cl_num <- readr::parse_number(rownames(z))   # Cluster number
                res <- z
            } else if (level == "all") {
                res <- x
            }
        } else {
            # For a list of datasets
            x <- lapply(dataset, .loadingCor, avgLoading, method, scale)
            if (level == "max") {
                z <- sapply(x, function(y) {apply(y, 1, max)})
                zPC <- sapply(x, function(y) {apply(y, 1, which.max)})
                colnames(zPC) <- paste0(colnames(zPC), "_PC")
                res <- cbind(z, zPC)
            } else if (level == "all") {
                res <- x
            }
        }
        # return(t(res))
        return(res)
    }
    # The maximum correlation coefficient among avgLoadings
    else if (maxFrom == "avgLoading") {
        if (!is.list(dataset)) {
            x <- .loadingCor(dataset, avgLoading, method)
            if (level == "max") {
                z <- apply(x, 2, max) %>% as.data.frame   # colMax
                z$avgLoading <- apply(x, 2, which.max)
                colnames(z)[1] <- "score"
                return(z)
            } else if (level == "all") {
                return(x)
            }
        } else {
            x <- lapply(dataset, .loadingCor, avgLoading, method)
            if (level == "max") {
                z <- sapply(x, function(y) {apply(y, 2, max)})
                return(z)
            } else if (level == "all") {
                return(x)
            }
        }
    }
}
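
# A minimal usage sketch for the internal helper, kept as comments; the toy
# dimensions and gene names below are assumptions for illustration only:
# toy_genes <- paste0("gene", 1:20)
# toy_avgLoading <- matrix(rnorm(100), 20, 5,
#                          dimnames = list(toy_genes, paste0("RAV", 1:5)))
# toy_dataset <- matrix(rnorm(240), 20, 12,
#                       dimnames = list(toy_genes, paste0("sample", 1:12)))
# .loadingCor(toy_dataset, toy_avgLoading)   # 5 x 8 matrix of |correlations|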
/R/validate.R
no_license
neelsoumya/GenomicSuperSignature
R
false
false
6,941
r
b1 = 0
b2 = -0.8

set.seed(002)
ar2 = arima.sim(list(ar = c(b1, b2)), n = 1000, sd = 2)

acorr = acf(ar2, type = 'correlation', lag.max = 24)
acorr
plot(acorr, xlab = 'Lag', ylab = 'ACF(CORR)', main = 'Autocorrelation of the AR(2)')

# For this AR(2) with phi1 = 0 and phi2 = -0.8, the theoretical ACF is
# rho(k) = 0 for odd k and rho(2m) = (-0.8)^m for even lags k = 2m.
points(0, 1)
for (i in (1: 24)) {
  if (i %% 2 == 1) {points(i, 0)}
  else {points(i, (- 0.8)^(i / 2))}
}
# Equivalent to "ar2.acf = ARMAacf(c(b1, b2), 0, lag.max = 24)"

legend(x = 'topright', legend = c('Theoretical autocorrelation', 'Sample autocorrelation'), pch = c('o', 'l'))
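
# Cross-check (a sketch): the same theoretical ACF computed with stats::ARMAacf(),
# which should match the points added manually above
theo.acf = ARMAacf(ar = c(b1, b2), lag.max = 24)
theo.acf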
/100319TA/p44a.R
no_license
ding05/time_series
R
false
false
562
r
##path analysis prep##
library(tidyr)
library(data.table)
library(dplyr)

##generate list of candidate genes
genes = read.csv("Spartobacteria_Tau_Matrix.csv")
genes = as.data.frame(t(genes))
genes = setDT(genes, keep.rownames = TRUE)[]
genes = genes[,c(1)]
colnames(genes)[1] <- "Gene"
genes = head(genes,-1)

##merge in expression data##
reads = read.csv("normalizedreads_TagSeq.csv", check.names = FALSE)
colnames(reads)[1] <- "Gene"
reads$Gene = gsub("\\..*","",reads$Gene)
genesexp = merge(genes,reads, by= "Gene", all.x = TRUE)

##set row names##
Merged2 <- data.frame(genesexp, row.names = 1, check.names = FALSE)

##transpose##
expdat <- as.data.frame(t(Merged2))

##print rownames as column##
expdat = setDT(expdat, keep.rownames = TRUE)[]

##rename that column##
colnames(expdat)[1] <- "Sample"

##Now we go through the hassle of getting the meta-data##
##alpha diversity##
T2 = read.csv("FamilyPropData.csv", header = TRUE)
names(T2)
FullMD = T2[c(1,265)]

##RNAseq metadata##
MetaDataExp=read.csv("Masterdata_TagSeq_SDH_23July2018.csv", header=TRUE)
names(MetaDataExp)
MetaDataExp=MetaDataExp[c(2,27,30:31,41)]

##Merge everything together##
FullExp=merge(expdat, MetaDataExp, by.x = "Sample", by.y = "sample_ID", all.x = FALSE,
              sort = TRUE, no.dups = FALSE)
FinalData = merge(FullExp, FullMD, by.x = "Sample", by.y = "SampleRNA", no.dups = FALSE)
names(FinalData)
FinalData = FinalData[,c(1, 490:494, 2:489)]
names(FinalData)

##create a new column for log(mass)
FinalData$LogMass = log(FinalData$weight)

#create a new column transforming the proportion
FinalData$Spart_T = asin(sqrt(FinalData$Spartobacteria_unclassified_Prop))
names(FinalData)
FinalData = FinalData[,c(1:4,495:496,7:494)]

FinalData$CrossDir <- as.character(FinalData$CrossDir)
FinalData$Sex <- as.character(FinalData$Sex)

##sub out values##
FinalData$CrossDir[FinalData$CrossDir=="RBC"] <- 0.75
FinalData$CrossDir[FinalData$CrossDir=="F2"] <- 0.50
FinalData$CrossDir[FinalData$CrossDir=="GBC"] <- 0.25
FinalData$Sex[FinalData$Sex=="M"] <- 1
FinalData$Sex[FinalData$Sex=="F"] <- 0
FinalData$worm_present[FinalData$worm_present=="FALSE"] <- 0
FinalData$worm_present[FinalData$worm_present=="TRUE"] <- 1
names(FinalData)
colnames(FinalData)[1]="Sample"

##remove all rows with NAs - reduces our sample size down to 382##
Final = FinalData[complete.cases(FinalData), ]

##write the Matrix for keeps##
write.csv(Final, file = "PathMatrix_Spartobacteria_T.csv", row.names = FALSE)

##OK, move on to general data formatting in preparation for our loop##
##format PathMatrix##
dat <- read.csv("PathMatrix_Spartobacteria_T.csv", header = T)
dat2 <- dat[,-1]
rownames(dat2) <- dat[,1]
names(dat2)

##now we want to iterate over every column in the matrix, creating submatrices and running stats##
output = file("SEM_code_Spartobacteria.R")
cat("", file=output, append=FALSE)
cat("library(sem)", "\n",
    "library(semPlot)", "\n","\n","\n",
    'dat <- read.csv("PathMatrix_Spartobacteria_T.csv", header = T)', "\n",
    'dat2 <- dat[,-1]', "\n",
    'rownames(dat2) <- dat[,1]', "\n","\n","\n",
    file = output, append=TRUE)

for (i in 6:493) {
  x=colnames(dat2)[i]
  output = file("SEM_code_Spartobacteria.R", open = "a")
  cat((paste0("dat", i, "= dat2[,c(1:5,",i,")]","\n",
              "S <-cov(dat", i, ")", "\n","N <- dim(dat", i, ")[1]", "\n",
              "RAM <- specifyModel()", "\n",
              "CrossDir -> worm_present, CI, NA", "\n",
              "CrossDir -> LogMass,CM, NA", "\n",
              "CrossDir -> Spart_T,CP, NA", "\n",
              "CrossDir <-> CrossDir, C, NA", "\n",
              "CrossDir -> ", x, " ,CE, NA", "\n",
              "Sex -> ", x, " ,SE, NA", "\n",
              "Sex -> Spart_T,SP, NA", "\n",
"Sex -> LogMass,SM, NA", "\n", "Sex <-> Sex, S, NA", "\n", "LogMass -> ", x, " ,ME, NA", "\n", "LogMass -> Spart_T,MP, NA", "\n", "LogMass <-> LogMass, M, NA", "\n", x, " <-> Spart_T, EP, NA", "\n", x, " <-> worm_present, EI, NA", "\n", x, " <-> ", x, " , E, NA", "\n", "worm_present <-> Spart_T, IP, NA", "\n", "worm_present <-> worm_present, I, NA", "\n", "Spart_T <-> Spart_T, P, NA", "\n","\n","\n", "sem.out <- sem(RAM, S, N)", "\n", "x=summary(sem.out)", "\n", "y=standardizedCoefficients(sem.out)", "\n", 'capture.output(x, y, file = ("semoutput_' , x , '.txt"))', '\n', "\n","\n","\n", "##Onto Next##", "\n")), file=output) close(output) }
/PathMatrix_GenerateCode_Family_T.R
no_license
lfuess/MicrobiomeMS
R
false
false
4,830
r
getwd()
options(scipen = 999)

#Importing the Dataset
bank_data<-read.csv("Train_nyOWmfK.csv")
View(bank_data)
dim(bank_data)
str(bank_data)
View(sapply(bank_data , class))
names(bank_data)

#Data Cleaning: removing variables unnecessary for the business problem
bank_data$ID<-NULL
bank_data$DOB<-NULL
bank_data$Lead_Creation_Date<-NULL
bank_data$City<-NULL
bank_data$Var1 <- NULL
bank_data$Var2 <- NULL
bank_data$Var5<- NULL
bank_data$Var4 <- NULL
bank_data$Source <- NULL
bank_data$Employer_Name<-NULL
bank_data$Salary_Account<-NULL #categorical variable with high cardinality, so we drop it

require(dplyr)
cat_var<-names(dplyr::select_if(bank_data,is.character))
View(bank_data[cat_var])
num_var<-names(dplyr::select_if(bank_data,is.numeric))
View(bank_data[num_var])

bank_data$Gender<-as.factor(bank_data$Gender)
bank_data$Mobile_Verified<-as.factor(bank_data$Mobile_Verified)
bank_data$Filled_Form<-as.factor(bank_data$Filled_Form)
bank_data$Device_Type<-as.factor(bank_data$Device_Type)

#User Defined Function for descriptive statistics
mystats<-function(x){
  nmiss<-sum(is.na(x))
  a<-x[!is.na(x)]
  m<-mean(a)
  n<-length(a)
  s<-sd(a)
  min<-min(a)
  p1<-quantile(a,0.01)
  p5<-quantile(a,0.05)
  p10<-quantile(a,0.10)
  q1<-quantile(a,0.25)
  q2<-quantile(a,0.5)
  q3<-quantile(a,0.75)
  p90<-quantile(a,0.90)
  p95<-quantile(a,0.95)
  p99<-quantile(a,0.99)
  max<-max(a)
  UC<-m+3*s
  LC<-m-3*s
  outlier_flag<-max>UC|min<LC
  return(c(n=n,nmiss=nmiss,outlier_flag=outlier_flag,mean=m,stdev=s,min=min,p1=p1,p5=p5,p10=p10,
           q1=q1,q2=q2,q3=q3,p90=p90,p95=p95,p99=p99,max=max,UC=UC,LC=LC))
}

diag_stats<-t(data.frame(sapply(bank_data[,num_var], mystats)))
View(diag_stats)
write.csv(diag_stats,file="diag_stats_info_BankingCaseStudy.csv")

#Check for Missing Values
View(sapply(bank_data,function(x) sum(is.na(x))))
#-We have a large number of missing values in some variables, but we cannot drop these variables
# because they seem important from the business point of view. So we'll impute these.
bank_data$Loan_Amount_Applied[is.na(bank_data$Loan_Amount_Applied)] <- median(bank_data$Loan_Amount_Applied,na.rm = T)
bank_data$Loan_Tenure_Applied[is.na(bank_data$Loan_Tenure_Applied)] <- median(bank_data$Loan_Tenure_Applied,na.rm = T)
bank_data$Existing_EMI[is.na(bank_data$Existing_EMI)] <- mean(bank_data$Existing_EMI,na.rm = T)
bank_data$Loan_Amount_Submitted[is.na(bank_data$Loan_Amount_Submitted)] <- median(bank_data$Loan_Amount_Submitted,na.rm = T)
bank_data$Loan_Tenure_Submitted[is.na(bank_data$Loan_Tenure_Submitted)] <- median(bank_data$Loan_Tenure_Submitted,na.rm = T)
bank_data$Interest_Rate[is.na(bank_data$Interest_Rate)] <- median(bank_data$Interest_Rate,na.rm = T)
bank_data$Processing_Fee[is.na(bank_data$Processing_Fee)] <- median(bank_data$Processing_Fee,na.rm = T)
bank_data$EMI_Loan_Submitted[is.na(bank_data$EMI_Loan_Submitted)] <- median(bank_data$EMI_Loan_Submitted,na.rm = T)

#Outlier Treatment
outlier_treat <- function(x){
  UC1 = quantile(x, p=0.99,na.rm=T)
  LC1 = quantile(x, p=0.01,na.rm=T)
  x=ifelse(x>UC1, UC1, x)
  x=ifelse(x<LC1, LC1, x)
  return(x)
}
bank_data[,num_var]<- data.frame(apply(bank_data[,num_var],2,FUN=outlier_treat))

#correlation
cor_mat <- cor(bank_data[num_var])
View(cor_mat)
write.csv(cor_mat,"cor_mat_BanklOans.csv")

bank_data<-cbind(bank_data[,num_var],bank_data[,cat_var])
View(bank_data)

#Splitting the data into Training and Testing Datasets
train_ind<-sample(1:nrow(bank_data),size = floor(0.70*nrow(bank_data)))
training<-bank_data[train_ind,]
testing<-bank_data[-train_ind,]

#Building model for training dataset
fit<-glm(Disbursed~Monthly_Income+Loan_Amount_Applied+Loan_Tenure_Applied+Existing_EMI+Loan_Amount_Submitted
         +Loan_Tenure_Submitted+Interest_Rate+Processing_Fee+EMI_Loan_Submitted+LoggedIn+Gender+
           Mobile_Verified+Filled_Form+Device_Type,data=training,family=binomial(logit))

#Output of Logistic Regression
summary(fit)
ls(fit)
fit$model

Concordance = function(GLM.binomial) {
  outcome_and_fitted_col = cbind(GLM.binomial$y, GLM.binomial$fitted.values)
  ones = outcome_and_fitted_col[outcome_and_fitted_col[,1] == 1,]
  zeros = outcome_and_fitted_col[outcome_and_fitted_col[,1] == 0,]
  if (length(ones[,1])>length(zeros[,1])) {ones = ones[1:length(zeros[,1]),]}
  else {zeros = zeros[1:length(ones[,1]),]}
  ones_and_zeros = data.frame(ones, zeros)
  conc = rep(NA, length(ones_and_zeros[,1]))
  disc = rep(NA, length(ones_and_zeros[,1]))
  ties = rep(NA, length(ones_and_zeros[,1]))
  for (i in 1:length(ones_and_zeros[,1]))
  {
    if (ones_and_zeros[i,2] > ones_and_zeros[i,4])
    {conc[i] = 1
    disc[i] = 0
    ties[i] = 0}
    else if (ones_and_zeros[i,2] == ones_and_zeros[i,4])
    {
      conc[i] = 0
      disc[i] = 0
      ties[i] = 1
    }
    else if (ones_and_zeros[i,2] < ones_and_zeros[i,4])
    {
      conc[i] = 0
      disc[i] = 1
      ties[i] = 0
    }
  }
  conc_rate = mean(conc, na.rm=TRUE)
  disc_rate = mean(disc, na.rm=TRUE)
  tie_rate = mean(ties, na.rm=TRUE)
  Somers_D<-conc_rate - disc_rate
  gamma<- (conc_rate - disc_rate)/(conc_rate + disc_rate)
  return(list(concordance=conc_rate, num_concordant=sum(conc), discordance=disc_rate,
              num_discordant=sum(disc), tie_rate=tie_rate,num_tied=sum(ties),
              somers_D=Somers_D, Gamma=gamma))
}
Concordance(fit)

#Stepwise Regression
step1=step(fit,direction ='both')

fit2<-glm(Disbursed ~ Monthly_Income + Loan_Amount_Applied + Loan_Tenure_Applied +
            Existing_EMI + Loan_Amount_Submitted + Loan_Tenure_Submitted + Interest_Rate +
            LoggedIn + Filled_Form,data=training,family=binomial(logit))
summary(fit2)
Concordance(fit2)

#Predicting for Training Dataset
train1<-cbind(training,Prob=predict(fit2,type="response"))
View(train1)

#Creating Deciles
decLocations<-quantile(train1$Prob,probs=seq(0.1,0.9,by=0.1))
train1$decile<-findInterval(train1$Prob,c(-Inf,decLocations,Inf))
View(train1)

#Decile Analysis Report
decile_grp<-group_by(train1,decile)
decile_sum_train<-summarize(decile_grp,total_cnt=n(),min_prob=min(Prob),
                            max_prob=max(Prob),disbursed_cnt=sum(Disbursed),
                            non_disbursed_cnt=total_cnt-disbursed_cnt)
decile_sum_train<-arrange(decile_sum_train,desc(decile))
View(decile_sum_train)
write.csv(decile_sum_train,"Bankdata_FitTrain.csv",row.names = F)

test1<-cbind(testing,Prob=predict(fit2,testing,type="response"))
View(test1)
decLocations<-quantile(test1$Prob,probs=seq(0.1,0.9,by=0.1))
test1$decile<-findInterval(test1$Prob,c(-Inf,decLocations,Inf))
View(test1)

#Decile Analysis Report
decile_grp<-group_by(test1,decile)
decile_sum_test<-summarize(decile_grp,total_cnt=n(),min_prob=min(Prob),
                           max_prob=max(Prob),disbursed_cnt=sum(Disbursed),
                           non_disbursed_cnt=total_cnt-disbursed_cnt)
decile_sum_test<-arrange(decile_sum_test,desc(decile))
View(decile_sum_test)

install.packages("InformationValue")
library(InformationValue)
cut1 <- optimalCutoff(train1$Disbursed,train1$Prob, optimiseFor = "Both", returnDiagnostics = T)
cut1
ROCTable<-data.frame(cut1$sensitivityTable)
View(ROCTable)

train1$pred_Y <- ifelse(train1$Prob> 0.1335348,1,0)
confusionMatrix(train1$Disbursed,train1$pred_Y)
confusionMatrix(train1$Disbursed,train1$Prob, threshold = 0.1335348)
sum(train1$Disbursed)

plotROC(train1$Disbursed,train1$Prob, Show.labels = F)

install.packages("ROCR")
require(ROCR)
pred_train_fit2 <- prediction(train1$Prob, train1$Disbursed)
perf_fit2 <- performance(pred_train_fit2, "tpr", "fpr")
plot(perf_fit2)
abline(0, 1)
performance(pred_train_fit2, "auc")@y.values

#This completes the logistic regression model for loan disbursement.
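
#Optional appendix (a sketch, not in the original script): KS statistic derived
#from the ROCR performance object computed above
ks_stat <- max(perf_fit2@y.values[[1]] - perf_fit2@x.values[[1]])
ks_stat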
/R_CaseStudy_Logistic_Regression_BANK_MARKETING/R_CaseStudy2_Logistic_Regression_BANK_MARKETING.R
no_license
ShikhaSharma98/R
R
false
false
7,962
r
x <- seq(0, 20, 1)
require(mosaic)
y <- pnorm(x, mean=10, sd=1)   # normal CDF evaluated on the grid
plot(x, y, type='l')
mydata <- do(10) * rflip(50)   # 10 simulated runs of 50 coin flips
mydata
tally(~heads, data=mydata)
tally(~tails, data=mydata)
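
# A further mosaic sketch: distribution of heads across the simulated runs
histogram(~heads, data=mydata)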
/Oct30-mg.r
no_license
murthygorty/DataScientists9
R
false
false
196
r
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/MHPropWithKStepNewton.R \name{MHPropWithKStepNewton} \alias{MHPropWithKStepNewton} \title{Metropolis–Hastings algorithm with K-step Newton method for the spline model.} \usage{ MHPropWithKStepNewton( param.cur, gradhess.fun.name, logpost.fun.name, nNewtonStep, Params, hessMethod, Y, x0, callParam, splineArgs, priorArgs, prop.df, Params_Transform ) } \description{ Metropolis–Hastings algorithm with K-step Newton method for the spline model. }
/man/MHPropWithKStepNewton.Rd
no_license
kl-lab/fformpp
R
false
true
554
rd
\name{oFSkm}
\alias{oFSkm}
\title{Objective Function (sum of cosines)}
\usage{oFSkm(X, C, CIdx)}
\description{Objective Function (sum of cosines).}
\value{Sum of cosine similarities.}
\arguments{\item{X}{data matrix (row-wise vectors of unit length).}
\item{C}{concept vectors as a matrix (row-wise, unit length).}
\item{CIdx}{vector of length NROW(X) with natural numbers 1..k, indicating the cluster of each data vector.}}
\examples{{
X=structure(c(0.707, 0.707, 0.707, 0.707), .Dim = c(2L, 2L))
C=structure(c(1, 0, 0, 1), .Dim = c(2L, 2L))
CIdx=c(2, 1)
oFSkm(X,C,CIdx) # 1.414
}}
/man/oFSkm.Rd
permissive
yangkuoone/mvc
R
false
false
583
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/setup_vars.R \name{setup_vars} \alias{setup_vars} \title{Set Up dracarys Variables} \usage{ setup_vars(x) } \arguments{ \item{x}{Path to \code{replay.json} file.} } \value{ A list with several elements pointing to DRAGEN output file names. } \description{ Reads the \code{replay.json} file, which contains the DRAGEN command line, parameters, version and inputs for the specific run. It then pre-creates expected file names for DRAGEN output. } \examples{ x <- system.file("extdata/COLO829-replay.json.gz", package = "dracarys") (v <- setup_vars(x)) }
/man/setup_vars.Rd
permissive
umccr/old_dracarys
R
false
true
631
rd
error.bar <- function(x, y, upper, lower=upper, length=0.1,...){
  if(length(x) != length(y) | length(y) !=length(lower) | length(lower) != length(upper))
    stop("vectors must be same length")
  arrows(x,y+upper, x, y, angle=90, code=1, length=length, ...)
  #text(x, y+upper, paste("n=", ))
}

pdf("ping_number_clean_hom_high_narrow.pdf")
par(mar=c(5,5,4,2))
data =read.table("RIL230_RelocaTEi.CombinedGFF.characterized.clean.high_narrow.ping_number.summary")
expr = data[,2]
std = data[,3]
# bar plot of mean unique homozygous mPing counts per Ping copy-number class, with SD error bars
barx <- barplot(expr, col=c("cornflowerblue"), ylim=c(0,120), border=F, axis.lty=1, xlab='', ylab='')
error.bar(barx, expr, std)
axis(1,c(0.1, max(barx)+0.6),line=0,labels=c("",""), cex=1.4)
text(barx, rep(-5, 6),offset=2,labels=data[,1],srt=0,xpd=TRUE, cex=1.4)
#legend("topright",c("HEG4","Nipponbare"),bty="n",border="NA",lty=c(0,0),cex=1,fill=c("blue","orange"))
xpos <- 3.6
ypos <- 44
# compose the axis titles from mtext() pieces so that gene names can be set in italics
mtext("Ping", side=1,font=3, at=xpos+0.3,line=3, cex=1.4, col="black")
mtext("copy number", side=1,font=1, at=xpos+2.1,line=3, cex=1.4, col="black")
mtext("Unique homozygous", side=2,font=1, at=ypos,line=3, cex=1.4, col="black")
mtext("mPing", side=2,font=3, at=ypos+34,line=3, cex=1.4, col="black")
mtext("number", side=2,font=1, at=ypos+53,line=3, cex=1.4, col="black")
#text(2, 115, 'Pearson correlation:', cex=1.4)
#text(3.7, 108, 'R-squared = 0.73, p-value < 1.1e-11', cex=1.4)
# sample size labels above the error bars
text(barx, expr+std+10, paste('n = ', data[,6], sep=''), cex=1.4)
dev.off()
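
#Standalone usage sketch for error.bar(); the bar heights and errors below are
#illustrative values only, not taken from the data above
#bx <- barplot(c(30, 50), ylim = c(0, 70))
#error.bar(bx, c(30, 50), upper = c(4, 6))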
/bin/Manuscript_Figures/Fig1_mPing/ping_number_clean_hom_high_narrow.R
no_license
wangpanqiao/Transposition
R
false
false
1,454
r
# Ryan Woodbury
# Tidy Tuesday
# February 23, 2021

library(tidyverse)
library(lubridate)

theme_set(theme_light())

## Get the data
tuesdata <- tidytuesdayR::tt_load(2021, week = 9)

employed <- tuesdata$employed
earn <- tuesdata$earn

## quick summary
### employed data
skimr::skim(employed)
glimpse(employed)

### earn data
skimr::skim(earn)
glimpse(earn)

## Explore some variables
### employed
employed_filter <- employed %>%
  filter(race_gender != "TOTAL")

### earn
earn %>%
  count(sex) # looks OK, but "Both Sexes" will need filtering out
earn %>%
  count(race) # again, OK, but can filter out "All Races"
earn %>%
  count(ethnic_origin) # `All Origins` or `Hispanic or Latino`
earn %>%
  count(age) # Quite messy

hist(earn$n_persons)
hist(earn$median_weekly_earn)

View(earn_filter <- earn %>%
  filter(sex == "Both Sexes",
         race %in% c("White", "Black or African American"),
         age == "25 to 54 years") %>%
  mutate(race = recode(race, "Black or African American" = "Black")))

earn_filter_long <- earn_filter %>%
  pivot_wider(id_cols = c(year, quarter), names_from = race, values_from = median_weekly_earn) %>%
  mutate(earn_ratio = Black/White,
         year_quarter = year + quarter/4 - .125) ## From Hunter: mutate(year_quarter = year+quarter/4-.125)

ggplot(earn_filter_long, aes(x = year_quarter, y = earn_ratio)) +
  geom_line() +
  scale_y_continuous(name = "Earnings Ratio", limits = c(0,1), labels = function(y) scales::percent(y)) +
  labs(title = "Ratio of earnings of Black and White employees",
       caption = "By: Ryan Woodbury | Tidy Tuesday R4DS community | Data from BLS.gov")

earn_NoAll <- earn %>%
  filter(sex != "Both Sexes",
         race != "All Races",
         str_detect(age, "and over"),
         ethnic_origin == "All Origins") %>%
  mutate(race = recode(race, "Black or African American" = "Black"),
         age = str_extract(age, "\\d+"),
         date = lubridate::yq(paste(year, quarter,sep="-")))

ggplot(earn_NoAll, aes(date, n_persons, color = race, shape = age)) +
  geom_point() +
  geom_path(aes(linetype = factor(sex)))

# earn_NoAll already excludes "All Races", so filter on age only here
ggplot(filter(earn_NoAll, age == "16"), aes(date, n_persons, color = age, shape = sex)) +
  geom_point() +
  geom_path()
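
# Optional (a sketch; the file name below is an assumption): persist the last plot
# ggsave("earn_n_persons_16_and_over.png", width = 8, height = 5)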
/Woodbury_2021_02_23_TT.R
no_license
rywood89/Tidy_Tuesday
R
false
false
2,274
r
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/template_download.R
\name{allen_gene_template_download}
\alias{allen_gene_template_download}
\title{Download ABI gene templates}
\usage{
allen_gene_template_download(
  age_dataset = c("adult", "P56", "P28", "P14", "P4", "E18.5", "E16.5", "E15.5",
    "E13.5", "E11.5"),
  outfile = NULL,
  labels = F,
  grid_annot = F,
  binarize = F
)
}
\arguments{
\item{age_dataset}{Choose the age of the template to download.
Options are 'adult','P56','P28','P14','P4','E18.5','E16.5','E15.5','E13.5','E11.5'.
Note 'adult' and 'P56' are aliases.}

\item{outfile}{Optional output file to write.}

\item{labels}{If \code{TRUE}, then the labels are downloaded. Default is \code{FALSE}, so the template is downloaded.
Warning: labels may contain integers that cannot be written to a MINC file.}

\item{grid_annot}{If \code{TRUE}, then the grid annotations are downloaded. They are at much lower resolution than the template.
Default is \code{FALSE}, so the template is downloaded.
Warning: grid annotations may contain integers that cannot be written to a MINC file.}

\item{binarize}{If \code{TRUE}, then values are binarized with a threshold of 0.5.}
}
\value{
template data as a 1D vector of class mincSingleDim
}
\description{
Downloads and reads the ABI gene template, with an option to write it to a MINC file.
}
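% A hedged usage sketch added for illustration: the calls below simply mirror
% the \usage signature above and are wrapped in \dontrun because they download
% data from the Allen Brain Institute.
\examples{
\dontrun{
tmpl <- allen_gene_template_download(age_dataset = "P56")
tmpl_bin <- allen_gene_template_download(age_dataset = "P56", binarize = TRUE)
}
}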
/man/allen_gene_template_download.Rd
no_license
DJFernandes/ABIgeneRMINC
R
false
true
1,325
rd
#' NYC Education (2000)
#'
#' Block-level New York City information about education and demographic characteristics (2000). Geographic units: 2010 Census blocks.
#'
#' Sf object, Projection 2203. NAD83 / New York Long Island (ftUS)
#'
#' @format An sf data frame with 2,216 rows, 56 variables, and a geometry column:
#' \describe{
#'   \item{ poly_id }{ Unique ID }
#'   \item{ youth_drop }{ Percentage of population age 16-19 that has dropped out of high school }
#'   \item{ per_mnrty }{ Percentage of the population that is non-white }
#'   \item{ hs_drop }{ Percentage of population over age 25 that dropped out of high school }
#'   \item{ col_degree }{ Percentage of population over age 25 that obtained at least a bachelor’s degree }
#'   \item{ per_asian }{ Percentage of Asian population }
#'   \item{ per_black }{ Percentage of black population }
#'   \item{ per_white }{ Percentage of white population }
#'   \item{ ctlabel }{ Census tract label }
#'   \item{ borocode }{ Borough code }
#'   \item{ boroname }{ Borough name }
#'   \item{ boroct2000 }{ Census tract coding }
#'   \item{ ntacode }{ Neighborhood tabulation area code }
#'   \item{ ntaname }{ Neighborhood tabulation area name }
#'   \item{ puma }{ Public use microarea code }
#'   \item{ shape_leng }{ Length of polygon border }
#'   \item{ shape_area }{ Area of polygon }
#'   \item{ np_ct }{ Number of nonprofits }
#'   \item{ mean_inc }{ Mean income }
#'   \item{ pop1619 }{ Population age 16-19 }
#'   \item{ dropout }{ High school dropouts, age 16-19 }
#'   \item{ enrollhs }{ Population enrolled in high school, age 16-19 }
#'   \item{ per_prv_sc }{ Percentage of all students enrolled in private school }
#'   \item{ per_pub_sc }{ Percentage of all students enrolled in public school }
#'   \item{ over3 }{ Population over 3 years old }
#'   \item{ notenroll }{ Population over 3 years old, not enrolled in school }
#'   \item{ over3enroll }{ Population over 3 years old, enrolled in school }
#'   \item{ pubsch }{ Population enrolled in public school }
#'   \item{ pub_pk }{ Population enrolled in public pre-k }
#'   \item{ pub_k8 }{ Population enrolled in public k-8 }
#'   \item{ pub_hs }{ Population enrolled in public high school }
#'   \item{ pub_col }{ Population enrolled in public college }
#'   \item{ privsch }{ Population enrolled in private school }
#'   \item{ priv_pk }{ Population enrolled in private pre-k }
#'   \item{ priv_k8 }{ Population enrolled in private k-8 }
#'   \item{ priv_hs }{ Population enrolled in private high school }
#'   \item{ priv_col }{ Population enrolled in private college }
#'   \item{ over25 }{ Population over 25 years }
#'   \item{ subhs }{ Population over 25 years with less than a high school degree }
#'   \item{ hs }{ Population over 25 years with a high school degree }
#'   \item{ somecol }{ Population over 25 years with some college }
#'   \item{ college }{ Population over 25 years with a bachelor’s degree }
#'   \item{ master }{ Population over 25 years with a master’s degree }
#'   \item{ prof }{ Population over 25 years with a professional degree }
#'   \item{ phd }{ Population over 25 years with a PhD }
#'   \item{ white }{ Total white population }
#'   \item{ black }{ Total black population }
#'   \item{ asian }{ Total Asian population }
#'   \item{ sub18 }{ Total population under 18 years old }
#'   \item{ gender_par }{ Gender parity, 1 = parity, higher = more males, lower = more females }
#'   \item{ male }{ Male population }
#'   \item{ female }{ Female population }
#'   \item{ school_ct }{ Number of schools }
#'   \item{ popdens }{ Population density (people per square mile) }
#'   \item{ population }{ Population count }
#' }
#' @source Source for school locations: Open Data New York (\url{https://data.cityofnewyork.us/Education/School-Point-Locations/jfju-ynrr}). Demographics source: 2000 Census.
#'
#' @examples
#' if (requireNamespace("sf", quietly = TRUE)) {
#' library(sf)
#' data(nyceducation)
#'
#' plot(nyceducation["phd"])
#' }
"nyceducation"
/R/nyceducation.R
permissive
spatialanalysis/geodaData
R
false
false
3,949
r
# Print citation information for the packages used in this project
citation(package = "base", lib.loc = NULL)
citation(package = "irr", lib.loc = NULL)
citation(package = "psych", lib.loc = NULL)
citation(package = "GPArotation", lib.loc = NULL)
citation(package = "yhat", lib.loc = NULL)
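# A possible extension (not in the original script): export the same
# citations to a BibTeX file for use in a manuscript. toBibtex() is in
# base R's utils package; "citations.bib" is just an illustrative name.
pkgs <- c("base", "irr", "psych", "GPArotation", "yhat")
bib <- unlist(lapply(pkgs, function(p) c(toBibtex(citation(package = p)), "")))
writeLines(bib, "citations.bib")  # hypothetical output file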
/citations.R
no_license
EricLeingang/Personal-Essay-Project
R
false
false
221
r
gaussSamp <- function(mu=matrix(0,nrow=dim(Sigma)[1]), Sigma, numSamps) {
## GAUSSSAMP Sample from a Gaussian with a given covariance.
## FORMAT
## DESC samples a given number of samples from a Gaussian with a
## given covariance matrix.
## ARG mu : the mean vector of the Gaussian to sample from.
## ARG Sigma : the covariance of the Gaussian to sample from.
## ARG numSamps : the number of samples to take from the Gaussian.
## RETURN y : the samples from the Gaussian
##
## SEEALSO : rnorm, eigen
##
## COPYRIGHT : Neil D. Lawrence 2005, Alfredo Kalaitzis 2010

  # Eigendecompose the covariance: Sigma = U diag(V) t(U).
  eigVecs = eigen(Sigma)
  U = eigVecs$vectors
  V = eigVecs$values
  dims = dim(Sigma)[1]
  # Draw standard-normal samples, one row per sample.
  y = matrix(rnorm(numSamps*dims), numSamps, dims)
  # V[V<0] = as.complex(V[V<0])
  # abs() guards against tiny negative eigenvalues from numerical error;
  # nrow= keeps diag() from building an identity matrix when dims == 1.
  y = matrix(1,nrow=numSamps)%*%t(mu) + y %*% diag(sqrt(abs(V)), nrow=dims) %*% t(U)
  return (Re(y))
}
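# A quick usage sketch (not part of the original file): draw 1000 samples
# from a correlated 2-D Gaussian and check the empirical moments.
Sigma <- matrix(c(1, 0.8, 0.8, 1), 2, 2)
samps <- gaussSamp(Sigma = Sigma, numSamps = 1000)
colMeans(samps) # should be close to c(0, 0)
cov(samps)      # should be close to Sigma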
/gptk/R/gaussSamp.R
no_license
ingted/R-Examples
R
false
false
830
r
1.
tmp <- c(4,6,3)            # Create the vector
rep(tmp, 10)               # Repeat the vector 10 times
paste("fn", 1:30, sep="")  # Paste the 1st and 2nd arguments

2a.
# Create the vector
x <- c("Control", "Control", "Control", "Ear Removal", "Ear Removal",
       "Ear Removal", "Ear Removal", "Fake Ear Removal", "Fake Ear Removal",
       "Fake Ear Removal", "Fake Ear Removal")
# Display the vector
> x
 [1] "Control"          "Control"          "Control"          "Ear Removal"
 [5] "Ear Removal"      "Ear Removal"      "Ear Removal"      "Fake Ear Removal"
 [9] "Fake Ear Removal" "Fake Ear Removal" "Fake Ear Removal"
# Construct a factor from the vector
> xfact <- factor(x)
# Display the factor
> xfact
 [1] Control          Control          Control          Ear Removal
 [5] Ear Removal      Ear Removal      Ear Removal      Fake Ear Removal
 [9] Fake Ear Removal Fake Ear Removal Fake Ear Removal
Levels: Control Ear Removal Fake Ear Removal
> nlevels(xfact)
[1] 3

2b.
# Create the vector
> x <- c(rep("a",25), rep("b",15), rep("c",58))
> x
 [1] "a" "a" "a" "a" "a" "a" "a" "a" "a" "a" "a" "a" "a" "a" "a" "a" "a" "a" "a" "a"
[21] "a" "a" "a" "a" "a" "b" "b" "b" "b" "b" "b" "b" "b" "b" "b" "b" "b" "b" "b" "b"
[41] "c" "c" "c" "c" "c" "c" "c" "c" "c" "c" "c" "c" "c" "c" "c" "c" "c" "c" "c" "c"
[61] "c" "c" "c" "c" "c" "c" "c" "c" "c" "c" "c" "c" "c" "c" "c" "c" "c" "c" "c" "c"
[81] "c" "c" "c" "c" "c" "c" "c" "c" "c" "c" "c" "c" "c" "c" "c" "c" "c" "c"
# Find the length of the vector
> length(x)
[1] 98
# Construct a data frame from the vector
> table1 <- data.frame(x)
> table1

3.
n <- as.integer(readline(prompt = "Enter no of students")) # Read the no of students
# Declare character/numeric vectors of length n
USN <- vector(mode="character", length=n)
Name <- vector(mode="character", length=n)
Marks <- vector(mode="numeric", length=n)
# Read the elements of the vectors
print("Enter USN")
for (i in 1:n) USN[i] <- as.character(readline())
print("Enter Name")
for (i in 1:n) Name[i] <- readline()
print("Enter Marks")
for (i in 1:n) Marks[i] <- as.integer(readline())
# Construct the data frame from the vectors
student <- data.frame(USN, Name, Marks)
print("The student details are as follows")
print(student) # Display the data frame
# Read the vector of ages
print("Enter Age")
Age <- vector(mode="integer", length=n)
for (i in 1:n) Age[i] <- as.integer(readline())
student <- cbind(student, Age) # Append the vector to the data frame
print(student)
# Print students with marks > 25 and age > 20
for (i in 1:n)
  if (student[i, 3] > 25)
    if (student[i, 4] > 20)
      print(student[i, ])

4a.
n <- as.integer(readline(prompt = "Enter no of employees"))
EmpId   <- vector(mode="character", length=n)
EmpName <- vector(mode="character", length=n)
DOJ     <- vector(mode="character", length=n)
EmpCode <- vector(mode="numeric",   length=n)
Desig   <- vector(mode="character", length=n)
Dept    <- vector(mode="character", length=n)
print("Enter EmpId")
for (i in 1:n) EmpId[i] <- as.character(readline())
print("Enter EmployeeName")
for (i in 1:n) EmpName[i] <- readline()
print("Enter DOJ")
for (i in 1:n) DOJ[i] <- readline()
print("Enter EmployeeCode")
for (i in 1:n) EmpCode[i] <- as.integer(readline())
print("Enter Designation")
for (i in 1:n) Desig[i] <- readline()
print("Enter Dept")
for (i in 1:n) Dept[i] <- readline()

4b.
Emp <- data.frame(EmpId, EmpName, EmpCode, Desig, Dept, DOJ)
print("The employee details are as follows")
print(Emp)

4c.
write.csv(Emp, "C:/Users/ARCHANA/Documents/Empfile.csv")

4d.
# Read back the file written in 4c (the original read "file.csv", which was never written)
readEmp <- read.csv("C:/Users/ARCHANA/Documents/Empfile.csv")

4e.
print("Enter a new row")
u <- readline(prompt = "EmpId")
n <- readline(prompt = "EmpName")
m <- readline(prompt = "EmpCode")
A <- readline(prompt = "Desig")
s <- readline(prompt = "Dept")
t <- readline(prompt = "DOJ")
x <- data.frame(u, n, m, A, s, t)
write.table(x, "C:/Users/ARCHANA/Documents/Empfile.csv", col.names = FALSE,
            append = TRUE, row.names = TRUE, quote = FALSE, sep = ",")

5a.
data()
head(mtcars)

5b.
rownum <- nrow(mtcars) # Number of rows (observations)
colnum <- ncol(mtcars) # Number of columns (variables)

5c.
x <- data.frame(mtcars)
automatic <- 0
manual <- 0
for (i in 1:rownum)
  ifelse(x[i, 9] == 1, automatic <- automatic + 1, manual <- manual + 1)
ifelse(automatic > manual,
       print("There are more automatic transmission type"),
       print("There are more manual transmission type"))

5d.
# The scatter plot
HorsePower <- x[, 4]
Weight <- x[, 6]
scatter.smooth(HorsePower, Weight, span = 2/3, degree = 1,
               family = c("symmetric", "gaussian"))
# Plot a histogram of Miles/gallon
Mpg <- x[, 1]
hist(Mpg, breaks = 12, col = "lightblue", border = "pink")

5e.
x[, 2] <- as.integer(x[, 2])
x[, 8] <- as.integer(x[, 8])
x[, 9] <- as.integer(x[, 9])
x[, 2] <= 5

5f.
mtcars[mtcars$cyl <= 5, ] # note the comma: subset rows, keep all columns

6a.
df <- airquality
dim(df)

6b.
sapply(df, class)

6c.
# Print the count of missing values per column
print("The missing values are as follows")
xcolNames <- colnames(df)
x <- colSums(is.na(df))
print(x)

6d.
which(is.na(df))
sum(is.na(df))
df1 <- as.data.frame(df)

6e.
# Recoding the missing values with the column means
for (i in 1:4)
  df1[, i] <- ifelse(is.na(df[, i]), mean(df[, i], na.rm = TRUE), df[, i])
# Excluding the missing values
df2 <- na.omit(df)
/1.r
no_license
vishugummani/cn1
R
false
false
4,897
r
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/utils.R
\name{need}
\alias{need}
\title{Check if a package is installed and stop if not}
\usage{
need(package)
}
\arguments{
\item{package}{a package name as a character string.}
}
\description{
If the package is not installed then the function will error and give an
appropriate message.
}
\details{
Designed to be used in scripts or functions where a function from the named
package is used with \code{::} without attaching the package. It should be
used alongside \code{library()} calls at the top of scripts, where it also
serves the purpose of indicating to the user that a package is needed.

The function uses \code{requireNamespace()}, which will load the package if
available but not attach it. This is what happens when \code{::} is used.
}
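% Illustrative example (not present in the original .Rd); "stringr" here is
% just a stand-in for whatever package the calling script depends on.
\examples{
\dontrun{
# Fail fast at the top of a script if a dependency is missing
need("stringr")
stringr::str_trim("  padded  ")
}
}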
/man/need.Rd
permissive
jedwards24/edwards
R
false
true
827
rd
library(plyr)         # To manipulate data
library(ggplot2)      # To have ggplot2 graphic interface
library(lattice)      # To have Lattice graphic interface
library(rgdal)        # To load "shapefiles" into R and use in conversions of spatial formats
library(rgeos)        # To use in geometrical operations
library(spatstat)     # To use with Voronoi Tessellation
library(sp)           # Methods for retrieving coordinates from spatial objects.
library(maptools)     # A package for building maps
library(maps)         # A package for building maps
library(RColorBrewer) # To build RColorBrewer color palettes
library(grDevices)    # To build grDevices color palettes
library(reshape2)     # To have more flexibility when transforming data
library(rCharts)      # To create and customize interactive JavaScript visualizations in R
library(knitr)        # For dynamic report generation in R
library(base64enc)    # Tools for base64 encoding
suppressPackageStartupMessages(library(googleVis)) # To use with Google visualization in R

setwd('/Users/XingCui/Desktop/DS_1004_Big_Data/Project/Neighborhood Tabulation Areas/')

norway2 <- readOGR(dsn = ".", "geo_export_b9ed049f-9a51-476c-bbfa-739885e2831f")
norway2_data <- norway2@data
str(norway2_data)
head(norway2_data)

prediction = read.csv('newfinal.csv', col.names = c('x','label','ids','name'))
d <- norway2_data$ntaname
e = prediction$label
name3 <- c("NAME_1", "Churn")
# Build the data frame directly; cbind() would coerce the factor `d`
# to its integer codes and lose the region names.
dt2 <- data.frame(d, e, stringsAsFactors = TRUE)
dt2$e <- as.numeric(dt2$e)
colnames(dt2) <- name3
churn <- dt2

IDs <- prediction$ids # used to be dt2$d
norway3_new <- unionSpatialPolygons(norway2, IDs)
norway4_new <- SpatialPolygonsDataFrame(norway3_new, churn)

color = c('steelblue1','slateblue','orangered3','khaki')
# pal2 <- colorRampPalette(c("aliceblue", "darkcyan")) # linen
trellis.par.set(axis.line = list(col = NA)) # Remove the plot frame
spplot(norway4_new, "Churn", main = "Uber VS Taxi", # Plot the regions with Lattice
       lwd = .4, col = "black",
       as.table = TRUE,
       col.regions = color, # pal2(19)
       # border(outside = TRUE),
       colorkey = TRUE,
       scales = list(draw = TRUE),
       bty = "n")
# colorkey = list(space = "bottom"),
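# A sanity check worth running before SpatialPolygonsDataFrame() (not in the
# original script): the row names of `churn` must match the polygon IDs
# produced by unionSpatialPolygons(), or the constructor will error.
polyIDs <- sapply(slot(norway3_new, "polygons"), function(p) slot(p, "ID"))
all(row.names(churn) %in% polyIDs) # expect TRUE; otherwise try row.names(churn) <- polyIDs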
/Project_1004/Map Visualization/prediction.R
no_license
kimiliu1992/1004_Project
R
false
false
2,171
r
rm(list=ls())
Sys.setlocale("LC_TIME", "English")

# Read the txt file
h <- read.csv("C:/Users/lvizzini/Desktop/Coursera/exdata_data_household_power_consumption/household_power_consumption.txt",
              sep=";", dec=".", na.strings="?", stringsAsFactors=FALSE)

# Transforming the character Date/Time columns into a date-time
h$d_t <- strptime(paste(h$Date, h$Time, sep=" "), "%d/%m/%Y %H:%M:%S")

# Subsetting two dates
d <- subset(h, (Date=="1/2/2007" | Date=="2/2/2007"))

png("plot4.png", width=480, height=480)
par(mfrow = c(2, 2))

# Plot line of Global Active Power (kilowatts) across time
plot(d$d_t, d$Global_active_power, type="l", xlab="", ylab="Global Active Power")

# Plot line of Voltage across time
plot(d$d_t, d$Voltage, type="l", xlab="datetime", ylab="Voltage")

# Plot lines of Energy sub metering across time
plot(d$d_t, d$Sub_metering_1, type="l", ylab="Energy sub metering", xlab="")
lines(d$d_t, d$Sub_metering_2, type="l", col="red")
lines(d$d_t, d$Sub_metering_3, type="l", col="blue")
legend("topright", c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"),
       lty=1, lwd=1, col=c("black", "red", "blue"), bty = "n")

# Plot line of Global_reactive_power across time
plot(d$d_t, d$Global_reactive_power, type="l", xlab="datetime", ylab="Global_reactive_power")

dev.off()
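# An alternative, format-independent subset (a sketch, not from the original
# script): compare parsed dates rather than the raw "d/m/Y" strings, which
# would silently match nothing if the file's date format ever changed.
d2 <- subset(h, as.Date(Date, "%d/%m/%Y") %in% as.Date(c("2007-02-01", "2007-02-02")))
identical(dim(d2), dim(d)) # expect TRUE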
/Plot4.R
no_license
lvizzini/Exploratory-Data-Analysis
R
false
false
1,236
r